scipy-1.16.2-cp312-cp312-win_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
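A wheel is an ordinary ZIP archive, so a file listing like the one below can be reproduced locally with Python's standard library. This is a minimal sketch under stated assumptions, not the tool used to generate this page: it assumes the wheel has already been downloaded (for example with `pip download scipy==1.16.2 --no-deps`), and the local path is illustrative.

```python
# Minimal sketch: enumerate the files inside a downloaded wheel.
# The path below is a hypothetical local filename, not part of this page.
import zipfile

WHEEL_PATH = "scipy-1.16.2-cp312-cp312-win_arm64.whl"

with zipfile.ZipFile(WHEEL_PATH) as wheel:
    for info in wheel.infolist():
        # Print each member's path and its uncompressed size in bytes.
        print(f"{info.filename}  ({info.file_size} bytes)")
```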
Files changed (1530)
  1. scipy/__config__.py +161 -0
  2. scipy/__init__.py +150 -0
  3. scipy/_cyutility.cp312-win_arm64.lib +0 -0
  4. scipy/_cyutility.cp312-win_arm64.pyd +0 -0
  5. scipy/_distributor_init.py +18 -0
  6. scipy/_lib/__init__.py +14 -0
  7. scipy/_lib/_array_api.py +931 -0
  8. scipy/_lib/_array_api_compat_vendor.py +9 -0
  9. scipy/_lib/_array_api_no_0d.py +103 -0
  10. scipy/_lib/_bunch.py +229 -0
  11. scipy/_lib/_ccallback.py +251 -0
  12. scipy/_lib/_ccallback_c.cp312-win_arm64.lib +0 -0
  13. scipy/_lib/_ccallback_c.cp312-win_arm64.pyd +0 -0
  14. scipy/_lib/_disjoint_set.py +254 -0
  15. scipy/_lib/_docscrape.py +761 -0
  16. scipy/_lib/_elementwise_iterative_method.py +346 -0
  17. scipy/_lib/_fpumode.cp312-win_arm64.lib +0 -0
  18. scipy/_lib/_fpumode.cp312-win_arm64.pyd +0 -0
  19. scipy/_lib/_gcutils.py +105 -0
  20. scipy/_lib/_pep440.py +487 -0
  21. scipy/_lib/_sparse.py +41 -0
  22. scipy/_lib/_test_ccallback.cp312-win_arm64.lib +0 -0
  23. scipy/_lib/_test_ccallback.cp312-win_arm64.pyd +0 -0
  24. scipy/_lib/_test_deprecation_call.cp312-win_arm64.lib +0 -0
  25. scipy/_lib/_test_deprecation_call.cp312-win_arm64.pyd +0 -0
  26. scipy/_lib/_test_deprecation_def.cp312-win_arm64.lib +0 -0
  27. scipy/_lib/_test_deprecation_def.cp312-win_arm64.pyd +0 -0
  28. scipy/_lib/_testutils.py +373 -0
  29. scipy/_lib/_threadsafety.py +58 -0
  30. scipy/_lib/_tmpdirs.py +86 -0
  31. scipy/_lib/_uarray/LICENSE +29 -0
  32. scipy/_lib/_uarray/__init__.py +116 -0
  33. scipy/_lib/_uarray/_backend.py +707 -0
  34. scipy/_lib/_uarray/_uarray.cp312-win_arm64.lib +0 -0
  35. scipy/_lib/_uarray/_uarray.cp312-win_arm64.pyd +0 -0
  36. scipy/_lib/_util.py +1283 -0
  37. scipy/_lib/array_api_compat/__init__.py +22 -0
  38. scipy/_lib/array_api_compat/_internal.py +59 -0
  39. scipy/_lib/array_api_compat/common/__init__.py +1 -0
  40. scipy/_lib/array_api_compat/common/_aliases.py +727 -0
  41. scipy/_lib/array_api_compat/common/_fft.py +213 -0
  42. scipy/_lib/array_api_compat/common/_helpers.py +1058 -0
  43. scipy/_lib/array_api_compat/common/_linalg.py +232 -0
  44. scipy/_lib/array_api_compat/common/_typing.py +192 -0
  45. scipy/_lib/array_api_compat/cupy/__init__.py +13 -0
  46. scipy/_lib/array_api_compat/cupy/_aliases.py +156 -0
  47. scipy/_lib/array_api_compat/cupy/_info.py +336 -0
  48. scipy/_lib/array_api_compat/cupy/_typing.py +31 -0
  49. scipy/_lib/array_api_compat/cupy/fft.py +36 -0
  50. scipy/_lib/array_api_compat/cupy/linalg.py +49 -0
  51. scipy/_lib/array_api_compat/dask/__init__.py +0 -0
  52. scipy/_lib/array_api_compat/dask/array/__init__.py +12 -0
  53. scipy/_lib/array_api_compat/dask/array/_aliases.py +376 -0
  54. scipy/_lib/array_api_compat/dask/array/_info.py +416 -0
  55. scipy/_lib/array_api_compat/dask/array/fft.py +21 -0
  56. scipy/_lib/array_api_compat/dask/array/linalg.py +72 -0
  57. scipy/_lib/array_api_compat/numpy/__init__.py +28 -0
  58. scipy/_lib/array_api_compat/numpy/_aliases.py +190 -0
  59. scipy/_lib/array_api_compat/numpy/_info.py +366 -0
  60. scipy/_lib/array_api_compat/numpy/_typing.py +30 -0
  61. scipy/_lib/array_api_compat/numpy/fft.py +35 -0
  62. scipy/_lib/array_api_compat/numpy/linalg.py +143 -0
  63. scipy/_lib/array_api_compat/torch/__init__.py +22 -0
  64. scipy/_lib/array_api_compat/torch/_aliases.py +855 -0
  65. scipy/_lib/array_api_compat/torch/_info.py +369 -0
  66. scipy/_lib/array_api_compat/torch/_typing.py +3 -0
  67. scipy/_lib/array_api_compat/torch/fft.py +85 -0
  68. scipy/_lib/array_api_compat/torch/linalg.py +121 -0
  69. scipy/_lib/array_api_extra/__init__.py +38 -0
  70. scipy/_lib/array_api_extra/_delegation.py +171 -0
  71. scipy/_lib/array_api_extra/_lib/__init__.py +1 -0
  72. scipy/_lib/array_api_extra/_lib/_at.py +463 -0
  73. scipy/_lib/array_api_extra/_lib/_backends.py +46 -0
  74. scipy/_lib/array_api_extra/_lib/_funcs.py +937 -0
  75. scipy/_lib/array_api_extra/_lib/_lazy.py +357 -0
  76. scipy/_lib/array_api_extra/_lib/_testing.py +278 -0
  77. scipy/_lib/array_api_extra/_lib/_utils/__init__.py +1 -0
  78. scipy/_lib/array_api_extra/_lib/_utils/_compat.py +74 -0
  79. scipy/_lib/array_api_extra/_lib/_utils/_compat.pyi +45 -0
  80. scipy/_lib/array_api_extra/_lib/_utils/_helpers.py +559 -0
  81. scipy/_lib/array_api_extra/_lib/_utils/_typing.py +10 -0
  82. scipy/_lib/array_api_extra/_lib/_utils/_typing.pyi +105 -0
  83. scipy/_lib/array_api_extra/testing.py +359 -0
  84. scipy/_lib/cobyqa/__init__.py +20 -0
  85. scipy/_lib/cobyqa/framework.py +1240 -0
  86. scipy/_lib/cobyqa/main.py +1506 -0
  87. scipy/_lib/cobyqa/models.py +1529 -0
  88. scipy/_lib/cobyqa/problem.py +1296 -0
  89. scipy/_lib/cobyqa/settings.py +132 -0
  90. scipy/_lib/cobyqa/subsolvers/__init__.py +14 -0
  91. scipy/_lib/cobyqa/subsolvers/geometry.py +387 -0
  92. scipy/_lib/cobyqa/subsolvers/optim.py +1203 -0
  93. scipy/_lib/cobyqa/utils/__init__.py +18 -0
  94. scipy/_lib/cobyqa/utils/exceptions.py +22 -0
  95. scipy/_lib/cobyqa/utils/math.py +77 -0
  96. scipy/_lib/cobyqa/utils/versions.py +67 -0
  97. scipy/_lib/decorator.py +399 -0
  98. scipy/_lib/deprecation.py +274 -0
  99. scipy/_lib/doccer.py +366 -0
  100. scipy/_lib/messagestream.cp312-win_arm64.lib +0 -0
  101. scipy/_lib/messagestream.cp312-win_arm64.pyd +0 -0
  102. scipy/_lib/pyprima/__init__.py +212 -0
  103. scipy/_lib/pyprima/cobyla/__init__.py +0 -0
  104. scipy/_lib/pyprima/cobyla/cobyla.py +559 -0
  105. scipy/_lib/pyprima/cobyla/cobylb.py +714 -0
  106. scipy/_lib/pyprima/cobyla/geometry.py +226 -0
  107. scipy/_lib/pyprima/cobyla/initialize.py +215 -0
  108. scipy/_lib/pyprima/cobyla/trustregion.py +492 -0
  109. scipy/_lib/pyprima/cobyla/update.py +289 -0
  110. scipy/_lib/pyprima/common/__init__.py +0 -0
  111. scipy/_lib/pyprima/common/_bounds.py +34 -0
  112. scipy/_lib/pyprima/common/_linear_constraints.py +46 -0
  113. scipy/_lib/pyprima/common/_nonlinear_constraints.py +54 -0
  114. scipy/_lib/pyprima/common/_project.py +173 -0
  115. scipy/_lib/pyprima/common/checkbreak.py +93 -0
  116. scipy/_lib/pyprima/common/consts.py +47 -0
  117. scipy/_lib/pyprima/common/evaluate.py +99 -0
  118. scipy/_lib/pyprima/common/history.py +38 -0
  119. scipy/_lib/pyprima/common/infos.py +30 -0
  120. scipy/_lib/pyprima/common/linalg.py +435 -0
  121. scipy/_lib/pyprima/common/message.py +290 -0
  122. scipy/_lib/pyprima/common/powalg.py +131 -0
  123. scipy/_lib/pyprima/common/preproc.py +277 -0
  124. scipy/_lib/pyprima/common/present.py +5 -0
  125. scipy/_lib/pyprima/common/ratio.py +54 -0
  126. scipy/_lib/pyprima/common/redrho.py +47 -0
  127. scipy/_lib/pyprima/common/selectx.py +296 -0
  128. scipy/_lib/tests/__init__.py +0 -0
  129. scipy/_lib/tests/test__gcutils.py +110 -0
  130. scipy/_lib/tests/test__pep440.py +67 -0
  131. scipy/_lib/tests/test__testutils.py +32 -0
  132. scipy/_lib/tests/test__threadsafety.py +51 -0
  133. scipy/_lib/tests/test__util.py +641 -0
  134. scipy/_lib/tests/test_array_api.py +322 -0
  135. scipy/_lib/tests/test_bunch.py +169 -0
  136. scipy/_lib/tests/test_ccallback.py +196 -0
  137. scipy/_lib/tests/test_config.py +45 -0
  138. scipy/_lib/tests/test_deprecation.py +10 -0
  139. scipy/_lib/tests/test_doccer.py +143 -0
  140. scipy/_lib/tests/test_import_cycles.py +18 -0
  141. scipy/_lib/tests/test_public_api.py +482 -0
  142. scipy/_lib/tests/test_scipy_version.py +28 -0
  143. scipy/_lib/tests/test_tmpdirs.py +48 -0
  144. scipy/_lib/tests/test_warnings.py +137 -0
  145. scipy/_lib/uarray.py +31 -0
  146. scipy/cluster/__init__.py +31 -0
  147. scipy/cluster/_hierarchy.cp312-win_arm64.lib +0 -0
  148. scipy/cluster/_hierarchy.cp312-win_arm64.pyd +0 -0
  149. scipy/cluster/_optimal_leaf_ordering.cp312-win_arm64.lib +0 -0
  150. scipy/cluster/_optimal_leaf_ordering.cp312-win_arm64.pyd +0 -0
  151. scipy/cluster/_vq.cp312-win_arm64.lib +0 -0
  152. scipy/cluster/_vq.cp312-win_arm64.pyd +0 -0
  153. scipy/cluster/hierarchy.py +4348 -0
  154. scipy/cluster/tests/__init__.py +0 -0
  155. scipy/cluster/tests/hierarchy_test_data.py +145 -0
  156. scipy/cluster/tests/test_disjoint_set.py +202 -0
  157. scipy/cluster/tests/test_hierarchy.py +1238 -0
  158. scipy/cluster/tests/test_vq.py +434 -0
  159. scipy/cluster/vq.py +832 -0
  160. scipy/conftest.py +683 -0
  161. scipy/constants/__init__.py +358 -0
  162. scipy/constants/_codata.py +2266 -0
  163. scipy/constants/_constants.py +369 -0
  164. scipy/constants/codata.py +21 -0
  165. scipy/constants/constants.py +53 -0
  166. scipy/constants/tests/__init__.py +0 -0
  167. scipy/constants/tests/test_codata.py +78 -0
  168. scipy/constants/tests/test_constants.py +83 -0
  169. scipy/datasets/__init__.py +90 -0
  170. scipy/datasets/_download_all.py +71 -0
  171. scipy/datasets/_fetchers.py +225 -0
  172. scipy/datasets/_registry.py +26 -0
  173. scipy/datasets/_utils.py +81 -0
  174. scipy/datasets/tests/__init__.py +0 -0
  175. scipy/datasets/tests/test_data.py +128 -0
  176. scipy/differentiate/__init__.py +27 -0
  177. scipy/differentiate/_differentiate.py +1129 -0
  178. scipy/differentiate/tests/__init__.py +0 -0
  179. scipy/differentiate/tests/test_differentiate.py +694 -0
  180. scipy/fft/__init__.py +114 -0
  181. scipy/fft/_backend.py +196 -0
  182. scipy/fft/_basic.py +1650 -0
  183. scipy/fft/_basic_backend.py +197 -0
  184. scipy/fft/_debug_backends.py +22 -0
  185. scipy/fft/_fftlog.py +223 -0
  186. scipy/fft/_fftlog_backend.py +200 -0
  187. scipy/fft/_helper.py +348 -0
  188. scipy/fft/_pocketfft/LICENSE.md +25 -0
  189. scipy/fft/_pocketfft/__init__.py +9 -0
  190. scipy/fft/_pocketfft/basic.py +251 -0
  191. scipy/fft/_pocketfft/helper.py +249 -0
  192. scipy/fft/_pocketfft/pypocketfft.cp312-win_arm64.lib +0 -0
  193. scipy/fft/_pocketfft/pypocketfft.cp312-win_arm64.pyd +0 -0
  194. scipy/fft/_pocketfft/realtransforms.py +109 -0
  195. scipy/fft/_pocketfft/tests/__init__.py +0 -0
  196. scipy/fft/_pocketfft/tests/test_basic.py +1011 -0
  197. scipy/fft/_pocketfft/tests/test_real_transforms.py +505 -0
  198. scipy/fft/_realtransforms.py +706 -0
  199. scipy/fft/_realtransforms_backend.py +63 -0
  200. scipy/fft/tests/__init__.py +0 -0
  201. scipy/fft/tests/mock_backend.py +96 -0
  202. scipy/fft/tests/test_backend.py +98 -0
  203. scipy/fft/tests/test_basic.py +504 -0
  204. scipy/fft/tests/test_fftlog.py +215 -0
  205. scipy/fft/tests/test_helper.py +558 -0
  206. scipy/fft/tests/test_multithreading.py +84 -0
  207. scipy/fft/tests/test_real_transforms.py +247 -0
  208. scipy/fftpack/__init__.py +103 -0
  209. scipy/fftpack/_basic.py +428 -0
  210. scipy/fftpack/_helper.py +115 -0
  211. scipy/fftpack/_pseudo_diffs.py +554 -0
  212. scipy/fftpack/_realtransforms.py +598 -0
  213. scipy/fftpack/basic.py +20 -0
  214. scipy/fftpack/convolve.cp312-win_arm64.lib +0 -0
  215. scipy/fftpack/convolve.cp312-win_arm64.pyd +0 -0
  216. scipy/fftpack/helper.py +19 -0
  217. scipy/fftpack/pseudo_diffs.py +22 -0
  218. scipy/fftpack/realtransforms.py +19 -0
  219. scipy/fftpack/tests/__init__.py +0 -0
  220. scipy/fftpack/tests/fftw_double_ref.npz +0 -0
  221. scipy/fftpack/tests/fftw_longdouble_ref.npz +0 -0
  222. scipy/fftpack/tests/fftw_single_ref.npz +0 -0
  223. scipy/fftpack/tests/test.npz +0 -0
  224. scipy/fftpack/tests/test_basic.py +877 -0
  225. scipy/fftpack/tests/test_helper.py +54 -0
  226. scipy/fftpack/tests/test_import.py +33 -0
  227. scipy/fftpack/tests/test_pseudo_diffs.py +388 -0
  228. scipy/fftpack/tests/test_real_transforms.py +836 -0
  229. scipy/integrate/__init__.py +122 -0
  230. scipy/integrate/_bvp.py +1160 -0
  231. scipy/integrate/_cubature.py +729 -0
  232. scipy/integrate/_dop.cp312-win_arm64.lib +0 -0
  233. scipy/integrate/_dop.cp312-win_arm64.pyd +0 -0
  234. scipy/integrate/_ivp/__init__.py +8 -0
  235. scipy/integrate/_ivp/base.py +290 -0
  236. scipy/integrate/_ivp/bdf.py +478 -0
  237. scipy/integrate/_ivp/common.py +451 -0
  238. scipy/integrate/_ivp/dop853_coefficients.py +193 -0
  239. scipy/integrate/_ivp/ivp.py +755 -0
  240. scipy/integrate/_ivp/lsoda.py +224 -0
  241. scipy/integrate/_ivp/radau.py +572 -0
  242. scipy/integrate/_ivp/rk.py +601 -0
  243. scipy/integrate/_ivp/tests/__init__.py +0 -0
  244. scipy/integrate/_ivp/tests/test_ivp.py +1287 -0
  245. scipy/integrate/_ivp/tests/test_rk.py +37 -0
  246. scipy/integrate/_lebedev.py +5450 -0
  247. scipy/integrate/_lsoda.cp312-win_arm64.lib +0 -0
  248. scipy/integrate/_lsoda.cp312-win_arm64.pyd +0 -0
  249. scipy/integrate/_ode.py +1395 -0
  250. scipy/integrate/_odepack.cp312-win_arm64.lib +0 -0
  251. scipy/integrate/_odepack.cp312-win_arm64.pyd +0 -0
  252. scipy/integrate/_odepack_py.py +273 -0
  253. scipy/integrate/_quad_vec.py +674 -0
  254. scipy/integrate/_quadpack.cp312-win_arm64.lib +0 -0
  255. scipy/integrate/_quadpack.cp312-win_arm64.pyd +0 -0
  256. scipy/integrate/_quadpack_py.py +1283 -0
  257. scipy/integrate/_quadrature.py +1336 -0
  258. scipy/integrate/_rules/__init__.py +12 -0
  259. scipy/integrate/_rules/_base.py +518 -0
  260. scipy/integrate/_rules/_gauss_kronrod.py +202 -0
  261. scipy/integrate/_rules/_gauss_legendre.py +62 -0
  262. scipy/integrate/_rules/_genz_malik.py +210 -0
  263. scipy/integrate/_tanhsinh.py +1385 -0
  264. scipy/integrate/_test_multivariate.cp312-win_arm64.lib +0 -0
  265. scipy/integrate/_test_multivariate.cp312-win_arm64.pyd +0 -0
  266. scipy/integrate/_test_odeint_banded.cp312-win_arm64.lib +0 -0
  267. scipy/integrate/_test_odeint_banded.cp312-win_arm64.pyd +0 -0
  268. scipy/integrate/_vode.cp312-win_arm64.lib +0 -0
  269. scipy/integrate/_vode.cp312-win_arm64.pyd +0 -0
  270. scipy/integrate/dop.py +15 -0
  271. scipy/integrate/lsoda.py +15 -0
  272. scipy/integrate/odepack.py +17 -0
  273. scipy/integrate/quadpack.py +23 -0
  274. scipy/integrate/tests/__init__.py +0 -0
  275. scipy/integrate/tests/test__quad_vec.py +211 -0
  276. scipy/integrate/tests/test_banded_ode_solvers.py +305 -0
  277. scipy/integrate/tests/test_bvp.py +714 -0
  278. scipy/integrate/tests/test_cubature.py +1375 -0
  279. scipy/integrate/tests/test_integrate.py +840 -0
  280. scipy/integrate/tests/test_odeint_jac.py +74 -0
  281. scipy/integrate/tests/test_quadpack.py +680 -0
  282. scipy/integrate/tests/test_quadrature.py +730 -0
  283. scipy/integrate/tests/test_tanhsinh.py +1171 -0
  284. scipy/integrate/vode.py +15 -0
  285. scipy/interpolate/__init__.py +228 -0
  286. scipy/interpolate/_bary_rational.py +715 -0
  287. scipy/interpolate/_bsplines.py +2469 -0
  288. scipy/interpolate/_cubic.py +973 -0
  289. scipy/interpolate/_dfitpack.cp312-win_arm64.lib +0 -0
  290. scipy/interpolate/_dfitpack.cp312-win_arm64.pyd +0 -0
  291. scipy/interpolate/_dierckx.cp312-win_arm64.lib +0 -0
  292. scipy/interpolate/_dierckx.cp312-win_arm64.pyd +0 -0
  293. scipy/interpolate/_fitpack.cp312-win_arm64.lib +0 -0
  294. scipy/interpolate/_fitpack.cp312-win_arm64.pyd +0 -0
  295. scipy/interpolate/_fitpack2.py +2397 -0
  296. scipy/interpolate/_fitpack_impl.py +811 -0
  297. scipy/interpolate/_fitpack_py.py +898 -0
  298. scipy/interpolate/_fitpack_repro.py +996 -0
  299. scipy/interpolate/_interpnd.cp312-win_arm64.lib +0 -0
  300. scipy/interpolate/_interpnd.cp312-win_arm64.pyd +0 -0
  301. scipy/interpolate/_interpolate.py +2266 -0
  302. scipy/interpolate/_ndbspline.py +415 -0
  303. scipy/interpolate/_ndgriddata.py +329 -0
  304. scipy/interpolate/_pade.py +67 -0
  305. scipy/interpolate/_polyint.py +1025 -0
  306. scipy/interpolate/_ppoly.cp312-win_arm64.lib +0 -0
  307. scipy/interpolate/_ppoly.cp312-win_arm64.pyd +0 -0
  308. scipy/interpolate/_rbf.py +290 -0
  309. scipy/interpolate/_rbfinterp.py +550 -0
  310. scipy/interpolate/_rbfinterp_pythran.cp312-win_arm64.lib +0 -0
  311. scipy/interpolate/_rbfinterp_pythran.cp312-win_arm64.pyd +0 -0
  312. scipy/interpolate/_rgi.py +764 -0
  313. scipy/interpolate/_rgi_cython.cp312-win_arm64.lib +0 -0
  314. scipy/interpolate/_rgi_cython.cp312-win_arm64.pyd +0 -0
  315. scipy/interpolate/dfitpack.py +24 -0
  316. scipy/interpolate/fitpack.py +31 -0
  317. scipy/interpolate/fitpack2.py +29 -0
  318. scipy/interpolate/interpnd.py +24 -0
  319. scipy/interpolate/interpolate.py +30 -0
  320. scipy/interpolate/ndgriddata.py +23 -0
  321. scipy/interpolate/polyint.py +24 -0
  322. scipy/interpolate/rbf.py +18 -0
  323. scipy/interpolate/tests/__init__.py +0 -0
  324. scipy/interpolate/tests/data/bug-1310.npz +0 -0
  325. scipy/interpolate/tests/data/estimate_gradients_hang.npy +0 -0
  326. scipy/interpolate/tests/data/gcvspl.npz +0 -0
  327. scipy/interpolate/tests/test_bary_rational.py +368 -0
  328. scipy/interpolate/tests/test_bsplines.py +3754 -0
  329. scipy/interpolate/tests/test_fitpack.py +519 -0
  330. scipy/interpolate/tests/test_fitpack2.py +1431 -0
  331. scipy/interpolate/tests/test_gil.py +64 -0
  332. scipy/interpolate/tests/test_interpnd.py +452 -0
  333. scipy/interpolate/tests/test_interpolate.py +2630 -0
  334. scipy/interpolate/tests/test_ndgriddata.py +308 -0
  335. scipy/interpolate/tests/test_pade.py +107 -0
  336. scipy/interpolate/tests/test_polyint.py +972 -0
  337. scipy/interpolate/tests/test_rbf.py +246 -0
  338. scipy/interpolate/tests/test_rbfinterp.py +534 -0
  339. scipy/interpolate/tests/test_rgi.py +1151 -0
  340. scipy/io/__init__.py +116 -0
  341. scipy/io/_fast_matrix_market/__init__.py +600 -0
  342. scipy/io/_fast_matrix_market/_fmm_core.cp312-win_arm64.lib +0 -0
  343. scipy/io/_fast_matrix_market/_fmm_core.cp312-win_arm64.pyd +0 -0
  344. scipy/io/_fortran.py +354 -0
  345. scipy/io/_harwell_boeing/__init__.py +7 -0
  346. scipy/io/_harwell_boeing/_fortran_format_parser.py +316 -0
  347. scipy/io/_harwell_boeing/hb.py +571 -0
  348. scipy/io/_harwell_boeing/tests/__init__.py +0 -0
  349. scipy/io/_harwell_boeing/tests/test_fortran_format.py +74 -0
  350. scipy/io/_harwell_boeing/tests/test_hb.py +70 -0
  351. scipy/io/_idl.py +917 -0
  352. scipy/io/_mmio.py +968 -0
  353. scipy/io/_netcdf.py +1104 -0
  354. scipy/io/_test_fortran.cp312-win_arm64.lib +0 -0
  355. scipy/io/_test_fortran.cp312-win_arm64.pyd +0 -0
  356. scipy/io/arff/__init__.py +28 -0
  357. scipy/io/arff/_arffread.py +873 -0
  358. scipy/io/arff/arffread.py +19 -0
  359. scipy/io/arff/tests/__init__.py +0 -0
  360. scipy/io/arff/tests/data/iris.arff +225 -0
  361. scipy/io/arff/tests/data/missing.arff +8 -0
  362. scipy/io/arff/tests/data/nodata.arff +11 -0
  363. scipy/io/arff/tests/data/quoted_nominal.arff +13 -0
  364. scipy/io/arff/tests/data/quoted_nominal_spaces.arff +13 -0
  365. scipy/io/arff/tests/data/test1.arff +10 -0
  366. scipy/io/arff/tests/data/test10.arff +8 -0
  367. scipy/io/arff/tests/data/test11.arff +11 -0
  368. scipy/io/arff/tests/data/test2.arff +15 -0
  369. scipy/io/arff/tests/data/test3.arff +6 -0
  370. scipy/io/arff/tests/data/test4.arff +11 -0
  371. scipy/io/arff/tests/data/test5.arff +26 -0
  372. scipy/io/arff/tests/data/test6.arff +12 -0
  373. scipy/io/arff/tests/data/test7.arff +15 -0
  374. scipy/io/arff/tests/data/test8.arff +12 -0
  375. scipy/io/arff/tests/data/test9.arff +14 -0
  376. scipy/io/arff/tests/test_arffread.py +421 -0
  377. scipy/io/harwell_boeing.py +17 -0
  378. scipy/io/idl.py +17 -0
  379. scipy/io/matlab/__init__.py +66 -0
  380. scipy/io/matlab/_byteordercodes.py +75 -0
  381. scipy/io/matlab/_mio.py +375 -0
  382. scipy/io/matlab/_mio4.py +632 -0
  383. scipy/io/matlab/_mio5.py +901 -0
  384. scipy/io/matlab/_mio5_params.py +281 -0
  385. scipy/io/matlab/_mio5_utils.cp312-win_arm64.lib +0 -0
  386. scipy/io/matlab/_mio5_utils.cp312-win_arm64.pyd +0 -0
  387. scipy/io/matlab/_mio_utils.cp312-win_arm64.lib +0 -0
  388. scipy/io/matlab/_mio_utils.cp312-win_arm64.pyd +0 -0
  389. scipy/io/matlab/_miobase.py +435 -0
  390. scipy/io/matlab/_streams.cp312-win_arm64.lib +0 -0
  391. scipy/io/matlab/_streams.cp312-win_arm64.pyd +0 -0
  392. scipy/io/matlab/byteordercodes.py +17 -0
  393. scipy/io/matlab/mio.py +16 -0
  394. scipy/io/matlab/mio4.py +17 -0
  395. scipy/io/matlab/mio5.py +19 -0
  396. scipy/io/matlab/mio5_params.py +18 -0
  397. scipy/io/matlab/mio5_utils.py +17 -0
  398. scipy/io/matlab/mio_utils.py +17 -0
  399. scipy/io/matlab/miobase.py +16 -0
  400. scipy/io/matlab/streams.py +16 -0
  401. scipy/io/matlab/tests/__init__.py +0 -0
  402. scipy/io/matlab/tests/data/bad_miuint32.mat +0 -0
  403. scipy/io/matlab/tests/data/bad_miutf8_array_name.mat +0 -0
  404. scipy/io/matlab/tests/data/big_endian.mat +0 -0
  405. scipy/io/matlab/tests/data/broken_utf8.mat +0 -0
  406. scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat +0 -0
  407. scipy/io/matlab/tests/data/corrupted_zlib_data.mat +0 -0
  408. scipy/io/matlab/tests/data/debigged_m4.mat +0 -0
  409. scipy/io/matlab/tests/data/japanese_utf8.txt +5 -0
  410. scipy/io/matlab/tests/data/little_endian.mat +0 -0
  411. scipy/io/matlab/tests/data/logical_sparse.mat +0 -0
  412. scipy/io/matlab/tests/data/malformed1.mat +0 -0
  413. scipy/io/matlab/tests/data/miuint32_for_miint32.mat +0 -0
  414. scipy/io/matlab/tests/data/miutf8_array_name.mat +0 -0
  415. scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat +0 -0
  416. scipy/io/matlab/tests/data/one_by_zero_char.mat +0 -0
  417. scipy/io/matlab/tests/data/parabola.mat +0 -0
  418. scipy/io/matlab/tests/data/single_empty_string.mat +0 -0
  419. scipy/io/matlab/tests/data/some_functions.mat +0 -0
  420. scipy/io/matlab/tests/data/sqr.mat +0 -0
  421. scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat +0 -0
  422. scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat +0 -0
  423. scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat +0 -0
  424. scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat +0 -0
  425. scipy/io/matlab/tests/data/test_empty_struct.mat +0 -0
  426. scipy/io/matlab/tests/data/test_mat4_le_floats.mat +0 -0
  427. scipy/io/matlab/tests/data/test_skip_variable.mat +0 -0
  428. scipy/io/matlab/tests/data/testbool_8_WIN64.mat +0 -0
  429. scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat +0 -0
  430. scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat +0 -0
  431. scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat +0 -0
  432. scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat +0 -0
  433. scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat +0 -0
  434. scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat +0 -0
  435. scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat +0 -0
  436. scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat +0 -0
  437. scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat +0 -0
  438. scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat +0 -0
  439. scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat +0 -0
  440. scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat +0 -0
  441. scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat +0 -0
  442. scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat +0 -0
  443. scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat +0 -0
  444. scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat +0 -0
  445. scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat +0 -0
  446. scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat +0 -0
  447. scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat +0 -0
  448. scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat +0 -0
  449. scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat +0 -0
  450. scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat +0 -0
  451. scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat +0 -0
  452. scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat +0 -0
  453. scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat +0 -0
  454. scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat +0 -0
  455. scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat +0 -0
  456. scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat +0 -0
  457. scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat +0 -0
  458. scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat +0 -0
  459. scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat +0 -0
  460. scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat +0 -0
  461. scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat +0 -0
  462. scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat +0 -0
  463. scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat +0 -0
  464. scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat +0 -0
  465. scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat +0 -0
  466. scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat +0 -0
  467. scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat +0 -0
  468. scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat +0 -0
  469. scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat +0 -0
  470. scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat +0 -0
  471. scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat +0 -0
  472. scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat +0 -0
  473. scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat +0 -0
  474. scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat +0 -0
  475. scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat +0 -0
  476. scipy/io/matlab/tests/data/testsimplecell.mat +0 -0
  477. scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat +0 -0
  478. scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat +0 -0
  479. scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat +0 -0
  480. scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat +0 -0
  481. scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat +0 -0
  482. scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat +0 -0
  483. scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat +0 -0
  484. scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat +0 -0
  485. scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat +0 -0
  486. scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat +0 -0
  487. scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat +0 -0
  488. scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat +0 -0
  489. scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat +0 -0
  490. scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat +0 -0
  491. scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat +0 -0
  492. scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat +0 -0
  493. scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat +0 -0
  494. scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat +0 -0
  495. scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat +0 -0
  496. scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat +0 -0
  497. scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat +0 -0
  498. scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat +0 -0
  499. scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat +0 -0
  500. scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat +0 -0
  501. scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat +0 -0
  502. scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat +0 -0
  503. scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat +0 -0
  504. scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat +0 -0
  505. scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat +0 -0
  506. scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat +0 -0
  507. scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat +0 -0
  508. scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat +0 -0
  509. scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat +0 -0
  510. scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat +0 -0
  511. scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat +0 -0
  512. scipy/io/matlab/tests/data/testvec_4_GLNX86.mat +0 -0
  513. scipy/io/matlab/tests/test_byteordercodes.py +29 -0
  514. scipy/io/matlab/tests/test_mio.py +1399 -0
  515. scipy/io/matlab/tests/test_mio5_utils.py +179 -0
  516. scipy/io/matlab/tests/test_mio_funcs.py +51 -0
  517. scipy/io/matlab/tests/test_mio_utils.py +45 -0
  518. scipy/io/matlab/tests/test_miobase.py +32 -0
  519. scipy/io/matlab/tests/test_pathological.py +33 -0
  520. scipy/io/matlab/tests/test_streams.py +241 -0
  521. scipy/io/mmio.py +17 -0
  522. scipy/io/netcdf.py +17 -0
  523. scipy/io/tests/__init__.py +0 -0
  524. scipy/io/tests/data/Transparent Busy.ani +0 -0
  525. scipy/io/tests/data/array_float32_1d.sav +0 -0
  526. scipy/io/tests/data/array_float32_2d.sav +0 -0
  527. scipy/io/tests/data/array_float32_3d.sav +0 -0
  528. scipy/io/tests/data/array_float32_4d.sav +0 -0
  529. scipy/io/tests/data/array_float32_5d.sav +0 -0
  530. scipy/io/tests/data/array_float32_6d.sav +0 -0
  531. scipy/io/tests/data/array_float32_7d.sav +0 -0
  532. scipy/io/tests/data/array_float32_8d.sav +0 -0
  533. scipy/io/tests/data/array_float32_pointer_1d.sav +0 -0
  534. scipy/io/tests/data/array_float32_pointer_2d.sav +0 -0
  535. scipy/io/tests/data/array_float32_pointer_3d.sav +0 -0
  536. scipy/io/tests/data/array_float32_pointer_4d.sav +0 -0
  537. scipy/io/tests/data/array_float32_pointer_5d.sav +0 -0
  538. scipy/io/tests/data/array_float32_pointer_6d.sav +0 -0
  539. scipy/io/tests/data/array_float32_pointer_7d.sav +0 -0
  540. scipy/io/tests/data/array_float32_pointer_8d.sav +0 -0
  541. scipy/io/tests/data/example_1.nc +0 -0
  542. scipy/io/tests/data/example_2.nc +0 -0
  543. scipy/io/tests/data/example_3_maskedvals.nc +0 -0
  544. scipy/io/tests/data/fortran-3x3d-2i.dat +0 -0
  545. scipy/io/tests/data/fortran-mixed.dat +0 -0
  546. scipy/io/tests/data/fortran-sf8-11x1x10.dat +0 -0
  547. scipy/io/tests/data/fortran-sf8-15x10x22.dat +0 -0
  548. scipy/io/tests/data/fortran-sf8-1x1x1.dat +0 -0
  549. scipy/io/tests/data/fortran-sf8-1x1x5.dat +0 -0
  550. scipy/io/tests/data/fortran-sf8-1x1x7.dat +0 -0
  551. scipy/io/tests/data/fortran-sf8-1x3x5.dat +0 -0
  552. scipy/io/tests/data/fortran-si4-11x1x10.dat +0 -0
  553. scipy/io/tests/data/fortran-si4-15x10x22.dat +0 -0
  554. scipy/io/tests/data/fortran-si4-1x1x1.dat +0 -0
  555. scipy/io/tests/data/fortran-si4-1x1x5.dat +0 -0
  556. scipy/io/tests/data/fortran-si4-1x1x7.dat +0 -0
  557. scipy/io/tests/data/fortran-si4-1x3x5.dat +0 -0
  558. scipy/io/tests/data/invalid_pointer.sav +0 -0
  559. scipy/io/tests/data/null_pointer.sav +0 -0
  560. scipy/io/tests/data/scalar_byte.sav +0 -0
  561. scipy/io/tests/data/scalar_byte_descr.sav +0 -0
  562. scipy/io/tests/data/scalar_complex32.sav +0 -0
  563. scipy/io/tests/data/scalar_complex64.sav +0 -0
  564. scipy/io/tests/data/scalar_float32.sav +0 -0
  565. scipy/io/tests/data/scalar_float64.sav +0 -0
  566. scipy/io/tests/data/scalar_heap_pointer.sav +0 -0
  567. scipy/io/tests/data/scalar_int16.sav +0 -0
  568. scipy/io/tests/data/scalar_int32.sav +0 -0
  569. scipy/io/tests/data/scalar_int64.sav +0 -0
  570. scipy/io/tests/data/scalar_string.sav +0 -0
  571. scipy/io/tests/data/scalar_uint16.sav +0 -0
  572. scipy/io/tests/data/scalar_uint32.sav +0 -0
  573. scipy/io/tests/data/scalar_uint64.sav +0 -0
  574. scipy/io/tests/data/struct_arrays.sav +0 -0
  575. scipy/io/tests/data/struct_arrays_byte_idl80.sav +0 -0
  576. scipy/io/tests/data/struct_arrays_replicated.sav +0 -0
  577. scipy/io/tests/data/struct_arrays_replicated_3d.sav +0 -0
  578. scipy/io/tests/data/struct_inherit.sav +0 -0
  579. scipy/io/tests/data/struct_pointer_arrays.sav +0 -0
  580. scipy/io/tests/data/struct_pointer_arrays_replicated.sav +0 -0
  581. scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav +0 -0
  582. scipy/io/tests/data/struct_pointers.sav +0 -0
  583. scipy/io/tests/data/struct_pointers_replicated.sav +0 -0
  584. scipy/io/tests/data/struct_pointers_replicated_3d.sav +0 -0
  585. scipy/io/tests/data/struct_scalars.sav +0 -0
  586. scipy/io/tests/data/struct_scalars_replicated.sav +0 -0
  587. scipy/io/tests/data/struct_scalars_replicated_3d.sav +0 -0
  588. scipy/io/tests/data/test-1234Hz-le-1ch-10S-20bit-extra.wav +0 -0
  589. scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav +0 -0
  590. scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav +0 -0
  591. scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav +0 -0
  592. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav +0 -0
  593. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav +0 -0
  594. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav +0 -0
  595. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-rf64.wav +0 -0
  596. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav +0 -0
  597. scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav +0 -0
  598. scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav +0 -0
  599. scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav +0 -0
  600. scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav +0 -0
  601. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav +0 -0
  602. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-rf64.wav +0 -0
  603. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav +0 -0
  604. scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav +0 -0
  605. scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav +0 -0
  606. scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav +0 -0
  607. scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav +0 -0
  608. scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav +0 -0
  609. scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav +0 -0
  610. scipy/io/tests/data/various_compressed.sav +0 -0
  611. scipy/io/tests/test_fortran.py +264 -0
  612. scipy/io/tests/test_idl.py +483 -0
  613. scipy/io/tests/test_mmio.py +831 -0
  614. scipy/io/tests/test_netcdf.py +550 -0
  615. scipy/io/tests/test_paths.py +93 -0
  616. scipy/io/tests/test_wavfile.py +501 -0
  617. scipy/io/wavfile.py +938 -0
  618. scipy/linalg/__init__.pxd +1 -0
  619. scipy/linalg/__init__.py +236 -0
  620. scipy/linalg/_basic.py +2146 -0
  621. scipy/linalg/_blas_subroutines.h +164 -0
  622. scipy/linalg/_cythonized_array_utils.cp312-win_arm64.lib +0 -0
  623. scipy/linalg/_cythonized_array_utils.cp312-win_arm64.pyd +0 -0
  624. scipy/linalg/_cythonized_array_utils.pxd +40 -0
  625. scipy/linalg/_cythonized_array_utils.pyi +16 -0
  626. scipy/linalg/_decomp.py +1645 -0
  627. scipy/linalg/_decomp_cholesky.py +413 -0
  628. scipy/linalg/_decomp_cossin.py +236 -0
  629. scipy/linalg/_decomp_interpolative.cp312-win_arm64.lib +0 -0
  630. scipy/linalg/_decomp_interpolative.cp312-win_arm64.pyd +0 -0
  631. scipy/linalg/_decomp_ldl.py +356 -0
  632. scipy/linalg/_decomp_lu.py +401 -0
  633. scipy/linalg/_decomp_lu_cython.cp312-win_arm64.lib +0 -0
  634. scipy/linalg/_decomp_lu_cython.cp312-win_arm64.pyd +0 -0
  635. scipy/linalg/_decomp_lu_cython.pyi +6 -0
  636. scipy/linalg/_decomp_polar.py +113 -0
  637. scipy/linalg/_decomp_qr.py +494 -0
  638. scipy/linalg/_decomp_qz.py +452 -0
  639. scipy/linalg/_decomp_schur.py +336 -0
  640. scipy/linalg/_decomp_svd.py +545 -0
  641. scipy/linalg/_decomp_update.cp312-win_arm64.lib +0 -0
  642. scipy/linalg/_decomp_update.cp312-win_arm64.pyd +0 -0
  643. scipy/linalg/_expm_frechet.py +417 -0
  644. scipy/linalg/_fblas.cp312-win_arm64.lib +0 -0
  645. scipy/linalg/_fblas.cp312-win_arm64.pyd +0 -0
  646. scipy/linalg/_flapack.cp312-win_arm64.lib +0 -0
  647. scipy/linalg/_flapack.cp312-win_arm64.pyd +0 -0
  648. scipy/linalg/_lapack_subroutines.h +1521 -0
  649. scipy/linalg/_linalg_pythran.cp312-win_arm64.lib +0 -0
  650. scipy/linalg/_linalg_pythran.cp312-win_arm64.pyd +0 -0
  651. scipy/linalg/_matfuncs.py +1050 -0
  652. scipy/linalg/_matfuncs_expm.cp312-win_arm64.lib +0 -0
  653. scipy/linalg/_matfuncs_expm.cp312-win_arm64.pyd +0 -0
  654. scipy/linalg/_matfuncs_expm.pyi +6 -0
  655. scipy/linalg/_matfuncs_inv_ssq.py +886 -0
  656. scipy/linalg/_matfuncs_schur_sqrtm.cp312-win_arm64.lib +0 -0
  657. scipy/linalg/_matfuncs_schur_sqrtm.cp312-win_arm64.pyd +0 -0
  658. scipy/linalg/_matfuncs_sqrtm.py +107 -0
  659. scipy/linalg/_matfuncs_sqrtm_triu.cp312-win_arm64.lib +0 -0
  660. scipy/linalg/_matfuncs_sqrtm_triu.cp312-win_arm64.pyd +0 -0
  661. scipy/linalg/_misc.py +191 -0
  662. scipy/linalg/_procrustes.py +113 -0
  663. scipy/linalg/_sketches.py +189 -0
  664. scipy/linalg/_solve_toeplitz.cp312-win_arm64.lib +0 -0
  665. scipy/linalg/_solve_toeplitz.cp312-win_arm64.pyd +0 -0
  666. scipy/linalg/_solvers.py +862 -0
  667. scipy/linalg/_special_matrices.py +1322 -0
  668. scipy/linalg/_testutils.py +65 -0
  669. scipy/linalg/basic.py +23 -0
  670. scipy/linalg/blas.py +495 -0
  671. scipy/linalg/cython_blas.cp312-win_arm64.lib +0 -0
  672. scipy/linalg/cython_blas.cp312-win_arm64.pyd +0 -0
  673. scipy/linalg/cython_blas.pxd +169 -0
  674. scipy/linalg/cython_blas.pyx +1432 -0
  675. scipy/linalg/cython_lapack.cp312-win_arm64.lib +0 -0
  676. scipy/linalg/cython_lapack.cp312-win_arm64.pyd +0 -0
  677. scipy/linalg/cython_lapack.pxd +1528 -0
  678. scipy/linalg/cython_lapack.pyx +12045 -0
  679. scipy/linalg/decomp.py +23 -0
  680. scipy/linalg/decomp_cholesky.py +21 -0
  681. scipy/linalg/decomp_lu.py +21 -0
  682. scipy/linalg/decomp_qr.py +20 -0
  683. scipy/linalg/decomp_schur.py +21 -0
  684. scipy/linalg/decomp_svd.py +21 -0
  685. scipy/linalg/interpolative.py +989 -0
  686. scipy/linalg/lapack.py +1081 -0
  687. scipy/linalg/matfuncs.py +23 -0
  688. scipy/linalg/misc.py +21 -0
  689. scipy/linalg/special_matrices.py +22 -0
  690. scipy/linalg/tests/__init__.py +0 -0
  691. scipy/linalg/tests/_cython_examples/extending.pyx +23 -0
  692. scipy/linalg/tests/_cython_examples/meson.build +34 -0
  693. scipy/linalg/tests/data/carex_15_data.npz +0 -0
  694. scipy/linalg/tests/data/carex_18_data.npz +0 -0
  695. scipy/linalg/tests/data/carex_19_data.npz +0 -0
  696. scipy/linalg/tests/data/carex_20_data.npz +0 -0
  697. scipy/linalg/tests/data/carex_6_data.npz +0 -0
  698. scipy/linalg/tests/data/gendare_20170120_data.npz +0 -0
  699. scipy/linalg/tests/test_basic.py +2074 -0
  700. scipy/linalg/tests/test_batch.py +588 -0
  701. scipy/linalg/tests/test_blas.py +1127 -0
  702. scipy/linalg/tests/test_cython_blas.py +118 -0
  703. scipy/linalg/tests/test_cython_lapack.py +22 -0
  704. scipy/linalg/tests/test_cythonized_array_utils.py +130 -0
  705. scipy/linalg/tests/test_decomp.py +3189 -0
  706. scipy/linalg/tests/test_decomp_cholesky.py +268 -0
  707. scipy/linalg/tests/test_decomp_cossin.py +314 -0
  708. scipy/linalg/tests/test_decomp_ldl.py +137 -0
  709. scipy/linalg/tests/test_decomp_lu.py +308 -0
  710. scipy/linalg/tests/test_decomp_polar.py +110 -0
  711. scipy/linalg/tests/test_decomp_update.py +1701 -0
  712. scipy/linalg/tests/test_extending.py +46 -0
  713. scipy/linalg/tests/test_fblas.py +607 -0
  714. scipy/linalg/tests/test_interpolative.py +232 -0
  715. scipy/linalg/tests/test_lapack.py +3620 -0
  716. scipy/linalg/tests/test_matfuncs.py +1125 -0
  717. scipy/linalg/tests/test_matmul_toeplitz.py +136 -0
  718. scipy/linalg/tests/test_procrustes.py +214 -0
  719. scipy/linalg/tests/test_sketches.py +118 -0
  720. scipy/linalg/tests/test_solve_toeplitz.py +150 -0
  721. scipy/linalg/tests/test_solvers.py +844 -0
  722. scipy/linalg/tests/test_special_matrices.py +636 -0
  723. scipy/misc/__init__.py +6 -0
  724. scipy/misc/common.py +6 -0
  725. scipy/misc/doccer.py +6 -0
  726. scipy/ndimage/__init__.py +174 -0
  727. scipy/ndimage/_ctest.cp312-win_arm64.lib +0 -0
  728. scipy/ndimage/_ctest.cp312-win_arm64.pyd +0 -0
  729. scipy/ndimage/_cytest.cp312-win_arm64.lib +0 -0
  730. scipy/ndimage/_cytest.cp312-win_arm64.pyd +0 -0
  731. scipy/ndimage/_delegators.py +303 -0
  732. scipy/ndimage/_filters.py +2422 -0
  733. scipy/ndimage/_fourier.py +306 -0
  734. scipy/ndimage/_interpolation.py +1033 -0
  735. scipy/ndimage/_measurements.py +1689 -0
  736. scipy/ndimage/_morphology.py +2634 -0
  737. scipy/ndimage/_nd_image.cp312-win_arm64.lib +0 -0
  738. scipy/ndimage/_nd_image.cp312-win_arm64.pyd +0 -0
  739. scipy/ndimage/_ndimage_api.py +16 -0
  740. scipy/ndimage/_ni_docstrings.py +214 -0
  741. scipy/ndimage/_ni_label.cp312-win_arm64.lib +0 -0
  742. scipy/ndimage/_ni_label.cp312-win_arm64.pyd +0 -0
  743. scipy/ndimage/_ni_support.py +139 -0
  744. scipy/ndimage/_rank_filter_1d.cp312-win_arm64.lib +0 -0
  745. scipy/ndimage/_rank_filter_1d.cp312-win_arm64.pyd +0 -0
  746. scipy/ndimage/_support_alternative_backends.py +84 -0
  747. scipy/ndimage/filters.py +27 -0
  748. scipy/ndimage/fourier.py +21 -0
  749. scipy/ndimage/interpolation.py +22 -0
  750. scipy/ndimage/measurements.py +24 -0
  751. scipy/ndimage/morphology.py +27 -0
  752. scipy/ndimage/tests/__init__.py +12 -0
  753. scipy/ndimage/tests/data/label_inputs.txt +21 -0
  754. scipy/ndimage/tests/data/label_results.txt +294 -0
  755. scipy/ndimage/tests/data/label_strels.txt +42 -0
  756. scipy/ndimage/tests/dots.png +0 -0
  757. scipy/ndimage/tests/test_c_api.py +102 -0
  758. scipy/ndimage/tests/test_datatypes.py +67 -0
  759. scipy/ndimage/tests/test_filters.py +3083 -0
  760. scipy/ndimage/tests/test_fourier.py +187 -0
  761. scipy/ndimage/tests/test_interpolation.py +1491 -0
  762. scipy/ndimage/tests/test_measurements.py +1592 -0
  763. scipy/ndimage/tests/test_morphology.py +2950 -0
  764. scipy/ndimage/tests/test_ni_support.py +78 -0
  765. scipy/ndimage/tests/test_splines.py +70 -0
  766. scipy/odr/__init__.py +131 -0
  767. scipy/odr/__odrpack.cp312-win_arm64.lib +0 -0
  768. scipy/odr/__odrpack.cp312-win_arm64.pyd +0 -0
  769. scipy/odr/_add_newdocs.py +34 -0
  770. scipy/odr/_models.py +315 -0
  771. scipy/odr/_odrpack.py +1154 -0
  772. scipy/odr/models.py +20 -0
  773. scipy/odr/odrpack.py +21 -0
  774. scipy/odr/tests/__init__.py +0 -0
  775. scipy/odr/tests/test_odr.py +607 -0
  776. scipy/optimize/__init__.pxd +1 -0
  777. scipy/optimize/__init__.py +460 -0
  778. scipy/optimize/_basinhopping.py +741 -0
  779. scipy/optimize/_bglu_dense.cp312-win_arm64.lib +0 -0
  780. scipy/optimize/_bglu_dense.cp312-win_arm64.pyd +0 -0
  781. scipy/optimize/_bracket.py +706 -0
  782. scipy/optimize/_chandrupatla.py +551 -0
  783. scipy/optimize/_cobyla_py.py +297 -0
  784. scipy/optimize/_cobyqa_py.py +72 -0
  785. scipy/optimize/_constraints.py +598 -0
  786. scipy/optimize/_dcsrch.py +728 -0
  787. scipy/optimize/_differentiable_functions.py +835 -0
  788. scipy/optimize/_differentialevolution.py +1970 -0
  789. scipy/optimize/_direct.cp312-win_arm64.lib +0 -0
  790. scipy/optimize/_direct.cp312-win_arm64.pyd +0 -0
  791. scipy/optimize/_direct_py.py +280 -0
  792. scipy/optimize/_dual_annealing.py +732 -0
  793. scipy/optimize/_elementwise.py +798 -0
  794. scipy/optimize/_group_columns.cp312-win_arm64.lib +0 -0
  795. scipy/optimize/_group_columns.cp312-win_arm64.pyd +0 -0
  796. scipy/optimize/_hessian_update_strategy.py +479 -0
  797. scipy/optimize/_highspy/__init__.py +0 -0
  798. scipy/optimize/_highspy/_core.cp312-win_arm64.lib +0 -0
  799. scipy/optimize/_highspy/_core.cp312-win_arm64.pyd +0 -0
  800. scipy/optimize/_highspy/_highs_options.cp312-win_arm64.lib +0 -0
  801. scipy/optimize/_highspy/_highs_options.cp312-win_arm64.pyd +0 -0
  802. scipy/optimize/_highspy/_highs_wrapper.py +338 -0
  803. scipy/optimize/_isotonic.py +157 -0
  804. scipy/optimize/_lbfgsb.cp312-win_arm64.lib +0 -0
  805. scipy/optimize/_lbfgsb.cp312-win_arm64.pyd +0 -0
  806. scipy/optimize/_lbfgsb_py.py +634 -0
  807. scipy/optimize/_linesearch.py +896 -0
  808. scipy/optimize/_linprog.py +733 -0
  809. scipy/optimize/_linprog_doc.py +1434 -0
  810. scipy/optimize/_linprog_highs.py +422 -0
  811. scipy/optimize/_linprog_ip.py +1141 -0
  812. scipy/optimize/_linprog_rs.py +572 -0
  813. scipy/optimize/_linprog_simplex.py +663 -0
  814. scipy/optimize/_linprog_util.py +1521 -0
  815. scipy/optimize/_lsap.cp312-win_arm64.lib +0 -0
  816. scipy/optimize/_lsap.cp312-win_arm64.pyd +0 -0
  817. scipy/optimize/_lsq/__init__.py +5 -0
  818. scipy/optimize/_lsq/bvls.py +183 -0
  819. scipy/optimize/_lsq/common.py +731 -0
  820. scipy/optimize/_lsq/dogbox.py +345 -0
  821. scipy/optimize/_lsq/givens_elimination.cp312-win_arm64.lib +0 -0
  822. scipy/optimize/_lsq/givens_elimination.cp312-win_arm64.pyd +0 -0
  823. scipy/optimize/_lsq/least_squares.py +1044 -0
  824. scipy/optimize/_lsq/lsq_linear.py +361 -0
  825. scipy/optimize/_lsq/trf.py +587 -0
  826. scipy/optimize/_lsq/trf_linear.py +249 -0
  827. scipy/optimize/_milp.py +394 -0
  828. scipy/optimize/_minimize.py +1199 -0
  829. scipy/optimize/_minpack.cp312-win_arm64.lib +0 -0
  830. scipy/optimize/_minpack.cp312-win_arm64.pyd +0 -0
  831. scipy/optimize/_minpack_py.py +1178 -0
  832. scipy/optimize/_moduleTNC.cp312-win_arm64.lib +0 -0
  833. scipy/optimize/_moduleTNC.cp312-win_arm64.pyd +0 -0
  834. scipy/optimize/_nnls.py +96 -0
  835. scipy/optimize/_nonlin.py +1634 -0
  836. scipy/optimize/_numdiff.py +963 -0
  837. scipy/optimize/_optimize.py +4169 -0
  838. scipy/optimize/_pava_pybind.cp312-win_arm64.lib +0 -0
  839. scipy/optimize/_pava_pybind.cp312-win_arm64.pyd +0 -0
  840. scipy/optimize/_qap.py +760 -0
  841. scipy/optimize/_remove_redundancy.py +522 -0
  842. scipy/optimize/_root.py +732 -0
  843. scipy/optimize/_root_scalar.py +538 -0
  844. scipy/optimize/_shgo.py +1606 -0
  845. scipy/optimize/_shgo_lib/__init__.py +0 -0
  846. scipy/optimize/_shgo_lib/_complex.py +1225 -0
  847. scipy/optimize/_shgo_lib/_vertex.py +460 -0
  848. scipy/optimize/_slsqp_py.py +603 -0
  849. scipy/optimize/_slsqplib.cp312-win_arm64.lib +0 -0
  850. scipy/optimize/_slsqplib.cp312-win_arm64.pyd +0 -0
  851. scipy/optimize/_spectral.py +260 -0
  852. scipy/optimize/_tnc.py +438 -0
  853. scipy/optimize/_trlib/__init__.py +12 -0
  854. scipy/optimize/_trlib/_trlib.cp312-win_arm64.lib +0 -0
  855. scipy/optimize/_trlib/_trlib.cp312-win_arm64.pyd +0 -0
  856. scipy/optimize/_trustregion.py +318 -0
  857. scipy/optimize/_trustregion_constr/__init__.py +6 -0
  858. scipy/optimize/_trustregion_constr/canonical_constraint.py +390 -0
  859. scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +231 -0
  860. scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +584 -0
  861. scipy/optimize/_trustregion_constr/projections.py +411 -0
  862. scipy/optimize/_trustregion_constr/qp_subproblem.py +637 -0
  863. scipy/optimize/_trustregion_constr/report.py +49 -0
  864. scipy/optimize/_trustregion_constr/tests/__init__.py +0 -0
  865. scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py +296 -0
  866. scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py +39 -0
  867. scipy/optimize/_trustregion_constr/tests/test_projections.py +214 -0
  868. scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +645 -0
  869. scipy/optimize/_trustregion_constr/tests/test_report.py +34 -0
  870. scipy/optimize/_trustregion_constr/tr_interior_point.py +361 -0
  871. scipy/optimize/_trustregion_dogleg.py +122 -0
  872. scipy/optimize/_trustregion_exact.py +437 -0
  873. scipy/optimize/_trustregion_krylov.py +65 -0
  874. scipy/optimize/_trustregion_ncg.py +126 -0
  875. scipy/optimize/_tstutils.py +972 -0
  876. scipy/optimize/_zeros.cp312-win_arm64.lib +0 -0
  877. scipy/optimize/_zeros.cp312-win_arm64.pyd +0 -0
  878. scipy/optimize/_zeros_py.py +1475 -0
  879. scipy/optimize/cobyla.py +19 -0
  880. scipy/optimize/cython_optimize/__init__.py +133 -0
  881. scipy/optimize/cython_optimize/_zeros.cp312-win_arm64.lib +0 -0
  882. scipy/optimize/cython_optimize/_zeros.cp312-win_arm64.pyd +0 -0
  883. scipy/optimize/cython_optimize/_zeros.pxd +33 -0
  884. scipy/optimize/cython_optimize/c_zeros.pxd +26 -0
  885. scipy/optimize/cython_optimize.pxd +11 -0
  886. scipy/optimize/elementwise.py +38 -0
  887. scipy/optimize/lbfgsb.py +23 -0
  888. scipy/optimize/linesearch.py +18 -0
  889. scipy/optimize/minpack.py +27 -0
  890. scipy/optimize/minpack2.py +17 -0
  891. scipy/optimize/moduleTNC.py +19 -0
  892. scipy/optimize/nonlin.py +29 -0
  893. scipy/optimize/optimize.py +40 -0
  894. scipy/optimize/slsqp.py +22 -0
  895. scipy/optimize/tests/__init__.py +0 -0
  896. scipy/optimize/tests/_cython_examples/extending.pyx +43 -0
  897. scipy/optimize/tests/_cython_examples/meson.build +32 -0
  898. scipy/optimize/tests/test__basinhopping.py +535 -0
  899. scipy/optimize/tests/test__differential_evolution.py +1703 -0
  900. scipy/optimize/tests/test__dual_annealing.py +416 -0
  901. scipy/optimize/tests/test__linprog_clean_inputs.py +312 -0
  902. scipy/optimize/tests/test__numdiff.py +885 -0
  903. scipy/optimize/tests/test__remove_redundancy.py +228 -0
  904. scipy/optimize/tests/test__root.py +124 -0
  905. scipy/optimize/tests/test__shgo.py +1164 -0
  906. scipy/optimize/tests/test__spectral.py +226 -0
  907. scipy/optimize/tests/test_bracket.py +896 -0
  908. scipy/optimize/tests/test_chandrupatla.py +982 -0
  909. scipy/optimize/tests/test_cobyla.py +195 -0
  910. scipy/optimize/tests/test_cobyqa.py +252 -0
  911. scipy/optimize/tests/test_constraint_conversion.py +286 -0
  912. scipy/optimize/tests/test_constraints.py +255 -0
  913. scipy/optimize/tests/test_cython_optimize.py +92 -0
  914. scipy/optimize/tests/test_differentiable_functions.py +1025 -0
  915. scipy/optimize/tests/test_direct.py +321 -0
  916. scipy/optimize/tests/test_extending.py +28 -0
  917. scipy/optimize/tests/test_hessian_update_strategy.py +300 -0
  918. scipy/optimize/tests/test_isotonic_regression.py +167 -0
  919. scipy/optimize/tests/test_lbfgsb_hessinv.py +65 -0
  920. scipy/optimize/tests/test_lbfgsb_setulb.py +122 -0
  921. scipy/optimize/tests/test_least_squares.py +986 -0
  922. scipy/optimize/tests/test_linear_assignment.py +116 -0
  923. scipy/optimize/tests/test_linesearch.py +328 -0
  924. scipy/optimize/tests/test_linprog.py +2577 -0
  925. scipy/optimize/tests/test_lsq_common.py +297 -0
  926. scipy/optimize/tests/test_lsq_linear.py +287 -0
  927. scipy/optimize/tests/test_milp.py +459 -0
  928. scipy/optimize/tests/test_minimize_constrained.py +845 -0
  929. scipy/optimize/tests/test_minpack.py +1194 -0
  930. scipy/optimize/tests/test_nnls.py +469 -0
  931. scipy/optimize/tests/test_nonlin.py +572 -0
  932. scipy/optimize/tests/test_optimize.py +3344 -0
  933. scipy/optimize/tests/test_quadratic_assignment.py +455 -0
  934. scipy/optimize/tests/test_regression.py +40 -0
  935. scipy/optimize/tests/test_slsqp.py +645 -0
  936. scipy/optimize/tests/test_tnc.py +345 -0
  937. scipy/optimize/tests/test_trustregion.py +110 -0
  938. scipy/optimize/tests/test_trustregion_exact.py +351 -0
  939. scipy/optimize/tests/test_trustregion_krylov.py +170 -0
  940. scipy/optimize/tests/test_zeros.py +998 -0
  941. scipy/optimize/tnc.py +22 -0
  942. scipy/optimize/zeros.py +26 -0
  943. scipy/signal/__init__.py +316 -0
  944. scipy/signal/_arraytools.py +264 -0
  945. scipy/signal/_czt.py +575 -0
  946. scipy/signal/_delegators.py +568 -0
  947. scipy/signal/_filter_design.py +5893 -0
  948. scipy/signal/_fir_filter_design.py +1458 -0
  949. scipy/signal/_lti_conversion.py +534 -0
  950. scipy/signal/_ltisys.py +3546 -0
  951. scipy/signal/_max_len_seq.py +139 -0
  952. scipy/signal/_max_len_seq_inner.cp312-win_arm64.lib +0 -0
  953. scipy/signal/_max_len_seq_inner.cp312-win_arm64.pyd +0 -0
  954. scipy/signal/_peak_finding.py +1310 -0
  955. scipy/signal/_peak_finding_utils.cp312-win_arm64.lib +0 -0
  956. scipy/signal/_peak_finding_utils.cp312-win_arm64.pyd +0 -0
  957. scipy/signal/_polyutils.py +172 -0
  958. scipy/signal/_savitzky_golay.py +357 -0
  959. scipy/signal/_short_time_fft.py +2228 -0
  960. scipy/signal/_signal_api.py +30 -0
  961. scipy/signal/_signaltools.py +5309 -0
  962. scipy/signal/_sigtools.cp312-win_arm64.lib +0 -0
  963. scipy/signal/_sigtools.cp312-win_arm64.pyd +0 -0
  964. scipy/signal/_sosfilt.cp312-win_arm64.lib +0 -0
  965. scipy/signal/_sosfilt.cp312-win_arm64.pyd +0 -0
  966. scipy/signal/_spectral_py.py +2471 -0
  967. scipy/signal/_spline.cp312-win_arm64.lib +0 -0
  968. scipy/signal/_spline.cp312-win_arm64.pyd +0 -0
  969. scipy/signal/_spline.pyi +34 -0
  970. scipy/signal/_spline_filters.py +848 -0
  971. scipy/signal/_support_alternative_backends.py +73 -0
  972. scipy/signal/_upfirdn.py +219 -0
  973. scipy/signal/_upfirdn_apply.cp312-win_arm64.lib +0 -0
  974. scipy/signal/_upfirdn_apply.cp312-win_arm64.pyd +0 -0
  975. scipy/signal/_waveforms.py +687 -0
  976. scipy/signal/_wavelets.py +29 -0
  977. scipy/signal/bsplines.py +21 -0
  978. scipy/signal/filter_design.py +28 -0
  979. scipy/signal/fir_filter_design.py +21 -0
  980. scipy/signal/lti_conversion.py +20 -0
  981. scipy/signal/ltisys.py +25 -0
  982. scipy/signal/signaltools.py +27 -0
  983. scipy/signal/spectral.py +21 -0
  984. scipy/signal/spline.py +18 -0
  985. scipy/signal/tests/__init__.py +0 -0
  986. scipy/signal/tests/_scipy_spectral_test_shim.py +311 -0
  987. scipy/signal/tests/mpsig.py +122 -0
  988. scipy/signal/tests/test_array_tools.py +111 -0
  989. scipy/signal/tests/test_bsplines.py +365 -0
  990. scipy/signal/tests/test_cont2discrete.py +424 -0
  991. scipy/signal/tests/test_czt.py +221 -0
  992. scipy/signal/tests/test_dltisys.py +599 -0
  993. scipy/signal/tests/test_filter_design.py +4744 -0
  994. scipy/signal/tests/test_fir_filter_design.py +851 -0
  995. scipy/signal/tests/test_ltisys.py +1225 -0
  996. scipy/signal/tests/test_max_len_seq.py +71 -0
  997. scipy/signal/tests/test_peak_finding.py +915 -0
  998. scipy/signal/tests/test_result_type.py +51 -0
  999. scipy/signal/tests/test_savitzky_golay.py +363 -0
  1000. scipy/signal/tests/test_short_time_fft.py +1107 -0
  1001. scipy/signal/tests/test_signaltools.py +4735 -0
  1002. scipy/signal/tests/test_spectral.py +2141 -0
  1003. scipy/signal/tests/test_splines.py +427 -0
  1004. scipy/signal/tests/test_upfirdn.py +322 -0
  1005. scipy/signal/tests/test_waveforms.py +400 -0
  1006. scipy/signal/tests/test_wavelets.py +59 -0
  1007. scipy/signal/tests/test_windows.py +987 -0
  1008. scipy/signal/waveforms.py +20 -0
  1009. scipy/signal/wavelets.py +17 -0
  1010. scipy/signal/windows/__init__.py +52 -0
  1011. scipy/signal/windows/_windows.py +2513 -0
  1012. scipy/signal/windows/windows.py +23 -0
  1013. scipy/sparse/__init__.py +350 -0
  1014. scipy/sparse/_base.py +1613 -0
  1015. scipy/sparse/_bsr.py +880 -0
  1016. scipy/sparse/_compressed.py +1328 -0
  1017. scipy/sparse/_construct.py +1454 -0
  1018. scipy/sparse/_coo.py +1581 -0
  1019. scipy/sparse/_csc.py +367 -0
  1020. scipy/sparse/_csparsetools.cp312-win_arm64.lib +0 -0
  1021. scipy/sparse/_csparsetools.cp312-win_arm64.pyd +0 -0
  1022. scipy/sparse/_csr.py +558 -0
  1023. scipy/sparse/_data.py +569 -0
  1024. scipy/sparse/_dia.py +677 -0
  1025. scipy/sparse/_dok.py +669 -0
  1026. scipy/sparse/_extract.py +178 -0
  1027. scipy/sparse/_index.py +444 -0
  1028. scipy/sparse/_lil.py +632 -0
  1029. scipy/sparse/_matrix.py +169 -0
  1030. scipy/sparse/_matrix_io.py +167 -0
  1031. scipy/sparse/_sparsetools.cp312-win_arm64.lib +0 -0
  1032. scipy/sparse/_sparsetools.cp312-win_arm64.pyd +0 -0
  1033. scipy/sparse/_spfuncs.py +76 -0
  1034. scipy/sparse/_sputils.py +632 -0
  1035. scipy/sparse/base.py +24 -0
  1036. scipy/sparse/bsr.py +22 -0
  1037. scipy/sparse/compressed.py +20 -0
  1038. scipy/sparse/construct.py +38 -0
  1039. scipy/sparse/coo.py +23 -0
  1040. scipy/sparse/csc.py +22 -0
  1041. scipy/sparse/csgraph/__init__.py +210 -0
  1042. scipy/sparse/csgraph/_flow.cp312-win_arm64.lib +0 -0
  1043. scipy/sparse/csgraph/_flow.cp312-win_arm64.pyd +0 -0
  1044. scipy/sparse/csgraph/_laplacian.py +563 -0
  1045. scipy/sparse/csgraph/_matching.cp312-win_arm64.lib +0 -0
  1046. scipy/sparse/csgraph/_matching.cp312-win_arm64.pyd +0 -0
  1047. scipy/sparse/csgraph/_min_spanning_tree.cp312-win_arm64.lib +0 -0
  1048. scipy/sparse/csgraph/_min_spanning_tree.cp312-win_arm64.pyd +0 -0
  1049. scipy/sparse/csgraph/_reordering.cp312-win_arm64.lib +0 -0
  1050. scipy/sparse/csgraph/_reordering.cp312-win_arm64.pyd +0 -0
  1051. scipy/sparse/csgraph/_shortest_path.cp312-win_arm64.lib +0 -0
  1052. scipy/sparse/csgraph/_shortest_path.cp312-win_arm64.pyd +0 -0
  1053. scipy/sparse/csgraph/_tools.cp312-win_arm64.lib +0 -0
  1054. scipy/sparse/csgraph/_tools.cp312-win_arm64.pyd +0 -0
  1055. scipy/sparse/csgraph/_traversal.cp312-win_arm64.lib +0 -0
  1056. scipy/sparse/csgraph/_traversal.cp312-win_arm64.pyd +0 -0
  1057. scipy/sparse/csgraph/_validation.py +66 -0
  1058. scipy/sparse/csgraph/tests/__init__.py +0 -0
  1059. scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
  1060. scipy/sparse/csgraph/tests/test_conversions.py +61 -0
  1061. scipy/sparse/csgraph/tests/test_flow.py +209 -0
  1062. scipy/sparse/csgraph/tests/test_graph_laplacian.py +368 -0
  1063. scipy/sparse/csgraph/tests/test_matching.py +307 -0
  1064. scipy/sparse/csgraph/tests/test_pydata_sparse.py +197 -0
  1065. scipy/sparse/csgraph/tests/test_reordering.py +70 -0
  1066. scipy/sparse/csgraph/tests/test_shortest_path.py +540 -0
  1067. scipy/sparse/csgraph/tests/test_spanning_tree.py +66 -0
  1068. scipy/sparse/csgraph/tests/test_traversal.py +148 -0
  1069. scipy/sparse/csr.py +22 -0
  1070. scipy/sparse/data.py +18 -0
  1071. scipy/sparse/dia.py +22 -0
  1072. scipy/sparse/dok.py +22 -0
  1073. scipy/sparse/extract.py +23 -0
  1074. scipy/sparse/lil.py +22 -0
  1075. scipy/sparse/linalg/__init__.py +148 -0
  1076. scipy/sparse/linalg/_dsolve/__init__.py +71 -0
  1077. scipy/sparse/linalg/_dsolve/_add_newdocs.py +147 -0
  1078. scipy/sparse/linalg/_dsolve/_superlu.cp312-win_arm64.lib +0 -0
  1079. scipy/sparse/linalg/_dsolve/_superlu.cp312-win_arm64.pyd +0 -0
  1080. scipy/sparse/linalg/_dsolve/linsolve.py +882 -0
  1081. scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
  1082. scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +928 -0
  1083. scipy/sparse/linalg/_eigen/__init__.py +22 -0
  1084. scipy/sparse/linalg/_eigen/_svds.py +540 -0
  1085. scipy/sparse/linalg/_eigen/_svds_doc.py +382 -0
  1086. scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
  1087. scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
  1088. scipy/sparse/linalg/_eigen/arpack/_arpack.cp312-win_arm64.lib +0 -0
  1089. scipy/sparse/linalg/_eigen/arpack/_arpack.cp312-win_arm64.pyd +0 -0
  1090. scipy/sparse/linalg/_eigen/arpack/arpack.py +1706 -0
  1091. scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
  1092. scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +717 -0
  1093. scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
  1094. scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1110 -0
  1095. scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
  1096. scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +725 -0
  1097. scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
  1098. scipy/sparse/linalg/_eigen/tests/test_svds.py +886 -0
  1099. scipy/sparse/linalg/_expm_multiply.py +816 -0
  1100. scipy/sparse/linalg/_interface.py +920 -0
  1101. scipy/sparse/linalg/_isolve/__init__.py +20 -0
  1102. scipy/sparse/linalg/_isolve/_gcrotmk.py +503 -0
  1103. scipy/sparse/linalg/_isolve/iterative.py +1051 -0
  1104. scipy/sparse/linalg/_isolve/lgmres.py +230 -0
  1105. scipy/sparse/linalg/_isolve/lsmr.py +486 -0
  1106. scipy/sparse/linalg/_isolve/lsqr.py +589 -0
  1107. scipy/sparse/linalg/_isolve/minres.py +372 -0
  1108. scipy/sparse/linalg/_isolve/tests/__init__.py +0 -0
  1109. scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py +183 -0
  1110. scipy/sparse/linalg/_isolve/tests/test_iterative.py +809 -0
  1111. scipy/sparse/linalg/_isolve/tests/test_lgmres.py +225 -0
  1112. scipy/sparse/linalg/_isolve/tests/test_lsmr.py +185 -0
  1113. scipy/sparse/linalg/_isolve/tests/test_lsqr.py +120 -0
  1114. scipy/sparse/linalg/_isolve/tests/test_minres.py +97 -0
  1115. scipy/sparse/linalg/_isolve/tests/test_utils.py +9 -0
  1116. scipy/sparse/linalg/_isolve/tfqmr.py +179 -0
  1117. scipy/sparse/linalg/_isolve/utils.py +121 -0
  1118. scipy/sparse/linalg/_matfuncs.py +940 -0
  1119. scipy/sparse/linalg/_norm.py +195 -0
  1120. scipy/sparse/linalg/_onenormest.py +467 -0
  1121. scipy/sparse/linalg/_propack/_cpropack.cp312-win_arm64.lib +0 -0
  1122. scipy/sparse/linalg/_propack/_cpropack.cp312-win_arm64.pyd +0 -0
  1123. scipy/sparse/linalg/_propack/_dpropack.cp312-win_arm64.lib +0 -0
  1124. scipy/sparse/linalg/_propack/_dpropack.cp312-win_arm64.pyd +0 -0
  1125. scipy/sparse/linalg/_propack/_spropack.cp312-win_arm64.lib +0 -0
  1126. scipy/sparse/linalg/_propack/_spropack.cp312-win_arm64.pyd +0 -0
  1127. scipy/sparse/linalg/_propack/_zpropack.cp312-win_arm64.lib +0 -0
  1128. scipy/sparse/linalg/_propack/_zpropack.cp312-win_arm64.pyd +0 -0
  1129. scipy/sparse/linalg/_special_sparse_arrays.py +949 -0
  1130. scipy/sparse/linalg/_svdp.py +309 -0
  1131. scipy/sparse/linalg/dsolve.py +22 -0
  1132. scipy/sparse/linalg/eigen.py +21 -0
  1133. scipy/sparse/linalg/interface.py +20 -0
  1134. scipy/sparse/linalg/isolve.py +22 -0
  1135. scipy/sparse/linalg/matfuncs.py +18 -0
  1136. scipy/sparse/linalg/tests/__init__.py +0 -0
  1137. scipy/sparse/linalg/tests/propack_test_data.npz +0 -0
  1138. scipy/sparse/linalg/tests/test_expm_multiply.py +367 -0
  1139. scipy/sparse/linalg/tests/test_interface.py +561 -0
  1140. scipy/sparse/linalg/tests/test_matfuncs.py +592 -0
  1141. scipy/sparse/linalg/tests/test_norm.py +154 -0
  1142. scipy/sparse/linalg/tests/test_onenormest.py +252 -0
  1143. scipy/sparse/linalg/tests/test_propack.py +165 -0
  1144. scipy/sparse/linalg/tests/test_pydata_sparse.py +272 -0
  1145. scipy/sparse/linalg/tests/test_special_sparse_arrays.py +337 -0
  1146. scipy/sparse/sparsetools.py +17 -0
  1147. scipy/sparse/spfuncs.py +17 -0
  1148. scipy/sparse/sputils.py +17 -0
  1149. scipy/sparse/tests/__init__.py +0 -0
  1150. scipy/sparse/tests/data/csc_py2.npz +0 -0
  1151. scipy/sparse/tests/data/csc_py3.npz +0 -0
  1152. scipy/sparse/tests/test_arithmetic1d.py +341 -0
  1153. scipy/sparse/tests/test_array_api.py +561 -0
  1154. scipy/sparse/tests/test_base.py +5870 -0
  1155. scipy/sparse/tests/test_common1d.py +447 -0
  1156. scipy/sparse/tests/test_construct.py +872 -0
  1157. scipy/sparse/tests/test_coo.py +1119 -0
  1158. scipy/sparse/tests/test_csc.py +98 -0
  1159. scipy/sparse/tests/test_csr.py +214 -0
  1160. scipy/sparse/tests/test_dok.py +209 -0
  1161. scipy/sparse/tests/test_extract.py +51 -0
  1162. scipy/sparse/tests/test_indexing1d.py +603 -0
  1163. scipy/sparse/tests/test_matrix_io.py +109 -0
  1164. scipy/sparse/tests/test_minmax1d.py +128 -0
  1165. scipy/sparse/tests/test_sparsetools.py +344 -0
  1166. scipy/sparse/tests/test_spfuncs.py +97 -0
  1167. scipy/sparse/tests/test_sputils.py +424 -0
  1168. scipy/spatial/__init__.py +129 -0
  1169. scipy/spatial/_ckdtree.cp312-win_arm64.lib +0 -0
  1170. scipy/spatial/_ckdtree.cp312-win_arm64.pyd +0 -0
  1171. scipy/spatial/_distance_pybind.cp312-win_arm64.lib +0 -0
  1172. scipy/spatial/_distance_pybind.cp312-win_arm64.pyd +0 -0
  1173. scipy/spatial/_distance_wrap.cp312-win_arm64.lib +0 -0
  1174. scipy/spatial/_distance_wrap.cp312-win_arm64.pyd +0 -0
  1175. scipy/spatial/_geometric_slerp.py +238 -0
  1176. scipy/spatial/_hausdorff.cp312-win_arm64.lib +0 -0
  1177. scipy/spatial/_hausdorff.cp312-win_arm64.pyd +0 -0
  1178. scipy/spatial/_kdtree.py +920 -0
  1179. scipy/spatial/_plotutils.py +274 -0
  1180. scipy/spatial/_procrustes.py +132 -0
  1181. scipy/spatial/_qhull.cp312-win_arm64.lib +0 -0
  1182. scipy/spatial/_qhull.cp312-win_arm64.pyd +0 -0
  1183. scipy/spatial/_qhull.pyi +213 -0
  1184. scipy/spatial/_spherical_voronoi.py +341 -0
  1185. scipy/spatial/_voronoi.cp312-win_arm64.lib +0 -0
  1186. scipy/spatial/_voronoi.cp312-win_arm64.pyd +0 -0
  1187. scipy/spatial/_voronoi.pyi +4 -0
  1188. scipy/spatial/ckdtree.py +18 -0
  1189. scipy/spatial/distance.py +3147 -0
  1190. scipy/spatial/distance.pyi +210 -0
  1191. scipy/spatial/kdtree.py +25 -0
  1192. scipy/spatial/qhull.py +25 -0
  1193. scipy/spatial/qhull_src/COPYING_QHULL.txt +39 -0
  1194. scipy/spatial/tests/__init__.py +0 -0
  1195. scipy/spatial/tests/data/cdist-X1.txt +10 -0
  1196. scipy/spatial/tests/data/cdist-X2.txt +20 -0
  1197. scipy/spatial/tests/data/degenerate_pointset.npz +0 -0
  1198. scipy/spatial/tests/data/iris.txt +150 -0
  1199. scipy/spatial/tests/data/pdist-boolean-inp.txt +20 -0
  1200. scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt +1 -0
  1201. scipy/spatial/tests/data/pdist-chebyshev-ml.txt +1 -0
  1202. scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt +1 -0
  1203. scipy/spatial/tests/data/pdist-cityblock-ml.txt +1 -0
  1204. scipy/spatial/tests/data/pdist-correlation-ml-iris.txt +1 -0
  1205. scipy/spatial/tests/data/pdist-correlation-ml.txt +1 -0
  1206. scipy/spatial/tests/data/pdist-cosine-ml-iris.txt +1 -0
  1207. scipy/spatial/tests/data/pdist-cosine-ml.txt +1 -0
  1208. scipy/spatial/tests/data/pdist-double-inp.txt +20 -0
  1209. scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt +1 -0
  1210. scipy/spatial/tests/data/pdist-euclidean-ml.txt +1 -0
  1211. scipy/spatial/tests/data/pdist-hamming-ml.txt +1 -0
  1212. scipy/spatial/tests/data/pdist-jaccard-ml.txt +1 -0
  1213. scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt +1 -0
  1214. scipy/spatial/tests/data/pdist-jensenshannon-ml.txt +1 -0
  1215. scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt +1 -0
  1216. scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt +1 -0
  1217. scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt +1 -0
  1218. scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt +1 -0
  1219. scipy/spatial/tests/data/pdist-seuclidean-ml.txt +1 -0
  1220. scipy/spatial/tests/data/pdist-spearman-ml.txt +1 -0
  1221. scipy/spatial/tests/data/random-bool-data.txt +100 -0
  1222. scipy/spatial/tests/data/random-double-data.txt +100 -0
  1223. scipy/spatial/tests/data/random-int-data.txt +100 -0
  1224. scipy/spatial/tests/data/random-uint-data.txt +100 -0
  1225. scipy/spatial/tests/data/selfdual-4d-polytope.txt +27 -0
  1226. scipy/spatial/tests/test__plotutils.py +91 -0
  1227. scipy/spatial/tests/test__procrustes.py +116 -0
  1228. scipy/spatial/tests/test_distance.py +2389 -0
  1229. scipy/spatial/tests/test_hausdorff.py +199 -0
  1230. scipy/spatial/tests/test_kdtree.py +1536 -0
  1231. scipy/spatial/tests/test_qhull.py +1313 -0
  1232. scipy/spatial/tests/test_slerp.py +417 -0
  1233. scipy/spatial/tests/test_spherical_voronoi.py +358 -0
  1234. scipy/spatial/transform/__init__.py +31 -0
  1235. scipy/spatial/transform/_rigid_transform.cp312-win_arm64.lib +0 -0
  1236. scipy/spatial/transform/_rigid_transform.cp312-win_arm64.pyd +0 -0
  1237. scipy/spatial/transform/_rotation.cp312-win_arm64.lib +0 -0
  1238. scipy/spatial/transform/_rotation.cp312-win_arm64.pyd +0 -0
  1239. scipy/spatial/transform/_rotation_groups.py +140 -0
  1240. scipy/spatial/transform/_rotation_spline.py +460 -0
  1241. scipy/spatial/transform/rotation.py +21 -0
  1242. scipy/spatial/transform/tests/__init__.py +0 -0
  1243. scipy/spatial/transform/tests/test_rigid_transform.py +1221 -0
  1244. scipy/spatial/transform/tests/test_rotation.py +2569 -0
  1245. scipy/spatial/transform/tests/test_rotation_groups.py +169 -0
  1246. scipy/spatial/transform/tests/test_rotation_spline.py +183 -0
  1247. scipy/special/__init__.pxd +1 -0
  1248. scipy/special/__init__.py +841 -0
  1249. scipy/special/_add_newdocs.py +9961 -0
  1250. scipy/special/_basic.py +3576 -0
  1251. scipy/special/_comb.cp312-win_arm64.lib +0 -0
  1252. scipy/special/_comb.cp312-win_arm64.pyd +0 -0
  1253. scipy/special/_ellip_harm.py +214 -0
  1254. scipy/special/_ellip_harm_2.cp312-win_arm64.lib +0 -0
  1255. scipy/special/_ellip_harm_2.cp312-win_arm64.pyd +0 -0
  1256. scipy/special/_gufuncs.cp312-win_arm64.lib +0 -0
  1257. scipy/special/_gufuncs.cp312-win_arm64.pyd +0 -0
  1258. scipy/special/_input_validation.py +17 -0
  1259. scipy/special/_lambertw.py +149 -0
  1260. scipy/special/_logsumexp.py +426 -0
  1261. scipy/special/_mptestutils.py +453 -0
  1262. scipy/special/_multiufuncs.py +610 -0
  1263. scipy/special/_orthogonal.py +2592 -0
  1264. scipy/special/_orthogonal.pyi +330 -0
  1265. scipy/special/_precompute/__init__.py +0 -0
  1266. scipy/special/_precompute/cosine_cdf.py +17 -0
  1267. scipy/special/_precompute/expn_asy.py +54 -0
  1268. scipy/special/_precompute/gammainc_asy.py +116 -0
  1269. scipy/special/_precompute/gammainc_data.py +124 -0
  1270. scipy/special/_precompute/hyp2f1_data.py +484 -0
  1271. scipy/special/_precompute/lambertw.py +68 -0
  1272. scipy/special/_precompute/loggamma.py +43 -0
  1273. scipy/special/_precompute/struve_convergence.py +131 -0
  1274. scipy/special/_precompute/utils.py +38 -0
  1275. scipy/special/_precompute/wright_bessel.py +342 -0
  1276. scipy/special/_precompute/wright_bessel_data.py +152 -0
  1277. scipy/special/_precompute/wrightomega.py +41 -0
  1278. scipy/special/_precompute/zetac.py +27 -0
  1279. scipy/special/_sf_error.py +15 -0
  1280. scipy/special/_specfun.cp312-win_arm64.lib +0 -0
  1281. scipy/special/_specfun.cp312-win_arm64.pyd +0 -0
  1282. scipy/special/_special_ufuncs.cp312-win_arm64.lib +0 -0
  1283. scipy/special/_special_ufuncs.cp312-win_arm64.pyd +0 -0
  1284. scipy/special/_spfun_stats.py +106 -0
  1285. scipy/special/_spherical_bessel.py +397 -0
  1286. scipy/special/_support_alternative_backends.py +295 -0
  1287. scipy/special/_test_internal.cp312-win_arm64.lib +0 -0
  1288. scipy/special/_test_internal.cp312-win_arm64.pyd +0 -0
  1289. scipy/special/_test_internal.pyi +9 -0
  1290. scipy/special/_testutils.py +321 -0
  1291. scipy/special/_ufuncs.cp312-win_arm64.lib +0 -0
  1292. scipy/special/_ufuncs.cp312-win_arm64.pyd +0 -0
  1293. scipy/special/_ufuncs.pyi +522 -0
  1294. scipy/special/_ufuncs.pyx +13173 -0
  1295. scipy/special/_ufuncs_cxx.cp312-win_arm64.lib +0 -0
  1296. scipy/special/_ufuncs_cxx.cp312-win_arm64.pyd +0 -0
  1297. scipy/special/_ufuncs_cxx.pxd +142 -0
  1298. scipy/special/_ufuncs_cxx.pyx +427 -0
  1299. scipy/special/_ufuncs_cxx_defs.h +147 -0
  1300. scipy/special/_ufuncs_defs.h +57 -0
  1301. scipy/special/add_newdocs.py +15 -0
  1302. scipy/special/basic.py +87 -0
  1303. scipy/special/cython_special.cp312-win_arm64.lib +0 -0
  1304. scipy/special/cython_special.cp312-win_arm64.pyd +0 -0
  1305. scipy/special/cython_special.pxd +259 -0
  1306. scipy/special/cython_special.pyi +3 -0
  1307. scipy/special/orthogonal.py +45 -0
  1308. scipy/special/sf_error.py +20 -0
  1309. scipy/special/specfun.py +24 -0
  1310. scipy/special/spfun_stats.py +17 -0
  1311. scipy/special/tests/__init__.py +0 -0
  1312. scipy/special/tests/_cython_examples/extending.pyx +12 -0
  1313. scipy/special/tests/_cython_examples/meson.build +34 -0
  1314. scipy/special/tests/data/__init__.py +0 -0
  1315. scipy/special/tests/data/boost.npz +0 -0
  1316. scipy/special/tests/data/gsl.npz +0 -0
  1317. scipy/special/tests/data/local.npz +0 -0
  1318. scipy/special/tests/test_basic.py +4815 -0
  1319. scipy/special/tests/test_bdtr.py +112 -0
  1320. scipy/special/tests/test_boost_ufuncs.py +64 -0
  1321. scipy/special/tests/test_boxcox.py +125 -0
  1322. scipy/special/tests/test_cdflib.py +712 -0
  1323. scipy/special/tests/test_cdft_asymptotic.py +49 -0
  1324. scipy/special/tests/test_cephes_intp_cast.py +29 -0
  1325. scipy/special/tests/test_cosine_distr.py +83 -0
  1326. scipy/special/tests/test_cython_special.py +363 -0
  1327. scipy/special/tests/test_data.py +719 -0
  1328. scipy/special/tests/test_dd.py +42 -0
  1329. scipy/special/tests/test_digamma.py +45 -0
  1330. scipy/special/tests/test_ellip_harm.py +278 -0
  1331. scipy/special/tests/test_erfinv.py +89 -0
  1332. scipy/special/tests/test_exponential_integrals.py +118 -0
  1333. scipy/special/tests/test_extending.py +28 -0
  1334. scipy/special/tests/test_faddeeva.py +85 -0
  1335. scipy/special/tests/test_gamma.py +12 -0
  1336. scipy/special/tests/test_gammainc.py +152 -0
  1337. scipy/special/tests/test_hyp2f1.py +2566 -0
  1338. scipy/special/tests/test_hypergeometric.py +234 -0
  1339. scipy/special/tests/test_iv_ratio.py +249 -0
  1340. scipy/special/tests/test_kolmogorov.py +491 -0
  1341. scipy/special/tests/test_lambertw.py +109 -0
  1342. scipy/special/tests/test_legendre.py +1518 -0
  1343. scipy/special/tests/test_log1mexp.py +85 -0
  1344. scipy/special/tests/test_loggamma.py +70 -0
  1345. scipy/special/tests/test_logit.py +162 -0
  1346. scipy/special/tests/test_logsumexp.py +469 -0
  1347. scipy/special/tests/test_mpmath.py +2293 -0
  1348. scipy/special/tests/test_nan_inputs.py +65 -0
  1349. scipy/special/tests/test_ndtr.py +77 -0
  1350. scipy/special/tests/test_ndtri_exp.py +94 -0
  1351. scipy/special/tests/test_orthogonal.py +821 -0
  1352. scipy/special/tests/test_orthogonal_eval.py +275 -0
  1353. scipy/special/tests/test_owens_t.py +53 -0
  1354. scipy/special/tests/test_pcf.py +24 -0
  1355. scipy/special/tests/test_pdtr.py +48 -0
  1356. scipy/special/tests/test_powm1.py +65 -0
  1357. scipy/special/tests/test_precompute_expn_asy.py +24 -0
  1358. scipy/special/tests/test_precompute_gammainc.py +108 -0
  1359. scipy/special/tests/test_precompute_utils.py +36 -0
  1360. scipy/special/tests/test_round.py +18 -0
  1361. scipy/special/tests/test_sf_error.py +146 -0
  1362. scipy/special/tests/test_sici.py +36 -0
  1363. scipy/special/tests/test_specfun.py +48 -0
  1364. scipy/special/tests/test_spence.py +32 -0
  1365. scipy/special/tests/test_spfun_stats.py +61 -0
  1366. scipy/special/tests/test_sph_harm.py +85 -0
  1367. scipy/special/tests/test_spherical_bessel.py +400 -0
  1368. scipy/special/tests/test_support_alternative_backends.py +248 -0
  1369. scipy/special/tests/test_trig.py +72 -0
  1370. scipy/special/tests/test_ufunc_signatures.py +46 -0
  1371. scipy/special/tests/test_wright_bessel.py +205 -0
  1372. scipy/special/tests/test_wrightomega.py +117 -0
  1373. scipy/special/tests/test_zeta.py +301 -0
  1374. scipy/stats/__init__.py +670 -0
  1375. scipy/stats/_ansari_swilk_statistics.cp312-win_arm64.lib +0 -0
  1376. scipy/stats/_ansari_swilk_statistics.cp312-win_arm64.pyd +0 -0
  1377. scipy/stats/_axis_nan_policy.py +692 -0
  1378. scipy/stats/_biasedurn.cp312-win_arm64.lib +0 -0
  1379. scipy/stats/_biasedurn.cp312-win_arm64.pyd +0 -0
  1380. scipy/stats/_biasedurn.pxd +27 -0
  1381. scipy/stats/_binned_statistic.py +795 -0
  1382. scipy/stats/_binomtest.py +375 -0
  1383. scipy/stats/_bws_test.py +177 -0
  1384. scipy/stats/_censored_data.py +459 -0
  1385. scipy/stats/_common.py +5 -0
  1386. scipy/stats/_constants.py +42 -0
  1387. scipy/stats/_continued_fraction.py +387 -0
  1388. scipy/stats/_continuous_distns.py +12486 -0
  1389. scipy/stats/_correlation.py +210 -0
  1390. scipy/stats/_covariance.py +636 -0
  1391. scipy/stats/_crosstab.py +204 -0
  1392. scipy/stats/_discrete_distns.py +2098 -0
  1393. scipy/stats/_distn_infrastructure.py +4201 -0
  1394. scipy/stats/_distr_params.py +299 -0
  1395. scipy/stats/_distribution_infrastructure.py +5750 -0
  1396. scipy/stats/_entropy.py +428 -0
  1397. scipy/stats/_finite_differences.py +145 -0
  1398. scipy/stats/_fit.py +1351 -0
  1399. scipy/stats/_hypotests.py +2060 -0
  1400. scipy/stats/_kde.py +732 -0
  1401. scipy/stats/_ksstats.py +600 -0
  1402. scipy/stats/_levy_stable/__init__.py +1231 -0
  1403. scipy/stats/_levy_stable/levyst.cp312-win_arm64.lib +0 -0
  1404. scipy/stats/_levy_stable/levyst.cp312-win_arm64.pyd +0 -0
  1405. scipy/stats/_mannwhitneyu.py +492 -0
  1406. scipy/stats/_mgc.py +550 -0
  1407. scipy/stats/_morestats.py +4626 -0
  1408. scipy/stats/_mstats_basic.py +3658 -0
  1409. scipy/stats/_mstats_extras.py +521 -0
  1410. scipy/stats/_multicomp.py +449 -0
  1411. scipy/stats/_multivariate.py +7281 -0
  1412. scipy/stats/_new_distributions.py +452 -0
  1413. scipy/stats/_odds_ratio.py +466 -0
  1414. scipy/stats/_page_trend_test.py +486 -0
  1415. scipy/stats/_probability_distribution.py +1964 -0
  1416. scipy/stats/_qmc.py +2956 -0
  1417. scipy/stats/_qmc_cy.cp312-win_arm64.lib +0 -0
  1418. scipy/stats/_qmc_cy.cp312-win_arm64.pyd +0 -0
  1419. scipy/stats/_qmc_cy.pyi +54 -0
  1420. scipy/stats/_qmvnt.py +454 -0
  1421. scipy/stats/_qmvnt_cy.cp312-win_arm64.lib +0 -0
  1422. scipy/stats/_qmvnt_cy.cp312-win_arm64.pyd +0 -0
  1423. scipy/stats/_quantile.py +335 -0
  1424. scipy/stats/_rcont/__init__.py +4 -0
  1425. scipy/stats/_rcont/rcont.cp312-win_arm64.lib +0 -0
  1426. scipy/stats/_rcont/rcont.cp312-win_arm64.pyd +0 -0
  1427. scipy/stats/_relative_risk.py +263 -0
  1428. scipy/stats/_resampling.py +2352 -0
  1429. scipy/stats/_result_classes.py +40 -0
  1430. scipy/stats/_sampling.py +1314 -0
  1431. scipy/stats/_sensitivity_analysis.py +713 -0
  1432. scipy/stats/_sobol.cp312-win_arm64.lib +0 -0
  1433. scipy/stats/_sobol.cp312-win_arm64.pyd +0 -0
  1434. scipy/stats/_sobol.pyi +54 -0
  1435. scipy/stats/_sobol_direction_numbers.npz +0 -0
  1436. scipy/stats/_stats.cp312-win_arm64.lib +0 -0
  1437. scipy/stats/_stats.cp312-win_arm64.pyd +0 -0
  1438. scipy/stats/_stats.pxd +10 -0
  1439. scipy/stats/_stats_mstats_common.py +322 -0
  1440. scipy/stats/_stats_py.py +11089 -0
  1441. scipy/stats/_stats_pythran.cp312-win_arm64.lib +0 -0
  1442. scipy/stats/_stats_pythran.cp312-win_arm64.pyd +0 -0
  1443. scipy/stats/_survival.py +683 -0
  1444. scipy/stats/_tukeylambda_stats.py +199 -0
  1445. scipy/stats/_unuran/__init__.py +0 -0
  1446. scipy/stats/_unuran/unuran_wrapper.cp312-win_arm64.lib +0 -0
  1447. scipy/stats/_unuran/unuran_wrapper.cp312-win_arm64.pyd +0 -0
  1448. scipy/stats/_unuran/unuran_wrapper.pyi +179 -0
  1449. scipy/stats/_variation.py +126 -0
  1450. scipy/stats/_warnings_errors.py +38 -0
  1451. scipy/stats/_wilcoxon.py +265 -0
  1452. scipy/stats/biasedurn.py +16 -0
  1453. scipy/stats/contingency.py +521 -0
  1454. scipy/stats/distributions.py +24 -0
  1455. scipy/stats/kde.py +18 -0
  1456. scipy/stats/morestats.py +27 -0
  1457. scipy/stats/mstats.py +140 -0
  1458. scipy/stats/mstats_basic.py +42 -0
  1459. scipy/stats/mstats_extras.py +25 -0
  1460. scipy/stats/mvn.py +17 -0
  1461. scipy/stats/qmc.py +236 -0
  1462. scipy/stats/sampling.py +73 -0
  1463. scipy/stats/stats.py +41 -0
  1464. scipy/stats/tests/__init__.py +0 -0
  1465. scipy/stats/tests/common_tests.py +356 -0
  1466. scipy/stats/tests/data/_mvt.py +171 -0
  1467. scipy/stats/tests/data/fisher_exact_results_from_r.py +607 -0
  1468. scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy +0 -0
  1469. scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy +0 -0
  1470. scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy +0 -0
  1471. scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy +0 -0
  1472. scipy/stats/tests/data/nist_anova/AtmWtAg.dat +108 -0
  1473. scipy/stats/tests/data/nist_anova/SiRstv.dat +85 -0
  1474. scipy/stats/tests/data/nist_anova/SmLs01.dat +249 -0
  1475. scipy/stats/tests/data/nist_anova/SmLs02.dat +1869 -0
  1476. scipy/stats/tests/data/nist_anova/SmLs03.dat +18069 -0
  1477. scipy/stats/tests/data/nist_anova/SmLs04.dat +249 -0
  1478. scipy/stats/tests/data/nist_anova/SmLs05.dat +1869 -0
  1479. scipy/stats/tests/data/nist_anova/SmLs06.dat +18069 -0
  1480. scipy/stats/tests/data/nist_anova/SmLs07.dat +249 -0
  1481. scipy/stats/tests/data/nist_anova/SmLs08.dat +1869 -0
  1482. scipy/stats/tests/data/nist_anova/SmLs09.dat +18069 -0
  1483. scipy/stats/tests/data/nist_linregress/Norris.dat +97 -0
  1484. scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy +0 -0
  1485. scipy/stats/tests/data/studentized_range_mpmath_ref.json +1499 -0
  1486. scipy/stats/tests/test_axis_nan_policy.py +1388 -0
  1487. scipy/stats/tests/test_binned_statistic.py +568 -0
  1488. scipy/stats/tests/test_censored_data.py +152 -0
  1489. scipy/stats/tests/test_contingency.py +294 -0
  1490. scipy/stats/tests/test_continued_fraction.py +173 -0
  1491. scipy/stats/tests/test_continuous.py +2198 -0
  1492. scipy/stats/tests/test_continuous_basic.py +1053 -0
  1493. scipy/stats/tests/test_continuous_fit_censored.py +683 -0
  1494. scipy/stats/tests/test_correlation.py +80 -0
  1495. scipy/stats/tests/test_crosstab.py +115 -0
  1496. scipy/stats/tests/test_discrete_basic.py +580 -0
  1497. scipy/stats/tests/test_discrete_distns.py +700 -0
  1498. scipy/stats/tests/test_distributions.py +10413 -0
  1499. scipy/stats/tests/test_entropy.py +322 -0
  1500. scipy/stats/tests/test_fast_gen_inversion.py +435 -0
  1501. scipy/stats/tests/test_fit.py +1090 -0
  1502. scipy/stats/tests/test_hypotests.py +1991 -0
  1503. scipy/stats/tests/test_kdeoth.py +676 -0
  1504. scipy/stats/tests/test_marray.py +289 -0
  1505. scipy/stats/tests/test_mgc.py +217 -0
  1506. scipy/stats/tests/test_morestats.py +3259 -0
  1507. scipy/stats/tests/test_mstats_basic.py +2071 -0
  1508. scipy/stats/tests/test_mstats_extras.py +172 -0
  1509. scipy/stats/tests/test_multicomp.py +405 -0
  1510. scipy/stats/tests/test_multivariate.py +4381 -0
  1511. scipy/stats/tests/test_odds_ratio.py +148 -0
  1512. scipy/stats/tests/test_qmc.py +1492 -0
  1513. scipy/stats/tests/test_quantile.py +199 -0
  1514. scipy/stats/tests/test_rank.py +345 -0
  1515. scipy/stats/tests/test_relative_risk.py +95 -0
  1516. scipy/stats/tests/test_resampling.py +2000 -0
  1517. scipy/stats/tests/test_sampling.py +1450 -0
  1518. scipy/stats/tests/test_sensitivity_analysis.py +310 -0
  1519. scipy/stats/tests/test_stats.py +9707 -0
  1520. scipy/stats/tests/test_survival.py +466 -0
  1521. scipy/stats/tests/test_tukeylambda_stats.py +85 -0
  1522. scipy/stats/tests/test_variation.py +216 -0
  1523. scipy/version.py +12 -0
  1524. scipy-1.16.2.dist-info/DELVEWHEEL +2 -0
  1525. scipy-1.16.2.dist-info/LICENSE.txt +912 -0
  1526. scipy-1.16.2.dist-info/METADATA +1061 -0
  1527. scipy-1.16.2.dist-info/RECORD +1530 -0
  1528. scipy-1.16.2.dist-info/WHEEL +4 -0
  1529. scipy.libs/msvcp140-5f1c5dd31916990d94181e07bc3afb32.dll +0 -0
  1530. scipy.libs/scipy_openblas-f3ac85b1f412f7e86514c923dc4058d1.dll +0 -0
@@ -0,0 +1,2060 @@
1
+ from collections import namedtuple
2
+ from dataclasses import dataclass
3
+ from math import comb
4
+ import numpy as np
5
+ import warnings
6
+ from itertools import combinations
7
+ import scipy.stats
8
+ from scipy.optimize import shgo
9
+ from . import distributions
10
+ from ._common import ConfidenceInterval
11
+ from ._continuous_distns import norm
12
+ from scipy.special import gamma, kv, gammaln
13
+ from scipy.fft import ifft
14
+ from ._stats_pythran import _a_ij_Aij_Dij2
15
+ from ._stats_pythran import (
16
+ _concordant_pairs as _P, _discordant_pairs as _Q
17
+ )
18
+ from ._axis_nan_policy import _axis_nan_policy_factory
19
+ from scipy.stats import _stats_py
20
+
21
+ __all__ = ['epps_singleton_2samp', 'cramervonmises', 'somersd',
22
+ 'barnard_exact', 'boschloo_exact', 'cramervonmises_2samp',
23
+ 'tukey_hsd', 'poisson_means_test']
24
+
25
+ Epps_Singleton_2sampResult = namedtuple('Epps_Singleton_2sampResult',
26
+ ('statistic', 'pvalue'))
27
+
28
+
29
+ @_axis_nan_policy_factory(Epps_Singleton_2sampResult, n_samples=2, too_small=4)
30
+ def epps_singleton_2samp(x, y, t=(0.4, 0.8)):
31
+ """Compute the Epps-Singleton (ES) test statistic.
32
+
33
+ Test the null hypothesis that two samples have the same underlying
34
+ probability distribution.
35
+
36
+ Parameters
37
+ ----------
38
+ x, y : array-like
39
+ The two samples of observations to be tested. Input must not have more
40
+ than one dimension. Samples can have different lengths, but both
41
+ must have at least five observations.
42
+ t : array-like, optional
43
+ The points (t1, ..., tn) where the empirical characteristic function is
44
+ to be evaluated. These should be positive, distinct numbers. The default
45
+ value (0.4, 0.8) is proposed in [1]_. Input must not have more than
46
+ one dimension.
47
+
48
+ Returns
49
+ -------
50
+ statistic : float
51
+ The test statistic.
52
+ pvalue : float
53
+ The associated p-value based on the asymptotic chi2-distribution.
54
+
55
+ See Also
56
+ --------
57
+ ks_2samp, anderson_ksamp
58
+
59
+ Notes
60
+ -----
61
+ Testing whether two samples are generated by the same underlying
62
+ distribution is a classical question in statistics. A widely used test is
63
+ the Kolmogorov-Smirnov (KS) test which relies on the empirical
64
+ distribution function. Epps and Singleton introduce a test based on the
65
+ empirical characteristic function in [1]_.
66
+
67
+ One advantage of the ES test compared to the KS test is that it does
68
+ not assume a continuous distribution. In [1]_, the authors conclude
69
+ that the test also has a higher power than the KS test in many
70
+ examples. They recommend the use of the ES test for discrete samples as
71
+ well as continuous samples with at least 25 observations each, whereas
72
+ `anderson_ksamp` is recommended for smaller sample sizes in the
73
+ continuous case.
74
+
75
+ The p-value is computed from the asymptotic distribution of the test
76
+ statistic which follows a `chi2` distribution. If the sample size of both
77
+ `x` and `y` is below 25, the small sample correction proposed in [1]_ is
78
+ applied to the test statistic.
79
+
80
+ The default values of `t` are determined in [1]_ by considering
81
+ various distributions and finding good values that lead to a high power
82
+ of the test in general. Table III in [1]_ gives the optimal values for
83
+ the distributions tested in that study. The values of `t` are scaled by
84
+ the semi-interquartile range in the implementation, see [1]_.
85
+
86
+ References
87
+ ----------
88
+ .. [1] T. W. Epps and K. J. Singleton, "An omnibus test for the two-sample
89
+ problem using the empirical characteristic function", Journal of
90
+ Statistical Computation and Simulation 26, p. 177--203, 1986.
91
+
92
+ .. [2] S. J. Goerg and J. Kaiser, "Nonparametric testing of distributions
93
+ - the Epps-Singleton two-sample test using the empirical characteristic
94
+ function", The Stata Journal 9(3), p. 454--465, 2009.
95
+
96
+ """
97
+ # x and y are converted to arrays by the decorator
98
+ t = np.asarray(t)
99
+ # check if x and y are valid inputs
100
+ nx, ny = len(x), len(y)
101
+ if (nx < 5) or (ny < 5):
102
+ raise ValueError('x and y should have at least 5 elements, but len(x) '
103
+ f'= {nx} and len(y) = {ny}.')
104
+ if not np.isfinite(x).all():
105
+ raise ValueError('x must not contain nonfinite values.')
106
+ if not np.isfinite(y).all():
107
+ raise ValueError('y must not contain nonfinite values.')
108
+ n = nx + ny
109
+
110
+ # check if t is valid
111
+ if t.ndim > 1:
112
+ raise ValueError(f't must be 1d, but t.ndim equals {t.ndim}.')
113
+ if np.less_equal(t, 0).any():
114
+ raise ValueError('t must contain positive elements only.')
115
+
116
+ # rescale t with semi-iqr as proposed in [1]; import iqr here to avoid
117
+ # circular import
118
+ from scipy.stats import iqr
119
+ sigma = iqr(np.hstack((x, y))) / 2
120
+ ts = np.reshape(t, (-1, 1)) / sigma
121
+
122
+ # covariance estimation of ES test
123
+ gx = np.vstack((np.cos(ts*x), np.sin(ts*x))).T # shape = (nx, 2*len(t))
124
+ gy = np.vstack((np.cos(ts*y), np.sin(ts*y))).T
125
+ cov_x = np.cov(gx.T, bias=True) # the test uses biased cov-estimate
126
+ cov_y = np.cov(gy.T, bias=True)
127
+ est_cov = (n/nx)*cov_x + (n/ny)*cov_y
128
+ est_cov_inv = np.linalg.pinv(est_cov)
129
+ r = np.linalg.matrix_rank(est_cov_inv)
130
+ if r < 2*len(t):
131
+ warnings.warn('Estimated covariance matrix does not have full rank. '
132
+ 'This indicates a bad choice of the input t and the '
133
+ 'test might not be consistent.', # see p. 183 in [1]_
134
+ stacklevel=2)
135
+
136
+ # compute test statistic w distributed asympt. as chisquare with df=r
137
+ g_diff = np.mean(gx, axis=0) - np.mean(gy, axis=0)
138
+ w = n*np.dot(g_diff.T, np.dot(est_cov_inv, g_diff))
139
+
140
+ # apply small-sample correction
141
+ if (max(nx, ny) < 25):
142
+ corr = 1.0/(1.0 + n**(-0.45) + 10.1*(nx**(-1.7) + ny**(-1.7)))
143
+ w = corr * w
144
+
145
+ chi2 = _stats_py._SimpleChi2(r)
146
+ p = _stats_py._get_pvalue(w, chi2, alternative='greater', symmetric=False, xp=np)
147
+
148
+ return Epps_Singleton_2sampResult(w, p)
149
+
150
+
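This hunk appears to add ``scipy/stats/_hypotests.py`` (the file listed above with +2060 lines). A minimal usage sketch for the function above, with illustrative data and an arbitrary seed, assuming the public ``scipy.stats.epps_singleton_2samp`` wrapper; no specific output values are asserted:

>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng(12345)        # arbitrary seed for illustration
>>> x = rng.standard_normal(50)               # sample 1: standard normal
>>> y = rng.uniform(-2, 2, size=50)           # sample 2: uniform on [-2, 2]
>>> statistic, pvalue = stats.epps_singleton_2samp(x, y)
>>> # the evaluation points default to t=(0.4, 0.8) but can be overridden
>>> statistic2, pvalue2 = stats.epps_singleton_2samp(x, y, t=(0.5, 1.0))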
151
+ def poisson_means_test(k1, n1, k2, n2, *, diff=0, alternative='two-sided'):
152
+ r"""
153
+ Performs the Poisson means test, also known as the "E-test".
154
+
155
+ This is a test of the null hypothesis that the difference between means of
156
+ two Poisson distributions is `diff`. The samples are provided as the
157
+ number of events `k1` and `k2` observed within measurement intervals
158
+ (e.g. of time, space, number of observations) of sizes `n1` and `n2`.
159
+
160
+ Parameters
161
+ ----------
162
+ k1 : int
163
+ Number of events observed from distribution 1.
164
+ n1 : float
165
+ Size of sample from distribution 1.
166
+ k2 : int
167
+ Number of events observed from distribution 2.
168
+ n2 : float
169
+ Size of sample from distribution 2.
170
+ diff : float, default=0
171
+ The hypothesized difference in means between the distributions
172
+ underlying the samples.
173
+ alternative : {'two-sided', 'less', 'greater'}, optional
174
+ Defines the alternative hypothesis.
175
+ The following options are available (default is 'two-sided'):
176
+
177
+ * 'two-sided': the difference between distribution means is not
178
+ equal to `diff`
179
+ * 'less': the difference between distribution means is less than
180
+ `diff`
181
+ * 'greater': the difference between distribution means is greater
182
+ than `diff`
183
+
184
+ Returns
185
+ -------
186
+ statistic : float
187
+ The test statistic (see [1]_ equation 3.3).
188
+ pvalue : float
189
+ The probability of achieving such an extreme value of the test
190
+ statistic under the null hypothesis.
191
+
192
+ Notes
193
+ -----
194
+
195
+ Let:
196
+
197
+ .. math:: X_1 \sim \mbox{Poisson}(\mathtt{n1}\lambda_1)
198
+
199
+ be a random variable independent of
200
+
201
+ .. math:: X_2 \sim \mbox{Poisson}(\mathtt{n2}\lambda_2)
202
+
203
+ and let ``k1`` and ``k2`` be the observed values of :math:`X_1`
204
+ and :math:`X_2`, respectively. Then `poisson_means_test` uses the number
205
+ of observed events ``k1`` and ``k2`` from samples of size ``n1`` and
206
+ ``n2``, respectively, to test the null hypothesis that
207
+
208
+ .. math::
209
+ H_0: \lambda_1 - \lambda_2 = \mathtt{diff}
210
+
211
+ A benefit of the E-test is that it has good power for small sample sizes,
212
+ which can reduce sampling costs [1]_. It has been evaluated and determined
213
+ to be more powerful than the comparable C-test, sometimes referred to as
214
+ the Poisson exact test.
215
+
216
+ References
217
+ ----------
218
+ .. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
219
+ comparing two Poisson means. Journal of Statistical Planning and
220
+ Inference, 119(1), 23-35.
221
+
222
+ .. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
223
+ testing samples from Poisson series: With an application to testing
224
+ clover seed for dodder. Biometrika, 31(3/4), 313-323.
225
+
226
+ Examples
227
+ --------
228
+
229
+ Suppose that a gardener wishes to test the number of dodder (weed) seeds
230
+ in a sack of clover seeds that they buy from a seed company. It has
231
+ previously been established that the number of dodder seeds in clover
232
+ follows the Poisson distribution.
233
+
234
+ A 100 gram sample is drawn from the sack before being shipped to the
235
+ gardener. The sample is analyzed, and it is found to contain no dodder
236
+ seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
237
+ another 100 gram sample from the sack. This time, three dodder seeds are
238
+ found in the sample; that is, `k2` is 3. The gardener would like to
239
+ know if the difference is significant and not due to chance. The
240
+ null hypothesis is that the difference between the two samples is merely
241
+ due to chance, or that :math:`\lambda_1 - \lambda_2 = \mathtt{diff}`
242
+ where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
243
+ difference is not due to chance, or :math:`\lambda_1 - \lambda_2 \ne 0`.
244
+ The gardener selects a significance level of 5% to reject the null
245
+ hypothesis in favor of the alternative [2]_.
246
+
247
+ >>> import scipy.stats as stats
248
+ >>> res = stats.poisson_means_test(0, 100, 3, 100)
249
+ >>> res.statistic, res.pvalue
250
+ (-1.7320508075688772, 0.08837900929018157)
251
+
252
+ The p-value is .088, indicating a near 9% chance of observing a value of
253
+ the test statistic under the null hypothesis. This exceeds 5%, so the
254
+ gardener does not reject the null hypothesis as the difference cannot be
255
+ regarded as significant at this level.
256
+ """
257
+
258
+ _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative)
259
+
260
+ # "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4)
261
+ lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))
262
+
263
+ # "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
264
+ # case the null hypothesis cannot be rejected ... [and] it is not necessary
265
+ # to compute the p-value". [1] page 26 below eq. (3.6).
266
+ if lmbd_hat2 <= 0:
267
+ return _stats_py.SignificanceResult(0, 1)
268
+
269
+ # The unbiased variance estimate [1] (3.2)
270
+ var = k1 / (n1 ** 2) + k2 / (n2 ** 2)
271
+
272
+ # The _observed_ pivot statistic from the input. It follows the
273
+ # unnumbered equation following equation (3.3) This is used later in
274
+ # comparison with the computed pivot statistics in an indicator function.
275
+ t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)
276
+
277
+ # Equation (3.5) of [1] is lengthy, so it is broken into several parts,
278
+ # beginning here. Note that the probability mass function of poisson is
279
+ exp^(-\mu)*\mu^k/k!, so this is called with shape \mu, noted here
280
+ as nlmbd_hat*. The strategy for evaluating the double summation in
281
+ # (3.5) is to create two arrays of the values of the two products inside
282
+ # the summation and then broadcast them together into a matrix, and then
283
+ # sum across the entire matrix.
284
+
285
+ # Compute constants (as seen in the first and second separated products in
286
+ # (3.5)). (This is the shape (\mu) parameter of the poisson distribution.)
287
+ nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
288
+ nlmbd_hat2 = n2 * lmbd_hat2
289
+
290
+ # Determine summation bounds for tail ends of distribution rather than
291
+ summing to infinity. `x1*` is for the outer sum and `x2*` is for the inner
292
+ # sum.
293
+ x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
294
+ x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)
295
+
296
+ # Construct arrays to function as the x_1 and x_2 counters on the summation
297
+ # in (3.5). `x1` is in columns and `x2` is in rows to allow for
298
+ # broadcasting.
299
+ x1 = np.arange(x1_lb, x1_ub + 1)
300
+ x2 = np.arange(x2_lb, x2_ub + 1)[:, None]
301
+
302
+ # These are the two products in equation (3.5) with `prob_x1` being the
303
+ # first (left side) and `prob_x2` being the second (right side). (To
304
+ # make it as clear as possible: the 1st contains a "+ d" term, the 2nd does
305
+ # not.)
306
+ prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
307
+ prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)
308
+
309
+ # compute constants for use in the "pivot statistic" per the
310
+ # unnumbered equation following (3.3).
311
+ lmbd_x1 = x1 / n1
312
+ lmbd_x2 = x2 / n2
313
+ lmbds_diff = lmbd_x1 - lmbd_x2 - diff
314
+ var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2
315
+
316
+ # This is the 'pivot statistic' for use in the indicator of the summation
317
+ # (left side of "I[.]").
318
+ with np.errstate(invalid='ignore', divide='ignore'):
319
+ t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)
320
+
321
+ # `[indicator]` implements the "I[.] ... the indicator function" per
322
+ # the paragraph following equation (3.5).
323
+ if alternative == 'two-sided':
324
+ indicator = np.abs(t_x1x2) >= np.abs(t_k1k2)
325
+ elif alternative == 'less':
326
+ indicator = t_x1x2 <= t_k1k2
327
+ else:
328
+ indicator = t_x1x2 >= t_k1k2
329
+
330
+ # Multiply all combinations of the products together, exclude terms
331
+ # based on the `indicator` and then sum. (3.5)
332
+ pvalue = np.sum((prob_x1 * prob_x2)[indicator])
333
+ return _stats_py.SignificanceResult(t_k1k2, pvalue)
334
+
335
+
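As a complement to the two-sided gardener example in the docstring above, a minimal sketch of the ``alternative`` parameter with the same counts; no output values are asserted here:

>>> from scipy import stats
>>> res = stats.poisson_means_test(0, 100, 3, 100, alternative='less')
>>> stat, p = res.statistic, res.pvalue   # tests against lambda_1 - lambda_2 < 0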
336
+ def _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative):
337
+ # """check for valid types and values of input to `poisson_mean_test`."""
338
+ if k1 != int(k1) or k2 != int(k2):
339
+ raise TypeError('`k1` and `k2` must be integers.')
340
+
341
+ count_err = '`k1` and `k2` must be greater than or equal to 0.'
342
+ if k1 < 0 or k2 < 0:
343
+ raise ValueError(count_err)
344
+
345
+ if n1 <= 0 or n2 <= 0:
346
+ raise ValueError('`n1` and `n2` must be greater than 0.')
347
+
348
+ if diff < 0:
349
+ raise ValueError('`diff` must be greater than or equal to 0.')
350
+
351
+ alternatives = {'two-sided', 'less', 'greater'}
352
+ if alternative.lower() not in alternatives:
353
+ raise ValueError(f"Alternative must be one of '{alternatives}'.")
354
+
355
+
356
+ class CramerVonMisesResult:
357
+ def __init__(self, statistic, pvalue):
358
+ self.statistic = statistic
359
+ self.pvalue = pvalue
360
+
361
+ def __repr__(self):
362
+ return (f"{self.__class__.__name__}(statistic={self.statistic}, "
363
+ f"pvalue={self.pvalue})")
364
+
365
+
366
+ def _psi1_mod(x):
367
+ """
368
+ psi1 is defined in equation 1.10 in Csörgő, S. and Faraway, J. (1996).
369
+ This implements a modified version by excluding the term V(x) / 12
370
+ (here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x)
371
+ twice in _cdf_cvm.
372
+
373
+ Implementation based on MAPLE code of Julian Faraway and R code of the
374
+ function pCvM in the package goftest (v1.1.1), permission granted
375
+ by Adrian Baddeley. Main difference in the implementation: the code
376
+ here keeps adding terms of the series until the terms are small enough.
377
+ """
378
+
379
+ def _ed2(y):
380
+ z = y**2 / 4
381
+ b = kv(1/4, z) + kv(3/4, z)
382
+ return np.exp(-z) * (y/2)**(3/2) * b / np.sqrt(np.pi)
383
+
384
+ def _ed3(y):
385
+ z = y**2 / 4
386
+ c = np.exp(-z) / np.sqrt(np.pi)
387
+ return c * (y/2)**(5/2) * (2*kv(1/4, z) + 3*kv(3/4, z) - kv(5/4, z))
388
+
389
+ def _Ak(k, x):
390
+ m = 2*k + 1
391
+ sx = 2 * np.sqrt(x)
392
+ y1 = x**(3/4)
393
+ y2 = x**(5/4)
394
+
395
+ e1 = m * gamma(k + 1/2) * _ed2((4 * k + 3)/sx) / (9 * y1)
396
+ e2 = gamma(k + 1/2) * _ed3((4 * k + 1) / sx) / (72 * y2)
397
+ e3 = 2 * (m + 2) * gamma(k + 3/2) * _ed3((4 * k + 5) / sx) / (12 * y2)
398
+ e4 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 1) / sx) / (144 * y1)
399
+ e5 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 5) / sx) / (144 * y1)
400
+
401
+ return e1 + e2 + e3 + e4 + e5
402
+
403
+ x = np.asarray(x)
404
+ tot = np.zeros_like(x, dtype='float')
405
+ cond = np.ones_like(x, dtype='bool')
406
+ k = 0
407
+ while np.any(cond):
408
+ z = -_Ak(k, x[cond]) / (np.pi * gamma(k + 1))
409
+ tot[cond] = tot[cond] + z
410
+ cond[cond] = np.abs(z) >= 1e-7
411
+ k += 1
412
+
413
+ return tot
414
+
415
+
416
+ def _cdf_cvm_inf(x):
417
+ """
418
+ Calculate the cdf of the Cramér-von Mises statistic (infinite sample size).
419
+
420
+ See equation 1.2 in Csörgő, S. and Faraway, J. (1996).
421
+
422
+ Implementation based on MAPLE code of Julian Faraway and R code of the
423
+ function pCvM in the package goftest (v1.1.1), permission granted
424
+ by Adrian Baddeley. Main difference in the implementation: the code
425
+ here keeps adding terms of the series until the terms are small enough.
426
+
427
+ The function is not expected to be accurate for large values of x, say
428
+ x > 4, when the cdf is very close to 1.
429
+ """
430
+ x = np.asarray(x)
431
+
432
+ def term(x, k):
433
+ # this expression can be found in [2], second line of (1.3)
434
+ u = np.exp(gammaln(k + 0.5) - gammaln(k+1)) / (np.pi**1.5 * np.sqrt(x))
435
+ y = 4*k + 1
436
+ q = y**2 / (16*x)
437
+ b = kv(0.25, q)
438
+ return u * np.sqrt(y) * np.exp(-q) * b
439
+
440
+ tot = np.zeros_like(x, dtype='float')
441
+ cond = np.ones_like(x, dtype='bool')
442
+ k = 0
443
+ while np.any(cond):
444
+ z = term(x[cond], k)
445
+ tot[cond] = tot[cond] + z
446
+ cond[cond] = np.abs(z) >= 1e-7
447
+ k += 1
448
+
449
+ return tot
450
+
451
+
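A minimal evaluation sketch for this asymptotic cdf, assuming the private import path ``scipy.stats._hypotests``; no values are asserted, since the series is an approximation and the docstring warns about large ``x``:

>>> import numpy as np
>>> from scipy.stats._hypotests import _cdf_cvm_inf
>>> x = np.array([0.1, 0.5, 1.0])    # moderate statistic values, well below x > 4
>>> vals = _cdf_cvm_inf(x)           # nondecreasing cdf values between 0 and 1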
452
+ def _cdf_cvm(x, n=None):
453
+ """
454
+ Calculate the cdf of the Cramér-von Mises statistic for a finite sample
455
+ size n. If n is None, use the asymptotic cdf (n=inf).
456
+
457
+ See equation 1.8 in Csörgő, S. and Faraway, J. (1996) for finite samples,
458
+ 1.2 for the asymptotic cdf.
459
+
460
+ The function is not expected to be accurate for large values of x, say
461
+ x > 2, when the cdf is very close to 1 and it might return values > 1
462
+ in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846. Moreover, it
463
+ is not accurate for small values of n, especially close to the bounds of
464
+ the distribution's domain, [1/(12*n), n/3], where the value jumps to 0
465
+ and 1, respectively. These are limitations of the approximation by Csörgő
466
+ and Faraway (1996) implemented in this function.
467
+ """
468
+ x = np.asarray(x)
469
+ if n is None:
470
+ y = _cdf_cvm_inf(x)
471
+ else:
472
+ # support of the test statistic is [1/(12*n), n/3], see 1.1 in [2]
473
+ y = np.zeros_like(x, dtype='float')
474
+ sup = (1./(12*n) < x) & (x < n/3.)
475
+ # note: _psi1_mod does not include the term _cdf_cvm_inf(x) / 12
476
+ # therefore, we need to add it here
477
+ y[sup] = _cdf_cvm_inf(x[sup]) * (1 + 1./(12*n)) + _psi1_mod(x[sup]) / n
478
+ y[x >= n/3] = 1
479
+
480
+ if y.ndim == 0:
481
+ return y[()]
482
+ return y
483
+
484
+
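``cramervonmises`` (below) converts this cdf into a p-value by complementing and clipping it; a short sketch of that step, assuming the private import path ``scipy.stats._hypotests`` and a hypothetical statistic ``w`` and sample size ``n``:

>>> import numpy as np
>>> from scipy.stats._hypotests import _cdf_cvm
>>> w, n = 0.2, 20                                # hypothetical statistic / sample size
>>> p = np.clip(1. - _cdf_cvm(w, n), 0., None)    # same clipping used in cramervonmises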
485
+ def _cvm_result_to_tuple(res, _):
486
+ return res.statistic, res.pvalue
487
+
488
+
489
+ @_axis_nan_policy_factory(CramerVonMisesResult, n_samples=1, too_small=1,
490
+ result_to_tuple=_cvm_result_to_tuple)
491
+ def cramervonmises(rvs, cdf, args=()):
492
+ """Perform the one-sample Cramér-von Mises test for goodness of fit.
493
+
494
+ This performs a test of the goodness of fit of a cumulative distribution
495
+ function (cdf) :math:`F` compared to the empirical distribution function
496
+ :math:`F_n` of observed random variates :math:`X_1, ..., X_n` that are
497
+ assumed to be independent and identically distributed ([1]_).
498
+ The null hypothesis is that the :math:`X_i` have cumulative distribution
499
+ :math:`F`.
500
+
501
+ Parameters
502
+ ----------
503
+ rvs : array_like
504
+ A 1-D array of observed values of the random variables :math:`X_i`.
505
+ The sample must contain at least two observations.
506
+ cdf : str or callable
507
+ The cumulative distribution function :math:`F` to test the
508
+ observations against. If a string, it should be the name of a
509
+ distribution in `scipy.stats`. If a callable, that callable is used
510
+ to calculate the cdf: ``cdf(x, *args) -> float``.
511
+ args : tuple, optional
512
+ Distribution parameters. These are assumed to be known; see Notes.
513
+
514
+ Returns
515
+ -------
516
+ res : object with attributes
517
+ statistic : float
518
+ Cramér-von Mises statistic.
519
+ pvalue : float
520
+ The p-value.
521
+
522
+ See Also
523
+ --------
524
+ kstest, cramervonmises_2samp
525
+
526
+ Notes
527
+ -----
528
+ .. versionadded:: 1.6.0
529
+
530
+ The p-value relies on the approximation given by equation 1.8 in [2]_.
531
+ It is important to keep in mind that the p-value is only accurate if
532
+ one tests a simple hypothesis, i.e. the parameters of the reference
533
+ distribution are known. If the parameters are estimated from the data
534
+ (composite hypothesis), the computed p-value is not reliable.
535
+
536
+ References
537
+ ----------
538
+ .. [1] Cramér-von Mises criterion, Wikipedia,
539
+ https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion
540
+ .. [2] Csörgő, S. and Faraway, J. (1996). The Exact and Asymptotic
541
+ Distribution of Cramér-von Mises Statistics. Journal of the
542
+ Royal Statistical Society, pp. 221-234.
543
+
544
+ Examples
545
+ --------
546
+
547
+ Suppose we wish to test whether data generated by ``scipy.stats.norm.rvs``
548
+ were, in fact, drawn from the standard normal distribution. We choose a
549
+ significance level of ``alpha=0.05``.
550
+
551
+ >>> import numpy as np
552
+ >>> from scipy import stats
553
+ >>> rng = np.random.default_rng(165417232101553420507139617764912913465)
554
+ >>> x = stats.norm.rvs(size=500, random_state=rng)
555
+ >>> res = stats.cramervonmises(x, 'norm')
556
+ >>> res.statistic, res.pvalue
557
+ (0.1072085112565724, 0.5508482238203407)
558
+
559
+ The p-value exceeds our chosen significance level, so we do not
560
+ reject the null hypothesis that the observed sample is drawn from the
561
+ standard normal distribution.
562
+
563
+ Now suppose we wish to check whether the same sample shifted by 2.1 is
564
+ consistent with being drawn from a normal distribution with a mean of 2.
565
+
566
+ >>> y = x + 2.1
567
+ >>> res = stats.cramervonmises(y, 'norm', args=(2,))
568
+ >>> res.statistic, res.pvalue
569
+ (0.8364446265294695, 0.00596286797008283)
570
+
571
+ Here we have used the `args` keyword to specify the mean (``loc``)
572
+ of the normal distribution to test the data against. This is equivalent
573
+ to the following, in which we create a frozen normal distribution with
574
+ mean 2, then pass its ``cdf`` method as an argument.
575
+
576
+ >>> frozen_dist = stats.norm(loc=2)
577
+ >>> res = stats.cramervonmises(y, frozen_dist.cdf)
578
+ >>> res.statistic, res.pvalue
579
+ (0.8364446265294695, 0.00596286797008283)
580
+
581
+ In either case, we would reject the null hypothesis that the observed
582
+ sample is drawn from a normal distribution with a mean of 2 (and default
583
+ variance of 1) because the p-value is less than our chosen
584
+ significance level.
585
+
586
+ """
587
+ if isinstance(cdf, str):
588
+ cdf = getattr(distributions, cdf).cdf
589
+
590
+ vals = np.sort(np.asarray(rvs))
591
+
592
+ if vals.size <= 1:
593
+ raise ValueError('The sample must contain at least two observations.')
594
+
595
+ n = len(vals)
596
+ cdfvals = cdf(vals, *args)
597
+
598
+ u = (2*np.arange(1, n+1) - 1)/(2*n)
599
+ w = 1/(12*n) + np.sum((u - cdfvals)**2)
600
+
601
+ # avoid small negative values that can occur due to the approximation
602
+ p = np.clip(1. - _cdf_cvm(w, n), 0., None)
603
+
604
+ return CramerVonMisesResult(statistic=w, pvalue=p)
605
+
606
+
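Beyond the docstring examples above, ``cdf`` also accepts an arbitrary callable; a hedged sketch testing exponential data against ``stats.expon.cdf``, with illustrative data and no output values asserted:

>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng(0)                # arbitrary seed for illustration
>>> sample = rng.exponential(size=200)
>>> res = stats.cramervonmises(sample, stats.expon.cdf)
>>> stat, p = res.statistic, res.pvalue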
607
+ def _get_wilcoxon_distr(n):
608
+ """
609
+ Probability distribution of the Wilcoxon signed-rank statistic r_plus (sum
610
+ of ranks of positive differences).
611
+ Returns an array with the probabilities of all the possible ranks
612
+ r = 0, ..., n*(n+1)/2
613
+ """
614
+ c = np.ones(1, dtype=np.float64)
615
+ for k in range(1, n + 1):
616
+ prev_c = c
617
+ c = np.zeros(k * (k + 1) // 2 + 1, dtype=np.float64)
618
+ m = len(prev_c)
619
+ c[:m] = prev_c * 0.5
620
+ c[-m:] += prev_c * 0.5
621
+ return c
622
+
623
+
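A small sanity sketch for the helper above, assuming the private import path ``scipy.stats._hypotests``: for ``n`` paired differences the returned array covers the ranks ``0, ..., n*(n+1)/2`` and its probabilities sum to one.

>>> import numpy as np
>>> from scipy.stats._hypotests import _get_wilcoxon_distr
>>> probs = _get_wilcoxon_distr(4)
>>> len(probs)                       # n*(n+1)//2 + 1 entries for n = 4
11
>>> float(np.sum(probs))             # probabilities over all ranks sum to 1
1.0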
624
+ def _get_wilcoxon_distr2(n):
625
+ """
626
+ Probability distribution of the Wilcoxon signed-rank statistic r_plus (sum
627
+ of ranks of positive differences).
628
+ Returns an array with the probabilities of all the possible ranks
629
+ r = 0, ..., n*(n+1)/2
630
+ This is a slower reference implementation of `_get_wilcoxon_distr`.
631
+ References
632
+ ----------
633
+ .. [1] Harris T, Hardin JW. Exact Wilcoxon Signed-Rank and Wilcoxon
634
+ Mann-Whitney Ranksum Tests. The Stata Journal. 2013;13(2):337-343.
635
+ """
636
+ ai = np.arange(1, n+1)[:, None]
637
+ t = n*(n+1)/2
638
+ q = 2*t
639
+ j = np.arange(q)
640
+ theta = 2*np.pi/q*j
641
+ phi_sp = np.prod(np.cos(theta*ai), axis=0)
642
+ phi_s = np.exp(1j*theta*t) * phi_sp
643
+ p = np.real(ifft(phi_s))
644
+ res = np.zeros(int(t)+1)
645
+ res[:-1:] = p[::2]
646
+ res[0] /= 2
647
+ res[-1] = res[0]
648
+ return res
649
+
650
+
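Since both helpers above compute the same distribution (the second being a slower reference, per its docstring), they can be cross-checked directly; a minimal sketch assuming the private import path ``scipy.stats._hypotests``:

>>> import numpy as np
>>> from scipy.stats._hypotests import _get_wilcoxon_distr, _get_wilcoxon_distr2
>>> a = _get_wilcoxon_distr(6)       # iterative convolution of {0, rank} increments
>>> b = _get_wilcoxon_distr2(6)      # characteristic-function / inverse-FFT reference
>>> assert np.allclose(a, b)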
651
+ def _tau_b(A):
652
+ """Calculate Kendall's tau-b and p-value from contingency table."""
653
+ # See [2] 2.2 and 4.2
654
+
655
+ # contingency table must be truly 2D
656
+ if A.shape[0] == 1 or A.shape[1] == 1:
657
+ return np.nan, np.nan
658
+
659
+ NA = A.sum()
660
+ PA = _P(A)
661
+ QA = _Q(A)
662
+ Sri2 = (A.sum(axis=1)**2).sum()
663
+ Scj2 = (A.sum(axis=0)**2).sum()
664
+ denominator = (NA**2 - Sri2)*(NA**2 - Scj2)
665
+
666
+ tau = (PA-QA)/(denominator)**0.5
667
+
668
+ numerator = 4*(_a_ij_Aij_Dij2(A) - (PA - QA)**2 / NA)
669
+ s02_tau_b = numerator/denominator
670
+ if s02_tau_b == 0: # Avoid divide by zero
671
+ return tau, 0
672
+ Z = tau/s02_tau_b**0.5
673
+ p = 2*norm.sf(abs(Z)) # 2-sided p-value
674
+
675
+ return tau, p
676
+
677
+
678
+ def _somers_d(A, alternative='two-sided'):
679
+ """Calculate Somers' D and p-value from contingency table."""
680
+ # See [3] page 1740
681
+
682
+ # contingency table must be truly 2D
683
+ if A.shape[0] <= 1 or A.shape[1] <= 1:
684
+ return np.nan, np.nan
685
+
686
+ NA = A.sum()
687
+ NA2 = NA**2
688
+ PA = _P(A)
689
+ QA = _Q(A)
690
+ Sri2 = (A.sum(axis=1)**2).sum()
691
+
692
+ d = (PA - QA)/(NA2 - Sri2)
693
+
694
+ S = _a_ij_Aij_Dij2(A) - (PA-QA)**2/NA
695
+
696
+ with np.errstate(divide='ignore'):
697
+ Z = (PA - QA)/(4*(S))**0.5
698
+
699
+ norm = _stats_py._SimpleNormal()
700
+ p = _stats_py._get_pvalue(Z, norm, alternative, xp=np)
701
+
702
+ return d, p
703
+
704
+
705
+ @dataclass
706
+ class SomersDResult:
707
+ statistic: float
708
+ pvalue: float
709
+ table: np.ndarray
710
+
711
+
712
+ def somersd(x, y=None, alternative='two-sided'):
713
+ r"""Calculates Somers' D, an asymmetric measure of ordinal association.
714
+
715
+ Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the
716
+ correspondence between two rankings. Both statistics consider the
717
+ difference between the number of concordant and discordant pairs in two
718
+ rankings :math:`X` and :math:`Y`, and both are normalized such that values
719
+ close to 1 indicate strong agreement and values close to -1 indicate
720
+ strong disagreement. They differ in how they are normalized. To show the
721
+ relationship, Somers' :math:`D` can be defined in terms of Kendall's
722
+ :math:`\tau_a`:
723
+
724
+ .. math::
725
+ D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)}
726
+
727
+ Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the
728
+ second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of
729
+ :math:`n` rankings can also be viewed as an :math:`r \times s` contingency
730
+ table in which element :math:`i, j` is the number of rank pairs with rank
731
+ :math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`.
732
+ Accordingly, `somersd` also allows the input data to be supplied as a
733
+ single, 2D contingency table instead of as two separate, 1D rankings.
734
+
735
+ Note that the definition of Somers' :math:`D` is asymmetric: in general,
736
+ :math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers'
737
+ :math:`D(Y|X)`: the "row" variable :math:`X` is treated as an independent
738
+ variable, and the "column" variable :math:`Y` is dependent. For Somers'
739
+ :math:`D(X|Y)`, swap the input lists or transpose the input table.
740
+
741
+ Parameters
742
+ ----------
743
+ x : array_like
744
+ 1D array of rankings, treated as the (row) independent variable.
745
+ Alternatively, a 2D contingency table.
746
+ y : array_like, optional
747
+ If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the
748
+ same length, treated as the (column) dependent variable.
749
+ If `x` is 2D, `y` is ignored.
750
+ alternative : {'two-sided', 'less', 'greater'}, optional
751
+ Defines the alternative hypothesis. Default is 'two-sided'.
752
+ The following options are available:
+
753
+ * 'two-sided': the rank correlation is nonzero
754
+ * 'less': the rank correlation is negative (less than zero)
755
+ * 'greater': the rank correlation is positive (greater than zero)
756
+
757
+ Returns
758
+ -------
759
+ res : SomersDResult
760
+ A `SomersDResult` object with the following fields:
761
+
762
+ statistic : float
763
+ The Somers' :math:`D` statistic.
764
+ pvalue : float
765
+ The p-value for a hypothesis test whose null
766
+ hypothesis is an absence of association, :math:`D=0`.
767
+ See notes for more information.
768
+ table : 2D array
769
+ The contingency table formed from rankings `x` and `y` (or the
770
+ provided contingency table, if `x` is a 2D array)
771
+
772
+ See Also
773
+ --------
774
+ kendalltau : Calculates Kendall's tau, another correlation measure.
775
+ weightedtau : Computes a weighted version of Kendall's tau.
776
+ spearmanr : Calculates a Spearman rank-order correlation coefficient.
777
+ pearsonr : Calculates a Pearson correlation coefficient.
778
+
779
+ Notes
780
+ -----
781
+ This function follows the contingency table approach of [2]_ and
782
+ [3]_. *p*-values are computed based on an asymptotic approximation of
783
+ the test statistic distribution under the null hypothesis :math:`D=0`.
784
+
785
+ Theoretically, hypothesis tests based on Kendall's :math:`\tau` and Somers'
786
+ :math:`D` should be identical.
787
+ However, the *p*-values returned by `kendalltau` are based
788
+ on the null hypothesis of *independence* between :math:`X` and :math:`Y`
789
+ (i.e. the population from which pairs in :math:`X` and :math:`Y` are
790
+ sampled contains equal numbers of all possible pairs), which is more
791
+ specific than the null hypothesis :math:`D=0` used here. If the null
792
+ hypothesis of independence is desired, it is acceptable to use the
793
+ *p*-value returned by `kendalltau` with the statistic returned by
794
+ `somersd` and vice versa. For more information, see [2]_.
795
+
796
+ Contingency tables are formatted according to the convention used by
797
+ SAS and R: the first ranking supplied (``x``) is the "row" variable, and
798
+ the second ranking supplied (``y``) is the "column" variable. This is
799
+ opposite the convention of Somers' original paper [1]_.
800
+
801
+ References
802
+ ----------
803
+ .. [1] Robert H. Somers, "A New Asymmetric Measure of Association for
804
+ Ordinal Variables", *American Sociological Review*, Vol. 27, No. 6,
805
+ pp. 799--811, 1962.
806
+
807
+ .. [2] Morton B. Brown and Jacqueline K. Benedetti, "Sampling Behavior of
808
+ Tests for Correlation in Two-Way Contingency Tables", *Journal of
809
+ the American Statistical Association* Vol. 72, No. 358, pp.
810
+ 309--315, 1977.
811
+
812
+ .. [3] SAS Institute, Inc., "The FREQ Procedure (Book Excerpt)",
813
+ *SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009.
814
+
815
+ .. [4] Laerd Statistics, "Somers' d using SPSS Statistics", *SPSS
816
+ Statistics Tutorials and Statistical Guides*,
817
+ https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php,
818
+ Accessed July 31, 2020.
819
+
820
+ Examples
821
+ --------
822
+ We calculate Somers' D for the example given in [4]_, in which a hotel
823
+ chain owner seeks to determine the association between hotel room
824
+ cleanliness and customer satisfaction. The independent variable, hotel
825
+ room cleanliness, is ranked on an ordinal scale: "below average (1)",
826
+ "average (2)", or "above average (3)". The dependent variable, customer
827
+ satisfaction, is ranked on a second scale: "very dissatisfied (1)",
828
+ "moderately dissatisfied (2)", "neither dissatisfied nor satisfied (3)",
829
+ "moderately satisfied (4)", or "very satisfied (5)". 189 customers
830
+ respond to the survey, and the results are cast into a contingency table
831
+ with the hotel room cleanliness as the "row" variable and customer
832
+ satisfaction as the "column" variable.
833
+
834
+ +-----+-----+-----+-----+-----+-----+
835
+ | | (1) | (2) | (3) | (4) | (5) |
836
+ +=====+=====+=====+=====+=====+=====+
837
+ | (1) | 27 | 25 | 14 | 7 | 0 |
838
+ +-----+-----+-----+-----+-----+-----+
839
+ | (2) | 7 | 14 | 18 | 35 | 12 |
840
+ +-----+-----+-----+-----+-----+-----+
841
+ | (3) | 1 | 3 | 2 | 7 | 17 |
842
+ +-----+-----+-----+-----+-----+-----+
843
+
844
+ For example, 27 customers assigned their room a cleanliness ranking of
845
+ "below average (1)" and a corresponding satisfaction of "very
846
+ dissatisfied (1)". We perform the analysis as follows.
847
+
848
+ >>> from scipy.stats import somersd
849
+ >>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
850
+ >>> res = somersd(table)
851
+ >>> res.statistic
852
+ 0.6032766111513396
853
+ >>> res.pvalue
854
+ 1.0007091191074533e-27
855
+
856
+ The value of the Somers' D statistic is approximately 0.6, indicating
857
+ a positive correlation between room cleanliness and customer satisfaction
858
+ in the sample.
859
+ The *p*-value is very small, indicating a very small probability of
860
+ observing such an extreme value of the statistic under the null
861
+ hypothesis that the statistic of the entire population (from which
862
+ our sample of 189 customers is drawn) is zero. This supports the
863
+ alternative hypothesis that the true value of Somers' D for the population
864
+ is nonzero.
865
+
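+ As noted above, Somers' :math:`D` is asymmetric; a minimal sketch of the
+ other direction, :math:`D(X|Y)`, reuses the same data with the contingency
+ table transposed (output not shown; the name ``res_t`` is only for
+ illustration):
+
+ >>> import numpy as np
+ >>> res_t = somersd(np.asarray(table).T)
+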
866
+ """
867
+ x, y = np.array(x), np.array(y)
868
+ if x.ndim == 1:
869
+ if x.size != y.size:
870
+ raise ValueError("Rankings must be of equal length.")
871
+ table = scipy.stats.contingency.crosstab(x, y)[1]
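+ # `scipy.stats.contingency.crosstab` returns (unique elements, count
+ # table); only the count table is needed here.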
872
+ elif x.ndim == 2:
873
+ if np.any(x < 0):
874
+ raise ValueError("All elements of the contingency table must be "
875
+ "non-negative.")
876
+ if np.any(x != x.astype(int)):
877
+ raise ValueError("All elements of the contingency table must be "
878
+ "integer.")
879
+ if x.nonzero()[0].size < 2:
880
+ raise ValueError("At least two elements of the contingency table "
881
+ "must be nonzero.")
882
+ table = x
883
+ else:
884
+ raise ValueError("x must be either a 1D or 2D array")
885
+ # The table type is converted to a float to avoid an integer overflow
886
+ d, p = _somers_d(table.astype(float), alternative)
887
+
888
+ # add alias for consistency with other correlation functions
889
+ res = SomersDResult(d, p, table)
890
+ res.correlation = d
891
+ return res
892
+
893
+
894
+ # This could be combined with `_all_partitions` in `_resampling.py`
895
+ def _all_partitions(nx, ny):
896
+ """
897
+ Partition a set of indices into two fixed-length sets in all possible ways
898
+
899
+ Partition a set of indices 0 ... nx + ny - 1 into two sets of length nx and
900
+ ny in all possible ways (ignoring order of elements).
901
+ """
902
+ z = np.arange(nx+ny)
903
+ for c in combinations(z, nx):
904
+ x = np.array(c)
905
+ mask = np.ones(nx+ny, bool)
906
+ mask[x] = False
907
+ y = z[mask]
908
+ yield x, y
909
+
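+ # For example (a sketch), list(_all_partitions(2, 1)) yields the splits of
+ # indices {0, 1, 2} into sizes (2, 1): (array([0, 1]), array([2])),
+ # (array([0, 2]), array([1])), and (array([1, 2]), array([0])).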
910
+
911
+ def _compute_log_combinations(n):
912
+ """Compute all log combination of C(n, k)."""
913
+ gammaln_arr = gammaln(np.arange(n + 1) + 1)
914
+ return gammaln(n + 1) - gammaln_arr - gammaln_arr[::-1]
915
+
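+ # For example (a sketch), np.exp(_compute_log_combinations(4)) is
+ # approximately [1., 4., 6., 4., 1.], i.e. the binomial coefficients C(4, k).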
916
+
917
+ @dataclass
918
+ class BarnardExactResult:
919
+ statistic: float
920
+ pvalue: float
921
+
922
+
923
+ def barnard_exact(table, alternative="two-sided", pooled=True, n=32):
924
+ r"""Perform a Barnard exact test on a 2x2 contingency table.
925
+
926
+ Parameters
927
+ ----------
928
+ table : array_like of ints
929
+ A 2x2 contingency table. Elements should be non-negative integers.
930
+
931
+ alternative : {'two-sided', 'less', 'greater'}, optional
932
+ Defines the null and alternative hypotheses. Default is 'two-sided'.
933
+ Please see explanations in the Notes section below.
934
+
935
+ pooled : bool, optional
936
+ Whether to compute score statistic with pooled variance (as in
937
+ Student's t-test, for example) or unpooled variance (as in Welch's
938
+ t-test). Default is ``True``.
939
+
940
+ n : int, optional
941
+ Number of sampling points used in the construction of the sampling
942
+ method. Note that this argument will automatically be converted to
943
+ the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
944
+ select sample points. Default is 32. Must be positive. In most cases,
945
+ 32 points is enough to reach good precision. Using more points comes at
+ a performance cost.
947
+
948
+ Returns
949
+ -------
950
+ ber : BarnardExactResult
951
+ A result object with the following attributes.
952
+
953
+ statistic : float
954
+ The Wald statistic with pooled or unpooled variance, depending
955
+ on the user choice of `pooled`.
956
+
957
+ pvalue : float
958
+ P-value, the probability of obtaining a distribution at least as
959
+ extreme as the one that was actually observed, assuming that the
960
+ null hypothesis is true.
961
+
962
+ See Also
963
+ --------
964
+ chi2_contingency : Chi-square test of independence of variables in a
965
+ contingency table.
966
+ fisher_exact : Fisher exact test on a 2x2 contingency table.
967
+ boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
968
+ which is a uniformly more powerful alternative to Fisher's exact test.
969
+
970
+ Notes
971
+ -----
972
+ Barnard's test is an exact test used in the analysis of contingency
973
+ tables. It examines the association of two categorical variables, and
974
+ is a more powerful alternative to Fisher's exact test
975
+ for 2x2 contingency tables.
976
+
977
+ Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
978
+ where each column stores the binomial experiment, as in the example
979
+ below. Let's also define :math:`p_1, p_2` the theoretical binomial
980
+ probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
981
+ Barnard's exact test, we can assert three different null hypotheses:
982
+
983
+ - :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
984
+ with `alternative` = "less"
985
+
986
+ - :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
987
+ with `alternative` = "greater"
988
+
989
+ - :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
990
+ with `alternative` = "two-sided" (default one)
991
+
992
+ To compute Barnard's exact test, we use the Wald
993
+ statistic [3]_ with pooled or unpooled variance.
994
+ Under the default assumption that both variances are equal
995
+ (``pooled = True``), the statistic is computed as:
996
+
997
+ .. math::
998
+
999
+ T(X) = \frac{
1000
+ \hat{p}_1 - \hat{p}_2
1001
+ }{
1002
+ \sqrt{
1003
+ \hat{p}(1 - \hat{p})
1004
+ (\frac{1}{c_1} +
1005
+ \frac{1}{c_2})
1006
+ }
1007
+ }
1008
+
1009
+ with :math:`\hat{p}_1, \hat{p}_2` and :math:`\hat{p}` the estimators of
1010
+ :math:`p_1, p_2` and :math:`p`, the latter being the combined probability,
1011
+ given the assumption that :math:`p_1 = p_2`.
1012
+
1013
+ If this assumption is invalid (``pooled = False``), the statistic is:
1014
+
1015
+ .. math::
1016
+
1017
+ T(X) = \frac{
1018
+ \hat{p}_1 - \hat{p}_2
1019
+ }{
1020
+ \sqrt{
1021
+ \frac{\hat{p}_1 (1 - \hat{p}_1)}{c_1} +
1022
+ \frac{\hat{p}_2 (1 - \hat{p}_2)}{c_2}
1023
+ }
1024
+ }
1025
+
1026
+ The p-value is then computed as:
1027
+
1028
+ .. math::
1029
+
1030
+ \sum
1031
+ \binom{c_1}{x_{11}}
1032
+ \binom{c_2}{x_{12}}
1033
+ \pi^{x_{11} + x_{12}}
1034
+ (1 - \pi)^{t - x_{11} - x_{12}}
1035
+
1036
+ where the sum is over all 2x2 contingency tables :math:`X` such that:
1037
+ * :math:`T(X) \leq T(X_0)` when `alternative` = "less",
1038
+ * :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
1039
+ * :math:`|T(X)| \geq |T(X_0)|` when `alternative` = "two-sided".
1040
+ Above, :math:`c_1, c_2` are the sums of columns 1 and 2,
+ and :math:`t` is the total (the sum of all four table elements).
1042
+
1043
+ The returned p-value is the maximum p-value taken over the nuisance
1044
+ parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.
1045
+
1046
+ This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
1047
+ number of sample points.
1048
+
1049
+ References
1050
+ ----------
1051
+ .. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
1052
+ 34.1/2 (1947): 123-138. :doi:`dpgkg3`
1053
+
1054
+ .. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
1055
+ unconditional exact tests for comparing two binomials."
1056
+ *Cytel Software Corporation* 675 (2003): 1-5.
1057
+
1058
+ .. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test
1059
+
1060
+ Examples
1061
+ --------
1062
+ An example use of Barnard's test is presented in [2]_.
1063
+
1064
+ Consider the following example of a vaccine efficacy study
1065
+ (Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
1066
+ inoculated with a recombinant DNA influenza vaccine and the other 15 were
1067
+ inoculated with a placebo. Twelve of the 15 subjects in the placebo
1068
+ group (80%) eventually became infected with influenza whereas for the
1069
+ vaccine group, only 7 of the 15 subjects (47%) became infected. The
1070
+ data are tabulated as a 2 x 2 table::
1071
+
1072
+ Vaccine Placebo
1073
+ Yes 7 12
1074
+ No 8 3
1075
+
1076
+ When working with statistical hypothesis testing, we usually use a
1077
+ threshold probability or significance level upon which we decide
1078
+ to reject the null hypothesis :math:`H_0`. Suppose we choose the common
1079
+ significance level of 5%.
1080
+
1081
+ Our alternative hypothesis is that the vaccine will lower the chance of
1082
+ becoming infected with the virus; that is, the probability :math:`p_1` of
1083
+ catching the virus with the vaccine will be *less than* the probability
1084
+ :math:`p_2` of catching the virus without the vaccine. Therefore, we call
1085
+ `barnard_exact` with the ``alternative="less"`` option:
1086
+
1087
+ >>> import scipy.stats as stats
1088
+ >>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
1089
+ >>> res.statistic
1090
+ -1.894
1091
+ >>> res.pvalue
1092
+ 0.03407
1093
+
1094
+ Under the null hypothesis that the vaccine will not lower the chance of
1095
+ becoming infected, the probability of obtaining test results at least as
1096
+ extreme as the observed data is approximately 3.4%. Since this p-value is
1097
+ less than our chosen significance level, we have evidence to reject
1098
+ :math:`H_0` in favor of the alternative.
1099
+
1100
+ Suppose we had used Fisher's exact test instead:
1101
+
1102
+ >>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
1103
+ >>> pvalue
1104
+ 0.0640
1105
+
1106
+ With the same threshold significance of 5%, we would not have been able
1107
+ to reject the null hypothesis in favor of the alternative. As stated in
1108
+ [2]_, Barnard's test is uniformly more powerful than Fisher's exact test
1109
+ because Barnard's test does not condition on any margin. Fisher's test
1110
+ should only be used when both sets of marginals are fixed.
1111
+
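+ The test can also be run without the pooled-variance assumption (a sketch;
+ output not shown, and the name ``res_unpooled`` is only for illustration):
+
+ >>> res_unpooled = stats.barnard_exact([[7, 12], [8, 3]], alternative="less",
+ ...                                    pooled=False)
+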
1112
+ """
1113
+ if n <= 0:
1114
+ raise ValueError(
1115
+ "Number of points `n` must be strictly positive, "
1116
+ f"found {n!r}"
1117
+ )
1118
+
1119
+ table = np.asarray(table, dtype=np.int64)
1120
+
1121
+ if not table.shape == (2, 2):
1122
+ raise ValueError("The input `table` must be of shape (2, 2).")
1123
+
1124
+ if np.any(table < 0):
1125
+ raise ValueError("All values in `table` must be nonnegative.")
1126
+
1127
+ if 0 in table.sum(axis=0):
1128
+ # If both values in a column are zero, the p-value is 1 and
+ # the statistic is NaN.
1130
+ return BarnardExactResult(np.nan, 1.0)
1131
+
1132
+ total_col_1, total_col_2 = table.sum(axis=0)
1133
+
1134
+ x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(-1, 1)
1135
+ x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(1, -1)
1136
+
1137
+ # We need to calculate the wald statistics for each combination of x1 and
1138
+ # x2.
1139
+ p1, p2 = x1 / total_col_1, x2 / total_col_2
1140
+
1141
+ if pooled:
1142
+ p = (x1 + x2) / (total_col_1 + total_col_2)
1143
+ variances = p * (1 - p) * (1 / total_col_1 + 1 / total_col_2)
1144
+ else:
1145
+ variances = p1 * (1 - p1) / total_col_1 + p2 * (1 - p2) / total_col_2
1146
+
1147
+ # To avoid warning when dividing by 0
1148
+ with np.errstate(divide="ignore", invalid="ignore"):
1149
+ wald_statistic = np.divide((p1 - p2), np.sqrt(variances))
1150
+
1151
+ wald_statistic[p1 == p2] = 0 # Removing NaN values
1152
+
1153
+ wald_stat_obs = wald_statistic[table[0, 0], table[0, 1]]
1154
+
1155
+ if alternative == "two-sided":
1156
+ index_arr = np.abs(wald_statistic) >= abs(wald_stat_obs)
1157
+ elif alternative == "less":
1158
+ index_arr = wald_statistic <= wald_stat_obs
1159
+ elif alternative == "greater":
1160
+ index_arr = wald_statistic >= wald_stat_obs
1161
+ else:
1162
+ msg = (
1163
+ "`alternative` should be one of {'two-sided', 'less', 'greater'},"
1164
+ f" found {alternative!r}"
1165
+ )
1166
+ raise ValueError(msg)
1167
+
1168
+ x1_sum_x2 = x1 + x2
1169
+
1170
+ x1_log_comb = _compute_log_combinations(total_col_1)
1171
+ x2_log_comb = _compute_log_combinations(total_col_2)
1172
+ x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
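+ # Entry (i, j) of `x1_sum_x2_log_comb` is log[C(c1, i) * C(c2, j)], the log
+ # of the product of binomial coefficients for each candidate 2x2 table.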
1173
+
1174
+ result = shgo(
1175
+ _get_binomial_log_p_value_with_nuisance_param,
1176
+ args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
1177
+ bounds=((0, 1),),
1178
+ n=n,
1179
+ sampling_method="sobol",
1180
+ )
1181
+
1182
+ # result.fun is the negative log pvalue and therefore needs to be
1183
+ # changed before return
1184
+ p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
1185
+ return BarnardExactResult(wald_stat_obs, p_value)
1186
+
1187
+
1188
+ @dataclass
1189
+ class BoschlooExactResult:
1190
+ statistic: float
1191
+ pvalue: float
1192
+
1193
+
1194
+ def boschloo_exact(table, alternative="two-sided", n=32):
1195
+ r"""Perform Boschloo's exact test on a 2x2 contingency table.
1196
+
1197
+ Parameters
1198
+ ----------
1199
+ table : array_like of ints
1200
+ A 2x2 contingency table. Elements should be non-negative integers.
1201
+
1202
+ alternative : {'two-sided', 'less', 'greater'}, optional
1203
+ Defines the null and alternative hypotheses. Default is 'two-sided'.
1204
+ Please see explanations in the Notes section below.
1205
+
1206
+ n : int, optional
1207
+ Number of sampling points used in the construction of the sampling
1208
+ method. Note that this argument will automatically be converted to
1209
+ the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
1210
+ select sample points. Default is 32. Must be positive. In most cases,
1211
+ 32 points is enough to reach good precision. Using more points comes at
+ a performance cost.
1213
+
1214
+ Returns
1215
+ -------
1216
+ ber : BoschlooExactResult
1217
+ A result object with the following attributes.
1218
+
1219
+ statistic : float
1220
+ The statistic used in Boschloo's test; that is, the p-value
1221
+ from Fisher's exact test.
1222
+
1223
+ pvalue : float
1224
+ P-value, the probability of obtaining a distribution at least as
1225
+ extreme as the one that was actually observed, assuming that the
1226
+ null hypothesis is true.
1227
+
1228
+ See Also
1229
+ --------
1230
+ chi2_contingency : Chi-square test of independence of variables in a
1231
+ contingency table.
1232
+ fisher_exact : Fisher exact test on a 2x2 contingency table.
1233
+ barnard_exact : Barnard's exact test, which is a more powerful alternative
1234
+ than Fisher's exact test for 2x2 contingency tables.
1235
+
1236
+ Notes
1237
+ -----
1238
+ Boschloo's test is an exact test used in the analysis of contingency
1239
+ tables. It examines the association of two categorical variables, and
1240
+ is a uniformly more powerful alternative to Fisher's exact test
1241
+ for 2x2 contingency tables.
1242
+
1243
+ Boschloo's exact test uses the p-value of Fisher's exact test as a
1244
+ statistic, and Boschloo's p-value is the probability under the null
1245
+ hypothesis of observing such an extreme value of this statistic.
1246
+
1247
+ Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
1248
+ where each column stores the binomial experiment, as in the example
1249
+ below. Let's also define :math:`p_1, p_2` the theoretical binomial
1250
+ probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
1251
+ Boschloo's exact test, we can assert three different alternative hypotheses:
1252
+
1253
+ - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 < p_2`,
1254
+ with `alternative` = "less"
1255
+
1256
+ - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 > p_2`,
1257
+ with `alternative` = "greater"
1258
+
1259
+ - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 \neq p_2`,
1260
+ with `alternative` = "two-sided" (default)
1261
+
1262
+ There are multiple conventions for computing a two-sided p-value when the
1263
+ null distribution is asymmetric. Here, we apply the convention that the
1264
+ p-value of a two-sided test is twice the minimum of the p-values of the
1265
+ one-sided tests (clipped to 1.0). Note that `fisher_exact` follows a
1266
+ different convention, so for a given `table`, the statistic reported by
1267
+ `boschloo_exact` may differ from the p-value reported by `fisher_exact`
1268
+ when ``alternative='two-sided'``.
1269
+
1270
+ .. versionadded:: 1.7.0
1271
+
1272
+ References
1273
+ ----------
1274
+ .. [1] R.D. Boschloo. "Raised conditional level of significance for the
1275
+ 2 x 2-table when testing the equality of two probabilities",
1276
+ Statistica Neerlandica, 24(1), 1970
1277
+
1278
+ .. [2] "Boschloo's test", Wikipedia,
1279
+ https://en.wikipedia.org/wiki/Boschloo%27s_test
1280
+
1281
+ .. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction",
1282
+ Human Resource Management, 43(4), 395-407, 2004,
1283
+ :doi:`10.1002/hrm.20032`.
1284
+
1285
+ Examples
1286
+ --------
1287
+ In the following example, we consider the article "Employee
1288
+ attitudes and job satisfaction" [3]_
1289
+ which reports the results of a survey from 63 scientists and 117 college
1290
+ professors. Of the 63 scientists, 31 said they were very satisfied with
1291
+ their jobs, whereas 74 of the college professors were very satisfied
1292
+ with their work. Is this significant evidence that college
1293
+ professors are happier with their work than scientists?
1294
+ The following table summarizes the data mentioned above::
1295
+
1296
+ college professors scientists
1297
+ Very Satisfied 74 31
1298
+ Dissatisfied 43 32
1299
+
1300
+ When working with statistical hypothesis testing, we usually use a
1301
+ threshold probability or significance level upon which we decide
1302
+ to reject the null hypothesis :math:`H_0`. Suppose we choose the common
1303
+ significance level of 5%.
1304
+
1305
+ Our alternative hypothesis is that college professors are truly more
1306
+ satisfied with their work than scientists. Therefore, we expect
1307
+ :math:`p_1` the proportion of very satisfied college professors to be
1308
+ greater than :math:`p_2`, the proportion of very satisfied scientists.
1309
+ We thus call `boschloo_exact` with the ``alternative="greater"`` option:
1310
+
1311
+ >>> import scipy.stats as stats
1312
+ >>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater")
1313
+ >>> res.statistic
1314
+ 0.0483
1315
+ >>> res.pvalue
1316
+ 0.0355
1317
+
1318
+ Under the null hypothesis that college professors and scientists are
+ equally satisfied with their work, the probability of obtaining test
1320
+ results at least as extreme as the observed data is approximately 3.55%.
1321
+ Since this p-value is less than our chosen significance level, we have
1322
+ evidence to reject :math:`H_0` in favor of the alternative hypothesis.
1323
+
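+ As a quick consistency sketch (not part of the original example), the
+ reported statistic should match the one-sided p-value of Fisher's exact
+ test for the same table:
+
+ >>> import numpy as np
+ >>> _, p_fisher = stats.fisher_exact([[74, 31], [43, 32]], alternative="greater")
+ >>> bool(np.isclose(res.statistic, p_fisher))
+ True
+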
1324
+ """
1325
+ hypergeom = distributions.hypergeom
1326
+
1327
+ if n <= 0:
1328
+ raise ValueError(
1329
+ "Number of points `n` must be strictly positive,"
1330
+ f" found {n!r}"
1331
+ )
1332
+
1333
+ table = np.asarray(table, dtype=np.int64)
1334
+
1335
+ if not table.shape == (2, 2):
1336
+ raise ValueError("The input `table` must be of shape (2, 2).")
1337
+
1338
+ if np.any(table < 0):
1339
+ raise ValueError("All values in `table` must be nonnegative.")
1340
+
1341
+ if 0 in table.sum(axis=0):
1342
+ # If both values in a column are zero, the statistic and the
+ # p-value are undefined, so NaN is returned for both.
1344
+ return BoschlooExactResult(np.nan, np.nan)
1345
+
1346
+ total_col_1, total_col_2 = table.sum(axis=0)
1347
+ total = total_col_1 + total_col_2
1348
+ x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
1349
+ x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
1350
+ x1_sum_x2 = x1 + x2
1351
+
1352
+ if alternative == 'less':
1353
+ pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
1354
+ elif alternative == 'greater':
1355
+ # Same formula as the 'less' case, but with the second column.
1356
+ pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
1357
+ elif alternative == 'two-sided':
1358
+ boschloo_less = boschloo_exact(table, alternative="less", n=n)
1359
+ boschloo_greater = boschloo_exact(table, alternative="greater", n=n)
1360
+
1361
+ res = (
1362
+ boschloo_less if boschloo_less.pvalue < boschloo_greater.pvalue
1363
+ else boschloo_greater
1364
+ )
1365
+
1366
+ # Two-sided p-value is defined as twice the minimum of the one-sided
1367
+ # p-values
1368
+ pvalue = np.clip(2 * res.pvalue, a_min=0, a_max=1)
1369
+ return BoschlooExactResult(res.statistic, pvalue)
1370
+ else:
1371
+ msg = (
1372
+ f"`alternative` should be one of {'two-sided', 'less', 'greater'},"
1373
+ f" found {alternative!r}"
1374
+ )
1375
+ raise ValueError(msg)
1376
+
1377
+ fisher_stat = pvalues[table[0, 0], table[0, 1]]
1378
+
1379
+ # fisher_stat * (1+1e-13) guards us from small numerical error. It is
1380
+ # equivalent to np.isclose with relative tol of 1e-13 and absolute tol of 0
1381
+ # For a more thorough explanation, see gh-14178
1382
+ index_arr = pvalues <= fisher_stat * (1+1e-13)
1383
+
1384
+ x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
1385
+ x1_log_comb = _compute_log_combinations(total_col_1)
1386
+ x2_log_comb = _compute_log_combinations(total_col_2)
1387
+ x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
1388
+
1389
+ result = shgo(
1390
+ _get_binomial_log_p_value_with_nuisance_param,
1391
+ args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
1392
+ bounds=((0, 1),),
1393
+ n=n,
1394
+ sampling_method="sobol",
1395
+ )
1396
+
1397
+ # result.fun is the negative log pvalue and therefore needs to be
1398
+ # changed before return
1399
+ p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
1400
+ return BoschlooExactResult(fisher_stat, p_value)
1401
+
1402
+
1403
+ def _get_binomial_log_p_value_with_nuisance_param(
1404
+ nuisance_param, x1_sum_x2, x1_sum_x2_log_comb, index_arr
1405
+ ):
1406
+ r"""
1407
+ Compute the log p-value with respect to a nuisance parameter over
+ a 2x2 sample space.
1409
+
1410
+ Parameters
1411
+ ----------
1412
+ nuisance_param : float
1413
+ Nuisance parameter used in the maximization of the p-value.
+ Must be between 0 and 1.
1415
+
1416
+ x1_sum_x2 : ndarray
1417
+ Sum of x1 and x2 inside barnard_exact
1418
+
1419
+ x1_sum_x2_log_comb : ndarray
1420
+ Sum of the log binomial coefficients of x1 and x2
1421
+
1422
+ index_arr : ndarray of boolean
+ Boolean mask selecting the tables at least as extreme as the
+ observed one.
1423
+
1424
+ Returns
1425
+ -------
1426
+ p_value : float
1427
+ Return the maximum p-value considering every nuisance parameter
1428
+ between 0 and 1
1429
+
1430
+ Notes
1431
+ -----
1432
+
1433
+ Both Barnard's test and Boschloo's test iterate over a nuisance parameter
+ :math:`\pi \in [0, 1]` to find the maximum p-value. To search for this
+ maximum, this function returns the negative log p-value with respect to
+ the nuisance parameter passed in `nuisance_param`. This negative log
+ p-value is then minimized by `shgo`, which yields the maximum p-value.
+
+ Also, to compute the binomial coefficients that appear in the p-value
+ formula, this function uses `gammaln`, which is more tolerant of large
+ values than `scipy.special.comb` and returns the coefficients on a log
+ scale. The small loss of precision is outweighed by a large gain in
+ performance.
1445
+ """
1446
+ t1, t2 = x1_sum_x2.shape
1447
+ n = t1 + t2 - 2
1448
+ with np.errstate(divide="ignore", invalid="ignore"):
1449
+ log_nuisance = np.log(
1450
+ nuisance_param,
1451
+ out=np.zeros_like(nuisance_param),
1452
+ where=nuisance_param >= 0,
1453
+ )
1454
+ log_1_minus_nuisance = np.log(
1455
+ 1 - nuisance_param,
1456
+ out=np.zeros_like(nuisance_param),
1457
+ where=1 - nuisance_param >= 0,
1458
+ )
1459
+
1460
+ nuisance_power_x1_x2 = log_nuisance * x1_sum_x2
1461
+ nuisance_power_x1_x2[(x1_sum_x2 == 0)[:, :]] = 0
1462
+
1463
+ nuisance_power_n_minus_x1_x2 = log_1_minus_nuisance * (n - x1_sum_x2)
1464
+ nuisance_power_n_minus_x1_x2[(x1_sum_x2 == n)[:, :]] = 0
1465
+
1466
+ tmp_log_values_arr = (
1467
+ x1_sum_x2_log_comb
1468
+ + nuisance_power_x1_x2
1469
+ + nuisance_power_n_minus_x1_x2
1470
+ )
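+ # Each entry of `tmp_log_values_arr` is the log of one term of the p-value
+ # sum: log[ C(c1, x11) * C(c2, x12) * pi**(x11 + x12) * (1 - pi)**(n - x11 - x12) ].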
1471
+
1472
+ tmp_values_from_index = tmp_log_values_arr[index_arr]
1473
+
1474
+ # Log-sum-exp trick: subtracting the maximum before exponentiating
+ # avoids overflow/underflow in `np.exp` (and hence log(0) below).
1476
+ max_value = tmp_values_from_index.max()
1477
+
1478
+ # Working with the log p-value improves precision: the p-value lies in
+ # [0, 1], whereas its logarithm spans (-inf, 0], which leaves much more
+ # room in floating point.
1482
+ with np.errstate(divide="ignore", invalid="ignore"):
1483
+ log_probs = np.exp(tmp_values_from_index - max_value).sum()
1484
+ log_pvalue = max_value + np.log(
1485
+ log_probs,
1486
+ out=np.full_like(log_probs, -np.inf),
1487
+ where=log_probs > 0,
1488
+ )
1489
+
1490
+ # Since shgo finds the minimum, the negative log p-value is returned
1491
+ return -log_pvalue
1492
+
1493
+
1494
+ def _pval_cvm_2samp_exact(s, m, n):
1495
+ """
1496
+ Compute the exact p-value of the Cramer-von Mises two-sample test
1497
+ for a given value s of the test statistic.
1498
+ m and n are the sizes of the samples.
1499
+
1500
+ [1] Y. Xiao, A. Gordon, and A. Yakovlev, "A C++ Program for
1501
+ the Cramér-Von Mises Two-Sample Test", J. Stat. Soft.,
1502
+ vol. 17, no. 8, pp. 1-15, Dec. 2006.
1503
+ [2] T. W. Anderson "On the Distribution of the Two-Sample Cramer-von Mises
1504
+ Criterion," The Annals of Mathematical Statistics, Ann. Math. Statist.
1505
+ 33(3), 1148-1159, (September, 1962)
1506
+ """
1507
+
1508
+ # [1, p. 3]
1509
+ lcm = np.lcm(m, n)
1510
+ # [1, p. 4], below eq. 3
1511
+ a = lcm // m
1512
+ b = lcm // n
1513
+ # Combine Eq. 9 in [2] with Eq. 2 in [1] and solve for $\zeta$
1514
+ # Hint: `s` is $U$ in [2], and $T_2$ in [1] is $T$ in [2]
1515
+ mn = m * n
1516
+ zeta = lcm ** 2 * (m + n) * (6 * s - mn * (4 * mn - 1)) // (6 * mn ** 2)
1517
+
1518
+ # bound maximum value that may appear in `gs` (remember both rows!)
1519
+ zeta_bound = lcm**2 * (m + n) # bound elements in row 1
1520
+ combinations = comb(m + n, m) # sum of row 2
1521
+ max_gs = max(zeta_bound, combinations)
1522
+ dtype = np.min_scalar_type(max_gs)
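+ # Using the smallest integer dtype that can hold `max_gs` keeps the
+ # (potentially very large) frequency tables compact in memory.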
1523
+
1524
+ # the frequency table of $g_{u, v}^+$ defined in [1, p. 6]
1525
+ gs = ([np.array([[0], [1]], dtype=dtype)]
1526
+ + [np.empty((2, 0), dtype=dtype) for _ in range(m)])
1527
+ for u in range(n + 1):
1528
+ next_gs = []
1529
+ tmp = np.empty((2, 0), dtype=dtype)
1530
+ for v, g in enumerate(gs):
1531
+ # Calculate g recursively with eq. 11 in [1]. Even though it
1532
+ # doesn't look like it, this also does 12/13 (all of Algorithm 1).
1533
+ vi, i0, i1 = np.intersect1d(tmp[0], g[0], return_indices=True)
1534
+ tmp = np.concatenate([
1535
+ np.stack([vi, tmp[1, i0] + g[1, i1]]),
1536
+ np.delete(tmp, i0, 1),
1537
+ np.delete(g, i1, 1)
1538
+ ], 1)
1539
+ res = (a * v - b * u) ** 2
1540
+ tmp[0] += res.astype(dtype)
1541
+ next_gs.append(tmp)
1542
+ gs = next_gs
1543
+ value, freq = gs[m]
1544
+ return np.float64(np.sum(freq[value >= zeta]) / combinations)
1545
+
1546
+
1547
+ @_axis_nan_policy_factory(CramerVonMisesResult, n_samples=2, too_small=1,
1548
+ result_to_tuple=_cvm_result_to_tuple)
1549
+ def cramervonmises_2samp(x, y, method='auto'):
1550
+ """Perform the two-sample Cramér-von Mises test for goodness of fit.
1551
+
1552
+ This is the two-sample version of the Cramér-von Mises test ([1]_):
1553
+ for two independent samples :math:`X_1, ..., X_n` and
1554
+ :math:`Y_1, ..., Y_m`, the null hypothesis is that the samples
1555
+ come from the same (unspecified) continuous distribution.
1556
+
1557
+ Parameters
1558
+ ----------
1559
+ x : array_like
1560
+ A 1-D array of observed values of the random variables :math:`X_i`.
1561
+ Must contain at least two observations.
1562
+ y : array_like
1563
+ A 1-D array of observed values of the random variables :math:`Y_i`.
1564
+ Must contain at least two observations.
1565
+ method : {'auto', 'asymptotic', 'exact'}, optional
1566
+ The method used to compute the p-value, see Notes for details.
1567
+ The default is 'auto'.
1568
+
1569
+ Returns
1570
+ -------
1571
+ res : object with attributes
1572
+ statistic : float
1573
+ Cramér-von Mises statistic.
1574
+ pvalue : float
1575
+ The p-value.
1576
+
1577
+ See Also
1578
+ --------
1579
+ cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp
1580
+
1581
+ Notes
1582
+ -----
1583
+ .. versionadded:: 1.7.0
1584
+
1585
+ The statistic is computed according to equation 9 in [2]_. The
1586
+ calculation of the p-value depends on the keyword `method`:
1587
+
1588
+ - ``asymptotic``: The p-value is approximated by using the limiting
1589
+ distribution of the test statistic.
1590
+ - ``exact``: The exact p-value is computed by enumerating all
1591
+ possible combinations of the test statistic, see [2]_.
1592
+
1593
+ If ``method='auto'``, the exact approach is used
1594
+ if both samples contain no more than 20 observations,
1595
+ otherwise the asymptotic distribution is used.
1596
+
1597
+ If the underlying distribution is not continuous, the p-value is likely to
1598
+ be conservative (Section 6.2 in [3]_). When ranking the data to compute
1599
+ the test statistic, midranks are used if there are ties.
1600
+
1601
+ References
1602
+ ----------
1603
+ .. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion
1604
+ .. [2] Anderson, T.W. (1962). On the distribution of the two-sample
1605
+ Cramer-von-Mises criterion. The Annals of Mathematical
1606
+ Statistics, pp. 1148-1159.
1607
+ .. [3] Conover, W.J., Practical Nonparametric Statistics, 1971.
1608
+
1609
+ Examples
1610
+ --------
1611
+
1612
+ Suppose we wish to test whether two samples generated by
1613
+ ``scipy.stats.norm.rvs`` have the same distribution. We choose a
1614
+ significance level of alpha=0.05.
1615
+
1616
+ >>> import numpy as np
1617
+ >>> from scipy import stats
1618
+ >>> rng = np.random.default_rng()
1619
+ >>> x = stats.norm.rvs(size=100, random_state=rng)
1620
+ >>> y = stats.norm.rvs(size=70, random_state=rng)
1621
+ >>> res = stats.cramervonmises_2samp(x, y)
1622
+ >>> res.statistic, res.pvalue
1623
+ (0.29376470588235293, 0.1412873014573014)
1624
+
1625
+ The p-value exceeds our chosen significance level, so we do not
1626
+ reject the null hypothesis that the observed samples are drawn from the
1627
+ same distribution.
1628
+
1629
+ For small sample sizes, one can compute the exact p-values:
1630
+
1631
+ >>> x = stats.norm.rvs(size=7, random_state=rng)
1632
+ >>> y = stats.t.rvs(df=2, size=6, random_state=rng)
1633
+ >>> res = stats.cramervonmises_2samp(x, y, method='exact')
1634
+ >>> res.statistic, res.pvalue
1635
+ (0.197802197802198, 0.31643356643356646)
1636
+
1637
+ The p-value based on the asymptotic distribution is a good approximation
1638
+ even though the sample size is small.
1639
+
1640
+ >>> res = stats.cramervonmises_2samp(x, y, method='asymptotic')
1641
+ >>> res.statistic, res.pvalue
1642
+ (0.197802197802198, 0.2966041181527128)
1643
+
1644
+ Independent of the method, one would not reject the null hypothesis at the
1645
+ chosen significance level in this example.
1646
+
1647
+ """
1648
+ xa = np.sort(np.asarray(x))
1649
+ ya = np.sort(np.asarray(y))
1650
+
1651
+ if xa.size <= 1 or ya.size <= 1:
1652
+ raise ValueError('x and y must contain at least two observations.')
1653
+ if method not in ['auto', 'exact', 'asymptotic']:
1654
+ raise ValueError('method must be either auto, exact or asymptotic.')
1655
+
1656
+ nx = len(xa)
1657
+ ny = len(ya)
1658
+
1659
+ if method == 'auto':
1660
+ if max(nx, ny) > 20:
1661
+ method = 'asymptotic'
1662
+ else:
1663
+ method = 'exact'
1664
+
1665
+ # get ranks of x and y in the pooled sample
1666
+ z = np.concatenate([xa, ya])
1667
+ # in case of ties, use midrank (see [1])
1668
+ r = scipy.stats.rankdata(z, method='average')
1669
+ rx = r[:nx]
1670
+ ry = r[nx:]
1671
+
1672
+ # compute U (eq. 10 in [2])
1673
+ u = nx * np.sum((rx - np.arange(1, nx+1))**2)
1674
+ u += ny * np.sum((ry - np.arange(1, ny+1))**2)
1675
+
1676
+ # compute T (eq. 9 in [2])
1677
+ k, N = nx*ny, nx + ny
1678
+ t = u / (k*N) - (4*k - 1)/(6*N)
1679
+
1680
+ if method == 'exact':
1681
+ p = _pval_cvm_2samp_exact(u, nx, ny)
1682
+ else:
1683
+ # compute expected value and variance of T (eq. 11 and 14 in [2])
1684
+ et = (1 + 1/N)/6
1685
+ vt = (N+1) * (4*k*N - 3*(nx**2 + ny**2) - 2*k)
1686
+ vt = vt / (45 * N**2 * 4 * k)
1687
+
1688
+ # compute the normalized statistic (eq. 15 in [2])
1689
+ tn = 1/6 + (t - et) / np.sqrt(45 * vt)
1690
+
1691
+ # approximate distribution of tn with limiting distribution
1692
+ # of the one-sample test statistic
1693
+ # if tn < 0.003, the _cdf_cvm_inf(tn) < 1.28*1e-18, return 1.0 directly
1694
+ if tn < 0.003:
1695
+ p = 1.0
1696
+ else:
1697
+ p = max(0, 1. - _cdf_cvm_inf(tn))
1698
+
1699
+ return CramerVonMisesResult(statistic=t, pvalue=p)
1700
+
1701
+
1702
+ class TukeyHSDResult:
1703
+ """Result of `scipy.stats.tukey_hsd`.
1704
+
1705
+ Attributes
1706
+ ----------
1707
+ statistic : float ndarray
1708
+ The computed statistic of the test for each comparison. The element
1709
+ at index ``(i, j)`` is the statistic for the comparison between groups
1710
+ ``i`` and ``j``.
1711
+ pvalue : float ndarray
1712
+ The associated p-value from the studentized range distribution. The
1713
+ element at index ``(i, j)`` is the p-value for the comparison
1714
+ between groups ``i`` and ``j``.
1715
+
1716
+ Notes
1717
+ -----
1718
+ The string representation of this object displays the most recently
1719
+ calculated confidence interval, and if none have been previously
1720
+ calculated, it will evaluate ``confidence_interval()``.
1721
+
1722
+ References
1723
+ ----------
1724
+ .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
1725
+ Method."
1726
+ https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
1727
+ 28 November 2020.
1728
+ .. [2] P. A. Games and J. F. Howell, "Pairwise Multiple Comparison Procedures
1729
+ with Unequal N's and/or Variances: A Monte Carlo Study," Journal of
1730
+ Educational Statistics, vol. 1, no. 2, pp. 113-125, Jun. 1976,
1731
+ doi: https://doi.org/10.3102/10769986001002113.
1732
+ """
1733
+
1734
+ def __init__(self, statistic, pvalue, _ntreatments, _df, _stand_err):
1735
+ self.statistic = statistic
1736
+ self.pvalue = pvalue
1737
+ self._ntreatments = _ntreatments
1738
+ self._df = _df
1739
+ self._stand_err = _stand_err
1740
+ self._ci = None
1741
+ self._ci_cl = None
1742
+
1743
+ def __str__(self):
1744
+ # Note: `__str__` prints the confidence intervals from the most
1745
+ # recent call to `confidence_interval`. If it has not been called,
1746
+ # it will be called with the default CL of .95.
1747
+ if self._ci is None:
1748
+ self.confidence_interval(confidence_level=.95)
1749
+ s = ("Pairwise Group Comparisons"
1750
+ f" ({self._ci_cl*100:.1f}% Confidence Interval)\n")
1751
+ s += "Comparison Statistic p-value Lower CI Upper CI\n"
1752
+ for i in range(self.pvalue.shape[0]):
1753
+ for j in range(self.pvalue.shape[0]):
1754
+ if i != j:
1755
+ s += (f" ({i} - {j}) {self.statistic[i, j]:>10.3f}"
1756
+ f"{self.pvalue[i, j]:>10.3f}"
1757
+ f"{self._ci.low[i, j]:>10.3f}"
1758
+ f"{self._ci.high[i, j]:>10.3f}\n")
1759
+ return s
1760
+
1761
+ def confidence_interval(self, confidence_level=.95):
1762
+ """Compute the confidence interval for the specified confidence level.
1763
+
1764
+ Parameters
1765
+ ----------
1766
+ confidence_level : float, optional
1767
+ Confidence level for the computed confidence interval
1768
+ of the estimated proportion. Default is .95.
1769
+
1770
+ Returns
1771
+ -------
1772
+ ci : ``ConfidenceInterval`` object
1773
+ The object has attributes ``low`` and ``high`` that hold the
1774
+ lower and upper bounds of the confidence intervals for each
1775
+ comparison. The high and low values are accessible for each
1776
+ comparison at index ``(i, j)`` between groups ``i`` and ``j``.
1777
+
1778
+ References
1779
+ ----------
1780
+ .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1.
1781
+ Tukey's Method."
1782
+ https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
1783
+ 28 November 2020.
1784
+ .. [2] P. A. Games and J. F. Howell, "Pairwise Multiple Comparison Procedures
1785
+ with Unequal N's and/or Variances: A Monte Carlo Study," Journal of
1786
+ Educational Statistics, vol. 1, no. 2, pp. 113-125, Jun. 1976,
1787
+ doi: https://doi.org/10.3102/10769986001002113.
1788
+
1789
+ Examples
1790
+ --------
1791
+ >>> from scipy.stats import tukey_hsd
1792
+ >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
1793
+ >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
1794
+ >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
1795
+ >>> result = tukey_hsd(group0, group1, group2)
1796
+ >>> ci = result.confidence_interval()
1797
+ >>> ci.low
1798
+ array([[-3.649159, -8.249159, -3.909159],
1799
+ [ 0.950841, -3.649159, 0.690841],
1800
+ [-3.389159, -7.989159, -3.649159]])
1801
+ >>> ci.high
1802
+ array([[ 3.649159, -0.950841, 3.389159],
1803
+ [ 8.249159, 3.649159, 7.989159],
1804
+ [ 3.909159, -0.690841, 3.649159]])
1805
+ """
1806
+ # check to see if the supplied confidence level matches that of the
1807
+ # previously computed CI.
1808
+ if (self._ci is not None and self._ci_cl is not None and
1809
+ confidence_level == self._ci_cl):
1810
+ return self._ci
1811
+
1812
+ if not 0 < confidence_level < 1:
1813
+ raise ValueError("Confidence level must be between 0 and 1.")
1814
+ # determine the critical value of the studentized range using the
1815
+ # appropriate confidence level, number of treatments, and degrees
1816
+ # of freedom. See [1] "Confidence limits for Tukey's method" / [2] p.117
1817
+ # "H0 was rejected if...". Note that in the cases of unequal sample sizes,
1818
+ # there will be a criterion for each group comparison.
1819
+ params = (confidence_level, self._ntreatments, self._df)
1820
+ srd = distributions.studentized_range.ppf(*params)
1821
+ # also called maximum critical value, the confidence_radius is the
1822
+ # studentized range critical value * the square root of mean square
1823
+ # error over the sample size.
1824
+ confidence_radius = srd * self._stand_err
1825
+ # the confidence levels are determined by the
1826
+ # `mean_differences` +- `confidence_radius`
1827
+ upper_conf = self.statistic + confidence_radius
1828
+ lower_conf = self.statistic - confidence_radius
1829
+ self._ci = ConfidenceInterval(low=lower_conf, high=upper_conf)
1830
+ self._ci_cl = confidence_level
1831
+ return self._ci
1832
+
1833
+
1834
+ def _tukey_hsd_iv(args, equal_var):
1835
+ if (len(args)) < 2:
1836
+ raise ValueError("There must be more than 1 treatment.")
1837
+ if not isinstance(equal_var, bool):
1838
+ raise TypeError("Expected a boolean value for 'equal_var'")
1839
+ args = [np.asarray(arg) for arg in args]
1840
+ for arg in args:
1841
+ if arg.ndim != 1:
1842
+ raise ValueError("Input samples must be one-dimensional.")
1843
+ if arg.size <= 1:
1844
+ raise ValueError("Input sample size must be greater than one.")
1845
+ if np.isinf(arg).any():
1846
+ raise ValueError("Input samples must be finite.")
1847
+ return args
1848
+
1849
+
1850
+ def tukey_hsd(*args, equal_var=True):
1851
+ """Perform Tukey's HSD test for equality of means over multiple treatments.
1852
+
1853
+ Tukey's honestly significant difference (HSD) test performs pairwise
1854
+ comparison of means for a set of samples. Whereas ANOVA (e.g. `f_oneway`)
1855
+ assesses whether the true means underlying each sample are identical,
1856
+ Tukey's HSD is a post hoc test used to compare the mean of each sample
1857
+ to the mean of each other sample.
1858
+
1859
+ The null hypothesis is that the distributions underlying the samples all
1860
+ have the same mean. The test statistic, which is computed for every
1861
+ possible pairing of samples, is simply the difference between the sample
1862
+ means. For each pair, the p-value is the probability under the null
1863
+ hypothesis (and other assumptions; see notes) of observing such an extreme
1864
+ value of the statistic, considering that many pairwise comparisons are
1865
+ being performed. Confidence intervals for the difference between each pair
1866
+ of means are also available.
1867
+
1868
+ Parameters
1869
+ ----------
1870
+ sample1, sample2, ... : array_like
1871
+ The sample measurements for each group. There must be at least
1872
+ two arguments.
1873
+ equal_var : bool, optional
+ If True (default) and sample sizes are equal, perform Tukey's HSD test [6]_.
+ If True and sample sizes are unequal, perform the Tukey-Kramer test [4]_.
+ If False, perform the Games-Howell test [7]_, which does not assume
+ equal variances.
1877
+
1878
+ Returns
1879
+ -------
1880
+ result : `~scipy.stats._result_classes.TukeyHSDResult` instance
1881
+ The return value is an object with the following attributes:
1882
+
1883
+ statistic : float ndarray
1884
+ The computed statistic of the test for each comparison. The element
1885
+ at index ``(i, j)`` is the statistic for the comparison between
1886
+ groups ``i`` and ``j``.
1887
+ pvalue : float ndarray
1888
+ The computed p-value of the test for each comparison. The element
1889
+ at index ``(i, j)`` is the p-value for the comparison between
1890
+ groups ``i`` and ``j``.
1891
+
1892
+ The object has the following methods:
1893
+
1894
+ confidence_interval(confidence_level=0.95):
1895
+ Compute the confidence interval for the specified confidence level.
1896
+
1897
+ See Also
1898
+ --------
1899
+ dunnett : performs comparison of means against a control group.
1900
+
1901
+ Notes
1902
+ -----
1903
+ The use of this test relies on several assumptions.
1904
+
1905
+ 1. The observations are independent within and among groups.
1906
+ 2. The observations within each group are normally distributed.
1907
+ 3. The distributions from which the samples are drawn have the same finite
1908
+ variance.
1909
+
1910
+ The original formulation of the test was for samples of equal size drawn from
1911
+ populations assumed to have equal variances [6]_. In case of unequal sample sizes,
1912
+ the test uses the Tukey-Kramer method [4]_. When equal variances are not assumed
1913
+ (``equal_var=False``), the test uses the Games-Howell method [7]_.
1914
+
1915
+ References
1916
+ ----------
1917
+ .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
1918
+ Method."
1919
+ https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
1920
+ 28 November 2020.
1921
+ .. [2] Abdi, Herve & Williams, Lynne. (2021). "Tukey's Honestly Significant
1922
+ Difference (HSD) Test."
1923
+ https://personal.utdallas.edu/~herve/abdi-HSD2010-pretty.pdf
1924
+ .. [3] "One-Way ANOVA Using SAS PROC ANOVA & PROC GLM." SAS
1925
+ Tutorials, 2007, www.stattutorials.com/SAS/TUTORIAL-PROC-GLM.htm.
1926
+ .. [4] Kramer, Clyde Young. "Extension of Multiple Range Tests to Group
1927
+ Means with Unequal Numbers of Replications." Biometrics, vol. 12,
1928
+ no. 3, 1956, pp. 307-310. JSTOR, www.jstor.org/stable/3001469.
1929
+ Accessed 25 May 2021.
1930
+ .. [5] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.3.3.
1931
+ The ANOVA table and tests of hypotheses about means"
1932
+ https://www.itl.nist.gov/div898/handbook/prc/section4/prc433.htm,
1933
+ 2 June 2021.
1934
+ .. [6] Tukey, John W. "Comparing Individual Means in the Analysis of
1935
+ Variance." Biometrics, vol. 5, no. 2, 1949, pp. 99-114. JSTOR,
1936
+ www.jstor.org/stable/3001913. Accessed 14 June 2021.
1937
+ .. [7] P. A. Games and J. F. Howell, "Pairwise Multiple Comparison Procedures
1938
+ with Unequal N's and/or Variances: A Monte Carlo Study," Journal of
1939
+ Educational Statistics, vol. 1, no. 2, pp. 113-125, Jun. 1976,
1940
+ doi: https://doi.org/10.3102/10769986001002113.
1941
+
1942
+
1943
+ Examples
1944
+ --------
1945
+ Here are some data comparing the time to relief of three brands of
1946
+ headache medicine, reported in minutes. Data adapted from [3]_.
1947
+
1948
+ >>> import numpy as np
1949
+ >>> from scipy.stats import tukey_hsd
1950
+ >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
1951
+ >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
1952
+ >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
1953
+
1954
+ We would like to see if the means between any of the groups are
1955
+ significantly different. First, visually examine a box and whisker plot.
1956
+
1957
+ >>> import matplotlib.pyplot as plt
1958
+ >>> fig, ax = plt.subplots(1, 1)
1959
+ >>> ax.boxplot([group0, group1, group2])
1960
+ >>> ax.set_xticklabels(["group0", "group1", "group2"]) # doctest: +SKIP
1961
+ >>> ax.set_ylabel("mean") # doctest: +SKIP
1962
+ >>> plt.show()
1963
+
1964
+ From the box and whisker plot, we can see overlap in the interquartile
1965
+ ranges of group 1 with groups 2 and 3, but we can apply the ``tukey_hsd``
1966
+ test to determine if the difference between means is significant. We
1967
+ set a significance level of .05 to reject the null hypothesis.
1968
+
1969
+ >>> res = tukey_hsd(group0, group1, group2)
1970
+ >>> print(res)
1971
+ Pairwise Group Comparisons (95.0% Confidence Interval)
1972
+ Comparison Statistic p-value Lower CI Upper CI
1973
+ (0 - 1) -4.600 0.014 -8.249 -0.951
1974
+ (0 - 2) -0.260 0.980 -3.909 3.389
1975
+ (1 - 0) 4.600 0.014 0.951 8.249
1976
+ (1 - 2) 4.340 0.020 0.691 7.989
1977
+ (2 - 0) 0.260 0.980 -3.389 3.909
1978
+ (2 - 1) -4.340 0.020 -7.989 -0.691
1979
+
1980
+ The null hypothesis is that each group has the same mean. The p-value for
1981
+ comparisons between ``group0`` and ``group1`` as well as ``group1`` and
1982
+ ``group2`` do not exceed .05, so we reject the null hypothesis that they
1983
+ have the same means. The p-value of the comparison between ``group0``
1984
+ and ``group2`` exceeds .05, so we do not reject the null hypothesis that
+ there is no significant difference between their means.
1986
+
1987
+ We can also compute the confidence interval associated with our chosen
1988
+ confidence level.
1989
+
1990
+ >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
1991
+ >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
1992
+ >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
1993
+ >>> result = tukey_hsd(group0, group1, group2)
1994
+ >>> conf = result.confidence_interval(confidence_level=.99)
1995
+ >>> for ((i, j), l) in np.ndenumerate(conf.low):
1996
+ ... # filter out self comparisons
1997
+ ... if i != j:
1998
+ ... h = conf.high[i,j]
1999
+ ... print(f"({i} - {j}) {l:>6.3f} {h:>6.3f}")
2000
+ (0 - 1) -9.480 0.280
2001
+ (0 - 2) -5.140 4.620
2002
+ (1 - 0) -0.280 9.480
2003
+ (1 - 2) -0.540 9.220
2004
+ (2 - 0) -4.620 5.140
2005
+ (2 - 1) -9.220 0.540
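+
+ When the assumption of equal variances is questionable, the Games-Howell
+ variant can be requested instead (a sketch; output not shown, and the name
+ ``res_gh`` is only for illustration):
+
+ >>> res_gh = tukey_hsd(group0, group1, group2, equal_var=False)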
2006
+ """
2007
+ args = _tukey_hsd_iv(args, equal_var)
2008
+ ntreatments = len(args)
2009
+ means = np.asarray([np.mean(arg) for arg in args])
2010
+ nsamples_treatments = np.asarray([a.size for a in args])
2011
+ nobs = np.sum(nsamples_treatments)
2012
+ vars_ = np.asarray([np.var(arg, ddof=1) for arg in args])
2013
+
2014
+ if equal_var:
2015
+ # determine mean square error [5]. Note that this is sometimes called
2016
+ # mean square error within.
2017
+ mse = (np.sum(vars_ * (nsamples_treatments - 1)) / (nobs - ntreatments))
2018
+
2019
+ # The calculation of the standard error differs when treatments differ in
2020
+ # size. See ("Unequal sample sizes")[1].
2021
+ if np.unique(nsamples_treatments).size == 1:
2022
+ # all input groups are the same length, so only one value needs to be
2023
+ # calculated [1].
2024
+ normalize = 2 / nsamples_treatments[0]
2025
+ else:
2026
+ # to compare groups of differing sizes, we must compute a variance
2027
+ # value for each individual comparison. Use broadcasting to get the
2028
+ # resulting matrix. [3], verified against [4] (page 308).
2029
+ normalize = 1 / nsamples_treatments + 1 / nsamples_treatments[None].T
2030
+
2031
+ # the standard error is used in the computation of the tukey criterion and
2032
+ # finding the p-values.
2033
+ stand_err = np.sqrt(normalize * mse / 2)
2034
+ df = nobs - ntreatments
2035
+ else:
2036
+ # `stand_err` is the denominator of the Behrens-Fisher statistic ($v$)
2037
+ # with a factor of $\sqrt{2}$. Compare [7] p.116 "t-solution rejects H0 if...",
2038
+ # [7] p. 117 "H0 was rejected", and definition of `t_stat` below.
2039
+ sj2_nj = vars_ / nsamples_treatments
2040
+ si2_ni = sj2_nj[:, np.newaxis]
2041
+ stand_err = np.sqrt(si2_ni + sj2_nj) / 2**0.5
2042
+
2043
+ # `df` is the Welch degree of freedom $\nu$.
2044
+ # See [7] p. 116 "and the degrees of freedom, $\nu$, are given by...".
2045
+ njm1 = nsamples_treatments - 1
2046
+ nim1 = njm1[:, np.newaxis]
2047
+ df = (si2_ni + sj2_nj)**2 / (si2_ni**2 / nim1 + sj2_nj**2 / njm1)
2048
+
2049
+ # the mean difference is the test statistic.
2050
+ mean_differences = means[None].T - means
2051
+
2052
+ # Calculate the t-statistic to use within the survival function of the
2053
+ # studentized range to get the p-value.
2054
+ t_stat = np.abs(mean_differences) / stand_err
2055
+
2056
+ params = t_stat, ntreatments, df
2057
+ pvalues = distributions.studentized_range.sf(*params)
2058
+
2059
+ return TukeyHSDResult(mean_differences, pvalues, ntreatments,
2060
+ df, stand_err)