scipy-1.16.2-cp314-cp314t-win_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1530)
  1. scipy/__config__.py +161 -0
  2. scipy/__init__.py +150 -0
  3. scipy/_cyutility.cp314t-win_arm64.lib +0 -0
  4. scipy/_cyutility.cp314t-win_arm64.pyd +0 -0
  5. scipy/_distributor_init.py +18 -0
  6. scipy/_lib/__init__.py +14 -0
  7. scipy/_lib/_array_api.py +931 -0
  8. scipy/_lib/_array_api_compat_vendor.py +9 -0
  9. scipy/_lib/_array_api_no_0d.py +103 -0
  10. scipy/_lib/_bunch.py +229 -0
  11. scipy/_lib/_ccallback.py +251 -0
  12. scipy/_lib/_ccallback_c.cp314t-win_arm64.lib +0 -0
  13. scipy/_lib/_ccallback_c.cp314t-win_arm64.pyd +0 -0
  14. scipy/_lib/_disjoint_set.py +254 -0
  15. scipy/_lib/_docscrape.py +761 -0
  16. scipy/_lib/_elementwise_iterative_method.py +346 -0
  17. scipy/_lib/_fpumode.cp314t-win_arm64.lib +0 -0
  18. scipy/_lib/_fpumode.cp314t-win_arm64.pyd +0 -0
  19. scipy/_lib/_gcutils.py +105 -0
  20. scipy/_lib/_pep440.py +487 -0
  21. scipy/_lib/_sparse.py +41 -0
  22. scipy/_lib/_test_ccallback.cp314t-win_arm64.lib +0 -0
  23. scipy/_lib/_test_ccallback.cp314t-win_arm64.pyd +0 -0
  24. scipy/_lib/_test_deprecation_call.cp314t-win_arm64.lib +0 -0
  25. scipy/_lib/_test_deprecation_call.cp314t-win_arm64.pyd +0 -0
  26. scipy/_lib/_test_deprecation_def.cp314t-win_arm64.lib +0 -0
  27. scipy/_lib/_test_deprecation_def.cp314t-win_arm64.pyd +0 -0
  28. scipy/_lib/_testutils.py +373 -0
  29. scipy/_lib/_threadsafety.py +58 -0
  30. scipy/_lib/_tmpdirs.py +86 -0
  31. scipy/_lib/_uarray/LICENSE +29 -0
  32. scipy/_lib/_uarray/__init__.py +116 -0
  33. scipy/_lib/_uarray/_backend.py +707 -0
  34. scipy/_lib/_uarray/_uarray.cp314t-win_arm64.lib +0 -0
  35. scipy/_lib/_uarray/_uarray.cp314t-win_arm64.pyd +0 -0
  36. scipy/_lib/_util.py +1283 -0
  37. scipy/_lib/array_api_compat/__init__.py +22 -0
  38. scipy/_lib/array_api_compat/_internal.py +59 -0
  39. scipy/_lib/array_api_compat/common/__init__.py +1 -0
  40. scipy/_lib/array_api_compat/common/_aliases.py +727 -0
  41. scipy/_lib/array_api_compat/common/_fft.py +213 -0
  42. scipy/_lib/array_api_compat/common/_helpers.py +1058 -0
  43. scipy/_lib/array_api_compat/common/_linalg.py +232 -0
  44. scipy/_lib/array_api_compat/common/_typing.py +192 -0
  45. scipy/_lib/array_api_compat/cupy/__init__.py +13 -0
  46. scipy/_lib/array_api_compat/cupy/_aliases.py +156 -0
  47. scipy/_lib/array_api_compat/cupy/_info.py +336 -0
  48. scipy/_lib/array_api_compat/cupy/_typing.py +31 -0
  49. scipy/_lib/array_api_compat/cupy/fft.py +36 -0
  50. scipy/_lib/array_api_compat/cupy/linalg.py +49 -0
  51. scipy/_lib/array_api_compat/dask/__init__.py +0 -0
  52. scipy/_lib/array_api_compat/dask/array/__init__.py +12 -0
  53. scipy/_lib/array_api_compat/dask/array/_aliases.py +376 -0
  54. scipy/_lib/array_api_compat/dask/array/_info.py +416 -0
  55. scipy/_lib/array_api_compat/dask/array/fft.py +21 -0
  56. scipy/_lib/array_api_compat/dask/array/linalg.py +72 -0
  57. scipy/_lib/array_api_compat/numpy/__init__.py +28 -0
  58. scipy/_lib/array_api_compat/numpy/_aliases.py +190 -0
  59. scipy/_lib/array_api_compat/numpy/_info.py +366 -0
  60. scipy/_lib/array_api_compat/numpy/_typing.py +30 -0
  61. scipy/_lib/array_api_compat/numpy/fft.py +35 -0
  62. scipy/_lib/array_api_compat/numpy/linalg.py +143 -0
  63. scipy/_lib/array_api_compat/torch/__init__.py +22 -0
  64. scipy/_lib/array_api_compat/torch/_aliases.py +855 -0
  65. scipy/_lib/array_api_compat/torch/_info.py +369 -0
  66. scipy/_lib/array_api_compat/torch/_typing.py +3 -0
  67. scipy/_lib/array_api_compat/torch/fft.py +85 -0
  68. scipy/_lib/array_api_compat/torch/linalg.py +121 -0
  69. scipy/_lib/array_api_extra/__init__.py +38 -0
  70. scipy/_lib/array_api_extra/_delegation.py +171 -0
  71. scipy/_lib/array_api_extra/_lib/__init__.py +1 -0
  72. scipy/_lib/array_api_extra/_lib/_at.py +463 -0
  73. scipy/_lib/array_api_extra/_lib/_backends.py +46 -0
  74. scipy/_lib/array_api_extra/_lib/_funcs.py +937 -0
  75. scipy/_lib/array_api_extra/_lib/_lazy.py +357 -0
  76. scipy/_lib/array_api_extra/_lib/_testing.py +278 -0
  77. scipy/_lib/array_api_extra/_lib/_utils/__init__.py +1 -0
  78. scipy/_lib/array_api_extra/_lib/_utils/_compat.py +74 -0
  79. scipy/_lib/array_api_extra/_lib/_utils/_compat.pyi +45 -0
  80. scipy/_lib/array_api_extra/_lib/_utils/_helpers.py +559 -0
  81. scipy/_lib/array_api_extra/_lib/_utils/_typing.py +10 -0
  82. scipy/_lib/array_api_extra/_lib/_utils/_typing.pyi +105 -0
  83. scipy/_lib/array_api_extra/testing.py +359 -0
  84. scipy/_lib/cobyqa/__init__.py +20 -0
  85. scipy/_lib/cobyqa/framework.py +1240 -0
  86. scipy/_lib/cobyqa/main.py +1506 -0
  87. scipy/_lib/cobyqa/models.py +1529 -0
  88. scipy/_lib/cobyqa/problem.py +1296 -0
  89. scipy/_lib/cobyqa/settings.py +132 -0
  90. scipy/_lib/cobyqa/subsolvers/__init__.py +14 -0
  91. scipy/_lib/cobyqa/subsolvers/geometry.py +387 -0
  92. scipy/_lib/cobyqa/subsolvers/optim.py +1203 -0
  93. scipy/_lib/cobyqa/utils/__init__.py +18 -0
  94. scipy/_lib/cobyqa/utils/exceptions.py +22 -0
  95. scipy/_lib/cobyqa/utils/math.py +77 -0
  96. scipy/_lib/cobyqa/utils/versions.py +67 -0
  97. scipy/_lib/decorator.py +399 -0
  98. scipy/_lib/deprecation.py +274 -0
  99. scipy/_lib/doccer.py +366 -0
  100. scipy/_lib/messagestream.cp314t-win_arm64.lib +0 -0
  101. scipy/_lib/messagestream.cp314t-win_arm64.pyd +0 -0
  102. scipy/_lib/pyprima/__init__.py +212 -0
  103. scipy/_lib/pyprima/cobyla/__init__.py +0 -0
  104. scipy/_lib/pyprima/cobyla/cobyla.py +559 -0
  105. scipy/_lib/pyprima/cobyla/cobylb.py +714 -0
  106. scipy/_lib/pyprima/cobyla/geometry.py +226 -0
  107. scipy/_lib/pyprima/cobyla/initialize.py +215 -0
  108. scipy/_lib/pyprima/cobyla/trustregion.py +492 -0
  109. scipy/_lib/pyprima/cobyla/update.py +289 -0
  110. scipy/_lib/pyprima/common/__init__.py +0 -0
  111. scipy/_lib/pyprima/common/_bounds.py +34 -0
  112. scipy/_lib/pyprima/common/_linear_constraints.py +46 -0
  113. scipy/_lib/pyprima/common/_nonlinear_constraints.py +54 -0
  114. scipy/_lib/pyprima/common/_project.py +173 -0
  115. scipy/_lib/pyprima/common/checkbreak.py +93 -0
  116. scipy/_lib/pyprima/common/consts.py +47 -0
  117. scipy/_lib/pyprima/common/evaluate.py +99 -0
  118. scipy/_lib/pyprima/common/history.py +38 -0
  119. scipy/_lib/pyprima/common/infos.py +30 -0
  120. scipy/_lib/pyprima/common/linalg.py +435 -0
  121. scipy/_lib/pyprima/common/message.py +290 -0
  122. scipy/_lib/pyprima/common/powalg.py +131 -0
  123. scipy/_lib/pyprima/common/preproc.py +277 -0
  124. scipy/_lib/pyprima/common/present.py +5 -0
  125. scipy/_lib/pyprima/common/ratio.py +54 -0
  126. scipy/_lib/pyprima/common/redrho.py +47 -0
  127. scipy/_lib/pyprima/common/selectx.py +296 -0
  128. scipy/_lib/tests/__init__.py +0 -0
  129. scipy/_lib/tests/test__gcutils.py +110 -0
  130. scipy/_lib/tests/test__pep440.py +67 -0
  131. scipy/_lib/tests/test__testutils.py +32 -0
  132. scipy/_lib/tests/test__threadsafety.py +51 -0
  133. scipy/_lib/tests/test__util.py +641 -0
  134. scipy/_lib/tests/test_array_api.py +322 -0
  135. scipy/_lib/tests/test_bunch.py +169 -0
  136. scipy/_lib/tests/test_ccallback.py +196 -0
  137. scipy/_lib/tests/test_config.py +45 -0
  138. scipy/_lib/tests/test_deprecation.py +10 -0
  139. scipy/_lib/tests/test_doccer.py +143 -0
  140. scipy/_lib/tests/test_import_cycles.py +18 -0
  141. scipy/_lib/tests/test_public_api.py +482 -0
  142. scipy/_lib/tests/test_scipy_version.py +28 -0
  143. scipy/_lib/tests/test_tmpdirs.py +48 -0
  144. scipy/_lib/tests/test_warnings.py +137 -0
  145. scipy/_lib/uarray.py +31 -0
  146. scipy/cluster/__init__.py +31 -0
  147. scipy/cluster/_hierarchy.cp314t-win_arm64.lib +0 -0
  148. scipy/cluster/_hierarchy.cp314t-win_arm64.pyd +0 -0
  149. scipy/cluster/_optimal_leaf_ordering.cp314t-win_arm64.lib +0 -0
  150. scipy/cluster/_optimal_leaf_ordering.cp314t-win_arm64.pyd +0 -0
  151. scipy/cluster/_vq.cp314t-win_arm64.lib +0 -0
  152. scipy/cluster/_vq.cp314t-win_arm64.pyd +0 -0
  153. scipy/cluster/hierarchy.py +4348 -0
  154. scipy/cluster/tests/__init__.py +0 -0
  155. scipy/cluster/tests/hierarchy_test_data.py +145 -0
  156. scipy/cluster/tests/test_disjoint_set.py +202 -0
  157. scipy/cluster/tests/test_hierarchy.py +1238 -0
  158. scipy/cluster/tests/test_vq.py +434 -0
  159. scipy/cluster/vq.py +832 -0
  160. scipy/conftest.py +683 -0
  161. scipy/constants/__init__.py +358 -0
  162. scipy/constants/_codata.py +2266 -0
  163. scipy/constants/_constants.py +369 -0
  164. scipy/constants/codata.py +21 -0
  165. scipy/constants/constants.py +53 -0
  166. scipy/constants/tests/__init__.py +0 -0
  167. scipy/constants/tests/test_codata.py +78 -0
  168. scipy/constants/tests/test_constants.py +83 -0
  169. scipy/datasets/__init__.py +90 -0
  170. scipy/datasets/_download_all.py +71 -0
  171. scipy/datasets/_fetchers.py +225 -0
  172. scipy/datasets/_registry.py +26 -0
  173. scipy/datasets/_utils.py +81 -0
  174. scipy/datasets/tests/__init__.py +0 -0
  175. scipy/datasets/tests/test_data.py +128 -0
  176. scipy/differentiate/__init__.py +27 -0
  177. scipy/differentiate/_differentiate.py +1129 -0
  178. scipy/differentiate/tests/__init__.py +0 -0
  179. scipy/differentiate/tests/test_differentiate.py +694 -0
  180. scipy/fft/__init__.py +114 -0
  181. scipy/fft/_backend.py +196 -0
  182. scipy/fft/_basic.py +1650 -0
  183. scipy/fft/_basic_backend.py +197 -0
  184. scipy/fft/_debug_backends.py +22 -0
  185. scipy/fft/_fftlog.py +223 -0
  186. scipy/fft/_fftlog_backend.py +200 -0
  187. scipy/fft/_helper.py +348 -0
  188. scipy/fft/_pocketfft/LICENSE.md +25 -0
  189. scipy/fft/_pocketfft/__init__.py +9 -0
  190. scipy/fft/_pocketfft/basic.py +251 -0
  191. scipy/fft/_pocketfft/helper.py +249 -0
  192. scipy/fft/_pocketfft/pypocketfft.cp314t-win_arm64.lib +0 -0
  193. scipy/fft/_pocketfft/pypocketfft.cp314t-win_arm64.pyd +0 -0
  194. scipy/fft/_pocketfft/realtransforms.py +109 -0
  195. scipy/fft/_pocketfft/tests/__init__.py +0 -0
  196. scipy/fft/_pocketfft/tests/test_basic.py +1011 -0
  197. scipy/fft/_pocketfft/tests/test_real_transforms.py +505 -0
  198. scipy/fft/_realtransforms.py +706 -0
  199. scipy/fft/_realtransforms_backend.py +63 -0
  200. scipy/fft/tests/__init__.py +0 -0
  201. scipy/fft/tests/mock_backend.py +96 -0
  202. scipy/fft/tests/test_backend.py +98 -0
  203. scipy/fft/tests/test_basic.py +504 -0
  204. scipy/fft/tests/test_fftlog.py +215 -0
  205. scipy/fft/tests/test_helper.py +558 -0
  206. scipy/fft/tests/test_multithreading.py +84 -0
  207. scipy/fft/tests/test_real_transforms.py +247 -0
  208. scipy/fftpack/__init__.py +103 -0
  209. scipy/fftpack/_basic.py +428 -0
  210. scipy/fftpack/_helper.py +115 -0
  211. scipy/fftpack/_pseudo_diffs.py +554 -0
  212. scipy/fftpack/_realtransforms.py +598 -0
  213. scipy/fftpack/basic.py +20 -0
  214. scipy/fftpack/convolve.cp314t-win_arm64.lib +0 -0
  215. scipy/fftpack/convolve.cp314t-win_arm64.pyd +0 -0
  216. scipy/fftpack/helper.py +19 -0
  217. scipy/fftpack/pseudo_diffs.py +22 -0
  218. scipy/fftpack/realtransforms.py +19 -0
  219. scipy/fftpack/tests/__init__.py +0 -0
  220. scipy/fftpack/tests/fftw_double_ref.npz +0 -0
  221. scipy/fftpack/tests/fftw_longdouble_ref.npz +0 -0
  222. scipy/fftpack/tests/fftw_single_ref.npz +0 -0
  223. scipy/fftpack/tests/test.npz +0 -0
  224. scipy/fftpack/tests/test_basic.py +877 -0
  225. scipy/fftpack/tests/test_helper.py +54 -0
  226. scipy/fftpack/tests/test_import.py +33 -0
  227. scipy/fftpack/tests/test_pseudo_diffs.py +388 -0
  228. scipy/fftpack/tests/test_real_transforms.py +836 -0
  229. scipy/integrate/__init__.py +122 -0
  230. scipy/integrate/_bvp.py +1160 -0
  231. scipy/integrate/_cubature.py +729 -0
  232. scipy/integrate/_dop.cp314t-win_arm64.lib +0 -0
  233. scipy/integrate/_dop.cp314t-win_arm64.pyd +0 -0
  234. scipy/integrate/_ivp/__init__.py +8 -0
  235. scipy/integrate/_ivp/base.py +290 -0
  236. scipy/integrate/_ivp/bdf.py +478 -0
  237. scipy/integrate/_ivp/common.py +451 -0
  238. scipy/integrate/_ivp/dop853_coefficients.py +193 -0
  239. scipy/integrate/_ivp/ivp.py +755 -0
  240. scipy/integrate/_ivp/lsoda.py +224 -0
  241. scipy/integrate/_ivp/radau.py +572 -0
  242. scipy/integrate/_ivp/rk.py +601 -0
  243. scipy/integrate/_ivp/tests/__init__.py +0 -0
  244. scipy/integrate/_ivp/tests/test_ivp.py +1287 -0
  245. scipy/integrate/_ivp/tests/test_rk.py +37 -0
  246. scipy/integrate/_lebedev.py +5450 -0
  247. scipy/integrate/_lsoda.cp314t-win_arm64.lib +0 -0
  248. scipy/integrate/_lsoda.cp314t-win_arm64.pyd +0 -0
  249. scipy/integrate/_ode.py +1395 -0
  250. scipy/integrate/_odepack.cp314t-win_arm64.lib +0 -0
  251. scipy/integrate/_odepack.cp314t-win_arm64.pyd +0 -0
  252. scipy/integrate/_odepack_py.py +273 -0
  253. scipy/integrate/_quad_vec.py +674 -0
  254. scipy/integrate/_quadpack.cp314t-win_arm64.lib +0 -0
  255. scipy/integrate/_quadpack.cp314t-win_arm64.pyd +0 -0
  256. scipy/integrate/_quadpack_py.py +1283 -0
  257. scipy/integrate/_quadrature.py +1336 -0
  258. scipy/integrate/_rules/__init__.py +12 -0
  259. scipy/integrate/_rules/_base.py +518 -0
  260. scipy/integrate/_rules/_gauss_kronrod.py +202 -0
  261. scipy/integrate/_rules/_gauss_legendre.py +62 -0
  262. scipy/integrate/_rules/_genz_malik.py +210 -0
  263. scipy/integrate/_tanhsinh.py +1385 -0
  264. scipy/integrate/_test_multivariate.cp314t-win_arm64.lib +0 -0
  265. scipy/integrate/_test_multivariate.cp314t-win_arm64.pyd +0 -0
  266. scipy/integrate/_test_odeint_banded.cp314t-win_arm64.lib +0 -0
  267. scipy/integrate/_test_odeint_banded.cp314t-win_arm64.pyd +0 -0
  268. scipy/integrate/_vode.cp314t-win_arm64.lib +0 -0
  269. scipy/integrate/_vode.cp314t-win_arm64.pyd +0 -0
  270. scipy/integrate/dop.py +15 -0
  271. scipy/integrate/lsoda.py +15 -0
  272. scipy/integrate/odepack.py +17 -0
  273. scipy/integrate/quadpack.py +23 -0
  274. scipy/integrate/tests/__init__.py +0 -0
  275. scipy/integrate/tests/test__quad_vec.py +211 -0
  276. scipy/integrate/tests/test_banded_ode_solvers.py +305 -0
  277. scipy/integrate/tests/test_bvp.py +714 -0
  278. scipy/integrate/tests/test_cubature.py +1375 -0
  279. scipy/integrate/tests/test_integrate.py +840 -0
  280. scipy/integrate/tests/test_odeint_jac.py +74 -0
  281. scipy/integrate/tests/test_quadpack.py +680 -0
  282. scipy/integrate/tests/test_quadrature.py +730 -0
  283. scipy/integrate/tests/test_tanhsinh.py +1171 -0
  284. scipy/integrate/vode.py +15 -0
  285. scipy/interpolate/__init__.py +228 -0
  286. scipy/interpolate/_bary_rational.py +715 -0
  287. scipy/interpolate/_bsplines.py +2469 -0
  288. scipy/interpolate/_cubic.py +973 -0
  289. scipy/interpolate/_dfitpack.cp314t-win_arm64.lib +0 -0
  290. scipy/interpolate/_dfitpack.cp314t-win_arm64.pyd +0 -0
  291. scipy/interpolate/_dierckx.cp314t-win_arm64.lib +0 -0
  292. scipy/interpolate/_dierckx.cp314t-win_arm64.pyd +0 -0
  293. scipy/interpolate/_fitpack.cp314t-win_arm64.lib +0 -0
  294. scipy/interpolate/_fitpack.cp314t-win_arm64.pyd +0 -0
  295. scipy/interpolate/_fitpack2.py +2397 -0
  296. scipy/interpolate/_fitpack_impl.py +811 -0
  297. scipy/interpolate/_fitpack_py.py +898 -0
  298. scipy/interpolate/_fitpack_repro.py +996 -0
  299. scipy/interpolate/_interpnd.cp314t-win_arm64.lib +0 -0
  300. scipy/interpolate/_interpnd.cp314t-win_arm64.pyd +0 -0
  301. scipy/interpolate/_interpolate.py +2266 -0
  302. scipy/interpolate/_ndbspline.py +415 -0
  303. scipy/interpolate/_ndgriddata.py +329 -0
  304. scipy/interpolate/_pade.py +67 -0
  305. scipy/interpolate/_polyint.py +1025 -0
  306. scipy/interpolate/_ppoly.cp314t-win_arm64.lib +0 -0
  307. scipy/interpolate/_ppoly.cp314t-win_arm64.pyd +0 -0
  308. scipy/interpolate/_rbf.py +290 -0
  309. scipy/interpolate/_rbfinterp.py +550 -0
  310. scipy/interpolate/_rbfinterp_pythran.cp314t-win_arm64.lib +0 -0
  311. scipy/interpolate/_rbfinterp_pythran.cp314t-win_arm64.pyd +0 -0
  312. scipy/interpolate/_rgi.py +764 -0
  313. scipy/interpolate/_rgi_cython.cp314t-win_arm64.lib +0 -0
  314. scipy/interpolate/_rgi_cython.cp314t-win_arm64.pyd +0 -0
  315. scipy/interpolate/dfitpack.py +24 -0
  316. scipy/interpolate/fitpack.py +31 -0
  317. scipy/interpolate/fitpack2.py +29 -0
  318. scipy/interpolate/interpnd.py +24 -0
  319. scipy/interpolate/interpolate.py +30 -0
  320. scipy/interpolate/ndgriddata.py +23 -0
  321. scipy/interpolate/polyint.py +24 -0
  322. scipy/interpolate/rbf.py +18 -0
  323. scipy/interpolate/tests/__init__.py +0 -0
  324. scipy/interpolate/tests/data/bug-1310.npz +0 -0
  325. scipy/interpolate/tests/data/estimate_gradients_hang.npy +0 -0
  326. scipy/interpolate/tests/data/gcvspl.npz +0 -0
  327. scipy/interpolate/tests/test_bary_rational.py +368 -0
  328. scipy/interpolate/tests/test_bsplines.py +3754 -0
  329. scipy/interpolate/tests/test_fitpack.py +519 -0
  330. scipy/interpolate/tests/test_fitpack2.py +1431 -0
  331. scipy/interpolate/tests/test_gil.py +64 -0
  332. scipy/interpolate/tests/test_interpnd.py +452 -0
  333. scipy/interpolate/tests/test_interpolate.py +2630 -0
  334. scipy/interpolate/tests/test_ndgriddata.py +308 -0
  335. scipy/interpolate/tests/test_pade.py +107 -0
  336. scipy/interpolate/tests/test_polyint.py +972 -0
  337. scipy/interpolate/tests/test_rbf.py +246 -0
  338. scipy/interpolate/tests/test_rbfinterp.py +534 -0
  339. scipy/interpolate/tests/test_rgi.py +1151 -0
  340. scipy/io/__init__.py +116 -0
  341. scipy/io/_fast_matrix_market/__init__.py +600 -0
  342. scipy/io/_fast_matrix_market/_fmm_core.cp314t-win_arm64.lib +0 -0
  343. scipy/io/_fast_matrix_market/_fmm_core.cp314t-win_arm64.pyd +0 -0
  344. scipy/io/_fortran.py +354 -0
  345. scipy/io/_harwell_boeing/__init__.py +7 -0
  346. scipy/io/_harwell_boeing/_fortran_format_parser.py +316 -0
  347. scipy/io/_harwell_boeing/hb.py +571 -0
  348. scipy/io/_harwell_boeing/tests/__init__.py +0 -0
  349. scipy/io/_harwell_boeing/tests/test_fortran_format.py +74 -0
  350. scipy/io/_harwell_boeing/tests/test_hb.py +70 -0
  351. scipy/io/_idl.py +917 -0
  352. scipy/io/_mmio.py +968 -0
  353. scipy/io/_netcdf.py +1104 -0
  354. scipy/io/_test_fortran.cp314t-win_arm64.lib +0 -0
  355. scipy/io/_test_fortran.cp314t-win_arm64.pyd +0 -0
  356. scipy/io/arff/__init__.py +28 -0
  357. scipy/io/arff/_arffread.py +873 -0
  358. scipy/io/arff/arffread.py +19 -0
  359. scipy/io/arff/tests/__init__.py +0 -0
  360. scipy/io/arff/tests/data/iris.arff +225 -0
  361. scipy/io/arff/tests/data/missing.arff +8 -0
  362. scipy/io/arff/tests/data/nodata.arff +11 -0
  363. scipy/io/arff/tests/data/quoted_nominal.arff +13 -0
  364. scipy/io/arff/tests/data/quoted_nominal_spaces.arff +13 -0
  365. scipy/io/arff/tests/data/test1.arff +10 -0
  366. scipy/io/arff/tests/data/test10.arff +8 -0
  367. scipy/io/arff/tests/data/test11.arff +11 -0
  368. scipy/io/arff/tests/data/test2.arff +15 -0
  369. scipy/io/arff/tests/data/test3.arff +6 -0
  370. scipy/io/arff/tests/data/test4.arff +11 -0
  371. scipy/io/arff/tests/data/test5.arff +26 -0
  372. scipy/io/arff/tests/data/test6.arff +12 -0
  373. scipy/io/arff/tests/data/test7.arff +15 -0
  374. scipy/io/arff/tests/data/test8.arff +12 -0
  375. scipy/io/arff/tests/data/test9.arff +14 -0
  376. scipy/io/arff/tests/test_arffread.py +421 -0
  377. scipy/io/harwell_boeing.py +17 -0
  378. scipy/io/idl.py +17 -0
  379. scipy/io/matlab/__init__.py +66 -0
  380. scipy/io/matlab/_byteordercodes.py +75 -0
  381. scipy/io/matlab/_mio.py +375 -0
  382. scipy/io/matlab/_mio4.py +632 -0
  383. scipy/io/matlab/_mio5.py +901 -0
  384. scipy/io/matlab/_mio5_params.py +281 -0
  385. scipy/io/matlab/_mio5_utils.cp314t-win_arm64.lib +0 -0
  386. scipy/io/matlab/_mio5_utils.cp314t-win_arm64.pyd +0 -0
  387. scipy/io/matlab/_mio_utils.cp314t-win_arm64.lib +0 -0
  388. scipy/io/matlab/_mio_utils.cp314t-win_arm64.pyd +0 -0
  389. scipy/io/matlab/_miobase.py +435 -0
  390. scipy/io/matlab/_streams.cp314t-win_arm64.lib +0 -0
  391. scipy/io/matlab/_streams.cp314t-win_arm64.pyd +0 -0
  392. scipy/io/matlab/byteordercodes.py +17 -0
  393. scipy/io/matlab/mio.py +16 -0
  394. scipy/io/matlab/mio4.py +17 -0
  395. scipy/io/matlab/mio5.py +19 -0
  396. scipy/io/matlab/mio5_params.py +18 -0
  397. scipy/io/matlab/mio5_utils.py +17 -0
  398. scipy/io/matlab/mio_utils.py +17 -0
  399. scipy/io/matlab/miobase.py +16 -0
  400. scipy/io/matlab/streams.py +16 -0
  401. scipy/io/matlab/tests/__init__.py +0 -0
  402. scipy/io/matlab/tests/data/bad_miuint32.mat +0 -0
  403. scipy/io/matlab/tests/data/bad_miutf8_array_name.mat +0 -0
  404. scipy/io/matlab/tests/data/big_endian.mat +0 -0
  405. scipy/io/matlab/tests/data/broken_utf8.mat +0 -0
  406. scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat +0 -0
  407. scipy/io/matlab/tests/data/corrupted_zlib_data.mat +0 -0
  408. scipy/io/matlab/tests/data/debigged_m4.mat +0 -0
  409. scipy/io/matlab/tests/data/japanese_utf8.txt +5 -0
  410. scipy/io/matlab/tests/data/little_endian.mat +0 -0
  411. scipy/io/matlab/tests/data/logical_sparse.mat +0 -0
  412. scipy/io/matlab/tests/data/malformed1.mat +0 -0
  413. scipy/io/matlab/tests/data/miuint32_for_miint32.mat +0 -0
  414. scipy/io/matlab/tests/data/miutf8_array_name.mat +0 -0
  415. scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat +0 -0
  416. scipy/io/matlab/tests/data/one_by_zero_char.mat +0 -0
  417. scipy/io/matlab/tests/data/parabola.mat +0 -0
  418. scipy/io/matlab/tests/data/single_empty_string.mat +0 -0
  419. scipy/io/matlab/tests/data/some_functions.mat +0 -0
  420. scipy/io/matlab/tests/data/sqr.mat +0 -0
  421. scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat +0 -0
  422. scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat +0 -0
  423. scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat +0 -0
  424. scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat +0 -0
  425. scipy/io/matlab/tests/data/test_empty_struct.mat +0 -0
  426. scipy/io/matlab/tests/data/test_mat4_le_floats.mat +0 -0
  427. scipy/io/matlab/tests/data/test_skip_variable.mat +0 -0
  428. scipy/io/matlab/tests/data/testbool_8_WIN64.mat +0 -0
  429. scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat +0 -0
  430. scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat +0 -0
  431. scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat +0 -0
  432. scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat +0 -0
  433. scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat +0 -0
  434. scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat +0 -0
  435. scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat +0 -0
  436. scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat +0 -0
  437. scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat +0 -0
  438. scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat +0 -0
  439. scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat +0 -0
  440. scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat +0 -0
  441. scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat +0 -0
  442. scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat +0 -0
  443. scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat +0 -0
  444. scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat +0 -0
  445. scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat +0 -0
  446. scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat +0 -0
  447. scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat +0 -0
  448. scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat +0 -0
  449. scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat +0 -0
  450. scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat +0 -0
  451. scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat +0 -0
  452. scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat +0 -0
  453. scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat +0 -0
  454. scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat +0 -0
  455. scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat +0 -0
  456. scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat +0 -0
  457. scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat +0 -0
  458. scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat +0 -0
  459. scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat +0 -0
  460. scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat +0 -0
  461. scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat +0 -0
  462. scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat +0 -0
  463. scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat +0 -0
  464. scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat +0 -0
  465. scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat +0 -0
  466. scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat +0 -0
  467. scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat +0 -0
  468. scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat +0 -0
  469. scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat +0 -0
  470. scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat +0 -0
  471. scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat +0 -0
  472. scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat +0 -0
  473. scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat +0 -0
  474. scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat +0 -0
  475. scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat +0 -0
  476. scipy/io/matlab/tests/data/testsimplecell.mat +0 -0
  477. scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat +0 -0
  478. scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat +0 -0
  479. scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat +0 -0
  480. scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat +0 -0
  481. scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat +0 -0
  482. scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat +0 -0
  483. scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat +0 -0
  484. scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat +0 -0
  485. scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat +0 -0
  486. scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat +0 -0
  487. scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat +0 -0
  488. scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat +0 -0
  489. scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat +0 -0
  490. scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat +0 -0
  491. scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat +0 -0
  492. scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat +0 -0
  493. scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat +0 -0
  494. scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat +0 -0
  495. scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat +0 -0
  496. scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat +0 -0
  497. scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat +0 -0
  498. scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat +0 -0
  499. scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat +0 -0
  500. scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat +0 -0
  501. scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat +0 -0
  502. scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat +0 -0
  503. scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat +0 -0
  504. scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat +0 -0
  505. scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat +0 -0
  506. scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat +0 -0
  507. scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat +0 -0
  508. scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat +0 -0
  509. scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat +0 -0
  510. scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat +0 -0
  511. scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat +0 -0
  512. scipy/io/matlab/tests/data/testvec_4_GLNX86.mat +0 -0
  513. scipy/io/matlab/tests/test_byteordercodes.py +29 -0
  514. scipy/io/matlab/tests/test_mio.py +1399 -0
  515. scipy/io/matlab/tests/test_mio5_utils.py +179 -0
  516. scipy/io/matlab/tests/test_mio_funcs.py +51 -0
  517. scipy/io/matlab/tests/test_mio_utils.py +45 -0
  518. scipy/io/matlab/tests/test_miobase.py +32 -0
  519. scipy/io/matlab/tests/test_pathological.py +33 -0
  520. scipy/io/matlab/tests/test_streams.py +241 -0
  521. scipy/io/mmio.py +17 -0
  522. scipy/io/netcdf.py +17 -0
  523. scipy/io/tests/__init__.py +0 -0
  524. scipy/io/tests/data/Transparent Busy.ani +0 -0
  525. scipy/io/tests/data/array_float32_1d.sav +0 -0
  526. scipy/io/tests/data/array_float32_2d.sav +0 -0
  527. scipy/io/tests/data/array_float32_3d.sav +0 -0
  528. scipy/io/tests/data/array_float32_4d.sav +0 -0
  529. scipy/io/tests/data/array_float32_5d.sav +0 -0
  530. scipy/io/tests/data/array_float32_6d.sav +0 -0
  531. scipy/io/tests/data/array_float32_7d.sav +0 -0
  532. scipy/io/tests/data/array_float32_8d.sav +0 -0
  533. scipy/io/tests/data/array_float32_pointer_1d.sav +0 -0
  534. scipy/io/tests/data/array_float32_pointer_2d.sav +0 -0
  535. scipy/io/tests/data/array_float32_pointer_3d.sav +0 -0
  536. scipy/io/tests/data/array_float32_pointer_4d.sav +0 -0
  537. scipy/io/tests/data/array_float32_pointer_5d.sav +0 -0
  538. scipy/io/tests/data/array_float32_pointer_6d.sav +0 -0
  539. scipy/io/tests/data/array_float32_pointer_7d.sav +0 -0
  540. scipy/io/tests/data/array_float32_pointer_8d.sav +0 -0
  541. scipy/io/tests/data/example_1.nc +0 -0
  542. scipy/io/tests/data/example_2.nc +0 -0
  543. scipy/io/tests/data/example_3_maskedvals.nc +0 -0
  544. scipy/io/tests/data/fortran-3x3d-2i.dat +0 -0
  545. scipy/io/tests/data/fortran-mixed.dat +0 -0
  546. scipy/io/tests/data/fortran-sf8-11x1x10.dat +0 -0
  547. scipy/io/tests/data/fortran-sf8-15x10x22.dat +0 -0
  548. scipy/io/tests/data/fortran-sf8-1x1x1.dat +0 -0
  549. scipy/io/tests/data/fortran-sf8-1x1x5.dat +0 -0
  550. scipy/io/tests/data/fortran-sf8-1x1x7.dat +0 -0
  551. scipy/io/tests/data/fortran-sf8-1x3x5.dat +0 -0
  552. scipy/io/tests/data/fortran-si4-11x1x10.dat +0 -0
  553. scipy/io/tests/data/fortran-si4-15x10x22.dat +0 -0
  554. scipy/io/tests/data/fortran-si4-1x1x1.dat +0 -0
  555. scipy/io/tests/data/fortran-si4-1x1x5.dat +0 -0
  556. scipy/io/tests/data/fortran-si4-1x1x7.dat +0 -0
  557. scipy/io/tests/data/fortran-si4-1x3x5.dat +0 -0
  558. scipy/io/tests/data/invalid_pointer.sav +0 -0
  559. scipy/io/tests/data/null_pointer.sav +0 -0
  560. scipy/io/tests/data/scalar_byte.sav +0 -0
  561. scipy/io/tests/data/scalar_byte_descr.sav +0 -0
  562. scipy/io/tests/data/scalar_complex32.sav +0 -0
  563. scipy/io/tests/data/scalar_complex64.sav +0 -0
  564. scipy/io/tests/data/scalar_float32.sav +0 -0
  565. scipy/io/tests/data/scalar_float64.sav +0 -0
  566. scipy/io/tests/data/scalar_heap_pointer.sav +0 -0
  567. scipy/io/tests/data/scalar_int16.sav +0 -0
  568. scipy/io/tests/data/scalar_int32.sav +0 -0
  569. scipy/io/tests/data/scalar_int64.sav +0 -0
  570. scipy/io/tests/data/scalar_string.sav +0 -0
  571. scipy/io/tests/data/scalar_uint16.sav +0 -0
  572. scipy/io/tests/data/scalar_uint32.sav +0 -0
  573. scipy/io/tests/data/scalar_uint64.sav +0 -0
  574. scipy/io/tests/data/struct_arrays.sav +0 -0
  575. scipy/io/tests/data/struct_arrays_byte_idl80.sav +0 -0
  576. scipy/io/tests/data/struct_arrays_replicated.sav +0 -0
  577. scipy/io/tests/data/struct_arrays_replicated_3d.sav +0 -0
  578. scipy/io/tests/data/struct_inherit.sav +0 -0
  579. scipy/io/tests/data/struct_pointer_arrays.sav +0 -0
  580. scipy/io/tests/data/struct_pointer_arrays_replicated.sav +0 -0
  581. scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav +0 -0
  582. scipy/io/tests/data/struct_pointers.sav +0 -0
  583. scipy/io/tests/data/struct_pointers_replicated.sav +0 -0
  584. scipy/io/tests/data/struct_pointers_replicated_3d.sav +0 -0
  585. scipy/io/tests/data/struct_scalars.sav +0 -0
  586. scipy/io/tests/data/struct_scalars_replicated.sav +0 -0
  587. scipy/io/tests/data/struct_scalars_replicated_3d.sav +0 -0
  588. scipy/io/tests/data/test-1234Hz-le-1ch-10S-20bit-extra.wav +0 -0
  589. scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav +0 -0
  590. scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav +0 -0
  591. scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav +0 -0
  592. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav +0 -0
  593. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav +0 -0
  594. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav +0 -0
  595. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-rf64.wav +0 -0
  596. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav +0 -0
  597. scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav +0 -0
  598. scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav +0 -0
  599. scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav +0 -0
  600. scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav +0 -0
  601. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav +0 -0
  602. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-rf64.wav +0 -0
  603. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav +0 -0
  604. scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav +0 -0
  605. scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav +0 -0
  606. scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav +0 -0
  607. scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav +0 -0
  608. scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav +0 -0
  609. scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav +0 -0
  610. scipy/io/tests/data/various_compressed.sav +0 -0
  611. scipy/io/tests/test_fortran.py +264 -0
  612. scipy/io/tests/test_idl.py +483 -0
  613. scipy/io/tests/test_mmio.py +831 -0
  614. scipy/io/tests/test_netcdf.py +550 -0
  615. scipy/io/tests/test_paths.py +93 -0
  616. scipy/io/tests/test_wavfile.py +501 -0
  617. scipy/io/wavfile.py +938 -0
  618. scipy/linalg/__init__.pxd +1 -0
  619. scipy/linalg/__init__.py +236 -0
  620. scipy/linalg/_basic.py +2146 -0
  621. scipy/linalg/_blas_subroutines.h +164 -0
  622. scipy/linalg/_cythonized_array_utils.cp314t-win_arm64.lib +0 -0
  623. scipy/linalg/_cythonized_array_utils.cp314t-win_arm64.pyd +0 -0
  624. scipy/linalg/_cythonized_array_utils.pxd +40 -0
  625. scipy/linalg/_cythonized_array_utils.pyi +16 -0
  626. scipy/linalg/_decomp.py +1645 -0
  627. scipy/linalg/_decomp_cholesky.py +413 -0
  628. scipy/linalg/_decomp_cossin.py +236 -0
  629. scipy/linalg/_decomp_interpolative.cp314t-win_arm64.lib +0 -0
  630. scipy/linalg/_decomp_interpolative.cp314t-win_arm64.pyd +0 -0
  631. scipy/linalg/_decomp_ldl.py +356 -0
  632. scipy/linalg/_decomp_lu.py +401 -0
  633. scipy/linalg/_decomp_lu_cython.cp314t-win_arm64.lib +0 -0
  634. scipy/linalg/_decomp_lu_cython.cp314t-win_arm64.pyd +0 -0
  635. scipy/linalg/_decomp_lu_cython.pyi +6 -0
  636. scipy/linalg/_decomp_polar.py +113 -0
  637. scipy/linalg/_decomp_qr.py +494 -0
  638. scipy/linalg/_decomp_qz.py +452 -0
  639. scipy/linalg/_decomp_schur.py +336 -0
  640. scipy/linalg/_decomp_svd.py +545 -0
  641. scipy/linalg/_decomp_update.cp314t-win_arm64.lib +0 -0
  642. scipy/linalg/_decomp_update.cp314t-win_arm64.pyd +0 -0
  643. scipy/linalg/_expm_frechet.py +417 -0
  644. scipy/linalg/_fblas.cp314t-win_arm64.lib +0 -0
  645. scipy/linalg/_fblas.cp314t-win_arm64.pyd +0 -0
  646. scipy/linalg/_flapack.cp314t-win_arm64.lib +0 -0
  647. scipy/linalg/_flapack.cp314t-win_arm64.pyd +0 -0
  648. scipy/linalg/_lapack_subroutines.h +1521 -0
  649. scipy/linalg/_linalg_pythran.cp314t-win_arm64.lib +0 -0
  650. scipy/linalg/_linalg_pythran.cp314t-win_arm64.pyd +0 -0
  651. scipy/linalg/_matfuncs.py +1050 -0
  652. scipy/linalg/_matfuncs_expm.cp314t-win_arm64.lib +0 -0
  653. scipy/linalg/_matfuncs_expm.cp314t-win_arm64.pyd +0 -0
  654. scipy/linalg/_matfuncs_expm.pyi +6 -0
  655. scipy/linalg/_matfuncs_inv_ssq.py +886 -0
  656. scipy/linalg/_matfuncs_schur_sqrtm.cp314t-win_arm64.lib +0 -0
  657. scipy/linalg/_matfuncs_schur_sqrtm.cp314t-win_arm64.pyd +0 -0
  658. scipy/linalg/_matfuncs_sqrtm.py +107 -0
  659. scipy/linalg/_matfuncs_sqrtm_triu.cp314t-win_arm64.lib +0 -0
  660. scipy/linalg/_matfuncs_sqrtm_triu.cp314t-win_arm64.pyd +0 -0
  661. scipy/linalg/_misc.py +191 -0
  662. scipy/linalg/_procrustes.py +113 -0
  663. scipy/linalg/_sketches.py +189 -0
  664. scipy/linalg/_solve_toeplitz.cp314t-win_arm64.lib +0 -0
  665. scipy/linalg/_solve_toeplitz.cp314t-win_arm64.pyd +0 -0
  666. scipy/linalg/_solvers.py +862 -0
  667. scipy/linalg/_special_matrices.py +1322 -0
  668. scipy/linalg/_testutils.py +65 -0
  669. scipy/linalg/basic.py +23 -0
  670. scipy/linalg/blas.py +495 -0
  671. scipy/linalg/cython_blas.cp314t-win_arm64.lib +0 -0
  672. scipy/linalg/cython_blas.cp314t-win_arm64.pyd +0 -0
  673. scipy/linalg/cython_blas.pxd +169 -0
  674. scipy/linalg/cython_blas.pyx +1432 -0
  675. scipy/linalg/cython_lapack.cp314t-win_arm64.lib +0 -0
  676. scipy/linalg/cython_lapack.cp314t-win_arm64.pyd +0 -0
  677. scipy/linalg/cython_lapack.pxd +1528 -0
  678. scipy/linalg/cython_lapack.pyx +12045 -0
  679. scipy/linalg/decomp.py +23 -0
  680. scipy/linalg/decomp_cholesky.py +21 -0
  681. scipy/linalg/decomp_lu.py +21 -0
  682. scipy/linalg/decomp_qr.py +20 -0
  683. scipy/linalg/decomp_schur.py +21 -0
  684. scipy/linalg/decomp_svd.py +21 -0
  685. scipy/linalg/interpolative.py +989 -0
  686. scipy/linalg/lapack.py +1081 -0
  687. scipy/linalg/matfuncs.py +23 -0
  688. scipy/linalg/misc.py +21 -0
  689. scipy/linalg/special_matrices.py +22 -0
  690. scipy/linalg/tests/__init__.py +0 -0
  691. scipy/linalg/tests/_cython_examples/extending.pyx +23 -0
  692. scipy/linalg/tests/_cython_examples/meson.build +34 -0
  693. scipy/linalg/tests/data/carex_15_data.npz +0 -0
  694. scipy/linalg/tests/data/carex_18_data.npz +0 -0
  695. scipy/linalg/tests/data/carex_19_data.npz +0 -0
  696. scipy/linalg/tests/data/carex_20_data.npz +0 -0
  697. scipy/linalg/tests/data/carex_6_data.npz +0 -0
  698. scipy/linalg/tests/data/gendare_20170120_data.npz +0 -0
  699. scipy/linalg/tests/test_basic.py +2074 -0
  700. scipy/linalg/tests/test_batch.py +588 -0
  701. scipy/linalg/tests/test_blas.py +1127 -0
  702. scipy/linalg/tests/test_cython_blas.py +118 -0
  703. scipy/linalg/tests/test_cython_lapack.py +22 -0
  704. scipy/linalg/tests/test_cythonized_array_utils.py +130 -0
  705. scipy/linalg/tests/test_decomp.py +3189 -0
  706. scipy/linalg/tests/test_decomp_cholesky.py +268 -0
  707. scipy/linalg/tests/test_decomp_cossin.py +314 -0
  708. scipy/linalg/tests/test_decomp_ldl.py +137 -0
  709. scipy/linalg/tests/test_decomp_lu.py +308 -0
  710. scipy/linalg/tests/test_decomp_polar.py +110 -0
  711. scipy/linalg/tests/test_decomp_update.py +1701 -0
  712. scipy/linalg/tests/test_extending.py +46 -0
  713. scipy/linalg/tests/test_fblas.py +607 -0
  714. scipy/linalg/tests/test_interpolative.py +232 -0
  715. scipy/linalg/tests/test_lapack.py +3620 -0
  716. scipy/linalg/tests/test_matfuncs.py +1125 -0
  717. scipy/linalg/tests/test_matmul_toeplitz.py +136 -0
  718. scipy/linalg/tests/test_procrustes.py +214 -0
  719. scipy/linalg/tests/test_sketches.py +118 -0
  720. scipy/linalg/tests/test_solve_toeplitz.py +150 -0
  721. scipy/linalg/tests/test_solvers.py +844 -0
  722. scipy/linalg/tests/test_special_matrices.py +636 -0
  723. scipy/misc/__init__.py +6 -0
  724. scipy/misc/common.py +6 -0
  725. scipy/misc/doccer.py +6 -0
  726. scipy/ndimage/__init__.py +174 -0
  727. scipy/ndimage/_ctest.cp314t-win_arm64.lib +0 -0
  728. scipy/ndimage/_ctest.cp314t-win_arm64.pyd +0 -0
  729. scipy/ndimage/_cytest.cp314t-win_arm64.lib +0 -0
  730. scipy/ndimage/_cytest.cp314t-win_arm64.pyd +0 -0
  731. scipy/ndimage/_delegators.py +303 -0
  732. scipy/ndimage/_filters.py +2422 -0
  733. scipy/ndimage/_fourier.py +306 -0
  734. scipy/ndimage/_interpolation.py +1033 -0
  735. scipy/ndimage/_measurements.py +1689 -0
  736. scipy/ndimage/_morphology.py +2634 -0
  737. scipy/ndimage/_nd_image.cp314t-win_arm64.lib +0 -0
  738. scipy/ndimage/_nd_image.cp314t-win_arm64.pyd +0 -0
  739. scipy/ndimage/_ndimage_api.py +16 -0
  740. scipy/ndimage/_ni_docstrings.py +214 -0
  741. scipy/ndimage/_ni_label.cp314t-win_arm64.lib +0 -0
  742. scipy/ndimage/_ni_label.cp314t-win_arm64.pyd +0 -0
  743. scipy/ndimage/_ni_support.py +139 -0
  744. scipy/ndimage/_rank_filter_1d.cp314t-win_arm64.lib +0 -0
  745. scipy/ndimage/_rank_filter_1d.cp314t-win_arm64.pyd +0 -0
  746. scipy/ndimage/_support_alternative_backends.py +84 -0
  747. scipy/ndimage/filters.py +27 -0
  748. scipy/ndimage/fourier.py +21 -0
  749. scipy/ndimage/interpolation.py +22 -0
  750. scipy/ndimage/measurements.py +24 -0
  751. scipy/ndimage/morphology.py +27 -0
  752. scipy/ndimage/tests/__init__.py +12 -0
  753. scipy/ndimage/tests/data/label_inputs.txt +21 -0
  754. scipy/ndimage/tests/data/label_results.txt +294 -0
  755. scipy/ndimage/tests/data/label_strels.txt +42 -0
  756. scipy/ndimage/tests/dots.png +0 -0
  757. scipy/ndimage/tests/test_c_api.py +102 -0
  758. scipy/ndimage/tests/test_datatypes.py +67 -0
  759. scipy/ndimage/tests/test_filters.py +3083 -0
  760. scipy/ndimage/tests/test_fourier.py +187 -0
  761. scipy/ndimage/tests/test_interpolation.py +1491 -0
  762. scipy/ndimage/tests/test_measurements.py +1592 -0
  763. scipy/ndimage/tests/test_morphology.py +2950 -0
  764. scipy/ndimage/tests/test_ni_support.py +78 -0
  765. scipy/ndimage/tests/test_splines.py +70 -0
  766. scipy/odr/__init__.py +131 -0
  767. scipy/odr/__odrpack.cp314t-win_arm64.lib +0 -0
  768. scipy/odr/__odrpack.cp314t-win_arm64.pyd +0 -0
  769. scipy/odr/_add_newdocs.py +34 -0
  770. scipy/odr/_models.py +315 -0
  771. scipy/odr/_odrpack.py +1154 -0
  772. scipy/odr/models.py +20 -0
  773. scipy/odr/odrpack.py +21 -0
  774. scipy/odr/tests/__init__.py +0 -0
  775. scipy/odr/tests/test_odr.py +607 -0
  776. scipy/optimize/__init__.pxd +1 -0
  777. scipy/optimize/__init__.py +460 -0
  778. scipy/optimize/_basinhopping.py +741 -0
  779. scipy/optimize/_bglu_dense.cp314t-win_arm64.lib +0 -0
  780. scipy/optimize/_bglu_dense.cp314t-win_arm64.pyd +0 -0
  781. scipy/optimize/_bracket.py +706 -0
  782. scipy/optimize/_chandrupatla.py +551 -0
  783. scipy/optimize/_cobyla_py.py +297 -0
  784. scipy/optimize/_cobyqa_py.py +72 -0
  785. scipy/optimize/_constraints.py +598 -0
  786. scipy/optimize/_dcsrch.py +728 -0
  787. scipy/optimize/_differentiable_functions.py +835 -0
  788. scipy/optimize/_differentialevolution.py +1970 -0
  789. scipy/optimize/_direct.cp314t-win_arm64.lib +0 -0
  790. scipy/optimize/_direct.cp314t-win_arm64.pyd +0 -0
  791. scipy/optimize/_direct_py.py +280 -0
  792. scipy/optimize/_dual_annealing.py +732 -0
  793. scipy/optimize/_elementwise.py +798 -0
  794. scipy/optimize/_group_columns.cp314t-win_arm64.lib +0 -0
  795. scipy/optimize/_group_columns.cp314t-win_arm64.pyd +0 -0
  796. scipy/optimize/_hessian_update_strategy.py +479 -0
  797. scipy/optimize/_highspy/__init__.py +0 -0
  798. scipy/optimize/_highspy/_core.cp314t-win_arm64.lib +0 -0
  799. scipy/optimize/_highspy/_core.cp314t-win_arm64.pyd +0 -0
  800. scipy/optimize/_highspy/_highs_options.cp314t-win_arm64.lib +0 -0
  801. scipy/optimize/_highspy/_highs_options.cp314t-win_arm64.pyd +0 -0
  802. scipy/optimize/_highspy/_highs_wrapper.py +338 -0
  803. scipy/optimize/_isotonic.py +157 -0
  804. scipy/optimize/_lbfgsb.cp314t-win_arm64.lib +0 -0
  805. scipy/optimize/_lbfgsb.cp314t-win_arm64.pyd +0 -0
  806. scipy/optimize/_lbfgsb_py.py +634 -0
  807. scipy/optimize/_linesearch.py +896 -0
  808. scipy/optimize/_linprog.py +733 -0
  809. scipy/optimize/_linprog_doc.py +1434 -0
  810. scipy/optimize/_linprog_highs.py +422 -0
  811. scipy/optimize/_linprog_ip.py +1141 -0
  812. scipy/optimize/_linprog_rs.py +572 -0
  813. scipy/optimize/_linprog_simplex.py +663 -0
  814. scipy/optimize/_linprog_util.py +1521 -0
  815. scipy/optimize/_lsap.cp314t-win_arm64.lib +0 -0
  816. scipy/optimize/_lsap.cp314t-win_arm64.pyd +0 -0
  817. scipy/optimize/_lsq/__init__.py +5 -0
  818. scipy/optimize/_lsq/bvls.py +183 -0
  819. scipy/optimize/_lsq/common.py +731 -0
  820. scipy/optimize/_lsq/dogbox.py +345 -0
  821. scipy/optimize/_lsq/givens_elimination.cp314t-win_arm64.lib +0 -0
  822. scipy/optimize/_lsq/givens_elimination.cp314t-win_arm64.pyd +0 -0
  823. scipy/optimize/_lsq/least_squares.py +1044 -0
  824. scipy/optimize/_lsq/lsq_linear.py +361 -0
  825. scipy/optimize/_lsq/trf.py +587 -0
  826. scipy/optimize/_lsq/trf_linear.py +249 -0
  827. scipy/optimize/_milp.py +394 -0
  828. scipy/optimize/_minimize.py +1199 -0
  829. scipy/optimize/_minpack.cp314t-win_arm64.lib +0 -0
  830. scipy/optimize/_minpack.cp314t-win_arm64.pyd +0 -0
  831. scipy/optimize/_minpack_py.py +1178 -0
  832. scipy/optimize/_moduleTNC.cp314t-win_arm64.lib +0 -0
  833. scipy/optimize/_moduleTNC.cp314t-win_arm64.pyd +0 -0
  834. scipy/optimize/_nnls.py +96 -0
  835. scipy/optimize/_nonlin.py +1634 -0
  836. scipy/optimize/_numdiff.py +963 -0
  837. scipy/optimize/_optimize.py +4169 -0
  838. scipy/optimize/_pava_pybind.cp314t-win_arm64.lib +0 -0
  839. scipy/optimize/_pava_pybind.cp314t-win_arm64.pyd +0 -0
  840. scipy/optimize/_qap.py +760 -0
  841. scipy/optimize/_remove_redundancy.py +522 -0
  842. scipy/optimize/_root.py +732 -0
  843. scipy/optimize/_root_scalar.py +538 -0
  844. scipy/optimize/_shgo.py +1606 -0
  845. scipy/optimize/_shgo_lib/__init__.py +0 -0
  846. scipy/optimize/_shgo_lib/_complex.py +1225 -0
  847. scipy/optimize/_shgo_lib/_vertex.py +460 -0
  848. scipy/optimize/_slsqp_py.py +603 -0
  849. scipy/optimize/_slsqplib.cp314t-win_arm64.lib +0 -0
  850. scipy/optimize/_slsqplib.cp314t-win_arm64.pyd +0 -0
  851. scipy/optimize/_spectral.py +260 -0
  852. scipy/optimize/_tnc.py +438 -0
  853. scipy/optimize/_trlib/__init__.py +12 -0
  854. scipy/optimize/_trlib/_trlib.cp314t-win_arm64.lib +0 -0
  855. scipy/optimize/_trlib/_trlib.cp314t-win_arm64.pyd +0 -0
  856. scipy/optimize/_trustregion.py +318 -0
  857. scipy/optimize/_trustregion_constr/__init__.py +6 -0
  858. scipy/optimize/_trustregion_constr/canonical_constraint.py +390 -0
  859. scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +231 -0
  860. scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +584 -0
  861. scipy/optimize/_trustregion_constr/projections.py +411 -0
  862. scipy/optimize/_trustregion_constr/qp_subproblem.py +637 -0
  863. scipy/optimize/_trustregion_constr/report.py +49 -0
  864. scipy/optimize/_trustregion_constr/tests/__init__.py +0 -0
  865. scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py +296 -0
  866. scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py +39 -0
  867. scipy/optimize/_trustregion_constr/tests/test_projections.py +214 -0
  868. scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +645 -0
  869. scipy/optimize/_trustregion_constr/tests/test_report.py +34 -0
  870. scipy/optimize/_trustregion_constr/tr_interior_point.py +361 -0
  871. scipy/optimize/_trustregion_dogleg.py +122 -0
  872. scipy/optimize/_trustregion_exact.py +437 -0
  873. scipy/optimize/_trustregion_krylov.py +65 -0
  874. scipy/optimize/_trustregion_ncg.py +126 -0
  875. scipy/optimize/_tstutils.py +972 -0
  876. scipy/optimize/_zeros.cp314t-win_arm64.lib +0 -0
  877. scipy/optimize/_zeros.cp314t-win_arm64.pyd +0 -0
  878. scipy/optimize/_zeros_py.py +1475 -0
  879. scipy/optimize/cobyla.py +19 -0
  880. scipy/optimize/cython_optimize/__init__.py +133 -0
  881. scipy/optimize/cython_optimize/_zeros.cp314t-win_arm64.lib +0 -0
  882. scipy/optimize/cython_optimize/_zeros.cp314t-win_arm64.pyd +0 -0
  883. scipy/optimize/cython_optimize/_zeros.pxd +33 -0
  884. scipy/optimize/cython_optimize/c_zeros.pxd +26 -0
  885. scipy/optimize/cython_optimize.pxd +11 -0
  886. scipy/optimize/elementwise.py +38 -0
  887. scipy/optimize/lbfgsb.py +23 -0
  888. scipy/optimize/linesearch.py +18 -0
  889. scipy/optimize/minpack.py +27 -0
  890. scipy/optimize/minpack2.py +17 -0
  891. scipy/optimize/moduleTNC.py +19 -0
  892. scipy/optimize/nonlin.py +29 -0
  893. scipy/optimize/optimize.py +40 -0
  894. scipy/optimize/slsqp.py +22 -0
  895. scipy/optimize/tests/__init__.py +0 -0
  896. scipy/optimize/tests/_cython_examples/extending.pyx +43 -0
  897. scipy/optimize/tests/_cython_examples/meson.build +32 -0
  898. scipy/optimize/tests/test__basinhopping.py +535 -0
  899. scipy/optimize/tests/test__differential_evolution.py +1703 -0
  900. scipy/optimize/tests/test__dual_annealing.py +416 -0
  901. scipy/optimize/tests/test__linprog_clean_inputs.py +312 -0
  902. scipy/optimize/tests/test__numdiff.py +885 -0
  903. scipy/optimize/tests/test__remove_redundancy.py +228 -0
  904. scipy/optimize/tests/test__root.py +124 -0
  905. scipy/optimize/tests/test__shgo.py +1164 -0
  906. scipy/optimize/tests/test__spectral.py +226 -0
  907. scipy/optimize/tests/test_bracket.py +896 -0
  908. scipy/optimize/tests/test_chandrupatla.py +982 -0
  909. scipy/optimize/tests/test_cobyla.py +195 -0
  910. scipy/optimize/tests/test_cobyqa.py +252 -0
  911. scipy/optimize/tests/test_constraint_conversion.py +286 -0
  912. scipy/optimize/tests/test_constraints.py +255 -0
  913. scipy/optimize/tests/test_cython_optimize.py +92 -0
  914. scipy/optimize/tests/test_differentiable_functions.py +1025 -0
  915. scipy/optimize/tests/test_direct.py +321 -0
  916. scipy/optimize/tests/test_extending.py +28 -0
  917. scipy/optimize/tests/test_hessian_update_strategy.py +300 -0
  918. scipy/optimize/tests/test_isotonic_regression.py +167 -0
  919. scipy/optimize/tests/test_lbfgsb_hessinv.py +65 -0
  920. scipy/optimize/tests/test_lbfgsb_setulb.py +122 -0
  921. scipy/optimize/tests/test_least_squares.py +986 -0
  922. scipy/optimize/tests/test_linear_assignment.py +116 -0
  923. scipy/optimize/tests/test_linesearch.py +328 -0
  924. scipy/optimize/tests/test_linprog.py +2577 -0
  925. scipy/optimize/tests/test_lsq_common.py +297 -0
  926. scipy/optimize/tests/test_lsq_linear.py +287 -0
  927. scipy/optimize/tests/test_milp.py +459 -0
  928. scipy/optimize/tests/test_minimize_constrained.py +845 -0
  929. scipy/optimize/tests/test_minpack.py +1194 -0
  930. scipy/optimize/tests/test_nnls.py +469 -0
  931. scipy/optimize/tests/test_nonlin.py +572 -0
  932. scipy/optimize/tests/test_optimize.py +3344 -0
  933. scipy/optimize/tests/test_quadratic_assignment.py +455 -0
  934. scipy/optimize/tests/test_regression.py +40 -0
  935. scipy/optimize/tests/test_slsqp.py +645 -0
  936. scipy/optimize/tests/test_tnc.py +345 -0
  937. scipy/optimize/tests/test_trustregion.py +110 -0
  938. scipy/optimize/tests/test_trustregion_exact.py +351 -0
  939. scipy/optimize/tests/test_trustregion_krylov.py +170 -0
  940. scipy/optimize/tests/test_zeros.py +998 -0
  941. scipy/optimize/tnc.py +22 -0
  942. scipy/optimize/zeros.py +26 -0
  943. scipy/signal/__init__.py +316 -0
  944. scipy/signal/_arraytools.py +264 -0
  945. scipy/signal/_czt.py +575 -0
  946. scipy/signal/_delegators.py +568 -0
  947. scipy/signal/_filter_design.py +5893 -0
  948. scipy/signal/_fir_filter_design.py +1458 -0
  949. scipy/signal/_lti_conversion.py +534 -0
  950. scipy/signal/_ltisys.py +3546 -0
  951. scipy/signal/_max_len_seq.py +139 -0
  952. scipy/signal/_max_len_seq_inner.cp314t-win_arm64.lib +0 -0
  953. scipy/signal/_max_len_seq_inner.cp314t-win_arm64.pyd +0 -0
  954. scipy/signal/_peak_finding.py +1310 -0
  955. scipy/signal/_peak_finding_utils.cp314t-win_arm64.lib +0 -0
  956. scipy/signal/_peak_finding_utils.cp314t-win_arm64.pyd +0 -0
  957. scipy/signal/_polyutils.py +172 -0
  958. scipy/signal/_savitzky_golay.py +357 -0
  959. scipy/signal/_short_time_fft.py +2228 -0
  960. scipy/signal/_signal_api.py +30 -0
  961. scipy/signal/_signaltools.py +5309 -0
  962. scipy/signal/_sigtools.cp314t-win_arm64.lib +0 -0
  963. scipy/signal/_sigtools.cp314t-win_arm64.pyd +0 -0
  964. scipy/signal/_sosfilt.cp314t-win_arm64.lib +0 -0
  965. scipy/signal/_sosfilt.cp314t-win_arm64.pyd +0 -0
  966. scipy/signal/_spectral_py.py +2471 -0
  967. scipy/signal/_spline.cp314t-win_arm64.lib +0 -0
  968. scipy/signal/_spline.cp314t-win_arm64.pyd +0 -0
  969. scipy/signal/_spline.pyi +34 -0
  970. scipy/signal/_spline_filters.py +848 -0
  971. scipy/signal/_support_alternative_backends.py +73 -0
  972. scipy/signal/_upfirdn.py +219 -0
  973. scipy/signal/_upfirdn_apply.cp314t-win_arm64.lib +0 -0
  974. scipy/signal/_upfirdn_apply.cp314t-win_arm64.pyd +0 -0
  975. scipy/signal/_waveforms.py +687 -0
  976. scipy/signal/_wavelets.py +29 -0
  977. scipy/signal/bsplines.py +21 -0
  978. scipy/signal/filter_design.py +28 -0
  979. scipy/signal/fir_filter_design.py +21 -0
  980. scipy/signal/lti_conversion.py +20 -0
  981. scipy/signal/ltisys.py +25 -0
  982. scipy/signal/signaltools.py +27 -0
  983. scipy/signal/spectral.py +21 -0
  984. scipy/signal/spline.py +18 -0
  985. scipy/signal/tests/__init__.py +0 -0
  986. scipy/signal/tests/_scipy_spectral_test_shim.py +311 -0
  987. scipy/signal/tests/mpsig.py +122 -0
  988. scipy/signal/tests/test_array_tools.py +111 -0
  989. scipy/signal/tests/test_bsplines.py +365 -0
  990. scipy/signal/tests/test_cont2discrete.py +424 -0
  991. scipy/signal/tests/test_czt.py +221 -0
  992. scipy/signal/tests/test_dltisys.py +599 -0
  993. scipy/signal/tests/test_filter_design.py +4744 -0
  994. scipy/signal/tests/test_fir_filter_design.py +851 -0
  995. scipy/signal/tests/test_ltisys.py +1225 -0
  996. scipy/signal/tests/test_max_len_seq.py +71 -0
  997. scipy/signal/tests/test_peak_finding.py +915 -0
  998. scipy/signal/tests/test_result_type.py +51 -0
  999. scipy/signal/tests/test_savitzky_golay.py +363 -0
  1000. scipy/signal/tests/test_short_time_fft.py +1107 -0
  1001. scipy/signal/tests/test_signaltools.py +4735 -0
  1002. scipy/signal/tests/test_spectral.py +2141 -0
  1003. scipy/signal/tests/test_splines.py +427 -0
  1004. scipy/signal/tests/test_upfirdn.py +322 -0
  1005. scipy/signal/tests/test_waveforms.py +400 -0
  1006. scipy/signal/tests/test_wavelets.py +59 -0
  1007. scipy/signal/tests/test_windows.py +987 -0
  1008. scipy/signal/waveforms.py +20 -0
  1009. scipy/signal/wavelets.py +17 -0
  1010. scipy/signal/windows/__init__.py +52 -0
  1011. scipy/signal/windows/_windows.py +2513 -0
  1012. scipy/signal/windows/windows.py +23 -0
  1013. scipy/sparse/__init__.py +350 -0
  1014. scipy/sparse/_base.py +1613 -0
  1015. scipy/sparse/_bsr.py +880 -0
  1016. scipy/sparse/_compressed.py +1328 -0
  1017. scipy/sparse/_construct.py +1454 -0
  1018. scipy/sparse/_coo.py +1581 -0
  1019. scipy/sparse/_csc.py +367 -0
  1020. scipy/sparse/_csparsetools.cp314t-win_arm64.lib +0 -0
  1021. scipy/sparse/_csparsetools.cp314t-win_arm64.pyd +0 -0
  1022. scipy/sparse/_csr.py +558 -0
  1023. scipy/sparse/_data.py +569 -0
  1024. scipy/sparse/_dia.py +677 -0
  1025. scipy/sparse/_dok.py +669 -0
  1026. scipy/sparse/_extract.py +178 -0
  1027. scipy/sparse/_index.py +444 -0
  1028. scipy/sparse/_lil.py +632 -0
  1029. scipy/sparse/_matrix.py +169 -0
  1030. scipy/sparse/_matrix_io.py +167 -0
  1031. scipy/sparse/_sparsetools.cp314t-win_arm64.lib +0 -0
  1032. scipy/sparse/_sparsetools.cp314t-win_arm64.pyd +0 -0
  1033. scipy/sparse/_spfuncs.py +76 -0
  1034. scipy/sparse/_sputils.py +632 -0
  1035. scipy/sparse/base.py +24 -0
  1036. scipy/sparse/bsr.py +22 -0
  1037. scipy/sparse/compressed.py +20 -0
  1038. scipy/sparse/construct.py +38 -0
  1039. scipy/sparse/coo.py +23 -0
  1040. scipy/sparse/csc.py +22 -0
  1041. scipy/sparse/csgraph/__init__.py +210 -0
  1042. scipy/sparse/csgraph/_flow.cp314t-win_arm64.lib +0 -0
  1043. scipy/sparse/csgraph/_flow.cp314t-win_arm64.pyd +0 -0
  1044. scipy/sparse/csgraph/_laplacian.py +563 -0
  1045. scipy/sparse/csgraph/_matching.cp314t-win_arm64.lib +0 -0
  1046. scipy/sparse/csgraph/_matching.cp314t-win_arm64.pyd +0 -0
  1047. scipy/sparse/csgraph/_min_spanning_tree.cp314t-win_arm64.lib +0 -0
  1048. scipy/sparse/csgraph/_min_spanning_tree.cp314t-win_arm64.pyd +0 -0
  1049. scipy/sparse/csgraph/_reordering.cp314t-win_arm64.lib +0 -0
  1050. scipy/sparse/csgraph/_reordering.cp314t-win_arm64.pyd +0 -0
  1051. scipy/sparse/csgraph/_shortest_path.cp314t-win_arm64.lib +0 -0
  1052. scipy/sparse/csgraph/_shortest_path.cp314t-win_arm64.pyd +0 -0
  1053. scipy/sparse/csgraph/_tools.cp314t-win_arm64.lib +0 -0
  1054. scipy/sparse/csgraph/_tools.cp314t-win_arm64.pyd +0 -0
  1055. scipy/sparse/csgraph/_traversal.cp314t-win_arm64.lib +0 -0
  1056. scipy/sparse/csgraph/_traversal.cp314t-win_arm64.pyd +0 -0
  1057. scipy/sparse/csgraph/_validation.py +66 -0
  1058. scipy/sparse/csgraph/tests/__init__.py +0 -0
  1059. scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
  1060. scipy/sparse/csgraph/tests/test_conversions.py +61 -0
  1061. scipy/sparse/csgraph/tests/test_flow.py +209 -0
  1062. scipy/sparse/csgraph/tests/test_graph_laplacian.py +368 -0
  1063. scipy/sparse/csgraph/tests/test_matching.py +307 -0
  1064. scipy/sparse/csgraph/tests/test_pydata_sparse.py +197 -0
  1065. scipy/sparse/csgraph/tests/test_reordering.py +70 -0
  1066. scipy/sparse/csgraph/tests/test_shortest_path.py +540 -0
  1067. scipy/sparse/csgraph/tests/test_spanning_tree.py +66 -0
  1068. scipy/sparse/csgraph/tests/test_traversal.py +148 -0
  1069. scipy/sparse/csr.py +22 -0
  1070. scipy/sparse/data.py +18 -0
  1071. scipy/sparse/dia.py +22 -0
  1072. scipy/sparse/dok.py +22 -0
  1073. scipy/sparse/extract.py +23 -0
  1074. scipy/sparse/lil.py +22 -0
  1075. scipy/sparse/linalg/__init__.py +148 -0
  1076. scipy/sparse/linalg/_dsolve/__init__.py +71 -0
  1077. scipy/sparse/linalg/_dsolve/_add_newdocs.py +147 -0
  1078. scipy/sparse/linalg/_dsolve/_superlu.cp314t-win_arm64.lib +0 -0
  1079. scipy/sparse/linalg/_dsolve/_superlu.cp314t-win_arm64.pyd +0 -0
  1080. scipy/sparse/linalg/_dsolve/linsolve.py +882 -0
  1081. scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
  1082. scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +928 -0
  1083. scipy/sparse/linalg/_eigen/__init__.py +22 -0
  1084. scipy/sparse/linalg/_eigen/_svds.py +540 -0
  1085. scipy/sparse/linalg/_eigen/_svds_doc.py +382 -0
  1086. scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
  1087. scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
  1088. scipy/sparse/linalg/_eigen/arpack/_arpack.cp314t-win_arm64.lib +0 -0
  1089. scipy/sparse/linalg/_eigen/arpack/_arpack.cp314t-win_arm64.pyd +0 -0
  1090. scipy/sparse/linalg/_eigen/arpack/arpack.py +1706 -0
  1091. scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
  1092. scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +717 -0
  1093. scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
  1094. scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1110 -0
  1095. scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
  1096. scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +725 -0
  1097. scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
  1098. scipy/sparse/linalg/_eigen/tests/test_svds.py +886 -0
  1099. scipy/sparse/linalg/_expm_multiply.py +816 -0
  1100. scipy/sparse/linalg/_interface.py +920 -0
  1101. scipy/sparse/linalg/_isolve/__init__.py +20 -0
  1102. scipy/sparse/linalg/_isolve/_gcrotmk.py +503 -0
  1103. scipy/sparse/linalg/_isolve/iterative.py +1051 -0
  1104. scipy/sparse/linalg/_isolve/lgmres.py +230 -0
  1105. scipy/sparse/linalg/_isolve/lsmr.py +486 -0
  1106. scipy/sparse/linalg/_isolve/lsqr.py +589 -0
  1107. scipy/sparse/linalg/_isolve/minres.py +372 -0
  1108. scipy/sparse/linalg/_isolve/tests/__init__.py +0 -0
  1109. scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py +183 -0
  1110. scipy/sparse/linalg/_isolve/tests/test_iterative.py +809 -0
  1111. scipy/sparse/linalg/_isolve/tests/test_lgmres.py +225 -0
  1112. scipy/sparse/linalg/_isolve/tests/test_lsmr.py +185 -0
  1113. scipy/sparse/linalg/_isolve/tests/test_lsqr.py +120 -0
  1114. scipy/sparse/linalg/_isolve/tests/test_minres.py +97 -0
  1115. scipy/sparse/linalg/_isolve/tests/test_utils.py +9 -0
  1116. scipy/sparse/linalg/_isolve/tfqmr.py +179 -0
  1117. scipy/sparse/linalg/_isolve/utils.py +121 -0
  1118. scipy/sparse/linalg/_matfuncs.py +940 -0
  1119. scipy/sparse/linalg/_norm.py +195 -0
  1120. scipy/sparse/linalg/_onenormest.py +467 -0
  1121. scipy/sparse/linalg/_propack/_cpropack.cp314t-win_arm64.lib +0 -0
  1122. scipy/sparse/linalg/_propack/_cpropack.cp314t-win_arm64.pyd +0 -0
  1123. scipy/sparse/linalg/_propack/_dpropack.cp314t-win_arm64.lib +0 -0
  1124. scipy/sparse/linalg/_propack/_dpropack.cp314t-win_arm64.pyd +0 -0
  1125. scipy/sparse/linalg/_propack/_spropack.cp314t-win_arm64.lib +0 -0
  1126. scipy/sparse/linalg/_propack/_spropack.cp314t-win_arm64.pyd +0 -0
  1127. scipy/sparse/linalg/_propack/_zpropack.cp314t-win_arm64.lib +0 -0
  1128. scipy/sparse/linalg/_propack/_zpropack.cp314t-win_arm64.pyd +0 -0
  1129. scipy/sparse/linalg/_special_sparse_arrays.py +949 -0
  1130. scipy/sparse/linalg/_svdp.py +309 -0
  1131. scipy/sparse/linalg/dsolve.py +22 -0
  1132. scipy/sparse/linalg/eigen.py +21 -0
  1133. scipy/sparse/linalg/interface.py +20 -0
  1134. scipy/sparse/linalg/isolve.py +22 -0
  1135. scipy/sparse/linalg/matfuncs.py +18 -0
  1136. scipy/sparse/linalg/tests/__init__.py +0 -0
  1137. scipy/sparse/linalg/tests/propack_test_data.npz +0 -0
  1138. scipy/sparse/linalg/tests/test_expm_multiply.py +367 -0
  1139. scipy/sparse/linalg/tests/test_interface.py +561 -0
  1140. scipy/sparse/linalg/tests/test_matfuncs.py +592 -0
  1141. scipy/sparse/linalg/tests/test_norm.py +154 -0
  1142. scipy/sparse/linalg/tests/test_onenormest.py +252 -0
  1143. scipy/sparse/linalg/tests/test_propack.py +165 -0
  1144. scipy/sparse/linalg/tests/test_pydata_sparse.py +272 -0
  1145. scipy/sparse/linalg/tests/test_special_sparse_arrays.py +337 -0
  1146. scipy/sparse/sparsetools.py +17 -0
  1147. scipy/sparse/spfuncs.py +17 -0
  1148. scipy/sparse/sputils.py +17 -0
  1149. scipy/sparse/tests/__init__.py +0 -0
  1150. scipy/sparse/tests/data/csc_py2.npz +0 -0
  1151. scipy/sparse/tests/data/csc_py3.npz +0 -0
  1152. scipy/sparse/tests/test_arithmetic1d.py +341 -0
  1153. scipy/sparse/tests/test_array_api.py +561 -0
  1154. scipy/sparse/tests/test_base.py +5870 -0
  1155. scipy/sparse/tests/test_common1d.py +447 -0
  1156. scipy/sparse/tests/test_construct.py +872 -0
  1157. scipy/sparse/tests/test_coo.py +1119 -0
  1158. scipy/sparse/tests/test_csc.py +98 -0
  1159. scipy/sparse/tests/test_csr.py +214 -0
  1160. scipy/sparse/tests/test_dok.py +209 -0
  1161. scipy/sparse/tests/test_extract.py +51 -0
  1162. scipy/sparse/tests/test_indexing1d.py +603 -0
  1163. scipy/sparse/tests/test_matrix_io.py +109 -0
  1164. scipy/sparse/tests/test_minmax1d.py +128 -0
  1165. scipy/sparse/tests/test_sparsetools.py +344 -0
  1166. scipy/sparse/tests/test_spfuncs.py +97 -0
  1167. scipy/sparse/tests/test_sputils.py +424 -0
  1168. scipy/spatial/__init__.py +129 -0
  1169. scipy/spatial/_ckdtree.cp314t-win_arm64.lib +0 -0
  1170. scipy/spatial/_ckdtree.cp314t-win_arm64.pyd +0 -0
  1171. scipy/spatial/_distance_pybind.cp314t-win_arm64.lib +0 -0
  1172. scipy/spatial/_distance_pybind.cp314t-win_arm64.pyd +0 -0
  1173. scipy/spatial/_distance_wrap.cp314t-win_arm64.lib +0 -0
  1174. scipy/spatial/_distance_wrap.cp314t-win_arm64.pyd +0 -0
  1175. scipy/spatial/_geometric_slerp.py +238 -0
  1176. scipy/spatial/_hausdorff.cp314t-win_arm64.lib +0 -0
  1177. scipy/spatial/_hausdorff.cp314t-win_arm64.pyd +0 -0
  1178. scipy/spatial/_kdtree.py +920 -0
  1179. scipy/spatial/_plotutils.py +274 -0
  1180. scipy/spatial/_procrustes.py +132 -0
  1181. scipy/spatial/_qhull.cp314t-win_arm64.lib +0 -0
  1182. scipy/spatial/_qhull.cp314t-win_arm64.pyd +0 -0
  1183. scipy/spatial/_qhull.pyi +213 -0
  1184. scipy/spatial/_spherical_voronoi.py +341 -0
  1185. scipy/spatial/_voronoi.cp314t-win_arm64.lib +0 -0
  1186. scipy/spatial/_voronoi.cp314t-win_arm64.pyd +0 -0
  1187. scipy/spatial/_voronoi.pyi +4 -0
  1188. scipy/spatial/ckdtree.py +18 -0
  1189. scipy/spatial/distance.py +3147 -0
  1190. scipy/spatial/distance.pyi +210 -0
  1191. scipy/spatial/kdtree.py +25 -0
  1192. scipy/spatial/qhull.py +25 -0
  1193. scipy/spatial/qhull_src/COPYING_QHULL.txt +39 -0
  1194. scipy/spatial/tests/__init__.py +0 -0
  1195. scipy/spatial/tests/data/cdist-X1.txt +10 -0
  1196. scipy/spatial/tests/data/cdist-X2.txt +20 -0
  1197. scipy/spatial/tests/data/degenerate_pointset.npz +0 -0
  1198. scipy/spatial/tests/data/iris.txt +150 -0
  1199. scipy/spatial/tests/data/pdist-boolean-inp.txt +20 -0
  1200. scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt +1 -0
  1201. scipy/spatial/tests/data/pdist-chebyshev-ml.txt +1 -0
  1202. scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt +1 -0
  1203. scipy/spatial/tests/data/pdist-cityblock-ml.txt +1 -0
  1204. scipy/spatial/tests/data/pdist-correlation-ml-iris.txt +1 -0
  1205. scipy/spatial/tests/data/pdist-correlation-ml.txt +1 -0
  1206. scipy/spatial/tests/data/pdist-cosine-ml-iris.txt +1 -0
  1207. scipy/spatial/tests/data/pdist-cosine-ml.txt +1 -0
  1208. scipy/spatial/tests/data/pdist-double-inp.txt +20 -0
  1209. scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt +1 -0
  1210. scipy/spatial/tests/data/pdist-euclidean-ml.txt +1 -0
  1211. scipy/spatial/tests/data/pdist-hamming-ml.txt +1 -0
  1212. scipy/spatial/tests/data/pdist-jaccard-ml.txt +1 -0
  1213. scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt +1 -0
  1214. scipy/spatial/tests/data/pdist-jensenshannon-ml.txt +1 -0
  1215. scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt +1 -0
  1216. scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt +1 -0
  1217. scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt +1 -0
  1218. scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt +1 -0
  1219. scipy/spatial/tests/data/pdist-seuclidean-ml.txt +1 -0
  1220. scipy/spatial/tests/data/pdist-spearman-ml.txt +1 -0
  1221. scipy/spatial/tests/data/random-bool-data.txt +100 -0
  1222. scipy/spatial/tests/data/random-double-data.txt +100 -0
  1223. scipy/spatial/tests/data/random-int-data.txt +100 -0
  1224. scipy/spatial/tests/data/random-uint-data.txt +100 -0
  1225. scipy/spatial/tests/data/selfdual-4d-polytope.txt +27 -0
  1226. scipy/spatial/tests/test__plotutils.py +91 -0
  1227. scipy/spatial/tests/test__procrustes.py +116 -0
  1228. scipy/spatial/tests/test_distance.py +2389 -0
  1229. scipy/spatial/tests/test_hausdorff.py +199 -0
  1230. scipy/spatial/tests/test_kdtree.py +1536 -0
  1231. scipy/spatial/tests/test_qhull.py +1313 -0
  1232. scipy/spatial/tests/test_slerp.py +417 -0
  1233. scipy/spatial/tests/test_spherical_voronoi.py +358 -0
  1234. scipy/spatial/transform/__init__.py +31 -0
  1235. scipy/spatial/transform/_rigid_transform.cp314t-win_arm64.lib +0 -0
  1236. scipy/spatial/transform/_rigid_transform.cp314t-win_arm64.pyd +0 -0
  1237. scipy/spatial/transform/_rotation.cp314t-win_arm64.lib +0 -0
  1238. scipy/spatial/transform/_rotation.cp314t-win_arm64.pyd +0 -0
  1239. scipy/spatial/transform/_rotation_groups.py +140 -0
  1240. scipy/spatial/transform/_rotation_spline.py +460 -0
  1241. scipy/spatial/transform/rotation.py +21 -0
  1242. scipy/spatial/transform/tests/__init__.py +0 -0
  1243. scipy/spatial/transform/tests/test_rigid_transform.py +1221 -0
  1244. scipy/spatial/transform/tests/test_rotation.py +2569 -0
  1245. scipy/spatial/transform/tests/test_rotation_groups.py +169 -0
  1246. scipy/spatial/transform/tests/test_rotation_spline.py +183 -0
  1247. scipy/special/__init__.pxd +1 -0
  1248. scipy/special/__init__.py +841 -0
  1249. scipy/special/_add_newdocs.py +9961 -0
  1250. scipy/special/_basic.py +3576 -0
  1251. scipy/special/_comb.cp314t-win_arm64.lib +0 -0
  1252. scipy/special/_comb.cp314t-win_arm64.pyd +0 -0
  1253. scipy/special/_ellip_harm.py +214 -0
  1254. scipy/special/_ellip_harm_2.cp314t-win_arm64.lib +0 -0
  1255. scipy/special/_ellip_harm_2.cp314t-win_arm64.pyd +0 -0
  1256. scipy/special/_gufuncs.cp314t-win_arm64.lib +0 -0
  1257. scipy/special/_gufuncs.cp314t-win_arm64.pyd +0 -0
  1258. scipy/special/_input_validation.py +17 -0
  1259. scipy/special/_lambertw.py +149 -0
  1260. scipy/special/_logsumexp.py +426 -0
  1261. scipy/special/_mptestutils.py +453 -0
  1262. scipy/special/_multiufuncs.py +610 -0
  1263. scipy/special/_orthogonal.py +2592 -0
  1264. scipy/special/_orthogonal.pyi +330 -0
  1265. scipy/special/_precompute/__init__.py +0 -0
  1266. scipy/special/_precompute/cosine_cdf.py +17 -0
  1267. scipy/special/_precompute/expn_asy.py +54 -0
  1268. scipy/special/_precompute/gammainc_asy.py +116 -0
  1269. scipy/special/_precompute/gammainc_data.py +124 -0
  1270. scipy/special/_precompute/hyp2f1_data.py +484 -0
  1271. scipy/special/_precompute/lambertw.py +68 -0
  1272. scipy/special/_precompute/loggamma.py +43 -0
  1273. scipy/special/_precompute/struve_convergence.py +131 -0
  1274. scipy/special/_precompute/utils.py +38 -0
  1275. scipy/special/_precompute/wright_bessel.py +342 -0
  1276. scipy/special/_precompute/wright_bessel_data.py +152 -0
  1277. scipy/special/_precompute/wrightomega.py +41 -0
  1278. scipy/special/_precompute/zetac.py +27 -0
  1279. scipy/special/_sf_error.py +15 -0
  1280. scipy/special/_specfun.cp314t-win_arm64.lib +0 -0
  1281. scipy/special/_specfun.cp314t-win_arm64.pyd +0 -0
  1282. scipy/special/_special_ufuncs.cp314t-win_arm64.lib +0 -0
  1283. scipy/special/_special_ufuncs.cp314t-win_arm64.pyd +0 -0
  1284. scipy/special/_spfun_stats.py +106 -0
  1285. scipy/special/_spherical_bessel.py +397 -0
  1286. scipy/special/_support_alternative_backends.py +295 -0
  1287. scipy/special/_test_internal.cp314t-win_arm64.lib +0 -0
  1288. scipy/special/_test_internal.cp314t-win_arm64.pyd +0 -0
  1289. scipy/special/_test_internal.pyi +9 -0
  1290. scipy/special/_testutils.py +321 -0
  1291. scipy/special/_ufuncs.cp314t-win_arm64.lib +0 -0
  1292. scipy/special/_ufuncs.cp314t-win_arm64.pyd +0 -0
  1293. scipy/special/_ufuncs.pyi +522 -0
  1294. scipy/special/_ufuncs.pyx +13173 -0
  1295. scipy/special/_ufuncs_cxx.cp314t-win_arm64.lib +0 -0
  1296. scipy/special/_ufuncs_cxx.cp314t-win_arm64.pyd +0 -0
  1297. scipy/special/_ufuncs_cxx.pxd +142 -0
  1298. scipy/special/_ufuncs_cxx.pyx +427 -0
  1299. scipy/special/_ufuncs_cxx_defs.h +147 -0
  1300. scipy/special/_ufuncs_defs.h +57 -0
  1301. scipy/special/add_newdocs.py +15 -0
  1302. scipy/special/basic.py +87 -0
  1303. scipy/special/cython_special.cp314t-win_arm64.lib +0 -0
  1304. scipy/special/cython_special.cp314t-win_arm64.pyd +0 -0
  1305. scipy/special/cython_special.pxd +259 -0
  1306. scipy/special/cython_special.pyi +3 -0
  1307. scipy/special/orthogonal.py +45 -0
  1308. scipy/special/sf_error.py +20 -0
  1309. scipy/special/specfun.py +24 -0
  1310. scipy/special/spfun_stats.py +17 -0
  1311. scipy/special/tests/__init__.py +0 -0
  1312. scipy/special/tests/_cython_examples/extending.pyx +12 -0
  1313. scipy/special/tests/_cython_examples/meson.build +34 -0
  1314. scipy/special/tests/data/__init__.py +0 -0
  1315. scipy/special/tests/data/boost.npz +0 -0
  1316. scipy/special/tests/data/gsl.npz +0 -0
  1317. scipy/special/tests/data/local.npz +0 -0
  1318. scipy/special/tests/test_basic.py +4815 -0
  1319. scipy/special/tests/test_bdtr.py +112 -0
  1320. scipy/special/tests/test_boost_ufuncs.py +64 -0
  1321. scipy/special/tests/test_boxcox.py +125 -0
  1322. scipy/special/tests/test_cdflib.py +712 -0
  1323. scipy/special/tests/test_cdft_asymptotic.py +49 -0
  1324. scipy/special/tests/test_cephes_intp_cast.py +29 -0
  1325. scipy/special/tests/test_cosine_distr.py +83 -0
  1326. scipy/special/tests/test_cython_special.py +363 -0
  1327. scipy/special/tests/test_data.py +719 -0
  1328. scipy/special/tests/test_dd.py +42 -0
  1329. scipy/special/tests/test_digamma.py +45 -0
  1330. scipy/special/tests/test_ellip_harm.py +278 -0
  1331. scipy/special/tests/test_erfinv.py +89 -0
  1332. scipy/special/tests/test_exponential_integrals.py +118 -0
  1333. scipy/special/tests/test_extending.py +28 -0
  1334. scipy/special/tests/test_faddeeva.py +85 -0
  1335. scipy/special/tests/test_gamma.py +12 -0
  1336. scipy/special/tests/test_gammainc.py +152 -0
  1337. scipy/special/tests/test_hyp2f1.py +2566 -0
  1338. scipy/special/tests/test_hypergeometric.py +234 -0
  1339. scipy/special/tests/test_iv_ratio.py +249 -0
  1340. scipy/special/tests/test_kolmogorov.py +491 -0
  1341. scipy/special/tests/test_lambertw.py +109 -0
  1342. scipy/special/tests/test_legendre.py +1518 -0
  1343. scipy/special/tests/test_log1mexp.py +85 -0
  1344. scipy/special/tests/test_loggamma.py +70 -0
  1345. scipy/special/tests/test_logit.py +162 -0
  1346. scipy/special/tests/test_logsumexp.py +469 -0
  1347. scipy/special/tests/test_mpmath.py +2293 -0
  1348. scipy/special/tests/test_nan_inputs.py +65 -0
  1349. scipy/special/tests/test_ndtr.py +77 -0
  1350. scipy/special/tests/test_ndtri_exp.py +94 -0
  1351. scipy/special/tests/test_orthogonal.py +821 -0
  1352. scipy/special/tests/test_orthogonal_eval.py +275 -0
  1353. scipy/special/tests/test_owens_t.py +53 -0
  1354. scipy/special/tests/test_pcf.py +24 -0
  1355. scipy/special/tests/test_pdtr.py +48 -0
  1356. scipy/special/tests/test_powm1.py +65 -0
  1357. scipy/special/tests/test_precompute_expn_asy.py +24 -0
  1358. scipy/special/tests/test_precompute_gammainc.py +108 -0
  1359. scipy/special/tests/test_precompute_utils.py +36 -0
  1360. scipy/special/tests/test_round.py +18 -0
  1361. scipy/special/tests/test_sf_error.py +146 -0
  1362. scipy/special/tests/test_sici.py +36 -0
  1363. scipy/special/tests/test_specfun.py +48 -0
  1364. scipy/special/tests/test_spence.py +32 -0
  1365. scipy/special/tests/test_spfun_stats.py +61 -0
  1366. scipy/special/tests/test_sph_harm.py +85 -0
  1367. scipy/special/tests/test_spherical_bessel.py +400 -0
  1368. scipy/special/tests/test_support_alternative_backends.py +248 -0
  1369. scipy/special/tests/test_trig.py +72 -0
  1370. scipy/special/tests/test_ufunc_signatures.py +46 -0
  1371. scipy/special/tests/test_wright_bessel.py +205 -0
  1372. scipy/special/tests/test_wrightomega.py +117 -0
  1373. scipy/special/tests/test_zeta.py +301 -0
  1374. scipy/stats/__init__.py +670 -0
  1375. scipy/stats/_ansari_swilk_statistics.cp314t-win_arm64.lib +0 -0
  1376. scipy/stats/_ansari_swilk_statistics.cp314t-win_arm64.pyd +0 -0
  1377. scipy/stats/_axis_nan_policy.py +692 -0
  1378. scipy/stats/_biasedurn.cp314t-win_arm64.lib +0 -0
  1379. scipy/stats/_biasedurn.cp314t-win_arm64.pyd +0 -0
  1380. scipy/stats/_biasedurn.pxd +27 -0
  1381. scipy/stats/_binned_statistic.py +795 -0
  1382. scipy/stats/_binomtest.py +375 -0
  1383. scipy/stats/_bws_test.py +177 -0
  1384. scipy/stats/_censored_data.py +459 -0
  1385. scipy/stats/_common.py +5 -0
  1386. scipy/stats/_constants.py +42 -0
  1387. scipy/stats/_continued_fraction.py +387 -0
  1388. scipy/stats/_continuous_distns.py +12486 -0
  1389. scipy/stats/_correlation.py +210 -0
  1390. scipy/stats/_covariance.py +636 -0
  1391. scipy/stats/_crosstab.py +204 -0
  1392. scipy/stats/_discrete_distns.py +2098 -0
  1393. scipy/stats/_distn_infrastructure.py +4201 -0
  1394. scipy/stats/_distr_params.py +299 -0
  1395. scipy/stats/_distribution_infrastructure.py +5750 -0
  1396. scipy/stats/_entropy.py +428 -0
  1397. scipy/stats/_finite_differences.py +145 -0
  1398. scipy/stats/_fit.py +1351 -0
  1399. scipy/stats/_hypotests.py +2060 -0
  1400. scipy/stats/_kde.py +732 -0
  1401. scipy/stats/_ksstats.py +600 -0
  1402. scipy/stats/_levy_stable/__init__.py +1231 -0
  1403. scipy/stats/_levy_stable/levyst.cp314t-win_arm64.lib +0 -0
  1404. scipy/stats/_levy_stable/levyst.cp314t-win_arm64.pyd +0 -0
  1405. scipy/stats/_mannwhitneyu.py +492 -0
  1406. scipy/stats/_mgc.py +550 -0
  1407. scipy/stats/_morestats.py +4626 -0
  1408. scipy/stats/_mstats_basic.py +3658 -0
  1409. scipy/stats/_mstats_extras.py +521 -0
  1410. scipy/stats/_multicomp.py +449 -0
  1411. scipy/stats/_multivariate.py +7281 -0
  1412. scipy/stats/_new_distributions.py +452 -0
  1413. scipy/stats/_odds_ratio.py +466 -0
  1414. scipy/stats/_page_trend_test.py +486 -0
  1415. scipy/stats/_probability_distribution.py +1964 -0
  1416. scipy/stats/_qmc.py +2956 -0
  1417. scipy/stats/_qmc_cy.cp314t-win_arm64.lib +0 -0
  1418. scipy/stats/_qmc_cy.cp314t-win_arm64.pyd +0 -0
  1419. scipy/stats/_qmc_cy.pyi +54 -0
  1420. scipy/stats/_qmvnt.py +454 -0
  1421. scipy/stats/_qmvnt_cy.cp314t-win_arm64.lib +0 -0
  1422. scipy/stats/_qmvnt_cy.cp314t-win_arm64.pyd +0 -0
  1423. scipy/stats/_quantile.py +335 -0
  1424. scipy/stats/_rcont/__init__.py +4 -0
  1425. scipy/stats/_rcont/rcont.cp314t-win_arm64.lib +0 -0
  1426. scipy/stats/_rcont/rcont.cp314t-win_arm64.pyd +0 -0
  1427. scipy/stats/_relative_risk.py +263 -0
  1428. scipy/stats/_resampling.py +2352 -0
  1429. scipy/stats/_result_classes.py +40 -0
  1430. scipy/stats/_sampling.py +1314 -0
  1431. scipy/stats/_sensitivity_analysis.py +713 -0
  1432. scipy/stats/_sobol.cp314t-win_arm64.lib +0 -0
  1433. scipy/stats/_sobol.cp314t-win_arm64.pyd +0 -0
  1434. scipy/stats/_sobol.pyi +54 -0
  1435. scipy/stats/_sobol_direction_numbers.npz +0 -0
  1436. scipy/stats/_stats.cp314t-win_arm64.lib +0 -0
  1437. scipy/stats/_stats.cp314t-win_arm64.pyd +0 -0
  1438. scipy/stats/_stats.pxd +10 -0
  1439. scipy/stats/_stats_mstats_common.py +322 -0
  1440. scipy/stats/_stats_py.py +11089 -0
  1441. scipy/stats/_stats_pythran.cp314t-win_arm64.lib +0 -0
  1442. scipy/stats/_stats_pythran.cp314t-win_arm64.pyd +0 -0
  1443. scipy/stats/_survival.py +683 -0
  1444. scipy/stats/_tukeylambda_stats.py +199 -0
  1445. scipy/stats/_unuran/__init__.py +0 -0
  1446. scipy/stats/_unuran/unuran_wrapper.cp314t-win_arm64.lib +0 -0
  1447. scipy/stats/_unuran/unuran_wrapper.cp314t-win_arm64.pyd +0 -0
  1448. scipy/stats/_unuran/unuran_wrapper.pyi +179 -0
  1449. scipy/stats/_variation.py +126 -0
  1450. scipy/stats/_warnings_errors.py +38 -0
  1451. scipy/stats/_wilcoxon.py +265 -0
  1452. scipy/stats/biasedurn.py +16 -0
  1453. scipy/stats/contingency.py +521 -0
  1454. scipy/stats/distributions.py +24 -0
  1455. scipy/stats/kde.py +18 -0
  1456. scipy/stats/morestats.py +27 -0
  1457. scipy/stats/mstats.py +140 -0
  1458. scipy/stats/mstats_basic.py +42 -0
  1459. scipy/stats/mstats_extras.py +25 -0
  1460. scipy/stats/mvn.py +17 -0
  1461. scipy/stats/qmc.py +236 -0
  1462. scipy/stats/sampling.py +73 -0
  1463. scipy/stats/stats.py +41 -0
  1464. scipy/stats/tests/__init__.py +0 -0
  1465. scipy/stats/tests/common_tests.py +356 -0
  1466. scipy/stats/tests/data/_mvt.py +171 -0
  1467. scipy/stats/tests/data/fisher_exact_results_from_r.py +607 -0
  1468. scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy +0 -0
  1469. scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy +0 -0
  1470. scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy +0 -0
  1471. scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy +0 -0
  1472. scipy/stats/tests/data/nist_anova/AtmWtAg.dat +108 -0
  1473. scipy/stats/tests/data/nist_anova/SiRstv.dat +85 -0
  1474. scipy/stats/tests/data/nist_anova/SmLs01.dat +249 -0
  1475. scipy/stats/tests/data/nist_anova/SmLs02.dat +1869 -0
  1476. scipy/stats/tests/data/nist_anova/SmLs03.dat +18069 -0
  1477. scipy/stats/tests/data/nist_anova/SmLs04.dat +249 -0
  1478. scipy/stats/tests/data/nist_anova/SmLs05.dat +1869 -0
  1479. scipy/stats/tests/data/nist_anova/SmLs06.dat +18069 -0
  1480. scipy/stats/tests/data/nist_anova/SmLs07.dat +249 -0
  1481. scipy/stats/tests/data/nist_anova/SmLs08.dat +1869 -0
  1482. scipy/stats/tests/data/nist_anova/SmLs09.dat +18069 -0
  1483. scipy/stats/tests/data/nist_linregress/Norris.dat +97 -0
  1484. scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy +0 -0
  1485. scipy/stats/tests/data/studentized_range_mpmath_ref.json +1499 -0
  1486. scipy/stats/tests/test_axis_nan_policy.py +1388 -0
  1487. scipy/stats/tests/test_binned_statistic.py +568 -0
  1488. scipy/stats/tests/test_censored_data.py +152 -0
  1489. scipy/stats/tests/test_contingency.py +294 -0
  1490. scipy/stats/tests/test_continued_fraction.py +173 -0
  1491. scipy/stats/tests/test_continuous.py +2198 -0
  1492. scipy/stats/tests/test_continuous_basic.py +1053 -0
  1493. scipy/stats/tests/test_continuous_fit_censored.py +683 -0
  1494. scipy/stats/tests/test_correlation.py +80 -0
  1495. scipy/stats/tests/test_crosstab.py +115 -0
  1496. scipy/stats/tests/test_discrete_basic.py +580 -0
  1497. scipy/stats/tests/test_discrete_distns.py +700 -0
  1498. scipy/stats/tests/test_distributions.py +10413 -0
  1499. scipy/stats/tests/test_entropy.py +322 -0
  1500. scipy/stats/tests/test_fast_gen_inversion.py +435 -0
  1501. scipy/stats/tests/test_fit.py +1090 -0
  1502. scipy/stats/tests/test_hypotests.py +1991 -0
  1503. scipy/stats/tests/test_kdeoth.py +676 -0
  1504. scipy/stats/tests/test_marray.py +289 -0
  1505. scipy/stats/tests/test_mgc.py +217 -0
  1506. scipy/stats/tests/test_morestats.py +3259 -0
  1507. scipy/stats/tests/test_mstats_basic.py +2071 -0
  1508. scipy/stats/tests/test_mstats_extras.py +172 -0
  1509. scipy/stats/tests/test_multicomp.py +405 -0
  1510. scipy/stats/tests/test_multivariate.py +4381 -0
  1511. scipy/stats/tests/test_odds_ratio.py +148 -0
  1512. scipy/stats/tests/test_qmc.py +1492 -0
  1513. scipy/stats/tests/test_quantile.py +199 -0
  1514. scipy/stats/tests/test_rank.py +345 -0
  1515. scipy/stats/tests/test_relative_risk.py +95 -0
  1516. scipy/stats/tests/test_resampling.py +2000 -0
  1517. scipy/stats/tests/test_sampling.py +1450 -0
  1518. scipy/stats/tests/test_sensitivity_analysis.py +310 -0
  1519. scipy/stats/tests/test_stats.py +9707 -0
  1520. scipy/stats/tests/test_survival.py +466 -0
  1521. scipy/stats/tests/test_tukeylambda_stats.py +85 -0
  1522. scipy/stats/tests/test_variation.py +216 -0
  1523. scipy/version.py +12 -0
  1524. scipy-1.16.2.dist-info/DELVEWHEEL +2 -0
  1525. scipy-1.16.2.dist-info/LICENSE.txt +912 -0
  1526. scipy-1.16.2.dist-info/METADATA +1061 -0
  1527. scipy-1.16.2.dist-info/RECORD +1530 -0
  1528. scipy-1.16.2.dist-info/WHEEL +4 -0
  1529. scipy.libs/msvcp140-5f1c5dd31916990d94181e07bc3afb32.dll +0 -0
  1530. scipy.libs/scipy_openblas-f3ac85b1f412f7e86514c923dc4058d1.dll +0 -0
@@ -0,0 +1,3658 @@
1
+ """
2
+ An extension of scipy.stats._stats_py to support masked arrays
3
+
4
+ """
5
+ # Original author (2007): Pierre GF Gerard-Marchant
6
+
7
+
8
+ __all__ = ['argstoarray',
9
+ 'count_tied_groups',
10
+ 'describe',
11
+ 'f_oneway', 'find_repeats','friedmanchisquare',
12
+ 'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
13
+ 'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest',
14
+ 'ks_1samp', 'kstest',
15
+ 'linregress',
16
+ 'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
17
+ 'normaltest',
18
+ 'obrientransform',
19
+ 'pearsonr','plotting_positions','pointbiserialr',
20
+ 'rankdata',
21
+ 'scoreatpercentile','sem',
22
+ 'sen_seasonal_slopes','skew','skewtest','spearmanr',
23
+ 'siegelslopes', 'theilslopes',
24
+ 'tmax','tmean','tmin','trim','trimboth',
25
+ 'trimtail','trima','trimr','trimmed_mean','trimmed_std',
26
+ 'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
27
+ 'ttest_ind','ttest_rel','tvar',
28
+ 'variation',
29
+ 'winsorize',
30
+ 'brunnermunzel',
31
+ ]
32
+
33
+ import numpy as np
34
+ from numpy import ndarray
35
+ import numpy.ma as ma
36
+ from numpy.ma import masked, nomask
37
+ import math
38
+
39
+ import itertools
40
+ import warnings
41
+ from collections import namedtuple
42
+
43
+ from . import distributions
44
+ from scipy._lib._util import _rename_parameter, _contains_nan
45
+ from scipy._lib._bunch import _make_tuple_bunch
46
+ import scipy.special as special
47
+ import scipy.stats._stats_py
48
+ import scipy.stats._stats_py as _stats_py
49
+
50
+ from ._stats_mstats_common import (
51
+ _find_repeats,
52
+ theilslopes as stats_theilslopes,
53
+ siegelslopes as stats_siegelslopes
54
+ )
55
+
56
+
57
+ def _chk_asarray(a, axis):
58
+ # Always returns a masked array, raveled for axis=None
59
+ a = ma.asanyarray(a)
60
+ if axis is None:
61
+ a = ma.ravel(a)
62
+ outaxis = 0
63
+ else:
64
+ outaxis = axis
65
+ return a, outaxis
66
+
67
+
68
+ def _chk2_asarray(a, b, axis):
69
+ a = ma.asanyarray(a)
70
+ b = ma.asanyarray(b)
71
+ if axis is None:
72
+ a = ma.ravel(a)
73
+ b = ma.ravel(b)
74
+ outaxis = 0
75
+ else:
76
+ outaxis = axis
77
+ return a, b, outaxis
78
+
79
+
80
+ def _chk_size(a, b):
81
+ a = ma.asanyarray(a)
82
+ b = ma.asanyarray(b)
83
+ (na, nb) = (a.size, b.size)
84
+ if na != nb:
85
+ raise ValueError("The size of the input array should match!"
86
+ f" ({na} <> {nb})")
87
+ return (a, b, na)
88
+
89
+
90
+ def _ttest_finish(df, t, alternative):
91
+ """Common code between all 3 t-test functions."""
92
+ # We use ``stdtr`` directly here to preserve masked arrays
93
+
94
+ if alternative == 'less':
95
+ pval = special._ufuncs.stdtr(df, t)
96
+ elif alternative == 'greater':
97
+ pval = special._ufuncs.stdtr(df, -t)
98
+ elif alternative == 'two-sided':
99
+ pval = special._ufuncs.stdtr(df, -np.abs(t))*2
100
+ else:
101
+ raise ValueError("alternative must be "
102
+ "'less', 'greater' or 'two-sided'")
103
+
104
+ if t.ndim == 0:
105
+ t = t[()]
106
+ if pval.ndim == 0:
107
+ pval = pval[()]
108
+
109
+ return t, pval
110
+
111
+
112
def argstoarray(*args):
    """
    Constructs a 2D array from a group of sequences.

    Sequences are filled with missing values to match the length of the
    longest sequence.

    Parameters
    ----------
    *args : sequences
        Group of sequences.

    Returns
    -------
    argstoarray : MaskedArray
        A ( `m` x `n` ) masked array, where `m` is the number of arguments
        and `n` the length of the longest argument.

    Notes
    -----
    `numpy.ma.vstack` has identical behavior, but is called with a sequence
    of sequences.

    Examples
    --------
    A 2D masked array constructed from a group of sequences is returned.

    >>> from scipy.stats.mstats import argstoarray
    >>> argstoarray([1, 2, 3], [4, 5, 6])
    masked_array(
     data=[[1.0, 2.0, 3.0],
           [4.0, 5.0, 6.0]],
     mask=[[False, False, False],
           [False, False, False]],
     fill_value=1e+20)

    When the sequences have different lengths, the shorter ones are padded
    with masked (missing) entries.

    >>> argstoarray([1, 3], [4, 5, 6])
    masked_array(
     data=[[1.0, 3.0, --],
           [4.0, 5.0, 6.0]],
     mask=[[False, False, True],
           [False, False, False]],
     fill_value=1e+20)

    """
    if len(args) == 1 and not isinstance(args[0], ndarray):
        # A single non-ndarray argument is taken as the whole 2D table.
        output = ma.asarray(args[0])
        if output.ndim != 2:
            raise ValueError("The input should be 2D")
    else:
        nrows = len(args)
        ncols = max(len(seq) for seq in args)
        # Start fully masked; copying each sequence in unmasks its slots.
        output = ma.array(np.empty((nrows, ncols), dtype=float), mask=True)
        for row, seq in enumerate(args):
            output[row, :len(seq)] = seq

    # Non-finite entries (NaN/inf) are treated as missing as well.
    output[np.logical_not(np.isfinite(output._data))] = masked
    return output
173
+
174
+
175
def find_repeats(arr):
    """Find repeats in arr and return a tuple (repeats, repeat_count).

    The input is cast to float64. Masked values are discarded.

    Parameters
    ----------
    arr : sequence
        Input array. The array is flattened if it is not 1D.

    Returns
    -------
    repeats : ndarray
        Array of repeated values.
    counts : ndarray
        Array of counts.

    Examples
    --------
    >>> from scipy.stats import mstats
    >>> mstats.find_repeats([2, 1, 2, 3, 2, 2, 5])
    (array([2.]), array([4]))

    In the above example, 2 repeats 4 times.

    >>> mstats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
    (array([4., 5.]), array([2, 2]))

    In the above example, both 4 and 5 repeat 2 times.

    """
    # ma.compressed promises a "new array" but may hand back a view of
    # the input, so force a copy whenever memory might be shared.
    compressed = np.asarray(ma.compressed(arr), dtype=np.float64)
    try:
        shares = np.may_share_memory(compressed, arr)
    except AttributeError:
        # numpy < 1.8.2 raised on empty inputs; newer versions correctly
        # return False, so treat a raise as "no sharing".
        shares = False
    if shares:
        compressed = compressed.copy()
    return _find_repeats(compressed)
218
+
219
+
220
def count_tied_groups(x, use_missing=False):
    """
    Counts the number of tied values.

    Parameters
    ----------
    x : sequence
        Sequence of data on which to counts the ties
    use_missing : bool, optional
        Whether to consider missing values as tied.

    Returns
    -------
    count_tied_groups : dict
        Returns a dictionary (nb of ties: nb of groups).

    Examples
    --------
    >>> from scipy.stats import mstats
    >>> import numpy as np
    >>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
    >>> mstats.count_tied_groups(z)
    {2: 1, 3: 2}

    In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x).

    >>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
    >>> mstats.count_tied_groups(z)
    {2: 2, 3: 1}
    >>> z[[1,-1]] = np.ma.masked
    >>> mstats.count_tied_groups(z, use_missing=True)
    {2: 2, 3: 1}

    """
    n_masked = ma.getmask(x).sum()
    # find_repeats may overwrite its input, so hand it a fresh copy.
    values = ma.compressed(x).copy()
    ties, counts = find_repeats(values)

    group_counts = {}
    if len(ties):
        # Each distinct tie size occurs at least once ...
        group_counts = dict.fromkeys(np.unique(counts), 1)
        # ... and repeated tie sizes get their actual multiplicity.
        group_counts.update(dict(zip(*find_repeats(counts))))

    if n_masked and use_missing:
        # Optionally treat the masked entries as one extra tied group.
        group_counts[n_masked] = group_counts.get(n_masked, 0) + 1

    return group_counts
270
+
271
+
272
def rankdata(data, axis=None, use_missing=False):
    """Returns the rank (also known as order statistics) of each data point
    along the given axis.

    If some values are tied, their rank is averaged.
    If some values are masked, their rank is set to 0 if use_missing is False,
    or set to the average rank of the unmasked values if use_missing is True.

    Parameters
    ----------
    data : sequence
        Input data. The data is transformed to a masked array
    axis : {None,int}, optional
        Axis along which to perform the ranking.
        If None, the array is first flattened. An exception is raised if
        the axis is specified for arrays with a dimension larger than 2
    use_missing : bool, optional
        Whether the masked values have a rank of 0 (False) or equal to the
        average rank of the unmasked values (True).

    """
    def _rank1d(row, use_missing=False):
        n = row.count()
        ranks = np.empty(row.size, dtype=float)
        order = row.argsort()
        # Unmasked values sort first and receive ranks 1..n.
        ranks[order[:n]] = np.arange(1, n + 1)
        # Masked values get either 0 or the average unmasked rank.
        ranks[order[n:]] = (n + 1) / 2. if use_missing else 0

        # Average the ranks within each group of tied values.
        for value in find_repeats(row.copy())[0]:
            tied = (row == value).filled(False)
            ranks[tied] = ranks[tied].mean()
        return ranks

    data = ma.array(data, copy=False)
    if axis is None:
        if data.ndim > 1:
            return _rank1d(data.ravel(), use_missing).reshape(data.shape)
        return _rank1d(data, use_missing)
    return ma.apply_along_axis(_rank1d, axis, data, use_missing).view(ndarray)
318
+
319
+
320
+ ModeResult = namedtuple('ModeResult', ('mode', 'count'))
321
+
322
+
323
def mode(a, axis=0):
    """
    Returns an array of the modal (most common) value in the passed array.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    mode : ndarray
        Array of modal values.
    count : ndarray
        Array of counts for each mode.

    Notes
    -----
    For more details, see `scipy.stats.mode`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> from scipy.stats import mstats
    >>> m_arr = np.ma.array([1, 1, 0, 0, 0, 0], mask=[0, 0, 1, 1, 1, 0])
    >>> mstats.mode(m_arr) # note that most zeros are masked
    ModeResult(mode=array([1.]), count=array([2.]))

    """
    # keepdims is pinned here to preserve the legacy output shape;
    # the private _mode carries the actual implementation.
    return _mode(a, axis=axis, keepdims=True)
357
+
358
+
359
+ def _mode(a, axis=0, keepdims=True):
360
+ # Don't want to expose `keepdims` from the public `mstats.mode`
361
+ a, axis = _chk_asarray(a, axis)
362
+
363
+ def _mode1D(a):
364
+ (rep,cnt) = find_repeats(a)
365
+ if not cnt.ndim:
366
+ return (0, 0)
367
+ elif cnt.size:
368
+ return (rep[cnt.argmax()], cnt.max())
369
+ else:
370
+ return (a.min(), 1)
371
+
372
+ if axis is None:
373
+ output = _mode1D(ma.ravel(a))
374
+ output = (ma.array(output[0]), ma.array(output[1]))
375
+ else:
376
+ output = ma.apply_along_axis(_mode1D, axis, a)
377
+ if keepdims is None or keepdims:
378
+ newshape = list(a.shape)
379
+ newshape[axis] = 1
380
+ slices = [slice(None)] * output.ndim
381
+ slices[axis] = 0
382
+ modes = output[tuple(slices)].reshape(newshape)
383
+ slices[axis] = 1
384
+ counts = output[tuple(slices)].reshape(newshape)
385
+ output = (modes, counts)
386
+ else:
387
+ output = np.moveaxis(output, axis, 0)
388
+
389
+ return ModeResult(*output)
390
+
391
+
392
+ def _betai(a, b, x):
393
+ x = np.asanyarray(x)
394
+ x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
395
+ return special.betainc(a, b, x)
396
+
397
+
398
def msign(x):
    """Returns the sign of x, or 0 if x is masked."""
    signs = np.sign(x)
    return ma.filled(signs, 0)
401
+
402
+
403
def pearsonr(x, y):
    r"""
    Pearson correlation coefficient and p-value for testing non-correlation.

    The Pearson correlation coefficient [1]_ measures the linear relationship
    between two datasets.  The calculation of the p-value relies on the
    assumption that each dataset is normally distributed.  Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation.  Correlations of -1 or +1 imply an exact linear
    relationship.

    Missing values are handled pair-wise: an observation masked in either
    input is discarded from both.

    Parameters
    ----------
    x : (N,) array_like
        Input array.
    y : (N,) array_like
        Input array.

    Returns
    -------
    r : float
        Pearson's correlation coefficient.
    p-value : float
        Two-tailed p-value.

    Warns
    -----
    `~scipy.stats.ConstantInputWarning`
        Raised if an input is a constant array. The correlation coefficient
        is not defined in this case, so ``np.nan`` is returned.

    `~scipy.stats.NearConstantInputWarning`
        Raised if an input is "nearly" constant. The array ``x`` is considered
        nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
        Numerical errors in the calculation ``x - mean(x)`` in this case might
        result in an inaccurate calculation of r.

    See Also
    --------
    spearmanr : Spearman rank-order correlation coefficient.
    kendalltau : Kendall's tau, a correlation measure for ordinal data.

    Notes
    -----
    The correlation coefficient is calculated as

    .. math::

        r = \frac{\sum (x - m_x) (y - m_y)}
                 {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}

    where :math:`m_x` is the mean of the vector x and :math:`m_y` is the mean
    of the vector y.  The two-sided p-value is computed from the exact beta
    distribution of r under the null hypothesis that x and y are drawn from
    independent normal distributions; see `scipy.stats.pearsonr` for the full
    derivation, discussion, and extended examples.

    References
    ----------
    .. [1] "Pearson correlation coefficient", Wikipedia,
           https://en.wikipedia.org/wiki/Pearson_correlation_coefficient

    Examples
    --------
    >>> from scipy.stats import mstats
    >>> mstats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    (-0.7426106572325057, 0.1505558088534455)

    """
    x, y, n = _chk_size(x, y)
    x, y = x.ravel(), y.ravel()

    # Combine both masks so that observations missing in either input are
    # dropped pair-wise, then count the valid (unmasked) pairs that remain.
    common_mask = ma.mask_or(ma.getmask(x), ma.getmask(y))
    n -= common_mask.sum()

    # Fewer than two valid pairs: the statistic is undefined.
    if n - 2 < 0:
        return (masked, masked)

    # Delegate the actual computation to the unmasked implementation,
    # feeding it only the jointly-unmasked observations.
    x_valid = ma.masked_array(x, mask=common_mask).compressed()
    y_valid = ma.masked_array(y, mask=common_mask).compressed()
    return scipy.stats._stats_py.pearsonr(x_valid, y_valid)
573
+
574
+
575
def spearmanr(x, y=None, use_ties=True, axis=None, nan_policy='propagate',
              alternative='two-sided'):
    """
    Calculates a Spearman rank-order correlation coefficient and the p-value
    to test for non-correlation.

    The Spearman correlation is a nonparametric measure of the linear
    relationship between two datasets. Unlike the Pearson correlation, the
    Spearman correlation does not assume that both datasets are normally
    distributed. Like other correlation coefficients, this one varies
    between -1 and +1 with 0 implying no correlation. Correlations of -1 or
    +1 imply a monotonic relationship. Positive correlations imply that
    as `x` increases, so does `y`. Negative correlations imply that as `x`
    increases, `y` decreases.

    Missing values are discarded pair-wise: if a value is missing in `x`, the
    corresponding value in `y` is masked.

    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets that have a Spearman correlation at least as extreme
    as the one computed from these datasets. The p-values are not entirely
    reliable but are probably reasonable for datasets larger than 500 or so.

    Parameters
    ----------
    x, y : 1D or 2D array_like, y is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations. When these are 1-D, each represents a vector of
        observations of a single variable. For the behavior in the 2-D case,
        see under ``axis``, below.
    use_ties : bool, optional
        DO NOT USE. Does not do anything, keyword is only left in place for
        backwards compatibility reasons.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable, with
        observations in the rows. If axis=1, the relationship is transposed:
        each row represents a variable, while the columns contain observations.
        If axis=None, then both arrays will be raveled.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the correlation is nonzero
        * 'less': the correlation is negative (less than zero)
        * 'greater': the correlation is positive (greater than zero)

        .. versionadded:: 1.7.0

    Returns
    -------
    res : SignificanceResult
        An object containing attributes:

        statistic : float or ndarray (2-D square)
            Spearman correlation matrix or correlation coefficient (if only 2
            variables are given as parameters). Correlation matrix is square
            with length equal to total number of variables (columns or rows) in
            ``a`` and ``b`` combined.
        pvalue : float
            The p-value for a hypothesis test whose null hypothesis
            is that two sets of data are linearly uncorrelated. See
            `alternative` above for alternative hypotheses. `pvalue` has the
            same shape as `statistic`.

    References
    ----------
    [CRCProbStat2000] section 14.7

    """
    # `use_ties` is a historical no-op: ties are always handled; only the
    # legacy `False` spelling is rejected explicitly.
    if not use_ties:
        raise ValueError("`use_ties=False` is not supported in SciPy >= 1.2.0")

    # Always returns a masked array, raveled if axis=None
    x, axisout = _chk_asarray(x, axis)
    if y is not None:
        # Deal only with 2-D `x` case: stack `y` as an extra variable.
        y, _ = _chk_asarray(y, axis)
        if axisout == 0:
            x = ma.column_stack((x, y))
        else:
            x = ma.vstack((x, y))

    if axisout == 1:
        # To simplify the code that follow (always use `n_obs, n_vars` shape)
        x = x.T

    if nan_policy == 'omit':
        # Treat NaN/Inf as missing so they get dropped pair-wise below.
        x = ma.masked_invalid(x)

    def _spearmanr_2cols(x):
        # Compute the Spearman statistic for a two-column (n_obs, 2) array.
        # Mask the same observations for all variables, and then drop those
        # observations (can't leave them masked, rankdata is weird).
        x = ma.mask_rowcols(x, axis=0)
        x = x[~x.mask.any(axis=1), :]

        # If either column is entirely NaN or Inf
        if not np.any(x.data):
            res = scipy.stats._stats_py.SignificanceResult(np.nan, np.nan)
            res.correlation = np.nan
            return res

        m = ma.getmask(x)
        n_obs = x.shape[0]
        # Degrees of freedom: unmasked observations minus 2.
        dof = n_obs - 2 - int(m.sum(axis=0)[0])
        if dof < 0:
            raise ValueError("The input must have at least 3 entries!")

        # Gets the ranks and rank differences
        x_ranked = rankdata(x, axis=0)
        rs = ma.corrcoef(x_ranked, rowvar=False).data

        # rs can have elements equal to 1, so avoid zero division warnings
        with np.errstate(divide='ignore'):
            # clip the small negative values possibly caused by rounding
            # errors before taking the square root
            t = rs * np.sqrt((dof / ((rs+1.0) * (1.0-rs))).clip(0))

        # Convert the t statistic to a p-value for the chosen alternative.
        t, prob = _ttest_finish(dof, t, alternative)

        # For backwards compatibility, return scalars when comparing 2 columns
        if rs.shape == (2, 2):
            res = scipy.stats._stats_py.SignificanceResult(rs[1, 0],
                                                           prob[1, 0])
            res.correlation = rs[1, 0]
            return res
        else:
            res = scipy.stats._stats_py.SignificanceResult(rs, prob)
            res.correlation = rs
            return res

    # Need to do this per pair of variables, otherwise the dropped observations
    # in a third column mess up the result for a pair.
    n_vars = x.shape[1]
    if n_vars == 2:
        return _spearmanr_2cols(x)
    else:
        # Build the symmetric correlation / p-value matrices pair by pair.
        rs = np.ones((n_vars, n_vars), dtype=float)
        prob = np.zeros((n_vars, n_vars), dtype=float)
        for var1 in range(n_vars - 1):
            for var2 in range(var1+1, n_vars):
                result = _spearmanr_2cols(x[:, [var1, var2]])
                rs[var1, var2] = result.correlation
                rs[var2, var1] = result.correlation
                prob[var1, var2] = result.pvalue
                prob[var2, var1] = result.pvalue

        res = scipy.stats._stats_py.SignificanceResult(rs, prob)
        res.correlation = rs
        return res
728
+
729
+
730
+ def _kendall_p_exact(n, c, alternative='two-sided'):
731
+
732
+ # Use the fact that distribution is symmetric: always calculate a CDF in
733
+ # the left tail.
734
+ # This will be the one-sided p-value if `c` is on the side of
735
+ # the null distribution predicted by the alternative hypothesis.
736
+ # The two-sided p-value will be twice this value.
737
+ # If `c` is on the other side of the null distribution, we'll need to
738
+ # take the complement and add back the probability mass at `c`.
739
+ in_right_tail = (c >= (n*(n-1))//2 - c)
740
+ alternative_greater = (alternative == 'greater')
741
+ c = int(min(c, (n*(n-1))//2 - c))
742
+
743
+ # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods"
744
+ # (4th Edition), Charles Griffin & Co., 1970.
745
+ if n <= 0:
746
+ raise ValueError(f'n ({n}) must be positive')
747
+ elif c < 0 or 4*c > n*(n-1):
748
+ raise ValueError(f'c ({c}) must satisfy 0 <= 4c <= n(n-1) = {n*(n-1)}.')
749
+ elif n == 1:
750
+ prob = 1.0
751
+ p_mass_at_c = 1
752
+ elif n == 2:
753
+ prob = 1.0
754
+ p_mass_at_c = 0.5
755
+ elif c == 0:
756
+ prob = 2.0/math.factorial(n) if n < 171 else 0.0
757
+ p_mass_at_c = prob/2
758
+ elif c == 1:
759
+ prob = 2.0/math.factorial(n-1) if n < 172 else 0.0
760
+ p_mass_at_c = (n-1)/math.factorial(n)
761
+ elif 4*c == n*(n-1) and alternative == 'two-sided':
762
+ # I'm sure there's a simple formula for p_mass_at_c in this
763
+ # case, but I don't know it. Use generic formula for one-sided p-value.
764
+ prob = 1.0
765
+ elif n < 171:
766
+ new = np.zeros(c+1)
767
+ new[0:2] = 1.0
768
+ for j in range(3,n+1):
769
+ new = np.cumsum(new)
770
+ if j <= c:
771
+ new[j:] -= new[:c+1-j]
772
+ prob = 2.0*np.sum(new)/math.factorial(n)
773
+ p_mass_at_c = new[-1]/math.factorial(n)
774
+ else:
775
+ new = np.zeros(c+1)
776
+ new[0:2] = 1.0
777
+ for j in range(3, n+1):
778
+ new = np.cumsum(new)/j
779
+ if j <= c:
780
+ new[j:] -= new[:c+1-j]
781
+ prob = np.sum(new)
782
+ p_mass_at_c = new[-1]/2
783
+
784
+ if alternative != 'two-sided':
785
+ # if the alternative hypothesis and alternative agree,
786
+ # one-sided p-value is half the two-sided p-value
787
+ if in_right_tail == alternative_greater:
788
+ prob /= 2
789
+ else:
790
+ prob = 1 - prob/2 + p_mass_at_c
791
+
792
+ prob = np.clip(prob, 0, 1)
793
+
794
+ return prob
795
+
796
+
797
def kendalltau(x, y, use_ties=True, use_missing=False, method='auto',
               alternative='two-sided'):
    """
    Computes Kendall's rank correlation tau on two variables *x* and *y*.

    Parameters
    ----------
    x : sequence
        First data list (for example, time).
    y : sequence
        Second data list.
    use_ties : {True, False}, optional
        Whether ties correction should be performed.
    use_missing : {False, True}, optional
        Whether missing data should be allocated a rank of 0 (False) or the
        average rank (True)
    method : {'auto', 'asymptotic', 'exact'}, optional
        Defines which method is used to calculate the p-value [1]_.
        'asymptotic' uses a normal approximation valid for large samples.
        'exact' computes the exact p-value, but can only be used if no ties
        are present. As the sample size increases, the 'exact' computation
        time may grow and the result may lose some precision.
        'auto' is the default and selects the appropriate
        method based on a trade-off between speed and accuracy.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the rank correlation is nonzero
        * 'less': the rank correlation is negative (less than zero)
        * 'greater': the rank correlation is positive (greater than zero)

    Returns
    -------
    res : SignificanceResult
        An object containing attributes:

        statistic : float
            The tau statistic.
        pvalue : float
            The p-value for a hypothesis test whose null hypothesis is
            an absence of association, tau = 0.

    References
    ----------
    .. [1] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
           Charles Griffin & Co., 1970.

    """
    (x, y, n) = _chk_size(x, y)
    (x, y) = (x.flatten(), y.flatten())
    # Combine masks so missing values are discarded pair-wise.
    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
    if m is not nomask:
        x = ma.array(x, mask=m, copy=True)
        y = ma.array(y, mask=m, copy=True)
        # need int() here, otherwise numpy defaults to 32 bit
        # integer on all Windows architectures, causing overflow.
        # int() will keep it infinite precision.
        n -= int(m.sum())

    # Fewer than two valid pairs: tau is undefined.
    if n < 2:
        res = scipy.stats._stats_py.SignificanceResult(np.nan, np.nan)
        res.correlation = np.nan
        return res

    # Rank both variables; rank 0 marks missing data when use_missing=False,
    # so mask those entries out of the pair counts below.
    rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
    ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
    idx = rx.argsort()
    (rx, ry) = (rx[idx], ry[idx])
    # Concordant (C) and discordant (D) pair counts, with masked pairs
    # contributing 0 via .filled(0).
    C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum()
                for i in range(len(ry)-1)], dtype=float)
    D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum()
                for i in range(len(ry)-1)], dtype=float)
    xties = count_tied_groups(x)
    yties = count_tied_groups(y)
    if use_ties:
        # Tie corrections: for each group size k occurring v times,
        # subtract v*k*(k-1) from the pair count of each variable (tau-b).
        corr_x = np.sum([v*k*(k-1) for (k,v) in xties.items()], dtype=float)
        corr_y = np.sum([v*k*(k-1) for (k,v) in yties.items()], dtype=float)
        denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
    else:
        denom = n*(n-1)/2.
    tau = (C-D) / denom

    if method == 'exact' and (xties or yties):
        raise ValueError("Ties found, exact method cannot be used.")

    if method == 'auto':
        # Prefer the exact method for small samples or extreme statistics,
        # where it is both fast and more accurate than the normal
        # approximation.
        if (not xties and not yties) and (n <= 33 or min(C, n*(n-1)/2.0-C) <= 1):
            method = 'exact'
        else:
            method = 'asymptotic'

    if not xties and not yties and method == 'exact':
        prob = _kendall_p_exact(n, C, alternative)

    elif method == 'asymptotic':
        # Normal approximation: variance of S = C - D under the null,
        # with tie corrections (Kendall, 1970).
        var_s = n*(n-1)*(2*n+5)
        if use_ties:
            var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in xties.items()])
            var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in yties.items()])
            v1 = (np.sum([v*k*(k-1) for (k, v) in xties.items()], dtype=float) *
                  np.sum([v*k*(k-1) for (k, v) in yties.items()], dtype=float))
            v1 /= 2.*n*(n-1)
            if n > 2:
                v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in xties.items()],
                            dtype=float) * \
                     np.sum([v*k*(k-1)*(k-2) for (k,v) in yties.items()],
                            dtype=float)
                v2 /= 9.*n*(n-1)*(n-2)
            else:
                v2 = 0
        else:
            v1 = v2 = 0

        var_s /= 18.
        var_s += (v1 + v2)
        z = (C-D)/np.sqrt(var_s)
        prob = scipy.stats._stats_py._get_pvalue(z, distributions.norm, alternative)
    else:
        raise ValueError("Unknown method "+str(method)+" specified, please "
                         "use auto, exact or asymptotic.")

    # tau[()]/prob[()] unwrap 0-d arrays to scalars for the result object.
    res = scipy.stats._stats_py.SignificanceResult(tau[()], prob[()])
    res.correlation = tau
    return res
922
+
923
+
924
def kendalltau_seasonal(x):
    """
    Computes a multivariate Kendall's rank correlation tau, for seasonal data.

    Parameters
    ----------
    x : 2-D ndarray
        Array of seasonal data, with seasons in columns.

    Returns
    -------
    output : dict
        Dictionary with per-season and global tau estimates, the associated
        p-values (under independence and dependence of seasons), and chi-square
        statistics for the total and trend components.

    """
    x = ma.array(x, subok=True, copy=False, ndmin=2)
    (n,m) = x.shape
    # Number of unmasked observations per season (column).
    n_p = x.count(0)

    # Kendall S statistic per season: signed comparisons of every pair of
    # observations within each column.
    S_szn = sum(msign(x[i:]-x[i]).sum(0) for i in range(n))
    S_tot = S_szn.sum()

    n_tot = x.count()
    # Tie correction over all (pooled) observations for the global denominator.
    ties = count_tied_groups(x.compressed())
    corr_ties = sum(v*k*(k-1) for (k,v) in ties.items())
    denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.

    R = rankdata(x, axis=0, use_missing=True)
    K = ma.empty((m,m), dtype=int)
    covmat = ma.empty((m,m), dtype=float)
    denom_szn = ma.empty(m, dtype=float)
    for j in range(m):
        # Per-season tie correction for season j's denominator.
        ties_j = count_tied_groups(x[:,j].compressed())
        corr_j = sum(v*k*(k-1) for (k,v) in ties_j.items())
        cmb = n_p[j]*(n_p[j]-1)
        for k in range(j,m,1):
            # K[j,k]: signed agreement between seasons j and k over all
            # observation pairs; covmat is the covariance of the seasonal
            # S statistics (symmetric, so mirror across the diagonal).
            K[j,k] = sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
                         for i in range(n))
            covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() -
                           n*(n_p[j]+1)*(n_p[k]+1))/3.
            K[k,j] = K[j,k]
            covmat[k,j] = covmat[j,k]

        denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.

    var_szn = covmat.diagonal()

    # Continuity-corrected z scores: per season, and for the total under
    # independence (sum of variances) and dependence (full covariance sum).
    z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
    z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
    z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())

    # Two-sided normal p-values via the complementary error function.
    prob_szn = special.erfc(abs(z_szn.data)/np.sqrt(2))
    prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
    prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))

    chi2_tot = (z_szn*z_szn).sum()
    chi2_trd = m * z_szn.mean()**2
    output = {'seasonal tau': S_szn/denom_szn,
              'global tau': S_tot/denom_tot,
              'global tau (alt)': S_tot/denom_szn.sum(),
              'seasonal p-value': prob_szn,
              'global p-value (indep)': prob_tot_ind,
              'global p-value (dep)': prob_tot_dep,
              'chi2 total': chi2_tot,
              'chi2 trend': chi2_trd,
              }
    return output
986
+
987
+
988
# Result type of `pointbiserialr`: the point-biserial correlation
# coefficient and its two-tailed p-value.
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
                                                           'pvalue'))
990
+
991
+
992
def pointbiserialr(x, y):
    """Calculates a point biserial correlation coefficient and its p-value.

    Parameters
    ----------
    x : array_like of bools
        Input array.
    y : array_like
        Input array.

    Returns
    -------
    correlation : float
        R value
    pvalue : float
        2-tailed p-value

    Notes
    -----
    Missing values are considered pair-wise: if a value is missing in x,
    the corresponding value in y is masked.

    For more details on `pointbiserialr`, see `scipy.stats.pointbiserialr`.

    """
    # Mask NaN/Inf, then coerce to the working dtypes.
    x = ma.fix_invalid(x, copy=True).astype(bool)
    y = ma.fix_invalid(y, copy=True).astype(float)

    # Drop observations that are missing in either input (pair-wise).
    joint_mask = ma.mask_or(ma.getmask(x), ma.getmask(y))
    if joint_mask is not nomask:
        keep = np.logical_not(joint_mask)
        x = x[keep]
        y = y[keep]

    n = len(x)
    # Fraction of x values that are True.
    phat = x.sum() / float(n)
    mean_true = y[x].mean()     # mean of y where x is True
    mean_false = y[~x].mean()   # mean of y where x is False

    # Point-biserial r; y.std() is the population (ddof=0) std of all of y.
    rpb = (mean_true - mean_false) * np.sqrt(phat * (1 - phat)) / y.std()

    # Two-tailed p-value from the t distribution with n-2 degrees of
    # freedom, via the regularized incomplete beta function.
    df = n - 2
    t = rpb * ma.sqrt(df / (1.0 - rpb**2))
    prob = _betai(0.5*df, 0.5, df/(df + t*t))

    return PointbiserialrResult(rpb, prob)
1041
+
1042
+
1043
def linregress(x, y=None):
    r"""
    Calculate a linear least-squares regression for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        Two sets of measurements. Both arrays should have the same length N. If
        only `x` is given (and ``y=None``), then it must be a two-dimensional
        array where one dimension has length 2. The two sets of measurements
        are then found by splitting the array along the length-2 dimension. In
        the case where ``y=None`` and `x` is a 2xN array, ``linregress(x)`` is
        equivalent to ``linregress(x[0], x[1])``.

    Returns
    -------
    result : ``LinregressResult`` instance
        The return value is an object with the following attributes:

        slope : float
            Slope of the regression line.
        intercept : float
            Intercept of the regression line.
        rvalue : float
            The Pearson correlation coefficient. The square of ``rvalue``
            is equal to the coefficient of determination.
        pvalue : float
            The p-value for a hypothesis test whose null hypothesis is
            that the slope is zero, using Wald Test with t-distribution of
            the test statistic. See `alternative` above for alternative
            hypotheses.
        stderr : float
            Standard error of the estimated slope (gradient), under the
            assumption of residual normality.
        intercept_stderr : float
            Standard error of the estimated intercept, under the assumption
            of residual normality.

    See Also
    --------
    scipy.optimize.curve_fit :
        Use non-linear least squares to fit a function to data.
    scipy.optimize.leastsq :
        Minimize the sum of squares of a set of equations.

    Notes
    -----
    Missing values are considered pair-wise: if a value is missing in `x`,
    the corresponding value in `y` is masked.

    For compatibility with older versions of SciPy, the return value acts
    like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``,
    ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write::

        slope, intercept, r, p, se = linregress(x, y)

    With that style, however, the standard error of the intercept is not
    available. To have access to all the computed values, including the
    standard error of the intercept, use the return value as an object
    with attributes, e.g.::

        result = linregress(x, y)
        print(result.intercept, result.intercept_stderr)

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import stats
    >>> rng = np.random.default_rng()

    Generate some data:

    >>> x = rng.random(10)
    >>> y = 1.6*x + rng.random(10)

    Perform the linear regression:

    >>> res = stats.mstats.linregress(x, y)

    Coefficient of determination (R-squared):

    >>> print(f"R-squared: {res.rvalue**2:.6f}")
    R-squared: 0.717533

    Plot the data along with the fitted line:

    >>> plt.plot(x, y, 'o', label='original data')
    >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line')
    >>> plt.legend()
    >>> plt.show()

    Calculate 95% confidence interval on slope and intercept:

    >>> # Two-sided inverse Students t-distribution
    >>> # p - probability, df - degrees of freedom
    >>> from scipy.stats import t
    >>> tinv = lambda p, df: abs(t.ppf(p/2, df))

    >>> ts = tinv(0.05, len(x)-2)
    >>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}")
    slope (95%): 1.453392 +/- 0.743465
    >>> print(f"intercept (95%): {res.intercept:.6f}"
    ...       f" +/- {ts*res.intercept_stderr:.6f}")
    intercept (95%): 0.616950 +/- 0.544475

    """
    if y is None:
        # Single-argument form: split a (2, N) or (N, 2) array into x and y.
        x = ma.array(x)
        if x.shape[0] == 2:
            x, y = x
        elif x.shape[1] == 2:
            x, y = x.T
        else:
            raise ValueError("If only `x` is given as input, "
                             "it has to be of shape (2, N) or (N, 2), "
                             f"provided shape was {x.shape}")
    else:
        x = ma.array(x)
        y = ma.array(y)

    x = x.flatten()
    y = y.flatten()

    # A vertical line has undefined slope.
    if np.amax(x) == np.amin(x) and len(x) > 1:
        raise ValueError("Cannot calculate a linear regression "
                         "if all x values are identical")

    # shrink=False keeps an explicit (possibly all-False) mask array whenever
    # either input carries one, so the branch below sees it.
    m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
    if m is not nomask:
        x = ma.array(x, mask=m)
        y = ma.array(y, mask=m)
        if np.any(~m):
            # Fit on the jointly-unmasked observations only.
            result = _stats_py.linregress(x.data[~m], y.data[~m])
        else:
            # All data is masked
            result = _stats_py.LinregressResult(slope=None, intercept=None,
                                                rvalue=None, pvalue=None,
                                                stderr=None,
                                                intercept_stderr=None)
    else:
        result = _stats_py.linregress(x.data, y.data)

    return result
1187
+
1188
+
1189
def theilslopes(y, x=None, alpha=0.95, method='separate'):
    r"""
    Computes the Theil-Sen estimator for a set of points (x, y).

    `theilslopes` implements a method for robust linear regression. It
    computes the slope as the median of all slopes between paired values.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    alpha : float, optional
        Confidence degree between 0 and 1. Default is 95% confidence.
        Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
        interpreted as "find the 90% confidence interval".
    method : {'joint', 'separate'}, optional
        Method to be used for computing estimate for intercept.
        Following methods are supported,

        * 'joint': Uses np.median(y - slope * x) as intercept.
        * 'separate': Uses np.median(y) - slope * np.median(x)
          as intercept.

        The default is 'separate'.

        .. versionadded:: 1.8.0

    Returns
    -------
    result : ``TheilslopesResult`` instance
        The return value is an object with the following attributes:

        slope : float
            Theil slope.
        intercept : float
            Intercept of the Theil line.
        low_slope : float
            Lower bound of the confidence interval on `slope`.
        high_slope : float
            Upper bound of the confidence interval on `slope`.

    See Also
    --------
    siegelslopes : a similar technique using repeated medians

    Notes
    -----
    For more details on `theilslopes`, see `scipy.stats.theilslopes`.

    """
    y = ma.asarray(y).flatten()
    if x is None:
        # Default independent variable: 0, 1, ..., len(y)-1.
        x = ma.arange(len(y), dtype=float)
    else:
        x = ma.asarray(x).flatten()
        if len(x) != len(y):
            raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})")

    # Apply the pair-wise union of both masks to each array, then drop the
    # masked entries entirely.
    shared_mask = ma.mask_or(ma.getmask(x), ma.getmask(y))
    y._mask = x._mask = shared_mask
    y = y.compressed()
    x = x.compressed().astype(float)
    # With plain (unmasked) arrays, defer to `scipy.stats.theilslopes`.
    return stats_theilslopes(y, x, alpha=alpha, method=method)
1257
+
1258
+
1259
def siegelslopes(y, x=None, method="hierarchical"):
    r"""
    Computes the Siegel estimator for a set of points (x, y).

    `siegelslopes` implements a method for robust linear regression
    using repeated medians to fit a line to the points (x, y).
    The method is robust to outliers with an asymptotic breakdown point
    of 50%.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    method : {'hierarchical', 'separate'}
        If 'hierarchical', estimate the intercept using the estimated
        slope ``slope`` (default option).
        If 'separate', estimate the intercept independent of the estimated
        slope. See Notes for details.

    Returns
    -------
    result : ``SiegelslopesResult`` instance
        The return value is an object with the following attributes:

        slope : float
            Estimate of the slope of the regression line.
        intercept : float
            Estimate of the intercept of the regression line.

    See Also
    --------
    theilslopes : a similar technique without repeated medians

    Notes
    -----
    For more details on `siegelslopes`, see `scipy.stats.siegelslopes`.

    """
    y = ma.asarray(y).ravel()
    if x is None:
        # Default independent variable: 0, 1, ..., len(y)-1.
        x = ma.arange(len(y), dtype=float)
    else:
        x = ma.asarray(x).ravel()
        if len(x) != len(y):
            raise ValueError(f"Incompatible lengths ! ({len(y)}<>{len(x)})")

    # Apply the pair-wise union of both masks to each array, then drop the
    # masked entries entirely.
    shared_mask = ma.mask_or(ma.getmask(x), ma.getmask(y))
    y._mask = x._mask = shared_mask
    y = y.compressed()
    x = x.compressed().astype(float)
    # With plain (unmasked) arrays, defer to `scipy.stats.siegelslopes`.
    return stats_siegelslopes(y, x, method=method)
1314
+
1315
+
1316
# Result bunch for `sen_seasonal_slopes`: per-season Theil-Sen slopes
# (`intra_slope`) and the seasonal Kendall slope estimator (`inter_slope`).
SenSeasonalSlopesResult = _make_tuple_bunch('SenSeasonalSlopesResult',
                                            ['intra_slope', 'inter_slope'])
1318
+
1319
+
1320
def sen_seasonal_slopes(x):
    r"""
    Computes seasonal Theil-Sen and Kendall slope estimators.

    The seasonal generalization of Sen's slope computes the slopes between
    all pairs of values within a "season" (column) of a 2D array. It
    returns an array containing the median of these "within-season" slopes
    for each season (the Theil-Sen slope estimator of each season), and the
    median of the within-season slopes across all seasons (the seasonal
    Kendall slope estimator).

    Parameters
    ----------
    x : 2D array_like
        Each column of `x` contains measurements of the dependent variable
        within a season. The independent variable (usually time) of each
        season is assumed to be ``np.arange(x.shape[0])``.

    Returns
    -------
    result : ``SenSeasonalSlopesResult`` instance
        An object with attributes ``intra_slope`` (ndarray of per-season
        Theil-Sen slope estimates) and ``inter_slope`` (float, the seasonal
        Kendall slope estimator: the median of within-season slopes across
        all seasons).

    See Also
    --------
    theilslopes : the analogous function for non-seasonal data
    scipy.stats.theilslopes : non-seasonal slopes for non-masked arrays

    Notes
    -----
    The slopes :math:`d_{ijk} = (x_{ij} - x_{ik}) / (j - k)` are formed for
    all pairs of distinct integer indices :math:`j < k` within each season
    :math:`i`; `intra_slope` holds their per-season medians and
    `inter_slope` the overall median.

    References
    ----------
    .. [1] Hirsch, Robert M., James R. Slack, and Richard A. Smith.
           "Techniques of trend analysis for monthly water quality data."
           *Water Resources Research* 18.1 (1982): 107-121.

    """
    data = ma.array(x, subok=True, copy=False, ndmin=2)
    n_obs = data.shape[0]
    # Slopes between observation i and every later observation, per season;
    # the divisor is the time gap (1, 2, ..., n_obs-1-i).
    pairwise = []
    for i in range(n_obs):
        gaps = np.arange(1, n_obs - i)[:, None]
        pairwise.append((data[i + 1:] - data[i]) / gaps)
    all_slopes = ma.vstack(pairwise)
    per_season = ma.median(all_slopes, axis=0)
    overall = ma.median(all_slopes, axis=None)
    return SenSeasonalSlopesResult(per_season, overall)
1434
+
1435
+
1436
+ Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
1437
+
1438
+
1439
+ def ttest_1samp(a, popmean, axis=0, alternative='two-sided'):
1440
+ """
1441
+ Calculates the T-test for the mean of ONE group of scores.
1442
+
1443
+ Parameters
1444
+ ----------
1445
+ a : array_like
1446
+ sample observation
1447
+ popmean : float or array_like
1448
+ expected value in null hypothesis, if array_like than it must have the
1449
+ same shape as `a` excluding the axis dimension
1450
+ axis : int or None, optional
1451
+ Axis along which to compute test. If None, compute over the whole
1452
+ array `a`.
1453
+ alternative : {'two-sided', 'less', 'greater'}, optional
1454
+ Defines the alternative hypothesis.
1455
+ The following options are available (default is 'two-sided'):
1456
+
1457
+ * 'two-sided': the mean of the underlying distribution of the sample
1458
+ is different than the given population mean (`popmean`)
1459
+ * 'less': the mean of the underlying distribution of the sample is
1460
+ less than the given population mean (`popmean`)
1461
+ * 'greater': the mean of the underlying distribution of the sample is
1462
+ greater than the given population mean (`popmean`)
1463
+
1464
+ .. versionadded:: 1.7.0
1465
+
1466
+ Returns
1467
+ -------
1468
+ statistic : float or array
1469
+ t-statistic
1470
+ pvalue : float or array
1471
+ The p-value
1472
+
1473
+ Notes
1474
+ -----
1475
+ For more details on `ttest_1samp`, see `scipy.stats.ttest_1samp`.
1476
+
1477
+ """
1478
+ a, axis = _chk_asarray(a, axis)
1479
+ if a.size == 0:
1480
+ return (np.nan, np.nan)
1481
+
1482
+ x = a.mean(axis=axis)
1483
+ v = a.var(axis=axis, ddof=1)
1484
+ n = a.count(axis=axis)
1485
+ # force df to be an array for masked division not to throw a warning
1486
+ df = ma.asanyarray(n - 1.0)
1487
+ svar = ((n - 1.0) * v) / df
1488
+ with np.errstate(divide='ignore', invalid='ignore'):
1489
+ t = (x - popmean) / ma.sqrt(svar / n)
1490
+
1491
+ t, prob = _ttest_finish(df, t, alternative)
1492
+ return Ttest_1sampResult(t, prob)
1493
+
1494
+
1495
+ ttest_onesamp = ttest_1samp
1496
+
1497
+
1498
+ Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
1499
+
1500
+
1501
+ def ttest_ind(a, b, axis=0, equal_var=True, alternative='two-sided'):
1502
+ """
1503
+ Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
1504
+
1505
+ Parameters
1506
+ ----------
1507
+ a, b : array_like
1508
+ The arrays must have the same shape, except in the dimension
1509
+ corresponding to `axis` (the first, by default).
1510
+ axis : int or None, optional
1511
+ Axis along which to compute test. If None, compute over the whole
1512
+ arrays, `a`, and `b`.
1513
+ equal_var : bool, optional
1514
+ If True, perform a standard independent 2 sample test that assumes equal
1515
+ population variances.
1516
+ If False, perform Welch's t-test, which does not assume equal population
1517
+ variance.
1518
+
1519
+ .. versionadded:: 0.17.0
1520
+ alternative : {'two-sided', 'less', 'greater'}, optional
1521
+ Defines the alternative hypothesis.
1522
+ The following options are available (default is 'two-sided'):
1523
+
1524
+ * 'two-sided': the means of the distributions underlying the samples
1525
+ are unequal.
1526
+ * 'less': the mean of the distribution underlying the first sample
1527
+ is less than the mean of the distribution underlying the second
1528
+ sample.
1529
+ * 'greater': the mean of the distribution underlying the first
1530
+ sample is greater than the mean of the distribution underlying
1531
+ the second sample.
1532
+
1533
+ .. versionadded:: 1.7.0
1534
+
1535
+ Returns
1536
+ -------
1537
+ statistic : float or array
1538
+ The calculated t-statistic.
1539
+ pvalue : float or array
1540
+ The p-value.
1541
+
1542
+ Notes
1543
+ -----
1544
+ For more details on `ttest_ind`, see `scipy.stats.ttest_ind`.
1545
+
1546
+ """
1547
+ a, b, axis = _chk2_asarray(a, b, axis)
1548
+
1549
+ if a.size == 0 or b.size == 0:
1550
+ return Ttest_indResult(np.nan, np.nan)
1551
+
1552
+ (x1, x2) = (a.mean(axis), b.mean(axis))
1553
+ (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
1554
+ (n1, n2) = (a.count(axis), b.count(axis))
1555
+
1556
+ if equal_var:
1557
+ # force df to be an array for masked division not to throw a warning
1558
+ df = ma.asanyarray(n1 + n2 - 2.0)
1559
+ svar = ((n1-1)*v1+(n2-1)*v2) / df
1560
+ denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here!
1561
+ else:
1562
+ vn1 = v1/n1
1563
+ vn2 = v2/n2
1564
+ with np.errstate(divide='ignore', invalid='ignore'):
1565
+ df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
1566
+
1567
+ # If df is undefined, variances are zero.
1568
+ # It doesn't matter what df is as long as it is not NaN.
1569
+ df = np.where(np.isnan(df), 1, df)
1570
+ denom = ma.sqrt(vn1 + vn2)
1571
+
1572
+ with np.errstate(divide='ignore', invalid='ignore'):
1573
+ t = (x1-x2) / denom
1574
+
1575
+ t, prob = _ttest_finish(df, t, alternative)
1576
+ return Ttest_indResult(t, prob)
1577
+
1578
+
1579
+ Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
1580
+
1581
+
1582
+ def ttest_rel(a, b, axis=0, alternative='two-sided'):
1583
+ """
1584
+ Calculates the T-test on TWO RELATED samples of scores, a and b.
1585
+
1586
+ Parameters
1587
+ ----------
1588
+ a, b : array_like
1589
+ The arrays must have the same shape.
1590
+ axis : int or None, optional
1591
+ Axis along which to compute test. If None, compute over the whole
1592
+ arrays, `a`, and `b`.
1593
+ alternative : {'two-sided', 'less', 'greater'}, optional
1594
+ Defines the alternative hypothesis.
1595
+ The following options are available (default is 'two-sided'):
1596
+
1597
+ * 'two-sided': the means of the distributions underlying the samples
1598
+ are unequal.
1599
+ * 'less': the mean of the distribution underlying the first sample
1600
+ is less than the mean of the distribution underlying the second
1601
+ sample.
1602
+ * 'greater': the mean of the distribution underlying the first
1603
+ sample is greater than the mean of the distribution underlying
1604
+ the second sample.
1605
+
1606
+ .. versionadded:: 1.7.0
1607
+
1608
+ Returns
1609
+ -------
1610
+ statistic : float or array
1611
+ t-statistic
1612
+ pvalue : float or array
1613
+ two-tailed p-value
1614
+
1615
+ Notes
1616
+ -----
1617
+ For more details on `ttest_rel`, see `scipy.stats.ttest_rel`.
1618
+
1619
+ """
1620
+ a, b, axis = _chk2_asarray(a, b, axis)
1621
+ if len(a) != len(b):
1622
+ raise ValueError('unequal length arrays')
1623
+
1624
+ if a.size == 0 or b.size == 0:
1625
+ return Ttest_relResult(np.nan, np.nan)
1626
+
1627
+ n = a.count(axis)
1628
+ df = ma.asanyarray(n-1.0)
1629
+ d = (a-b).astype('d')
1630
+ dm = d.mean(axis)
1631
+ v = d.var(axis=axis, ddof=1)
1632
+ denom = ma.sqrt(v / n)
1633
+ with np.errstate(divide='ignore', invalid='ignore'):
1634
+ t = dm / denom
1635
+
1636
+ t, prob = _ttest_finish(df, t, alternative)
1637
+ return Ttest_relResult(t, prob)
1638
+
1639
+
1640
# Result container for `mannwhitneyu`.
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
                                                       'pvalue'))
1642
+
1643
+
1644
def mannwhitneyu(x,y, use_continuity=True):
    """
    Computes the Mann-Whitney statistic

    Missing values in `x` and/or `y` are discarded.

    Parameters
    ----------
    x : sequence
        Input
    y : sequence
        Input
    use_continuity : {True, False}, optional
        Whether a continuity correction (1/2.) should be taken into account.

    Returns
    -------
    statistic : float
        The minimum of the Mann-Whitney statistics
    pvalue : float
        Approximate two-sided p-value assuming a normal distribution.

    """
    xdata = ma.asarray(x).compressed().view(ndarray)
    ydata = ma.asarray(y).compressed().view(ndarray)
    allranks = rankdata(np.concatenate([xdata, ydata]))
    nx, ny = len(xdata), len(ydata)
    ntot = nx + ny

    # Larger of the two U statistics; the reported statistic is the smaller.
    big_u = allranks[:nx].sum() - nx * (nx + 1) / 2.
    big_u = max(big_u, nx * ny - big_u)
    small_u = nx * ny - big_u

    # Normal approximation, with a tie correction on the variance.
    mu = (nx * ny) / 2.
    sigsq = (ntot ** 3 - ntot) / 12.
    tie_groups = count_tied_groups(allranks)
    sigsq -= sum(cnt * (size ** 3 - size)
                 for (size, cnt) in tie_groups.items()) / 12.
    sigsq *= nx * ny / float(ntot * (ntot - 1))

    shift = 1 / 2. if use_continuity else 0.
    z = (big_u - shift - mu) / ma.sqrt(sigsq)

    prob = special.erfc(abs(z) / np.sqrt(2))
    return MannwhitneyuResult(small_u, prob)
1689
+
1690
+
1691
+ KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
1692
+
1693
+
1694
def kruskal(*args):
    """
    Compute the Kruskal-Wallis H-test for independent samples

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments.

    Returns
    -------
    statistic : float
        The Kruskal-Wallis H statistic, corrected for ties
    pvalue : float
        The p-value for the test using the assumption that H has a chi
        square distribution

    Raises
    ------
    ValueError
        If every value is identical (tie correction would be zero).

    Notes
    -----
    For more details on `kruskal`, see `scipy.stats.kruskal`.

    Examples
    --------
    >>> from scipy.stats.mstats import kruskal
    >>> a = [6.3, 5.4, 5.7, 5.2, 5.0]
    >>> b = [6.9, 7.0, 6.1, 7.9]
    >>> c = [7.2, 6.9, 6.1, 6.5]
    >>> kruskal(a, b, c)
    KruskalResult(statistic=7.113812154696133, pvalue=0.028526948491942164)

    """
    groups = argstoarray(*args)
    ranked = ma.masked_equal(rankdata(groups, use_missing=False), 0)
    rank_sums = ranked.sum(-1)
    group_sizes = ranked.count(-1)
    ntot = ranked.count()
    H = 12. / (ntot * (ntot + 1)) * (rank_sums ** 2 / group_sizes).sum() - 3 * (ntot + 1)

    # Tie correction
    tie_groups = count_tied_groups(ranked)
    correction = 1. - sum(cnt * (size ** 3 - size)
                          for (size, cnt) in tie_groups.items()) / float(ntot ** 3 - ntot)
    if correction == 0:
        raise ValueError('All numbers are identical in kruskal')

    H /= correction
    dof = len(groups) - 1
    prob = distributions.chi2.sf(H, dof)
    return KruskalResult(H, prob)
1753
+
1754
+
1755
+ kruskalwallis = kruskal
1756
+
1757
+
1758
@_rename_parameter("mode", "method")
def ks_1samp(x, cdf, args=(), alternative="two-sided", method='auto'):
    """
    Computes the Kolmogorov-Smirnov test on one sample of masked values.

    Missing values in `x` are discarded.

    Parameters
    ----------
    x : array_like
        a 1-D array of observations of random variables.
    cdf : str or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If a callable, that callable is used to calculate the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used if `cdf` is a string.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Indicates the alternative hypothesis. Default is 'two-sided'.
        Single-letter abbreviations ('t', 'l', 'g', any case) are accepted.
    method : {'auto', 'exact', 'asymp'}, optional
        How the p-value is computed: 'exact' uses an approximation to the
        exact distribution of the statistic, 'asymp' the asymptotic
        distribution, and 'auto' (default) picks based on the sample size.

    Returns
    -------
    d : float
        Value of the Kolmogorov Smirnov test
    p : float
        Corresponding p-value.

    """
    # Expand single-letter abbreviations; unknown strings pass through
    # unchanged so the delegate can reject them.
    abbreviations = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}
    alternative = abbreviations.get(alternative.lower()[0], alternative)
    return scipy.stats._stats_py.ks_1samp(
        x, cdf, args=args, alternative=alternative, method=method)
1796
+
1797
+
1798
@_rename_parameter("mode", "method")
def ks_2samp(data1, data2, alternative="two-sided", method='auto'):
    """
    Computes the Kolmogorov-Smirnov test on two samples.

    Missing values in `x` and/or `y` are discarded.

    Parameters
    ----------
    data1 : array_like
        First data set
    data2 : array_like
        Second data set
    alternative : {'two-sided', 'less', 'greater'}, optional
        Indicates the alternative hypothesis. Default is 'two-sided'.
        Single-letter abbreviations ('t', 'l', 'g', any case) are accepted.
    method : {'auto', 'exact', 'asymp'}, optional
        How the p-value is computed: 'exact' uses an approximation to the
        exact distribution of the statistic, 'asymp' the asymptotic
        distribution, and 'auto' (default) picks based on the sample size.

    Returns
    -------
    d : float
        Value of the Kolmogorov Smirnov test
    p : float
        Corresponding p-value.

    """
    # Ideally this would simply alias scipy.stats._stats_py.ks_2samp, but
    # the circular dependencies between _mstats_basic and stats prevent that.
    abbreviations = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}
    alternative = abbreviations.get(alternative.lower()[0], alternative)
    return scipy.stats._stats_py.ks_2samp(data1, data2,
                                          alternative=alternative,
                                          method=method)
1837
+
1838
+
1839
+ ks_twosamp = ks_2samp
1840
+
1841
+
1842
@_rename_parameter("mode", "method")
def kstest(data1, data2, args=(), alternative='two-sided', method='auto'):
    """
    Performs the (one- or two-sample) Kolmogorov-Smirnov test.

    Parameters
    ----------
    data1 : array_like
    data2 : str, callable or array_like
    args : tuple, sequence, optional
        Distribution parameters, used if `data1` or `data2` are strings.
    alternative : str, as documented in stats.kstest
    method : str, as documented in stats.kstest

    Returns
    -------
    tuple of (K-S statistic, probability)

    """
    # Pure delegation: mstats accepts the same arguments as stats.kstest.
    return scipy.stats._stats_py.kstest(data1, data2, args,
                                        alternative=alternative,
                                        method=method)
1862
+
1863
+
1864
def trima(a, limits=None, inclusive=(True,True)):
    """
    Trims an array by masking the data outside some given limits.

    Returns a masked version of the input array.

    Parameters
    ----------
    a : array_like
        Input array.
    limits : {None, tuple}, optional
        Tuple of (lower limit, upper limit) in absolute values.
        Values of the input array lower (greater) than the lower (upper) limit
        will be masked. A limit is None indicates an open interval.
    inclusive : (bool, bool) tuple, optional
        Tuple of (lower flag, upper flag), indicating whether values exactly
        equal to the lower (upper) limit are allowed.

    Examples
    --------
    >>> from scipy.stats.mstats import trima
    >>> import numpy as np
    >>> a = np.arange(10)

    The interval is left-closed and right-open, i.e., `[2, 8)`.
    Trim the array by keeping only values in the interval.

    >>> trima(a, limits=(2, 8), inclusive=(True, False))
    masked_array(data=[--, --, 2, 3, 4, 5, 6, 7, --, --],
                 mask=[ True,  True, False, False, False, False, False, False,
                        True,  True],
           fill_value=999999)

    """
    arr = ma.asarray(a)
    arr.unshare_mask()
    if (limits is None) or (limits == (None, None)):
        return arr

    lower, upper = limits
    lower_closed, upper_closed = inclusive
    # Accumulate the "out of bounds" condition; masked entries of `arr`
    # yield masked comparisons, which are filled as True (masked) below.
    outside = False
    if lower is not None:
        outside |= (arr < lower) if lower_closed else (arr <= lower)
    if upper is not None:
        outside |= (arr > upper) if upper_closed else (arr >= upper)

    arr[outside.filled(True)] = masked
    return arr
1921
+
1922
+
1923
def trimr(a, limits=None, inclusive=(True, True), axis=None):
    """
    Trims an array by masking some proportion of the data on each end.
    Returns a masked version of the input array.

    Parameters
    ----------
    a : sequence
        Input array.
    limits : {None, tuple}, optional
        Tuple of the percentages to cut on each side of the array, with respect
        to the number of unmasked data, as floats between 0. and 1.
        Noting n the number of unmasked data before trimming, the
        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
        masked, and the total number of unmasked data after trimming is
        n*(1.-sum(limits)). The value of one limit can be set to None to
        indicate an open interval.
    inclusive : {(True,True) tuple}, optional
        Tuple of flags indicating whether the number of data being masked on
        the left (right) end should be truncated (True) or rounded (False) to
        integers.
    axis : {None,int}, optional
        Axis along which to trim. If None, the whole array is trimmed, but its
        shape is maintained.

    """
    def _trim_edges_1d(arr, lo_frac, up_frac, lo_trunc, up_trunc):
        # Mask the lo_frac smallest and up_frac largest unmasked values.
        count = arr.count()
        order = arr.argsort()
        if lo_frac:
            cut = int(lo_frac * count) if lo_trunc else int(np.round(lo_frac * count))
            arr[order[:cut]] = masked
        if up_frac is not None:
            if up_trunc:
                keep = count - int(count * up_frac)
            else:
                keep = count - int(np.round(count * up_frac))
            arr[order[keep:]] = masked
        return arr

    out = ma.asarray(a)
    out.unshare_mask()
    if limits is None:
        return out

    # Validate the requested proportions.
    lo_lim, up_lim = limits
    errmsg = "The proportion to cut from the %s should be between 0. and 1."
    if lo_lim is not None:
        if lo_lim > 1. or lo_lim < 0:
            raise ValueError(errmsg % 'beginning' + f"(got {lo_lim})")
    if up_lim is not None:
        if up_lim > 1. or up_lim < 0:
            raise ValueError(errmsg % 'end' + f"(got {up_lim})")

    lo_inc, up_inc = inclusive

    if axis is None:
        original_shape = out.shape
        flat = _trim_edges_1d(out.ravel(), lo_lim, up_lim, lo_inc, up_inc)
        return flat.reshape(original_shape)
    return ma.apply_along_axis(_trim_edges_1d, axis, out,
                               lo_lim, up_lim, lo_inc, up_inc)
1988
+
1989
+
1990
# Shared "Parameters" section for `trim` and the `trimmed_*` helpers; each of
# those functions splices it into its own docstring via ``__doc__ % trimdoc``
# below, so this string must contain no bare '%' characters.
trimdoc = """
    Parameters
    ----------
    a : sequence
        Input array
    limits : {None, tuple}, optional
        If `relative` is False, tuple (lower limit, upper limit) in absolute values.
        Values of the input array lower (greater) than the lower (upper) limit are
        masked.

        If `relative` is True, tuple (lower percentage, upper percentage) to cut
        on each side of the array, with respect to the number of unmasked data.

        Noting n the number of unmasked data before trimming, the (n*limits[0])th
        smallest data and the (n*limits[1])th largest data are masked, and the
        total number of unmasked data after trimming is n*(1.-sum(limits))
        In each case, the value of one limit can be set to None to indicate an
        open interval.

        If limits is None, no trimming is performed
    inclusive : {(bool, bool) tuple}, optional
        If `relative` is False, tuple indicating whether values exactly equal
        to the absolute limits are allowed.
        If `relative` is True, tuple indicating whether the number of data
        being masked on each side should be rounded (True) or truncated
        (False).
    relative : bool, optional
        Whether to consider the limits as absolute values (False) or proportions
        to cut (True).
    axis : int, optional
        Axis along which to trim.
    """
2022
+
2023
+
2024
def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
    """
    Trims an array by masking the data outside some given limits.

    Returns a masked version of the input array.

    %s

    Examples
    --------
    >>> from scipy.stats.mstats import trim
    >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
    >>> print(trim(z,(3,8)))
    [-- -- 3 4 5 6 7 8 -- --]
    >>> print(trim(z,(0.1,0.2),relative=True))
    [-- 2 3 4 5 6 7 8 -- --]

    """
    # Dispatch: proportional trimming is handled by trimr, absolute by trima.
    if relative:
        return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    return trima(a, limits=limits, inclusive=inclusive)
2046
+
2047
+
2048
# Splice the shared parameter documentation into the docstring; the guard
# protects against __doc__ being None (e.g. under `python -OO`).
if trim.__doc__:
    trim.__doc__ = trim.__doc__ % trimdoc
2050
+
2051
+
2052
def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
    """
    Trims the smallest and largest data values.

    Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and
    ``int(proportiontocut * n)`` largest values of data along the given axis,
    where n is the number of unmasked values before trimming.

    Parameters
    ----------
    data : ndarray
        Data to trim.
    proportiontocut : float, optional
        Percentage of trimming (as a float between 0 and 1).
        If n is the number of unmasked values before trimming, the number of
        values after trimming is ``(1 - 2*proportiontocut) * n``.
        Default is 0.2.
    inclusive : {(bool, bool) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be rounded (True) or truncated (False).
    axis : int, optional
        Axis along which to perform the trimming.
        If None, the input array is first flattened.

    """
    # Symmetric proportional trim on both ends.
    cut = (proportiontocut, proportiontocut)
    return trimr(data, limits=cut, inclusive=inclusive, axis=axis)
2079
+
2080
+
2081
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
             axis=None):
    """
    Trims the data by masking values from one tail.

    Parameters
    ----------
    data : array_like
        Data to trim.
    proportiontocut : float, optional
        Percentage of trimming. If n is the number of unmasked values
        before trimming, the number of values after trimming is
        ``(1 - proportiontocut) * n``. Default is 0.2.
    tail : {'left','right'}, optional
        If 'left' the `proportiontocut` lowest values will be masked.
        If 'right' the `proportiontocut` highest values will be masked.
        Default is 'left'.
    inclusive : {(bool, bool) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be rounded (True) or truncated (False). Default is
        (True, True).
    axis : int, optional
        Axis along which to perform the trimming.
        If None, the input array is first flattened. Default is None.

    Returns
    -------
    trimtail : ndarray
        Returned array of same shape as `data` with masked tail values.

    Raises
    ------
    TypeError
        If `tail` does not start with 'l' or 'r' (case-insensitive).

    """
    # Only the first letter of `tail` is significant, case-insensitively.
    side = str(tail).lower()[0]
    if side == 'l':
        cut = (proportiontocut, None)
    elif side == 'r':
        cut = (None, proportiontocut)
    else:
        raise TypeError("The tail argument should be in ('left','right')")

    return trimr(data, limits=cut, axis=axis, inclusive=inclusive)
2121
+
2122
+
2123
+ trim1 = trimtail
2124
+
2125
+
2126
def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                 axis=None):
    """Returns the trimmed mean of the data along the given axis.

    %s

    """
    # A single float means "cut the same proportion on both sides".
    if isinstance(limits, float) and not isinstance(limits, tuple):
        limits = (limits, limits)
    if relative:
        trimmed = trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    else:
        trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.mean(axis=axis)
2139
+
2140
+
2141
# Splice the shared parameter documentation into the docstring; the guard
# protects against __doc__ being None (e.g. under `python -OO`).
if trimmed_mean.__doc__:
    trimmed_mean.__doc__ = trimmed_mean.__doc__ % trimdoc
2143
+
2144
+
2145
def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                axis=None, ddof=0):
    """Returns the trimmed variance of the data along the given axis.

    %s
    ddof : {0,integer}, optional
        Means Delta Degrees of Freedom. The denominator used during computations
        is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un-
        biased estimate of the variance.

    """
    # A single float means "cut the same proportion on both sides".
    if isinstance(limits, float) and not isinstance(limits, tuple):
        limits = (limits, limits)
    if relative:
        trimmed = trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    else:
        trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.var(axis=axis, ddof=ddof)
2164
+
2165
+
2166
# Splice the shared parameter documentation into the docstring; the guard
# protects against __doc__ being None (e.g. under `python -OO`).
if trimmed_var.__doc__:
    trimmed_var.__doc__ = trimmed_var.__doc__ % trimdoc
2168
+
2169
+
2170
def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                axis=None, ddof=0):
    """Returns the trimmed standard deviation of the data along the given axis.

    %s
    ddof : {0,integer}, optional
        Means Delta Degrees of Freedom. The denominator used during computations
        is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un-
        biased estimate of the variance.

    """
    # A single float means "cut the same proportion on both sides".
    if isinstance(limits, float) and not isinstance(limits, tuple):
        limits = (limits, limits)
    if relative:
        trimmed = trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    else:
        trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.std(axis=axis, ddof=ddof)
2188
+
2189
+
2190
# Splice the shared parameter documentation into the docstring; the guard
# protects against __doc__ being None (e.g. under `python -OO`).
if trimmed_std.__doc__:
    trimmed_std.__doc__ = trimmed_std.__doc__ % trimdoc
2192
+
2193
+
2194
def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
    """
    Returns the standard error of the trimmed mean along the given axis.

    Parameters
    ----------
    a : sequence
        Input array
    limits : {(0.1,0.1), tuple of float}, optional
        tuple (lower percentage, upper percentage) to cut on each side of the
        array, with respect to the number of unmasked data.

        If n is the number of unmasked data before trimming, the values
        smaller than ``n * limits[0]`` and the values larger than
        ``n * `limits[1]`` are masked, and the total number of unmasked
        data after trimming is ``n * (1.-sum(limits))``. In each case,
        the value of one limit can be set to None to indicate an open interval.
        If `limits` is None, no trimming is performed.
    inclusive : {(bool, bool) tuple} optional
        Tuple indicating whether the number of data being masked on each side
        should be rounded (True) or truncated (False).
    axis : int, optional
        Axis along which to trim.

    Returns
    -------
    trimmed_stde : scalar or ndarray

    """
    def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
        "Returns the standard error of the trimmed mean for a 1D input data."
        n = a.count()
        idx = a.argsort()
        # A one-sided None limit means "cut nothing" on that side; previously
        # a None here caused a NameError/TypeError below.
        low_limit = 0. if low_limit is None else low_limit
        up_limit = 0. if up_limit is None else up_limit
        # Default cut indices (no trimming) so the winsorizing step below is
        # always well-defined.
        lowidx = 0
        upidx = n
        if low_limit:
            if low_inclusive:
                lowidx = int(low_limit*n)
            else:
                # int(): np.round returns a float, which is not a valid index.
                lowidx = int(np.round(low_limit*n))
            a[idx[:lowidx]] = masked
        if up_limit:
            if up_inclusive:
                upidx = n - int(n*up_limit)
            else:
                upidx = n - int(np.round(n*up_limit))
            a[idx[upidx:]] = masked
        # Winsorize: replace the trimmed values with the nearest kept value.
        a[idx[:lowidx]] = a[idx[lowidx]]
        a[idx[upidx:]] = a[idx[upidx-1]]
        winstd = a.std(ddof=1)
        return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a)))

    a = ma.array(a, copy=True, subok=True)
    a.unshare_mask()
    if limits is None:
        return a.std(axis=axis, ddof=1)/ma.sqrt(a.count(axis))
    if (not isinstance(limits, tuple)) and isinstance(limits, float):
        limits = (limits, limits)

    # Check the limits
    (lolim, uplim) = limits
    errmsg = "The proportion to cut from the %s should be between 0. and 1."
    if lolim is not None:
        if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + f"(got {lolim})")
    if uplim is not None:
        if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + f"(got {uplim})")

    (loinc, upinc) = inclusive
    if (axis is None):
        return _trimmed_stde_1D(a.ravel(), lolim, uplim, loinc, upinc)
    else:
        if a.ndim > 2:
            raise ValueError(f"Array 'a' must be at most two dimensional, "
                             f"but got a.ndim = {a.ndim}")
        return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
                                   lolim, uplim, loinc, upinc)
2270
+
2271
+
2272
+ def _mask_to_limits(a, limits, inclusive):
2273
+ """Mask an array for values outside of given limits.
2274
+
2275
+ This is primarily a utility function.
2276
+
2277
+ Parameters
2278
+ ----------
2279
+ a : array
2280
+ limits : (float or None, float or None)
2281
+ A tuple consisting of the (lower limit, upper limit). Values in the
2282
+ input array less than the lower limit or greater than the upper limit
2283
+ will be masked out. None implies no limit.
2284
+ inclusive : (bool, bool)
2285
+ A tuple consisting of the (lower flag, upper flag). These flags
2286
+ determine whether values exactly equal to lower or upper are allowed.
2287
+
2288
+ Returns
2289
+ -------
2290
+ A MaskedArray.
2291
+
2292
+ Raises
2293
+ ------
2294
+ A ValueError if there are no values within the given limits.
2295
+ """
2296
+ lower_limit, upper_limit = limits
2297
+ lower_include, upper_include = inclusive
2298
+ am = ma.MaskedArray(a)
2299
+ if lower_limit is not None:
2300
+ if lower_include:
2301
+ am = ma.masked_less(am, lower_limit)
2302
+ else:
2303
+ am = ma.masked_less_equal(am, lower_limit)
2304
+
2305
+ if upper_limit is not None:
2306
+ if upper_include:
2307
+ am = ma.masked_greater(am, upper_limit)
2308
+ else:
2309
+ am = ma.masked_greater_equal(am, upper_limit)
2310
+
2311
+ if am.count() == 0:
2312
+ raise ValueError("No array values within given limits")
2313
+
2314
+ return am
2315
+
2316
+
2317
def tmean(a, limits=None, inclusive=(True, True), axis=None):
    """
    Compute the trimmed mean.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values in the input array less than the lower limit or greater than
        the upper limit are ignored.  ``None`` (default) uses all values;
        either element of the tuple may also be ``None`` for a half-open
        interval.
    inclusive : (bool, bool), optional
        The (lower flag, upper flag) pair controlling whether values exactly
        equal to the corresponding limit are included.  Default (True, True).
    axis : int or None, optional
        Axis along which to operate; ``None`` (default) computes over the
        whole array.

    Returns
    -------
    tmean : float

    Notes
    -----
    For more details on `tmean`, see `scipy.stats.tmean`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> a = np.array([[6, 8, 3, 0],
    ...               [3, 9, 1, 2],
    ...               [8, 7, 8, 2],
    ...               [5, 6, 0, 2],
    ...               [4, 5, 5, 2]])
    ...
    ...
    >>> mstats.tmean(a, (2,5))
    3.3
    >>> mstats.tmean(a, (2,5), axis=0)
    masked_array(data=[4.0, 5.0, 4.0, 2.0],
                 mask=[False, False, False, False],
           fill_value=1e+20)

    """
    # Mask out-of-limit values first, then average whatever survives.
    trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.mean(axis=axis)
2366
+
2367
+
2368
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Compute the trimmed variance

    This function computes the sample variance of an array of values,
    while ignoring values which are outside of given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values in the input array less than the lower limit or greater than the
        upper limit will be ignored. When limits is None, then all values are
        used. Either of the limit values in the tuple can also be None
        representing a half-open interval. The default value is None.
    inclusive : (bool, bool), optional
        A tuple consisting of the (lower flag, upper flag). These flags
        determine whether values exactly equal to the lower or upper limits
        are included. The default value is (True, True).
    axis : int or None, optional
        Axis along which to operate. If None, compute over the
        whole array. Default is zero.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tvar : float
        Trimmed variance.

    Notes
    -----
    For more details on `tvar`, see `scipy.stats.tvar`.

    """
    # The data is flattened up front, so trimming always happens on the
    # raveled values.  NOTE(review): the `a.mask` access below assumes `a`
    # is already a masked array -- confirm against callers.
    a = a.astype(float).ravel()
    if limits is None:
        n = (~a.mask).sum()  # todo: better way to do that?
        # NOTE(review): in this branch the `axis` and `ddof` arguments are
        # ignored; the n/(n-1) factor hard-codes a ddof=1 correction.
        return np.ma.var(a) * n/(n-1.)
    am = _mask_to_limits(a, limits=limits, inclusive=inclusive)

    return np.ma.var(am, axis=axis, ddof=ddof)
2411
+
2412
+
2413
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
    """
    Compute the trimmed minimum.

    Returns the minimum of `a` along `axis`, ignoring any value smaller
    than `lowerlimit`.

    Parameters
    ----------
    a : array_like
        array of values
    lowerlimit : None or float, optional
        Values below this limit are ignored; ``None`` (default) means no
        lower limit.
    axis : int or None, optional
        Axis along which to operate (default 0).  ``None`` computes over
        the whole array `a`.
    inclusive : {True, False}, optional
        Whether values exactly equal to `lowerlimit` are kept.  Default
        is True.

    Returns
    -------
    tmin : float, int or ndarray

    Notes
    -----
    For more details on `tmin`, see `scipy.stats.tmin`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> a = np.array([[6, 8, 3, 0],
    ...               [3, 2, 1, 2],
    ...               [8, 1, 8, 2],
    ...               [5, 3, 0, 2],
    ...               [4, 7, 5, 2]])
    ...
    >>> mstats.tmin(a, 5)
    masked_array(data=[5, 7, 5, --],
                 mask=[False, False, False, True],
           fill_value=999999)

    """
    a, axis = _chk_asarray(a, axis)
    # Only a lower bound applies here; the upper side stays open.
    trimmed = trima(a, (lowerlimit, None), (inclusive, False))
    return ma.minimum.reduce(trimmed, axis)
2459
+
2460
+
2461
def tmax(a, upperlimit=None, axis=0, inclusive=True):
    """
    Compute the trimmed maximum.

    Returns the maximum of `a` along `axis`, ignoring any value larger
    than `upperlimit`.

    Parameters
    ----------
    a : array_like
        array of values
    upperlimit : None or float, optional
        Values above this limit are ignored; ``None`` (default) means no
        upper limit.
    axis : int or None, optional
        Axis along which to operate (default 0).  ``None`` computes over
        the whole array `a`.
    inclusive : {True, False}, optional
        Whether values exactly equal to `upperlimit` are kept.  Default
        is True.

    Returns
    -------
    tmax : float, int or ndarray

    Notes
    -----
    For more details on `tmax`, see `scipy.stats.tmax`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import mstats
    >>> a = np.array([[6, 8, 3, 0],
    ...               [3, 9, 1, 2],
    ...               [8, 7, 8, 2],
    ...               [5, 6, 0, 2],
    ...               [4, 5, 5, 2]])
    ...
    ...
    >>> mstats.tmax(a, 4)
    masked_array(data=[4, --, 3, 2],
                 mask=[False, True, False, False],
           fill_value=999999)

    """
    a, axis = _chk_asarray(a, axis)
    # Only an upper bound applies here; the lower side stays open.
    trimmed = trima(a, (None, upperlimit), (False, inclusive))
    return ma.maximum.reduce(trimmed, axis)
2511
+
2512
+
2513
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Compute the trimmed standard error of the mean.

    This function finds the standard error of the mean for given
    values, ignoring values outside the given `limits`.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values less than the lower limit or greater than the upper limit
        are ignored.  ``None`` (default) uses all values; either element
        of the tuple can also be ``None`` for a half-open interval.
    inclusive : (bool, bool), optional
        The (lower flag, upper flag) pair controlling whether values
        exactly equal to the corresponding limit are included.  Default
        (True, True).
    axis : int or None, optional
        Axis along which to operate; ``None`` computes over the whole
        array.  Default is zero.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tsem : float

    Notes
    -----
    For more details on `tsem`, see `scipy.stats.tsem`.

    """
    flat = ma.asarray(a).ravel()
    if limits is None:
        # Untrimmed: plain standard error of the mean.
        sample_size = float(flat.count())
        return flat.std(axis=axis, ddof=ddof) / ma.sqrt(sample_size)

    trimmed = trima(flat.ravel(), limits, inclusive)
    std_dev = np.sqrt(trimmed.var(axis=axis, ddof=ddof))
    return std_dev / np.sqrt(trimmed.count())
2556
+
2557
+
2558
def winsorize(a, limits=None, inclusive=(True, True), inplace=False,
              axis=None, nan_policy='propagate'):
    """Returns a Winsorized version of the input array.

    The (limits[0])th lowest values are set to the (limits[0])th percentile,
    and the (limits[1])th highest values are set to the (1 - limits[1])th
    percentile.
    Masked values are skipped.


    Parameters
    ----------
    a : sequence
        Input array.
    limits : {None, tuple of float}, optional
        Tuple of the percentages to cut on each side of the array, with respect
        to the number of unmasked data, as floats between 0. and 1.
        Noting n the number of unmasked data before trimming, the
        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
        masked, and the total number of unmasked data after trimming
        is n*(1.-sum(limits)) The value of one limit can be set to None to
        indicate an open interval.
    inclusive : {(True, True) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
    inplace : {False, True}, optional
        Whether to winsorize in place (True) or to use a copy (False)
    axis : {None, int}, optional
        Axis along which to trim. If None, the whole array is trimmed, but its
        shape is maintained.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': allows nan values and may overwrite or propagate them
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Notes
    -----
    This function is applied to reduce the effect of possibly spurious outliers
    by limiting the extreme values.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.mstats import winsorize

    A shuffled array contains integers from 1 to 10.

    >>> a = np.array([10, 4, 9, 8, 5, 3, 7, 2, 1, 6])

    The 10% of the lowest value (i.e., ``1``) and the 20% of the highest
    values (i.e., ``9`` and ``10``) are replaced.

    >>> winsorize(a, limits=[0.1, 0.2])
    masked_array(data=[8, 4, 8, 8, 5, 3, 7, 2, 2, 6],
                 mask=False,
           fill_value=999999)

    """
    def _winsorize1D(a, low_limit, up_limit, low_include, up_include,
                     contains_nan, nan_policy):
        # `idx` sorts the data; each tail of the sorted order is clipped in
        # place to the nearest kept value.
        n = a.count()
        idx = a.argsort()
        if contains_nan:
            nan_count = np.count_nonzero(np.isnan(a))
        if low_limit:
            if low_include:
                lowidx = int(low_limit * n)
            else:
                lowidx = np.round(low_limit * n).astype(int)
            if contains_nan and nan_policy == 'omit':
                # Cap the cut so a non-nan replacement value remains.
                # NOTE(review): this assumes nans sort to the end of `idx`
                # -- confirm.
                lowidx = min(lowidx, n-nan_count-1)
            a[idx[:lowidx]] = a[idx[lowidx]]
        if up_limit is not None:
            if up_include:
                upidx = n - int(n * up_limit)
            else:
                upidx = n - np.round(n * up_limit).astype(int)
            if contains_nan and nan_policy == 'omit':
                # Leave the trailing nan slots untouched when omitting nans.
                a[idx[upidx:-nan_count]] = a[idx[upidx - 1]]
            else:
                a[idx[upidx:]] = a[idx[upidx - 1]]
        return a

    contains_nan = _contains_nan(a, nan_policy)
    # We are going to modify a: better make a copy
    a = ma.array(a, copy=np.logical_not(inplace))

    if limits is None:
        # No limits given: nothing to winsorize.
        return a
    if (not isinstance(limits, tuple)) and isinstance(limits, float):
        # A bare float means the same proportion on both sides.
        limits = (limits, limits)

    # Check the limits
    (lolim, uplim) = limits
    errmsg = "The proportion to cut from the %s should be between 0. and 1."
    if lolim is not None:
        if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + f"(got {lolim})")
    if uplim is not None:
        if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + f"(got {uplim})")

    (loinc, upinc) = inclusive

    if axis is None:
        # Flatten, winsorize, then restore the original shape.
        shp = a.shape
        return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc,
                            contains_nan, nan_policy).reshape(shp)
    else:
        return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc,
                                   upinc, contains_nan, nan_policy)
2672
+
2673
+
2674
def moment(a, moment=1, axis=0):
    """
    Calculates the nth moment about the mean for a sample.

    Parameters
    ----------
    a : array_like
        Input data.
    moment : int or array_like of ints, optional
        Order(s) of the central moment to return.  Default is 1.
    axis : int or None, optional
        Axis along which the central moment is computed (default 0);
        ``None`` computes over the whole array `a`.

    Returns
    -------
    n-th central moment : ndarray or float
        The requested moment(s) along the given axis, or over all values
        if axis is None.  The denominator is the number of observations;
        no degrees-of-freedom correction is applied.

    Notes
    -----
    For more details about `moment`, see `scipy.stats.moment`.

    """
    a, axis = _chk_asarray(a, axis)

    if a.size == 0:
        # Empty input: return NaN(s), shaped to match a scalar or
        # array-like `moment` request.
        reduced_shape = list(a.shape)
        del reduced_shape[axis]
        out_dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
        out_shape = (reduced_shape if np.isscalar(moment)
                     else [len(moment)] + reduced_shape)
        if not out_shape:
            return out_dtype(np.nan)
        return ma.array(np.full(out_shape, np.nan, dtype=out_dtype))

    if np.isscalar(moment):
        return _moment(a, moment, axis)

    # Array-like `moment`: one result per requested order, sharing a single
    # pre-computed mean.
    mean = a.mean(axis, keepdims=True)
    return ma.array([_moment(a, order, axis, mean=mean) for order in moment])
2720
+
2721
+
2722
+ # Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
2723
+ def _moment(a, moment, axis, *, mean=None):
2724
+ if np.abs(moment - np.round(moment)) > 0:
2725
+ raise ValueError("All moment parameters must be integers")
2726
+
2727
+ if moment == 0 or moment == 1:
2728
+ # By definition the zeroth moment about the mean is 1, and the first
2729
+ # moment is 0.
2730
+ shape = list(a.shape)
2731
+ del shape[axis]
2732
+ dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
2733
+
2734
+ if len(shape) == 0:
2735
+ return dtype(1.0 if moment == 0 else 0.0)
2736
+ else:
2737
+ return (ma.ones(shape, dtype=dtype) if moment == 0
2738
+ else ma.zeros(shape, dtype=dtype))
2739
+ else:
2740
+ # Exponentiation by squares: form exponent sequence
2741
+ n_list = [moment]
2742
+ current_n = moment
2743
+ while current_n > 2:
2744
+ if current_n % 2:
2745
+ current_n = (current_n-1)/2
2746
+ else:
2747
+ current_n /= 2
2748
+ n_list.append(current_n)
2749
+
2750
+ # Starting point for exponentiation by squares
2751
+ mean = a.mean(axis, keepdims=True) if mean is None else mean
2752
+ a_zero_mean = a - mean
2753
+ if n_list[-1] == 1:
2754
+ s = a_zero_mean.copy()
2755
+ else:
2756
+ s = a_zero_mean**2
2757
+
2758
+ # Perform multiplications
2759
+ for n in n_list[-2::-1]:
2760
+ s = s**2
2761
+ if n % 2:
2762
+ s *= a_zero_mean
2763
+ return s.mean(axis)
2764
+
2765
+
2766
def variation(a, axis=0, ddof=0):
    """
    Compute the coefficient of variation.

    The coefficient of variation is the standard deviation divided by the
    mean. This function is equivalent to::

        np.std(x, axis=axis, ddof=ddof) / np.mean(x)

    The default for ``ddof`` is 0, but many definitions of the coefficient
    of variation use the square root of the unbiased sample variance
    for the sample standard deviation, which corresponds to ``ddof=1``.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate the coefficient of variation. Default
        is 0. If None, compute over the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom. Default is 0.

    Returns
    -------
    variation : ndarray
        The calculated variation along the requested axis.

    Notes
    -----
    For more details about `variation`, see `scipy.stats.variation`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.mstats import variation
    >>> a = np.array([2,8,4])
    >>> variation(a)
    0.5345224838248487
    >>> b = np.array([2,8,3,4])
    >>> c = np.ma.masked_array(b, mask=[0,0,1,0])
    >>> variation(c)
    0.5345224838248487

    In the example above, it can be seen that this works the same as
    `scipy.stats.variation` except 'stats.mstats.variation' ignores masked
    array elements.

    """
    data, axis = _chk_asarray(a, axis)
    spread = data.std(axis, ddof=ddof)
    center = data.mean(axis)
    return spread / center
2817
+
2818
+
2819
def skew(a, axis=0, bias=True):
    """
    Computes the skewness of a data set.

    Parameters
    ----------
    a : ndarray
        data
    axis : int or None, optional
        Axis along which skewness is calculated. Default is 0.
        If None, compute over the whole array `a`.
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.

    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all values are
        equal.

    Notes
    -----
    For more details about `skew`, see `scipy.stats.skew`.

    """
    a, axis = _chk_asarray(a,axis)
    mean = a.mean(axis, keepdims=True)
    # Second and third central moments, sharing the pre-computed mean.
    m2 = _moment(a, 2, axis, mean=mean)
    m3 = _moment(a, 3, axis, mean=mean)
    # Treat a variance at or below floating-point resolution (relative to
    # the mean) as exactly zero: constant input yields skewness 0 rather
    # than 0/0.
    zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
    with np.errstate(all='ignore'):
        vals = ma.where(zero, 0, m3 / m2**1.5)

    if not bias and zero is not ma.masked and m2 is not ma.masked:
        n = a.count(axis)
        # The bias correction requires a nonzero m2 and more than 2
        # observations (the n-2 denominator below).
        can_correct = ~zero & (n > 2)
        if can_correct.any():
            n = np.extract(can_correct, n)
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            # Bias-corrected skewness, written back only where correctable.
            nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
            np.place(vals, can_correct, nval)
    return vals
2862
+
2863
+
2864
def kurtosis(a, axis=0, fisher=True, bias=True):
    """
    Computes the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance. If Fisher's definition is used, then 3.0 is subtracted from
    the result to give 0.0 for a normal distribution.

    If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators

    Use `kurtosistest` to see if result is close enough to normal.

    Parameters
    ----------
    a : array
        data for which the kurtosis is calculated
    axis : int or None, optional
        Axis along which the kurtosis is calculated. Default is 0.
        If None, compute over the whole array `a`.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis. If all values are equal,
        return -3 for Fisher's definition and 0 for Pearson's definition.

    Notes
    -----
    For more details about `kurtosis`, see `scipy.stats.kurtosis`.

    """
    a, axis = _chk_asarray(a, axis)
    mean = a.mean(axis, keepdims=True)
    # Second and fourth central moments, sharing the pre-computed mean.
    m2 = _moment(a, 2, axis, mean=mean)
    m4 = _moment(a, 4, axis, mean=mean)
    # Treat a variance at or below floating-point resolution (relative to
    # the mean) as exactly zero: constant input yields Pearson kurtosis 0
    # rather than 0/0.
    zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
    with np.errstate(all='ignore'):
        vals = ma.where(zero, 0, m4 / m2**2.0)

    if not bias and zero is not ma.masked and m2 is not ma.masked:
        n = a.count(axis)
        # The bias correction requires a nonzero m2 and more than 3
        # observations (the n-3 denominator below).
        can_correct = ~zero & (n > 3)
        if can_correct.any():
            n = np.extract(can_correct, n)
            m2 = np.extract(can_correct, m2)
            m4 = np.extract(can_correct, m4)
            # Bias-corrected excess kurtosis; +3.0 converts back to the
            # Pearson scale used by `vals`.
            nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
            np.place(vals, can_correct, nval+3.0)
    if fisher:
        return vals - 3
    else:
        return vals
2922
+
2923
+
2924
# Result container returned by `describe`: sample size, (min, max), mean,
# variance, skewness and kurtosis, in that order.
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
                                               'variance', 'skewness',
                                               'kurtosis'))
2927
+
2928
+
2929
def describe(a, axis=0, ddof=0, bias=True):
    """
    Computes several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Data array
    axis : int or None, optional
        Axis along which to calculate statistics. Default 0. If None,
        compute over the whole array `a`.
    ddof : int, optional
        degree of freedom (default 0); note that default ddof is different
        from the same routine in stats.describe
    bias : bool, optional
        If False, then the skewness and kurtosis calculations are corrected for
        statistical bias.

    Returns
    -------
    nobs : int
        (size of the data (discarding missing values)

    minmax : (int, int)
        min, max

    mean : float
        arithmetic mean

    variance : float
        unbiased variance

    skewness : float
        biased skewness

    kurtosis : float
        biased kurtosis

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.mstats import describe
    >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
    >>> describe(ma)
    DescribeResult(nobs=np.int64(3), minmax=(masked_array(data=0,
                 mask=False,
           fill_value=999999), masked_array(data=2,
                 mask=False,
           fill_value=999999)), mean=np.float64(1.0),
           variance=np.float64(0.6666666666666666),
           skewness=masked_array(data=0., mask=False, fill_value=1e+20),
           kurtosis=np.float64(-1.5))

    """
    data, axis = _chk_asarray(a, axis)
    # Count only the unmasked observations.
    nobs = data.count(axis)
    extremes = (ma.minimum.reduce(data, axis=axis),
                ma.maximum.reduce(data, axis=axis))
    return DescribeResult(nobs,
                          extremes,
                          data.mean(axis),
                          data.var(axis, ddof=ddof),
                          skew(data, axis, bias=bias),
                          kurtosis(data, axis, bias=bias))
2992
+
2993
+
2994
def stde_median(data, axis=None):
    """Returns the McKean-Schrader estimate of the standard error of the sample
    median along the given axis. masked values are discarded.

    Parameters
    ----------
    data : ndarray
        Data to trim.
    axis : {None,int}, optional
        Axis along which to perform the trimming.
        If None, the input array is first flattened.

    """
    def _one_sample(sample):
        # Estimate from the sorted, unmasked values of a 1D sample.
        vals = np.sort(sample.compressed())
        n = len(vals)
        z = 2.5758293035489004
        k = int(np.round((n + 1) / 2. - z * np.sqrt(n / 4.), 0))
        return (vals[n - k] - vals[k - 1]) / (2. * z)

    data = ma.array(data, copy=False, subok=True)
    if (axis is None):
        return _one_sample(data)
    if data.ndim > 2:
        raise ValueError(f"Array 'data' must be at most two dimensional, "
                         f"but got data.ndim = {data.ndim}")
    return ma.apply_along_axis(_one_sample, axis, data)
3022
+
3023
+
3024
# (statistic, pvalue) result container returned by `skewtest`.
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
3025
+
3026
+
3027
def skewtest(a, axis=0, alternative='two-sided'):
    """
    Tests whether the skew is different from the normal distribution.

    Parameters
    ----------
    a : array_like
        The data to be tested
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the skewness of the distribution underlying the sample
          is different from that of the normal distribution (i.e. 0)
        * 'less': the skewness of the distribution underlying the sample
          is less than that of the normal distribution
        * 'greater': the skewness of the distribution underlying the sample
          is greater than that of the normal distribution

        .. versionadded:: 1.7.0

    Returns
    -------
    statistic : array_like
        The computed z-score for this test.
    pvalue : array_like
        A p-value for the hypothesis test

    Notes
    -----
    For more details about `skewtest`, see `scipy.stats.skewtest`.

    """
    a, axis = _chk_asarray(a, axis)
    if axis is None:
        a = a.ravel()
        axis = 0
    b2 = skew(a,axis)
    n = a.count(axis)
    # The normal approximation used below is unreliable for tiny samples.
    if np.min(n) < 8:
        raise ValueError(f"skewtest is not valid with less than 8 samples; "
                         f"{np.min(n)} samples were given.")

    # Transform the sample skewness into an approximately standard-normal
    # statistic Z (see scipy.stats.skewtest for the derivation).
    y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
    beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
    W2 = -1 + ma.sqrt(2*(beta2-1))
    delta = 1/ma.sqrt(0.5*ma.log(W2))
    alpha = ma.sqrt(2.0/(W2-1))
    # Avoid log(0) below when the sample skewness is exactly zero.
    y = ma.where(y == 0, 1, y)
    Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
    pvalue = scipy.stats._stats_py._get_pvalue(Z, distributions.norm, alternative)

    # [()] collapses 0-d results to scalars while leaving arrays unchanged.
    return SkewtestResult(Z[()], pvalue[()])
3083
+
3084
+
3085
# (statistic, pvalue) result container returned by `kurtosistest`.
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
3086
+
3087
+
3088
def kurtosistest(a, axis=0, alternative='two-sided'):
    """
    Tests whether a dataset has normal kurtosis

    Parameters
    ----------
    a : array_like
        array of the sample data
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided': the kurtosis of the distribution underlying the sample
          is different from that of the normal distribution
        * 'less': the kurtosis of the distribution underlying the sample
          is less than that of the normal distribution
        * 'greater': the kurtosis of the distribution underlying the sample
          is greater than that of the normal distribution

        .. versionadded:: 1.7.0

    Returns
    -------
    statistic : array_like
        The computed z-score for this test.
    pvalue : array_like
        The p-value for the hypothesis test

    Notes
    -----
    For more details about `kurtosistest`, see `scipy.stats.kurtosistest`.

    """
    a, axis = _chk_asarray(a, axis)
    n = a.count(axis=axis)
    if np.min(n) < 5:
        raise ValueError(f"kurtosistest requires at least 5 observations; "
                         f"{np.min(n)} observations were given.")
    # The approximation is considered valid only for n >= 20; warn but
    # proceed for smaller samples.
    if np.min(n) < 20:
        warnings.warn(f"kurtosistest only valid for n>=20 ... continuing "
                      f"anyway, n={np.min(n)}", stacklevel=2)

    # Transform the sample (Pearson) kurtosis into an approximately
    # standard-normal statistic Z (see scipy.stats.kurtosistest).
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0*(n-1) / (n+1)
    varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
    x = (b2-E)/ma.sqrt(varb2)
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
                                                        (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 - 2./(9.0*A)
    denom = 1 + x*ma.sqrt(2/(A-4.0))
    # Mask zero denominators instead of dividing by zero.
    if np.ma.isMaskedArray(denom):
        # For multi-dimensional array input
        denom[denom == 0.0] = masked
    elif denom == 0.0:
        denom = masked

    # Real cube root, with the sign handled explicitly for negative bases.
    term2 = np.ma.where(denom > 0, ma.power((1-2.0/A)/denom, 1/3.0),
                        -ma.power(-(1-2.0/A)/denom, 1/3.0))
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))
    pvalue = scipy.stats._stats_py._get_pvalue(Z, distributions.norm, alternative)

    # [()] collapses 0-d results to scalars while leaving arrays unchanged.
    return KurtosistestResult(Z[()], pvalue[()])
3154
+
3155
+
3156
# (statistic, pvalue) result container returned by `normaltest`.
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
3157
+
3158
+
3159
def normaltest(a, axis=0):
    """
    Tests whether a sample differs from a normal distribution.

    Parameters
    ----------
    a : array_like
        The array containing the data to be tested.
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.

    Returns
    -------
    statistic : float or array
        ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
        ``k`` is the z-score returned by `kurtosistest`.
    pvalue : float or array
        A 2-sided chi squared probability for the hypothesis test.

    Notes
    -----
    For more details about `normaltest`, see `scipy.stats.normaltest`.

    """
    data, axis = _chk_asarray(a, axis)
    # Combine the two component z-scores into a chi-squared statistic
    # with 2 degrees of freedom.
    s_stat, _ = skewtest(data, axis)
    k_stat, _ = kurtosistest(data, axis)
    statistic = s_stat * s_stat + k_stat * k_stat

    return NormaltestResult(statistic, distributions.chi2.sf(statistic, 2))
3190
+
3191
+
3192
def mquantiles(a, prob=(.25, .5, .75), alphap=.4, betap=.4, axis=None,
               limit=()):
    """
    Computes empirical quantiles for a data array.

    Samples quantile are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``,
    where ``x[j]`` is the j-th order statistic, and gamma is a function of
    ``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and
    ``g = n*p + m - j``.

    Reinterpreting the above equations to compare to **R** lead to the
    equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)``

    Typical values of (alphap,betap) are:
        - (0,1) : ``p(k) = k/n`` : linear interpolation of cdf
          (**R** type 4)
        - (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function
          (**R** type 5)
        - (0,0) : ``p(k) = k/(n+1)`` :
          (**R** type 6)
        - (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])].
          (**R** type 7, **R** default)
        - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])].
          The resulting quantile estimates are approximately median-unbiased
          regardless of the distribution of x.
          (**R** type 8)
        - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom.
          The resulting quantile estimates are approximately unbiased
          if x is normally distributed
          (**R** type 9)
        - (.4,.4) : approximately quantile unbiased (Cunnane)
        - (.35,.35): APL, used with PWM

    Parameters
    ----------
    a : array_like
        Input data, as a sequence or array of dimension at most 2.
    prob : array_like, optional
        List of quantiles to compute.
    alphap : float, optional
        Plotting positions parameter, default is 0.4.
    betap : float, optional
        Plotting positions parameter, default is 0.4.
    axis : int, optional
        Axis along which to perform the trimming.
        If None (default), the input array is first flattened.
    limit : tuple, optional
        Tuple of (lower, upper) values.
        Values of `a` outside this open interval are ignored.

    Returns
    -------
    mquantiles : MaskedArray
        An array containing the calculated quantiles.

    Notes
    -----
    This formulation is very similar to **R** except the calculation of
    ``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined
    with each type.

    References
    ----------
    .. [1] *R* statistical software: https://www.r-project.org/
    .. [2] *R* ``quantile`` function:
            http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.mstats import mquantiles
    >>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
    >>> mquantiles(a)
    array([ 19.2,  40. ,  42.8])

    Using a 2D array, specifying axis and limit.

    >>> data = np.array([[   6.,    7.,    1.],
    ...                  [  47.,   15.,    2.],
    ...                  [  49.,   36.,    3.],
    ...                  [  15.,   39.,    4.],
    ...                  [  42.,   40., -999.],
    ...                  [  41.,   41., -999.],
    ...                  [   7., -999., -999.],
    ...                  [  39., -999., -999.],
    ...                  [  43., -999., -999.],
    ...                  [  40., -999., -999.],
    ...                  [  36., -999., -999.]])
    >>> print(mquantiles(data, axis=0, limit=(0, 50)))
    [[19.2  14.6   1.45]
     [40.   37.5   2.5 ]
     [42.8  40.05  3.55]]

    >>> data[:, 2] = -999.
    >>> print(mquantiles(data, axis=0, limit=(0, 50)))
    [[19.200000000000003 14.6 --]
     [40.0 37.5 --]
     [42.800000000000004 40.05 --]]

    """
    def _compute_1d(row, mvals, pvals):
        # Quantiles of one (possibly masked) 1-D sample.
        xs = np.sort(row.compressed())
        npts = len(xs)
        if npts == 0:
            # No unmasked data: everything is masked in the result.
            return ma.array(np.empty(len(pvals), dtype=float), mask=True)
        if npts == 1:
            # A single observation is every quantile.
            return ma.array(np.resize(xs, pvals.shape), mask=nomask)
        aleph = (npts*pvals + mvals)
        j = np.floor(aleph.clip(1, npts-1)).astype(int)
        gamma = (aleph - j).clip(0, 1)
        # Linear interpolation between the two bracketing order statistics.
        return (1. - gamma)*xs[(j-1).tolist()] + gamma*xs[j.tolist()]

    data = ma.array(a, copy=False)
    if data.ndim > 2:
        raise TypeError("Array should be 2D at most !")

    if limit:
        # Mask everything outside the open interval (limit[0], limit[1]).
        inside = (limit[0] < data) & (data < limit[1])
        data[~inside.filled(True)] = masked

    pvals = np.atleast_1d(np.asarray(prob))
    mvals = alphap + pvals*(1. - alphap - betap)
    # Compute globally when axis is None, otherwise per-slice along `axis`.
    if axis is None:
        return _compute_1d(data, mvals, pvals)

    return ma.apply_along_axis(_compute_1d, axis, data, mvals, pvals)
3319
+
3320
+
3321
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
    """Calculate the score at the given 'per' percentile of the
    sequence a. For example, the score at per=50 is the median.

    This function is a shortcut to mquantile

    """
    # Reject percentiles outside [0, 100].
    if per < 0 or per > 100.:
        raise ValueError(f"The percentile should be between 0. and 100. ! (got {per})")

    # Delegate to mquantiles with a single probability level.
    result = mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
                        limit=limit, axis=0)
    return result.squeeze()
3333
+
3334
+
3335
def plotting_positions(data, alpha=0.4, beta=0.4):
    """
    Returns plotting positions (or empirical percentile points) for the data.

    Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
        - i is the rank order statistics
        - n is the number of unmasked values along the given axis
        - `alpha` and `beta` are two parameters.

    Typical values for `alpha` and `beta` are:
        - (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
        - (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function
          (R, type 5)
        - (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6)
        - (1,1) : ``p(k) = (k-1)/(n-1)``, in this case,
          ``p(k) = mode[F(x[k])]``. That's R default (R type 7)
        - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
          ``p(k) ~ median[F(x[k])]``.
          The resulting quantile estimates are approximately median-unbiased
          regardless of the distribution of x. (R type 8)
        - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom.
          The resulting quantile estimates are approximately unbiased
          if x is normally distributed (R type 9)
        - (.4,.4) : approximately quantile unbiased (Cunnane)
        - (.35,.35): APL, used with PWM
        - (.3175, .3175): used in scipy.stats.probplot

    Parameters
    ----------
    data : array_like
        Input data, as a sequence or array of dimension at most 2.
    alpha : float, optional
        Plotting positions parameter. Default is 0.4.
    beta : float, optional
        Plotting positions parameter. Default is 0.4.

    Returns
    -------
    positions : MaskedArray
        The calculated plotting positions.

    """
    marr = ma.array(data, copy=False).reshape(1, -1)
    nunmasked = marr.count()
    pos = np.empty(marr.size, dtype=float)
    # Slots past the number of valid points get 0 (they end up masked below).
    pos[nunmasked:] = 0
    # Assign each unmasked value its position by rank order.
    order = marr.argsort(axis=None)[:nunmasked]
    pos[order] = ((np.arange(1, nunmasked + 1) - alpha) /
                  (nunmasked + 1.0 - alpha - beta))
    return ma.array(pos, mask=marr._mask)


meppf = plotting_positions
3387
+
3388
+
3389
+ def obrientransform(*args):
3390
+ """
3391
+ Computes a transform on input data (any number of columns). Used to
3392
+ test for homogeneity of variance prior to running one-way stats. Each
3393
+ array in ``*args`` is one level of a factor. If an `f_oneway()` run on
3394
+ the transformed data and found significant, variances are unequal. From
3395
+ Maxwell and Delaney, p.112.
3396
+
3397
+ Returns: transformed data for use in an ANOVA
3398
+ """
3399
+ data = argstoarray(*args).T
3400
+ v = data.var(axis=0,ddof=1)
3401
+ m = data.mean(0)
3402
+ n = data.count(0).astype(float)
3403
+ # result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
3404
+ data -= m
3405
+ data **= 2
3406
+ data *= (n-1.5)*n
3407
+ data -= 0.5*v*(n-1)
3408
+ data /= (n-1.)*(n-2.)
3409
+ if not ma.allclose(v,data.mean(0)):
3410
+ raise ValueError("Lack of convergence in obrientransform.")
3411
+
3412
+ return data
3413
+
3414
+
3415
def sem(a, axis=0, ddof=1):
    """
    Calculates the standard error of the mean of the input array.

    Also sometimes called standard error of measurement.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        If axis is None, ravel `a` first. If axis is an integer, this will be
        the axis over which to operate. Defaults to 0.
    ddof : int, optional
        Delta degrees-of-freedom. How many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance. Defaults to 1.

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input axis.

    Notes
    -----
    The default value for `ddof` changed in scipy 0.15.0 to be consistent with
    `scipy.stats.sem` as well as with the most common definition used (like in
    the R documentation).

    Examples
    --------
    Find standard error along the first axis:

    >>> import numpy as np
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> print(stats.mstats.sem(a))
    [2.8284271247461903 2.8284271247461903 2.8284271247461903
     2.8284271247461903]

    Find standard error across the whole array, using n degrees of freedom:

    >>> print(stats.mstats.sem(a, axis=None, ddof=0))
    1.2893796958227628

    """
    a, axis = _chk_asarray(a, axis)
    # Standard error = sample standard deviation / sqrt(sample size),
    # where the size counts only the unmasked observations.
    nobs = a.count(axis=axis)
    return a.std(axis=axis, ddof=ddof) / ma.sqrt(nobs)
3466
+
3467
+
3468
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))


def f_oneway(*args):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups. From Heiman, pp.394-7.

    Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
    one per treatment group.

    Returns
    -------
    statistic : float
        The computed F-value of the test.
    pvalue : float
        The associated p-value from the F-distribution.

    """
    # Stack the groups into one masked array: one row per group.
    data = argstoarray(*args)
    ngroups = len(data)
    ntot = data.count()
    # Total sum of squares about the grand mean.
    sstot = (data**2).sum() - (data.sum())**2/float(ntot)
    # Between-group sum of squares, weighted by each group's size.
    ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
    sswg = sstot - ssbg
    dfbg = ngroups - 1
    dfwg = ntot - ngroups
    # F = (between-group mean square) / (within-group mean square).
    f = (ssbg/float(dfbg)) / (sswg/float(dfwg))
    prob = special.fdtrc(dfbg, dfwg, f)  # equivalent to stats.f.sf

    return F_onewayResult(f, prob)
3502
+
3503
+
3504
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
                                     ('statistic', 'pvalue'))


def friedmanchisquare(*args):
    """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
    This function calculates the Friedman Chi-square test for repeated measures
    and returns the result, along with the associated probability value.

    Each input is considered a given group. Ideally, the number of treatments
    among each group should be equal. If this is not the case, only the first
    n treatments are taken into account, where n is the number of treatments
    of the smallest group.
    If a group has some missing values, the corresponding treatments are masked
    in the other groups.
    The test statistic is corrected for ties.

    Masked values in one group are propagated to the other groups.

    Returns
    -------
    statistic : float
        the test statistic.
    pvalue : float
        the associated p-value.

    """
    data = argstoarray(*args).astype(float)
    # k: number of groups (rows); the test needs at least three.
    k = len(data)
    if k < 3:
        raise ValueError(f"Less than 3 groups ({k}): the Friedman test "
                         f"is NOT appropriate.")

    # Rank within each treatment (column).  Masking the zeros recovers the
    # missing-value mask (masked entries presumably rank as 0 here — see
    # the mstats `rankdata` contract).
    ranked = ma.masked_values(rankdata(data, axis=0), 0)
    if ranked._mask is not nomask:
        # A masked value anywhere in a treatment invalidates that whole
        # column for every group; drop such columns entirely.
        ranked = ma.mask_cols(ranked)
        ranked = ranked.compressed().reshape(k,-1).view(ndarray)
    else:
        ranked = ranked._data
    # n: number of complete treatments that survived the masking above.
    (k,n) = ranked.shape
    # Ties correction
    repeats = [find_repeats(row) for row in ranked.T]
    ties = np.array([y for x, y in repeats if x.size > 0])
    tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))

    # Sum of squared deviations of each group's rank total from its
    # expected value n*(k+1)/2 under the null hypothesis.
    ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
    chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction

    # Chi-squared with k-1 degrees of freedom.
    return FriedmanchisquareResult(chisq,
                                   distributions.chi2.sf(chisq, k-1))
3554
+
3555
+
3556
+ BrunnerMunzelResult = namedtuple('BrunnerMunzelResult', ('statistic', 'pvalue'))
3557
+
3558
+
3559
+ def brunnermunzel(x, y, alternative="two-sided", distribution="t"):
3560
+ """
3561
+ Compute the Brunner-Munzel test on samples x and y.
3562
+
3563
+ Any missing values in `x` and/or `y` are discarded.
3564
+
3565
+ The Brunner-Munzel test is a nonparametric test of the null hypothesis that
3566
+ when values are taken one by one from each group, the probabilities of
3567
+ getting large values in both groups are equal.
3568
+ Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the
3569
+ assumption of equivariance of two groups. Note that this does not assume
3570
+ the distributions are same. This test works on two independent samples,
3571
+ which may have different sizes.
3572
+
3573
+ Parameters
3574
+ ----------
3575
+ x, y : array_like
3576
+ Array of samples, should be one-dimensional.
3577
+ alternative : 'less', 'two-sided', or 'greater', optional
3578
+ Whether to get the p-value for the one-sided hypothesis ('less'
3579
+ or 'greater') or for the two-sided hypothesis ('two-sided').
3580
+ Defaults value is 'two-sided' .
3581
+ distribution : 't' or 'normal', optional
3582
+ Whether to get the p-value by t-distribution or by standard normal
3583
+ distribution.
3584
+ Defaults value is 't' .
3585
+
3586
+ Returns
3587
+ -------
3588
+ statistic : float
3589
+ The Brunner-Munzer W statistic.
3590
+ pvalue : float
3591
+ p-value assuming an t distribution. One-sided or
3592
+ two-sided, depending on the choice of `alternative` and `distribution`.
3593
+
3594
+ See Also
3595
+ --------
3596
+ mannwhitneyu : Mann-Whitney rank test on two samples.
3597
+
3598
+ Notes
3599
+ -----
3600
+ For more details on `brunnermunzel`, see `scipy.stats.brunnermunzel`.
3601
+
3602
+ Examples
3603
+ --------
3604
+ >>> from scipy.stats.mstats import brunnermunzel
3605
+ >>> import numpy as np
3606
+ >>> x1 = [1, 2, np.nan, np.nan, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
3607
+ >>> x2 = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
3608
+ >>> brunnermunzel(x1, x2)
3609
+ BrunnerMunzelResult(statistic=1.4723186918922935, pvalue=0.15479415300426624) # may vary
3610
+
3611
+ """ # noqa: E501
3612
+ x = ma.asarray(x).compressed().view(ndarray)
3613
+ y = ma.asarray(y).compressed().view(ndarray)
3614
+ nx = len(x)
3615
+ ny = len(y)
3616
+ if nx == 0 or ny == 0:
3617
+ return BrunnerMunzelResult(np.nan, np.nan)
3618
+ rankc = rankdata(np.concatenate((x,y)))
3619
+ rankcx = rankc[0:nx]
3620
+ rankcy = rankc[nx:nx+ny]
3621
+ rankcx_mean = np.mean(rankcx)
3622
+ rankcy_mean = np.mean(rankcy)
3623
+ rankx = rankdata(x)
3624
+ ranky = rankdata(y)
3625
+ rankx_mean = np.mean(rankx)
3626
+ ranky_mean = np.mean(ranky)
3627
+
3628
+ Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
3629
+ Sx /= nx - 1
3630
+ Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
3631
+ Sy /= ny - 1
3632
+
3633
+ wbfn = nx * ny * (rankcy_mean - rankcx_mean)
3634
+ wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
3635
+
3636
+ if distribution == "t":
3637
+ df_numer = np.power(nx * Sx + ny * Sy, 2.0)
3638
+ df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
3639
+ df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
3640
+ df = df_numer / df_denom
3641
+ p = distributions.t.cdf(wbfn, df)
3642
+ elif distribution == "normal":
3643
+ p = distributions.norm.cdf(wbfn)
3644
+ else:
3645
+ raise ValueError(
3646
+ "distribution should be 't' or 'normal'")
3647
+
3648
+ if alternative == "greater":
3649
+ pass
3650
+ elif alternative == "less":
3651
+ p = 1 - p
3652
+ elif alternative == "two-sided":
3653
+ p = 2 * np.min([p, 1-p])
3654
+ else:
3655
+ raise ValueError(
3656
+ "alternative should be 'less', 'greater' or 'two-sided'")
3657
+
3658
+ return BrunnerMunzelResult(wbfn, p)