scipy 1.15.3__cp312-cp312-macosx_12_0_arm64.whl → 1.16.0rc2__cp312-cp312-macosx_12_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (629)
  1. scipy/.dylibs/libscipy_openblas.dylib +0 -0
  2. scipy/__config__.py +8 -8
  3. scipy/__init__.py +3 -6
  4. scipy/_cyutility.cpython-312-darwin.so +0 -0
  5. scipy/_lib/_array_api.py +486 -161
  6. scipy/_lib/_array_api_compat_vendor.py +9 -0
  7. scipy/_lib/_bunch.py +4 -0
  8. scipy/_lib/_ccallback_c.cpython-312-darwin.so +0 -0
  9. scipy/_lib/_docscrape.py +1 -1
  10. scipy/_lib/_elementwise_iterative_method.py +15 -26
  11. scipy/_lib/_sparse.py +41 -0
  12. scipy/_lib/_test_deprecation_call.cpython-312-darwin.so +0 -0
  13. scipy/_lib/_test_deprecation_def.cpython-312-darwin.so +0 -0
  14. scipy/_lib/_testutils.py +6 -2
  15. scipy/_lib/_util.py +222 -125
  16. scipy/_lib/array_api_compat/__init__.py +4 -4
  17. scipy/_lib/array_api_compat/_internal.py +19 -6
  18. scipy/_lib/array_api_compat/common/__init__.py +1 -1
  19. scipy/_lib/array_api_compat/common/_aliases.py +365 -193
  20. scipy/_lib/array_api_compat/common/_fft.py +94 -64
  21. scipy/_lib/array_api_compat/common/_helpers.py +413 -180
  22. scipy/_lib/array_api_compat/common/_linalg.py +116 -40
  23. scipy/_lib/array_api_compat/common/_typing.py +179 -10
  24. scipy/_lib/array_api_compat/cupy/__init__.py +1 -4
  25. scipy/_lib/array_api_compat/cupy/_aliases.py +61 -41
  26. scipy/_lib/array_api_compat/cupy/_info.py +16 -6
  27. scipy/_lib/array_api_compat/cupy/_typing.py +24 -39
  28. scipy/_lib/array_api_compat/dask/array/__init__.py +6 -3
  29. scipy/_lib/array_api_compat/dask/array/_aliases.py +267 -108
  30. scipy/_lib/array_api_compat/dask/array/_info.py +105 -34
  31. scipy/_lib/array_api_compat/dask/array/fft.py +5 -8
  32. scipy/_lib/array_api_compat/dask/array/linalg.py +21 -22
  33. scipy/_lib/array_api_compat/numpy/__init__.py +13 -15
  34. scipy/_lib/array_api_compat/numpy/_aliases.py +98 -49
  35. scipy/_lib/array_api_compat/numpy/_info.py +36 -16
  36. scipy/_lib/array_api_compat/numpy/_typing.py +27 -43
  37. scipy/_lib/array_api_compat/numpy/fft.py +11 -5
  38. scipy/_lib/array_api_compat/numpy/linalg.py +75 -22
  39. scipy/_lib/array_api_compat/torch/__init__.py +3 -5
  40. scipy/_lib/array_api_compat/torch/_aliases.py +262 -159
  41. scipy/_lib/array_api_compat/torch/_info.py +27 -16
  42. scipy/_lib/array_api_compat/torch/_typing.py +3 -0
  43. scipy/_lib/array_api_compat/torch/fft.py +17 -18
  44. scipy/_lib/array_api_compat/torch/linalg.py +16 -16
  45. scipy/_lib/array_api_extra/__init__.py +26 -3
  46. scipy/_lib/array_api_extra/_delegation.py +171 -0
  47. scipy/_lib/array_api_extra/_lib/__init__.py +1 -0
  48. scipy/_lib/array_api_extra/_lib/_at.py +463 -0
  49. scipy/_lib/array_api_extra/_lib/_backends.py +46 -0
  50. scipy/_lib/array_api_extra/_lib/_funcs.py +937 -0
  51. scipy/_lib/array_api_extra/_lib/_lazy.py +357 -0
  52. scipy/_lib/array_api_extra/_lib/_testing.py +278 -0
  53. scipy/_lib/array_api_extra/_lib/_utils/__init__.py +1 -0
  54. scipy/_lib/array_api_extra/_lib/_utils/_compat.py +74 -0
  55. scipy/_lib/array_api_extra/_lib/_utils/_compat.pyi +45 -0
  56. scipy/_lib/array_api_extra/_lib/_utils/_helpers.py +559 -0
  57. scipy/_lib/array_api_extra/_lib/_utils/_typing.py +10 -0
  58. scipy/_lib/array_api_extra/_lib/_utils/_typing.pyi +105 -0
  59. scipy/_lib/array_api_extra/testing.py +359 -0
  60. scipy/_lib/decorator.py +2 -2
  61. scipy/_lib/doccer.py +1 -7
  62. scipy/_lib/messagestream.cpython-312-darwin.so +0 -0
  63. scipy/_lib/pyprima/__init__.py +212 -0
  64. scipy/_lib/pyprima/cobyla/__init__.py +0 -0
  65. scipy/_lib/pyprima/cobyla/cobyla.py +559 -0
  66. scipy/_lib/pyprima/cobyla/cobylb.py +714 -0
  67. scipy/_lib/pyprima/cobyla/geometry.py +226 -0
  68. scipy/_lib/pyprima/cobyla/initialize.py +215 -0
  69. scipy/_lib/pyprima/cobyla/trustregion.py +492 -0
  70. scipy/_lib/pyprima/cobyla/update.py +289 -0
  71. scipy/_lib/pyprima/common/__init__.py +0 -0
  72. scipy/_lib/pyprima/common/_bounds.py +34 -0
  73. scipy/_lib/pyprima/common/_linear_constraints.py +46 -0
  74. scipy/_lib/pyprima/common/_nonlinear_constraints.py +54 -0
  75. scipy/_lib/pyprima/common/_project.py +173 -0
  76. scipy/_lib/pyprima/common/checkbreak.py +93 -0
  77. scipy/_lib/pyprima/common/consts.py +47 -0
  78. scipy/_lib/pyprima/common/evaluate.py +99 -0
  79. scipy/_lib/pyprima/common/history.py +38 -0
  80. scipy/_lib/pyprima/common/infos.py +30 -0
  81. scipy/_lib/pyprima/common/linalg.py +435 -0
  82. scipy/_lib/pyprima/common/message.py +290 -0
  83. scipy/_lib/pyprima/common/powalg.py +131 -0
  84. scipy/_lib/pyprima/common/preproc.py +277 -0
  85. scipy/_lib/pyprima/common/present.py +5 -0
  86. scipy/_lib/pyprima/common/ratio.py +54 -0
  87. scipy/_lib/pyprima/common/redrho.py +47 -0
  88. scipy/_lib/pyprima/common/selectx.py +296 -0
  89. scipy/_lib/tests/test__util.py +105 -121
  90. scipy/_lib/tests/test_array_api.py +166 -35
  91. scipy/_lib/tests/test_bunch.py +7 -0
  92. scipy/_lib/tests/test_ccallback.py +2 -10
  93. scipy/_lib/tests/test_public_api.py +13 -0
  94. scipy/cluster/_hierarchy.cpython-312-darwin.so +0 -0
  95. scipy/cluster/_optimal_leaf_ordering.cpython-312-darwin.so +0 -0
  96. scipy/cluster/_vq.cpython-312-darwin.so +0 -0
  97. scipy/cluster/hierarchy.py +393 -223
  98. scipy/cluster/tests/test_hierarchy.py +273 -335
  99. scipy/cluster/tests/test_vq.py +45 -61
  100. scipy/cluster/vq.py +39 -35
  101. scipy/conftest.py +263 -157
  102. scipy/constants/_constants.py +4 -1
  103. scipy/constants/tests/test_codata.py +2 -2
  104. scipy/constants/tests/test_constants.py +11 -18
  105. scipy/datasets/_download_all.py +15 -1
  106. scipy/datasets/_fetchers.py +7 -1
  107. scipy/datasets/_utils.py +1 -1
  108. scipy/differentiate/_differentiate.py +25 -25
  109. scipy/differentiate/tests/test_differentiate.py +24 -25
  110. scipy/fft/_basic.py +20 -0
  111. scipy/fft/_helper.py +3 -34
  112. scipy/fft/_pocketfft/helper.py +29 -1
  113. scipy/fft/_pocketfft/tests/test_basic.py +2 -4
  114. scipy/fft/_pocketfft/tests/test_real_transforms.py +4 -4
  115. scipy/fft/_realtransforms.py +13 -0
  116. scipy/fft/tests/test_basic.py +27 -25
  117. scipy/fft/tests/test_fftlog.py +16 -7
  118. scipy/fft/tests/test_helper.py +18 -34
  119. scipy/fft/tests/test_real_transforms.py +8 -10
  120. scipy/fftpack/convolve.cpython-312-darwin.so +0 -0
  121. scipy/fftpack/tests/test_basic.py +2 -4
  122. scipy/fftpack/tests/test_real_transforms.py +8 -9
  123. scipy/integrate/_bvp.py +9 -3
  124. scipy/integrate/_cubature.py +3 -2
  125. scipy/integrate/_dop.cpython-312-darwin.so +0 -0
  126. scipy/integrate/_lsoda.cpython-312-darwin.so +0 -0
  127. scipy/integrate/_ode.py +9 -2
  128. scipy/integrate/_odepack.cpython-312-darwin.so +0 -0
  129. scipy/integrate/_quad_vec.py +21 -29
  130. scipy/integrate/_quadpack.cpython-312-darwin.so +0 -0
  131. scipy/integrate/_quadpack_py.py +11 -7
  132. scipy/integrate/_quadrature.py +3 -3
  133. scipy/integrate/_rules/_base.py +2 -2
  134. scipy/integrate/_tanhsinh.py +48 -47
  135. scipy/integrate/_test_odeint_banded.cpython-312-darwin.so +0 -0
  136. scipy/integrate/_vode.cpython-312-darwin.so +0 -0
  137. scipy/integrate/tests/test__quad_vec.py +0 -6
  138. scipy/integrate/tests/test_banded_ode_solvers.py +85 -0
  139. scipy/integrate/tests/test_cubature.py +21 -35
  140. scipy/integrate/tests/test_quadrature.py +6 -8
  141. scipy/integrate/tests/test_tanhsinh.py +56 -48
  142. scipy/interpolate/__init__.py +70 -58
  143. scipy/interpolate/_bary_rational.py +22 -22
  144. scipy/interpolate/_bsplines.py +119 -66
  145. scipy/interpolate/_cubic.py +65 -50
  146. scipy/interpolate/_dfitpack.cpython-312-darwin.so +0 -0
  147. scipy/interpolate/_dierckx.cpython-312-darwin.so +0 -0
  148. scipy/interpolate/_fitpack.cpython-312-darwin.so +0 -0
  149. scipy/interpolate/_fitpack2.py +9 -6
  150. scipy/interpolate/_fitpack_impl.py +32 -26
  151. scipy/interpolate/_fitpack_repro.py +23 -19
  152. scipy/interpolate/_interpnd.cpython-312-darwin.so +0 -0
  153. scipy/interpolate/_interpolate.py +30 -12
  154. scipy/interpolate/_ndbspline.py +13 -18
  155. scipy/interpolate/_ndgriddata.py +5 -8
  156. scipy/interpolate/_polyint.py +95 -31
  157. scipy/interpolate/_ppoly.cpython-312-darwin.so +0 -0
  158. scipy/interpolate/_rbf.py +2 -2
  159. scipy/interpolate/_rbfinterp.py +1 -1
  160. scipy/interpolate/_rbfinterp_pythran.cpython-312-darwin.so +0 -0
  161. scipy/interpolate/_rgi.py +31 -26
  162. scipy/interpolate/_rgi_cython.cpython-312-darwin.so +0 -0
  163. scipy/interpolate/dfitpack.py +0 -20
  164. scipy/interpolate/interpnd.py +1 -2
  165. scipy/interpolate/tests/test_bary_rational.py +2 -2
  166. scipy/interpolate/tests/test_bsplines.py +97 -1
  167. scipy/interpolate/tests/test_fitpack2.py +39 -1
  168. scipy/interpolate/tests/test_interpnd.py +32 -20
  169. scipy/interpolate/tests/test_interpolate.py +48 -4
  170. scipy/interpolate/tests/test_rgi.py +2 -1
  171. scipy/io/_fast_matrix_market/__init__.py +2 -0
  172. scipy/io/_harwell_boeing/_fortran_format_parser.py +19 -16
  173. scipy/io/_harwell_boeing/hb.py +7 -11
  174. scipy/io/_idl.py +5 -7
  175. scipy/io/_netcdf.py +15 -5
  176. scipy/io/_test_fortran.cpython-312-darwin.so +0 -0
  177. scipy/io/arff/tests/test_arffread.py +3 -3
  178. scipy/io/matlab/__init__.py +5 -3
  179. scipy/io/matlab/_mio.py +4 -1
  180. scipy/io/matlab/_mio5.py +19 -13
  181. scipy/io/matlab/_mio5_utils.cpython-312-darwin.so +0 -0
  182. scipy/io/matlab/_mio_utils.cpython-312-darwin.so +0 -0
  183. scipy/io/matlab/_miobase.py +4 -1
  184. scipy/io/matlab/_streams.cpython-312-darwin.so +0 -0
  185. scipy/io/matlab/tests/test_mio.py +46 -18
  186. scipy/io/matlab/tests/test_mio_funcs.py +1 -1
  187. scipy/io/tests/test_mmio.py +7 -1
  188. scipy/io/tests/test_wavfile.py +41 -0
  189. scipy/io/wavfile.py +57 -10
  190. scipy/linalg/_basic.py +113 -86
  191. scipy/linalg/_cythonized_array_utils.cpython-312-darwin.so +0 -0
  192. scipy/linalg/_decomp.py +22 -9
  193. scipy/linalg/_decomp_cholesky.py +28 -13
  194. scipy/linalg/_decomp_cossin.py +45 -30
  195. scipy/linalg/_decomp_interpolative.cpython-312-darwin.so +0 -0
  196. scipy/linalg/_decomp_ldl.py +4 -1
  197. scipy/linalg/_decomp_lu.py +18 -6
  198. scipy/linalg/_decomp_lu_cython.cpython-312-darwin.so +0 -0
  199. scipy/linalg/_decomp_polar.py +2 -0
  200. scipy/linalg/_decomp_qr.py +6 -2
  201. scipy/linalg/_decomp_qz.py +3 -0
  202. scipy/linalg/_decomp_schur.py +3 -1
  203. scipy/linalg/_decomp_svd.py +13 -2
  204. scipy/linalg/_decomp_update.cpython-312-darwin.so +0 -0
  205. scipy/linalg/_expm_frechet.py +4 -0
  206. scipy/linalg/_fblas.cpython-312-darwin.so +0 -0
  207. scipy/linalg/_flapack.cpython-312-darwin.so +0 -0
  208. scipy/linalg/_linalg_pythran.cpython-312-darwin.so +0 -0
  209. scipy/linalg/_matfuncs.py +187 -4
  210. scipy/linalg/_matfuncs_expm.cpython-312-darwin.so +0 -0
  211. scipy/linalg/_matfuncs_schur_sqrtm.cpython-312-darwin.so +0 -0
  212. scipy/linalg/_matfuncs_sqrtm.py +1 -99
  213. scipy/linalg/_matfuncs_sqrtm_triu.cpython-312-darwin.so +0 -0
  214. scipy/linalg/_procrustes.py +2 -0
  215. scipy/linalg/_sketches.py +17 -6
  216. scipy/linalg/_solve_toeplitz.cpython-312-darwin.so +0 -0
  217. scipy/linalg/_solvers.py +7 -2
  218. scipy/linalg/_special_matrices.py +26 -36
  219. scipy/linalg/cython_blas.cpython-312-darwin.so +0 -0
  220. scipy/linalg/cython_lapack.cpython-312-darwin.so +0 -0
  221. scipy/linalg/lapack.py +22 -2
  222. scipy/linalg/tests/_cython_examples/meson.build +7 -0
  223. scipy/linalg/tests/test_basic.py +31 -16
  224. scipy/linalg/tests/test_batch.py +588 -0
  225. scipy/linalg/tests/test_cythonized_array_utils.py +0 -2
  226. scipy/linalg/tests/test_decomp.py +40 -3
  227. scipy/linalg/tests/test_decomp_cossin.py +14 -0
  228. scipy/linalg/tests/test_decomp_ldl.py +1 -1
  229. scipy/linalg/tests/test_lapack.py +115 -7
  230. scipy/linalg/tests/test_matfuncs.py +157 -102
  231. scipy/linalg/tests/test_procrustes.py +0 -7
  232. scipy/linalg/tests/test_solve_toeplitz.py +1 -1
  233. scipy/linalg/tests/test_special_matrices.py +1 -5
  234. scipy/ndimage/__init__.py +1 -0
  235. scipy/ndimage/_cytest.cpython-312-darwin.so +0 -0
  236. scipy/ndimage/_delegators.py +8 -2
  237. scipy/ndimage/_filters.py +453 -5
  238. scipy/ndimage/_interpolation.py +36 -6
  239. scipy/ndimage/_measurements.py +4 -2
  240. scipy/ndimage/_morphology.py +5 -0
  241. scipy/ndimage/_nd_image.cpython-312-darwin.so +0 -0
  242. scipy/ndimage/_ni_docstrings.py +5 -1
  243. scipy/ndimage/_ni_label.cpython-312-darwin.so +0 -0
  244. scipy/ndimage/_ni_support.py +1 -5
  245. scipy/ndimage/_rank_filter_1d.cpython-312-darwin.so +0 -0
  246. scipy/ndimage/_support_alternative_backends.py +18 -6
  247. scipy/ndimage/tests/test_filters.py +370 -259
  248. scipy/ndimage/tests/test_fourier.py +7 -9
  249. scipy/ndimage/tests/test_interpolation.py +68 -61
  250. scipy/ndimage/tests/test_measurements.py +18 -35
  251. scipy/ndimage/tests/test_morphology.py +143 -131
  252. scipy/ndimage/tests/test_splines.py +1 -3
  253. scipy/odr/__odrpack.cpython-312-darwin.so +0 -0
  254. scipy/optimize/_basinhopping.py +13 -7
  255. scipy/optimize/_bglu_dense.cpython-312-darwin.so +0 -0
  256. scipy/optimize/_bracket.py +17 -24
  257. scipy/optimize/_chandrupatla.py +9 -10
  258. scipy/optimize/_cobyla_py.py +104 -123
  259. scipy/optimize/_constraints.py +14 -10
  260. scipy/optimize/_differentiable_functions.py +371 -230
  261. scipy/optimize/_differentialevolution.py +4 -3
  262. scipy/optimize/_direct.cpython-312-darwin.so +0 -0
  263. scipy/optimize/_dual_annealing.py +1 -1
  264. scipy/optimize/_elementwise.py +1 -4
  265. scipy/optimize/_group_columns.cpython-312-darwin.so +0 -0
  266. scipy/optimize/_lbfgsb.cpython-312-darwin.so +0 -0
  267. scipy/optimize/_lbfgsb_py.py +57 -16
  268. scipy/optimize/_linprog_doc.py +2 -2
  269. scipy/optimize/_linprog_highs.py +2 -2
  270. scipy/optimize/_linprog_ip.py +25 -10
  271. scipy/optimize/_linprog_util.py +14 -16
  272. scipy/optimize/_lsap.cpython-312-darwin.so +0 -0
  273. scipy/optimize/_lsq/common.py +3 -3
  274. scipy/optimize/_lsq/dogbox.py +16 -2
  275. scipy/optimize/_lsq/givens_elimination.cpython-312-darwin.so +0 -0
  276. scipy/optimize/_lsq/least_squares.py +198 -126
  277. scipy/optimize/_lsq/lsq_linear.py +6 -6
  278. scipy/optimize/_lsq/trf.py +35 -8
  279. scipy/optimize/_milp.py +3 -1
  280. scipy/optimize/_minimize.py +105 -36
  281. scipy/optimize/_minpack.cpython-312-darwin.so +0 -0
  282. scipy/optimize/_minpack_py.py +21 -14
  283. scipy/optimize/_moduleTNC.cpython-312-darwin.so +0 -0
  284. scipy/optimize/_nnls.py +20 -21
  285. scipy/optimize/_nonlin.py +34 -3
  286. scipy/optimize/_numdiff.py +288 -110
  287. scipy/optimize/_optimize.py +86 -48
  288. scipy/optimize/_pava_pybind.cpython-312-darwin.so +0 -0
  289. scipy/optimize/_remove_redundancy.py +5 -5
  290. scipy/optimize/_root_scalar.py +1 -1
  291. scipy/optimize/_shgo.py +6 -0
  292. scipy/optimize/_shgo_lib/_complex.py +1 -1
  293. scipy/optimize/_slsqp_py.py +216 -124
  294. scipy/optimize/_slsqplib.cpython-312-darwin.so +0 -0
  295. scipy/optimize/_spectral.py +1 -1
  296. scipy/optimize/_tnc.py +8 -1
  297. scipy/optimize/_trlib/_trlib.cpython-312-darwin.so +0 -0
  298. scipy/optimize/_trustregion.py +20 -6
  299. scipy/optimize/_trustregion_constr/canonical_constraint.py +7 -7
  300. scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +1 -1
  301. scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +11 -3
  302. scipy/optimize/_trustregion_constr/projections.py +12 -8
  303. scipy/optimize/_trustregion_constr/qp_subproblem.py +9 -9
  304. scipy/optimize/_trustregion_constr/tests/test_projections.py +7 -7
  305. scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +77 -77
  306. scipy/optimize/_trustregion_constr/tr_interior_point.py +5 -5
  307. scipy/optimize/_trustregion_exact.py +0 -1
  308. scipy/optimize/_zeros.cpython-312-darwin.so +0 -0
  309. scipy/optimize/_zeros_py.py +97 -17
  310. scipy/optimize/cython_optimize/_zeros.cpython-312-darwin.so +0 -0
  311. scipy/optimize/slsqp.py +0 -1
  312. scipy/optimize/tests/test__basinhopping.py +1 -1
  313. scipy/optimize/tests/test__differential_evolution.py +4 -4
  314. scipy/optimize/tests/test__linprog_clean_inputs.py +5 -3
  315. scipy/optimize/tests/test__numdiff.py +66 -22
  316. scipy/optimize/tests/test__remove_redundancy.py +2 -2
  317. scipy/optimize/tests/test__shgo.py +9 -1
  318. scipy/optimize/tests/test_bracket.py +36 -46
  319. scipy/optimize/tests/test_chandrupatla.py +133 -135
  320. scipy/optimize/tests/test_cobyla.py +74 -45
  321. scipy/optimize/tests/test_constraints.py +1 -1
  322. scipy/optimize/tests/test_differentiable_functions.py +226 -6
  323. scipy/optimize/tests/test_lbfgsb_hessinv.py +22 -0
  324. scipy/optimize/tests/test_least_squares.py +125 -13
  325. scipy/optimize/tests/test_linear_assignment.py +3 -3
  326. scipy/optimize/tests/test_linprog.py +3 -3
  327. scipy/optimize/tests/test_lsq_linear.py +6 -6
  328. scipy/optimize/tests/test_minimize_constrained.py +2 -2
  329. scipy/optimize/tests/test_minpack.py +4 -4
  330. scipy/optimize/tests/test_nnls.py +43 -3
  331. scipy/optimize/tests/test_nonlin.py +36 -0
  332. scipy/optimize/tests/test_optimize.py +95 -17
  333. scipy/optimize/tests/test_slsqp.py +36 -4
  334. scipy/optimize/tests/test_zeros.py +34 -1
  335. scipy/signal/__init__.py +12 -23
  336. scipy/signal/_delegators.py +568 -0
  337. scipy/signal/_filter_design.py +459 -241
  338. scipy/signal/_fir_filter_design.py +262 -90
  339. scipy/signal/_lti_conversion.py +3 -2
  340. scipy/signal/_ltisys.py +118 -91
  341. scipy/signal/_max_len_seq_inner.cpython-312-darwin.so +0 -0
  342. scipy/signal/_peak_finding_utils.cpython-312-darwin.so +0 -0
  343. scipy/signal/_polyutils.py +172 -0
  344. scipy/signal/_short_time_fft.py +519 -70
  345. scipy/signal/_signal_api.py +30 -0
  346. scipy/signal/_signaltools.py +719 -399
  347. scipy/signal/_sigtools.cpython-312-darwin.so +0 -0
  348. scipy/signal/_sosfilt.cpython-312-darwin.so +0 -0
  349. scipy/signal/_spectral_py.py +230 -50
  350. scipy/signal/_spline.cpython-312-darwin.so +0 -0
  351. scipy/signal/_spline_filters.py +108 -68
  352. scipy/signal/_support_alternative_backends.py +73 -0
  353. scipy/signal/_upfirdn.py +4 -1
  354. scipy/signal/_upfirdn_apply.cpython-312-darwin.so +0 -0
  355. scipy/signal/_waveforms.py +2 -11
  356. scipy/signal/_wavelets.py +1 -1
  357. scipy/signal/fir_filter_design.py +1 -0
  358. scipy/signal/spline.py +4 -11
  359. scipy/signal/tests/_scipy_spectral_test_shim.py +2 -171
  360. scipy/signal/tests/test_bsplines.py +114 -79
  361. scipy/signal/tests/test_cont2discrete.py +9 -2
  362. scipy/signal/tests/test_filter_design.py +721 -481
  363. scipy/signal/tests/test_fir_filter_design.py +332 -140
  364. scipy/signal/tests/test_savitzky_golay.py +4 -3
  365. scipy/signal/tests/test_short_time_fft.py +221 -3
  366. scipy/signal/tests/test_signaltools.py +2144 -1348
  367. scipy/signal/tests/test_spectral.py +50 -6
  368. scipy/signal/tests/test_splines.py +161 -96
  369. scipy/signal/tests/test_upfirdn.py +84 -50
  370. scipy/signal/tests/test_waveforms.py +20 -0
  371. scipy/signal/tests/test_windows.py +607 -466
  372. scipy/signal/windows/_windows.py +287 -148
  373. scipy/sparse/__init__.py +23 -4
  374. scipy/sparse/_base.py +270 -108
  375. scipy/sparse/_bsr.py +7 -4
  376. scipy/sparse/_compressed.py +59 -231
  377. scipy/sparse/_construct.py +90 -38
  378. scipy/sparse/_coo.py +115 -181
  379. scipy/sparse/_csc.py +4 -4
  380. scipy/sparse/_csparsetools.cpython-312-darwin.so +0 -0
  381. scipy/sparse/_csr.py +2 -2
  382. scipy/sparse/_data.py +48 -48
  383. scipy/sparse/_dia.py +105 -18
  384. scipy/sparse/_dok.py +0 -23
  385. scipy/sparse/_index.py +4 -4
  386. scipy/sparse/_matrix.py +23 -0
  387. scipy/sparse/_sparsetools.cpython-312-darwin.so +0 -0
  388. scipy/sparse/_sputils.py +37 -22
  389. scipy/sparse/base.py +0 -9
  390. scipy/sparse/bsr.py +0 -14
  391. scipy/sparse/compressed.py +0 -23
  392. scipy/sparse/construct.py +0 -6
  393. scipy/sparse/coo.py +0 -14
  394. scipy/sparse/csc.py +0 -3
  395. scipy/sparse/csgraph/_flow.cpython-312-darwin.so +0 -0
  396. scipy/sparse/csgraph/_matching.cpython-312-darwin.so +0 -0
  397. scipy/sparse/csgraph/_min_spanning_tree.cpython-312-darwin.so +0 -0
  398. scipy/sparse/csgraph/_reordering.cpython-312-darwin.so +0 -0
  399. scipy/sparse/csgraph/_shortest_path.cpython-312-darwin.so +0 -0
  400. scipy/sparse/csgraph/_tools.cpython-312-darwin.so +0 -0
  401. scipy/sparse/csgraph/_traversal.cpython-312-darwin.so +0 -0
  402. scipy/sparse/csgraph/tests/test_matching.py +14 -2
  403. scipy/sparse/csgraph/tests/test_pydata_sparse.py +4 -1
  404. scipy/sparse/csgraph/tests/test_shortest_path.py +83 -27
  405. scipy/sparse/csr.py +0 -5
  406. scipy/sparse/data.py +1 -6
  407. scipy/sparse/dia.py +0 -7
  408. scipy/sparse/dok.py +0 -10
  409. scipy/sparse/linalg/_dsolve/_superlu.cpython-312-darwin.so +0 -0
  410. scipy/sparse/linalg/_dsolve/linsolve.py +9 -0
  411. scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +35 -28
  412. scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-312-darwin.so +0 -0
  413. scipy/sparse/linalg/_eigen/arpack/arpack.py +23 -17
  414. scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +6 -6
  415. scipy/sparse/linalg/_interface.py +17 -18
  416. scipy/sparse/linalg/_isolve/_gcrotmk.py +4 -4
  417. scipy/sparse/linalg/_isolve/iterative.py +51 -45
  418. scipy/sparse/linalg/_isolve/lgmres.py +6 -6
  419. scipy/sparse/linalg/_isolve/minres.py +5 -5
  420. scipy/sparse/linalg/_isolve/tfqmr.py +7 -7
  421. scipy/sparse/linalg/_isolve/utils.py +2 -8
  422. scipy/sparse/linalg/_matfuncs.py +1 -1
  423. scipy/sparse/linalg/_norm.py +1 -1
  424. scipy/sparse/linalg/_propack/_cpropack.cpython-312-darwin.so +0 -0
  425. scipy/sparse/linalg/_propack/_dpropack.cpython-312-darwin.so +0 -0
  426. scipy/sparse/linalg/_propack/_spropack.cpython-312-darwin.so +0 -0
  427. scipy/sparse/linalg/_propack/_zpropack.cpython-312-darwin.so +0 -0
  428. scipy/sparse/linalg/_special_sparse_arrays.py +39 -38
  429. scipy/sparse/linalg/tests/test_pydata_sparse.py +14 -0
  430. scipy/sparse/tests/test_arithmetic1d.py +5 -2
  431. scipy/sparse/tests/test_base.py +214 -42
  432. scipy/sparse/tests/test_common1d.py +7 -7
  433. scipy/sparse/tests/test_construct.py +1 -1
  434. scipy/sparse/tests/test_coo.py +272 -4
  435. scipy/sparse/tests/test_sparsetools.py +5 -0
  436. scipy/sparse/tests/test_sputils.py +36 -7
  437. scipy/spatial/_ckdtree.cpython-312-darwin.so +0 -0
  438. scipy/spatial/_distance_pybind.cpython-312-darwin.so +0 -0
  439. scipy/spatial/_distance_wrap.cpython-312-darwin.so +0 -0
  440. scipy/spatial/_hausdorff.cpython-312-darwin.so +0 -0
  441. scipy/spatial/_qhull.cpython-312-darwin.so +0 -0
  442. scipy/spatial/_voronoi.cpython-312-darwin.so +0 -0
  443. scipy/spatial/distance.py +49 -42
  444. scipy/spatial/tests/test_distance.py +15 -1
  445. scipy/spatial/tests/test_kdtree.py +1 -0
  446. scipy/spatial/tests/test_qhull.py +7 -2
  447. scipy/spatial/transform/__init__.py +5 -3
  448. scipy/spatial/transform/_rigid_transform.cpython-312-darwin.so +0 -0
  449. scipy/spatial/transform/_rotation.cpython-312-darwin.so +0 -0
  450. scipy/spatial/transform/tests/test_rigid_transform.py +1221 -0
  451. scipy/spatial/transform/tests/test_rotation.py +1213 -832
  452. scipy/spatial/transform/tests/test_rotation_groups.py +3 -3
  453. scipy/spatial/transform/tests/test_rotation_spline.py +29 -8
  454. scipy/special/__init__.py +1 -47
  455. scipy/special/_add_newdocs.py +34 -772
  456. scipy/special/_basic.py +22 -25
  457. scipy/special/_comb.cpython-312-darwin.so +0 -0
  458. scipy/special/_ellip_harm_2.cpython-312-darwin.so +0 -0
  459. scipy/special/_gufuncs.cpython-312-darwin.so +0 -0
  460. scipy/special/_logsumexp.py +67 -58
  461. scipy/special/_orthogonal.pyi +1 -1
  462. scipy/special/_specfun.cpython-312-darwin.so +0 -0
  463. scipy/special/_special_ufuncs.cpython-312-darwin.so +0 -0
  464. scipy/special/_spherical_bessel.py +4 -4
  465. scipy/special/_support_alternative_backends.py +212 -119
  466. scipy/special/_test_internal.cpython-312-darwin.so +0 -0
  467. scipy/special/_testutils.py +4 -4
  468. scipy/special/_ufuncs.cpython-312-darwin.so +0 -0
  469. scipy/special/_ufuncs.pyi +1 -0
  470. scipy/special/_ufuncs.pyx +215 -1400
  471. scipy/special/_ufuncs_cxx.cpython-312-darwin.so +0 -0
  472. scipy/special/_ufuncs_cxx.pxd +2 -15
  473. scipy/special/_ufuncs_cxx.pyx +5 -44
  474. scipy/special/_ufuncs_cxx_defs.h +2 -16
  475. scipy/special/_ufuncs_defs.h +0 -8
  476. scipy/special/cython_special.cpython-312-darwin.so +0 -0
  477. scipy/special/cython_special.pxd +1 -1
  478. scipy/special/tests/_cython_examples/meson.build +10 -1
  479. scipy/special/tests/test_basic.py +153 -20
  480. scipy/special/tests/test_boost_ufuncs.py +3 -0
  481. scipy/special/tests/test_cdflib.py +35 -11
  482. scipy/special/tests/test_gammainc.py +16 -0
  483. scipy/special/tests/test_hyp2f1.py +2 -2
  484. scipy/special/tests/test_log1mexp.py +85 -0
  485. scipy/special/tests/test_logsumexp.py +206 -64
  486. scipy/special/tests/test_mpmath.py +1 -0
  487. scipy/special/tests/test_nan_inputs.py +1 -1
  488. scipy/special/tests/test_orthogonal.py +17 -18
  489. scipy/special/tests/test_sf_error.py +3 -2
  490. scipy/special/tests/test_sph_harm.py +6 -7
  491. scipy/special/tests/test_support_alternative_backends.py +211 -76
  492. scipy/stats/__init__.py +4 -1
  493. scipy/stats/_ansari_swilk_statistics.cpython-312-darwin.so +0 -0
  494. scipy/stats/_axis_nan_policy.py +5 -12
  495. scipy/stats/_biasedurn.cpython-312-darwin.so +0 -0
  496. scipy/stats/_continued_fraction.py +387 -0
  497. scipy/stats/_continuous_distns.py +277 -310
  498. scipy/stats/_correlation.py +1 -1
  499. scipy/stats/_covariance.py +6 -3
  500. scipy/stats/_discrete_distns.py +39 -32
  501. scipy/stats/_distn_infrastructure.py +39 -12
  502. scipy/stats/_distribution_infrastructure.py +900 -238
  503. scipy/stats/_entropy.py +9 -10
  504. scipy/{_lib → stats}/_finite_differences.py +1 -1
  505. scipy/stats/_hypotests.py +83 -50
  506. scipy/stats/_kde.py +53 -49
  507. scipy/stats/_ksstats.py +1 -1
  508. scipy/stats/_levy_stable/__init__.py +7 -15
  509. scipy/stats/_levy_stable/levyst.cpython-312-darwin.so +0 -0
  510. scipy/stats/_morestats.py +118 -73
  511. scipy/stats/_mstats_basic.py +13 -17
  512. scipy/stats/_mstats_extras.py +8 -8
  513. scipy/stats/_multivariate.py +89 -113
  514. scipy/stats/_new_distributions.py +97 -20
  515. scipy/stats/_page_trend_test.py +12 -5
  516. scipy/stats/_probability_distribution.py +265 -43
  517. scipy/stats/_qmc.py +14 -9
  518. scipy/stats/_qmc_cy.cpython-312-darwin.so +0 -0
  519. scipy/stats/_qmvnt.py +16 -95
  520. scipy/stats/_qmvnt_cy.cpython-312-darwin.so +0 -0
  521. scipy/stats/_quantile.py +335 -0
  522. scipy/stats/_rcont/rcont.cpython-312-darwin.so +0 -0
  523. scipy/stats/_resampling.py +4 -29
  524. scipy/stats/_sampling.py +1 -1
  525. scipy/stats/_sobol.cpython-312-darwin.so +0 -0
  526. scipy/stats/_stats.cpython-312-darwin.so +0 -0
  527. scipy/stats/_stats_mstats_common.py +21 -2
  528. scipy/stats/_stats_py.py +550 -476
  529. scipy/stats/_stats_pythran.cpython-312-darwin.so +0 -0
  530. scipy/stats/_unuran/unuran_wrapper.cpython-312-darwin.so +0 -0
  531. scipy/stats/_unuran/unuran_wrapper.pyi +2 -1
  532. scipy/stats/_variation.py +6 -8
  533. scipy/stats/_wilcoxon.py +13 -7
  534. scipy/stats/tests/common_tests.py +6 -4
  535. scipy/stats/tests/test_axis_nan_policy.py +62 -24
  536. scipy/stats/tests/test_continued_fraction.py +173 -0
  537. scipy/stats/tests/test_continuous.py +379 -60
  538. scipy/stats/tests/test_continuous_basic.py +18 -12
  539. scipy/stats/tests/test_discrete_basic.py +14 -8
  540. scipy/stats/tests/test_discrete_distns.py +16 -16
  541. scipy/stats/tests/test_distributions.py +95 -75
  542. scipy/stats/tests/test_entropy.py +40 -48
  543. scipy/stats/tests/test_fit.py +4 -3
  544. scipy/stats/tests/test_hypotests.py +153 -24
  545. scipy/stats/tests/test_kdeoth.py +109 -41
  546. scipy/stats/tests/test_marray.py +289 -0
  547. scipy/stats/tests/test_morestats.py +79 -47
  548. scipy/stats/tests/test_mstats_basic.py +3 -3
  549. scipy/stats/tests/test_multivariate.py +434 -83
  550. scipy/stats/tests/test_qmc.py +13 -10
  551. scipy/stats/tests/test_quantile.py +199 -0
  552. scipy/stats/tests/test_rank.py +119 -112
  553. scipy/stats/tests/test_resampling.py +47 -56
  554. scipy/stats/tests/test_sampling.py +9 -4
  555. scipy/stats/tests/test_stats.py +799 -939
  556. scipy/stats/tests/test_variation.py +8 -6
  557. scipy/version.py +2 -2
  558. {scipy-1.15.3.dist-info → scipy-1.16.0rc2.dist-info}/LICENSE.txt +4 -4
  559. {scipy-1.15.3.dist-info → scipy-1.16.0rc2.dist-info}/METADATA +11 -11
  560. {scipy-1.15.3.dist-info → scipy-1.16.0rc2.dist-info}/RECORD +561 -568
  561. scipy-1.16.0rc2.dist-info/WHEEL +6 -0
  562. scipy/_lib/array_api_extra/_funcs.py +0 -484
  563. scipy/_lib/array_api_extra/_typing.py +0 -8
  564. scipy/interpolate/_bspl.cpython-312-darwin.so +0 -0
  565. scipy/optimize/_cobyla.cpython-312-darwin.so +0 -0
  566. scipy/optimize/_cython_nnls.cpython-312-darwin.so +0 -0
  567. scipy/optimize/_slsqp.cpython-312-darwin.so +0 -0
  568. scipy/spatial/qhull_src/COPYING.txt +0 -38
  569. scipy/special/libsf_error_state.dylib +0 -0
  570. scipy/special/tests/test_log_softmax.py +0 -109
  571. scipy/special/tests/test_xsf_cuda.py +0 -114
  572. scipy/special/xsf/binom.h +0 -89
  573. scipy/special/xsf/cdflib.h +0 -100
  574. scipy/special/xsf/cephes/airy.h +0 -307
  575. scipy/special/xsf/cephes/besselpoly.h +0 -51
  576. scipy/special/xsf/cephes/beta.h +0 -257
  577. scipy/special/xsf/cephes/cbrt.h +0 -131
  578. scipy/special/xsf/cephes/chbevl.h +0 -85
  579. scipy/special/xsf/cephes/chdtr.h +0 -193
  580. scipy/special/xsf/cephes/const.h +0 -87
  581. scipy/special/xsf/cephes/ellie.h +0 -293
  582. scipy/special/xsf/cephes/ellik.h +0 -251
  583. scipy/special/xsf/cephes/ellpe.h +0 -107
  584. scipy/special/xsf/cephes/ellpk.h +0 -117
  585. scipy/special/xsf/cephes/expn.h +0 -260
  586. scipy/special/xsf/cephes/gamma.h +0 -398
  587. scipy/special/xsf/cephes/hyp2f1.h +0 -596
  588. scipy/special/xsf/cephes/hyperg.h +0 -361
  589. scipy/special/xsf/cephes/i0.h +0 -149
  590. scipy/special/xsf/cephes/i1.h +0 -158
  591. scipy/special/xsf/cephes/igam.h +0 -421
  592. scipy/special/xsf/cephes/igam_asymp_coeff.h +0 -195
  593. scipy/special/xsf/cephes/igami.h +0 -313
  594. scipy/special/xsf/cephes/j0.h +0 -225
  595. scipy/special/xsf/cephes/j1.h +0 -198
  596. scipy/special/xsf/cephes/jv.h +0 -715
  597. scipy/special/xsf/cephes/k0.h +0 -164
  598. scipy/special/xsf/cephes/k1.h +0 -163
  599. scipy/special/xsf/cephes/kn.h +0 -243
  600. scipy/special/xsf/cephes/lanczos.h +0 -112
  601. scipy/special/xsf/cephes/ndtr.h +0 -275
  602. scipy/special/xsf/cephes/poch.h +0 -85
  603. scipy/special/xsf/cephes/polevl.h +0 -167
  604. scipy/special/xsf/cephes/psi.h +0 -194
  605. scipy/special/xsf/cephes/rgamma.h +0 -111
  606. scipy/special/xsf/cephes/scipy_iv.h +0 -811
  607. scipy/special/xsf/cephes/shichi.h +0 -248
  608. scipy/special/xsf/cephes/sici.h +0 -224
  609. scipy/special/xsf/cephes/sindg.h +0 -221
  610. scipy/special/xsf/cephes/tandg.h +0 -139
  611. scipy/special/xsf/cephes/trig.h +0 -58
  612. scipy/special/xsf/cephes/unity.h +0 -186
  613. scipy/special/xsf/cephes/zeta.h +0 -172
  614. scipy/special/xsf/config.h +0 -304
  615. scipy/special/xsf/digamma.h +0 -205
  616. scipy/special/xsf/error.h +0 -57
  617. scipy/special/xsf/evalpoly.h +0 -47
  618. scipy/special/xsf/expint.h +0 -266
  619. scipy/special/xsf/hyp2f1.h +0 -694
  620. scipy/special/xsf/iv_ratio.h +0 -173
  621. scipy/special/xsf/lambertw.h +0 -150
  622. scipy/special/xsf/loggamma.h +0 -163
  623. scipy/special/xsf/sici.h +0 -200
  624. scipy/special/xsf/tools.h +0 -427
  625. scipy/special/xsf/trig.h +0 -164
  626. scipy/special/xsf/wright_bessel.h +0 -843
  627. scipy/special/xsf/zlog1.h +0 -35
  628. scipy/stats/_mvn.cpython-312-darwin.so +0 -0
  629. scipy-1.15.3.dist-info/WHEEL +0 -4
scipy/stats/_stats_py.py CHANGED
@@ -26,9 +26,10 @@ References
26
26
  York. 2000.
27
27
 
28
28
  """
29
- import warnings
29
+ import functools
30
30
  import math
31
- from math import gcd
31
+ import operator
32
+ import warnings
32
33
  from collections import namedtuple
33
34
  from collections.abc import Sequence
34
35
 
@@ -41,9 +42,8 @@ from scipy.spatial import distance_matrix
41
42
  from scipy.optimize import milp, LinearConstraint
42
43
  from scipy._lib._util import (check_random_state, _get_nan,
43
44
  _rename_parameter, _contains_nan,
44
- AxisError, _lazywhere)
45
- from scipy._lib.deprecation import _deprecate_positional_args
46
-
45
+ normalize_axis_index, np_vecdot, AxisError)
46
+ from scipy._lib.deprecation import _deprecate_positional_args, _deprecated
47
47
 
48
48
  import scipy.special as special
49
49
  # Import unused here but needs to stay until end of deprecation periode
@@ -70,19 +70,23 @@ from ._binomtest import _binary_search_for_binom_tst as _binary_search
70
70
  from scipy._lib._bunch import _make_tuple_bunch
71
71
  from scipy import stats
72
72
  from scipy.optimize import root_scalar
73
- from scipy._lib._util import normalize_axis_index
74
73
  from scipy._lib._array_api import (
75
74
  _asarray,
76
75
  array_namespace,
76
+ is_lazy_array,
77
77
  is_numpy,
78
+ is_marray,
79
+ is_cupy,
78
80
  xp_size,
79
- xp_moveaxis_to_end,
80
- xp_sign,
81
81
  xp_vector_norm,
82
- xp_broadcast_promote,
82
+ xp_promote,
83
+ xp_capabilities,
84
+ xp_ravel,
85
+ xp_swapaxes,
86
+ xp_default_dtype,
83
87
  )
84
- from scipy._lib import array_api_extra as xpx
85
- from scipy._lib.deprecation import _deprecated
88
+ import scipy._lib.array_api_extra as xpx
89
+
86
90
 
87
91
 
88
92
  # Functions/classes in other files should be added in `__init__.py`, not here
@@ -144,24 +148,27 @@ def _chk2_asarray(a, b, axis):
144
148
  return a, b, outaxis
145
149
 
146
150
 
147
- def _convert_common_float(*arrays, xp=None):
148
- xp = array_namespace(*arrays) if xp is None else xp
149
- arrays = [_asarray(array, subok=True) for array in arrays]
150
- dtypes = [(xp.asarray(1.).dtype if xp.isdtype(array.dtype, 'integral')
151
- else array.dtype) for array in arrays]
152
- dtype = xp.result_type(*dtypes)
153
- arrays = [xp.astype(array, dtype, copy=False) for array in arrays]
154
- return arrays[0] if len(arrays)==1 else tuple(arrays)
155
-
156
-
157
151
  SignificanceResult = _make_tuple_bunch('SignificanceResult',
158
152
  ['statistic', 'pvalue'], [])
153
+ # Let's call a SignificanceResult with legacy :correlation" attribute a
154
+ # "CorrelationResult". Don't add to `extra_field_names`- shouldn't be in repr.
155
+
156
+
157
+ def _pack_CorrelationResult(statistic, pvalue, correlation):
158
+ res = SignificanceResult(statistic, pvalue)
159
+ res.correlation = correlation
160
+ return res
161
+
162
+
163
+ def _unpack_CorrelationResult(res, _):
164
+ return res.statistic, res.pvalue, res.correlation
159
165
 
160
166
 
161
167
  # note that `weights` are paired with `x`
168
+ @xp_capabilities()
162
169
  @_axis_nan_policy_factory(
163
170
  lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
164
- result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
171
+ result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
165
172
  def gmean(a, axis=0, dtype=None, weights=None):
166
173
  r"""Compute the weighted geometric mean along the specified axis.
167
174
 
@@ -242,9 +249,10 @@ def gmean(a, axis=0, dtype=None, weights=None):
242
249
  return xp.exp(_xp_mean(log_a, axis=axis, weights=weights))
243
250
 
244
251
 
252
+ @xp_capabilities(jax_jit=False, allow_dask_compute=1)
245
253
  @_axis_nan_policy_factory(
246
254
  lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
247
- result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
255
+ result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
248
256
  def hmean(a, axis=0, dtype=None, *, weights=None):
249
257
  r"""Calculate the weighted harmonic mean along the specified axis.
250
258
 
@@ -342,9 +350,10 @@ def hmean(a, axis=0, dtype=None, *, weights=None):
342
350
  return 1.0 / _xp_mean(1.0 / a, axis=axis, weights=weights)
343
351
 
344
352
 
353
+ @xp_capabilities(jax_jit=False, allow_dask_compute=1)
345
354
  @_axis_nan_policy_factory(
346
355
  lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
347
- result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
356
+ result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
348
357
  def pmean(a, p, *, axis=0, dtype=None, weights=None):
349
358
  r"""Calculate the weighted power mean along the specified axis.
350
359
 
@@ -444,7 +453,7 @@ def pmean(a, p, *, axis=0, dtype=None, weights=None):
444
453
  2.80668351922014
445
454
 
446
455
  """
447
- if not isinstance(p, (int, float)):
456
+ if not isinstance(p, int | float):
448
457
  raise ValueError("Power mean only defined for exponent of type int or "
449
458
  "float.")
450
459
  if p == 0:
@@ -485,8 +494,7 @@ def _mode_result(mode, count):
485
494
  return ModeResult(mode, count)
486
495
 
487
496
 
488
- @_axis_nan_policy_factory(_mode_result, override={'vectorization': True,
489
- 'nan_propagation': False})
497
+ @_axis_nan_policy_factory(_mode_result, override={'nan_propagation': False})
490
498
  def mode(a, axis=0, nan_policy='propagate', keepdims=False):
491
499
  r"""Return an array of the modal (most common) value in the passed array.
492
500
 
@@ -562,8 +570,27 @@ def mode(a, axis=0, nan_policy='propagate', keepdims=False):
562
570
  NaN = _get_nan(a)
563
571
  return ModeResult(*np.array([NaN, 0], dtype=NaN.dtype))
564
572
 
565
- vals, cnts = np.unique(a, return_counts=True)
566
- modes, counts = vals[cnts.argmax()], cnts.max()
573
+ if a.ndim == 1:
574
+ vals, cnts = np.unique(a, return_counts=True)
575
+ modes, counts = vals[cnts.argmax()], cnts.max()
576
+ return ModeResult(modes[()], counts[()])
577
+
578
+ # `axis` is always -1 after the `_axis_nan_policy` decorator
579
+ y = np.sort(a, axis=-1)
580
+ # Get boolean array of elements that are different from the previous element
581
+ i = np.concatenate([np.ones(y.shape[:-1] + (1,), dtype=bool),
582
+ (y[..., :-1] != y[..., 1:]) & ~np.isnan(y[..., :-1])], axis=-1)
583
+ # Get linear integer indices of these elements in a raveled array
584
+ indices = np.arange(y.size)[i.ravel()]
585
+ # The difference between integer indices is the number of repeats
586
+ counts = np.diff(indices, append=y.size)
587
+ # Now we form an array of `counts` corresponding with each element of `y`...
588
+ counts = np.reshape(np.repeat(counts, counts), y.shape)
589
+ # ... so we can get the argmax of *each slice* separately.
590
+ k = np.argmax(counts, axis=-1, keepdims=True)
591
+ # Extract the corresponding element/count, and eliminate the reduced dimension
592
+ modes = np.take_along_axis(y, k, axis=-1)[..., 0]
593
+ counts = np.take_along_axis(counts, k, axis=-1)[..., 0]
567
594
  return ModeResult(modes[()], counts[()])
568
595
 
569
596
 
@@ -587,7 +614,7 @@ def _put_val_to_limits(a, limits, inclusive, val=np.nan, xp=None):
587
614
 
588
615
  """
589
616
  xp = array_namespace(a) if xp is None else xp
590
- mask = xp.zeros(a.shape, dtype=xp.bool)
617
+ mask = xp.zeros_like(a, dtype=xp.bool)
591
618
  if limits is None:
592
619
  return a, mask
593
620
  lower_limit, upper_limit = limits
@@ -596,19 +623,18 @@ def _put_val_to_limits(a, limits, inclusive, val=np.nan, xp=None):
596
623
  mask |= (a < lower_limit) if lower_include else a <= lower_limit
597
624
  if upper_limit is not None:
598
625
  mask |= (a > upper_limit) if upper_include else a >= upper_limit
599
- if xp.all(mask):
626
+ lazy = is_lazy_array(mask)
627
+ if not lazy and xp.all(mask):
600
628
  raise ValueError("No array values within given limits")
601
- if xp.any(mask):
602
- # hopefully this (and many other instances of this idiom) are temporary when
603
- # data-apis/array-api#807 is resolved
604
- dtype = xp.asarray(1.).dtype if xp.isdtype(a.dtype, 'integral') else a.dtype
605
- a = xp.where(mask, xp.asarray(val, dtype=dtype), a)
629
+ if lazy or xp.any(mask):
630
+ a = xp.where(mask, val, a)
606
631
  return a, mask
607
632
 
608
633
 
634
+ @xp_capabilities()
609
635
  @_axis_nan_policy_factory(
610
636
  lambda x: x, n_outputs=1, default_axis=None,
611
- result_to_tuple=lambda x: (x,)
637
+ result_to_tuple=lambda x, _: (x,)
612
638
  )
613
639
  def tmean(a, limits=None, inclusive=(True, True), axis=None):
614
640
  """Compute the trimmed mean.
@@ -657,12 +683,13 @@ def tmean(a, limits=None, inclusive=(True, True), axis=None):
657
683
  # explicit dtype specification required due to data-apis/array-api-compat#152
658
684
  sum = xp.sum(a, axis=axis, dtype=a.dtype)
659
685
  n = xp.sum(xp.asarray(~mask, dtype=a.dtype), axis=axis, dtype=a.dtype)
660
- mean = _lazywhere(n != 0, (sum, n), xp.divide, xp.nan)
686
+ mean = xpx.apply_where(n != 0, (sum, n), operator.truediv, fill_value=xp.nan)
661
687
  return mean[()] if mean.ndim == 0 else mean
662
688
 
663
689
 
690
+ @xp_capabilities()
664
691
  @_axis_nan_policy_factory(
665
- lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
692
+ lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
666
693
  )
667
694
  def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
668
695
  """Compute the trimmed variance.
@@ -719,8 +746,10 @@ def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
719
746
  # by the axis_nan_policy decorator shortly.
720
747
  return _xp_var(a, correction=ddof, axis=axis, nan_policy='omit', xp=xp)
721
748
 
749
+
750
+ @xp_capabilities()
722
751
  @_axis_nan_policy_factory(
723
- lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
752
+ lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
724
753
  )
725
754
  def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
726
755
  """Compute the trimmed minimum.
@@ -766,24 +795,25 @@ def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
766
795
  """
767
796
  xp = array_namespace(a)
768
797
 
769
- # remember original dtype; _put_val_to_limits might need to change it
770
- dtype = a.dtype
798
+ max_ = xp.iinfo(a.dtype).max if xp.isdtype(a.dtype, 'integral') else xp.inf
771
799
  a, mask = _put_val_to_limits(a, (lowerlimit, None), (inclusive, None),
772
- val=xp.inf, xp=xp)
800
+ val=max_, xp=xp)
773
801
 
774
- min = xp.min(a, axis=axis)
775
- n = xp.sum(xp.asarray(~mask, dtype=a.dtype), axis=axis)
776
- res = xp.where(n != 0, min, xp.nan)
802
+ res = xp.min(a, axis=axis)
803
+ invalid = xp.all(mask, axis=axis) # All elements are below lowerlimit
777
804
 
778
- if not xp.any(xp.isnan(res)):
779
- # needed if input is of integer dtype
780
- res = xp.astype(res, dtype, copy=False)
805
+ # For eager backends, output dtype is data-dependent
806
+ if is_lazy_array(invalid) or xp.any(invalid):
807
+ # Possible loss of precision for int types
808
+ res = xp_promote(res, force_floating=True, xp=xp)
809
+ res = xp.where(invalid, xp.nan, res)
781
810
 
782
811
  return res[()] if res.ndim == 0 else res
783
812
 
784
813
 
814
+ @xp_capabilities()
785
815
  @_axis_nan_policy_factory(
786
- lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
816
+ lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
787
817
  )
788
818
  def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
789
819
  """Compute the trimmed maximum.
@@ -828,24 +858,25 @@ def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
828
858
  """
829
859
  xp = array_namespace(a)
830
860
 
831
- # remember original dtype; _put_val_to_limits might need to change it
832
- dtype = a.dtype
861
+ min_ = xp.iinfo(a.dtype).min if xp.isdtype(a.dtype, 'integral') else -xp.inf
833
862
  a, mask = _put_val_to_limits(a, (None, upperlimit), (None, inclusive),
834
- val=-xp.inf, xp=xp)
863
+ val=min_, xp=xp)
835
864
 
836
- max = xp.max(a, axis=axis)
837
- n = xp.sum(xp.asarray(~mask, dtype=a.dtype), axis=axis)
838
- res = xp.where(n != 0, max, xp.nan)
865
+ res = xp.max(a, axis=axis)
866
+ invalid = xp.all(mask, axis=axis) # All elements are above upperlimit
839
867
 
840
- if not xp.any(xp.isnan(res)):
841
- # needed if input is of integer dtype
842
- res = xp.astype(res, dtype, copy=False)
868
+ # For eager backends, output dtype is data-dependent
869
+ if is_lazy_array(invalid) or xp.any(invalid):
870
+ # Possible loss of precision for int types
871
+ res = xp_promote(res, force_floating=True, xp=xp)
872
+ res = xp.where(invalid, xp.nan, res)
843
873
 
844
874
  return res[()] if res.ndim == 0 else res
845
875
 
846
876
 
877
+ @xp_capabilities()
847
878
  @_axis_nan_policy_factory(
848
- lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
879
+ lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
849
880
  )
850
881
  def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
851
882
  """Compute the trimmed sample standard deviation.
@@ -896,8 +927,9 @@ def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
896
927
  return tvar(a, limits, inclusive, axis, ddof, _no_deco=True)**0.5
897
928
 
898
929
 
930
+ @xp_capabilities()
899
931
  @_axis_nan_policy_factory(
900
- lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
932
+ lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
901
933
  )
902
934
  def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
903
935
  """Compute the trimmed standard error of the mean.
@@ -955,7 +987,8 @@ def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
955
987
  # by the axis_nan_policy decorator shortly.
956
988
  sd = _xp_var(a, correction=ddof, axis=axis, nan_policy='omit', xp=xp)**0.5
957
989
 
958
- n_obs = xp.sum(~xp.isnan(a), axis=axis, dtype=sd.dtype)
990
+ not_nan = xp.astype(~xp.isnan(a), a.dtype)
991
+ n_obs = xp.sum(not_nan, axis=axis, dtype=sd.dtype)
959
992
  return sd / n_obs**0.5
960
993
 
961
994
 
@@ -1009,6 +1042,7 @@ def _moment_tuple(x, n_out):
1009
1042
  # empty, there is no distinction between the `moment` function being called
1010
1043
  # with parameter `order=1` and `order=[1]`; the latter *should* produce
1011
1044
  # the same as the former but with a singleton zeroth dimension.
1045
+ @xp_capabilities(jax_jit=False, allow_dask_compute=True)
1012
1046
  @_rename_parameter('moment', 'order')
1013
1047
  @_axis_nan_policy_factory( # noqa: E302
1014
1048
  _moment_result_object, n_samples=1, result_to_tuple=_moment_tuple,
@@ -1086,10 +1120,7 @@ def moment(a, order=1, axis=0, nan_policy='propagate', *, center=None):
1086
1120
  xp = array_namespace(a)
1087
1121
  a, axis = _chk_asarray(a, axis, xp=xp)
1088
1122
 
1089
- if xp.isdtype(a.dtype, 'integral'):
1090
- a = xp.asarray(a, dtype=xp.float64)
1091
- else:
1092
- a = xp.asarray(a)
1123
+ a = xp_promote(a, force_floating=True, xp=xp)
1093
1124
 
1094
1125
  order = xp.asarray(order, dtype=a.dtype)
1095
1126
  if xp_size(order) == 0:
@@ -1126,7 +1157,8 @@ def _demean(a, mean, axis, *, xp, precision_warning=True):
1126
1157
  # Used in e.g. `_moment`, `_zscore`, `_xp_var`. See gh-15905.
1127
1158
  a_zero_mean = a - mean
1128
1159
 
1129
- if xp_size(a_zero_mean) == 0:
1160
+ if (xp_size(a_zero_mean) == 0 or not precision_warning
1161
+ or is_lazy_array(a_zero_mean)):
1130
1162
  return a_zero_mean
1131
1163
 
1132
1164
  eps = xp.finfo(mean.dtype).eps * 10
@@ -1134,13 +1166,12 @@ def _demean(a, mean, axis, *, xp, precision_warning=True):
1134
1166
  with np.errstate(divide='ignore', invalid='ignore'):
1135
1167
  rel_diff = xp.max(xp.abs(a_zero_mean), axis=axis,
1136
1168
  keepdims=True) / xp.abs(mean)
1169
+
1170
+ n = _length_nonmasked(a, axis, xp=xp)
1137
1171
  with np.errstate(invalid='ignore'):
1138
- precision_loss = xp.any(rel_diff < eps)
1139
- n = (xp_size(a) if axis is None
1140
- # compact way to deal with axis tuples or ints
1141
- else np.prod(np.asarray(a.shape)[np.asarray(axis)]))
1172
+ precision_loss = xp.any(xp.asarray(rel_diff < eps) & xp.asarray(n > 1))
1142
1173
 
1143
- if precision_loss and n > 1 and precision_warning:
1174
+ if precision_loss:
1144
1175
  message = ("Precision loss occurred in moment calculation due to "
1145
1176
  "catastrophic cancellation. This occurs when the data "
1146
1177
  "are nearly identical. Results may be unreliable.")
@@ -1157,9 +1188,7 @@ def _moment(a, order, axis, *, mean=None, xp=None):
1157
1188
  """
1158
1189
  xp = array_namespace(a) if xp is None else xp
1159
1190
 
1160
- if xp.isdtype(a.dtype, 'integral'):
1161
- a = xp.asarray(a, dtype=xp.float64)
1162
-
1191
+ a = xp_promote(a, force_floating=True, xp=xp)
1163
1192
  dtype = a.dtype
1164
1193
 
1165
1194
  # moment of empty array is the same regardless of order
@@ -1210,13 +1239,33 @@ def _var(x, axis=0, ddof=0, mean=None, xp=None):
1210
1239
  xp = array_namespace(x) if xp is None else xp
1211
1240
  var = _moment(x, 2, axis, mean=mean, xp=xp)
1212
1241
  if ddof != 0:
1213
- n = x.shape[axis] if axis is not None else xp_size(x)
1242
+ n = _length_nonmasked(x, axis, xp=xp)
1214
1243
  var *= np.divide(n, n-ddof) # to avoid error on division by zero
1215
1244
  return var
1216
1245
 
1217
1246
 
1247
+ def _length_nonmasked(x, axis, keepdims=False, xp=None):
1248
+ xp = array_namespace(x) if xp is None else xp
1249
+ if is_marray(xp):
1250
+ if np.iterable(axis):
1251
+ message = '`axis` must be an integer or None for use with `MArray`.'
1252
+ raise NotImplementedError(message)
1253
+ return xp.astype(xp.count(x, axis=axis, keepdims=keepdims), x.dtype)
1254
+ return (xp_size(x) if axis is None else
1255
+ # compact way to deal with axis tuples or ints
1256
+ int(np.prod(np.asarray(x.shape)[np.asarray(axis)])))
1257
+
1258
+
1259
+ def _share_masks(*args, xp):
1260
+ if is_marray(xp):
1261
+ mask = functools.reduce(operator.or_, (arg.mask for arg in args))
1262
+ args = [xp.asarray(arg.data, mask=mask) for arg in args]
1263
+ return args[0] if len(args) == 1 else args
1264
+
1265
+
1266
+ @xp_capabilities(jax_jit=False, allow_dask_compute=2)
1218
1267
  @_axis_nan_policy_factory(
1219
- lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
1268
+ lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1
1220
1269
  )
1221
1270
  # nan_policy handled by `_axis_nan_policy`, but needs to be left
1222
1271
  # in signature to preserve use as a positional argument
@@ -1296,7 +1345,7 @@ def skew(a, axis=0, bias=True, nan_policy='propagate'):
1296
1345
  """
1297
1346
  xp = array_namespace(a)
1298
1347
  a, axis = _chk_asarray(a, axis, xp=xp)
1299
- n = a.shape[axis]
1348
+ n = _length_nonmasked(a, axis, xp=xp)
1300
1349
 
1301
1350
  mean = xp.mean(a, axis=axis, keepdims=True)
1302
1351
  mean_reduced = xp.squeeze(mean, axis=axis) # needed later
@@ -1305,20 +1354,19 @@ def skew(a, axis=0, bias=True, nan_policy='propagate'):
1305
1354
  with np.errstate(all='ignore'):
1306
1355
  eps = xp.finfo(m2.dtype).eps
1307
1356
  zero = m2 <= (eps * mean_reduced)**2
1308
- vals = xp.where(zero, xp.asarray(xp.nan), m3 / m2**1.5)
1357
+ vals = xp.where(zero, xp.nan, m3 / m2**1.5)
1309
1358
  if not bias:
1310
1359
  can_correct = ~zero & (n > 2)
1311
- if xp.any(can_correct):
1312
- m2 = m2[can_correct]
1313
- m3 = m3[can_correct]
1360
+ if is_lazy_array(can_correct) or xp.any(can_correct):
1314
1361
  nval = ((n - 1.0) * n)**0.5 / (n - 2.0) * m3 / m2**1.5
1315
- vals[can_correct] = nval
1362
+ vals = xp.where(can_correct, nval, vals)
1316
1363
 
1317
1364
  return vals[()] if vals.ndim == 0 else vals
1318
1365
 
1319
1366
 
1367
+ @xp_capabilities(jax_jit=False, allow_dask_compute=2)
1320
1368
  @_axis_nan_policy_factory(
1321
- lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
1369
+ lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1
1322
1370
  )
1323
1371
  # nan_policy handled by `_axis_nan_policy`, but needs to be left
1324
1372
  # in signature to preserve use as a positional argument
@@ -1406,23 +1454,20 @@ def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
1406
1454
  xp = array_namespace(a)
1407
1455
  a, axis = _chk_asarray(a, axis, xp=xp)
1408
1456
 
1409
- n = a.shape[axis]
1457
+ n = _length_nonmasked(a, axis, xp=xp)
1410
1458
  mean = xp.mean(a, axis=axis, keepdims=True)
1411
1459
  mean_reduced = xp.squeeze(mean, axis=axis) # needed later
1412
1460
  m2 = _moment(a, 2, axis, mean=mean, xp=xp)
1413
1461
  m4 = _moment(a, 4, axis, mean=mean, xp=xp)
1414
1462
  with np.errstate(all='ignore'):
1415
1463
  zero = m2 <= (xp.finfo(m2.dtype).eps * mean_reduced)**2
1416
- NaN = _get_nan(m4, xp=xp)
1417
- vals = xp.where(zero, NaN, m4 / m2**2.0)
1464
+ vals = xp.where(zero, xp.nan, m4 / m2**2.0)
1418
1465
 
1419
1466
  if not bias:
1420
1467
  can_correct = ~zero & (n > 3)
1421
- if xp.any(can_correct):
1422
- m2 = m2[can_correct]
1423
- m4 = m4[can_correct]
1468
+ if is_lazy_array(can_correct) or xp.any(can_correct):
1424
1469
  nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
1425
- vals[can_correct] = nval + 3.0
1470
+ vals = xp.where(can_correct, nval + 3.0, vals)
1426
1471
 
1427
1472
  vals = vals - 3 if fisher else vals
1428
1473
  return vals[()] if vals.ndim == 0 else vals
@@ -1433,6 +1478,7 @@ DescribeResult = namedtuple('DescribeResult',
1433
1478
  'kurtosis'))
1434
1479
 
1435
1480
 
1481
+ @xp_capabilities(jax_jit=False, allow_dask_compute=True)
1436
1482
  def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
1437
1483
  """Compute several descriptive statistics of the passed array.
1438
1484
 
@@ -1506,9 +1552,11 @@ def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
1506
1552
  xp = array_namespace(a)
1507
1553
  a, axis = _chk_asarray(a, axis, xp=xp)
1508
1554
 
1509
- contains_nan, nan_policy = _contains_nan(a, nan_policy)
1555
+ contains_nan = _contains_nan(a, nan_policy)
1510
1556
 
1511
- if contains_nan and nan_policy == 'omit':
1557
+ # Test nan_policy before the implicit call to bool(contains_nan)
1558
+ # to avoid raising on lazy xps on the default nan_policy='propagate'
1559
+ if nan_policy == 'omit' and contains_nan:
1512
1560
  # only NumPy gets here; `_contains_nan` raises error for the rest
1513
1561
  a = ma.masked_invalid(a)
1514
1562
  return mstats_basic.describe(a, axis, ddof, bias)
@@ -1516,7 +1564,9 @@ def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
1516
1564
  if xp_size(a) == 0:
1517
1565
  raise ValueError("The input must not be empty.")
1518
1566
 
1519
- n = a.shape[axis]
1567
+ # use xp.astype when data-apis/array-api-compat#226 is resolved
1568
+ n = xp.asarray(_length_nonmasked(a, axis, xp=xp), dtype=xp.int64)
1569
+ n = n[()] if n.ndim == 0 else n
1520
1570
  mm = (xp.min(a, axis=axis), xp.max(a, axis=axis))
1521
1571
  m = xp.mean(a, axis=axis)
1522
1572
  v = _var(a, axis=axis, ddof=ddof, xp=xp)
@@ -1552,6 +1602,7 @@ def _get_pvalue(statistic, distribution, alternative, symmetric=True, xp=None):
1552
1602
  SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
1553
1603
 
1554
1604
 
1605
+ @xp_capabilities(jax_jit=False, allow_dask_compute=True)
1555
1606
  @_axis_nan_policy_factory(SkewtestResult, n_samples=1, too_small=7)
1556
1607
  # nan_policy handled by `_axis_nan_policy`, but needs to be left
1557
1608
  # in signature to preserve use as a positional argument
@@ -1634,22 +1685,24 @@ def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
1634
1685
  a, axis = _chk_asarray(a, axis, xp=xp)
1635
1686
 
1636
1687
  b2 = skew(a, axis, _no_deco=True)
1637
- n = a.shape[axis]
1638
- if n < 8:
1639
- message = ("`skewtest` requires at least 8 observations; "
1640
- f"only {n=} observations were given.")
1641
- raise ValueError(message)
1642
1688
 
1643
- y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
1644
- beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
1645
- ((n-2.0) * (n+5) * (n+7) * (n+9)))
1646
- W2 = -1 + math.sqrt(2 * (beta2 - 1))
1647
- delta = 1 / math.sqrt(0.5 * math.log(W2))
1648
- alpha = math.sqrt(2.0 / (W2 - 1))
1649
- y = xp.where(y == 0, xp.asarray(1, dtype=y.dtype), y)
1650
- Z = delta * xp.log(y / alpha + xp.sqrt((y / alpha)**2 + 1))
1689
+ n = xp.asarray(_length_nonmasked(a, axis), dtype=b2.dtype)
1690
+ n = xpx.at(n, n < 8).set(xp.nan)
1691
+ if xp.any(xp.isnan(n)):
1692
+ message = ("`skewtest` requires at least 8 valid observations;"
1693
+ "slices with fewer observations will produce NaNs.")
1694
+ warnings.warn(message, SmallSampleWarning, stacklevel=2)
1651
1695
 
1652
- pvalue = _get_pvalue(Z, _SimpleNormal(), alternative, xp=xp)
1696
+ with np.errstate(divide='ignore', invalid='ignore'):
1697
+ y = b2 * xp.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
1698
+ beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
1699
+ ((n-2.0) * (n+5) * (n+7) * (n+9)))
1700
+ W2 = -1 + xp.sqrt(2 * (beta2 - 1))
1701
+ delta = 1 / xp.sqrt(0.5 * xp.log(W2))
1702
+ alpha = xp.sqrt(2.0 / (W2 - 1))
1703
+ y = xp.where(y == 0, 1., y)
1704
+ Z = delta * xp.log(y / alpha + xp.sqrt((y / alpha)**2 + 1))
1705
+ pvalue = _get_pvalue(Z, _SimpleNormal(), alternative, xp=xp)
1653
1706
 
1654
1707
  Z = Z[()] if Z.ndim == 0 else Z
1655
1708
  pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue
@@ -1659,6 +1712,7 @@ def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
1659
1712
  KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
1660
1713
 
1661
1714
 
1715
+ @xp_capabilities(jax_jit=False, allow_dask_compute=True)
1662
1716
  @_axis_nan_policy_factory(KurtosistestResult, n_samples=1, too_small=4)
1663
1717
  def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
1664
1718
  r"""Test whether a dataset has normal kurtosis.
@@ -1735,18 +1789,15 @@ def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
1735
1789
  xp = array_namespace(a)
1736
1790
  a, axis = _chk_asarray(a, axis, xp=xp)
1737
1791
 
1738
- n = a.shape[axis]
1739
-
1740
- if n < 5:
1741
- message = ("`kurtosistest` requires at least 5 observations; "
1742
- f"only {n=} observations were given.")
1743
- raise ValueError(message)
1744
- if n < 20:
1745
- message = ("`kurtosistest` p-value may be inaccurate with fewer than 20 "
1746
- f"observations; only {n=} observations were given.")
1747
- warnings.warn(message, stacklevel=2)
1748
1792
  b2 = kurtosis(a, axis, fisher=False, _no_deco=True)
1749
1793
 
1794
+ n = xp.asarray(_length_nonmasked(a, axis), dtype=b2.dtype)
1795
+ n = xpx.at(n, n < 5).set(xp.nan)
1796
+ if xp.any(xp.isnan(n)):
1797
+ message = ("`kurtosistest` requires at least 5 valid observations; "
1798
+ "slices with fewer observations will produce NaNs.")
1799
+ warnings.warn(message, SmallSampleWarning, stacklevel=2)
1800
+
1750
1801
  E = 3.0*(n-1) / (n+1)
1751
1802
  varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
1752
1803
  x = (b2-E) / varb2**0.5 # [1]_ Eq. 4
@@ -1757,8 +1808,7 @@ def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
1757
1808
  A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + (1+4.0/(sqrtbeta1**2))**0.5)
1758
1809
  term1 = 1 - 2/(9.0*A)
1759
1810
  denom = 1 + x * (2/(A-4.0))**0.5
1760
- NaN = _get_nan(x, xp=xp)
1761
- term2 = xp_sign(denom) * xp.where(denom == 0.0, NaN,
1811
+ term2 = xp.sign(denom) * xp.where(denom == 0.0, xp.nan,
1762
1812
  ((1-2.0/A)/xp.abs(denom))**(1/3))
1763
1813
  if xp.any(denom == 0):
1764
1814
  msg = ("Test statistic not defined in some cases due to division by "
@@ -1766,7 +1816,6 @@ def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
1766
1816
  warnings.warn(msg, RuntimeWarning, stacklevel=2)
1767
1817
 
1768
1818
  Z = (term1 - term2) / (2/(9.0*A))**0.5 # [1]_ Eq. 5
1769
-
1770
1819
  pvalue = _get_pvalue(Z, _SimpleNormal(), alternative, xp=xp)
1771
1820
 
1772
1821
  Z = Z[()] if Z.ndim == 0 else Z
@@ -1777,6 +1826,7 @@ def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
1777
1826
  NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
1778
1827
 
1779
1828
 
1829
+ @xp_capabilities(jax_jit=False, allow_dask_compute=True)
1780
1830
  @_axis_nan_policy_factory(NormaltestResult, n_samples=1, too_small=7)
1781
1831
  def normaltest(a, axis=0, nan_policy='propagate'):
1782
1832
  r"""Test whether a sample differs from a normal distribution.
@@ -1845,7 +1895,7 @@ def normaltest(a, axis=0, nan_policy='propagate'):
1845
1895
  k, _ = kurtosistest(a, axis, _no_deco=True)
1846
1896
  statistic = s*s + k*k
1847
1897
 
1848
- chi2 = _SimpleChi2(xp.asarray(2.))
1898
+ chi2 = _SimpleChi2(xp.asarray(2., dtype=statistic.dtype))
1849
1899
  pvalue = _get_pvalue(statistic, chi2, alternative='greater', symmetric=False, xp=xp)
1850
1900
 
1851
1901
  statistic = statistic[()] if statistic.ndim == 0 else statistic
@@ -1854,6 +1904,7 @@ def normaltest(a, axis=0, nan_policy='propagate'):
1854
1904
  return NormaltestResult(statistic, pvalue)
1855
1905
 
1856
1906
 
1907
+ @xp_capabilities(jax_jit=False, allow_dask_compute=True)
1857
1908
  @_axis_nan_policy_factory(SignificanceResult, default_axis=None)
1858
1909
  def jarque_bera(x, *, axis=None):
1859
1910
  r"""Perform the Jarque-Bera goodness of fit test on sample data.
@@ -1913,22 +1964,17 @@ def jarque_bera(x, *, axis=None):
1913
1964
  For a more detailed example, see :ref:`hypothesis_jarque_bera`.
1914
1965
  """
1915
1966
  xp = array_namespace(x)
1916
- x = xp.asarray(x)
1917
- if axis is None:
1918
- x = xp.reshape(x, (-1,))
1919
- axis = 0
1967
+ x, axis = _chk_asarray(x, axis, xp=xp)
1920
1968
 
1921
- n = x.shape[axis]
1922
- if n == 0:
1923
- raise ValueError('At least one observation is required.')
1924
-
1925
- mu = xp.mean(x, axis=axis, keepdims=True)
1969
+ mu = _xp_mean(x, axis=axis, keepdims=True)
1926
1970
  diffx = x - mu
1927
1971
  s = skew(diffx, axis=axis, _no_deco=True)
1928
1972
  k = kurtosis(diffx, axis=axis, _no_deco=True)
1973
+
1974
+ n = xp.asarray(_length_nonmasked(x, axis), dtype=mu.dtype)
1929
1975
  statistic = n / 6 * (s**2 + k**2 / 4)
1930
1976
 
1931
- chi2 = _SimpleChi2(xp.asarray(2.))
1977
+ chi2 = _SimpleChi2(xp.asarray(2., dtype=mu.dtype))
1932
1978
  pvalue = _get_pvalue(statistic, chi2, alternative='greater', symmetric=False, xp=xp)
1933
1979
 
1934
1980
  statistic = statistic[()] if statistic.ndim == 0 else statistic
@@ -2161,11 +2207,8 @@ def percentileofscore(a, score, kind='rank', nan_policy='propagate'):
2161
2207
  score = np.asarray(score)
2162
2208
 
2163
2209
  # Nan treatment
2164
- cna, npa = _contains_nan(a, nan_policy)
2165
- cns, nps = _contains_nan(score, nan_policy)
2166
-
2167
- if (cna or cns) and nan_policy == 'raise':
2168
- raise ValueError("The input contains nan values")
2210
+ cna = _contains_nan(a, nan_policy)
2211
+ cns = _contains_nan(score, nan_policy)
2169
2212
 
2170
2213
  if cns:
2171
2214
  # If a score is nan, then the output should be nan
@@ -2557,8 +2600,9 @@ def obrientransform(*samples):
2557
2600
  return np.array(arrays)
2558
2601
 
2559
2602
 
2603
+ @xp_capabilities()
2560
2604
  @_axis_nan_policy_factory(
2561
- lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, too_small=1
2605
+ lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, too_small=1
2562
2606
  )
2563
2607
  def sem(a, axis=0, ddof=1, nan_policy='propagate'):
2564
2608
  """Compute standard error of the mean.
@@ -2617,7 +2661,7 @@ def sem(a, axis=0, ddof=1, nan_policy='propagate'):
2617
2661
  a = xp.reshape(a, (-1,))
2618
2662
  axis = 0
2619
2663
  a = xpx.atleast_nd(xp.asarray(a), ndim=1, xp=xp)
2620
- n = a.shape[axis]
2664
+ n = _length_nonmasked(a, axis, xp=xp)
2621
2665
  s = xp.std(a, axis=axis, correction=ddof) / n**0.5
2622
2666
  return s
2623
2667
 
@@ -2638,6 +2682,7 @@ def _isconst(x):
2638
2682
  return (y[0] == y).all(keepdims=True)
2639
2683
 
2640
2684
 
2685
+ @xp_capabilities()
2641
2686
  def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
2642
2687
  """
2643
2688
  Compute the z score.
@@ -2723,6 +2768,7 @@ def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
2723
2768
  return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
2724
2769
 
2725
2770
 
2771
+ @xp_capabilities()
2726
2772
  def gzscore(a, *, axis=0, ddof=0, nan_policy='propagate'):
2727
2773
  """
2728
2774
  Compute the geometric standard score.
@@ -2812,11 +2858,12 @@ def gzscore(a, *, axis=0, ddof=0, nan_policy='propagate'):
2812
2858
 
2813
2859
  """
2814
2860
  xp = array_namespace(a)
2815
- a = _convert_common_float(a, xp=xp)
2861
+ a = xp_promote(a, force_floating=True, xp=xp)
2816
2862
  log = ma.log if isinstance(a, ma.MaskedArray) else xp.log
2817
2863
  return zscore(log(a), axis=axis, ddof=ddof, nan_policy=nan_policy)
2818
2864
 
2819
2865
 
2866
+ @xp_capabilities()
2820
2867
  def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
2821
2868
  """
2822
2869
  Calculate the relative z-scores.
@@ -2873,7 +2920,7 @@ def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
2873
2920
 
2874
2921
  like_zscore = (scores is compare)
2875
2922
  xp = array_namespace(scores, compare)
2876
- scores, compare = _convert_common_float(scores, compare, xp=xp)
2923
+ scores, compare = xp_promote(scores, compare, force_floating=True, xp=xp)
2877
2924
 
2878
2925
  with warnings.catch_warnings():
2879
2926
  if like_zscore: # zscore should not emit SmallSampleWarning
@@ -2892,12 +2939,13 @@ def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
2892
2939
  eps = xp.finfo(z.dtype).eps
2893
2940
  zero = std <= xp.abs(eps * mn)
2894
2941
  zero = xp.broadcast_to(zero, z.shape)
2895
- z[zero] = xp.nan
2942
+ z = xpx.at(z, zero).set(xp.nan)
2896
2943
 
2897
2944
  return z
2898
2945
 
2899
2946
 
2900
- def gstd(a, axis=0, ddof=1):
2947
+ @xp_capabilities()
2948
+ def gstd(a, axis=0, ddof=1, *, keepdims=False, nan_policy='propagate'):
2901
2949
  r"""
2902
2950
  Calculate the geometric standard deviation of an array.
2903
2951
 
@@ -2912,17 +2960,27 @@ def gstd(a, axis=0, ddof=1):
2912
2960
  ----------
2913
2961
  a : array_like
2914
2962
  An array containing finite, strictly positive, real numbers.
2915
-
2916
- .. deprecated:: 1.14.0
2917
- Support for masked array input was deprecated in
2918
- SciPy 1.14.0 and will be removed in version 1.16.0.
2919
-
2920
2963
  axis : int, tuple or None, optional
2921
2964
  Axis along which to operate. Default is 0. If None, compute over
2922
2965
  the whole array `a`.
2923
2966
  ddof : int, optional
2924
2967
  Degree of freedom correction in the calculation of the
2925
2968
  geometric standard deviation. Default is 1.
2969
+ keepdims : boolean, optional
2970
+ If this is set to ``True``, the axes which are reduced are left
2971
+ in the result as dimensions with length one. With this option,
2972
+ the result will broadcast correctly against the input array.
2973
+ nan_policy : {'propagate', 'omit', 'raise'}, default: 'propagate'
2974
+ Defines how to handle input NaNs.
2975
+
2976
+ - ``propagate``: if a NaN is present in the axis slice (e.g. row) along
2977
+ which the statistic is computed, the corresponding entry of the output
2978
+ will be NaN.
2979
+ - ``omit``: NaNs will be omitted when performing the calculation.
2980
+ If insufficient data remains in the axis slice along which the
2981
+ statistic is computed, the corresponding entry of the output will be
2982
+ NaN.
2983
+ - ``raise``: if a NaN is present, a ``ValueError`` will be raised.
2926
2984
 
2927
2985
  Returns
2928
2986
  -------
@@ -2992,19 +3050,14 @@ def gstd(a, axis=0, ddof=1):
2992
3050
  array([2.12939215, 1.22120169])
2993
3051
 
2994
3052
  """
2995
- a = np.asanyarray(a)
2996
- if isinstance(a, ma.MaskedArray):
2997
- message = ("`gstd` support for masked array input was deprecated in "
2998
- "SciPy 1.14.0 and will be removed in version 1.16.0.")
2999
- warnings.warn(message, DeprecationWarning, stacklevel=2)
3000
- log = ma.log
3001
- else:
3002
- log = np.log
3053
+ xp = array_namespace(a)
3054
+ a = xp_promote(a, force_floating=True, xp=xp)
3003
3055
 
3056
+ kwargs = dict(axis=axis, correction=ddof, keepdims=keepdims, nan_policy=nan_policy)
3004
3057
  with np.errstate(invalid='ignore', divide='ignore'):
3005
- res = np.exp(np.std(log(a), axis=axis, ddof=ddof))
3058
+ res = xp.exp(_xp_var(xp.log(a), **kwargs)**0.5)
3006
3059
 
3007
- if (a <= 0).any():
3060
+ if not is_lazy_array(a) and xp.any(a <= 0):
3008
3061
  message = ("The geometric standard deviation is only defined if all elements "
3009
3062
  "are greater than or equal to zero; otherwise, the result is NaN.")
3010
3063
  warnings.warn(message, RuntimeWarning, stacklevel=2)
@@ -3017,7 +3070,7 @@ _scale_conversions = {'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
3017
3070
 
3018
3071
 
3019
3072
  @_axis_nan_policy_factory(
3020
- lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1,
3073
+ lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1,
3021
3074
  default_axis=None, override={'nan_propagation': False}
3022
3075
  )
3023
3076
  def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
@@ -3143,9 +3196,9 @@ def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
3143
3196
  scale = _scale_conversions[scale_key]
3144
3197
 
3145
3198
  # Select the percentile function to use based on nans and policy
3146
- contains_nan, nan_policy = _contains_nan(x, nan_policy)
3199
+ contains_nan = _contains_nan(x, nan_policy)
3147
3200
 
3148
- if contains_nan and nan_policy == 'omit':
3201
+ if nan_policy == 'omit' and contains_nan:
3149
3202
  percentile_func = np.nanpercentile
3150
3203
  else:
3151
3204
  percentile_func = np.percentile
@@ -3323,7 +3376,7 @@ def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
3323
3376
  return np.nan
3324
3377
  return np.full(nan_shape, np.nan)
3325
3378
 
3326
- contains_nan, nan_policy = _contains_nan(x, nan_policy)
3379
+ contains_nan = _contains_nan(x, nan_policy)
3327
3380
 
3328
3381
  if contains_nan:
3329
3382
  if axis is None:
@@ -3720,7 +3773,7 @@ def _f_oneway_is_too_small(samples, kwargs=None, axis=-1):
3720
3773
 
3721
3774
  @_axis_nan_policy_factory(
3722
3775
  F_onewayResult, n_samples=None, too_small=_f_oneway_is_too_small)
3723
- def f_oneway(*samples, axis=0):
3776
+ def f_oneway(*samples, axis=0, equal_var=True):
3724
3777
  """Perform one-way ANOVA.
3725
3778
 
3726
3779
  The one-way ANOVA tests the null hypothesis that two or more groups have
@@ -3736,6 +3789,13 @@ def f_oneway(*samples, axis=0):
3736
3789
  axis : int, optional
3737
3790
  Axis of the input arrays along which the test is applied.
3738
3791
  Default is 0.
3792
+ equal_var: bool, optional
3793
+ If True (default), perform a standard one-way ANOVA test that
3794
+ assumes equal population variances [2]_.
3795
+ If False, perform Welch's ANOVA test, which does not assume
3796
+ equal population variances [4]_.
3797
+
3798
+ .. versionadded:: 1.15.0
3739
3799
 
3740
3800
  Returns
3741
3801
  -------
@@ -3797,6 +3857,10 @@ def f_oneway(*samples, axis=0):
3797
3857
  .. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
3798
3858
  http://www.biostathandbook.com/onewayanova.html
3799
3859
 
3860
+ .. [4] B. L. Welch, "On the Comparison of Several Mean Values:
3861
+ An Alternative Approach", Biometrika, vol. 38, no. 3/4,
3862
+ pp. 330-336, 1951, doi: 10.2307/2332579.
3863
+
3800
3864
  Examples
3801
3865
  --------
3802
3866
  >>> import numpy as np
@@ -3848,6 +3912,8 @@ def f_oneway(*samples, axis=0):
3848
3912
  >>> F.pvalue
3849
3913
  array([0.20630784, 0.96375203, 0.04733157])
3850
3914
 
3915
+ Welch ANOVA will be performed if `equal_var` is False.
3916
+
3851
3917
  """
3852
3918
  if len(samples) < 2:
3853
3919
  raise TypeError('at least two inputs are required;'
@@ -3896,34 +3962,85 @@ def f_oneway(*samples, axis=0):
3896
3962
  # slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]).
3897
3963
  all_same_const = (_first(alldata, axis) == alldata).all(axis=axis)
3898
3964
 
3899
- # Determine the mean of the data, and subtract that from all inputs to a
3900
- # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
3901
- # to a shift in location, and centering all data around zero vastly
3902
- # improves numerical stability.
3903
- offset = alldata.mean(axis=axis, keepdims=True)
3904
- alldata = alldata - offset
3965
+ if not isinstance(equal_var, bool):
3966
+ raise TypeError("Expected a boolean value for 'equal_var'")
3967
+
3968
+ if equal_var:
3969
+ # Determine the mean of the data, and subtract that from all inputs to a
3970
+ # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
3971
+ # to a shift in location, and centering all data around zero vastly
3972
+ # improves numerical stability.
3973
+ offset = alldata.mean(axis=axis, keepdims=True)
3974
+ alldata = alldata - offset
3975
+
3976
+ normalized_ss = _square_of_sums(alldata, axis=axis) / bign
3977
+
3978
+ sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
3979
+
3980
+ ssbn = 0
3981
+ for sample in samples:
3982
+ smo_ss = _square_of_sums(sample - offset, axis=axis)
3983
+ ssbn = ssbn + smo_ss / sample.shape[axis]
3984
+
3985
+ # Naming: variables ending in bn/b are for "between treatments", wn/w are
3986
+ # for "within treatments"
3987
+ ssbn = ssbn - normalized_ss
3988
+ sswn = sstot - ssbn
3989
+ dfbn = num_groups - 1
3990
+ dfwn = bign - num_groups
3991
+ msb = ssbn / dfbn
3992
+ msw = sswn / dfwn
3993
+ with np.errstate(divide='ignore', invalid='ignore'):
3994
+ f = msb / msw
3905
3995
 
3906
- normalized_ss = _square_of_sums(alldata, axis=axis) / bign
3996
+ prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
3907
3997
 
3908
- sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
3998
+ else:
3999
+ # calculate basic statistics for each sample
4000
+ # Beginning of second paragraph [4] page 1:
4001
+ # "As a particular case $y_t$ may be the means ... of samples
4002
+ y_t = np.asarray([np.mean(sample, axis=axis) for sample in samples])
4003
+ # "... of $n_t$ observations..."
4004
+ n_t = np.asarray([sample.shape[axis] for sample in samples])
4005
+ n_t = np.reshape(n_t, (-1,) + (1,) * (y_t.ndim - 1))
4006
+ # "... from $k$ different normal populations..."
4007
+ k = len(samples)
4008
+ # "The separate samples provide estimates $s_t^2$ of the $\sigma_t^2$."
4009
+ s_t2= np.asarray([np.var(sample, axis=axis, ddof=1) for sample in samples])
4010
+
4011
+ # calculate weight by number of data and variance
4012
+ # "we have $\lambda_t = 1 / n_t$ ... where w_t = 1 / {\lambda_t s_t^2}$"
4013
+ w_t = n_t / s_t2
4014
+ # sum of w_t
4015
+ s_w_t = np.sum(w_t, axis=0)
4016
+
4017
+ # calculate adjusted grand mean
4018
+ # "... and $\hat{y} = \sum w_t y_t / \sum w_t$. When all..."
4019
+ y_hat = np_vecdot(w_t, y_t, axis=0) / np.sum(w_t, axis=0)
4020
+
4021
+ # adjust f statistic
4022
+ # ref.[4] p.334 eq.29
4023
+ numerator = np_vecdot(w_t, (y_t - y_hat)**2, axis=0) / (k - 1)
4024
+ denominator = (
4025
+ 1 + 2 * (k - 2) / (k**2 - 1) *
4026
+ np_vecdot(1 / (n_t - 1), (1 - w_t / s_w_t)**2, axis=0)
4027
+ )
4028
+ f = numerator / denominator
3909
4029
 
3910
- ssbn = 0
3911
- for sample in samples:
3912
- smo_ss = _square_of_sums(sample - offset, axis=axis)
3913
- ssbn = ssbn + smo_ss / sample.shape[axis]
3914
-
3915
- # Naming: variables ending in bn/b are for "between treatments", wn/w are
3916
- # for "within treatments"
3917
- ssbn = ssbn - normalized_ss
3918
- sswn = sstot - ssbn
3919
- dfbn = num_groups - 1
3920
- dfwn = bign - num_groups
3921
- msb = ssbn / dfbn
3922
- msw = sswn / dfwn
3923
- with np.errstate(divide='ignore', invalid='ignore'):
3924
- f = msb / msw
4030
+ # degree of freedom 1
4031
+ # ref.[4] p.334 eq.30
4032
+ hat_f1 = k - 1
3925
4033
 
3926
- prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
4034
+ # adjusted degree of freedom 2
4035
+ # ref.[4] p.334 eq.30
4036
+ hat_f2 = (
4037
+ (k**2 - 1) /
4038
+ (3 * np_vecdot(1 / (n_t - 1), (1 - w_t / s_w_t)**2, axis=0))
4039
+ )
4040
+
4041
+ # calculate p value
4042
+ # ref.[4] p.334 eq.28
4043
+ prob = stats.f.sf(f, hat_f1, hat_f2)
3927
4044
 
3928
4045
  # Fix any f values that should be inf or nan because the corresponding
3929
4046
  # inputs were constant.
@@ -3951,7 +4068,7 @@ class AlexanderGovernResult:
3951
4068
 
3952
4069
  @_axis_nan_policy_factory(
3953
4070
  AlexanderGovernResult, n_samples=None,
3954
- result_to_tuple=lambda x: (x.statistic, x.pvalue),
4071
+ result_to_tuple=lambda x, _: (x.statistic, x.pvalue),
3955
4072
  too_small=1
3956
4073
  )
3957
4074
  def alexandergovern(*samples, nan_policy='propagate', axis=0):
@@ -4070,6 +4187,7 @@ def alexandergovern(*samples, nan_policy='propagate', axis=0):
4070
4187
  weights = inv_sq_se / np.sum(inv_sq_se, axis=0, keepdims=True)
4071
4188
 
4072
4189
  # (3) determine variance-weighted estimate of the common mean
4190
+ # Consider replacing with vecdot when data-apis/array-api#910 is resolved
4073
4191
  var_w = np.sum(weights * means, axis=0, keepdims=True)
4074
4192
 
4075
4193
  # (4) determine one-sample t statistic for each group
@@ -4089,7 +4207,7 @@ def alexandergovern(*samples, nan_policy='propagate', axis=0):
4089
4207
  (b**2*10 + 8*b*c**4 + 1000*b)))
4090
4208
 
4091
4209
  # (9) calculate statistic
4092
- A = np.sum(z**2, axis=0)
4210
+ A = np_vecdot(z, z, axis=0)
4093
4211
 
4094
4212
  # "[the p value is determined from] central chi-square random deviates
4095
4213
  # with k - 1 degrees of freedom". Alexander, Govern (94)
@@ -4269,6 +4387,8 @@ class PearsonRResult(PearsonRResultBase):
4269
4387
  return ci
4270
4388
 
4271
4389
 
4390
+ @xp_capabilities(cpu_only=True, exceptions=['cupy'],
4391
+ jax_jit=False, allow_dask_compute=True)
4272
4392
  def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0):
4273
4393
  r"""
4274
4394
  Pearson correlation coefficient and p-value for testing non-correlation.
@@ -4299,7 +4419,7 @@ def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0):
4299
4419
  Axis along which to perform the calculation. Default is 0.
4300
4420
  If None, ravel both arrays before performing the calculation.
4301
4421
 
4302
- .. versionadded:: 1.13.0
4422
+ .. versionadded:: 1.14.0
4303
4423
  alternative : {'two-sided', 'greater', 'less'}, optional
4304
4424
  Defines the alternative hypothesis. Default is 'two-sided'.
4305
4425
  The following options are available:
@@ -4365,6 +4485,7 @@ def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0):
4365
4485
  --------
4366
4486
  spearmanr : Spearman rank-order correlation coefficient.
4367
4487
  kendalltau : Kendall's tau, a correlation measure for ordinal data.
4488
+ :ref:`hypothesis_pearsonr` : Extended example
4368
4489
 
4369
4490
  Notes
4370
4491
  -----
@@ -4526,10 +4647,12 @@ def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0):
4526
4647
  This is unintuitive since there is no dependence of x and y if x is larger
4527
4648
  than zero which happens in about half of the cases if we sample x and y.
4528
4649
 
4650
+ For a more detailed example, see :ref:`hypothesis_pearsonr`.
4651
+
4529
4652
  """
4530
4653
  xp = array_namespace(x, y)
4531
- x = xp.asarray(x)
4532
- y = xp.asarray(y)
4654
+ x, y = xp_promote(x, y, force_floating=True, xp=xp)
4655
+ dtype = x.dtype
4533
4656
 
4534
4657
  if not is_numpy(xp) and method is not None:
4535
4658
  method = 'invalid'
@@ -4544,6 +4667,20 @@ def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0):
4544
4667
  raise ValueError('`axis` must be an integer.')
4545
4668
  axis = axis_int
4546
4669
 
4670
+ try:
4671
+ np.broadcast_shapes(x.shape, y.shape)
4672
+ # For consistency with other `stats` functions, we need to
4673
+ # match the dimensionalities before looking at `axis`.
4674
+ # (Note: this is not the NEP 5 / gufunc order of operations;
4675
+ # see TestPearsonr::test_different_dimensionality for more information.)
4676
+ ndim = max(x.ndim, y.ndim)
4677
+ x = xp.reshape(x, (1,) * (ndim - x.ndim) + x.shape)
4678
+ y = xp.reshape(y, (1,) * (ndim - y.ndim) + y.shape)
4679
+
4680
+ except (ValueError, RuntimeError) as e:
4681
+ message = '`x` and `y` must be broadcastable.'
4682
+ raise ValueError(message) from e
4683
+
4547
4684
  n = x.shape[axis]
4548
4685
  if n != y.shape[axis]:
4549
4686
  raise ValueError('`x` and `y` must have the same length along `axis`.')
@@ -4551,22 +4688,10 @@ def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0):
4551
4688
  if n < 2:
4552
4689
  raise ValueError('`x` and `y` must have length at least 2.')
4553
4690
 
4554
- try:
4555
- x, y = xp.broadcast_arrays(x, y)
4556
- except (ValueError, RuntimeError) as e:
4557
- message = '`x` and `y` must be broadcastable.'
4558
- raise ValueError(message) from e
4559
-
4560
- # `moveaxis` only recently added to array API, so it's not yey available in
4561
- # array_api_strict. Replace with e.g. `xp.moveaxis(x, axis, -1)` when available.
4562
- x = xp_moveaxis_to_end(x, axis, xp=xp)
4563
- y = xp_moveaxis_to_end(y, axis, xp=xp)
4691
+ x = xp.moveaxis(x, axis, -1)
4692
+ y = xp.moveaxis(y, axis, -1)
4564
4693
  axis = -1
4565
4694
 
4566
- dtype = xp.result_type(x.dtype, y.dtype)
4567
- if xp.isdtype(dtype, "integral"):
4568
- dtype = xp.asarray(1.).dtype
4569
-
4570
4695
  if xp.isdtype(dtype, "complex floating"):
4571
4696
  raise ValueError('This function does not support complex data')
4572
4697
 
@@ -4582,6 +4707,8 @@ def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0):
4582
4707
  msg = ("An input array is constant; the correlation coefficient "
4583
4708
  "is not defined.")
4584
4709
  warnings.warn(stats.ConstantInputWarning(msg), stacklevel=2)
4710
+ x = xp.where(const_x[..., xp.newaxis], xp.nan, x)
4711
+ y = xp.where(const_y[..., xp.newaxis], xp.nan, y)
4585
4712
 
4586
4713
  if isinstance(method, PermutationMethod):
4587
4714
  def statistic(y, axis):
@@ -4646,13 +4773,12 @@ def pearsonr(x, y, *, alternative='two-sided', method=None, axis=0):
4646
4773
  warnings.warn(stats.NearConstantInputWarning(msg), stacklevel=2)
4647
4774
 
4648
4775
  with np.errstate(invalid='ignore', divide='ignore'):
4649
- r = xp.sum(xm/normxm * ym/normym, axis=axis)
4776
+ r = xp.vecdot(xm / normxm, ym / normym, axis=axis)
4650
4777
 
4651
4778
  # Presumably, if abs(r) > 1, then it is only some small artifact of
4652
4779
  # floating point arithmetic.
4653
- one = xp.asarray(1, dtype=dtype)
4654
- r = xp.asarray(xp.clip(r, -one, one))
4655
- r[const_xy] = xp.nan
4780
+ r = xp.clip(r, -1., 1.)
4781
+ r = xpx.at(r, const_xy).set(xp.nan)
4656
4782
 
4657
4783
  # Make sure we return exact 1.0 or -1.0 values for n == 2 case as promised
4658
4784
  # in the docs.
@@ -5272,7 +5398,7 @@ def spearmanr(a, b=None, axis=0, nan_policy='propagate',
5272
5398
  res.correlation = np.nan
5273
5399
  return res
5274
5400
 
5275
- a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
5401
+ a_contains_nan = _contains_nan(a, nan_policy)
5276
5402
  variable_has_nan = np.zeros(n_vars, dtype=bool)
5277
5403
  if a_contains_nan:
5278
5404
  if nan_policy == 'omit':
@@ -5314,6 +5440,9 @@ def spearmanr(a, b=None, axis=0, nan_policy='propagate',
5314
5440
  return res
5315
5441
 
5316
5442
 
5443
+ @_axis_nan_policy_factory(_pack_CorrelationResult, n_samples=2,
5444
+ result_to_tuple=_unpack_CorrelationResult, paired=True,
5445
+ too_small=1, n_outputs=3)
5317
5446
  def pointbiserialr(x, y):
5318
5447
  r"""Calculate a point biserial correlation coefficient and its p-value.
5319
5448
 
@@ -5409,6 +5538,9 @@ def pointbiserialr(x, y):
5409
5538
  return res
5410
5539
 
5411
5540
 
5541
+ @_axis_nan_policy_factory(_pack_CorrelationResult, default_axis=None, n_samples=2,
5542
+ result_to_tuple=_unpack_CorrelationResult, paired=True,
5543
+ too_small=1, n_outputs=3)
5412
5544
  def kendalltau(x, y, *, nan_policy='propagate',
5413
5545
  method='auto', variant='b', alternative='two-sided'):
5414
5546
  r"""Calculate Kendall's tau, a correlation measure for ordinal data.
@@ -5489,8 +5621,8 @@ def kendalltau(x, y, *, nan_policy='propagate',
5489
5621
  tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)
5490
5622
 
5491
5623
  where P is the number of concordant pairs, Q the number of discordant
5492
- pairs, T the number of ties only in `x`, and U the number of ties only in
5493
- `y`. If a tie occurs for the same pair in both `x` and `y`, it is not
5624
+ pairs, T the number of tied pairs only in `x`, and U the number of tied pairs only
5625
+ in `y`. If a tie occurs for the same pair in both `x` and `y`, it is not
5494
5626
  added to either T or U. n is the total number of samples, and m is the
5495
5627
  number of unique values in either `x` or `y`, whichever is smaller.
5496
5628
 
@@ -5526,37 +5658,14 @@ def kendalltau(x, y, *, nan_policy='propagate',
5526
5658
  y = np.asarray(y).ravel()
5527
5659
 
5528
5660
  if x.size != y.size:
5529
- raise ValueError("All inputs to `kendalltau` must be of the same "
5530
- f"size, found x-size {x.size} and y-size {y.size}")
5661
+ raise ValueError("Array shapes are incompatible for broadcasting.")
5531
5662
  elif not x.size or not y.size:
5532
5663
  # Return NaN if arrays are empty
5533
- res = SignificanceResult(np.nan, np.nan)
5534
- res.correlation = np.nan
5535
- return res
5536
-
5537
- # check both x and y
5538
- cnx, npx = _contains_nan(x, nan_policy)
5539
- cny, npy = _contains_nan(y, nan_policy)
5540
- contains_nan = cnx or cny
5541
- if npx == 'omit' or npy == 'omit':
5542
- nan_policy = 'omit'
5543
-
5544
- if contains_nan and nan_policy == 'propagate':
5545
- res = SignificanceResult(np.nan, np.nan)
5546
- res.correlation = np.nan
5664
+ NaN = _get_nan(x, y)
5665
+ res = SignificanceResult(NaN, NaN)
5666
+ res.correlation = NaN
5547
5667
  return res
5548
5668
 
5549
- elif contains_nan and nan_policy == 'omit':
5550
- x = ma.masked_invalid(x)
5551
- y = ma.masked_invalid(y)
5552
- if variant == 'b':
5553
- return mstats_basic.kendalltau(x, y, method=method, use_ties=True,
5554
- alternative=alternative)
5555
- else:
5556
- message = ("nan_policy='omit' is currently compatible only with "
5557
- "variant='b'.")
5558
- raise ValueError(message)
5559
-
5560
5669
  def count_rank_tie(ranks):
5561
5670
  cnt = np.bincount(ranks).astype('int64', copy=False)
5562
5671
  cnt = cnt[cnt > 1]
@@ -5587,8 +5696,9 @@ def kendalltau(x, y, *, nan_policy='propagate',
5587
5696
  tot = (size * (size - 1)) // 2
5588
5697
 
5589
5698
  if xtie == tot or ytie == tot:
5590
- res = SignificanceResult(np.nan, np.nan)
5591
- res.correlation = np.nan
5699
+ NaN = _get_nan(x, y)
5700
+ res = SignificanceResult(NaN, NaN)
5701
+ res.correlation = NaN
5592
5702
  return res
5593
5703
 
5594
5704
  # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
@@ -5637,6 +5747,15 @@ def kendalltau(x, y, *, nan_policy='propagate',
5637
5747
  return res
5638
5748
 
5639
5749
 
5750
+ def _weightedtau_n_samples(kwargs):
5751
+ rank = kwargs.get('rank', False)
5752
+ return 2 if (isinstance(rank, bool) or rank is None) else 3
5753
+
5754
+
5755
+ @_axis_nan_policy_factory(_pack_CorrelationResult, default_axis=None,
5756
+ n_samples=_weightedtau_n_samples,
5757
+ result_to_tuple=_unpack_CorrelationResult, paired=True,
5758
+ too_small=1, n_outputs=3, override={'nan_propagation': False})
5640
5759
  def weightedtau(x, y, rank=True, weigher=None, additive=True):
5641
5760
  r"""Compute a weighted version of Kendall's :math:`\tau`.
5642
5761
 
@@ -5772,15 +5891,14 @@ def weightedtau(x, y, rank=True, weigher=None, additive=True):
5772
5891
  """
5773
5892
  x = np.asarray(x).ravel()
5774
5893
  y = np.asarray(y).ravel()
5894
+ NaN = _get_nan(x, y)
5775
5895
 
5776
5896
  if x.size != y.size:
5777
- raise ValueError("All inputs to `weightedtau` must be "
5778
- "of the same size, "
5779
- f"found x-size {x.size} and y-size {y.size}")
5897
+ raise ValueError("Array shapes are incompatible for broadcasting.")
5780
5898
  if not x.size:
5781
5899
  # Return NaN if arrays are empty
5782
- res = SignificanceResult(np.nan, np.nan)
5783
- res.correlation = np.nan
5900
+ res = SignificanceResult(NaN, NaN)
5901
+ res.correlation = NaN
5784
5902
  return res
5785
5903
 
5786
5904
  # If there are NaNs we apply _toint64()
@@ -5801,11 +5919,11 @@ def weightedtau(x, y, rank=True, weigher=None, additive=True):
5801
5919
  y = _toint64(y)
5802
5920
 
5803
5921
  if rank is True:
5804
- tau = (
5922
+ tau = np.asarray(
5805
5923
  _weightedrankedtau(x, y, None, weigher, additive) +
5806
5924
  _weightedrankedtau(y, x, None, weigher, additive)
5807
- ) / 2
5808
- res = SignificanceResult(tau, np.nan)
5925
+ )[()] / 2
5926
+ res = SignificanceResult(tau, NaN)
5809
5927
  res.correlation = tau
5810
5928
  return res
5811
5929
 
@@ -5813,14 +5931,15 @@ def weightedtau(x, y, rank=True, weigher=None, additive=True):
5813
5931
  rank = np.arange(x.size, dtype=np.intp)
5814
5932
  elif rank is not None:
5815
5933
  rank = np.asarray(rank).ravel()
5934
+ rank = _toint64(rank).astype(np.intp)
5816
5935
  if rank.size != x.size:
5817
5936
  raise ValueError(
5818
5937
  "All inputs to `weightedtau` must be of the same size, "
5819
5938
  f"found x-size {x.size} and rank-size {rank.size}"
5820
5939
  )
5821
5940
 
5822
- tau = _weightedrankedtau(x, y, rank, weigher, additive)
5823
- res = SignificanceResult(tau, np.nan)
5941
+ tau = np.asarray(_weightedrankedtau(x, y, rank, weigher, additive))[()]
5942
+ res = SignificanceResult(tau, NaN)
5824
5943
  res.correlation = tau
5825
5944
  return res
5826
5945
 
@@ -5908,11 +6027,13 @@ def pack_TtestResult(statistic, pvalue, df, alternative, standard_error,
5908
6027
  standard_error=standard_error, estimate=estimate)
5909
6028
 
5910
6029
 
5911
- def unpack_TtestResult(res):
6030
+ def unpack_TtestResult(res, _):
5912
6031
  return (res.statistic, res.pvalue, res.df, res._alternative,
5913
6032
  res._standard_error, res._estimate)
5914
6033
 
5915
6034
 
6035
+ @xp_capabilities(cpu_only=True, exceptions=["cupy", "jax.numpy"],
6036
+ jax_jit=False, allow_dask_compute=True)
5916
6037
  @_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
5917
6038
  result_to_tuple=unpack_TtestResult, n_outputs=6)
5918
6039
  # nan_policy handled by `_axis_nan_policy`, but needs to be left
@@ -6074,10 +6195,10 @@ def ttest_1samp(a, popmean, axis=0, nan_policy="propagate", alternative="two-sid
6074
6195
  xp = array_namespace(a)
6075
6196
  a, axis = _chk_asarray(a, axis, xp=xp)
6076
6197
 
6077
- n = a.shape[axis]
6198
+ n = _length_nonmasked(a, axis)
6078
6199
  df = n - 1
6079
6200
 
6080
- if n == 0:
6201
+ if a.shape[axis] == 0:
6081
6202
  # This is really only needed for *testing* _axis_nan_policy decorator
6082
6203
  # It won't happen when the decorator is used.
6083
6204
  NaN = _get_nan(a)
@@ -6118,27 +6239,29 @@ def _t_confidence_interval(df, t, confidence_level, alternative, dtype=None, xp=
6118
6239
  dtype = t.dtype if dtype is None else dtype
6119
6240
  xp = array_namespace(t) if xp is None else xp
6120
6241
 
6121
- # stdtrit not dispatched yet; use NumPy
6122
- df, t = np.asarray(df), np.asarray(t)
6123
-
6124
6242
  if confidence_level < 0 or confidence_level > 1:
6125
6243
  message = "`confidence_level` must be a number between 0 and 1."
6126
6244
  raise ValueError(message)
6127
6245
 
6246
+ confidence_level = xp.asarray(confidence_level, dtype=dtype)
6247
+ inf = xp.asarray(xp.inf, dtype=dtype)
6248
+
6128
6249
  if alternative < 0: # 'less'
6129
6250
  p = confidence_level
6130
- low, high = np.broadcast_arrays(-np.inf, special.stdtrit(df, p))
6251
+ low, high = xp.broadcast_arrays(-inf, special.stdtrit(df, p))
6131
6252
  elif alternative > 0: # 'greater'
6132
6253
  p = 1 - confidence_level
6133
- low, high = np.broadcast_arrays(special.stdtrit(df, p), np.inf)
6254
+ low, high = xp.broadcast_arrays(special.stdtrit(df, p), inf)
6134
6255
  elif alternative == 0: # 'two-sided'
6135
6256
  tail_probability = (1 - confidence_level)/2
6136
- p = tail_probability, 1-tail_probability
6257
+ p = xp.stack((tail_probability, 1-tail_probability))
6137
6258
  # axis of p must be the zeroth and orthogonal to all the rest
6138
- p = np.reshape(p, [2] + [1]*np.asarray(df).ndim)
6139
- low, high = special.stdtrit(df, p)
6259
+ p = xp.reshape(p, tuple([2] + [1]*xp.asarray(df).ndim))
6260
+ ci = special.stdtrit(df, p)
6261
+ low, high = ci[0, ...], ci[1, ...]
6140
6262
  else: # alternative is NaN when input is empty (see _axis_nan_policy)
6141
- p, nans = np.broadcast_arrays(t, np.nan)
6263
+ nan = xp.asarray(xp.nan)
6264
+ p, nans = xp.broadcast_arrays(t, nan)
6142
6265
  low, high = nans, nans
6143
6266
 
6144
6267
  low = xp.asarray(low, dtype=dtype)
@@ -6155,10 +6278,9 @@ def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative, xp=None):
6155
6278
  with np.errstate(divide='ignore', invalid='ignore'):
6156
6279
  t = xp.divide(d, denom)
6157
6280
 
6158
- t_np = np.asarray(t)
6159
- df_np = np.asarray(df)
6160
- prob = _get_pvalue(t_np, distributions.t(df_np), alternative, xp=np)
6161
- prob = xp.asarray(prob, dtype=t.dtype)
6281
+ dist = _SimpleStudentT(xp.asarray(df, dtype=t.dtype))
6282
+ prob = _get_pvalue(t, dist, alternative, xp=xp)
6283
+ prob = prob[()] if prob.ndim == 0 else prob
6162
6284
 
6163
6285
  t = t[()] if t.ndim == 0 else t
6164
6286
  prob = prob[()] if prob.ndim == 0 else prob
@@ -6174,7 +6296,7 @@ def _unequal_var_ttest_denom(v1, n1, v2, n2, xp=None):
6174
6296
 
6175
6297
  # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
6176
6298
  # Hence it doesn't matter what df is as long as it's not NaN.
6177
- df = xp.where(xp.isnan(df), xp.asarray(1.), df)
6299
+ df = xp.where(xp.isnan(df), 1., df)
6178
6300
  denom = xp.sqrt(vn1 + vn2)
6179
6301
  return df, denom
6180
6302
 
@@ -6187,19 +6309,20 @@ def _equal_var_ttest_denom(v1, n1, v2, n2, xp=None):
6187
6309
  # The pooled variance is still defined, though, because the (n-1) in the
6188
6310
  # numerator should cancel with the (n-1) in the denominator, leaving only
6189
6311
  # the sum of squared differences from the mean: zero.
6190
- zero = xp.asarray(0.)
6191
- v1 = xp.where(xp.asarray(n1 == 1), zero, v1)
6192
- v2 = xp.where(xp.asarray(n2 == 1), zero, v2)
6312
+ v1 = xp.where(xp.asarray(n1 == 1), 0., v1)
6313
+ v2 = xp.where(xp.asarray(n2 == 1), 0., v2)
6193
6314
 
6194
6315
  df = n1 + n2 - 2.0
6195
6316
  svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
6196
6317
  denom = xp.sqrt(svar * (1.0 / n1 + 1.0 / n2))
6318
+ df = xp.asarray(df, dtype=denom.dtype)
6197
6319
  return df, denom
6198
6320
 
6199
6321
 
6200
6322
  Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
6201
6323
 
6202
6324
 
6325
+ @xp_capabilities(cpu_only=True, exceptions=["cupy", "jax.numpy"])
6203
6326
  def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
6204
6327
  equal_var=True, alternative="two-sided"):
6205
6328
  r"""
@@ -6343,6 +6466,9 @@ def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
6343
6466
 
6344
6467
 
6345
6468
  _ttest_ind_dep_msg = "Use ``method`` to perform a permutation test."
6469
+
6470
+
6471
+ @xp_capabilities(cpu_only=True, exceptions=["cupy", "jax.numpy"])
6346
6472
  @_deprecate_positional_args(version='1.17.0',
6347
6473
  deprecated_args={'permutations', 'random_state'},
6348
6474
  custom_message=_ttest_ind_dep_msg)
@@ -6623,11 +6749,10 @@ def ttest_ind(a, b, *, axis=0, equal_var=True, nan_policy='propagate',
6623
6749
  """
6624
6750
  xp = array_namespace(a, b)
6625
6751
 
6626
- default_float = xp.asarray(1.).dtype
6627
- if xp.isdtype(a.dtype, 'integral'):
6628
- a = xp.astype(a, default_float)
6629
- if xp.isdtype(b.dtype, 'integral'):
6630
- b = xp.astype(b, default_float)
6752
+ a, b = xp_promote(a, b, force_floating=True, xp=xp)
6753
+
6754
+ if axis is None:
6755
+ a, b, axis = xp_ravel(a), xp_ravel(b), 0
6631
6756
 
6632
6757
  if not (0 <= trim < .5):
6633
6758
  raise ValueError("Trimming percentage should be 0 <= `trim` < .5.")
@@ -6642,8 +6767,7 @@ def ttest_ind(a, b, *, axis=0, equal_var=True, nan_policy='propagate',
6642
6767
  raise NotImplementedError(message)
6643
6768
 
6644
6769
  result_shape = _broadcast_array_shapes_remove_axis((a, b), axis=axis)
6645
- NaN = xp.full(result_shape, _get_nan(a, b, xp=xp))
6646
- NaN = NaN[()] if NaN.ndim == 0 else NaN
6770
+ NaN = _get_nan(a, b, shape=result_shape, xp=xp)
6647
6771
  if xp_size(a) == 0 or xp_size(b) == 0:
6648
6772
  return TtestResult(NaN, NaN, df=NaN, alternative=NaN,
6649
6773
  standard_error=NaN, estimate=NaN)
@@ -6671,8 +6795,8 @@ def ttest_ind(a, b, *, axis=0, equal_var=True, nan_policy='propagate',
6671
6795
  return TtestResult(t, prob, df=df, alternative=alternative_nums[alternative],
6672
6796
  standard_error=denom, estimate=estimate)
6673
6797
 
6674
- n1 = xp.asarray(a.shape[axis], dtype=a.dtype)
6675
- n2 = xp.asarray(b.shape[axis], dtype=b.dtype)
6798
+ n1 = _length_nonmasked(a, axis)
6799
+ n2 = _length_nonmasked(b, axis)
6676
6800
 
6677
6801
  if trim == 0:
6678
6802
  with np.errstate(divide='ignore', invalid='ignore'):
@@ -6932,6 +7056,8 @@ def _get_len(a, axis, msg):
6932
7056
  return n
6933
7057
 
6934
7058
 
7059
+ @xp_capabilities(cpu_only=True, exceptions=["cupy", "jax.numpy"],
7060
+ jax_jit=False, allow_dask_compute=True)
6935
7061
  @_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
6936
7062
  result_to_tuple=unpack_TtestResult, n_outputs=6,
6937
7063
  paired=True)
@@ -7034,7 +7160,7 @@ def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
7034
7160
  TtestResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09, df=499)
7035
7161
 
7036
7162
  """
7037
- return ttest_1samp(a - b, popmean=0, axis=axis, alternative=alternative,
7163
+ return ttest_1samp(a - b, popmean=0., axis=axis, alternative=alternative,
7038
7164
  _no_deco=True)
7039
7165
 
7040
7166
 
@@ -7049,50 +7175,17 @@ _power_div_lambda_names = {
7049
7175
  }
7050
7176
 
7051
7177
 
7052
- def _m_count(a, *, axis, xp):
7053
- """Count the number of non-masked elements of an array.
7054
-
7055
- This function behaves like `np.ma.count`, but is much faster
7056
- for ndarrays.
7057
- """
7058
- if hasattr(a, 'count'):
7059
- num = a.count(axis=axis)
7060
- if isinstance(num, np.ndarray) and num.ndim == 0:
7061
- # In some cases, the `count` method returns a scalar array (e.g.
7062
- # np.array(3)), but we want a plain integer.
7063
- num = int(num)
7064
- else:
7065
- if axis is None:
7066
- num = xp_size(a)
7067
- else:
7068
- num = a.shape[axis]
7069
- return num
7070
-
7071
-
7072
- def _m_broadcast_to(a, shape, *, xp):
7073
- if np.ma.isMaskedArray(a):
7074
- return np.ma.masked_array(np.broadcast_to(a, shape),
7075
- mask=np.broadcast_to(a.mask, shape))
7076
- return xp.broadcast_to(a, shape)
7077
-
7078
-
7079
- def _m_sum(a, *, axis, preserve_mask, xp):
7080
- if np.ma.isMaskedArray(a):
7081
- sum = a.sum(axis)
7082
- return sum if preserve_mask else np.asarray(sum)
7083
- return xp.sum(a, axis=axis)
7084
-
7085
-
7086
- def _m_mean(a, *, axis, keepdims, xp):
7087
- if np.ma.isMaskedArray(a):
7088
- return np.asarray(a.mean(axis=axis, keepdims=keepdims))
7089
- return xp.mean(a, axis=axis, keepdims=keepdims)
7090
-
7091
-
7092
7178
  Power_divergenceResult = namedtuple('Power_divergenceResult',
7093
7179
  ('statistic', 'pvalue'))
7094
7180
 
7095
7181
 
7182
+ def _pd_nsamples(kwargs):
7183
+ return 2 if kwargs.get('f_exp', None) is not None else 1
7184
+
7185
+
7186
+ @xp_capabilities(jax_jit=False, allow_dask_compute=True)
7187
+ @_axis_nan_policy_factory(Power_divergenceResult, paired=True, n_samples=_pd_nsamples,
7188
+ too_small=-1)
7096
7189
  def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
7097
7190
  """Cressie-Read power divergence statistic and goodness of fit test.
7098
7191
 
@@ -7104,19 +7197,9 @@ def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
7104
7197
  ----------
7105
7198
  f_obs : array_like
7106
7199
  Observed frequencies in each category.
7107
-
7108
- .. deprecated:: 1.14.0
7109
- Support for masked array input was deprecated in
7110
- SciPy 1.14.0 and will be removed in version 1.16.0.
7111
-
7112
7200
  f_exp : array_like, optional
7113
7201
  Expected frequencies in each category. By default the categories are
7114
7202
  assumed to be equally likely.
7115
-
7116
- .. deprecated:: 1.14.0
7117
- Support for masked array input was deprecated in
7118
- SciPy 1.14.0 and will be removed in version 1.16.0.
7119
-
7120
7203
  ddof : int, optional
7121
7204
  "Delta degrees of freedom": adjustment to the degrees of freedom
7122
7205
  for the p-value. The p-value is computed using a chi-squared
@@ -7265,8 +7348,8 @@ def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
7265
7348
 
7266
7349
 
7267
7350
  def _power_divergence(f_obs, f_exp, ddof, axis, lambda_, sum_check=True):
7268
- xp = array_namespace(f_obs)
7269
- default_float = xp.asarray(1.).dtype
7351
+ xp = array_namespace(f_obs, f_exp)
7352
+ f_obs, f_exp = xp_promote(f_obs, f_exp, force_floating=True, xp=xp)
7270
7353
 
7271
7354
  # Convert the input argument `lambda_` to a numerical value.
7272
7355
  if isinstance(lambda_, str):
@@ -7278,38 +7361,20 @@ def _power_divergence(f_obs, f_exp, ddof, axis, lambda_, sum_check=True):
7278
7361
  elif lambda_ is None:
7279
7362
  lambda_ = 1
7280
7363
 
7281
- def warn_masked(arg):
7282
- if isinstance(arg, ma.MaskedArray):
7283
- message = (
7284
- "`power_divergence` and `chisquare` support for masked array input was "
7285
- "deprecated in SciPy 1.14.0 and will be removed in version 1.16.0.")
7286
- warnings.warn(message, DeprecationWarning, stacklevel=2)
7287
-
7288
- warn_masked(f_obs)
7289
- f_obs = f_obs if np.ma.isMaskedArray(f_obs) else xp.asarray(f_obs)
7290
- dtype = default_float if xp.isdtype(f_obs.dtype, 'integral') else f_obs.dtype
7291
- f_obs = (f_obs.astype(dtype) if np.ma.isMaskedArray(f_obs)
7292
- else xp.asarray(f_obs, dtype=dtype))
7293
- f_obs_float = (f_obs.astype(np.float64) if hasattr(f_obs, 'mask')
7294
- else xp.asarray(f_obs, dtype=xp.float64))
7295
-
7296
7364
  if f_exp is not None:
7297
- warn_masked(f_exp)
7298
- f_exp = f_exp if np.ma.isMaskedArray(f_obs) else xp.asarray(f_exp)
7299
- dtype = default_float if xp.isdtype(f_exp.dtype, 'integral') else f_exp.dtype
7300
- f_exp = (f_exp.astype(dtype) if np.ma.isMaskedArray(f_exp)
7301
- else xp.asarray(f_exp, dtype=dtype))
7302
-
7365
+ # not sure why we force to float64, but not going to touch it
7366
+ f_obs_float = xp.asarray(f_obs, dtype=xp.float64)
7303
7367
  bshape = _broadcast_shapes((f_obs_float.shape, f_exp.shape))
7304
- f_obs_float = _m_broadcast_to(f_obs_float, bshape, xp=xp)
7305
- f_exp = _m_broadcast_to(f_exp, bshape, xp=xp)
7368
+ f_obs_float = xp.broadcast_to(f_obs_float, bshape)
7369
+ f_exp = xp.broadcast_to(f_exp, bshape)
7370
+ f_obs_float, f_exp = _share_masks(f_obs_float, f_exp, xp=xp)
7306
7371
 
7307
7372
  if sum_check:
7308
7373
  dtype_res = xp.result_type(f_obs.dtype, f_exp.dtype)
7309
7374
  rtol = xp.finfo(dtype_res).eps**0.5 # to pass existing tests
7310
7375
  with np.errstate(invalid='ignore'):
7311
- f_obs_sum = _m_sum(f_obs_float, axis=axis, preserve_mask=False, xp=xp)
7312
- f_exp_sum = _m_sum(f_exp, axis=axis, preserve_mask=False, xp=xp)
7376
+ f_obs_sum = xp.sum(f_obs_float, axis=axis)
7377
+ f_exp_sum = xp.sum(f_exp, axis=axis)
7313
7378
  relative_diff = (xp.abs(f_obs_sum - f_exp_sum) /
7314
7379
  xp.minimum(f_obs_sum, f_exp_sum))
7315
7380
  diff_gt_tol = xp.any(relative_diff > rtol, axis=None)
@@ -7322,10 +7387,10 @@ def _power_divergence(f_obs, f_exp, ddof, axis, lambda_, sum_check=True):
7322
7387
  raise ValueError(msg)
7323
7388
 
7324
7389
  else:
7325
- # Ignore 'invalid' errors so the edge case of a data set with length 0
7326
- # is handled without spurious warnings.
7327
- with np.errstate(invalid='ignore'):
7328
- f_exp = _m_mean(f_obs, axis=axis, keepdims=True, xp=xp)
7390
+ # Avoid warnings with the edge case of a data set with length 0
7391
+ with warnings.catch_warnings():
7392
+ warnings.simplefilter("ignore")
7393
+ f_exp = xp.mean(f_obs, axis=axis, keepdims=True)
7329
7394
 
7330
7395
  # `terms` is the array of terms that are summed along `axis` to create
7331
7396
  # the test statistic. We use some specialized code for a few special
@@ -7344,12 +7409,11 @@ def _power_divergence(f_obs, f_exp, ddof, axis, lambda_, sum_check=True):
7344
7409
  terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
7345
7410
  terms /= 0.5 * lambda_ * (lambda_ + 1)
7346
7411
 
7347
- stat = _m_sum(terms, axis=axis, preserve_mask=True, xp=xp)
7412
+ stat = xp.sum(terms, axis=axis)
7348
7413
 
7349
- num_obs = _m_count(terms, axis=axis, xp=xp)
7350
- ddof = xp.asarray(ddof)
7414
+ num_obs = xp.asarray(_length_nonmasked(terms, axis))
7351
7415
 
7352
- df = xp.asarray(num_obs - 1 - ddof)
7416
+ df = num_obs - 1 - ddof
7353
7417
  chi2 = _SimpleChi2(df)
7354
7418
  pvalue = _get_pvalue(stat, chi2 , alternative='greater', symmetric=False, xp=xp)
7355
7419
 
@@ -7359,6 +7423,10 @@ def _power_divergence(f_obs, f_exp, ddof, axis, lambda_, sum_check=True):
7359
7423
  return Power_divergenceResult(stat, pvalue)
7360
7424
 
7361
7425
 
7426
+
7427
+ @xp_capabilities(jax_jit=False, allow_dask_compute=True)
7428
+ @_axis_nan_policy_factory(Power_divergenceResult, paired=True, n_samples=_pd_nsamples,
7429
+ too_small=-1)
7362
7430
  def chisquare(f_obs, f_exp=None, ddof=0, axis=0, *, sum_check=True):
7363
7431
  """Perform Pearson's chi-squared test.
7364
7432
 
@@ -7565,7 +7633,7 @@ def _tuple_to_KstestResult(statistic, pvalue,
7565
7633
  statistic_sign=statistic_sign)
7566
7634
 
7567
7635
 
7568
- def _KstestResult_to_tuple(res):
7636
+ def _KstestResult_to_tuple(res, _):
7569
7637
  return *res, res.statistic_location, res.statistic_sign
7570
7638
 
7571
7639
 
@@ -8117,7 +8185,7 @@ def ks_2samp(data1, data2, alternative='two-sided', method='auto'):
8117
8185
  d = maxS
8118
8186
  d_location = loc_maxS
8119
8187
  d_sign = 1
8120
- g = gcd(n1, n2)
8188
+ g = math.gcd(n1, n2)
8121
8189
  n1g = n1 // g
8122
8190
  n2g = n2 // g
8123
8191
  prob = -np.inf
@@ -8810,9 +8878,11 @@ def brunnermunzel(x, y, alternative="two-sided", distribution="t",
8810
8878
  rankx_mean = np.mean(rankx)
8811
8879
  ranky_mean = np.mean(ranky)
8812
8880
 
8813
- Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
8881
+ temp_x = rankcx - rankx - rankcx_mean + rankx_mean
8882
+ Sx = np_vecdot(temp_x, temp_x)
8814
8883
  Sx /= nx - 1
8815
- Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
8884
+ temp_y = rankcy - ranky - rankcy_mean + ranky_mean
8885
+ Sy = np_vecdot(temp_y, temp_y)
8816
8886
  Sy /= ny - 1
8817
8887
 
8818
8888
  wbfn = nx * ny * (rankcy_mean - rankcx_mean)
@@ -8842,6 +8912,9 @@ def brunnermunzel(x, y, alternative="two-sided", distribution="t",
8842
8912
  return BrunnerMunzelResult(wbfn, p)
8843
8913
 
8844
8914
 
8915
+ @xp_capabilities(cpu_only=True, exceptions=['cupy', 'jax.numpy'],
8916
+ reason='Delegation for `special.stdtr` only implemented for CuPy and JAX.',
8917
+ jax_jit=False, allow_dask_compute=True)
8845
8918
  @_axis_nan_policy_factory(SignificanceResult, kwd_samples=['weights'], paired=True)
8846
8919
  def combine_pvalues(pvalues, method='fisher', weights=None, *, axis=0):
8847
8920
  """
@@ -8964,51 +9037,49 @@ def combine_pvalues(pvalues, method='fisher', weights=None, *, axis=0):
8964
9037
  .. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
8965
9038
 
8966
9039
  """
8967
- xp = array_namespace(pvalues)
8968
- pvalues = xp.asarray(pvalues)
9040
+ xp = array_namespace(pvalues, weights)
9041
+ pvalues, weights = xp_promote(pvalues, weights, broadcast=True,
9042
+ force_floating=True, xp=xp)
9043
+
8969
9044
  if xp_size(pvalues) == 0:
8970
9045
  # This is really only needed for *testing* _axis_nan_policy decorator
8971
9046
  # It won't happen when the decorator is used.
8972
9047
  NaN = _get_nan(pvalues)
8973
9048
  return SignificanceResult(NaN, NaN)
8974
9049
 
8975
- n = pvalues.shape[axis]
8976
- # used to convert Python scalar to the right dtype
8977
- one = xp.asarray(1, dtype=pvalues.dtype)
9050
+ n = _length_nonmasked(pvalues, axis)
9051
+ n = xp.asarray(n, dtype=pvalues.dtype)
8978
9052
 
8979
9053
  if method == 'fisher':
8980
9054
  statistic = -2 * xp.sum(xp.log(pvalues), axis=axis)
8981
- chi2 = _SimpleChi2(2*n*one)
9055
+ chi2 = _SimpleChi2(2*n)
8982
9056
  pval = _get_pvalue(statistic, chi2, alternative='greater',
8983
9057
  symmetric=False, xp=xp)
8984
9058
  elif method == 'pearson':
8985
9059
  statistic = 2 * xp.sum(xp.log1p(-pvalues), axis=axis)
8986
- chi2 = _SimpleChi2(2*n*one)
9060
+ chi2 = _SimpleChi2(2*n)
8987
9061
  pval = _get_pvalue(-statistic, chi2, alternative='less', symmetric=False, xp=xp)
8988
9062
  elif method == 'mudholkar_george':
8989
- normalizing_factor = math.sqrt(3/n)/xp.pi
9063
+ normalizing_factor = xp.sqrt(3/n)/xp.pi
8990
9064
  statistic = (-xp.sum(xp.log(pvalues), axis=axis)
8991
9065
  + xp.sum(xp.log1p(-pvalues), axis=axis))
8992
- nu = 5*n + 4
8993
- approx_factor = math.sqrt(nu / (nu - 2))
8994
- t = _SimpleStudentT(nu*one)
9066
+ nu = 5*n + 4
9067
+ approx_factor = xp.sqrt(nu / (nu - 2))
9068
+ t = _SimpleStudentT(nu)
8995
9069
  pval = _get_pvalue(statistic * normalizing_factor * approx_factor, t,
8996
9070
  alternative="greater", xp=xp)
8997
9071
  elif method == 'tippett':
8998
9072
  statistic = xp.min(pvalues, axis=axis)
8999
- beta = _SimpleBeta(one, n*one)
9073
+ beta = _SimpleBeta(xp.ones_like(n), n)
9000
9074
  pval = _get_pvalue(statistic, beta, alternative='less', symmetric=False, xp=xp)
9001
9075
  elif method == 'stouffer':
9002
9076
  if weights is None:
9003
9077
  weights = xp.ones_like(pvalues, dtype=pvalues.dtype)
9004
- elif weights.shape[axis] != n:
9005
- raise ValueError("pvalues and weights must be of the same "
9006
- "length along `axis`.")
9078
+ pvalues, weights = _share_masks(pvalues, weights, xp=xp)
9007
9079
 
9008
9080
  norm = _SimpleNormal()
9009
9081
  Zi = norm.isf(pvalues)
9010
- # could use `einsum` or clever `matmul` for performance,
9011
- # but this is the most readable
9082
+ # Consider `vecdot` when data-apis/array-api#910 is resolved
9012
9083
  statistic = (xp.sum(weights * Zi, axis=axis)
9013
9084
  / xp_vector_norm(weights, axis=axis))
9014
9085
  pval = _get_pvalue(statistic, norm, alternative="greater", xp=xp)
@@ -9431,8 +9502,8 @@ def quantile_test(x, *, q=0, p=0.5, alternative='two-sided'):
9431
9502
  # "We will use two test statistics in this test. Let T1 equal "
9432
9503
  # "the number of observations less than or equal to x*, and "
9433
9504
  # "let T2 equal the number of observations less than x*."
9434
- T1 = (X <= x_star).sum()
9435
- T2 = (X < x_star).sum()
9505
+ T1 = np.count_nonzero(X <= x_star)
9506
+ T2 = np.count_nonzero(X < x_star)
9436
9507
 
9437
9508
  # "The null distribution of the test statistics T1 and T2 is "
9438
9509
  # "the binomial distribution, with parameters n = sample size, and "
@@ -9950,11 +10021,10 @@ def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
9950
10021
  # If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
9951
10022
  # of about 15%.
9952
10023
  if p == 1:
9953
- return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
10024
+ return np_vecdot(np.abs(u_cdf - v_cdf), deltas)
9954
10025
  if p == 2:
9955
- return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
9956
- return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
9957
- deltas)), 1/p)
10026
+ return np.sqrt(np_vecdot(np.square(u_cdf - v_cdf), deltas))
10027
+ return np.power(np_vecdot(np.power(np.abs(u_cdf - v_cdf), p), deltas), 1/p)
9958
10028
 
9959
10029
 
9960
10030
  def _validate_distribution(values, weights):
@@ -10073,7 +10143,7 @@ def _sum_of_squares(a, axis=0):
10073
10143
 
10074
10144
  """
10075
10145
  a, axis = _chk_asarray(a, axis)
10076
- return np.sum(a*a, axis)
10146
+ return np_vecdot(a, a, axis=axis)
10077
10147
 
10078
10148
 
10079
10149
  def _square_of_sums(a, axis=0):
@@ -10105,6 +10175,10 @@ def _square_of_sums(a, axis=0):
10105
10175
  return float(s) * s
10106
10176
 
10107
10177
 
10178
+ @xp_capabilities(skip_backends=[("torch", "no `repeat`"),
10179
+ ("cupy", "`repeat` can't handle array second arg"),
10180
+ ("dask.array", "no `take_along_axis`")],
10181
+ jax_jit=False, allow_dask_compute=True)
10108
10182
  def rankdata(a, method='average', *, axis=None, nan_policy='propagate'):
10109
10183
  """Assign ranks to data, dealing with ties appropriately.
10110
10184
 
@@ -10196,61 +10270,71 @@ def rankdata(a, method='average', *, axis=None, nan_policy='propagate'):
10196
10270
  if method not in methods:
10197
10271
  raise ValueError(f'unknown method "{method}"')
10198
10272
 
10199
- x = np.asarray(a)
10273
+ xp = array_namespace(a)
10274
+ x = xp.asarray(a)
10200
10275
 
10201
10276
  if axis is None:
10202
- x = x.ravel()
10277
+ x = xp_ravel(x)
10203
10278
  axis = -1
10204
10279
 
10205
- if x.size == 0:
10206
- dtype = float if method == 'average' else np.dtype("long")
10207
- return np.empty(x.shape, dtype=dtype)
10280
+ if xp_size(x) == 0:
10281
+ dtype = xp.asarray(1.).dtype if method == 'average' else xp.asarray(1).dtype
10282
+ return xp.empty(x.shape, dtype=dtype)
10208
10283
 
10209
- contains_nan, nan_policy = _contains_nan(x, nan_policy)
10284
+ contains_nan = _contains_nan(x, nan_policy)
10210
10285
 
10211
- x = np.swapaxes(x, axis, -1)
10212
- ranks = _rankdata(x, method)
10286
+ x = xp_swapaxes(x, axis, -1, xp=xp)
10287
+ ranks = _rankdata(x, method, xp=xp)
10213
10288
 
10214
10289
  if contains_nan:
10215
- i_nan = (np.isnan(x) if nan_policy == 'omit'
10216
- else np.isnan(x).any(axis=-1))
10217
- ranks = ranks.astype(float, copy=False)
10218
- ranks[i_nan] = np.nan
10290
+ default_float = xp_default_dtype(xp)
10291
+ i_nan = (xp.isnan(x) if nan_policy == 'omit'
10292
+ else xp.any(xp.isnan(x), axis=-1))
10293
+ ranks = xp.asarray(ranks, dtype=default_float) # copy=False when implemented
10294
+ ranks[i_nan] = xp.nan
10219
10295
 
10220
- ranks = np.swapaxes(ranks, axis, -1)
10296
+ ranks = xp_swapaxes(ranks, axis, -1, xp=xp)
10221
10297
  return ranks
10222
10298
 
10223
10299
 
10224
- def _order_ranks(ranks, j):
10300
+ def _order_ranks(ranks, j, *, xp):
10225
10301
  # Reorder ascending order `ranks` according to `j`
10226
- ordered_ranks = np.empty(j.shape, dtype=ranks.dtype)
10227
- np.put_along_axis(ordered_ranks, j, ranks, axis=-1)
10302
+ xp = array_namespace(ranks) if xp is None else xp
10303
+ if is_numpy(xp) or is_cupy(xp):
10304
+ ordered_ranks = xp.empty(j.shape, dtype=ranks.dtype)
10305
+ xp.put_along_axis(ordered_ranks, j, ranks, axis=-1)
10306
+ else:
10307
+ # `put_along_axis` not in array API (data-apis/array-api#177)
10308
+ # so argsort the argsort and take_along_axis...
10309
+ j_inv = xp.argsort(j, axis=-1)
10310
+ ordered_ranks = xp.take_along_axis(ranks, j_inv, axis=-1)
10228
10311
  return ordered_ranks
10229
10312
 
10230
10313
 
10231
- def _rankdata(x, method, return_ties=False):
10314
+ def _rankdata(x, method, return_ties=False, xp=None):
10232
10315
  # Rank data `x` by desired `method`; `return_ties` if desired
10316
+ xp = array_namespace(x) if xp is None else xp
10233
10317
  shape = x.shape
10318
+ dtype = xp.asarray(1.).dtype if method == 'average' else xp.asarray(1).dtype
10234
10319
 
10235
10320
  # Get sort order
10236
- kind = 'mergesort' if method == 'ordinal' else 'quicksort'
10237
- j = np.argsort(x, axis=-1, kind=kind)
10238
- ordinal_ranks = np.broadcast_to(np.arange(1, shape[-1]+1, dtype=int), shape)
10321
+ j = xp.argsort(x, axis=-1)
10322
+ ordinal_ranks = xp.broadcast_to(xp.arange(1, shape[-1]+1, dtype=dtype), shape)
10239
10323
 
10240
10324
  # Ordinal ranks is very easy because ties don't matter. We're done.
10241
10325
  if method == 'ordinal':
10242
- return _order_ranks(ordinal_ranks, j) # never return ties
10326
+ return _order_ranks(ordinal_ranks, j, xp=xp) # never return ties
10243
10327
 
10244
10328
  # Sort array
10245
- y = np.take_along_axis(x, j, axis=-1)
10329
+ y = xp.take_along_axis(x, j, axis=-1)
10246
10330
  # Logical indices of unique elements
10247
- i = np.concatenate([np.ones(shape[:-1] + (1,), dtype=np.bool_),
10248
- y[..., :-1] != y[..., 1:]], axis=-1)
10331
+ i = xp.concat([xp.ones(shape[:-1] + (1,), dtype=xp.bool),
10332
+ y[..., :-1] != y[..., 1:]], axis=-1)
10249
10333
 
10250
10334
  # Integer indices of unique elements
10251
- indices = np.arange(y.size)[i.ravel()]
10335
+ indices = xp.arange(xp_size(y))[xp.reshape(i, (-1,))] # i gets raveled
10252
10336
  # Counts of unique elements
10253
- counts = np.diff(indices, append=y.size)
10337
+ counts = xp.diff(indices, append=xp.asarray([xp_size(y)], dtype=indices.dtype))
10254
10338
 
10255
10339
  # Compute `'min'`, `'max'`, and `'mid'` ranks of unique elements
10256
10340
  if method == 'min':
@@ -10258,12 +10342,13 @@ def _rankdata(x, method, return_ties=False):
10258
10342
  elif method == 'max':
10259
10343
  ranks = ordinal_ranks[i] + counts - 1
10260
10344
  elif method == 'average':
10261
- ranks = ordinal_ranks[i] + (counts - 1)/2
10345
+ # array API doesn't promote integers to floats
10346
+ ranks = ordinal_ranks[i] + (xp.asarray(counts, dtype=dtype) - 1)/2
10262
10347
  elif method == 'dense':
10263
- ranks = np.cumsum(i, axis=-1)[i]
10348
+ ranks = xp.cumulative_sum(xp.astype(i, dtype, copy=False), axis=-1)[i]
10264
10349
 
10265
- ranks = np.repeat(ranks, counts).reshape(shape)
10266
- ranks = _order_ranks(ranks, j)
10350
+ ranks = xp.reshape(xp.repeat(ranks, counts), shape)
10351
+ ranks = _order_ranks(ranks, j, xp=xp)
10267
10352
 
10268
10353
  if return_ties:
10269
10354
  # Tie information is returned in a format that is useful to functions that
@@ -10282,7 +10367,7 @@ def _rankdata(x, method, return_ties=False):
10282
10367
  # sorted order, so this does not unnecessarily reorder them.
10283
10368
  # - One exception is `wilcoxon`, which needs the number of zeros. Zeros always
10284
10369
  # have the lowest rank, so it is easy to find them at the zeroth index.
10285
- t = np.zeros(shape, dtype=float)
10370
+ t = xp.zeros(shape, dtype=xp.float64)
10286
10371
  t[i] = counts
10287
10372
  return ranks, t
10288
10373
  return ranks
@@ -10380,7 +10465,8 @@ def expectile(a, alpha=0.5, *, weights=None):
10380
10465
  >>> expectile(a, alpha=0.8)
10381
10466
  2.5714285714285716
10382
10467
  >>> weights = [1, 3, 1, 1]
10383
-
10468
+ >>> expectile(a, alpha=0.8, weights=weights)
10469
+ 3.3333333333333335
10384
10470
  """
10385
10471
  if alpha < 0 or alpha > 1:
10386
10472
  raise ValueError(
@@ -10456,7 +10542,7 @@ def _br(x, *, r=0):
10456
10542
  x = np.triu(x)
10457
10543
  j = np.arange(n, dtype=x.dtype)
10458
10544
  n = np.asarray(n, dtype=x.dtype)[()]
10459
- return (np.sum(special.binom(j, r[:, np.newaxis])*x, axis=-1)
10545
+ return (np_vecdot(special.binom(j, r[:, np.newaxis]), x, axis=-1)
10460
10546
  / special.binom(n-1, r) / n)
10461
10547
 
10462
10548
 
@@ -10546,7 +10632,7 @@ def lmoment(sample, order=None, *, axis=0, sorted=False, standardize=True):
10546
10632
  n = sample.shape[-1]
10547
10633
  bk[..., n:] = 0 # remove NaNs due to n_moments > n
10548
10634
 
10549
- lmoms = np.sum(prk * bk, axis=-1)
10635
+ lmoms = np_vecdot(prk, bk, axis=-1)
10550
10636
  if standardize and n_moments > 2:
10551
10637
  lmoms[2:] /= lmoms[1]
10552
10638
 
@@ -10560,24 +10646,26 @@ LinregressResult = _make_tuple_bunch('LinregressResult',
10560
10646
  extra_field_names=['intercept_stderr'])
10561
10647
 
10562
10648
 
10563
- def linregress(x, y=None, alternative='two-sided'):
10649
+ def _pack_LinregressResult(slope, intercept, rvalue, pvalue, stderr, intercept_stderr):
10650
+ return LinregressResult(slope, intercept, rvalue, pvalue, stderr,
10651
+ intercept_stderr=intercept_stderr)
10652
+
10653
+
10654
+ def _unpack_LinregressResult(res, _):
10655
+ return tuple(res) + (res.intercept_stderr,)
10656
+
10657
+
10658
+ @_axis_nan_policy_factory(_pack_LinregressResult, n_samples=2,
10659
+ result_to_tuple=_unpack_LinregressResult, paired=True,
10660
+ too_small=1, n_outputs=6)
10661
+ def linregress(x, y, alternative='two-sided'):
10564
10662
  """
10565
10663
  Calculate a linear least-squares regression for two sets of measurements.
10566
10664
 
10567
10665
  Parameters
10568
10666
  ----------
10569
10667
  x, y : array_like
10570
- Two sets of measurements. Both arrays should have the same length N. If
10571
- only `x` is given (and ``y=None``), then it must be a two-dimensional
10572
- array where one dimension has length 2. The two sets of measurements
10573
- are then found by splitting the array along the length-2 dimension. In
10574
- the case where ``y=None`` and `x` is a 2xN array, ``linregress(x)`` is
10575
- equivalent to ``linregress(x[0], x[1])``.
10576
-
10577
- .. deprecated:: 1.14.0
10578
- Inference of the two sets of measurements from a single argument `x`
10579
- is deprecated will result in an error in SciPy 1.16.0; the sets
10580
- must be specified separately as `x` and `y`.
10668
+ Two sets of measurements. Both arrays should have the same length N.
10581
10669
  alternative : {'two-sided', 'less', 'greater'}, optional
10582
10670
  Defines the alternative hypothesis. Default is 'two-sided'.
10583
10671
  The following options are available:
@@ -10679,24 +10767,8 @@ def linregress(x, y=None, alternative='two-sided'):
10679
10767
 
10680
10768
  """
10681
10769
  TINY = 1.0e-20
10682
- if y is None: # x is a (2, N) or (N, 2) shaped array_like
10683
- message = ('Inference of the two sets of measurements from a single "'
10684
- 'argument `x` is deprecated will result in an error in "'
10685
- 'SciPy 1.16.0; the sets must be specified separately as "'
10686
- '`x` and `y`.')
10687
- warnings.warn(message, DeprecationWarning, stacklevel=2)
10688
- x = np.asarray(x)
10689
- if x.shape[0] == 2:
10690
- x, y = x
10691
- elif x.shape[1] == 2:
10692
- x, y = x.T
10693
- else:
10694
- raise ValueError("If only `x` is given as input, it has to "
10695
- "be of shape (2, N) or (N, 2); provided shape "
10696
- f"was {x.shape}.")
10697
- else:
10698
- x = np.asarray(x)
10699
- y = np.asarray(y)
10770
+ x = np.asarray(x)
10771
+ y = np.asarray(y)
10700
10772
 
10701
10773
  if x.size == 0 or y.size == 0:
10702
10774
  raise ValueError("Inputs must not be empty.")
@@ -10718,7 +10790,7 @@ def linregress(x, y=None, alternative='two-sided'):
10718
10790
  # r = ssxym / sqrt( ssxm * ssym )
10719
10791
  if ssxm == 0.0 or ssym == 0.0:
10720
10792
  # If the denominator was going to be 0
10721
- r = 0.0
10793
+ r = np.asarray(np.nan if ssxym == 0 else 0.0)[()]
10722
10794
  else:
10723
10795
  r = ssxym / np.sqrt(ssxm * ssym)
10724
10796
  # Test for numerical error propagation (make sure -1 < r < 1)
@@ -10844,7 +10916,9 @@ def _xp_mean(x, /, *, axis=None, weights=None, keepdims=False, nan_policy='propa
10844
10916
  or (weights is not None and xp_size(weights) == 0)):
10845
10917
  return gmean(x, weights=weights, axis=axis, keepdims=keepdims)
10846
10918
 
10847
- x, weights = xp_broadcast_promote(x, weights, force_floating=True)
10919
+ x, weights = xp_promote(x, weights, broadcast=True, force_floating=True, xp=xp)
10920
+ if weights is not None:
10921
+ x, weights = _share_masks(x, weights, xp=xp)
10848
10922
 
10849
10923
  # handle the special case of zero-sized arrays
10850
10924
  message = (too_small_1d_not_omit if (x.ndim == 1 or axis is None)
@@ -10857,29 +10931,33 @@ def _xp_mean(x, /, *, axis=None, weights=None, keepdims=False, nan_policy='propa
10857
10931
  warnings.warn(message, SmallSampleWarning, stacklevel=2)
10858
10932
  return res
10859
10933
 
10860
- contains_nan, _ = _contains_nan(x, nan_policy, xp_omit_okay=True, xp=xp)
10934
+ contains_nan = _contains_nan(x, nan_policy, xp_omit_okay=True, xp=xp)
10861
10935
  if weights is not None:
10862
- contains_nan_w, _ = _contains_nan(weights, nan_policy, xp_omit_okay=True, xp=xp)
10936
+ contains_nan_w = _contains_nan(weights, nan_policy, xp_omit_okay=True, xp=xp)
10863
10937
  contains_nan = contains_nan | contains_nan_w
10864
10938
 
10865
10939
  # Handle `nan_policy='omit'` by giving zero weight to NaNs, whether they
10866
10940
  # appear in `x` or `weights`. Emit warning if there is an all-NaN slice.
10867
- message = (too_small_1d_omit if (x.ndim == 1 or axis is None)
10868
- else too_small_nd_omit)
10869
- if contains_nan and nan_policy == 'omit':
10941
+ # Test nan_policy before the implicit call to bool(contains_nan)
10942
+ # to avoid raising on lazy xps on the default nan_policy='propagate'
10943
+ lazy = is_lazy_array(x)
10944
+ if nan_policy == 'omit' and (lazy or contains_nan):
10870
10945
  nan_mask = xp.isnan(x)
10871
10946
  if weights is not None:
10872
10947
  nan_mask |= xp.isnan(weights)
10873
- if xp.any(xp.all(nan_mask, axis=axis)):
10948
+ if not lazy and xp.any(xp.all(nan_mask, axis=axis)):
10949
+ message = (too_small_1d_omit if (x.ndim == 1 or axis is None)
10950
+ else too_small_nd_omit)
10874
10951
  warnings.warn(message, SmallSampleWarning, stacklevel=2)
10875
10952
  weights = xp.ones_like(x) if weights is None else weights
10876
- x = xp.where(nan_mask, xp.asarray(0, dtype=x.dtype), x)
10877
- weights = xp.where(nan_mask, xp.asarray(0, dtype=x.dtype), weights)
10953
+ x = xp.where(nan_mask, 0., x)
10954
+ weights = xp.where(nan_mask, 0., weights)
10878
10955
 
10879
10956
  # Perform the mean calculation itself
10880
10957
  if weights is None:
10881
10958
  return xp.mean(x, axis=axis, keepdims=keepdims)
10882
10959
 
10960
+ # consider using `vecdot` if `axis` tuple support is added (data-apis/array-api#910)
10883
10961
  norm = xp.sum(weights, axis=axis)
10884
10962
  wsum = xp.sum(x * weights, axis=axis)
10885
10963
  with np.errstate(divide='ignore', invalid='ignore'):
@@ -10924,12 +11002,7 @@ def _xp_var(x, /, *, axis=None, correction=0, keepdims=False, nan_policy='propag
10924
11002
  var = _xp_mean(x_mean * x_mean_conj, keepdims=keepdims, **kwargs)
10925
11003
 
10926
11004
  if correction != 0:
10927
- if axis is None:
10928
- n = xp_size(x)
10929
- elif np.iterable(axis): # note: using NumPy on `axis` is OK
10930
- n = math.prod(x.shape[i] for i in axis)
10931
- else:
10932
- n = x.shape[axis]
11005
+ n = _length_nonmasked(x, axis, xp=xp)
10933
11006
  # Or two lines with ternaries : )
10934
11007
  # axis = range(x.ndim) if axis is None else axis
10935
11008
  # n = math.prod(x.shape[i] for i in axis) if iterable(axis) else x.shape[axis]
@@ -10941,7 +11014,8 @@ def _xp_var(x, /, *, axis=None, correction=0, keepdims=False, nan_policy='propag
10941
11014
  n = n - xp.sum(nan_mask, axis=axis, keepdims=keepdims)
10942
11015
 
10943
11016
  # Produce NaNs silently when n - correction <= 0
10944
- factor = _lazywhere(n-correction > 0, (n, n-correction), xp.divide, xp.nan)
11017
+ nc = n - correction
11018
+ factor = xpx.apply_where(nc > 0, (n, nc), operator.truediv, fill_value=xp.nan)
10945
11019
  var *= factor
10946
11020
 
10947
11021
  return var[()] if var.ndim == 0 else var