scipy-1.16.2-cp313-cp313t-win_arm64.whl
This diff represents the contents of a publicly available package version released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry. Each entry below lists a file path followed by its added and removed line counts; binary artifacts (e.g. `.pyd`, `.lib`, `.npz`, `.mat`) show `+0 -0` because line counts apply only to text files.
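As a minimal sketch (not part of the diff itself), the file listing below can be reproduced locally because a wheel is a plain zip archive. The local filename here is an assumption matching the title; adjust the path to wherever the wheel was downloaded.

```python
# Sketch: enumerate the members of a downloaded scipy wheel.
# Assumes the wheel file sits in the current directory under this name.
from zipfile import ZipFile

wheel_path = "scipy-1.16.2-cp313-cp313t-win_arm64.whl"  # hypothetical local path

with ZipFile(wheel_path) as wheel:
    for member in wheel.infolist():
        # Binary members (.pyd, .lib, data files) have no line counts in the
        # diff above; file_size still reports their size in bytes here.
        print(member.filename, member.file_size)
```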
- scipy/__config__.py +161 -0
- scipy/__init__.py +150 -0
- scipy/_cyutility.cp313t-win_arm64.lib +0 -0
- scipy/_cyutility.cp313t-win_arm64.pyd +0 -0
- scipy/_distributor_init.py +18 -0
- scipy/_lib/__init__.py +14 -0
- scipy/_lib/_array_api.py +931 -0
- scipy/_lib/_array_api_compat_vendor.py +9 -0
- scipy/_lib/_array_api_no_0d.py +103 -0
- scipy/_lib/_bunch.py +229 -0
- scipy/_lib/_ccallback.py +251 -0
- scipy/_lib/_ccallback_c.cp313t-win_arm64.lib +0 -0
- scipy/_lib/_ccallback_c.cp313t-win_arm64.pyd +0 -0
- scipy/_lib/_disjoint_set.py +254 -0
- scipy/_lib/_docscrape.py +761 -0
- scipy/_lib/_elementwise_iterative_method.py +346 -0
- scipy/_lib/_fpumode.cp313t-win_arm64.lib +0 -0
- scipy/_lib/_fpumode.cp313t-win_arm64.pyd +0 -0
- scipy/_lib/_gcutils.py +105 -0
- scipy/_lib/_pep440.py +487 -0
- scipy/_lib/_sparse.py +41 -0
- scipy/_lib/_test_ccallback.cp313t-win_arm64.lib +0 -0
- scipy/_lib/_test_ccallback.cp313t-win_arm64.pyd +0 -0
- scipy/_lib/_test_deprecation_call.cp313t-win_arm64.lib +0 -0
- scipy/_lib/_test_deprecation_call.cp313t-win_arm64.pyd +0 -0
- scipy/_lib/_test_deprecation_def.cp313t-win_arm64.lib +0 -0
- scipy/_lib/_test_deprecation_def.cp313t-win_arm64.pyd +0 -0
- scipy/_lib/_testutils.py +373 -0
- scipy/_lib/_threadsafety.py +58 -0
- scipy/_lib/_tmpdirs.py +86 -0
- scipy/_lib/_uarray/LICENSE +29 -0
- scipy/_lib/_uarray/__init__.py +116 -0
- scipy/_lib/_uarray/_backend.py +707 -0
- scipy/_lib/_uarray/_uarray.cp313t-win_arm64.lib +0 -0
- scipy/_lib/_uarray/_uarray.cp313t-win_arm64.pyd +0 -0
- scipy/_lib/_util.py +1283 -0
- scipy/_lib/array_api_compat/__init__.py +22 -0
- scipy/_lib/array_api_compat/_internal.py +59 -0
- scipy/_lib/array_api_compat/common/__init__.py +1 -0
- scipy/_lib/array_api_compat/common/_aliases.py +727 -0
- scipy/_lib/array_api_compat/common/_fft.py +213 -0
- scipy/_lib/array_api_compat/common/_helpers.py +1058 -0
- scipy/_lib/array_api_compat/common/_linalg.py +232 -0
- scipy/_lib/array_api_compat/common/_typing.py +192 -0
- scipy/_lib/array_api_compat/cupy/__init__.py +13 -0
- scipy/_lib/array_api_compat/cupy/_aliases.py +156 -0
- scipy/_lib/array_api_compat/cupy/_info.py +336 -0
- scipy/_lib/array_api_compat/cupy/_typing.py +31 -0
- scipy/_lib/array_api_compat/cupy/fft.py +36 -0
- scipy/_lib/array_api_compat/cupy/linalg.py +49 -0
- scipy/_lib/array_api_compat/dask/__init__.py +0 -0
- scipy/_lib/array_api_compat/dask/array/__init__.py +12 -0
- scipy/_lib/array_api_compat/dask/array/_aliases.py +376 -0
- scipy/_lib/array_api_compat/dask/array/_info.py +416 -0
- scipy/_lib/array_api_compat/dask/array/fft.py +21 -0
- scipy/_lib/array_api_compat/dask/array/linalg.py +72 -0
- scipy/_lib/array_api_compat/numpy/__init__.py +28 -0
- scipy/_lib/array_api_compat/numpy/_aliases.py +190 -0
- scipy/_lib/array_api_compat/numpy/_info.py +366 -0
- scipy/_lib/array_api_compat/numpy/_typing.py +30 -0
- scipy/_lib/array_api_compat/numpy/fft.py +35 -0
- scipy/_lib/array_api_compat/numpy/linalg.py +143 -0
- scipy/_lib/array_api_compat/torch/__init__.py +22 -0
- scipy/_lib/array_api_compat/torch/_aliases.py +855 -0
- scipy/_lib/array_api_compat/torch/_info.py +369 -0
- scipy/_lib/array_api_compat/torch/_typing.py +3 -0
- scipy/_lib/array_api_compat/torch/fft.py +85 -0
- scipy/_lib/array_api_compat/torch/linalg.py +121 -0
- scipy/_lib/array_api_extra/__init__.py +38 -0
- scipy/_lib/array_api_extra/_delegation.py +171 -0
- scipy/_lib/array_api_extra/_lib/__init__.py +1 -0
- scipy/_lib/array_api_extra/_lib/_at.py +463 -0
- scipy/_lib/array_api_extra/_lib/_backends.py +46 -0
- scipy/_lib/array_api_extra/_lib/_funcs.py +937 -0
- scipy/_lib/array_api_extra/_lib/_lazy.py +357 -0
- scipy/_lib/array_api_extra/_lib/_testing.py +278 -0
- scipy/_lib/array_api_extra/_lib/_utils/__init__.py +1 -0
- scipy/_lib/array_api_extra/_lib/_utils/_compat.py +74 -0
- scipy/_lib/array_api_extra/_lib/_utils/_compat.pyi +45 -0
- scipy/_lib/array_api_extra/_lib/_utils/_helpers.py +559 -0
- scipy/_lib/array_api_extra/_lib/_utils/_typing.py +10 -0
- scipy/_lib/array_api_extra/_lib/_utils/_typing.pyi +105 -0
- scipy/_lib/array_api_extra/testing.py +359 -0
- scipy/_lib/cobyqa/__init__.py +20 -0
- scipy/_lib/cobyqa/framework.py +1240 -0
- scipy/_lib/cobyqa/main.py +1506 -0
- scipy/_lib/cobyqa/models.py +1529 -0
- scipy/_lib/cobyqa/problem.py +1296 -0
- scipy/_lib/cobyqa/settings.py +132 -0
- scipy/_lib/cobyqa/subsolvers/__init__.py +14 -0
- scipy/_lib/cobyqa/subsolvers/geometry.py +387 -0
- scipy/_lib/cobyqa/subsolvers/optim.py +1203 -0
- scipy/_lib/cobyqa/utils/__init__.py +18 -0
- scipy/_lib/cobyqa/utils/exceptions.py +22 -0
- scipy/_lib/cobyqa/utils/math.py +77 -0
- scipy/_lib/cobyqa/utils/versions.py +67 -0
- scipy/_lib/decorator.py +399 -0
- scipy/_lib/deprecation.py +274 -0
- scipy/_lib/doccer.py +366 -0
- scipy/_lib/messagestream.cp313t-win_arm64.lib +0 -0
- scipy/_lib/messagestream.cp313t-win_arm64.pyd +0 -0
- scipy/_lib/pyprima/__init__.py +212 -0
- scipy/_lib/pyprima/cobyla/__init__.py +0 -0
- scipy/_lib/pyprima/cobyla/cobyla.py +559 -0
- scipy/_lib/pyprima/cobyla/cobylb.py +714 -0
- scipy/_lib/pyprima/cobyla/geometry.py +226 -0
- scipy/_lib/pyprima/cobyla/initialize.py +215 -0
- scipy/_lib/pyprima/cobyla/trustregion.py +492 -0
- scipy/_lib/pyprima/cobyla/update.py +289 -0
- scipy/_lib/pyprima/common/__init__.py +0 -0
- scipy/_lib/pyprima/common/_bounds.py +34 -0
- scipy/_lib/pyprima/common/_linear_constraints.py +46 -0
- scipy/_lib/pyprima/common/_nonlinear_constraints.py +54 -0
- scipy/_lib/pyprima/common/_project.py +173 -0
- scipy/_lib/pyprima/common/checkbreak.py +93 -0
- scipy/_lib/pyprima/common/consts.py +47 -0
- scipy/_lib/pyprima/common/evaluate.py +99 -0
- scipy/_lib/pyprima/common/history.py +38 -0
- scipy/_lib/pyprima/common/infos.py +30 -0
- scipy/_lib/pyprima/common/linalg.py +435 -0
- scipy/_lib/pyprima/common/message.py +290 -0
- scipy/_lib/pyprima/common/powalg.py +131 -0
- scipy/_lib/pyprima/common/preproc.py +277 -0
- scipy/_lib/pyprima/common/present.py +5 -0
- scipy/_lib/pyprima/common/ratio.py +54 -0
- scipy/_lib/pyprima/common/redrho.py +47 -0
- scipy/_lib/pyprima/common/selectx.py +296 -0
- scipy/_lib/tests/__init__.py +0 -0
- scipy/_lib/tests/test__gcutils.py +110 -0
- scipy/_lib/tests/test__pep440.py +67 -0
- scipy/_lib/tests/test__testutils.py +32 -0
- scipy/_lib/tests/test__threadsafety.py +51 -0
- scipy/_lib/tests/test__util.py +641 -0
- scipy/_lib/tests/test_array_api.py +322 -0
- scipy/_lib/tests/test_bunch.py +169 -0
- scipy/_lib/tests/test_ccallback.py +196 -0
- scipy/_lib/tests/test_config.py +45 -0
- scipy/_lib/tests/test_deprecation.py +10 -0
- scipy/_lib/tests/test_doccer.py +143 -0
- scipy/_lib/tests/test_import_cycles.py +18 -0
- scipy/_lib/tests/test_public_api.py +482 -0
- scipy/_lib/tests/test_scipy_version.py +28 -0
- scipy/_lib/tests/test_tmpdirs.py +48 -0
- scipy/_lib/tests/test_warnings.py +137 -0
- scipy/_lib/uarray.py +31 -0
- scipy/cluster/__init__.py +31 -0
- scipy/cluster/_hierarchy.cp313t-win_arm64.lib +0 -0
- scipy/cluster/_hierarchy.cp313t-win_arm64.pyd +0 -0
- scipy/cluster/_optimal_leaf_ordering.cp313t-win_arm64.lib +0 -0
- scipy/cluster/_optimal_leaf_ordering.cp313t-win_arm64.pyd +0 -0
- scipy/cluster/_vq.cp313t-win_arm64.lib +0 -0
- scipy/cluster/_vq.cp313t-win_arm64.pyd +0 -0
- scipy/cluster/hierarchy.py +4348 -0
- scipy/cluster/tests/__init__.py +0 -0
- scipy/cluster/tests/hierarchy_test_data.py +145 -0
- scipy/cluster/tests/test_disjoint_set.py +202 -0
- scipy/cluster/tests/test_hierarchy.py +1238 -0
- scipy/cluster/tests/test_vq.py +434 -0
- scipy/cluster/vq.py +832 -0
- scipy/conftest.py +683 -0
- scipy/constants/__init__.py +358 -0
- scipy/constants/_codata.py +2266 -0
- scipy/constants/_constants.py +369 -0
- scipy/constants/codata.py +21 -0
- scipy/constants/constants.py +53 -0
- scipy/constants/tests/__init__.py +0 -0
- scipy/constants/tests/test_codata.py +78 -0
- scipy/constants/tests/test_constants.py +83 -0
- scipy/datasets/__init__.py +90 -0
- scipy/datasets/_download_all.py +71 -0
- scipy/datasets/_fetchers.py +225 -0
- scipy/datasets/_registry.py +26 -0
- scipy/datasets/_utils.py +81 -0
- scipy/datasets/tests/__init__.py +0 -0
- scipy/datasets/tests/test_data.py +128 -0
- scipy/differentiate/__init__.py +27 -0
- scipy/differentiate/_differentiate.py +1129 -0
- scipy/differentiate/tests/__init__.py +0 -0
- scipy/differentiate/tests/test_differentiate.py +694 -0
- scipy/fft/__init__.py +114 -0
- scipy/fft/_backend.py +196 -0
- scipy/fft/_basic.py +1650 -0
- scipy/fft/_basic_backend.py +197 -0
- scipy/fft/_debug_backends.py +22 -0
- scipy/fft/_fftlog.py +223 -0
- scipy/fft/_fftlog_backend.py +200 -0
- scipy/fft/_helper.py +348 -0
- scipy/fft/_pocketfft/LICENSE.md +25 -0
- scipy/fft/_pocketfft/__init__.py +9 -0
- scipy/fft/_pocketfft/basic.py +251 -0
- scipy/fft/_pocketfft/helper.py +249 -0
- scipy/fft/_pocketfft/pypocketfft.cp313t-win_arm64.lib +0 -0
- scipy/fft/_pocketfft/pypocketfft.cp313t-win_arm64.pyd +0 -0
- scipy/fft/_pocketfft/realtransforms.py +109 -0
- scipy/fft/_pocketfft/tests/__init__.py +0 -0
- scipy/fft/_pocketfft/tests/test_basic.py +1011 -0
- scipy/fft/_pocketfft/tests/test_real_transforms.py +505 -0
- scipy/fft/_realtransforms.py +706 -0
- scipy/fft/_realtransforms_backend.py +63 -0
- scipy/fft/tests/__init__.py +0 -0
- scipy/fft/tests/mock_backend.py +96 -0
- scipy/fft/tests/test_backend.py +98 -0
- scipy/fft/tests/test_basic.py +504 -0
- scipy/fft/tests/test_fftlog.py +215 -0
- scipy/fft/tests/test_helper.py +558 -0
- scipy/fft/tests/test_multithreading.py +84 -0
- scipy/fft/tests/test_real_transforms.py +247 -0
- scipy/fftpack/__init__.py +103 -0
- scipy/fftpack/_basic.py +428 -0
- scipy/fftpack/_helper.py +115 -0
- scipy/fftpack/_pseudo_diffs.py +554 -0
- scipy/fftpack/_realtransforms.py +598 -0
- scipy/fftpack/basic.py +20 -0
- scipy/fftpack/convolve.cp313t-win_arm64.lib +0 -0
- scipy/fftpack/convolve.cp313t-win_arm64.pyd +0 -0
- scipy/fftpack/helper.py +19 -0
- scipy/fftpack/pseudo_diffs.py +22 -0
- scipy/fftpack/realtransforms.py +19 -0
- scipy/fftpack/tests/__init__.py +0 -0
- scipy/fftpack/tests/fftw_double_ref.npz +0 -0
- scipy/fftpack/tests/fftw_longdouble_ref.npz +0 -0
- scipy/fftpack/tests/fftw_single_ref.npz +0 -0
- scipy/fftpack/tests/test.npz +0 -0
- scipy/fftpack/tests/test_basic.py +877 -0
- scipy/fftpack/tests/test_helper.py +54 -0
- scipy/fftpack/tests/test_import.py +33 -0
- scipy/fftpack/tests/test_pseudo_diffs.py +388 -0
- scipy/fftpack/tests/test_real_transforms.py +836 -0
- scipy/integrate/__init__.py +122 -0
- scipy/integrate/_bvp.py +1160 -0
- scipy/integrate/_cubature.py +729 -0
- scipy/integrate/_dop.cp313t-win_arm64.lib +0 -0
- scipy/integrate/_dop.cp313t-win_arm64.pyd +0 -0
- scipy/integrate/_ivp/__init__.py +8 -0
- scipy/integrate/_ivp/base.py +290 -0
- scipy/integrate/_ivp/bdf.py +478 -0
- scipy/integrate/_ivp/common.py +451 -0
- scipy/integrate/_ivp/dop853_coefficients.py +193 -0
- scipy/integrate/_ivp/ivp.py +755 -0
- scipy/integrate/_ivp/lsoda.py +224 -0
- scipy/integrate/_ivp/radau.py +572 -0
- scipy/integrate/_ivp/rk.py +601 -0
- scipy/integrate/_ivp/tests/__init__.py +0 -0
- scipy/integrate/_ivp/tests/test_ivp.py +1287 -0
- scipy/integrate/_ivp/tests/test_rk.py +37 -0
- scipy/integrate/_lebedev.py +5450 -0
- scipy/integrate/_lsoda.cp313t-win_arm64.lib +0 -0
- scipy/integrate/_lsoda.cp313t-win_arm64.pyd +0 -0
- scipy/integrate/_ode.py +1395 -0
- scipy/integrate/_odepack.cp313t-win_arm64.lib +0 -0
- scipy/integrate/_odepack.cp313t-win_arm64.pyd +0 -0
- scipy/integrate/_odepack_py.py +273 -0
- scipy/integrate/_quad_vec.py +674 -0
- scipy/integrate/_quadpack.cp313t-win_arm64.lib +0 -0
- scipy/integrate/_quadpack.cp313t-win_arm64.pyd +0 -0
- scipy/integrate/_quadpack_py.py +1283 -0
- scipy/integrate/_quadrature.py +1336 -0
- scipy/integrate/_rules/__init__.py +12 -0
- scipy/integrate/_rules/_base.py +518 -0
- scipy/integrate/_rules/_gauss_kronrod.py +202 -0
- scipy/integrate/_rules/_gauss_legendre.py +62 -0
- scipy/integrate/_rules/_genz_malik.py +210 -0
- scipy/integrate/_tanhsinh.py +1385 -0
- scipy/integrate/_test_multivariate.cp313t-win_arm64.lib +0 -0
- scipy/integrate/_test_multivariate.cp313t-win_arm64.pyd +0 -0
- scipy/integrate/_test_odeint_banded.cp313t-win_arm64.lib +0 -0
- scipy/integrate/_test_odeint_banded.cp313t-win_arm64.pyd +0 -0
- scipy/integrate/_vode.cp313t-win_arm64.lib +0 -0
- scipy/integrate/_vode.cp313t-win_arm64.pyd +0 -0
- scipy/integrate/dop.py +15 -0
- scipy/integrate/lsoda.py +15 -0
- scipy/integrate/odepack.py +17 -0
- scipy/integrate/quadpack.py +23 -0
- scipy/integrate/tests/__init__.py +0 -0
- scipy/integrate/tests/test__quad_vec.py +211 -0
- scipy/integrate/tests/test_banded_ode_solvers.py +305 -0
- scipy/integrate/tests/test_bvp.py +714 -0
- scipy/integrate/tests/test_cubature.py +1375 -0
- scipy/integrate/tests/test_integrate.py +840 -0
- scipy/integrate/tests/test_odeint_jac.py +74 -0
- scipy/integrate/tests/test_quadpack.py +680 -0
- scipy/integrate/tests/test_quadrature.py +730 -0
- scipy/integrate/tests/test_tanhsinh.py +1171 -0
- scipy/integrate/vode.py +15 -0
- scipy/interpolate/__init__.py +228 -0
- scipy/interpolate/_bary_rational.py +715 -0
- scipy/interpolate/_bsplines.py +2469 -0
- scipy/interpolate/_cubic.py +973 -0
- scipy/interpolate/_dfitpack.cp313t-win_arm64.lib +0 -0
- scipy/interpolate/_dfitpack.cp313t-win_arm64.pyd +0 -0
- scipy/interpolate/_dierckx.cp313t-win_arm64.lib +0 -0
- scipy/interpolate/_dierckx.cp313t-win_arm64.pyd +0 -0
- scipy/interpolate/_fitpack.cp313t-win_arm64.lib +0 -0
- scipy/interpolate/_fitpack.cp313t-win_arm64.pyd +0 -0
- scipy/interpolate/_fitpack2.py +2397 -0
- scipy/interpolate/_fitpack_impl.py +811 -0
- scipy/interpolate/_fitpack_py.py +898 -0
- scipy/interpolate/_fitpack_repro.py +996 -0
- scipy/interpolate/_interpnd.cp313t-win_arm64.lib +0 -0
- scipy/interpolate/_interpnd.cp313t-win_arm64.pyd +0 -0
- scipy/interpolate/_interpolate.py +2266 -0
- scipy/interpolate/_ndbspline.py +415 -0
- scipy/interpolate/_ndgriddata.py +329 -0
- scipy/interpolate/_pade.py +67 -0
- scipy/interpolate/_polyint.py +1025 -0
- scipy/interpolate/_ppoly.cp313t-win_arm64.lib +0 -0
- scipy/interpolate/_ppoly.cp313t-win_arm64.pyd +0 -0
- scipy/interpolate/_rbf.py +290 -0
- scipy/interpolate/_rbfinterp.py +550 -0
- scipy/interpolate/_rbfinterp_pythran.cp313t-win_arm64.lib +0 -0
- scipy/interpolate/_rbfinterp_pythran.cp313t-win_arm64.pyd +0 -0
- scipy/interpolate/_rgi.py +764 -0
- scipy/interpolate/_rgi_cython.cp313t-win_arm64.lib +0 -0
- scipy/interpolate/_rgi_cython.cp313t-win_arm64.pyd +0 -0
- scipy/interpolate/dfitpack.py +24 -0
- scipy/interpolate/fitpack.py +31 -0
- scipy/interpolate/fitpack2.py +29 -0
- scipy/interpolate/interpnd.py +24 -0
- scipy/interpolate/interpolate.py +30 -0
- scipy/interpolate/ndgriddata.py +23 -0
- scipy/interpolate/polyint.py +24 -0
- scipy/interpolate/rbf.py +18 -0
- scipy/interpolate/tests/__init__.py +0 -0
- scipy/interpolate/tests/data/bug-1310.npz +0 -0
- scipy/interpolate/tests/data/estimate_gradients_hang.npy +0 -0
- scipy/interpolate/tests/data/gcvspl.npz +0 -0
- scipy/interpolate/tests/test_bary_rational.py +368 -0
- scipy/interpolate/tests/test_bsplines.py +3754 -0
- scipy/interpolate/tests/test_fitpack.py +519 -0
- scipy/interpolate/tests/test_fitpack2.py +1431 -0
- scipy/interpolate/tests/test_gil.py +64 -0
- scipy/interpolate/tests/test_interpnd.py +452 -0
- scipy/interpolate/tests/test_interpolate.py +2630 -0
- scipy/interpolate/tests/test_ndgriddata.py +308 -0
- scipy/interpolate/tests/test_pade.py +107 -0
- scipy/interpolate/tests/test_polyint.py +972 -0
- scipy/interpolate/tests/test_rbf.py +246 -0
- scipy/interpolate/tests/test_rbfinterp.py +534 -0
- scipy/interpolate/tests/test_rgi.py +1151 -0
- scipy/io/__init__.py +116 -0
- scipy/io/_fast_matrix_market/__init__.py +600 -0
- scipy/io/_fast_matrix_market/_fmm_core.cp313t-win_arm64.lib +0 -0
- scipy/io/_fast_matrix_market/_fmm_core.cp313t-win_arm64.pyd +0 -0
- scipy/io/_fortran.py +354 -0
- scipy/io/_harwell_boeing/__init__.py +7 -0
- scipy/io/_harwell_boeing/_fortran_format_parser.py +316 -0
- scipy/io/_harwell_boeing/hb.py +571 -0
- scipy/io/_harwell_boeing/tests/__init__.py +0 -0
- scipy/io/_harwell_boeing/tests/test_fortran_format.py +74 -0
- scipy/io/_harwell_boeing/tests/test_hb.py +70 -0
- scipy/io/_idl.py +917 -0
- scipy/io/_mmio.py +968 -0
- scipy/io/_netcdf.py +1104 -0
- scipy/io/_test_fortran.cp313t-win_arm64.lib +0 -0
- scipy/io/_test_fortran.cp313t-win_arm64.pyd +0 -0
- scipy/io/arff/__init__.py +28 -0
- scipy/io/arff/_arffread.py +873 -0
- scipy/io/arff/arffread.py +19 -0
- scipy/io/arff/tests/__init__.py +0 -0
- scipy/io/arff/tests/data/iris.arff +225 -0
- scipy/io/arff/tests/data/missing.arff +8 -0
- scipy/io/arff/tests/data/nodata.arff +11 -0
- scipy/io/arff/tests/data/quoted_nominal.arff +13 -0
- scipy/io/arff/tests/data/quoted_nominal_spaces.arff +13 -0
- scipy/io/arff/tests/data/test1.arff +10 -0
- scipy/io/arff/tests/data/test10.arff +8 -0
- scipy/io/arff/tests/data/test11.arff +11 -0
- scipy/io/arff/tests/data/test2.arff +15 -0
- scipy/io/arff/tests/data/test3.arff +6 -0
- scipy/io/arff/tests/data/test4.arff +11 -0
- scipy/io/arff/tests/data/test5.arff +26 -0
- scipy/io/arff/tests/data/test6.arff +12 -0
- scipy/io/arff/tests/data/test7.arff +15 -0
- scipy/io/arff/tests/data/test8.arff +12 -0
- scipy/io/arff/tests/data/test9.arff +14 -0
- scipy/io/arff/tests/test_arffread.py +421 -0
- scipy/io/harwell_boeing.py +17 -0
- scipy/io/idl.py +17 -0
- scipy/io/matlab/__init__.py +66 -0
- scipy/io/matlab/_byteordercodes.py +75 -0
- scipy/io/matlab/_mio.py +375 -0
- scipy/io/matlab/_mio4.py +632 -0
- scipy/io/matlab/_mio5.py +901 -0
- scipy/io/matlab/_mio5_params.py +281 -0
- scipy/io/matlab/_mio5_utils.cp313t-win_arm64.lib +0 -0
- scipy/io/matlab/_mio5_utils.cp313t-win_arm64.pyd +0 -0
- scipy/io/matlab/_mio_utils.cp313t-win_arm64.lib +0 -0
- scipy/io/matlab/_mio_utils.cp313t-win_arm64.pyd +0 -0
- scipy/io/matlab/_miobase.py +435 -0
- scipy/io/matlab/_streams.cp313t-win_arm64.lib +0 -0
- scipy/io/matlab/_streams.cp313t-win_arm64.pyd +0 -0
- scipy/io/matlab/byteordercodes.py +17 -0
- scipy/io/matlab/mio.py +16 -0
- scipy/io/matlab/mio4.py +17 -0
- scipy/io/matlab/mio5.py +19 -0
- scipy/io/matlab/mio5_params.py +18 -0
- scipy/io/matlab/mio5_utils.py +17 -0
- scipy/io/matlab/mio_utils.py +17 -0
- scipy/io/matlab/miobase.py +16 -0
- scipy/io/matlab/streams.py +16 -0
- scipy/io/matlab/tests/__init__.py +0 -0
- scipy/io/matlab/tests/data/bad_miuint32.mat +0 -0
- scipy/io/matlab/tests/data/bad_miutf8_array_name.mat +0 -0
- scipy/io/matlab/tests/data/big_endian.mat +0 -0
- scipy/io/matlab/tests/data/broken_utf8.mat +0 -0
- scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat +0 -0
- scipy/io/matlab/tests/data/corrupted_zlib_data.mat +0 -0
- scipy/io/matlab/tests/data/debigged_m4.mat +0 -0
- scipy/io/matlab/tests/data/japanese_utf8.txt +5 -0
- scipy/io/matlab/tests/data/little_endian.mat +0 -0
- scipy/io/matlab/tests/data/logical_sparse.mat +0 -0
- scipy/io/matlab/tests/data/malformed1.mat +0 -0
- scipy/io/matlab/tests/data/miuint32_for_miint32.mat +0 -0
- scipy/io/matlab/tests/data/miutf8_array_name.mat +0 -0
- scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat +0 -0
- scipy/io/matlab/tests/data/one_by_zero_char.mat +0 -0
- scipy/io/matlab/tests/data/parabola.mat +0 -0
- scipy/io/matlab/tests/data/single_empty_string.mat +0 -0
- scipy/io/matlab/tests/data/some_functions.mat +0 -0
- scipy/io/matlab/tests/data/sqr.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test_empty_struct.mat +0 -0
- scipy/io/matlab/tests/data/test_mat4_le_floats.mat +0 -0
- scipy/io/matlab/tests/data/test_skip_variable.mat +0 -0
- scipy/io/matlab/tests/data/testbool_8_WIN64.mat +0 -0
- scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsimplecell.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testvec_4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/test_byteordercodes.py +29 -0
- scipy/io/matlab/tests/test_mio.py +1399 -0
- scipy/io/matlab/tests/test_mio5_utils.py +179 -0
- scipy/io/matlab/tests/test_mio_funcs.py +51 -0
- scipy/io/matlab/tests/test_mio_utils.py +45 -0
- scipy/io/matlab/tests/test_miobase.py +32 -0
- scipy/io/matlab/tests/test_pathological.py +33 -0
- scipy/io/matlab/tests/test_streams.py +241 -0
- scipy/io/mmio.py +17 -0
- scipy/io/netcdf.py +17 -0
- scipy/io/tests/__init__.py +0 -0
- scipy/io/tests/data/Transparent Busy.ani +0 -0
- scipy/io/tests/data/array_float32_1d.sav +0 -0
- scipy/io/tests/data/array_float32_2d.sav +0 -0
- scipy/io/tests/data/array_float32_3d.sav +0 -0
- scipy/io/tests/data/array_float32_4d.sav +0 -0
- scipy/io/tests/data/array_float32_5d.sav +0 -0
- scipy/io/tests/data/array_float32_6d.sav +0 -0
- scipy/io/tests/data/array_float32_7d.sav +0 -0
- scipy/io/tests/data/array_float32_8d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_1d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_2d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_3d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_4d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_5d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_6d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_7d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_8d.sav +0 -0
- scipy/io/tests/data/example_1.nc +0 -0
- scipy/io/tests/data/example_2.nc +0 -0
- scipy/io/tests/data/example_3_maskedvals.nc +0 -0
- scipy/io/tests/data/fortran-3x3d-2i.dat +0 -0
- scipy/io/tests/data/fortran-mixed.dat +0 -0
- scipy/io/tests/data/fortran-sf8-11x1x10.dat +0 -0
- scipy/io/tests/data/fortran-sf8-15x10x22.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x1.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x5.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x7.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x3x5.dat +0 -0
- scipy/io/tests/data/fortran-si4-11x1x10.dat +0 -0
- scipy/io/tests/data/fortran-si4-15x10x22.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x1.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x5.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x7.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x3x5.dat +0 -0
- scipy/io/tests/data/invalid_pointer.sav +0 -0
- scipy/io/tests/data/null_pointer.sav +0 -0
- scipy/io/tests/data/scalar_byte.sav +0 -0
- scipy/io/tests/data/scalar_byte_descr.sav +0 -0
- scipy/io/tests/data/scalar_complex32.sav +0 -0
- scipy/io/tests/data/scalar_complex64.sav +0 -0
- scipy/io/tests/data/scalar_float32.sav +0 -0
- scipy/io/tests/data/scalar_float64.sav +0 -0
- scipy/io/tests/data/scalar_heap_pointer.sav +0 -0
- scipy/io/tests/data/scalar_int16.sav +0 -0
- scipy/io/tests/data/scalar_int32.sav +0 -0
- scipy/io/tests/data/scalar_int64.sav +0 -0
- scipy/io/tests/data/scalar_string.sav +0 -0
- scipy/io/tests/data/scalar_uint16.sav +0 -0
- scipy/io/tests/data/scalar_uint32.sav +0 -0
- scipy/io/tests/data/scalar_uint64.sav +0 -0
- scipy/io/tests/data/struct_arrays.sav +0 -0
- scipy/io/tests/data/struct_arrays_byte_idl80.sav +0 -0
- scipy/io/tests/data/struct_arrays_replicated.sav +0 -0
- scipy/io/tests/data/struct_arrays_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_inherit.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays_replicated.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_pointers.sav +0 -0
- scipy/io/tests/data/struct_pointers_replicated.sav +0 -0
- scipy/io/tests/data/struct_pointers_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_scalars.sav +0 -0
- scipy/io/tests/data/struct_scalars_replicated.sav +0 -0
- scipy/io/tests/data/struct_scalars_replicated_3d.sav +0 -0
- scipy/io/tests/data/test-1234Hz-le-1ch-10S-20bit-extra.wav +0 -0
- scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav +0 -0
- scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav +0 -0
- scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-rf64.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav +0 -0
- scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav +0 -0
- scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-rf64.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav +0 -0
- scipy/io/tests/data/various_compressed.sav +0 -0
- scipy/io/tests/test_fortran.py +264 -0
- scipy/io/tests/test_idl.py +483 -0
- scipy/io/tests/test_mmio.py +831 -0
- scipy/io/tests/test_netcdf.py +550 -0
- scipy/io/tests/test_paths.py +93 -0
- scipy/io/tests/test_wavfile.py +501 -0
- scipy/io/wavfile.py +938 -0
- scipy/linalg/__init__.pxd +1 -0
- scipy/linalg/__init__.py +236 -0
- scipy/linalg/_basic.py +2146 -0
- scipy/linalg/_blas_subroutines.h +164 -0
- scipy/linalg/_cythonized_array_utils.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_cythonized_array_utils.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_cythonized_array_utils.pxd +40 -0
- scipy/linalg/_cythonized_array_utils.pyi +16 -0
- scipy/linalg/_decomp.py +1645 -0
- scipy/linalg/_decomp_cholesky.py +413 -0
- scipy/linalg/_decomp_cossin.py +236 -0
- scipy/linalg/_decomp_interpolative.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_decomp_interpolative.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_decomp_ldl.py +356 -0
- scipy/linalg/_decomp_lu.py +401 -0
- scipy/linalg/_decomp_lu_cython.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_decomp_lu_cython.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_decomp_lu_cython.pyi +6 -0
- scipy/linalg/_decomp_polar.py +113 -0
- scipy/linalg/_decomp_qr.py +494 -0
- scipy/linalg/_decomp_qz.py +452 -0
- scipy/linalg/_decomp_schur.py +336 -0
- scipy/linalg/_decomp_svd.py +545 -0
- scipy/linalg/_decomp_update.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_decomp_update.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_expm_frechet.py +417 -0
- scipy/linalg/_fblas.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_fblas.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_flapack.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_flapack.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_lapack_subroutines.h +1521 -0
- scipy/linalg/_linalg_pythran.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_linalg_pythran.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs.py +1050 -0
- scipy/linalg/_matfuncs_expm.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_expm.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs_expm.pyi +6 -0
- scipy/linalg/_matfuncs_inv_ssq.py +886 -0
- scipy/linalg/_matfuncs_schur_sqrtm.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_schur_sqrtm.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs_sqrtm.py +107 -0
- scipy/linalg/_matfuncs_sqrtm_triu.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_sqrtm_triu.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_misc.py +191 -0
- scipy/linalg/_procrustes.py +113 -0
- scipy/linalg/_sketches.py +189 -0
- scipy/linalg/_solve_toeplitz.cp313t-win_arm64.lib +0 -0
- scipy/linalg/_solve_toeplitz.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/_solvers.py +862 -0
- scipy/linalg/_special_matrices.py +1322 -0
- scipy/linalg/_testutils.py +65 -0
- scipy/linalg/basic.py +23 -0
- scipy/linalg/blas.py +495 -0
- scipy/linalg/cython_blas.cp313t-win_arm64.lib +0 -0
- scipy/linalg/cython_blas.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/cython_blas.pxd +169 -0
- scipy/linalg/cython_blas.pyx +1432 -0
- scipy/linalg/cython_lapack.cp313t-win_arm64.lib +0 -0
- scipy/linalg/cython_lapack.cp313t-win_arm64.pyd +0 -0
- scipy/linalg/cython_lapack.pxd +1528 -0
- scipy/linalg/cython_lapack.pyx +12045 -0
- scipy/linalg/decomp.py +23 -0
- scipy/linalg/decomp_cholesky.py +21 -0
- scipy/linalg/decomp_lu.py +21 -0
- scipy/linalg/decomp_qr.py +20 -0
- scipy/linalg/decomp_schur.py +21 -0
- scipy/linalg/decomp_svd.py +21 -0
- scipy/linalg/interpolative.py +989 -0
- scipy/linalg/lapack.py +1081 -0
- scipy/linalg/matfuncs.py +23 -0
- scipy/linalg/misc.py +21 -0
- scipy/linalg/special_matrices.py +22 -0
- scipy/linalg/tests/__init__.py +0 -0
- scipy/linalg/tests/_cython_examples/extending.pyx +23 -0
- scipy/linalg/tests/_cython_examples/meson.build +34 -0
- scipy/linalg/tests/data/carex_15_data.npz +0 -0
- scipy/linalg/tests/data/carex_18_data.npz +0 -0
- scipy/linalg/tests/data/carex_19_data.npz +0 -0
- scipy/linalg/tests/data/carex_20_data.npz +0 -0
- scipy/linalg/tests/data/carex_6_data.npz +0 -0
- scipy/linalg/tests/data/gendare_20170120_data.npz +0 -0
- scipy/linalg/tests/test_basic.py +2074 -0
- scipy/linalg/tests/test_batch.py +588 -0
- scipy/linalg/tests/test_blas.py +1127 -0
- scipy/linalg/tests/test_cython_blas.py +118 -0
- scipy/linalg/tests/test_cython_lapack.py +22 -0
- scipy/linalg/tests/test_cythonized_array_utils.py +130 -0
- scipy/linalg/tests/test_decomp.py +3189 -0
- scipy/linalg/tests/test_decomp_cholesky.py +268 -0
- scipy/linalg/tests/test_decomp_cossin.py +314 -0
- scipy/linalg/tests/test_decomp_ldl.py +137 -0
- scipy/linalg/tests/test_decomp_lu.py +308 -0
- scipy/linalg/tests/test_decomp_polar.py +110 -0
- scipy/linalg/tests/test_decomp_update.py +1701 -0
- scipy/linalg/tests/test_extending.py +46 -0
- scipy/linalg/tests/test_fblas.py +607 -0
- scipy/linalg/tests/test_interpolative.py +232 -0
- scipy/linalg/tests/test_lapack.py +3620 -0
- scipy/linalg/tests/test_matfuncs.py +1125 -0
- scipy/linalg/tests/test_matmul_toeplitz.py +136 -0
- scipy/linalg/tests/test_procrustes.py +214 -0
- scipy/linalg/tests/test_sketches.py +118 -0
- scipy/linalg/tests/test_solve_toeplitz.py +150 -0
- scipy/linalg/tests/test_solvers.py +844 -0
- scipy/linalg/tests/test_special_matrices.py +636 -0
- scipy/misc/__init__.py +6 -0
- scipy/misc/common.py +6 -0
- scipy/misc/doccer.py +6 -0
- scipy/ndimage/__init__.py +174 -0
- scipy/ndimage/_ctest.cp313t-win_arm64.lib +0 -0
- scipy/ndimage/_ctest.cp313t-win_arm64.pyd +0 -0
- scipy/ndimage/_cytest.cp313t-win_arm64.lib +0 -0
- scipy/ndimage/_cytest.cp313t-win_arm64.pyd +0 -0
- scipy/ndimage/_delegators.py +303 -0
- scipy/ndimage/_filters.py +2422 -0
- scipy/ndimage/_fourier.py +306 -0
- scipy/ndimage/_interpolation.py +1033 -0
- scipy/ndimage/_measurements.py +1689 -0
- scipy/ndimage/_morphology.py +2634 -0
- scipy/ndimage/_nd_image.cp313t-win_arm64.lib +0 -0
- scipy/ndimage/_nd_image.cp313t-win_arm64.pyd +0 -0
- scipy/ndimage/_ndimage_api.py +16 -0
- scipy/ndimage/_ni_docstrings.py +214 -0
- scipy/ndimage/_ni_label.cp313t-win_arm64.lib +0 -0
- scipy/ndimage/_ni_label.cp313t-win_arm64.pyd +0 -0
- scipy/ndimage/_ni_support.py +139 -0
- scipy/ndimage/_rank_filter_1d.cp313t-win_arm64.lib +0 -0
- scipy/ndimage/_rank_filter_1d.cp313t-win_arm64.pyd +0 -0
- scipy/ndimage/_support_alternative_backends.py +84 -0
- scipy/ndimage/filters.py +27 -0
- scipy/ndimage/fourier.py +21 -0
- scipy/ndimage/interpolation.py +22 -0
- scipy/ndimage/measurements.py +24 -0
- scipy/ndimage/morphology.py +27 -0
- scipy/ndimage/tests/__init__.py +12 -0
- scipy/ndimage/tests/data/label_inputs.txt +21 -0
- scipy/ndimage/tests/data/label_results.txt +294 -0
- scipy/ndimage/tests/data/label_strels.txt +42 -0
- scipy/ndimage/tests/dots.png +0 -0
- scipy/ndimage/tests/test_c_api.py +102 -0
- scipy/ndimage/tests/test_datatypes.py +67 -0
- scipy/ndimage/tests/test_filters.py +3083 -0
- scipy/ndimage/tests/test_fourier.py +187 -0
- scipy/ndimage/tests/test_interpolation.py +1491 -0
- scipy/ndimage/tests/test_measurements.py +1592 -0
- scipy/ndimage/tests/test_morphology.py +2950 -0
- scipy/ndimage/tests/test_ni_support.py +78 -0
- scipy/ndimage/tests/test_splines.py +70 -0
- scipy/odr/__init__.py +131 -0
- scipy/odr/__odrpack.cp313t-win_arm64.lib +0 -0
- scipy/odr/__odrpack.cp313t-win_arm64.pyd +0 -0
- scipy/odr/_add_newdocs.py +34 -0
- scipy/odr/_models.py +315 -0
- scipy/odr/_odrpack.py +1154 -0
- scipy/odr/models.py +20 -0
- scipy/odr/odrpack.py +21 -0
- scipy/odr/tests/__init__.py +0 -0
- scipy/odr/tests/test_odr.py +607 -0
- scipy/optimize/__init__.pxd +1 -0
- scipy/optimize/__init__.py +460 -0
- scipy/optimize/_basinhopping.py +741 -0
- scipy/optimize/_bglu_dense.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_bglu_dense.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_bracket.py +706 -0
- scipy/optimize/_chandrupatla.py +551 -0
- scipy/optimize/_cobyla_py.py +297 -0
- scipy/optimize/_cobyqa_py.py +72 -0
- scipy/optimize/_constraints.py +598 -0
- scipy/optimize/_dcsrch.py +728 -0
- scipy/optimize/_differentiable_functions.py +835 -0
- scipy/optimize/_differentialevolution.py +1970 -0
- scipy/optimize/_direct.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_direct.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_direct_py.py +280 -0
- scipy/optimize/_dual_annealing.py +732 -0
- scipy/optimize/_elementwise.py +798 -0
- scipy/optimize/_group_columns.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_group_columns.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_hessian_update_strategy.py +479 -0
- scipy/optimize/_highspy/__init__.py +0 -0
- scipy/optimize/_highspy/_core.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_highspy/_core.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_highspy/_highs_options.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_highspy/_highs_options.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_highspy/_highs_wrapper.py +338 -0
- scipy/optimize/_isotonic.py +157 -0
- scipy/optimize/_lbfgsb.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_lbfgsb.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_lbfgsb_py.py +634 -0
- scipy/optimize/_linesearch.py +896 -0
- scipy/optimize/_linprog.py +733 -0
- scipy/optimize/_linprog_doc.py +1434 -0
- scipy/optimize/_linprog_highs.py +422 -0
- scipy/optimize/_linprog_ip.py +1141 -0
- scipy/optimize/_linprog_rs.py +572 -0
- scipy/optimize/_linprog_simplex.py +663 -0
- scipy/optimize/_linprog_util.py +1521 -0
- scipy/optimize/_lsap.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_lsap.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_lsq/__init__.py +5 -0
- scipy/optimize/_lsq/bvls.py +183 -0
- scipy/optimize/_lsq/common.py +731 -0
- scipy/optimize/_lsq/dogbox.py +345 -0
- scipy/optimize/_lsq/givens_elimination.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_lsq/givens_elimination.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_lsq/least_squares.py +1044 -0
- scipy/optimize/_lsq/lsq_linear.py +361 -0
- scipy/optimize/_lsq/trf.py +587 -0
- scipy/optimize/_lsq/trf_linear.py +249 -0
- scipy/optimize/_milp.py +394 -0
- scipy/optimize/_minimize.py +1199 -0
- scipy/optimize/_minpack.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_minpack.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_minpack_py.py +1178 -0
- scipy/optimize/_moduleTNC.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_moduleTNC.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_nnls.py +96 -0
- scipy/optimize/_nonlin.py +1634 -0
- scipy/optimize/_numdiff.py +963 -0
- scipy/optimize/_optimize.py +4169 -0
- scipy/optimize/_pava_pybind.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_pava_pybind.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_qap.py +760 -0
- scipy/optimize/_remove_redundancy.py +522 -0
- scipy/optimize/_root.py +732 -0
- scipy/optimize/_root_scalar.py +538 -0
- scipy/optimize/_shgo.py +1606 -0
- scipy/optimize/_shgo_lib/__init__.py +0 -0
- scipy/optimize/_shgo_lib/_complex.py +1225 -0
- scipy/optimize/_shgo_lib/_vertex.py +460 -0
- scipy/optimize/_slsqp_py.py +603 -0
- scipy/optimize/_slsqplib.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_slsqplib.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_spectral.py +260 -0
- scipy/optimize/_tnc.py +438 -0
- scipy/optimize/_trlib/__init__.py +12 -0
- scipy/optimize/_trlib/_trlib.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_trlib/_trlib.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_trustregion.py +318 -0
- scipy/optimize/_trustregion_constr/__init__.py +6 -0
- scipy/optimize/_trustregion_constr/canonical_constraint.py +390 -0
- scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +231 -0
- scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +584 -0
- scipy/optimize/_trustregion_constr/projections.py +411 -0
- scipy/optimize/_trustregion_constr/qp_subproblem.py +637 -0
- scipy/optimize/_trustregion_constr/report.py +49 -0
- scipy/optimize/_trustregion_constr/tests/__init__.py +0 -0
- scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py +296 -0
- scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py +39 -0
- scipy/optimize/_trustregion_constr/tests/test_projections.py +214 -0
- scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +645 -0
- scipy/optimize/_trustregion_constr/tests/test_report.py +34 -0
- scipy/optimize/_trustregion_constr/tr_interior_point.py +361 -0
- scipy/optimize/_trustregion_dogleg.py +122 -0
- scipy/optimize/_trustregion_exact.py +437 -0
- scipy/optimize/_trustregion_krylov.py +65 -0
- scipy/optimize/_trustregion_ncg.py +126 -0
- scipy/optimize/_tstutils.py +972 -0
- scipy/optimize/_zeros.cp313t-win_arm64.lib +0 -0
- scipy/optimize/_zeros.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/_zeros_py.py +1475 -0
- scipy/optimize/cobyla.py +19 -0
- scipy/optimize/cython_optimize/__init__.py +133 -0
- scipy/optimize/cython_optimize/_zeros.cp313t-win_arm64.lib +0 -0
- scipy/optimize/cython_optimize/_zeros.cp313t-win_arm64.pyd +0 -0
- scipy/optimize/cython_optimize/_zeros.pxd +33 -0
- scipy/optimize/cython_optimize/c_zeros.pxd +26 -0
- scipy/optimize/cython_optimize.pxd +11 -0
- scipy/optimize/elementwise.py +38 -0
- scipy/optimize/lbfgsb.py +23 -0
- scipy/optimize/linesearch.py +18 -0
- scipy/optimize/minpack.py +27 -0
- scipy/optimize/minpack2.py +17 -0
- scipy/optimize/moduleTNC.py +19 -0
- scipy/optimize/nonlin.py +29 -0
- scipy/optimize/optimize.py +40 -0
- scipy/optimize/slsqp.py +22 -0
- scipy/optimize/tests/__init__.py +0 -0
- scipy/optimize/tests/_cython_examples/extending.pyx +43 -0
- scipy/optimize/tests/_cython_examples/meson.build +32 -0
- scipy/optimize/tests/test__basinhopping.py +535 -0
- scipy/optimize/tests/test__differential_evolution.py +1703 -0
- scipy/optimize/tests/test__dual_annealing.py +416 -0
- scipy/optimize/tests/test__linprog_clean_inputs.py +312 -0
- scipy/optimize/tests/test__numdiff.py +885 -0
- scipy/optimize/tests/test__remove_redundancy.py +228 -0
- scipy/optimize/tests/test__root.py +124 -0
- scipy/optimize/tests/test__shgo.py +1164 -0
- scipy/optimize/tests/test__spectral.py +226 -0
- scipy/optimize/tests/test_bracket.py +896 -0
- scipy/optimize/tests/test_chandrupatla.py +982 -0
- scipy/optimize/tests/test_cobyla.py +195 -0
- scipy/optimize/tests/test_cobyqa.py +252 -0
- scipy/optimize/tests/test_constraint_conversion.py +286 -0
- scipy/optimize/tests/test_constraints.py +255 -0
- scipy/optimize/tests/test_cython_optimize.py +92 -0
- scipy/optimize/tests/test_differentiable_functions.py +1025 -0
- scipy/optimize/tests/test_direct.py +321 -0
- scipy/optimize/tests/test_extending.py +28 -0
- scipy/optimize/tests/test_hessian_update_strategy.py +300 -0
- scipy/optimize/tests/test_isotonic_regression.py +167 -0
- scipy/optimize/tests/test_lbfgsb_hessinv.py +65 -0
- scipy/optimize/tests/test_lbfgsb_setulb.py +122 -0
- scipy/optimize/tests/test_least_squares.py +986 -0
- scipy/optimize/tests/test_linear_assignment.py +116 -0
- scipy/optimize/tests/test_linesearch.py +328 -0
- scipy/optimize/tests/test_linprog.py +2577 -0
- scipy/optimize/tests/test_lsq_common.py +297 -0
- scipy/optimize/tests/test_lsq_linear.py +287 -0
- scipy/optimize/tests/test_milp.py +459 -0
- scipy/optimize/tests/test_minimize_constrained.py +845 -0
- scipy/optimize/tests/test_minpack.py +1194 -0
- scipy/optimize/tests/test_nnls.py +469 -0
- scipy/optimize/tests/test_nonlin.py +572 -0
- scipy/optimize/tests/test_optimize.py +3344 -0
- scipy/optimize/tests/test_quadratic_assignment.py +455 -0
- scipy/optimize/tests/test_regression.py +40 -0
- scipy/optimize/tests/test_slsqp.py +645 -0
- scipy/optimize/tests/test_tnc.py +345 -0
- scipy/optimize/tests/test_trustregion.py +110 -0
- scipy/optimize/tests/test_trustregion_exact.py +351 -0
- scipy/optimize/tests/test_trustregion_krylov.py +170 -0
- scipy/optimize/tests/test_zeros.py +998 -0
- scipy/optimize/tnc.py +22 -0
- scipy/optimize/zeros.py +26 -0
- scipy/signal/__init__.py +316 -0
- scipy/signal/_arraytools.py +264 -0
- scipy/signal/_czt.py +575 -0
- scipy/signal/_delegators.py +568 -0
- scipy/signal/_filter_design.py +5893 -0
- scipy/signal/_fir_filter_design.py +1458 -0
- scipy/signal/_lti_conversion.py +534 -0
- scipy/signal/_ltisys.py +3546 -0
- scipy/signal/_max_len_seq.py +139 -0
- scipy/signal/_max_len_seq_inner.cp313t-win_arm64.lib +0 -0
- scipy/signal/_max_len_seq_inner.cp313t-win_arm64.pyd +0 -0
- scipy/signal/_peak_finding.py +1310 -0
- scipy/signal/_peak_finding_utils.cp313t-win_arm64.lib +0 -0
- scipy/signal/_peak_finding_utils.cp313t-win_arm64.pyd +0 -0
- scipy/signal/_polyutils.py +172 -0
- scipy/signal/_savitzky_golay.py +357 -0
- scipy/signal/_short_time_fft.py +2228 -0
- scipy/signal/_signal_api.py +30 -0
- scipy/signal/_signaltools.py +5309 -0
- scipy/signal/_sigtools.cp313t-win_arm64.lib +0 -0
- scipy/signal/_sigtools.cp313t-win_arm64.pyd +0 -0
- scipy/signal/_sosfilt.cp313t-win_arm64.lib +0 -0
- scipy/signal/_sosfilt.cp313t-win_arm64.pyd +0 -0
- scipy/signal/_spectral_py.py +2471 -0
- scipy/signal/_spline.cp313t-win_arm64.lib +0 -0
- scipy/signal/_spline.cp313t-win_arm64.pyd +0 -0
- scipy/signal/_spline.pyi +34 -0
- scipy/signal/_spline_filters.py +848 -0
- scipy/signal/_support_alternative_backends.py +73 -0
- scipy/signal/_upfirdn.py +219 -0
- scipy/signal/_upfirdn_apply.cp313t-win_arm64.lib +0 -0
- scipy/signal/_upfirdn_apply.cp313t-win_arm64.pyd +0 -0
- scipy/signal/_waveforms.py +687 -0
- scipy/signal/_wavelets.py +29 -0
- scipy/signal/bsplines.py +21 -0
- scipy/signal/filter_design.py +28 -0
- scipy/signal/fir_filter_design.py +21 -0
- scipy/signal/lti_conversion.py +20 -0
- scipy/signal/ltisys.py +25 -0
- scipy/signal/signaltools.py +27 -0
- scipy/signal/spectral.py +21 -0
- scipy/signal/spline.py +18 -0
- scipy/signal/tests/__init__.py +0 -0
- scipy/signal/tests/_scipy_spectral_test_shim.py +311 -0
- scipy/signal/tests/mpsig.py +122 -0
- scipy/signal/tests/test_array_tools.py +111 -0
- scipy/signal/tests/test_bsplines.py +365 -0
- scipy/signal/tests/test_cont2discrete.py +424 -0
- scipy/signal/tests/test_czt.py +221 -0
- scipy/signal/tests/test_dltisys.py +599 -0
- scipy/signal/tests/test_filter_design.py +4744 -0
- scipy/signal/tests/test_fir_filter_design.py +851 -0
- scipy/signal/tests/test_ltisys.py +1225 -0
- scipy/signal/tests/test_max_len_seq.py +71 -0
- scipy/signal/tests/test_peak_finding.py +915 -0
- scipy/signal/tests/test_result_type.py +51 -0
- scipy/signal/tests/test_savitzky_golay.py +363 -0
- scipy/signal/tests/test_short_time_fft.py +1107 -0
- scipy/signal/tests/test_signaltools.py +4735 -0
- scipy/signal/tests/test_spectral.py +2141 -0
- scipy/signal/tests/test_splines.py +427 -0
- scipy/signal/tests/test_upfirdn.py +322 -0
- scipy/signal/tests/test_waveforms.py +400 -0
- scipy/signal/tests/test_wavelets.py +59 -0
- scipy/signal/tests/test_windows.py +987 -0
- scipy/signal/waveforms.py +20 -0
- scipy/signal/wavelets.py +17 -0
- scipy/signal/windows/__init__.py +52 -0
- scipy/signal/windows/_windows.py +2513 -0
- scipy/signal/windows/windows.py +23 -0
- scipy/sparse/__init__.py +350 -0
- scipy/sparse/_base.py +1613 -0
- scipy/sparse/_bsr.py +880 -0
- scipy/sparse/_compressed.py +1328 -0
- scipy/sparse/_construct.py +1454 -0
- scipy/sparse/_coo.py +1581 -0
- scipy/sparse/_csc.py +367 -0
- scipy/sparse/_csparsetools.cp313t-win_arm64.lib +0 -0
- scipy/sparse/_csparsetools.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/_csr.py +558 -0
- scipy/sparse/_data.py +569 -0
- scipy/sparse/_dia.py +677 -0
- scipy/sparse/_dok.py +669 -0
- scipy/sparse/_extract.py +178 -0
- scipy/sparse/_index.py +444 -0
- scipy/sparse/_lil.py +632 -0
- scipy/sparse/_matrix.py +169 -0
- scipy/sparse/_matrix_io.py +167 -0
- scipy/sparse/_sparsetools.cp313t-win_arm64.lib +0 -0
- scipy/sparse/_sparsetools.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/_spfuncs.py +76 -0
- scipy/sparse/_sputils.py +632 -0
- scipy/sparse/base.py +24 -0
- scipy/sparse/bsr.py +22 -0
- scipy/sparse/compressed.py +20 -0
- scipy/sparse/construct.py +38 -0
- scipy/sparse/coo.py +23 -0
- scipy/sparse/csc.py +22 -0
- scipy/sparse/csgraph/__init__.py +210 -0
- scipy/sparse/csgraph/_flow.cp313t-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_flow.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_laplacian.py +563 -0
- scipy/sparse/csgraph/_matching.cp313t-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_matching.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_min_spanning_tree.cp313t-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_min_spanning_tree.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_reordering.cp313t-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_reordering.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_shortest_path.cp313t-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_shortest_path.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_tools.cp313t-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_tools.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_traversal.cp313t-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_traversal.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_validation.py +66 -0
- scipy/sparse/csgraph/tests/__init__.py +0 -0
- scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
- scipy/sparse/csgraph/tests/test_conversions.py +61 -0
- scipy/sparse/csgraph/tests/test_flow.py +209 -0
- scipy/sparse/csgraph/tests/test_graph_laplacian.py +368 -0
- scipy/sparse/csgraph/tests/test_matching.py +307 -0
- scipy/sparse/csgraph/tests/test_pydata_sparse.py +197 -0
- scipy/sparse/csgraph/tests/test_reordering.py +70 -0
- scipy/sparse/csgraph/tests/test_shortest_path.py +540 -0
- scipy/sparse/csgraph/tests/test_spanning_tree.py +66 -0
- scipy/sparse/csgraph/tests/test_traversal.py +148 -0
- scipy/sparse/csr.py +22 -0
- scipy/sparse/data.py +18 -0
- scipy/sparse/dia.py +22 -0
- scipy/sparse/dok.py +22 -0
- scipy/sparse/extract.py +23 -0
- scipy/sparse/lil.py +22 -0
- scipy/sparse/linalg/__init__.py +148 -0
- scipy/sparse/linalg/_dsolve/__init__.py +71 -0
- scipy/sparse/linalg/_dsolve/_add_newdocs.py +147 -0
- scipy/sparse/linalg/_dsolve/_superlu.cp313t-win_arm64.lib +0 -0
- scipy/sparse/linalg/_dsolve/_superlu.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_dsolve/linsolve.py +882 -0
- scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
- scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +928 -0
- scipy/sparse/linalg/_eigen/__init__.py +22 -0
- scipy/sparse/linalg/_eigen/_svds.py +540 -0
- scipy/sparse/linalg/_eigen/_svds_doc.py +382 -0
- scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
- scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
- scipy/sparse/linalg/_eigen/arpack/_arpack.cp313t-win_arm64.lib +0 -0
- scipy/sparse/linalg/_eigen/arpack/_arpack.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_eigen/arpack/arpack.py +1706 -0
- scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +717 -0
- scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
- scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1110 -0
- scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +725 -0
- scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/tests/test_svds.py +886 -0
- scipy/sparse/linalg/_expm_multiply.py +816 -0
- scipy/sparse/linalg/_interface.py +920 -0
- scipy/sparse/linalg/_isolve/__init__.py +20 -0
- scipy/sparse/linalg/_isolve/_gcrotmk.py +503 -0
- scipy/sparse/linalg/_isolve/iterative.py +1051 -0
- scipy/sparse/linalg/_isolve/lgmres.py +230 -0
- scipy/sparse/linalg/_isolve/lsmr.py +486 -0
- scipy/sparse/linalg/_isolve/lsqr.py +589 -0
- scipy/sparse/linalg/_isolve/minres.py +372 -0
- scipy/sparse/linalg/_isolve/tests/__init__.py +0 -0
- scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py +183 -0
- scipy/sparse/linalg/_isolve/tests/test_iterative.py +809 -0
- scipy/sparse/linalg/_isolve/tests/test_lgmres.py +225 -0
- scipy/sparse/linalg/_isolve/tests/test_lsmr.py +185 -0
- scipy/sparse/linalg/_isolve/tests/test_lsqr.py +120 -0
- scipy/sparse/linalg/_isolve/tests/test_minres.py +97 -0
- scipy/sparse/linalg/_isolve/tests/test_utils.py +9 -0
- scipy/sparse/linalg/_isolve/tfqmr.py +179 -0
- scipy/sparse/linalg/_isolve/utils.py +121 -0
- scipy/sparse/linalg/_matfuncs.py +940 -0
- scipy/sparse/linalg/_norm.py +195 -0
- scipy/sparse/linalg/_onenormest.py +467 -0
- scipy/sparse/linalg/_propack/_cpropack.cp313t-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_cpropack.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_dpropack.cp313t-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_dpropack.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_spropack.cp313t-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_spropack.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_zpropack.cp313t-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_zpropack.cp313t-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_special_sparse_arrays.py +949 -0
- scipy/sparse/linalg/_svdp.py +309 -0
- scipy/sparse/linalg/dsolve.py +22 -0
- scipy/sparse/linalg/eigen.py +21 -0
- scipy/sparse/linalg/interface.py +20 -0
- scipy/sparse/linalg/isolve.py +22 -0
- scipy/sparse/linalg/matfuncs.py +18 -0
- scipy/sparse/linalg/tests/__init__.py +0 -0
- scipy/sparse/linalg/tests/propack_test_data.npz +0 -0
- scipy/sparse/linalg/tests/test_expm_multiply.py +367 -0
- scipy/sparse/linalg/tests/test_interface.py +561 -0
- scipy/sparse/linalg/tests/test_matfuncs.py +592 -0
- scipy/sparse/linalg/tests/test_norm.py +154 -0
- scipy/sparse/linalg/tests/test_onenormest.py +252 -0
- scipy/sparse/linalg/tests/test_propack.py +165 -0
- scipy/sparse/linalg/tests/test_pydata_sparse.py +272 -0
- scipy/sparse/linalg/tests/test_special_sparse_arrays.py +337 -0
- scipy/sparse/sparsetools.py +17 -0
- scipy/sparse/spfuncs.py +17 -0
- scipy/sparse/sputils.py +17 -0
- scipy/sparse/tests/__init__.py +0 -0
- scipy/sparse/tests/data/csc_py2.npz +0 -0
- scipy/sparse/tests/data/csc_py3.npz +0 -0
- scipy/sparse/tests/test_arithmetic1d.py +341 -0
- scipy/sparse/tests/test_array_api.py +561 -0
- scipy/sparse/tests/test_base.py +5870 -0
- scipy/sparse/tests/test_common1d.py +447 -0
- scipy/sparse/tests/test_construct.py +872 -0
- scipy/sparse/tests/test_coo.py +1119 -0
- scipy/sparse/tests/test_csc.py +98 -0
- scipy/sparse/tests/test_csr.py +214 -0
- scipy/sparse/tests/test_dok.py +209 -0
- scipy/sparse/tests/test_extract.py +51 -0
- scipy/sparse/tests/test_indexing1d.py +603 -0
- scipy/sparse/tests/test_matrix_io.py +109 -0
- scipy/sparse/tests/test_minmax1d.py +128 -0
- scipy/sparse/tests/test_sparsetools.py +344 -0
- scipy/sparse/tests/test_spfuncs.py +97 -0
- scipy/sparse/tests/test_sputils.py +424 -0
- scipy/spatial/__init__.py +129 -0
- scipy/spatial/_ckdtree.cp313t-win_arm64.lib +0 -0
- scipy/spatial/_ckdtree.cp313t-win_arm64.pyd +0 -0
- scipy/spatial/_distance_pybind.cp313t-win_arm64.lib +0 -0
- scipy/spatial/_distance_pybind.cp313t-win_arm64.pyd +0 -0
- scipy/spatial/_distance_wrap.cp313t-win_arm64.lib +0 -0
- scipy/spatial/_distance_wrap.cp313t-win_arm64.pyd +0 -0
- scipy/spatial/_geometric_slerp.py +238 -0
- scipy/spatial/_hausdorff.cp313t-win_arm64.lib +0 -0
- scipy/spatial/_hausdorff.cp313t-win_arm64.pyd +0 -0
- scipy/spatial/_kdtree.py +920 -0
- scipy/spatial/_plotutils.py +274 -0
- scipy/spatial/_procrustes.py +132 -0
- scipy/spatial/_qhull.cp313t-win_arm64.lib +0 -0
- scipy/spatial/_qhull.cp313t-win_arm64.pyd +0 -0
- scipy/spatial/_qhull.pyi +213 -0
- scipy/spatial/_spherical_voronoi.py +341 -0
- scipy/spatial/_voronoi.cp313t-win_arm64.lib +0 -0
- scipy/spatial/_voronoi.cp313t-win_arm64.pyd +0 -0
- scipy/spatial/_voronoi.pyi +4 -0
- scipy/spatial/ckdtree.py +18 -0
- scipy/spatial/distance.py +3147 -0
- scipy/spatial/distance.pyi +210 -0
- scipy/spatial/kdtree.py +25 -0
- scipy/spatial/qhull.py +25 -0
- scipy/spatial/qhull_src/COPYING_QHULL.txt +39 -0
- scipy/spatial/tests/__init__.py +0 -0
- scipy/spatial/tests/data/cdist-X1.txt +10 -0
- scipy/spatial/tests/data/cdist-X2.txt +20 -0
- scipy/spatial/tests/data/degenerate_pointset.npz +0 -0
- scipy/spatial/tests/data/iris.txt +150 -0
- scipy/spatial/tests/data/pdist-boolean-inp.txt +20 -0
- scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-chebyshev-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-cityblock-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-correlation-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-correlation-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-cosine-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-cosine-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-double-inp.txt +20 -0
- scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-euclidean-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-hamming-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-jaccard-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-jensenshannon-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-seuclidean-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-spearman-ml.txt +1 -0
- scipy/spatial/tests/data/random-bool-data.txt +100 -0
- scipy/spatial/tests/data/random-double-data.txt +100 -0
- scipy/spatial/tests/data/random-int-data.txt +100 -0
- scipy/spatial/tests/data/random-uint-data.txt +100 -0
- scipy/spatial/tests/data/selfdual-4d-polytope.txt +27 -0
- scipy/spatial/tests/test__plotutils.py +91 -0
- scipy/spatial/tests/test__procrustes.py +116 -0
- scipy/spatial/tests/test_distance.py +2389 -0
- scipy/spatial/tests/test_hausdorff.py +199 -0
- scipy/spatial/tests/test_kdtree.py +1536 -0
- scipy/spatial/tests/test_qhull.py +1313 -0
- scipy/spatial/tests/test_slerp.py +417 -0
- scipy/spatial/tests/test_spherical_voronoi.py +358 -0
- scipy/spatial/transform/__init__.py +31 -0
- scipy/spatial/transform/_rigid_transform.cp313t-win_arm64.lib +0 -0
- scipy/spatial/transform/_rigid_transform.cp313t-win_arm64.pyd +0 -0
- scipy/spatial/transform/_rotation.cp313t-win_arm64.lib +0 -0
- scipy/spatial/transform/_rotation.cp313t-win_arm64.pyd +0 -0
- scipy/spatial/transform/_rotation_groups.py +140 -0
- scipy/spatial/transform/_rotation_spline.py +460 -0
- scipy/spatial/transform/rotation.py +21 -0
- scipy/spatial/transform/tests/__init__.py +0 -0
- scipy/spatial/transform/tests/test_rigid_transform.py +1221 -0
- scipy/spatial/transform/tests/test_rotation.py +2569 -0
- scipy/spatial/transform/tests/test_rotation_groups.py +169 -0
- scipy/spatial/transform/tests/test_rotation_spline.py +183 -0
- scipy/special/__init__.pxd +1 -0
- scipy/special/__init__.py +841 -0
- scipy/special/_add_newdocs.py +9961 -0
- scipy/special/_basic.py +3576 -0
- scipy/special/_comb.cp313t-win_arm64.lib +0 -0
- scipy/special/_comb.cp313t-win_arm64.pyd +0 -0
- scipy/special/_ellip_harm.py +214 -0
- scipy/special/_ellip_harm_2.cp313t-win_arm64.lib +0 -0
- scipy/special/_ellip_harm_2.cp313t-win_arm64.pyd +0 -0
- scipy/special/_gufuncs.cp313t-win_arm64.lib +0 -0
- scipy/special/_gufuncs.cp313t-win_arm64.pyd +0 -0
- scipy/special/_input_validation.py +17 -0
- scipy/special/_lambertw.py +149 -0
- scipy/special/_logsumexp.py +426 -0
- scipy/special/_mptestutils.py +453 -0
- scipy/special/_multiufuncs.py +610 -0
- scipy/special/_orthogonal.py +2592 -0
- scipy/special/_orthogonal.pyi +330 -0
- scipy/special/_precompute/__init__.py +0 -0
- scipy/special/_precompute/cosine_cdf.py +17 -0
- scipy/special/_precompute/expn_asy.py +54 -0
- scipy/special/_precompute/gammainc_asy.py +116 -0
- scipy/special/_precompute/gammainc_data.py +124 -0
- scipy/special/_precompute/hyp2f1_data.py +484 -0
- scipy/special/_precompute/lambertw.py +68 -0
- scipy/special/_precompute/loggamma.py +43 -0
- scipy/special/_precompute/struve_convergence.py +131 -0
- scipy/special/_precompute/utils.py +38 -0
- scipy/special/_precompute/wright_bessel.py +342 -0
- scipy/special/_precompute/wright_bessel_data.py +152 -0
- scipy/special/_precompute/wrightomega.py +41 -0
- scipy/special/_precompute/zetac.py +27 -0
- scipy/special/_sf_error.py +15 -0
- scipy/special/_specfun.cp313t-win_arm64.lib +0 -0
- scipy/special/_specfun.cp313t-win_arm64.pyd +0 -0
- scipy/special/_special_ufuncs.cp313t-win_arm64.lib +0 -0
- scipy/special/_special_ufuncs.cp313t-win_arm64.pyd +0 -0
- scipy/special/_spfun_stats.py +106 -0
- scipy/special/_spherical_bessel.py +397 -0
- scipy/special/_support_alternative_backends.py +295 -0
- scipy/special/_test_internal.cp313t-win_arm64.lib +0 -0
- scipy/special/_test_internal.cp313t-win_arm64.pyd +0 -0
- scipy/special/_test_internal.pyi +9 -0
- scipy/special/_testutils.py +321 -0
- scipy/special/_ufuncs.cp313t-win_arm64.lib +0 -0
- scipy/special/_ufuncs.cp313t-win_arm64.pyd +0 -0
- scipy/special/_ufuncs.pyi +522 -0
- scipy/special/_ufuncs.pyx +13173 -0
- scipy/special/_ufuncs_cxx.cp313t-win_arm64.lib +0 -0
- scipy/special/_ufuncs_cxx.cp313t-win_arm64.pyd +0 -0
- scipy/special/_ufuncs_cxx.pxd +142 -0
- scipy/special/_ufuncs_cxx.pyx +427 -0
- scipy/special/_ufuncs_cxx_defs.h +147 -0
- scipy/special/_ufuncs_defs.h +57 -0
- scipy/special/add_newdocs.py +15 -0
- scipy/special/basic.py +87 -0
- scipy/special/cython_special.cp313t-win_arm64.lib +0 -0
- scipy/special/cython_special.cp313t-win_arm64.pyd +0 -0
- scipy/special/cython_special.pxd +259 -0
- scipy/special/cython_special.pyi +3 -0
- scipy/special/orthogonal.py +45 -0
- scipy/special/sf_error.py +20 -0
- scipy/special/specfun.py +24 -0
- scipy/special/spfun_stats.py +17 -0
- scipy/special/tests/__init__.py +0 -0
- scipy/special/tests/_cython_examples/extending.pyx +12 -0
- scipy/special/tests/_cython_examples/meson.build +34 -0
- scipy/special/tests/data/__init__.py +0 -0
- scipy/special/tests/data/boost.npz +0 -0
- scipy/special/tests/data/gsl.npz +0 -0
- scipy/special/tests/data/local.npz +0 -0
- scipy/special/tests/test_basic.py +4815 -0
- scipy/special/tests/test_bdtr.py +112 -0
- scipy/special/tests/test_boost_ufuncs.py +64 -0
- scipy/special/tests/test_boxcox.py +125 -0
- scipy/special/tests/test_cdflib.py +712 -0
- scipy/special/tests/test_cdft_asymptotic.py +49 -0
- scipy/special/tests/test_cephes_intp_cast.py +29 -0
- scipy/special/tests/test_cosine_distr.py +83 -0
- scipy/special/tests/test_cython_special.py +363 -0
- scipy/special/tests/test_data.py +719 -0
- scipy/special/tests/test_dd.py +42 -0
- scipy/special/tests/test_digamma.py +45 -0
- scipy/special/tests/test_ellip_harm.py +278 -0
- scipy/special/tests/test_erfinv.py +89 -0
- scipy/special/tests/test_exponential_integrals.py +118 -0
- scipy/special/tests/test_extending.py +28 -0
- scipy/special/tests/test_faddeeva.py +85 -0
- scipy/special/tests/test_gamma.py +12 -0
- scipy/special/tests/test_gammainc.py +152 -0
- scipy/special/tests/test_hyp2f1.py +2566 -0
- scipy/special/tests/test_hypergeometric.py +234 -0
- scipy/special/tests/test_iv_ratio.py +249 -0
- scipy/special/tests/test_kolmogorov.py +491 -0
- scipy/special/tests/test_lambertw.py +109 -0
- scipy/special/tests/test_legendre.py +1518 -0
- scipy/special/tests/test_log1mexp.py +85 -0
- scipy/special/tests/test_loggamma.py +70 -0
- scipy/special/tests/test_logit.py +162 -0
- scipy/special/tests/test_logsumexp.py +469 -0
- scipy/special/tests/test_mpmath.py +2293 -0
- scipy/special/tests/test_nan_inputs.py +65 -0
- scipy/special/tests/test_ndtr.py +77 -0
- scipy/special/tests/test_ndtri_exp.py +94 -0
- scipy/special/tests/test_orthogonal.py +821 -0
- scipy/special/tests/test_orthogonal_eval.py +275 -0
- scipy/special/tests/test_owens_t.py +53 -0
- scipy/special/tests/test_pcf.py +24 -0
- scipy/special/tests/test_pdtr.py +48 -0
- scipy/special/tests/test_powm1.py +65 -0
- scipy/special/tests/test_precompute_expn_asy.py +24 -0
- scipy/special/tests/test_precompute_gammainc.py +108 -0
- scipy/special/tests/test_precompute_utils.py +36 -0
- scipy/special/tests/test_round.py +18 -0
- scipy/special/tests/test_sf_error.py +146 -0
- scipy/special/tests/test_sici.py +36 -0
- scipy/special/tests/test_specfun.py +48 -0
- scipy/special/tests/test_spence.py +32 -0
- scipy/special/tests/test_spfun_stats.py +61 -0
- scipy/special/tests/test_sph_harm.py +85 -0
- scipy/special/tests/test_spherical_bessel.py +400 -0
- scipy/special/tests/test_support_alternative_backends.py +248 -0
- scipy/special/tests/test_trig.py +72 -0
- scipy/special/tests/test_ufunc_signatures.py +46 -0
- scipy/special/tests/test_wright_bessel.py +205 -0
- scipy/special/tests/test_wrightomega.py +117 -0
- scipy/special/tests/test_zeta.py +301 -0
- scipy/stats/__init__.py +670 -0
- scipy/stats/_ansari_swilk_statistics.cp313t-win_arm64.lib +0 -0
- scipy/stats/_ansari_swilk_statistics.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_axis_nan_policy.py +692 -0
- scipy/stats/_biasedurn.cp313t-win_arm64.lib +0 -0
- scipy/stats/_biasedurn.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_biasedurn.pxd +27 -0
- scipy/stats/_binned_statistic.py +795 -0
- scipy/stats/_binomtest.py +375 -0
- scipy/stats/_bws_test.py +177 -0
- scipy/stats/_censored_data.py +459 -0
- scipy/stats/_common.py +5 -0
- scipy/stats/_constants.py +42 -0
- scipy/stats/_continued_fraction.py +387 -0
- scipy/stats/_continuous_distns.py +12486 -0
- scipy/stats/_correlation.py +210 -0
- scipy/stats/_covariance.py +636 -0
- scipy/stats/_crosstab.py +204 -0
- scipy/stats/_discrete_distns.py +2098 -0
- scipy/stats/_distn_infrastructure.py +4201 -0
- scipy/stats/_distr_params.py +299 -0
- scipy/stats/_distribution_infrastructure.py +5750 -0
- scipy/stats/_entropy.py +428 -0
- scipy/stats/_finite_differences.py +145 -0
- scipy/stats/_fit.py +1351 -0
- scipy/stats/_hypotests.py +2060 -0
- scipy/stats/_kde.py +732 -0
- scipy/stats/_ksstats.py +600 -0
- scipy/stats/_levy_stable/__init__.py +1231 -0
- scipy/stats/_levy_stable/levyst.cp313t-win_arm64.lib +0 -0
- scipy/stats/_levy_stable/levyst.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_mannwhitneyu.py +492 -0
- scipy/stats/_mgc.py +550 -0
- scipy/stats/_morestats.py +4626 -0
- scipy/stats/_mstats_basic.py +3658 -0
- scipy/stats/_mstats_extras.py +521 -0
- scipy/stats/_multicomp.py +449 -0
- scipy/stats/_multivariate.py +7281 -0
- scipy/stats/_new_distributions.py +452 -0
- scipy/stats/_odds_ratio.py +466 -0
- scipy/stats/_page_trend_test.py +486 -0
- scipy/stats/_probability_distribution.py +1964 -0
- scipy/stats/_qmc.py +2956 -0
- scipy/stats/_qmc_cy.cp313t-win_arm64.lib +0 -0
- scipy/stats/_qmc_cy.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_qmc_cy.pyi +54 -0
- scipy/stats/_qmvnt.py +454 -0
- scipy/stats/_qmvnt_cy.cp313t-win_arm64.lib +0 -0
- scipy/stats/_qmvnt_cy.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_quantile.py +335 -0
- scipy/stats/_rcont/__init__.py +4 -0
- scipy/stats/_rcont/rcont.cp313t-win_arm64.lib +0 -0
- scipy/stats/_rcont/rcont.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_relative_risk.py +263 -0
- scipy/stats/_resampling.py +2352 -0
- scipy/stats/_result_classes.py +40 -0
- scipy/stats/_sampling.py +1314 -0
- scipy/stats/_sensitivity_analysis.py +713 -0
- scipy/stats/_sobol.cp313t-win_arm64.lib +0 -0
- scipy/stats/_sobol.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_sobol.pyi +54 -0
- scipy/stats/_sobol_direction_numbers.npz +0 -0
- scipy/stats/_stats.cp313t-win_arm64.lib +0 -0
- scipy/stats/_stats.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_stats.pxd +10 -0
- scipy/stats/_stats_mstats_common.py +322 -0
- scipy/stats/_stats_py.py +11089 -0
- scipy/stats/_stats_pythran.cp313t-win_arm64.lib +0 -0
- scipy/stats/_stats_pythran.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_survival.py +683 -0
- scipy/stats/_tukeylambda_stats.py +199 -0
- scipy/stats/_unuran/__init__.py +0 -0
- scipy/stats/_unuran/unuran_wrapper.cp313t-win_arm64.lib +0 -0
- scipy/stats/_unuran/unuran_wrapper.cp313t-win_arm64.pyd +0 -0
- scipy/stats/_unuran/unuran_wrapper.pyi +179 -0
- scipy/stats/_variation.py +126 -0
- scipy/stats/_warnings_errors.py +38 -0
- scipy/stats/_wilcoxon.py +265 -0
- scipy/stats/biasedurn.py +16 -0
- scipy/stats/contingency.py +521 -0
- scipy/stats/distributions.py +24 -0
- scipy/stats/kde.py +18 -0
- scipy/stats/morestats.py +27 -0
- scipy/stats/mstats.py +140 -0
- scipy/stats/mstats_basic.py +42 -0
- scipy/stats/mstats_extras.py +25 -0
- scipy/stats/mvn.py +17 -0
- scipy/stats/qmc.py +236 -0
- scipy/stats/sampling.py +73 -0
- scipy/stats/stats.py +41 -0
- scipy/stats/tests/__init__.py +0 -0
- scipy/stats/tests/common_tests.py +356 -0
- scipy/stats/tests/data/_mvt.py +171 -0
- scipy/stats/tests/data/fisher_exact_results_from_r.py +607 -0
- scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy +0 -0
- scipy/stats/tests/data/nist_anova/AtmWtAg.dat +108 -0
- scipy/stats/tests/data/nist_anova/SiRstv.dat +85 -0
- scipy/stats/tests/data/nist_anova/SmLs01.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs02.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs03.dat +18069 -0
- scipy/stats/tests/data/nist_anova/SmLs04.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs05.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs06.dat +18069 -0
- scipy/stats/tests/data/nist_anova/SmLs07.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs08.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs09.dat +18069 -0
- scipy/stats/tests/data/nist_linregress/Norris.dat +97 -0
- scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy +0 -0
- scipy/stats/tests/data/studentized_range_mpmath_ref.json +1499 -0
- scipy/stats/tests/test_axis_nan_policy.py +1388 -0
- scipy/stats/tests/test_binned_statistic.py +568 -0
- scipy/stats/tests/test_censored_data.py +152 -0
- scipy/stats/tests/test_contingency.py +294 -0
- scipy/stats/tests/test_continued_fraction.py +173 -0
- scipy/stats/tests/test_continuous.py +2198 -0
- scipy/stats/tests/test_continuous_basic.py +1053 -0
- scipy/stats/tests/test_continuous_fit_censored.py +683 -0
- scipy/stats/tests/test_correlation.py +80 -0
- scipy/stats/tests/test_crosstab.py +115 -0
- scipy/stats/tests/test_discrete_basic.py +580 -0
- scipy/stats/tests/test_discrete_distns.py +700 -0
- scipy/stats/tests/test_distributions.py +10413 -0
- scipy/stats/tests/test_entropy.py +322 -0
- scipy/stats/tests/test_fast_gen_inversion.py +435 -0
- scipy/stats/tests/test_fit.py +1090 -0
- scipy/stats/tests/test_hypotests.py +1991 -0
- scipy/stats/tests/test_kdeoth.py +676 -0
- scipy/stats/tests/test_marray.py +289 -0
- scipy/stats/tests/test_mgc.py +217 -0
- scipy/stats/tests/test_morestats.py +3259 -0
- scipy/stats/tests/test_mstats_basic.py +2071 -0
- scipy/stats/tests/test_mstats_extras.py +172 -0
- scipy/stats/tests/test_multicomp.py +405 -0
- scipy/stats/tests/test_multivariate.py +4381 -0
- scipy/stats/tests/test_odds_ratio.py +148 -0
- scipy/stats/tests/test_qmc.py +1492 -0
- scipy/stats/tests/test_quantile.py +199 -0
- scipy/stats/tests/test_rank.py +345 -0
- scipy/stats/tests/test_relative_risk.py +95 -0
- scipy/stats/tests/test_resampling.py +2000 -0
- scipy/stats/tests/test_sampling.py +1450 -0
- scipy/stats/tests/test_sensitivity_analysis.py +310 -0
- scipy/stats/tests/test_stats.py +9707 -0
- scipy/stats/tests/test_survival.py +466 -0
- scipy/stats/tests/test_tukeylambda_stats.py +85 -0
- scipy/stats/tests/test_variation.py +216 -0
- scipy/version.py +12 -0
- scipy-1.16.2.dist-info/DELVEWHEEL +2 -0
- scipy-1.16.2.dist-info/LICENSE.txt +912 -0
- scipy-1.16.2.dist-info/METADATA +1061 -0
- scipy-1.16.2.dist-info/RECORD +1530 -0
- scipy-1.16.2.dist-info/WHEEL +4 -0
- scipy.libs/msvcp140-5f1c5dd31916990d94181e07bc3afb32.dll +0 -0
- scipy.libs/scipy_openblas-f3ac85b1f412f7e86514c923dc4058d1.dll +0 -0
scipy/stats/tests/test_morestats.py
@@ -0,0 +1,3259 @@
|
|
1
|
+
# Author: Travis Oliphant, 2002
|
2
|
+
#
|
3
|
+
# Further enhancements and tests added by numerous SciPy developers.
|
4
|
+
#
|
5
|
+
import contextlib
|
6
|
+
import math
|
7
|
+
import re
|
8
|
+
import sys
|
9
|
+
import warnings
|
10
|
+
from functools import partial
|
11
|
+
|
12
|
+
import numpy as np
|
13
|
+
from numpy.random import RandomState
|
14
|
+
from numpy.testing import (assert_array_equal, assert_almost_equal,
|
15
|
+
assert_array_less, assert_array_almost_equal,
|
16
|
+
assert_, assert_allclose, assert_equal,
|
17
|
+
suppress_warnings)
|
18
|
+
import pytest
|
19
|
+
from pytest import raises as assert_raises
|
20
|
+
|
21
|
+
from scipy import optimize, stats, special
|
22
|
+
from scipy.stats._morestats import _abw_state, _get_As_weibull, _Avals_weibull
|
23
|
+
from .common_tests import check_named_results
|
24
|
+
from .._hypotests import _get_wilcoxon_distr, _get_wilcoxon_distr2
|
25
|
+
from scipy.stats._binomtest import _binary_search_for_binom_tst
|
26
|
+
from scipy.stats._distr_params import distcont
|
27
|
+
from scipy.stats._axis_nan_policy import (SmallSampleWarning, too_small_nd_omit,
|
28
|
+
too_small_1d_omit, too_small_1d_not_omit)
|
29
|
+
|
30
|
+
from scipy._lib._array_api import is_numpy
|
31
|
+
from scipy._lib._array_api_no_0d import (
|
32
|
+
xp_assert_close,
|
33
|
+
xp_assert_equal,
|
34
|
+
xp_assert_less,
|
35
|
+
)
|
36
|
+
|
37
|
+
|
38
|
+
skip_xp_backends = pytest.mark.skip_xp_backends
|
39
|
+
|
40
|
+
distcont = dict(distcont) # type: ignore
|
41
|
+
|
42
|
+
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
|
43
|
+
# check if it's available
|
44
|
+
try:
|
45
|
+
import matplotlib
|
46
|
+
matplotlib.rcParams['backend'] = 'Agg'
|
47
|
+
import matplotlib.pyplot as plt
|
48
|
+
have_matplotlib = True
|
49
|
+
except Exception:
|
50
|
+
have_matplotlib = False
|
51
|
+
|
52
|
+
|
53
|
+
# test data gear.dat from NIST for Levene and Bartlett test
|
54
|
+
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
|
55
|
+
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
|
56
|
+
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
|
57
|
+
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
|
58
|
+
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
|
59
|
+
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
|
60
|
+
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
|
61
|
+
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
|
62
|
+
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
|
63
|
+
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
|
64
|
+
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
|
65
|
+
|
66
|
+
|
67
|
+
# The loggamma RVS stream is changing due to gh-13349; this version
|
68
|
+
# preserves the old stream so that tests don't change.
|
69
|
+
def _old_loggamma_rvs(*args, **kwargs):
|
70
|
+
return np.log(stats.gamma.rvs(*args, **kwargs))
|
71
|
+
|
72
|
+
|
73
|
+
class TestBayes_mvs:
|
74
|
+
def test_basic(self):
|
75
|
+
# Expected values in this test simply taken from the function. For
|
76
|
+
# some checks regarding correctness of implementation, see review in
|
77
|
+
# gh-674
|
78
|
+
data = [6, 9, 12, 7, 8, 8, 13]
|
79
|
+
mean, var, std = stats.bayes_mvs(data)
|
80
|
+
assert_almost_equal(mean.statistic, 9.0)
|
81
|
+
assert_allclose(mean.minmax, (7.103650222492964, 10.896349777507034),
|
82
|
+
rtol=1e-6)
|
83
|
+
|
84
|
+
assert_almost_equal(var.statistic, 10.0)
|
85
|
+
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
|
86
|
+
rtol=1e-09)
|
87
|
+
|
88
|
+
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
|
89
|
+
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
|
90
|
+
rtol=1e-14)
|
91
|
+
|
92
|
+
def test_empty_input(self):
|
93
|
+
assert_raises(ValueError, stats.bayes_mvs, [])
|
94
|
+
|
95
|
+
def test_result_attributes(self):
|
96
|
+
x = np.arange(15)
|
97
|
+
attributes = ('statistic', 'minmax')
|
98
|
+
res = stats.bayes_mvs(x)
|
99
|
+
|
100
|
+
for i in res:
|
101
|
+
check_named_results(i, attributes)
|
102
|
+
|
103
|
+
|
104
|
+
class TestMvsdist:
|
105
|
+
def test_basic(self):
|
106
|
+
data = [6, 9, 12, 7, 8, 8, 13]
|
107
|
+
mean, var, std = stats.mvsdist(data)
|
108
|
+
assert_almost_equal(mean.mean(), 9.0)
|
109
|
+
assert_allclose(mean.interval(0.9), (7.103650222492964,
|
110
|
+
10.896349777507034), rtol=1e-14)
|
111
|
+
|
112
|
+
assert_almost_equal(var.mean(), 10.0)
|
113
|
+
assert_allclose(var.interval(0.9), (3.1767242068607087,
|
114
|
+
24.45910381334018), rtol=1e-09)
|
115
|
+
|
116
|
+
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
|
117
|
+
assert_allclose(std.interval(0.9), (1.7823367265645145,
|
118
|
+
4.9456146050146312), rtol=1e-14)
|
119
|
+
|
120
|
+
def test_empty_input(self):
|
121
|
+
assert_raises(ValueError, stats.mvsdist, [])
|
122
|
+
|
123
|
+
def test_bad_arg(self):
|
124
|
+
# Raise ValueError if fewer than two data points are given.
|
125
|
+
data = [1]
|
126
|
+
assert_raises(ValueError, stats.mvsdist, data)
|
127
|
+
|
128
|
+
def test_warns(self):
|
129
|
+
# regression test for gh-5270
|
130
|
+
# make sure there are no spurious divide-by-zero warnings
|
131
|
+
with warnings.catch_warnings():
|
132
|
+
warnings.simplefilter('error', RuntimeWarning)
|
133
|
+
[x.mean() for x in stats.mvsdist([1, 2, 3])]
|
134
|
+
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
|
135
|
+
|
136
|
+
|
137
|
+
class TestShapiro:
|
138
|
+
def test_basic(self):
|
139
|
+
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
|
140
|
+
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
|
141
|
+
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
|
142
|
+
w, pw = stats.shapiro(x1)
|
143
|
+
shapiro_test = stats.shapiro(x1)
|
144
|
+
assert_almost_equal(w, 0.90047299861907959, decimal=6)
|
145
|
+
assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
|
146
|
+
assert_almost_equal(pw, 0.042089745402336121, decimal=6)
|
147
|
+
assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
|
148
|
+
|
149
|
+
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
|
150
|
+
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
|
151
|
+
0.08, 3.67, 2.81, 3.49]
|
152
|
+
w, pw = stats.shapiro(x2)
|
153
|
+
shapiro_test = stats.shapiro(x2)
|
154
|
+
assert_almost_equal(w, 0.9590270, decimal=6)
|
155
|
+
assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
|
156
|
+
assert_almost_equal(pw, 0.52460, decimal=3)
|
157
|
+
assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
|
158
|
+
|
159
|
+
# Verified against R
|
160
|
+
x3 = stats.norm.rvs(loc=5, scale=3, size=100, random_state=12345678)
|
161
|
+
w, pw = stats.shapiro(x3)
|
162
|
+
shapiro_test = stats.shapiro(x3)
|
163
|
+
assert_almost_equal(w, 0.9772805571556091, decimal=6)
|
164
|
+
assert_almost_equal(shapiro_test.statistic, 0.9772805571556091, decimal=6)
|
165
|
+
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
|
166
|
+
assert_almost_equal(shapiro_test.pvalue, 0.08144091814756393, decimal=3)
|
167
|
+
|
168
|
+
# Extracted from original paper
|
169
|
+
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
|
170
|
+
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
|
171
|
+
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
|
172
|
+
W_expected = 0.83467
|
173
|
+
p_expected = 0.000914
|
174
|
+
w, pw = stats.shapiro(x4)
|
175
|
+
shapiro_test = stats.shapiro(x4)
|
176
|
+
assert_almost_equal(w, W_expected, decimal=4)
|
177
|
+
assert_almost_equal(shapiro_test.statistic, W_expected, decimal=4)
|
178
|
+
assert_almost_equal(pw, p_expected, decimal=5)
|
179
|
+
assert_almost_equal(shapiro_test.pvalue, p_expected, decimal=5)
|
180
|
+
|
181
|
+
def test_2d(self):
|
182
|
+
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
|
183
|
+
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
|
184
|
+
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
|
185
|
+
w, pw = stats.shapiro(x1)
|
186
|
+
shapiro_test = stats.shapiro(x1)
|
187
|
+
assert_almost_equal(w, 0.90047299861907959, decimal=6)
|
188
|
+
assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
|
189
|
+
assert_almost_equal(pw, 0.042089745402336121, decimal=6)
|
190
|
+
assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
|
191
|
+
|
192
|
+
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
|
193
|
+
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
|
194
|
+
0.08, 3.67, 2.81, 3.49]]
|
195
|
+
w, pw = stats.shapiro(x2)
|
196
|
+
shapiro_test = stats.shapiro(x2)
|
197
|
+
assert_almost_equal(w, 0.9590270, decimal=6)
|
198
|
+
assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
|
199
|
+
assert_almost_equal(pw, 0.52460, decimal=3)
|
200
|
+
assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
|
201
|
+
|
202
|
+
@pytest.mark.parametrize('x', ([], [1], [1, 2]))
|
203
|
+
def test_not_enough_values(self, x):
|
204
|
+
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
|
205
|
+
res = stats.shapiro(x)
|
206
|
+
assert_equal(res.statistic, np.nan)
|
207
|
+
assert_equal(res.pvalue, np.nan)
|
208
|
+
|
209
|
+
def test_nan_input(self):
|
210
|
+
x = np.arange(10.)
|
211
|
+
x[9] = np.nan
|
212
|
+
|
213
|
+
w, pw = stats.shapiro(x)
|
214
|
+
shapiro_test = stats.shapiro(x)
|
215
|
+
assert_equal(w, np.nan)
|
216
|
+
assert_equal(shapiro_test.statistic, np.nan)
|
217
|
+
# Originally, shapiro returned a p-value of 1 in this case,
|
218
|
+
# but there is no way to produce a numerical p-value if the
|
219
|
+
# statistic is not a number. NaN is more appropriate.
|
220
|
+
assert_almost_equal(pw, np.nan)
|
221
|
+
assert_almost_equal(shapiro_test.pvalue, np.nan)
|
222
|
+
|
223
|
+
def test_gh14462(self):
|
224
|
+
# shapiro is theoretically location-invariant, but when the magnitude
|
225
|
+
# of the values is much greater than the variance, there can be
|
226
|
+
# numerical issues. Fixed by subtracting median from the data.
|
227
|
+
# See gh-14462.
|
228
|
+
|
229
|
+
trans_val, maxlog = stats.boxcox([122500, 474400, 110400])
|
230
|
+
res = stats.shapiro(trans_val)
|
231
|
+
|
232
|
+
# Reference from R:
|
233
|
+
# options(digits=16)
|
234
|
+
# x = c(0.00000000e+00, 3.39996924e-08, -6.35166875e-09)
|
235
|
+
# shapiro.test(x)
|
236
|
+
ref = (0.86468431705371, 0.2805581751566)
|
237
|
+
|
238
|
+
assert_allclose(res, ref, rtol=1e-5)
|
239
|
+
|
240
|
+
def test_length_3_gh18322(self):
|
241
|
+
# gh-18322 reported that the p-value could be negative for input of
|
242
|
+
# length 3. Check that this is resolved.
|
243
|
+
res = stats.shapiro([0.6931471805599453, 0.0, 0.0])
|
244
|
+
assert res.pvalue >= 0
|
245
|
+
|
246
|
+
# R `shapiro.test` doesn't produce an accurate p-value in the case
|
247
|
+
# above. Check that the formula used in `stats.shapiro` is not wrong.
|
248
|
+
# options(digits=16)
|
249
|
+
# x = c(-0.7746653110021126, -0.4344432067942129, 1.8157053280290931)
|
250
|
+
# shapiro.test(x)
|
251
|
+
x = [-0.7746653110021126, -0.4344432067942129, 1.8157053280290931]
|
252
|
+
res = stats.shapiro(x)
|
253
|
+
assert_allclose(res.statistic, 0.84658770645509)
|
254
|
+
assert_allclose(res.pvalue, 0.2313666489882, rtol=1e-6)
|
255
|
+
|
256
|
+
|
257
|
+
class TestAnderson:
|
258
|
+
def test_normal(self):
|
259
|
+
rs = RandomState(1234567890)
|
260
|
+
x1 = rs.standard_exponential(size=50)
|
261
|
+
x2 = rs.standard_normal(size=50)
|
262
|
+
A, crit, sig = stats.anderson(x1)
|
263
|
+
assert_array_less(crit[:-1], A)
|
264
|
+
A, crit, sig = stats.anderson(x2)
|
265
|
+
assert_array_less(A, crit[-2:])
|
266
|
+
|
267
|
+
v = np.ones(10)
|
268
|
+
v[0] = 0
|
269
|
+
A, crit, sig = stats.anderson(v)
|
270
|
+
# The expected statistic 3.208057 was computed independently of scipy.
|
271
|
+
# For example, in R:
|
272
|
+
# > library(nortest)
|
273
|
+
# > v <- rep(1, 10)
|
274
|
+
# > v[1] <- 0
|
275
|
+
# > result <- ad.test(v)
|
276
|
+
# > result$statistic
|
277
|
+
# A
|
278
|
+
# 3.208057
|
279
|
+
assert_allclose(A, 3.208057)
|
280
|
+
|
281
|
+
def test_expon(self):
|
282
|
+
rs = RandomState(1234567890)
|
283
|
+
x1 = rs.standard_exponential(size=50)
|
284
|
+
x2 = rs.standard_normal(size=50)
|
285
|
+
A, crit, sig = stats.anderson(x1, 'expon')
|
286
|
+
assert_array_less(A, crit[-2:])
|
287
|
+
with np.errstate(all='ignore'):
|
288
|
+
A, crit, sig = stats.anderson(x2, 'expon')
|
289
|
+
assert_(A > crit[-1])
|
290
|
+
|
291
|
+
def test_gumbel(self):
|
292
|
+
# Regression test for gh-6306. Before that issue was fixed,
|
293
|
+
# this case would return a2=inf.
|
294
|
+
v = np.ones(100)
|
295
|
+
v[0] = 0.0
|
296
|
+
a2, crit, sig = stats.anderson(v, 'gumbel')
|
297
|
+
# A brief reimplementation of the calculation of the statistic.
|
298
|
+
n = len(v)
|
299
|
+
xbar, s = stats.gumbel_l.fit(v)
|
300
|
+
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
|
301
|
+
logsf = stats.gumbel_l.logsf(v, xbar, s)
|
302
|
+
i = np.arange(1, n+1)
|
303
|
+
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
|
304
|
+
|
305
|
+
assert_allclose(a2, expected_a2)
|
306
|
+
|
307
|
+
def test_bad_arg(self):
|
308
|
+
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
|
309
|
+
|
310
|
+
def test_result_attributes(self):
|
311
|
+
rs = RandomState(1234567890)
|
312
|
+
x = rs.standard_exponential(size=50)
|
313
|
+
res = stats.anderson(x)
|
314
|
+
attributes = ('statistic', 'critical_values', 'significance_level')
|
315
|
+
check_named_results(res, attributes)
|
316
|
+
|
317
|
+
def test_gumbel_l(self):
|
318
|
+
# gh-2592, gh-6337
|
319
|
+
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
|
320
|
+
rs = RandomState(1234567890)
|
321
|
+
x = rs.gumbel(size=100)
|
322
|
+
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
|
323
|
+
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
|
324
|
+
|
325
|
+
assert_allclose(A2, A1)
|
326
|
+
|
327
|
+
def test_gumbel_r(self):
|
328
|
+
# gh-2592, gh-6337
|
329
|
+
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
|
330
|
+
rs = RandomState(1234567890)
|
331
|
+
x1 = rs.gumbel(size=100)
|
332
|
+
x2 = np.ones(100)
|
333
|
+
# A constant array is a degenerate case and breaks gumbel_r.fit, so
|
334
|
+
# change one value in x2.
|
335
|
+
x2[0] = 0.996
|
336
|
+
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
|
337
|
+
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
|
338
|
+
|
339
|
+
assert_array_less(A1, crit1[-2:])
|
340
|
+
assert_(A2 > crit2[-1])
|
341
|
+
|
342
|
+
def test_weibull_min_case_A(self):
|
343
|
+
# data and reference values from `anderson` reference [7]
|
344
|
+
x = np.array([225, 171, 198, 189, 189, 135, 162, 135, 117, 162])
|
345
|
+
res = stats.anderson(x, 'weibull_min')
|
346
|
+
m, loc, scale = res.fit_result.params
|
347
|
+
assert_allclose((m, loc, scale), (2.38, 99.02, 78.23), rtol=2e-3)
|
348
|
+
assert_allclose(res.statistic, 0.260, rtol=1e-3)
|
349
|
+
assert res.statistic < res.critical_values[0]
|
350
|
+
|
351
|
+
c = 1 / m # ~0.42
|
352
|
+
assert_allclose(c, 1/2.38, rtol=2e-3)
|
353
|
+
# interpolate between rows for c=0.4 and c=0.45, indices -3 and -2
|
354
|
+
As40 = _Avals_weibull[-3]
|
355
|
+
As45 = _Avals_weibull[-2]
|
356
|
+
As_ref = As40 + (c - 0.4)/(0.45 - 0.4) * (As45 - As40)
|
357
|
+
# atol=1e-3 because results are rounded up to the next third decimal
|
358
|
+
assert np.all(res.critical_values > As_ref)
|
359
|
+
assert_allclose(res.critical_values, As_ref, atol=1e-3)
|
360
|
+
|
361
|
+
def test_weibull_min_case_B(self):
|
362
|
+
# From `anderson` reference [7]
|
363
|
+
x = np.array([74, 57, 48, 29, 502, 12, 70, 21,
|
364
|
+
29, 386, 59, 27, 153, 26, 326])
|
365
|
+
message = "Maximum likelihood estimation has converged to "
|
366
|
+
with pytest.raises(ValueError, match=message):
|
367
|
+
stats.anderson(x, 'weibull_min')
|
368
|
+
|
369
|
+
@pytest.mark.thread_unsafe
|
370
|
+
def test_weibull_warning_error(self):
|
371
|
+
# Check for warning message when there are too few observations
|
372
|
+
# This is also an example in which an error occurs during fitting
|
373
|
+
x = -np.array([225, 75, 57, 168, 107, 12, 61, 43, 29])
|
374
|
+
wmessage = "Critical values of the test statistic are given for the..."
|
375
|
+
emessage = "An error occurred while fitting the Weibull distribution..."
|
376
|
+
wcontext = pytest.warns(UserWarning, match=wmessage)
|
377
|
+
econtext = pytest.raises(ValueError, match=emessage)
|
378
|
+
with wcontext, econtext:
|
379
|
+
stats.anderson(x, 'weibull_min')
|
380
|
+
|
381
|
+
@pytest.mark.parametrize('distname',
|
382
|
+
['norm', 'expon', 'gumbel_l', 'extreme1',
|
383
|
+
'gumbel', 'gumbel_r', 'logistic', 'weibull_min'])
|
384
|
+
def test_anderson_fit_params(self, distname):
|
385
|
+
# check that anderson now returns a FitResult
|
386
|
+
rng = np.random.default_rng(330691555377792039)
|
387
|
+
real_distname = ('gumbel_l' if distname in {'extreme1', 'gumbel'}
|
388
|
+
else distname)
|
389
|
+
dist = getattr(stats, real_distname)
|
390
|
+
params = distcont[real_distname]
|
391
|
+
x = dist.rvs(*params, size=1000, random_state=rng)
|
392
|
+
res = stats.anderson(x, distname)
|
393
|
+
assert res.fit_result.success
|
394
|
+
|
395
|
+
def test_anderson_weibull_As(self):
|
396
|
+
m = 1 # "when mi < 2, so that c > 0.5, the last line...should be used"
|
397
|
+
assert_equal(_get_As_weibull(1/m), _Avals_weibull[-1])
|
398
|
+
m = np.inf
|
399
|
+
assert_equal(_get_As_weibull(1/m), _Avals_weibull[0])
|
400
|
+
|
401
|
+
|
402
|
+
class TestAndersonKSamp:
|
403
|
+
def test_example1a(self):
|
404
|
+
# Example data from Scholz & Stephens (1987), originally
|
405
|
+
# published in Lehmann (1995, Nonparametrics, Statistical
|
406
|
+
# Methods Based on Ranks, p. 309)
|
407
|
+
# Pass a mixture of lists and arrays
|
408
|
+
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
|
409
|
+
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
|
410
|
+
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
|
411
|
+
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
|
412
|
+
|
413
|
+
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
|
414
|
+
|
415
|
+
assert_almost_equal(Tk, 4.449, 3)
|
416
|
+
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
|
417
|
+
tm[0:5], 4)
|
418
|
+
assert_allclose(p, 0.0021, atol=0.00025)
|
419
|
+
|
420
|
+
def test_example1b(self):
|
421
|
+
# Example data from Scholz & Stephens (1987), originally
|
422
|
+
# published in Lehmann (1995, Nonparametrics, Statistical
|
423
|
+
# Methods Based on Ranks, p. 309)
|
424
|
+
# Pass arrays
|
425
|
+
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
|
426
|
+
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
|
427
|
+
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
|
428
|
+
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
|
429
|
+
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
|
430
|
+
|
431
|
+
assert_almost_equal(Tk, 4.480, 3)
|
432
|
+
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
|
433
|
+
tm[0:5], 4)
|
434
|
+
assert_allclose(p, 0.0020, atol=0.00025)
|
435
|
+
|
436
|
+
@pytest.mark.xslow
|
437
|
+
def test_example2a(self):
|
438
|
+
# Example data taken from an earlier technical report of
|
439
|
+
# Scholz and Stephens
|
440
|
+
# Pass lists instead of arrays
|
441
|
+
t1 = [194, 15, 41, 29, 33, 181]
|
442
|
+
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
|
443
|
+
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
|
444
|
+
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
|
445
|
+
118, 25, 156, 310, 76, 26, 44, 23, 62]
|
446
|
+
t5 = [130, 208, 70, 101, 208]
|
447
|
+
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
|
448
|
+
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
|
449
|
+
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
|
450
|
+
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
|
451
|
+
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
|
452
|
+
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
|
453
|
+
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
|
454
|
+
22, 139, 210, 97, 30, 23, 13, 14]
|
455
|
+
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
|
456
|
+
t12 = [50, 254, 5, 283, 35, 12]
|
457
|
+
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
|
458
|
+
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
|
459
|
+
61, 34]
|
460
|
+
|
461
|
+
samples = (t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14)
|
462
|
+
Tk, tm, p = stats.anderson_ksamp(samples, midrank=False)
|
463
|
+
assert_almost_equal(Tk, 3.288, 3)
|
464
|
+
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
|
465
|
+
tm[0:5], 4)
|
466
|
+
assert_allclose(p, 0.0041, atol=0.00025)
|
467
|
+
|
468
|
+
rng = np.random.default_rng(6989860141921615054)
|
469
|
+
method = stats.PermutationMethod(n_resamples=9999, rng=rng)
|
470
|
+
res = stats.anderson_ksamp(samples, midrank=False, method=method)
|
471
|
+
assert_array_equal(res.statistic, Tk)
|
472
|
+
assert_array_equal(res.critical_values, tm)
|
473
|
+
assert_allclose(res.pvalue, p, atol=6e-4)
|
474
|
+
|
475
|
+
def test_example2b(self):
|
476
|
+
# Example data taken from an earlier technical report of
|
477
|
+
# Scholz and Stephens
|
478
|
+
t1 = [194, 15, 41, 29, 33, 181]
|
479
|
+
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
|
480
|
+
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
|
481
|
+
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
|
482
|
+
118, 25, 156, 310, 76, 26, 44, 23, 62]
|
483
|
+
t5 = [130, 208, 70, 101, 208]
|
484
|
+
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
|
485
|
+
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
|
486
|
+
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
|
487
|
+
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
|
488
|
+
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
|
489
|
+
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
|
490
|
+
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
|
491
|
+
22, 139, 210, 97, 30, 23, 13, 14]
|
492
|
+
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
|
493
|
+
t12 = [50, 254, 5, 283, 35, 12]
|
494
|
+
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
|
495
|
+
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
|
496
|
+
61, 34]
|
497
|
+
|
498
|
+
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
|
499
|
+
t9, t10, t11, t12, t13, t14),
|
500
|
+
midrank=True)
|
501
|
+
|
502
|
+
assert_almost_equal(Tk, 3.294, 3)
|
503
|
+
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
|
504
|
+
tm[0:5], 4)
|
505
|
+
assert_allclose(p, 0.0041, atol=0.00025)
|
506
|
+
|
507
|
+
@pytest.mark.thread_unsafe
|
508
|
+
def test_R_kSamples(self):
|
509
|
+
# test values generates with R package kSamples
|
510
|
+
# package version 1.2-6 (2017-06-14)
|
511
|
+
# r1 = 1:100
|
512
|
+
# continuous case (no ties) --> version 1
|
513
|
+
# res <- kSamples::ad.test(r1, r1 + 40.5)
|
514
|
+
# res$ad[1, "T.AD"] # 41.105
|
515
|
+
# res$ad[1, " asympt. P-value"] # 5.8399e-18
|
516
|
+
#
|
517
|
+
# discrete case (ties allowed) --> version 2 (here: midrank=True)
|
518
|
+
# res$ad[2, "T.AD"] # 41.235
|
519
|
+
#
|
520
|
+
# res <- kSamples::ad.test(r1, r1 + .5)
|
521
|
+
# res$ad[1, "T.AD"] # -1.2824
|
522
|
+
# res$ad[1, " asympt. P-value"] # 1
|
523
|
+
# res$ad[2, "T.AD"] # -1.2944
|
524
|
+
#
|
525
|
+
# res <- kSamples::ad.test(r1, r1 + 7.5)
|
526
|
+
# res$ad[1, "T.AD"] # 1.4923
|
527
|
+
# res$ad[1, " asympt. P-value"] # 0.077501
|
528
|
+
#
|
529
|
+
# res <- kSamples::ad.test(r1, r1 + 6)
|
530
|
+
# res$ad[2, "T.AD"] # 0.63892
|
531
|
+
# res$ad[2, " asympt. P-value"] # 0.17981
|
532
|
+
#
|
533
|
+
# res <- kSamples::ad.test(r1, r1 + 11.5)
|
534
|
+
# res$ad[1, "T.AD"] # 4.5042
|
535
|
+
# res$ad[1, " asympt. P-value"] # 0.00545
|
536
|
+
#
|
537
|
+
# res <- kSamples::ad.test(r1, r1 + 13.5)
|
538
|
+
# res$ad[1, "T.AD"] # 6.2982
|
539
|
+
# res$ad[1, " asympt. P-value"] # 0.00118
|
540
|
+
|
541
|
+
x1 = np.linspace(1, 100, 100)
|
542
|
+
# test case: different distributions;p-value floored at 0.001
|
543
|
+
# test case for issue #5493 / #8536
|
544
|
+
with suppress_warnings() as sup:
|
545
|
+
sup.filter(UserWarning, message='p-value floored')
|
546
|
+
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False)
|
547
|
+
assert_almost_equal(s, 41.105, 3)
|
548
|
+
assert_equal(p, 0.001)
|
549
|
+
|
550
|
+
with suppress_warnings() as sup:
|
551
|
+
sup.filter(UserWarning, message='p-value floored')
|
552
|
+
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5])
|
553
|
+
assert_almost_equal(s, 41.235, 3)
|
554
|
+
assert_equal(p, 0.001)
|
555
|
+
|
556
|
+
# test case: similar distributions --> p-value capped at 0.25
|
557
|
+
with suppress_warnings() as sup:
|
558
|
+
sup.filter(UserWarning, message='p-value capped')
|
559
|
+
s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False)
|
560
|
+
assert_almost_equal(s, -1.2824, 4)
|
561
|
+
assert_equal(p, 0.25)
|
562
|
+
|
563
|
+
with suppress_warnings() as sup:
|
564
|
+
sup.filter(UserWarning, message='p-value capped')
|
565
|
+
s, _, p = stats.anderson_ksamp([x1, x1 + .5])
|
566
|
+
assert_almost_equal(s, -1.2944, 4)
|
567
|
+
assert_equal(p, 0.25)
|
568
|
+
|
569
|
+
# test case: check interpolated p-value in [0.01, 0.25] (no ties)
|
570
|
+
s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False)
|
571
|
+
assert_almost_equal(s, 1.4923, 4)
|
572
|
+
assert_allclose(p, 0.0775, atol=0.005, rtol=0)
|
573
|
+
|
574
|
+
# test case: check interpolated p-value in [0.01, 0.25] (w/ ties)
|
575
|
+
s, _, p = stats.anderson_ksamp([x1, x1 + 6])
|
576
|
+
assert_almost_equal(s, 0.6389, 4)
|
577
|
+
assert_allclose(p, 0.1798, atol=0.005, rtol=0)
|
578
|
+
|
579
|
+
# test extended critical values for p=0.001 and p=0.005
|
580
|
+
s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False)
|
581
|
+
assert_almost_equal(s, 4.5042, 4)
|
582
|
+
assert_allclose(p, 0.00545, atol=0.0005, rtol=0)
|
583
|
+
|
584
|
+
s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False)
|
585
|
+
assert_almost_equal(s, 6.2982, 4)
|
586
|
+
assert_allclose(p, 0.00118, atol=0.0001, rtol=0)
|
587
|
+
|
588
|
+
def test_not_enough_samples(self):
|
589
|
+
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
|
590
|
+
|
591
|
+
def test_no_distinct_observations(self):
|
592
|
+
assert_raises(ValueError, stats.anderson_ksamp,
|
593
|
+
(np.ones(5), np.ones(5)))
|
594
|
+
|
595
|
+
def test_empty_sample(self):
|
596
|
+
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
|
597
|
+
|
598
|
+
def test_result_attributes(self):
|
599
|
+
# Pass a mixture of lists and arrays
|
600
|
+
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
|
601
|
+
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
|
602
|
+
res = stats.anderson_ksamp((t1, t2), midrank=False)
|
603
|
+
|
604
|
+
attributes = ('statistic', 'critical_values', 'significance_level')
|
605
|
+
check_named_results(res, attributes)
|
606
|
+
|
607
|
+
assert_equal(res.significance_level, res.pvalue)
|
608
|
+
|
609
|
+
|
610
|
+
class TestAnsari:
|
611
|
+
|
612
|
+
def test_small(self):
|
613
|
+
x = [1, 2, 3, 3, 4]
|
614
|
+
y = [3, 2, 6, 1, 6, 1, 4, 1]
|
615
|
+
with suppress_warnings() as sup:
|
616
|
+
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
|
617
|
+
W, pval = stats.ansari(x, y)
|
618
|
+
assert_almost_equal(W, 23.5, 11)
|
619
|
+
assert_almost_equal(pval, 0.13499256881897437, 11)
|
620
|
+
|
621
|
+
def test_approx(self):
|
622
|
+
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
|
623
|
+
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
|
624
|
+
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
|
625
|
+
100, 96, 108, 103, 104, 114, 114, 113, 108,
|
626
|
+
106, 99))
|
627
|
+
|
628
|
+
with suppress_warnings() as sup:
|
629
|
+
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
|
630
|
+
W, pval = stats.ansari(ramsay, parekh)
|
631
|
+
|
632
|
+
assert_almost_equal(W, 185.5, 11)
|
633
|
+
assert_almost_equal(pval, 0.18145819972867083, 11)
|
634
|
+
|
635
|
+
def test_exact(self):
|
636
|
+
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
|
637
|
+
assert_almost_equal(W, 10.0, 11)
|
638
|
+
assert_almost_equal(pval, 0.533333333333333333, 7)
|
639
|
+
|
640
|
+
@pytest.mark.parametrize('args', [([], [1]), ([1], [])])
|
641
|
+
def test_bad_arg(self, args):
|
642
|
+
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
|
643
|
+
res = stats.ansari(*args)
|
644
|
+
assert_equal(res.statistic, np.nan)
|
645
|
+
assert_equal(res.pvalue, np.nan)
|
646
|
+
|
647
|
+
def test_result_attributes(self):
|
648
|
+
x = [1, 2, 3, 3, 4]
|
649
|
+
y = [3, 2, 6, 1, 6, 1, 4, 1]
|
650
|
+
with suppress_warnings() as sup:
|
651
|
+
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
|
652
|
+
res = stats.ansari(x, y)
|
653
|
+
attributes = ('statistic', 'pvalue')
|
654
|
+
check_named_results(res, attributes)
|
655
|
+
|
656
|
+
def test_bad_alternative(self):
|
657
|
+
# invalid value for alternative must raise a ValueError
|
658
|
+
x1 = [1, 2, 3, 4]
|
659
|
+
x2 = [5, 6, 7, 8]
|
660
|
+
match = "'alternative' must be 'two-sided'"
|
661
|
+
with assert_raises(ValueError, match=match):
|
662
|
+
stats.ansari(x1, x2, alternative='foo')
|
663
|
+
|
664
|
+
def test_alternative_exact(self):
|
665
|
+
x1 = [-5, 1, 5, 10, 15, 20, 25] # high scale, loc=10
|
666
|
+
x2 = [7.5, 8.5, 9.5, 10.5, 11.5, 12.5] # low scale, loc=10
|
667
|
+
# ratio of scales is greater than 1. So, the
|
668
|
+
# p-value must be high when `alternative='less'`
|
669
|
+
# and low when `alternative='greater'`.
|
670
|
+
statistic, pval = stats.ansari(x1, x2)
|
671
|
+
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
|
672
|
+
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
|
673
|
+
assert pval_l > 0.95
|
674
|
+
assert pval_g < 0.05 # level of significance.
|
675
|
+
# also check if the p-values sum up to 1 plus the probability
|
676
|
+
# mass under the calculated statistic.
|
677
|
+
prob = _abw_state.a.pmf(statistic, len(x1), len(x2))
|
678
|
+
assert_allclose(pval_g + pval_l, 1 + prob, atol=1e-12)
|
679
|
+
# also check if one of the one-sided p-value equals half the
|
680
|
+
# two-sided p-value and the other one-sided p-value is its
|
681
|
+
# compliment.
|
682
|
+
assert_allclose(pval_g, pval/2, atol=1e-12)
|
683
|
+
assert_allclose(pval_l, 1+prob-pval/2, atol=1e-12)
|
684
|
+
# sanity check. The result should flip if
|
685
|
+
# we exchange x and y.
|
686
|
+
pval_l_reverse = stats.ansari(x2, x1, alternative='less').pvalue
|
687
|
+
pval_g_reverse = stats.ansari(x2, x1, alternative='greater').pvalue
|
688
|
+
assert pval_l_reverse < 0.05
|
689
|
+
assert pval_g_reverse > 0.95
|
690
|
+
|
691
|
+
@pytest.mark.parametrize(
|
692
|
+
'x, y, alternative, expected',
|
693
|
+
# the tests are designed in such a way that the
|
694
|
+
# if else statement in ansari test for exact
|
695
|
+
# mode is covered.
|
696
|
+
[([1, 2, 3, 4], [5, 6, 7, 8], 'less', 0.6285714285714),
|
697
|
+
([1, 2, 3, 4], [5, 6, 7, 8], 'greater', 0.6285714285714),
|
698
|
+
([1, 2, 3], [4, 5, 6, 7, 8], 'less', 0.8928571428571),
|
699
|
+
([1, 2, 3], [4, 5, 6, 7, 8], 'greater', 0.2857142857143),
|
700
|
+
([1, 2, 3, 4, 5], [6, 7, 8], 'less', 0.2857142857143),
|
701
|
+
([1, 2, 3, 4, 5], [6, 7, 8], 'greater', 0.8928571428571)]
|
702
|
+
)
|
703
|
+
def test_alternative_exact_with_R(self, x, y, alternative, expected):
|
704
|
+
# testing with R on arbitrary data
|
705
|
+
# Sample R code used for the third test case above:
|
706
|
+
# ```R
|
707
|
+
# > options(digits=16)
|
708
|
+
# > x <- c(1,2,3)
|
709
|
+
# > y <- c(4,5,6,7,8)
|
710
|
+
# > ansari.test(x, y, alternative='less', exact=TRUE)
|
711
|
+
#
|
712
|
+
# Ansari-Bradley test
|
713
|
+
#
|
714
|
+
# data: x and y
|
715
|
+
# AB = 6, p-value = 0.8928571428571
|
716
|
+
# alternative hypothesis: true ratio of scales is less than 1
|
717
|
+
#
|
718
|
+
# ```
|
719
|
+
pval = stats.ansari(x, y, alternative=alternative).pvalue
|
720
|
+
assert_allclose(pval, expected, atol=1e-12)
|
721
|
+
|
722
|
+
def test_alternative_approx(self):
|
723
|
+
# intuitive tests for approximation
|
724
|
+
x1 = stats.norm.rvs(0, 5, size=100, random_state=123)
|
725
|
+
x2 = stats.norm.rvs(0, 2, size=100, random_state=123)
|
726
|
+
# for m > 55 or n > 55, the test should automatically
|
727
|
+
# switch to approximation.
|
728
|
+
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
|
729
|
+
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
|
730
|
+
assert_allclose(pval_l, 1.0, atol=1e-12)
|
731
|
+
assert_allclose(pval_g, 0.0, atol=1e-12)
|
732
|
+
# also check if one of the one-sided p-value equals half the
|
733
|
+
# two-sided p-value and the other one-sided p-value is its
|
734
|
+
# compliment.
|
735
|
+
x1 = stats.norm.rvs(0, 2, size=60, random_state=123)
|
736
|
+
x2 = stats.norm.rvs(0, 1.5, size=60, random_state=123)
|
737
|
+
pval = stats.ansari(x1, x2).pvalue
|
738
|
+
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
|
739
|
+
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
|
740
|
+
assert_allclose(pval_g, pval/2, atol=1e-12)
|
741
|
+
assert_allclose(pval_l, 1-pval/2, atol=1e-12)
|
742
|
+
|
743
|
+
|
744
|
+
class TestBartlett:
|
745
|
+
def test_data(self, xp):
|
746
|
+
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
|
747
|
+
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
|
748
|
+
args = [xp.asarray(arg) for arg in args]
|
749
|
+
T, pval = stats.bartlett(*args)
|
750
|
+
xp_assert_close(T, xp.asarray(20.78587342806484))
|
751
|
+
xp_assert_close(pval, xp.asarray(0.0136358632781))
|
752
|
+
|
753
|
+
def test_too_few_args(self, xp):
|
754
|
+
message = "Must enter at least two input sample vectors."
|
755
|
+
with pytest.raises(ValueError, match=message):
|
756
|
+
stats.bartlett(xp.asarray([1.]))
|
757
|
+
|
758
|
+
def test_result_attributes(self, xp):
|
759
|
+
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
|
760
|
+
args = [xp.asarray(arg) for arg in args]
|
761
|
+
res = stats.bartlett(*args)
|
762
|
+
attributes = ('statistic', 'pvalue')
|
763
|
+
check_named_results(res, attributes, xp=xp)
|
764
|
+
|
765
|
+
@pytest.mark.filterwarnings("ignore:invalid value encountered in divide")
|
766
|
+
def test_empty_arg(self, xp):
|
767
|
+
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
|
768
|
+
args = [xp.asarray(arg) for arg in args]
|
769
|
+
if is_numpy(xp):
|
770
|
+
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
|
771
|
+
res = stats.bartlett(*args)
|
772
|
+
else:
|
773
|
+
with np.testing.suppress_warnings() as sup:
|
774
|
+
# torch/array_api_strict
|
775
|
+
sup.filter(RuntimeWarning, "invalid value encountered")
|
776
|
+
sup.filter(UserWarning, r"var\(\): degrees of freedom is <= 0.")
|
777
|
+
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
|
778
|
+
res = stats.bartlett(*args)
|
779
|
+
NaN = xp.asarray(xp.nan)
|
780
|
+
xp_assert_equal(res.statistic, NaN)
|
781
|
+
xp_assert_equal(res.pvalue, NaN)
|
782
|
+
|
783
|
+
def test_negative_pvalue_gh21152(self, xp):
|
784
|
+
a = xp.asarray([10.1, 10.2, 10.3, 10.4], dtype=xp.float32)
|
785
|
+
b = xp.asarray([10.15, 10.25, 10.35, 10.45], dtype=xp.float32)
|
786
|
+
c = xp.asarray([10.05, 10.15, 10.25, 10.35], dtype=xp.float32)
|
787
|
+
res = stats.bartlett(a, b, c)
|
788
|
+
assert xp.all(res.statistic >= 0)
|
789
|
+
|
790
|
+
|
791
|
+
class TestLevene:
|
792
|
+
|
793
|
+
def test_data(self):
|
794
|
+
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
|
795
|
+
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
|
796
|
+
W, pval = stats.levene(*args)
|
797
|
+
assert_almost_equal(W, 1.7059176930008939, 7)
|
798
|
+
assert_almost_equal(pval, 0.0990829755522, 7)
|
799
|
+
|
800
|
+
def test_trimmed1(self):
|
801
|
+
# Test that center='trimmed' gives the same result as center='mean'
|
802
|
+
# when proportiontocut=0.
|
803
|
+
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
|
804
|
+
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
|
805
|
+
proportiontocut=0.0)
|
806
|
+
assert_almost_equal(W1, W2)
|
807
|
+
assert_almost_equal(pval1, pval2)
|
808
|
+
|
809
|
+
def test_trimmed2(self):
|
810
|
+
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
|
811
|
+
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
|
812
|
+
np.random.seed(1234)
|
813
|
+
x2 = np.random.permutation(x)
|
814
|
+
|
815
|
+
# Use center='trimmed'
|
816
|
+
W0, pval0 = stats.levene(x, y, center='trimmed',
|
817
|
+
proportiontocut=0.125)
|
818
|
+
W1, pval1 = stats.levene(x2, y, center='trimmed',
|
819
|
+
proportiontocut=0.125)
|
820
|
+
# Trim the data here, and use center='mean'
|
821
|
+
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
|
822
|
+
# Result should be the same.
|
823
|
+
assert_almost_equal(W0, W2)
|
824
|
+
assert_almost_equal(W1, W2)
|
825
|
+
assert_almost_equal(pval1, pval2)
|
826
|
+
|
827
|
+
def test_equal_mean_median(self):
|
828
|
+
x = np.linspace(-1, 1, 21)
|
829
|
+
np.random.seed(1234)
|
830
|
+
x2 = np.random.permutation(x)
|
831
|
+
y = x**3
|
832
|
+
W1, pval1 = stats.levene(x, y, center='mean')
|
833
|
+
W2, pval2 = stats.levene(x2, y, center='median')
|
834
|
+
assert_almost_equal(W1, W2)
|
835
|
+
assert_almost_equal(pval1, pval2)
|
836
|
+
|
837
|
+
def test_bad_keyword(self):
|
838
|
+
x = np.linspace(-1, 1, 21)
|
839
|
+
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
|
840
|
+
|
841
|
+
def test_bad_center_value(self):
|
842
|
+
x = np.linspace(-1, 1, 21)
|
843
|
+
assert_raises(ValueError, stats.levene, x, x, center='trim')
|
844
|
+
|
845
|
+
def test_too_few_args(self):
|
846
|
+
assert_raises(ValueError, stats.levene, [1])
|
847
|
+
|
848
|
+
def test_result_attributes(self):
|
849
|
+
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
|
850
|
+
res = stats.levene(*args)
|
851
|
+
attributes = ('statistic', 'pvalue')
|
852
|
+
check_named_results(res, attributes)
|
853
|
+
|
854
|
+
# temporary fix for issue #9252: only accept 1d input
|
855
|
+
def test_1d_input(self):
|
856
|
+
x = np.array([[1, 2], [3, 4]])
|
857
|
+
assert_raises(ValueError, stats.levene, g1, x)
|
858
|
+
|
859
|
+
|
860
|
+
class TestBinomTest:
    """Tests for stats.binomtest."""

    # Expected results here are from R binom.test, e.g.
    # options(digits=16)
    # binom.test(484, 967, p=0.48)
    #
    def test_two_sided_pvalues1(self):
        # `tol` could be stricter on most architectures, but the value
        # here is limited by accuracy of `binom.cdf` for large inputs on
        # Linux_Python_37_32bit_full and aarch64
        rtol = 1e-10  # aarch64 observed rtol: 1.5e-11
        res = stats.binomtest(10079999, 21000000, 0.48)
        assert_allclose(res.pvalue, 1.0, rtol=rtol)
        res = stats.binomtest(10079990, 21000000, 0.48)
        assert_allclose(res.pvalue, 0.9966892187965, rtol=rtol)
        res = stats.binomtest(10080009, 21000000, 0.48)
        assert_allclose(res.pvalue, 0.9970377203856, rtol=rtol)
        res = stats.binomtest(10080017, 21000000, 0.48)
        assert_allclose(res.pvalue, 0.9940754817328, rtol=1e-9)

    def test_two_sided_pvalues2(self):
        rtol = 1e-10  # no aarch64 failure with 1e-15, preemptive bump
        res = stats.binomtest(9, n=21, p=0.48)
        assert_allclose(res.pvalue, 0.6689672431939, rtol=rtol)
        res = stats.binomtest(4, 21, 0.48)
        assert_allclose(res.pvalue, 0.008139563452106, rtol=rtol)
        res = stats.binomtest(11, 21, 0.48)
        assert_allclose(res.pvalue, 0.8278629664608, rtol=rtol)
        res = stats.binomtest(7, 21, 0.48)
        assert_allclose(res.pvalue, 0.1966772901718, rtol=rtol)
        res = stats.binomtest(3, 10, .5)
        assert_allclose(res.pvalue, 0.34375, rtol=rtol)
        res = stats.binomtest(2, 2, .4)
        assert_allclose(res.pvalue, 0.16, rtol=rtol)
        res = stats.binomtest(2, 4, .3)
        assert_allclose(res.pvalue, 0.5884, rtol=rtol)

    def test_edge_cases(self):
        rtol = 1e-10  # aarch64 observed rtol: 1.33e-15
        res = stats.binomtest(484, 967, 0.5)
        assert_allclose(res.pvalue, 1, rtol=rtol)
        res = stats.binomtest(3, 47, 3/47)
        assert_allclose(res.pvalue, 1, rtol=rtol)
        res = stats.binomtest(13, 46, 13/46)
        assert_allclose(res.pvalue, 1, rtol=rtol)
        res = stats.binomtest(15, 44, 15/44)
        assert_allclose(res.pvalue, 1, rtol=rtol)
        res = stats.binomtest(7, 13, 0.5)
        assert_allclose(res.pvalue, 1, rtol=rtol)
        res = stats.binomtest(6, 11, 0.5)
        assert_allclose(res.pvalue, 1, rtol=rtol)

    def test_binary_srch_for_binom_tst(self):
        # Test that old behavior of binomtest is maintained
        # by the new binary search method in cases where d
        # exactly equals the input on one side.
        n = 10
        p = 0.5
        k = 3
        # First test for the case where k > mode of PMF
        i = np.arange(np.ceil(p * n), n+1)
        d = stats.binom.pmf(k, n, p)
        # Old way of calculating y, probably consistent with R.
        y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
        # New way with binary search.
        ix = _binary_search_for_binom_tst(lambda x1:
                                          -stats.binom.pmf(x1, n, p),
                                          -d, np.ceil(p * n), n)
        y2 = n - ix + int(d == stats.binom.pmf(ix, n, p))
        assert_allclose(y1, y2, rtol=1e-9)
        # Now test for the other side.
        k = 7
        i = np.arange(np.floor(p * n) + 1)
        d = stats.binom.pmf(k, n, p)
        # Old way of calculating y.
        y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
        # New way with binary search.
        ix = _binary_search_for_binom_tst(lambda x1:
                                          stats.binom.pmf(x1, n, p),
                                          d, 0, np.floor(p * n))
        y2 = ix + 1
        assert_allclose(y1, y2, rtol=1e-9)

    # Expected results here are from R 3.6.2 binom.test
    @pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
                             [('less', 0.148831050443,
                               0.0, 0.2772002496709138),
                              ('greater', 0.9004695898947,
                               0.1366613252458672, 1.0),
                              ('two-sided', 0.2983720970096,
                               0.1266555521019559, 0.2918426890886281)])
    def test_confidence_intervals1(self, alternative, pval, ci_low, ci_high):
        res = stats.binomtest(20, n=100, p=0.25, alternative=alternative)
        assert_allclose(res.pvalue, pval, rtol=1e-12)
        assert_equal(res.statistic, 0.2)
        ci = res.proportion_ci(confidence_level=0.95)
        assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-12)

    # Expected results here are from R 3.6.2 binom.test.
    @pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
                             [('less',
                               0.005656361, 0.0, 0.1872093),
                              ('greater',
                               0.9987146, 0.008860761, 1.0),
                              ('two-sided',
                               0.01191714, 0.006872485, 0.202706269)])
    def test_confidence_intervals2(self, alternative, pval, ci_low, ci_high):
        res = stats.binomtest(3, n=50, p=0.2, alternative=alternative)
        assert_allclose(res.pvalue, pval, rtol=1e-6)
        assert_equal(res.statistic, 0.06)
        ci = res.proportion_ci(confidence_level=0.99)
        assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)

    # Expected results here are from R 3.6.2 binom.test.
    @pytest.mark.parametrize('alternative, pval, ci_high',
                             [('less', 0.05631351, 0.2588656),
                              ('greater', 1.0, 1.0),
                              ('two-sided', 0.07604122, 0.3084971)])
    def test_confidence_interval_exact_k0(self, alternative, pval, ci_high):
        # Test with k=0, n = 10.
        res = stats.binomtest(0, 10, p=0.25, alternative=alternative)
        assert_allclose(res.pvalue, pval, rtol=1e-6)
        ci = res.proportion_ci(confidence_level=0.95)
        assert_equal(ci.low, 0.0)
        assert_allclose(ci.high, ci_high, rtol=1e-6)

    # Expected results here are from R 3.6.2 binom.test.
    @pytest.mark.parametrize('alternative, pval, ci_low',
                             [('less', 1.0, 0.0),
                              ('greater', 9.536743e-07, 0.7411344),
                              ('two-sided', 9.536743e-07, 0.6915029)])
    def test_confidence_interval_exact_k_is_n(self, alternative, pval, ci_low):
        # Test with k = n = 10.
        res = stats.binomtest(10, 10, p=0.25, alternative=alternative)
        assert_allclose(res.pvalue, pval, rtol=1e-6)
        ci = res.proportion_ci(confidence_level=0.95)
        assert_equal(ci.high, 1.0)
        assert_allclose(ci.low, ci_low, rtol=1e-6)

    # Expected results are from the prop.test function in R 3.6.2.
    @pytest.mark.parametrize(
        'k, alternative, corr, conf, ci_low, ci_high',
        [[3, 'two-sided', True, 0.95, 0.08094782, 0.64632928],
         [3, 'two-sided', True, 0.99, 0.0586329, 0.7169416],
         [3, 'two-sided', False, 0.95, 0.1077913, 0.6032219],
         [3, 'two-sided', False, 0.99, 0.07956632, 0.6799753],
         [3, 'less', True, 0.95, 0.0, 0.6043476],
         [3, 'less', True, 0.99, 0.0, 0.6901811],
         [3, 'less', False, 0.95, 0.0, 0.5583002],
         [3, 'less', False, 0.99, 0.0, 0.6507187],
         [3, 'greater', True, 0.95, 0.09644904, 1.0],
         [3, 'greater', True, 0.99, 0.06659141, 1.0],
         [3, 'greater', False, 0.95, 0.1268766, 1.0],
         [3, 'greater', False, 0.99, 0.08974147, 1.0],

         [0, 'two-sided', True, 0.95, 0.0, 0.3445372],
         [0, 'two-sided', False, 0.95, 0.0, 0.2775328],
         [0, 'less', True, 0.95, 0.0, 0.2847374],
         [0, 'less', False, 0.95, 0.0, 0.212942],
         [0, 'greater', True, 0.95, 0.0, 1.0],
         [0, 'greater', False, 0.95, 0.0, 1.0],

         [10, 'two-sided', True, 0.95, 0.6554628, 1.0],
         [10, 'two-sided', False, 0.95, 0.7224672, 1.0],
         [10, 'less', True, 0.95, 0.0, 1.0],
         [10, 'less', False, 0.95, 0.0, 1.0],
         [10, 'greater', True, 0.95, 0.7152626, 1.0],
         [10, 'greater', False, 0.95, 0.787058, 1.0]]
    )
    def test_ci_wilson_method(self, k, alternative, corr, conf,
                              ci_low, ci_high):
        res = stats.binomtest(k, n=10, p=0.1, alternative=alternative)
        if corr:
            method = 'wilsoncc'
        else:
            method = 'wilson'
        ci = res.proportion_ci(confidence_level=conf, method=method)
        assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)

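For orientation, the expected intervals in the test_ci_wilson_method table above come from R's prop.test, which reports Wilson score intervals. The snippet below is only a minimal sketch of the textbook two-sided, uncorrected Wilson formula (the one-sided and continuity-corrected rows in the table need further adjustments); it is an illustration, not the implementation under test, and the helper name wilson_ci is made up here.

import numpy as np
from scipy import stats

def wilson_ci(k, n, confidence_level=0.95):
    # Plain two-sided Wilson score interval, no continuity correction.
    z = stats.norm.ppf(0.5 + confidence_level / 2)
    phat = k / n
    denom = 1 + z**2 / n
    center = (phat + z**2 / (2 * n)) / denom
    halfwidth = z * np.sqrt(phat * (1 - phat) / n + z**2 / (4 * n**2)) / denom
    return center - halfwidth, center + halfwidth

# Approximately reproduces the [3, 'two-sided', False, 0.95, ...] row above:
print(wilson_ci(3, 10))  # ~(0.1078, 0.6032)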
    def test_estimate_equals_hypothesized_prop(self):
        # Test the special case where the estimated proportion equals
        # the hypothesized proportion. When alternative is 'two-sided',
        # the p-value is 1.
        res = stats.binomtest(4, 16, 0.25)
        assert_equal(res.statistic, 0.25)
        assert_equal(res.pvalue, 1.0)

    @pytest.mark.parametrize('k, n', [(0, 0), (-1, 2)])
    def test_invalid_k_n(self, k, n):
        with pytest.raises(ValueError,
                           match="must be an integer not less than"):
            stats.binomtest(k, n)

    def test_invalid_k_too_big(self):
        with pytest.raises(ValueError,
                           match=r"k \(11\) must not be greater than n \(10\)."):
            stats.binomtest(11, 10, 0.25)

    def test_invalid_k_wrong_type(self):
        with pytest.raises(TypeError,
                           match="k must be an integer."):
            stats.binomtest([10, 11], 21, 0.25)

    def test_invalid_p_range(self):
        message = r'p \(-0.5\) must be in range...'
        with pytest.raises(ValueError, match=message):
            stats.binomtest(50, 150, p=-0.5)
        message = r'p \(1.5\) must be in range...'
        with pytest.raises(ValueError, match=message):
            stats.binomtest(50, 150, p=1.5)

    def test_invalid_confidence_level(self):
        res = stats.binomtest(3, n=10, p=0.1)
        message = r"confidence_level \(-1\) must be in the interval"
        with pytest.raises(ValueError, match=message):
            res.proportion_ci(confidence_level=-1)

    def test_invalid_ci_method(self):
        res = stats.binomtest(3, n=10, p=0.1)
        with pytest.raises(ValueError, match=r"method \('plate of shrimp'\) must be"):
            res.proportion_ci(method="plate of shrimp")

    def test_invalid_alternative(self):
        with pytest.raises(ValueError, match=r"alternative \('ekki'\) not..."):
            stats.binomtest(3, n=10, p=0.1, alternative='ekki')

    def test_alias(self):
        res = stats.binomtest(3, n=10, p=0.1)
        assert_equal(res.proportion_estimate, res.statistic)

    @pytest.mark.skipif(sys.maxsize <= 2**32, reason="32-bit does not overflow")
    def test_boost_overflow_raises(self):
        # Boost.Math error policy should raise exceptions in Python
        with pytest.raises(OverflowError, match='Error in function...'):
            stats.binomtest(5, 6, p=sys.float_info.min)


class TestFligner:

    def test_data(self):
        # numbers from R: fligner.test in package stats
        x1 = np.arange(5)
        assert_array_almost_equal(stats.fligner(x1, x1**2),
                                  (3.2282229927203536, 0.072379187848207877),
                                  11)

    def test_trimmed1(self):
        # Perturb input to break ties in the transformed data
        # See https://github.com/scipy/scipy/pull/8042 for more details
        rs = np.random.RandomState(123)

        def _perturb(g):
            return (np.asarray(g) + 1e-10 * rs.randn(len(g))).tolist()

        g1_ = _perturb(g1)
        g2_ = _perturb(g2)
        g3_ = _perturb(g3)
        # Test that center='trimmed' gives the same result as center='mean'
        # when proportiontocut=0.
        Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean')
        Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed',
                                    proportiontocut=0.0)
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    def test_trimmed2(self):
        x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
        y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
        # Use center='trimmed'
        Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
                                    proportiontocut=0.125)
        # Trim the data here, and use center='mean'
        Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
        # Result should be the same.
        assert_almost_equal(Xsq1, Xsq2)
        assert_almost_equal(pval1, pval2)

    # The following test looks reasonable at first, but fligner() uses the
    # function stats.rankdata(), and in one of the cases in this test,
    # there are ties, while in the other (because of normal rounding
    # errors) there are not. This difference leads to differences in the
    # third significant digit of W.
    #
    #def test_equal_mean_median(self):
    #    x = np.linspace(-1,1,21)
    #    y = x**3
    #    W1, pval1 = stats.fligner(x, y, center='mean')
    #    W2, pval2 = stats.fligner(x, y, center='median')
    #    assert_almost_equal(W1, W2)
    #    assert_almost_equal(pval1, pval2)

    def test_bad_keyword(self):
        x = np.linspace(-1, 1, 21)
        assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)

    def test_bad_center_value(self):
        x = np.linspace(-1, 1, 21)
        assert_raises(ValueError, stats.fligner, x, x, center='trim')

    def test_bad_num_args(self):
        # Too few args raises ValueError.
        assert_raises(ValueError, stats.fligner, [1])

    def test_empty_arg(self):
        x = np.arange(5)
        with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
            res = stats.fligner(x, x**2, [])
        assert_equal(res.statistic, np.nan)
        assert_equal(res.pvalue, np.nan)


def mood_cases_with_ties():
    # Generate random `x` and `y` arrays with ties both between and within the
    # samples. Expected results are (statistic, pvalue) from SAS.
    expected_results = [(-1.76658511464992, .0386488678399305),
                        (-.694031428192304, .2438312498647250),
                        (-1.15093525352151, .1248794365836150)]
    seeds = [23453254, 1298352315, 987234597]
    for si, seed in enumerate(seeds):
        rng = np.random.default_rng(seed)
        xy = rng.random(100)
        # Generate random indices to make ties
        tie_ind = rng.integers(low=0, high=99, size=5)
        # Generate a random number of ties for each index.
        num_ties_per_ind = rng.integers(low=1, high=5, size=5)
        # At each `tie_ind`, mark the next `n` indices equal to that value.
        for i, n in zip(tie_ind, num_ties_per_ind):
            for j in range(i + 1, i + n):
                xy[j] = xy[i]
        # scramble order of xy before splitting into `x, y`
        rng.shuffle(xy)
        x, y = np.split(xy, 2)
        yield x, y, 'less', *expected_results[si]


class TestMood:
    @pytest.mark.parametrize("x,y,alternative,stat_expect,p_expect",
                             mood_cases_with_ties())
    def test_against_SAS(self, x, y, alternative, stat_expect, p_expect):
        """
        Example code used to generate SAS output:
        DATA myData;
        INPUT X Y;
        CARDS;
        1 0
        1 1
        1 2
        1 3
        1 4
        2 0
        2 1
        2 4
        2 9
        2 16
        ods graphics on;
        proc npar1way mood data=myData ;
        class X;
        ods output MoodTest=mt;
        proc contents data=mt;
        proc print data=mt;
        format Prob1 17.16 Prob2 17.16 Statistic 17.16 Z 17.16 ;
        title "Mood Two-Sample Test";
        proc print data=myData;
        title "Data for above results";
        run;
        """
        statistic, pvalue = stats.mood(x, y, alternative=alternative)
        assert_allclose(stat_expect, statistic, atol=1e-16)
        assert_allclose(p_expect, pvalue, atol=1e-16)

    @pytest.mark.parametrize("alternative, expected",
                             [('two-sided', (1.019938533549930,
                                             .3077576129778760)),
                              ('less', (1.019938533549930,
                                        1 - .1538788064889380)),
                              ('greater', (1.019938533549930,
                                           .1538788064889380))])
    def test_against_SAS_2(self, alternative, expected):
        # Code to run in SAS in above function
        x = [111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
             101, 96, 97, 102, 107, 113, 116, 113, 110, 98]
        y = [107, 108, 106, 98, 105, 103, 110, 105, 104, 100,
             96, 108, 103, 104, 114, 114, 113, 108, 106, 99]
        res = stats.mood(x, y, alternative=alternative)
        assert_allclose(res, expected)

    def test_mood_order_of_args(self):
        # z should change sign when the order of arguments changes, pvalue
        # should not change
        np.random.seed(1234)
        x1 = np.random.randn(10, 1)
        x2 = np.random.randn(15, 1)
        z1, p1 = stats.mood(x1, x2)
        z2, p2 = stats.mood(x2, x1)
        assert_array_almost_equal([z1, p1], [-z2, p2])

    def test_mood_with_axis_none(self):
        # Test with axis = None, compare with results from R
        x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
              1.59528080213779, 0.329507771815361, -0.820468384118015,
              0.487429052428485, 0.738324705129217, 0.575781351653492,
              -0.305388387156356, 1.51178116845085, 0.389843236411431,
              -0.621240580541804, -2.2146998871775, 1.12493091814311,
              -0.0449336090152309, -0.0161902630989461, 0.943836210685299,
              0.821221195098089, 0.593901321217509]

        x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
              -1.13037567424629, -0.0802517565509893, 0.132420284381094,
              0.707954729271733, -0.23969802417184, 1.98447393665293,
              -0.138787012119665, 0.417650750792556, 0.981752777463662,
              -0.392695355503813, -1.03966897694891, 1.78222896030858,
              -2.31106908460517, 0.878604580921265, 0.035806718015226,
              1.01282869212708, 0.432265154539617, 2.09081920524915,
              -1.19992581964387, 1.58963820029007, 1.95465164222325,
              0.00493777682814261, -2.45170638784613, 0.477237302613617,
              -0.596558168631403, 0.792203270299649, 0.289636710177348]

        x1 = np.array(x1)
        x2 = np.array(x2)
        x1.shape = (10, 2)
        x2.shape = (15, 2)
        assert_array_almost_equal(stats.mood(x1, x2, axis=None),
                                  [-1.31716607555, 0.18778296257])

    def test_mood_2d(self):
        # Test if the results of mood test in 2-D case are consistent with the
        # R result for the same inputs. Numbers from R mood.test().
        ny = 5
        np.random.seed(1234)
        x1 = np.random.randn(10, ny)
        x2 = np.random.randn(15, ny)
        z_vectest, pval_vectest = stats.mood(x1, x2)

        for j in range(ny):
            assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
                                      stats.mood(x1[:, j], x2[:, j]))

        # inverse order of dimensions
        x1 = x1.transpose()
        x2 = x2.transpose()
        z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)

        for i in range(ny):
            # check axis handling is self consistent
            assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
                                      stats.mood(x1[i, :], x2[i, :]))

    def test_mood_3d(self):
        shape = (10, 5, 6)
        np.random.seed(1234)
        x1 = np.random.randn(*shape)
        x2 = np.random.randn(*shape)

        for axis in range(3):
            z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
            # Tests that result for 3-D arrays is equal to that for the
            # same calculation on a set of 1-D arrays taken from the
            # 3-D array
            axes_idx = ([1, 2], [0, 2], [0, 1])  # the two axes != axis
            for i in range(shape[axes_idx[axis][0]]):
                for j in range(shape[axes_idx[axis][1]]):
                    if axis == 0:
                        slice1 = x1[:, i, j]
                        slice2 = x2[:, i, j]
                    elif axis == 1:
                        slice1 = x1[i, :, j]
                        slice2 = x2[i, :, j]
                    else:
                        slice1 = x1[i, j, :]
                        slice2 = x2[i, j, :]

                    assert_array_almost_equal([z_vectest[i, j],
                                               pval_vectest[i, j]],
                                              stats.mood(slice1, slice2))

    def test_mood_bad_arg(self):
        # Warns when the sum of the lengths of the args is less than 3
        with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
            res = stats.mood([1], [])
        assert_equal(res.statistic, np.nan)
        assert_equal(res.pvalue, np.nan)

    def test_mood_alternative(self):

        rng = np.random.RandomState(0)
        x = stats.norm.rvs(scale=0.75, size=100, random_state=rng)
        y = stats.norm.rvs(scale=1.25, size=100, random_state=rng)

        stat1, p1 = stats.mood(x, y, alternative='two-sided')
        stat2, p2 = stats.mood(x, y, alternative='less')
        stat3, p3 = stats.mood(x, y, alternative='greater')

        assert stat1 == stat2 == stat3
        assert_allclose(p1, 0, atol=1e-7)
        assert_allclose(p2, p1/2)
        assert_allclose(p3, 1 - p1/2)

        with pytest.raises(ValueError, match="`alternative` must be..."):
            stats.mood(x, y, alternative='ekki-ekki')

    @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
    def test_result(self, alternative):
        rng = np.random.default_rng(265827767938813079281100964083953437622)
        x1 = rng.standard_normal((10, 1))
        x2 = rng.standard_normal((15, 1))

        res = stats.mood(x1, x2, alternative=alternative)
        assert_equal((res.statistic, res.pvalue), res)


class TestProbplot:

    def test_basic(self):
        x = stats.norm.rvs(size=20, random_state=12345)
        osm, osr = stats.probplot(x, fit=False)
        osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
                        -0.73908135, -0.5857176, -0.44506467, -0.31273668,
                        -0.18568928, -0.06158146, 0.06158146, 0.18568928,
                        0.31273668, 0.44506467, 0.5857176, 0.73908135,
                        0.91222575, 1.11829229, 1.38768012, 1.8241636]
        assert_allclose(osr, np.sort(x))
        assert_allclose(osm, osm_expected)

        res, res_fit = stats.probplot(x, fit=True)
        res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
        assert_allclose(res_fit, res_fit_expected)

    def test_sparams_keyword(self):
        x = stats.norm.rvs(size=100, random_state=123456)
        # Check that None, () and 0 (loc=0, for normal distribution) all work
        # and give the same results
        osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
        osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
        osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
        assert_allclose(osm1, osm2)
        assert_allclose(osm1, osm3)
        assert_allclose(osr1, osr2)
        assert_allclose(osr1, osr3)
        # Check giving (loc, scale) params for normal distribution
        osm, osr = stats.probplot(x, sparams=(), fit=False)

    def test_dist_keyword(self):
        x = stats.norm.rvs(size=20, random_state=12345)
        osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
        osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
        assert_allclose(osm1, osm2)
        assert_allclose(osr1, osr2)

        assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
        assert_raises(AttributeError, stats.probplot, x, dist=[])

        class custom_dist:
            """Some class that looks just enough like a distribution."""
            def ppf(self, q):
                return stats.norm.ppf(q, loc=2)

        osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
        osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
        assert_allclose(osm1, osm2)
        assert_allclose(osr1, osr2)

    @pytest.mark.thread_unsafe
    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    def test_plot_kwarg(self):
        fig = plt.figure()
        fig.add_subplot(111)
        x = stats.t.rvs(3, size=100, random_state=7654321)
        res1, fitres1 = stats.probplot(x, plot=plt)
        plt.close()
        res2, fitres2 = stats.probplot(x, plot=None)
        res3 = stats.probplot(x, fit=False, plot=plt)
        plt.close()
        res4 = stats.probplot(x, fit=False, plot=None)
        # Check that results are consistent between combinations of `fit` and
        # `plot` keywords.
        assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
        assert_allclose(res1, res2)
        assert_allclose(res1, res3)
        assert_allclose(res1, res4)
        assert_allclose(fitres1, fitres2)

        # Check that a Matplotlib Axes object is accepted
        fig = plt.figure()
        ax = fig.add_subplot(111)
        stats.probplot(x, fit=False, plot=ax)
        plt.close()

    def test_probplot_bad_args(self):
        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")

    def test_empty(self):
        assert_equal(stats.probplot([], fit=False),
                     (np.array([]), np.array([])))
        assert_equal(stats.probplot([], fit=True),
                     ((np.array([]), np.array([])),
                      (np.nan, np.nan, 0.0)))

    def test_array_of_size_one(self):
        message = "One or more sample arguments is too small..."
        with (np.errstate(invalid='ignore'),
              pytest.warns(SmallSampleWarning, match=message)):
            assert_equal(stats.probplot([1], fit=True),
                         ((np.array([0.]), np.array([1])),
                          (np.nan, np.nan, np.nan)))


class TestWilcoxon:
    def test_wilcoxon_bad_arg(self):
        # Raise ValueError when two args of different lengths are given or
        # zero_method is unknown.
        assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
        assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2],
                      alternative="dummy")
        assert_raises(ValueError, stats.wilcoxon, [1]*10, method="xyz")

    def test_zero_diff(self):
        x = np.arange(20)
        # pratt and wilcox do not work if x - y == 0 and method == "asymptotic"
        # => warning may be emitted and p-value is nan
        with np.errstate(invalid="ignore"):
            w, p = stats.wilcoxon(x, x, "wilcox", method="asymptotic")
            assert_equal((w, p), (0.0, np.nan))
            w, p = stats.wilcoxon(x, x, "pratt", method="asymptotic")
            assert_equal((w, p), (0.0, np.nan))
        # ranksum is n*(n+1)/2, split in half if zero_method == "zsplit"
        assert_equal(stats.wilcoxon(x, x, "zsplit", method="asymptotic"),
                     (20*21/4, 1.0))

    def test_pratt(self):
        # regression test for gh-6805: p-value matches value from R package
        # coin (wilcoxsign_test) reported in the issue
        x = [1, 2, 3, 4]
        y = [1, 2, 3, 5]
        res = stats.wilcoxon(x, y, zero_method="pratt", method="asymptotic",
                             correction=False)
        assert_allclose(res, (0.0, 0.31731050786291415))

    def test_wilcoxon_arg_type(self):
        # Should be able to accept list as arguments.
        # Address issue 6070.
        arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]

        _ = stats.wilcoxon(arr, zero_method="pratt", method="asymptotic")
        _ = stats.wilcoxon(arr, zero_method="zsplit", method="asymptotic")
        _ = stats.wilcoxon(arr, zero_method="wilcox", method="asymptotic")

    def test_accuracy_wilcoxon(self):
        freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
        nums = range(-4, 5)
        x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
        y = np.zeros(x.size)

        T, p = stats.wilcoxon(x, y, "pratt", method="asymptotic",
                              correction=False)
        assert_allclose(T, 423)
        assert_allclose(p, 0.0031724568006762576)

        T, p = stats.wilcoxon(x, y, "zsplit", method="asymptotic",
                              correction=False)
        assert_allclose(T, 441)
        assert_allclose(p, 0.0032145343172473055)

        T, p = stats.wilcoxon(x, y, "wilcox", method="asymptotic",
                              correction=False)
        assert_allclose(T, 327)
        assert_allclose(p, 0.00641346115861)

        # Test the 'correction' option, using values computed in R with:
        # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
        x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
        y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
        T, p = stats.wilcoxon(x, y, correction=False, method="asymptotic")
        assert_equal(T, 34)
        assert_allclose(p, 0.6948866, rtol=1e-6)
        T, p = stats.wilcoxon(x, y, correction=True, method="asymptotic")
        assert_equal(T, 34)
        assert_allclose(p, 0.7240817, rtol=1e-6)

    def test_approx_mode(self):
        # Check that `mode` is still an alias of keyword `method`,
        # and `"approx"` is still an alias of argument `"asymptotic"`
        x = np.array([3, 5, 23, 7, 243, 58, 98, 2, 8, -3, 9, 11])
        y = np.array([2, -2, 1, 23, 0, 5, 12, 18, 99, 12, 17, 27])
        res1 = stats.wilcoxon(x, y, "wilcox", method="approx")
        res2 = stats.wilcoxon(x, y, "wilcox", method="asymptotic")
        res3 = stats.wilcoxon(x, y, "wilcox", mode="approx")
        res4 = stats.wilcoxon(x, y, "wilcox", mode="asymptotic")
        assert res1 == res2 == res3 == res4

    def test_wilcoxon_result_attributes(self):
        x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
        y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
        res = stats.wilcoxon(x, y, correction=False, method="asymptotic")
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes)

    def test_wilcoxon_has_zstatistic(self):
        rng = np.random.default_rng(89426135444)
        x, y = rng.random(15), rng.random(15)

        res = stats.wilcoxon(x, y, method="asymptotic")
        ref = stats.norm.ppf(res.pvalue/2)
        assert_allclose(res.zstatistic, ref)

        res = stats.wilcoxon(x, y, method="exact")
        assert not hasattr(res, 'zstatistic')

        res = stats.wilcoxon(x, y)
        assert not hasattr(res, 'zstatistic')

    def test_wilcoxon_tie(self):
        # Regression test for gh-2391.
        # Corresponding R code is:
        # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
        # > result$p.value
        # [1] 0.001565402
        # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
        # > result$p.value
        # [1] 0.001904195
        stat, p = stats.wilcoxon([0.1] * 10, method="asymptotic",
                                 correction=False)
        expected_p = 0.001565402
        assert_equal(stat, 0)
        assert_allclose(p, expected_p, rtol=1e-6)

        stat, p = stats.wilcoxon([0.1] * 10, correction=True,
                                 method="asymptotic")
        expected_p = 0.001904195
        assert_equal(stat, 0)
        assert_allclose(p, expected_p, rtol=1e-6)

    def test_onesided(self):
        # tested against "R version 3.4.1 (2017-06-30)"
        # x <- c(125, 115, 130, 140, 140, 115, 140, 125, 140, 135)
        # y <- c(110, 122, 125, 120, 140, 124, 123, 137, 135, 145)
        # cfg <- list(x = x, y = y, paired = TRUE, exact = FALSE)
        # do.call(wilcox.test, c(cfg, list(alternative = "less", correct = FALSE)))
        # do.call(wilcox.test, c(cfg, list(alternative = "less", correct = TRUE)))
        # do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = FALSE)))
        # do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = TRUE)))
        x = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135]
        y = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145]

        w, p = stats.wilcoxon(x, y, alternative="less", method="asymptotic",
                              correction=False)
        assert_equal(w, 27)
        assert_almost_equal(p, 0.7031847, decimal=6)

        w, p = stats.wilcoxon(x, y, alternative="less", correction=True,
                              method="asymptotic")
        assert_equal(w, 27)
        assert_almost_equal(p, 0.7233656, decimal=6)

        w, p = stats.wilcoxon(x, y, alternative="greater",
                              method="asymptotic", correction=False)
        assert_equal(w, 27)
        assert_almost_equal(p, 0.2968153, decimal=6)

        w, p = stats.wilcoxon(x, y, alternative="greater",
                              correction=True, method="asymptotic")
        assert_equal(w, 27)
        assert_almost_equal(p, 0.3176447, decimal=6)

    def test_exact_basic(self):
        for n in range(1, 51):
            pmf1 = _get_wilcoxon_distr(n)
            pmf2 = _get_wilcoxon_distr2(n)
            assert_equal(n*(n+1)/2 + 1, len(pmf1))
            assert_equal(sum(pmf1), 1)
            assert_array_almost_equal(pmf1, pmf2)

    def test_exact_pval(self):
        # expected values computed with "R version 3.4.1 (2017-06-30)"
        x = np.array([1.81, 0.82, 1.56, -0.48, 0.81, 1.28, -1.04, 0.23,
                      -0.75, 0.14])
        y = np.array([0.71, 0.65, -0.2, 0.85, -1.1, -0.45, -0.84, -0.24,
                      -0.68, -0.76])
        _, p = stats.wilcoxon(x, y, alternative="two-sided", method="exact")
        assert_almost_equal(p, 0.1054688, decimal=6)
        _, p = stats.wilcoxon(x, y, alternative="less", method="exact")
        assert_almost_equal(p, 0.9580078, decimal=6)
        _, p = stats.wilcoxon(x, y, alternative="greater", method="exact")
        assert_almost_equal(p, 0.05273438, decimal=6)

        x = np.arange(0, 20) + 0.5
        y = np.arange(20, 0, -1)
        _, p = stats.wilcoxon(x, y, alternative="two-sided", method="exact")
        assert_almost_equal(p, 0.8694878, decimal=6)
        _, p = stats.wilcoxon(x, y, alternative="less", method="exact")
        assert_almost_equal(p, 0.4347439, decimal=6)
        _, p = stats.wilcoxon(x, y, alternative="greater", method="exact")
        assert_almost_equal(p, 0.5795889, decimal=6)

    # These inputs were chosen to give a W statistic that is either the
    # center of the distribution (when the length of the support is odd), or
    # the value to the left of the center (when the length of the support is
    # even). Also, the numbers are chosen so that the W statistic is the
    # sum of the positive values.

    @pytest.mark.parametrize('x', [[-1, -2, 3],
                                   [-1, 2, -3, -4, 5],
                                   [-1, -2, 3, -4, -5, -6, 7, 8]])
    def test_exact_p_1(self, x):
        w, p = stats.wilcoxon(x)
        x = np.array(x)
        wtrue = x[x > 0].sum()
        assert_equal(w, wtrue)
        assert_equal(p, 1)

    def test_auto(self):
        # auto defaults to exact if there are no ties and n <= 50
        x = np.arange(0, 50) + 0.5
        y = np.arange(50, 0, -1)
        assert_equal(stats.wilcoxon(x, y),
                     stats.wilcoxon(x, y, method="exact"))

        # n <= 50: if there are zeros in d = x-y, use PermutationMethod
        pm = stats.PermutationMethod()
        d = np.arange(-2, 5)
        w, p = stats.wilcoxon(d)
        # rerunning the test gives the same results since n_resamples
        # is large enough to get deterministic results if n <= 13,
        # so we do not need to use a seed. To avoid longer runtimes of the
        # test, use n=7 only. For n=13, see test_auto_permutation_edge_case
        assert_equal((w, p), stats.wilcoxon(d, method=pm))

        # for larger vectors (n > 13) with ties/zeros, use asymptotic test
        d = np.arange(-5, 9)  # zero
        w, p = stats.wilcoxon(d)
        assert_equal((w, p), stats.wilcoxon(d, method="asymptotic"))

        d[d == 0] = 1  # tie
        w, p = stats.wilcoxon(d)
        assert_equal((w, p), stats.wilcoxon(d, method="asymptotic"))

        # use approximation for samples > 50
        d = np.arange(1, 52)
        assert_equal(stats.wilcoxon(d), stats.wilcoxon(d, method="asymptotic"))

    @pytest.mark.xslow
    def test_auto_permutation_edge_case(self):
        # Check that `PermutationMethod()` is used and results are deterministic when
        # `method='auto'`, there are zeros or ties in `d = x-y`, and `len(d) <= 13`.
        d = np.arange(-5, 8)  # zero
        res = stats.wilcoxon(d)
        ref = (27.5, 0.3955078125)  # stats.wilcoxon(d, method=PermutationMethod())
        assert_equal(res, ref)

        d[d == 0] = 1  # tie
        res = stats.wilcoxon(d)
        ref = (32, 0.3779296875)  # stats.wilcoxon(d, method=PermutationMethod())
        assert_equal(res, ref)

    @pytest.mark.parametrize('size', [3, 5, 10])
    def test_permutation_method(self, size):
        rng = np.random.default_rng(92348034828501345)
        x = rng.random(size=size)
        res = stats.wilcoxon(x, method=stats.PermutationMethod())
        ref = stats.wilcoxon(x, method='exact')
        assert_equal(res.statistic, ref.statistic)
        assert_equal(res.pvalue, ref.pvalue)

        x = rng.random(size=size*10)
        rng = np.random.default_rng(59234803482850134)
        pm = stats.PermutationMethod(n_resamples=99, rng=rng)
        ref = stats.wilcoxon(x, method=pm)
        # preserve use of old random_state during SPEC 7 transition
        rng = np.random.default_rng(59234803482850134)
        pm = stats.PermutationMethod(n_resamples=99, random_state=rng)
        res = stats.wilcoxon(x, method=pm)

        assert_equal(np.round(res.pvalue, 2), res.pvalue)  # n_resamples used
        assert_equal(res.pvalue, ref.pvalue)  # rng/random_state used

    def test_method_auto_nan_propagate_ND_length_gt_50_gh20591(self):
        # When method!='asymptotic', nan_policy='propagate', and a slice of
        # a >1 dimensional array input contained NaN, the result object of
        # `wilcoxon` could (under yet other conditions) return `zstatistic`
        # for some slices but not others. This resulted in an error because
        # `apply_along_axis` would have to create a ragged array.
        # Check that this is resolved.
        rng = np.random.default_rng(235889269872456)
        A = rng.normal(size=(51, 2))  # length along slice > exact threshold
        A[5, 1] = np.nan
        res = stats.wilcoxon(A)
        ref = stats.wilcoxon(A, method='asymptotic')
        assert_allclose(res, ref)
        assert hasattr(ref, 'zstatistic')
        assert not hasattr(res, 'zstatistic')

    @pytest.mark.parametrize('method', ['exact', 'asymptotic'])
    def test_symmetry_gh19872_gh20752(self, method):
        # Check that one-sided exact tests obey required symmetry. Bug reported
        # in gh-19872 and again in gh-20752; example from gh-19872 is more concise:
        var1 = [62, 66, 61, 68, 74, 62, 68, 62, 55, 59]
        var2 = [71, 71, 69, 61, 75, 71, 77, 72, 62, 65]
        ref = stats.wilcoxon(var1, var2, alternative='less', method=method)
        res = stats.wilcoxon(var2, var1, alternative='greater', method=method)
        max_statistic = len(var1) * (len(var1) + 1) / 2
        assert int(res.statistic) != res.statistic
        assert_allclose(max_statistic - res.statistic, ref.statistic, rtol=1e-15)
        assert_allclose(res.pvalue, ref.pvalue, rtol=1e-15)

    @pytest.mark.parametrize("method", ('exact', stats.PermutationMethod()))
    def test_all_zeros_exact(self, method):
        # previously, this raised a RuntimeWarning when calculating Z, even
        # when the Z value was not needed. Confirm that this no longer
        # occurs when `method` is 'exact' or a `PermutationMethod`.
        res = stats.wilcoxon(np.zeros(5), method=method)
        assert_allclose(res, [0, 1])

    def test_wilcoxon_axis_broadcasting_errors_gh22051(self):
        # In previous versions of SciPy, `wilcoxon` gave an incorrect error
        # message when `AxisError` was not found in the base NumPy namespace.
        # Check that this is resolved with and without the ANP decorator.
        message = "Array shapes are incompatible for broadcasting."
        with pytest.raises(ValueError, match=message):
            stats.wilcoxon([1, 2, 3], [4, 5])

        message = "operands could not be broadcast together with..."
        with pytest.raises(ValueError, match=message):
            stats.wilcoxon([1, 2, 3], [4, 5], _no_deco=True)

        AxisError = getattr(np, 'AxisError', None) or np.exceptions.AxisError
        message = "source: axis 3 is out of bounds for array of dimension 1"
        with pytest.raises(AxisError, match=message):
            stats.wilcoxon([1, 2, 3], [4, 5, 6], axis=3)

        message = "`axis` must be compatible with the shape..."
        with pytest.raises(AxisError, match=message):
            stats.wilcoxon([1, 2, 3], [4, 5, 6], axis=3, _no_deco=True)


# data for k-statistics tests from
# https://cran.r-project.org/web/packages/kStatistics/kStatistics.pdf
# see nKS "Examples"
x_kstat = [16.34, 10.76, 11.84, 13.55, 15.85, 18.20, 7.51, 10.22, 12.52, 14.68,
           16.08, 19.43, 8.12, 11.20, 12.95, 14.77, 16.83, 19.80, 8.55, 11.58,
           12.10, 15.02, 16.83, 16.98, 19.92, 9.47, 11.68, 13.41, 15.35, 19.11]


class TestKstat:
    def test_moments_normal_distribution(self, xp):
        rng = np.random.RandomState(32149)
        data = xp.asarray(rng.randn(12345), dtype=xp.float64)
        moments = xp.stack([stats.kstat(data, n) for n in [1, 2, 3, 4]])

        expected = xp.asarray([0.011315, 1.017931, 0.05811052, 0.0754134],
                              dtype=data.dtype)
        xp_assert_close(moments, expected, rtol=1e-4)

        # test equivalence with `stats.moment`
        m1 = stats.moment(data, order=1)
        m2 = stats.moment(data, order=2)
        m3 = stats.moment(data, order=3)
        xp_assert_close(xp.stack((m1, m2, m3)), expected[:-1], atol=0.02, rtol=1e-2)

    @pytest.mark.filterwarnings("ignore:invalid value encountered in scalar divide")
    def test_empty_input(self, xp):
        if is_numpy(xp):
            with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
                res = stats.kstat(xp.asarray([]))
        else:
            with np.errstate(invalid='ignore'):  # for array_api_strict
                res = stats.kstat(xp.asarray([]))
        xp_assert_equal(res, xp.asarray(xp.nan))

    def test_nan_input(self, xp):
        data = xp.arange(10.)
        data = xp.where(data == 6, xp.nan, data)

        xp_assert_equal(stats.kstat(data), xp.asarray(xp.nan))

    @pytest.mark.parametrize('n', [0, 4.001])
    def test_kstat_bad_arg(self, n, xp):
        # Raise ValueError if n > 4 or n < 1.
        data = xp.arange(10)
        message = 'k-statistics only supported for 1<=n<=4'
        with pytest.raises(ValueError, match=message):
            stats.kstat(data, n=n)

    @pytest.mark.parametrize('case', [(1, 14.02166666666667),
                                      (2, 12.65006954022974),
                                      (3, -1.447059503280798),
                                      (4, -141.6682291883626)])
    def test_against_R(self, case, xp):
        # Test against reference values computed with R kStatistics, e.g.
        # options(digits=16)
        # library(kStatistics)
        # data <-c (16.34, 10.76, 11.84, 13.55, 15.85, 18.20, 7.51, 10.22,
        #           12.52, 14.68, 16.08, 19.43, 8.12, 11.20, 12.95, 14.77,
        #           16.83, 19.80, 8.55, 11.58, 12.10, 15.02, 16.83, 16.98,
        #           19.92, 9.47, 11.68, 13.41, 15.35, 19.11)
        # nKS(4, data)
        n, ref = case
        res = stats.kstat(xp.asarray(x_kstat), n)
        xp_assert_close(res, xp.asarray(ref))


class TestKstatVar:
    @pytest.mark.filterwarnings("ignore:invalid value encountered in scalar divide")
    def test_empty_input(self, xp):
        x = xp.asarray([])
        if is_numpy(xp):
            with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
                res = stats.kstatvar(x)
        else:
            with np.errstate(invalid='ignore'):  # for array_api_strict
                res = stats.kstatvar(x)
        xp_assert_equal(res, xp.asarray(xp.nan))

    def test_nan_input(self, xp):
        data = xp.arange(10.)
        data = xp.where(data == 6, xp.nan, data)

        xp_assert_equal(stats.kstat(data), xp.asarray(xp.nan))

    @skip_xp_backends(np_only=True,
                      reason='input validation of `n` does not depend on backend')
    def test_bad_arg(self, xp):
        # Raise ValueError if n is not 1 or 2.
        data = [1]
        n = 10
        message = 'Only n=1 or n=2 supported.'
        with pytest.raises(ValueError, match=message):
            stats.kstatvar(data, n=n)

    def test_against_R_mathworld(self, xp):
        # Test against reference values computed using formulas exactly as
        # they appear at https://mathworld.wolfram.com/k-Statistic.html
        # This is *really* similar to how they appear in the implementation,
        # but that could change, and this should not.
        n = len(x_kstat)
        k2 = 12.65006954022974  # see source code in TestKstat
        k4 = -141.6682291883626

        res = stats.kstatvar(xp.asarray(x_kstat), 1)
        ref = k2 / n
        xp_assert_close(res, xp.asarray(ref))

        res = stats.kstatvar(xp.asarray(x_kstat), 2)
        # *unbiased estimator* for var(k2)
        ref = (2*k2**2*n + (n-1)*k4) / (n * (n+1))
        xp_assert_close(res, xp.asarray(ref))


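For readability, the reference quantities computed in test_against_R_mathworld above are the MathWorld variance estimators, restated here directly from the code (no new derivation):

$$\widehat{\operatorname{var}}(k_1) = \frac{k_2}{n}, \qquad \widehat{\operatorname{var}}(k_2) = \frac{2 k_2^2\, n + (n - 1)\, k_4}{n\,(n + 1)},$$

where $k_2$ and $k_4$ are the second and fourth k-statistics of x_kstat as computed in TestKstat, and $n$ is the sample size.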
class TestPpccPlot:
    def setup_method(self):
        self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5

    def test_basic(self):
        N = 5
        svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
        ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
                         0.93519298]
        assert_allclose(svals, np.linspace(-10, 10, num=N))
        assert_allclose(ppcc, ppcc_expected)

    def test_dist(self):
        # Test that we can specify distributions both by name and as objects.
        svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
        svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
                                        dist=stats.tukeylambda)
        assert_allclose(svals1, svals2, rtol=1e-20)
        assert_allclose(ppcc1, ppcc2, rtol=1e-20)
        # Test that 'tukeylambda' is the default dist
        svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
        assert_allclose(svals1, svals3, rtol=1e-20)
        assert_allclose(ppcc1, ppcc3, rtol=1e-20)

    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    def test_plot_kwarg(self):
        # Check with the matplotlib.pyplot module
        fig = plt.figure()
        ax = fig.add_subplot(111)
        stats.ppcc_plot(self.x, -20, 20, plot=plt)
        fig.delaxes(ax)

        # Check that a Matplotlib Axes object is accepted
        ax = fig.add_subplot(111)
        stats.ppcc_plot(self.x, -20, 20, plot=ax)
        plt.close()

    def test_invalid_inputs(self):
        # `b` has to be larger than `a`
        assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)

        # Raise ValueError when given an invalid distribution.
        assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
                      dist="plate_of_shrimp")

    def test_empty(self):
        # For consistency with probplot return for one empty array,
        # ppcc contains all zeros and svals is the same as for normal array
        # input.
        svals, ppcc = stats.ppcc_plot([], 0, 1)
        assert_allclose(svals, np.linspace(0, 1, num=80))
        assert_allclose(ppcc, np.zeros(80, dtype=float))


class TestPpccMax:
    def test_ppcc_max_bad_arg(self):
        # Raise ValueError when given an invalid distribution.
        data = [1]
        assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")

    def test_ppcc_max_basic(self):
        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
                                  random_state=1234567) + 1e4
        assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=7)

    def test_dist(self):
        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
                                  random_state=1234567) + 1e4

        # Test that we can specify distributions both by name and as objects.
        max1 = stats.ppcc_max(x, dist='tukeylambda')
        max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
        assert_almost_equal(max1, -0.71215366521264145, decimal=5)
        assert_almost_equal(max2, -0.71215366521264145, decimal=5)

        # Test that 'tukeylambda' is the default dist
        max3 = stats.ppcc_max(x)
        assert_almost_equal(max3, -0.71215366521264145, decimal=5)

    def test_brack(self):
        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
                                  random_state=1234567) + 1e4
        assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))

        assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
                            -0.71215366521264145, decimal=7)

        assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
                            -0.71215366521264145, decimal=7)


class TestBoxcox_llf:

    @pytest.mark.parametrize("dtype", ["float32", "float64"])
    def test_basic(self, dtype, xp):
        dt = getattr(xp, dtype)
        x = stats.norm.rvs(size=10000, loc=10, random_state=54321)
        lmbda = 1
        llf = stats.boxcox_llf(lmbda, xp.asarray(x, dtype=dt))
        llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
        xp_assert_close(llf, xp.asarray(llf_expected, dtype=dt))

    @skip_xp_backends(np_only=True,
                      reason='array-likes only accepted for NumPy backend.')
    def test_array_like(self, xp):
        x = stats.norm.rvs(size=100, loc=10, random_state=54321)
        lmbda = 1
        llf = stats.boxcox_llf(lmbda, x)
        llf2 = stats.boxcox_llf(lmbda, list(x))
        xp_assert_close(llf, llf2, rtol=1e-12)

    def test_2d_input(self, xp):
        # Note: boxcox_llf() was already working with 2-D input (sort of), so
        # keep it like that. boxcox() doesn't work with 2-D input though, due
        # to brent() returning a scalar.
        x = stats.norm.rvs(size=100, loc=10, random_state=54321)
        lmbda = 1
        llf = stats.boxcox_llf(lmbda, x)
        llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
        xp_assert_close(xp.asarray([llf, llf]), xp.asarray(llf2), rtol=1e-12)

    @pytest.mark.thread_unsafe
    def test_empty(self, xp):
        message = "One or more sample arguments is too small..."
        context = (pytest.warns(SmallSampleWarning, match=message) if is_numpy(xp)
                   else contextlib.nullcontext())
        with context:
            assert xp.isnan(xp.asarray(stats.boxcox_llf(1, xp.asarray([]))))

    def test_gh_6873(self, xp):
        # Regression test for gh-6873.
        # This example was taken from gh-7534, a duplicate of gh-6873.
        data = xp.asarray([198.0, 233.0, 233.0, 392.0])
        llf = stats.boxcox_llf(-8, data)
        # The expected value was computed with mpmath.
        xp_assert_close(llf, xp.asarray(-17.93934208579061))

    def test_instability_gh20021(self, xp):
        data = xp.asarray([2003, 1950, 1997, 2000, 2009], dtype=xp.float64)
        llf = stats.boxcox_llf(1e-8, data)
        # The expected value was computed with mpsci, set mpmath.mp.dps=100
        # expect float64 output for integer input
        xp_assert_close(llf, xp.asarray(-15.32401272869016598, dtype=xp.float64),
                        rtol=1e-7)

    def test_axis(self, xp):
        data = xp.asarray([[100, 200], [300, 400]])
        llf_axis_0 = stats.boxcox_llf(1, data, axis=0)
        llf_0 = xp.stack([
            stats.boxcox_llf(1, data[:, 0]),
            stats.boxcox_llf(1, data[:, 1]),
        ])
        xp_assert_close(llf_axis_0, llf_0)
        llf_axis_1 = stats.boxcox_llf(1, data, axis=1)
        llf_1 = xp.stack([
            stats.boxcox_llf(1, data[0, :]),
            stats.boxcox_llf(1, data[1, :]),
        ])
        xp_assert_close(llf_axis_1, llf_1)


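As a reading aid for test_basic above: scipy.stats.boxcox_llf evaluates the Box-Cox profile log-likelihood, which (as documented for that function) has the form

$$\mathrm{llf}(\lambda; x) = (\lambda - 1)\sum_i \log x_i \;-\; \frac{n}{2}\,\log\!\Big(\frac{1}{n}\sum_i (y_i - \bar y)^2\Big), \qquad y = \mathrm{boxcox}(x, \lambda).$$

At $\lambda = 1$ the first term vanishes and $y = x - 1$, so the expression reduces to $-\tfrac{n}{2}\log(\widehat{\sigma}_x^2)$, which is the `llf_expected` value checked in test_basic.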
# This is the data from GitHub user Qukaiyi, given as an example
# of a data set that caused boxcox to fail.
_boxcox_data = [
    15957, 112079, 1039553, 711775, 173111, 307382, 183155, 53366, 760875,
    207500, 160045, 473714, 40194, 440319, 133261, 265444, 155590, 36660,
    904939, 55108, 138391, 339146, 458053, 63324, 1377727, 1342632, 41575,
    68685, 172755, 63323, 368161, 199695, 538214, 167760, 388610, 398855,
    1001873, 364591, 1320518, 194060, 194324, 2318551, 196114, 64225, 272000,
    198668, 123585, 86420, 1925556, 695798, 88664, 46199, 759135, 28051,
    345094, 1977752, 51778, 82746, 638126, 2560910, 45830, 140576, 1603787,
    57371, 548730, 5343629, 2298913, 998813, 2156812, 423966, 68350, 145237,
    131935, 1600305, 342359, 111398, 1409144, 281007, 60314, 242004, 113418,
    246211, 61940, 95858, 957805, 40909, 307955, 174159, 124278, 241193,
    872614, 304180, 146719, 64361, 87478, 509360, 167169, 933479, 620561,
    483333, 97416, 143518, 286905, 597837, 2556043, 89065, 69944, 196858,
    88883, 49379, 916265, 1527392, 626954, 54415, 89013, 2883386, 106096,
    402697, 45578, 349852, 140379, 34648, 757343, 1305442, 2054757, 121232,
    606048, 101492, 51426, 1820833, 83412, 136349, 1379924, 505977, 1303486,
    95853, 146451, 285422, 2205423, 259020, 45864, 684547, 182014, 784334,
    174793, 563068, 170745, 1195531, 63337, 71833, 199978, 2330904, 227335,
    898280, 75294, 2011361, 116771, 157489, 807147, 1321443, 1148635, 2456524,
    81839, 1228251, 97488, 1051892, 75397, 3009923, 2732230, 90923, 39735,
    132433, 225033, 337555, 1204092, 686588, 1062402, 40362, 1361829, 1497217,
    150074, 551459, 2019128, 39581, 45349, 1117187, 87845, 1877288, 164448,
    10338362, 24942, 64737, 769946, 2469124, 2366997, 259124, 2667585, 29175,
    56250, 74450, 96697, 5920978, 838375, 225914, 119494, 206004, 430907,
    244083, 219495, 322239, 407426, 618748, 2087536, 2242124, 4736149, 124624,
    406305, 240921, 2675273, 4425340, 821457, 578467, 28040, 348943, 48795,
    145531, 52110, 1645730, 1768364, 348363, 85042, 2673847, 81935, 169075,
    367733, 135474, 383327, 1207018, 93481, 5934183, 352190, 636533, 145870,
    55659, 146215, 73191, 248681, 376907, 1606620, 169381, 81164, 246390,
    236093, 885778, 335969, 49266, 381430, 307437, 350077, 34346, 49340,
    84715, 527120, 40163, 46898, 4609439, 617038, 2239574, 159905, 118337,
    120357, 430778, 3799158, 3516745, 54198, 2970796, 729239, 97848, 6317375,
    887345, 58198, 88111, 867595, 210136, 1572103, 1420760, 574046, 845988,
    509743, 397927, 1119016, 189955, 3883644, 291051, 126467, 1239907, 2556229,
    411058, 657444, 2025234, 1211368, 93151, 577594, 4842264, 1531713, 305084,
    479251, 20591, 1466166, 137417, 897756, 594767, 3606337, 32844, 82426,
    1294831, 57174, 290167, 322066, 813146, 5671804, 4425684, 895607, 450598,
    1048958, 232844, 56871, 46113, 70366, 701618, 97739, 157113, 865047,
    194810, 1501615, 1765727, 38125, 2733376, 40642, 437590, 127337, 106310,
    4167579, 665303, 809250, 1210317, 45750, 1853687, 348954, 156786, 90793,
    1885504, 281501, 3902273, 359546, 797540, 623508, 3672775, 55330, 648221,
    266831, 90030, 7118372, 735521, 1009925, 283901, 806005, 2434897, 94321,
    309571, 4213597, 2213280, 120339, 64403, 8155209, 1686948, 4327743,
    1868312, 135670, 3189615, 1569446, 706058, 58056, 2438625, 520619, 105201,
    141961, 179990, 1351440, 3148662, 2804457, 2760144, 70775, 33807, 1926518,
    2362142, 186761, 240941, 97860, 1040429, 1431035, 78892, 484039, 57845,
    724126, 3166209, 175913, 159211, 1182095, 86734, 1921472, 513546, 326016,
    1891609
]


class TestBoxcox:

    def test_fixed_lmbda(self):
        x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
        xt = stats.boxcox(x, lmbda=1)
        assert_allclose(xt, x - 1)
        xt = stats.boxcox(x, lmbda=-1)
        assert_allclose(xt, 1 - 1/x)

        xt = stats.boxcox(x, lmbda=0)
        assert_allclose(xt, np.log(x))

        # Also test that array_like input works
        xt = stats.boxcox(list(x), lmbda=0)
        assert_allclose(xt, np.log(x))

        # test that constant input is accepted; see gh-12225
        xt = stats.boxcox(np.ones(10), 2)
        assert_equal(xt, np.zeros(10))

    def test_lmbda_None(self):
        # Start from normal rv's, do inverse transform to check that
        # optimization function gets close to the right answer.
        lmbda = 2.5
        x = stats.norm.rvs(loc=10, size=50000, random_state=1245)
        x_inv = (x * lmbda + 1)**(-lmbda)
        xt, maxlog = stats.boxcox(x_inv)
|
2149
|
+
|
2150
|
+
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
|
2151
|
+
|
2152
|
+
def test_alpha(self):
|
2153
|
+
rng = np.random.RandomState(1234)
|
2154
|
+
x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5
|
2155
|
+
|
2156
|
+
# Some regular values for alpha, on a small sample size
|
2157
|
+
_, _, interval = stats.boxcox(x, alpha=0.75)
|
2158
|
+
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
|
2159
|
+
_, _, interval = stats.boxcox(x, alpha=0.05)
|
2160
|
+
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
|
2161
|
+
|
2162
|
+
# Try some extreme values, see we don't hit the N=500 limit
|
2163
|
+
x = _old_loggamma_rvs(7, size=500, random_state=rng) + 15
|
2164
|
+
_, _, interval = stats.boxcox(x, alpha=0.001)
|
2165
|
+
assert_allclose(interval, [0.3988867, 11.40553131])
|
2166
|
+
_, _, interval = stats.boxcox(x, alpha=0.999)
|
2167
|
+
assert_allclose(interval, [5.83316246, 5.83735292])
|
2168
|
+
|
2169
|
+
def test_boxcox_bad_arg(self):
|
2170
|
+
# Raise ValueError if any data value is negative.
|
2171
|
+
x = np.array([-1, 2])
|
2172
|
+
assert_raises(ValueError, stats.boxcox, x)
|
2173
|
+
# Raise ValueError if data is constant.
|
2174
|
+
assert_raises(ValueError, stats.boxcox, np.array([1]))
|
2175
|
+
# Raise ValueError if data is not 1-dimensional.
|
2176
|
+
assert_raises(ValueError, stats.boxcox, np.array([[1], [2]]))
|
2177
|
+
|
2178
|
+
def test_empty(self):
|
2179
|
+
assert_(stats.boxcox([]).shape == (0,))
|
2180
|
+
|
2181
|
+
def test_gh_6873(self):
|
2182
|
+
# Regression test for gh-6873.
|
2183
|
+
y, lam = stats.boxcox(_boxcox_data)
|
2184
|
+
# The expected value of lam was computed with the function
|
2185
|
+
# powerTransform in the R library 'car'. I trust that value
|
2186
|
+
# to only about five significant digits.
|
2187
|
+
assert_allclose(lam, -0.051654, rtol=1e-5)
|
2188
|
+
|
2189
|
+
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
|
2190
|
+
def test_bounded_optimizer_within_bounds(self, bounds):
|
2191
|
+
# Define custom optimizer with bounds.
|
2192
|
+
def optimizer(fun):
|
2193
|
+
return optimize.minimize_scalar(fun, bounds=bounds,
|
2194
|
+
method="bounded")
|
2195
|
+
|
2196
|
+
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
|
2197
|
+
assert bounds[0] < lmbda < bounds[1]
|
2198
|
+
|
2199
|
+
def test_bounded_optimizer_against_unbounded_optimizer(self):
|
2200
|
+
# Test whether setting bounds on optimizer excludes solution from
|
2201
|
+
# unbounded optimizer.
|
2202
|
+
|
2203
|
+
# Get unbounded solution.
|
2204
|
+
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None)
|
2205
|
+
|
2206
|
+
# Set tolerance and bounds around solution.
|
2207
|
+
bounds = (lmbda + 0.1, lmbda + 1)
|
2208
|
+
options = {'xatol': 1e-12}
|
2209
|
+
|
2210
|
+
def optimizer(fun):
|
2211
|
+
return optimize.minimize_scalar(fun, bounds=bounds,
|
2212
|
+
method="bounded", options=options)
|
2213
|
+
|
2214
|
+
# Check bounded solution. Lower bound should be active.
|
2215
|
+
_, lmbda_bounded = stats.boxcox(_boxcox_data, lmbda=None,
|
2216
|
+
optimizer=optimizer)
|
2217
|
+
assert lmbda_bounded != lmbda
|
2218
|
+
assert_allclose(lmbda_bounded, bounds[0])
|
2219
|
+
|
2220
|
+
@pytest.mark.parametrize("optimizer", ["str", (1, 2), 0.1])
|
2221
|
+
def test_bad_optimizer_type_raises_error(self, optimizer):
|
2222
|
+
# Check if error is raised if string, tuple or float is passed
|
2223
|
+
with pytest.raises(ValueError, match="`optimizer` must be a callable"):
|
2224
|
+
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
|
2225
|
+
|
2226
|
+
def test_bad_optimizer_value_raises_error(self):
|
2227
|
+
# Check if error is raised if `optimizer` function does not return
|
2228
|
+
# `OptimizeResult` object
|
2229
|
+
|
2230
|
+
# Define test function that always returns 1
|
2231
|
+
def optimizer(fun):
|
2232
|
+
return 1
|
2233
|
+
|
2234
|
+
message = "return an object containing the optimal `lmbda`"
|
2235
|
+
with pytest.raises(ValueError, match=message):
|
2236
|
+
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
|
2237
|
+
|
2238
|
+
@pytest.mark.parametrize(
|
2239
|
+
"bad_x", [np.array([1, -42, 12345.6]), np.array([np.nan, 42, 1])]
|
2240
|
+
)
|
2241
|
+
def test_negative_x_value_raises_error(self, bad_x):
|
2242
|
+
"""Test boxcox_normmax raises ValueError if x contains non-positive values."""
|
2243
|
+
message = "only positive, finite, real numbers"
|
2244
|
+
with pytest.raises(ValueError, match=message):
|
2245
|
+
stats.boxcox_normmax(bad_x)
|
2246
|
+
|
2247
|
+
@pytest.mark.parametrize('x', [
|
2248
|
+
# Attempt to trigger overflow in power expressions.
|
2249
|
+
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0,
|
2250
|
+
2009.0, 1980.0, 1999.0, 2007.0, 1991.0]),
|
2251
|
+
# Attempt to trigger overflow with a large optimal lambda.
|
2252
|
+
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0]),
|
2253
|
+
# Attempt to trigger overflow with large data.
|
2254
|
+
np.array([2003.0e200, 1950.0e200, 1997.0e200, 2000.0e200, 2009.0e200])
|
2255
|
+
])
|
2256
|
+
def test_overflow(self, x):
|
2257
|
+
with pytest.warns(UserWarning, match="The optimal lambda is"):
|
2258
|
+
xt_bc, lam_bc = stats.boxcox(x)
|
2259
|
+
assert np.all(np.isfinite(xt_bc))
|
2260
|
+
|
2261
|
+
|
2262
|
+
class TestBoxcoxNormmax:
|
2263
|
+
def setup_method(self):
|
2264
|
+
self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
|
2265
|
+
|
2266
|
+
def test_pearsonr(self):
|
2267
|
+
maxlog = stats.boxcox_normmax(self.x)
|
2268
|
+
assert_allclose(maxlog, 1.804465, rtol=1e-6)
|
2269
|
+
|
2270
|
+
def test_mle(self):
|
2271
|
+
maxlog = stats.boxcox_normmax(self.x, method='mle')
|
2272
|
+
assert_allclose(maxlog, 1.758101, rtol=1e-6)
|
2273
|
+
|
2274
|
+
# Check that boxcox() uses 'mle'
|
2275
|
+
_, maxlog_boxcox = stats.boxcox(self.x)
|
2276
|
+
assert_allclose(maxlog_boxcox, maxlog)
|
2277
|
+
|
2278
|
+
def test_all(self):
|
2279
|
+
maxlog_all = stats.boxcox_normmax(self.x, method='all')
|
2280
|
+
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
|
2281
|
+
|
2282
|
+
@pytest.mark.parametrize("method", ["mle", "pearsonr", "all"])
|
2283
|
+
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
|
2284
|
+
def test_bounded_optimizer_within_bounds(self, method, bounds):
|
2285
|
+
|
2286
|
+
def optimizer(fun):
|
2287
|
+
return optimize.minimize_scalar(fun, bounds=bounds,
|
2288
|
+
method="bounded")
|
2289
|
+
|
2290
|
+
maxlog = stats.boxcox_normmax(self.x, method=method,
|
2291
|
+
optimizer=optimizer)
|
2292
|
+
assert np.all(bounds[0] < maxlog)
|
2293
|
+
assert np.all(maxlog < bounds[1])
|
2294
|
+
|
2295
|
+
@pytest.mark.slow
|
2296
|
+
def test_user_defined_optimizer(self):
|
2297
|
+
# tests an optimizer that is not based on scipy.optimize.minimize
|
2298
|
+
lmbda = stats.boxcox_normmax(self.x)
|
2299
|
+
lmbda_rounded = np.round(lmbda, 5)
|
2300
|
+
lmbda_range = np.linspace(lmbda_rounded-0.01, lmbda_rounded+0.01, 1001)
|
2301
|
+
|
2302
|
+
class MyResult:
|
2303
|
+
pass
|
2304
|
+
|
2305
|
+
def optimizer(fun):
|
2306
|
+
# brute force minimum over the range
|
2307
|
+
objs = []
|
2308
|
+
for lmbda in lmbda_range:
|
2309
|
+
objs.append(fun(lmbda))
|
2310
|
+
res = MyResult()
|
2311
|
+
res.x = lmbda_range[np.argmin(objs)]
|
2312
|
+
return res
|
2313
|
+
|
2314
|
+
lmbda2 = stats.boxcox_normmax(self.x, optimizer=optimizer)
|
2315
|
+
assert lmbda2 != lmbda # not identical
|
2316
|
+
assert_allclose(lmbda2, lmbda, 1e-5) # but as close as it should be
|
2317
|
+
|
2318
|
+
def test_user_defined_optimizer_and_brack_raises_error(self):
|
2319
|
+
optimizer = optimize.minimize_scalar
|
2320
|
+
|
2321
|
+
# Using default `brack=None` with user-defined `optimizer` works as
|
2322
|
+
# expected.
|
2323
|
+
stats.boxcox_normmax(self.x, brack=None, optimizer=optimizer)
|
2324
|
+
|
2325
|
+
# Using user-defined `brack` with user-defined `optimizer` is expected
|
2326
|
+
# to throw an error. Instead, users should specify
|
2327
|
+
# optimizer-specific parameters in the optimizer function itself.
|
2328
|
+
with pytest.raises(ValueError, match="`brack` must be None if "
|
2329
|
+
"`optimizer` is given"):
|
2330
|
+
|
2331
|
+
stats.boxcox_normmax(self.x, brack=(-2.0, 2.0),
|
2332
|
+
optimizer=optimizer)
|
2333
|
+
|
2334
|
+
@pytest.mark.parametrize(
|
2335
|
+
'x', ([2003.0, 1950.0, 1997.0, 2000.0, 2009.0],
|
2336
|
+
[0.50000471, 0.50004979, 0.50005902, 0.50009312, 0.50001632]))
|
2337
|
+
def test_overflow(self, x):
|
2338
|
+
message = "The optimal lambda is..."
|
2339
|
+
with pytest.warns(UserWarning, match=message):
|
2340
|
+
lmbda = stats.boxcox_normmax(x, method='mle')
|
2341
|
+
assert np.isfinite(special.boxcox(x, lmbda)).all()
|
2342
|
+
# 10000 is safety factor used in boxcox_normmax
|
2343
|
+
ymax = np.finfo(np.float64).max / 10000
|
2344
|
+
x_treme = np.max(x) if lmbda > 0 else np.min(x)
|
2345
|
+
y_extreme = special.boxcox(x_treme, lmbda)
|
2346
|
+
assert_allclose(y_extreme, ymax * np.sign(lmbda))
|
2347
|
+
|
2348
|
+
def test_negative_ymax(self):
|
2349
|
+
with pytest.raises(ValueError, match="`ymax` must be strictly positive"):
|
2350
|
+
stats.boxcox_normmax(self.x, ymax=-1)
|
2351
|
+
|
2352
|
+
@pytest.mark.parametrize("x", [
|
2353
|
+
# positive overflow in float64
|
2354
|
+
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0],
|
2355
|
+
dtype=np.float64),
|
2356
|
+
# negative overflow in float64
|
2357
|
+
np.array([0.50000471, 0.50004979, 0.50005902, 0.50009312, 0.50001632],
|
2358
|
+
dtype=np.float64),
|
2359
|
+
# positive overflow in float32
|
2360
|
+
np.array([200.3, 195.0, 199.7, 200.0, 200.9],
|
2361
|
+
dtype=np.float32),
|
2362
|
+
# negative overflow in float32
|
2363
|
+
np.array([2e-30, 1e-30, 1e-30, 1e-30, 1e-30, 1e-30],
|
2364
|
+
dtype=np.float32),
|
2365
|
+
])
|
2366
|
+
@pytest.mark.parametrize("ymax", [1e10, 1e30, None])
|
2367
|
+
# TODO: add method "pearsonr" after fix overflow issue
|
2368
|
+
@pytest.mark.parametrize("method", ["mle"])
|
2369
|
+
def test_user_defined_ymax_input_float64_32(self, x, ymax, method):
|
2370
|
+
# Test the maximum of the transformed data close to ymax
|
2371
|
+
with pytest.warns(UserWarning, match="The optimal lambda is"):
|
2372
|
+
kwarg = {'ymax': ymax} if ymax is not None else {}
|
2373
|
+
lmb = stats.boxcox_normmax(x, method=method, **kwarg)
|
2374
|
+
x_treme = [np.min(x), np.max(x)]
|
2375
|
+
ymax_res = max(abs(stats.boxcox(x_treme, lmb)))
|
2376
|
+
if ymax is None:
|
2377
|
+
# 10000 is safety factor used in boxcox_normmax
|
2378
|
+
ymax = np.finfo(x.dtype).max / 10000
|
2379
|
+
assert_allclose(ymax, ymax_res, rtol=1e-5)
|
2380
|
+
|
2381
|
+
@pytest.mark.parametrize("x", [
|
2382
|
+
# positive overflow in float32 but not float64
|
2383
|
+
[200.3, 195.0, 199.7, 200.0, 200.9],
|
2384
|
+
# negative overflow in float32 but not float64
|
2385
|
+
[2e-30, 1e-30, 1e-30, 1e-30, 1e-30, 1e-30],
|
2386
|
+
])
|
2387
|
+
# TODO: add method "pearsonr" after fix overflow issue
|
2388
|
+
@pytest.mark.parametrize("method", ["mle"])
|
2389
|
+
def test_user_defined_ymax_inf(self, x, method):
|
2390
|
+
x_32 = np.asarray(x, dtype=np.float32)
|
2391
|
+
x_64 = np.asarray(x, dtype=np.float64)
|
2392
|
+
|
2393
|
+
# assert overflow with float32 but not float64
|
2394
|
+
with pytest.warns(UserWarning, match="The optimal lambda is"):
|
2395
|
+
stats.boxcox_normmax(x_32, method=method)
|
2396
|
+
stats.boxcox_normmax(x_64, method=method)
|
2397
|
+
|
2398
|
+
# compute the true optimal lambda then compare them
|
2399
|
+
lmb_32 = stats.boxcox_normmax(x_32, ymax=np.inf, method=method)
|
2400
|
+
lmb_64 = stats.boxcox_normmax(x_64, ymax=np.inf, method=method)
|
2401
|
+
assert_allclose(lmb_32, lmb_64, rtol=1e-2)
|
2402
|
+
|
2403
|
+
|
2404
|
+
class TestBoxcoxNormplot:
|
2405
|
+
def setup_method(self):
|
2406
|
+
self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5
|
2407
|
+
|
2408
|
+
def test_basic(self):
|
2409
|
+
N = 5
|
2410
|
+
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
|
2411
|
+
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
|
2412
|
+
0.95843297]
|
2413
|
+
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
|
2414
|
+
assert_allclose(ppcc, ppcc_expected)
|
2415
|
+
|
2416
|
+
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
|
2417
|
+
def test_plot_kwarg(self):
|
2418
|
+
# Check with the matplotlib.pyplot module
|
2419
|
+
fig = plt.figure()
|
2420
|
+
ax = fig.add_subplot(111)
|
2421
|
+
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
|
2422
|
+
fig.delaxes(ax)
|
2423
|
+
|
2424
|
+
# Check that a Matplotlib Axes object is accepted
|
2425
|
+
ax = fig.add_subplot(111)
|
2426
|
+
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
|
2427
|
+
plt.close()
|
2428
|
+
|
2429
|
+
def test_invalid_inputs(self):
|
2430
|
+
# `lb` has to be larger than `la`
|
2431
|
+
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
|
2432
|
+
# `x` can not contain negative values
|
2433
|
+
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
|
2434
|
+
|
2435
|
+
def test_empty(self):
|
2436
|
+
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
|
2437
|
+
|
2438
|
+
|
2439
|
+
class TestYeojohnson_llf:
|
2440
|
+
|
2441
|
+
def test_array_like(self):
|
2442
|
+
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
|
2443
|
+
lmbda = 1
|
2444
|
+
llf = stats.yeojohnson_llf(lmbda, x)
|
2445
|
+
llf2 = stats.yeojohnson_llf(lmbda, list(x))
|
2446
|
+
assert_allclose(llf, llf2, rtol=1e-12)
|
2447
|
+
|
2448
|
+
def test_2d_input(self):
|
2449
|
+
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
|
2450
|
+
lmbda = 1
|
2451
|
+
llf = stats.yeojohnson_llf(lmbda, x)
|
2452
|
+
llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T)
|
2453
|
+
assert_allclose([llf, llf], llf2, rtol=1e-12)
|
2454
|
+
|
2455
|
+
def test_empty(self):
|
2456
|
+
assert_(np.isnan(stats.yeojohnson_llf(1, [])))
|
2457
|
+
|
2458
|
+
|
2459
|
+
class TestYeojohnson:
|
2460
|
+
|
2461
|
+
def test_fixed_lmbda(self):
|
2462
|
+
rng = np.random.RandomState(12345)
|
2463
|
+
|
2464
|
+
# Test positive input
|
2465
|
+
x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5
|
2466
|
+
assert np.all(x > 0)
|
2467
|
+
xt = stats.yeojohnson(x, lmbda=1)
|
2468
|
+
assert_allclose(xt, x)
|
2469
|
+
xt = stats.yeojohnson(x, lmbda=-1)
|
2470
|
+
assert_allclose(xt, 1 - 1 / (x + 1))
|
2471
|
+
xt = stats.yeojohnson(x, lmbda=0)
|
2472
|
+
assert_allclose(xt, np.log(x + 1))
|
2473
|
+
xt = stats.yeojohnson(x, lmbda=1)
|
2474
|
+
assert_allclose(xt, x)
|
2475
|
+
|
2476
|
+
# Test negative input
|
2477
|
+
x = _old_loggamma_rvs(5, size=50, random_state=rng) - 5
|
2478
|
+
assert np.all(x < 0)
|
2479
|
+
xt = stats.yeojohnson(x, lmbda=2)
|
2480
|
+
assert_allclose(xt, -np.log(-x + 1))
|
2481
|
+
xt = stats.yeojohnson(x, lmbda=1)
|
2482
|
+
assert_allclose(xt, x)
|
2483
|
+
xt = stats.yeojohnson(x, lmbda=3)
|
2484
|
+
assert_allclose(xt, 1 / (-x + 1) - 1)
|
2485
|
+
|
2486
|
+
# test both positive and negative input
|
2487
|
+
x = _old_loggamma_rvs(5, size=50, random_state=rng) - 2
|
2488
|
+
assert not np.all(x < 0)
|
2489
|
+
assert not np.all(x >= 0)
|
2490
|
+
pos = x >= 0
|
2491
|
+
xt = stats.yeojohnson(x, lmbda=1)
|
2492
|
+
assert_allclose(xt[pos], x[pos])
|
2493
|
+
xt = stats.yeojohnson(x, lmbda=-1)
|
2494
|
+
assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1))
|
2495
|
+
xt = stats.yeojohnson(x, lmbda=0)
|
2496
|
+
assert_allclose(xt[pos], np.log(x[pos] + 1))
|
2497
|
+
xt = stats.yeojohnson(x, lmbda=1)
|
2498
|
+
assert_allclose(xt[pos], x[pos])
|
2499
|
+
|
2500
|
+
neg = ~pos
|
2501
|
+
xt = stats.yeojohnson(x, lmbda=2)
|
2502
|
+
assert_allclose(xt[neg], -np.log(-x[neg] + 1))
|
2503
|
+
xt = stats.yeojohnson(x, lmbda=1)
|
2504
|
+
assert_allclose(xt[neg], x[neg])
|
2505
|
+
xt = stats.yeojohnson(x, lmbda=3)
|
2506
|
+
assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1)
|
2507
|
+
|
2508
|
+
@pytest.mark.parametrize('lmbda', [0, .1, .5, 2])
|
2509
|
+
def test_lmbda_None(self, lmbda):
|
2510
|
+
# Start from normal rv's, do inverse transform to check that
|
2511
|
+
# optimization function gets close to the right answer.
|
2512
|
+
|
2513
|
+
def _inverse_transform(x, lmbda):
|
2514
|
+
x_inv = np.zeros(x.shape, dtype=x.dtype)
|
2515
|
+
pos = x >= 0
|
2516
|
+
|
2517
|
+
# when x >= 0
|
2518
|
+
if abs(lmbda) < np.spacing(1.):
|
2519
|
+
x_inv[pos] = np.exp(x[pos]) - 1
|
2520
|
+
else: # lmbda != 0
|
2521
|
+
x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
|
2522
|
+
|
2523
|
+
# when x < 0
|
2524
|
+
if abs(lmbda - 2) > np.spacing(1.):
|
2525
|
+
x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
|
2526
|
+
1 / (2 - lmbda))
|
2527
|
+
else: # lmbda == 2
|
2528
|
+
x_inv[~pos] = 1 - np.exp(-x[~pos])
|
2529
|
+
|
2530
|
+
return x_inv
|
2531
|
+
|
2532
|
+
n_samples = 20000
|
2533
|
+
rng = np.random.RandomState(1234567)
|
2534
|
+
x = rng.normal(loc=0, scale=1, size=(n_samples))
|
2535
|
+
|
2536
|
+
x_inv = _inverse_transform(x, lmbda)
|
2537
|
+
xt, maxlog = stats.yeojohnson(x_inv)
|
2538
|
+
|
2539
|
+
assert_allclose(maxlog, lmbda, atol=1e-2)
|
2540
|
+
|
2541
|
+
assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2)
|
2542
|
+
assert_almost_equal(0, xt.mean(), decimal=1)
|
2543
|
+
assert_almost_equal(1, xt.std(), decimal=1)
|
2544
|
+
|
2545
|
+
def test_empty(self):
|
2546
|
+
assert_(stats.yeojohnson([]).shape == (0,))
|
2547
|
+
|
2548
|
+
def test_array_like(self):
|
2549
|
+
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
|
2550
|
+
xt1, _ = stats.yeojohnson(x)
|
2551
|
+
xt2, _ = stats.yeojohnson(list(x))
|
2552
|
+
assert_allclose(xt1, xt2, rtol=1e-12)
|
2553
|
+
|
2554
|
+
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
|
2555
|
+
def test_input_dtype_complex(self, dtype):
|
2556
|
+
x = np.arange(6, dtype=dtype)
|
2557
|
+
err_msg = ('Yeo-Johnson transformation is not defined for complex '
|
2558
|
+
'numbers.')
|
2559
|
+
with pytest.raises(ValueError, match=err_msg):
|
2560
|
+
stats.yeojohnson(x)
|
2561
|
+
|
2562
|
+
@pytest.mark.parametrize('dtype', [np.int8, np.uint8, np.int16, np.int32])
|
2563
|
+
def test_input_dtype_integer(self, dtype):
|
2564
|
+
x_int = np.arange(8, dtype=dtype)
|
2565
|
+
x_float = np.arange(8, dtype=np.float64)
|
2566
|
+
xt_int, lmbda_int = stats.yeojohnson(x_int)
|
2567
|
+
xt_float, lmbda_float = stats.yeojohnson(x_float)
|
2568
|
+
assert_allclose(xt_int, xt_float, rtol=1e-7)
|
2569
|
+
assert_allclose(lmbda_int, lmbda_float, rtol=1e-7)
|
2570
|
+
|
2571
|
+
def test_input_high_variance(self):
|
2572
|
+
# non-regression test for gh-10821
|
2573
|
+
x = np.array([3251637.22, 620695.44, 11642969.00, 2223468.22,
|
2574
|
+
85307500.00, 16494389.89, 917215.88, 11642969.00,
|
2575
|
+
2145773.87, 4962000.00, 620695.44, 651234.50,
|
2576
|
+
1907876.71, 4053297.88, 3251637.22, 3259103.08,
|
2577
|
+
9547969.00, 20631286.23, 12807072.08, 2383819.84,
|
2578
|
+
90114500.00, 17209575.46, 12852969.00, 2414609.99,
|
2579
|
+
2170368.23])
|
2580
|
+
xt_yeo, lam_yeo = stats.yeojohnson(x)
|
2581
|
+
xt_box, lam_box = stats.boxcox(x + 1)
|
2582
|
+
assert_allclose(xt_yeo, xt_box, rtol=1e-6)
|
2583
|
+
assert_allclose(lam_yeo, lam_box, rtol=1e-6)
|
2584
|
+
|
2585
|
+
@pytest.mark.parametrize('x', [
|
2586
|
+
np.array([1.0, float("nan"), 2.0]),
|
2587
|
+
np.array([1.0, float("inf"), 2.0]),
|
2588
|
+
np.array([1.0, -float("inf"), 2.0]),
|
2589
|
+
np.array([-1.0, float("nan"), float("inf"), -float("inf"), 1.0])
|
2590
|
+
])
|
2591
|
+
def test_nonfinite_input(self, x):
|
2592
|
+
with pytest.raises(ValueError, match='Yeo-Johnson input must be finite'):
|
2593
|
+
xt_yeo, lam_yeo = stats.yeojohnson(x)
|
2594
|
+
|
2595
|
+
@pytest.mark.parametrize('x', [
|
2596
|
+
# Attempt to trigger overflow in power expressions.
|
2597
|
+
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0,
|
2598
|
+
2009.0, 1980.0, 1999.0, 2007.0, 1991.0]),
|
2599
|
+
# Attempt to trigger overflow with a large optimal lambda.
|
2600
|
+
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0]),
|
2601
|
+
# Attempt to trigger overflow with large data.
|
2602
|
+
np.array([2003.0e200, 1950.0e200, 1997.0e200, 2000.0e200, 2009.0e200])
|
2603
|
+
])
|
2604
|
+
def test_overflow(self, x):
|
2605
|
+
# non-regression test for gh-18389
|
2606
|
+
|
2607
|
+
def optimizer(fun, lam_yeo):
|
2608
|
+
out = optimize.fminbound(fun, -lam_yeo, lam_yeo, xtol=1.48e-08)
|
2609
|
+
result = optimize.OptimizeResult()
|
2610
|
+
result.x = out
|
2611
|
+
return result
|
2612
|
+
|
2613
|
+
with np.errstate(all="raise"):
|
2614
|
+
xt_yeo, lam_yeo = stats.yeojohnson(x)
|
2615
|
+
xt_box, lam_box = stats.boxcox(
|
2616
|
+
x + 1, optimizer=partial(optimizer, lam_yeo=lam_yeo))
|
2617
|
+
assert np.isfinite(np.var(xt_yeo))
|
2618
|
+
assert np.isfinite(np.var(xt_box))
|
2619
|
+
assert_allclose(lam_yeo, lam_box, rtol=1e-6)
|
2620
|
+
assert_allclose(xt_yeo, xt_box, rtol=1e-4)
|
2621
|
+
|
2622
|
+
@pytest.mark.parametrize('x', [
|
2623
|
+
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0,
|
2624
|
+
2009.0, 1980.0, 1999.0, 2007.0, 1991.0]),
|
2625
|
+
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0])
|
2626
|
+
])
|
2627
|
+
@pytest.mark.parametrize('scale', [1, 1e-12, 1e-32, 1e-150, 1e32, 1e200])
|
2628
|
+
@pytest.mark.parametrize('sign', [1, -1])
|
2629
|
+
def test_overflow_underflow_signed_data(self, x, scale, sign):
|
2630
|
+
# non-regression test for gh-18389
|
2631
|
+
with np.errstate(all="raise"):
|
2632
|
+
xt_yeo, lam_yeo = stats.yeojohnson(sign * x * scale)
|
2633
|
+
assert np.all(np.sign(sign * x) == np.sign(xt_yeo))
|
2634
|
+
assert np.isfinite(lam_yeo)
|
2635
|
+
assert np.isfinite(np.var(xt_yeo))
|
2636
|
+
|
2637
|
+
@pytest.mark.parametrize('x', [
|
2638
|
+
np.array([0, 1, 2, 3]),
|
2639
|
+
np.array([0, -1, 2, -3]),
|
2640
|
+
np.array([0, 0, 0])
|
2641
|
+
])
|
2642
|
+
@pytest.mark.parametrize('sign', [1, -1])
|
2643
|
+
@pytest.mark.parametrize('brack', [None, (-2, 2)])
|
2644
|
+
def test_integer_signed_data(self, x, sign, brack):
|
2645
|
+
with np.errstate(all="raise"):
|
2646
|
+
x_int = sign * x
|
2647
|
+
x_float = x_int.astype(np.float64)
|
2648
|
+
lam_yeo_int = stats.yeojohnson_normmax(x_int, brack=brack)
|
2649
|
+
xt_yeo_int = stats.yeojohnson(x_int, lmbda=lam_yeo_int)
|
2650
|
+
lam_yeo_float = stats.yeojohnson_normmax(x_float, brack=brack)
|
2651
|
+
xt_yeo_float = stats.yeojohnson(x_float, lmbda=lam_yeo_float)
|
2652
|
+
assert np.all(np.sign(x_int) == np.sign(xt_yeo_int))
|
2653
|
+
assert np.isfinite(lam_yeo_int)
|
2654
|
+
assert np.isfinite(np.var(xt_yeo_int))
|
2655
|
+
assert lam_yeo_int == lam_yeo_float
|
2656
|
+
assert np.all(xt_yeo_int == xt_yeo_float)
|
2657
|
+
|
2658
|
+
|
2659
|
+
class TestYeojohnsonNormmax:
|
2660
|
+
def setup_method(self):
|
2661
|
+
self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
|
2662
|
+
|
2663
|
+
def test_mle(self):
|
2664
|
+
maxlog = stats.yeojohnson_normmax(self.x)
|
2665
|
+
assert_allclose(maxlog, 1.876393, rtol=1e-6)
|
2666
|
+
|
2667
|
+
def test_darwin_example(self):
|
2668
|
+
# test from original paper "A new family of power transformations to
|
2669
|
+
# improve normality or symmetry" by Yeo and Johnson.
|
2670
|
+
x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
|
2671
|
+
7.5, -6.0]
|
2672
|
+
lmbda = stats.yeojohnson_normmax(x)
|
2673
|
+
assert np.allclose(lmbda, 1.305, atol=1e-3)
|
2674
|
+
|
2675
|
+
|
2676
|
+
class TestCircFuncs:
|
2677
|
+
# In gh-5747, the R package `circular` was used to calculate reference
|
2678
|
+
# values for the circular variance, e.g.:
|
2679
|
+
# library(circular)
|
2680
|
+
# options(digits=16)
|
2681
|
+
# x = c(0, 2*pi/3, 5*pi/3)
|
2682
|
+
# var.circular(x)
|
2683
|
+
@pytest.mark.parametrize("test_func,expected",
|
2684
|
+
[(stats.circmean, 0.167690146),
|
2685
|
+
(stats.circvar, 0.006455174000787767),
|
2686
|
+
(stats.circstd, 6.520702116)])
|
2687
|
+
def test_circfuncs(self, test_func, expected, xp):
|
2688
|
+
x = xp.asarray([355., 5., 2., 359., 10., 350.])
|
2689
|
+
xp_assert_close(test_func(x, high=360), xp.asarray(expected))
|
2690
|
+
|
2691
|
+
def test_circfuncs_small(self, xp):
|
2692
|
+
# Default tolerances won't work here because the reference values
|
2693
|
+
# are approximations. Ensure all array types work in float64 to
|
2694
|
+
# avoid needing separate float32 and float64 tolerances.
|
2695
|
+
x = xp.asarray([20, 21, 22, 18, 19, 20.5, 19.2], dtype=xp.float64)
|
2696
|
+
M1 = xp.mean(x)
|
2697
|
+
M2 = stats.circmean(x, high=360)
|
2698
|
+
xp_assert_close(M2, M1, rtol=1e-5)
|
2699
|
+
|
2700
|
+
V1 = xp.var(x*xp.pi/180, correction=0)
|
2701
|
+
# for small variations, circvar is approximately half the
|
2702
|
+
# linear variance
|
2703
|
+
V1 = V1 / 2.
|
2704
|
+
V2 = stats.circvar(x, high=360)
|
2705
|
+
xp_assert_close(V2, V1, rtol=1e-4)
|
2706
|
+
|
2707
|
+
S1 = xp.std(x, correction=0)
|
2708
|
+
S2 = stats.circstd(x, high=360)
|
2709
|
+
xp_assert_close(S2, S1, rtol=1e-4)
|
2710
|
+
|
2711
|
+
@pytest.mark.parametrize("test_func, numpy_func",
|
2712
|
+
[(stats.circmean, np.mean),
|
2713
|
+
(stats.circvar, np.var),
|
2714
|
+
(stats.circstd, np.std)])
|
2715
|
+
def test_circfuncs_close(self, test_func, numpy_func, xp):
|
2716
|
+
# circfuncs should handle very similar inputs (gh-12740)
|
2717
|
+
x = np.asarray([0.12675364631578953] * 10 + [0.12675365920187928] * 100)
|
2718
|
+
circstat = test_func(xp.asarray(x))
|
2719
|
+
normal = xp.asarray(numpy_func(x))
|
2720
|
+
xp_assert_close(circstat, normal, atol=2e-8)
|
2721
|
+
|
2722
|
+
@pytest.mark.parametrize('circfunc', [stats.circmean,
|
2723
|
+
stats.circvar,
|
2724
|
+
stats.circstd])
|
2725
|
+
def test_circmean_axis(self, xp, circfunc):
|
2726
|
+
x = xp.asarray([[355, 5, 2, 359, 10, 350],
|
2727
|
+
[351, 7, 4, 352, 9, 349],
|
2728
|
+
[357, 9, 8, 358, 4, 356.]])
|
2729
|
+
res = circfunc(x, high=360)
|
2730
|
+
ref = circfunc(xp.reshape(x, (-1,)), high=360)
|
2731
|
+
xp_assert_close(res, xp.asarray(ref))
|
2732
|
+
|
2733
|
+
res = circfunc(x, high=360, axis=1)
|
2734
|
+
ref = [circfunc(x[i, :], high=360) for i in range(x.shape[0])]
|
2735
|
+
xp_assert_close(res, xp.stack(ref))
|
2736
|
+
|
2737
|
+
res = circfunc(x, high=360, axis=0)
|
2738
|
+
ref = [circfunc(x[:, i], high=360) for i in range(x.shape[1])]
|
2739
|
+
xp_assert_close(res, xp.stack(ref))
|
2740
|
+
|
2741
|
+
@pytest.mark.parametrize("test_func,expected",
|
2742
|
+
[(stats.circmean, 0.167690146),
|
2743
|
+
(stats.circvar, 0.006455174270186603),
|
2744
|
+
(stats.circstd, 6.520702116)])
|
2745
|
+
def test_circfuncs_array_like(self, test_func, expected, xp):
|
2746
|
+
x = xp.asarray([355, 5, 2, 359, 10, 350.])
|
2747
|
+
xp_assert_close(test_func(x, high=360), xp.asarray(expected))
|
2748
|
+
|
2749
|
+
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
|
2750
|
+
stats.circstd])
|
2751
|
+
def test_empty(self, test_func, xp):
|
2752
|
+
dtype = xp.float64
|
2753
|
+
x = xp.asarray([], dtype=dtype)
|
2754
|
+
if is_numpy(xp):
|
2755
|
+
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
|
2756
|
+
res = test_func(x)
|
2757
|
+
else:
|
2758
|
+
with np.testing.suppress_warnings() as sup:
|
2759
|
+
# for array_api_strict
|
2760
|
+
sup.filter(RuntimeWarning, "Mean of empty slice")
|
2761
|
+
sup.filter(RuntimeWarning, "invalid value encountered")
|
2762
|
+
res = test_func(x)
|
2763
|
+
xp_assert_equal(res, xp.asarray(xp.nan, dtype=dtype))
|
2764
|
+
|
2765
|
+
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
|
2766
|
+
stats.circstd])
|
2767
|
+
def test_nan_propagate(self, test_func, xp):
|
2768
|
+
x = xp.asarray([355, 5, 2, 359, 10, 350, np.nan])
|
2769
|
+
xp_assert_equal(test_func(x, high=360), xp.asarray(xp.nan))
|
2770
|
+
|
2771
|
+
@pytest.mark.parametrize("test_func,expected",
|
2772
|
+
[(stats.circmean,
|
2773
|
+
{None: np.nan, 0: 355.66582264, 1: 0.28725053}),
|
2774
|
+
(stats.circvar,
|
2775
|
+
{None: np.nan,
|
2776
|
+
0: 0.002570671054089924,
|
2777
|
+
1: 0.005545914017677123}),
|
2778
|
+
(stats.circstd,
|
2779
|
+
{None: np.nan, 0: 4.11093193, 1: 6.04265394})])
|
2780
|
+
def test_nan_propagate_array(self, test_func, expected, xp):
|
2781
|
+
x = xp.asarray([[355, 5, 2, 359, 10, 350, 1],
|
2782
|
+
[351, 7, 4, 352, 9, 349, np.nan],
|
2783
|
+
[1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
|
2784
|
+
for axis in expected.keys():
|
2785
|
+
out = test_func(x, high=360, axis=axis)
|
2786
|
+
if axis is None:
|
2787
|
+
xp_assert_equal(out, xp.asarray(xp.nan))
|
2788
|
+
else:
|
2789
|
+
xp_assert_close(out[0], xp.asarray(expected[axis]))
|
2790
|
+
xp_assert_equal(out[1:], xp.full_like(out[1:], xp.nan))
|
2791
|
+
|
2792
|
+
def test_circmean_scalar(self, xp):
|
2793
|
+
x = xp.asarray(1.)[()]
|
2794
|
+
M1 = x
|
2795
|
+
M2 = stats.circmean(x)
|
2796
|
+
xp_assert_close(M2, M1, rtol=1e-5)
|
2797
|
+
|
2798
|
+
def test_circmean_range(self, xp):
|
2799
|
+
# regression test for gh-6420: circmean(..., high, low) must be
|
2800
|
+
# between `high` and `low`
|
2801
|
+
m = stats.circmean(xp.arange(0, 2, 0.1), xp.pi, -xp.pi)
|
2802
|
+
xp_assert_less(m, xp.asarray(xp.pi))
|
2803
|
+
xp_assert_less(-m, xp.asarray(xp.pi))
|
2804
|
+
|
2805
|
+
def test_circfuncs_uint8(self, xp):
|
2806
|
+
# regression test for gh-7255: overflow when working with
|
2807
|
+
# numpy uint8 data type
|
2808
|
+
x = xp.asarray([150, 10], dtype=xp.uint8)
|
2809
|
+
xp_assert_close(stats.circmean(x, high=180), xp.asarray(170.0))
|
2810
|
+
xp_assert_close(stats.circvar(x, high=180), xp.asarray(0.2339555554617))
|
2811
|
+
xp_assert_close(stats.circstd(x, high=180), xp.asarray(20.91551378))
|
2812
|
+
|
2813
|
+
def test_circstd_zero(self, xp):
|
2814
|
+
# circstd() of a single number should return positive zero.
|
2815
|
+
y = stats.circstd(xp.asarray([0]))
|
2816
|
+
assert math.copysign(1.0, y) == 1.0
|
2817
|
+
|
2818
|
+
def test_circmean_accuracy_tiny_input(self, xp):
|
2819
|
+
# For tiny x such that sin(x) == x and cos(x) == 1.0 numerically,
|
2820
|
+
# circmean(x) should return x because atan2(sin(x), cos(x)) == x.
|
2821
|
+
# This test verifies this.
|
2822
|
+
#
|
2823
|
+
# The purpose of this test is not to show that circmean() is
|
2824
|
+
# accurate in the last digit for certain input, because this is
|
2825
|
+
# neither guaranteed not particularly useful. Rather, it is a
|
2826
|
+
# "white-box" sanity check that no undue loss of precision is
|
2827
|
+
# introduced by conversion between (high - low) and (2 * pi).
|
2828
|
+
|
2829
|
+
x = xp.linspace(1e-9, 6e-9, 50)
|
2830
|
+
assert xp.all(xp.sin(x) == x) and xp.all(xp.cos(x) == 1.0)
|
2831
|
+
|
2832
|
+
m = (x * (2 * xp.pi) / (2 * xp.pi)) != x
|
2833
|
+
assert xp.any(m)
|
2834
|
+
x = x[m]
|
2835
|
+
|
2836
|
+
y = stats.circmean(x[:, None], axis=1)
|
2837
|
+
assert xp.all(y == x)
|
2838
|
+
|
2839
|
+
def test_circmean_accuracy_huge_input(self, xp):
|
2840
|
+
# White-box test that circmean() does not introduce undue loss of
|
2841
|
+
# numerical accuracy by eagerly rotating the input. This is detected
|
2842
|
+
# by supplying a huge input x such that (x - low) == x numerically.
|
2843
|
+
x = xp.asarray(1e17, dtype=xp.float64)
|
2844
|
+
y = math.atan2(xp.sin(x), xp.cos(x)) # -2.6584887370946806
|
2845
|
+
expected = xp.asarray(y, dtype=xp.float64)
|
2846
|
+
actual = stats.circmean(x, high=xp.pi, low=-xp.pi)
|
2847
|
+
xp_assert_close(actual, expected, rtol=1e-15, atol=0.0)
|
2848
|
+
|
2849
|
+
|
2850
|
+
class TestCircFuncsNanPolicy:
|
2851
|
+
# `nan_policy` is implemented by the `_axis_nan_policy` decorator, which is
|
2852
|
+
# not yet array-API compatible. When it is array-API compatible, the generic
|
2853
|
+
# tests run on every function will be much stronger than these, so these
|
2854
|
+
# will not be necessary. So I don't see a need to make these array-API compatible;
|
2855
|
+
# when the time comes, they can just be removed.
|
2856
|
+
@pytest.mark.parametrize("test_func,expected",
|
2857
|
+
[(stats.circmean,
|
2858
|
+
{None: 359.4178026893944,
|
2859
|
+
0: np.array([353.0, 6.0, 3.0, 355.5, 9.5,
|
2860
|
+
349.5]),
|
2861
|
+
1: np.array([0.16769015, 358.66510252])}),
|
2862
|
+
(stats.circvar,
|
2863
|
+
{None: 0.008396678483192477,
|
2864
|
+
0: np.array([1.9997969, 0.4999873, 0.4999873,
|
2865
|
+
6.1230956, 0.1249992, 0.1249992]
|
2866
|
+
)*(np.pi/180)**2,
|
2867
|
+
1: np.array([0.006455174270186603,
|
2868
|
+
0.01016767581393285])}),
|
2869
|
+
(stats.circstd,
|
2870
|
+
{None: 7.440570778057074,
|
2871
|
+
0: np.array([2.00020313, 1.00002539, 1.00002539,
|
2872
|
+
3.50108929, 0.50000317,
|
2873
|
+
0.50000317]),
|
2874
|
+
1: np.array([6.52070212, 8.19138093])})])
|
2875
|
+
def test_nan_omit_array(self, test_func, expected):
|
2876
|
+
x = np.array([[355, 5, 2, 359, 10, 350, np.nan],
|
2877
|
+
[351, 7, 4, 352, 9, 349, np.nan],
|
2878
|
+
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
|
2879
|
+
for axis in expected.keys():
|
2880
|
+
if axis is None:
|
2881
|
+
out = test_func(x, high=360, nan_policy='omit', axis=axis)
|
2882
|
+
assert_allclose(out, expected[axis], rtol=1e-7)
|
2883
|
+
else:
|
2884
|
+
with pytest.warns(SmallSampleWarning, match=too_small_nd_omit):
|
2885
|
+
out = test_func(x, high=360, nan_policy='omit', axis=axis)
|
2886
|
+
assert_allclose(out[:-1], expected[axis], rtol=1e-7)
|
2887
|
+
assert_(np.isnan(out[-1]))
|
2888
|
+
|
2889
|
+
@pytest.mark.parametrize("test_func,expected",
|
2890
|
+
[(stats.circmean, 0.167690146),
|
2891
|
+
(stats.circvar, 0.006455174270186603),
|
2892
|
+
(stats.circstd, 6.520702116)])
|
2893
|
+
def test_nan_omit(self, test_func, expected):
|
2894
|
+
x = [355, 5, 2, 359, 10, 350, np.nan]
|
2895
|
+
assert_allclose(test_func(x, high=360, nan_policy='omit'),
|
2896
|
+
expected, rtol=1e-7)
|
2897
|
+
|
2898
|
+
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
|
2899
|
+
stats.circstd])
|
2900
|
+
def test_nan_omit_all(self, test_func):
|
2901
|
+
x = [np.nan, np.nan, np.nan, np.nan, np.nan]
|
2902
|
+
with pytest.warns(SmallSampleWarning, match=too_small_1d_omit):
|
2903
|
+
assert_(np.isnan(test_func(x, nan_policy='omit')))
|
2904
|
+
|
2905
|
+
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
|
2906
|
+
stats.circstd])
|
2907
|
+
def test_nan_omit_all_axis(self, test_func):
|
2908
|
+
with pytest.warns(SmallSampleWarning, match=too_small_nd_omit):
|
2909
|
+
x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
|
2910
|
+
[np.nan, np.nan, np.nan, np.nan, np.nan]])
|
2911
|
+
out = test_func(x, nan_policy='omit', axis=1)
|
2912
|
+
assert_(np.isnan(out).all())
|
2913
|
+
assert_(len(out) == 2)
|
2914
|
+
|
2915
|
+
@pytest.mark.parametrize("x",
|
2916
|
+
[[355, 5, 2, 359, 10, 350, np.nan],
|
2917
|
+
np.array([[355, 5, 2, 359, 10, 350, np.nan],
|
2918
|
+
[351, 7, 4, 352, np.nan, 9, 349]])])
|
2919
|
+
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
|
2920
|
+
stats.circstd])
|
2921
|
+
def test_nan_raise(self, test_func, x):
|
2922
|
+
assert_raises(ValueError, test_func, x, high=360, nan_policy='raise')
|
2923
|
+
|
2924
|
+
@pytest.mark.parametrize("x",
|
2925
|
+
[[355, 5, 2, 359, 10, 350, np.nan],
|
2926
|
+
np.array([[355, 5, 2, 359, 10, 350, np.nan],
|
2927
|
+
[351, 7, 4, 352, np.nan, 9, 349]])])
|
2928
|
+
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
|
2929
|
+
stats.circstd])
|
2930
|
+
def test_bad_nan_policy(self, test_func, x):
|
2931
|
+
assert_raises(ValueError, test_func, x, high=360, nan_policy='foobar')
|
2932
|
+
|
2933
|
+
|
2934
|
+
class TestMedianTest:
|
2935
|
+
|
2936
|
+
def test_bad_n_samples(self):
|
2937
|
+
# median_test requires at least two samples.
|
2938
|
+
assert_raises(ValueError, stats.median_test, [1, 2, 3])
|
2939
|
+
|
2940
|
+
def test_empty_sample(self):
|
2941
|
+
# Each sample must contain at least one value.
|
2942
|
+
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
|
2943
|
+
|
2944
|
+
def test_empty_when_ties_ignored(self):
|
2945
|
+
# The grand median is 1, and all values in the first argument are
|
2946
|
+
# equal to the grand median. With ties="ignore", those values are
|
2947
|
+
# ignored, which results in the first sample being (in effect) empty.
|
2948
|
+
# This should raise a ValueError.
|
2949
|
+
assert_raises(ValueError, stats.median_test,
|
2950
|
+
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
|
2951
|
+
|
2952
|
+
def test_empty_contingency_row(self):
|
2953
|
+
# The grand median is 1, and with the default ties="below", all the
|
2954
|
+
# values in the samples are counted as being below the grand median.
|
2955
|
+
# This would result a row of zeros in the contingency table, which is
|
2956
|
+
# an error.
|
2957
|
+
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
|
2958
|
+
|
2959
|
+
# With ties="above", all the values are counted as above the
|
2960
|
+
# grand median.
|
2961
|
+
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
|
2962
|
+
ties="above")
|
2963
|
+
|
2964
|
+
def test_bad_ties(self):
|
2965
|
+
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
|
2966
|
+
ties="foo")
|
2967
|
+
|
2968
|
+
def test_bad_nan_policy(self):
|
2969
|
+
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
|
2970
|
+
nan_policy='foobar')
|
2971
|
+
|
2972
|
+
def test_bad_keyword(self):
|
2973
|
+
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
|
2974
|
+
foo="foo")
|
2975
|
+
|
2976
|
+
def test_simple(self):
|
2977
|
+
x = [1, 2, 3]
|
2978
|
+
y = [1, 2, 3]
|
2979
|
+
stat, p, med, tbl = stats.median_test(x, y)
|
2980
|
+
|
2981
|
+
# The median is floating point, but this equality test should be safe.
|
2982
|
+
assert_equal(med, 2.0)
|
2983
|
+
|
2984
|
+
assert_array_equal(tbl, [[1, 1], [2, 2]])
|
2985
|
+
|
2986
|
+
# The expected values of the contingency table equal the contingency
|
2987
|
+
# table, so the statistic should be 0 and the p-value should be 1.
|
2988
|
+
assert_equal(stat, 0)
|
2989
|
+
assert_equal(p, 1)
|
2990
|
+
|
2991
|
+
def test_ties_options(self):
|
2992
|
+
# Test the contingency table calculation.
|
2993
|
+
x = [1, 2, 3, 4]
|
2994
|
+
y = [5, 6]
|
2995
|
+
z = [7, 8, 9]
|
2996
|
+
# grand median is 5.
|
2997
|
+
|
2998
|
+
# Default 'ties' option is "below".
|
2999
|
+
stat, p, m, tbl = stats.median_test(x, y, z)
|
3000
|
+
assert_equal(m, 5)
|
3001
|
+
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
|
3002
|
+
|
3003
|
+
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
|
3004
|
+
assert_equal(m, 5)
|
3005
|
+
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
|
3006
|
+
|
3007
|
+
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
|
3008
|
+
assert_equal(m, 5)
|
3009
|
+
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
|
3010
|
+
|
3011
|
+
def test_nan_policy_options(self):
|
3012
|
+
x = [1, 2, np.nan]
|
3013
|
+
y = [4, 5, 6]
|
3014
|
+
mt1 = stats.median_test(x, y, nan_policy='propagate')
|
3015
|
+
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
|
3016
|
+
|
3017
|
+
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
|
3018
|
+
assert_allclose(s, 0.31250000000000006)
|
3019
|
+
assert_allclose(p, 0.57615012203057869)
|
3020
|
+
assert_equal(m, 4.0)
|
3021
|
+
assert_equal(t, np.array([[0, 2], [2, 1]]))
|
3022
|
+
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
|
3023
|
+
|
3024
|
+
def test_basic(self):
|
3025
|
+
# median_test calls chi2_contingency to compute the test statistic
|
3026
|
+
# and p-value. Make sure it hasn't screwed up the call...
|
3027
|
+
|
3028
|
+
x = [1, 2, 3, 4, 5]
|
3029
|
+
y = [2, 4, 6, 8]
|
3030
|
+
|
3031
|
+
stat, p, m, tbl = stats.median_test(x, y)
|
3032
|
+
assert_equal(m, 4)
|
3033
|
+
assert_equal(tbl, [[1, 2], [4, 2]])
|
3034
|
+
|
3035
|
+
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
|
3036
|
+
assert_allclose(stat, exp_stat)
|
3037
|
+
assert_allclose(p, exp_p)
|
3038
|
+
|
3039
|
+
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
|
3040
|
+
assert_equal(m, 4)
|
3041
|
+
assert_equal(tbl, [[1, 2], [4, 2]])
|
3042
|
+
|
3043
|
+
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
|
3044
|
+
assert_allclose(stat, exp_stat)
|
3045
|
+
assert_allclose(p, exp_p)
|
3046
|
+
|
3047
|
+
stat, p, m, tbl = stats.median_test(x, y, correction=False)
|
3048
|
+
assert_equal(m, 4)
|
3049
|
+
assert_equal(tbl, [[1, 2], [4, 2]])
|
3050
|
+
|
3051
|
+
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
|
3052
|
+
assert_allclose(stat, exp_stat)
|
3053
|
+
assert_allclose(p, exp_p)
|
3054
|
+
|
3055
|
+
@pytest.mark.parametrize("correction", [False, True])
|
3056
|
+
def test_result(self, correction):
|
3057
|
+
x = [1, 2, 3]
|
3058
|
+
y = [1, 2, 3]
|
3059
|
+
|
3060
|
+
res = stats.median_test(x, y, correction=correction)
|
3061
|
+
assert_equal((res.statistic, res.pvalue, res.median, res.table), res)
|
3062
|
+
|
3063
|
+
class TestDirectionalStats:
|
3064
|
+
# Reference implementations are not available
|
3065
|
+
def test_directional_stats_correctness(self, xp):
|
3066
|
+
# Data from Fisher: Dispersion on a sphere, 1953 and
|
3067
|
+
# Mardia and Jupp, Directional Statistics.
|
3068
|
+
decl = -np.deg2rad(np.array([343.2, 62., 36.9, 27., 359.,
|
3069
|
+
5.7, 50.4, 357.6, 44.]))
|
3070
|
+
incl = -np.deg2rad(np.array([66.1, 68.7, 70.1, 82.1, 79.5,
|
3071
|
+
73., 69.3, 58.8, 51.4]))
|
3072
|
+
data = np.stack((np.cos(incl) * np.cos(decl),
|
3073
|
+
np.cos(incl) * np.sin(decl),
|
3074
|
+
np.sin(incl)),
|
3075
|
+
axis=1)
|
3076
|
+
|
3077
|
+
decl = xp.asarray(decl.tolist())
|
3078
|
+
incl = xp.asarray(incl.tolist())
|
3079
|
+
data = xp.asarray(data.tolist())
|
3080
|
+
|
3081
|
+
dirstats = stats.directional_stats(data)
|
3082
|
+
directional_mean = dirstats.mean_direction
|
3083
|
+
|
3084
|
+
reference_mean = xp.asarray([0.2984, -0.1346, -0.9449])
|
3085
|
+
xp_assert_close(directional_mean, reference_mean, atol=1e-4)
|
3086
|
+
|
3087
|
+
@pytest.mark.parametrize('angles, ref', [
|
3088
|
+
([-np.pi/2, np.pi/2], 1.),
|
3089
|
+
([0, 2 * np.pi], 0.)
|
3090
|
+
])
|
3091
|
+
def test_directional_stats_2d_special_cases(self, angles, ref, xp):
|
3092
|
+
angles = xp.asarray(angles)
|
3093
|
+
ref = xp.asarray(ref)
|
3094
|
+
data = xp.stack([xp.cos(angles), xp.sin(angles)], axis=1)
|
3095
|
+
res = 1 - stats.directional_stats(data).mean_resultant_length
|
3096
|
+
xp_assert_close(res, ref)
|
3097
|
+
|
3098
|
+
def test_directional_stats_2d(self, xp):
|
3099
|
+
# Test that for circular data directional_stats
|
3100
|
+
# yields the same result as circmean/circvar
|
3101
|
+
rng = np.random.default_rng(0xec9a6899d5a2830e0d1af479dbe1fd0c)
|
3102
|
+
testdata = xp.asarray(2 * xp.pi * rng.random((1000, )))
|
3103
|
+
testdata_vector = xp.stack((xp.cos(testdata),
|
3104
|
+
xp.sin(testdata)),
|
3105
|
+
axis=1)
|
3106
|
+
dirstats = stats.directional_stats(testdata_vector)
|
3107
|
+
directional_mean = dirstats.mean_direction
|
3108
|
+
directional_mean_angle = xp.atan2(directional_mean[1], directional_mean[0])
|
3109
|
+
directional_mean_angle = directional_mean_angle % (2 * xp.pi)
|
3110
|
+
circmean = stats.circmean(testdata)
|
3111
|
+
xp_assert_close(directional_mean_angle, circmean)
|
3112
|
+
|
3113
|
+
directional_var = 1. - dirstats.mean_resultant_length
|
3114
|
+
circular_var = stats.circvar(testdata)
|
3115
|
+
xp_assert_close(directional_var, circular_var)
|
3116
|
+
|
3117
|
+
def test_directional_mean_higher_dim(self, xp):
|
3118
|
+
# test that directional_stats works for higher dimensions
|
3119
|
+
# here a 4D array is reduced over axis = 2
|
3120
|
+
data = xp.asarray([[0.8660254, 0.5, 0.],
|
3121
|
+
[0.8660254, -0.5, 0.]])
|
3122
|
+
full_array = xp.asarray(xp.tile(data, (2, 2, 2, 1)))
|
3123
|
+
expected = xp.asarray([[[1., 0., 0.],
|
3124
|
+
[1., 0., 0.]],
|
3125
|
+
[[1., 0., 0.],
|
3126
|
+
[1., 0., 0.]]])
|
3127
|
+
dirstats = stats.directional_stats(full_array, axis=2)
|
3128
|
+
xp_assert_close(dirstats.mean_direction, expected)
|
3129
|
+
|
3130
|
+
@skip_xp_backends(np_only=True, reason='checking array-like input')
|
3131
|
+
def test_directional_stats_list_ndarray_input(self, xp):
|
3132
|
+
# test that list and numpy array inputs yield same results
|
3133
|
+
data = [[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0]]
|
3134
|
+
data_array = xp.asarray(data, dtype=xp.float64)
|
3135
|
+
ref = stats.directional_stats(data)
|
3136
|
+
res = stats.directional_stats(data_array)
|
3137
|
+
xp_assert_close(res.mean_direction,
|
3138
|
+
xp.asarray(ref.mean_direction))
|
3139
|
+
xp_assert_close(res.mean_resultant_length,
|
3140
|
+
xp.asarray(res.mean_resultant_length))
|
3141
|
+
|
3142
|
+
def test_directional_stats_1d_error(self, xp):
|
3143
|
+
# test that one-dimensional data raises ValueError
|
3144
|
+
data = xp.ones((5, ))
|
3145
|
+
message = (r"samples must at least be two-dimensional. "
|
3146
|
+
r"Instead samples has shape: (5,)")
|
3147
|
+
with pytest.raises(ValueError, match=re.escape(message)):
|
3148
|
+
stats.directional_stats(data)
|
3149
|
+
|
3150
|
+
@pytest.mark.parametrize("dtype", ["float32", "float64"])
|
3151
|
+
def test_directional_stats_normalize(self, dtype, xp):
|
3152
|
+
# test that directional stats calculations yield same results
|
3153
|
+
# for unnormalized input with normalize=True and normalized
|
3154
|
+
# input with normalize=False
|
3155
|
+
data = np.array([[0.8660254, 0.5, 0.],
|
3156
|
+
[1.7320508, -1., 0.]], dtype=dtype)
|
3157
|
+
res = stats.directional_stats(xp.asarray(data), normalize=True)
|
3158
|
+
normalized_data = data / np.linalg.norm(data, axis=-1,
|
3159
|
+
keepdims=True)
|
3160
|
+
ref = stats.directional_stats(normalized_data, normalize=False)
|
3161
|
+
xp_assert_close(res.mean_direction,
|
3162
|
+
xp.asarray(ref.mean_direction))
|
3163
|
+
xp_assert_close(res.mean_resultant_length,
|
3164
|
+
xp.asarray(ref.mean_resultant_length))
|
3165
|
+
|
3166
|
+
|
3167
|
+
class TestFDRControl:
|
3168
|
+
def test_input_validation(self):
|
3169
|
+
message = "`ps` must include only numbers between 0 and 1"
|
3170
|
+
with pytest.raises(ValueError, match=message):
|
3171
|
+
stats.false_discovery_control([-1, 0.5, 0.7])
|
3172
|
+
with pytest.raises(ValueError, match=message):
|
3173
|
+
stats.false_discovery_control([0.5, 0.7, 2])
|
3174
|
+
with pytest.raises(ValueError, match=message):
|
3175
|
+
stats.false_discovery_control([0.5, 0.7, np.nan])
|
3176
|
+
|
3177
|
+
message = "Unrecognized `method` 'YAK'"
|
3178
|
+
with pytest.raises(ValueError, match=message):
|
3179
|
+
stats.false_discovery_control([0.5, 0.7, 0.9], method='YAK')
|
3180
|
+
|
3181
|
+
message = "`axis` must be an integer or `None`"
|
3182
|
+
with pytest.raises(ValueError, match=message):
|
3183
|
+
stats.false_discovery_control([0.5, 0.7, 0.9], axis=1.5)
|
3184
|
+
with pytest.raises(ValueError, match=message):
|
3185
|
+
stats.false_discovery_control([0.5, 0.7, 0.9], axis=(1, 2))
|
3186
|
+
|
3187
|
+
def test_against_TileStats(self):
|
3188
|
+
# See reference [3] of false_discovery_control
|
3189
|
+
ps = [0.005, 0.009, 0.019, 0.022, 0.051, 0.101, 0.361, 0.387]
|
3190
|
+
res = stats.false_discovery_control(ps)
|
3191
|
+
ref = [0.036, 0.036, 0.044, 0.044, 0.082, 0.135, 0.387, 0.387]
|
3192
|
+
assert_allclose(res, ref, atol=1e-3)
|
3193
|
+
|
3194
|
+
@pytest.mark.parametrize("case",
|
3195
|
+
[([0.24617028, 0.01140030, 0.05652047, 0.06841983,
|
3196
|
+
0.07989886, 0.01841490, 0.17540784, 0.06841983,
|
3197
|
+
0.06841983, 0.25464082], 'bh'),
|
3198
|
+
([0.72102493, 0.03339112, 0.16554665, 0.20039952,
|
3199
|
+
0.23402122, 0.05393666, 0.51376399, 0.20039952,
|
3200
|
+
0.20039952, 0.74583488], 'by')])
|
3201
|
+
def test_against_R(self, case):
|
3202
|
+
# Test against p.adjust, e.g.
|
3203
|
+
# p = c(0.22155325, 0.00114003,..., 0.0364813 , 0.25464082)
|
3204
|
+
# p.adjust(p, "BY")
|
3205
|
+
ref, method = case
|
3206
|
+
rng = np.random.default_rng(6134137338861652935)
|
3207
|
+
ps = stats.loguniform.rvs(1e-3, 0.5, size=10, random_state=rng)
|
3208
|
+
ps[3] = ps[7] # force a tie
|
3209
|
+
res = stats.false_discovery_control(ps, method=method)
|
3210
|
+
assert_allclose(res, ref, atol=1e-6)
|
3211
|
+
|
3212
|
+
def test_axis_None(self):
|
3213
|
+
rng = np.random.default_rng(6134137338861652935)
|
3214
|
+
ps = stats.loguniform.rvs(1e-3, 0.5, size=(3, 4, 5), random_state=rng)
|
3215
|
+
res = stats.false_discovery_control(ps, axis=None)
|
3216
|
+
ref = stats.false_discovery_control(ps.ravel())
|
3217
|
+
assert_equal(res, ref)
|
3218
|
+
|
3219
|
+
@pytest.mark.parametrize("axis", [0, 1, -1])
|
3220
|
+
def test_axis(self, axis):
|
3221
|
+
rng = np.random.default_rng(6134137338861652935)
|
3222
|
+
ps = stats.loguniform.rvs(1e-3, 0.5, size=(3, 4, 5), random_state=rng)
|
3223
|
+
res = stats.false_discovery_control(ps, axis=axis)
|
3224
|
+
ref = np.apply_along_axis(stats.false_discovery_control, axis, ps)
|
3225
|
+
assert_equal(res, ref)
|
3226
|
+
|
3227
|
+
def test_edge_cases(self):
|
3228
|
+
assert_array_equal(stats.false_discovery_control([0.25]), [0.25])
|
3229
|
+
assert_array_equal(stats.false_discovery_control(0.25), 0.25)
|
3230
|
+
assert_array_equal(stats.false_discovery_control([]), [])
|
3231
|
+
|
3232
|
+
|
3233
|
+
class TestCommonAxis:
|
3234
|
+
# More thorough testing of `axis` in `test_axis_nan_policy`,
|
3235
|
+
# but those tests aren't run with array API yet. This class
|
3236
|
+
# is in `test_morestats` instead of `test_axis_nan_policy`
|
3237
|
+
# because there is no reason to run `test_axis_nan_policy`
|
3238
|
+
# with the array API CI job right now.
|
3239
|
+
|
3240
|
+
@pytest.mark.parametrize('case', [(stats.sem, {}),
|
3241
|
+
(stats.kstat, {'n': 4}),
|
3242
|
+
(stats.kstat, {'n': 2}),
|
3243
|
+
(stats.variation, {})])
|
3244
|
+
def test_axis(self, case, xp):
|
3245
|
+
fun, kwargs = case
|
3246
|
+
rng = np.random.default_rng(24598245982345)
|
3247
|
+
x = xp.asarray(rng.random((6, 7)))
|
3248
|
+
|
3249
|
+
res = fun(x, **kwargs, axis=0)
|
3250
|
+
ref = xp.stack([fun(x[:, i], **kwargs) for i in range(x.shape[1])])
|
3251
|
+
xp_assert_close(res, ref)
|
3252
|
+
|
3253
|
+
res = fun(x, **kwargs, axis=1)
|
3254
|
+
ref = xp.stack([fun(x[i, :], **kwargs) for i in range(x.shape[0])])
|
3255
|
+
xp_assert_close(res, ref)
|
3256
|
+
|
3257
|
+
res = fun(x, **kwargs, axis=None)
|
3258
|
+
ref = fun(xp.reshape(x, (-1,)), **kwargs)
|
3259
|
+
xp_assert_close(res, ref)
|