scipy 1.16.2__cp312-cp312-win_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scipy/__config__.py +161 -0
- scipy/__init__.py +150 -0
- scipy/_cyutility.cp312-win_arm64.lib +0 -0
- scipy/_cyutility.cp312-win_arm64.pyd +0 -0
- scipy/_distributor_init.py +18 -0
- scipy/_lib/__init__.py +14 -0
- scipy/_lib/_array_api.py +931 -0
- scipy/_lib/_array_api_compat_vendor.py +9 -0
- scipy/_lib/_array_api_no_0d.py +103 -0
- scipy/_lib/_bunch.py +229 -0
- scipy/_lib/_ccallback.py +251 -0
- scipy/_lib/_ccallback_c.cp312-win_arm64.lib +0 -0
- scipy/_lib/_ccallback_c.cp312-win_arm64.pyd +0 -0
- scipy/_lib/_disjoint_set.py +254 -0
- scipy/_lib/_docscrape.py +761 -0
- scipy/_lib/_elementwise_iterative_method.py +346 -0
- scipy/_lib/_fpumode.cp312-win_arm64.lib +0 -0
- scipy/_lib/_fpumode.cp312-win_arm64.pyd +0 -0
- scipy/_lib/_gcutils.py +105 -0
- scipy/_lib/_pep440.py +487 -0
- scipy/_lib/_sparse.py +41 -0
- scipy/_lib/_test_ccallback.cp312-win_arm64.lib +0 -0
- scipy/_lib/_test_ccallback.cp312-win_arm64.pyd +0 -0
- scipy/_lib/_test_deprecation_call.cp312-win_arm64.lib +0 -0
- scipy/_lib/_test_deprecation_call.cp312-win_arm64.pyd +0 -0
- scipy/_lib/_test_deprecation_def.cp312-win_arm64.lib +0 -0
- scipy/_lib/_test_deprecation_def.cp312-win_arm64.pyd +0 -0
- scipy/_lib/_testutils.py +373 -0
- scipy/_lib/_threadsafety.py +58 -0
- scipy/_lib/_tmpdirs.py +86 -0
- scipy/_lib/_uarray/LICENSE +29 -0
- scipy/_lib/_uarray/__init__.py +116 -0
- scipy/_lib/_uarray/_backend.py +707 -0
- scipy/_lib/_uarray/_uarray.cp312-win_arm64.lib +0 -0
- scipy/_lib/_uarray/_uarray.cp312-win_arm64.pyd +0 -0
- scipy/_lib/_util.py +1283 -0
- scipy/_lib/array_api_compat/__init__.py +22 -0
- scipy/_lib/array_api_compat/_internal.py +59 -0
- scipy/_lib/array_api_compat/common/__init__.py +1 -0
- scipy/_lib/array_api_compat/common/_aliases.py +727 -0
- scipy/_lib/array_api_compat/common/_fft.py +213 -0
- scipy/_lib/array_api_compat/common/_helpers.py +1058 -0
- scipy/_lib/array_api_compat/common/_linalg.py +232 -0
- scipy/_lib/array_api_compat/common/_typing.py +192 -0
- scipy/_lib/array_api_compat/cupy/__init__.py +13 -0
- scipy/_lib/array_api_compat/cupy/_aliases.py +156 -0
- scipy/_lib/array_api_compat/cupy/_info.py +336 -0
- scipy/_lib/array_api_compat/cupy/_typing.py +31 -0
- scipy/_lib/array_api_compat/cupy/fft.py +36 -0
- scipy/_lib/array_api_compat/cupy/linalg.py +49 -0
- scipy/_lib/array_api_compat/dask/__init__.py +0 -0
- scipy/_lib/array_api_compat/dask/array/__init__.py +12 -0
- scipy/_lib/array_api_compat/dask/array/_aliases.py +376 -0
- scipy/_lib/array_api_compat/dask/array/_info.py +416 -0
- scipy/_lib/array_api_compat/dask/array/fft.py +21 -0
- scipy/_lib/array_api_compat/dask/array/linalg.py +72 -0
- scipy/_lib/array_api_compat/numpy/__init__.py +28 -0
- scipy/_lib/array_api_compat/numpy/_aliases.py +190 -0
- scipy/_lib/array_api_compat/numpy/_info.py +366 -0
- scipy/_lib/array_api_compat/numpy/_typing.py +30 -0
- scipy/_lib/array_api_compat/numpy/fft.py +35 -0
- scipy/_lib/array_api_compat/numpy/linalg.py +143 -0
- scipy/_lib/array_api_compat/torch/__init__.py +22 -0
- scipy/_lib/array_api_compat/torch/_aliases.py +855 -0
- scipy/_lib/array_api_compat/torch/_info.py +369 -0
- scipy/_lib/array_api_compat/torch/_typing.py +3 -0
- scipy/_lib/array_api_compat/torch/fft.py +85 -0
- scipy/_lib/array_api_compat/torch/linalg.py +121 -0
- scipy/_lib/array_api_extra/__init__.py +38 -0
- scipy/_lib/array_api_extra/_delegation.py +171 -0
- scipy/_lib/array_api_extra/_lib/__init__.py +1 -0
- scipy/_lib/array_api_extra/_lib/_at.py +463 -0
- scipy/_lib/array_api_extra/_lib/_backends.py +46 -0
- scipy/_lib/array_api_extra/_lib/_funcs.py +937 -0
- scipy/_lib/array_api_extra/_lib/_lazy.py +357 -0
- scipy/_lib/array_api_extra/_lib/_testing.py +278 -0
- scipy/_lib/array_api_extra/_lib/_utils/__init__.py +1 -0
- scipy/_lib/array_api_extra/_lib/_utils/_compat.py +74 -0
- scipy/_lib/array_api_extra/_lib/_utils/_compat.pyi +45 -0
- scipy/_lib/array_api_extra/_lib/_utils/_helpers.py +559 -0
- scipy/_lib/array_api_extra/_lib/_utils/_typing.py +10 -0
- scipy/_lib/array_api_extra/_lib/_utils/_typing.pyi +105 -0
- scipy/_lib/array_api_extra/testing.py +359 -0
- scipy/_lib/cobyqa/__init__.py +20 -0
- scipy/_lib/cobyqa/framework.py +1240 -0
- scipy/_lib/cobyqa/main.py +1506 -0
- scipy/_lib/cobyqa/models.py +1529 -0
- scipy/_lib/cobyqa/problem.py +1296 -0
- scipy/_lib/cobyqa/settings.py +132 -0
- scipy/_lib/cobyqa/subsolvers/__init__.py +14 -0
- scipy/_lib/cobyqa/subsolvers/geometry.py +387 -0
- scipy/_lib/cobyqa/subsolvers/optim.py +1203 -0
- scipy/_lib/cobyqa/utils/__init__.py +18 -0
- scipy/_lib/cobyqa/utils/exceptions.py +22 -0
- scipy/_lib/cobyqa/utils/math.py +77 -0
- scipy/_lib/cobyqa/utils/versions.py +67 -0
- scipy/_lib/decorator.py +399 -0
- scipy/_lib/deprecation.py +274 -0
- scipy/_lib/doccer.py +366 -0
- scipy/_lib/messagestream.cp312-win_arm64.lib +0 -0
- scipy/_lib/messagestream.cp312-win_arm64.pyd +0 -0
- scipy/_lib/pyprima/__init__.py +212 -0
- scipy/_lib/pyprima/cobyla/__init__.py +0 -0
- scipy/_lib/pyprima/cobyla/cobyla.py +559 -0
- scipy/_lib/pyprima/cobyla/cobylb.py +714 -0
- scipy/_lib/pyprima/cobyla/geometry.py +226 -0
- scipy/_lib/pyprima/cobyla/initialize.py +215 -0
- scipy/_lib/pyprima/cobyla/trustregion.py +492 -0
- scipy/_lib/pyprima/cobyla/update.py +289 -0
- scipy/_lib/pyprima/common/__init__.py +0 -0
- scipy/_lib/pyprima/common/_bounds.py +34 -0
- scipy/_lib/pyprima/common/_linear_constraints.py +46 -0
- scipy/_lib/pyprima/common/_nonlinear_constraints.py +54 -0
- scipy/_lib/pyprima/common/_project.py +173 -0
- scipy/_lib/pyprima/common/checkbreak.py +93 -0
- scipy/_lib/pyprima/common/consts.py +47 -0
- scipy/_lib/pyprima/common/evaluate.py +99 -0
- scipy/_lib/pyprima/common/history.py +38 -0
- scipy/_lib/pyprima/common/infos.py +30 -0
- scipy/_lib/pyprima/common/linalg.py +435 -0
- scipy/_lib/pyprima/common/message.py +290 -0
- scipy/_lib/pyprima/common/powalg.py +131 -0
- scipy/_lib/pyprima/common/preproc.py +277 -0
- scipy/_lib/pyprima/common/present.py +5 -0
- scipy/_lib/pyprima/common/ratio.py +54 -0
- scipy/_lib/pyprima/common/redrho.py +47 -0
- scipy/_lib/pyprima/common/selectx.py +296 -0
- scipy/_lib/tests/__init__.py +0 -0
- scipy/_lib/tests/test__gcutils.py +110 -0
- scipy/_lib/tests/test__pep440.py +67 -0
- scipy/_lib/tests/test__testutils.py +32 -0
- scipy/_lib/tests/test__threadsafety.py +51 -0
- scipy/_lib/tests/test__util.py +641 -0
- scipy/_lib/tests/test_array_api.py +322 -0
- scipy/_lib/tests/test_bunch.py +169 -0
- scipy/_lib/tests/test_ccallback.py +196 -0
- scipy/_lib/tests/test_config.py +45 -0
- scipy/_lib/tests/test_deprecation.py +10 -0
- scipy/_lib/tests/test_doccer.py +143 -0
- scipy/_lib/tests/test_import_cycles.py +18 -0
- scipy/_lib/tests/test_public_api.py +482 -0
- scipy/_lib/tests/test_scipy_version.py +28 -0
- scipy/_lib/tests/test_tmpdirs.py +48 -0
- scipy/_lib/tests/test_warnings.py +137 -0
- scipy/_lib/uarray.py +31 -0
- scipy/cluster/__init__.py +31 -0
- scipy/cluster/_hierarchy.cp312-win_arm64.lib +0 -0
- scipy/cluster/_hierarchy.cp312-win_arm64.pyd +0 -0
- scipy/cluster/_optimal_leaf_ordering.cp312-win_arm64.lib +0 -0
- scipy/cluster/_optimal_leaf_ordering.cp312-win_arm64.pyd +0 -0
- scipy/cluster/_vq.cp312-win_arm64.lib +0 -0
- scipy/cluster/_vq.cp312-win_arm64.pyd +0 -0
- scipy/cluster/hierarchy.py +4348 -0
- scipy/cluster/tests/__init__.py +0 -0
- scipy/cluster/tests/hierarchy_test_data.py +145 -0
- scipy/cluster/tests/test_disjoint_set.py +202 -0
- scipy/cluster/tests/test_hierarchy.py +1238 -0
- scipy/cluster/tests/test_vq.py +434 -0
- scipy/cluster/vq.py +832 -0
- scipy/conftest.py +683 -0
- scipy/constants/__init__.py +358 -0
- scipy/constants/_codata.py +2266 -0
- scipy/constants/_constants.py +369 -0
- scipy/constants/codata.py +21 -0
- scipy/constants/constants.py +53 -0
- scipy/constants/tests/__init__.py +0 -0
- scipy/constants/tests/test_codata.py +78 -0
- scipy/constants/tests/test_constants.py +83 -0
- scipy/datasets/__init__.py +90 -0
- scipy/datasets/_download_all.py +71 -0
- scipy/datasets/_fetchers.py +225 -0
- scipy/datasets/_registry.py +26 -0
- scipy/datasets/_utils.py +81 -0
- scipy/datasets/tests/__init__.py +0 -0
- scipy/datasets/tests/test_data.py +128 -0
- scipy/differentiate/__init__.py +27 -0
- scipy/differentiate/_differentiate.py +1129 -0
- scipy/differentiate/tests/__init__.py +0 -0
- scipy/differentiate/tests/test_differentiate.py +694 -0
- scipy/fft/__init__.py +114 -0
- scipy/fft/_backend.py +196 -0
- scipy/fft/_basic.py +1650 -0
- scipy/fft/_basic_backend.py +197 -0
- scipy/fft/_debug_backends.py +22 -0
- scipy/fft/_fftlog.py +223 -0
- scipy/fft/_fftlog_backend.py +200 -0
- scipy/fft/_helper.py +348 -0
- scipy/fft/_pocketfft/LICENSE.md +25 -0
- scipy/fft/_pocketfft/__init__.py +9 -0
- scipy/fft/_pocketfft/basic.py +251 -0
- scipy/fft/_pocketfft/helper.py +249 -0
- scipy/fft/_pocketfft/pypocketfft.cp312-win_arm64.lib +0 -0
- scipy/fft/_pocketfft/pypocketfft.cp312-win_arm64.pyd +0 -0
- scipy/fft/_pocketfft/realtransforms.py +109 -0
- scipy/fft/_pocketfft/tests/__init__.py +0 -0
- scipy/fft/_pocketfft/tests/test_basic.py +1011 -0
- scipy/fft/_pocketfft/tests/test_real_transforms.py +505 -0
- scipy/fft/_realtransforms.py +706 -0
- scipy/fft/_realtransforms_backend.py +63 -0
- scipy/fft/tests/__init__.py +0 -0
- scipy/fft/tests/mock_backend.py +96 -0
- scipy/fft/tests/test_backend.py +98 -0
- scipy/fft/tests/test_basic.py +504 -0
- scipy/fft/tests/test_fftlog.py +215 -0
- scipy/fft/tests/test_helper.py +558 -0
- scipy/fft/tests/test_multithreading.py +84 -0
- scipy/fft/tests/test_real_transforms.py +247 -0
- scipy/fftpack/__init__.py +103 -0
- scipy/fftpack/_basic.py +428 -0
- scipy/fftpack/_helper.py +115 -0
- scipy/fftpack/_pseudo_diffs.py +554 -0
- scipy/fftpack/_realtransforms.py +598 -0
- scipy/fftpack/basic.py +20 -0
- scipy/fftpack/convolve.cp312-win_arm64.lib +0 -0
- scipy/fftpack/convolve.cp312-win_arm64.pyd +0 -0
- scipy/fftpack/helper.py +19 -0
- scipy/fftpack/pseudo_diffs.py +22 -0
- scipy/fftpack/realtransforms.py +19 -0
- scipy/fftpack/tests/__init__.py +0 -0
- scipy/fftpack/tests/fftw_double_ref.npz +0 -0
- scipy/fftpack/tests/fftw_longdouble_ref.npz +0 -0
- scipy/fftpack/tests/fftw_single_ref.npz +0 -0
- scipy/fftpack/tests/test.npz +0 -0
- scipy/fftpack/tests/test_basic.py +877 -0
- scipy/fftpack/tests/test_helper.py +54 -0
- scipy/fftpack/tests/test_import.py +33 -0
- scipy/fftpack/tests/test_pseudo_diffs.py +388 -0
- scipy/fftpack/tests/test_real_transforms.py +836 -0
- scipy/integrate/__init__.py +122 -0
- scipy/integrate/_bvp.py +1160 -0
- scipy/integrate/_cubature.py +729 -0
- scipy/integrate/_dop.cp312-win_arm64.lib +0 -0
- scipy/integrate/_dop.cp312-win_arm64.pyd +0 -0
- scipy/integrate/_ivp/__init__.py +8 -0
- scipy/integrate/_ivp/base.py +290 -0
- scipy/integrate/_ivp/bdf.py +478 -0
- scipy/integrate/_ivp/common.py +451 -0
- scipy/integrate/_ivp/dop853_coefficients.py +193 -0
- scipy/integrate/_ivp/ivp.py +755 -0
- scipy/integrate/_ivp/lsoda.py +224 -0
- scipy/integrate/_ivp/radau.py +572 -0
- scipy/integrate/_ivp/rk.py +601 -0
- scipy/integrate/_ivp/tests/__init__.py +0 -0
- scipy/integrate/_ivp/tests/test_ivp.py +1287 -0
- scipy/integrate/_ivp/tests/test_rk.py +37 -0
- scipy/integrate/_lebedev.py +5450 -0
- scipy/integrate/_lsoda.cp312-win_arm64.lib +0 -0
- scipy/integrate/_lsoda.cp312-win_arm64.pyd +0 -0
- scipy/integrate/_ode.py +1395 -0
- scipy/integrate/_odepack.cp312-win_arm64.lib +0 -0
- scipy/integrate/_odepack.cp312-win_arm64.pyd +0 -0
- scipy/integrate/_odepack_py.py +273 -0
- scipy/integrate/_quad_vec.py +674 -0
- scipy/integrate/_quadpack.cp312-win_arm64.lib +0 -0
- scipy/integrate/_quadpack.cp312-win_arm64.pyd +0 -0
- scipy/integrate/_quadpack_py.py +1283 -0
- scipy/integrate/_quadrature.py +1336 -0
- scipy/integrate/_rules/__init__.py +12 -0
- scipy/integrate/_rules/_base.py +518 -0
- scipy/integrate/_rules/_gauss_kronrod.py +202 -0
- scipy/integrate/_rules/_gauss_legendre.py +62 -0
- scipy/integrate/_rules/_genz_malik.py +210 -0
- scipy/integrate/_tanhsinh.py +1385 -0
- scipy/integrate/_test_multivariate.cp312-win_arm64.lib +0 -0
- scipy/integrate/_test_multivariate.cp312-win_arm64.pyd +0 -0
- scipy/integrate/_test_odeint_banded.cp312-win_arm64.lib +0 -0
- scipy/integrate/_test_odeint_banded.cp312-win_arm64.pyd +0 -0
- scipy/integrate/_vode.cp312-win_arm64.lib +0 -0
- scipy/integrate/_vode.cp312-win_arm64.pyd +0 -0
- scipy/integrate/dop.py +15 -0
- scipy/integrate/lsoda.py +15 -0
- scipy/integrate/odepack.py +17 -0
- scipy/integrate/quadpack.py +23 -0
- scipy/integrate/tests/__init__.py +0 -0
- scipy/integrate/tests/test__quad_vec.py +211 -0
- scipy/integrate/tests/test_banded_ode_solvers.py +305 -0
- scipy/integrate/tests/test_bvp.py +714 -0
- scipy/integrate/tests/test_cubature.py +1375 -0
- scipy/integrate/tests/test_integrate.py +840 -0
- scipy/integrate/tests/test_odeint_jac.py +74 -0
- scipy/integrate/tests/test_quadpack.py +680 -0
- scipy/integrate/tests/test_quadrature.py +730 -0
- scipy/integrate/tests/test_tanhsinh.py +1171 -0
- scipy/integrate/vode.py +15 -0
- scipy/interpolate/__init__.py +228 -0
- scipy/interpolate/_bary_rational.py +715 -0
- scipy/interpolate/_bsplines.py +2469 -0
- scipy/interpolate/_cubic.py +973 -0
- scipy/interpolate/_dfitpack.cp312-win_arm64.lib +0 -0
- scipy/interpolate/_dfitpack.cp312-win_arm64.pyd +0 -0
- scipy/interpolate/_dierckx.cp312-win_arm64.lib +0 -0
- scipy/interpolate/_dierckx.cp312-win_arm64.pyd +0 -0
- scipy/interpolate/_fitpack.cp312-win_arm64.lib +0 -0
- scipy/interpolate/_fitpack.cp312-win_arm64.pyd +0 -0
- scipy/interpolate/_fitpack2.py +2397 -0
- scipy/interpolate/_fitpack_impl.py +811 -0
- scipy/interpolate/_fitpack_py.py +898 -0
- scipy/interpolate/_fitpack_repro.py +996 -0
- scipy/interpolate/_interpnd.cp312-win_arm64.lib +0 -0
- scipy/interpolate/_interpnd.cp312-win_arm64.pyd +0 -0
- scipy/interpolate/_interpolate.py +2266 -0
- scipy/interpolate/_ndbspline.py +415 -0
- scipy/interpolate/_ndgriddata.py +329 -0
- scipy/interpolate/_pade.py +67 -0
- scipy/interpolate/_polyint.py +1025 -0
- scipy/interpolate/_ppoly.cp312-win_arm64.lib +0 -0
- scipy/interpolate/_ppoly.cp312-win_arm64.pyd +0 -0
- scipy/interpolate/_rbf.py +290 -0
- scipy/interpolate/_rbfinterp.py +550 -0
- scipy/interpolate/_rbfinterp_pythran.cp312-win_arm64.lib +0 -0
- scipy/interpolate/_rbfinterp_pythran.cp312-win_arm64.pyd +0 -0
- scipy/interpolate/_rgi.py +764 -0
- scipy/interpolate/_rgi_cython.cp312-win_arm64.lib +0 -0
- scipy/interpolate/_rgi_cython.cp312-win_arm64.pyd +0 -0
- scipy/interpolate/dfitpack.py +24 -0
- scipy/interpolate/fitpack.py +31 -0
- scipy/interpolate/fitpack2.py +29 -0
- scipy/interpolate/interpnd.py +24 -0
- scipy/interpolate/interpolate.py +30 -0
- scipy/interpolate/ndgriddata.py +23 -0
- scipy/interpolate/polyint.py +24 -0
- scipy/interpolate/rbf.py +18 -0
- scipy/interpolate/tests/__init__.py +0 -0
- scipy/interpolate/tests/data/bug-1310.npz +0 -0
- scipy/interpolate/tests/data/estimate_gradients_hang.npy +0 -0
- scipy/interpolate/tests/data/gcvspl.npz +0 -0
- scipy/interpolate/tests/test_bary_rational.py +368 -0
- scipy/interpolate/tests/test_bsplines.py +3754 -0
- scipy/interpolate/tests/test_fitpack.py +519 -0
- scipy/interpolate/tests/test_fitpack2.py +1431 -0
- scipy/interpolate/tests/test_gil.py +64 -0
- scipy/interpolate/tests/test_interpnd.py +452 -0
- scipy/interpolate/tests/test_interpolate.py +2630 -0
- scipy/interpolate/tests/test_ndgriddata.py +308 -0
- scipy/interpolate/tests/test_pade.py +107 -0
- scipy/interpolate/tests/test_polyint.py +972 -0
- scipy/interpolate/tests/test_rbf.py +246 -0
- scipy/interpolate/tests/test_rbfinterp.py +534 -0
- scipy/interpolate/tests/test_rgi.py +1151 -0
- scipy/io/__init__.py +116 -0
- scipy/io/_fast_matrix_market/__init__.py +600 -0
- scipy/io/_fast_matrix_market/_fmm_core.cp312-win_arm64.lib +0 -0
- scipy/io/_fast_matrix_market/_fmm_core.cp312-win_arm64.pyd +0 -0
- scipy/io/_fortran.py +354 -0
- scipy/io/_harwell_boeing/__init__.py +7 -0
- scipy/io/_harwell_boeing/_fortran_format_parser.py +316 -0
- scipy/io/_harwell_boeing/hb.py +571 -0
- scipy/io/_harwell_boeing/tests/__init__.py +0 -0
- scipy/io/_harwell_boeing/tests/test_fortran_format.py +74 -0
- scipy/io/_harwell_boeing/tests/test_hb.py +70 -0
- scipy/io/_idl.py +917 -0
- scipy/io/_mmio.py +968 -0
- scipy/io/_netcdf.py +1104 -0
- scipy/io/_test_fortran.cp312-win_arm64.lib +0 -0
- scipy/io/_test_fortran.cp312-win_arm64.pyd +0 -0
- scipy/io/arff/__init__.py +28 -0
- scipy/io/arff/_arffread.py +873 -0
- scipy/io/arff/arffread.py +19 -0
- scipy/io/arff/tests/__init__.py +0 -0
- scipy/io/arff/tests/data/iris.arff +225 -0
- scipy/io/arff/tests/data/missing.arff +8 -0
- scipy/io/arff/tests/data/nodata.arff +11 -0
- scipy/io/arff/tests/data/quoted_nominal.arff +13 -0
- scipy/io/arff/tests/data/quoted_nominal_spaces.arff +13 -0
- scipy/io/arff/tests/data/test1.arff +10 -0
- scipy/io/arff/tests/data/test10.arff +8 -0
- scipy/io/arff/tests/data/test11.arff +11 -0
- scipy/io/arff/tests/data/test2.arff +15 -0
- scipy/io/arff/tests/data/test3.arff +6 -0
- scipy/io/arff/tests/data/test4.arff +11 -0
- scipy/io/arff/tests/data/test5.arff +26 -0
- scipy/io/arff/tests/data/test6.arff +12 -0
- scipy/io/arff/tests/data/test7.arff +15 -0
- scipy/io/arff/tests/data/test8.arff +12 -0
- scipy/io/arff/tests/data/test9.arff +14 -0
- scipy/io/arff/tests/test_arffread.py +421 -0
- scipy/io/harwell_boeing.py +17 -0
- scipy/io/idl.py +17 -0
- scipy/io/matlab/__init__.py +66 -0
- scipy/io/matlab/_byteordercodes.py +75 -0
- scipy/io/matlab/_mio.py +375 -0
- scipy/io/matlab/_mio4.py +632 -0
- scipy/io/matlab/_mio5.py +901 -0
- scipy/io/matlab/_mio5_params.py +281 -0
- scipy/io/matlab/_mio5_utils.cp312-win_arm64.lib +0 -0
- scipy/io/matlab/_mio5_utils.cp312-win_arm64.pyd +0 -0
- scipy/io/matlab/_mio_utils.cp312-win_arm64.lib +0 -0
- scipy/io/matlab/_mio_utils.cp312-win_arm64.pyd +0 -0
- scipy/io/matlab/_miobase.py +435 -0
- scipy/io/matlab/_streams.cp312-win_arm64.lib +0 -0
- scipy/io/matlab/_streams.cp312-win_arm64.pyd +0 -0
- scipy/io/matlab/byteordercodes.py +17 -0
- scipy/io/matlab/mio.py +16 -0
- scipy/io/matlab/mio4.py +17 -0
- scipy/io/matlab/mio5.py +19 -0
- scipy/io/matlab/mio5_params.py +18 -0
- scipy/io/matlab/mio5_utils.py +17 -0
- scipy/io/matlab/mio_utils.py +17 -0
- scipy/io/matlab/miobase.py +16 -0
- scipy/io/matlab/streams.py +16 -0
- scipy/io/matlab/tests/__init__.py +0 -0
- scipy/io/matlab/tests/data/bad_miuint32.mat +0 -0
- scipy/io/matlab/tests/data/bad_miutf8_array_name.mat +0 -0
- scipy/io/matlab/tests/data/big_endian.mat +0 -0
- scipy/io/matlab/tests/data/broken_utf8.mat +0 -0
- scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat +0 -0
- scipy/io/matlab/tests/data/corrupted_zlib_data.mat +0 -0
- scipy/io/matlab/tests/data/debigged_m4.mat +0 -0
- scipy/io/matlab/tests/data/japanese_utf8.txt +5 -0
- scipy/io/matlab/tests/data/little_endian.mat +0 -0
- scipy/io/matlab/tests/data/logical_sparse.mat +0 -0
- scipy/io/matlab/tests/data/malformed1.mat +0 -0
- scipy/io/matlab/tests/data/miuint32_for_miint32.mat +0 -0
- scipy/io/matlab/tests/data/miutf8_array_name.mat +0 -0
- scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat +0 -0
- scipy/io/matlab/tests/data/one_by_zero_char.mat +0 -0
- scipy/io/matlab/tests/data/parabola.mat +0 -0
- scipy/io/matlab/tests/data/single_empty_string.mat +0 -0
- scipy/io/matlab/tests/data/some_functions.mat +0 -0
- scipy/io/matlab/tests/data/sqr.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test_empty_struct.mat +0 -0
- scipy/io/matlab/tests/data/test_mat4_le_floats.mat +0 -0
- scipy/io/matlab/tests/data/test_skip_variable.mat +0 -0
- scipy/io/matlab/tests/data/testbool_8_WIN64.mat +0 -0
- scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsimplecell.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testvec_4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/test_byteordercodes.py +29 -0
- scipy/io/matlab/tests/test_mio.py +1399 -0
- scipy/io/matlab/tests/test_mio5_utils.py +179 -0
- scipy/io/matlab/tests/test_mio_funcs.py +51 -0
- scipy/io/matlab/tests/test_mio_utils.py +45 -0
- scipy/io/matlab/tests/test_miobase.py +32 -0
- scipy/io/matlab/tests/test_pathological.py +33 -0
- scipy/io/matlab/tests/test_streams.py +241 -0
- scipy/io/mmio.py +17 -0
- scipy/io/netcdf.py +17 -0
- scipy/io/tests/__init__.py +0 -0
- scipy/io/tests/data/Transparent Busy.ani +0 -0
- scipy/io/tests/data/array_float32_1d.sav +0 -0
- scipy/io/tests/data/array_float32_2d.sav +0 -0
- scipy/io/tests/data/array_float32_3d.sav +0 -0
- scipy/io/tests/data/array_float32_4d.sav +0 -0
- scipy/io/tests/data/array_float32_5d.sav +0 -0
- scipy/io/tests/data/array_float32_6d.sav +0 -0
- scipy/io/tests/data/array_float32_7d.sav +0 -0
- scipy/io/tests/data/array_float32_8d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_1d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_2d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_3d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_4d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_5d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_6d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_7d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_8d.sav +0 -0
- scipy/io/tests/data/example_1.nc +0 -0
- scipy/io/tests/data/example_2.nc +0 -0
- scipy/io/tests/data/example_3_maskedvals.nc +0 -0
- scipy/io/tests/data/fortran-3x3d-2i.dat +0 -0
- scipy/io/tests/data/fortran-mixed.dat +0 -0
- scipy/io/tests/data/fortran-sf8-11x1x10.dat +0 -0
- scipy/io/tests/data/fortran-sf8-15x10x22.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x1.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x5.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x7.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x3x5.dat +0 -0
- scipy/io/tests/data/fortran-si4-11x1x10.dat +0 -0
- scipy/io/tests/data/fortran-si4-15x10x22.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x1.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x5.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x7.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x3x5.dat +0 -0
- scipy/io/tests/data/invalid_pointer.sav +0 -0
- scipy/io/tests/data/null_pointer.sav +0 -0
- scipy/io/tests/data/scalar_byte.sav +0 -0
- scipy/io/tests/data/scalar_byte_descr.sav +0 -0
- scipy/io/tests/data/scalar_complex32.sav +0 -0
- scipy/io/tests/data/scalar_complex64.sav +0 -0
- scipy/io/tests/data/scalar_float32.sav +0 -0
- scipy/io/tests/data/scalar_float64.sav +0 -0
- scipy/io/tests/data/scalar_heap_pointer.sav +0 -0
- scipy/io/tests/data/scalar_int16.sav +0 -0
- scipy/io/tests/data/scalar_int32.sav +0 -0
- scipy/io/tests/data/scalar_int64.sav +0 -0
- scipy/io/tests/data/scalar_string.sav +0 -0
- scipy/io/tests/data/scalar_uint16.sav +0 -0
- scipy/io/tests/data/scalar_uint32.sav +0 -0
- scipy/io/tests/data/scalar_uint64.sav +0 -0
- scipy/io/tests/data/struct_arrays.sav +0 -0
- scipy/io/tests/data/struct_arrays_byte_idl80.sav +0 -0
- scipy/io/tests/data/struct_arrays_replicated.sav +0 -0
- scipy/io/tests/data/struct_arrays_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_inherit.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays_replicated.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_pointers.sav +0 -0
- scipy/io/tests/data/struct_pointers_replicated.sav +0 -0
- scipy/io/tests/data/struct_pointers_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_scalars.sav +0 -0
- scipy/io/tests/data/struct_scalars_replicated.sav +0 -0
- scipy/io/tests/data/struct_scalars_replicated_3d.sav +0 -0
- scipy/io/tests/data/test-1234Hz-le-1ch-10S-20bit-extra.wav +0 -0
- scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav +0 -0
- scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav +0 -0
- scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-rf64.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav +0 -0
- scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav +0 -0
- scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-rf64.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav +0 -0
- scipy/io/tests/data/various_compressed.sav +0 -0
- scipy/io/tests/test_fortran.py +264 -0
- scipy/io/tests/test_idl.py +483 -0
- scipy/io/tests/test_mmio.py +831 -0
- scipy/io/tests/test_netcdf.py +550 -0
- scipy/io/tests/test_paths.py +93 -0
- scipy/io/tests/test_wavfile.py +501 -0
- scipy/io/wavfile.py +938 -0
- scipy/linalg/__init__.pxd +1 -0
- scipy/linalg/__init__.py +236 -0
- scipy/linalg/_basic.py +2146 -0
- scipy/linalg/_blas_subroutines.h +164 -0
- scipy/linalg/_cythonized_array_utils.cp312-win_arm64.lib +0 -0
- scipy/linalg/_cythonized_array_utils.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_cythonized_array_utils.pxd +40 -0
- scipy/linalg/_cythonized_array_utils.pyi +16 -0
- scipy/linalg/_decomp.py +1645 -0
- scipy/linalg/_decomp_cholesky.py +413 -0
- scipy/linalg/_decomp_cossin.py +236 -0
- scipy/linalg/_decomp_interpolative.cp312-win_arm64.lib +0 -0
- scipy/linalg/_decomp_interpolative.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_decomp_ldl.py +356 -0
- scipy/linalg/_decomp_lu.py +401 -0
- scipy/linalg/_decomp_lu_cython.cp312-win_arm64.lib +0 -0
- scipy/linalg/_decomp_lu_cython.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_decomp_lu_cython.pyi +6 -0
- scipy/linalg/_decomp_polar.py +113 -0
- scipy/linalg/_decomp_qr.py +494 -0
- scipy/linalg/_decomp_qz.py +452 -0
- scipy/linalg/_decomp_schur.py +336 -0
- scipy/linalg/_decomp_svd.py +545 -0
- scipy/linalg/_decomp_update.cp312-win_arm64.lib +0 -0
- scipy/linalg/_decomp_update.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_expm_frechet.py +417 -0
- scipy/linalg/_fblas.cp312-win_arm64.lib +0 -0
- scipy/linalg/_fblas.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_flapack.cp312-win_arm64.lib +0 -0
- scipy/linalg/_flapack.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_lapack_subroutines.h +1521 -0
- scipy/linalg/_linalg_pythran.cp312-win_arm64.lib +0 -0
- scipy/linalg/_linalg_pythran.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs.py +1050 -0
- scipy/linalg/_matfuncs_expm.cp312-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_expm.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs_expm.pyi +6 -0
- scipy/linalg/_matfuncs_inv_ssq.py +886 -0
- scipy/linalg/_matfuncs_schur_sqrtm.cp312-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_schur_sqrtm.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs_sqrtm.py +107 -0
- scipy/linalg/_matfuncs_sqrtm_triu.cp312-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_sqrtm_triu.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_misc.py +191 -0
- scipy/linalg/_procrustes.py +113 -0
- scipy/linalg/_sketches.py +189 -0
- scipy/linalg/_solve_toeplitz.cp312-win_arm64.lib +0 -0
- scipy/linalg/_solve_toeplitz.cp312-win_arm64.pyd +0 -0
- scipy/linalg/_solvers.py +862 -0
- scipy/linalg/_special_matrices.py +1322 -0
- scipy/linalg/_testutils.py +65 -0
- scipy/linalg/basic.py +23 -0
- scipy/linalg/blas.py +495 -0
- scipy/linalg/cython_blas.cp312-win_arm64.lib +0 -0
- scipy/linalg/cython_blas.cp312-win_arm64.pyd +0 -0
- scipy/linalg/cython_blas.pxd +169 -0
- scipy/linalg/cython_blas.pyx +1432 -0
- scipy/linalg/cython_lapack.cp312-win_arm64.lib +0 -0
- scipy/linalg/cython_lapack.cp312-win_arm64.pyd +0 -0
- scipy/linalg/cython_lapack.pxd +1528 -0
- scipy/linalg/cython_lapack.pyx +12045 -0
- scipy/linalg/decomp.py +23 -0
- scipy/linalg/decomp_cholesky.py +21 -0
- scipy/linalg/decomp_lu.py +21 -0
- scipy/linalg/decomp_qr.py +20 -0
- scipy/linalg/decomp_schur.py +21 -0
- scipy/linalg/decomp_svd.py +21 -0
- scipy/linalg/interpolative.py +989 -0
- scipy/linalg/lapack.py +1081 -0
- scipy/linalg/matfuncs.py +23 -0
- scipy/linalg/misc.py +21 -0
- scipy/linalg/special_matrices.py +22 -0
- scipy/linalg/tests/__init__.py +0 -0
- scipy/linalg/tests/_cython_examples/extending.pyx +23 -0
- scipy/linalg/tests/_cython_examples/meson.build +34 -0
- scipy/linalg/tests/data/carex_15_data.npz +0 -0
- scipy/linalg/tests/data/carex_18_data.npz +0 -0
- scipy/linalg/tests/data/carex_19_data.npz +0 -0
- scipy/linalg/tests/data/carex_20_data.npz +0 -0
- scipy/linalg/tests/data/carex_6_data.npz +0 -0
- scipy/linalg/tests/data/gendare_20170120_data.npz +0 -0
- scipy/linalg/tests/test_basic.py +2074 -0
- scipy/linalg/tests/test_batch.py +588 -0
- scipy/linalg/tests/test_blas.py +1127 -0
- scipy/linalg/tests/test_cython_blas.py +118 -0
- scipy/linalg/tests/test_cython_lapack.py +22 -0
- scipy/linalg/tests/test_cythonized_array_utils.py +130 -0
- scipy/linalg/tests/test_decomp.py +3189 -0
- scipy/linalg/tests/test_decomp_cholesky.py +268 -0
- scipy/linalg/tests/test_decomp_cossin.py +314 -0
- scipy/linalg/tests/test_decomp_ldl.py +137 -0
- scipy/linalg/tests/test_decomp_lu.py +308 -0
- scipy/linalg/tests/test_decomp_polar.py +110 -0
- scipy/linalg/tests/test_decomp_update.py +1701 -0
- scipy/linalg/tests/test_extending.py +46 -0
- scipy/linalg/tests/test_fblas.py +607 -0
- scipy/linalg/tests/test_interpolative.py +232 -0
- scipy/linalg/tests/test_lapack.py +3620 -0
- scipy/linalg/tests/test_matfuncs.py +1125 -0
- scipy/linalg/tests/test_matmul_toeplitz.py +136 -0
- scipy/linalg/tests/test_procrustes.py +214 -0
- scipy/linalg/tests/test_sketches.py +118 -0
- scipy/linalg/tests/test_solve_toeplitz.py +150 -0
- scipy/linalg/tests/test_solvers.py +844 -0
- scipy/linalg/tests/test_special_matrices.py +636 -0
- scipy/misc/__init__.py +6 -0
- scipy/misc/common.py +6 -0
- scipy/misc/doccer.py +6 -0
- scipy/ndimage/__init__.py +174 -0
- scipy/ndimage/_ctest.cp312-win_arm64.lib +0 -0
- scipy/ndimage/_ctest.cp312-win_arm64.pyd +0 -0
- scipy/ndimage/_cytest.cp312-win_arm64.lib +0 -0
- scipy/ndimage/_cytest.cp312-win_arm64.pyd +0 -0
- scipy/ndimage/_delegators.py +303 -0
- scipy/ndimage/_filters.py +2422 -0
- scipy/ndimage/_fourier.py +306 -0
- scipy/ndimage/_interpolation.py +1033 -0
- scipy/ndimage/_measurements.py +1689 -0
- scipy/ndimage/_morphology.py +2634 -0
- scipy/ndimage/_nd_image.cp312-win_arm64.lib +0 -0
- scipy/ndimage/_nd_image.cp312-win_arm64.pyd +0 -0
- scipy/ndimage/_ndimage_api.py +16 -0
- scipy/ndimage/_ni_docstrings.py +214 -0
- scipy/ndimage/_ni_label.cp312-win_arm64.lib +0 -0
- scipy/ndimage/_ni_label.cp312-win_arm64.pyd +0 -0
- scipy/ndimage/_ni_support.py +139 -0
- scipy/ndimage/_rank_filter_1d.cp312-win_arm64.lib +0 -0
- scipy/ndimage/_rank_filter_1d.cp312-win_arm64.pyd +0 -0
- scipy/ndimage/_support_alternative_backends.py +84 -0
- scipy/ndimage/filters.py +27 -0
- scipy/ndimage/fourier.py +21 -0
- scipy/ndimage/interpolation.py +22 -0
- scipy/ndimage/measurements.py +24 -0
- scipy/ndimage/morphology.py +27 -0
- scipy/ndimage/tests/__init__.py +12 -0
- scipy/ndimage/tests/data/label_inputs.txt +21 -0
- scipy/ndimage/tests/data/label_results.txt +294 -0
- scipy/ndimage/tests/data/label_strels.txt +42 -0
- scipy/ndimage/tests/dots.png +0 -0
- scipy/ndimage/tests/test_c_api.py +102 -0
- scipy/ndimage/tests/test_datatypes.py +67 -0
- scipy/ndimage/tests/test_filters.py +3083 -0
- scipy/ndimage/tests/test_fourier.py +187 -0
- scipy/ndimage/tests/test_interpolation.py +1491 -0
- scipy/ndimage/tests/test_measurements.py +1592 -0
- scipy/ndimage/tests/test_morphology.py +2950 -0
- scipy/ndimage/tests/test_ni_support.py +78 -0
- scipy/ndimage/tests/test_splines.py +70 -0
- scipy/odr/__init__.py +131 -0
- scipy/odr/__odrpack.cp312-win_arm64.lib +0 -0
- scipy/odr/__odrpack.cp312-win_arm64.pyd +0 -0
- scipy/odr/_add_newdocs.py +34 -0
- scipy/odr/_models.py +315 -0
- scipy/odr/_odrpack.py +1154 -0
- scipy/odr/models.py +20 -0
- scipy/odr/odrpack.py +21 -0
- scipy/odr/tests/__init__.py +0 -0
- scipy/odr/tests/test_odr.py +607 -0
- scipy/optimize/__init__.pxd +1 -0
- scipy/optimize/__init__.py +460 -0
- scipy/optimize/_basinhopping.py +741 -0
- scipy/optimize/_bglu_dense.cp312-win_arm64.lib +0 -0
- scipy/optimize/_bglu_dense.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_bracket.py +706 -0
- scipy/optimize/_chandrupatla.py +551 -0
- scipy/optimize/_cobyla_py.py +297 -0
- scipy/optimize/_cobyqa_py.py +72 -0
- scipy/optimize/_constraints.py +598 -0
- scipy/optimize/_dcsrch.py +728 -0
- scipy/optimize/_differentiable_functions.py +835 -0
- scipy/optimize/_differentialevolution.py +1970 -0
- scipy/optimize/_direct.cp312-win_arm64.lib +0 -0
- scipy/optimize/_direct.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_direct_py.py +280 -0
- scipy/optimize/_dual_annealing.py +732 -0
- scipy/optimize/_elementwise.py +798 -0
- scipy/optimize/_group_columns.cp312-win_arm64.lib +0 -0
- scipy/optimize/_group_columns.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_hessian_update_strategy.py +479 -0
- scipy/optimize/_highspy/__init__.py +0 -0
- scipy/optimize/_highspy/_core.cp312-win_arm64.lib +0 -0
- scipy/optimize/_highspy/_core.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_highspy/_highs_options.cp312-win_arm64.lib +0 -0
- scipy/optimize/_highspy/_highs_options.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_highspy/_highs_wrapper.py +338 -0
- scipy/optimize/_isotonic.py +157 -0
- scipy/optimize/_lbfgsb.cp312-win_arm64.lib +0 -0
- scipy/optimize/_lbfgsb.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_lbfgsb_py.py +634 -0
- scipy/optimize/_linesearch.py +896 -0
- scipy/optimize/_linprog.py +733 -0
- scipy/optimize/_linprog_doc.py +1434 -0
- scipy/optimize/_linprog_highs.py +422 -0
- scipy/optimize/_linprog_ip.py +1141 -0
- scipy/optimize/_linprog_rs.py +572 -0
- scipy/optimize/_linprog_simplex.py +663 -0
- scipy/optimize/_linprog_util.py +1521 -0
- scipy/optimize/_lsap.cp312-win_arm64.lib +0 -0
- scipy/optimize/_lsap.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_lsq/__init__.py +5 -0
- scipy/optimize/_lsq/bvls.py +183 -0
- scipy/optimize/_lsq/common.py +731 -0
- scipy/optimize/_lsq/dogbox.py +345 -0
- scipy/optimize/_lsq/givens_elimination.cp312-win_arm64.lib +0 -0
- scipy/optimize/_lsq/givens_elimination.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_lsq/least_squares.py +1044 -0
- scipy/optimize/_lsq/lsq_linear.py +361 -0
- scipy/optimize/_lsq/trf.py +587 -0
- scipy/optimize/_lsq/trf_linear.py +249 -0
- scipy/optimize/_milp.py +394 -0
- scipy/optimize/_minimize.py +1199 -0
- scipy/optimize/_minpack.cp312-win_arm64.lib +0 -0
- scipy/optimize/_minpack.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_minpack_py.py +1178 -0
- scipy/optimize/_moduleTNC.cp312-win_arm64.lib +0 -0
- scipy/optimize/_moduleTNC.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_nnls.py +96 -0
- scipy/optimize/_nonlin.py +1634 -0
- scipy/optimize/_numdiff.py +963 -0
- scipy/optimize/_optimize.py +4169 -0
- scipy/optimize/_pava_pybind.cp312-win_arm64.lib +0 -0
- scipy/optimize/_pava_pybind.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_qap.py +760 -0
- scipy/optimize/_remove_redundancy.py +522 -0
- scipy/optimize/_root.py +732 -0
- scipy/optimize/_root_scalar.py +538 -0
- scipy/optimize/_shgo.py +1606 -0
- scipy/optimize/_shgo_lib/__init__.py +0 -0
- scipy/optimize/_shgo_lib/_complex.py +1225 -0
- scipy/optimize/_shgo_lib/_vertex.py +460 -0
- scipy/optimize/_slsqp_py.py +603 -0
- scipy/optimize/_slsqplib.cp312-win_arm64.lib +0 -0
- scipy/optimize/_slsqplib.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_spectral.py +260 -0
- scipy/optimize/_tnc.py +438 -0
- scipy/optimize/_trlib/__init__.py +12 -0
- scipy/optimize/_trlib/_trlib.cp312-win_arm64.lib +0 -0
- scipy/optimize/_trlib/_trlib.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_trustregion.py +318 -0
- scipy/optimize/_trustregion_constr/__init__.py +6 -0
- scipy/optimize/_trustregion_constr/canonical_constraint.py +390 -0
- scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +231 -0
- scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +584 -0
- scipy/optimize/_trustregion_constr/projections.py +411 -0
- scipy/optimize/_trustregion_constr/qp_subproblem.py +637 -0
- scipy/optimize/_trustregion_constr/report.py +49 -0
- scipy/optimize/_trustregion_constr/tests/__init__.py +0 -0
- scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py +296 -0
- scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py +39 -0
- scipy/optimize/_trustregion_constr/tests/test_projections.py +214 -0
- scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +645 -0
- scipy/optimize/_trustregion_constr/tests/test_report.py +34 -0
- scipy/optimize/_trustregion_constr/tr_interior_point.py +361 -0
- scipy/optimize/_trustregion_dogleg.py +122 -0
- scipy/optimize/_trustregion_exact.py +437 -0
- scipy/optimize/_trustregion_krylov.py +65 -0
- scipy/optimize/_trustregion_ncg.py +126 -0
- scipy/optimize/_tstutils.py +972 -0
- scipy/optimize/_zeros.cp312-win_arm64.lib +0 -0
- scipy/optimize/_zeros.cp312-win_arm64.pyd +0 -0
- scipy/optimize/_zeros_py.py +1475 -0
- scipy/optimize/cobyla.py +19 -0
- scipy/optimize/cython_optimize/__init__.py +133 -0
- scipy/optimize/cython_optimize/_zeros.cp312-win_arm64.lib +0 -0
- scipy/optimize/cython_optimize/_zeros.cp312-win_arm64.pyd +0 -0
- scipy/optimize/cython_optimize/_zeros.pxd +33 -0
- scipy/optimize/cython_optimize/c_zeros.pxd +26 -0
- scipy/optimize/cython_optimize.pxd +11 -0
- scipy/optimize/elementwise.py +38 -0
- scipy/optimize/lbfgsb.py +23 -0
- scipy/optimize/linesearch.py +18 -0
- scipy/optimize/minpack.py +27 -0
- scipy/optimize/minpack2.py +17 -0
- scipy/optimize/moduleTNC.py +19 -0
- scipy/optimize/nonlin.py +29 -0
- scipy/optimize/optimize.py +40 -0
- scipy/optimize/slsqp.py +22 -0
- scipy/optimize/tests/__init__.py +0 -0
- scipy/optimize/tests/_cython_examples/extending.pyx +43 -0
- scipy/optimize/tests/_cython_examples/meson.build +32 -0
- scipy/optimize/tests/test__basinhopping.py +535 -0
- scipy/optimize/tests/test__differential_evolution.py +1703 -0
- scipy/optimize/tests/test__dual_annealing.py +416 -0
- scipy/optimize/tests/test__linprog_clean_inputs.py +312 -0
- scipy/optimize/tests/test__numdiff.py +885 -0
- scipy/optimize/tests/test__remove_redundancy.py +228 -0
- scipy/optimize/tests/test__root.py +124 -0
- scipy/optimize/tests/test__shgo.py +1164 -0
- scipy/optimize/tests/test__spectral.py +226 -0
- scipy/optimize/tests/test_bracket.py +896 -0
- scipy/optimize/tests/test_chandrupatla.py +982 -0
- scipy/optimize/tests/test_cobyla.py +195 -0
- scipy/optimize/tests/test_cobyqa.py +252 -0
- scipy/optimize/tests/test_constraint_conversion.py +286 -0
- scipy/optimize/tests/test_constraints.py +255 -0
- scipy/optimize/tests/test_cython_optimize.py +92 -0
- scipy/optimize/tests/test_differentiable_functions.py +1025 -0
- scipy/optimize/tests/test_direct.py +321 -0
- scipy/optimize/tests/test_extending.py +28 -0
- scipy/optimize/tests/test_hessian_update_strategy.py +300 -0
- scipy/optimize/tests/test_isotonic_regression.py +167 -0
- scipy/optimize/tests/test_lbfgsb_hessinv.py +65 -0
- scipy/optimize/tests/test_lbfgsb_setulb.py +122 -0
- scipy/optimize/tests/test_least_squares.py +986 -0
- scipy/optimize/tests/test_linear_assignment.py +116 -0
- scipy/optimize/tests/test_linesearch.py +328 -0
- scipy/optimize/tests/test_linprog.py +2577 -0
- scipy/optimize/tests/test_lsq_common.py +297 -0
- scipy/optimize/tests/test_lsq_linear.py +287 -0
- scipy/optimize/tests/test_milp.py +459 -0
- scipy/optimize/tests/test_minimize_constrained.py +845 -0
- scipy/optimize/tests/test_minpack.py +1194 -0
- scipy/optimize/tests/test_nnls.py +469 -0
- scipy/optimize/tests/test_nonlin.py +572 -0
- scipy/optimize/tests/test_optimize.py +3344 -0
- scipy/optimize/tests/test_quadratic_assignment.py +455 -0
- scipy/optimize/tests/test_regression.py +40 -0
- scipy/optimize/tests/test_slsqp.py +645 -0
- scipy/optimize/tests/test_tnc.py +345 -0
- scipy/optimize/tests/test_trustregion.py +110 -0
- scipy/optimize/tests/test_trustregion_exact.py +351 -0
- scipy/optimize/tests/test_trustregion_krylov.py +170 -0
- scipy/optimize/tests/test_zeros.py +998 -0
- scipy/optimize/tnc.py +22 -0
- scipy/optimize/zeros.py +26 -0
- scipy/signal/__init__.py +316 -0
- scipy/signal/_arraytools.py +264 -0
- scipy/signal/_czt.py +575 -0
- scipy/signal/_delegators.py +568 -0
- scipy/signal/_filter_design.py +5893 -0
- scipy/signal/_fir_filter_design.py +1458 -0
- scipy/signal/_lti_conversion.py +534 -0
- scipy/signal/_ltisys.py +3546 -0
- scipy/signal/_max_len_seq.py +139 -0
- scipy/signal/_max_len_seq_inner.cp312-win_arm64.lib +0 -0
- scipy/signal/_max_len_seq_inner.cp312-win_arm64.pyd +0 -0
- scipy/signal/_peak_finding.py +1310 -0
- scipy/signal/_peak_finding_utils.cp312-win_arm64.lib +0 -0
- scipy/signal/_peak_finding_utils.cp312-win_arm64.pyd +0 -0
- scipy/signal/_polyutils.py +172 -0
- scipy/signal/_savitzky_golay.py +357 -0
- scipy/signal/_short_time_fft.py +2228 -0
- scipy/signal/_signal_api.py +30 -0
- scipy/signal/_signaltools.py +5309 -0
- scipy/signal/_sigtools.cp312-win_arm64.lib +0 -0
- scipy/signal/_sigtools.cp312-win_arm64.pyd +0 -0
- scipy/signal/_sosfilt.cp312-win_arm64.lib +0 -0
- scipy/signal/_sosfilt.cp312-win_arm64.pyd +0 -0
- scipy/signal/_spectral_py.py +2471 -0
- scipy/signal/_spline.cp312-win_arm64.lib +0 -0
- scipy/signal/_spline.cp312-win_arm64.pyd +0 -0
- scipy/signal/_spline.pyi +34 -0
- scipy/signal/_spline_filters.py +848 -0
- scipy/signal/_support_alternative_backends.py +73 -0
- scipy/signal/_upfirdn.py +219 -0
- scipy/signal/_upfirdn_apply.cp312-win_arm64.lib +0 -0
- scipy/signal/_upfirdn_apply.cp312-win_arm64.pyd +0 -0
- scipy/signal/_waveforms.py +687 -0
- scipy/signal/_wavelets.py +29 -0
- scipy/signal/bsplines.py +21 -0
- scipy/signal/filter_design.py +28 -0
- scipy/signal/fir_filter_design.py +21 -0
- scipy/signal/lti_conversion.py +20 -0
- scipy/signal/ltisys.py +25 -0
- scipy/signal/signaltools.py +27 -0
- scipy/signal/spectral.py +21 -0
- scipy/signal/spline.py +18 -0
- scipy/signal/tests/__init__.py +0 -0
- scipy/signal/tests/_scipy_spectral_test_shim.py +311 -0
- scipy/signal/tests/mpsig.py +122 -0
- scipy/signal/tests/test_array_tools.py +111 -0
- scipy/signal/tests/test_bsplines.py +365 -0
- scipy/signal/tests/test_cont2discrete.py +424 -0
- scipy/signal/tests/test_czt.py +221 -0
- scipy/signal/tests/test_dltisys.py +599 -0
- scipy/signal/tests/test_filter_design.py +4744 -0
- scipy/signal/tests/test_fir_filter_design.py +851 -0
- scipy/signal/tests/test_ltisys.py +1225 -0
- scipy/signal/tests/test_max_len_seq.py +71 -0
- scipy/signal/tests/test_peak_finding.py +915 -0
- scipy/signal/tests/test_result_type.py +51 -0
- scipy/signal/tests/test_savitzky_golay.py +363 -0
- scipy/signal/tests/test_short_time_fft.py +1107 -0
- scipy/signal/tests/test_signaltools.py +4735 -0
- scipy/signal/tests/test_spectral.py +2141 -0
- scipy/signal/tests/test_splines.py +427 -0
- scipy/signal/tests/test_upfirdn.py +322 -0
- scipy/signal/tests/test_waveforms.py +400 -0
- scipy/signal/tests/test_wavelets.py +59 -0
- scipy/signal/tests/test_windows.py +987 -0
- scipy/signal/waveforms.py +20 -0
- scipy/signal/wavelets.py +17 -0
- scipy/signal/windows/__init__.py +52 -0
- scipy/signal/windows/_windows.py +2513 -0
- scipy/signal/windows/windows.py +23 -0
- scipy/sparse/__init__.py +350 -0
- scipy/sparse/_base.py +1613 -0
- scipy/sparse/_bsr.py +880 -0
- scipy/sparse/_compressed.py +1328 -0
- scipy/sparse/_construct.py +1454 -0
- scipy/sparse/_coo.py +1581 -0
- scipy/sparse/_csc.py +367 -0
- scipy/sparse/_csparsetools.cp312-win_arm64.lib +0 -0
- scipy/sparse/_csparsetools.cp312-win_arm64.pyd +0 -0
- scipy/sparse/_csr.py +558 -0
- scipy/sparse/_data.py +569 -0
- scipy/sparse/_dia.py +677 -0
- scipy/sparse/_dok.py +669 -0
- scipy/sparse/_extract.py +178 -0
- scipy/sparse/_index.py +444 -0
- scipy/sparse/_lil.py +632 -0
- scipy/sparse/_matrix.py +169 -0
- scipy/sparse/_matrix_io.py +167 -0
- scipy/sparse/_sparsetools.cp312-win_arm64.lib +0 -0
- scipy/sparse/_sparsetools.cp312-win_arm64.pyd +0 -0
- scipy/sparse/_spfuncs.py +76 -0
- scipy/sparse/_sputils.py +632 -0
- scipy/sparse/base.py +24 -0
- scipy/sparse/bsr.py +22 -0
- scipy/sparse/compressed.py +20 -0
- scipy/sparse/construct.py +38 -0
- scipy/sparse/coo.py +23 -0
- scipy/sparse/csc.py +22 -0
- scipy/sparse/csgraph/__init__.py +210 -0
- scipy/sparse/csgraph/_flow.cp312-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_flow.cp312-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_laplacian.py +563 -0
- scipy/sparse/csgraph/_matching.cp312-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_matching.cp312-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_min_spanning_tree.cp312-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_min_spanning_tree.cp312-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_reordering.cp312-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_reordering.cp312-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_shortest_path.cp312-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_shortest_path.cp312-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_tools.cp312-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_tools.cp312-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_traversal.cp312-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_traversal.cp312-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_validation.py +66 -0
- scipy/sparse/csgraph/tests/__init__.py +0 -0
- scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
- scipy/sparse/csgraph/tests/test_conversions.py +61 -0
- scipy/sparse/csgraph/tests/test_flow.py +209 -0
- scipy/sparse/csgraph/tests/test_graph_laplacian.py +368 -0
- scipy/sparse/csgraph/tests/test_matching.py +307 -0
- scipy/sparse/csgraph/tests/test_pydata_sparse.py +197 -0
- scipy/sparse/csgraph/tests/test_reordering.py +70 -0
- scipy/sparse/csgraph/tests/test_shortest_path.py +540 -0
- scipy/sparse/csgraph/tests/test_spanning_tree.py +66 -0
- scipy/sparse/csgraph/tests/test_traversal.py +148 -0
- scipy/sparse/csr.py +22 -0
- scipy/sparse/data.py +18 -0
- scipy/sparse/dia.py +22 -0
- scipy/sparse/dok.py +22 -0
- scipy/sparse/extract.py +23 -0
- scipy/sparse/lil.py +22 -0
- scipy/sparse/linalg/__init__.py +148 -0
- scipy/sparse/linalg/_dsolve/__init__.py +71 -0
- scipy/sparse/linalg/_dsolve/_add_newdocs.py +147 -0
- scipy/sparse/linalg/_dsolve/_superlu.cp312-win_arm64.lib +0 -0
- scipy/sparse/linalg/_dsolve/_superlu.cp312-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_dsolve/linsolve.py +882 -0
- scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
- scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +928 -0
- scipy/sparse/linalg/_eigen/__init__.py +22 -0
- scipy/sparse/linalg/_eigen/_svds.py +540 -0
- scipy/sparse/linalg/_eigen/_svds_doc.py +382 -0
- scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
- scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
- scipy/sparse/linalg/_eigen/arpack/_arpack.cp312-win_arm64.lib +0 -0
- scipy/sparse/linalg/_eigen/arpack/_arpack.cp312-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_eigen/arpack/arpack.py +1706 -0
- scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +717 -0
- scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
- scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1110 -0
- scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +725 -0
- scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/tests/test_svds.py +886 -0
- scipy/sparse/linalg/_expm_multiply.py +816 -0
- scipy/sparse/linalg/_interface.py +920 -0
- scipy/sparse/linalg/_isolve/__init__.py +20 -0
- scipy/sparse/linalg/_isolve/_gcrotmk.py +503 -0
- scipy/sparse/linalg/_isolve/iterative.py +1051 -0
- scipy/sparse/linalg/_isolve/lgmres.py +230 -0
- scipy/sparse/linalg/_isolve/lsmr.py +486 -0
- scipy/sparse/linalg/_isolve/lsqr.py +589 -0
- scipy/sparse/linalg/_isolve/minres.py +372 -0
- scipy/sparse/linalg/_isolve/tests/__init__.py +0 -0
- scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py +183 -0
- scipy/sparse/linalg/_isolve/tests/test_iterative.py +809 -0
- scipy/sparse/linalg/_isolve/tests/test_lgmres.py +225 -0
- scipy/sparse/linalg/_isolve/tests/test_lsmr.py +185 -0
- scipy/sparse/linalg/_isolve/tests/test_lsqr.py +120 -0
- scipy/sparse/linalg/_isolve/tests/test_minres.py +97 -0
- scipy/sparse/linalg/_isolve/tests/test_utils.py +9 -0
- scipy/sparse/linalg/_isolve/tfqmr.py +179 -0
- scipy/sparse/linalg/_isolve/utils.py +121 -0
- scipy/sparse/linalg/_matfuncs.py +940 -0
- scipy/sparse/linalg/_norm.py +195 -0
- scipy/sparse/linalg/_onenormest.py +467 -0
- scipy/sparse/linalg/_propack/_cpropack.cp312-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_cpropack.cp312-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_dpropack.cp312-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_dpropack.cp312-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_spropack.cp312-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_spropack.cp312-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_zpropack.cp312-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_zpropack.cp312-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_special_sparse_arrays.py +949 -0
- scipy/sparse/linalg/_svdp.py +309 -0
- scipy/sparse/linalg/dsolve.py +22 -0
- scipy/sparse/linalg/eigen.py +21 -0
- scipy/sparse/linalg/interface.py +20 -0
- scipy/sparse/linalg/isolve.py +22 -0
- scipy/sparse/linalg/matfuncs.py +18 -0
- scipy/sparse/linalg/tests/__init__.py +0 -0
- scipy/sparse/linalg/tests/propack_test_data.npz +0 -0
- scipy/sparse/linalg/tests/test_expm_multiply.py +367 -0
- scipy/sparse/linalg/tests/test_interface.py +561 -0
- scipy/sparse/linalg/tests/test_matfuncs.py +592 -0
- scipy/sparse/linalg/tests/test_norm.py +154 -0
- scipy/sparse/linalg/tests/test_onenormest.py +252 -0
- scipy/sparse/linalg/tests/test_propack.py +165 -0
- scipy/sparse/linalg/tests/test_pydata_sparse.py +272 -0
- scipy/sparse/linalg/tests/test_special_sparse_arrays.py +337 -0
- scipy/sparse/sparsetools.py +17 -0
- scipy/sparse/spfuncs.py +17 -0
- scipy/sparse/sputils.py +17 -0
- scipy/sparse/tests/__init__.py +0 -0
- scipy/sparse/tests/data/csc_py2.npz +0 -0
- scipy/sparse/tests/data/csc_py3.npz +0 -0
- scipy/sparse/tests/test_arithmetic1d.py +341 -0
- scipy/sparse/tests/test_array_api.py +561 -0
- scipy/sparse/tests/test_base.py +5870 -0
- scipy/sparse/tests/test_common1d.py +447 -0
- scipy/sparse/tests/test_construct.py +872 -0
- scipy/sparse/tests/test_coo.py +1119 -0
- scipy/sparse/tests/test_csc.py +98 -0
- scipy/sparse/tests/test_csr.py +214 -0
- scipy/sparse/tests/test_dok.py +209 -0
- scipy/sparse/tests/test_extract.py +51 -0
- scipy/sparse/tests/test_indexing1d.py +603 -0
- scipy/sparse/tests/test_matrix_io.py +109 -0
- scipy/sparse/tests/test_minmax1d.py +128 -0
- scipy/sparse/tests/test_sparsetools.py +344 -0
- scipy/sparse/tests/test_spfuncs.py +97 -0
- scipy/sparse/tests/test_sputils.py +424 -0
- scipy/spatial/__init__.py +129 -0
- scipy/spatial/_ckdtree.cp312-win_arm64.lib +0 -0
- scipy/spatial/_ckdtree.cp312-win_arm64.pyd +0 -0
- scipy/spatial/_distance_pybind.cp312-win_arm64.lib +0 -0
- scipy/spatial/_distance_pybind.cp312-win_arm64.pyd +0 -0
- scipy/spatial/_distance_wrap.cp312-win_arm64.lib +0 -0
- scipy/spatial/_distance_wrap.cp312-win_arm64.pyd +0 -0
- scipy/spatial/_geometric_slerp.py +238 -0
- scipy/spatial/_hausdorff.cp312-win_arm64.lib +0 -0
- scipy/spatial/_hausdorff.cp312-win_arm64.pyd +0 -0
- scipy/spatial/_kdtree.py +920 -0
- scipy/spatial/_plotutils.py +274 -0
- scipy/spatial/_procrustes.py +132 -0
- scipy/spatial/_qhull.cp312-win_arm64.lib +0 -0
- scipy/spatial/_qhull.cp312-win_arm64.pyd +0 -0
- scipy/spatial/_qhull.pyi +213 -0
- scipy/spatial/_spherical_voronoi.py +341 -0
- scipy/spatial/_voronoi.cp312-win_arm64.lib +0 -0
- scipy/spatial/_voronoi.cp312-win_arm64.pyd +0 -0
- scipy/spatial/_voronoi.pyi +4 -0
- scipy/spatial/ckdtree.py +18 -0
- scipy/spatial/distance.py +3147 -0
- scipy/spatial/distance.pyi +210 -0
- scipy/spatial/kdtree.py +25 -0
- scipy/spatial/qhull.py +25 -0
- scipy/spatial/qhull_src/COPYING_QHULL.txt +39 -0
- scipy/spatial/tests/__init__.py +0 -0
- scipy/spatial/tests/data/cdist-X1.txt +10 -0
- scipy/spatial/tests/data/cdist-X2.txt +20 -0
- scipy/spatial/tests/data/degenerate_pointset.npz +0 -0
- scipy/spatial/tests/data/iris.txt +150 -0
- scipy/spatial/tests/data/pdist-boolean-inp.txt +20 -0
- scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-chebyshev-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-cityblock-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-correlation-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-correlation-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-cosine-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-cosine-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-double-inp.txt +20 -0
- scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-euclidean-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-hamming-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-jaccard-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-jensenshannon-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-seuclidean-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-spearman-ml.txt +1 -0
- scipy/spatial/tests/data/random-bool-data.txt +100 -0
- scipy/spatial/tests/data/random-double-data.txt +100 -0
- scipy/spatial/tests/data/random-int-data.txt +100 -0
- scipy/spatial/tests/data/random-uint-data.txt +100 -0
- scipy/spatial/tests/data/selfdual-4d-polytope.txt +27 -0
- scipy/spatial/tests/test__plotutils.py +91 -0
- scipy/spatial/tests/test__procrustes.py +116 -0
- scipy/spatial/tests/test_distance.py +2389 -0
- scipy/spatial/tests/test_hausdorff.py +199 -0
- scipy/spatial/tests/test_kdtree.py +1536 -0
- scipy/spatial/tests/test_qhull.py +1313 -0
- scipy/spatial/tests/test_slerp.py +417 -0
- scipy/spatial/tests/test_spherical_voronoi.py +358 -0
- scipy/spatial/transform/__init__.py +31 -0
- scipy/spatial/transform/_rigid_transform.cp312-win_arm64.lib +0 -0
- scipy/spatial/transform/_rigid_transform.cp312-win_arm64.pyd +0 -0
- scipy/spatial/transform/_rotation.cp312-win_arm64.lib +0 -0
- scipy/spatial/transform/_rotation.cp312-win_arm64.pyd +0 -0
- scipy/spatial/transform/_rotation_groups.py +140 -0
- scipy/spatial/transform/_rotation_spline.py +460 -0
- scipy/spatial/transform/rotation.py +21 -0
- scipy/spatial/transform/tests/__init__.py +0 -0
- scipy/spatial/transform/tests/test_rigid_transform.py +1221 -0
- scipy/spatial/transform/tests/test_rotation.py +2569 -0
- scipy/spatial/transform/tests/test_rotation_groups.py +169 -0
- scipy/spatial/transform/tests/test_rotation_spline.py +183 -0
- scipy/special/__init__.pxd +1 -0
- scipy/special/__init__.py +841 -0
- scipy/special/_add_newdocs.py +9961 -0
- scipy/special/_basic.py +3576 -0
- scipy/special/_comb.cp312-win_arm64.lib +0 -0
- scipy/special/_comb.cp312-win_arm64.pyd +0 -0
- scipy/special/_ellip_harm.py +214 -0
- scipy/special/_ellip_harm_2.cp312-win_arm64.lib +0 -0
- scipy/special/_ellip_harm_2.cp312-win_arm64.pyd +0 -0
- scipy/special/_gufuncs.cp312-win_arm64.lib +0 -0
- scipy/special/_gufuncs.cp312-win_arm64.pyd +0 -0
- scipy/special/_input_validation.py +17 -0
- scipy/special/_lambertw.py +149 -0
- scipy/special/_logsumexp.py +426 -0
- scipy/special/_mptestutils.py +453 -0
- scipy/special/_multiufuncs.py +610 -0
- scipy/special/_orthogonal.py +2592 -0
- scipy/special/_orthogonal.pyi +330 -0
- scipy/special/_precompute/__init__.py +0 -0
- scipy/special/_precompute/cosine_cdf.py +17 -0
- scipy/special/_precompute/expn_asy.py +54 -0
- scipy/special/_precompute/gammainc_asy.py +116 -0
- scipy/special/_precompute/gammainc_data.py +124 -0
- scipy/special/_precompute/hyp2f1_data.py +484 -0
- scipy/special/_precompute/lambertw.py +68 -0
- scipy/special/_precompute/loggamma.py +43 -0
- scipy/special/_precompute/struve_convergence.py +131 -0
- scipy/special/_precompute/utils.py +38 -0
- scipy/special/_precompute/wright_bessel.py +342 -0
- scipy/special/_precompute/wright_bessel_data.py +152 -0
- scipy/special/_precompute/wrightomega.py +41 -0
- scipy/special/_precompute/zetac.py +27 -0
- scipy/special/_sf_error.py +15 -0
- scipy/special/_specfun.cp312-win_arm64.lib +0 -0
- scipy/special/_specfun.cp312-win_arm64.pyd +0 -0
- scipy/special/_special_ufuncs.cp312-win_arm64.lib +0 -0
- scipy/special/_special_ufuncs.cp312-win_arm64.pyd +0 -0
- scipy/special/_spfun_stats.py +106 -0
- scipy/special/_spherical_bessel.py +397 -0
- scipy/special/_support_alternative_backends.py +295 -0
- scipy/special/_test_internal.cp312-win_arm64.lib +0 -0
- scipy/special/_test_internal.cp312-win_arm64.pyd +0 -0
- scipy/special/_test_internal.pyi +9 -0
- scipy/special/_testutils.py +321 -0
- scipy/special/_ufuncs.cp312-win_arm64.lib +0 -0
- scipy/special/_ufuncs.cp312-win_arm64.pyd +0 -0
- scipy/special/_ufuncs.pyi +522 -0
- scipy/special/_ufuncs.pyx +13173 -0
- scipy/special/_ufuncs_cxx.cp312-win_arm64.lib +0 -0
- scipy/special/_ufuncs_cxx.cp312-win_arm64.pyd +0 -0
- scipy/special/_ufuncs_cxx.pxd +142 -0
- scipy/special/_ufuncs_cxx.pyx +427 -0
- scipy/special/_ufuncs_cxx_defs.h +147 -0
- scipy/special/_ufuncs_defs.h +57 -0
- scipy/special/add_newdocs.py +15 -0
- scipy/special/basic.py +87 -0
- scipy/special/cython_special.cp312-win_arm64.lib +0 -0
- scipy/special/cython_special.cp312-win_arm64.pyd +0 -0
- scipy/special/cython_special.pxd +259 -0
- scipy/special/cython_special.pyi +3 -0
- scipy/special/orthogonal.py +45 -0
- scipy/special/sf_error.py +20 -0
- scipy/special/specfun.py +24 -0
- scipy/special/spfun_stats.py +17 -0
- scipy/special/tests/__init__.py +0 -0
- scipy/special/tests/_cython_examples/extending.pyx +12 -0
- scipy/special/tests/_cython_examples/meson.build +34 -0
- scipy/special/tests/data/__init__.py +0 -0
- scipy/special/tests/data/boost.npz +0 -0
- scipy/special/tests/data/gsl.npz +0 -0
- scipy/special/tests/data/local.npz +0 -0
- scipy/special/tests/test_basic.py +4815 -0
- scipy/special/tests/test_bdtr.py +112 -0
- scipy/special/tests/test_boost_ufuncs.py +64 -0
- scipy/special/tests/test_boxcox.py +125 -0
- scipy/special/tests/test_cdflib.py +712 -0
- scipy/special/tests/test_cdft_asymptotic.py +49 -0
- scipy/special/tests/test_cephes_intp_cast.py +29 -0
- scipy/special/tests/test_cosine_distr.py +83 -0
- scipy/special/tests/test_cython_special.py +363 -0
- scipy/special/tests/test_data.py +719 -0
- scipy/special/tests/test_dd.py +42 -0
- scipy/special/tests/test_digamma.py +45 -0
- scipy/special/tests/test_ellip_harm.py +278 -0
- scipy/special/tests/test_erfinv.py +89 -0
- scipy/special/tests/test_exponential_integrals.py +118 -0
- scipy/special/tests/test_extending.py +28 -0
- scipy/special/tests/test_faddeeva.py +85 -0
- scipy/special/tests/test_gamma.py +12 -0
- scipy/special/tests/test_gammainc.py +152 -0
- scipy/special/tests/test_hyp2f1.py +2566 -0
- scipy/special/tests/test_hypergeometric.py +234 -0
- scipy/special/tests/test_iv_ratio.py +249 -0
- scipy/special/tests/test_kolmogorov.py +491 -0
- scipy/special/tests/test_lambertw.py +109 -0
- scipy/special/tests/test_legendre.py +1518 -0
- scipy/special/tests/test_log1mexp.py +85 -0
- scipy/special/tests/test_loggamma.py +70 -0
- scipy/special/tests/test_logit.py +162 -0
- scipy/special/tests/test_logsumexp.py +469 -0
- scipy/special/tests/test_mpmath.py +2293 -0
- scipy/special/tests/test_nan_inputs.py +65 -0
- scipy/special/tests/test_ndtr.py +77 -0
- scipy/special/tests/test_ndtri_exp.py +94 -0
- scipy/special/tests/test_orthogonal.py +821 -0
- scipy/special/tests/test_orthogonal_eval.py +275 -0
- scipy/special/tests/test_owens_t.py +53 -0
- scipy/special/tests/test_pcf.py +24 -0
- scipy/special/tests/test_pdtr.py +48 -0
- scipy/special/tests/test_powm1.py +65 -0
- scipy/special/tests/test_precompute_expn_asy.py +24 -0
- scipy/special/tests/test_precompute_gammainc.py +108 -0
- scipy/special/tests/test_precompute_utils.py +36 -0
- scipy/special/tests/test_round.py +18 -0
- scipy/special/tests/test_sf_error.py +146 -0
- scipy/special/tests/test_sici.py +36 -0
- scipy/special/tests/test_specfun.py +48 -0
- scipy/special/tests/test_spence.py +32 -0
- scipy/special/tests/test_spfun_stats.py +61 -0
- scipy/special/tests/test_sph_harm.py +85 -0
- scipy/special/tests/test_spherical_bessel.py +400 -0
- scipy/special/tests/test_support_alternative_backends.py +248 -0
- scipy/special/tests/test_trig.py +72 -0
- scipy/special/tests/test_ufunc_signatures.py +46 -0
- scipy/special/tests/test_wright_bessel.py +205 -0
- scipy/special/tests/test_wrightomega.py +117 -0
- scipy/special/tests/test_zeta.py +301 -0
- scipy/stats/__init__.py +670 -0
- scipy/stats/_ansari_swilk_statistics.cp312-win_arm64.lib +0 -0
- scipy/stats/_ansari_swilk_statistics.cp312-win_arm64.pyd +0 -0
- scipy/stats/_axis_nan_policy.py +692 -0
- scipy/stats/_biasedurn.cp312-win_arm64.lib +0 -0
- scipy/stats/_biasedurn.cp312-win_arm64.pyd +0 -0
- scipy/stats/_biasedurn.pxd +27 -0
- scipy/stats/_binned_statistic.py +795 -0
- scipy/stats/_binomtest.py +375 -0
- scipy/stats/_bws_test.py +177 -0
- scipy/stats/_censored_data.py +459 -0
- scipy/stats/_common.py +5 -0
- scipy/stats/_constants.py +42 -0
- scipy/stats/_continued_fraction.py +387 -0
- scipy/stats/_continuous_distns.py +12486 -0
- scipy/stats/_correlation.py +210 -0
- scipy/stats/_covariance.py +636 -0
- scipy/stats/_crosstab.py +204 -0
- scipy/stats/_discrete_distns.py +2098 -0
- scipy/stats/_distn_infrastructure.py +4201 -0
- scipy/stats/_distr_params.py +299 -0
- scipy/stats/_distribution_infrastructure.py +5750 -0
- scipy/stats/_entropy.py +428 -0
- scipy/stats/_finite_differences.py +145 -0
- scipy/stats/_fit.py +1351 -0
- scipy/stats/_hypotests.py +2060 -0
- scipy/stats/_kde.py +732 -0
- scipy/stats/_ksstats.py +600 -0
- scipy/stats/_levy_stable/__init__.py +1231 -0
- scipy/stats/_levy_stable/levyst.cp312-win_arm64.lib +0 -0
- scipy/stats/_levy_stable/levyst.cp312-win_arm64.pyd +0 -0
- scipy/stats/_mannwhitneyu.py +492 -0
- scipy/stats/_mgc.py +550 -0
- scipy/stats/_morestats.py +4626 -0
- scipy/stats/_mstats_basic.py +3658 -0
- scipy/stats/_mstats_extras.py +521 -0
- scipy/stats/_multicomp.py +449 -0
- scipy/stats/_multivariate.py +7281 -0
- scipy/stats/_new_distributions.py +452 -0
- scipy/stats/_odds_ratio.py +466 -0
- scipy/stats/_page_trend_test.py +486 -0
- scipy/stats/_probability_distribution.py +1964 -0
- scipy/stats/_qmc.py +2956 -0
- scipy/stats/_qmc_cy.cp312-win_arm64.lib +0 -0
- scipy/stats/_qmc_cy.cp312-win_arm64.pyd +0 -0
- scipy/stats/_qmc_cy.pyi +54 -0
- scipy/stats/_qmvnt.py +454 -0
- scipy/stats/_qmvnt_cy.cp312-win_arm64.lib +0 -0
- scipy/stats/_qmvnt_cy.cp312-win_arm64.pyd +0 -0
- scipy/stats/_quantile.py +335 -0
- scipy/stats/_rcont/__init__.py +4 -0
- scipy/stats/_rcont/rcont.cp312-win_arm64.lib +0 -0
- scipy/stats/_rcont/rcont.cp312-win_arm64.pyd +0 -0
- scipy/stats/_relative_risk.py +263 -0
- scipy/stats/_resampling.py +2352 -0
- scipy/stats/_result_classes.py +40 -0
- scipy/stats/_sampling.py +1314 -0
- scipy/stats/_sensitivity_analysis.py +713 -0
- scipy/stats/_sobol.cp312-win_arm64.lib +0 -0
- scipy/stats/_sobol.cp312-win_arm64.pyd +0 -0
- scipy/stats/_sobol.pyi +54 -0
- scipy/stats/_sobol_direction_numbers.npz +0 -0
- scipy/stats/_stats.cp312-win_arm64.lib +0 -0
- scipy/stats/_stats.cp312-win_arm64.pyd +0 -0
- scipy/stats/_stats.pxd +10 -0
- scipy/stats/_stats_mstats_common.py +322 -0
- scipy/stats/_stats_py.py +11089 -0
- scipy/stats/_stats_pythran.cp312-win_arm64.lib +0 -0
- scipy/stats/_stats_pythran.cp312-win_arm64.pyd +0 -0
- scipy/stats/_survival.py +683 -0
- scipy/stats/_tukeylambda_stats.py +199 -0
- scipy/stats/_unuran/__init__.py +0 -0
- scipy/stats/_unuran/unuran_wrapper.cp312-win_arm64.lib +0 -0
- scipy/stats/_unuran/unuran_wrapper.cp312-win_arm64.pyd +0 -0
- scipy/stats/_unuran/unuran_wrapper.pyi +179 -0
- scipy/stats/_variation.py +126 -0
- scipy/stats/_warnings_errors.py +38 -0
- scipy/stats/_wilcoxon.py +265 -0
- scipy/stats/biasedurn.py +16 -0
- scipy/stats/contingency.py +521 -0
- scipy/stats/distributions.py +24 -0
- scipy/stats/kde.py +18 -0
- scipy/stats/morestats.py +27 -0
- scipy/stats/mstats.py +140 -0
- scipy/stats/mstats_basic.py +42 -0
- scipy/stats/mstats_extras.py +25 -0
- scipy/stats/mvn.py +17 -0
- scipy/stats/qmc.py +236 -0
- scipy/stats/sampling.py +73 -0
- scipy/stats/stats.py +41 -0
- scipy/stats/tests/__init__.py +0 -0
- scipy/stats/tests/common_tests.py +356 -0
- scipy/stats/tests/data/_mvt.py +171 -0
- scipy/stats/tests/data/fisher_exact_results_from_r.py +607 -0
- scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy +0 -0
- scipy/stats/tests/data/nist_anova/AtmWtAg.dat +108 -0
- scipy/stats/tests/data/nist_anova/SiRstv.dat +85 -0
- scipy/stats/tests/data/nist_anova/SmLs01.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs02.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs03.dat +18069 -0
- scipy/stats/tests/data/nist_anova/SmLs04.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs05.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs06.dat +18069 -0
- scipy/stats/tests/data/nist_anova/SmLs07.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs08.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs09.dat +18069 -0
- scipy/stats/tests/data/nist_linregress/Norris.dat +97 -0
- scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy +0 -0
- scipy/stats/tests/data/studentized_range_mpmath_ref.json +1499 -0
- scipy/stats/tests/test_axis_nan_policy.py +1388 -0
- scipy/stats/tests/test_binned_statistic.py +568 -0
- scipy/stats/tests/test_censored_data.py +152 -0
- scipy/stats/tests/test_contingency.py +294 -0
- scipy/stats/tests/test_continued_fraction.py +173 -0
- scipy/stats/tests/test_continuous.py +2198 -0
- scipy/stats/tests/test_continuous_basic.py +1053 -0
- scipy/stats/tests/test_continuous_fit_censored.py +683 -0
- scipy/stats/tests/test_correlation.py +80 -0
- scipy/stats/tests/test_crosstab.py +115 -0
- scipy/stats/tests/test_discrete_basic.py +580 -0
- scipy/stats/tests/test_discrete_distns.py +700 -0
- scipy/stats/tests/test_distributions.py +10413 -0
- scipy/stats/tests/test_entropy.py +322 -0
- scipy/stats/tests/test_fast_gen_inversion.py +435 -0
- scipy/stats/tests/test_fit.py +1090 -0
- scipy/stats/tests/test_hypotests.py +1991 -0
- scipy/stats/tests/test_kdeoth.py +676 -0
- scipy/stats/tests/test_marray.py +289 -0
- scipy/stats/tests/test_mgc.py +217 -0
- scipy/stats/tests/test_morestats.py +3259 -0
- scipy/stats/tests/test_mstats_basic.py +2071 -0
- scipy/stats/tests/test_mstats_extras.py +172 -0
- scipy/stats/tests/test_multicomp.py +405 -0
- scipy/stats/tests/test_multivariate.py +4381 -0
- scipy/stats/tests/test_odds_ratio.py +148 -0
- scipy/stats/tests/test_qmc.py +1492 -0
- scipy/stats/tests/test_quantile.py +199 -0
- scipy/stats/tests/test_rank.py +345 -0
- scipy/stats/tests/test_relative_risk.py +95 -0
- scipy/stats/tests/test_resampling.py +2000 -0
- scipy/stats/tests/test_sampling.py +1450 -0
- scipy/stats/tests/test_sensitivity_analysis.py +310 -0
- scipy/stats/tests/test_stats.py +9707 -0
- scipy/stats/tests/test_survival.py +466 -0
- scipy/stats/tests/test_tukeylambda_stats.py +85 -0
- scipy/stats/tests/test_variation.py +216 -0
- scipy/version.py +12 -0
- scipy-1.16.2.dist-info/DELVEWHEEL +2 -0
- scipy-1.16.2.dist-info/LICENSE.txt +912 -0
- scipy-1.16.2.dist-info/METADATA +1061 -0
- scipy-1.16.2.dist-info/RECORD +1530 -0
- scipy-1.16.2.dist-info/WHEEL +4 -0
- scipy.libs/msvcp140-5f1c5dd31916990d94181e07bc3afb32.dll +0 -0
- scipy.libs/scipy_openblas-f3ac85b1f412f7e86514c923dc4058d1.dll +0 -0
@@ -0,0 +1,4381 @@
|
|
1
|
+
"""
|
2
|
+
Test functions for multivariate normal distributions.
|
3
|
+
|
4
|
+
"""
|
5
|
+
import pickle
|
6
|
+
from dataclasses import dataclass
|
7
|
+
|
8
|
+
from numpy.testing import (assert_allclose, assert_almost_equal,
|
9
|
+
assert_array_almost_equal, assert_equal,
|
10
|
+
assert_array_less, assert_)
|
11
|
+
import pytest
|
12
|
+
from pytest import raises as assert_raises
|
13
|
+
|
14
|
+
from .test_continuous_basic import check_distribution_rvs
|
15
|
+
|
16
|
+
import numpy as np
|
17
|
+
|
18
|
+
import scipy.linalg
|
19
|
+
|
20
|
+
from scipy.stats._multivariate import (_PSD,
|
21
|
+
_lnB,
|
22
|
+
multivariate_normal_frozen)
|
23
|
+
from scipy.stats import (multivariate_normal, multivariate_hypergeom,
|
24
|
+
matrix_normal, special_ortho_group, ortho_group,
|
25
|
+
random_correlation, unitary_group, dirichlet,
|
26
|
+
beta, wishart, multinomial, invwishart, chi2,
|
27
|
+
invgamma, norm, uniform, ks_2samp, kstest, binom,
|
28
|
+
hypergeom, multivariate_t, cauchy, normaltest,
|
29
|
+
random_table, uniform_direction, vonmises_fisher,
|
30
|
+
dirichlet_multinomial, vonmises)
|
31
|
+
|
32
|
+
from scipy.stats import _covariance, Covariance
|
33
|
+
from scipy.stats._continuous_distns import _norm_pdf as norm_pdf
|
34
|
+
from scipy import stats
|
35
|
+
|
36
|
+
from scipy.integrate import tanhsinh, cubature, quad
|
37
|
+
from scipy.integrate import romb, qmc_quad, dblquad, tplquad
|
38
|
+
from scipy.special import multigammaln
|
39
|
+
import scipy.special as special
|
40
|
+
|
41
|
+
from .common_tests import check_random_state_property
|
42
|
+
from .data._mvt import _qsimvtv
|
43
|
+
|
44
|
+
from unittest.mock import patch
|
45
|
+
|
46
|
+
|
47
|
+
def assert_close(res, ref, *args, **kwargs):
    """Assert `res` is numerically close to `ref` AND has exactly its shape.

    `assert_allclose` alone broadcasts, so it would not catch a shape
    mismatch between a scalar and a length-1 array; the explicit shape
    check closes that hole.  Extra arguments are forwarded to
    `assert_allclose` (e.g. `rtol`, `atol`).
    """
    res_arr = np.asarray(res)
    ref_arr = np.asarray(ref)
    assert_allclose(res_arr, ref_arr, *args, **kwargs)
    assert_equal(res_arr.shape, ref_arr.shape)
|
51
|
+
|
52
|
+
|
53
|
+
class TestCovariance:
    """Tests for `scipy.stats.Covariance` subclasses and their interoperation
    with `multivariate_normal`."""

    def test_input_validation(self):
        # Each `CovVia*` representation validates its input's shape/type and
        # raises ValueError with a descriptive message.

        message = "The input `precision` must be a square, two-dimensional..."
        with pytest.raises(ValueError, match=message):
            _covariance.CovViaPrecision(np.ones(2))

        message = "`precision.shape` must equal `covariance.shape`."
        with pytest.raises(ValueError, match=message):
            _covariance.CovViaPrecision(np.eye(3), covariance=np.eye(2))

        message = "The input `diagonal` must be a one-dimensional array..."
        with pytest.raises(ValueError, match=message):
            _covariance.CovViaDiagonal("alpaca")

        message = "The input `cholesky` must be a square, two-dimensional..."
        with pytest.raises(ValueError, match=message):
            _covariance.CovViaCholesky(np.ones(2))

        message = "The input `eigenvalues` must be a one-dimensional..."
        with pytest.raises(ValueError, match=message):
            _covariance.CovViaEigendecomposition(("alpaca", np.eye(2)))

        message = "The input `eigenvectors` must be a square..."
        with pytest.raises(ValueError, match=message):
            _covariance.CovViaEigendecomposition((np.ones(2), "alpaca"))

        message = "The shapes of `eigenvalues` and `eigenvectors` must be..."
        with pytest.raises(ValueError, match=message):
            _covariance.CovViaEigendecomposition(([1, 2, 3], np.eye(2)))

    # Maps each representation name to the function that converts a plain
    # covariance matrix A into that representation's constructor input.
    _covariance_preprocessing = {"Diagonal": np.diag,
                                 "Precision": np.linalg.inv,
                                 "Cholesky": np.linalg.cholesky,
                                 "Eigendecomposition": np.linalg.eigh,
                                 "PSD": lambda x:
                                     _PSD(x, allow_singular=True)}
    _all_covariance_types = np.array(list(_covariance_preprocessing))
    # Test matrices covering full-rank and singular, diagonal and general cases.
    _matrices = {"diagonal full rank": np.diag([1, 2, 3]),
                 "general full rank": [[5, 1, 3], [1, 6, 4], [3, 4, 7]],
                 "diagonal singular": np.diag([1, 0, 3]),
                 "general singular": [[5, -1, 0], [-1, 5, 0], [0, 0, 0]]}
    # Which representations can handle which matrix structure (e.g. Cholesky
    # cannot represent singular matrices; Diagonal only diagonal ones).
    _cov_types = {"diagonal full rank": _all_covariance_types,
                  "general full rank": _all_covariance_types[1:],
                  "diagonal singular": _all_covariance_types[[0, -2, -1]],
                  "general singular": _all_covariance_types[-2:]}

    @pytest.mark.parametrize("cov_type_name", _all_covariance_types[:-1])
    def test_factories(self, cov_type_name):
        # The public `Covariance.from_*` factories must produce objects of the
        # same type and behavior as direct instantiation of the corresponding
        # private `_covariance.CovVia*` class.
        A = np.diag([1, 2, 3])
        x = [-4, 2, 5]

        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
        preprocessing = self._covariance_preprocessing[cov_type_name]
        factory = getattr(Covariance, f"from_{cov_type_name.lower()}")

        res = factory(preprocessing(A))
        ref = cov_type(preprocessing(A))
        assert type(res) is type(ref)
        assert_allclose(res.whiten(x), ref.whiten(x))

    @pytest.mark.parametrize("matrix_type", list(_matrices))
    @pytest.mark.parametrize("cov_type_name", _all_covariance_types)
    def test_covariance(self, matrix_type, cov_type_name):
        # Check each Covariance representation against the reference `_PSD`
        # decomposition: exposed properties and whiten/colorize round trips.
        message = (f"CovVia{cov_type_name} does not support {matrix_type} "
                   "matrices")
        if cov_type_name not in self._cov_types[matrix_type]:
            pytest.skip(message)

        A = self._matrices[matrix_type]
        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
        preprocessing = self._covariance_preprocessing[cov_type_name]

        psd = _PSD(A, allow_singular=True)

        # test properties
        cov_object = cov_type(preprocessing(A))
        assert_close(cov_object.log_pdet, psd.log_pdet)
        assert_equal(cov_object.rank, psd.rank)
        assert_equal(cov_object.shape, np.asarray(A).shape)
        assert_close(cov_object.covariance, np.asarray(A))

        # test whitening/coloring 1D x
        rng = np.random.default_rng(5292808890472453840)
        x = rng.random(size=3)
        res = cov_object.whiten(x)
        ref = x @ psd.U
        # res != ref in general; but res @ res == ref @ ref
        assert_close(res @ res, ref @ ref)
        if hasattr(cov_object, "_colorize") and "singular" not in matrix_type:
            # CovViaPSD does not have _colorize
            assert_close(cov_object.colorize(res), x)

        # test whitening/coloring 3D x
        x = rng.random(size=(2, 4, 3))
        res = cov_object.whiten(x)
        ref = x @ psd.U
        assert_close((res**2).sum(axis=-1), (ref**2).sum(axis=-1))
        if hasattr(cov_object, "_colorize") and "singular" not in matrix_type:
            assert_close(cov_object.colorize(res), x)

        # gh-19197 reported that multivariate normal `rvs` produced incorrect
        # results when a singular Covariance object was produce using
        # `from_eigenvalues`. This was due to an issue in `colorize` with
        # singular covariance matrices. Check this edge case, which is skipped
        # in the previous tests.
        if hasattr(cov_object, "_colorize"):
            res = cov_object.colorize(np.eye(len(A)))
            assert_close(res.T @ res, A)

    @pytest.mark.parametrize("size", [None, tuple(), 1, (2, 4, 3)])
    @pytest.mark.parametrize("matrix_type", list(_matrices))
    @pytest.mark.parametrize("cov_type_name", _all_covariance_types)
    def test_mvn_with_covariance(self, size, matrix_type, cov_type_name):
        # `multivariate_normal` given a Covariance object must agree with the
        # distribution built from the equivalent plain matrix, for rvs/pdf/
        # logpdf/entropy, in both the frozen and non-frozen interfaces.
        message = (f"CovVia{cov_type_name} does not support {matrix_type} "
                   "matrices")
        if cov_type_name not in self._cov_types[matrix_type]:
            pytest.skip(message)

        A = self._matrices[matrix_type]
        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
        preprocessing = self._covariance_preprocessing[cov_type_name]

        mean = [0.1, 0.2, 0.3]
        cov_object = cov_type(preprocessing(A))
        mvn = multivariate_normal
        dist0 = multivariate_normal(mean, A, allow_singular=True)
        dist1 = multivariate_normal(mean, cov_object, allow_singular=True)

        # Same seed throughout so the three sampling paths draw identically.
        rng = np.random.default_rng(5292808890472453840)
        x = rng.multivariate_normal(mean, A, size=size)
        rng = np.random.default_rng(5292808890472453840)
        x1 = mvn.rvs(mean, cov_object, size=size, random_state=rng)
        rng = np.random.default_rng(5292808890472453840)
        x2 = mvn(mean, cov_object, seed=rng).rvs(size=size)
        if isinstance(cov_object, _covariance.CovViaPSD):
            assert_close(x1, np.squeeze(x))  # for backward compatibility
            assert_close(x2, np.squeeze(x))
        else:
            assert_equal(x1.shape, x.shape)
            assert_equal(x2.shape, x.shape)
            assert_close(x2, x1)

        assert_close(mvn.pdf(x, mean, cov_object), dist0.pdf(x))
        assert_close(dist1.pdf(x), dist0.pdf(x))
        assert_close(mvn.logpdf(x, mean, cov_object), dist0.logpdf(x))
        assert_close(dist1.logpdf(x), dist0.logpdf(x))
        assert_close(mvn.entropy(mean, cov_object), dist0.entropy())
        assert_close(dist1.entropy(), dist0.entropy())

    @pytest.mark.parametrize("size", [tuple(), (2, 4, 3)])
    @pytest.mark.parametrize("cov_type_name", _all_covariance_types)
    def test_mvn_with_covariance_cdf(self, size, cov_type_name):
        # This is split from the test above because it's slow to be running
        # with all matrix types, and there's no need because _mvn.mvnun
        # does the calculation. All Covariance needs to do is pass is
        # provide the `covariance` attribute.
        matrix_type = "diagonal full rank"
        A = self._matrices[matrix_type]
        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
        preprocessing = self._covariance_preprocessing[cov_type_name]

        mean = [0.1, 0.2, 0.3]
        cov_object = cov_type(preprocessing(A))
        mvn = multivariate_normal
        dist0 = multivariate_normal(mean, A, allow_singular=True)
        dist1 = multivariate_normal(mean, cov_object, allow_singular=True)

        rng = np.random.default_rng(5292808890472453840)
        x = rng.multivariate_normal(mean, A, size=size)

        assert_close(mvn.cdf(x, mean, cov_object), dist0.cdf(x))
        assert_close(dist1.cdf(x), dist0.cdf(x))
        assert_close(mvn.logcdf(x, mean, cov_object), dist0.logcdf(x))
        assert_close(dist1.logcdf(x), dist0.logcdf(x))

    def test_covariance_instantiation(self):
        # `Covariance` is an abstract base; only the `from_*` factories (or
        # the concrete subclasses) may create instances.
        message = "The `Covariance` class cannot be instantiated directly."
        with pytest.raises(NotImplementedError, match=message):
            Covariance()

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")  # matrix not PSD
    def test_gh9942(self):
        # Originally there was a mistake in the `multivariate_normal_frozen`
        # `rvs` method that caused all covariance objects to be processed as
        # a `_CovViaPSD`. Ensure that this is resolved.
        A = np.diag([1, 2, -1e-8])
        n = A.shape[0]
        mean = np.zeros(n)

        # Error if the matrix is processed as a `_CovViaPSD`
        with pytest.raises(ValueError, match="The input matrix must be..."):
            multivariate_normal(mean, A).rvs()

        # No error if it is provided as a `CovViaEigendecomposition`
        seed = 3562050283508273023
        rng1 = np.random.default_rng(seed)
        rng2 = np.random.default_rng(seed)
        cov = Covariance.from_eigendecomposition(np.linalg.eigh(A))
        rv = multivariate_normal(mean, cov)
        res = rv.rvs(random_state=rng1)
        ref = multivariate_normal.rvs(mean, cov, random_state=rng2)
        assert_equal(res, ref)

    def test_gh19197(self):
        # gh-19197 reported that multivariate normal `rvs` produced incorrect
        # results when a singular Covariance object was produce using
        # `from_eigenvalues`. Check that this specific issue is resolved;
        # a more general test is included in `test_covariance`.
        mean = np.ones(2)
        cov = Covariance.from_eigendecomposition((np.zeros(2), np.eye(2)))
        dist = scipy.stats.multivariate_normal(mean=mean, cov=cov)
        rvs = dist.rvs(size=None)
        # Zero covariance in every direction: samples must equal the mean.
        assert_equal(rvs, mean)

        cov = scipy.stats.Covariance.from_eigendecomposition(
            (np.array([1., 0.]), np.array([[1., 0.], [0., 400.]])))
        dist = scipy.stats.multivariate_normal(mean=mean, cov=cov)
        rvs = dist.rvs(size=None)
        # Variance only along the first eigenvector: component 0 varies,
        # component 1 stays pinned at the mean.
        assert rvs[0] != mean[0]
        assert rvs[1] == mean[1]
|
275
|
+
|
276
|
+
|
277
|
+
def _random_covariance(dim, evals, rng, singular=False):
|
278
|
+
# Generates random covariance matrix with dimensionality `dim` and
|
279
|
+
# eigenvalues `evals` using provided Generator `rng`. Randomly sets
|
280
|
+
# some evals to zero if `singular` is True.
|
281
|
+
A = rng.random((dim, dim))
|
282
|
+
A = A @ A.T
|
283
|
+
_, v = np.linalg.eigh(A)
|
284
|
+
if singular:
|
285
|
+
zero_eigs = rng.normal(size=dim) > 0
|
286
|
+
evals[zero_eigs] = 0
|
287
|
+
cov = v @ np.diag(evals) @ v.T
|
288
|
+
return cov
|
289
|
+
|
290
|
+
|
291
|
+
def _sample_orthonormal_matrix(n):
|
292
|
+
M = np.random.randn(n, n)
|
293
|
+
u, s, v = scipy.linalg.svd(M)
|
294
|
+
return u
|
295
|
+
|
296
|
+
|
297
|
+
@dataclass
class MVNProblem:
    """Instantiate a multivariate normal integration problem with special structure.

    When covariance matrix is a correlation matrix where the off-diagonal entries
    ``covar[i, j] == lambdas[i]*lambdas[j]`` for ``i != j``, then the multidimensional
    integral reduces to a simpler univariate integral that can be numerically integrated
    easily.

    The ``generate_*()`` classmethods provide a few options for creating variations
    of this problem.

    References
    ----------
    .. [1] Tong, Y.L. "The Multivariate Normal Distribution".
           Springer-Verlag. p192. 1990.
    """
    ndim : int
    low : np.ndarray
    high : np.ndarray
    lambdas : np.ndarray
    covar : np.ndarray
    target_val : float
    target_err : float

    #: The `generate_halves()` case has an analytically-known true value that we'll
    #: record here. It remains None for most cases, though.
    true_val : float | None = None

    def __init__(self, ndim, low, high, lambdas):
        # NOTE: this hand-written __init__ replaces the dataclass-generated
        # one; `covar`, `target_val`, and `target_err` are derived from
        # `lambdas` rather than passed in by the caller.
        super().__init__()
        self.ndim = ndim
        self.low = low
        self.high = high
        self.lambdas = lambdas

        # Correlation matrix: unit diagonal, rank-one off-diagonal structure
        # lambdas[i]*lambdas[j].
        self.covar = np.outer(self.lambdas, self.lambdas)
        np.fill_diagonal(self.covar, 1.0)
        self.find_target()

    @classmethod
    def generate_semigeneral(cls, ndim, rng=None):
        """Random lambdas, random upper bounds, infinite lower bounds.
        """
        rng = np.random.default_rng(rng)
        low = np.full(ndim, -np.inf)
        high = rng.uniform(0.0, np.sqrt(ndim), size=ndim)
        lambdas = rng.uniform(-1.0, 1.0, size=ndim)

        self = cls(
            ndim=ndim,
            low=low,
            high=high,
            lambdas=lambdas,
        )
        return self

    @classmethod
    def generate_constant(cls, ndim, rng=None):
        """Constant off-diagonal covariance, random upper bounds, infinite lower bounds.
        """
        rng = np.random.default_rng(rng)
        low = np.full(ndim, -np.inf)
        high = rng.uniform(0.0, np.sqrt(ndim), size=ndim)
        # Equal lambdas give a constant off-diagonal entry sigma**2.
        sigma = np.sqrt(rng.uniform(0.0, 1.0))
        lambdas = np.full(ndim, sigma)

        self = cls(
            ndim=ndim,
            low=low,
            high=high,
            lambdas=lambdas,
        )
        return self

    @classmethod
    def generate_halves(cls, ndim, rng=None):
        """Off-diagonal covariance of 0.5, negative orthant bounds.

        True analytically-derived answer is 1/(ndim+1).
        """
        low = np.full(ndim, -np.inf)
        high = np.zeros(ndim)
        # Scalar lambda broadcasts in np.outer to give 0.5 off-diagonal.
        lambdas = np.sqrt(0.5)

        self = cls(
            ndim=ndim,
            low=low,
            high=high,
            lambdas=lambdas,
        )
        self.true_val = 1 / (ndim+1)
        return self

    def find_target(self, **kwds):
        """Perform the simplified integral and store the results.
        """
        # +/- 9 standard deviations covers essentially all the mass of the
        # standard normal weight function.
        d = dict(
            a=-9.0,
            b=+9.0,
        )
        d.update(kwds)
        self.target_val, self.target_err = quad(self.univariate_func, **d)

    def _univariate_term(self, t):
        """The parameter-specific term of the univariate integrand,
        for separate plotting.
        """
        denom = np.sqrt(1 - self.lambdas**2)
        return np.prod(
            special.ndtr((self.high + self.lambdas*t[:, np.newaxis]) / denom) -
            special.ndtr((self.low + self.lambdas*t[:, np.newaxis]) / denom),
            axis=1,
        )

    def univariate_func(self, t):
        """Univariate integrand.
        """
        t = np.atleast_1d(t)
        return np.squeeze(norm_pdf(t) * self._univariate_term(t))

    def plot_integrand(self):
        """Plot the univariate integrand and its component terms for understanding.
        """
        from matplotlib import pyplot as plt

        t = np.linspace(-9.0, 9.0, 1001)
        plt.plot(t, norm_pdf(t), label=r'$\phi(t)$')
        plt.plot(t, self._univariate_term(t), label=r'$f(t)$')
        plt.plot(t, self.univariate_func(t), label=r'$f(t)*phi(t)$')
        plt.legend()
|
428
|
+
|
429
|
+
|
430
|
+
@dataclass
class SingularMVNProblem:
    """Instantiate a multivariate normal integration problem with a special singular
    covariance structure.

    When covariance matrix is a correlation matrix where the off-diagonal entries
    ``covar[i, j] == -lambdas[i]*lambdas[j]`` for ``i != j``, and
    ``sum(lambdas**2 / (1+lambdas**2)) == 1``, then the matrix is singular, and
    the multidimensional integral reduces to a simpler univariate integral that
    can be numerically integrated fairly easily.

    The lower bound must be infinite, though the upper bounds can be general.

    References
    ----------
    .. [1] Kwong, K.-S. (1995). "Evaluation of the one-sided percentage points of the
           singular multivariate normal distribution." Journal of Statistical
           Computation and Simulation, 51(2-4), 121-135. doi:10.1080/00949659508811627
    """
    ndim : int
    low : np.ndarray
    high : np.ndarray
    lambdas : np.ndarray
    covar : np.ndarray
    target_val : float
    target_err : float

    def __init__(self, ndim, high, lambdas):
        # NOTE: hand-written __init__ replaces the dataclass-generated one;
        # `low`, `covar`, and the integration targets are derived, not passed.
        self.ndim = ndim
        self.high = high
        self.lambdas = lambdas

        self.low = np.full(ndim, -np.inf)
        # Negated rank-one off-diagonal structure; with the constraint on
        # lambdas (see class docstring) this matrix is singular.
        self.covar = -np.outer(self.lambdas, self.lambdas)
        np.fill_diagonal(self.covar, 1.0)
        self.find_target()

    @classmethod
    def generate_semiinfinite(cls, ndim, rng=None):
        """Singular lambdas, random upper bounds.
        """
        rng = np.random.default_rng(rng)
        high = rng.uniform(0.0, np.sqrt(ndim), size=ndim)
        # Dirichlet weights p sum to 1, so sum(lambdas**2/(1+lambdas**2)) == 1,
        # which is exactly the singularity condition; signs are randomized.
        p = rng.dirichlet(np.full(ndim, 1.0))
        lambdas = np.sqrt(p / (1-p)) * rng.choice([-1.0, 1.0], size=ndim)

        self = cls(
            ndim=ndim,
            high=high,
            lambdas=lambdas,
        )
        return self

    def find_target(self, **kwds):
        """Perform the simplified univariate integral and store the results."""
        # +/- 9 standard deviations covers essentially all the normal mass.
        d = dict(
            a=-9.0,
            b=+9.0,
        )
        d.update(kwds)
        self.target_val, self.target_err = quad(self.univariate_func, **d)

    def _univariate_term(self, t):
        """The parameter-specific term of the univariate integrand.

        Uses `ndtr` at complex arguments; see reference [1] for the derivation.
        """
        denom = np.sqrt(1 + self.lambdas**2)
        i1 = np.prod(
            special.ndtr((self.high - 1j*self.lambdas*t[:, np.newaxis]) / denom),
            axis=1,
        )
        i2 = np.prod(
            special.ndtr((-self.high + 1j*self.lambdas*t[:, np.newaxis]) / denom),
            axis=1,
        )
        # The imaginary part is an odd function, so it can be ignored; it will integrate
        # out to 0.
        return (i1 - (-1)**self.ndim * i2).real

    def univariate_func(self, t):
        """Univariate integrand: standard normal weight times the term above."""
        t = np.atleast_1d(t)
        return (norm_pdf(t) * self._univariate_term(t)).squeeze()

    def plot_integrand(self):
        """Plot the univariate integrand and its component terms for understanding.
        """
        from matplotlib import pyplot as plt

        t = np.linspace(-9.0, 9.0, 1001)
        plt.plot(t, norm_pdf(t), label=r'$\phi(t)$')
        plt.plot(t, self._univariate_term(t), label=r'$f(t)$')
        plt.plot(t, self.univariate_func(t), label=r'$f(t)*phi(t)$')
        plt.ylim(-0.1, 1.1)
        plt.legend()
|
520
|
+
|
521
|
+
|
522
|
+
class TestMultivariateNormal:
|
523
|
+
def test_input_shape(self):
|
524
|
+
mu = np.arange(3)
|
525
|
+
cov = np.identity(2)
|
526
|
+
assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
|
527
|
+
assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
|
528
|
+
assert_raises(ValueError, multivariate_normal.cdf, (0, 1), mu, cov)
|
529
|
+
assert_raises(ValueError, multivariate_normal.cdf, (0, 1, 2), mu, cov)
|
530
|
+
|
531
|
+
def test_scalar_values(self):
|
532
|
+
np.random.seed(1234)
|
533
|
+
|
534
|
+
# When evaluated on scalar data, the pdf should return a scalar
|
535
|
+
x, mean, cov = 1.5, 1.7, 2.5
|
536
|
+
pdf = multivariate_normal.pdf(x, mean, cov)
|
537
|
+
assert_equal(pdf.ndim, 0)
|
538
|
+
|
539
|
+
# When evaluated on a single vector, the pdf should return a scalar
|
540
|
+
x = np.random.randn(5)
|
541
|
+
mean = np.random.randn(5)
|
542
|
+
cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix
|
543
|
+
pdf = multivariate_normal.pdf(x, mean, cov)
|
544
|
+
assert_equal(pdf.ndim, 0)
|
545
|
+
|
546
|
+
# When evaluated on scalar data, the cdf should return a scalar
|
547
|
+
x, mean, cov = 1.5, 1.7, 2.5
|
548
|
+
cdf = multivariate_normal.cdf(x, mean, cov)
|
549
|
+
assert_equal(cdf.ndim, 0)
|
550
|
+
|
551
|
+
# When evaluated on a single vector, the cdf should return a scalar
|
552
|
+
x = np.random.randn(5)
|
553
|
+
mean = np.random.randn(5)
|
554
|
+
cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix
|
555
|
+
cdf = multivariate_normal.cdf(x, mean, cov)
|
556
|
+
assert_equal(cdf.ndim, 0)
|
557
|
+
|
558
|
+
def test_logpdf(self):
|
559
|
+
# Check that the log of the pdf is in fact the logpdf
|
560
|
+
np.random.seed(1234)
|
561
|
+
x = np.random.randn(5)
|
562
|
+
mean = np.random.randn(5)
|
563
|
+
cov = np.abs(np.random.randn(5))
|
564
|
+
d1 = multivariate_normal.logpdf(x, mean, cov)
|
565
|
+
d2 = multivariate_normal.pdf(x, mean, cov)
|
566
|
+
assert_allclose(d1, np.log(d2))
|
567
|
+
|
568
|
+
def test_logpdf_default_values(self):
|
569
|
+
# Check that the log of the pdf is in fact the logpdf
|
570
|
+
# with default parameters Mean=None and cov = 1
|
571
|
+
np.random.seed(1234)
|
572
|
+
x = np.random.randn(5)
|
573
|
+
d1 = multivariate_normal.logpdf(x)
|
574
|
+
d2 = multivariate_normal.pdf(x)
|
575
|
+
# check whether default values are being used
|
576
|
+
d3 = multivariate_normal.logpdf(x, None, 1)
|
577
|
+
d4 = multivariate_normal.pdf(x, None, 1)
|
578
|
+
assert_allclose(d1, np.log(d2))
|
579
|
+
assert_allclose(d3, np.log(d4))
|
580
|
+
|
581
|
+
def test_logcdf(self):
|
582
|
+
# Check that the log of the cdf is in fact the logcdf
|
583
|
+
np.random.seed(1234)
|
584
|
+
x = np.random.randn(5)
|
585
|
+
mean = np.random.randn(5)
|
586
|
+
cov = np.abs(np.random.randn(5))
|
587
|
+
d1 = multivariate_normal.logcdf(x, mean, cov)
|
588
|
+
d2 = multivariate_normal.cdf(x, mean, cov)
|
589
|
+
assert_allclose(d1, np.log(d2))
|
590
|
+
|
591
|
+
def test_logcdf_default_values(self):
|
592
|
+
# Check that the log of the cdf is in fact the logcdf
|
593
|
+
# with default parameters Mean=None and cov = 1
|
594
|
+
np.random.seed(1234)
|
595
|
+
x = np.random.randn(5)
|
596
|
+
d1 = multivariate_normal.logcdf(x)
|
597
|
+
d2 = multivariate_normal.cdf(x)
|
598
|
+
# check whether default values are being used
|
599
|
+
d3 = multivariate_normal.logcdf(x, None, 1)
|
600
|
+
d4 = multivariate_normal.cdf(x, None, 1)
|
601
|
+
assert_allclose(d1, np.log(d2))
|
602
|
+
assert_allclose(d3, np.log(d4))
|
603
|
+
|
604
|
+
def test_rank(self):
|
605
|
+
# Check that the rank is detected correctly.
|
606
|
+
np.random.seed(1234)
|
607
|
+
n = 4
|
608
|
+
mean = np.random.randn(n)
|
609
|
+
for expected_rank in range(1, n + 1):
|
610
|
+
s = np.random.randn(n, expected_rank)
|
611
|
+
cov = np.dot(s, s.T)
|
612
|
+
distn = multivariate_normal(mean, cov, allow_singular=True)
|
613
|
+
assert_equal(distn.cov_object.rank, expected_rank)
|
614
|
+
|
615
|
+
def test_degenerate_distributions(self):
|
616
|
+
|
617
|
+
for n in range(1, 5):
|
618
|
+
z = np.random.randn(n)
|
619
|
+
for k in range(1, n):
|
620
|
+
# Sample a small covariance matrix.
|
621
|
+
s = np.random.randn(k, k)
|
622
|
+
cov_kk = np.dot(s, s.T)
|
623
|
+
|
624
|
+
# Embed the small covariance matrix into a larger singular one.
|
625
|
+
cov_nn = np.zeros((n, n))
|
626
|
+
cov_nn[:k, :k] = cov_kk
|
627
|
+
|
628
|
+
# Embed part of the vector in the same way
|
629
|
+
x = np.zeros(n)
|
630
|
+
x[:k] = z[:k]
|
631
|
+
|
632
|
+
# Define a rotation of the larger low rank matrix.
|
633
|
+
u = _sample_orthonormal_matrix(n)
|
634
|
+
cov_rr = np.dot(u, np.dot(cov_nn, u.T))
|
635
|
+
y = np.dot(u, x)
|
636
|
+
|
637
|
+
# Check some identities.
|
638
|
+
distn_kk = multivariate_normal(np.zeros(k), cov_kk,
|
639
|
+
allow_singular=True)
|
640
|
+
distn_nn = multivariate_normal(np.zeros(n), cov_nn,
|
641
|
+
allow_singular=True)
|
642
|
+
distn_rr = multivariate_normal(np.zeros(n), cov_rr,
|
643
|
+
allow_singular=True)
|
644
|
+
assert_equal(distn_kk.cov_object.rank, k)
|
645
|
+
assert_equal(distn_nn.cov_object.rank, k)
|
646
|
+
assert_equal(distn_rr.cov_object.rank, k)
|
647
|
+
pdf_kk = distn_kk.pdf(x[:k])
|
648
|
+
pdf_nn = distn_nn.pdf(x)
|
649
|
+
pdf_rr = distn_rr.pdf(y)
|
650
|
+
assert_allclose(pdf_kk, pdf_nn)
|
651
|
+
assert_allclose(pdf_kk, pdf_rr)
|
652
|
+
logpdf_kk = distn_kk.logpdf(x[:k])
|
653
|
+
logpdf_nn = distn_nn.logpdf(x)
|
654
|
+
logpdf_rr = distn_rr.logpdf(y)
|
655
|
+
assert_allclose(logpdf_kk, logpdf_nn)
|
656
|
+
assert_allclose(logpdf_kk, logpdf_rr)
|
657
|
+
|
658
|
+
# Add an orthogonal component and find the density
|
659
|
+
y_orth = y + u[:, -1]
|
660
|
+
pdf_rr_orth = distn_rr.pdf(y_orth)
|
661
|
+
logpdf_rr_orth = distn_rr.logpdf(y_orth)
|
662
|
+
|
663
|
+
# Ensure that this has zero probability
|
664
|
+
assert_equal(pdf_rr_orth, 0.0)
|
665
|
+
assert_equal(logpdf_rr_orth, -np.inf)
|
666
|
+
|
667
|
+
def test_degenerate_array(self):
|
668
|
+
# Test that we can generate arrays of random variate from a degenerate
|
669
|
+
# multivariate normal, and that the pdf for these samples is non-zero
|
670
|
+
# (i.e. samples from the distribution lie on the subspace)
|
671
|
+
k = 10
|
672
|
+
for n in range(2, 6):
|
673
|
+
for r in range(1, n):
|
674
|
+
mn = np.zeros(n)
|
675
|
+
u = _sample_orthonormal_matrix(n)[:, :r]
|
676
|
+
vr = np.dot(u, u.T)
|
677
|
+
X = multivariate_normal.rvs(mean=mn, cov=vr, size=k)
|
678
|
+
|
679
|
+
pdf = multivariate_normal.pdf(X, mean=mn, cov=vr,
|
680
|
+
allow_singular=True)
|
681
|
+
assert_equal(pdf.size, k)
|
682
|
+
assert np.all(pdf > 0.0)
|
683
|
+
|
684
|
+
logpdf = multivariate_normal.logpdf(X, mean=mn, cov=vr,
|
685
|
+
allow_singular=True)
|
686
|
+
assert_equal(logpdf.size, k)
|
687
|
+
assert np.all(logpdf > -np.inf)
|
688
|
+
|
689
|
+
def test_large_pseudo_determinant(self):
|
690
|
+
# Check that large pseudo-determinants are handled appropriately.
|
691
|
+
|
692
|
+
# Construct a singular diagonal covariance matrix
|
693
|
+
# whose pseudo determinant overflows double precision.
|
694
|
+
large_total_log = 1000.0
|
695
|
+
npos = 100
|
696
|
+
nzero = 2
|
697
|
+
large_entry = np.exp(large_total_log / npos)
|
698
|
+
n = npos + nzero
|
699
|
+
cov = np.zeros((n, n), dtype=float)
|
700
|
+
np.fill_diagonal(cov, large_entry)
|
701
|
+
cov[-nzero:, -nzero:] = 0
|
702
|
+
|
703
|
+
# Check some determinants.
|
704
|
+
assert_equal(scipy.linalg.det(cov), 0)
|
705
|
+
assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
|
706
|
+
assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
|
707
|
+
(1, large_total_log))
|
708
|
+
|
709
|
+
# Check the pseudo-determinant.
|
710
|
+
psd = _PSD(cov)
|
711
|
+
assert_allclose(psd.log_pdet, large_total_log)
|
712
|
+
|
713
|
+
def test_broadcasting(self):
|
714
|
+
rng = np.random.RandomState(1234)
|
715
|
+
n = 4
|
716
|
+
|
717
|
+
# Construct a random covariance matrix.
|
718
|
+
data = rng.randn(n, n)
|
719
|
+
cov = np.dot(data, data.T)
|
720
|
+
mean = rng.randn(n)
|
721
|
+
|
722
|
+
# Construct an ndarray which can be interpreted as
|
723
|
+
# a 2x3 array whose elements are random data vectors.
|
724
|
+
X = rng.randn(2, 3, n)
|
725
|
+
|
726
|
+
# Check that multiple data points can be evaluated at once.
|
727
|
+
desired_pdf = multivariate_normal.pdf(X, mean, cov)
|
728
|
+
desired_cdf = multivariate_normal.cdf(X, mean, cov)
|
729
|
+
for i in range(2):
|
730
|
+
for j in range(3):
|
731
|
+
actual = multivariate_normal.pdf(X[i, j], mean, cov)
|
732
|
+
assert_allclose(actual, desired_pdf[i,j])
|
733
|
+
# Repeat for cdf
|
734
|
+
actual = multivariate_normal.cdf(X[i, j], mean, cov)
|
735
|
+
assert_allclose(actual, desired_cdf[i,j], rtol=1e-3)
|
736
|
+
|
737
|
+
def test_normal_1D(self):
|
738
|
+
# The probability density function for a 1D normal variable should
|
739
|
+
# agree with the standard normal distribution in scipy.stats.distributions
|
740
|
+
x = np.linspace(0, 2, 10)
|
741
|
+
mean, cov = 1.2, 0.9
|
742
|
+
scale = cov**0.5
|
743
|
+
d1 = norm.pdf(x, mean, scale)
|
744
|
+
d2 = multivariate_normal.pdf(x, mean, cov)
|
745
|
+
assert_allclose(d1, d2)
|
746
|
+
# The same should hold for the cumulative distribution function
|
747
|
+
d1 = norm.cdf(x, mean, scale)
|
748
|
+
d2 = multivariate_normal.cdf(x, mean, cov)
|
749
|
+
assert_allclose(d1, d2)
|
750
|
+
|
751
|
+
def test_marginalization(self):
|
752
|
+
# Integrating out one of the variables of a 2D Gaussian should
|
753
|
+
# yield a 1D Gaussian
|
754
|
+
mean = np.array([2.5, 3.5])
|
755
|
+
cov = np.array([[.5, 0.2], [0.2, .6]])
|
756
|
+
n = 2 ** 8 + 1 # Number of samples
|
757
|
+
delta = 6 / (n - 1) # Grid spacing
|
758
|
+
|
759
|
+
v = np.linspace(0, 6, n)
|
760
|
+
xv, yv = np.meshgrid(v, v)
|
761
|
+
pos = np.empty((n, n, 2))
|
762
|
+
pos[:, :, 0] = xv
|
763
|
+
pos[:, :, 1] = yv
|
764
|
+
pdf = multivariate_normal.pdf(pos, mean, cov)
|
765
|
+
|
766
|
+
# Marginalize over x and y axis
|
767
|
+
margin_x = romb(pdf, delta, axis=0)
|
768
|
+
margin_y = romb(pdf, delta, axis=1)
|
769
|
+
|
770
|
+
# Compare with standard normal distribution
|
771
|
+
gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
|
772
|
+
gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
|
773
|
+
assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
|
774
|
+
assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
|
775
|
+
|
776
|
+
def test_frozen(self):
|
777
|
+
# The frozen distribution should agree with the regular one
|
778
|
+
np.random.seed(1234)
|
779
|
+
x = np.random.randn(5)
|
780
|
+
mean = np.random.randn(5)
|
781
|
+
cov = np.abs(np.random.randn(5))
|
782
|
+
norm_frozen = multivariate_normal(mean, cov)
|
783
|
+
assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
|
784
|
+
assert_allclose(norm_frozen.logpdf(x),
|
785
|
+
multivariate_normal.logpdf(x, mean, cov))
|
786
|
+
assert_allclose(norm_frozen.cdf(x), multivariate_normal.cdf(x, mean, cov))
|
787
|
+
assert_allclose(norm_frozen.logcdf(x),
|
788
|
+
multivariate_normal.logcdf(x, mean, cov))
|
789
|
+
|
790
|
+
@pytest.mark.parametrize(
|
791
|
+
'covariance',
|
792
|
+
[
|
793
|
+
np.eye(2),
|
794
|
+
Covariance.from_diagonal([1, 1]),
|
795
|
+
]
|
796
|
+
)
|
797
|
+
def test_frozen_multivariate_normal_exposes_attributes(self, covariance):
|
798
|
+
mean = np.ones((2,))
|
799
|
+
cov_should_be = np.eye(2)
|
800
|
+
norm_frozen = multivariate_normal(mean, covariance)
|
801
|
+
assert np.allclose(norm_frozen.mean, mean)
|
802
|
+
assert np.allclose(norm_frozen.cov, cov_should_be)
|
803
|
+
|
804
|
+
def test_pseudodet_pinv(self):
|
805
|
+
# Make sure that pseudo-inverse and pseudo-det agree on cutoff
|
806
|
+
|
807
|
+
# Assemble random covariance matrix with large and small eigenvalues
|
808
|
+
np.random.seed(1234)
|
809
|
+
n = 7
|
810
|
+
x = np.random.randn(n, n)
|
811
|
+
cov = np.dot(x, x.T)
|
812
|
+
s, u = scipy.linalg.eigh(cov)
|
813
|
+
s = np.full(n, 0.5)
|
814
|
+
s[0] = 1.0
|
815
|
+
s[-1] = 1e-7
|
816
|
+
cov = np.dot(u, np.dot(np.diag(s), u.T))
|
817
|
+
|
818
|
+
# Set cond so that the lowest eigenvalue is below the cutoff
|
819
|
+
cond = 1e-5
|
820
|
+
psd = _PSD(cov, cond=cond)
|
821
|
+
psd_pinv = _PSD(psd.pinv, cond=cond)
|
822
|
+
|
823
|
+
# Check that the log pseudo-determinant agrees with the sum
|
824
|
+
# of the logs of all but the smallest eigenvalue
|
825
|
+
assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
|
826
|
+
# Check that the pseudo-determinant of the pseudo-inverse
|
827
|
+
# agrees with 1 / pseudo-determinant
|
828
|
+
assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
|
829
|
+
|
830
|
+
def test_exception_nonsquare_cov(self):
|
831
|
+
cov = [[1, 2, 3], [4, 5, 6]]
|
832
|
+
assert_raises(ValueError, _PSD, cov)
|
833
|
+
|
834
|
+
def test_exception_nonfinite_cov(self):
|
835
|
+
cov_nan = [[1, 0], [0, np.nan]]
|
836
|
+
assert_raises(ValueError, _PSD, cov_nan)
|
837
|
+
cov_inf = [[1, 0], [0, np.inf]]
|
838
|
+
assert_raises(ValueError, _PSD, cov_inf)
|
839
|
+
|
840
|
+
def test_exception_non_psd_cov(self):
|
841
|
+
cov = [[1, 0], [0, -1]]
|
842
|
+
assert_raises(ValueError, _PSD, cov)
|
843
|
+
|
844
|
+
def test_exception_singular_cov(self):
|
845
|
+
np.random.seed(1234)
|
846
|
+
x = np.random.randn(5)
|
847
|
+
mean = np.random.randn(5)
|
848
|
+
cov = np.ones((5, 5))
|
849
|
+
e = np.linalg.LinAlgError
|
850
|
+
assert_raises(e, multivariate_normal, mean, cov)
|
851
|
+
assert_raises(e, multivariate_normal.pdf, x, mean, cov)
|
852
|
+
assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
|
853
|
+
assert_raises(e, multivariate_normal.cdf, x, mean, cov)
|
854
|
+
assert_raises(e, multivariate_normal.logcdf, x, mean, cov)
|
855
|
+
|
856
|
+
# Message used to be "singular matrix", but this is more accurate.
|
857
|
+
# See gh-15508
|
858
|
+
cov = [[1., 0.], [1., 1.]]
|
859
|
+
msg = "When `allow_singular is False`, the input matrix"
|
860
|
+
with pytest.raises(np.linalg.LinAlgError, match=msg):
|
861
|
+
multivariate_normal(cov=cov)
|
862
|
+
|
863
|
+
def test_R_values(self):
|
864
|
+
# Compare the multivariate pdf with some values precomputed
|
865
|
+
# in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
|
866
|
+
|
867
|
+
# The values below were generated by the following R-script:
|
868
|
+
# > library(mnormt)
|
869
|
+
# > x <- seq(0, 2, length=5)
|
870
|
+
# > y <- 3*x - 2
|
871
|
+
# > z <- x + cos(y)
|
872
|
+
# > mu <- c(1, 3, 2)
|
873
|
+
# > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
|
874
|
+
# > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
|
875
|
+
r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
|
876
|
+
0.0103803050, 0.0140250800])
|
877
|
+
|
878
|
+
x = np.linspace(0, 2, 5)
|
879
|
+
y = 3 * x - 2
|
880
|
+
z = x + np.cos(y)
|
881
|
+
r = np.array([x, y, z]).T
|
882
|
+
|
883
|
+
mean = np.array([1, 3, 2], 'd')
|
884
|
+
cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
|
885
|
+
|
886
|
+
pdf = multivariate_normal.pdf(r, mean, cov)
|
887
|
+
assert_allclose(pdf, r_pdf, atol=1e-10)
|
888
|
+
|
889
|
+
# Compare the multivariate cdf with some values precomputed
|
890
|
+
# in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
|
891
|
+
|
892
|
+
# The values below were generated by the following R-script:
|
893
|
+
# > library(mnormt)
|
894
|
+
# > x <- seq(0, 2, length=5)
|
895
|
+
# > y <- 3*x - 2
|
896
|
+
# > z <- x + cos(y)
|
897
|
+
# > mu <- c(1, 3, 2)
|
898
|
+
# > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
|
899
|
+
# > r_cdf <- pmnorm(cbind(x,y,z), mu, Sigma)
|
900
|
+
r_cdf = np.array([0.0017866215, 0.0267142892, 0.0857098761,
|
901
|
+
0.1063242573, 0.2501068509])
|
902
|
+
|
903
|
+
cdf = multivariate_normal.cdf(r, mean, cov)
|
904
|
+
assert_allclose(cdf, r_cdf, atol=2e-5)
|
905
|
+
|
906
|
+
# Also test bivariate cdf with some values precomputed
|
907
|
+
# in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
|
908
|
+
|
909
|
+
# The values below were generated by the following R-script:
|
910
|
+
# > library(mnormt)
|
911
|
+
# > x <- seq(0, 2, length=5)
|
912
|
+
# > y <- 3*x - 2
|
913
|
+
# > mu <- c(1, 3)
|
914
|
+
# > Sigma <- matrix(c(1,2,2,5), 2, 2)
|
915
|
+
# > r_cdf2 <- pmnorm(cbind(x,y), mu, Sigma)
|
916
|
+
r_cdf2 = np.array([0.01262147, 0.05838989, 0.18389571,
|
917
|
+
0.40696599, 0.66470577])
|
918
|
+
|
919
|
+
r2 = np.array([x, y]).T
|
920
|
+
|
921
|
+
mean2 = np.array([1, 3], 'd')
|
922
|
+
cov2 = np.array([[1, 2], [2, 5]], 'd')
|
923
|
+
|
924
|
+
cdf2 = multivariate_normal.cdf(r2, mean2, cov2)
|
925
|
+
assert_allclose(cdf2, r_cdf2, atol=1e-5)
|
926
|
+
|
927
|
+
def test_multivariate_normal_rvs_zero_covariance(self):
|
928
|
+
mean = np.zeros(2)
|
929
|
+
covariance = np.zeros((2, 2))
|
930
|
+
model = multivariate_normal(mean, covariance, allow_singular=True)
|
931
|
+
sample = model.rvs()
|
932
|
+
assert_equal(sample, [0, 0])
|
933
|
+
|
934
|
+
def test_rvs_shape(self):
|
935
|
+
# Check that rvs parses the mean and covariance correctly, and returns
|
936
|
+
# an array of the right shape
|
937
|
+
N = 300
|
938
|
+
d = 4
|
939
|
+
sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
|
940
|
+
assert_equal(sample.shape, (N, d))
|
941
|
+
|
942
|
+
sample = multivariate_normal.rvs(mean=None,
|
943
|
+
cov=np.array([[2, .1], [.1, 1]]),
|
944
|
+
size=N)
|
945
|
+
assert_equal(sample.shape, (N, 2))
|
946
|
+
|
947
|
+
u = multivariate_normal(mean=0, cov=1)
|
948
|
+
sample = u.rvs(N)
|
949
|
+
assert_equal(sample.shape, (N, ))
|
950
|
+
|
951
|
+
def test_large_sample(self):
|
952
|
+
# Generate large sample and compare sample mean and sample covariance
|
953
|
+
# with mean and covariance matrix.
|
954
|
+
|
955
|
+
rng = np.random.RandomState(2846)
|
956
|
+
|
957
|
+
n = 3
|
958
|
+
mean = rng.randn(n)
|
959
|
+
M = rng.randn(n, n)
|
960
|
+
cov = np.dot(M, M.T)
|
961
|
+
size = 5000
|
962
|
+
|
963
|
+
sample = multivariate_normal.rvs(mean, cov, size, random_state=rng)
|
964
|
+
|
965
|
+
assert_allclose(np.cov(sample.T), cov, rtol=1e-1)
|
966
|
+
assert_allclose(sample.mean(0), mean, rtol=1e-1)
|
967
|
+
|
968
|
+
def test_entropy(self):
|
969
|
+
rng = np.random.RandomState(2846)
|
970
|
+
|
971
|
+
n = 3
|
972
|
+
mean = rng.randn(n)
|
973
|
+
M = rng.randn(n, n)
|
974
|
+
cov = np.dot(M, M.T)
|
975
|
+
|
976
|
+
rv = multivariate_normal(mean, cov)
|
977
|
+
|
978
|
+
# Check that frozen distribution agrees with entropy function
|
979
|
+
assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
|
980
|
+
# Compare entropy with manually computed expression involving
|
981
|
+
# the sum of the logs of the eigenvalues of the covariance matrix
|
982
|
+
eigs = np.linalg.eig(cov)[0]
|
983
|
+
desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
|
984
|
+
assert_almost_equal(desired, rv.entropy())
|
985
|
+
|
986
|
+
def test_lnB(self):
|
987
|
+
alpha = np.array([1, 1, 1])
|
988
|
+
desired = .5 # e^lnB = 1/2 for [1, 1, 1]
|
989
|
+
|
990
|
+
assert_almost_equal(np.exp(_lnB(alpha)), desired)
|
991
|
+
|
992
|
+
def test_cdf_with_lower_limit_arrays(self):
|
993
|
+
# test CDF with lower limit in several dimensions
|
994
|
+
rng = np.random.default_rng(2408071309372769818)
|
995
|
+
mean = [0, 0]
|
996
|
+
cov = np.eye(2)
|
997
|
+
a = rng.random((4, 3, 2))*6 - 3
|
998
|
+
b = rng.random((4, 3, 2))*6 - 3
|
999
|
+
|
1000
|
+
cdf1 = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
|
1001
|
+
|
1002
|
+
cdf2a = multivariate_normal.cdf(b, mean, cov)
|
1003
|
+
cdf2b = multivariate_normal.cdf(a, mean, cov)
|
1004
|
+
ab1 = np.concatenate((a[..., 0:1], b[..., 1:2]), axis=-1)
|
1005
|
+
ab2 = np.concatenate((a[..., 1:2], b[..., 0:1]), axis=-1)
|
1006
|
+
cdf2ab1 = multivariate_normal.cdf(ab1, mean, cov)
|
1007
|
+
cdf2ab2 = multivariate_normal.cdf(ab2, mean, cov)
|
1008
|
+
cdf2 = cdf2a + cdf2b - cdf2ab1 - cdf2ab2
|
1009
|
+
|
1010
|
+
assert_allclose(cdf1, cdf2)
|
1011
|
+
|
1012
|
+
def test_cdf_with_lower_limit_consistency(self):
|
1013
|
+
# check that multivariate normal CDF functions are consistent
|
1014
|
+
rng = np.random.default_rng(2408071309372769818)
|
1015
|
+
mean = rng.random(3)
|
1016
|
+
cov = rng.random((3, 3))
|
1017
|
+
cov = cov @ cov.T
|
1018
|
+
a = rng.random((2, 3))*6 - 3
|
1019
|
+
b = rng.random((2, 3))*6 - 3
|
1020
|
+
|
1021
|
+
cdf1 = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
|
1022
|
+
cdf2 = multivariate_normal(mean, cov).cdf(b, lower_limit=a)
|
1023
|
+
cdf3 = np.exp(multivariate_normal.logcdf(b, mean, cov, lower_limit=a))
|
1024
|
+
cdf4 = np.exp(multivariate_normal(mean, cov).logcdf(b, lower_limit=a))
|
1025
|
+
|
1026
|
+
assert_allclose(cdf2, cdf1, rtol=1e-4)
|
1027
|
+
assert_allclose(cdf3, cdf1, rtol=1e-4)
|
1028
|
+
assert_allclose(cdf4, cdf1, rtol=1e-4)
|
1029
|
+
|
1030
|
+
def test_cdf_signs(self):
|
1031
|
+
# check that sign of output is correct when np.any(lower > x)
|
1032
|
+
mean = np.zeros(3)
|
1033
|
+
cov = np.eye(3)
|
1034
|
+
b = [[1, 1, 1], [0, 0, 0], [1, 0, 1], [0, 1, 0]]
|
1035
|
+
a = [[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]
|
1036
|
+
# when odd number of elements of b < a, output is negative
|
1037
|
+
expected_signs = np.array([1, -1, -1, 1])
|
1038
|
+
cdf = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
|
1039
|
+
assert_allclose(cdf, cdf[0]*expected_signs)
|
1040
|
+
|
1041
|
+
@pytest.mark.slow
|
1042
|
+
def test_cdf_vs_cubature(self):
|
1043
|
+
ndim = 3
|
1044
|
+
rng = np.random.default_rng(123)
|
1045
|
+
a = rng.uniform(size=(ndim, ndim))
|
1046
|
+
cov = a.T @ a
|
1047
|
+
m = rng.uniform(size=ndim)
|
1048
|
+
dist = multivariate_normal(mean=m, cov=cov)
|
1049
|
+
x = rng.uniform(low=-3, high=3, size=(ndim,))
|
1050
|
+
cdf = dist.cdf(x)
|
1051
|
+
dist_i = multivariate_normal(mean=[0]*ndim, cov=cov)
|
1052
|
+
cdf_i = cubature(dist_i.pdf, [-np.inf]*ndim, x - m).estimate
|
1053
|
+
assert_allclose(cdf, cdf_i, atol=5e-6)
|
1054
|
+
|
1055
|
+
def test_cdf_known(self):
|
1056
|
+
# https://github.com/scipy/scipy/pull/17410#issuecomment-1312628547
|
1057
|
+
for ndim in range(2, 12):
|
1058
|
+
cov = np.full((ndim, ndim), 0.5)
|
1059
|
+
np.fill_diagonal(cov, 1.)
|
1060
|
+
dist = multivariate_normal([0]*ndim, cov=cov)
|
1061
|
+
assert_allclose(
|
1062
|
+
dist.cdf([0]*ndim),
|
1063
|
+
1. / (1. + ndim),
|
1064
|
+
atol=5e-5
|
1065
|
+
)
|
1066
|
+
|
1067
|
+
@pytest.mark.parametrize("ndim", range(2, 10))
|
1068
|
+
@pytest.mark.parametrize("seed", [0xdeadbeef, 0xdd24528764c9773579731c6b022b48e2])
|
1069
|
+
def test_cdf_vs_univariate(self, seed, ndim):
|
1070
|
+
rng = np.random.default_rng(seed)
|
1071
|
+
case = MVNProblem.generate_semigeneral(ndim=ndim, rng=rng)
|
1072
|
+
assert (case.low == -np.inf).all()
|
1073
|
+
|
1074
|
+
dist = multivariate_normal(mean=[0]*ndim, cov=case.covar)
|
1075
|
+
cdf_val = dist.cdf(case.high, rng=rng)
|
1076
|
+
assert_allclose(cdf_val, case.target_val, atol=5e-5)
|
1077
|
+
|
1078
|
+
@pytest.mark.parametrize("ndim", range(2, 11))
|
1079
|
+
@pytest.mark.parametrize("seed", [0xdeadbeef, 0xdd24528764c9773579731c6b022b48e2])
|
1080
|
+
def test_cdf_vs_univariate_2(self, seed, ndim):
|
1081
|
+
rng = np.random.default_rng(seed)
|
1082
|
+
case = MVNProblem.generate_constant(ndim=ndim, rng=rng)
|
1083
|
+
assert (case.low == -np.inf).all()
|
1084
|
+
|
1085
|
+
dist = multivariate_normal(mean=[0]*ndim, cov=case.covar)
|
1086
|
+
cdf_val = dist.cdf(case.high, rng=rng)
|
1087
|
+
assert_allclose(cdf_val, case.target_val, atol=5e-5)
|
1088
|
+
|
1089
|
+
@pytest.mark.parametrize("ndim", range(4, 11))
|
1090
|
+
@pytest.mark.parametrize("seed", [0xdeadbeef, 0xdd24528764c9773579731c6b022b48e4])
|
1091
|
+
def test_cdf_vs_univariate_singular(self, seed, ndim):
|
1092
|
+
# NB: ndim = 2, 3 has much poorer accuracy than ndim > 3 for many seeds.
|
1093
|
+
# No idea why.
|
1094
|
+
rng = np.random.default_rng(seed)
|
1095
|
+
case = SingularMVNProblem.generate_semiinfinite(ndim=ndim, rng=rng)
|
1096
|
+
assert (case.low == -np.inf).all()
|
1097
|
+
|
1098
|
+
dist = multivariate_normal(mean=[0]*ndim, cov=case.covar, allow_singular=True,
|
1099
|
+
# default maxpts is too slow, limit it here
|
1100
|
+
maxpts=10_000*case.covar.shape[0]
|
1101
|
+
)
|
1102
|
+
cdf_val = dist.cdf(case.high, rng=rng)
|
1103
|
+
assert_allclose(cdf_val, case.target_val, atol=1e-3)
|
1104
|
+
|
1105
|
+
def test_mean_cov(self):
|
1106
|
+
# test the interaction between a Covariance object and mean
|
1107
|
+
P = np.diag(1 / np.array([1, 2, 3]))
|
1108
|
+
cov_object = _covariance.CovViaPrecision(P)
|
1109
|
+
|
1110
|
+
message = "`cov` represents a covariance matrix in 3 dimensions..."
|
1111
|
+
with pytest.raises(ValueError, match=message):
|
1112
|
+
multivariate_normal.entropy([0, 0], cov_object)
|
1113
|
+
|
1114
|
+
with pytest.raises(ValueError, match=message):
|
1115
|
+
multivariate_normal([0, 0], cov_object)
|
1116
|
+
|
1117
|
+
x = [0.5, 0.5, 0.5]
|
1118
|
+
ref = multivariate_normal.pdf(x, [0, 0, 0], cov_object)
|
1119
|
+
assert_equal(multivariate_normal.pdf(x, cov=cov_object), ref)
|
1120
|
+
|
1121
|
+
ref = multivariate_normal.pdf(x, [1, 1, 1], cov_object)
|
1122
|
+
assert_equal(multivariate_normal.pdf(x, 1, cov=cov_object), ref)
|
1123
|
+
|
1124
|
+
def test_fit_wrong_fit_data_shape(self):
|
1125
|
+
data = [1, 3]
|
1126
|
+
error_msg = "`x` must be two-dimensional."
|
1127
|
+
with pytest.raises(ValueError, match=error_msg):
|
1128
|
+
multivariate_normal.fit(data)
|
1129
|
+
|
1130
|
+
@pytest.mark.parametrize('dim', (3, 5))
|
1131
|
+
def test_fit_correctness(self, dim):
|
1132
|
+
rng = np.random.default_rng(4385269356937404)
|
1133
|
+
x = rng.random((100, dim))
|
1134
|
+
mean_est, cov_est = multivariate_normal.fit(x)
|
1135
|
+
mean_ref, cov_ref = np.mean(x, axis=0), np.cov(x.T, ddof=0)
|
1136
|
+
assert_allclose(mean_est, mean_ref, atol=1e-15)
|
1137
|
+
assert_allclose(cov_est, cov_ref, rtol=1e-15)
|
1138
|
+
|
1139
|
+
def test_fit_both_parameters_fixed(self):
|
1140
|
+
data = np.full((2, 1), 3)
|
1141
|
+
mean_fixed = 1.
|
1142
|
+
cov_fixed = np.atleast_2d(1.)
|
1143
|
+
mean, cov = multivariate_normal.fit(data, fix_mean=mean_fixed,
|
1144
|
+
fix_cov=cov_fixed)
|
1145
|
+
assert_equal(mean, mean_fixed)
|
1146
|
+
assert_equal(cov, cov_fixed)
|
1147
|
+
|
1148
|
+
@pytest.mark.parametrize('fix_mean', [np.zeros((2, 2)),
|
1149
|
+
np.zeros((3, ))])
|
1150
|
+
def test_fit_fix_mean_input_validation(self, fix_mean):
|
1151
|
+
msg = ("`fix_mean` must be a one-dimensional array the same "
|
1152
|
+
"length as the dimensionality of the vectors `x`.")
|
1153
|
+
with pytest.raises(ValueError, match=msg):
|
1154
|
+
multivariate_normal.fit(np.eye(2), fix_mean=fix_mean)
|
1155
|
+
|
1156
|
+
@pytest.mark.parametrize('fix_cov', [np.zeros((2, )),
|
1157
|
+
np.zeros((3, 2)),
|
1158
|
+
np.zeros((4, 4))])
|
1159
|
+
def test_fit_fix_cov_input_validation_dimension(self, fix_cov):
|
1160
|
+
msg = ("`fix_cov` must be a two-dimensional square array "
|
1161
|
+
"of same side length as the dimensionality of the "
|
1162
|
+
"vectors `x`.")
|
1163
|
+
with pytest.raises(ValueError, match=msg):
|
1164
|
+
multivariate_normal.fit(np.eye(3), fix_cov=fix_cov)
|
1165
|
+
|
1166
|
+
def test_fit_fix_cov_not_positive_semidefinite(self):
|
1167
|
+
error_msg = "`fix_cov` must be symmetric positive semidefinite."
|
1168
|
+
with pytest.raises(ValueError, match=error_msg):
|
1169
|
+
fix_cov = np.array([[1., 0.], [0., -1.]])
|
1170
|
+
multivariate_normal.fit(np.eye(2), fix_cov=fix_cov)
|
1171
|
+
|
1172
|
+
def test_fit_fix_mean(self):
|
1173
|
+
rng = np.random.default_rng(4385269356937404)
|
1174
|
+
loc = rng.random(3)
|
1175
|
+
A = rng.random((3, 3))
|
1176
|
+
cov = np.dot(A, A.T)
|
1177
|
+
samples = multivariate_normal.rvs(mean=loc, cov=cov, size=100,
|
1178
|
+
random_state=rng)
|
1179
|
+
mean_free, cov_free = multivariate_normal.fit(samples)
|
1180
|
+
logp_free = multivariate_normal.logpdf(samples, mean=mean_free,
|
1181
|
+
cov=cov_free).sum()
|
1182
|
+
mean_fix, cov_fix = multivariate_normal.fit(samples, fix_mean=loc)
|
1183
|
+
assert_equal(mean_fix, loc)
|
1184
|
+
logp_fix = multivariate_normal.logpdf(samples, mean=mean_fix,
|
1185
|
+
cov=cov_fix).sum()
|
1186
|
+
# test that fixed parameters result in lower likelihood than free
|
1187
|
+
# parameters
|
1188
|
+
assert logp_fix < logp_free
|
1189
|
+
# test that a small perturbation of the resulting parameters
|
1190
|
+
# has lower likelihood than the estimated parameters
|
1191
|
+
A = rng.random((3, 3))
|
1192
|
+
m = 1e-8 * np.dot(A, A.T)
|
1193
|
+
cov_perturbed = cov_fix + m
|
1194
|
+
logp_perturbed = (multivariate_normal.logpdf(samples,
|
1195
|
+
mean=mean_fix,
|
1196
|
+
cov=cov_perturbed)
|
1197
|
+
).sum()
|
1198
|
+
assert logp_perturbed < logp_fix
|
1199
|
+
|
1200
|
+
|
1201
|
+
def test_fit_fix_cov(self):
|
1202
|
+
rng = np.random.default_rng(4385269356937404)
|
1203
|
+
loc = rng.random(3)
|
1204
|
+
A = rng.random((3, 3))
|
1205
|
+
cov = np.dot(A, A.T)
|
1206
|
+
samples = multivariate_normal.rvs(mean=loc, cov=cov,
|
1207
|
+
size=100, random_state=rng)
|
1208
|
+
mean_free, cov_free = multivariate_normal.fit(samples)
|
1209
|
+
logp_free = multivariate_normal.logpdf(samples, mean=mean_free,
|
1210
|
+
cov=cov_free).sum()
|
1211
|
+
mean_fix, cov_fix = multivariate_normal.fit(samples, fix_cov=cov)
|
1212
|
+
assert_equal(mean_fix, np.mean(samples, axis=0))
|
1213
|
+
assert_equal(cov_fix, cov)
|
1214
|
+
logp_fix = multivariate_normal.logpdf(samples, mean=mean_fix,
|
1215
|
+
cov=cov_fix).sum()
|
1216
|
+
# test that fixed parameters result in lower likelihood than free
|
1217
|
+
# parameters
|
1218
|
+
assert logp_fix < logp_free
|
1219
|
+
# test that a small perturbation of the resulting parameters
|
1220
|
+
# has lower likelihood than the estimated parameters
|
1221
|
+
mean_perturbed = mean_fix + 1e-8 * rng.random(3)
|
1222
|
+
logp_perturbed = (multivariate_normal.logpdf(samples,
|
1223
|
+
mean=mean_perturbed,
|
1224
|
+
cov=cov_fix)
|
1225
|
+
).sum()
|
1226
|
+
assert logp_perturbed < logp_fix
|
1227
|
+
|
1228
|
+
|
1229
|
+
class TestMatrixNormal:
|
1230
|
+
|
1231
|
+
def test_bad_input(self):
|
1232
|
+
# Check that bad inputs raise errors
|
1233
|
+
num_rows = 4
|
1234
|
+
num_cols = 3
|
1235
|
+
M = np.full((num_rows,num_cols), 0.3)
|
1236
|
+
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
|
1237
|
+
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
|
1238
|
+
|
1239
|
+
# Incorrect dimensions
|
1240
|
+
assert_raises(ValueError, matrix_normal, np.zeros((5,4,3)))
|
1241
|
+
assert_raises(ValueError, matrix_normal, M, np.zeros(10), V)
|
1242
|
+
assert_raises(ValueError, matrix_normal, M, U, np.zeros(10))
|
1243
|
+
assert_raises(ValueError, matrix_normal, M, U, U)
|
1244
|
+
assert_raises(ValueError, matrix_normal, M, V, V)
|
1245
|
+
assert_raises(ValueError, matrix_normal, M.T, U, V)
|
1246
|
+
|
1247
|
+
e = np.linalg.LinAlgError
|
1248
|
+
# Singular covariance for the rvs method of a non-frozen instance
|
1249
|
+
assert_raises(e, matrix_normal.rvs,
|
1250
|
+
M, U, np.ones((num_cols, num_cols)))
|
1251
|
+
assert_raises(e, matrix_normal.rvs,
|
1252
|
+
M, np.ones((num_rows, num_rows)), V)
|
1253
|
+
# Singular covariance for a frozen instance
|
1254
|
+
assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols)))
|
1255
|
+
assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V)
|
1256
|
+
|
1257
|
+
def test_default_inputs(self):
|
1258
|
+
# Check that default argument handling works
|
1259
|
+
num_rows = 4
|
1260
|
+
num_cols = 3
|
1261
|
+
M = np.full((num_rows,num_cols), 0.3)
|
1262
|
+
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
|
1263
|
+
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
|
1264
|
+
Z = np.zeros((num_rows, num_cols))
|
1265
|
+
Zr = np.zeros((num_rows, 1))
|
1266
|
+
Zc = np.zeros((1, num_cols))
|
1267
|
+
Ir = np.identity(num_rows)
|
1268
|
+
Ic = np.identity(num_cols)
|
1269
|
+
I1 = np.identity(1)
|
1270
|
+
|
1271
|
+
assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape,
|
1272
|
+
(num_rows, num_cols))
|
1273
|
+
assert_equal(matrix_normal.rvs(mean=M).shape,
|
1274
|
+
(num_rows, num_cols))
|
1275
|
+
assert_equal(matrix_normal.rvs(rowcov=U).shape,
|
1276
|
+
(num_rows, 1))
|
1277
|
+
assert_equal(matrix_normal.rvs(colcov=V).shape,
|
1278
|
+
(1, num_cols))
|
1279
|
+
assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape,
|
1280
|
+
(num_rows, num_cols))
|
1281
|
+
assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape,
|
1282
|
+
(num_rows, num_cols))
|
1283
|
+
assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape,
|
1284
|
+
(num_rows, num_cols))
|
1285
|
+
|
1286
|
+
assert_equal(matrix_normal(mean=M).rowcov, Ir)
|
1287
|
+
assert_equal(matrix_normal(mean=M).colcov, Ic)
|
1288
|
+
assert_equal(matrix_normal(rowcov=U).mean, Zr)
|
1289
|
+
assert_equal(matrix_normal(rowcov=U).colcov, I1)
|
1290
|
+
assert_equal(matrix_normal(colcov=V).mean, Zc)
|
1291
|
+
assert_equal(matrix_normal(colcov=V).rowcov, I1)
|
1292
|
+
assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic)
|
1293
|
+
assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir)
|
1294
|
+
assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z)
|
1295
|
+
|
1296
|
+
def test_covariance_expansion(self):
|
1297
|
+
# Check that covariance can be specified with scalar or vector
|
1298
|
+
num_rows = 4
|
1299
|
+
num_cols = 3
|
1300
|
+
M = np.full((num_rows, num_cols), 0.3)
|
1301
|
+
Uv = np.full(num_rows, 0.2)
|
1302
|
+
Us = 0.2
|
1303
|
+
Vv = np.full(num_cols, 0.1)
|
1304
|
+
Vs = 0.1
|
1305
|
+
|
1306
|
+
Ir = np.identity(num_rows)
|
1307
|
+
Ic = np.identity(num_cols)
|
1308
|
+
|
1309
|
+
assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov,
|
1310
|
+
0.2*Ir)
|
1311
|
+
assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov,
|
1312
|
+
0.1*Ic)
|
1313
|
+
assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov,
|
1314
|
+
0.2*Ir)
|
1315
|
+
assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov,
|
1316
|
+
0.1*Ic)
|
1317
|
+
|
1318
|
+
def test_frozen_matrix_normal(self):
|
1319
|
+
for i in range(1,5):
|
1320
|
+
for j in range(1,5):
|
1321
|
+
M = np.full((i,j), 0.3)
|
1322
|
+
U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
|
1323
|
+
V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
|
1324
|
+
|
1325
|
+
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
|
1326
|
+
|
1327
|
+
rvs1 = frozen.rvs(random_state=1234)
|
1328
|
+
rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V,
|
1329
|
+
random_state=1234)
|
1330
|
+
assert_equal(rvs1, rvs2)
|
1331
|
+
|
1332
|
+
X = frozen.rvs(random_state=1234)
|
1333
|
+
|
1334
|
+
pdf1 = frozen.pdf(X)
|
1335
|
+
pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
|
1336
|
+
assert_equal(pdf1, pdf2)
|
1337
|
+
|
1338
|
+
logpdf1 = frozen.logpdf(X)
|
1339
|
+
logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
|
1340
|
+
assert_equal(logpdf1, logpdf2)
|
1341
|
+
|
1342
|
+
def test_matches_multivariate(self):
|
1343
|
+
# Check that the pdfs match those obtained by vectorising and
|
1344
|
+
# treating as a multivariate normal.
|
1345
|
+
for i in range(1,5):
|
1346
|
+
for j in range(1,5):
|
1347
|
+
M = np.full((i,j), 0.3)
|
1348
|
+
U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
|
1349
|
+
V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
|
1350
|
+
|
1351
|
+
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
|
1352
|
+
X = frozen.rvs(random_state=1234)
|
1353
|
+
pdf1 = frozen.pdf(X)
|
1354
|
+
logpdf1 = frozen.logpdf(X)
|
1355
|
+
entropy1 = frozen.entropy()
|
1356
|
+
|
1357
|
+
vecX = X.T.flatten()
|
1358
|
+
vecM = M.T.flatten()
|
1359
|
+
cov = np.kron(V,U)
|
1360
|
+
pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov)
|
1361
|
+
logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov)
|
1362
|
+
entropy2 = multivariate_normal.entropy(mean=vecM, cov=cov)
|
1363
|
+
|
1364
|
+
assert_allclose(pdf1, pdf2, rtol=1E-10)
|
1365
|
+
assert_allclose(logpdf1, logpdf2, rtol=1E-10)
|
1366
|
+
assert_allclose(entropy1, entropy2)
|
1367
|
+
|
1368
|
+
def test_array_input(self):
|
1369
|
+
# Check array of inputs has the same output as the separate entries.
|
1370
|
+
num_rows = 4
|
1371
|
+
num_cols = 3
|
1372
|
+
M = np.full((num_rows,num_cols), 0.3)
|
1373
|
+
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
|
1374
|
+
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
|
1375
|
+
N = 10
|
1376
|
+
|
1377
|
+
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
|
1378
|
+
X1 = frozen.rvs(size=N, random_state=1234)
|
1379
|
+
X2 = frozen.rvs(size=N, random_state=4321)
|
1380
|
+
X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0)
|
1381
|
+
assert_equal(X.shape, (2, N, num_rows, num_cols))
|
1382
|
+
|
1383
|
+
array_logpdf = frozen.logpdf(X)
|
1384
|
+
assert_equal(array_logpdf.shape, (2, N))
|
1385
|
+
for i in range(2):
|
1386
|
+
for j in range(N):
|
1387
|
+
separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M,
|
1388
|
+
rowcov=U, colcov=V)
|
1389
|
+
assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10)
|
1390
|
+
|
1391
|
+
def test_moments(self):
|
1392
|
+
# Check that the sample moments match the parameters
|
1393
|
+
num_rows = 4
|
1394
|
+
num_cols = 3
|
1395
|
+
M = np.full((num_rows,num_cols), 0.3)
|
1396
|
+
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
|
1397
|
+
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
|
1398
|
+
N = 1000
|
1399
|
+
|
1400
|
+
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
|
1401
|
+
X = frozen.rvs(size=N, random_state=1234)
|
1402
|
+
|
1403
|
+
sample_mean = np.mean(X,axis=0)
|
1404
|
+
assert_allclose(sample_mean, M, atol=0.1)
|
1405
|
+
|
1406
|
+
sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T)
|
1407
|
+
assert_allclose(sample_colcov, V, atol=0.1)
|
1408
|
+
|
1409
|
+
sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape(
|
1410
|
+
N*num_cols,num_rows).T)
|
1411
|
+
assert_allclose(sample_rowcov, U, atol=0.1)
|
1412
|
+
|
1413
|
+
def test_samples(self):
|
1414
|
+
# Regression test to ensure that we always generate the same stream of
|
1415
|
+
# random variates.
|
1416
|
+
actual = matrix_normal.rvs(
|
1417
|
+
mean=np.array([[1, 2], [3, 4]]),
|
1418
|
+
rowcov=np.array([[4, -1], [-1, 2]]),
|
1419
|
+
colcov=np.array([[5, 1], [1, 10]]),
|
1420
|
+
random_state=np.random.default_rng(0),
|
1421
|
+
size=2
|
1422
|
+
)
|
1423
|
+
expected = np.array(
|
1424
|
+
[[[1.56228264238181, -1.24136424071189],
|
1425
|
+
[2.46865788392114, 6.22964440489445]],
|
1426
|
+
[[3.86405716144353, 10.73714311429529],
|
1427
|
+
[2.59428444080606, 5.79987854490876]]]
|
1428
|
+
)
|
1429
|
+
assert_allclose(actual, expected)
|
1430
|
+
|
1431
|
+
|
1432
|
+
class TestDirichlet:
|
1433
|
+
|
1434
|
+
def test_frozen_dirichlet(self):
|
1435
|
+
np.random.seed(2846)
|
1436
|
+
|
1437
|
+
n = np.random.randint(1, 32)
|
1438
|
+
alpha = np.random.uniform(10e-10, 100, n)
|
1439
|
+
|
1440
|
+
d = dirichlet(alpha)
|
1441
|
+
|
1442
|
+
assert_equal(d.var(), dirichlet.var(alpha))
|
1443
|
+
assert_equal(d.mean(), dirichlet.mean(alpha))
|
1444
|
+
assert_equal(d.entropy(), dirichlet.entropy(alpha))
|
1445
|
+
num_tests = 10
|
1446
|
+
for i in range(num_tests):
|
1447
|
+
x = np.random.uniform(10e-10, 100, n)
|
1448
|
+
x /= np.sum(x)
|
1449
|
+
assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
|
1450
|
+
assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
|
1451
|
+
|
1452
|
+
def test_numpy_rvs_shape_compatibility(self):
|
1453
|
+
np.random.seed(2846)
|
1454
|
+
alpha = np.array([1.0, 2.0, 3.0])
|
1455
|
+
x = np.random.dirichlet(alpha, size=7)
|
1456
|
+
assert_equal(x.shape, (7, 3))
|
1457
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1458
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1459
|
+
dirichlet.pdf(x.T, alpha)
|
1460
|
+
dirichlet.pdf(x.T[:-1], alpha)
|
1461
|
+
dirichlet.logpdf(x.T, alpha)
|
1462
|
+
dirichlet.logpdf(x.T[:-1], alpha)
|
1463
|
+
|
1464
|
+
def test_alpha_with_zeros(self):
|
1465
|
+
np.random.seed(2846)
|
1466
|
+
alpha = [1.0, 0.0, 3.0]
|
1467
|
+
# don't pass invalid alpha to np.random.dirichlet
|
1468
|
+
x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T
|
1469
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1470
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1471
|
+
|
1472
|
+
def test_alpha_with_negative_entries(self):
|
1473
|
+
np.random.seed(2846)
|
1474
|
+
alpha = [1.0, -2.0, 3.0]
|
1475
|
+
# don't pass invalid alpha to np.random.dirichlet
|
1476
|
+
x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T
|
1477
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1478
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1479
|
+
|
1480
|
+
def test_data_with_zeros(self):
|
1481
|
+
alpha = np.array([1.0, 2.0, 3.0, 4.0])
|
1482
|
+
x = np.array([0.1, 0.0, 0.2, 0.7])
|
1483
|
+
dirichlet.pdf(x, alpha)
|
1484
|
+
dirichlet.logpdf(x, alpha)
|
1485
|
+
alpha = np.array([1.0, 1.0, 1.0, 1.0])
|
1486
|
+
assert_almost_equal(dirichlet.pdf(x, alpha), 6)
|
1487
|
+
assert_almost_equal(dirichlet.logpdf(x, alpha), np.log(6))
|
1488
|
+
|
1489
|
+
def test_data_with_zeros_and_small_alpha(self):
|
1490
|
+
alpha = np.array([1.0, 0.5, 3.0, 4.0])
|
1491
|
+
x = np.array([0.1, 0.0, 0.2, 0.7])
|
1492
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1493
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1494
|
+
|
1495
|
+
def test_data_with_negative_entries(self):
|
1496
|
+
alpha = np.array([1.0, 2.0, 3.0, 4.0])
|
1497
|
+
x = np.array([0.1, -0.1, 0.3, 0.7])
|
1498
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1499
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1500
|
+
|
1501
|
+
def test_data_with_too_large_entries(self):
|
1502
|
+
alpha = np.array([1.0, 2.0, 3.0, 4.0])
|
1503
|
+
x = np.array([0.1, 1.1, 0.3, 0.7])
|
1504
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1505
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1506
|
+
|
1507
|
+
def test_data_too_deep_c(self):
|
1508
|
+
alpha = np.array([1.0, 2.0, 3.0])
|
1509
|
+
x = np.full((2, 7, 7), 1 / 14)
|
1510
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1511
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1512
|
+
|
1513
|
+
def test_alpha_too_deep(self):
|
1514
|
+
alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
|
1515
|
+
x = np.full((2, 2, 7), 1 / 4)
|
1516
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1517
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1518
|
+
|
1519
|
+
def test_alpha_correct_depth(self):
|
1520
|
+
alpha = np.array([1.0, 2.0, 3.0])
|
1521
|
+
x = np.full((3, 7), 1 / 3)
|
1522
|
+
dirichlet.pdf(x, alpha)
|
1523
|
+
dirichlet.logpdf(x, alpha)
|
1524
|
+
|
1525
|
+
def test_non_simplex_data(self):
|
1526
|
+
alpha = np.array([1.0, 2.0, 3.0])
|
1527
|
+
x = np.full((3, 7), 1 / 2)
|
1528
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1529
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1530
|
+
|
1531
|
+
def test_data_vector_too_short(self):
|
1532
|
+
alpha = np.array([1.0, 2.0, 3.0, 4.0])
|
1533
|
+
x = np.full((2, 7), 1 / 2)
|
1534
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1535
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1536
|
+
|
1537
|
+
def test_data_vector_too_long(self):
|
1538
|
+
alpha = np.array([1.0, 2.0, 3.0, 4.0])
|
1539
|
+
x = np.full((5, 7), 1 / 5)
|
1540
|
+
assert_raises(ValueError, dirichlet.pdf, x, alpha)
|
1541
|
+
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
|
1542
|
+
|
1543
|
+
def test_mean_var_cov(self):
|
1544
|
+
# Reference values calculated by hand and confirmed with Mathematica, e.g.
|
1545
|
+
# `Covariance[DirichletDistribution[{ 1, 0.8, 0.2, 10^-300}]]`
|
1546
|
+
alpha = np.array([1., 0.8, 0.2])
|
1547
|
+
d = dirichlet(alpha)
|
1548
|
+
|
1549
|
+
expected_mean = [0.5, 0.4, 0.1]
|
1550
|
+
expected_var = [1. / 12., 0.08, 0.03]
|
1551
|
+
expected_cov = [
|
1552
|
+
[ 1. / 12, -1. / 15, -1. / 60],
|
1553
|
+
[-1. / 15, 2. / 25, -1. / 75],
|
1554
|
+
[-1. / 60, -1. / 75, 3. / 100],
|
1555
|
+
]
|
1556
|
+
|
1557
|
+
assert_array_almost_equal(d.mean(), expected_mean)
|
1558
|
+
assert_array_almost_equal(d.var(), expected_var)
|
1559
|
+
assert_array_almost_equal(d.cov(), expected_cov)
|
1560
|
+
|
1561
|
+
def test_scalar_values(self):
|
1562
|
+
alpha = np.array([0.2])
|
1563
|
+
d = dirichlet(alpha)
|
1564
|
+
|
1565
|
+
# For alpha of length 1, mean and var should be scalar instead of array
|
1566
|
+
assert_equal(d.mean().ndim, 0)
|
1567
|
+
assert_equal(d.var().ndim, 0)
|
1568
|
+
|
1569
|
+
assert_equal(d.pdf([1.]).ndim, 0)
|
1570
|
+
assert_equal(d.logpdf([1.]).ndim, 0)
|
1571
|
+
|
1572
|
+
def test_K_and_K_minus_1_calls_equal(self):
|
1573
|
+
# Test that calls with K and K-1 entries yield the same results.
|
1574
|
+
|
1575
|
+
np.random.seed(2846)
|
1576
|
+
|
1577
|
+
n = np.random.randint(1, 32)
|
1578
|
+
alpha = np.random.uniform(10e-10, 100, n)
|
1579
|
+
|
1580
|
+
d = dirichlet(alpha)
|
1581
|
+
num_tests = 10
|
1582
|
+
for i in range(num_tests):
|
1583
|
+
x = np.random.uniform(10e-10, 100, n)
|
1584
|
+
x /= np.sum(x)
|
1585
|
+
assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
|
1586
|
+
|
1587
|
+
def test_multiple_entry_calls(self):
|
1588
|
+
# Test that calls with multiple x vectors as matrix work
|
1589
|
+
np.random.seed(2846)
|
1590
|
+
|
1591
|
+
n = np.random.randint(1, 32)
|
1592
|
+
alpha = np.random.uniform(10e-10, 100, n)
|
1593
|
+
d = dirichlet(alpha)
|
1594
|
+
|
1595
|
+
num_tests = 10
|
1596
|
+
num_multiple = 5
|
1597
|
+
xm = None
|
1598
|
+
for i in range(num_tests):
|
1599
|
+
for m in range(num_multiple):
|
1600
|
+
x = np.random.uniform(10e-10, 100, n)
|
1601
|
+
x /= np.sum(x)
|
1602
|
+
if xm is not None:
|
1603
|
+
xm = np.vstack((xm, x))
|
1604
|
+
else:
|
1605
|
+
xm = x
|
1606
|
+
rm = d.pdf(xm.T)
|
1607
|
+
rs = None
|
1608
|
+
for xs in xm:
|
1609
|
+
r = d.pdf(xs)
|
1610
|
+
if rs is not None:
|
1611
|
+
rs = np.append(rs, r)
|
1612
|
+
else:
|
1613
|
+
rs = r
|
1614
|
+
assert_array_almost_equal(rm, rs)
|
1615
|
+
|
1616
|
+
def test_2D_dirichlet_is_beta(self):
|
1617
|
+
np.random.seed(2846)
|
1618
|
+
|
1619
|
+
alpha = np.random.uniform(10e-10, 100, 2)
|
1620
|
+
d = dirichlet(alpha)
|
1621
|
+
b = beta(alpha[0], alpha[1])
|
1622
|
+
|
1623
|
+
num_tests = 10
|
1624
|
+
for i in range(num_tests):
|
1625
|
+
x = np.random.uniform(10e-10, 100, 2)
|
1626
|
+
x /= np.sum(x)
|
1627
|
+
assert_almost_equal(b.pdf(x), d.pdf([x]))
|
1628
|
+
|
1629
|
+
assert_almost_equal(b.mean(), d.mean()[0])
|
1630
|
+
assert_almost_equal(b.var(), d.var()[0])
|
1631
|
+
|
1632
|
+
|
1633
|
+
def test_multivariate_normal_dimensions_mismatch():
|
1634
|
+
# Regression test for GH #3493. Check that setting up a PDF with a mean of
|
1635
|
+
# length M and a covariance matrix of size (N, N), where M != N, raises a
|
1636
|
+
# ValueError with an informative error message.
|
1637
|
+
mu = np.array([0.0, 0.0])
|
1638
|
+
sigma = np.array([[1.0]])
|
1639
|
+
|
1640
|
+
assert_raises(ValueError, multivariate_normal, mu, sigma)
|
1641
|
+
|
1642
|
+
# A simple check that the right error message was passed along. Checking
|
1643
|
+
# that the entire message is there, word for word, would be somewhat
|
1644
|
+
# fragile, so we just check for the leading part.
|
1645
|
+
try:
|
1646
|
+
multivariate_normal(mu, sigma)
|
1647
|
+
except ValueError as e:
|
1648
|
+
msg = "Dimension mismatch"
|
1649
|
+
assert_equal(str(e)[:len(msg)], msg)
|
1650
|
+
|
1651
|
+
|
1652
|
+
class TestWishart:
|
1653
|
+
def test_scale_dimensions(self):
|
1654
|
+
# Test that we can call the Wishart with various scale dimensions
|
1655
|
+
|
1656
|
+
# Test case: dim=1, scale=1
|
1657
|
+
true_scale = np.array(1, ndmin=2)
|
1658
|
+
scales = [
|
1659
|
+
1, # scalar
|
1660
|
+
[1], # iterable
|
1661
|
+
np.array(1), # 0-dim
|
1662
|
+
np.r_[1], # 1-dim
|
1663
|
+
np.array(1, ndmin=2) # 2-dim
|
1664
|
+
]
|
1665
|
+
for scale in scales:
|
1666
|
+
w = wishart(1, scale)
|
1667
|
+
assert_equal(w.scale, true_scale)
|
1668
|
+
assert_equal(w.scale.shape, true_scale.shape)
|
1669
|
+
|
1670
|
+
# Test case: dim=2, scale=[[1,0]
|
1671
|
+
# [0,2]
|
1672
|
+
true_scale = np.array([[1,0],
|
1673
|
+
[0,2]])
|
1674
|
+
scales = [
|
1675
|
+
[1,2], # iterable
|
1676
|
+
np.r_[1,2], # 1-dim
|
1677
|
+
np.array([[1,0], # 2-dim
|
1678
|
+
[0,2]])
|
1679
|
+
]
|
1680
|
+
for scale in scales:
|
1681
|
+
w = wishart(2, scale)
|
1682
|
+
assert_equal(w.scale, true_scale)
|
1683
|
+
assert_equal(w.scale.shape, true_scale.shape)
|
1684
|
+
|
1685
|
+
# We cannot call with a df < dim - 1
|
1686
|
+
assert_raises(ValueError, wishart, 1, np.eye(2))
|
1687
|
+
|
1688
|
+
# But we can call with dim - 1 < df < dim
|
1689
|
+
wishart(1.1, np.eye(2)) # no error
|
1690
|
+
# see gh-5562
|
1691
|
+
|
1692
|
+
# We cannot call with a 3-dimension array
|
1693
|
+
scale = np.array(1, ndmin=3)
|
1694
|
+
assert_raises(ValueError, wishart, 1, scale)
|
1695
|
+
|
1696
|
+
def test_quantile_dimensions(self):
|
1697
|
+
# Test that we can call the Wishart rvs with various quantile dimensions
|
1698
|
+
|
1699
|
+
# If dim == 1, consider x.shape = [1,1,1]
|
1700
|
+
X = [
|
1701
|
+
1, # scalar
|
1702
|
+
[1], # iterable
|
1703
|
+
np.array(1), # 0-dim
|
1704
|
+
np.r_[1], # 1-dim
|
1705
|
+
np.array(1, ndmin=2), # 2-dim
|
1706
|
+
np.array([1], ndmin=3) # 3-dim
|
1707
|
+
]
|
1708
|
+
|
1709
|
+
w = wishart(1,1)
|
1710
|
+
density = w.pdf(np.array(1, ndmin=3))
|
1711
|
+
for x in X:
|
1712
|
+
assert_equal(w.pdf(x), density)
|
1713
|
+
|
1714
|
+
# If dim == 1, consider x.shape = [1,1,*]
|
1715
|
+
X = [
|
1716
|
+
[1,2,3], # iterable
|
1717
|
+
np.r_[1,2,3], # 1-dim
|
1718
|
+
np.array([1,2,3], ndmin=3) # 3-dim
|
1719
|
+
]
|
1720
|
+
|
1721
|
+
w = wishart(1,1)
|
1722
|
+
density = w.pdf(np.array([1,2,3], ndmin=3))
|
1723
|
+
for x in X:
|
1724
|
+
assert_equal(w.pdf(x), density)
|
1725
|
+
|
1726
|
+
# If dim == 2, consider x.shape = [2,2,1]
|
1727
|
+
# where x[:,:,*] = np.eye(1)*2
|
1728
|
+
X = [
|
1729
|
+
2, # scalar
|
1730
|
+
[2,2], # iterable
|
1731
|
+
np.array(2), # 0-dim
|
1732
|
+
np.r_[2,2], # 1-dim
|
1733
|
+
np.array([[2,0],
|
1734
|
+
[0,2]]), # 2-dim
|
1735
|
+
np.array([[2,0],
|
1736
|
+
[0,2]])[:,:,np.newaxis] # 3-dim
|
1737
|
+
]
|
1738
|
+
|
1739
|
+
w = wishart(2,np.eye(2))
|
1740
|
+
density = w.pdf(np.array([[2,0],
|
1741
|
+
[0,2]])[:,:,np.newaxis])
|
1742
|
+
for x in X:
|
1743
|
+
assert_equal(w.pdf(x), density)
|
1744
|
+
|
1745
|
+
def test_frozen(self):
|
1746
|
+
# Test that the frozen and non-frozen Wishart gives the same answers
|
1747
|
+
|
1748
|
+
# Construct an arbitrary positive definite scale matrix
|
1749
|
+
dim = 4
|
1750
|
+
scale = np.diag(np.arange(dim)+1)
|
1751
|
+
scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
|
1752
|
+
scale = np.dot(scale.T, scale)
|
1753
|
+
|
1754
|
+
# Construct a collection of positive definite matrices to test the PDF
|
1755
|
+
X = []
|
1756
|
+
for i in range(5):
|
1757
|
+
x = np.diag(np.arange(dim)+(i+1)**2)
|
1758
|
+
x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
|
1759
|
+
x = np.dot(x.T, x)
|
1760
|
+
X.append(x)
|
1761
|
+
X = np.array(X).T
|
1762
|
+
|
1763
|
+
# Construct a 1D and 2D set of parameters
|
1764
|
+
parameters = [
|
1765
|
+
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
|
1766
|
+
(10, scale, X)
|
1767
|
+
]
|
1768
|
+
|
1769
|
+
for (df, scale, x) in parameters:
|
1770
|
+
w = wishart(df, scale)
|
1771
|
+
assert_equal(w.var(), wishart.var(df, scale))
|
1772
|
+
assert_equal(w.mean(), wishart.mean(df, scale))
|
1773
|
+
assert_equal(w.mode(), wishart.mode(df, scale))
|
1774
|
+
assert_equal(w.entropy(), wishart.entropy(df, scale))
|
1775
|
+
assert_equal(w.pdf(x), wishart.pdf(x, df, scale))
|
1776
|
+
|
1777
|
+
def test_wishart_2D_rvs(self):
|
1778
|
+
dim = 3
|
1779
|
+
df = 10
|
1780
|
+
|
1781
|
+
# Construct a simple non-diagonal positive definite matrix
|
1782
|
+
scale = np.eye(dim)
|
1783
|
+
scale[0,1] = 0.5
|
1784
|
+
scale[1,0] = 0.5
|
1785
|
+
|
1786
|
+
# Construct frozen Wishart random variables
|
1787
|
+
w = wishart(df, scale)
|
1788
|
+
|
1789
|
+
# Get the generated random variables from a known seed
|
1790
|
+
rng = np.random.RandomState(248042)
|
1791
|
+
w_rvs = wishart.rvs(df, scale, random_state=rng)
|
1792
|
+
rng = np.random.RandomState(248042)
|
1793
|
+
frozen_w_rvs = w.rvs(random_state=rng)
|
1794
|
+
|
1795
|
+
# Manually calculate what it should be, based on the Bartlett (1933)
|
1796
|
+
# decomposition of a Wishart into D A A' D', where D is the Cholesky
|
1797
|
+
# factorization of the scale matrix and A is the lower triangular matrix
|
1798
|
+
# with the square root of chi^2 variates on the diagonal and N(0,1)
|
1799
|
+
# variates in the lower triangle.
|
1800
|
+
rng = np.random.RandomState(248042)
|
1801
|
+
covariances = rng.normal(size=3)
|
1802
|
+
variances = np.r_[
|
1803
|
+
rng.chisquare(df),
|
1804
|
+
rng.chisquare(df-1),
|
1805
|
+
rng.chisquare(df-2),
|
1806
|
+
]**0.5
|
1807
|
+
|
1808
|
+
# Construct the lower-triangular A matrix
|
1809
|
+
A = np.diag(variances)
|
1810
|
+
A[np.tril_indices(dim, k=-1)] = covariances
|
1811
|
+
|
1812
|
+
# Wishart random variate
|
1813
|
+
D = np.linalg.cholesky(scale)
|
1814
|
+
DA = D.dot(A)
|
1815
|
+
manual_w_rvs = np.dot(DA, DA.T)
|
1816
|
+
|
1817
|
+
# Test for equality
|
1818
|
+
assert_allclose(w_rvs, manual_w_rvs)
|
1819
|
+
assert_allclose(frozen_w_rvs, manual_w_rvs)
|
1820
|
+
|
1821
|
+
def test_1D_is_chisquared(self):
|
1822
|
+
# The 1-dimensional Wishart with an identity scale matrix is just a
|
1823
|
+
# chi-squared distribution.
|
1824
|
+
# Test variance, mean, entropy, pdf
|
1825
|
+
# Kolgomorov-Smirnov test for rvs
|
1826
|
+
rng = np.random.default_rng(482974)
|
1827
|
+
|
1828
|
+
sn = 500
|
1829
|
+
dim = 1
|
1830
|
+
scale = np.eye(dim)
|
1831
|
+
|
1832
|
+
df_range = np.arange(1, 10, 2, dtype=float)
|
1833
|
+
X = np.linspace(0.1,10,num=10)
|
1834
|
+
for df in df_range:
|
1835
|
+
w = wishart(df, scale)
|
1836
|
+
c = chi2(df)
|
1837
|
+
|
1838
|
+
# Statistics
|
1839
|
+
assert_allclose(w.var(), c.var())
|
1840
|
+
assert_allclose(w.mean(), c.mean())
|
1841
|
+
assert_allclose(w.entropy(), c.entropy())
|
1842
|
+
|
1843
|
+
# PDF
|
1844
|
+
assert_allclose(w.pdf(X), c.pdf(X))
|
1845
|
+
|
1846
|
+
# rvs
|
1847
|
+
rvs = w.rvs(size=sn, random_state=rng)
|
1848
|
+
args = (df,)
|
1849
|
+
alpha = 0.01
|
1850
|
+
check_distribution_rvs('chi2', args, alpha, rvs)
|
1851
|
+
|
1852
|
+
def test_is_scaled_chisquared(self):
|
1853
|
+
# The 2-dimensional Wishart with an arbitrary scale matrix can be
|
1854
|
+
# transformed to a scaled chi-squared distribution.
|
1855
|
+
# For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
|
1856
|
+
# :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
|
1857
|
+
rng = np.random.default_rng(482974)
|
1858
|
+
|
1859
|
+
sn = 500
|
1860
|
+
df = 10
|
1861
|
+
dim = 4
|
1862
|
+
# Construct an arbitrary positive definite matrix
|
1863
|
+
scale = np.diag(np.arange(4)+1)
|
1864
|
+
scale[np.tril_indices(4, k=-1)] = np.arange(6)
|
1865
|
+
scale = np.dot(scale.T, scale)
|
1866
|
+
# Use :math:`\lambda = [1, \dots, 1]'`
|
1867
|
+
lamda = np.ones((dim,1))
|
1868
|
+
sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
|
1869
|
+
w = wishart(df, sigma_lamda)
|
1870
|
+
c = chi2(df, scale=sigma_lamda)
|
1871
|
+
|
1872
|
+
# Statistics
|
1873
|
+
assert_allclose(w.var(), c.var())
|
1874
|
+
assert_allclose(w.mean(), c.mean())
|
1875
|
+
assert_allclose(w.entropy(), c.entropy())
|
1876
|
+
|
1877
|
+
# PDF
|
1878
|
+
X = np.linspace(0.1,10,num=10)
|
1879
|
+
assert_allclose(w.pdf(X), c.pdf(X))
|
1880
|
+
|
1881
|
+
# rvs
|
1882
|
+
rvs = w.rvs(size=sn, random_state=rng)
|
1883
|
+
args = (df,0,sigma_lamda)
|
1884
|
+
alpha = 0.01
|
1885
|
+
check_distribution_rvs('chi2', args, alpha, rvs)
|
1886
|
+
|
1887
|
+
class TestMultinomial:
|
1888
|
+
def test_logpmf(self):
|
1889
|
+
vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7))
|
1890
|
+
assert_allclose(vals1, -1.483270127243324, rtol=1e-8)
|
1891
|
+
|
1892
|
+
vals2 = multinomial.logpmf([3, 4], 0, [.3, .7])
|
1893
|
+
assert vals2 == -np.inf
|
1894
|
+
|
1895
|
+
vals3 = multinomial.logpmf([0, 0], 0, [.3, .7])
|
1896
|
+
assert vals3 == 0
|
1897
|
+
|
1898
|
+
vals4 = multinomial.logpmf([3, 4], 0, [-2, 3])
|
1899
|
+
assert_allclose(vals4, np.nan, rtol=1e-8)
|
1900
|
+
|
1901
|
+
def test_reduces_binomial(self):
|
1902
|
+
# test that the multinomial pmf reduces to the binomial pmf in the 2d
|
1903
|
+
# case
|
1904
|
+
val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7))
|
1905
|
+
val2 = binom.logpmf(3, 7, 0.3)
|
1906
|
+
assert_allclose(val1, val2, rtol=1e-8)
|
1907
|
+
|
1908
|
+
val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9))
|
1909
|
+
val2 = binom.pmf(6, 14, 0.1)
|
1910
|
+
assert_allclose(val1, val2, rtol=1e-8)
|
1911
|
+
|
1912
|
+
def test_R(self):
|
1913
|
+
# test against the values produced by this R code
|
1914
|
+
# (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html)
|
1915
|
+
# X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
|
1916
|
+
# X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
|
1917
|
+
# X
|
1918
|
+
# apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5)))
|
1919
|
+
|
1920
|
+
n, p = 3, [1./8, 2./8, 5./8]
|
1921
|
+
r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375,
|
1922
|
+
(2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125,
|
1923
|
+
(0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500,
|
1924
|
+
(2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500,
|
1925
|
+
(1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000}
|
1926
|
+
for x in r_vals:
|
1927
|
+
assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14)
|
1928
|
+
|
1929
|
+
@pytest.mark.parametrize("n", [0, 3])
|
1930
|
+
def test_rvs_np(self, n):
|
1931
|
+
# test that .rvs agrees w/numpy
|
1932
|
+
message = "Some rows of `p` do not sum to 1.0 within..."
|
1933
|
+
with pytest.warns(FutureWarning, match=message):
|
1934
|
+
rndm = np.random.RandomState(123)
|
1935
|
+
sc_rvs = multinomial.rvs(n, [1/4.]*3, size=7, random_state=123)
|
1936
|
+
np_rvs = rndm.multinomial(n, [1/4.]*3, size=7)
|
1937
|
+
assert_equal(sc_rvs, np_rvs)
|
1938
|
+
with pytest.warns(FutureWarning, match=message):
|
1939
|
+
rndm = np.random.RandomState(123)
|
1940
|
+
sc_rvs = multinomial.rvs(n, [1/4.]*5, size=7, random_state=123)
|
1941
|
+
np_rvs = rndm.multinomial(n, [1/4.]*5, size=7)
|
1942
|
+
assert_equal(sc_rvs, np_rvs)
|
1943
|
+
|
1944
|
+
def test_pmf(self):
|
1945
|
+
vals0 = multinomial.pmf((5,), 5, (1,))
|
1946
|
+
assert_allclose(vals0, 1, rtol=1e-8)
|
1947
|
+
|
1948
|
+
vals1 = multinomial.pmf((3,4), 7, (.3, .7))
|
1949
|
+
assert_allclose(vals1, .22689449999999994, rtol=1e-8)
|
1950
|
+
|
1951
|
+
vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8,
|
1952
|
+
(.1, .9))
|
1953
|
+
assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8)
|
1954
|
+
|
1955
|
+
x = np.empty((0,2), dtype=np.float64)
|
1956
|
+
vals3 = multinomial.pmf(x, 4, (.3, .7))
|
1957
|
+
assert_equal(vals3, np.empty([], dtype=np.float64))
|
1958
|
+
|
1959
|
+
vals4 = multinomial.pmf([1,2], 4, (.3, .7))
|
1960
|
+
assert_allclose(vals4, 0, rtol=1e-8)
|
1961
|
+
|
1962
|
+
vals5 = multinomial.pmf([3, 3, 0], 6, [2/3.0, 1/3.0, 0])
|
1963
|
+
assert_allclose(vals5, 0.219478737997, rtol=1e-8)
|
1964
|
+
|
1965
|
+
vals5 = multinomial.pmf([0, 0, 0], 0, [2/3.0, 1/3.0, 0])
|
1966
|
+
assert vals5 == 1
|
1967
|
+
|
1968
|
+
vals6 = multinomial.pmf([2, 1, 0], 0, [2/3.0, 1/3.0, 0])
|
1969
|
+
assert vals6 == 0
|
1970
|
+
|
1971
|
+
def test_pmf_broadcasting(self):
|
1972
|
+
vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]])
|
1973
|
+
assert_allclose(vals0, [.243, .384], rtol=1e-8)
|
1974
|
+
|
1975
|
+
vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9])
|
1976
|
+
assert_allclose(vals1, [.243, 0], rtol=1e-8)
|
1977
|
+
|
1978
|
+
vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9])
|
1979
|
+
assert_allclose(vals2, [[.243, 0]], rtol=1e-8)
|
1980
|
+
|
1981
|
+
vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9])
|
1982
|
+
assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8)
|
1983
|
+
|
1984
|
+
vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9])
|
1985
|
+
assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8)
|
1986
|
+
|
1987
|
+
@pytest.mark.parametrize("n", [0, 5])
|
1988
|
+
def test_cov(self, n):
|
1989
|
+
cov1 = multinomial.cov(n, (.2, .3, .5))
|
1990
|
+
cov2 = [[n*.2*.8, -n*.2*.3, -n*.2*.5],
|
1991
|
+
[-n*.3*.2, n*.3*.7, -n*.3*.5],
|
1992
|
+
[-n*.5*.2, -n*.5*.3, n*.5*.5]]
|
1993
|
+
assert_allclose(cov1, cov2, rtol=1e-8)
|
1994
|
+
|
1995
|
+
def test_cov_broadcasting(self):
|
1996
|
+
cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]])
|
1997
|
+
cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]]
|
1998
|
+
assert_allclose(cov1, cov2, rtol=1e-8)
|
1999
|
+
|
2000
|
+
cov3 = multinomial.cov([4, 5], [.1, .9])
|
2001
|
+
cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]]
|
2002
|
+
assert_allclose(cov3, cov4, rtol=1e-8)
|
2003
|
+
|
2004
|
+
cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
|
2005
|
+
cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]],
|
2006
|
+
[[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]]
|
2007
|
+
assert_allclose(cov5, cov6, rtol=1e-8)
|
2008
|
+
|
2009
|
+
@pytest.mark.parametrize("n", [0, 2])
|
2010
|
+
def test_entropy(self, n):
|
2011
|
+
# this is equivalent to a binomial distribution with n=2, so the
|
2012
|
+
# entropy .77899774929 is easily computed "by hand"
|
2013
|
+
ent0 = multinomial.entropy(n, [.2, .8])
|
2014
|
+
assert_allclose(ent0, binom.entropy(n, .2), rtol=1e-8)
|
2015
|
+
|
2016
|
+
def test_entropy_broadcasting(self):
|
2017
|
+
ent0 = multinomial.entropy([2, 3], [.2, .8])
|
2018
|
+
assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)],
|
2019
|
+
rtol=1e-8)
|
2020
|
+
|
2021
|
+
ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]])
|
2022
|
+
assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)],
|
2023
|
+
rtol=1e-8)
|
2024
|
+
|
2025
|
+
ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]])
|
2026
|
+
assert_allclose(ent2,
|
2027
|
+
[[binom.entropy(7, .3), binom.entropy(7, .4)],
|
2028
|
+
[binom.entropy(8, .3), binom.entropy(8, .4)]],
|
2029
|
+
rtol=1e-8)
|
2030
|
+
|
2031
|
+
@pytest.mark.parametrize("n", [0, 5])
|
2032
|
+
def test_mean(self, n):
|
2033
|
+
mean1 = multinomial.mean(n, [.2, .8])
|
2034
|
+
assert_allclose(mean1, [n*.2, n*.8], rtol=1e-8)
|
2035
|
+
|
2036
|
+
def test_mean_broadcasting(self):
|
2037
|
+
mean1 = multinomial.mean([5, 6], [.2, .8])
|
2038
|
+
assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8)
|
2039
|
+
|
2040
|
+
def test_frozen(self):
|
2041
|
+
# The frozen distribution should agree with the regular one
|
2042
|
+
np.random.seed(1234)
|
2043
|
+
n = 12
|
2044
|
+
pvals = (.1, .2, .3, .4)
|
2045
|
+
x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]]
|
2046
|
+
x = np.asarray(x, dtype=np.float64)
|
2047
|
+
mn_frozen = multinomial(n, pvals)
|
2048
|
+
assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals))
|
2049
|
+
assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals))
|
2050
|
+
assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals))
|
2051
|
+
|
2052
|
+
def test_gh_11860(self):
|
2053
|
+
# gh-11860 reported cases in which the adjustments made by multinomial
|
2054
|
+
# to the last element of `p` can cause `nan`s even when the input is
|
2055
|
+
# essentially valid. Check that a pathological case returns a finite,
|
2056
|
+
# nonzero result. (This would fail in main before the PR.)
|
2057
|
+
n = 88
|
2058
|
+
rng = np.random.default_rng(8879715917488330089)
|
2059
|
+
p = rng.random(n)
|
2060
|
+
p[-1] = 1e-30
|
2061
|
+
p /= np.sum(p)
|
2062
|
+
x = np.ones(n)
|
2063
|
+
logpmf = multinomial.logpmf(x, n, p)
|
2064
|
+
assert np.isfinite(logpmf)
|
2065
|
+
|
2066
|
+
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
|
2067
|
+
def test_gh_22565(self, dtype):
|
2068
|
+
# Same issue as gh-11860 above, essentially, but the original
|
2069
|
+
# fix didn't completely solve the problem.
|
2070
|
+
n = 19
|
2071
|
+
p = np.asarray([0.2, 0.2, 0.2, 0.2, 0.2], dtype=dtype)
|
2072
|
+
res1 = multinomial.pmf(x=[1, 2, 5, 7, 4], n=n, p=p)
|
2073
|
+
res2 = multinomial.pmf(x=[1, 2, 4, 5, 7], n=n, p=p)
|
2074
|
+
np.testing.assert_allclose(res1, res2, rtol=1e-15)
|
2075
|
+
|
2076
|
+
|
2077
|
+
class TestInvwishart:
    """Tests for scipy.stats.invwishart (inverse Wishart distribution)."""

    def test_frozen(self):
        # Test that the frozen and non-frozen inverse Wishart gives the same
        # answers

        # Construct an arbitrary positive definite scale matrix
        dim = 4
        scale = np.diag(np.arange(dim)+1)
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
        scale = np.dot(scale.T, scale)

        # Construct a collection of positive definite matrices to test the PDF
        X = []
        for i in range(5):
            x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
            x = np.dot(x.T, x)
            X.append(x)
        X = np.array(X).T

        # Construct a 1D and 2D set of parameters
        parameters = [
            (10, 1, np.linspace(0.1, 10, 5)), # 1D case
            (10, scale, X)
        ]

        # NOTE: loop variable `scale` rebinds the outer `scale` — intentional here.
        for (df, scale, x) in parameters:
            iw = invwishart(df, scale)
            assert_equal(iw.var(), invwishart.var(df, scale))
            assert_equal(iw.mean(), invwishart.mean(df, scale))
            assert_equal(iw.mode(), invwishart.mode(df, scale))
            assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))

    def test_1D_is_invgamma(self):
        # The 1-dimensional inverse Wishart with an identity scale matrix is
        # just an inverse gamma distribution.
        # Test variance, mean, pdf, entropy
        # Kolgomorov-Smirnov test for rvs
        rng = np.random.RandomState(482974)

        sn = 500
        dim = 1
        scale = np.eye(dim)

        df_range = np.arange(5, 20, 2, dtype=float)
        X = np.linspace(0.1,10,num=10)
        for df in df_range:
            iw = invwishart(df, scale)
            ig = invgamma(df/2, scale=1./2)

            # Statistics
            assert_allclose(iw.var(), ig.var())
            assert_allclose(iw.mean(), ig.mean())

            # PDF
            assert_allclose(iw.pdf(X), ig.pdf(X))

            # rvs
            rvs = iw.rvs(size=sn, random_state=rng)
            args = (df/2, 0, 1./2)
            alpha = 0.01
            check_distribution_rvs('invgamma', args, alpha, rvs)

            # entropy
            assert_allclose(iw.entropy(), ig.entropy())

    def test_invwishart_2D_rvs(self):
        """Compare invwishart.rvs against a manual Bartlett-style construction."""
        dim = 3
        df = 10

        # Construct a simple non-diagonal positive definite matrix
        scale = np.eye(dim)
        scale[0,1] = 0.5
        scale[1,0] = 0.5

        # Construct frozen inverse-Wishart random variables
        iw = invwishart(df, scale)

        # Get the generated random variables from a known seed
        rng = np.random.RandomState(608072)
        iw_rvs = invwishart.rvs(df, scale, random_state=rng)
        rng = np.random.RandomState(608072)
        frozen_iw_rvs = iw.rvs(random_state=rng)

        # Manually calculate what it should be, based on the decomposition in
        # https://arxiv.org/abs/2310.15884 of an invers-Wishart into L L',
        # where L A = D, D is the Cholesky factorization of the scale matrix,
        # and A is the lower triangular matrix with the square root of chi^2
        # variates on the diagonal and N(0,1) variates in the lower triangle.
        # the diagonal chi^2 variates in this A are reversed compared to those
        # in the Bartlett decomposition A for Wishart rvs.
        rng = np.random.RandomState(608072)
        covariances = rng.normal(size=3)
        variances = np.r_[
            rng.chisquare(df-2),
            rng.chisquare(df-1),
            rng.chisquare(df),
        ]**0.5

        # Construct the lower-triangular A matrix
        A = np.diag(variances)
        A[np.tril_indices(dim, k=-1)] = covariances

        # inverse-Wishart random variate
        D = np.linalg.cholesky(scale)
        L = np.linalg.solve(A.T, D.T).T
        manual_iw_rvs = np.dot(L, L.T)

        # Test for equality
        assert_allclose(iw_rvs, manual_iw_rvs)
        assert_allclose(frozen_iw_rvs, manual_iw_rvs)

    def test_sample_mean(self):
        """Test that sample mean consistent with known mean."""
        # Construct an arbitrary positive definite scale matrix
        df = 10
        sample_size = 20_000
        for dim in [1, 5]:
            scale = np.diag(np.arange(dim) + 1)
            scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim - 1) / 2)
            scale = np.dot(scale.T, scale)

            dist = invwishart(df, scale)
            Xmean_exp = dist.mean()
            Xvar_exp = dist.var()
            Xmean_std = (Xvar_exp / sample_size)**0.5  # asymptotic SE of mean estimate

            X = dist.rvs(size=sample_size, random_state=1234)
            Xmean_est = X.mean(axis=0)

            ntests = dim*(dim + 1)//2
            fail_rate = 0.01 / ntests  # correct for multiple tests
            max_diff = norm.ppf(1 - fail_rate / 2)
            assert np.allclose(
                (Xmean_est - Xmean_exp) / Xmean_std,
                0,
                atol=max_diff,
            )

    def test_logpdf_4x4(self):
        """Regression test for gh-8844."""
        X = np.array([[2, 1, 0, 0.5],
                      [1, 2, 0.5, 0.5],
                      [0, 0.5, 3, 1],
                      [0.5, 0.5, 1, 2]])
        Psi = np.array([[9, 7, 3, 1],
                        [7, 9, 5, 1],
                        [3, 5, 8, 2],
                        [1, 1, 2, 9]])
        nu = 6
        prob = invwishart.logpdf(X, nu, Psi)
        # Explicit calculation from the formula on wikipedia.
        p = X.shape[0]
        sig, logdetX = np.linalg.slogdet(X)
        sig, logdetPsi = np.linalg.slogdet(Psi)
        M = np.linalg.solve(X, Psi)
        expected = ((nu/2)*logdetPsi
                    - (nu*p/2)*np.log(2)
                    - multigammaln(nu/2, p)
                    - (nu + p + 1)/2*logdetX
                    - 0.5*M.trace())
        assert_allclose(prob, expected)
|
2239
|
+
|
2240
|
+
|
2241
|
+
class TestSpecialOrthoGroup:
    """Tests for scipy.stats.special_ortho_group (Haar measure on SO(N))."""

    def test_reproducibility(self):
        """A fixed Generator seed must reproduce a known rotation matrix."""
        x = special_ortho_group.rvs(3, random_state=np.random.default_rng(514))
        expected = np.array([[-0.93200988, 0.01533561, -0.36210826],
                             [0.35742128, 0.20446501, -0.91128705],
                             [0.06006333, -0.97875374, -0.19604469]])
        assert_array_almost_equal(x, expected)

    def test_invalid_dim(self):
        """Non-integer, negative, or missing dimensions must raise ValueError."""
        assert_raises(ValueError, special_ortho_group.rvs, None)
        assert_raises(ValueError, special_ortho_group.rvs, (2, 2))
        assert_raises(ValueError, special_ortho_group.rvs, -1)
        assert_raises(ValueError, special_ortho_group.rvs, 2.5)

    def test_frozen_matrix(self):
        """Frozen and non-frozen draws with the same seed must match."""
        dim = 7
        frozen = special_ortho_group(dim)

        rvs1 = frozen.rvs(random_state=1234)
        rvs2 = special_ortho_group.rvs(dim, random_state=1234)

        assert_equal(rvs1, rvs2)

    def test_det_and_ortho(self):
        """All samples must be orthogonal with determinant exactly +1 (SO(N))."""
        xs = [special_ortho_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]

        # Test that determinants are always +1
        dets = [np.linalg.det(x) for x in xs]
        assert_allclose(dets, [1.]*30, rtol=1e-13)

        # Test that these are orthogonal matrices
        for x in xs:
            assert_array_almost_equal(np.dot(x, x.T),
                                      np.eye(x.shape[0]))

    def test_haar(self):
        # Test that the distribution is constant under rotation
        # Every column should have the same distribution
        # Additionally, the distribution should be invariant under another rotation

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        ks_prob = .05
        xs = special_ortho_group.rvs(
            dim, size=samples, random_state=np.random.default_rng(513)
        )

        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
        # effectively picking off entries in the matrices of xs.
        # These projections should all have the same distribution,
        # establishing rotational invariance. We use the two-sided
        # KS test to confirm this.
        # We could instead test that angles between random vectors
        # are uniformly distributed, but the below is sufficient.
        # It is not feasible to consider all pairs, so pick a few.
        els = ((0,0), (0,2), (1,4), (2,3))
        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
        proj = {(er, ec): sorted([x[er][ec] for x in xs]) for er, ec in els}
        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
        assert_array_less([ks_prob]*len(pairs), ks_tests)

    def test_one_by_one(self):
        # Test that the distribution is a delta function at the identity matrix
        # when dim=1
        assert_allclose(special_ortho_group.rvs(1, size=1000), 1, rtol=1e-13)

    def test_zero_by_zero(self):
        """dim=0 must yield empty 0x0 matrices with the requested batch shape."""
        assert_equal(special_ortho_group.rvs(0, size=4).shape, (4, 0, 0))
|
2313
|
+
|
2314
|
+
|
2315
|
+
class TestOrthoGroup:
|
2316
|
+
def test_reproducibility(self):
|
2317
|
+
seed = 514
|
2318
|
+
rng = np.random.RandomState(seed)
|
2319
|
+
x = ortho_group.rvs(3, random_state=rng)
|
2320
|
+
x2 = ortho_group.rvs(3, random_state=seed)
|
2321
|
+
# Note this matrix has det -1, distinguishing O(N) from SO(N)
|
2322
|
+
assert_almost_equal(np.linalg.det(x), -1)
|
2323
|
+
expected = np.array([[0.381686, -0.090374, 0.919863],
|
2324
|
+
[0.905794, -0.161537, -0.391718],
|
2325
|
+
[-0.183993, -0.98272, -0.020204]])
|
2326
|
+
assert_array_almost_equal(x, expected)
|
2327
|
+
assert_array_almost_equal(x2, expected)
|
2328
|
+
|
2329
|
+
def test_invalid_dim(self):
|
2330
|
+
assert_raises(ValueError, ortho_group.rvs, None)
|
2331
|
+
assert_raises(ValueError, ortho_group.rvs, (2, 2))
|
2332
|
+
assert_raises(ValueError, ortho_group.rvs, -1)
|
2333
|
+
assert_raises(ValueError, ortho_group.rvs, 2.5)
|
2334
|
+
|
2335
|
+
def test_frozen_matrix(self):
|
2336
|
+
dim = 7
|
2337
|
+
frozen = ortho_group(dim)
|
2338
|
+
frozen_seed = ortho_group(dim, seed=1234)
|
2339
|
+
|
2340
|
+
rvs1 = frozen.rvs(random_state=1234)
|
2341
|
+
rvs2 = ortho_group.rvs(dim, random_state=1234)
|
2342
|
+
rvs3 = frozen_seed.rvs(size=1)
|
2343
|
+
|
2344
|
+
assert_equal(rvs1, rvs2)
|
2345
|
+
assert_equal(rvs1, rvs3)
|
2346
|
+
|
2347
|
+
def test_det_and_ortho(self):
|
2348
|
+
xs = [[ortho_group.rvs(dim)
|
2349
|
+
for i in range(10)]
|
2350
|
+
for dim in range(2,12)]
|
2351
|
+
|
2352
|
+
# Test that abs determinants are always +1
|
2353
|
+
dets = np.array([[np.linalg.det(x) for x in xx] for xx in xs])
|
2354
|
+
assert_allclose(np.fabs(dets), np.ones(dets.shape), rtol=1e-13)
|
2355
|
+
|
2356
|
+
# Test that these are orthogonal matrices
|
2357
|
+
for xx in xs:
|
2358
|
+
for x in xx:
|
2359
|
+
assert_array_almost_equal(np.dot(x, x.T),
|
2360
|
+
np.eye(x.shape[0]))
|
2361
|
+
|
2362
|
+
@pytest.mark.parametrize("dim", [2, 5, 10, 20])
|
2363
|
+
def test_det_distribution_gh18272(self, dim):
|
2364
|
+
# Test that positive and negative determinants are equally likely.
|
2365
|
+
rng = np.random.default_rng(6796248956179332344)
|
2366
|
+
dist = ortho_group(dim=dim)
|
2367
|
+
rvs = dist.rvs(size=5000, random_state=rng)
|
2368
|
+
dets = scipy.linalg.det(rvs)
|
2369
|
+
k = np.sum(dets > 0)
|
2370
|
+
n = len(dets)
|
2371
|
+
res = stats.binomtest(k, n)
|
2372
|
+
low, high = res.proportion_ci(confidence_level=0.95)
|
2373
|
+
assert low < 0.5 < high
|
2374
|
+
|
2375
|
+
def test_haar(self):
|
2376
|
+
# Test that the distribution is constant under rotation
|
2377
|
+
# Every column should have the same distribution
|
2378
|
+
# Additionally, the distribution should be invariant under another rotation
|
2379
|
+
|
2380
|
+
# Generate samples
|
2381
|
+
dim = 5
|
2382
|
+
samples = 1000 # Not too many, or the test takes too long
|
2383
|
+
ks_prob = .05
|
2384
|
+
rng = np.random.RandomState(518) # Note that the test is sensitive to seed too
|
2385
|
+
xs = ortho_group.rvs(dim, size=samples, random_state=rng)
|
2386
|
+
|
2387
|
+
# Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
|
2388
|
+
# effectively picking off entries in the matrices of xs.
|
2389
|
+
# These projections should all have the same distribution,
|
2390
|
+
# establishing rotational invariance. We use the two-sided
|
2391
|
+
# KS test to confirm this.
|
2392
|
+
# We could instead test that angles between random vectors
|
2393
|
+
# are uniformly distributed, but the below is sufficient.
|
2394
|
+
# It is not feasible to consider all pairs, so pick a few.
|
2395
|
+
els = ((0,0), (0,2), (1,4), (2,3))
|
2396
|
+
#proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
|
2397
|
+
proj = {(er, ec): sorted([x[er][ec] for x in xs]) for er, ec in els}
|
2398
|
+
pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
|
2399
|
+
ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
|
2400
|
+
assert_array_less([ks_prob]*len(pairs), ks_tests)
|
2401
|
+
|
2402
|
+
def test_one_by_one(self):
|
2403
|
+
# Test that the 1x1 distribution gives ±1 with equal probability.
|
2404
|
+
dim = 1
|
2405
|
+
xs = ortho_group.rvs(dim, size=5000, random_state=np.random.default_rng(514))
|
2406
|
+
assert_allclose(np.abs(xs), 1, rtol=1e-13)
|
2407
|
+
k = np.sum(xs > 0)
|
2408
|
+
n = len(xs)
|
2409
|
+
res = stats.binomtest(k, n)
|
2410
|
+
low, high = res.proportion_ci(confidence_level=0.95)
|
2411
|
+
assert low < 0.5 < high
|
2412
|
+
|
2413
|
+
def test_zero_by_zero(self):
|
2414
|
+
assert_equal(special_ortho_group.rvs(0, size=4).shape, (4, 0, 0))
|
2415
|
+
|
2416
|
+
@pytest.mark.slow
|
2417
|
+
def test_pairwise_distances(self):
|
2418
|
+
# Test that the distribution of pairwise distances is close to correct.
|
2419
|
+
rng = np.random.RandomState(514)
|
2420
|
+
|
2421
|
+
def random_ortho(dim, random_state=None):
|
2422
|
+
u, _s, v = np.linalg.svd(rng.normal(size=(dim, dim)))
|
2423
|
+
return np.dot(u, v)
|
2424
|
+
|
2425
|
+
for dim in range(2, 6):
|
2426
|
+
def generate_test_statistics(rvs, N=1000, eps=1e-10):
|
2427
|
+
stats = np.array([
|
2428
|
+
np.sum((rvs(dim=dim, random_state=rng) -
|
2429
|
+
rvs(dim=dim, random_state=rng))**2)
|
2430
|
+
for _ in range(N)
|
2431
|
+
])
|
2432
|
+
# Add a bit of noise to account for numeric accuracy.
|
2433
|
+
stats += np.random.uniform(-eps, eps, size=stats.shape)
|
2434
|
+
return stats
|
2435
|
+
|
2436
|
+
expected = generate_test_statistics(random_ortho)
|
2437
|
+
actual = generate_test_statistics(scipy.stats.ortho_group.rvs)
|
2438
|
+
|
2439
|
+
_D, p = scipy.stats.ks_2samp(expected, actual)
|
2440
|
+
|
2441
|
+
assert_array_less(.05, p)
|
2442
|
+
|
2443
|
+
|
2444
|
+
class TestRandomCorrelation:
    """Tests for scipy.stats.random_correlation (random correlation matrices
    with prescribed eigenvalues)."""

    def test_reproducibility(self):
        """A fixed seed (RandomState or int) must reproduce a known matrix."""
        rng = np.random.RandomState(514)
        eigs = (.5, .8, 1.2, 1.5)
        x = random_correlation.rvs(eigs, random_state=rng)
        x2 = random_correlation.rvs(eigs, random_state=514)
        expected = np.array([[1., -0.184851, 0.109017, -0.227494],
                             [-0.184851, 1., 0.231236, 0.326669],
                             [0.109017, 0.231236, 1., -0.178912],
                             [-0.227494, 0.326669, -0.178912, 1.]])
        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)

    def test_invalid_eigs(self):
        """Invalid eigenvalue specs (wrong type/shape/sum/sign) must raise."""
        assert_raises(ValueError, random_correlation.rvs, None)
        assert_raises(ValueError, random_correlation.rvs, 'test')
        assert_raises(ValueError, random_correlation.rvs, 2.5)
        assert_raises(ValueError, random_correlation.rvs, [2.5])
        assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]])
        assert_raises(ValueError, random_correlation.rvs, [2.5, -.5])
        assert_raises(ValueError, random_correlation.rvs, [1, 2, .1])

    def test_frozen_matrix(self):
        """Frozen (with/without seed) and non-frozen draws must agree."""
        eigs = (.5, .8, 1.2, 1.5)
        frozen = random_correlation(eigs)
        frozen_seed = random_correlation(eigs, seed=514)

        rvs1 = random_correlation.rvs(eigs, random_state=514)
        rvs2 = frozen.rvs(random_state=514)
        rvs3 = frozen_seed.rvs()

        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)

    def test_definition(self):
        # Test the definition of a correlation matrix in several dimensions:
        #
        # 1. Det is product of eigenvalues (and positive by construction
        #    in examples)
        # 2. 1's on diagonal
        # 3. Matrix is symmetric

        def norm(i, e):
            # Rescale e to sum to i, as required for a correlation matrix's
            # eigenvalues (trace == dimension).
            return i*e/sum(e)

        rng = np.random.RandomState(123)

        eigs = [norm(i, rng.uniform(size=i)) for i in range(2, 6)]
        eigs.append([4,0,0,0])

        ones = [[1.]*len(e) for e in eigs]
        xs = [random_correlation.rvs(e, random_state=rng) for e in eigs]

        # Test that determinants are products of eigenvalues
        # These are positive by construction
        # Could also test that the eigenvalues themselves are correct,
        # but this seems sufficient.
        dets = [np.fabs(np.linalg.det(x)) for x in xs]
        dets_known = [np.prod(e) for e in eigs]
        assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13)

        # Test for 1's on the diagonal
        diags = [np.diag(x) for x in xs]
        for a, b in zip(diags, ones):
            assert_allclose(a, b, rtol=1e-13)

        # Correlation matrices are symmetric
        for x in xs:
            assert_allclose(x, x.T, rtol=1e-13)

    def test_to_corr(self):
        # Check some corner cases in to_corr

        # ajj == 1
        m = np.array([[0.1, 0], [0, 1]], dtype=float)
        m = random_correlation._to_corr(m)
        assert_allclose(m, np.array([[1, 0], [0, 0.1]]))

        # Floating point overflow; fails to compute the correct
        # rotation, but should still produce some valid rotation
        # rather than infs/nans
        with np.errstate(over='ignore'):
            g = np.array([[0, 1], [-1, 0]])

            m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float)
            m = random_correlation._to_corr(m0.copy())
            assert_allclose(m, g.T.dot(m0).dot(g))

            m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float)
            m = random_correlation._to_corr(m0.copy())
            assert_allclose(m, g.T.dot(m0).dot(g))

        # Zero discriminant; should set the first diag entry to 1
        m0 = np.array([[2, 1], [1, 2]], dtype=float)
        m = random_correlation._to_corr(m0.copy())
        assert_allclose(m[0,0], 1)

        # Slightly negative discriminant; should be approx correct still
        m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float)
        m = random_correlation._to_corr(m0.copy())
        assert_allclose(m[0,0], 1)
|
2545
|
+
|
2546
|
+
|
2547
|
+
class TestUniformDirection:
    """Tests for scipy.stats.uniform_direction (uniform unit vectors)."""

    @pytest.mark.parametrize("dim", [1, 3])
    @pytest.mark.parametrize("size", [None, 1, 5, (5, 4)])
    def test_samples(self, dim, size):
        # test that samples have correct shape and norm 1
        rng = np.random.default_rng(2777937887058094419)
        uniform_direction_dist = uniform_direction(dim, seed=rng)
        samples = uniform_direction_dist.rvs(size)
        mean, cov = np.zeros(dim), np.eye(dim)
        # multivariate_normal's output shape serves as the reference for the
        # expected broadcast shape of `size` draws of dim-vectors.
        expected_shape = rng.multivariate_normal(mean, cov, size=size).shape
        assert samples.shape == expected_shape
        norms = np.linalg.norm(samples, axis=-1)
        assert_allclose(norms, 1.)

    @pytest.mark.parametrize("dim", [None, 0, (2, 2), 2.5])
    def test_invalid_dim(self, dim):
        """Missing/zero/non-scalar/non-integer dims must raise ValueError."""
        message = ("Dimension of vector must be specified, "
                   "and must be an integer greater than 0.")
        with pytest.raises(ValueError, match=message):
            uniform_direction.rvs(dim)

    def test_frozen_distribution(self):
        """Frozen (with/without seed) and non-frozen draws must agree."""
        dim = 5
        frozen = uniform_direction(dim)
        frozen_seed = uniform_direction(dim, seed=514)

        rvs1 = frozen.rvs(random_state=514)
        rvs2 = uniform_direction.rvs(dim, random_state=514)
        rvs3 = frozen_seed.rvs()

        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)

    @pytest.mark.parametrize("dim", [2, 5, 8])
    def test_uniform(self, dim):
        """Projections onto an orthogonal 2-frame must give uniform angles."""
        rng = np.random.default_rng(1036978481269651776)
        spherical_dist = uniform_direction(dim, seed=rng)
        # generate random, orthogonal vectors
        v1, v2 = spherical_dist.rvs(size=2)
        v2 -= v1 @ v2 * v1
        v2 /= np.linalg.norm(v2)
        assert_allclose(v1 @ v2, 0, atol=1e-14)  # orthogonal
        # generate data and project onto orthogonal vectors
        samples = spherical_dist.rvs(size=10000)
        s1 = samples @ v1
        s2 = samples @ v2
        angles = np.arctan2(s1, s2)
        # test that angles follow a uniform distribution
        # normalize angles to range [0, 1]
        angles += np.pi
        angles /= 2*np.pi
        # perform KS test
        uniform_dist = uniform()
        kstest_result = kstest(angles, uniform_dist.cdf)
        assert kstest_result.pvalue > 0.05
|
2602
|
+
|
2603
|
+
|
2604
|
+
class TestUnitaryGroup:
    """Tests for scipy.stats.unitary_group (Haar measure on U(N))."""

    def test_reproducibility(self):
        """A fixed seed (RandomState or int) must reproduce a known matrix."""
        rng = np.random.RandomState(514)
        x = unitary_group.rvs(3, random_state=rng)
        x2 = unitary_group.rvs(3, random_state=514)

        expected = np.array(
            [[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j],
             [0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j],
             [-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]]
        )

        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)

    def test_invalid_dim(self):
        """Non-integer, negative, or missing dimensions must raise ValueError."""
        assert_raises(ValueError, unitary_group.rvs, None)
        assert_raises(ValueError, unitary_group.rvs, (2, 2))
        assert_raises(ValueError, unitary_group.rvs, -1)
        assert_raises(ValueError, unitary_group.rvs, 2.5)

    def test_frozen_matrix(self):
        """Frozen (with/without seed) and non-frozen draws must agree."""
        dim = 7
        frozen = unitary_group(dim)
        frozen_seed = unitary_group(dim, seed=514)

        rvs1 = frozen.rvs(random_state=514)
        rvs2 = unitary_group.rvs(dim, random_state=514)
        rvs3 = frozen_seed.rvs(size=1)

        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)

    def test_unitarity(self):
        """Every sample U must satisfy U @ U^H == I."""
        xs = [unitary_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]

        # Test that these are unitary matrices
        for x in xs:
            assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15)

    def test_haar(self):
        # Test that the eigenvalues, which lie on the unit circle in
        # the complex plane, are uncorrelated.

        # Generate samples
        for dim in (1, 5):
            samples = 1000  # Not too many, or the test takes too long
            # Note that the test is sensitive to seed too
            xs = unitary_group.rvs(
                dim, size=samples, random_state=np.random.default_rng(514)
            )

            # The angles "x" of the eigenvalues should be uniformly distributed
            # Overall this seems to be a necessary but weak test of the distribution.
            eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs])
            x = np.arctan2(eigs.imag, eigs.real)
            res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
            assert_(res.pvalue > 0.05)

    def test_zero_by_zero(self):
        """dim=0 must yield empty 0x0 matrices with the requested batch shape."""
        assert_equal(unitary_group.rvs(0, size=4).shape, (4, 0, 0))
|
2667
|
+
|
2668
|
+
|
2669
|
+
class TestMultivariateT:
|
2670
|
+
|
2671
|
+
# These tests were created by running vpa(mvtpdf(...)) in MATLAB. The
# function takes no `mu` parameter. The tests were run as
#
# >> ans = vpa(mvtpdf(x - mu, shape, df));
#
# Each entry is (x, loc, shape, df, expected_pdf_values); the expected
# values are high-precision MATLAB vpa outputs.
PDF_TESTS = [(
    # x
    [
        [1, 2],
        [4, 1],
        [2, 1],
        [2, 4],
        [1, 4],
        [4, 1],
        [3, 2],
        [3, 3],
        [4, 4],
        [5, 1],
    ],
    # loc
    [0, 0],
    # shape
    [
        [1, 0],
        [0, 1]
    ],
    # df
    4,
    # ans
    [
        0.013972450422333741737457302178882,
        0.0010998721906793330026219646100571,
        0.013972450422333741737457302178882,
        0.00073682844024025606101402363634634,
        0.0010998721906793330026219646100571,
        0.0010998721906793330026219646100571,
        0.0020732579600816823488240725481546,
        0.00095660371505271429414668515889275,
        0.00021831953784896498569831346792114,
        0.00037725616140301147447000396084604
    ]

), (
    # x
    [
        [0.9718, 0.1298, 0.8134],
        [0.4922, 0.5522, 0.7185],
        [0.3010, 0.1491, 0.5008],
        [0.5971, 0.2585, 0.8940],
        [0.5434, 0.5287, 0.9507],
    ],
    # loc
    [-1, 1, 50],
    # shape
    [
        [1.0000, 0.5000, 0.2500],
        [0.5000, 1.0000, -0.1000],
        [0.2500, -0.1000, 1.0000],
    ],
    # df
    8,
    # ans
    [
        0.00000000000000069609279697467772867405511133763,
        0.00000000000000073700739052207366474839369535934,
        0.00000000000000069522909962669171512174435447027,
        0.00000000000000074212293557998314091880208889767,
        0.00000000000000077039675154022118593323030449058,
    ]
)]
|
2741
|
+
|
2742
|
+
@pytest.mark.parametrize("x, loc, shape, df, ans", PDF_TESTS)
def test_pdf_correctness(self, x, loc, shape, df, ans):
    """pdf must match the MATLAB vpa(mvtpdf(...)) reference values."""
    dist = multivariate_t(loc, shape, df, seed=0)
    val = dist.pdf(x)
    assert_array_almost_equal(val, ans)
|
2747
|
+
|
2748
|
+
@pytest.mark.parametrize("x, loc, shape, df, ans", PDF_TESTS)
def test_logpdf_correct(self, x, loc, shape, df, ans):
    """logpdf must equal log(pdf) on the reference cases."""
    dist = multivariate_t(loc, shape, df, seed=0)
    val1 = dist.pdf(x)
    val2 = dist.logpdf(x)
    assert_array_almost_equal(np.log(val1), val2)
|
2754
|
+
|
2755
|
+
# https://github.com/scipy/scipy/issues/10042#issuecomment-576795195
|
2756
|
+
def test_mvt_with_df_one_is_cauchy(self):
|
2757
|
+
x = [9, 7, 4, 1, -3, 9, 0, -3, -1, 3]
|
2758
|
+
val = multivariate_t.pdf(x, df=1)
|
2759
|
+
ans = cauchy.pdf(x)
|
2760
|
+
assert_array_almost_equal(val, ans)
|
2761
|
+
|
2762
|
+
def test_mvt_with_high_df_is_approx_normal(self):
    """With very large df, multivariate t samples should pass a normality test."""
    # `normaltest` returns the chi-squared statistic and the associated
    # p-value. The null hypothesis is that `x` came from a normal
    # distribution, so a low p-value represents rejecting the null, i.e.
    # that it is unlikely that `x` came a normal distribution.
    P_VAL_MIN = 0.1

    dist = multivariate_t(0, 1, df=100000, seed=1)
    samples = dist.rvs(size=100000)
    _, p = normaltest(samples)
    assert (p > P_VAL_MIN)

    dist = multivariate_t([-2, 3], [[10, -1], [-1, 10]], df=100000,
                          seed=42)
    samples = dist.rvs(size=100000)
    _, p = normaltest(samples)
    assert ((p > P_VAL_MIN).all())
|
2779
|
+
|
2780
|
+
@pytest.mark.thread_unsafe
@patch('scipy.stats.multivariate_normal._logpdf')
def test_mvt_with_inf_df_calls_normal(self, mock):
    """df=inf must delegate to multivariate_normal (frozen type and _logpdf calls)."""
    dist = multivariate_t(0, 1, df=np.inf, seed=7)
    assert isinstance(dist, multivariate_normal_frozen)
    multivariate_t.pdf(0, df=np.inf)
    assert mock.call_count == 1
    multivariate_t.logpdf(0, df=np.inf)
    assert mock.call_count == 2
|
2789
|
+
|
2790
|
+
def test_shape_correctness(self):
    """pdf/logpdf/rvs must return scalars for single points and (n,) arrays
    for batches."""
    # pdf and logpdf should return scalar when the
    # number of samples in x is one.
    dim = 4
    loc = np.zeros(dim)
    shape = np.eye(dim)
    df = 4.5
    x = np.zeros(dim)
    res = multivariate_t(loc, shape, df).pdf(x)
    assert np.isscalar(res)
    res = multivariate_t(loc, shape, df).logpdf(x)
    assert np.isscalar(res)

    # pdf() and logpdf() should return probabilities of shape
    # (n_samples,) when x has n_samples.
    n_samples = 7
    x = np.random.random((n_samples, dim))
    res = multivariate_t(loc, shape, df).pdf(x)
    assert (res.shape == (n_samples,))
    res = multivariate_t(loc, shape, df).logpdf(x)
    assert (res.shape == (n_samples,))

    # rvs() should return scalar unless a size argument is applied.
    res = multivariate_t(np.zeros(1), np.eye(1), 1).rvs()
    assert np.isscalar(res)

    # rvs() should return vector of shape (size,) if size argument
    # is applied.
    size = 7
    res = multivariate_t(np.zeros(1), np.eye(1), 1).rvs(size=size)
    assert (res.shape == (size,))
|
2821
|
+
|
2822
|
+
def test_default_arguments(self):
|
2823
|
+
dist = multivariate_t()
|
2824
|
+
assert_equal(dist.loc, [0])
|
2825
|
+
assert_equal(dist.shape, [[1]])
|
2826
|
+
assert (dist.df == 1)
|
2827
|
+
|
2828
|
+
# Each entry: (loc, shape, df) inputs followed by the expected
# (loc, shape, df) after default-filling.
DEFAULT_ARGS_TESTS = [
    (None, None, None, 0, 1, 1),
    (None, None, 7, 0, 1, 7),
    (None, [[7, 0], [0, 7]], None, [0, 0], [[7, 0], [0, 7]], 1),
    (None, [[7, 0], [0, 7]], 7, [0, 0], [[7, 0], [0, 7]], 7),
    ([7, 7], None, None, [7, 7], [[1, 0], [0, 1]], 1),
    ([7, 7], None, 7, [7, 7], [[1, 0], [0, 1]], 7),
    ([7, 7], [[7, 0], [0, 7]], None, [7, 7], [[7, 0], [0, 7]], 1),
    ([7, 7], [[7, 0], [0, 7]], 7, [7, 7], [[7, 0], [0, 7]], 7)
]

@pytest.mark.parametrize("loc, shape, df, loc_ans, shape_ans, df_ans",
                         DEFAULT_ARGS_TESTS)
def test_default_args(self, loc, shape, df, loc_ans, shape_ans, df_ans):
    """Omitted parameters must be filled with the documented defaults."""
    dist = multivariate_t(loc=loc, shape=shape, df=df)
    assert_equal(dist.loc, loc_ans)
    assert_equal(dist.shape, shape_ans)
    assert (dist.df == df_ans)
|
2846
|
+
|
2847
|
+
# Scalar, list, and ndarray inputs must all normalize to the same
# canonical (loc, shape, df) representation.
ARGS_SHAPES_TESTS = [
    (-1, 2, 3, [-1], [[2]], 3),
    ([-1], [2], 3, [-1], [[2]], 3),
    (np.array([-1]), np.array([2]), 3, [-1], [[2]], 3)
]

@pytest.mark.parametrize("loc, shape, df, loc_ans, shape_ans, df_ans",
                         ARGS_SHAPES_TESTS)
def test_scalar_list_and_ndarray_arguments(self, loc, shape, df, loc_ans,
                                           shape_ans, df_ans):
    """Equivalent scalar/list/ndarray parameters must normalize identically."""
    dist = multivariate_t(loc, shape, df)
    assert_equal(dist.loc, loc_ans)
    assert_equal(dist.shape, shape_ans)
    assert_equal(dist.df, df_ans)
|
2861
|
+
|
2862
|
+
def test_argument_error_handling(self):
|
2863
|
+
# `loc` should be a one-dimensional vector.
|
2864
|
+
loc = [[1, 1]]
|
2865
|
+
assert_raises(ValueError,
|
2866
|
+
multivariate_t,
|
2867
|
+
**dict(loc=loc))
|
2868
|
+
|
2869
|
+
# `shape` should be scalar or square matrix.
|
2870
|
+
shape = [[1, 1], [2, 2], [3, 3]]
|
2871
|
+
assert_raises(ValueError,
|
2872
|
+
multivariate_t,
|
2873
|
+
**dict(loc=loc, shape=shape))
|
2874
|
+
|
2875
|
+
# `df` should be greater than zero.
|
2876
|
+
loc = np.zeros(2)
|
2877
|
+
shape = np.eye(2)
|
2878
|
+
df = -1
|
2879
|
+
assert_raises(ValueError,
|
2880
|
+
multivariate_t,
|
2881
|
+
**dict(loc=loc, shape=shape, df=df))
|
2882
|
+
df = 0
|
2883
|
+
assert_raises(ValueError,
|
2884
|
+
multivariate_t,
|
2885
|
+
**dict(loc=loc, shape=shape, df=df))
|
2886
|
+
|
2887
|
+
def test_reproducibility(self):
|
2888
|
+
rng = np.random.RandomState(4)
|
2889
|
+
loc = rng.uniform(size=3)
|
2890
|
+
shape = np.eye(3)
|
2891
|
+
dist1 = multivariate_t(loc, shape, df=3, seed=2)
|
2892
|
+
dist2 = multivariate_t(loc, shape, df=3, seed=2)
|
2893
|
+
samples1 = dist1.rvs(size=10)
|
2894
|
+
samples2 = dist2.rvs(size=10)
|
2895
|
+
assert_equal(samples1, samples2)
|
2896
|
+
|
2897
|
+
def test_allow_singular(self):
|
2898
|
+
# Make shape singular and verify error was raised.
|
2899
|
+
args = dict(loc=[0,0], shape=[[0,0],[0,1]], df=1, allow_singular=False)
|
2900
|
+
assert_raises(np.linalg.LinAlgError, multivariate_t, **args)
|
2901
|
+
|
2902
|
+
@pytest.mark.parametrize("size", [(10, 3), (5, 6, 4, 3)])
|
2903
|
+
@pytest.mark.parametrize("dim", [2, 3, 4, 5])
|
2904
|
+
@pytest.mark.parametrize("df", [1., 2., np.inf])
|
2905
|
+
def test_rvs(self, size, dim, df):
|
2906
|
+
dist = multivariate_t(np.zeros(dim), np.eye(dim), df)
|
2907
|
+
rvs = dist.rvs(size=size)
|
2908
|
+
assert rvs.shape == size + (dim, )
|
2909
|
+
|
2910
|
+
def test_cdf_signs(self):
|
2911
|
+
# check that sign of output is correct when np.any(lower > x)
|
2912
|
+
mean = np.zeros(3)
|
2913
|
+
cov = np.eye(3)
|
2914
|
+
df = 10
|
2915
|
+
b = [[1, 1, 1], [0, 0, 0], [1, 0, 1], [0, 1, 0]]
|
2916
|
+
a = [[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]
|
2917
|
+
# when odd number of elements of b < a, output is negative
|
2918
|
+
expected_signs = np.array([1, -1, -1, 1])
|
2919
|
+
cdf = multivariate_normal.cdf(b, mean, cov, df, lower_limit=a)
|
2920
|
+
assert_allclose(cdf, cdf[0]*expected_signs)
|
2921
|
+
|
2922
|
+
@pytest.mark.parametrize('dim', [1, 2, 5])
|
2923
|
+
def test_cdf_against_multivariate_normal(self, dim):
    """Check CDF accuracy against MVN for randomly-generated cases."""
    # Check accuracy against MVN randomly-generated cases
    self.cdf_against_mvn_test(dim)
|
2926
|
+
|
2927
|
+
@pytest.mark.parametrize('dim', [3, 6, 9])
|
2928
|
+
def test_cdf_against_multivariate_normal_singular(self, dim):
    """Check CDF accuracy against MVN for randomly-generated singular cases."""
    # BUG FIX: `dim` was previously ignored (hard-coded 3), so the
    # parametrization over [3, 6, 9] re-ran the identical case three times.
    self.cdf_against_mvn_test(dim, True)
|
2931
|
+
|
2932
|
+
def cdf_against_mvn_test(self, dim, singular=False):
    """Check that the MVT CDF approaches the MVN CDF as df -> oo.

    Draws a random covariance (optionally singular), a random mean, and
    `n` random pairs of integration limits, then compares
    multivariate_t.cdf at df=10000 against multivariate_normal.cdf.
    """
    # Check for accuracy in the limit that df -> oo and MVT -> MVN
    rng = np.random.default_rng(413722918996573)
    n = 3  # number of (a, b) limit pairs to check

    # eigenvalue scales spanning several orders of magnitude
    w = 10**rng.uniform(-2, 1, size=dim)
    cov = _random_covariance(dim, w, rng, singular)

    mean = 10**rng.uniform(-1, 2, size=dim) * np.sign(rng.normal(size=dim))
    a = -10**rng.uniform(-1, 2, size=(n, dim)) + mean
    b = 10**rng.uniform(-1, 2, size=(n, dim)) + mean

    res = stats.multivariate_t.cdf(b, mean, cov, df=10000, lower_limit=a,
                                   allow_singular=True, random_state=rng)
    ref = stats.multivariate_normal.cdf(b, mean, cov, allow_singular=True,
                                        lower_limit=a)
    # loose tolerance: both sides involve stochastic integration
    assert_allclose(res, ref, atol=5e-4)
|
2949
|
+
|
2950
|
+
def test_cdf_against_univariate_t(self):
|
2951
|
+
rng = np.random.default_rng(413722918996573)
|
2952
|
+
cov = 2
|
2953
|
+
mean = 0
|
2954
|
+
x = rng.normal(size=10, scale=np.sqrt(cov))
|
2955
|
+
df = 3
|
2956
|
+
|
2957
|
+
res = stats.multivariate_t.cdf(x, mean, cov, df, lower_limit=-np.inf,
|
2958
|
+
random_state=rng)
|
2959
|
+
ref = stats.t.cdf(x, df, mean, np.sqrt(cov))
|
2960
|
+
incorrect = stats.norm.cdf(x, mean, np.sqrt(cov))
|
2961
|
+
|
2962
|
+
assert_allclose(res, ref, atol=5e-4) # close to t
|
2963
|
+
assert np.all(np.abs(res - incorrect) > 1e-3) # not close to normal
|
2964
|
+
|
2965
|
+
@pytest.mark.parametrize("dim", [2, 3, 5, 10])
@pytest.mark.parametrize("seed", [3363958638, 7891119608, 3887698049,
                                  5013150848, 1495033423, 6170824608])
@pytest.mark.parametrize("singular", [False, True])
def test_cdf_against_qsimvtv(self, dim, seed, singular):
    """Compare multivariate_t.cdf against the _qsimvtv reference routine."""
    if singular and seed != 3363958638:
        pytest.skip('Agreement with qsimvtv is not great in singular case')
    rng = np.random.default_rng(seed)
    w = 10**rng.uniform(-2, 2, size=dim)
    cov = _random_covariance(dim, w, rng, singular)
    mean = rng.random(dim)
    a = -rng.random(dim)
    b = rng.random(dim)
    df = rng.random() * 5

    # no lower limit
    res = stats.multivariate_t.cdf(b, mean, cov, df, random_state=rng,
                                   allow_singular=True)
    with np.errstate(invalid='ignore'):
        # np.inf*a gives a -inf lower limit (a is elementwise negative)
        ref = _qsimvtv(20000, df, cov, np.inf*a, b - mean, rng)[0]
    assert_allclose(res, ref, atol=2e-4, rtol=1e-3)

    # with lower limit
    res = stats.multivariate_t.cdf(b, mean, cov, df, lower_limit=a,
                                   random_state=rng, allow_singular=True)
    with np.errstate(invalid='ignore'):
        ref = _qsimvtv(20000, df, cov, a - mean, b - mean, rng)[0]
    assert_allclose(res, ref, atol=1e-4, rtol=1e-3)
|
2993
|
+
|
2994
|
+
@pytest.mark.slow
def test_cdf_against_generic_integrators(self):
    """Compare multivariate_t.cdf against generic numerical integrators."""
    # Compare result against generic numerical integrators
    dim = 3
    rng = np.random.default_rng(41372291899657)
    w = 10 ** rng.uniform(-1, 1, size=dim)
    cov = _random_covariance(dim, w, rng, singular=True)
    mean = rng.random(dim)
    a = -rng.random(dim)
    b = rng.random(dim)
    df = rng.random() * 5

    res = stats.multivariate_t.cdf(b, mean, cov, df, random_state=rng,
                                   lower_limit=a)

    def integrand(x):
        # qmc_quad supplies sample points as columns, hence the transpose
        return stats.multivariate_t.pdf(x.T, mean, cov, df)

    ref = qmc_quad(integrand, a, b, qrng=stats.qmc.Halton(d=dim, seed=rng))
    assert_allclose(res, ref.integral, rtol=1e-3)

    def integrand(*zyx):
        # tplquad passes coordinates innermost-first, so reverse them
        return stats.multivariate_t.pdf(zyx[::-1], mean, cov, df)

    ref = tplquad(integrand, a[0], b[0], a[1], b[1], a[2], b[2])
    assert_allclose(res, ref[0], rtol=1e-3)
|
3020
|
+
|
3021
|
+
def test_against_matlab(self):
|
3022
|
+
# Test against matlab mvtcdf:
|
3023
|
+
# C = [6.21786909 0.2333667 7.95506077;
|
3024
|
+
# 0.2333667 29.67390923 16.53946426;
|
3025
|
+
# 7.95506077 16.53946426 19.17725252]
|
3026
|
+
# df = 1.9559939787727658
|
3027
|
+
# mvtcdf([0, 0, 0], C, df) % 0.2523
|
3028
|
+
rng = np.random.default_rng(2967390923)
|
3029
|
+
cov = np.array([[ 6.21786909, 0.2333667 , 7.95506077],
|
3030
|
+
[ 0.2333667 , 29.67390923, 16.53946426],
|
3031
|
+
[ 7.95506077, 16.53946426, 19.17725252]])
|
3032
|
+
df = 1.9559939787727658
|
3033
|
+
dist = stats.multivariate_t(shape=cov, df=df)
|
3034
|
+
res = dist.cdf([0, 0, 0], random_state=rng)
|
3035
|
+
ref = 0.2523
|
3036
|
+
assert_allclose(res, ref, rtol=1e-3)
|
3037
|
+
|
3038
|
+
def test_frozen(self):
|
3039
|
+
seed = 4137229573
|
3040
|
+
rng = np.random.default_rng(seed)
|
3041
|
+
loc = rng.uniform(size=3)
|
3042
|
+
x = rng.uniform(size=3) + loc
|
3043
|
+
shape = np.eye(3)
|
3044
|
+
df = rng.random()
|
3045
|
+
args = (loc, shape, df)
|
3046
|
+
|
3047
|
+
rng_frozen = np.random.default_rng(seed)
|
3048
|
+
rng_unfrozen = np.random.default_rng(seed)
|
3049
|
+
dist = stats.multivariate_t(*args, seed=rng_frozen)
|
3050
|
+
assert_equal(dist.cdf(x),
|
3051
|
+
multivariate_t.cdf(x, *args, random_state=rng_unfrozen))
|
3052
|
+
|
3053
|
+
def test_vectorized(self):
    """cdf must broadcast over leading batch dimensions of `x`."""
    dim = 4
    n = (2, 3)  # batch shape for the evaluation points
    rng = np.random.default_rng(413722918996573)
    A = rng.random(size=(dim, dim))
    cov = A @ A.T
    mean = rng.random(dim)
    x = rng.random(n + (dim,))
    df = rng.random() * 5

    res = stats.multivariate_t.cdf(x, mean, cov, df, random_state=rng)

    def _cdf_1d(x):
        # reference: evaluate each batch element independently
        return _qsimvtv(10000, df, cov, -np.inf*x, x-mean, rng)[0]

    ref = np.apply_along_axis(_cdf_1d, -1, x)
    assert_allclose(res, ref, atol=1e-4, rtol=1e-3)
|
3070
|
+
|
3071
|
+
@pytest.mark.parametrize("dim", (3, 7))
|
3072
|
+
def test_against_analytical(self, dim):
|
3073
|
+
rng = np.random.default_rng(413722918996573)
|
3074
|
+
A = scipy.linalg.toeplitz(c=[1] + [0.5] * (dim - 1))
|
3075
|
+
res = stats.multivariate_t(shape=A).cdf([0] * dim, random_state=rng)
|
3076
|
+
ref = 1 / (dim + 1)
|
3077
|
+
assert_allclose(res, ref, rtol=5e-5)
|
3078
|
+
|
3079
|
+
def test_entropy_inf_df(self):
|
3080
|
+
cov = np.eye(3, 3)
|
3081
|
+
df = np.inf
|
3082
|
+
mvt_entropy = stats.multivariate_t.entropy(shape=cov, df=df)
|
3083
|
+
mvn_entropy = stats.multivariate_normal.entropy(None, cov)
|
3084
|
+
assert mvt_entropy == mvn_entropy
|
3085
|
+
|
3086
|
+
@pytest.mark.parametrize("df", [1, 10, 100])
|
3087
|
+
def test_entropy_1d(self, df):
|
3088
|
+
mvt_entropy = stats.multivariate_t.entropy(shape=1., df=df)
|
3089
|
+
t_entropy = stats.t.entropy(df=df)
|
3090
|
+
assert_allclose(mvt_entropy, t_entropy, rtol=1e-13)
|
3091
|
+
|
3092
|
+
# entropy reference values were computed via numerical integration
|
3093
|
+
#
|
3094
|
+
# def integrand(x, y, mvt):
|
3095
|
+
# vec = np.array([x, y])
|
3096
|
+
# return mvt.logpdf(vec) * mvt.pdf(vec)
|
3097
|
+
|
3098
|
+
# def multivariate_t_entropy_quad_2d(df, cov):
|
3099
|
+
# dim = cov.shape[0]
|
3100
|
+
# loc = np.zeros((dim, ))
|
3101
|
+
# mvt = stats.multivariate_t(loc, cov, df)
|
3102
|
+
# limit = 100
|
3103
|
+
# return -integrate.dblquad(integrand, -limit, limit, -limit, limit,
|
3104
|
+
# args=(mvt, ))[0]
|
3105
|
+
|
3106
|
+
@pytest.mark.parametrize("df, cov, ref, tol",
|
3107
|
+
[(10, np.eye(2, 2), 3.0378770664093313, 1e-14),
|
3108
|
+
(100, np.array([[0.5, 1], [1, 10]]),
|
3109
|
+
3.55102424550609, 1e-8)])
|
3110
|
+
def test_entropy_vs_numerical_integration(self, df, cov, ref, tol):
|
3111
|
+
loc = np.zeros((2, ))
|
3112
|
+
mvt = stats.multivariate_t(loc, cov, df)
|
3113
|
+
assert_allclose(mvt.entropy(), ref, rtol=tol)
|
3114
|
+
|
3115
|
+
@pytest.mark.parametrize(
    "df, dim, ref, tol",
    [
        (10, 1, 1.5212624929756808, 1e-15),
        (100, 1, 1.4289633653182439, 1e-13),
        (500, 1, 1.420939531869349, 1e-14),
        (1e20, 1, 1.4189385332046727, 1e-15),
        (1e100, 1, 1.4189385332046727, 1e-15),
        (10, 10, 15.069150450832911, 1e-15),
        (1000, 10, 14.19936546446673, 1e-13),
        (1e20, 10, 14.189385332046728, 1e-15),
        (1e100, 10, 14.189385332046728, 1e-15),
        (10, 100, 148.28902883192654, 1e-15),
        (1000, 100, 141.99155538003762, 1e-14),
        (1e20, 100, 141.8938533204673, 1e-15),
        (1e100, 100, 141.8938533204673, 1e-15),
    ]
)
def test_extreme_entropy(self, df, dim, ref, tol):
    """Entropy must stay accurate for extreme df and large dimension."""
    # Reference values were calculated with mpmath:
    # from mpmath import mp
    # mp.dps = 500
    #
    # def mul_t_mpmath_entropy(dim, df=1):
    #     dim = mp.mpf(dim)
    #     df = mp.mpf(df)
    #     halfsum = (dim + df)/2
    #     half_df = df/2
    #
    #     return float(
    #         -mp.loggamma(halfsum) + mp.loggamma(half_df)
    #         + dim / 2 * mp.log(df * mp.pi)
    #         + halfsum * (mp.digamma(halfsum) - mp.digamma(half_df))
    #         + 0.0
    #     )
    mvt = stats.multivariate_t(shape=np.eye(dim), df=df)
    assert_allclose(mvt.entropy(), ref, rtol=tol)
|
3152
|
+
|
3153
|
+
def test_entropy_with_covariance(self):
|
3154
|
+
# Generated using np.randn(5, 5) and then rounding
|
3155
|
+
# to two decimal places
|
3156
|
+
_A = np.array([
|
3157
|
+
[1.42, 0.09, -0.49, 0.17, 0.74],
|
3158
|
+
[-1.13, -0.01, 0.71, 0.4, -0.56],
|
3159
|
+
[1.07, 0.44, -0.28, -0.44, 0.29],
|
3160
|
+
[-1.5, -0.94, -0.67, 0.73, -1.1],
|
3161
|
+
[0.17, -0.08, 1.46, -0.32, 1.36]
|
3162
|
+
])
|
3163
|
+
# Set cov to be a symmetric positive semi-definite matrix
|
3164
|
+
cov = _A @ _A.T
|
3165
|
+
|
3166
|
+
# Test the asymptotic case. For large degrees of freedom
|
3167
|
+
# the entropy approaches the multivariate normal entropy.
|
3168
|
+
df = 1e20
|
3169
|
+
mul_t_entropy = stats.multivariate_t.entropy(shape=cov, df=df)
|
3170
|
+
mul_norm_entropy = multivariate_normal(None, cov=cov).entropy()
|
3171
|
+
assert_allclose(mul_t_entropy, mul_norm_entropy, rtol=1e-15)
|
3172
|
+
|
3173
|
+
# Test the regular case. For a dim of 5 the threshold comes out
|
3174
|
+
# to be approximately 766.45. So using slightly
|
3175
|
+
# different dfs on each site of the threshold, the entropies
|
3176
|
+
# are being compared.
|
3177
|
+
df1 = 765
|
3178
|
+
df2 = 768
|
3179
|
+
_entropy1 = stats.multivariate_t.entropy(shape=cov, df=df1)
|
3180
|
+
_entropy2 = stats.multivariate_t.entropy(shape=cov, df=df2)
|
3181
|
+
assert_allclose(_entropy1, _entropy2, rtol=1e-5)
|
3182
|
+
|
3183
|
+
|
3184
|
+
class TestMultivariateHypergeom:
    """Tests for scipy.stats.multivariate_hypergeom (pmf/logpmf, moments,
    rvs, broadcasting, edge cases, and parameter validation)."""

    @pytest.mark.parametrize(
        "x, m, n, expected",
        [
            # Ground truth value from R dmvhyper
            ([3, 4], [5, 10], 7, -1.119814),
            # test for `n=0`
            ([3, 4], [5, 10], 0, -np.inf),
            # test for `x < 0`
            ([-3, 4], [5, 10], 7, -np.inf),
            # test for `m < 0` (RuntimeWarning issue)
            ([3, 4], [-5, 10], 7, np.nan),
            # test for all `m < 0` and `x.sum() != n`
            ([[1, 2], [3, 4]], [[-4, -6], [-5, -10]],
             [3, 7], [np.nan, np.nan]),
            # test for `x < 0` and `m < 0` (RuntimeWarning issue)
            ([-3, 4], [-5, 10], 1, np.nan),
            # test for `x > m`
            ([1, 11], [10, 1], 12, np.nan),
            # test for `m < 0` (RuntimeWarning issue)
            ([1, 11], [10, -1], 12, np.nan),
            # test for `n < 0`
            ([3, 4], [5, 10], -7, np.nan),
            # test for `x.sum() != n`
            ([3, 3], [5, 10], 7, -np.inf)
        ]
    )
    def test_logpmf(self, x, m, n, expected):
        """logpmf must match R's dmvhyper and handle degenerate inputs."""
        vals = multivariate_hypergeom.logpmf(x, m, n)
        assert_allclose(vals, expected, rtol=1e-6)

    def test_reduces_hypergeom(self):
        """In two categories the pmf must reduce to the univariate hypergeom pmf."""
        # test that the multivariate_hypergeom pmf reduces to the
        # hypergeom pmf in the 2d case.
        val1 = multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
        val2 = hypergeom.pmf(k=3, M=15, n=4, N=10)
        assert_allclose(val1, val2, rtol=1e-8)

        val1 = multivariate_hypergeom.pmf(x=[7, 3], m=[15, 10], n=10)
        val2 = hypergeom.pmf(k=7, M=25, n=10, N=15)
        assert_allclose(val1, val2, rtol=1e-8)

    def test_rvs(self):
        """Sample mean of many rvs draws should converge to mean()."""
        # test if `rvs` is unbiased and large sample size converges
        # to the true mean.
        rv = multivariate_hypergeom(m=[3, 5], n=4)
        rvs = rv.rvs(size=1000, random_state=123)
        assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2)

    def test_rvs_broadcasting(self):
        """rvs must broadcast over batched (m, n) parameters."""
        rv = multivariate_hypergeom(m=[[3, 5], [5, 10]], n=[4, 9])
        rvs = rv.rvs(size=(1000, 2), random_state=123)
        assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2)

    @pytest.mark.parametrize('m, n', (
        ([0, 0, 20, 0, 0], 5), ([0, 0, 0, 0, 0], 0),
        ([0, 0], 0), ([0], 0)
    ))
    def test_rvs_gh16171(self, m, n):
        """Regression test for gh-16171: rvs with zero-count categories."""
        res = multivariate_hypergeom.rvs(m, n)
        m = np.asarray(m)
        # all `n` draws must land in the only nonzero category (if any)
        res_ex = m.copy()
        res_ex[m != 0] = n
        assert_equal(res, res_ex)

    @pytest.mark.parametrize(
        "x, m, n, expected",
        [
            ([5], [5], 5, 1),
            ([3, 4], [5, 10], 7, 0.3263403),
            # Ground truth value from R dmvhyper
            ([[[3, 5], [0, 8]], [[-1, 9], [1, 1]]],
             [5, 10], [[8, 8], [8, 2]],
             [[0.3916084, 0.006993007], [0, 0.4761905]]),
            # test with empty arrays.
            (np.array([], dtype=int), np.array([], dtype=int), 0, []),
            ([1, 2], [4, 5], 5, 0),
            # Ground truth value from R dmvhyper
            ([3, 3, 0], [5, 6, 7], 6, 0.01077354)
        ]
    )
    def test_pmf(self, x, m, n, expected):
        """pmf must match reference values and handle empty/invalid inputs."""
        vals = multivariate_hypergeom.pmf(x, m, n)
        assert_allclose(vals, expected, rtol=1e-7)

    @pytest.mark.parametrize(
        "x, m, n, expected",
        [
            ([3, 4], [[5, 10], [10, 15]], 7, [0.3263403, 0.3407531]),
            ([[1], [2]], [[3], [4]], [1, 3], [1., 0.]),
            ([[[1], [2]]], [[3], [4]], [1, 3], [[1., 0.]]),
            ([[1], [2]], [[[[3]]]], [1, 3], [[[1., 0.]]])
        ]
    )
    def test_pmf_broadcasting(self, x, m, n, expected):
        """pmf must broadcast x, m and n against each other."""
        vals = multivariate_hypergeom.pmf(x, m, n)
        assert_allclose(vals, expected, rtol=1e-7)

    def test_cov(self):
        """Covariance matrix against precomputed reference values."""
        cov1 = multivariate_hypergeom.cov(m=[3, 7, 10], n=12)
        cov2 = [[0.64421053, -0.26526316, -0.37894737],
                [-0.26526316, 1.14947368, -0.88421053],
                [-0.37894737, -0.88421053, 1.26315789]]
        assert_allclose(cov1, cov2, rtol=1e-8)

    def test_cov_broadcasting(self):
        """cov must broadcast over batched (m, n) parameters."""
        cov1 = multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
        cov2 = [[[1.05, -1.05], [-1.05, 1.05]],
                [[1.56, -1.56], [-1.56, 1.56]]]
        assert_allclose(cov1, cov2, rtol=1e-8)

        # a single category is deterministic, so variance is zero
        cov3 = multivariate_hypergeom.cov(m=[[4], [5]], n=[4, 5])
        cov4 = [[[0.]], [[0.]]]
        assert_allclose(cov3, cov4, rtol=1e-8)

        cov5 = multivariate_hypergeom.cov(m=[7, 9], n=[8, 12])
        cov6 = [[[1.05, -1.05], [-1.05, 1.05]],
                [[0.7875, -0.7875], [-0.7875, 0.7875]]]
        assert_allclose(cov5, cov6, rtol=1e-8)

    def test_var(self):
        """Variance must match the univariate hypergeom in two categories."""
        # test with hypergeom
        var0 = multivariate_hypergeom.var(m=[10, 5], n=4)
        var1 = hypergeom.var(M=15, n=4, N=10)
        assert_allclose(var0, var1, rtol=1e-8)

    def test_var_broadcasting(self):
        """var must broadcast over batched (m, n) parameters."""
        var0 = multivariate_hypergeom.var(m=[10, 5], n=[4, 8])
        var1 = multivariate_hypergeom.var(m=[10, 5], n=4)
        var2 = multivariate_hypergeom.var(m=[10, 5], n=8)
        assert_allclose(var0[0], var1, rtol=1e-8)
        assert_allclose(var0[1], var2, rtol=1e-8)

        var3 = multivariate_hypergeom.var(m=[[10, 5], [10, 14]], n=[4, 8])
        var4 = [[0.6984127, 0.6984127], [1.352657, 1.352657]]
        assert_allclose(var3, var4, rtol=1e-8)

        var5 = multivariate_hypergeom.var(m=[[5], [10]], n=[5, 10])
        var6 = [[0.], [0.]]
        assert_allclose(var5, var6, rtol=1e-8)

    def test_mean(self):
        """Mean must match the univariate hypergeom and n*m/m.sum()."""
        # test with hypergeom
        mean0 = multivariate_hypergeom.mean(m=[10, 5], n=4)
        mean1 = hypergeom.mean(M=15, n=4, N=10)
        assert_allclose(mean0[0], mean1, rtol=1e-8)

        mean2 = multivariate_hypergeom.mean(m=[12, 8], n=10)
        mean3 = [12.*10./20., 8.*10./20.]
        assert_allclose(mean2, mean3, rtol=1e-8)

    def test_mean_broadcasting(self):
        """mean must broadcast over batched (m, n) parameters."""
        mean0 = multivariate_hypergeom.mean(m=[[3, 5], [10, 5]], n=[4, 8])
        mean1 = [[3.*4./8., 5.*4./8.], [10.*8./15., 5.*8./15.]]
        assert_allclose(mean0, mean1, rtol=1e-8)

    def test_mean_edge_cases(self):
        """mean for empty, all-zero, and infeasible (n > m.sum()) inputs."""
        mean0 = multivariate_hypergeom.mean(m=[0, 0, 0], n=0)
        assert_equal(mean0, [0., 0., 0.])

        # n exceeds the population size, so the mean is undefined
        mean1 = multivariate_hypergeom.mean(m=[1, 0, 0], n=2)
        assert_equal(mean1, [np.nan, np.nan, np.nan])

        mean2 = multivariate_hypergeom.mean(m=[[1, 0, 0], [1, 0, 1]], n=2)
        assert_allclose(mean2, [[np.nan, np.nan, np.nan], [1., 0., 1.]],
                        rtol=1e-17)

        mean3 = multivariate_hypergeom.mean(m=np.array([], dtype=int), n=0)
        assert_equal(mean3, [])
        assert_(mean3.shape == (0, ))

    def test_var_edge_cases(self):
        """var for empty, all-zero, and infeasible inputs."""
        var0 = multivariate_hypergeom.var(m=[0, 0, 0], n=0)
        assert_allclose(var0, [0., 0., 0.], rtol=1e-16)

        var1 = multivariate_hypergeom.var(m=[1, 0, 0], n=2)
        assert_equal(var1, [np.nan, np.nan, np.nan])

        var2 = multivariate_hypergeom.var(m=[[1, 0, 0], [1, 0, 1]], n=2)
        assert_allclose(var2, [[np.nan, np.nan, np.nan], [0., 0., 0.]],
                        rtol=1e-17)

        var3 = multivariate_hypergeom.var(m=np.array([], dtype=int), n=0)
        assert_equal(var3, [])
        assert_(var3.shape == (0, ))

    def test_cov_edge_cases(self):
        """cov for deterministic, all-zero, and empty inputs."""
        cov0 = multivariate_hypergeom.cov(m=[1, 0, 0], n=1)
        cov1 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
        assert_allclose(cov0, cov1, rtol=1e-17)

        cov3 = multivariate_hypergeom.cov(m=[0, 0, 0], n=0)
        cov4 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
        assert_equal(cov3, cov4)

        cov5 = multivariate_hypergeom.cov(m=np.array([], dtype=int), n=0)
        cov6 = np.array([], dtype=np.float64).reshape(0, 0)
        assert_allclose(cov5, cov6, rtol=1e-17)
        assert_(cov5.shape == (0, 0))

    def test_frozen(self):
        """The frozen distribution must agree with the direct method calls."""
        # The frozen distribution should agree with the regular one
        np.random.seed(1234)
        n = 12
        m = [7, 9, 11, 13]
        x = [[0, 0, 0, 12], [0, 0, 1, 11], [0, 1, 1, 10],
             [1, 1, 1, 9], [1, 1, 2, 8]]
        x = np.asarray(x, dtype=int)
        mhg_frozen = multivariate_hypergeom(m, n)
        assert_allclose(mhg_frozen.pmf(x),
                        multivariate_hypergeom.pmf(x, m, n))
        assert_allclose(mhg_frozen.logpmf(x),
                        multivariate_hypergeom.logpmf(x, m, n))
        assert_allclose(mhg_frozen.var(), multivariate_hypergeom.var(m, n))
        assert_allclose(mhg_frozen.cov(), multivariate_hypergeom.cov(m, n))

    def test_invalid_params(self):
        """Shape mismatches raise ValueError; non-integer inputs raise TypeError."""
        assert_raises(ValueError, multivariate_hypergeom.pmf, 5, 10, 5)
        assert_raises(ValueError, multivariate_hypergeom.pmf, 5, [10], 5)
        assert_raises(ValueError, multivariate_hypergeom.pmf, [5, 4], [10], 5)
        assert_raises(TypeError, multivariate_hypergeom.pmf, [5.5, 4.5],
                      [10, 15], 5)
        assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4],
                      [10.5, 15.5], 5)
        assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4],
                      [10, 15], 5.5)
|
3410
|
+
|
3411
|
+
|
3412
|
+
class TestRandomTable:
|
3413
|
+
def get_rng(self):
    """Return a fresh, deterministically-seeded Generator for each test."""
    seed = 628174795866951638
    return np.random.default_rng(seed)
|
3415
|
+
|
3416
|
+
def test_process_parameters(self):
    """_process_parameters must validate row/col and return (row, col, n)."""
    message = "`row` must be one-dimensional"
    with pytest.raises(ValueError, match=message):
        random_table([[1, 2]], [1, 2])

    message = "`col` must be one-dimensional"
    with pytest.raises(ValueError, match=message):
        random_table([1, 2], [[1, 2]])

    message = "each element of `row` must be non-negative"
    with pytest.raises(ValueError, match=message):
        random_table([1, -1], [1, 2])

    message = "each element of `col` must be non-negative"
    with pytest.raises(ValueError, match=message):
        random_table([1, 2], [1, -2])

    message = "sums over `row` and `col` must be equal"
    with pytest.raises(ValueError, match=message):
        random_table([1, 2], [1, 0])

    message = "each element of `row` must be an integer"
    with pytest.raises(ValueError, match=message):
        random_table([2.1, 2.1], [1, 1, 2])

    message = "each element of `col` must be an integer"
    with pytest.raises(ValueError, match=message):
        random_table([1, 2], [1.1, 1.1, 1])

    # valid input: margins pass through unchanged and n is their sum
    row = [1, 3]
    col = [2, 1, 1]
    r, c, n = random_table._process_parameters([1, 3], [2, 1, 1])
    assert_equal(row, r)
    assert_equal(col, c)
    assert n == np.sum(row)
|
3451
|
+
|
3452
|
+
@pytest.mark.parametrize("scale,method",
|
3453
|
+
((1, "boyett"), (100, "patefield")))
|
3454
|
+
def test_process_rvs_method_on_None(self, scale, method):
|
3455
|
+
row = np.array([1, 3]) * scale
|
3456
|
+
col = np.array([2, 1, 1]) * scale
|
3457
|
+
|
3458
|
+
ct = random_table
|
3459
|
+
expected = ct.rvs(row, col, method=method, random_state=1)
|
3460
|
+
got = ct.rvs(row, col, method=None, random_state=1)
|
3461
|
+
|
3462
|
+
assert_equal(expected, got)
|
3463
|
+
|
3464
|
+
def test_process_rvs_method_bad_argument(self):
    """An unrecognized `method` string must raise ValueError."""
    margins = ([1, 3], [2, 1, 1])

    # order of items in set is random, so cannot check the full message
    message = "'foo' not recognized, must be one of"
    with pytest.raises(ValueError, match=message):
        random_table.rvs(*margins, method="foo")
|
3472
|
+
|
3473
|
+
@pytest.mark.parametrize('frozen', (True, False))
@pytest.mark.parametrize('log', (True, False))
def test_pmf_logpmf(self, frozen, log):
    """Validate pmf/logpmf (frozen and direct) against Boyett samples."""
    # The pmf is tested through random sample generation
    # with Boyett's algorithm, whose implementation is simple
    # enough to verify manually for correctness.
    rng = self.get_rng()
    row = [2, 6]
    col = [1, 3, 4]
    rvs = random_table.rvs(row, col, size=1000,
                           method="boyett", random_state=rng)

    # normalize the four parametrized variants to a plain `pmf(x)` callable
    obj = random_table(row, col) if frozen else random_table
    method = getattr(obj, "logpmf" if log else "pmf")
    if not frozen:
        original_method = method

        def method(x):
            return original_method(x, row, col)
    pmf = (lambda x: np.exp(method(x))) if log else method

    unique_rvs, counts = np.unique(rvs, axis=0, return_counts=True)

    # rough accuracy check
    p = pmf(unique_rvs)
    assert_allclose(p * len(rvs), counts, rtol=0.1)

    # accept any iterable
    p2 = pmf(list(unique_rvs[0]))
    assert_equal(p2, p[0])

    # accept high-dimensional input and 2d input
    rvs_nd = rvs.reshape((10, 100) + rvs.shape[1:])
    p = pmf(rvs_nd)
    assert p.shape == (10, 100)
    for i in range(p.shape[0]):
        for j in range(p.shape[1]):
            pij = p[i, j]
            rvij = rvs_nd[i, j]
            qij = pmf(rvij)
            assert_equal(pij, qij)

    # probability is zero if column marginal does not match
    x = [[0, 1, 1], [2, 1, 3]]
    assert_equal(np.sum(x, axis=-1), row)
    p = pmf(x)
    assert p == 0

    # probability is zero if row marginal does not match
    x = [[0, 1, 2], [1, 2, 2]]
    assert_equal(np.sum(x, axis=-2), col)
    p = pmf(x)
    assert p == 0

    # response to invalid inputs
    message = "`x` must be at least two-dimensional"
    with pytest.raises(ValueError, match=message):
        pmf([1])

    message = "`x` must contain only integral values"
    with pytest.raises(ValueError, match=message):
        pmf([[1.1]])

    message = "`x` must contain only integral values"
    with pytest.raises(ValueError, match=message):
        pmf([[np.nan]])

    message = "`x` must contain only non-negative values"
    with pytest.raises(ValueError, match=message):
        pmf([[-1]])

    message = "shape of `x` must agree with `row`"
    with pytest.raises(ValueError, match=message):
        pmf([[1, 2, 3]])

    message = "shape of `x` must agree with `col`"
    with pytest.raises(ValueError, match=message):
        pmf([[1, 2],
             [3, 4]])
|
3552
|
+
|
3553
|
+
@pytest.mark.parametrize("method", ("boyett", "patefield"))
|
3554
|
+
def test_rvs_mean(self, method):
    """Sample mean must converge to mean() and every table must match the margins."""
    rng = self.get_rng()
    row = [2, 6]
    col = [1, 3, 4]
    samples = random_table.rvs(row, col, size=1000, method=method,
                               random_state=rng)
    expected = random_table.mean(row, col)
    assert_equal(np.sum(expected), np.sum(row))
    assert_allclose(samples.mean(0), expected, atol=0.05)
    # every generated table reproduces the requested margins exactly
    assert_equal(samples.sum(axis=-1), np.broadcast_to(row, (1000, 2)))
    assert_equal(samples.sum(axis=-2), np.broadcast_to(col, (1000, 3)))
|
3567
|
+
|
3568
|
+
def test_rvs_cov(self):
    """Boyett and Patefield samplers should agree on per-cell variances."""
    rng = self.get_rng()
    row = [2, 6]
    col = [1, 3, 4]
    draws = {}
    for algorithm in ("boyett", "patefield"):
        draws[algorithm] = random_table.rvs(row, col, size=10000,
                                            method=algorithm,
                                            random_state=rng)
    var_boyett = np.var(draws["boyett"], axis=0)
    var_patefield = np.var(draws["patefield"], axis=0)
    assert_allclose(var_boyett, var_patefield, atol=0.02)
|
3581
|
+
|
3582
|
+
@pytest.mark.parametrize("method", ("boyett", "patefield"))
def test_rvs_size(self, method):
    """rvs `size` handling: None, 0, 1, ints, tuples, and invalid values."""
    row = [2, 6]
    col = [1, 3, 4]

    # test size `None`
    rv = random_table.rvs(row, col, method=method,
                          random_state=self.get_rng())
    assert rv.shape == (2, 3)

    # test size 1
    rv2 = random_table.rvs(row, col, size=1, method=method,
                           random_state=self.get_rng())
    assert rv2.shape == (1, 2, 3)
    assert_equal(rv, rv2[0])

    # test size 0
    rv3 = random_table.rvs(row, col, size=0, method=method,
                           random_state=self.get_rng())
    assert rv3.shape == (0, 2, 3)

    # test other valid size
    rv4 = random_table.rvs(row, col, size=20, method=method,
                           random_state=self.get_rng())
    assert rv4.shape == (20, 2, 3)

    rv5 = random_table.rvs(row, col, size=(4, 5), method=method,
                           random_state=self.get_rng())
    assert rv5.shape == (4, 5, 2, 3)

    # a tuple size must draw the same tables as its flattened equivalent
    assert_allclose(rv5.reshape(20, 2, 3), rv4, rtol=1e-15)

    # test invalid size
    message = "`size` must be a non-negative integer or `None`"
    with pytest.raises(ValueError, match=message):
        random_table.rvs(row, col, size=-1, method=method,
                         random_state=self.get_rng())

    with pytest.raises(ValueError, match=message):
        random_table.rvs(row, col, size=np.nan, method=method,
                         random_state=self.get_rng())
|
3623
|
+
|
3624
|
+
@pytest.mark.parametrize("method", ("boyett", "patefield"))
def test_rvs_method(self, method):
    # This test assumes that pmf is correct and checks that random samples
    # follow this probability distribution. This seems like a circular
    # argument, since pmf is checked in test_pmf_logpmf with random samples
    # generated with the rvs method. This test is not redundant, because
    # test_pmf_logpmf intentionally uses rvs generation with Boyett only,
    # but here we test both Boyett and Patefield.
    row = [2, 6]
    col = [1, 3, 4]

    samples = random_table.rvs(row, col, size=100000, method=method,
                               random_state=self.get_rng())

    tables, frequencies = np.unique(samples, axis=0, return_counts=True)

    # observed frequencies should match those predicted by the pmf
    predicted = random_table.pmf(tables, row, col) * len(samples)
    assert_allclose(predicted, frequencies, rtol=0.02)
|
3644
|
+
|
3645
|
+
@pytest.mark.parametrize("method", ("boyett", "patefield"))
def test_rvs_with_zeros_in_col_row(self, method):
    # With these margins exactly one contingency table exists, so every
    # sample must equal that deterministic table.
    row = [0, 1, 0]
    col = [1, 0, 0, 0]
    dist = random_table(row, col)
    samples = dist.rvs(1000, method=method, random_state=self.get_rng())
    only_table = [[0, 0, 0, 0],
                  [1, 0, 0, 0],
                  [0, 0, 0, 0]]
    expected = np.broadcast_to(only_table, (1000, len(row), len(col)))
    assert_equal(samples, expected)
|
3656
|
+
|
3657
|
+
@pytest.mark.parametrize("method", (None, "boyett", "patefield"))
@pytest.mark.parametrize("col", ([], [0]))
@pytest.mark.parametrize("row", ([], [0]))
def test_rvs_with_edge_cases(self, method, row, col):
    # Degenerate margins (empty or all-zero) must yield all-zero tables
    # of the right shape for every generation method.
    dist = random_table(row, col)
    samples = dist.rvs(10, method=method, random_state=self.get_rng())
    assert_equal(samples, np.zeros((10, len(row), len(col))))
|
3665
|
+
|
3666
|
+
@pytest.mark.parametrize('v', (1, 2))
def test_rvs_rcont(self, v):
    # Exercise the internal low-level generators (rvs_rcont1/rvs_rcont2)
    # directly; they are also covered implicitly by the other test_rvs*
    # tests.
    import scipy.stats._rcont as _rcont

    row = np.array([1, 3], dtype=np.int64)
    col = np.array([2, 1, 1], dtype=np.int64)
    ntot = np.sum(row)

    sampler = getattr(_rcont, f"rvs_rcont{v}")
    result = sampler(row, col, ntot, 1, self.get_rng())

    # one table with the requested margins' total count
    assert result.shape == (1, len(row), len(col))
    assert np.sum(result) == ntot
|
3682
|
+
|
3683
|
+
def test_frozen(self):
|
3684
|
+
row = [2, 6]
|
3685
|
+
col = [1, 3, 4]
|
3686
|
+
d = random_table(row, col, seed=self.get_rng())
|
3687
|
+
|
3688
|
+
sample = d.rvs()
|
3689
|
+
|
3690
|
+
expected = random_table.mean(row, col)
|
3691
|
+
assert_equal(expected, d.mean())
|
3692
|
+
|
3693
|
+
expected = random_table.pmf(sample, row, col)
|
3694
|
+
assert_equal(expected, d.pmf(sample))
|
3695
|
+
|
3696
|
+
expected = random_table.logpmf(sample, row, col)
|
3697
|
+
assert_equal(expected, d.logpmf(sample))
|
3698
|
+
|
3699
|
+
@pytest.mark.parametrize("method", ("boyett", "patefield"))
def test_rvs_frozen(self, method):
    # A frozen distribution seeded identically to the rng passed to the
    # direct call must generate the identical sample stream.
    row, col = [2, 6], [1, 3, 4]
    frozen = random_table(row, col, seed=self.get_rng())

    expected = random_table.rvs(row, col, size=10, method=method,
                                random_state=self.get_rng())
    assert_equal(expected, frozen.rvs(size=10, method=method))
|
3709
|
+
|
3710
|
+
|
3711
|
+
def check_pickling(distfn, args):
    """Check that a distribution instance pickles and unpickles.

    Pays special attention to the ``random_state`` property: samples
    drawn after ``pickle.dumps`` must match samples drawn by the
    unpickled copy, i.e. the generator state survives the round-trip.
    """
    # Save the random_state and restore it in a finally block so that a
    # failing assertion cannot leak the mutated state into later checks
    # (the original restored it only on the success path).
    rndm = distfn.random_state

    try:
        distfn.random_state = 1234
        distfn.rvs(*args, size=8)  # advance the stream before pickling
        s = pickle.dumps(distfn)
        r0 = distfn.rvs(*args, size=8)

        unpickled = pickle.loads(s)
        r1 = unpickled.rvs(*args, size=8)
        assert_equal(r0, r1)
    finally:
        # restore the random_state
        distfn.random_state = rndm
|
3729
|
+
|
3730
|
+
|
3731
|
+
@pytest.mark.thread_unsafe
def test_random_state_property(num_parallel_threads):
    # Every multivariate distribution generator must honour the
    # random_state property and survive a pickle round-trip.
    # (num_parallel_threads is a fixture required by the mark.)
    scale = np.eye(3)
    scale[[0, 1], [1, 0]] = 0.5  # symmetric off-diagonal entries
    cases = (
        (multivariate_normal, ()),
        (dirichlet, (np.array([1.]),)),
        (wishart, (10, scale)),
        (invwishart, (10, scale)),
        (multinomial, (5, [0.5, 0.4, 0.1])),
        (ortho_group, (2,)),
        (special_ortho_group, (2,)),
    )
    for distfn, args in cases:
        check_random_state_property(distfn, args)
        check_pickling(distfn, args)
|
3748
|
+
|
3749
|
+
|
3750
|
+
class TestVonMises_Fisher:
    """Tests for the von Mises-Fisher distribution on the unit sphere
    (scipy.stats.vonmises_fisher)."""

    @pytest.mark.parametrize("dim", [2, 3, 4, 6])
    @pytest.mark.parametrize("size", [None, 1, 5, (5, 4)])
    def test_samples(self, dim, size):
        # test that samples have correct shape and norm 1
        rng = np.random.default_rng(2777937887058094419)
        mu = np.full((dim, ), 1/np.sqrt(dim))
        vmf_dist = vonmises_fisher(mu, 1, seed=rng)
        samples = vmf_dist.rvs(size)
        mean, cov = np.zeros(dim), np.eye(dim)
        # multivariate_normal's output shape is the reference for how
        # `size` maps to the sample shape
        expected_shape = rng.multivariate_normal(mean, cov, size=size).shape
        assert samples.shape == expected_shape
        norms = np.linalg.norm(samples, axis=-1)
        assert_allclose(norms, 1.)

    @pytest.mark.parametrize("dim", [5, 8])
    @pytest.mark.parametrize("kappa", [1e15, 1e20, 1e30])
    def test_sampling_high_concentration(self, dim, kappa):
        # test that no warnings are encountered for high values
        rng = np.random.default_rng(2777937887058094419)
        mu = np.full((dim, ), 1/np.sqrt(dim))
        vmf_dist = vonmises_fisher(mu, kappa, seed=rng)
        vmf_dist.rvs(10)

    def test_two_dimensional_mu(self):
        # a 2-d mean direction is rejected by input validation
        mu = np.ones((2, 2))
        msg = "'mu' must have one-dimensional shape."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher(mu, 1)

    def test_wrong_norm_mu(self):
        # the mean direction must be normalized
        mu = np.ones((2, ))
        msg = "'mu' must be a unit vector of norm 1."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher(mu, 1)

    def test_one_entry_mu(self):
        # the distribution is only defined for dimension >= 2
        mu = np.ones((1, ))
        msg = "'mu' must have at least two entries."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher(mu, 1)

    @pytest.mark.parametrize("kappa", [-1, (5, 3)])
    def test_kappa_validation(self, kappa):
        # negative or non-scalar concentration is rejected
        msg = "'kappa' must be a positive scalar."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher([1, 0], kappa)

    @pytest.mark.parametrize("kappa", [0, 0.])
    def test_kappa_zero(self, kappa):
        # kappa=0 is deliberately rejected with a pointer to
        # uniform_direction (the kappa->0 limit of the distribution)
        msg = ("For 'kappa=0' the von Mises-Fisher distribution "
               "becomes the uniform distribution on the sphere "
               "surface. Consider using 'scipy.stats.uniform_direction' "
               "instead.")
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher([1, 0], kappa)


    @pytest.mark.parametrize("method", [vonmises_fisher.pdf,
                                        vonmises_fisher.logpdf])
    def test_invalid_shapes_pdf_logpdf(self, method):
        # x lives on a 3-sphere but the distribution is 2-dimensional
        x = np.array([1., 0., 0])
        msg = ("The dimensionality of the last axis of 'x' must "
               "match the dimensionality of the von Mises Fisher "
               "distribution.")
        with pytest.raises(ValueError, match=msg):
            method(x, [1, 0], 1)

    @pytest.mark.parametrize("method", [vonmises_fisher.pdf,
                                        vonmises_fisher.logpdf])
    def test_unnormalized_input(self, method):
        # pdf/logpdf only accept unit vectors
        x = np.array([0.5, 0.])
        msg = "'x' must be unit vectors of norm 1 along last dimension."
        with pytest.raises(ValueError, match=msg):
            method(x, [1, 0], 1)

    # Expected values of the vonmises-fisher PDF were computed via mpmath
    # (the helper below returns the plain PDF despite its name):
    # from mpmath import mp
    # import numpy as np
    # mp.dps = 50
    # def logpdf_mpmath(x, mu, kappa):
    #     dim = mu.size
    #     halfdim = mp.mpf(0.5 * dim)
    #     kappa = mp.mpf(kappa)
    #     const = (kappa**(halfdim - mp.one)/((2*mp.pi)**halfdim * \
    #              mp.besseli(halfdim -mp.one, kappa)))
    #     return float(const * mp.exp(kappa*mp.fdot(x, mu)))

    @pytest.mark.parametrize('x, mu, kappa, reference',
                             [(np.array([1., 0., 0.]), np.array([1., 0., 0.]),
                               1e-4, 0.0795854295583605),
                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
                               1e-4, 0.07957747141331854),
                              (np.array([1., 0., 0.]), np.array([1., 0., 0.]),
                               100, 15.915494309189533),
                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
                               100, 5.920684802611232e-43),
                              (np.array([1., 0., 0.]),
                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.]),
                               2000, 5.930499050746588e-07),
                              (np.array([1., 0., 0]), np.array([1., 0., 0.]),
                               2000, 318.3098861837907),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([1., 0., 0., 0., 0.]),
                               2000, 101371.86957712633),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.,
                                         0, 0.]),
                               2000, 0.00018886808182653578),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([np.sqrt(0.8), np.sqrt(0.2), 0.,
                                         0, 0.]),
                               2000, 2.0255393314603194e-87)])
    def test_pdf_accuracy(self, x, mu, kappa, reference):
        pdf = vonmises_fisher(mu, kappa).pdf(x)
        assert_allclose(pdf, reference, rtol=1e-13)

    # Expected values of the vonmises-fisher logPDF were computed via mpmath
    # from mpmath import mp
    # import numpy as np
    # mp.dps = 50
    # def logpdf_mpmath(x, mu, kappa):
    #     dim = mu.size
    #     halfdim = mp.mpf(0.5 * dim)
    #     kappa = mp.mpf(kappa)
    #     two = mp.mpf(2.)
    #     const = (kappa**(halfdim - mp.one)/((two*mp.pi)**halfdim * \
    #              mp.besseli(halfdim - mp.one, kappa)))
    #     return float(mp.log(const * mp.exp(kappa*mp.fdot(x, mu))))

    @pytest.mark.parametrize('x, mu, kappa, reference',
                             [(np.array([1., 0., 0.]), np.array([1., 0., 0.]),
                               1e-4, -2.5309242486359573),
                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
                               1e-4, -2.5310242486359575),
                              (np.array([1., 0., 0.]), np.array([1., 0., 0.]),
                               100, 2.767293119578746),
                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
                               100, -97.23270688042125),
                              (np.array([1., 0., 0.]),
                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.]),
                               2000, -14.337987284534103),
                              (np.array([1., 0., 0]), np.array([1., 0., 0.]),
                               2000, 5.763025393132737),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([1., 0., 0., 0., 0.]),
                               2000, 11.526550911307156),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.,
                                         0, 0.]),
                               2000, -8.574461766359684),
                              (np.array([1., 0., 0., 0., 0.]),
                               np.array([np.sqrt(0.8), np.sqrt(0.2), 0.,
                                         0, 0.]),
                               2000, -199.61906708886113)])
    def test_logpdf_accuracy(self, x, mu, kappa, reference):
        logpdf = vonmises_fisher(mu, kappa).logpdf(x)
        assert_allclose(logpdf, reference, rtol=1e-14)

    # Expected values of the vonmises-fisher entropy were computed via mpmath
    # from mpmath import mp
    # import numpy as np
    # mp.dps = 50
    # def entropy_mpmath(dim, kappa):
    #     mu = np.full((dim, ), 1/np.sqrt(dim))
    #     kappa = mp.mpf(kappa)
    #     halfdim = mp.mpf(0.5 * dim)
    #     logconstant = (mp.log(kappa**(halfdim - mp.one)
    #                    /((2*mp.pi)**halfdim
    #                    * mp.besseli(halfdim -mp.one, kappa)))
    #     return float(-logconstant - kappa * mp.besseli(halfdim, kappa)/
    #                  mp.besseli(halfdim -1, kappa))

    @pytest.mark.parametrize('dim, kappa, reference',
                             [(3, 1e-4, 2.531024245302624),
                              (3, 100, -1.7672931195787458),
                              (5, 5000, -11.359032310024453),
                              (8, 1, 3.4189526482545527)])
    def test_entropy_accuracy(self, dim, kappa, reference):
        mu = np.full((dim, ), 1/np.sqrt(dim))
        entropy = vonmises_fisher(mu, kappa).entropy()
        assert_allclose(entropy, reference, rtol=2e-14)

    @pytest.mark.parametrize("method", [vonmises_fisher.pdf,
                                        vonmises_fisher.logpdf])
    def test_broadcasting(self, method):
        # test that pdf and logpdf values are correctly broadcasted
        testshape = (2, 2)
        rng = np.random.default_rng(2777937887058094419)
        x = uniform_direction(3).rvs(testshape, random_state=rng)
        mu = np.full((3, ), 1/np.sqrt(3))
        kappa = 5
        result_all = method(x, mu, kappa)
        assert result_all.shape == testshape
        for i in range(testshape[0]):
            for j in range(testshape[1]):
                current_val = method(x[i, j, :], mu, kappa)
                assert_allclose(current_val, result_all[i, j], rtol=1e-15)

    def test_vs_vonmises_2d(self):
        # test that in 2D, von Mises-Fisher yields the same results
        # as the von Mises distribution
        rng = np.random.default_rng(2777937887058094419)
        mu = np.array([0, 1])
        mu_angle = np.arctan2(mu[1], mu[0])
        kappa = 20
        vmf = vonmises_fisher(mu, kappa)
        vonmises_dist = vonmises(loc=mu_angle, kappa=kappa)
        vectors = uniform_direction(2).rvs(10, random_state=rng)
        angles = np.arctan2(vectors[:, 1], vectors[:, 0])
        assert_allclose(vonmises_dist.entropy(), vmf.entropy())
        assert_allclose(vonmises_dist.pdf(angles), vmf.pdf(vectors))
        assert_allclose(vonmises_dist.logpdf(angles), vmf.logpdf(vectors))

    @pytest.mark.parametrize("dim", [2, 3, 6])
    @pytest.mark.parametrize("kappa, mu_tol, kappa_tol",
                             [(1, 5e-2, 5e-2),
                              (10, 1e-2, 1e-2),
                              (100, 5e-3, 2e-2),
                              (1000, 1e-3, 2e-2)])
    def test_fit_accuracy(self, dim, kappa, mu_tol, kappa_tol):
        # fit() must recover the generating parameters from a large sample
        mu = np.full((dim, ), 1/np.sqrt(dim))
        vmf_dist = vonmises_fisher(mu, kappa)
        rng = np.random.default_rng(2777937887058094419)
        n_samples = 10000
        samples = vmf_dist.rvs(n_samples, random_state=rng)
        mu_fit, kappa_fit = vonmises_fisher.fit(samples)
        angular_error = np.arccos(mu.dot(mu_fit))
        assert_allclose(angular_error, 0., atol=mu_tol, rtol=0)
        assert_allclose(kappa, kappa_fit, rtol=kappa_tol)

    def test_fit_error_one_dimensional_data(self):
        # fit requires a 2-d array of samples
        x = np.zeros((3, ))
        msg = "'x' must be two dimensional."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher.fit(x)

    def test_fit_error_unnormalized_data(self):
        # fit requires unit-norm samples
        x = np.ones((3, 3))
        msg = "'x' must be unit vectors of norm 1 along last dimension."
        with pytest.raises(ValueError, match=msg):
            vonmises_fisher.fit(x)

    def test_frozen_distribution(self):
        # freezing with `seed` must match passing `random_state` to rvs
        mu = np.array([0, 0, 1])
        kappa = 5
        frozen = vonmises_fisher(mu, kappa)
        frozen_seed = vonmises_fisher(mu, kappa, seed=514)

        rvs1 = frozen.rvs(random_state=514)
        rvs2 = vonmises_fisher.rvs(mu, kappa, random_state=514)
        rvs3 = frozen_seed.rvs()

        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)
|
4005
|
+
|
4006
|
+
|
4007
|
+
class TestDirichletMultinomial:
    """Tests for scipy.stats.dirichlet_multinomial."""

    @classmethod
    def get_params(cls, m):
        # Deterministic random parameters: two concentration values and
        # `m` two-category count vectors whose rows sum to matching `n`.
        rng = np.random.default_rng(28469824356873456)
        alpha = rng.uniform(0, 100, size=2)
        x = rng.integers(1, 20, size=(m, 2))
        n = x.sum(axis=-1)
        return rng, m, alpha, n, x

    def test_frozen(self):
        # frozen methods must agree with the equivalent direct calls
        rng = np.random.default_rng(28469824356873456)

        alpha = rng.uniform(0, 100, 10)
        x = rng.integers(0, 10, 10)
        n = np.sum(x, axis=-1)

        d = dirichlet_multinomial(alpha, n)
        assert_equal(d.logpmf(x), dirichlet_multinomial.logpmf(x, alpha, n))
        assert_equal(d.pmf(x), dirichlet_multinomial.pmf(x, alpha, n))
        assert_equal(d.mean(), dirichlet_multinomial.mean(alpha, n))
        assert_equal(d.var(), dirichlet_multinomial.var(alpha, n))
        assert_equal(d.cov(), dirichlet_multinomial.cov(alpha, n))

    def test_pmf_logpmf_against_R(self):
        # # Compare PMF against R's extraDistr ddirmnon
        # # library(extraDistr)
        # # options(digits=16)
        # ddirmnom(c(1, 2, 3), 6, c(3, 4, 5))
        x = np.array([1, 2, 3])
        n = np.sum(x)
        alpha = np.array([3, 4, 5])
        res = dirichlet_multinomial.pmf(x, alpha, n)
        logres = dirichlet_multinomial.logpmf(x, alpha, n)
        ref = 0.08484162895927638
        assert_allclose(res, ref)
        assert_allclose(logres, np.log(ref))
        # scalar inputs must produce 0-d outputs
        assert res.shape == logres.shape == ()

        # library(extraDistr)
        # options(digits=16)
        # ddirmnom(c(4, 3, 2, 0, 2, 3, 5, 7, 4, 7), 37,
        #          c(45.01025314, 21.98739582, 15.14851365, 80.21588671,
        #            52.84935481, 25.20905262, 53.85373737, 4.88568118,
        #            89.06440654, 20.11359466))
        rng = np.random.default_rng(28469824356873456)
        alpha = rng.uniform(0, 100, 10)
        x = rng.integers(0, 10, 10)
        n = np.sum(x, axis=-1)
        res = dirichlet_multinomial(alpha, n).pmf(x)
        logres = dirichlet_multinomial.logpmf(x, alpha, n)
        ref = 3.65409306285992e-16
        assert_allclose(res, ref)
        assert_allclose(logres, np.log(ref))

    def test_pmf_logpmf_support(self):
        # when the sum of the category counts does not equal the number of
        # trials, the PMF is zero
        rng, m, alpha, n, x = self.get_params(1)
        n += 1
        assert_equal(dirichlet_multinomial(alpha, n).pmf(x), 0)
        assert_equal(dirichlet_multinomial(alpha, n).logpmf(x), -np.inf)

        rng, m, alpha, n, x = self.get_params(10)
        i = rng.random(size=10) > 0.5
        x[i] = np.round(x[i] * 2)  # sum of these x does not equal n
        assert_equal(dirichlet_multinomial(alpha, n).pmf(x)[i], 0)
        assert_equal(dirichlet_multinomial(alpha, n).logpmf(x)[i], -np.inf)
        assert np.all(dirichlet_multinomial(alpha, n).pmf(x)[~i] > 0)
        assert np.all(dirichlet_multinomial(alpha, n).logpmf(x)[~i] > -np.inf)

    def test_dimensionality_one(self):
        # if the dimensionality is one, there is only one possible outcome
        n = 6  # number of trials
        alpha = [10]  # concentration parameters
        x = np.asarray([n])  # counts
        dist = dirichlet_multinomial(alpha, n)

        assert_equal(dist.pmf(x), 1)
        assert_equal(dist.pmf(x+1), 0)
        assert_equal(dist.logpmf(x), 0)
        assert_equal(dist.logpmf(x+1), -np.inf)
        assert_equal(dist.mean(), n)
        assert_equal(dist.var(), 0)
        assert_equal(dist.cov(), 0)

    def test_n_is_zero(self):
        # similarly, only one possible outcome if n is zero
        n = 0
        alpha = np.asarray([1., 1.])
        x = np.asarray([0, 0])
        dist = dirichlet_multinomial(alpha, n)

        assert_equal(dist.pmf(x), 1)
        assert_equal(dist.pmf(x+1), 0)
        assert_equal(dist.logpmf(x), 0)
        assert_equal(dist.logpmf(x+1), -np.inf)
        assert_equal(dist.mean(), [0, 0])
        assert_equal(dist.var(), [0, 0])
        assert_equal(dist.cov(), [[0, 0], [0, 0]])

    @pytest.mark.parametrize('method_name', ['pmf', 'logpmf'])
    def test_against_betabinom_pmf(self, method_name):
        # the two-category case reduces to the beta-binomial distribution
        rng, m, alpha, n, x = self.get_params(100)

        method = getattr(dirichlet_multinomial(alpha, n), method_name)
        ref_method = getattr(stats.betabinom(n, *alpha.T), method_name)

        res = method(x)
        ref = ref_method(x.T[0])
        assert_allclose(res, ref)

    @pytest.mark.parametrize('method_name', ['mean', 'var'])
    def test_against_betabinom_moments(self, method_name):
        # first-category moments must match the beta-binomial's
        rng, m, alpha, n, x = self.get_params(100)

        method = getattr(dirichlet_multinomial(alpha, n), method_name)
        ref_method = getattr(stats.betabinom(n, *alpha.T), method_name)

        res = method()[:, 0]
        ref = ref_method()
        assert_allclose(res, ref)

    def test_moments(self):
        # check mean/var/cov against a large Monte Carlo sample
        rng = np.random.default_rng(28469824356873456)
        dim = 5
        n = rng.integers(1, 100)
        alpha = rng.random(size=dim) * 10
        dist = dirichlet_multinomial(alpha, n)

        # Generate a random sample from the distribution using NumPy
        m = 100000
        p = rng.dirichlet(alpha, size=m)
        x = rng.multinomial(n, p, size=m)

        assert_allclose(dist.mean(), np.mean(x, axis=0), rtol=5e-3)
        assert_allclose(dist.var(), np.var(x, axis=0), rtol=1e-2)
        assert dist.mean().shape == dist.var().shape == (dim,)

        cov = dist.cov()
        assert cov.shape == (dim, dim)
        assert_allclose(cov, np.cov(x.T), rtol=2e-2)
        assert_equal(np.diag(cov), dist.var())
        assert np.all(scipy.linalg.eigh(cov)[0] > 0)  # positive definite

    def test_input_validation(self):
        # valid inputs
        x0 = np.array([1, 2, 3])
        n0 = np.sum(x0)
        alpha0 = np.array([3, 4, 5])

        text = "`x` must contain only non-negative integers."
        with assert_raises(ValueError, match=text):
            dirichlet_multinomial.logpmf([1, -1, 3], alpha0, n0)
        with assert_raises(ValueError, match=text):
            dirichlet_multinomial.logpmf([1, 2.1, 3], alpha0, n0)

        text = "`alpha` must contain only positive values."
        with assert_raises(ValueError, match=text):
            dirichlet_multinomial.logpmf(x0, [3, 0, 4], n0)
        with assert_raises(ValueError, match=text):
            dirichlet_multinomial.logpmf(x0, [3, -1, 4], n0)

        text = "`n` must be a non-negative integer."
        with assert_raises(ValueError, match=text):
            dirichlet_multinomial.logpmf(x0, alpha0, 49.1)
        with assert_raises(ValueError, match=text):
            dirichlet_multinomial.logpmf(x0, alpha0, -1)

        x = np.array([1, 2, 3, 4])
        alpha = np.array([3, 4, 5])
        text = "`x` and `alpha` must be broadcastable."
        with assert_raises(ValueError, match=text):
            dirichlet_multinomial.logpmf(x, alpha, x.sum())

    @pytest.mark.parametrize('method', ['pmf', 'logpmf'])
    def test_broadcasting_pmf(self, method):
        # pmf/logpmf must broadcast x against alpha and n
        alpha = np.array([[3, 4, 5], [4, 5, 6], [5, 5, 7], [8, 9, 10]])
        n = np.array([[6], [7], [8]])
        x = np.array([[1, 2, 3], [2, 2, 3]]).reshape((2, 1, 1, 3))
        method = getattr(dirichlet_multinomial, method)
        res = method(x, alpha, n)
        assert res.shape == (2, 3, 4)
        for i in range(len(x)):
            for j in range(len(n)):
                for k in range(len(alpha)):
                    res_ijk = res[i, j, k]
                    ref = method(x[i].squeeze(), alpha[k].squeeze(), n[j].squeeze())
                    assert_allclose(res_ijk, ref)

    @pytest.mark.parametrize('method_name', ['mean', 'var', 'cov'])
    def test_broadcasting_moments(self, method_name):
        # moment methods must broadcast alpha against n
        alpha = np.array([[3, 4, 5], [4, 5, 6], [5, 5, 7], [8, 9, 10]])
        n = np.array([[6], [7], [8]])
        method = getattr(dirichlet_multinomial, method_name)
        res = method(alpha, n)
        # Bug fix: the conditional must be parenthesized. The previous
        # `assert res.shape == (3, 4, 3) if method_name != 'cov' else ...`
        # parsed as a conditional expression around the whole assert, so
        # for 'cov' it reduced to `assert (3, 4, 3, 3)` — always true —
        # and the covariance shape was never actually checked.
        expected_shape = (3, 4, 3) if method_name != 'cov' else (3, 4, 3, 3)
        assert res.shape == expected_shape
        for j in range(len(n)):
            for k in range(len(alpha)):
                res_ijk = res[j, k]
                ref = method(alpha[k].squeeze(), n[j].squeeze())
                assert_allclose(res_ijk, ref)
|
4208
|
+
|
4209
|
+
|
4210
|
+
class TestNormalInverseGamma:
|
4211
|
+
|
4212
|
+
def test_marginal_x(self):
    # According to [1], sqrt(a * lmbda / b) * (x - u) should follow a
    # t-distribution with 2*a degrees of freedom. Check that both the
    # PDF (with s2 integrated out) and the random variates agree with
    # that marginal.
    rng = np.random.default_rng(8925849245)
    mu, lmbda, a, b = rng.random(4)

    norm_inv_gamma = stats.normal_inverse_gamma(mu, lmbda, a, b)
    t = stats.t(2*a, loc=mu, scale=1/np.sqrt(a * lmbda / b))

    # PDF: integrate the joint density over s2 in (0, inf)
    x = np.linspace(-5, 5, 11)
    res = tanhsinh(lambda s2, x: norm_inv_gamma.pdf(x, s2), 0, np.inf, args=(x,))
    assert_allclose(res.integral, t.pdf(x))

    # RVS: the x component must pass a KS test against the t marginal
    samples = norm_inv_gamma.rvs(size=10000, random_state=rng)
    _, pvalue = stats.ks_1samp(samples[0], t.cdf)
    assert pvalue > 0.1
|
4232
|
+
|
4233
|
+
def test_marginal_s2(self):
    # According to [1], s2 should follow an inverse gamma distribution
    # with shape a and scale b (our parameterization). Check that both
    # the PDF (with x integrated out) and the random variates agree
    # with that marginal.
    rng = np.random.default_rng(8925849245)
    mu, lmbda, a, b = rng.random(4)

    norm_inv_gamma = stats.normal_inverse_gamma(mu, lmbda, a, b)
    inv_gamma = stats.invgamma(a, scale=b)

    # PDF: integrate the joint density over x in (-inf, inf)
    s2 = np.linspace(0.1, 10, 10)
    res = tanhsinh(lambda x, s2: norm_inv_gamma.pdf(x, s2),
                   -np.inf, np.inf, args=(s2,))
    assert_allclose(res.integral, inv_gamma.pdf(s2))

    # RVS: the s2 component must pass a KS test against the marginal
    samples = norm_inv_gamma.rvs(size=10000, random_state=rng)
    _, pvalue = stats.ks_1samp(samples[1], inv_gamma.cdf)
    assert pvalue > 0.1
|
4254
|
+
|
4255
|
+
def test_pdf_logpdf(self):
|
4256
|
+
# Check that PDF and log-PDF are consistent
|
4257
|
+
rng = np.random.default_rng(8925849245)
|
4258
|
+
mu, lmbda, a, b = rng.random((4, 20)) - 0.25 # make some invalid
|
4259
|
+
x, s2 = rng.random(size=(2, 20)) - 0.25
|
4260
|
+
res = stats.normal_inverse_gamma(mu, lmbda, a, b).pdf(x, s2)
|
4261
|
+
ref = stats.normal_inverse_gamma.logpdf(x, s2, mu, lmbda, a, b)
|
4262
|
+
assert_allclose(res, np.exp(ref))
|
4263
|
+
|
4264
|
+
def test_invalid_and_special_cases(self):
    # Test cases that are handled by input validation rather than the
    # formulas.
    rng = np.random.default_rng(8925849245)
    mu, lmbda, a, b = rng.random(4)
    x, s2 = rng.random(2)

    # one invalid shape parameter at a time -> NaN from the PDF
    for params in [(np.nan, lmbda, a, b),
                   (mu, -1, a, b),
                   (mu, lmbda, 0, b),
                   (mu, lmbda, a, -1)]:
        assert_equal(stats.normal_inverse_gamma(*params).pdf(x, s2), np.nan)

    # out-of-support s2 with valid parameters -> PDF is 0
    assert_equal(stats.normal_inverse_gamma(mu, lmbda, a, b).pdf(x, -1), 0)

    # PDF with out-of-support s2 is not zero if shape parameter is invalid
    res = stats.normal_inverse_gamma(mu, [-1, np.nan], a, b).pdf(x, -1)
    assert_equal(res, np.nan)

    # invalid parameters propagate NaN through the moments, too
    assert_equal(stats.normal_inverse_gamma(mu, -1, a, b).mean(),
                 (np.nan, np.nan))

    assert_equal(stats.normal_inverse_gamma(mu, lmbda, -1, b).var(),
                 (np.nan, np.nan))

    with pytest.raises(ValueError, match="Domain error in arguments..."):
        stats.normal_inverse_gamma(mu, lmbda, a, -1).rvs()
|
4297
|
+
|
4298
|
+
def test_broadcasting(self):
    # Methods must broadcast array parameters. Roughly speaking, output
    # shapes are the broadcasted shape of all arguments, and the raveled
    # outputs must equal the outputs computed from raveled inputs.
    rng = np.random.default_rng(8925849245)
    b = rng.random(2)
    a = rng.random((3, 1)) + 2  # for defined moments
    lmbda = rng.random((4, 1, 1))
    mu = rng.random((5, 1, 1, 1))
    s2 = rng.random((6, 1, 1, 1, 1))
    x = rng.random((7, 1, 1, 1, 1, 1))
    dist = stats.normal_inverse_gamma(mu, lmbda, a, b)

    # PDF and log-PDF
    full = np.broadcast_arrays(x, s2, mu, lmbda, a, b)
    full_raveled = [np.ravel(arr) for arr in full]

    res = dist.pdf(x, s2)
    assert res.shape == full[0].shape
    assert_allclose(res.ravel(),
                    stats.normal_inverse_gamma.pdf(*full_raveled))

    res = dist.logpdf(x, s2)
    assert res.shape == full[0].shape
    assert_allclose(res.ravel(),
                    stats.normal_inverse_gamma.logpdf(*full_raveled))

    # moments broadcast only over the four shape parameters
    params = np.broadcast_arrays(mu, lmbda, a, b)
    params_raveled = [np.ravel(arr) for arr in params]

    res = dist.mean()
    assert res[0].shape == params[0].shape
    assert_allclose((res[0].ravel(), res[1].ravel()),
                    stats.normal_inverse_gamma.mean(*params_raveled))

    res = dist.var()
    assert res[0].shape == params[0].shape
    assert_allclose((res[0].ravel(), res[1].ravel()),
                    stats.normal_inverse_gamma.var(*params_raveled))

    # RVS: identical rng seeds must give identical (reshaped) streams
    size = (6, 5, 4, 3, 2)
    rng = np.random.default_rng(2348923985324)
    res = dist.rvs(size=size, random_state=rng)
    rng = np.random.default_rng(2348923985324)
    shape = 6, 5*4*3*2
    ref = stats.normal_inverse_gamma.rvs(*params_raveled, size=shape,
                                         random_state=rng)
    assert_allclose((res[0].reshape(shape), res[1].reshape(shape)), ref)
|
4348
|
+
|
4349
|
+
@pytest.mark.slow
@pytest.mark.fail_slow(10)
def test_moments(self):
    # Compare mean() against double quadrature over the joint PDF.
    rng = np.random.default_rng(8925849245)
    mu, lmbda, a, b = rng.random(4)
    a += 2  # ensure defined

    dist = stats.normal_inverse_gamma(mu, lmbda, a, b)
    res = dist.mean()

    # E[x]: integrate x * pdf over the full support
    ref_x = dblquad(lambda s2, x: dist.pdf(x, s2) * x,
                    -np.inf, np.inf, 0, np.inf)
    assert_allclose(res[0], ref_x[0], rtol=1e-6)

    # E[s2]: integrate s2 * pdf over the full support
    ref_s2 = dblquad(lambda s2, x: dist.pdf(x, s2) * s2,
                     -np.inf, np.inf, 0, np.inf)
    assert_allclose(res[1], ref_s2[0], rtol=1e-6)
|
4365
|
+
|
4366
|
+
@pytest.mark.parametrize('dtype', [np.int32, np.float16, np.float32, np.float64])
|
4367
|
+
def test_dtype(self, dtype):
|
4368
|
+
if np.__version__ < "2":
|
4369
|
+
pytest.skip("Scalar dtypes only respected after NEP 50.")
|
4370
|
+
rng = np.random.default_rng(8925849245)
|
4371
|
+
x, s2, mu, lmbda, a, b = rng.uniform(3, 10, size=6).astype(dtype)
|
4372
|
+
dtype_out = np.result_type(1.0, dtype)
|
4373
|
+
dist = stats.normal_inverse_gamma(mu, lmbda, a, b)
|
4374
|
+
assert dist.rvs()[0].dtype == dtype_out
|
4375
|
+
assert dist.rvs()[1].dtype == dtype_out
|
4376
|
+
assert dist.mean()[0].dtype == dtype_out
|
4377
|
+
assert dist.mean()[1].dtype == dtype_out
|
4378
|
+
assert dist.var()[0].dtype == dtype_out
|
4379
|
+
assert dist.var()[1].dtype == dtype_out
|
4380
|
+
assert dist.logpdf(x, s2).dtype == dtype_out
|
4381
|
+
assert dist.pdf(x, s2).dtype == dtype_out
|