scipy-1.16.2-cp311-cp311-win_arm64.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scipy/__config__.py +161 -0
- scipy/__init__.py +150 -0
- scipy/_cyutility.cp311-win_arm64.lib +0 -0
- scipy/_cyutility.cp311-win_arm64.pyd +0 -0
- scipy/_distributor_init.py +18 -0
- scipy/_lib/__init__.py +14 -0
- scipy/_lib/_array_api.py +931 -0
- scipy/_lib/_array_api_compat_vendor.py +9 -0
- scipy/_lib/_array_api_no_0d.py +103 -0
- scipy/_lib/_bunch.py +229 -0
- scipy/_lib/_ccallback.py +251 -0
- scipy/_lib/_ccallback_c.cp311-win_arm64.lib +0 -0
- scipy/_lib/_ccallback_c.cp311-win_arm64.pyd +0 -0
- scipy/_lib/_disjoint_set.py +254 -0
- scipy/_lib/_docscrape.py +761 -0
- scipy/_lib/_elementwise_iterative_method.py +346 -0
- scipy/_lib/_fpumode.cp311-win_arm64.lib +0 -0
- scipy/_lib/_fpumode.cp311-win_arm64.pyd +0 -0
- scipy/_lib/_gcutils.py +105 -0
- scipy/_lib/_pep440.py +487 -0
- scipy/_lib/_sparse.py +41 -0
- scipy/_lib/_test_ccallback.cp311-win_arm64.lib +0 -0
- scipy/_lib/_test_ccallback.cp311-win_arm64.pyd +0 -0
- scipy/_lib/_test_deprecation_call.cp311-win_arm64.lib +0 -0
- scipy/_lib/_test_deprecation_call.cp311-win_arm64.pyd +0 -0
- scipy/_lib/_test_deprecation_def.cp311-win_arm64.lib +0 -0
- scipy/_lib/_test_deprecation_def.cp311-win_arm64.pyd +0 -0
- scipy/_lib/_testutils.py +373 -0
- scipy/_lib/_threadsafety.py +58 -0
- scipy/_lib/_tmpdirs.py +86 -0
- scipy/_lib/_uarray/LICENSE +29 -0
- scipy/_lib/_uarray/__init__.py +116 -0
- scipy/_lib/_uarray/_backend.py +707 -0
- scipy/_lib/_uarray/_uarray.cp311-win_arm64.lib +0 -0
- scipy/_lib/_uarray/_uarray.cp311-win_arm64.pyd +0 -0
- scipy/_lib/_util.py +1283 -0
- scipy/_lib/array_api_compat/__init__.py +22 -0
- scipy/_lib/array_api_compat/_internal.py +59 -0
- scipy/_lib/array_api_compat/common/__init__.py +1 -0
- scipy/_lib/array_api_compat/common/_aliases.py +727 -0
- scipy/_lib/array_api_compat/common/_fft.py +213 -0
- scipy/_lib/array_api_compat/common/_helpers.py +1058 -0
- scipy/_lib/array_api_compat/common/_linalg.py +232 -0
- scipy/_lib/array_api_compat/common/_typing.py +192 -0
- scipy/_lib/array_api_compat/cupy/__init__.py +13 -0
- scipy/_lib/array_api_compat/cupy/_aliases.py +156 -0
- scipy/_lib/array_api_compat/cupy/_info.py +336 -0
- scipy/_lib/array_api_compat/cupy/_typing.py +31 -0
- scipy/_lib/array_api_compat/cupy/fft.py +36 -0
- scipy/_lib/array_api_compat/cupy/linalg.py +49 -0
- scipy/_lib/array_api_compat/dask/__init__.py +0 -0
- scipy/_lib/array_api_compat/dask/array/__init__.py +12 -0
- scipy/_lib/array_api_compat/dask/array/_aliases.py +376 -0
- scipy/_lib/array_api_compat/dask/array/_info.py +416 -0
- scipy/_lib/array_api_compat/dask/array/fft.py +21 -0
- scipy/_lib/array_api_compat/dask/array/linalg.py +72 -0
- scipy/_lib/array_api_compat/numpy/__init__.py +28 -0
- scipy/_lib/array_api_compat/numpy/_aliases.py +190 -0
- scipy/_lib/array_api_compat/numpy/_info.py +366 -0
- scipy/_lib/array_api_compat/numpy/_typing.py +30 -0
- scipy/_lib/array_api_compat/numpy/fft.py +35 -0
- scipy/_lib/array_api_compat/numpy/linalg.py +143 -0
- scipy/_lib/array_api_compat/torch/__init__.py +22 -0
- scipy/_lib/array_api_compat/torch/_aliases.py +855 -0
- scipy/_lib/array_api_compat/torch/_info.py +369 -0
- scipy/_lib/array_api_compat/torch/_typing.py +3 -0
- scipy/_lib/array_api_compat/torch/fft.py +85 -0
- scipy/_lib/array_api_compat/torch/linalg.py +121 -0
- scipy/_lib/array_api_extra/__init__.py +38 -0
- scipy/_lib/array_api_extra/_delegation.py +171 -0
- scipy/_lib/array_api_extra/_lib/__init__.py +1 -0
- scipy/_lib/array_api_extra/_lib/_at.py +463 -0
- scipy/_lib/array_api_extra/_lib/_backends.py +46 -0
- scipy/_lib/array_api_extra/_lib/_funcs.py +937 -0
- scipy/_lib/array_api_extra/_lib/_lazy.py +357 -0
- scipy/_lib/array_api_extra/_lib/_testing.py +278 -0
- scipy/_lib/array_api_extra/_lib/_utils/__init__.py +1 -0
- scipy/_lib/array_api_extra/_lib/_utils/_compat.py +74 -0
- scipy/_lib/array_api_extra/_lib/_utils/_compat.pyi +45 -0
- scipy/_lib/array_api_extra/_lib/_utils/_helpers.py +559 -0
- scipy/_lib/array_api_extra/_lib/_utils/_typing.py +10 -0
- scipy/_lib/array_api_extra/_lib/_utils/_typing.pyi +105 -0
- scipy/_lib/array_api_extra/testing.py +359 -0
- scipy/_lib/cobyqa/__init__.py +20 -0
- scipy/_lib/cobyqa/framework.py +1240 -0
- scipy/_lib/cobyqa/main.py +1506 -0
- scipy/_lib/cobyqa/models.py +1529 -0
- scipy/_lib/cobyqa/problem.py +1296 -0
- scipy/_lib/cobyqa/settings.py +132 -0
- scipy/_lib/cobyqa/subsolvers/__init__.py +14 -0
- scipy/_lib/cobyqa/subsolvers/geometry.py +387 -0
- scipy/_lib/cobyqa/subsolvers/optim.py +1203 -0
- scipy/_lib/cobyqa/utils/__init__.py +18 -0
- scipy/_lib/cobyqa/utils/exceptions.py +22 -0
- scipy/_lib/cobyqa/utils/math.py +77 -0
- scipy/_lib/cobyqa/utils/versions.py +67 -0
- scipy/_lib/decorator.py +399 -0
- scipy/_lib/deprecation.py +274 -0
- scipy/_lib/doccer.py +366 -0
- scipy/_lib/messagestream.cp311-win_arm64.lib +0 -0
- scipy/_lib/messagestream.cp311-win_arm64.pyd +0 -0
- scipy/_lib/pyprima/__init__.py +212 -0
- scipy/_lib/pyprima/cobyla/__init__.py +0 -0
- scipy/_lib/pyprima/cobyla/cobyla.py +559 -0
- scipy/_lib/pyprima/cobyla/cobylb.py +714 -0
- scipy/_lib/pyprima/cobyla/geometry.py +226 -0
- scipy/_lib/pyprima/cobyla/initialize.py +215 -0
- scipy/_lib/pyprima/cobyla/trustregion.py +492 -0
- scipy/_lib/pyprima/cobyla/update.py +289 -0
- scipy/_lib/pyprima/common/__init__.py +0 -0
- scipy/_lib/pyprima/common/_bounds.py +34 -0
- scipy/_lib/pyprima/common/_linear_constraints.py +46 -0
- scipy/_lib/pyprima/common/_nonlinear_constraints.py +54 -0
- scipy/_lib/pyprima/common/_project.py +173 -0
- scipy/_lib/pyprima/common/checkbreak.py +93 -0
- scipy/_lib/pyprima/common/consts.py +47 -0
- scipy/_lib/pyprima/common/evaluate.py +99 -0
- scipy/_lib/pyprima/common/history.py +38 -0
- scipy/_lib/pyprima/common/infos.py +30 -0
- scipy/_lib/pyprima/common/linalg.py +435 -0
- scipy/_lib/pyprima/common/message.py +290 -0
- scipy/_lib/pyprima/common/powalg.py +131 -0
- scipy/_lib/pyprima/common/preproc.py +277 -0
- scipy/_lib/pyprima/common/present.py +5 -0
- scipy/_lib/pyprima/common/ratio.py +54 -0
- scipy/_lib/pyprima/common/redrho.py +47 -0
- scipy/_lib/pyprima/common/selectx.py +296 -0
- scipy/_lib/tests/__init__.py +0 -0
- scipy/_lib/tests/test__gcutils.py +110 -0
- scipy/_lib/tests/test__pep440.py +67 -0
- scipy/_lib/tests/test__testutils.py +32 -0
- scipy/_lib/tests/test__threadsafety.py +51 -0
- scipy/_lib/tests/test__util.py +641 -0
- scipy/_lib/tests/test_array_api.py +322 -0
- scipy/_lib/tests/test_bunch.py +169 -0
- scipy/_lib/tests/test_ccallback.py +196 -0
- scipy/_lib/tests/test_config.py +45 -0
- scipy/_lib/tests/test_deprecation.py +10 -0
- scipy/_lib/tests/test_doccer.py +143 -0
- scipy/_lib/tests/test_import_cycles.py +18 -0
- scipy/_lib/tests/test_public_api.py +482 -0
- scipy/_lib/tests/test_scipy_version.py +28 -0
- scipy/_lib/tests/test_tmpdirs.py +48 -0
- scipy/_lib/tests/test_warnings.py +137 -0
- scipy/_lib/uarray.py +31 -0
- scipy/cluster/__init__.py +31 -0
- scipy/cluster/_hierarchy.cp311-win_arm64.lib +0 -0
- scipy/cluster/_hierarchy.cp311-win_arm64.pyd +0 -0
- scipy/cluster/_optimal_leaf_ordering.cp311-win_arm64.lib +0 -0
- scipy/cluster/_optimal_leaf_ordering.cp311-win_arm64.pyd +0 -0
- scipy/cluster/_vq.cp311-win_arm64.lib +0 -0
- scipy/cluster/_vq.cp311-win_arm64.pyd +0 -0
- scipy/cluster/hierarchy.py +4348 -0
- scipy/cluster/tests/__init__.py +0 -0
- scipy/cluster/tests/hierarchy_test_data.py +145 -0
- scipy/cluster/tests/test_disjoint_set.py +202 -0
- scipy/cluster/tests/test_hierarchy.py +1238 -0
- scipy/cluster/tests/test_vq.py +434 -0
- scipy/cluster/vq.py +832 -0
- scipy/conftest.py +683 -0
- scipy/constants/__init__.py +358 -0
- scipy/constants/_codata.py +2266 -0
- scipy/constants/_constants.py +369 -0
- scipy/constants/codata.py +21 -0
- scipy/constants/constants.py +53 -0
- scipy/constants/tests/__init__.py +0 -0
- scipy/constants/tests/test_codata.py +78 -0
- scipy/constants/tests/test_constants.py +83 -0
- scipy/datasets/__init__.py +90 -0
- scipy/datasets/_download_all.py +71 -0
- scipy/datasets/_fetchers.py +225 -0
- scipy/datasets/_registry.py +26 -0
- scipy/datasets/_utils.py +81 -0
- scipy/datasets/tests/__init__.py +0 -0
- scipy/datasets/tests/test_data.py +128 -0
- scipy/differentiate/__init__.py +27 -0
- scipy/differentiate/_differentiate.py +1129 -0
- scipy/differentiate/tests/__init__.py +0 -0
- scipy/differentiate/tests/test_differentiate.py +694 -0
- scipy/fft/__init__.py +114 -0
- scipy/fft/_backend.py +196 -0
- scipy/fft/_basic.py +1650 -0
- scipy/fft/_basic_backend.py +197 -0
- scipy/fft/_debug_backends.py +22 -0
- scipy/fft/_fftlog.py +223 -0
- scipy/fft/_fftlog_backend.py +200 -0
- scipy/fft/_helper.py +348 -0
- scipy/fft/_pocketfft/LICENSE.md +25 -0
- scipy/fft/_pocketfft/__init__.py +9 -0
- scipy/fft/_pocketfft/basic.py +251 -0
- scipy/fft/_pocketfft/helper.py +249 -0
- scipy/fft/_pocketfft/pypocketfft.cp311-win_arm64.lib +0 -0
- scipy/fft/_pocketfft/pypocketfft.cp311-win_arm64.pyd +0 -0
- scipy/fft/_pocketfft/realtransforms.py +109 -0
- scipy/fft/_pocketfft/tests/__init__.py +0 -0
- scipy/fft/_pocketfft/tests/test_basic.py +1011 -0
- scipy/fft/_pocketfft/tests/test_real_transforms.py +505 -0
- scipy/fft/_realtransforms.py +706 -0
- scipy/fft/_realtransforms_backend.py +63 -0
- scipy/fft/tests/__init__.py +0 -0
- scipy/fft/tests/mock_backend.py +96 -0
- scipy/fft/tests/test_backend.py +98 -0
- scipy/fft/tests/test_basic.py +504 -0
- scipy/fft/tests/test_fftlog.py +215 -0
- scipy/fft/tests/test_helper.py +558 -0
- scipy/fft/tests/test_multithreading.py +84 -0
- scipy/fft/tests/test_real_transforms.py +247 -0
- scipy/fftpack/__init__.py +103 -0
- scipy/fftpack/_basic.py +428 -0
- scipy/fftpack/_helper.py +115 -0
- scipy/fftpack/_pseudo_diffs.py +554 -0
- scipy/fftpack/_realtransforms.py +598 -0
- scipy/fftpack/basic.py +20 -0
- scipy/fftpack/convolve.cp311-win_arm64.lib +0 -0
- scipy/fftpack/convolve.cp311-win_arm64.pyd +0 -0
- scipy/fftpack/helper.py +19 -0
- scipy/fftpack/pseudo_diffs.py +22 -0
- scipy/fftpack/realtransforms.py +19 -0
- scipy/fftpack/tests/__init__.py +0 -0
- scipy/fftpack/tests/fftw_double_ref.npz +0 -0
- scipy/fftpack/tests/fftw_longdouble_ref.npz +0 -0
- scipy/fftpack/tests/fftw_single_ref.npz +0 -0
- scipy/fftpack/tests/test.npz +0 -0
- scipy/fftpack/tests/test_basic.py +877 -0
- scipy/fftpack/tests/test_helper.py +54 -0
- scipy/fftpack/tests/test_import.py +33 -0
- scipy/fftpack/tests/test_pseudo_diffs.py +388 -0
- scipy/fftpack/tests/test_real_transforms.py +836 -0
- scipy/integrate/__init__.py +122 -0
- scipy/integrate/_bvp.py +1160 -0
- scipy/integrate/_cubature.py +729 -0
- scipy/integrate/_dop.cp311-win_arm64.lib +0 -0
- scipy/integrate/_dop.cp311-win_arm64.pyd +0 -0
- scipy/integrate/_ivp/__init__.py +8 -0
- scipy/integrate/_ivp/base.py +290 -0
- scipy/integrate/_ivp/bdf.py +478 -0
- scipy/integrate/_ivp/common.py +451 -0
- scipy/integrate/_ivp/dop853_coefficients.py +193 -0
- scipy/integrate/_ivp/ivp.py +755 -0
- scipy/integrate/_ivp/lsoda.py +224 -0
- scipy/integrate/_ivp/radau.py +572 -0
- scipy/integrate/_ivp/rk.py +601 -0
- scipy/integrate/_ivp/tests/__init__.py +0 -0
- scipy/integrate/_ivp/tests/test_ivp.py +1287 -0
- scipy/integrate/_ivp/tests/test_rk.py +37 -0
- scipy/integrate/_lebedev.py +5450 -0
- scipy/integrate/_lsoda.cp311-win_arm64.lib +0 -0
- scipy/integrate/_lsoda.cp311-win_arm64.pyd +0 -0
- scipy/integrate/_ode.py +1395 -0
- scipy/integrate/_odepack.cp311-win_arm64.lib +0 -0
- scipy/integrate/_odepack.cp311-win_arm64.pyd +0 -0
- scipy/integrate/_odepack_py.py +273 -0
- scipy/integrate/_quad_vec.py +674 -0
- scipy/integrate/_quadpack.cp311-win_arm64.lib +0 -0
- scipy/integrate/_quadpack.cp311-win_arm64.pyd +0 -0
- scipy/integrate/_quadpack_py.py +1283 -0
- scipy/integrate/_quadrature.py +1336 -0
- scipy/integrate/_rules/__init__.py +12 -0
- scipy/integrate/_rules/_base.py +518 -0
- scipy/integrate/_rules/_gauss_kronrod.py +202 -0
- scipy/integrate/_rules/_gauss_legendre.py +62 -0
- scipy/integrate/_rules/_genz_malik.py +210 -0
- scipy/integrate/_tanhsinh.py +1385 -0
- scipy/integrate/_test_multivariate.cp311-win_arm64.lib +0 -0
- scipy/integrate/_test_multivariate.cp311-win_arm64.pyd +0 -0
- scipy/integrate/_test_odeint_banded.cp311-win_arm64.lib +0 -0
- scipy/integrate/_test_odeint_banded.cp311-win_arm64.pyd +0 -0
- scipy/integrate/_vode.cp311-win_arm64.lib +0 -0
- scipy/integrate/_vode.cp311-win_arm64.pyd +0 -0
- scipy/integrate/dop.py +15 -0
- scipy/integrate/lsoda.py +15 -0
- scipy/integrate/odepack.py +17 -0
- scipy/integrate/quadpack.py +23 -0
- scipy/integrate/tests/__init__.py +0 -0
- scipy/integrate/tests/test__quad_vec.py +211 -0
- scipy/integrate/tests/test_banded_ode_solvers.py +305 -0
- scipy/integrate/tests/test_bvp.py +714 -0
- scipy/integrate/tests/test_cubature.py +1375 -0
- scipy/integrate/tests/test_integrate.py +840 -0
- scipy/integrate/tests/test_odeint_jac.py +74 -0
- scipy/integrate/tests/test_quadpack.py +680 -0
- scipy/integrate/tests/test_quadrature.py +730 -0
- scipy/integrate/tests/test_tanhsinh.py +1171 -0
- scipy/integrate/vode.py +15 -0
- scipy/interpolate/__init__.py +228 -0
- scipy/interpolate/_bary_rational.py +715 -0
- scipy/interpolate/_bsplines.py +2469 -0
- scipy/interpolate/_cubic.py +973 -0
- scipy/interpolate/_dfitpack.cp311-win_arm64.lib +0 -0
- scipy/interpolate/_dfitpack.cp311-win_arm64.pyd +0 -0
- scipy/interpolate/_dierckx.cp311-win_arm64.lib +0 -0
- scipy/interpolate/_dierckx.cp311-win_arm64.pyd +0 -0
- scipy/interpolate/_fitpack.cp311-win_arm64.lib +0 -0
- scipy/interpolate/_fitpack.cp311-win_arm64.pyd +0 -0
- scipy/interpolate/_fitpack2.py +2397 -0
- scipy/interpolate/_fitpack_impl.py +811 -0
- scipy/interpolate/_fitpack_py.py +898 -0
- scipy/interpolate/_fitpack_repro.py +996 -0
- scipy/interpolate/_interpnd.cp311-win_arm64.lib +0 -0
- scipy/interpolate/_interpnd.cp311-win_arm64.pyd +0 -0
- scipy/interpolate/_interpolate.py +2266 -0
- scipy/interpolate/_ndbspline.py +415 -0
- scipy/interpolate/_ndgriddata.py +329 -0
- scipy/interpolate/_pade.py +67 -0
- scipy/interpolate/_polyint.py +1025 -0
- scipy/interpolate/_ppoly.cp311-win_arm64.lib +0 -0
- scipy/interpolate/_ppoly.cp311-win_arm64.pyd +0 -0
- scipy/interpolate/_rbf.py +290 -0
- scipy/interpolate/_rbfinterp.py +550 -0
- scipy/interpolate/_rbfinterp_pythran.cp311-win_arm64.lib +0 -0
- scipy/interpolate/_rbfinterp_pythran.cp311-win_arm64.pyd +0 -0
- scipy/interpolate/_rgi.py +764 -0
- scipy/interpolate/_rgi_cython.cp311-win_arm64.lib +0 -0
- scipy/interpolate/_rgi_cython.cp311-win_arm64.pyd +0 -0
- scipy/interpolate/dfitpack.py +24 -0
- scipy/interpolate/fitpack.py +31 -0
- scipy/interpolate/fitpack2.py +29 -0
- scipy/interpolate/interpnd.py +24 -0
- scipy/interpolate/interpolate.py +30 -0
- scipy/interpolate/ndgriddata.py +23 -0
- scipy/interpolate/polyint.py +24 -0
- scipy/interpolate/rbf.py +18 -0
- scipy/interpolate/tests/__init__.py +0 -0
- scipy/interpolate/tests/data/bug-1310.npz +0 -0
- scipy/interpolate/tests/data/estimate_gradients_hang.npy +0 -0
- scipy/interpolate/tests/data/gcvspl.npz +0 -0
- scipy/interpolate/tests/test_bary_rational.py +368 -0
- scipy/interpolate/tests/test_bsplines.py +3754 -0
- scipy/interpolate/tests/test_fitpack.py +519 -0
- scipy/interpolate/tests/test_fitpack2.py +1431 -0
- scipy/interpolate/tests/test_gil.py +64 -0
- scipy/interpolate/tests/test_interpnd.py +452 -0
- scipy/interpolate/tests/test_interpolate.py +2630 -0
- scipy/interpolate/tests/test_ndgriddata.py +308 -0
- scipy/interpolate/tests/test_pade.py +107 -0
- scipy/interpolate/tests/test_polyint.py +972 -0
- scipy/interpolate/tests/test_rbf.py +246 -0
- scipy/interpolate/tests/test_rbfinterp.py +534 -0
- scipy/interpolate/tests/test_rgi.py +1151 -0
- scipy/io/__init__.py +116 -0
- scipy/io/_fast_matrix_market/__init__.py +600 -0
- scipy/io/_fast_matrix_market/_fmm_core.cp311-win_arm64.lib +0 -0
- scipy/io/_fast_matrix_market/_fmm_core.cp311-win_arm64.pyd +0 -0
- scipy/io/_fortran.py +354 -0
- scipy/io/_harwell_boeing/__init__.py +7 -0
- scipy/io/_harwell_boeing/_fortran_format_parser.py +316 -0
- scipy/io/_harwell_boeing/hb.py +571 -0
- scipy/io/_harwell_boeing/tests/__init__.py +0 -0
- scipy/io/_harwell_boeing/tests/test_fortran_format.py +74 -0
- scipy/io/_harwell_boeing/tests/test_hb.py +70 -0
- scipy/io/_idl.py +917 -0
- scipy/io/_mmio.py +968 -0
- scipy/io/_netcdf.py +1104 -0
- scipy/io/_test_fortran.cp311-win_arm64.lib +0 -0
- scipy/io/_test_fortran.cp311-win_arm64.pyd +0 -0
- scipy/io/arff/__init__.py +28 -0
- scipy/io/arff/_arffread.py +873 -0
- scipy/io/arff/arffread.py +19 -0
- scipy/io/arff/tests/__init__.py +0 -0
- scipy/io/arff/tests/data/iris.arff +225 -0
- scipy/io/arff/tests/data/missing.arff +8 -0
- scipy/io/arff/tests/data/nodata.arff +11 -0
- scipy/io/arff/tests/data/quoted_nominal.arff +13 -0
- scipy/io/arff/tests/data/quoted_nominal_spaces.arff +13 -0
- scipy/io/arff/tests/data/test1.arff +10 -0
- scipy/io/arff/tests/data/test10.arff +8 -0
- scipy/io/arff/tests/data/test11.arff +11 -0
- scipy/io/arff/tests/data/test2.arff +15 -0
- scipy/io/arff/tests/data/test3.arff +6 -0
- scipy/io/arff/tests/data/test4.arff +11 -0
- scipy/io/arff/tests/data/test5.arff +26 -0
- scipy/io/arff/tests/data/test6.arff +12 -0
- scipy/io/arff/tests/data/test7.arff +15 -0
- scipy/io/arff/tests/data/test8.arff +12 -0
- scipy/io/arff/tests/data/test9.arff +14 -0
- scipy/io/arff/tests/test_arffread.py +421 -0
- scipy/io/harwell_boeing.py +17 -0
- scipy/io/idl.py +17 -0
- scipy/io/matlab/__init__.py +66 -0
- scipy/io/matlab/_byteordercodes.py +75 -0
- scipy/io/matlab/_mio.py +375 -0
- scipy/io/matlab/_mio4.py +632 -0
- scipy/io/matlab/_mio5.py +901 -0
- scipy/io/matlab/_mio5_params.py +281 -0
- scipy/io/matlab/_mio5_utils.cp311-win_arm64.lib +0 -0
- scipy/io/matlab/_mio5_utils.cp311-win_arm64.pyd +0 -0
- scipy/io/matlab/_mio_utils.cp311-win_arm64.lib +0 -0
- scipy/io/matlab/_mio_utils.cp311-win_arm64.pyd +0 -0
- scipy/io/matlab/_miobase.py +435 -0
- scipy/io/matlab/_streams.cp311-win_arm64.lib +0 -0
- scipy/io/matlab/_streams.cp311-win_arm64.pyd +0 -0
- scipy/io/matlab/byteordercodes.py +17 -0
- scipy/io/matlab/mio.py +16 -0
- scipy/io/matlab/mio4.py +17 -0
- scipy/io/matlab/mio5.py +19 -0
- scipy/io/matlab/mio5_params.py +18 -0
- scipy/io/matlab/mio5_utils.py +17 -0
- scipy/io/matlab/mio_utils.py +17 -0
- scipy/io/matlab/miobase.py +16 -0
- scipy/io/matlab/streams.py +16 -0
- scipy/io/matlab/tests/__init__.py +0 -0
- scipy/io/matlab/tests/data/bad_miuint32.mat +0 -0
- scipy/io/matlab/tests/data/bad_miutf8_array_name.mat +0 -0
- scipy/io/matlab/tests/data/big_endian.mat +0 -0
- scipy/io/matlab/tests/data/broken_utf8.mat +0 -0
- scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat +0 -0
- scipy/io/matlab/tests/data/corrupted_zlib_data.mat +0 -0
- scipy/io/matlab/tests/data/debigged_m4.mat +0 -0
- scipy/io/matlab/tests/data/japanese_utf8.txt +5 -0
- scipy/io/matlab/tests/data/little_endian.mat +0 -0
- scipy/io/matlab/tests/data/logical_sparse.mat +0 -0
- scipy/io/matlab/tests/data/malformed1.mat +0 -0
- scipy/io/matlab/tests/data/miuint32_for_miint32.mat +0 -0
- scipy/io/matlab/tests/data/miutf8_array_name.mat +0 -0
- scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat +0 -0
- scipy/io/matlab/tests/data/one_by_zero_char.mat +0 -0
- scipy/io/matlab/tests/data/parabola.mat +0 -0
- scipy/io/matlab/tests/data/single_empty_string.mat +0 -0
- scipy/io/matlab/tests/data/some_functions.mat +0 -0
- scipy/io/matlab/tests/data/sqr.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/test_empty_struct.mat +0 -0
- scipy/io/matlab/tests/data/test_mat4_le_floats.mat +0 -0
- scipy/io/matlab/tests/data/test_skip_variable.mat +0 -0
- scipy/io/matlab/tests/data/testbool_8_WIN64.mat +0 -0
- scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsimplecell.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/data/testvec_4_GLNX86.mat +0 -0
- scipy/io/matlab/tests/test_byteordercodes.py +29 -0
- scipy/io/matlab/tests/test_mio.py +1399 -0
- scipy/io/matlab/tests/test_mio5_utils.py +179 -0
- scipy/io/matlab/tests/test_mio_funcs.py +51 -0
- scipy/io/matlab/tests/test_mio_utils.py +45 -0
- scipy/io/matlab/tests/test_miobase.py +32 -0
- scipy/io/matlab/tests/test_pathological.py +33 -0
- scipy/io/matlab/tests/test_streams.py +241 -0
- scipy/io/mmio.py +17 -0
- scipy/io/netcdf.py +17 -0
- scipy/io/tests/__init__.py +0 -0
- scipy/io/tests/data/Transparent Busy.ani +0 -0
- scipy/io/tests/data/array_float32_1d.sav +0 -0
- scipy/io/tests/data/array_float32_2d.sav +0 -0
- scipy/io/tests/data/array_float32_3d.sav +0 -0
- scipy/io/tests/data/array_float32_4d.sav +0 -0
- scipy/io/tests/data/array_float32_5d.sav +0 -0
- scipy/io/tests/data/array_float32_6d.sav +0 -0
- scipy/io/tests/data/array_float32_7d.sav +0 -0
- scipy/io/tests/data/array_float32_8d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_1d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_2d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_3d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_4d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_5d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_6d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_7d.sav +0 -0
- scipy/io/tests/data/array_float32_pointer_8d.sav +0 -0
- scipy/io/tests/data/example_1.nc +0 -0
- scipy/io/tests/data/example_2.nc +0 -0
- scipy/io/tests/data/example_3_maskedvals.nc +0 -0
- scipy/io/tests/data/fortran-3x3d-2i.dat +0 -0
- scipy/io/tests/data/fortran-mixed.dat +0 -0
- scipy/io/tests/data/fortran-sf8-11x1x10.dat +0 -0
- scipy/io/tests/data/fortran-sf8-15x10x22.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x1.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x5.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x1x7.dat +0 -0
- scipy/io/tests/data/fortran-sf8-1x3x5.dat +0 -0
- scipy/io/tests/data/fortran-si4-11x1x10.dat +0 -0
- scipy/io/tests/data/fortran-si4-15x10x22.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x1.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x5.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x1x7.dat +0 -0
- scipy/io/tests/data/fortran-si4-1x3x5.dat +0 -0
- scipy/io/tests/data/invalid_pointer.sav +0 -0
- scipy/io/tests/data/null_pointer.sav +0 -0
- scipy/io/tests/data/scalar_byte.sav +0 -0
- scipy/io/tests/data/scalar_byte_descr.sav +0 -0
- scipy/io/tests/data/scalar_complex32.sav +0 -0
- scipy/io/tests/data/scalar_complex64.sav +0 -0
- scipy/io/tests/data/scalar_float32.sav +0 -0
- scipy/io/tests/data/scalar_float64.sav +0 -0
- scipy/io/tests/data/scalar_heap_pointer.sav +0 -0
- scipy/io/tests/data/scalar_int16.sav +0 -0
- scipy/io/tests/data/scalar_int32.sav +0 -0
- scipy/io/tests/data/scalar_int64.sav +0 -0
- scipy/io/tests/data/scalar_string.sav +0 -0
- scipy/io/tests/data/scalar_uint16.sav +0 -0
- scipy/io/tests/data/scalar_uint32.sav +0 -0
- scipy/io/tests/data/scalar_uint64.sav +0 -0
- scipy/io/tests/data/struct_arrays.sav +0 -0
- scipy/io/tests/data/struct_arrays_byte_idl80.sav +0 -0
- scipy/io/tests/data/struct_arrays_replicated.sav +0 -0
- scipy/io/tests/data/struct_arrays_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_inherit.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays_replicated.sav +0 -0
- scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_pointers.sav +0 -0
- scipy/io/tests/data/struct_pointers_replicated.sav +0 -0
- scipy/io/tests/data/struct_pointers_replicated_3d.sav +0 -0
- scipy/io/tests/data/struct_scalars.sav +0 -0
- scipy/io/tests/data/struct_scalars_replicated.sav +0 -0
- scipy/io/tests/data/struct_scalars_replicated_3d.sav +0 -0
- scipy/io/tests/data/test-1234Hz-le-1ch-10S-20bit-extra.wav +0 -0
- scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav +0 -0
- scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav +0 -0
- scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-rf64.wav +0 -0
- scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav +0 -0
- scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav +0 -0
- scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-rf64.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav +0 -0
- scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav +0 -0
- scipy/io/tests/data/various_compressed.sav +0 -0
- scipy/io/tests/test_fortran.py +264 -0
- scipy/io/tests/test_idl.py +483 -0
- scipy/io/tests/test_mmio.py +831 -0
- scipy/io/tests/test_netcdf.py +550 -0
- scipy/io/tests/test_paths.py +93 -0
- scipy/io/tests/test_wavfile.py +501 -0
- scipy/io/wavfile.py +938 -0
- scipy/linalg/__init__.pxd +1 -0
- scipy/linalg/__init__.py +236 -0
- scipy/linalg/_basic.py +2146 -0
- scipy/linalg/_blas_subroutines.h +164 -0
- scipy/linalg/_cythonized_array_utils.cp311-win_arm64.lib +0 -0
- scipy/linalg/_cythonized_array_utils.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_cythonized_array_utils.pxd +40 -0
- scipy/linalg/_cythonized_array_utils.pyi +16 -0
- scipy/linalg/_decomp.py +1645 -0
- scipy/linalg/_decomp_cholesky.py +413 -0
- scipy/linalg/_decomp_cossin.py +236 -0
- scipy/linalg/_decomp_interpolative.cp311-win_arm64.lib +0 -0
- scipy/linalg/_decomp_interpolative.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_decomp_ldl.py +356 -0
- scipy/linalg/_decomp_lu.py +401 -0
- scipy/linalg/_decomp_lu_cython.cp311-win_arm64.lib +0 -0
- scipy/linalg/_decomp_lu_cython.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_decomp_lu_cython.pyi +6 -0
- scipy/linalg/_decomp_polar.py +113 -0
- scipy/linalg/_decomp_qr.py +494 -0
- scipy/linalg/_decomp_qz.py +452 -0
- scipy/linalg/_decomp_schur.py +336 -0
- scipy/linalg/_decomp_svd.py +545 -0
- scipy/linalg/_decomp_update.cp311-win_arm64.lib +0 -0
- scipy/linalg/_decomp_update.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_expm_frechet.py +417 -0
- scipy/linalg/_fblas.cp311-win_arm64.lib +0 -0
- scipy/linalg/_fblas.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_flapack.cp311-win_arm64.lib +0 -0
- scipy/linalg/_flapack.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_lapack_subroutines.h +1521 -0
- scipy/linalg/_linalg_pythran.cp311-win_arm64.lib +0 -0
- scipy/linalg/_linalg_pythran.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs.py +1050 -0
- scipy/linalg/_matfuncs_expm.cp311-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_expm.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs_expm.pyi +6 -0
- scipy/linalg/_matfuncs_inv_ssq.py +886 -0
- scipy/linalg/_matfuncs_schur_sqrtm.cp311-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_schur_sqrtm.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_matfuncs_sqrtm.py +107 -0
- scipy/linalg/_matfuncs_sqrtm_triu.cp311-win_arm64.lib +0 -0
- scipy/linalg/_matfuncs_sqrtm_triu.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_misc.py +191 -0
- scipy/linalg/_procrustes.py +113 -0
- scipy/linalg/_sketches.py +189 -0
- scipy/linalg/_solve_toeplitz.cp311-win_arm64.lib +0 -0
- scipy/linalg/_solve_toeplitz.cp311-win_arm64.pyd +0 -0
- scipy/linalg/_solvers.py +862 -0
- scipy/linalg/_special_matrices.py +1322 -0
- scipy/linalg/_testutils.py +65 -0
- scipy/linalg/basic.py +23 -0
- scipy/linalg/blas.py +495 -0
- scipy/linalg/cython_blas.cp311-win_arm64.lib +0 -0
- scipy/linalg/cython_blas.cp311-win_arm64.pyd +0 -0
- scipy/linalg/cython_blas.pxd +169 -0
- scipy/linalg/cython_blas.pyx +1432 -0
- scipy/linalg/cython_lapack.cp311-win_arm64.lib +0 -0
- scipy/linalg/cython_lapack.cp311-win_arm64.pyd +0 -0
- scipy/linalg/cython_lapack.pxd +1528 -0
- scipy/linalg/cython_lapack.pyx +12045 -0
- scipy/linalg/decomp.py +23 -0
- scipy/linalg/decomp_cholesky.py +21 -0
- scipy/linalg/decomp_lu.py +21 -0
- scipy/linalg/decomp_qr.py +20 -0
- scipy/linalg/decomp_schur.py +21 -0
- scipy/linalg/decomp_svd.py +21 -0
- scipy/linalg/interpolative.py +989 -0
- scipy/linalg/lapack.py +1081 -0
- scipy/linalg/matfuncs.py +23 -0
- scipy/linalg/misc.py +21 -0
- scipy/linalg/special_matrices.py +22 -0
- scipy/linalg/tests/__init__.py +0 -0
- scipy/linalg/tests/_cython_examples/extending.pyx +23 -0
- scipy/linalg/tests/_cython_examples/meson.build +34 -0
- scipy/linalg/tests/data/carex_15_data.npz +0 -0
- scipy/linalg/tests/data/carex_18_data.npz +0 -0
- scipy/linalg/tests/data/carex_19_data.npz +0 -0
- scipy/linalg/tests/data/carex_20_data.npz +0 -0
- scipy/linalg/tests/data/carex_6_data.npz +0 -0
- scipy/linalg/tests/data/gendare_20170120_data.npz +0 -0
- scipy/linalg/tests/test_basic.py +2074 -0
- scipy/linalg/tests/test_batch.py +588 -0
- scipy/linalg/tests/test_blas.py +1127 -0
- scipy/linalg/tests/test_cython_blas.py +118 -0
- scipy/linalg/tests/test_cython_lapack.py +22 -0
- scipy/linalg/tests/test_cythonized_array_utils.py +130 -0
- scipy/linalg/tests/test_decomp.py +3189 -0
- scipy/linalg/tests/test_decomp_cholesky.py +268 -0
- scipy/linalg/tests/test_decomp_cossin.py +314 -0
- scipy/linalg/tests/test_decomp_ldl.py +137 -0
- scipy/linalg/tests/test_decomp_lu.py +308 -0
- scipy/linalg/tests/test_decomp_polar.py +110 -0
- scipy/linalg/tests/test_decomp_update.py +1701 -0
- scipy/linalg/tests/test_extending.py +46 -0
- scipy/linalg/tests/test_fblas.py +607 -0
- scipy/linalg/tests/test_interpolative.py +232 -0
- scipy/linalg/tests/test_lapack.py +3620 -0
- scipy/linalg/tests/test_matfuncs.py +1125 -0
- scipy/linalg/tests/test_matmul_toeplitz.py +136 -0
- scipy/linalg/tests/test_procrustes.py +214 -0
- scipy/linalg/tests/test_sketches.py +118 -0
- scipy/linalg/tests/test_solve_toeplitz.py +150 -0
- scipy/linalg/tests/test_solvers.py +844 -0
- scipy/linalg/tests/test_special_matrices.py +636 -0
- scipy/misc/__init__.py +6 -0
- scipy/misc/common.py +6 -0
- scipy/misc/doccer.py +6 -0
- scipy/ndimage/__init__.py +174 -0
- scipy/ndimage/_ctest.cp311-win_arm64.lib +0 -0
- scipy/ndimage/_ctest.cp311-win_arm64.pyd +0 -0
- scipy/ndimage/_cytest.cp311-win_arm64.lib +0 -0
- scipy/ndimage/_cytest.cp311-win_arm64.pyd +0 -0
- scipy/ndimage/_delegators.py +303 -0
- scipy/ndimage/_filters.py +2422 -0
- scipy/ndimage/_fourier.py +306 -0
- scipy/ndimage/_interpolation.py +1033 -0
- scipy/ndimage/_measurements.py +1689 -0
- scipy/ndimage/_morphology.py +2634 -0
- scipy/ndimage/_nd_image.cp311-win_arm64.lib +0 -0
- scipy/ndimage/_nd_image.cp311-win_arm64.pyd +0 -0
- scipy/ndimage/_ndimage_api.py +16 -0
- scipy/ndimage/_ni_docstrings.py +214 -0
- scipy/ndimage/_ni_label.cp311-win_arm64.lib +0 -0
- scipy/ndimage/_ni_label.cp311-win_arm64.pyd +0 -0
- scipy/ndimage/_ni_support.py +139 -0
- scipy/ndimage/_rank_filter_1d.cp311-win_arm64.lib +0 -0
- scipy/ndimage/_rank_filter_1d.cp311-win_arm64.pyd +0 -0
- scipy/ndimage/_support_alternative_backends.py +84 -0
- scipy/ndimage/filters.py +27 -0
- scipy/ndimage/fourier.py +21 -0
- scipy/ndimage/interpolation.py +22 -0
- scipy/ndimage/measurements.py +24 -0
- scipy/ndimage/morphology.py +27 -0
- scipy/ndimage/tests/__init__.py +12 -0
- scipy/ndimage/tests/data/label_inputs.txt +21 -0
- scipy/ndimage/tests/data/label_results.txt +294 -0
- scipy/ndimage/tests/data/label_strels.txt +42 -0
- scipy/ndimage/tests/dots.png +0 -0
- scipy/ndimage/tests/test_c_api.py +102 -0
- scipy/ndimage/tests/test_datatypes.py +67 -0
- scipy/ndimage/tests/test_filters.py +3083 -0
- scipy/ndimage/tests/test_fourier.py +187 -0
- scipy/ndimage/tests/test_interpolation.py +1491 -0
- scipy/ndimage/tests/test_measurements.py +1592 -0
- scipy/ndimage/tests/test_morphology.py +2950 -0
- scipy/ndimage/tests/test_ni_support.py +78 -0
- scipy/ndimage/tests/test_splines.py +70 -0
- scipy/odr/__init__.py +131 -0
- scipy/odr/__odrpack.cp311-win_arm64.lib +0 -0
- scipy/odr/__odrpack.cp311-win_arm64.pyd +0 -0
- scipy/odr/_add_newdocs.py +34 -0
- scipy/odr/_models.py +315 -0
- scipy/odr/_odrpack.py +1154 -0
- scipy/odr/models.py +20 -0
- scipy/odr/odrpack.py +21 -0
- scipy/odr/tests/__init__.py +0 -0
- scipy/odr/tests/test_odr.py +607 -0
- scipy/optimize/__init__.pxd +1 -0
- scipy/optimize/__init__.py +460 -0
- scipy/optimize/_basinhopping.py +741 -0
- scipy/optimize/_bglu_dense.cp311-win_arm64.lib +0 -0
- scipy/optimize/_bglu_dense.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_bracket.py +706 -0
- scipy/optimize/_chandrupatla.py +551 -0
- scipy/optimize/_cobyla_py.py +297 -0
- scipy/optimize/_cobyqa_py.py +72 -0
- scipy/optimize/_constraints.py +598 -0
- scipy/optimize/_dcsrch.py +728 -0
- scipy/optimize/_differentiable_functions.py +835 -0
- scipy/optimize/_differentialevolution.py +1970 -0
- scipy/optimize/_direct.cp311-win_arm64.lib +0 -0
- scipy/optimize/_direct.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_direct_py.py +280 -0
- scipy/optimize/_dual_annealing.py +732 -0
- scipy/optimize/_elementwise.py +798 -0
- scipy/optimize/_group_columns.cp311-win_arm64.lib +0 -0
- scipy/optimize/_group_columns.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_hessian_update_strategy.py +479 -0
- scipy/optimize/_highspy/__init__.py +0 -0
- scipy/optimize/_highspy/_core.cp311-win_arm64.lib +0 -0
- scipy/optimize/_highspy/_core.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_highspy/_highs_options.cp311-win_arm64.lib +0 -0
- scipy/optimize/_highspy/_highs_options.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_highspy/_highs_wrapper.py +338 -0
- scipy/optimize/_isotonic.py +157 -0
- scipy/optimize/_lbfgsb.cp311-win_arm64.lib +0 -0
- scipy/optimize/_lbfgsb.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_lbfgsb_py.py +634 -0
- scipy/optimize/_linesearch.py +896 -0
- scipy/optimize/_linprog.py +733 -0
- scipy/optimize/_linprog_doc.py +1434 -0
- scipy/optimize/_linprog_highs.py +422 -0
- scipy/optimize/_linprog_ip.py +1141 -0
- scipy/optimize/_linprog_rs.py +572 -0
- scipy/optimize/_linprog_simplex.py +663 -0
- scipy/optimize/_linprog_util.py +1521 -0
- scipy/optimize/_lsap.cp311-win_arm64.lib +0 -0
- scipy/optimize/_lsap.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_lsq/__init__.py +5 -0
- scipy/optimize/_lsq/bvls.py +183 -0
- scipy/optimize/_lsq/common.py +731 -0
- scipy/optimize/_lsq/dogbox.py +345 -0
- scipy/optimize/_lsq/givens_elimination.cp311-win_arm64.lib +0 -0
- scipy/optimize/_lsq/givens_elimination.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_lsq/least_squares.py +1044 -0
- scipy/optimize/_lsq/lsq_linear.py +361 -0
- scipy/optimize/_lsq/trf.py +587 -0
- scipy/optimize/_lsq/trf_linear.py +249 -0
- scipy/optimize/_milp.py +394 -0
- scipy/optimize/_minimize.py +1199 -0
- scipy/optimize/_minpack.cp311-win_arm64.lib +0 -0
- scipy/optimize/_minpack.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_minpack_py.py +1178 -0
- scipy/optimize/_moduleTNC.cp311-win_arm64.lib +0 -0
- scipy/optimize/_moduleTNC.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_nnls.py +96 -0
- scipy/optimize/_nonlin.py +1634 -0
- scipy/optimize/_numdiff.py +963 -0
- scipy/optimize/_optimize.py +4169 -0
- scipy/optimize/_pava_pybind.cp311-win_arm64.lib +0 -0
- scipy/optimize/_pava_pybind.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_qap.py +760 -0
- scipy/optimize/_remove_redundancy.py +522 -0
- scipy/optimize/_root.py +732 -0
- scipy/optimize/_root_scalar.py +538 -0
- scipy/optimize/_shgo.py +1606 -0
- scipy/optimize/_shgo_lib/__init__.py +0 -0
- scipy/optimize/_shgo_lib/_complex.py +1225 -0
- scipy/optimize/_shgo_lib/_vertex.py +460 -0
- scipy/optimize/_slsqp_py.py +603 -0
- scipy/optimize/_slsqplib.cp311-win_arm64.lib +0 -0
- scipy/optimize/_slsqplib.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_spectral.py +260 -0
- scipy/optimize/_tnc.py +438 -0
- scipy/optimize/_trlib/__init__.py +12 -0
- scipy/optimize/_trlib/_trlib.cp311-win_arm64.lib +0 -0
- scipy/optimize/_trlib/_trlib.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_trustregion.py +318 -0
- scipy/optimize/_trustregion_constr/__init__.py +6 -0
- scipy/optimize/_trustregion_constr/canonical_constraint.py +390 -0
- scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +231 -0
- scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +584 -0
- scipy/optimize/_trustregion_constr/projections.py +411 -0
- scipy/optimize/_trustregion_constr/qp_subproblem.py +637 -0
- scipy/optimize/_trustregion_constr/report.py +49 -0
- scipy/optimize/_trustregion_constr/tests/__init__.py +0 -0
- scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py +296 -0
- scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py +39 -0
- scipy/optimize/_trustregion_constr/tests/test_projections.py +214 -0
- scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +645 -0
- scipy/optimize/_trustregion_constr/tests/test_report.py +34 -0
- scipy/optimize/_trustregion_constr/tr_interior_point.py +361 -0
- scipy/optimize/_trustregion_dogleg.py +122 -0
- scipy/optimize/_trustregion_exact.py +437 -0
- scipy/optimize/_trustregion_krylov.py +65 -0
- scipy/optimize/_trustregion_ncg.py +126 -0
- scipy/optimize/_tstutils.py +972 -0
- scipy/optimize/_zeros.cp311-win_arm64.lib +0 -0
- scipy/optimize/_zeros.cp311-win_arm64.pyd +0 -0
- scipy/optimize/_zeros_py.py +1475 -0
- scipy/optimize/cobyla.py +19 -0
- scipy/optimize/cython_optimize/__init__.py +133 -0
- scipy/optimize/cython_optimize/_zeros.cp311-win_arm64.lib +0 -0
- scipy/optimize/cython_optimize/_zeros.cp311-win_arm64.pyd +0 -0
- scipy/optimize/cython_optimize/_zeros.pxd +33 -0
- scipy/optimize/cython_optimize/c_zeros.pxd +26 -0
- scipy/optimize/cython_optimize.pxd +11 -0
- scipy/optimize/elementwise.py +38 -0
- scipy/optimize/lbfgsb.py +23 -0
- scipy/optimize/linesearch.py +18 -0
- scipy/optimize/minpack.py +27 -0
- scipy/optimize/minpack2.py +17 -0
- scipy/optimize/moduleTNC.py +19 -0
- scipy/optimize/nonlin.py +29 -0
- scipy/optimize/optimize.py +40 -0
- scipy/optimize/slsqp.py +22 -0
- scipy/optimize/tests/__init__.py +0 -0
- scipy/optimize/tests/_cython_examples/extending.pyx +43 -0
- scipy/optimize/tests/_cython_examples/meson.build +32 -0
- scipy/optimize/tests/test__basinhopping.py +535 -0
- scipy/optimize/tests/test__differential_evolution.py +1703 -0
- scipy/optimize/tests/test__dual_annealing.py +416 -0
- scipy/optimize/tests/test__linprog_clean_inputs.py +312 -0
- scipy/optimize/tests/test__numdiff.py +885 -0
- scipy/optimize/tests/test__remove_redundancy.py +228 -0
- scipy/optimize/tests/test__root.py +124 -0
- scipy/optimize/tests/test__shgo.py +1164 -0
- scipy/optimize/tests/test__spectral.py +226 -0
- scipy/optimize/tests/test_bracket.py +896 -0
- scipy/optimize/tests/test_chandrupatla.py +982 -0
- scipy/optimize/tests/test_cobyla.py +195 -0
- scipy/optimize/tests/test_cobyqa.py +252 -0
- scipy/optimize/tests/test_constraint_conversion.py +286 -0
- scipy/optimize/tests/test_constraints.py +255 -0
- scipy/optimize/tests/test_cython_optimize.py +92 -0
- scipy/optimize/tests/test_differentiable_functions.py +1025 -0
- scipy/optimize/tests/test_direct.py +321 -0
- scipy/optimize/tests/test_extending.py +28 -0
- scipy/optimize/tests/test_hessian_update_strategy.py +300 -0
- scipy/optimize/tests/test_isotonic_regression.py +167 -0
- scipy/optimize/tests/test_lbfgsb_hessinv.py +65 -0
- scipy/optimize/tests/test_lbfgsb_setulb.py +122 -0
- scipy/optimize/tests/test_least_squares.py +986 -0
- scipy/optimize/tests/test_linear_assignment.py +116 -0
- scipy/optimize/tests/test_linesearch.py +328 -0
- scipy/optimize/tests/test_linprog.py +2577 -0
- scipy/optimize/tests/test_lsq_common.py +297 -0
- scipy/optimize/tests/test_lsq_linear.py +287 -0
- scipy/optimize/tests/test_milp.py +459 -0
- scipy/optimize/tests/test_minimize_constrained.py +845 -0
- scipy/optimize/tests/test_minpack.py +1194 -0
- scipy/optimize/tests/test_nnls.py +469 -0
- scipy/optimize/tests/test_nonlin.py +572 -0
- scipy/optimize/tests/test_optimize.py +3344 -0
- scipy/optimize/tests/test_quadratic_assignment.py +455 -0
- scipy/optimize/tests/test_regression.py +40 -0
- scipy/optimize/tests/test_slsqp.py +645 -0
- scipy/optimize/tests/test_tnc.py +345 -0
- scipy/optimize/tests/test_trustregion.py +110 -0
- scipy/optimize/tests/test_trustregion_exact.py +351 -0
- scipy/optimize/tests/test_trustregion_krylov.py +170 -0
- scipy/optimize/tests/test_zeros.py +998 -0
- scipy/optimize/tnc.py +22 -0
- scipy/optimize/zeros.py +26 -0
- scipy/signal/__init__.py +316 -0
- scipy/signal/_arraytools.py +264 -0
- scipy/signal/_czt.py +575 -0
- scipy/signal/_delegators.py +568 -0
- scipy/signal/_filter_design.py +5893 -0
- scipy/signal/_fir_filter_design.py +1458 -0
- scipy/signal/_lti_conversion.py +534 -0
- scipy/signal/_ltisys.py +3546 -0
- scipy/signal/_max_len_seq.py +139 -0
- scipy/signal/_max_len_seq_inner.cp311-win_arm64.lib +0 -0
- scipy/signal/_max_len_seq_inner.cp311-win_arm64.pyd +0 -0
- scipy/signal/_peak_finding.py +1310 -0
- scipy/signal/_peak_finding_utils.cp311-win_arm64.lib +0 -0
- scipy/signal/_peak_finding_utils.cp311-win_arm64.pyd +0 -0
- scipy/signal/_polyutils.py +172 -0
- scipy/signal/_savitzky_golay.py +357 -0
- scipy/signal/_short_time_fft.py +2228 -0
- scipy/signal/_signal_api.py +30 -0
- scipy/signal/_signaltools.py +5309 -0
- scipy/signal/_sigtools.cp311-win_arm64.lib +0 -0
- scipy/signal/_sigtools.cp311-win_arm64.pyd +0 -0
- scipy/signal/_sosfilt.cp311-win_arm64.lib +0 -0
- scipy/signal/_sosfilt.cp311-win_arm64.pyd +0 -0
- scipy/signal/_spectral_py.py +2471 -0
- scipy/signal/_spline.cp311-win_arm64.lib +0 -0
- scipy/signal/_spline.cp311-win_arm64.pyd +0 -0
- scipy/signal/_spline.pyi +34 -0
- scipy/signal/_spline_filters.py +848 -0
- scipy/signal/_support_alternative_backends.py +73 -0
- scipy/signal/_upfirdn.py +219 -0
- scipy/signal/_upfirdn_apply.cp311-win_arm64.lib +0 -0
- scipy/signal/_upfirdn_apply.cp311-win_arm64.pyd +0 -0
- scipy/signal/_waveforms.py +687 -0
- scipy/signal/_wavelets.py +29 -0
- scipy/signal/bsplines.py +21 -0
- scipy/signal/filter_design.py +28 -0
- scipy/signal/fir_filter_design.py +21 -0
- scipy/signal/lti_conversion.py +20 -0
- scipy/signal/ltisys.py +25 -0
- scipy/signal/signaltools.py +27 -0
- scipy/signal/spectral.py +21 -0
- scipy/signal/spline.py +18 -0
- scipy/signal/tests/__init__.py +0 -0
- scipy/signal/tests/_scipy_spectral_test_shim.py +311 -0
- scipy/signal/tests/mpsig.py +122 -0
- scipy/signal/tests/test_array_tools.py +111 -0
- scipy/signal/tests/test_bsplines.py +365 -0
- scipy/signal/tests/test_cont2discrete.py +424 -0
- scipy/signal/tests/test_czt.py +221 -0
- scipy/signal/tests/test_dltisys.py +599 -0
- scipy/signal/tests/test_filter_design.py +4744 -0
- scipy/signal/tests/test_fir_filter_design.py +851 -0
- scipy/signal/tests/test_ltisys.py +1225 -0
- scipy/signal/tests/test_max_len_seq.py +71 -0
- scipy/signal/tests/test_peak_finding.py +915 -0
- scipy/signal/tests/test_result_type.py +51 -0
- scipy/signal/tests/test_savitzky_golay.py +363 -0
- scipy/signal/tests/test_short_time_fft.py +1107 -0
- scipy/signal/tests/test_signaltools.py +4735 -0
- scipy/signal/tests/test_spectral.py +2141 -0
- scipy/signal/tests/test_splines.py +427 -0
- scipy/signal/tests/test_upfirdn.py +322 -0
- scipy/signal/tests/test_waveforms.py +400 -0
- scipy/signal/tests/test_wavelets.py +59 -0
- scipy/signal/tests/test_windows.py +987 -0
- scipy/signal/waveforms.py +20 -0
- scipy/signal/wavelets.py +17 -0
- scipy/signal/windows/__init__.py +52 -0
- scipy/signal/windows/_windows.py +2513 -0
- scipy/signal/windows/windows.py +23 -0
- scipy/sparse/__init__.py +350 -0
- scipy/sparse/_base.py +1613 -0
- scipy/sparse/_bsr.py +880 -0
- scipy/sparse/_compressed.py +1328 -0
- scipy/sparse/_construct.py +1454 -0
- scipy/sparse/_coo.py +1581 -0
- scipy/sparse/_csc.py +367 -0
- scipy/sparse/_csparsetools.cp311-win_arm64.lib +0 -0
- scipy/sparse/_csparsetools.cp311-win_arm64.pyd +0 -0
- scipy/sparse/_csr.py +558 -0
- scipy/sparse/_data.py +569 -0
- scipy/sparse/_dia.py +677 -0
- scipy/sparse/_dok.py +669 -0
- scipy/sparse/_extract.py +178 -0
- scipy/sparse/_index.py +444 -0
- scipy/sparse/_lil.py +632 -0
- scipy/sparse/_matrix.py +169 -0
- scipy/sparse/_matrix_io.py +167 -0
- scipy/sparse/_sparsetools.cp311-win_arm64.lib +0 -0
- scipy/sparse/_sparsetools.cp311-win_arm64.pyd +0 -0
- scipy/sparse/_spfuncs.py +76 -0
- scipy/sparse/_sputils.py +632 -0
- scipy/sparse/base.py +24 -0
- scipy/sparse/bsr.py +22 -0
- scipy/sparse/compressed.py +20 -0
- scipy/sparse/construct.py +38 -0
- scipy/sparse/coo.py +23 -0
- scipy/sparse/csc.py +22 -0
- scipy/sparse/csgraph/__init__.py +210 -0
- scipy/sparse/csgraph/_flow.cp311-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_flow.cp311-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_laplacian.py +563 -0
- scipy/sparse/csgraph/_matching.cp311-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_matching.cp311-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_min_spanning_tree.cp311-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_min_spanning_tree.cp311-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_reordering.cp311-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_reordering.cp311-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_shortest_path.cp311-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_shortest_path.cp311-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_tools.cp311-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_tools.cp311-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_traversal.cp311-win_arm64.lib +0 -0
- scipy/sparse/csgraph/_traversal.cp311-win_arm64.pyd +0 -0
- scipy/sparse/csgraph/_validation.py +66 -0
- scipy/sparse/csgraph/tests/__init__.py +0 -0
- scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
- scipy/sparse/csgraph/tests/test_conversions.py +61 -0
- scipy/sparse/csgraph/tests/test_flow.py +209 -0
- scipy/sparse/csgraph/tests/test_graph_laplacian.py +368 -0
- scipy/sparse/csgraph/tests/test_matching.py +307 -0
- scipy/sparse/csgraph/tests/test_pydata_sparse.py +197 -0
- scipy/sparse/csgraph/tests/test_reordering.py +70 -0
- scipy/sparse/csgraph/tests/test_shortest_path.py +540 -0
- scipy/sparse/csgraph/tests/test_spanning_tree.py +66 -0
- scipy/sparse/csgraph/tests/test_traversal.py +148 -0
- scipy/sparse/csr.py +22 -0
- scipy/sparse/data.py +18 -0
- scipy/sparse/dia.py +22 -0
- scipy/sparse/dok.py +22 -0
- scipy/sparse/extract.py +23 -0
- scipy/sparse/lil.py +22 -0
- scipy/sparse/linalg/__init__.py +148 -0
- scipy/sparse/linalg/_dsolve/__init__.py +71 -0
- scipy/sparse/linalg/_dsolve/_add_newdocs.py +147 -0
- scipy/sparse/linalg/_dsolve/_superlu.cp311-win_arm64.lib +0 -0
- scipy/sparse/linalg/_dsolve/_superlu.cp311-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_dsolve/linsolve.py +882 -0
- scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
- scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +928 -0
- scipy/sparse/linalg/_eigen/__init__.py +22 -0
- scipy/sparse/linalg/_eigen/_svds.py +540 -0
- scipy/sparse/linalg/_eigen/_svds_doc.py +382 -0
- scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
- scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
- scipy/sparse/linalg/_eigen/arpack/_arpack.cp311-win_arm64.lib +0 -0
- scipy/sparse/linalg/_eigen/arpack/_arpack.cp311-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_eigen/arpack/arpack.py +1706 -0
- scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +717 -0
- scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
- scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1110 -0
- scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +725 -0
- scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
- scipy/sparse/linalg/_eigen/tests/test_svds.py +886 -0
- scipy/sparse/linalg/_expm_multiply.py +816 -0
- scipy/sparse/linalg/_interface.py +920 -0
- scipy/sparse/linalg/_isolve/__init__.py +20 -0
- scipy/sparse/linalg/_isolve/_gcrotmk.py +503 -0
- scipy/sparse/linalg/_isolve/iterative.py +1051 -0
- scipy/sparse/linalg/_isolve/lgmres.py +230 -0
- scipy/sparse/linalg/_isolve/lsmr.py +486 -0
- scipy/sparse/linalg/_isolve/lsqr.py +589 -0
- scipy/sparse/linalg/_isolve/minres.py +372 -0
- scipy/sparse/linalg/_isolve/tests/__init__.py +0 -0
- scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py +183 -0
- scipy/sparse/linalg/_isolve/tests/test_iterative.py +809 -0
- scipy/sparse/linalg/_isolve/tests/test_lgmres.py +225 -0
- scipy/sparse/linalg/_isolve/tests/test_lsmr.py +185 -0
- scipy/sparse/linalg/_isolve/tests/test_lsqr.py +120 -0
- scipy/sparse/linalg/_isolve/tests/test_minres.py +97 -0
- scipy/sparse/linalg/_isolve/tests/test_utils.py +9 -0
- scipy/sparse/linalg/_isolve/tfqmr.py +179 -0
- scipy/sparse/linalg/_isolve/utils.py +121 -0
- scipy/sparse/linalg/_matfuncs.py +940 -0
- scipy/sparse/linalg/_norm.py +195 -0
- scipy/sparse/linalg/_onenormest.py +467 -0
- scipy/sparse/linalg/_propack/_cpropack.cp311-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_cpropack.cp311-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_dpropack.cp311-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_dpropack.cp311-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_spropack.cp311-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_spropack.cp311-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_propack/_zpropack.cp311-win_arm64.lib +0 -0
- scipy/sparse/linalg/_propack/_zpropack.cp311-win_arm64.pyd +0 -0
- scipy/sparse/linalg/_special_sparse_arrays.py +949 -0
- scipy/sparse/linalg/_svdp.py +309 -0
- scipy/sparse/linalg/dsolve.py +22 -0
- scipy/sparse/linalg/eigen.py +21 -0
- scipy/sparse/linalg/interface.py +20 -0
- scipy/sparse/linalg/isolve.py +22 -0
- scipy/sparse/linalg/matfuncs.py +18 -0
- scipy/sparse/linalg/tests/__init__.py +0 -0
- scipy/sparse/linalg/tests/propack_test_data.npz +0 -0
- scipy/sparse/linalg/tests/test_expm_multiply.py +367 -0
- scipy/sparse/linalg/tests/test_interface.py +561 -0
- scipy/sparse/linalg/tests/test_matfuncs.py +592 -0
- scipy/sparse/linalg/tests/test_norm.py +154 -0
- scipy/sparse/linalg/tests/test_onenormest.py +252 -0
- scipy/sparse/linalg/tests/test_propack.py +165 -0
- scipy/sparse/linalg/tests/test_pydata_sparse.py +272 -0
- scipy/sparse/linalg/tests/test_special_sparse_arrays.py +337 -0
- scipy/sparse/sparsetools.py +17 -0
- scipy/sparse/spfuncs.py +17 -0
- scipy/sparse/sputils.py +17 -0
- scipy/sparse/tests/__init__.py +0 -0
- scipy/sparse/tests/data/csc_py2.npz +0 -0
- scipy/sparse/tests/data/csc_py3.npz +0 -0
- scipy/sparse/tests/test_arithmetic1d.py +341 -0
- scipy/sparse/tests/test_array_api.py +561 -0
- scipy/sparse/tests/test_base.py +5870 -0
- scipy/sparse/tests/test_common1d.py +447 -0
- scipy/sparse/tests/test_construct.py +872 -0
- scipy/sparse/tests/test_coo.py +1119 -0
- scipy/sparse/tests/test_csc.py +98 -0
- scipy/sparse/tests/test_csr.py +214 -0
- scipy/sparse/tests/test_dok.py +209 -0
- scipy/sparse/tests/test_extract.py +51 -0
- scipy/sparse/tests/test_indexing1d.py +603 -0
- scipy/sparse/tests/test_matrix_io.py +109 -0
- scipy/sparse/tests/test_minmax1d.py +128 -0
- scipy/sparse/tests/test_sparsetools.py +344 -0
- scipy/sparse/tests/test_spfuncs.py +97 -0
- scipy/sparse/tests/test_sputils.py +424 -0
- scipy/spatial/__init__.py +129 -0
- scipy/spatial/_ckdtree.cp311-win_arm64.lib +0 -0
- scipy/spatial/_ckdtree.cp311-win_arm64.pyd +0 -0
- scipy/spatial/_distance_pybind.cp311-win_arm64.lib +0 -0
- scipy/spatial/_distance_pybind.cp311-win_arm64.pyd +0 -0
- scipy/spatial/_distance_wrap.cp311-win_arm64.lib +0 -0
- scipy/spatial/_distance_wrap.cp311-win_arm64.pyd +0 -0
- scipy/spatial/_geometric_slerp.py +238 -0
- scipy/spatial/_hausdorff.cp311-win_arm64.lib +0 -0
- scipy/spatial/_hausdorff.cp311-win_arm64.pyd +0 -0
- scipy/spatial/_kdtree.py +920 -0
- scipy/spatial/_plotutils.py +274 -0
- scipy/spatial/_procrustes.py +132 -0
- scipy/spatial/_qhull.cp311-win_arm64.lib +0 -0
- scipy/spatial/_qhull.cp311-win_arm64.pyd +0 -0
- scipy/spatial/_qhull.pyi +213 -0
- scipy/spatial/_spherical_voronoi.py +341 -0
- scipy/spatial/_voronoi.cp311-win_arm64.lib +0 -0
- scipy/spatial/_voronoi.cp311-win_arm64.pyd +0 -0
- scipy/spatial/_voronoi.pyi +4 -0
- scipy/spatial/ckdtree.py +18 -0
- scipy/spatial/distance.py +3147 -0
- scipy/spatial/distance.pyi +210 -0
- scipy/spatial/kdtree.py +25 -0
- scipy/spatial/qhull.py +25 -0
- scipy/spatial/qhull_src/COPYING_QHULL.txt +39 -0
- scipy/spatial/tests/__init__.py +0 -0
- scipy/spatial/tests/data/cdist-X1.txt +10 -0
- scipy/spatial/tests/data/cdist-X2.txt +20 -0
- scipy/spatial/tests/data/degenerate_pointset.npz +0 -0
- scipy/spatial/tests/data/iris.txt +150 -0
- scipy/spatial/tests/data/pdist-boolean-inp.txt +20 -0
- scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-chebyshev-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-cityblock-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-correlation-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-correlation-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-cosine-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-cosine-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-double-inp.txt +20 -0
- scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-euclidean-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-hamming-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-jaccard-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-jensenshannon-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt +1 -0
- scipy/spatial/tests/data/pdist-seuclidean-ml.txt +1 -0
- scipy/spatial/tests/data/pdist-spearman-ml.txt +1 -0
- scipy/spatial/tests/data/random-bool-data.txt +100 -0
- scipy/spatial/tests/data/random-double-data.txt +100 -0
- scipy/spatial/tests/data/random-int-data.txt +100 -0
- scipy/spatial/tests/data/random-uint-data.txt +100 -0
- scipy/spatial/tests/data/selfdual-4d-polytope.txt +27 -0
- scipy/spatial/tests/test__plotutils.py +91 -0
- scipy/spatial/tests/test__procrustes.py +116 -0
- scipy/spatial/tests/test_distance.py +2389 -0
- scipy/spatial/tests/test_hausdorff.py +199 -0
- scipy/spatial/tests/test_kdtree.py +1536 -0
- scipy/spatial/tests/test_qhull.py +1313 -0
- scipy/spatial/tests/test_slerp.py +417 -0
- scipy/spatial/tests/test_spherical_voronoi.py +358 -0
- scipy/spatial/transform/__init__.py +31 -0
- scipy/spatial/transform/_rigid_transform.cp311-win_arm64.lib +0 -0
- scipy/spatial/transform/_rigid_transform.cp311-win_arm64.pyd +0 -0
- scipy/spatial/transform/_rotation.cp311-win_arm64.lib +0 -0
- scipy/spatial/transform/_rotation.cp311-win_arm64.pyd +0 -0
- scipy/spatial/transform/_rotation_groups.py +140 -0
- scipy/spatial/transform/_rotation_spline.py +460 -0
- scipy/spatial/transform/rotation.py +21 -0
- scipy/spatial/transform/tests/__init__.py +0 -0
- scipy/spatial/transform/tests/test_rigid_transform.py +1221 -0
- scipy/spatial/transform/tests/test_rotation.py +2569 -0
- scipy/spatial/transform/tests/test_rotation_groups.py +169 -0
- scipy/spatial/transform/tests/test_rotation_spline.py +183 -0
- scipy/special/__init__.pxd +1 -0
- scipy/special/__init__.py +841 -0
- scipy/special/_add_newdocs.py +9961 -0
- scipy/special/_basic.py +3576 -0
- scipy/special/_comb.cp311-win_arm64.lib +0 -0
- scipy/special/_comb.cp311-win_arm64.pyd +0 -0
- scipy/special/_ellip_harm.py +214 -0
- scipy/special/_ellip_harm_2.cp311-win_arm64.lib +0 -0
- scipy/special/_ellip_harm_2.cp311-win_arm64.pyd +0 -0
- scipy/special/_gufuncs.cp311-win_arm64.lib +0 -0
- scipy/special/_gufuncs.cp311-win_arm64.pyd +0 -0
- scipy/special/_input_validation.py +17 -0
- scipy/special/_lambertw.py +149 -0
- scipy/special/_logsumexp.py +426 -0
- scipy/special/_mptestutils.py +453 -0
- scipy/special/_multiufuncs.py +610 -0
- scipy/special/_orthogonal.py +2592 -0
- scipy/special/_orthogonal.pyi +330 -0
- scipy/special/_precompute/__init__.py +0 -0
- scipy/special/_precompute/cosine_cdf.py +17 -0
- scipy/special/_precompute/expn_asy.py +54 -0
- scipy/special/_precompute/gammainc_asy.py +116 -0
- scipy/special/_precompute/gammainc_data.py +124 -0
- scipy/special/_precompute/hyp2f1_data.py +484 -0
- scipy/special/_precompute/lambertw.py +68 -0
- scipy/special/_precompute/loggamma.py +43 -0
- scipy/special/_precompute/struve_convergence.py +131 -0
- scipy/special/_precompute/utils.py +38 -0
- scipy/special/_precompute/wright_bessel.py +342 -0
- scipy/special/_precompute/wright_bessel_data.py +152 -0
- scipy/special/_precompute/wrightomega.py +41 -0
- scipy/special/_precompute/zetac.py +27 -0
- scipy/special/_sf_error.py +15 -0
- scipy/special/_specfun.cp311-win_arm64.lib +0 -0
- scipy/special/_specfun.cp311-win_arm64.pyd +0 -0
- scipy/special/_special_ufuncs.cp311-win_arm64.lib +0 -0
- scipy/special/_special_ufuncs.cp311-win_arm64.pyd +0 -0
- scipy/special/_spfun_stats.py +106 -0
- scipy/special/_spherical_bessel.py +397 -0
- scipy/special/_support_alternative_backends.py +295 -0
- scipy/special/_test_internal.cp311-win_arm64.lib +0 -0
- scipy/special/_test_internal.cp311-win_arm64.pyd +0 -0
- scipy/special/_test_internal.pyi +9 -0
- scipy/special/_testutils.py +321 -0
- scipy/special/_ufuncs.cp311-win_arm64.lib +0 -0
- scipy/special/_ufuncs.cp311-win_arm64.pyd +0 -0
- scipy/special/_ufuncs.pyi +522 -0
- scipy/special/_ufuncs.pyx +13173 -0
- scipy/special/_ufuncs_cxx.cp311-win_arm64.lib +0 -0
- scipy/special/_ufuncs_cxx.cp311-win_arm64.pyd +0 -0
- scipy/special/_ufuncs_cxx.pxd +142 -0
- scipy/special/_ufuncs_cxx.pyx +427 -0
- scipy/special/_ufuncs_cxx_defs.h +147 -0
- scipy/special/_ufuncs_defs.h +57 -0
- scipy/special/add_newdocs.py +15 -0
- scipy/special/basic.py +87 -0
- scipy/special/cython_special.cp311-win_arm64.lib +0 -0
- scipy/special/cython_special.cp311-win_arm64.pyd +0 -0
- scipy/special/cython_special.pxd +259 -0
- scipy/special/cython_special.pyi +3 -0
- scipy/special/orthogonal.py +45 -0
- scipy/special/sf_error.py +20 -0
- scipy/special/specfun.py +24 -0
- scipy/special/spfun_stats.py +17 -0
- scipy/special/tests/__init__.py +0 -0
- scipy/special/tests/_cython_examples/extending.pyx +12 -0
- scipy/special/tests/_cython_examples/meson.build +34 -0
- scipy/special/tests/data/__init__.py +0 -0
- scipy/special/tests/data/boost.npz +0 -0
- scipy/special/tests/data/gsl.npz +0 -0
- scipy/special/tests/data/local.npz +0 -0
- scipy/special/tests/test_basic.py +4815 -0
- scipy/special/tests/test_bdtr.py +112 -0
- scipy/special/tests/test_boost_ufuncs.py +64 -0
- scipy/special/tests/test_boxcox.py +125 -0
- scipy/special/tests/test_cdflib.py +712 -0
- scipy/special/tests/test_cdft_asymptotic.py +49 -0
- scipy/special/tests/test_cephes_intp_cast.py +29 -0
- scipy/special/tests/test_cosine_distr.py +83 -0
- scipy/special/tests/test_cython_special.py +363 -0
- scipy/special/tests/test_data.py +719 -0
- scipy/special/tests/test_dd.py +42 -0
- scipy/special/tests/test_digamma.py +45 -0
- scipy/special/tests/test_ellip_harm.py +278 -0
- scipy/special/tests/test_erfinv.py +89 -0
- scipy/special/tests/test_exponential_integrals.py +118 -0
- scipy/special/tests/test_extending.py +28 -0
- scipy/special/tests/test_faddeeva.py +85 -0
- scipy/special/tests/test_gamma.py +12 -0
- scipy/special/tests/test_gammainc.py +152 -0
- scipy/special/tests/test_hyp2f1.py +2566 -0
- scipy/special/tests/test_hypergeometric.py +234 -0
- scipy/special/tests/test_iv_ratio.py +249 -0
- scipy/special/tests/test_kolmogorov.py +491 -0
- scipy/special/tests/test_lambertw.py +109 -0
- scipy/special/tests/test_legendre.py +1518 -0
- scipy/special/tests/test_log1mexp.py +85 -0
- scipy/special/tests/test_loggamma.py +70 -0
- scipy/special/tests/test_logit.py +162 -0
- scipy/special/tests/test_logsumexp.py +469 -0
- scipy/special/tests/test_mpmath.py +2293 -0
- scipy/special/tests/test_nan_inputs.py +65 -0
- scipy/special/tests/test_ndtr.py +77 -0
- scipy/special/tests/test_ndtri_exp.py +94 -0
- scipy/special/tests/test_orthogonal.py +821 -0
- scipy/special/tests/test_orthogonal_eval.py +275 -0
- scipy/special/tests/test_owens_t.py +53 -0
- scipy/special/tests/test_pcf.py +24 -0
- scipy/special/tests/test_pdtr.py +48 -0
- scipy/special/tests/test_powm1.py +65 -0
- scipy/special/tests/test_precompute_expn_asy.py +24 -0
- scipy/special/tests/test_precompute_gammainc.py +108 -0
- scipy/special/tests/test_precompute_utils.py +36 -0
- scipy/special/tests/test_round.py +18 -0
- scipy/special/tests/test_sf_error.py +146 -0
- scipy/special/tests/test_sici.py +36 -0
- scipy/special/tests/test_specfun.py +48 -0
- scipy/special/tests/test_spence.py +32 -0
- scipy/special/tests/test_spfun_stats.py +61 -0
- scipy/special/tests/test_sph_harm.py +85 -0
- scipy/special/tests/test_spherical_bessel.py +400 -0
- scipy/special/tests/test_support_alternative_backends.py +248 -0
- scipy/special/tests/test_trig.py +72 -0
- scipy/special/tests/test_ufunc_signatures.py +46 -0
- scipy/special/tests/test_wright_bessel.py +205 -0
- scipy/special/tests/test_wrightomega.py +117 -0
- scipy/special/tests/test_zeta.py +301 -0
- scipy/stats/__init__.py +670 -0
- scipy/stats/_ansari_swilk_statistics.cp311-win_arm64.lib +0 -0
- scipy/stats/_ansari_swilk_statistics.cp311-win_arm64.pyd +0 -0
- scipy/stats/_axis_nan_policy.py +692 -0
- scipy/stats/_biasedurn.cp311-win_arm64.lib +0 -0
- scipy/stats/_biasedurn.cp311-win_arm64.pyd +0 -0
- scipy/stats/_biasedurn.pxd +27 -0
- scipy/stats/_binned_statistic.py +795 -0
- scipy/stats/_binomtest.py +375 -0
- scipy/stats/_bws_test.py +177 -0
- scipy/stats/_censored_data.py +459 -0
- scipy/stats/_common.py +5 -0
- scipy/stats/_constants.py +42 -0
- scipy/stats/_continued_fraction.py +387 -0
- scipy/stats/_continuous_distns.py +12486 -0
- scipy/stats/_correlation.py +210 -0
- scipy/stats/_covariance.py +636 -0
- scipy/stats/_crosstab.py +204 -0
- scipy/stats/_discrete_distns.py +2098 -0
- scipy/stats/_distn_infrastructure.py +4201 -0
- scipy/stats/_distr_params.py +299 -0
- scipy/stats/_distribution_infrastructure.py +5750 -0
- scipy/stats/_entropy.py +428 -0
- scipy/stats/_finite_differences.py +145 -0
- scipy/stats/_fit.py +1351 -0
- scipy/stats/_hypotests.py +2060 -0
- scipy/stats/_kde.py +732 -0
- scipy/stats/_ksstats.py +600 -0
- scipy/stats/_levy_stable/__init__.py +1231 -0
- scipy/stats/_levy_stable/levyst.cp311-win_arm64.lib +0 -0
- scipy/stats/_levy_stable/levyst.cp311-win_arm64.pyd +0 -0
- scipy/stats/_mannwhitneyu.py +492 -0
- scipy/stats/_mgc.py +550 -0
- scipy/stats/_morestats.py +4626 -0
- scipy/stats/_mstats_basic.py +3658 -0
- scipy/stats/_mstats_extras.py +521 -0
- scipy/stats/_multicomp.py +449 -0
- scipy/stats/_multivariate.py +7281 -0
- scipy/stats/_new_distributions.py +452 -0
- scipy/stats/_odds_ratio.py +466 -0
- scipy/stats/_page_trend_test.py +486 -0
- scipy/stats/_probability_distribution.py +1964 -0
- scipy/stats/_qmc.py +2956 -0
- scipy/stats/_qmc_cy.cp311-win_arm64.lib +0 -0
- scipy/stats/_qmc_cy.cp311-win_arm64.pyd +0 -0
- scipy/stats/_qmc_cy.pyi +54 -0
- scipy/stats/_qmvnt.py +454 -0
- scipy/stats/_qmvnt_cy.cp311-win_arm64.lib +0 -0
- scipy/stats/_qmvnt_cy.cp311-win_arm64.pyd +0 -0
- scipy/stats/_quantile.py +335 -0
- scipy/stats/_rcont/__init__.py +4 -0
- scipy/stats/_rcont/rcont.cp311-win_arm64.lib +0 -0
- scipy/stats/_rcont/rcont.cp311-win_arm64.pyd +0 -0
- scipy/stats/_relative_risk.py +263 -0
- scipy/stats/_resampling.py +2352 -0
- scipy/stats/_result_classes.py +40 -0
- scipy/stats/_sampling.py +1314 -0
- scipy/stats/_sensitivity_analysis.py +713 -0
- scipy/stats/_sobol.cp311-win_arm64.lib +0 -0
- scipy/stats/_sobol.cp311-win_arm64.pyd +0 -0
- scipy/stats/_sobol.pyi +54 -0
- scipy/stats/_sobol_direction_numbers.npz +0 -0
- scipy/stats/_stats.cp311-win_arm64.lib +0 -0
- scipy/stats/_stats.cp311-win_arm64.pyd +0 -0
- scipy/stats/_stats.pxd +10 -0
- scipy/stats/_stats_mstats_common.py +322 -0
- scipy/stats/_stats_py.py +11089 -0
- scipy/stats/_stats_pythran.cp311-win_arm64.lib +0 -0
- scipy/stats/_stats_pythran.cp311-win_arm64.pyd +0 -0
- scipy/stats/_survival.py +683 -0
- scipy/stats/_tukeylambda_stats.py +199 -0
- scipy/stats/_unuran/__init__.py +0 -0
- scipy/stats/_unuran/unuran_wrapper.cp311-win_arm64.lib +0 -0
- scipy/stats/_unuran/unuran_wrapper.cp311-win_arm64.pyd +0 -0
- scipy/stats/_unuran/unuran_wrapper.pyi +179 -0
- scipy/stats/_variation.py +126 -0
- scipy/stats/_warnings_errors.py +38 -0
- scipy/stats/_wilcoxon.py +265 -0
- scipy/stats/biasedurn.py +16 -0
- scipy/stats/contingency.py +521 -0
- scipy/stats/distributions.py +24 -0
- scipy/stats/kde.py +18 -0
- scipy/stats/morestats.py +27 -0
- scipy/stats/mstats.py +140 -0
- scipy/stats/mstats_basic.py +42 -0
- scipy/stats/mstats_extras.py +25 -0
- scipy/stats/mvn.py +17 -0
- scipy/stats/qmc.py +236 -0
- scipy/stats/sampling.py +73 -0
- scipy/stats/stats.py +41 -0
- scipy/stats/tests/__init__.py +0 -0
- scipy/stats/tests/common_tests.py +356 -0
- scipy/stats/tests/data/_mvt.py +171 -0
- scipy/stats/tests/data/fisher_exact_results_from_r.py +607 -0
- scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy +0 -0
- scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy +0 -0
- scipy/stats/tests/data/nist_anova/AtmWtAg.dat +108 -0
- scipy/stats/tests/data/nist_anova/SiRstv.dat +85 -0
- scipy/stats/tests/data/nist_anova/SmLs01.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs02.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs03.dat +18069 -0
- scipy/stats/tests/data/nist_anova/SmLs04.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs05.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs06.dat +18069 -0
- scipy/stats/tests/data/nist_anova/SmLs07.dat +249 -0
- scipy/stats/tests/data/nist_anova/SmLs08.dat +1869 -0
- scipy/stats/tests/data/nist_anova/SmLs09.dat +18069 -0
- scipy/stats/tests/data/nist_linregress/Norris.dat +97 -0
- scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy +0 -0
- scipy/stats/tests/data/studentized_range_mpmath_ref.json +1499 -0
- scipy/stats/tests/test_axis_nan_policy.py +1388 -0
- scipy/stats/tests/test_binned_statistic.py +568 -0
- scipy/stats/tests/test_censored_data.py +152 -0
- scipy/stats/tests/test_contingency.py +294 -0
- scipy/stats/tests/test_continued_fraction.py +173 -0
- scipy/stats/tests/test_continuous.py +2198 -0
- scipy/stats/tests/test_continuous_basic.py +1053 -0
- scipy/stats/tests/test_continuous_fit_censored.py +683 -0
- scipy/stats/tests/test_correlation.py +80 -0
- scipy/stats/tests/test_crosstab.py +115 -0
- scipy/stats/tests/test_discrete_basic.py +580 -0
- scipy/stats/tests/test_discrete_distns.py +700 -0
- scipy/stats/tests/test_distributions.py +10413 -0
- scipy/stats/tests/test_entropy.py +322 -0
- scipy/stats/tests/test_fast_gen_inversion.py +435 -0
- scipy/stats/tests/test_fit.py +1090 -0
- scipy/stats/tests/test_hypotests.py +1991 -0
- scipy/stats/tests/test_kdeoth.py +676 -0
- scipy/stats/tests/test_marray.py +289 -0
- scipy/stats/tests/test_mgc.py +217 -0
- scipy/stats/tests/test_morestats.py +3259 -0
- scipy/stats/tests/test_mstats_basic.py +2071 -0
- scipy/stats/tests/test_mstats_extras.py +172 -0
- scipy/stats/tests/test_multicomp.py +405 -0
- scipy/stats/tests/test_multivariate.py +4381 -0
- scipy/stats/tests/test_odds_ratio.py +148 -0
- scipy/stats/tests/test_qmc.py +1492 -0
- scipy/stats/tests/test_quantile.py +199 -0
- scipy/stats/tests/test_rank.py +345 -0
- scipy/stats/tests/test_relative_risk.py +95 -0
- scipy/stats/tests/test_resampling.py +2000 -0
- scipy/stats/tests/test_sampling.py +1450 -0
- scipy/stats/tests/test_sensitivity_analysis.py +310 -0
- scipy/stats/tests/test_stats.py +9707 -0
- scipy/stats/tests/test_survival.py +466 -0
- scipy/stats/tests/test_tukeylambda_stats.py +85 -0
- scipy/stats/tests/test_variation.py +216 -0
- scipy/version.py +12 -0
- scipy-1.16.2.dist-info/DELVEWHEEL +2 -0
- scipy-1.16.2.dist-info/LICENSE.txt +912 -0
- scipy-1.16.2.dist-info/METADATA +1061 -0
- scipy-1.16.2.dist-info/RECORD +1530 -0
- scipy-1.16.2.dist-info/WHEEL +4 -0
- scipy.libs/msvcp140-5f1c5dd31916990d94181e07bc3afb32.dll +0 -0
- scipy.libs/scipy_openblas-f3ac85b1f412f7e86514c923dc4058d1.dll +0 -0
@@ -0,0 +1,3344 @@
|
|
1
|
+
"""
|
2
|
+
Unit tests for optimization routines from optimize.py
|
3
|
+
|
4
|
+
Authors:
|
5
|
+
Ed Schofield, Nov 2005
|
6
|
+
Andrew Straw, April 2008
|
7
|
+
|
8
|
+
"""
|
9
|
+
import itertools
|
10
|
+
import inspect
|
11
|
+
import platform
|
12
|
+
import threading
|
13
|
+
import numpy as np
|
14
|
+
from numpy.testing import (assert_allclose, assert_equal,
|
15
|
+
assert_almost_equal,
|
16
|
+
assert_no_warnings, assert_warns,
|
17
|
+
assert_array_less, suppress_warnings)
|
18
|
+
import pytest
|
19
|
+
from pytest import raises as assert_raises
|
20
|
+
|
21
|
+
import scipy
|
22
|
+
from scipy import optimize
|
23
|
+
from scipy.optimize._minimize import Bounds, NonlinearConstraint
|
24
|
+
from scipy.optimize._minimize import (MINIMIZE_METHODS,
|
25
|
+
MINIMIZE_METHODS_NEW_CB,
|
26
|
+
MINIMIZE_SCALAR_METHODS)
|
27
|
+
from scipy.optimize._linprog import LINPROG_METHODS
|
28
|
+
from scipy.optimize._root import ROOT_METHODS
|
29
|
+
from scipy.optimize._root_scalar import ROOT_SCALAR_METHODS
|
30
|
+
from scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS
|
31
|
+
from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS
|
32
|
+
from scipy.optimize._optimize import MemoizeJac, show_options, OptimizeResult
|
33
|
+
from scipy.optimize import rosen, rosen_der, rosen_hess
|
34
|
+
|
35
|
+
from scipy.sparse import (coo_matrix, csc_matrix, csr_matrix, coo_array,
|
36
|
+
csr_array, csc_array)
|
37
|
+
from scipy._lib._array_api_no_0d import xp_assert_equal
|
38
|
+
from scipy._lib._array_api import make_xp_test_case
|
39
|
+
from scipy._lib._util import MapWrapper
|
40
|
+
|
41
|
+
lazy_xp_modules = [optimize]
|
42
|
+
|
43
|
+
|
44
|
+
def test_check_grad():
|
45
|
+
# Verify if check_grad is able to estimate the derivative of the
|
46
|
+
# expit (logistic sigmoid) function.
|
47
|
+
|
48
|
+
def expit(x):
|
49
|
+
return 1 / (1 + np.exp(-x))
|
50
|
+
|
51
|
+
def der_expit(x):
|
52
|
+
return np.exp(-x) / (1 + np.exp(-x))**2
|
53
|
+
|
54
|
+
x0 = np.array([1.5])
|
55
|
+
|
56
|
+
r = optimize.check_grad(expit, der_expit, x0)
|
57
|
+
assert_almost_equal(r, 0)
|
58
|
+
# SPEC-007 leave one call with seed to check it still works
|
59
|
+
r = optimize.check_grad(expit, der_expit, x0,
|
60
|
+
direction='random', seed=1234)
|
61
|
+
assert_almost_equal(r, 0)
|
62
|
+
|
63
|
+
r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6)
|
64
|
+
assert_almost_equal(r, 0)
|
65
|
+
r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6,
|
66
|
+
direction='random', rng=1234)
|
67
|
+
assert_almost_equal(r, 0)
|
68
|
+
|
69
|
+
# Check if the epsilon parameter is being considered.
|
70
|
+
r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1) - 0)
|
71
|
+
assert r > 1e-7
|
72
|
+
r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1,
|
73
|
+
direction='random', rng=1234) - 0)
|
74
|
+
assert r > 1e-7
|
75
|
+
|
76
|
+
def x_sinx(x):
|
77
|
+
return (x*np.sin(x)).sum()
|
78
|
+
|
79
|
+
def der_x_sinx(x):
|
80
|
+
return np.sin(x) + x*np.cos(x)
|
81
|
+
|
82
|
+
x0 = np.arange(0, 2, 0.2)
|
83
|
+
|
84
|
+
r = optimize.check_grad(x_sinx, der_x_sinx, x0,
|
85
|
+
direction='random', rng=1234)
|
86
|
+
assert_almost_equal(r, 0)
|
87
|
+
|
88
|
+
assert_raises(ValueError, optimize.check_grad,
|
89
|
+
x_sinx, der_x_sinx, x0,
|
90
|
+
direction='random_projection', rng=1234)
|
91
|
+
|
92
|
+
# checking can be done for derivatives of vector valued functions
|
93
|
+
r = optimize.check_grad(himmelblau_grad, himmelblau_hess, himmelblau_x0,
|
94
|
+
direction='all', rng=1234)
|
95
|
+
assert r < 5e-7
|
96
|
+
|
97
|
+
|
98
|
+
class CheckOptimize:
|
99
|
+
""" Base test case for a simple constrained entropy maximization problem
|
100
|
+
(the machine translation example of Berger et al in
|
101
|
+
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
|
102
|
+
"""
|
103
|
+
|
104
|
+
def setup_method(self):
|
105
|
+
self.F = np.array([[1, 1, 1],
|
106
|
+
[1, 1, 0],
|
107
|
+
[1, 0, 1],
|
108
|
+
[1, 0, 0],
|
109
|
+
[1, 0, 0]])
|
110
|
+
self.K = np.array([1., 0.3, 0.5])
|
111
|
+
self.startparams = np.zeros(3, np.float64)
|
112
|
+
self.solution = np.array([0., -0.524869316, 0.487525860])
|
113
|
+
self.maxiter = 1000
|
114
|
+
self.funccalls = threading.local()
|
115
|
+
self.gradcalls = threading.local()
|
116
|
+
self.trace = threading.local()
|
117
|
+
|
118
|
+
def func(self, x):
|
119
|
+
if not hasattr(self.funccalls, 'c'):
|
120
|
+
self.funccalls.c = 0
|
121
|
+
|
122
|
+
if not hasattr(self.gradcalls, 'c'):
|
123
|
+
self.gradcalls.c = 0
|
124
|
+
|
125
|
+
self.funccalls.c += 1
|
126
|
+
if self.funccalls.c > 6000:
|
127
|
+
raise RuntimeError("too many iterations in optimization routine")
|
128
|
+
log_pdot = np.dot(self.F, x)
|
129
|
+
logZ = np.log(sum(np.exp(log_pdot)))
|
130
|
+
f = logZ - np.dot(self.K, x)
|
131
|
+
if not hasattr(self.trace, 't'):
|
132
|
+
self.trace.t = []
|
133
|
+
self.trace.t.append(np.copy(x))
|
134
|
+
return f
|
135
|
+
|
136
|
+
def grad(self, x):
|
137
|
+
if not hasattr(self.gradcalls, 'c'):
|
138
|
+
self.gradcalls.c = 0
|
139
|
+
self.gradcalls.c += 1
|
140
|
+
log_pdot = np.dot(self.F, x)
|
141
|
+
logZ = np.log(sum(np.exp(log_pdot)))
|
142
|
+
p = np.exp(log_pdot - logZ)
|
143
|
+
return np.dot(self.F.transpose(), p) - self.K
|
144
|
+
|
145
|
+
def hess(self, x):
|
146
|
+
log_pdot = np.dot(self.F, x)
|
147
|
+
logZ = np.log(sum(np.exp(log_pdot)))
|
148
|
+
p = np.exp(log_pdot - logZ)
|
149
|
+
return np.dot(self.F.T,
|
150
|
+
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
|
151
|
+
|
152
|
+
def hessp(self, x, p):
|
153
|
+
return np.dot(self.hess(x), p)
|
154
|
+
|
155
|
+
|
156
|
+
class CheckOptimizeParameterized(CheckOptimize):
|
157
|
+
|
158
|
+
def test_cg(self):
|
159
|
+
# conjugate gradient optimization routine
|
160
|
+
if self.use_wrapper:
|
161
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
162
|
+
'return_all': False}
|
163
|
+
res = optimize.minimize(self.func, self.startparams, args=(),
|
164
|
+
method='CG', jac=self.grad,
|
165
|
+
options=opts)
|
166
|
+
params, fopt, func_calls, grad_calls, warnflag = \
|
167
|
+
res['x'], res['fun'], res['nfev'], res['njev'], res['status']
|
168
|
+
else:
|
169
|
+
retval = optimize.fmin_cg(self.func, self.startparams,
|
170
|
+
self.grad, (), maxiter=self.maxiter,
|
171
|
+
full_output=True, disp=self.disp,
|
172
|
+
retall=False)
|
173
|
+
(params, fopt, func_calls, grad_calls, warnflag) = retval
|
174
|
+
|
175
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
176
|
+
atol=1e-6)
|
177
|
+
|
178
|
+
# Ensure that function call counts are 'known good'; these are from
|
179
|
+
# SciPy 0.7.0. Don't allow them to increase.
|
180
|
+
assert self.funccalls.c == 9, self.funccalls.c
|
181
|
+
assert self.gradcalls.c == 7, self.gradcalls.c
|
182
|
+
|
183
|
+
# Ensure that the function behaves the same; this is from SciPy 0.7.0
|
184
|
+
assert_allclose(self.trace.t[2:4],
|
185
|
+
[[0, -0.5, 0.5],
|
186
|
+
[0, -5.05700028e-01, 4.95985862e-01]],
|
187
|
+
atol=1e-14, rtol=1e-7)
|
188
|
+
|
189
|
+
def test_cg_cornercase(self):
|
190
|
+
def f(r):
|
191
|
+
return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2
|
192
|
+
|
193
|
+
# Check several initial guesses. (Too far away from the
|
194
|
+
# minimum, the function ends up in the flat region of exp.)
|
195
|
+
for x0 in np.linspace(-0.75, 3, 71):
|
196
|
+
sol = optimize.minimize(f, [x0], method='CG')
|
197
|
+
assert sol.success
|
198
|
+
assert_allclose(sol.x, [0.5], rtol=1e-5)
|
199
|
+
|
200
|
+
def test_bfgs(self):
|
201
|
+
# Broyden-Fletcher-Goldfarb-Shanno optimization routine
|
202
|
+
if self.use_wrapper:
|
203
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
204
|
+
'return_all': False}
|
205
|
+
res = optimize.minimize(self.func, self.startparams,
|
206
|
+
jac=self.grad, method='BFGS', args=(),
|
207
|
+
options=opts)
|
208
|
+
|
209
|
+
params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
|
210
|
+
res['x'], res['fun'], res['jac'], res['hess_inv'],
|
211
|
+
res['nfev'], res['njev'], res['status'])
|
212
|
+
else:
|
213
|
+
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
|
214
|
+
args=(), maxiter=self.maxiter,
|
215
|
+
full_output=True, disp=self.disp,
|
216
|
+
retall=False)
|
217
|
+
(params, fopt, gopt, Hopt,
|
218
|
+
func_calls, grad_calls, warnflag) = retval
|
219
|
+
|
220
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
221
|
+
atol=1e-6)
|
222
|
+
|
223
|
+
# Ensure that function call counts are 'known good'; these are from
|
224
|
+
# SciPy 0.7.0. Don't allow them to increase.
|
225
|
+
assert self.funccalls.c == 10, self.funccalls.c
|
226
|
+
assert self.gradcalls.c == 8, self.gradcalls.c
|
227
|
+
|
228
|
+
# Ensure that the function behaves the same; this is from SciPy 0.7.0
|
229
|
+
assert_allclose(self.trace.t[6:8],
|
230
|
+
[[0, -5.25060743e-01, 4.87748473e-01],
|
231
|
+
[0, -5.24885582e-01, 4.87530347e-01]],
|
232
|
+
atol=1e-14, rtol=1e-7)
|
233
|
+
|
234
|
+
def test_bfgs_hess_inv0_neg(self):
|
235
|
+
# Ensure that BFGS does not accept neg. def. initial inverse
|
236
|
+
# Hessian estimate.
|
237
|
+
with pytest.raises(ValueError, match="'hess_inv0' matrix isn't "
|
238
|
+
"positive definite."):
|
239
|
+
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
|
240
|
+
opts = {'disp': self.disp, 'hess_inv0': -np.eye(5)}
|
241
|
+
optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(),
|
242
|
+
options=opts)
|
243
|
+
|
244
|
+
def test_bfgs_hess_inv0_semipos(self):
|
245
|
+
# Ensure that BFGS does not accept semi pos. def. initial inverse
|
246
|
+
# Hessian estimate.
|
247
|
+
with pytest.raises(ValueError, match="'hess_inv0' matrix isn't "
|
248
|
+
"positive definite."):
|
249
|
+
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
|
250
|
+
hess_inv0 = np.eye(5)
|
251
|
+
hess_inv0[0, 0] = 0
|
252
|
+
opts = {'disp': self.disp, 'hess_inv0': hess_inv0}
|
253
|
+
optimize.minimize(optimize.rosen, x0=x0, method='BFGS', args=(),
|
254
|
+
options=opts)
|
255
|
+
|
256
|
+
def test_bfgs_hess_inv0_sanity(self):
|
257
|
+
# Ensure that BFGS handles `hess_inv0` parameter correctly.
|
258
|
+
fun = optimize.rosen
|
259
|
+
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
|
260
|
+
opts = {'disp': self.disp, 'hess_inv0': 1e-2 * np.eye(5)}
|
261
|
+
res = optimize.minimize(fun, x0=x0, method='BFGS', args=(),
|
262
|
+
options=opts)
|
263
|
+
res_true = optimize.minimize(fun, x0=x0, method='BFGS', args=(),
|
264
|
+
options={'disp': self.disp})
|
265
|
+
assert_allclose(res.fun, res_true.fun, atol=1e-6)
|
266
|
+
|
267
|
+
@pytest.mark.filterwarnings('ignore::UserWarning')
|
268
|
+
def test_bfgs_infinite(self):
|
269
|
+
# Test corner case where -Inf is the minimum. See gh-2019.
|
270
|
+
def func(x):
|
271
|
+
return -np.e ** (-x)
|
272
|
+
def fprime(x):
|
273
|
+
return -func(x)
|
274
|
+
x0 = [0]
|
275
|
+
with np.errstate(over='ignore'):
|
276
|
+
if self.use_wrapper:
|
277
|
+
opts = {'disp': self.disp}
|
278
|
+
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
|
279
|
+
args=(), options=opts)['x']
|
280
|
+
else:
|
281
|
+
x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
|
282
|
+
assert not np.isfinite(func(x))
|
283
|
+
|
284
|
+
def test_bfgs_xrtol(self):
|
285
|
+
# test for #17345 to test xrtol parameter
|
286
|
+
x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
|
287
|
+
res = optimize.minimize(optimize.rosen,
|
288
|
+
x0, method='bfgs', options={'xrtol': 1e-3})
|
289
|
+
ref = optimize.minimize(optimize.rosen,
|
290
|
+
x0, method='bfgs', options={'gtol': 1e-3})
|
291
|
+
assert res.nit != ref.nit
|
292
|
+
|
293
|
+
def test_bfgs_c1(self):
|
294
|
+
# test for #18977 insufficiently low value of c1 leads to precision loss
|
295
|
+
# for poor starting parameters
|
296
|
+
x0 = [10.3, 20.7, 10.8, 1.9, -1.2]
|
297
|
+
res_c1_small = optimize.minimize(optimize.rosen,
|
298
|
+
x0, method='bfgs', options={'c1': 1e-8})
|
299
|
+
res_c1_big = optimize.minimize(optimize.rosen,
|
300
|
+
x0, method='bfgs', options={'c1': 1e-1})
|
301
|
+
|
302
|
+
assert res_c1_small.nfev > res_c1_big.nfev
|
303
|
+
|
304
|
+
def test_bfgs_c2(self):
|
305
|
+
# test that modification of c2 parameter
|
306
|
+
# results in different number of iterations
|
307
|
+
x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
|
308
|
+
res_default = optimize.minimize(optimize.rosen,
|
309
|
+
x0, method='bfgs', options={'c2': .9})
|
310
|
+
res_mod = optimize.minimize(optimize.rosen,
|
311
|
+
x0, method='bfgs', options={'c2': 1e-2})
|
312
|
+
assert res_default.nit > res_mod.nit
|
313
|
+
|
314
|
+
@pytest.mark.parametrize(["c1", "c2"], [[0.5, 2],
|
315
|
+
[-0.1, 0.1],
|
316
|
+
[0.2, 0.1]])
|
317
|
+
def test_invalid_c1_c2(self, c1, c2):
|
318
|
+
with pytest.raises(ValueError, match="'c1' and 'c2'"):
|
319
|
+
x0 = [10.3, 20.7, 10.8, 1.9, -1.2]
|
320
|
+
optimize.minimize(optimize.rosen, x0, method='cg',
|
321
|
+
options={'c1': c1, 'c2': c2})
|
322
|
+
|
323
|
+
def test_powell(self):
|
324
|
+
# Powell (direction set) optimization routine
|
325
|
+
if self.use_wrapper:
|
326
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
327
|
+
'return_all': False}
|
328
|
+
res = optimize.minimize(self.func, self.startparams, args=(),
|
329
|
+
method='Powell', options=opts)
|
330
|
+
params, fopt, direc, numiter, func_calls, warnflag = (
|
331
|
+
res['x'], res['fun'], res['direc'], res['nit'],
|
332
|
+
res['nfev'], res['status'])
|
333
|
+
else:
|
334
|
+
retval = optimize.fmin_powell(self.func, self.startparams,
|
335
|
+
args=(), maxiter=self.maxiter,
|
336
|
+
full_output=True, disp=self.disp,
|
337
|
+
retall=False)
|
338
|
+
(params, fopt, direc, numiter, func_calls, warnflag) = retval
|
339
|
+
|
340
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
341
|
+
atol=1e-6)
|
342
|
+
# params[0] does not affect the objective function
|
343
|
+
assert_allclose(params[1:], self.solution[1:], atol=5e-6)
|
344
|
+
|
345
|
+
# Ensure that function call counts are 'known good'; these are from
|
346
|
+
# SciPy 0.7.0. Don't allow them to increase.
|
347
|
+
#
|
348
|
+
# However, some leeway must be added: the exact evaluation
|
349
|
+
# count is sensitive to numerical error, and floating-point
|
350
|
+
# computations are not bit-for-bit reproducible across
|
351
|
+
# machines, and when using e.g., MKL, data alignment
|
352
|
+
# etc., affect the rounding error.
|
353
|
+
#
|
354
|
+
assert self.funccalls.c <= 116 + 20, self.funccalls.c
|
355
|
+
assert self.gradcalls.c == 0, self.gradcalls.c
|
356
|
+
|
357
|
+
@pytest.mark.xfail(reason="This part of test_powell fails on some "
|
358
|
+
"platforms, but the solution returned by powell is "
|
359
|
+
"still valid.")
|
360
|
+
def test_powell_gh14014(self):
|
361
|
+
# This part of test_powell started failing on some CI platforms;
|
362
|
+
# see gh-14014. Since the solution is still correct and the comments
|
363
|
+
# in test_powell suggest that small differences in the bits are known
|
364
|
+
# to change the "trace" of the solution, seems safe to xfail to get CI
|
365
|
+
# green now and investigate later.
|
366
|
+
|
367
|
+
# Powell (direction set) optimization routine
|
368
|
+
if self.use_wrapper:
|
369
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
370
|
+
'return_all': False}
|
371
|
+
res = optimize.minimize(self.func, self.startparams, args=(),
|
372
|
+
method='Powell', options=opts)
|
373
|
+
params, fopt, direc, numiter, func_calls, warnflag = (
|
374
|
+
res['x'], res['fun'], res['direc'], res['nit'],
|
375
|
+
res['nfev'], res['status'])
|
376
|
+
else:
|
377
|
+
retval = optimize.fmin_powell(self.func, self.startparams,
|
378
|
+
args=(), maxiter=self.maxiter,
|
379
|
+
full_output=True, disp=self.disp,
|
380
|
+
retall=False)
|
381
|
+
(params, fopt, direc, numiter, func_calls, warnflag) = retval
|
382
|
+
|
383
|
+
# Ensure that the function behaves the same; this is from SciPy 0.7.0
|
384
|
+
assert_allclose(self.trace[34:39],
|
385
|
+
[[0.72949016, -0.44156936, 0.47100962],
|
386
|
+
[0.72949016, -0.44156936, 0.48052496],
|
387
|
+
[1.45898031, -0.88313872, 0.95153458],
|
388
|
+
[0.72949016, -0.44156936, 0.47576729],
|
389
|
+
[1.72949016, -0.44156936, 0.47576729]],
|
390
|
+
atol=1e-14, rtol=1e-7)
|
391
|
+
|
392
|
+
def test_powell_bounded(self):
|
393
|
+
# Powell (direction set) optimization routine
|
394
|
+
# same as test_powell above, but with bounds
|
395
|
+
bounds = [(-np.pi, np.pi) for _ in self.startparams]
|
396
|
+
if self.use_wrapper:
|
397
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
398
|
+
'return_all': False}
|
399
|
+
res = optimize.minimize(self.func, self.startparams, args=(),
|
400
|
+
bounds=bounds,
|
401
|
+
method='Powell', options=opts)
|
402
|
+
params, func_calls = (res['x'], res['nfev'])
|
403
|
+
|
404
|
+
assert func_calls == self.funccalls.c
|
405
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
406
|
+
atol=1e-6, rtol=1e-5)
|
407
|
+
|
408
|
+
# The exact evaluation count is sensitive to numerical error, and
|
409
|
+
# floating-point computations are not bit-for-bit reproducible
|
410
|
+
# across machines, and when using e.g. MKL, data alignment etc.
|
411
|
+
# affect the rounding error.
|
412
|
+
# It takes 155 calls on my machine, but we can add the same +20
|
413
|
+
# margin as is used in `test_powell`
|
414
|
+
assert self.funccalls.c <= 155 + 20
|
415
|
+
assert self.gradcalls.c == 0
|
416
|
+
|
417
|
+
def test_neldermead(self):
|
418
|
+
# Nelder-Mead simplex algorithm
|
419
|
+
if self.use_wrapper:
|
420
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
421
|
+
'return_all': False}
|
422
|
+
res = optimize.minimize(self.func, self.startparams, args=(),
|
423
|
+
method='Nelder-mead', options=opts)
|
424
|
+
params, fopt, numiter, func_calls, warnflag = (
|
425
|
+
res['x'], res['fun'], res['nit'], res['nfev'],
|
426
|
+
res['status'])
|
427
|
+
else:
|
428
|
+
retval = optimize.fmin(self.func, self.startparams,
|
429
|
+
args=(), maxiter=self.maxiter,
|
430
|
+
full_output=True, disp=self.disp,
|
431
|
+
retall=False)
|
432
|
+
(params, fopt, numiter, func_calls, warnflag) = retval
|
433
|
+
|
434
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
435
|
+
atol=1e-6)
|
436
|
+
|
437
|
+
# Ensure that function call counts are 'known good'; these are from
|
438
|
+
# SciPy 0.7.0. Don't allow them to increase.
|
439
|
+
assert self.funccalls.c == 167, self.funccalls.c
|
440
|
+
assert self.gradcalls.c == 0, self.gradcalls.c
|
441
|
+
|
442
|
+
# Ensure that the function behaves the same; this is from SciPy 0.7.0
|
443
|
+
assert_allclose(self.trace.t[76:78],
|
444
|
+
[[0.1928968, -0.62780447, 0.35166118],
|
445
|
+
[0.19572515, -0.63648426, 0.35838135]],
|
446
|
+
atol=1e-14, rtol=1e-7)
|
447
|
+
|
448
|
+
def test_neldermead_initial_simplex(self):
|
449
|
+
# Nelder-Mead simplex algorithm
|
450
|
+
simplex = np.zeros((4, 3))
|
451
|
+
simplex[...] = self.startparams
|
452
|
+
for j in range(3):
|
453
|
+
simplex[j+1, j] += 0.1
|
454
|
+
|
455
|
+
if self.use_wrapper:
|
456
|
+
opts = {'maxiter': self.maxiter, 'disp': False,
|
457
|
+
'return_all': True, 'initial_simplex': simplex}
|
458
|
+
res = optimize.minimize(self.func, self.startparams, args=(),
|
459
|
+
method='Nelder-mead', options=opts)
|
460
|
+
params, fopt, numiter, func_calls, warnflag = (res['x'],
|
461
|
+
res['fun'],
|
462
|
+
res['nit'],
|
463
|
+
res['nfev'],
|
464
|
+
res['status'])
|
465
|
+
assert_allclose(res['allvecs'][0], simplex[0])
|
466
|
+
else:
|
467
|
+
retval = optimize.fmin(self.func, self.startparams,
|
468
|
+
args=(), maxiter=self.maxiter,
|
469
|
+
full_output=True, disp=False, retall=False,
|
470
|
+
initial_simplex=simplex)
|
471
|
+
|
472
|
+
(params, fopt, numiter, func_calls, warnflag) = retval
|
473
|
+
|
474
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
475
|
+
atol=1e-6)
|
476
|
+
|
477
|
+
# Ensure that function call counts are 'known good'; these are from
|
478
|
+
# SciPy 0.17.0. Don't allow them to increase.
|
479
|
+
assert self.funccalls.c == 100, self.funccalls.c
|
480
|
+
assert self.gradcalls.c == 0, self.gradcalls.c
|
481
|
+
|
482
|
+
# Ensure that the function behaves the same; this is from SciPy 0.15.0
|
483
|
+
assert_allclose(self.trace.t[50:52],
|
484
|
+
[[0.14687474, -0.5103282, 0.48252111],
|
485
|
+
[0.14474003, -0.5282084, 0.48743951]],
|
486
|
+
atol=1e-14, rtol=1e-7)
|
487
|
+
|
488
|
+
def test_neldermead_initial_simplex_bad(self):
|
489
|
+
# Check it fails with a bad simplices
|
490
|
+
bad_simplices = []
|
491
|
+
|
492
|
+
simplex = np.zeros((3, 2))
|
493
|
+
simplex[...] = self.startparams[:2]
|
494
|
+
for j in range(2):
|
495
|
+
simplex[j+1, j] += 0.1
|
496
|
+
bad_simplices.append(simplex)
|
497
|
+
|
498
|
+
simplex = np.zeros((3, 3))
|
499
|
+
bad_simplices.append(simplex)
|
500
|
+
|
501
|
+
for simplex in bad_simplices:
|
502
|
+
if self.use_wrapper:
|
503
|
+
opts = {'maxiter': self.maxiter, 'disp': False,
|
504
|
+
'return_all': False, 'initial_simplex': simplex}
|
505
|
+
assert_raises(ValueError,
|
506
|
+
optimize.minimize,
|
507
|
+
self.func,
|
508
|
+
self.startparams,
|
509
|
+
args=(),
|
510
|
+
method='Nelder-mead',
|
511
|
+
options=opts)
|
512
|
+
else:
|
513
|
+
assert_raises(ValueError, optimize.fmin,
|
514
|
+
self.func, self.startparams,
|
515
|
+
args=(), maxiter=self.maxiter,
|
516
|
+
full_output=True, disp=False, retall=False,
|
517
|
+
initial_simplex=simplex)
|
518
|
+
|
519
|
+
def test_neldermead_x0_ub(self):
|
520
|
+
# checks whether minimisation occurs correctly for entries where
|
521
|
+
# x0 == ub
|
522
|
+
# gh19991
|
523
|
+
def quad(x):
|
524
|
+
return np.sum(x**2)
|
525
|
+
|
526
|
+
res = optimize.minimize(
|
527
|
+
quad,
|
528
|
+
[1],
|
529
|
+
bounds=[(0, 1.)],
|
530
|
+
method='nelder-mead'
|
531
|
+
)
|
532
|
+
assert_allclose(res.x, [0])
|
533
|
+
|
534
|
+
res = optimize.minimize(
|
535
|
+
quad,
|
536
|
+
[1, 2],
|
537
|
+
bounds=[(0, 1.), (1, 3.)],
|
538
|
+
method='nelder-mead'
|
539
|
+
)
|
540
|
+
assert_allclose(res.x, [0, 1])
|
541
|
+
|
542
|
+
def test_ncg_negative_maxiter(self):
|
543
|
+
# Regression test for gh-8241
|
544
|
+
opts = {'maxiter': -1}
|
545
|
+
result = optimize.minimize(self.func, self.startparams,
|
546
|
+
method='Newton-CG', jac=self.grad,
|
547
|
+
args=(), options=opts)
|
548
|
+
assert result.status == 1
|
549
|
+
|
550
|
+
def test_ncg_zero_xtol(self):
|
551
|
+
# Regression test for gh-20214
|
552
|
+
def cosine(x):
|
553
|
+
return np.cos(x[0])
|
554
|
+
|
555
|
+
def jac(x):
|
556
|
+
return -np.sin(x[0])
|
557
|
+
|
558
|
+
x0 = [0.1]
|
559
|
+
xtol = 0
|
560
|
+
result = optimize.minimize(cosine,
|
561
|
+
x0=x0,
|
562
|
+
jac=jac,
|
563
|
+
method="newton-cg",
|
564
|
+
options=dict(xtol=xtol))
|
565
|
+
assert result.status == 0
|
566
|
+
assert_almost_equal(result.x[0], np.pi)
|
567
|
+
|
568
|
+
def test_ncg(self):
|
569
|
+
# line-search Newton conjugate gradient optimization routine
|
570
|
+
if self.use_wrapper:
|
571
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
572
|
+
'return_all': False}
|
573
|
+
retval = optimize.minimize(self.func, self.startparams,
|
574
|
+
method='Newton-CG', jac=self.grad,
|
575
|
+
args=(), options=opts)['x']
|
576
|
+
else:
|
577
|
+
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
|
578
|
+
args=(), maxiter=self.maxiter,
|
579
|
+
full_output=False, disp=self.disp,
|
580
|
+
retall=False)
|
581
|
+
|
582
|
+
params = retval
|
583
|
+
|
584
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
585
|
+
atol=1e-6)
|
586
|
+
|
587
|
+
# Ensure that function call counts are 'known good'; these are from
|
588
|
+
# SciPy 0.7.0. Don't allow them to increase.
|
589
|
+
assert self.funccalls.c == 7, self.funccalls.c
|
590
|
+
assert self.gradcalls.c <= 22, self.gradcalls.c # 0.13.0
|
591
|
+
# assert self.gradcalls <= 18, self.gradcalls # 0.9.0
|
592
|
+
# assert self.gradcalls == 18, self.gradcalls # 0.8.0
|
593
|
+
# assert self.gradcalls == 22, self.gradcalls # 0.7.0
|
594
|
+
|
595
|
+
# Ensure that the function behaves the same; this is from SciPy 0.7.0
|
596
|
+
assert_allclose(self.trace.t[3:5],
|
597
|
+
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
|
598
|
+
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
|
599
|
+
atol=1e-6, rtol=1e-7)
|
600
|
+
|
601
|
+
def test_ncg_hess(self):
|
602
|
+
# Newton conjugate gradient with Hessian
|
603
|
+
if self.use_wrapper:
|
604
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
605
|
+
'return_all': False}
|
606
|
+
retval = optimize.minimize(self.func, self.startparams,
|
607
|
+
method='Newton-CG', jac=self.grad,
|
608
|
+
hess=self.hess,
|
609
|
+
args=(), options=opts)['x']
|
610
|
+
else:
|
611
|
+
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
|
612
|
+
fhess=self.hess,
|
613
|
+
args=(), maxiter=self.maxiter,
|
614
|
+
full_output=False, disp=self.disp,
|
615
|
+
retall=False)
|
616
|
+
|
617
|
+
params = retval
|
618
|
+
|
619
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
620
|
+
atol=1e-6)
|
621
|
+
|
622
|
+
# Ensure that function call counts are 'known good'; these are from
|
623
|
+
# SciPy 0.7.0. Don't allow them to increase.
|
624
|
+
assert self.funccalls.c <= 7, self.funccalls.c # gh10673
|
625
|
+
assert self.gradcalls.c <= 18, self.gradcalls.c # 0.9.0
|
626
|
+
# assert self.gradcalls == 18, self.gradcalls # 0.8.0
|
627
|
+
# assert self.gradcalls == 22, self.gradcalls # 0.7.0
|
628
|
+
|
629
|
+
# Ensure that the function behaves the same; this is from SciPy 0.7.0
|
630
|
+
assert_allclose(self.trace.t[3:5],
|
631
|
+
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
|
632
|
+
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
|
633
|
+
atol=1e-6, rtol=1e-7)
|
634
|
+
|
635
|
+
def test_ncg_hessp(self):
|
636
|
+
# Newton conjugate gradient with Hessian times a vector p.
|
637
|
+
if self.use_wrapper:
|
638
|
+
opts = {'maxiter': self.maxiter, 'disp': self.disp,
|
639
|
+
'return_all': False}
|
640
|
+
retval = optimize.minimize(self.func, self.startparams,
|
641
|
+
method='Newton-CG', jac=self.grad,
|
642
|
+
hessp=self.hessp,
|
643
|
+
args=(), options=opts)['x']
|
644
|
+
else:
|
645
|
+
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
|
646
|
+
fhess_p=self.hessp,
|
647
|
+
args=(), maxiter=self.maxiter,
|
648
|
+
full_output=False, disp=self.disp,
|
649
|
+
retall=False)
|
650
|
+
|
651
|
+
params = retval
|
652
|
+
|
653
|
+
assert_allclose(self.func(params), self.func(self.solution),
|
654
|
+
atol=1e-6)
|
655
|
+
|
656
|
+
# Ensure that function call counts are 'known good'; these are from
|
657
|
+
# SciPy 0.7.0. Don't allow them to increase.
|
658
|
+
assert self.funccalls.c <= 7, self.funccalls.c # gh10673
|
659
|
+
assert self.gradcalls.c <= 18, self.gradcalls.c # 0.9.0
|
660
|
+
# assert self.gradcalls == 18, self.gradcalls # 0.8.0
|
661
|
+
# assert self.gradcalls == 22, self.gradcalls # 0.7.0
|
662
|
+
|
663
|
+
# Ensure that the function behaves the same; this is from SciPy 0.7.0
|
664
|
+
assert_allclose(self.trace.t[3:5],
|
665
|
+
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
|
666
|
+
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
|
667
|
+
atol=1e-6, rtol=1e-7)
|
668
|
+
|
669
|
+
def test_cobyqa(self):
|
670
|
+
# COBYQA method.
|
671
|
+
if self.use_wrapper:
|
672
|
+
res = optimize.minimize(
|
673
|
+
self.func,
|
674
|
+
self.startparams,
|
675
|
+
method='cobyqa',
|
676
|
+
options={'maxiter': self.maxiter, 'disp': self.disp},
|
677
|
+
)
|
678
|
+
assert_allclose(res.fun, self.func(self.solution), atol=1e-6)
|
679
|
+
|
680
|
+
# Ensure that function call counts are 'known good'; these are from
|
681
|
+
# SciPy 1.14.0. Don't allow them to increase. The exact evaluation
|
682
|
+
# count is sensitive to numerical error and floating-point
|
683
|
+
# computations are not bit-for-bit reproducible across machines. It
|
684
|
+
# takes 45 calls on my machine, but we can add the same +20 margin
|
685
|
+
# as is used in `test_powell`
|
686
|
+
assert self.funccalls.c <= 45 + 20, self.funccalls.c
|
687
|
+
|
688
|
+
|
689
|
+
def test_maxfev_test():
|
690
|
+
rng = np.random.default_rng(271707100830272976862395227613146332411)
|
691
|
+
|
692
|
+
def cost(x):
|
693
|
+
return rng.random(1) * 1000 # never converged problem
|
694
|
+
|
695
|
+
for imaxfev in [1, 10, 50]:
|
696
|
+
# "TNC" and "L-BFGS-B" also supports max function evaluation, but
|
697
|
+
# these may violate the limit because of evaluating gradients
|
698
|
+
# by numerical differentiation. See the discussion in PR #14805.
|
699
|
+
for method in ['Powell', 'Nelder-Mead']:
|
700
|
+
result = optimize.minimize(cost, rng.random(10),
|
701
|
+
method=method,
|
702
|
+
options={'maxfev': imaxfev})
|
703
|
+
assert result["nfev"] == imaxfev
|
704
|
+
|
705
|
+
|
706
|
+
def test_wrap_scalar_function_with_validation():
|
707
|
+
|
708
|
+
def func_(x):
|
709
|
+
return x
|
710
|
+
|
711
|
+
fcalls, func = optimize._optimize.\
|
712
|
+
_wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
|
713
|
+
|
714
|
+
for i in range(5):
|
715
|
+
func(np.asarray(i))
|
716
|
+
assert fcalls[0] == i+1
|
717
|
+
|
718
|
+
msg = "Too many function calls"
|
719
|
+
with assert_raises(optimize._optimize._MaxFuncCallError, match=msg):
|
720
|
+
func(np.asarray(i)) # exceeded maximum function call
|
721
|
+
|
722
|
+
fcalls, func = optimize._optimize.\
|
723
|
+
_wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
|
724
|
+
|
725
|
+
msg = "The user-provided objective function must return a scalar value."
|
726
|
+
with assert_raises(ValueError, match=msg):
|
727
|
+
func(np.array([1, 1]))
|
728
|
+
|
729
|
+
|
730
|
+
def test_obj_func_returns_scalar():
|
731
|
+
match = ("The user-provided "
|
732
|
+
"objective function must "
|
733
|
+
"return a scalar value.")
|
734
|
+
with assert_raises(ValueError, match=match):
|
735
|
+
optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS')
|
736
|
+
|
737
|
+
|
738
|
+
def test_neldermead_iteration_num():
|
739
|
+
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
|
740
|
+
res = optimize._minimize._minimize_neldermead(optimize.rosen, x0,
|
741
|
+
xatol=1e-8)
|
742
|
+
assert res.nit <= 339
|
743
|
+
|
744
|
+
|
745
|
+
def test_neldermead_respect_fp():
|
746
|
+
# Nelder-Mead should respect the fp type of the input + function
|
747
|
+
x0 = np.array([5.0, 4.0]).astype(np.float32)
|
748
|
+
def rosen_(x):
|
749
|
+
assert x.dtype == np.float32
|
750
|
+
return optimize.rosen(x)
|
751
|
+
|
752
|
+
optimize.minimize(rosen_, x0, method='Nelder-Mead')
|
753
|
+
|
754
|
+
|
755
|
+
def test_neldermead_xatol_fatol():
|
756
|
+
# gh4484
|
757
|
+
# test we can call with fatol, xatol specified
|
758
|
+
def func(x):
|
759
|
+
return x[0] ** 2 + x[1] ** 2
|
760
|
+
|
761
|
+
optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
|
762
|
+
xatol=1e-3, fatol=1e-3)
|
763
|
+
|
764
|
+
|
765
|
+
def test_neldermead_adaptive():
|
766
|
+
def func(x):
|
767
|
+
return np.sum(x ** 2)
|
768
|
+
p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159,
|
769
|
+
0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652,
|
770
|
+
0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474]
|
771
|
+
|
772
|
+
res = optimize.minimize(func, p0, method='Nelder-Mead')
|
773
|
+
assert_equal(res.success, False)
|
774
|
+
|
775
|
+
res = optimize.minimize(func, p0, method='Nelder-Mead',
|
776
|
+
options={'adaptive': True})
|
777
|
+
assert_equal(res.success, True)
|
778
|
+
|
779
|
+
|
780
|
+
@pytest.mark.thread_unsafe
|
781
|
+
def test_bounded_powell_outsidebounds():
|
782
|
+
# With the bounded Powell method if you start outside the bounds the final
|
783
|
+
# should still be within the bounds (provided that the user doesn't make a
|
784
|
+
# bad choice for the `direc` argument).
|
785
|
+
def func(x):
|
786
|
+
return np.sum(x ** 2)
|
787
|
+
bounds = (-1, 1), (-1, 1), (-1, 1)
|
788
|
+
x0 = [-4, .5, -.8]
|
789
|
+
|
790
|
+
# we're starting outside the bounds, so we should get a warning
|
791
|
+
with assert_warns(optimize.OptimizeWarning):
|
792
|
+
res = optimize.minimize(func, x0, bounds=bounds, method="Powell")
|
793
|
+
assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6)
|
794
|
+
assert_equal(res.success, True)
|
795
|
+
assert_equal(res.status, 0)
|
796
|
+
|
797
|
+
# However, now if we change the `direc` argument such that the
|
798
|
+
# set of vectors does not span the parameter space, then we may
|
799
|
+
# not end up back within the bounds. Here we see that the first
|
800
|
+
# parameter cannot be updated!
|
801
|
+
direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
|
802
|
+
# we're starting outside the bounds, so we should get a warning
|
803
|
+
with assert_warns(optimize.OptimizeWarning):
|
804
|
+
res = optimize.minimize(func, x0,
|
805
|
+
bounds=bounds, method="Powell",
|
806
|
+
options={'direc': direc})
|
807
|
+
assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6)
|
808
|
+
assert_equal(res.success, False)
|
809
|
+
assert_equal(res.status, 4)
|
810
|
+
|
811
|
+
|
812
|
+
@pytest.mark.thread_unsafe
|
813
|
+
def test_bounded_powell_vs_powell():
|
814
|
+
# here we test an example where the bounded Powell method
|
815
|
+
# will return a different result than the standard Powell
|
816
|
+
# method.
|
817
|
+
|
818
|
+
# first we test a simple example where the minimum is at
|
819
|
+
# the origin and the minimum that is within the bounds is
|
820
|
+
# larger than the minimum at the origin.
|
821
|
+
def func(x):
|
822
|
+
return np.sum(x ** 2)
|
823
|
+
bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2)
|
824
|
+
x0 = [-2.1, -5.2, 1.9, 0, -2]
|
825
|
+
|
826
|
+
options = {'ftol': 1e-10, 'xtol': 1e-10}
|
827
|
+
|
828
|
+
res_powell = optimize.minimize(func, x0, method="Powell", options=options)
|
829
|
+
assert_allclose(res_powell.x, 0., atol=1e-6)
|
830
|
+
assert_allclose(res_powell.fun, 0., atol=1e-6)
|
831
|
+
|
832
|
+
res_bounded_powell = optimize.minimize(func, x0, options=options,
|
833
|
+
bounds=bounds,
|
834
|
+
method="Powell")
|
835
|
+
p = np.array([-1, -0.1, 1, 0, -2])
|
836
|
+
assert_allclose(res_bounded_powell.x, p, atol=1e-6)
|
837
|
+
assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
|
838
|
+
|
839
|
+
# now we test bounded Powell but with a mix of inf bounds.
|
840
|
+
bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2)
|
841
|
+
res_bounded_powell = optimize.minimize(func, x0, options=options,
|
842
|
+
bounds=bounds,
|
843
|
+
method="Powell")
|
844
|
+
p = np.array([-1, -0.1, 1, 0, -2])
|
845
|
+
assert_allclose(res_bounded_powell.x, p, atol=1e-6)
|
846
|
+
assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
|
847
|
+
|
848
|
+
# next we test an example where the global minimum is within
|
849
|
+
# the bounds, but the bounded Powell method performs better
|
850
|
+
# than the standard Powell method.
|
851
|
+
def func(x):
|
852
|
+
t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1])
|
853
|
+
t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2]))
|
854
|
+
return t**2
|
855
|
+
|
856
|
+
bounds = [(-2, 5)] * 3
|
857
|
+
x0 = [-0.5, -0.5, -0.5]
|
858
|
+
|
859
|
+
res_powell = optimize.minimize(func, x0, method="Powell")
|
860
|
+
res_bounded_powell = optimize.minimize(func, x0,
|
861
|
+
bounds=bounds,
|
862
|
+
method="Powell")
|
863
|
+
assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6)
|
864
|
+
assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
|
865
|
+
|
866
|
+
# next we test the previous example where the we provide Powell
|
867
|
+
# with (-inf, inf) bounds, and compare it to providing Powell
|
868
|
+
# with no bounds. They should end up the same.
|
869
|
+
bounds = [(-np.inf, np.inf)] * 3
|
870
|
+
|
871
|
+
res_bounded_powell = optimize.minimize(func, x0,
|
872
|
+
bounds=bounds,
|
873
|
+
method="Powell")
|
874
|
+
assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6)
|
875
|
+
assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6)
|
876
|
+
assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6)
|
877
|
+
|
878
|
+
# now test when x0 starts outside of the bounds.
|
879
|
+
x0 = [45.46254415, -26.52351498, 31.74830248]
|
880
|
+
bounds = [(-2, 5)] * 3
|
881
|
+
# we're starting outside the bounds, so we should get a warning
|
882
|
+
with assert_warns(optimize.OptimizeWarning):
|
883
|
+
res_bounded_powell = optimize.minimize(func, x0,
|
884
|
+
bounds=bounds,
|
885
|
+
method="Powell")
|
886
|
+
assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
|
887
|
+
|
888
|
+
|
889
|
+
def test_onesided_bounded_powell_stability():
|
890
|
+
# When the Powell method is bounded on only one side, a
|
891
|
+
# np.tan transform is done in order to convert it into a
|
892
|
+
# completely bounded problem. Here we do some simple tests
|
893
|
+
# of one-sided bounded Powell where the optimal solutions
|
894
|
+
# are large to test the stability of the transformation.
|
895
|
+
kwargs = {'method': 'Powell',
|
896
|
+
'bounds': [(-np.inf, 1e6)] * 3,
|
897
|
+
'options': {'ftol': 1e-8, 'xtol': 1e-8}}
|
898
|
+
x0 = [1, 1, 1]
|
899
|
+
|
900
|
+
# df/dx is constant.
|
901
|
+
def f(x):
|
902
|
+
return -np.sum(x)
|
903
|
+
res = optimize.minimize(f, x0, **kwargs)
|
904
|
+
assert_allclose(res.fun, -3e6, atol=1e-4)
|
905
|
+
|
906
|
+
# df/dx gets smaller and smaller.
|
907
|
+
def f(x):
|
908
|
+
return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1)
|
909
|
+
|
910
|
+
res = optimize.minimize(f, x0, **kwargs)
|
911
|
+
assert_allclose(res.fun, -(3e6) ** (0.1))
|
912
|
+
|
913
|
+
# df/dx gets larger and larger.
|
914
|
+
def f(x):
|
915
|
+
return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1)
|
916
|
+
|
917
|
+
res = optimize.minimize(f, x0, **kwargs)
|
918
|
+
assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7)
|
919
|
+
|
920
|
+
# df/dx gets larger for some of the variables and smaller for others.
|
921
|
+
def f(x):
|
922
|
+
t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1)
|
923
|
+
t *= (1 if np.all(x > 0) else -1)
|
924
|
+
return t
|
925
|
+
|
926
|
+
kwargs['bounds'] = [(-np.inf, 1e3)] * 3
|
927
|
+
res = optimize.minimize(f, x0, **kwargs)
|
928
|
+
assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7)
|
929
|
+
|
930
|
+
|
931
|
+
class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
|
932
|
+
use_wrapper = True
|
933
|
+
disp = True
|
934
|
+
|
935
|
+
|
936
|
+
class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
|
937
|
+
use_wrapper = True
|
938
|
+
disp = False
|
939
|
+
|
940
|
+
|
941
|
+
class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
|
942
|
+
use_wrapper = False
|
943
|
+
disp = True
|
944
|
+
|
945
|
+
|
946
|
+
class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
|
947
|
+
use_wrapper = False
|
948
|
+
disp = False
|
949
|
+
|
950
|
+
|
951
|
+
class TestOptimizeSimple(CheckOptimize):
|
952
|
+
|
953 | +     def test_bfgs_nan(self):
954 | +         # Test corner case where nan is fed to optimizer. See gh-2067.
955 | +         def func(x):
956 | +             return x
957 | +         def fprime(x):
958 | +             return np.ones_like(x)
959 | +         x0 = [np.nan]
960 | +         with np.errstate(over='ignore', invalid='ignore'):
961 | +             x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
962 | +             assert np.isnan(func(x))
963 | +
964 | +     def test_bfgs_nan_return(self):
965 | +         # Test corner cases where fun returns NaN. See gh-4793.
966 | +
967 | +         # First case: NaN from first call.
968 | +         def func(x):
969 | +             return np.nan
970 | +         with np.errstate(invalid='ignore'):
971 | +             result = optimize.minimize(func, 0)
972 | +
973 | +         assert np.isnan(result['fun'])
974 | +         assert result['success'] is False
975 | +
976 | +         # Second case: NaN from second call.
977 | +         def func(x):
978 | +             return 0 if x == 0 else np.nan
979 | +         def fprime(x):
980 | +             return np.ones_like(x)  # Steer away from zero.
981 | +         with np.errstate(invalid='ignore'):
982 | +             result = optimize.minimize(func, 0, jac=fprime)
983 | +
984 | +         assert np.isnan(result['fun'])
985 | +         assert result['success'] is False
986 | +
987 | +     def test_bfgs_numerical_jacobian(self):
988 | +         # BFGS with numerical Jacobian and a vector epsilon parameter.
989 | +         # define the epsilon parameter using a random vector
990 | +         epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution))
991 | +
992 | +         params = optimize.fmin_bfgs(self.func, self.startparams,
993 | +                                     epsilon=epsilon, args=(),
994 | +                                     maxiter=self.maxiter, disp=False)
995 | +
996 | +         assert_allclose(self.func(params), self.func(self.solution),
997 | +                         atol=1e-6)
998 | +
999 | +     def test_finite_differences_jac(self):
1000 | +         methods = ['BFGS', 'CG', 'TNC']
1001 | +         jacs = ['2-point', '3-point', None]
1002 | +         for method, jac in itertools.product(methods, jacs):
1003 | +             result = optimize.minimize(self.func, self.startparams,
1004 | +                                        method=method, jac=jac)
1005 | +             assert_allclose(self.func(result.x), self.func(self.solution),
1006 | +                             atol=1e-6)
1007 | +
1008 | +     def test_finite_differences_hess(self):
1009 | +         # test that all the methods that require hess can use finite-difference
1010 | +         # For Newton-CG, trust-ncg, trust-krylov the FD estimated hessian is
1011 | +         # wrapped in a hessp function
1012 | +         # dogleg, trust-exact actually require true hessians at the moment, so
1013 | +         # they're excluded.
1014 | +         methods = ['trust-constr', 'Newton-CG', 'trust-ncg', 'trust-krylov']
1015 | +         hesses = FD_METHODS + (optimize.BFGS,)
1016 | +         for method, hess in itertools.product(methods, hesses):
1017 | +             if hess is optimize.BFGS:
1018 | +                 hess = hess()
1019 | +             result = optimize.minimize(self.func, self.startparams,
1020 | +                                        method=method, jac=self.grad,
1021 | +                                        hess=hess)
1022 | +             assert result.success
1023 | +
1024 | +         # check that the methods demand some sort of Hessian specification
1025 | +         # Newton-CG creates its own hessp, and trust-constr doesn't need a hess
1026 | +         # specified either
1027 | +         methods = ['trust-ncg', 'trust-krylov', 'dogleg', 'trust-exact']
1028 | +         for method in methods:
1029 | +             with pytest.raises(ValueError):
1030 | +                 optimize.minimize(self.func, self.startparams,
1031 | +                                   method=method, jac=self.grad,
1032 | +                                   hess=None)
1033 | +
1034 | +     def test_bfgs_gh_2169(self):
1035 | +         def f(x):
1036 | +             if x < 0:
1037 | +                 return 1.79769313e+308
1038 | +             else:
1039 | +                 return x + 1./x
1040 | +         xs = optimize.fmin_bfgs(f, [10.], disp=False)
1041 | +         assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)
1042 | +
1043 | +     def test_bfgs_double_evaluations(self):
1044 | +         # check BFGS does not evaluate twice in a row at same point
1045 | +         def f(x):
1046 | +             xp = x[0]
1047 | +             assert xp not in seen
1048 | +             seen.add(xp)
1049 | +             return 10*x**2, 20*x
1050 | +
1051 | +         seen = set()
1052 | +         optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7)
1053 | +
1054 | +     def test_l_bfgs_b(self):
1055 | +         # limited-memory bound-constrained BFGS algorithm
1056 | +         retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
1057 | +                                         self.grad, args=(),
1058 | +                                         maxiter=self.maxiter)
1059 | +
1060 | +         (params, fopt, d) = retval
1061 | +
1062 | +         assert_allclose(self.func(params), self.func(self.solution),
1063 | +                         atol=1e-6)
1064 | +
1065 | +         # Ensure that function call counts are 'known good'; these are from
1066 | +         # SciPy 0.7.0. Don't allow them to increase.
1067 | +         assert self.funccalls.c == 7, self.funccalls.c
1068 | +         assert self.gradcalls.c == 5, self.gradcalls.c
1069 | +
1070 | +         # Ensure that the function behaves the same; this is from SciPy 0.7.0
1071 | +         # test fixed in gh10673
1072 | +         assert_allclose(self.trace.t[3:5],
1073 | +                         [[8.117083e-16, -5.196198e-01, 4.897617e-01],
1074 | +                          [0., -0.52489628, 0.48753042]],
1075 | +                         atol=1e-14, rtol=1e-7)
1076 | +
1077 | +     def test_l_bfgs_b_numjac(self):
1078 | +         # L-BFGS-B with numerical Jacobian
1079 | +         retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
1080 | +                                         approx_grad=True,
1081 | +                                         maxiter=self.maxiter)
1082 | +
1083 | +         (params, fopt, d) = retval
1084 | +
1085 | +         assert_allclose(self.func(params), self.func(self.solution),
1086 | +                         atol=1e-6)
1087 | +
1088 | +     def test_l_bfgs_b_funjac(self):
1089 | +         # L-BFGS-B with combined objective function and Jacobian
1090 | +         def fun(x):
1091 | +             return self.func(x), self.grad(x)
1092 | +
1093 | +         retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
1094 | +                                         maxiter=self.maxiter)
1095 | +
1096 | +         (params, fopt, d) = retval
1097 | +
1098 | +         assert_allclose(self.func(params), self.func(self.solution),
1099 | +                         atol=1e-6)
1100 | +
1101 | +     def test_l_bfgs_b_maxiter(self):
1102 | +         # gh7854
1103 | +         # Ensure that not more than maxiters are ever run.
1104 | +         class Callback:
1105 | +             def __init__(self):
1106 | +                 self.nit = 0
1107 | +                 self.fun = None
1108 | +                 self.x = None
1109 | +
1110 | +             def __call__(self, x):
1111 | +                 self.x = x
1112 | +                 self.fun = optimize.rosen(x)
1113 | +                 self.nit += 1
1114 | +
1115 | +         c = Callback()
1116 | +         res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
1117 | +                                 callback=c, options={'maxiter': 5})
1118 | +
1119 | +         assert_equal(res.nit, 5)
1120 | +         assert_almost_equal(res.x, c.x)
1121 | +         assert_almost_equal(res.fun, c.fun)
1122 | +         assert_equal(res.status, 1)
1123 | +         assert res.success is False
1124 | +         assert_equal(res.message,
1125 | +                      'STOP: TOTAL NO. OF ITERATIONS REACHED LIMIT')
1126 | +
1127 | +     def test_minimize_l_bfgs_b(self):
1128 | +         # Minimize with L-BFGS-B method
1129 | +         opts = {'maxiter': self.maxiter}
1130 | +         r = optimize.minimize(self.func, self.startparams,
1131 | +                               method='L-BFGS-B', jac=self.grad,
1132 | +                               options=opts)
1133 | +         assert_allclose(self.func(r.x), self.func(self.solution),
1134 | +                         atol=1e-6)
1135 | +         assert self.gradcalls.c == r.njev
1136 | +
1137 | +         self.funccalls.c = self.gradcalls.c = 0
1138 | +         # approximate jacobian
1139 | +         ra = optimize.minimize(self.func, self.startparams,
1140 | +                                method='L-BFGS-B', options=opts)
1141 | +         # check that function evaluations in approximate jacobian are counted
1142 | +         # assert_(ra.nfev > r.nfev)
1143 | +         assert self.funccalls.c == ra.nfev
1144 | +         assert_allclose(self.func(ra.x), self.func(self.solution),
1145 | +                         atol=1e-6)
1146 | +
1147 | +         self.funccalls.c = self.gradcalls.c = 0
1148 | +         # approximate jacobian
1149 | +         ra = optimize.minimize(self.func, self.startparams, jac='3-point',
1150 | +                                method='L-BFGS-B', options=opts)
1151 | +         assert self.funccalls.c == ra.nfev
1152 | +         assert_allclose(self.func(ra.x), self.func(self.solution),
1153 | +                         atol=1e-6)
1154 | +
1155 | +     def test_minimize_l_bfgs_b_ftol(self):
1156 | +         # Check that the `ftol` parameter in l_bfgs_b works as expected
1157 | +         v0 = None
1158 | +         for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
1159 | +             opts = {'maxiter': self.maxiter, 'ftol': tol}
1160 | +             sol = optimize.minimize(self.func, self.startparams,
1161 | +                                     method='L-BFGS-B', jac=self.grad,
1162 | +                                     options=opts)
1163 | +             v = self.func(sol.x)
1164 | +
1165 | +             if v0 is None:
1166 | +                 v0 = v
1167 | +             else:
1168 | +                 assert v < v0
1169 | +
1170 | +             assert_allclose(v, self.func(self.solution), rtol=tol)
1171 | +
1172 | +     def test_minimize_l_bfgs_maxls(self):
1173 | +         # check that the maxls is passed down to the Fortran routine
1174 | +         sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]),
1175 | +                                 method='L-BFGS-B', jac=optimize.rosen_der,
1176 | +                                 options={'maxls': 1})
1177 | +         assert not sol.success
1178 | +
1179 | +     def test_minimize_l_bfgs_b_maxfun_interruption(self):
1180 | +         # gh-6162
1181 | +         f = optimize.rosen
1182 | +         g = optimize.rosen_der
1183 | +         values = []
1184 | +         x0 = np.full(7, 1000)
1185 | +
1186 | +         def objfun(x):
1187 | +             value = f(x)
1188 | +             values.append(value)
1189 | +             return value
1190 | +
1191 | +         # Look for an interesting test case.
1192 | +         # Request a maxfun that stops at a particularly bad function
1193 | +         # evaluation somewhere between 100 and 300 evaluations.
1194 | +         low, medium, high = 30, 100, 300
1195 | +         optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
1196 | +         v, k = max((y, i) for i, y in enumerate(values[medium:]))
1197 | +         maxfun = medium + k
1198 | +         # If the minimization strategy is reasonable,
1199 | +         # the minimize() result should not be worse than the best
1200 | +         # of the first 30 function evaluations.
1201 | +         target = min(values[:low])
1202 | +         xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
1203 | +         assert_array_less(fmin, target)
1204 | +
1205 | +     def test_custom(self):
1206 | +         # This function comes from the documentation example.
1207 | +         def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
1208 | +                     maxiter=100, callback=None, **options):
1209 | +             bestx = x0
1210 | +             besty = fun(x0)
1211 | +             funcalls = 1
1212 | +             niter = 0
1213 | +             improved = True
1214 | +             stop = False
1215 | +
1216 | +             while improved and not stop and niter < maxiter:
1217 | +                 improved = False
1218 | +                 niter += 1
1219 | +                 for dim in range(np.size(x0)):
1220 | +                     for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
1221 | +                         testx = np.copy(bestx)
1222 | +                         testx[dim] = s
1223 | +                         testy = fun(testx, *args)
1224 | +                         funcalls += 1
1225 | +                         if testy < besty:
1226 | +                             besty = testy
1227 | +                             bestx = testx
1228 | +                             improved = True
1229 | +                 if callback is not None:
1230 | +                     callback(bestx)
1231 | +                 if maxfev is not None and funcalls >= maxfev:
1232 | +                     stop = True
1233 | +                     break
1234 | +
1235 | +             return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
1236 | +                                            nfev=funcalls, success=(niter > 1))
1237 | +
1238 | +         x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
1239 | +         res = optimize.minimize(optimize.rosen, x0, method=custmin,
1240 | +                                 options=dict(stepsize=0.05))
1241 | +         assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)
1242 | +
1243 | +     def test_gh10771(self):
1244 | +         # check that minimize passes bounds and constraints to a custom
1245 | +         # minimizer without altering them.
1246 | +         bounds = [(-2, 2), (0, 3)]
1247 | +         constraints = 'constraints'
1248 | +
1249 | +         def custmin(fun, x0, **options):
1250 | +             assert options['bounds'] is bounds
1251 | +             assert options['constraints'] is constraints
1252 | +             return optimize.OptimizeResult()
1253 | +
1254 | +         x0 = [1, 1]
1255 | +         optimize.minimize(optimize.rosen, x0, method=custmin,
1256 | +                           bounds=bounds, constraints=constraints)
1257 | +
1258 | +     def test_minimize_tol_parameter(self):
1259 | +         # Check that the minimize() tol= argument does something
1260 | +         def func(z):
1261 | +             x, y = z
1262 | +             return x**2*y**2 + x**4 + 1
1263 | +
1264 | +         def dfunc(z):
1265 | +             x, y = z
1266 | +             return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
1267 | +
1268 | +         for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
1269 | +                        'newton-cg', 'l-bfgs-b', 'tnc',
1270 | +                        'cobyla', 'cobyqa', 'slsqp']:
1271 | +             if method in ('nelder-mead', 'powell', 'cobyla', 'cobyqa'):
1272 | +                 jac = None
1273 | +             else:
1274 | +                 jac = dfunc
1275 | +
1276 | +             sol1 = optimize.minimize(func, [2, 2], jac=jac, tol=1e-10,
1277 | +                                      method=method)
1278 | +             sol2 = optimize.minimize(func, [2, 2], jac=jac, tol=1.0,
1279 | +                                      method=method)
1280 | +             assert func(sol1.x) < func(sol2.x), \
1281 | +                 f"{method}: {func(sol1.x)} vs. {func(sol2.x)}"
1282 | +
1283
|
+
@pytest.mark.fail_slow(10)
|
1284
|
+
@pytest.mark.filterwarnings('ignore::UserWarning')
|
1285
|
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning') # See gh-18547
|
1286
|
+
@pytest.mark.parametrize('method',
|
1287
|
+
['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
|
1288
|
+
'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
|
1289
|
+
'fmin_slsqp'] + MINIMIZE_METHODS)
|
1290
|
+
def test_minimize_callback_copies_array(self, method):
|
1291
|
+
# Check that arrays passed to callbacks are not modified
|
1292
|
+
# inplace by the optimizer afterward
|
1293
|
+
|
1294
|
+
if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
|
1295
|
+
def func(x):
|
1296
|
+
return optimize.rosen(x), optimize.rosen_der(x)
|
1297
|
+
else:
|
1298
|
+
func = optimize.rosen
|
1299
|
+
jac = optimize.rosen_der
|
1300
|
+
hess = optimize.rosen_hess
|
1301
|
+
|
1302
|
+
x0 = np.zeros(10)
|
1303
|
+
|
1304
|
+
# Set options
|
1305
|
+
kwargs = {}
|
1306
|
+
if method.startswith('fmin'):
|
1307
|
+
routine = getattr(optimize, method)
|
1308
|
+
if method == 'fmin_slsqp':
|
1309
|
+
kwargs['iter'] = 5
|
1310
|
+
elif method == 'fmin_tnc':
|
1311
|
+
kwargs['maxfun'] = 100
|
1312
|
+
elif method in ('fmin', 'fmin_powell'):
|
1313
|
+
kwargs['maxiter'] = 3500
|
1314
|
+
else:
|
1315
|
+
kwargs['maxiter'] = 5
|
1316
|
+
else:
|
1317
|
+
def routine(*a, **kw):
|
1318
|
+
kw['method'] = method
|
1319
|
+
return optimize.minimize(*a, **kw)
|
1320
|
+
|
1321
|
+
if method == 'tnc':
|
1322
|
+
kwargs['options'] = dict(maxfun=100)
|
1323
|
+
elif method == 'cobyla':
|
1324
|
+
kwargs['options'] = dict(maxiter=100)
|
1325
|
+
else:
|
1326
|
+
kwargs['options'] = dict(maxiter=5)
|
1327
|
+
|
1328
|
+
if method in ('fmin_ncg',):
|
1329
|
+
kwargs['fprime'] = jac
|
1330
|
+
elif method in ('newton-cg',):
|
1331
|
+
kwargs['jac'] = jac
|
1332
|
+
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
|
1333
|
+
'trust-constr'):
|
1334
|
+
kwargs['jac'] = jac
|
1335
|
+
kwargs['hess'] = hess
|
1336
|
+
|
1337
|
+
# Run with callback
|
1338
|
+
results = []
|
1339
|
+
|
1340
|
+
def callback(x, *args, **kwargs):
|
1341
|
+
assert not isinstance(x, optimize.OptimizeResult)
|
1342
|
+
results.append((x, np.copy(x)))
|
1343
|
+
|
1344
|
+
routine(func, x0, callback=callback, **kwargs)
|
1345
|
+
|
1346
|
+
# Check returned arrays coincide with their copies
|
1347
|
+
# and have no memory overlap
|
1348
|
+
assert len(results) > 2
|
1349
|
+
assert all(np.all(x == y) for x, y in results)
|
1350
|
+
combinations = itertools.combinations(results, 2)
|
1351
|
+
assert not any(np.may_share_memory(x[0], y[0]) for x, y in combinations)
|
1352
|
+
|
1353
|
+
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg',
|
1354
|
+
'bfgs', 'newton-cg', 'l-bfgs-b',
|
1355
|
+
'tnc', 'cobyla', 'cobyqa', 'slsqp'])
|
1356
|
+
def test_no_increase(self, method):
|
1357
|
+
# Check that the solver doesn't return a value worse than the
|
1358
|
+
# initial point.
|
1359
|
+
|
1360
|
+
def func(x):
|
1361
|
+
return (x - 1)**2
|
1362
|
+
|
1363
|
+
def bad_grad(x):
|
1364
|
+
# purposefully invalid gradient function, simulates a case
|
1365
|
+
# where line searches start failing
|
1366
|
+
return 2*(x - 1) * (-1) - 2
|
1367
|
+
|
1368
|
+
x0 = np.array([2.0])
|
1369
|
+
f0 = func(x0)
|
1370
|
+
jac = bad_grad
|
1371
|
+
options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
|
1372
|
+
if method in ['nelder-mead', 'powell', 'cobyla', 'cobyqa']:
|
1373
|
+
jac = None
|
1374
|
+
sol = optimize.minimize(func, x0, jac=jac, method=method,
|
1375
|
+
options=options)
|
1376
|
+
assert_equal(func(sol.x), sol.fun)
|
1377
|
+
|
1378
|
+
if method == 'slsqp':
|
1379
|
+
pytest.xfail("SLSQP returns slightly worse")
|
1380
|
+
assert func(sol.x) <= f0
|
1381
|
+
|
1382
|
+
def test_slsqp_respect_bounds(self):
|
1383
|
+
# Regression test for gh-3108
|
1384
|
+
def f(x):
|
1385
|
+
return sum((x - np.array([1., 2., 3., 4.]))**2)
|
1386
|
+
|
1387
|
+
def cons(x):
|
1388
|
+
a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
|
1389
|
+
return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
|
1390
|
+
|
1391
|
+
x0 = np.array([0.5, 1., 1.5, 2.])
|
1392
|
+
res = optimize.minimize(f, x0, method='slsqp',
|
1393
|
+
constraints={'type': 'ineq', 'fun': cons})
|
1394
|
+
assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)
|
1395
|
+
|
1396
|
+
@pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS',
|
1397
|
+
'Newton-CG', 'L-BFGS-B', 'SLSQP',
|
1398
|
+
'trust-constr', 'dogleg', 'trust-ncg',
|
1399
|
+
'trust-exact', 'trust-krylov',
|
1400
|
+
'cobyqa'])
|
1401
|
+
def test_respect_maxiter(self, method):
|
1402
|
+
# Check that the number of iterations equals max_iter, assuming
|
1403
|
+
# convergence doesn't establish before
|
1404
|
+
MAXITER = 4
|
1405
|
+
|
1406
|
+
x0 = np.zeros(10)
|
1407
|
+
|
1408
|
+
sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
|
1409
|
+
optimize.rosen_hess, None, None)
|
1410
|
+
|
1411
|
+
# Set options
|
1412
|
+
kwargs = {'method': method, 'options': dict(maxiter=MAXITER)}
|
1413
|
+
|
1414
|
+
if method in ('Newton-CG',):
|
1415
|
+
kwargs['jac'] = sf.grad
|
1416
|
+
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
|
1417
|
+
'trust-constr'):
|
1418
|
+
kwargs['jac'] = sf.grad
|
1419
|
+
kwargs['hess'] = sf.hess
|
1420
|
+
|
1421
|
+
sol = optimize.minimize(sf.fun, x0, **kwargs)
|
1422
|
+
assert sol.nit == MAXITER
|
1423
|
+
assert sol.nfev >= sf.nfev
|
1424
|
+
if hasattr(sol, 'njev'):
|
1425
|
+
assert sol.njev >= sf.ngev
|
1426
|
+
|
1427
|
+
# method specific tests
|
1428
|
+
if method == 'SLSQP':
|
1429
|
+
assert sol.status == 9 # Iteration limit reached
|
1430
|
+
elif method == 'cobyqa':
|
1431
|
+
assert sol.status == 6 # Iteration limit reached
|
1432
|
+
|
1433
|
+
@pytest.mark.thread_unsafe
|
1434
|
+
@pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell',
|
1435
|
+
'fmin', 'fmin_powell'])
|
1436
|
+
def test_runtime_warning(self, method):
|
1437
|
+
x0 = np.zeros(10)
|
1438
|
+
sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
|
1439
|
+
optimize.rosen_hess, None, None)
|
1440
|
+
options = {"maxiter": 1, "disp": True}
|
1441
|
+
with pytest.warns(RuntimeWarning,
|
1442
|
+
match=r'Maximum number of iterations'):
|
1443
|
+
if method.startswith('fmin'):
|
1444
|
+
routine = getattr(optimize, method)
|
1445
|
+
routine(sf.fun, x0, **options)
|
1446
|
+
else:
|
1447
|
+
optimize.minimize(sf.fun, x0, method=method, options=options)
|
1448
|
+
|
1449
|
+
def test_respect_maxiter_trust_constr_ineq_constraints(self):
|
1450
|
+
# special case of minimization with trust-constr and inequality
|
1451
|
+
# constraints to check maxiter limit is obeyed when using internal
|
1452
|
+
# method 'tr_interior_point'
|
1453
|
+
MAXITER = 4
|
1454
|
+
f = optimize.rosen
|
1455
|
+
jac = optimize.rosen_der
|
1456
|
+
hess = optimize.rosen_hess
|
1457
|
+
|
1458
|
+
def fun(x):
|
1459
|
+
return np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
|
1460
|
+
cons = ({'type': 'ineq',
|
1461
|
+
'fun': fun},)
|
1462
|
+
|
1463
|
+
x0 = np.zeros(10)
|
1464
|
+
sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess,
|
1465
|
+
method='trust-constr',
|
1466
|
+
options=dict(maxiter=MAXITER))
|
1467
|
+
assert sol.nit == MAXITER
|
1468
|
+
|
1469
|
+
def test_minimize_automethod(self):
|
1470
|
+
def f(x):
|
1471
|
+
return x**2
|
1472
|
+
|
1473
|
+
def cons(x):
|
1474
|
+
return x - 2
|
1475
|
+
|
1476
|
+
x0 = np.array([10.])
|
1477
|
+
sol_0 = optimize.minimize(f, x0)
|
1478
|
+
sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq',
|
1479
|
+
'fun': cons}])
|
1480
|
+
sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
|
1481
|
+
sol_3 = optimize.minimize(f, x0,
|
1482
|
+
constraints=[{'type': 'ineq', 'fun': cons}],
|
1483
|
+
bounds=[(5, 10)])
|
1484
|
+
sol_4 = optimize.minimize(f, x0,
|
1485
|
+
constraints=[{'type': 'ineq', 'fun': cons}],
|
1486
|
+
bounds=[(1, 10)])
|
1487
|
+
for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
|
1488
|
+
assert sol.success
|
1489
|
+
assert_allclose(sol_0.x, 0, atol=1e-7)
|
1490
|
+
assert_allclose(sol_1.x, 2, atol=1e-7)
|
1491
|
+
assert_allclose(sol_2.x, 5, atol=1e-7)
|
1492
|
+
assert_allclose(sol_3.x, 5, atol=1e-7)
|
1493
|
+
assert_allclose(sol_4.x, 2, atol=1e-7)
|
1494
|
+
|
1495
|
+
def test_minimize_coerce_args_param(self):
|
1496
|
+
# Regression test for gh-3503
|
1497
|
+
def Y(x, c):
|
1498
|
+
return np.sum((x-c)**2)
|
1499
|
+
|
1500
|
+
def dY_dx(x, c=None):
|
1501
|
+
return 2*(x-c)
|
1502
|
+
|
1503
|
+
c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
|
1504
|
+
xinit = np.random.randn(len(c))
|
1505
|
+
optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
|
1506
|
+
|
1507
|
+
def test_initial_step_scaling(self):
|
1508
|
+
# Check that optimizer initial step is not huge even if the
|
1509
|
+
# function and gradients are
|
1510
|
+
|
1511
|
+
scales = [1e-50, 1, 1e50]
|
1512
|
+
methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']
|
1513
|
+
|
1514
|
+
def f(x):
|
1515
|
+
if first_step_size[0] is None and x[0] != x0[0]:
|
1516
|
+
first_step_size[0] = abs(x[0] - x0[0])
|
1517
|
+
if abs(x).max() > 1e4:
|
1518
|
+
raise AssertionError("Optimization stepped far away!")
|
1519
|
+
return scale*(x[0] - 1)**2
|
1520
|
+
|
1521
|
+
def g(x):
|
1522
|
+
return np.array([scale*(x[0] - 1)])
|
1523
|
+
|
1524
|
+
for scale, method in itertools.product(scales, methods):
|
1525
|
+
if method in ('CG', 'BFGS'):
|
1526
|
+
options = dict(gtol=scale*1e-8)
|
1527
|
+
else:
|
1528
|
+
options = dict()
|
1529
|
+
|
1530
|
+
if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
|
1531
|
+
# XXX: return initial point if they see small gradient
|
1532
|
+
continue
|
1533
|
+
|
1534
|
+
x0 = [-1.0]
|
1535
|
+
first_step_size = [None]
|
1536
|
+
res = optimize.minimize(f, x0, jac=g, method=method,
|
1537
|
+
options=options)
|
1538
|
+
|
1539
|
+
err_msg = f"{method} {scale}: {first_step_size}: {res}"
|
1540
|
+
|
1541
|
+
assert res.success, err_msg
|
1542
|
+
assert_allclose(res.x, [1.0], err_msg=err_msg)
|
1543
|
+
assert res.nit <= 3, err_msg
|
1544
|
+
|
1545
|
+
if scale > 1e-10:
|
1546
|
+
if method in ('CG', 'BFGS'):
|
1547
|
+
assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
|
1548
|
+
else:
|
1549
|
+
# Newton-CG and L-BFGS-B use different logic for the first
|
1550
|
+
# step, but are both scaling invariant with step sizes ~ 1
|
1551
|
+
assert first_step_size[0] > 0.5 and first_step_size[0] < 3, err_msg
|
1552
|
+
else:
|
1553
|
+
# step size has upper bound of ||grad||, so line
|
1554
|
+
# search makes many small steps
|
1555
|
+
pass
|
1556
|
+
|
1557
|
+
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs',
|
1558
|
+
'newton-cg', 'l-bfgs-b', 'tnc',
|
1559
|
+
'cobyqa', 'slsqp',
|
1560
|
+
'trust-constr', 'dogleg', 'trust-ncg',
|
1561
|
+
'trust-exact', 'trust-krylov'])
|
1562
|
+
def test_nan_values(self, method, num_parallel_threads):
|
1563
|
+
if num_parallel_threads > 1 and method == 'cobyqa':
|
1564
|
+
pytest.skip('COBYQA does not support concurrent execution')
|
1565
|
+
|
1566
|
+
# Check nan values result to failed exit status
|
1567
|
+
rng = np.random.RandomState(1234)
|
1568
|
+
|
1569
|
+
count = [0]
|
1570
|
+
|
1571
|
+
def func(x):
|
1572
|
+
return np.nan
|
1573
|
+
|
1574
|
+
def func2(x):
|
1575
|
+
count[0] += 1
|
1576
|
+
if count[0] > 2:
|
1577
|
+
return np.nan
|
1578
|
+
else:
|
1579
|
+
return rng.rand()
|
1580
|
+
|
1581
|
+
def grad(x):
|
1582
|
+
return np.array([1.0])
|
1583
|
+
|
1584
|
+
def hess(x):
|
1585
|
+
return np.array([[1.0]])
|
1586
|
+
|
1587
|
+
x0 = np.array([1.0])
|
1588
|
+
|
1589
|
+
needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact',
|
1590
|
+
'trust-ncg', 'dogleg')
|
1591
|
+
needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg',
|
1592
|
+
'dogleg')
|
1593
|
+
|
1594
|
+
funcs = [func, func2]
|
1595
|
+
grads = [grad] if needs_grad else [grad, None]
|
1596
|
+
hesss = [hess] if needs_hess else [hess, None]
|
1597
|
+
options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
|
1598
|
+
|
1599
|
+
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
|
1600
|
+
sup.filter(UserWarning, "delta_grad == 0.*")
|
1601
|
+
sup.filter(RuntimeWarning, ".*does not use Hessian.*")
|
1602
|
+
sup.filter(RuntimeWarning, ".*does not use gradient.*")
|
1603
|
+
|
1604
|
+
for f, g, h in itertools.product(funcs, grads, hesss):
|
1605
|
+
count = [0]
|
1606
|
+
sol = optimize.minimize(f, x0, jac=g, hess=h, method=method,
|
1607
|
+
options=options)
|
1608
|
+
assert_equal(sol.success, False)
|
1609
|
+
|
1610
|
+
@pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs',
|
1611
|
+
'l-bfgs-b', 'tnc',
|
1612
|
+
'cobyla', 'cobyqa', 'slsqp',
|
1613
|
+
'trust-constr', 'dogleg', 'trust-ncg',
|
1614
|
+
'trust-exact', 'trust-krylov'])
|
1615
|
+
def test_duplicate_evaluations(self, method):
|
1616
|
+
# check that there are no duplicate evaluations for any methods
|
1617
|
+
jac = hess = None
|
1618
|
+
if method in ('newton-cg', 'trust-krylov', 'trust-exact',
|
1619
|
+
'trust-ncg', 'dogleg'):
|
1620
|
+
jac = self.grad
|
1621
|
+
if method in ('trust-krylov', 'trust-exact', 'trust-ncg',
|
1622
|
+
'dogleg'):
|
1623
|
+
hess = self.hess
|
1624
|
+
|
1625
|
+
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
|
1626
|
+
# for trust-constr
|
1627
|
+
sup.filter(UserWarning, "delta_grad == 0.*")
|
1628
|
+
optimize.minimize(self.func, self.startparams,
|
1629
|
+
method=method, jac=jac, hess=hess)
|
1630
|
+
|
1631
|
+
for i in range(1, len(self.trace.t)):
|
1632
|
+
if np.array_equal(self.trace.t[i - 1], self.trace.t[i]):
|
1633
|
+
raise RuntimeError(
|
1634
|
+
f"Duplicate evaluations made by {method}")
|
1635
|
+
|
1636
|
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
|
1637
|
+
@pytest.mark.parametrize('method', MINIMIZE_METHODS_NEW_CB)
|
1638
|
+
@pytest.mark.parametrize('new_cb_interface', [0, 1, 2])
|
1639
|
+
def test_callback_stopiteration(self, method, new_cb_interface):
|
1640
|
+
# Check that if callback raises StopIteration, optimization
|
1641
|
+
# terminates with the same result as if iterations were limited
|
1642
|
+
|
1643
|
+
def f(x):
|
1644
|
+
f.flag = False # check that f isn't called after StopIteration
|
1645
|
+
return optimize.rosen(x)
|
1646
|
+
f.flag = False
|
1647
|
+
|
1648
|
+
def g(x):
|
1649
|
+
f.flag = False
|
1650
|
+
return optimize.rosen_der(x)
|
1651
|
+
|
1652
|
+
def h(x):
|
1653
|
+
f.flag = False
|
1654
|
+
return optimize.rosen_hess(x)
|
1655
|
+
|
1656
|
+
maxiter = 5
|
1657
|
+
|
1658
|
+
if new_cb_interface == 1:
|
1659
|
+
def callback_interface(*, intermediate_result):
|
1660
|
+
assert intermediate_result.fun == f(intermediate_result.x)
|
1661
|
+
callback()
|
1662
|
+
elif new_cb_interface == 2:
|
1663
|
+
class Callback:
|
1664
|
+
def __call__(self, intermediate_result: OptimizeResult):
|
1665
|
+
assert intermediate_result.fun == f(intermediate_result.x)
|
1666
|
+
callback()
|
1667
|
+
callback_interface = Callback()
|
1668
|
+
else:
|
1669
|
+
def callback_interface(xk, *args): # type: ignore[misc]
|
1670
|
+
callback()
|
1671
|
+
|
1672
|
+
def callback():
|
1673
|
+
callback.i += 1
|
1674
|
+
callback.flag = False
|
1675
|
+
if callback.i == maxiter:
|
1676
|
+
callback.flag = True
|
1677
|
+
raise StopIteration()
|
1678
|
+
callback.i = 0
|
1679
|
+
callback.flag = False
|
1680
|
+
|
1681
|
+
kwargs = {'x0': [1.1]*5, 'method': method,
|
1682
|
+
'fun': f, 'jac': g, 'hess': h}
|
1683
|
+
|
1684
|
+
res = optimize.minimize(**kwargs, callback=callback_interface)
|
1685
|
+
if method == 'nelder-mead':
|
1686
|
+
maxiter = maxiter + 1 # nelder-mead counts differently
|
1687
|
+
if method == 'cobyqa':
|
1688
|
+
ref = optimize.minimize(**kwargs, options={'maxfev': maxiter})
|
1689
|
+
assert res.nfev == ref.nfev == maxiter
|
1690
|
+
elif method == 'cobyla':
|
1691
|
+
# COBYLA calls the callback once per iteration, not once per function
|
1692
|
+
# evaluation, so this test is not applicable. However we can test
|
1693
|
+
# the COBYLA status to verify that res stopped back on the callback
|
1694
|
+
# and ref stopped based on the iteration limit.
|
1695
|
+
# COBYLA requires at least n+2 function evaluations
|
1696
|
+
maxiter = max(maxiter, len(kwargs['x0'])+2)
|
1697
|
+
ref = optimize.minimize(**kwargs, options={'maxiter': maxiter})
|
1698
|
+
assert res.status == 30
|
1699
|
+
assert res.message == ("Return from COBYLA because the callback function "
|
1700
|
+
"requested termination")
|
1701
|
+
assert ref.status == 3
|
1702
|
+
assert ref.message == ("Return from COBYLA because the objective function "
|
1703
|
+
"has been evaluated MAXFUN times.")
|
1704
|
+
# Return early because res/ref will be unequal for COBYLA for the reasons
|
1705
|
+
# mentioned above.
|
1706
|
+
return
|
1707
|
+
else:
|
1708
|
+
ref = optimize.minimize(**kwargs, options={'maxiter': maxiter})
|
1709
|
+
assert res.nit == ref.nit == maxiter
|
1710
|
+
assert res.fun == ref.fun
|
1711
|
+
assert_equal(res.x, ref.x)
|
1712
|
+
assert res.status == (3 if method in [
|
1713
|
+
'trust-constr',
|
1714
|
+
'cobyqa',
|
1715
|
+
] else 99)
|
1716
|
+
|
1717
|
+
def test_ndim_error(self):
|
1718
|
+
msg = "'x0' must only have one dimension."
|
1719
|
+
with assert_raises(ValueError, match=msg):
|
1720
|
+
optimize.minimize(lambda x: x, np.ones((2, 1)))
|
1721
|
+
|
1722
|
+
@pytest.mark.parametrize('method', ('nelder-mead', 'l-bfgs-b', 'tnc',
|
1723
|
+
'powell', 'cobyla', 'cobyqa',
|
1724
|
+
'trust-constr'))
|
1725
|
+
def test_minimize_invalid_bounds(self, method):
|
1726
|
+
def f(x):
|
1727
|
+
return np.sum(x**2)
|
1728
|
+
|
1729
|
+
bounds = Bounds([1, 2], [3, 4])
|
1730
|
+
msg = 'The number of bounds is not compatible with the length of `x0`.'
|
1731
|
+
with pytest.raises(ValueError, match=msg):
|
1732
|
+
optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds)
|
1733
|
+
|
1734
|
+
bounds = Bounds([1, 6, 1], [3, 4, 2])
|
1735
|
+
msg = 'An upper bound is less than the corresponding lower bound.'
|
1736
|
+
with pytest.raises(ValueError, match=msg):
|
1737
|
+
optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds)
|
1738
|
+
|
1739
|
+
@pytest.mark.thread_unsafe
|
1740
|
+
@pytest.mark.parametrize('method', ['bfgs', 'cg', 'newton-cg', 'powell'])
|
1741
|
+
def test_minimize_warnings_gh1953(self, method):
|
1742
|
+
# test that minimize methods produce warnings rather than just using
|
1743
|
+
# `print`; see gh-1953.
|
1744
|
+
kwargs = {} if method=='powell' else {'jac': optimize.rosen_der}
|
1745
|
+
warning_type = (RuntimeWarning if method=='powell'
|
1746
|
+
else optimize.OptimizeWarning)
|
1747
|
+
|
1748
|
+
options = {'disp': True, 'maxiter': 10}
|
1749
|
+
with pytest.warns(warning_type, match='Maximum number'):
|
1750
|
+
optimize.minimize(lambda x: optimize.rosen(x), [0, 0],
|
1751
|
+
method=method, options=options, **kwargs)
|
1752
|
+
|
1753
|
+
options['disp'] = False
|
1754
|
+
optimize.minimize(lambda x: optimize.rosen(x), [0, 0],
|
1755
|
+
method=method, options=options, **kwargs)
|
1756
|
+
|
1757
|
+
|
1758
|
+
@pytest.mark.parametrize(
|
1759
|
+
'method',
|
1760
|
+
['l-bfgs-b', 'tnc', 'Powell', 'Nelder-Mead', 'cobyqa']
|
1761
|
+
)
|
1762
|
+
def test_minimize_with_scalar(method):
|
1763
|
+
# checks that minimize works with a scalar being provided to it.
|
1764
|
+
def f(x):
|
1765
|
+
return np.sum(x ** 2)
|
1766
|
+
|
1767
|
+
res = optimize.minimize(f, 17, bounds=[(-100, 100)], method=method)
|
1768
|
+
assert res.success
|
1769
|
+
assert_allclose(res.x, [0.0], atol=1e-5)
|
1770
|
+
|
1771
|
+
|
1772
|
+
class TestLBFGSBBounds:
|
1773
|
+
def setup_method(self):
|
1774
|
+
self.bounds = ((1, None), (None, None))
|
1775
|
+
self.solution = (1, 0)
|
1776
|
+
|
1777
|
+
def fun(self, x, p=2.0):
|
1778
|
+
return 1.0 / p * (x[0]**p + x[1]**p)
|
1779
|
+
|
1780
|
+
def jac(self, x, p=2.0):
|
1781
|
+
return x**(p - 1)
|
1782
|
+
|
1783
|
+
def fj(self, x, p=2.0):
|
1784
|
+
return self.fun(x, p), self.jac(x, p)
|
1785
|
+
|
1786
|
+
def test_l_bfgs_b_bounds(self):
|
1787
|
+
x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
|
1788
|
+
fprime=self.jac,
|
1789
|
+
bounds=self.bounds)
|
1790
|
+
assert d['warnflag'] == 0, d['task']
|
1791
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1792
|
+
|
1793
|
+
def test_l_bfgs_b_funjac(self):
|
1794
|
+
# L-BFGS-B with fun and jac combined and extra arguments
|
1795
|
+
x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
|
1796
|
+
bounds=self.bounds)
|
1797
|
+
assert d['warnflag'] == 0, d['task']
|
1798
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1799
|
+
|
1800
|
+
def test_minimize_l_bfgs_b_bounds(self):
|
1801
|
+
# Minimize with method='L-BFGS-B' with bounds
|
1802
|
+
res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
|
1803
|
+
jac=self.jac, bounds=self.bounds)
|
1804
|
+
assert res['success'], res['message']
|
1805
|
+
assert_allclose(res.x, self.solution, atol=1e-6)
|
1806
|
+
|
1807
|
+
@pytest.mark.parametrize('bounds', [
|
1808
|
+
([(10, 1), (1, 10)]),
|
1809
|
+
([(1, 10), (10, 1)]),
|
1810
|
+
([(10, 1), (10, 1)])
|
1811
|
+
])
|
1812
|
+
def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds):
|
1813
|
+
with pytest.raises(ValueError, match='.*bound.*'):
|
1814
|
+
optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
|
1815
|
+
jac=self.jac, bounds=bounds)
|
1816
|
+
|
1817
|
+
def test_minimize_l_bfgs_b_bounds_FD(self):
|
1818
|
+
# test that initial starting value outside bounds doesn't raise
|
1819
|
+
# an error (done with clipping).
|
1820
|
+
# test all different finite differences combos, with and without args
|
1821
|
+
|
1822
|
+
jacs = ['2-point', '3-point', None]
|
1823
|
+
argss = [(2.,), ()]
|
1824
|
+
for jac, args in itertools.product(jacs, argss):
|
1825
|
+
res = optimize.minimize(self.fun, [0, -1], args=args,
|
1826
|
+
method='L-BFGS-B',
|
1827
|
+
jac=jac, bounds=self.bounds,
|
1828
|
+
options={'finite_diff_rel_step': None})
|
1829
|
+
assert res['success'], res['message']
|
1830
|
+
assert_allclose(res.x, self.solution, atol=1e-6)
|
1831
|
+
|
1832
|
+
|
1833
|
+
class TestOptimizeScalar:
|
1834
|
+
def setup_method(self):
|
1835
|
+
self.solution = 1.5
|
1836
|
+
|
1837
|
+
def fun(self, x, a=1.5):
|
1838
|
+
"""Objective function"""
|
1839
|
+
return (x - a)**2 - 0.8
|
1840
|
+
|
1841
|
+
def test_brent(self):
|
1842
|
+
x = optimize.brent(self.fun)
|
1843
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1844
|
+
|
1845
|
+
x = optimize.brent(self.fun, brack=(-3, -2))
|
1846
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1847
|
+
|
1848
|
+
x = optimize.brent(self.fun, full_output=True)
|
1849
|
+
assert_allclose(x[0], self.solution, atol=1e-6)
|
1850
|
+
|
1851
|
+
x = optimize.brent(self.fun, brack=(-15, -1, 15))
|
1852
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1853
|
+
|
1854
|
+
message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)"
|
1855
|
+
with pytest.raises(ValueError, match=message):
|
1856
|
+
optimize.brent(self.fun, brack=(-1, 0, 1))
|
1857
|
+
|
1858
|
+
message = r"\(xa < xb\) and \(xb < xc\)"
|
1859
|
+
with pytest.raises(ValueError, match=message):
|
1860
|
+
optimize.brent(self.fun, brack=(0, -1, 1))
|
1861
|
+
|
1862
|
+
@pytest.mark.filterwarnings('ignore::UserWarning')
|
1863
|
+
def test_golden(self):
|
1864
|
+
x = optimize.golden(self.fun)
|
1865
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1866
|
+
|
1867
|
+
x = optimize.golden(self.fun, brack=(-3, -2))
|
1868
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1869
|
+
|
1870
|
+
x = optimize.golden(self.fun, full_output=True)
|
1871
|
+
assert_allclose(x[0], self.solution, atol=1e-6)
|
1872
|
+
|
1873
|
+
x = optimize.golden(self.fun, brack=(-15, -1, 15))
|
1874
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1875
|
+
|
1876
|
+
x = optimize.golden(self.fun, tol=0)
|
1877
|
+
assert_allclose(x, self.solution)
|
1878
|
+
|
1879
|
+
maxiter_test_cases = [0, 1, 5]
|
1880
|
+
for maxiter in maxiter_test_cases:
|
1881
|
+
x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
|
1882
|
+
x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
|
1883
|
+
nfev0, nfev = x0[2], x[2]
|
1884
|
+
assert_equal(nfev - nfev0, maxiter)
|
1885
|
+
|
1886
|
+
message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)"
|
1887
|
+
with pytest.raises(ValueError, match=message):
|
1888
|
+
optimize.golden(self.fun, brack=(-1, 0, 1))
|
1889
|
+
|
1890
|
+
message = r"\(xa < xb\) and \(xb < xc\)"
|
1891
|
+
with pytest.raises(ValueError, match=message):
|
1892
|
+
optimize.golden(self.fun, brack=(0, -1, 1))
|
1893
|
+
|
1894
|
+
def test_fminbound(self):
|
1895
|
+
x = optimize.fminbound(self.fun, 0, 1)
|
1896
|
+
assert_allclose(x, 1, atol=1e-4)
|
1897
|
+
|
1898
|
+
x = optimize.fminbound(self.fun, 1, 5)
|
1899
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1900
|
+
|
1901
|
+
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
|
1902
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1903
|
+
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
|
1904
|
+
|
1905
|
+
def test_fminbound_scalar(self):
|
1906
|
+
with pytest.raises(ValueError, match='.*must be finite scalars.*'):
|
1907
|
+
optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
|
1908
|
+
|
1909
|
+
x = optimize.fminbound(self.fun, 1, np.array(5))
|
1910
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1911
|
+
|
1912
|
+
def test_gh11207(self):
|
1913
|
+
def fun(x):
|
1914
|
+
return x**2
|
1915
|
+
optimize.fminbound(fun, 0, 0)
|
1916
|
+
|
1917
|
+
def test_minimize_scalar(self):
|
1918
|
+
# combine all tests above for the minimize_scalar wrapper
|
1919
|
+
x = optimize.minimize_scalar(self.fun).x
|
1920
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1921
|
+
|
1922
|
+
x = optimize.minimize_scalar(self.fun, method='Brent')
|
1923
|
+
assert x.success
|
1924
|
+
|
1925
|
+
x = optimize.minimize_scalar(self.fun, method='Brent',
|
1926
|
+
options=dict(maxiter=3))
|
1927
|
+
assert not x.success
|
1928
|
+
|
1929
|
+
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
|
1930
|
+
args=(1.5, ), method='Brent').x
|
1931
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1932
|
+
|
1933
|
+
x = optimize.minimize_scalar(self.fun, method='Brent',
|
1934
|
+
args=(1.5,)).x
|
1935
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1936
|
+
|
1937
|
+
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
|
1938
|
+
args=(1.5, ), method='Brent').x
|
1939
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1940
|
+
|
1941
|
+
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
|
1942
|
+
args=(1.5, ), method='golden').x
|
1943
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1944
|
+
|
1945
|
+
x = optimize.minimize_scalar(self.fun, method='golden',
|
1946
|
+
args=(1.5,)).x
|
1947
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1948
|
+
|
1949
|
+
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
|
1950
|
+
args=(1.5, ), method='golden').x
|
1951
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1952
|
+
|
1953
|
+
x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
|
1954
|
+
method='Bounded').x
|
1955
|
+
assert_allclose(x, 1, atol=1e-4)
|
1956
|
+
|
1957
|
+
x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
|
1958
|
+
method='bounded').x
|
1959
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1960
|
+
|
1961
|
+
x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
|
1962
|
+
np.array([5])),
|
1963
|
+
args=(np.array([1.5]), ),
|
1964
|
+
method='bounded').x
|
1965
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1966
|
+
|
1967
|
+
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
|
1968
|
+
bounds=(5, 1), method='bounded', args=(1.5, ))
|
1969
|
+
|
1970
|
+
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
|
1971
|
+
bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
|
1972
|
+
|
1973
|
+
x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
|
1974
|
+
method='bounded').x
|
1975
|
+
assert_allclose(x, self.solution, atol=1e-6)
|
1976
|
+
|
1977
|
+
def test_minimize_scalar_custom(self):
|
1978
|
+
# This function comes from the documentation example.
|
1979
|
+
def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
|
1980
|
+
maxiter=100, callback=None, **options):
|
1981
|
+
bestx = (bracket[1] + bracket[0]) / 2.0
|
1982
|
+
besty = fun(bestx)
|
1983
|
+
funcalls = 1
|
1984
|
+
niter = 0
|
1985
|
+
improved = True
|
1986
|
+
stop = False
|
1987
|
+
|
1988
|
+
while improved and not stop and niter < maxiter:
|
1989
|
+
improved = False
|
1990
|
+
niter += 1
|
1991
|
+
for testx in [bestx - stepsize, bestx + stepsize]:
|
1992
|
+
testy = fun(testx, *args)
|
1993
|
+
funcalls += 1
|
1994
|
+
if testy < besty:
|
1995
|
+
besty = testy
|
1996
|
+
bestx = testx
|
1997
|
+
improved = True
|
1998
|
+
if callback is not None:
|
1999
|
+
callback(bestx)
|
2000
|
+
if maxfev is not None and funcalls >= maxfev:
|
2001
|
+
stop = True
|
2002
|
+
break
|
2003
|
+
|
2004
|
+
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
|
2005
|
+
nfev=funcalls, success=(niter > 1))
|
2006
|
+
|
2007
|
+
res = optimize.minimize_scalar(self.fun, bracket=(0, 4),
|
2008
|
+
method=custmin,
|
2009
|
+
options=dict(stepsize=0.05))
|
2010
|
+
assert_allclose(res.x, self.solution, atol=1e-6)
|
2011
|
+
|
2012
|
+
def test_minimize_scalar_coerce_args_param(self):
|
2013
|
+
# Regression test for gh-3503
|
2014
|
+
optimize.minimize_scalar(self.fun, args=1.5)
|
2015
|
+
|
2016
|
+
@pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
|
2017
|
+
def test_disp(self, method):
|
2018
|
+
# test that all minimize_scalar methods accept a disp option.
|
2019
|
+
for disp in [0, 1, 2, 3]:
|
2020
|
+
optimize.minimize_scalar(self.fun, options={"disp": disp})
|
2021
|
+
|
2022
|
+
@pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
|
2023
|
+
def test_result_attributes(self, method):
|
2024
|
+
kwargs = {"bounds": [-10, 10]} if method == 'bounded' else {}
|
2025
|
+
result = optimize.minimize_scalar(self.fun, method=method, **kwargs)
|
2026
|
+
assert hasattr(result, "x")
|
2027
|
+
assert hasattr(result, "success")
|
2028
|
+
assert hasattr(result, "message")
|
2029
|
+
assert hasattr(result, "fun")
|
2030
|
+
assert hasattr(result, "nfev")
|
2031
|
+
assert hasattr(result, "nit")
|
2032
|
+
|
2033
|
+
@pytest.mark.filterwarnings('ignore::UserWarning')
|
2034
|
+
@pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
|
2035
|
+
def test_nan_values(self, method):
|
2036
|
+
# Check nan values result to failed exit status
|
2037
|
+
np.random.seed(1234)
|
2038
|
+
|
2039
|
+
count = [0]
|
2040
|
+
|
2041
|
+
def func(x):
|
2042
|
+
count[0] += 1
|
2043
|
+
if count[0] > 4:
|
2044
|
+
return np.nan
|
2045
|
+
else:
|
2046
|
+
return x**2 + 0.1 * np.sin(x)
|
2047
|
+
|
2048
|
+
bracket = (-1, 0, 1)
|
2049
|
+
bounds = (-1, 1)
|
2050
|
+
|
2051
|
+
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
|
2052
|
+
sup.filter(UserWarning, "delta_grad == 0.*")
|
2053
|
+
sup.filter(RuntimeWarning, ".*does not use Hessian.*")
|
2054
|
+
sup.filter(RuntimeWarning, ".*does not use gradient.*")
|
2055
|
+
|
2056
|
+
count = [0]
|
2057
|
+
|
2058
|
+
kwargs = {"bounds": bounds} if method == 'bounded' else {}
|
2059
|
+
sol = optimize.minimize_scalar(func, bracket=bracket,
|
2060
|
+
**kwargs, method=method,
|
2061
|
+
options=dict(maxiter=20))
|
2062
|
+
assert_equal(sol.success, False)
|
2063
|
+
|
2064
|
+
def test_minimize_scalar_defaults_gh10911(self):
|
2065
|
+
# Previously, bounds were silently ignored unless `method='bounds'`
|
2066
|
+
# was chosen. See gh-10911. Check that this is no longer the case.
|
2067
|
+
def f(x):
|
2068
|
+
return x**2
|
2069
|
+
|
2070
|
+
res = optimize.minimize_scalar(f)
|
2071
|
+
assert_allclose(res.x, 0, atol=1e-8)
|
2072
|
+
|
2073
|
+
res = optimize.minimize_scalar(f, bounds=(1, 100),
|
2074
|
+
options={'xatol': 1e-10})
|
2075
|
+
assert_allclose(res.x, 1)
|
2076
|
+
|
2077
|
+
def test_minimize_non_finite_bounds_gh10911(self):
|
2078
|
+
# Previously, minimize_scalar misbehaved with infinite bounds.
|
2079
|
+
# See gh-10911. Check that it now raises an error, instead.
|
2080
|
+
msg = "Optimization bounds must be finite scalars."
|
2081
|
+
with pytest.raises(ValueError, match=msg):
|
2082
|
+
optimize.minimize_scalar(np.sin, bounds=(1, np.inf))
|
2083
|
+
with pytest.raises(ValueError, match=msg):
|
2084
|
+
optimize.minimize_scalar(np.sin, bounds=(np.nan, 1))
|
2085
|
+
|
2086
|
+
@pytest.mark.parametrize("method", ['brent', 'golden'])
|
2087
|
+
def test_minimize_unbounded_method_with_bounds_gh10911(self, method):
|
2088
|
+
# Previously, `bounds` were silently ignored when `method='brent'` or
|
2089
|
+
# `method='golden'`. See gh-10911. Check that error is now raised.
|
2090
|
+
msg = "Use of `bounds` is incompatible with..."
|
2091
|
+
with pytest.raises(ValueError, match=msg):
|
2092
|
+
optimize.minimize_scalar(np.sin, method=method, bounds=(1, 2))
|
2093
|
+
|
2094
|
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
|
2095
|
+
@pytest.mark.parametrize("method", MINIMIZE_SCALAR_METHODS)
|
2096
|
+
@pytest.mark.parametrize("tol", [1, 1e-6])
|
2097
|
+
@pytest.mark.parametrize("fshape", [(), (1,), (1, 1)])
|
2098
|
+
def test_minimize_scalar_dimensionality_gh16196(self, method, tol, fshape):
|
2099
|
+
# gh-16196 reported that the output shape of `minimize_scalar` was not
|
2100
|
+
# consistent when an objective function returned an array. Check that
|
2101
|
+
# `res.fun` and `res.x` are now consistent.
|
2102
|
+
def f(x):
|
2103
|
+
return np.array(x**4).reshape(fshape)
|
2104
|
+
|
2105
|
+
a, b = -0.1, 0.2
|
2106
|
+
kwargs = (dict(bracket=(a, b)) if method != "bounded"
|
2107
|
+
else dict(bounds=(a, b)))
|
2108
|
+
kwargs.update(dict(method=method, tol=tol))
|
2109
|
+
|
2110
|
+
res = optimize.minimize_scalar(f, **kwargs)
|
2111
|
+
assert res.x.shape == res.fun.shape == f(res.x).shape == fshape
|
2112
|
+
|
2113
|
+
@pytest.mark.thread_unsafe
|
2114
|
+
@pytest.mark.parametrize('method', ['bounded', 'brent', 'golden'])
|
2115
|
+
def test_minimize_scalar_warnings_gh1953(self, method):
|
2116
|
+
# test that minimize_scalar methods produce warnings rather than just
|
2117
|
+
# using `print`; see gh-1953.
|
2118
|
+
def f(x):
|
2119
|
+
return (x - 1)**2
|
2120
|
+
|
2121
|
+
kwargs = {}
|
2122
|
+
kwd = 'bounds' if method == 'bounded' else 'bracket'
|
2123
|
+
kwargs[kwd] = [-2, 10]
|
2124
|
+
|
2125
|
+
options = {'disp': True, 'maxiter': 3}
|
2126
|
+
with pytest.warns(optimize.OptimizeWarning, match='Maximum number'):
|
2127
|
+
optimize.minimize_scalar(f, method=method, options=options,
|
2128
|
+
**kwargs)
|
2129
|
+
|
2130
|
+
options['disp'] = False
|
2131
|
+
optimize.minimize_scalar(f, method=method, options=options, **kwargs)
|
2132
|
+
|
2133
|
+
|
2134
|
+
class TestBracket:
|
2135
|
+
|
2136
|
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
|
2137
|
+
def test_errors_and_status_false(self):
|
2138
|
+
# Check that `bracket` raises the errors it is supposed to
|
2139
|
+
def f(x): # gh-14858
|
2140
|
+
return x**2 if ((-1 < x) & (x < 1)) else 100.0
|
2141
|
+
|
2142
|
+
message = "The algorithm terminated without finding a valid bracket."
|
2143
|
+
with pytest.raises(RuntimeError, match=message):
|
2144
|
+
optimize.bracket(f, -1, 1)
|
2145
|
+
with pytest.raises(RuntimeError, match=message):
|
2146
|
+
optimize.bracket(f, -1, np.inf)
|
2147
|
+
with pytest.raises(RuntimeError, match=message):
|
2148
|
+
optimize.brent(f, brack=(-1, 1))
|
2149
|
+
with pytest.raises(RuntimeError, match=message):
|
2150
|
+
optimize.golden(f, brack=(-1, 1))
|
2151
|
+
|
2152
|
+
def f(x): # gh-5899
|
2153
|
+
return -5 * x**5 + 4 * x**4 - 12 * x**3 + 11 * x**2 - 2 * x + 1
|
2154
|
+
|
2155
|
+
message = "No valid bracket was found before the iteration limit..."
|
2156
|
+
with pytest.raises(RuntimeError, match=message):
|
2157
|
+
optimize.bracket(f, -0.5, 0.5, maxiter=10)
|
2158
|
+
|
2159
|
+
@pytest.mark.parametrize('method', ('brent', 'golden'))
|
2160
|
+
def test_minimize_scalar_success_false(self, method):
|
2161
|
+
# Check that status information from `bracket` gets to minimize_scalar
|
2162
|
+
def f(x): # gh-14858
|
2163
|
+
return x**2 if ((-1 < x) & (x < 1)) else 100.0
|
2164
|
+
|
2165
|
+
message = "The algorithm terminated without finding a valid bracket."
|
2166
|
+
|
2167
|
+
res = optimize.minimize_scalar(f, bracket=(-1, 1), method=method)
|
2168
|
+
assert not res.success
|
2169
|
+
assert message in res.message
|
2170
|
+
assert res.nfev == 3
|
2171
|
+
assert res.nit == 0
|
2172
|
+
assert res.fun == 100
|
2173
|
+
|
2174
|
+
|
2175
|
+
def test_brent_negative_tolerance():
|
2176
|
+
assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)
|
2177
|
+
|
2178
|
+
|
2179
|
+
class TestNewtonCg:
|
2180
|
+
def test_rosenbrock(self):
|
2181
|
+
x0 = np.array([-1.2, 1.0])
|
2182
|
+
sol = optimize.minimize(optimize.rosen, x0,
|
2183
|
+
jac=optimize.rosen_der,
|
2184
|
+
hess=optimize.rosen_hess,
|
2185
|
+
tol=1e-5,
|
2186
|
+
method='Newton-CG')
|
2187
|
+
assert sol.success, sol.message
|
2188
|
+
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
|
2189
|
+
|
2190
|
+
def test_himmelblau(self):
|
2191
|
+
x0 = np.array(himmelblau_x0)
|
2192
|
+
sol = optimize.minimize(himmelblau,
|
2193
|
+
x0,
|
2194
|
+
jac=himmelblau_grad,
|
2195
|
+
hess=himmelblau_hess,
|
2196
|
+
method='Newton-CG',
|
2197
|
+
tol=1e-6)
|
2198
|
+
assert sol.success, sol.message
|
2199
|
+
assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
|
2200
|
+
assert_allclose(sol.fun, himmelblau_min, atol=1e-4)
|
2201
|
+
|
2202
|
+
def test_finite_difference(self):
|
2203
|
+
x0 = np.array([-1.2, 1.0])
|
2204
|
+
sol = optimize.minimize(optimize.rosen, x0,
|
2205
|
+
jac=optimize.rosen_der,
|
2206
|
+
hess='2-point',
|
2207
|
+
tol=1e-5,
|
2208
|
+
method='Newton-CG')
|
2209
|
+
assert sol.success, sol.message
|
2210
|
+
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
|
2211
|
+
|
2212
|
+
def test_hessian_update_strategy(self):
|
2213
|
+
x0 = np.array([-1.2, 1.0])
|
2214
|
+
sol = optimize.minimize(optimize.rosen, x0,
|
2215
|
+
jac=optimize.rosen_der,
|
2216
|
+
hess=optimize.BFGS(),
|
2217
|
+
tol=1e-5,
|
2218
|
+
method='Newton-CG')
|
2219
|
+
assert sol.success, sol.message
|
2220
|
+
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
|
2221
|
+
|
2222
|
+
|
2223
|
+
def test_line_for_search():
|
2224
|
+
# _line_for_search is only used in _linesearch_powell, which is also
|
2225
|
+
# tested below. Thus there are more tests of _line_for_search in the
|
2226
|
+
# test_linesearch_powell_bounded function.
|
2227
|
+
|
2228
|
+
line_for_search = optimize._optimize._line_for_search
|
2229
|
+
# args are x0, alpha, lower_bound, upper_bound
|
2230
|
+
# returns lmin, lmax
|
2231
|
+
|
2232
|
+
lower_bound = np.array([-5.3, -1, -1.5, -3])
|
2233
|
+
upper_bound = np.array([1.9, 1, 2.8, 3])
|
2234
|
+
|
2235
|
+
# test when starting in the bounds
|
2236
|
+
x0 = np.array([0., 0, 0, 0])
|
2237
|
+
# and when starting outside of the bounds
|
2238
|
+
x1 = np.array([0., 2, -3, 0])
|
2239
|
+
|
2240
|
+
all_tests = (
|
2241
|
+
(x0, np.array([1., 0, 0, 0]), -5.3, 1.9),
|
2242
|
+
(x0, np.array([0., 1, 0, 0]), -1, 1),
|
2243
|
+
(x0, np.array([0., 0, 1, 0]), -1.5, 2.8),
|
2244
|
+
(x0, np.array([0., 0, 0, 1]), -3, 3),
|
2245
|
+
(x0, np.array([1., 1, 0, 0]), -1, 1),
|
2246
|
+
(x0, np.array([1., 0, -1, 2]), -1.5, 1.5),
|
2247
|
+
(x0, np.array([2., 0, -1, 2]), -1.5, 0.95),
|
2248
|
+
(x1, np.array([1., 0, 0, 0]), -5.3, 1.9),
|
2249
|
+
(x1, np.array([0., 1, 0, 0]), -3, -1),
|
2250
|
+
(x1, np.array([0., 0, 1, 0]), 1.5, 5.8),
|
2251
|
+
(x1, np.array([0., 0, 0, 1]), -3, 3),
|
2252
|
+
(x1, np.array([1., 1, 0, 0]), -3, -1),
|
2253
|
+
(x1, np.array([1., 0, -1, 0]), -5.3, -1.5),
|
2254
|
+
)
|
2255
|
+
|
2256
|
+
for x, alpha, lmin, lmax in all_tests:
|
2257
|
+
mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)
|
2258
|
+
assert_allclose(mi, lmin, atol=1e-6)
|
2259
|
+
assert_allclose(ma, lmax, atol=1e-6)
|
2260
|
+
|
2261
|
+
# now with infinite bounds
|
2262
|
+
lower_bound = np.array([-np.inf, -1, -np.inf, -3])
|
2263
|
+
upper_bound = np.array([np.inf, 1, 2.8, np.inf])
|
2264
|
+
|
2265
|
+
all_tests = (
|
2266
|
+
(x0, np.array([1., 0, 0, 0]), -np.inf, np.inf),
|
2267
|
+
(x0, np.array([0., 1, 0, 0]), -1, 1),
|
2268
|
+
(x0, np.array([0., 0, 1, 0]), -np.inf, 2.8),
|
2269
|
+
(x0, np.array([0., 0, 0, 1]), -3, np.inf),
|
2270
|
+
(x0, np.array([1., 1, 0, 0]), -1, 1),
|
2271
|
+
(x0, np.array([1., 0, -1, 2]), -1.5, np.inf),
|
2272
|
+
(x1, np.array([1., 0, 0, 0]), -np.inf, np.inf),
|
2273
|
+
(x1, np.array([0., 1, 0, 0]), -3, -1),
|
2274
|
+
(x1, np.array([0., 0, 1, 0]), -np.inf, 5.8),
|
2275
|
+
(x1, np.array([0., 0, 0, 1]), -3, np.inf),
|
2276
|
+
(x1, np.array([1., 1, 0, 0]), -3, -1),
|
2277
|
+
(x1, np.array([1., 0, -1, 0]), -5.8, np.inf),
|
2278
|
+
)
|
2279
|
+
|
2280
|
+
for x, alpha, lmin, lmax in all_tests:
|
2281
|
+
mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)
|
2282
|
+
assert_allclose(mi, lmin, atol=1e-6)
|
2283
|
+
assert_allclose(ma, lmax, atol=1e-6)
|
2284
|
+
|
2285
|
+
|
2286
|
+
def test_linesearch_powell():
|
2287
|
+
# helper function in optimize.py, not a public function.
|
2288
|
+
linesearch_powell = optimize._optimize._linesearch_powell
|
2289
|
+
# args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3
|
2290
|
+
# returns new_fval, p + direction, direction
|
2291
|
+
def func(x):
|
2292
|
+
return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2)
|
2293
|
+
p0 = np.array([0., 0, 0, 0])
|
2294
|
+
fval = func(p0)
|
2295
|
+
lower_bound = np.array([-np.inf] * 4)
|
2296
|
+
upper_bound = np.array([np.inf] * 4)
|
2297
|
+
|
2298
|
+
all_tests = (
|
2299
|
+
(np.array([1., 0, 0, 0]), -1),
|
2300
|
+
(np.array([0., 1, 0, 0]), 2),
|
2301
|
+
(np.array([0., 0, 1, 0]), 1.5),
|
2302
|
+
(np.array([0., 0, 0, 1]), -.4),
|
2303
|
+
(np.array([-1., 0, 1, 0]), 1.25),
|
2304
|
+
(np.array([0., 0, 1, 1]), .55),
|
2305
|
+
(np.array([2., 0, -1, 1]), -.65),
|
2306
|
+
)
|
2307
|
+
|
2308
|
+
for xi, l in all_tests:
|
2309
|
+
f, p, direction = linesearch_powell(func, p0, xi,
|
2310
|
+
fval=fval, tol=1e-5)
|
2311
|
+
assert_allclose(f, func(l * xi), atol=1e-6)
|
2312
|
+
assert_allclose(p, l * xi, atol=1e-6)
|
2313
|
+
assert_allclose(direction, l * xi, atol=1e-6)
|
2314
|
+
|
2315
|
+
f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
|
2316
|
+
lower_bound=lower_bound,
|
2317
|
+
upper_bound=upper_bound,
|
2318
|
+
fval=fval)
|
2319
|
+
assert_allclose(f, func(l * xi), atol=1e-6)
|
2320
|
+
assert_allclose(p, l * xi, atol=1e-6)
|
2321
|
+
assert_allclose(direction, l * xi, atol=1e-6)
|
2322
|
+
|
2323
|
+
|
2324
|
+
def test_linesearch_powell_bounded():
    # helper function in optimize.py, not a public function.
    linesearch_powell = optimize._optimize._linesearch_powell
    # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3
    # returns new_fval, p+direction, direction
    def func(x):
        return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2)
    p0 = np.array([0., 0, 0, 0])
    fval = func(p0)

    # first choose bounds such that the same tests from
    # test_linesearch_powell should pass.
    lower_bound = np.array([-2.]*4)
    upper_bound = np.array([2.]*4)

    all_tests = (
        (np.array([1., 0, 0, 0]), -1),
        (np.array([0., 1, 0, 0]), 2),
        (np.array([0., 0, 1, 0]), 1.5),
        (np.array([0., 0, 0, 1]), -.4),
        (np.array([-1., 0, 1, 0]), 1.25),
        (np.array([0., 0, 1, 1]), .55),
        (np.array([2., 0, -1, 1]), -.65),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(l * xi), atol=1e-6)
        assert_allclose(p, l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

    # now choose bounds such that unbounded vs bounded gives different results
    lower_bound = np.array([-.3]*3 + [-1])
    upper_bound = np.array([.45]*3 + [.9])

    all_tests = (
        (np.array([1., 0, 0, 0]), -.3),
        (np.array([0., 1, 0, 0]), .45),
        (np.array([0., 0, 1, 0]), .45),
        (np.array([0., 0, 0, 1]), -.4),
        (np.array([-1., 0, 1, 0]), .3),
        (np.array([0., 0, 1, 1]), .45),
        (np.array([2., 0, -1, 1]), -.15),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(l * xi), atol=1e-6)
        assert_allclose(p, l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

    # now choose as above but start outside the bounds
    p0 = np.array([-1., 0, 0, 2])
    fval = func(p0)

    all_tests = (
        (np.array([1., 0, 0, 0]), .7),
        (np.array([0., 1, 0, 0]), .45),
        (np.array([0., 0, 1, 0]), .45),
        (np.array([0., 0, 0, 1]), -2.4),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(p0 + l * xi), atol=1e-6)
        assert_allclose(p, p0 + l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

    # now mix in inf
    p0 = np.array([0., 0, 0, 0])
    fval = func(p0)

    # now choose bounds that mix inf
    lower_bound = np.array([-.3, -np.inf, -np.inf, -1])
    upper_bound = np.array([np.inf, .45, np.inf, .9])

    all_tests = (
        (np.array([1., 0, 0, 0]), -.3),
        (np.array([0., 1, 0, 0]), .45),
        (np.array([0., 0, 1, 0]), 1.5),
        (np.array([0., 0, 0, 1]), -.4),
        (np.array([-1., 0, 1, 0]), .3),
        (np.array([0., 0, 1, 1]), .55),
        (np.array([2., 0, -1, 1]), -.15),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(l * xi), atol=1e-6)
        assert_allclose(p, l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)

    # now choose as above but start outside the bounds
    p0 = np.array([-1., 0, 0, 2])
    fval = func(p0)

    all_tests = (
        (np.array([1., 0, 0, 0]), .7),
        (np.array([0., 1, 0, 0]), .45),
        (np.array([0., 0, 1, 0]), 1.5),
        (np.array([0., 0, 0, 1]), -2.4),
    )

    for xi, l in all_tests:
        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
                                            lower_bound=lower_bound,
                                            upper_bound=upper_bound,
                                            fval=fval)
        assert_allclose(f, func(p0 + l * xi), atol=1e-6)
        assert_allclose(p, p0 + l * xi, atol=1e-6)
        assert_allclose(direction, l * xi, atol=1e-6)


def test_powell_limits():
    # gh15342 - powell was going outside bounds for some function evaluations.
    bounds = optimize.Bounds([0, 0], [0.6, 20])

    def fun(x):
        a, b = x
        assert (x >= bounds.lb).all() and (x <= bounds.ub).all()
        return a ** 2 + b ** 2

    optimize.minimize(fun, x0=[0.6, 20], method='Powell', bounds=bounds)

    # Another test from the original report - gh-13411
    bounds = optimize.Bounds(lb=[0,], ub=[1,], keep_feasible=[True,])

    def func(x):
        assert x >= 0 and x <= 1
        return np.exp(x)

    optimize.minimize(fun=func, x0=[0.5], method='powell', bounds=bounds)


def test_powell_output():
    funs = [rosen, lambda x: np.array(rosen(x)), lambda x: np.array([rosen(x)])]
    for fun in funs:
        res = optimize.minimize(fun, x0=[0.6, 20], method='Powell')
        assert np.isscalar(res.fun)


class TestRosen:
    @make_xp_test_case(optimize.rosen)
    def test_rosen(self, xp):
        # integer input should be promoted to the default floating type
        x = xp.asarray([1, 1, 1])
        xp_assert_equal(optimize.rosen(x),
                        xp.asarray(0.))

    @make_xp_test_case(optimize.rosen_der)
    def test_rosen_der(self, xp):
        x = xp.asarray([1, 1, 1, 1])
        xp_assert_equal(optimize.rosen_der(x),
                        xp.zeros_like(x, dtype=xp.asarray(1.).dtype))

    @make_xp_test_case(optimize.rosen_hess, optimize.rosen_hess_prod)
    def test_hess_prod(self, xp):
        one = xp.asarray(1.)

        # Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775.
        x = xp.asarray([3, 4, 5])
        p = xp.asarray([2, 2, 2])
        hp = optimize.rosen_hess_prod(x, p)
        p = xp.astype(p, one.dtype)
        dothp = optimize.rosen_hess(x) @ p
        xp_assert_equal(hp, dothp)


def himmelblau(p):
    """
    R^2 -> R^1 test function for optimization. The function has four local
    minima where himmelblau(xopt) == 0.
    """
    x, y = p
    a = x*x + y - 11
    b = x + y*y - 7
    return a*a + b*b


def himmelblau_grad(p):
    x, y = p
    return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,
                     2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])


def himmelblau_hess(p):
    x, y = p
    return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],
                     [4*x + 4*y, 4*x + 12*y**2 - 26]])


himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0


def test_minimize_multiple_constraints():
    # Regression test for gh-4240.
    def func(x):
        return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])

    def func1(x):
        return np.array([x[1]])

    def func2(x):
        return np.array([x[2]])

    cons = ({'type': 'ineq', 'fun': func},
            {'type': 'ineq', 'fun': func1},
            {'type': 'ineq', 'fun': func2})

    def f(x):
        return -1 * (x[0] + x[1] + x[2])

    res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)
    assert_allclose(res.x, [125, 0, 0], atol=1e-10)


class TestOptimizeResultAttributes:
    # Test that all minimizers return an OptimizeResult containing
    # all the OptimizeResult attributes
    def setup_method(self):
        self.x0 = [5, 5]
        self.func = optimize.rosen
        self.jac = optimize.rosen_der
        self.hess = optimize.rosen_hess
        self.hessp = optimize.rosen_hess_prod
        self.bounds = [(0., 10.), (0., 10.)]

    @pytest.mark.fail_slow(2)
    def test_attributes_present(self):
        attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',
                      'message']
        skip = {'cobyla': ['nit']}
        for method in MINIMIZE_METHODS:
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning,
                           ("Method .+ does not use (gradient|Hessian.*)"
                            " information"))
                res = optimize.minimize(self.func, self.x0, method=method,
                                        jac=self.jac, hess=self.hess,
                                        hessp=self.hessp)
            for attribute in attributes:
                if method in skip and attribute in skip[method]:
                    continue

                assert hasattr(res, attribute)
                assert attribute in dir(res)

            # gh13001, OptimizeResult.message should be a str
            assert isinstance(res.message, str)


def f1(z, *params):
    x, y = z
    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)


def f2(z, *params):
    x, y = z
    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))


def f3(z, *params):
    x, y = z
    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))


def brute_func(z, *params):
    return f1(z, *params) + f2(z, *params) + f3(z, *params)


class TestBrute:
    # Test the "brute force" method
    def setup_method(self):
        self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
        self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
        self.solution = np.array([-1.05665192, 1.80834843])

    def brute_func(self, z, *params):
        # an instance method optimizing
        return brute_func(z, *params)

    def test_brute(self):
        # test fmin
        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
                                  full_output=True, finish=optimize.fmin)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
                        atol=1e-3)

        # test minimize
        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
                                  full_output=True,
                                  finish=optimize.minimize)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
                        atol=1e-3)

        # test that brute can optimize an instance method (the other tests use
        # a non-class based function)
        resbrute = optimize.brute(self.brute_func, self.rranges,
                                  args=self.params, full_output=True,
                                  finish=optimize.minimize)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)

    def test_1D(self):
        # test that for a 1-D problem the test function is passed an array,
        # not a scalar.
        def f(x):
            assert len(x.shape) == 1
            assert x.shape[0] == 1
            return x ** 2

        optimize.brute(f, [(-1, 1)], Ns=3, finish=None)

    @pytest.mark.fail_slow(10)
    def test_workers(self):
        # check that parallel evaluation works
        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
                                  full_output=True, finish=None)

        resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params,
                                   full_output=True, finish=None, workers=2)

        assert_allclose(resbrute1[-1], resbrute[-1])
        assert_allclose(resbrute1[0], resbrute[0])

    @pytest.mark.thread_unsafe
    def test_runtime_warning(self, capsys):
        rng = np.random.default_rng(1234)

        def func(z, *params):
            return rng.random(1) * 1000  # never converged problem

        msg = "final optimization did not succeed.*|Maximum number of function eval.*"
        with pytest.warns(RuntimeWarning, match=msg):
            optimize.brute(func, self.rranges, args=self.params, disp=True)

    def test_coerce_args_param(self):
        # optimize.brute should coerce non-iterable args to a tuple.
        def f(x, *args):
            return x ** args[0]

        resbrute = optimize.brute(f, (slice(-4, 4, .25),), args=2)
        assert_allclose(resbrute, 0)


@pytest.mark.thread_unsafe
@pytest.mark.fail_slow(20)
def test_cobyla_threadsafe():

    # Verify that cobyla is threadsafe. Will segfault if it is not.

    import concurrent.futures
    import time

    def objective1(x):
        time.sleep(0.1)
        return x[0]**2

    def objective2(x):
        time.sleep(0.1)
        return (x[0]-1)**2

    min_method = "COBYLA"

    def minimizer1():
        return optimize.minimize(objective1,
                                 [0.0],
                                 method=min_method)

    def minimizer2():
        return optimize.minimize(objective2,
                                 [0.0],
                                 method=min_method)

    with concurrent.futures.ThreadPoolExecutor() as pool:
        tasks = []
        tasks.append(pool.submit(minimizer1))
        tasks.append(pool.submit(minimizer2))
        for t in tasks:
            t.result()


class TestIterationLimits:
    # Tests that optimisation does not give up before trying requested
    # number of iterations or evaluations. And that it does not succeed
    # by exceeding the limits.
    def setup_method(self):
        self.funcalls = threading.local()

    def slow_func(self, v):
        if not hasattr(self.funcalls, 'c'):
            self.funcalls.c = 0
        self.funcalls.c += 1
        r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1])
        return np.sin(r*20 + t)+r*0.5

    @pytest.mark.fail_slow(10)
    def test_neldermead_limit(self):
        self.check_limits("Nelder-Mead", 200)

    def test_powell_limit(self):
        self.check_limits("powell", 1000)

    def check_limits(self, method, default_iters):
        for start_v in [[0.1, 0.1], [1, 1], [2, 2]]:
            for mfev in [50, 500, 5000]:
                self.funcalls.c = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxfev": mfev})
                assert self.funcalls.c == res["nfev"]
                if res["success"]:
                    assert res["nfev"] < mfev
                else:
                    assert res["nfev"] >= mfev
            for mit in [50, 500, 5000]:
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit})
                if res["success"]:
                    assert res["nit"] <= mit
                else:
                    assert res["nit"] >= mit
            for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]:
                self.funcalls.c = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit,
                                                 "maxfev": mfev})
                assert self.funcalls.c == res["nfev"]
                if res["success"]:
                    assert res["nfev"] < mfev and res["nit"] <= mit
                else:
                    assert res["nfev"] >= mfev or res["nit"] >= mit
            for mfev, mit in [[np.inf, None], [None, np.inf]]:
                self.funcalls.c = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit,
                                                 "maxfev": mfev})
                assert self.funcalls.c == res["nfev"]
                if res["success"]:
                    if mfev is None:
                        assert res["nfev"] < default_iters*2
                    else:
                        assert res["nit"] <= default_iters*2
                else:
                    assert (res["nfev"] >= default_iters*2
                            or res["nit"] >= default_iters*2)


def test_result_x_shape_when_len_x_is_one():
    def fun(x):
        return x * x

    def jac(x):
        return 2. * x

    def hess(x):
        return np.array([[2.]])

    methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC',
               'COBYLA', 'COBYQA', 'SLSQP']
    for method in methods:
        res = optimize.minimize(fun, np.array([0.1]), method=method)
        assert res.x.shape == (1,)

    # use jac + hess
    methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact',
               'trust-krylov', 'Newton-CG']
    for method in methods:
        res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac,
                                hess=hess)
        assert res.x.shape == (1,)


class FunctionWithGradient:
    def __init__(self):
        self.number_of_calls = threading.local()

    def __call__(self, x):
        if not hasattr(self.number_of_calls, 'c'):
            self.number_of_calls.c = 0
        self.number_of_calls.c += 1
        return np.sum(x**2), 2 * x


@pytest.fixture
def function_with_gradient():
    return FunctionWithGradient()


def test_memoize_jac_function_before_gradient(function_with_gradient):
    memoized_function = MemoizeJac(function_with_gradient)

    x0 = np.array([1.0, 2.0])
    assert_allclose(memoized_function(x0), 5.0)
    assert function_with_gradient.number_of_calls.c == 1

    assert_allclose(memoized_function.derivative(x0), 2 * x0)
    assert function_with_gradient.number_of_calls.c == 1, \
        "function is not recomputed " \
        "if gradient is requested after function value"

    assert_allclose(
        memoized_function(2 * x0), 20.0,
        err_msg="different input triggers new computation")
    assert function_with_gradient.number_of_calls.c == 2, \
        "different input triggers new computation"


def test_memoize_jac_gradient_before_function(function_with_gradient):
    memoized_function = MemoizeJac(function_with_gradient)

    x0 = np.array([1.0, 2.0])
    assert_allclose(memoized_function.derivative(x0), 2 * x0)
    assert function_with_gradient.number_of_calls.c == 1

    assert_allclose(memoized_function(x0), 5.0)
    assert function_with_gradient.number_of_calls.c == 1, \
        "function is not recomputed " \
        "if function value is requested after gradient"

    assert_allclose(
        memoized_function.derivative(2 * x0), 4 * x0,
        err_msg="different input triggers new computation")
    assert function_with_gradient.number_of_calls.c == 2, \
        "different input triggers new computation"


def test_memoize_jac_with_bfgs(function_with_gradient):
    """ Tests that using MemoizedJac in combination with ScalarFunction
        and BFGS does not lead to repeated function evaluations.
        Tests changes made in response to GH11868.
    """
    memoized_function = MemoizeJac(function_with_gradient)
    jac = memoized_function.derivative
    hess = optimize.BFGS()

    x0 = np.array([1.0, 0.5])
    scalar_function = ScalarFunction(
        memoized_function, x0, (), jac, hess, None, None)
    assert function_with_gradient.number_of_calls.c == 1

    scalar_function.fun(x0 + 0.1)
    assert function_with_gradient.number_of_calls.c == 2

    scalar_function.fun(x0 + 0.2)
    assert function_with_gradient.number_of_calls.c == 3


def test_gh12696():
    # Test that optimize doesn't throw warning gh-12696
    with assert_no_warnings():
        optimize.fminbound(
            lambda x: np.array([x**2]), -np.pi, np.pi, disp=False)


# --- Test minimize with equal upper and lower bounds --- #

def setup_test_equal_bounds():

    rng = np.random.RandomState(0)
    x0 = rng.rand(4)
    lb = np.array([0, 2, -1, -1.0])
    ub = np.array([3, 2, 2, -1.0])
    i_eb = (lb == ub)

    def check_x(x, check_size=True, check_values=True):
        if check_size:
            assert x.size == 4
        if check_values:
            assert_allclose(x[i_eb], lb[i_eb])

    def func(x):
        check_x(x)
        return optimize.rosen(x)

    def grad(x):
        check_x(x)
        return optimize.rosen_der(x)

    def callback(x, *args):
        check_x(x)

    def callback2(intermediate_result):
        assert isinstance(intermediate_result, OptimizeResult)
        check_x(intermediate_result.x)

    def constraint1(x):
        check_x(x, check_values=False)
        return x[0:1] - 1

    def jacobian1(x):
        check_x(x, check_values=False)
        dc = np.zeros_like(x)
        dc[0] = 1
        return dc

    def constraint2(x):
        check_x(x, check_values=False)
        return x[2:3] - 0.5

    def jacobian2(x):
        check_x(x, check_values=False)
        dc = np.zeros_like(x)
        dc[2] = 1
        return dc

    c1a = NonlinearConstraint(constraint1, -np.inf, 0)
    c1b = NonlinearConstraint(constraint1, -np.inf, 0, jacobian1)
    c2a = NonlinearConstraint(constraint2, -np.inf, 0)
    c2b = NonlinearConstraint(constraint2, -np.inf, 0, jacobian2)

    # test using the three methods that accept bounds, use derivatives, and
    # have some trouble when bounds fix variables
    methods = ('L-BFGS-B', 'SLSQP', 'TNC')

    # test w/out gradient, w/ gradient, and w/ combined objective/gradient
    kwds = ({"fun": func, "jac": False},
            {"fun": func, "jac": grad},
            {"fun": (lambda x: (func(x), grad(x))),
             "jac": True})

    # test with both old- and new-style bounds
    bound_types = (lambda lb, ub: list(zip(lb, ub)),
                   Bounds)

    # Test for many combinations of constraints w/ and w/out jacobian
    # Pairs in format: (test constraints, reference constraints)
    # (always use analytical jacobian in reference)
    constraints = ((None, None), ([], []),
                   (c1a, c1b), (c2b, c2b),
                   ([c1b], [c1b]), ([c2a], [c2b]),
                   ([c1a, c2a], [c1b, c2b]),
                   ([c1a, c2b], [c1b, c2b]),
                   ([c1b, c2b], [c1b, c2b]))

    # test with and without callback function
    callbacks = (None, callback, callback2)

    data = {"methods": methods, "kwds": kwds, "bound_types": bound_types,
            "constraints": constraints, "callbacks": callbacks,
            "lb": lb, "ub": ub, "x0": x0, "i_eb": i_eb}

    return data


eb_data = setup_test_equal_bounds()


# This test is about handling fixed variables, not the accuracy of the solvers
@pytest.mark.xfail_on_32bit("Failures due to floating point issues, not logic")
@pytest.mark.xfail(scipy.show_config(mode='dicts')['Compilers']['fortran']['name'] ==
                   "intel-llvm",
                   reason="Failures due to floating point issues, not logic")
@pytest.mark.parametrize('method', eb_data["methods"])
@pytest.mark.parametrize('kwds', eb_data["kwds"])
@pytest.mark.parametrize('bound_type', eb_data["bound_types"])
@pytest.mark.parametrize('constraints', eb_data["constraints"])
@pytest.mark.parametrize('callback', eb_data["callbacks"])
def test_equal_bounds(method, kwds, bound_type, constraints, callback):
    """
    Tests that minimizers still work if (bounds.lb == bounds.ub).any()
    gh12502 - Divide by zero in Jacobian numerical differentiation when
    equality bounds constraints are used
    """
    # GH-15051; slightly more skips than necessary; hopefully fixed by GH-14882
    if (platform.machine() == 'aarch64' and method == "TNC"
            and kwds["jac"] is False and callback is not None):
        pytest.skip('Tolerance violation on aarch')

    lb, ub = eb_data["lb"], eb_data["ub"]
    x0, i_eb = eb_data["x0"], eb_data["i_eb"]

    test_constraints, reference_constraints = constraints
    if test_constraints and not method == 'SLSQP':
        pytest.skip('Only SLSQP supports nonlinear constraints')

    if method in ['SLSQP', 'TNC'] and callable(callback):
        sig = inspect.signature(callback)
        if 'intermediate_result' in set(sig.parameters):
            pytest.skip("SLSQP, TNC don't support intermediate_result")

    # reference constraints always have analytical jacobian
    # if test constraints are not the same, we'll need finite differences
    fd_needed = (test_constraints != reference_constraints)

    bounds = bound_type(lb, ub)  # old- or new-style

    kwds.update({"x0": x0, "method": method, "bounds": bounds,
                 "constraints": test_constraints, "callback": callback})
    res = optimize.minimize(**kwds)

    expected = optimize.minimize(optimize.rosen, x0, method=method,
                                 jac=optimize.rosen_der, bounds=bounds,
                                 constraints=reference_constraints)

    # compare the output of a solution with FD vs that of an analytic grad
    assert res.success
    assert_allclose(res.fun, expected.fun, rtol=2e-6)
    assert_allclose(res.x, expected.x, rtol=5e-4)

    if fd_needed or kwds['jac'] is False:
        expected.jac[i_eb] = np.nan
    assert res.jac.shape[0] == 4
    assert_allclose(res.jac[i_eb], expected.jac[i_eb], rtol=1e-6)

    if not (kwds['jac'] or test_constraints or isinstance(bounds, Bounds)):
        # compare the output to an equivalent FD minimization that doesn't
        # need factorization
        def fun(x):
            new_x = np.array([np.nan, 2, np.nan, -1])
            new_x[[0, 2]] = x
            return optimize.rosen(new_x)

        fd_res = optimize.minimize(fun,
                                   x0[[0, 2]],
                                   method=method,
                                   bounds=bounds[::2])
        assert_allclose(res.fun, fd_res.fun)
        # TODO this test should really be equivalent to factorized version
        # above, down to res.nfev. However, testing found that when TNC is
        # called with or without a callback the output is different. The two
        # should be the same! This indicates that the TNC callback may be
        # mutating something when it shouldn't.
        assert_allclose(res.x[[0, 2]], fd_res.x, rtol=2e-6)


@pytest.mark.parametrize('method', eb_data["methods"])
def test_all_bounds_equal(method):
    # this only tests methods that have parameters factored out when lb==ub
    # it does not test other methods that work with bounds
    def f(x, p1=1):
        return np.linalg.norm(x) + p1

    bounds = [(1, 1), (2, 2)]
    x0 = (1.0, 3.0)
    res = optimize.minimize(f, x0, bounds=bounds, method=method)
    assert res.success
    assert_allclose(res.fun, f([1.0, 2.0]))
    assert res.nfev == 1
    assert res.message == 'All independent variables were fixed by bounds.'

    args = (2,)
    res = optimize.minimize(f, x0, bounds=bounds, method=method, args=args)
    assert res.success
    assert_allclose(res.fun, f([1.0, 2.0], 2))

    if method.upper() == 'SLSQP':
        def con(x):
            return np.sum(x)
        nlc = NonlinearConstraint(con, -np.inf, 0.0)
        res = optimize.minimize(
            f, x0, bounds=bounds, method=method, constraints=[nlc]
        )
        assert res.success is False
        assert_allclose(res.fun, f([1.0, 2.0]))
        assert res.nfev == 1
        message = "All independent variables were fixed by bounds, but"
        assert res.message.startswith(message)

        nlc = NonlinearConstraint(con, -np.inf, 4)
        res = optimize.minimize(
            f, x0, bounds=bounds, method=method, constraints=[nlc]
        )
        assert res.success is True
        assert_allclose(res.fun, f([1.0, 2.0]))
        assert res.nfev == 1
        message = "All independent variables were fixed by bounds at values"
        assert res.message.startswith(message)


def test_eb_constraints():
    # make sure constraint functions aren't overwritten when equal bounds
    # are employed, and a parameter is factored out. GH14859
    def f(x):
        return x[0]**3 + x[1]**2 + x[2]*x[3]

    def cfun(x):
        return x[0] + x[1] + x[2] + x[3] - 40

    constraints = [{'type': 'ineq', 'fun': cfun}]

    bounds = [(0, 20)] * 4
    bounds[1] = (5, 5)
    optimize.minimize(
        f,
        x0=[1, 2, 3, 4],
        method='SLSQP',
        bounds=bounds,
        constraints=constraints,
    )
    assert constraints[0]['fun'] == cfun


def test_show_options():
    solver_methods = {
        'minimize': MINIMIZE_METHODS,
        'minimize_scalar': MINIMIZE_SCALAR_METHODS,
        'root': ROOT_METHODS,
        'root_scalar': ROOT_SCALAR_METHODS,
        'linprog': LINPROG_METHODS,
        'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS,
    }
    for solver, methods in solver_methods.items():
        for method in methods:
            # testing that `show_options` works without error
            show_options(solver, method)

    unknown_solver_method = {
        'minimize': "ekki",  # unknown method
        'maximize': "cg",  # unknown solver
        'maximize_scalar': "ekki",  # unknown solver and method
    }
    for solver, method in unknown_solver_method.items():
        # testing that `show_options` raises ValueError
        assert_raises(ValueError, show_options, solver, method)


def test_bounds_with_list():
    # gh13501. Bounds created with lists weren't working for Powell.
    bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.])
    optimize.minimize(
        optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds
    )


@pytest.mark.parametrize('method', (
    'slsqp', 'cg', 'cobyqa', 'powell', 'nelder-mead', 'bfgs', 'l-bfgs-b',
    'trust-constr'))
def test_minimize_maxiter_noninteger(method):
    # Regression test for gh-23430
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    optimize.minimize(rosen, x0, method=method, options={'maxiter': 100.1})


def test_x_overwritten_user_function():
    # if the user overwrites the x-array in the user function it's likely
    # that the minimizer stops working properly.
    # gh13740
    def fquad(x):
        a = np.arange(np.size(x))
        x -= a
        x *= x
        return np.sum(x)

    def fquad_jac(x):
        a = np.arange(np.size(x))
        x *= 2
        x -= 2 * a
        return x

    def fquad_hess(x):
        return np.eye(np.size(x)) * 2.0

    meth_jac = [
        'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact',
        'trust-krylov', 'trust-constr'
    ]
    meth_hess = [
        'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr'
    ]

    x0 = np.ones(5) * 1.5

    for meth in MINIMIZE_METHODS:
        jac = None
        hess = None
        if meth in meth_jac:
            jac = fquad_jac
        if meth in meth_hess:
            hess = fquad_hess
        res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess)
        assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4)


class TestGlobalOptimization:

    def test_optimize_result_attributes(self):
        def func(x):
            return x ** 2

        # Note that `brute` solver does not return `OptimizeResult`
        results = [optimize.basinhopping(func, x0=1),
                   optimize.differential_evolution(func, [(-4, 4)]),
                   optimize.shgo(func, [(-4, 4)]),
                   optimize.dual_annealing(func, [(-4, 4)]),
                   optimize.direct(func, [(-4, 4)]),
                   ]

        for result in results:
            assert isinstance(result, optimize.OptimizeResult)
            assert hasattr(result, "x")
            assert hasattr(result, "success")
            assert hasattr(result, "message")
            assert hasattr(result, "fun")
            assert hasattr(result, "nfev")
            assert hasattr(result, "nit")


def test_approx_fprime():
    # check that approx_fprime (serviced by approx_derivative) works for
    # jac and hess
    g = optimize.approx_fprime(himmelblau_x0, himmelblau)
    assert_allclose(g, himmelblau_grad(himmelblau_x0), rtol=5e-6)

    h = optimize.approx_fprime(himmelblau_x0, himmelblau_grad)
    assert_allclose(h, himmelblau_hess(himmelblau_x0), rtol=5e-6)


def test_gh12594():
    # gh-12594 reported an error in `_linesearch_powell` and
    # `_line_for_search` when `Bounds` was passed lists instead of arrays.
    # Check that results are the same whether the inputs are lists or arrays.

    def f(x):
        return x[0]**2 + (x[1] - 1)**2

    bounds = Bounds(lb=[-10, -10], ub=[10, 10])
    res = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds)
    bounds = Bounds(lb=np.array([-10, -10]), ub=np.array([10, 10]))
    ref = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds)

    assert_allclose(res.fun, ref.fun)
    assert_allclose(res.x, ref.x)


@pytest.mark.parametrize('method', ['Newton-CG', 'trust-constr'])
@pytest.mark.parametrize('sparse_type', [coo_matrix, csc_matrix, csr_matrix,
                                         coo_array, csr_array, csc_array])
def test_sparse_hessian(method, sparse_type):
    # gh-8792 reported an error for minimization with `newton_cg` when `hess`
    # returns a sparse array. Check that results are the same whether `hess`
    # returns a dense or sparse array for optimization methods that accept
    # sparse Hessian matrices.

    def sparse_rosen_hess(x):
        return sparse_type(rosen_hess(x))

    x0 = [2., 2.]

    res_sparse = optimize.minimize(rosen, x0, method=method,
                                   jac=rosen_der, hess=sparse_rosen_hess)
    res_dense = optimize.minimize(rosen, x0, method=method,
                                  jac=rosen_der, hess=rosen_hess)

    assert_allclose(res_dense.fun, res_sparse.fun)
    assert_allclose(res_dense.x, res_sparse.x)
    assert res_dense.nfev == res_sparse.nfev
    assert res_dense.njev == res_sparse.njev
    assert res_dense.nhev == res_sparse.nhev


@pytest.mark.parametrize('workers', [None, 2])
@pytest.mark.parametrize(
    'method',
    ['l-bfgs-b',
     'bfgs',
     'slsqp',
     'trust-constr',
     'Newton-CG',
     'CG',
     'tnc',
     'trust-ncg',
     'trust-krylov'])
class TestWorkers:

    def setup_method(self):
        self.x0 = np.array([1.0, 2.0, 3.0])

    def test_smoke(self, workers, method):
        # checks parallelised optimization output is same as serial
        workers = workers or map

        kwds = {'jac': None, 'hess': None}
        if method in ['Newton-CG', 'trust-ncg', 'trust-krylov']:
            # methods that require a callable jac
            kwds['jac'] = rosen_der
            kwds['hess'] = '2-point'

        with MapWrapper(workers) as mf:
            res = optimize.minimize(
                rosen, self.x0, options={"workers":mf}, method=method, **kwds
            )
        res_default = optimize.minimize(
            rosen, self.x0, method=method, **kwds
        )
        assert_equal(res.x, res_default.x)
        assert_equal(res.nfev, res_default.nfev)

    def test_equal_bounds(self, workers, method):
        workers = workers or map
        if method not in ['l-bfgs-b', 'slsqp', 'trust-constr', 'tnc']:
            pytest.skip(f"{method} cannot use bounds")

        bounds = Bounds([0, 2.0, 0.], [10., 2.0, 10.])
        with MapWrapper(workers) as mf:
            res = optimize.minimize(
                rosen, self.x0, bounds=bounds, options={"workers": mf}, method=method
            )
        assert res.success
        assert_allclose(res.x[1], 2.0)