scipy-1.16.0rc1-cp313-cp313t-macosx_14_0_arm64.whl → scipy-1.16.1-cp313-cp313t-macosx_14_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scipy/.dylibs/libgcc_s.1.1.dylib +0 -0
- scipy/.dylibs/libgfortran.5.dylib +0 -0
- scipy/.dylibs/libquadmath.0.dylib +0 -0
- scipy/__config__.py +5 -5
- scipy/_cyutility.cpython-313t-darwin.so +0 -0
- scipy/_lib/_ccallback_c.cpython-313t-darwin.so +0 -0
- scipy/_lib/_test_deprecation_call.cpython-313t-darwin.so +0 -0
- scipy/_lib/_util.py +7 -0
- scipy/_lib/messagestream.cpython-313t-darwin.so +0 -0
- scipy/cluster/_hierarchy.cpython-313t-darwin.so +0 -0
- scipy/cluster/_optimal_leaf_ordering.cpython-313t-darwin.so +0 -0
- scipy/cluster/_vq.cpython-313t-darwin.so +0 -0
- scipy/conftest.py +25 -0
- scipy/fft/_pocketfft/pypocketfft.cpython-313t-darwin.so +0 -0
- scipy/fftpack/convolve.cpython-313t-darwin.so +0 -0
- scipy/integrate/_dop.cpython-313t-darwin.so +0 -0
- scipy/integrate/_lsoda.cpython-313t-darwin.so +0 -0
- scipy/integrate/_odepack.cpython-313t-darwin.so +0 -0
- scipy/integrate/_test_odeint_banded.cpython-313t-darwin.so +0 -0
- scipy/integrate/_vode.cpython-313t-darwin.so +0 -0
- scipy/interpolate/_dfitpack.cpython-313t-darwin.so +0 -0
- scipy/interpolate/_fitpack.cpython-313t-darwin.so +0 -0
- scipy/interpolate/_interpnd.cpython-313t-darwin.so +0 -0
- scipy/interpolate/_ppoly.cpython-313t-darwin.so +0 -0
- scipy/interpolate/_rgi_cython.cpython-313t-darwin.so +0 -0
- scipy/io/_fast_matrix_market/_fmm_core.cpython-313t-darwin.so +0 -0
- scipy/io/_test_fortran.cpython-313t-darwin.so +0 -0
- scipy/io/matlab/_mio5_utils.cpython-313t-darwin.so +0 -0
- scipy/io/matlab/_mio_utils.cpython-313t-darwin.so +0 -0
- scipy/io/matlab/_streams.cpython-313t-darwin.so +0 -0
- scipy/io/matlab/tests/test_streams.py +9 -0
- scipy/linalg/_cythonized_array_utils.cpython-313t-darwin.so +0 -0
- scipy/linalg/_decomp_interpolative.cpython-313t-darwin.so +0 -0
- scipy/linalg/_decomp_lu_cython.cpython-313t-darwin.so +0 -0
- scipy/linalg/_decomp_update.cpython-313t-darwin.so +0 -0
- scipy/linalg/_fblas.cpython-313t-darwin.so +0 -0
- scipy/linalg/_flapack.cpython-313t-darwin.so +0 -0
- scipy/linalg/_matfuncs_schur_sqrtm.cpython-313t-darwin.so +0 -0
- scipy/linalg/_matfuncs_sqrtm_triu.cpython-313t-darwin.so +0 -0
- scipy/linalg/_solve_toeplitz.cpython-313t-darwin.so +0 -0
- scipy/linalg/blas.py +35 -24
- scipy/linalg/cython_blas.cpython-313t-darwin.so +0 -0
- scipy/linalg/cython_lapack.cpython-313t-darwin.so +0 -0
- scipy/linalg/tests/test_matfuncs.py +7 -0
- scipy/ndimage/_cytest.cpython-313t-darwin.so +0 -0
- scipy/ndimage/_filters.py +52 -23
- scipy/ndimage/_ni_label.cpython-313t-darwin.so +0 -0
- scipy/ndimage/_rank_filter_1d.cpython-313t-darwin.so +0 -0
- scipy/ndimage/tests/test_filters.py +94 -9
- scipy/odr/__odrpack.cpython-313t-darwin.so +0 -0
- scipy/optimize/_bglu_dense.cpython-313t-darwin.so +0 -0
- scipy/optimize/_highspy/_core.cpython-313t-darwin.so +0 -0
- scipy/optimize/_highspy/_highs_options.cpython-313t-darwin.so +0 -0
- scipy/optimize/_lbfgsb_py.py +23 -8
- scipy/optimize/_lsq/givens_elimination.cpython-313t-darwin.so +0 -0
- scipy/optimize/_lsq/least_squares.py +2 -2
- scipy/optimize/_minimize.py +0 -1
- scipy/optimize/_moduleTNC.cpython-313t-darwin.so +0 -0
- scipy/optimize/_pava_pybind.cpython-313t-darwin.so +0 -0
- scipy/optimize/_shgo_lib/_complex.py +2 -2
- scipy/optimize/_trlib/_trlib.cpython-313t-darwin.so +0 -0
- scipy/optimize/cython_optimize/_zeros.cpython-313t-darwin.so +0 -0
- scipy/optimize/tests/test_lsq_linear.py +1 -1
- scipy/optimize/tests/test_optimize.py +3 -3
- scipy/signal/_filter_design.py +13 -1
- scipy/signal/_fir_filter_design.py +1 -1
- scipy/signal/_peak_finding_utils.cpython-313t-darwin.so +0 -0
- scipy/signal/_polyutils.py +1 -1
- scipy/signal/_sosfilt.cpython-313t-darwin.so +0 -0
- scipy/signal/_spectral_py.py +9 -0
- scipy/signal/_upfirdn_apply.cpython-313t-darwin.so +0 -0
- scipy/signal/tests/test_filter_design.py +19 -0
- scipy/signal/tests/test_fir_filter_design.py +5 -0
- scipy/signal/tests/test_signaltools.py +1 -1
- scipy/signal/tests/test_spectral.py +31 -0
- scipy/sparse/_base.py +5 -2
- scipy/sparse/_csparsetools.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_flow.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_matching.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_min_spanning_tree.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_reordering.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_shortest_path.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_tools.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_traversal.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_propack/_cpropack.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_propack/_dpropack.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_propack/_spropack.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_propack/_zpropack.cpython-313t-darwin.so +0 -0
- scipy/sparse/tests/test_base.py +10 -0
- scipy/spatial/_ckdtree.cpython-313t-darwin.so +0 -0
- scipy/spatial/_distance_pybind.cpython-313t-darwin.so +0 -0
- scipy/spatial/_hausdorff.cpython-313t-darwin.so +0 -0
- scipy/spatial/_qhull.cpython-313t-darwin.so +0 -0
- scipy/spatial/_voronoi.cpython-313t-darwin.so +0 -0
- scipy/spatial/tests/test_distance.py +12 -0
- scipy/spatial/transform/_rigid_transform.cpython-313t-darwin.so +0 -0
- scipy/spatial/transform/_rotation.cpython-313t-darwin.so +0 -0
- scipy/special/_comb.cpython-313t-darwin.so +0 -0
- scipy/special/_ellip_harm_2.cpython-313t-darwin.so +0 -0
- scipy/special/_specfun.cpython-313t-darwin.so +0 -0
- scipy/special/_test_internal.cpython-313t-darwin.so +0 -0
- scipy/special/_ufuncs.cpython-313t-darwin.so +0 -0
- scipy/special/_ufuncs_cxx.cpython-313t-darwin.so +0 -0
- scipy/special/cython_special.cpython-313t-darwin.so +0 -0
- scipy/stats/_ansari_swilk_statistics.cpython-313t-darwin.so +0 -0
- scipy/stats/_axis_nan_policy.py +1 -9
- scipy/stats/_biasedurn.cpython-313t-darwin.so +0 -0
- scipy/stats/_continuous_distns.py +19 -16
- scipy/stats/_correlation.py +1 -1
- scipy/stats/_distribution_infrastructure.py +20 -0
- scipy/stats/_entropy.py +2 -2
- scipy/stats/_hypotests.py +1 -1
- scipy/stats/_levy_stable/levyst.cpython-313t-darwin.so +0 -0
- scipy/stats/_morestats.py +7 -7
- scipy/stats/_qmc_cy.cpython-313t-darwin.so +0 -0
- scipy/stats/_qmvnt_cy.cpython-313t-darwin.so +0 -0
- scipy/stats/_rcont/rcont.cpython-313t-darwin.so +0 -0
- scipy/stats/_resampling.py +1 -1
- scipy/stats/_sobol.cpython-313t-darwin.so +0 -0
- scipy/stats/_stats.cpython-313t-darwin.so +0 -0
- scipy/stats/_stats_mstats_common.py +4 -2
- scipy/stats/_stats_py.py +19 -19
- scipy/stats/_unuran/unuran_wrapper.cpython-313t-darwin.so +0 -0
- scipy/stats/_variation.py +1 -1
- scipy/stats/tests/test_distributions.py +13 -0
- scipy/stats/tests/test_fast_gen_inversion.py +2 -0
- scipy/stats/tests/test_morestats.py +4 -4
- scipy/version.py +2 -2
- {scipy-1.16.0rc1.dist-info → scipy-1.16.1.dist-info}/LICENSE.txt +3 -3
- {scipy-1.16.0rc1.dist-info → scipy-1.16.1.dist-info}/METADATA +5 -4
- {scipy-1.16.0rc1.dist-info → scipy-1.16.1.dist-info}/RECORD +133 -133
- {scipy-1.16.0rc1.dist-info → scipy-1.16.1.dist-info}/WHEEL +0 -0
scipy/stats/_axis_nan_policy.py
CHANGED
@@ -403,17 +403,9 @@ def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
     override.update(temp)
 
     if result_to_tuple is None:
-        def result_to_tuple(res):
+        def result_to_tuple(res, _):
             return res
 
-    # The only `result_to_tuple` that needs the second argument (number of
-    # outputs) is the one for `moment`, and this was realized very late.
-    # Rather than changing all `result_to_tuple` definitions, we wrap them
-    # here to accept a second argument if they don't already.
-    if len(inspect.signature(result_to_tuple).parameters) == 1:
-        def result_to_tuple(res, _, f=result_to_tuple):
-            return f(res)
-
     if not callable(too_small):
         def is_too_small(samples, *ts_args, axis=-1, **ts_kwargs):
             for sample in samples:
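The hunk above makes the two-argument `result_to_tuple(res, n_outputs)` signature the convention everywhere instead of adapting one-argument callbacks at runtime. A self-contained sketch of the removed adaptation, with toy callbacks that are not SciPy's own:

# Sketch only: the unpacker functions are illustrative stand-ins.
import inspect

def old_style(res):             # pre-1.16.1 convention: one argument
    return (res,)

def new_style(res, n_outputs):  # 1.16.1 convention: number of outputs is passed too
    return (res,)

def adapt(result_to_tuple):
    # Wrap a one-argument callback so it tolerates the extra argument --
    # exactly the role of the deleted `inspect.signature` shim above.
    if len(inspect.signature(result_to_tuple).parameters) == 1:
        return lambda res, _, f=result_to_tuple: f(res)
    return result_to_tuple

print(adapt(old_style)(3.14, 1))   # (3.14,)
print(adapt(new_style)(3.14, 1))   # (3.14,)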
scipy/stats/_continuous_distns.py
CHANGED
@@ -935,23 +935,26 @@ class beta_gen(rv_continuous):
             log_term = sum_ab*np.log1p(a/b) + np.log(b) - 2*np.log(sum_ab)
             return t1 + t2 + log_term
 
-        def threshold_large(v):
-            if v == 1.0:
-                return 1000
-
-            j = np.log10(v)
-            digits = int(j)
-            d = int(v / 10 ** digits) + 2
-            return d*10**(7 + j)
-
-        if a >= 4.96e6 and b >= 4.96e6:
-            return asymptotic_ab_large(a, b)
-        elif a <= 4.9e6 and b - a >= 1e6 and b >= threshold_large(a):
-            return asymptotic_b_large(a, b)
-        elif b <= 4.9e6 and a - b >= 1e6 and a >= threshold_large(b):
+        def asymptotic_a_large(a, b):
             return asymptotic_b_large(b, a)
-
-
+
+        def threshold_large(v):
+            j = np.floor(np.log10(v))
+            d = np.floor(v / 10 ** j) + 2
+            return xpx.apply_where(v != 1.0, (d, j), lambda d_, j_: d_ * 10**(7 + j_),
+                                   fill_value=1000)
+
+        threshold_a = threshold_large(a)
+        threshold_b = threshold_large(b)
+        return _lazyselect([(a >= 4.96e6) & (b >= 4.96e6),
+                            (a <= 4.9e6) & (b - a >= 1e6) & (b >= threshold_a),
+                            (b <= 4.9e6) & (a - b >= 1e6) & (a >= threshold_b),
+                            (a < 4.9e6) & (b < 4.9e6)
+                            ],
+                           [asymptotic_ab_large, asymptotic_b_large,
+                            asymptotic_a_large, regular],
+                           [a, b]
+                           )
 
 
 beta = beta_gen(a=0.0, b=1.0, name='beta')
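The point of this rewrite is that `a` and `b` may now be arrays: the scalar `if`/`elif` chain is replaced by elementwise selection (`_lazyselect` and `xpx.apply_where` are SciPy-internal helpers), so each asymptotic formula is applied only to the elements whose parameters fall in its regime. A rough stand-alone sketch of the same dispatch pattern using plain NumPy and toy formulas, not the real entropy expressions:

# np.select plays the role of `_lazyselect` here: it picks a branch per
# element rather than taking a single scalar `if`/`elif` path. Unlike
# `_lazyselect`, np.select evaluates every branch for every element, which
# is fine for these cheap, well-defined toy branches.
import numpy as np

def moderate(v):
    return np.log(v)            # stand-in for the "regular" formula

def very_large(v):
    return v - 1.0              # stand-in for an asymptotic formula

v = np.array([0.5, 2.0, 50.0, 1e7])
conds = [v < 1e6, v >= 1e6]
out = np.select(conds, [moderate(v), very_large(v)])
print(out)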
scipy/stats/_correlation.py
CHANGED
scipy/stats/_distribution_infrastructure.py
CHANGED
@@ -5100,6 +5100,26 @@ class Mixture(_ProbabilityDistribution):
     .. [1] Mixture distribution, *Wikipedia*,
            https://en.wikipedia.org/wiki/Mixture_distribution
 
+
+    Examples
+    --------
+    A mixture of normal distributions:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>> X1 = stats.Normal(mu=-2, sigma=1)
+    >>> X2 = stats.Normal(mu=2, sigma=1)
+    >>> mixture = stats.Mixture([X1, X2], weights=[0.4, 0.6])
+    >>> print(f'mean: {mixture.mean():.2f}, '
+    ...       f'median: {mixture.median():.2f}, '
+    ...       f'mode: {mixture.mode():.2f}')
+    mean: 0.40, median: 1.04, mode: 2.00
+    >>> x = np.linspace(-10, 10, 300)
+    >>> plt.plot(x, mixture.pdf(x))
+    >>> plt.title('PDF of normal distribution mixture')
+    >>> plt.show()
+
     """
     # Todo:
     # Add support for array shapes, weights
scipy/stats/_entropy.py
CHANGED
@@ -20,7 +20,7 @@ __all__ = ['entropy', 'differential_entropy']
         2 if ("qk" in kwgs and kwgs["qk"] is not None)
         else 1
     ),
-    n_outputs=1, result_to_tuple=lambda x: (x,), paired=True,
+    n_outputs=1, result_to_tuple=lambda x, _: (x,), paired=True,
     too_small=-1  # entropy doesn't have too small inputs
 )
 def entropy(pk: np.typing.ArrayLike,
@@ -170,7 +170,7 @@ def _differential_entropy_is_too_small(samples, kwargs, axis=-1):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,),
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,),
     too_small=_differential_entropy_is_too_small
 )
 def differential_entropy(
scipy/stats/_hypotests.py
CHANGED
scipy/stats/_morestats.py
CHANGED
@@ -222,7 +222,7 @@ def mvsdist(data):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, default_axis=None
 )
 def kstat(data, n=2, *, axis=None):
     r"""
@@ -327,7 +327,7 @@ def kstat(data, n=2, *, axis=None):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, default_axis=None
 )
 def kstatvar(data, n=2, *, axis=None):
     r"""Return an unbiased estimator of the variance of the k-statistic.
@@ -984,7 +984,7 @@ def boxcox_llf(lmb, data, *, axis=0, keepdims=False, nan_policy='propagate'):
 
 
 @_axis_nan_policy_factory(lambda x: x, n_outputs=1, default_axis=0,
-                          result_to_tuple=lambda x: (x,))
+                          result_to_tuple=lambda x, _: (x,))
 def _boxcox_llf(data, axis=0, *, lmb):
     xp = array_namespace(data)
     lmb, data = xp_promote(lmb, data, force_floating=True, xp=xp)
@@ -3496,7 +3496,7 @@ def mood(x, y, axis=0, alternative="two-sided"):
 WilcoxonResult = _make_tuple_bunch('WilcoxonResult', ['statistic', 'pvalue'])
 
 
-def wilcoxon_result_unpacker(res):
+def wilcoxon_result_unpacker(res, _):
     if hasattr(res, 'zstatistic'):
         return res.statistic, res.pvalue, res.zstatistic
     else:
@@ -3993,7 +3993,7 @@ def _circfuncs_common(samples, period, xp=None):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
     r"""Compute the circular mean of a sample of angle observations.
@@ -4086,7 +4086,7 @@ def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
     r"""Compute the circular variance of a sample of angle observations.
@@ -4180,7 +4180,7 @@ def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *,
             normalize=False):
scipy/stats/_resampling.py
CHANGED
@@ -942,7 +942,7 @@ def monte_carlo_test(data, rvs, statistic, *, vectorized=None,
                      for rvs_i, n_observations_i in zip(rvs, n_observations)]
         null_distribution.append(statistic(*resamples, axis=-1))
     null_distribution = xp.concat(null_distribution)
-    null_distribution = xp.reshape(null_distribution,
+    null_distribution = xp.reshape(null_distribution, (-1,) + (1,)*observed.ndim)
 
     # relative tolerance for detecting numerically distinct but
     # theoretically equal values in the null distribution
scipy/stats/_stats_mstats_common.py
CHANGED
@@ -21,7 +21,8 @@ def _n_samples_optional_x(kwargs):
 
 @_axis_nan_policy_factory(TheilslopesResult, default_axis=None, n_outputs=4,
                           n_samples=_n_samples_optional_x,
-                          result_to_tuple=tuple, paired=True)
+                          result_to_tuple=lambda x, _: tuple(x), paired=True,
+                          too_small=1)
 def theilslopes(y, x=None, alpha=0.95, method='separate'):
     r"""
     Computes the Theil-Sen estimator for a set of points (x, y).
@@ -204,7 +205,8 @@ def _find_repeats(arr):
 
 @_axis_nan_policy_factory(SiegelslopesResult, default_axis=None, n_outputs=2,
                           n_samples=_n_samples_optional_x,
-                          result_to_tuple=tuple, paired=True)
+                          result_to_tuple=lambda x, _: tuple(x), paired=True,
+                          too_small=1)
 def siegelslopes(y, x=None, method="hierarchical"):
     r"""
     Computes the Siegel estimator for a set of points (x, y).
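The `monte_carlo_test` change above (and the `_xp_mean` change in `_stats_py.py` below) pass a tuple rather than a list as the reshape target, which is how the array API standard types the `shape` argument; the `monte_carlo_test` fix also gives the flattened null distribution one trailing singleton axis per dimension of the observed statistic so the two broadcast. A rough NumPy-only sketch of that broadcasting arrangement, with a simplified one-sided count standing in for SciPy's exact p-value computation:

# NumPy-only sketch; the p-value here is a toy one-sided count, not SciPy's.
import numpy as np

rng = np.random.default_rng(0)
observed = rng.standard_normal((3, 4))      # statistic computed per cell
null = rng.standard_normal(999)             # resampled statistics, flattened

null = np.reshape(null, (-1,) + (1,) * observed.ndim)   # shape (999, 1, 1)
pvalues = (null >= observed).mean(axis=0)                # broadcasts to (3, 4)
print(pvalues.shape)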
scipy/stats/_stats_py.py
CHANGED
@@ -160,7 +160,7 @@ def _pack_CorrelationResult(statistic, pvalue, correlation):
     return res
 
 
-def _unpack_CorrelationResult(res):
+def _unpack_CorrelationResult(res, _):
     return res.statistic, res.pvalue, res.correlation
 
 
@@ -168,7 +168,7 @@ def _unpack_CorrelationResult(res):
 @xp_capabilities()
 @_axis_nan_policy_factory(
     lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
-    result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+    result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
 def gmean(a, axis=0, dtype=None, weights=None):
     r"""Compute the weighted geometric mean along the specified axis.
 
@@ -252,7 +252,7 @@ def gmean(a, axis=0, dtype=None, weights=None):
 @xp_capabilities(jax_jit=False, allow_dask_compute=1)
 @_axis_nan_policy_factory(
     lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
-    result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+    result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
 def hmean(a, axis=0, dtype=None, *, weights=None):
     r"""Calculate the weighted harmonic mean along the specified axis.
 
@@ -353,7 +353,7 @@ def hmean(a, axis=0, dtype=None, *, weights=None):
 @xp_capabilities(jax_jit=False, allow_dask_compute=1)
 @_axis_nan_policy_factory(
     lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
-    result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+    result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
 def pmean(a, p, *, axis=0, dtype=None, weights=None):
     r"""Calculate the weighted power mean along the specified axis.
 
@@ -634,7 +634,7 @@ def _put_val_to_limits(a, limits, inclusive, val=np.nan, xp=None):
 @xp_capabilities()
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def tmean(a, limits=None, inclusive=(True, True), axis=None):
     """Compute the trimmed mean.
@@ -689,7 +689,7 @@ def tmean(a, limits=None, inclusive=(True, True), axis=None):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
     """Compute the trimmed variance.
@@ -749,7 +749,7 @@ def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
     """Compute the trimmed minimum.
@@ -813,7 +813,7 @@ def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
     """Compute the trimmed maximum.
@@ -876,7 +876,7 @@ def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
     """Compute the trimmed sample standard deviation.
@@ -929,7 +929,7 @@ def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
     """Compute the trimmed standard error of the mean.
@@ -1265,7 +1265,7 @@ def _share_masks(*args, xp):
 
 @xp_capabilities(jax_jit=False, allow_dask_compute=2)
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1
 )
 # nan_policy handled by `_axis_nan_policy`, but needs to be left
 # in signature to preserve use as a positional argument
@@ -1366,7 +1366,7 @@ def skew(a, axis=0, bias=True, nan_policy='propagate'):
 
 @xp_capabilities(jax_jit=False, allow_dask_compute=2)
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1
 )
 # nan_policy handled by `_axis_nan_policy`, but needs to be left
 # in signature to preserve use as a positional argument
@@ -2602,7 +2602,7 @@ def obrientransform(*samples):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, too_small=1
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, too_small=1
 )
 def sem(a, axis=0, ddof=1, nan_policy='propagate'):
     """Compute standard error of the mean.
@@ -3070,7 +3070,7 @@ _scale_conversions = {'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1,
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1,
     default_axis=None, override={'nan_propagation': False}
 )
 def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
@@ -4068,7 +4068,7 @@ class AlexanderGovernResult:
 
 @_axis_nan_policy_factory(
     AlexanderGovernResult, n_samples=None,
-    result_to_tuple=lambda x: (x.statistic, x.pvalue),
+    result_to_tuple=lambda x, _: (x.statistic, x.pvalue),
     too_small=1
 )
 def alexandergovern(*samples, nan_policy='propagate', axis=0):
@@ -6027,7 +6027,7 @@ def pack_TtestResult(statistic, pvalue, df, alternative, standard_error,
                        standard_error=standard_error, estimate=estimate)
 
 
-def unpack_TtestResult(res):
+def unpack_TtestResult(res, _):
     return (res.statistic, res.pvalue, res.df, res._alternative,
             res._standard_error, res._estimate)
 
@@ -7633,7 +7633,7 @@ def _tuple_to_KstestResult(statistic, pvalue,
                         statistic_sign=statistic_sign)
 
 
-def _KstestResult_to_tuple(res):
+def _KstestResult_to_tuple(res, _):
     return *res, res.statistic_location, res.statistic_sign
 
 
@@ -10651,7 +10651,7 @@ def _pack_LinregressResult(slope, intercept, rvalue, pvalue, stderr, intercept_s
                             intercept_stderr=intercept_stderr)
 
 
-def _unpack_LinregressResult(res):
+def _unpack_LinregressResult(res, _):
     return tuple(res) + (res.intercept_stderr,)
 
 
@@ -10975,7 +10975,7 @@ def _xp_mean(x, /, *, axis=None, weights=None, keepdims=False, nan_policy='propa
     for i in axes:
         final_shape[i] = 1
 
-    res = xp.reshape(res, final_shape)
+    res = xp.reshape(res, tuple(final_shape))
 
     return res[()] if res.ndim == 0 else res
 
scipy/stats/_variation.py
CHANGED
@@ -7,7 +7,7 @@ from ._axis_nan_policy import _axis_nan_policy_factory
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False):
     """
scipy/stats/tests/test_distributions.py
CHANGED
@@ -5003,6 +5003,19 @@ class TestBeta:
         # return float(entropy)
         assert_allclose(stats.beta(a, b).entropy(), ref, rtol=tol)
 
+    def test_entropy_broadcasting(self):
+        # gh-23127 reported that the entropy method of the beta
+        # distribution did not broadcast correctly.
+        Beta = stats.make_distribution(stats.beta)
+        a = np.asarray([5e6, 100, 1e9, 10])
+        b = np.asarray([5e6, 1e9, 100, 20])
+        res = Beta(a=a, b=b).entropy()
+        ref = np.asarray([Beta(a=a[0], b=b[0]).entropy(),
+                          Beta(a=a[1], b=b[1]).entropy(),
+                          Beta(a=a[2], b=b[2]).entropy(),
+                          Beta(a=a[3], b=b[3]).entropy()])
+        assert_allclose(res, ref)
+
 
 class TestBetaPrime:
     # the test values are used in test_cdf_gh_17631 / test_ppf_gh_17631
scipy/stats/tests/test_fast_gen_inversion.py
CHANGED
@@ -6,6 +6,7 @@ from numpy.testing import (assert_array_equal, assert_allclose,
 from copy import deepcopy
 from scipy.stats.sampling import FastGeneratorInversion
 from scipy import stats
+from scipy._lib._testutils import IS_MUSL
 
 
 def test_bad_args():
@@ -142,6 +143,7 @@ def test_geninvgauss_uerror():
 
 
 # TODO: add more distributions
+@pytest.mark.skipif(IS_MUSL, reason="Hits RecursionError, see gh-23172")
 @pytest.mark.fail_slow(5)
 @pytest.mark.parametrize(("distname, args"), [("beta", (0.11, 0.11))])
 def test_error_extreme_params(distname, args):
scipy/stats/tests/test_morestats.py
CHANGED
@@ -2053,13 +2053,13 @@ class TestBoxcox_llf:
     def test_axis(self, xp):
         data = xp.asarray([[100, 200], [300, 400]])
         llf_axis_0 = stats.boxcox_llf(1, data, axis=0)
-        llf_0 = xp.
+        llf_0 = xp.stack([
             stats.boxcox_llf(1, data[:, 0]),
             stats.boxcox_llf(1, data[:, 1]),
         ])
         xp_assert_close(llf_axis_0, llf_0)
         llf_axis_1 = stats.boxcox_llf(1, data, axis=1)
-        llf_1 = xp.
+        llf_1 = xp.stack([
             stats.boxcox_llf(1, data[0, :]),
             stats.boxcox_llf(1, data[1, :]),
         ])
@@ -2732,11 +2732,11 @@ class TestCircFuncs:
 
         res = circfunc(x, high=360, axis=1)
         ref = [circfunc(x[i, :], high=360) for i in range(x.shape[0])]
-        xp_assert_close(res, xp.
+        xp_assert_close(res, xp.stack(ref))
 
         res = circfunc(x, high=360, axis=0)
         ref = [circfunc(x[:, i], high=360) for i in range(x.shape[1])]
-        xp_assert_close(res, xp.
+        xp_assert_close(res, xp.stack(ref))
 
     @pytest.mark.parametrize("test_func,expected",
                              [(stats.circmean, 0.167690146),
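The test fixes above assemble reference arrays with `xp.stack` rather than by converting a Python list of per-slice results, which keeps the tests portable across array API backends: `stack` is part of the array API standard, while `asarray` is only specified for nested sequences of Python scalars. A small stand-alone sketch of the pattern, with NumPy standing in for `xp` and a toy per-column statistic:

# Array-API-friendly assembly of per-slice results into one array.
import numpy as xp

data = xp.asarray([[100.0, 200.0], [300.0, 400.0]])
per_column = [xp.mean(data[:, j]) for j in range(data.shape[1])]
ref = xp.stack(per_column)
print(ref)            # [200. 300.]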
scipy/version.py
CHANGED
@@ -2,10 +2,10 @@
 """
 Module to expose more detailed version info for the installed `scipy`
 """
-version = "1.16.0rc1"
+version = "1.16.1"
 full_version = version
 short_version = version.split('.dev')[0]
-git_revision = "
+git_revision = "0cf8e9541b1a2457992bf4ec2c0c669da373e497"
 release = 'dev' not in version and '+' not in version
 
 if not release:
{scipy-1.16.0rc1.dist-info → scipy-1.16.1.dist-info}/LICENSE.txt
CHANGED
@@ -39,7 +39,7 @@ Name: OpenBLAS
 Files: scipy/.dylibs/libscipy_openblas*.so
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause
+License: BSD-3-Clause
 Copyright (c) 2011-2014, The OpenBLAS Project
 All rights reserved.
 
@@ -75,7 +75,7 @@ Name: LAPACK
 Files: scipy/.dylibs/libscipy_openblas*.so
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause-
+License: BSD-3-Clause-Open-MPI
 Copyright (c) 1992-2013 The University of Tennessee and The University
 of Tennessee Research Foundation. All rights
 reserved.
@@ -130,7 +130,7 @@ Name: GCC runtime library
 Files: scipy/.dylibs/libgfortran*, scipy/.dylibs/libgcc*
 Description: dynamically linked to files compiled with gcc
 Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
-License: GPL-3.0-
+License: GPL-3.0-or-later WITH GCC-exception-3.1
 Copyright (C) 2002-2017 Free Software Foundation, Inc.
 
 Libgfortran is free software; you can redistribute it and/or modify
{scipy-1.16.0rc1.dist-info → scipy-1.16.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: scipy
-Version: 1.16.0rc1
+Version: 1.16.1
 Summary: Fundamental algorithms for scientific computing in Python
 Maintainer-Email: SciPy Developers <scipy-dev@python.org>
 License: Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
@@ -44,7 +44,7 @@ License: Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
 Files: scipy/.dylibs/libscipy_openblas*.so
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause
+License: BSD-3-Clause
 Copyright (c) 2011-2014, The OpenBLAS Project
 All rights reserved.
 
@@ -80,7 +80,7 @@ License: Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
 Files: scipy/.dylibs/libscipy_openblas*.so
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause-
+License: BSD-3-Clause-Open-MPI
 Copyright (c) 1992-2013 The University of Tennessee and The University
 of Tennessee Research Foundation. All rights
 reserved.
@@ -135,7 +135,7 @@ License: Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
 Files: scipy/.dylibs/libgfortran*, scipy/.dylibs/libgcc*
 Description: dynamically linked to files compiled with gcc
 Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
-License: GPL-3.0-
+License: GPL-3.0-or-later WITH GCC-exception-3.1
 Copyright (C) 2002-2017 Free Software Foundation, Inc.
 
 Libgfortran is free software; you can redistribute it and/or modify
@@ -948,6 +948,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Software Development :: Libraries
 Classifier: Topic :: Scientific/Engineering
 Classifier: Operating System :: Microsoft :: Windows