scipy-1.16.0rc1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl → scipy-1.16.0rc2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
scipy/ndimage/_filters.py CHANGED
@@ -68,12 +68,23 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
     if size is not None and footprint is not None:
         raise ValueError("Either `size` or `footprint` may be provided, not both.")
 
-    # Either footprint or size must be provided, and these determine the core
-    # dimensionality...
+    if axes is None:
+        axes = tuple(range(-input.ndim, 0))
+    elif np.isscalar(axes):
+        axes = (axes,)
+    n_axes = len(axes)
+    n_batch = input.ndim - n_axes
+
+    if n_axes > input.ndim:
+        message = ("The length of `axes` may not exceed the dimensionality of `input`"
+                   "(`input.ndim`).")
+        raise ValueError(message)
+
+    # Either footprint or size must be provided
     footprinted_function = function
     if size is not None:
         # If provided, size must be an integer or tuple of integers.
-        size = (size,)*input.ndim if np.isscalar(size) else tuple(size)
+        size = (size,)*n_axes if np.isscalar(size) else tuple(size)
         valid = [xp.isdtype(xp.asarray(i).dtype, 'integral') and i > 0 for i in size]
         if not all(valid):
             raise ValueError("All elements of `size` must be positive integers.")
@@ -84,13 +95,10 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
         def footprinted_function(input, *args, axis=-1, **kwargs):
             return function(input[..., footprint], *args, axis=-1, **kwargs)
 
-    n_axes = len(size)
-    n_batch = input.ndim - n_axes
-
-    # ...which can't exceed the dimensionality of `input`.
-    if n_axes > input.ndim:
-        message = ("The dimensionality of the window (`len(size)` or `footprint.ndim`) "
-                   "may not exceed the number of axes of `input` (`input.ndim`).")
+    # And by now, the dimensionality of the footprint must equal the number of axes
+    if n_axes != len(size):
+        message = ("`axes` must be compatible with the dimensionality "
+                   "of the window specified by `size` or `footprint`.")
         raise ValueError(message)
 
     # If this is not *equal* to the dimensionality of `input`, then `axes`
@@ -101,9 +109,10 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
                        "(`len(size)` or `footprint.ndim`) does not equal the number "
                        "of axes of `input` (`input.ndim`).")
             raise ValueError(message)
-        axes = (axes,) if np.isscalar(axes) else axes
     else:
-        axes = tuple(range(-n_axes, 0))
+        axes = tuple(range(-n_axes, 0)) if axes is None else axes
+
+    axes = (axes,) if np.isscalar(axes) else axes
 
     # If `origin` is provided, then it must be "broadcastable" to a tuple with length
     # equal to the core dimensionality.
@@ -150,10 +159,9 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
 
     # For simplicity, work with `axes` at the end.
     working_axes = tuple(range(-n_axes, 0))
-    if axes is not None:
-        input = xp.moveaxis(input, axes, working_axes)
-        output = (xp.moveaxis(output, axes, working_axes)
-                  if output is not None else output)
+    input = xp.moveaxis(input, axes, working_axes)
+    output = (xp.moveaxis(output, axes, working_axes)
+              if output is not None else output)
 
     # Wrap the function to limit maximum memory usage, deal with `footprint`,
     # and populate `output`. The latter requires some verbosity because we
@@ -190,8 +198,8 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
                            **kwargs)
         return output
 
-    return (input, wrapped_function, size, mode, cval,
-            origin, working_axes, n_axes, n_batch, xp)
+    return (input, wrapped_function, size, mode, cval, origin,
+            working_axes, axes, n_axes, n_batch, xp)
 
 
 @_ni_docstrings.docfiller
@@ -211,7 +219,19 @@ def vectorized_filter(input, function, *, size=None, footprint=None, output=None
 
     where ``axis`` specifies the axis (or axes) of ``window`` along which
     the filter function is evaluated.
-    %(size_foot)s
+    size : scalar or tuple, optional
+        See `footprint` below. Ignored if `footprint` is given.
+    footprint : array, optional
+        Either `size` or `footprint` must be defined. `size` gives
+        the shape that is taken from the input array, at every element
+        position, to define the input to the filter function.
+        `footprint` is a boolean array that specifies (implicitly) a
+        shape, but also which of the elements within this shape will get
+        passed to the filter function. Thus ``size=(n, m)`` is equivalent
+        to ``footprint=np.ones((n, m))``.
+        We adjust `size` to the number of dimensions indicated by `axes`.
+        For instance, if `axes` is ``(0, 2, 1)`` and ``n`` is passed for ``size``,
+        then the effective `size` is ``(n, n, n)``.
     %(output)s
     mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
         The `mode` parameter determines how the input array is extended
@@ -409,7 +429,7 @@ def vectorized_filter(input, function, *, size=None, footprint=None, output=None
 
     """ # noqa: E501
 
-    (input, function, size, mode, cval, origin, working_axes, n_axes, n_batch, xp
+    (input, function, size, mode, cval, origin, working_axes, axes, n_axes, n_batch, xp
      ) = _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
                                origin, axes, batch_memory)
 
@@ -455,7 +475,7 @@ def vectorized_filter(input, function, *, size=None, footprint=None, output=None
     res = function(view)
 
     # move working_axes back to original positions
-    return xp.moveaxis(res, working_axes, axes) if axes is not None else res
+    return xp.moveaxis(res, working_axes, axes)
 
 
 def _invalid_origin(origin, lenw):
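A minimal usage sketch of the behavior these hunks enable, assuming the 1.16.0rc2 wheel is installed; the array shape and window size below are illustrative only. A scalar `size` now expands to one entry per axis named in `axes`, and axes not listed are treated as batch axes:

    import numpy as np
    from scipy import ndimage

    img = np.random.default_rng(0).random((4, 5, 6))
    # scalar `size` is broadcast to len(axes) == 2, so a 3x3 mean window is
    # applied over the last two axes while axis 0 is batched over
    res = ndimage.vectorized_filter(img, np.mean, size=3, axes=(-2, -1))
    assert res.shape == img.shape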
scipy/ndimage/tests/test_filters.py CHANGED
@@ -2571,7 +2571,7 @@ class TestThreading:
         for i in range(n):
             fun(*args, output=out[i, ...])
 
-    @xfail_xp_backends("cupy", 
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_correlate1d(self, xp):
         d = np.random.randn(5000)
@@ -2585,7 +2585,7 @@ class TestThreading:
         self.check_func_thread(4, ndimage.correlate1d, (d, k), ot)
         xp_assert_equal(os, ot)
 
-    @xfail_xp_backends("cupy", 
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_correlate(self, xp):
         d = xp.asarray(np.random.randn(500, 500))
@@ -2596,7 +2596,7 @@ class TestThreading:
         self.check_func_thread(4, ndimage.correlate, (d, k), ot)
         xp_assert_equal(os, ot)
 
-    @xfail_xp_backends("cupy", 
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_median_filter(self, xp):
         d = xp.asarray(np.random.randn(500, 500))
@@ -2606,7 +2606,7 @@ class TestThreading:
         self.check_func_thread(4, ndimage.median_filter, (d, 3), ot)
         xp_assert_equal(os, ot)
 
-    @xfail_xp_backends("cupy", 
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_uniform_filter1d(self, xp):
         d = np.random.randn(5000)
@@ -2619,7 +2619,7 @@ class TestThreading:
         self.check_func_thread(4, ndimage.uniform_filter1d, (d, 5), ot)
         xp_assert_equal(os, ot)
 
-    @xfail_xp_backends("cupy", 
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_minmax_filter(self, xp):
         d = xp.asarray(np.random.randn(500, 500))
@@ -2908,15 +2908,19 @@ class TestVectorizedFilter:
         with pytest.raises(ValueError, match=message):
             ndimage.vectorized_filter(input, function, size=0)
 
-        message = "The dimensionality of the window"
+        message = "The length of `axes` may not exceed "
+        axes = (0, 1, 2)
         with pytest.raises(ValueError, match=message):
-            ndimage.vectorized_filter(input, function, size=(1, 2, 3))
+            ndimage.vectorized_filter(input, function, size=(1, 2), axes=axes)
         with pytest.raises(ValueError, match=message):
-            ndimage.vectorized_filter(input, function, footprint=xp.ones((2, 2, 2)))
+            ndimage.vectorized_filter(input, function, footprint=xp.ones((2, 2)),
+                                      axes=axes)
 
-        message = "`axes` must be provided if the dimensionality..."
+        message = "`axes` must be compatible with the dimensionality..."
         with pytest.raises(ValueError, match=message):
             ndimage.vectorized_filter(input, function, size=(1,))
+        with pytest.raises(ValueError, match=message):
+            ndimage.vectorized_filter(input, function, size=(2,), axes=(0,1))
 
         message = "All elements of `origin` must be integers"
         with pytest.raises(ValueError, match=message):
@@ -2986,6 +2990,35 @@ class TestVectorizedFilter:
         ref = ndimage.vectorized_filter(input, function, size=21)
         xp_assert_close(res, ref)
 
+    def test_gh23046_feature(self, xp):
+        # The intent of gh-23046 was to always allow `size` to be a scalar.
+        rng = np.random.default_rng(45982734597824)
+        img = xp.asarray(rng.random((5, 5)))
+
+        ref = ndimage.vectorized_filter(img, xp.mean, size=2)
+        res = ndimage.vectorized_filter(img, xp.mean, size=2, axes=(0, 1))
+        xp_assert_close(res, ref)
+
+        ref = ndimage.vectorized_filter(img, xp.mean, size=(2,), axes=(0,))
+        res = ndimage.vectorized_filter(img, xp.mean, size=2, axes=0)
+        xp_assert_close(res, ref)
+
+    def test_gh23046_fix(self, xp):
+        # While investigating the feasibility of gh-23046, I noticed a bug when the
+        # length of an `axes` tuple equals the dimensionality of the image.
+        rng = np.random.default_rng(45982734597824)
+        img = xp.asarray(rng.random((5, 5)))
+        size = (2, 3)
+        ref = ndimage.vectorized_filter(img.T, xp.mean, size=size).T
+        res = ndimage.vectorized_filter(img, xp.mean, size=size, axes=(1, 0))
+        xp_assert_close(res, ref)
+
+        ref = ndimage.vectorized_filter(img, xp.mean, size=size, mode='constant')
+        res = ndimage.vectorized_filter(img, xp.mean, size=size[::-1], axes=(1, 0),
+                                        mode='constant')
+        xp_assert_close(res, ref)
+
+
 
 @given(x=npst.arrays(dtype=np.float64,
                      shape=st.integers(min_value=1, max_value=1000)),
scipy/optimize/tests/test_lsq_linear.py CHANGED
@@ -239,7 +239,7 @@ class SparseMixin:
 
         # Default lsmr arguments should not fully converge the solution
         default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr')
-        with pytest.raises(AssertionError, match=""):
+        with pytest.raises(AssertionError):
             assert_allclose(exact_sol.x, default_lsmr_sol.x)
 
         # By increasing the maximum lsmr iters, it will converge
scipy/signal/_spectral_py.py CHANGED
@@ -897,6 +897,15 @@ def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
     if np.iscomplexobj(x) and return_onesided:
         return_onesided = False
 
+    if x.shape[axis] < y.shape[axis]:  # zero-pad x to shape of y:
+        z_shape = list(y.shape)
+        z_shape[axis] = y.shape[axis] - x.shape[axis]
+        x = np.concatenate((x, np.zeros(z_shape)), axis=axis)
+    elif y.shape[axis] < x.shape[axis]:  # zero-pad y to shape of x:
+        z_shape = list(x.shape)
+        z_shape[axis] = x.shape[axis] - y.shape[axis]
+        y = np.concatenate((y, np.zeros(z_shape)), axis=axis)
+
     # using cast() to make mypy happy:
     fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided else 'twosided')
     if scaling not in (scales := {'spectrum': 'magnitude', 'density': 'psd'}):
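A minimal sketch of the zero-padding added above, assuming the 1.16.0rc2 wheel; the signal values are illustrative. When `x` and `y` differ in length along `axis`, the shorter one is padded with zeros before the spectral averaging, so unequal-length inputs are handled:

    import numpy as np
    from scipy import signal

    x = np.tile([4.0, 0.0, -4.0, 0.0], 4)   # length 16
    y = x[:8]                                # length 8; zero-padded to 16 internally
    f, Pxy = signal.csd(x, y, fs=len(x), window='boxcar', nperseg=4)
    print(f.shape, Pxy.shape)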
scipy/signal/tests/test_spectral.py CHANGED
@@ -8,6 +8,7 @@ import pytest
 from pytest import raises as assert_raises
 
 from scipy import signal
+from scipy._lib._array_api import xp_assert_close
 from scipy.fft import fftfreq, rfftfreq, fft, irfft
 from scipy.integrate import trapezoid
 from scipy.signal import (periodogram, welch, lombscargle, coherence, csd,
@@ -591,6 +592,36 @@ class TestCSD:
         assert_allclose(f, f1)
         assert_allclose(c, c1)
 
+    def test_unequal_length_input_1D(self):
+        """Test zero-padding for input `x.shape[axis] != y.shape[axis]` for 1d arrays.
+
+        This test ensures that issue 23036 is fixed.
+        """
+        x = np.tile([4, 0, -4, 0], 4)
+
+        kw = dict(fs=len(x), window='boxcar', nperseg=4)
+        X0 = signal.csd(x, np.copy(x), **kw)[1]  # `x is x` must be False
+        X1 = signal.csd(x, x[:8], **kw)[1]
+        X2 = signal.csd(x[:8], x, **kw)[1]
+        xp_assert_close(X1, X0 / 2)
+        xp_assert_close(X2, X0 / 2)
+
+    def test_unequal_length_input_3D(self):
+        """Test zero-padding for input `x.shape[axis] != y.shape[axis]` for 3d arrays.
+
+        This test ensures that issue 23036 is fixed.
+        """
+        n = 8
+        x = np.zeros(2 * 3 * n).reshape(2, n, 3)
+        x[:, 0, :] = n
+
+        kw = dict(fs=n, window='boxcar', nperseg=n, detrend=None, axis=1)
+        X0 = signal.csd(x, x.copy(), **kw)[1]  # `x is x` must be False
+        X1 = signal.csd(x, x[:, :2, :], **kw)[1]
+        X2 = signal.csd(x[:, :2, :], x, **kw)[1]
+        xp_assert_close(X1, X0)
+        xp_assert_close(X2, X0)
+
     def test_real_onesided_even(self):
         x = np.zeros(16)
         x[0] = 1
scipy/sparse/_base.py CHANGED
@@ -658,7 +658,7 @@ class _spbase(SparseABC):
             # eq and ne return True or False instead of an array when the shapes
             # don't match. Numpy doesn't do this. Is this what we want?
             if op in (operator.eq, operator.ne):
-                return op == operator.eq
+                return op is operator.ne
             raise ValueError("inconsistent shape")
 
         csr_self = (self if self.ndim < 3 else self.reshape(1, -1)).tocsr()
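A minimal sketch of the corrected comparison semantics, assuming the 1.16.0rc2 wheel; the matrix values are illustrative. For sparse arrays with inconsistent shapes, `==` and `!=` return a plain bool rather than an elementwise result, and after this fix `==` reports False and `!=` reports True:

    import numpy as np
    from scipy import sparse

    A = sparse.csr_array(np.array([[1, 0, 2], [0, 3, 0]]))
    print(A == A.T)   # False  (shapes (2, 3) vs. (3, 2) are inconsistent)
    print(A != A.T)   # True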
scipy/sparse/tests/test_base.py CHANGED
@@ -433,6 +433,13 @@ class _TestCommon:
         for dtype in self.checked_dtypes:
             check(dtype)
 
+    def test_eq_ne_different_shapes(self):
+        if self.datsp.format not in ['bsr', 'csc', 'csr']:
+            pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
+        # Is this what we want? numpy raises when shape differs. we return False.
+        assert (self.datsp == self.datsp.T) is False
+        assert (self.datsp != self.datsp.T) is True
+
     def test_lt(self):
         sup = suppress_warnings()
         sup.filter(SparseEfficiencyWarning)
scipy/spatial/tests/test_distance.py CHANGED
@@ -2231,6 +2231,18 @@ def test_immutable_input(metric):
     getattr(scipy.spatial.distance, metric)(x, x, w=x)
 
 
+def test_gh_23109():
+    a = np.array([0, 0, 1, 1])
+    b = np.array([0, 1, 1, 0])
+    w = np.asarray([1.5, 1.2, 0.7, 1.3])
+    expected = yule(a, b, w=w)
+    assert_allclose(expected, 1.1954022988505748)
+    actual = cdist(np.atleast_2d(a),
+                   np.atleast_2d(b),
+                   metric='yule', w=w)
+    assert_allclose(actual, expected)
+
+
 class TestJaccard:
 
     def test_pdist_jaccard_random(self):
scipy/stats/_axis_nan_policy.py CHANGED
@@ -403,17 +403,9 @@ def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
         override.update(temp)
 
     if result_to_tuple is None:
-        def result_to_tuple(res):
+        def result_to_tuple(res, _):
            return res
 
-    # The only `result_to_tuple` that needs the second argument (number of
-    # outputs) is the one for `moment`, and this was realized very late.
-    # Rather than changing all `result_to_tuple` definitions, we wrap them
-    # here to accept a second argument if they don't already.
-    if len(inspect.signature(result_to_tuple).parameters) == 1:
-        def result_to_tuple(res, _, f=result_to_tuple):
-            return f(res)
-
     if not callable(too_small):
         def is_too_small(samples, *ts_args, axis=-1, **ts_kwargs):
            for sample in samples:
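The hunk above drops the compatibility shim, so `result_to_tuple` callables are now always called with a second argument (the number of outputs). A brief sketch of that convention, with a hypothetical unpacker name:

    # hypothetical unpacker following the new two-argument convention;
    # the second argument may simply be ignored
    def _unpack_my_result(res, _):
        return res.statistic, res.pvalue

    # equivalently, inline: result_to_tuple=lambda x, _: (x,)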
scipy/stats/_correlation.py CHANGED
@@ -81,7 +81,7 @@ def _chatterjeexi_iv(y_continuous, method):
     return y_continuous, method
 
 
-def _unpack(res):
+def _unpack(res, _):
    return res.statistic, res.pvalue
 
 
scipy/stats/_entropy.py CHANGED
@@ -20,7 +20,7 @@ __all__ = ['entropy', 'differential_entropy']
         2 if ("qk" in kwgs and kwgs["qk"] is not None)
         else 1
     ),
-    n_outputs=1, result_to_tuple=lambda x: (x,), paired=True,
+    n_outputs=1, result_to_tuple=lambda x, _: (x,), paired=True,
    too_small=-1  # entropy doesn't have too small inputs
 )
 def entropy(pk: np.typing.ArrayLike,
@@ -170,7 +170,7 @@ def _differential_entropy_is_too_small(samples, kwargs, axis=-1):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,),
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,),
    too_small=_differential_entropy_is_too_small
 )
 def differential_entropy(
scipy/stats/_hypotests.py CHANGED
@@ -482,7 +482,7 @@ def _cdf_cvm(x, n=None):
     return y
 
 
-def _cvm_result_to_tuple(res):
+def _cvm_result_to_tuple(res, _):
    return res.statistic, res.pvalue
 
 
scipy/stats/_morestats.py CHANGED
@@ -222,7 +222,7 @@ def mvsdist(data):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, default_axis=None
 )
 def kstat(data, n=2, *, axis=None):
     r"""
@@ -327,7 +327,7 @@ def kstat(data, n=2, *, axis=None):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, default_axis=None
 )
 def kstatvar(data, n=2, *, axis=None):
     r"""Return an unbiased estimator of the variance of the k-statistic.
@@ -984,7 +984,7 @@ def boxcox_llf(lmb, data, *, axis=0, keepdims=False, nan_policy='propagate'):
 
 
 @_axis_nan_policy_factory(lambda x: x, n_outputs=1, default_axis=0,
-                          result_to_tuple=lambda x: (x,))
+                          result_to_tuple=lambda x, _: (x,))
 def _boxcox_llf(data, axis=0, *, lmb):
     xp = array_namespace(data)
     lmb, data = xp_promote(lmb, data, force_floating=True, xp=xp)
@@ -3496,7 +3496,7 @@ def mood(x, y, axis=0, alternative="two-sided"):
 WilcoxonResult = _make_tuple_bunch('WilcoxonResult', ['statistic', 'pvalue'])
 
 
-def wilcoxon_result_unpacker(res):
+def wilcoxon_result_unpacker(res, _):
     if hasattr(res, 'zstatistic'):
         return res.statistic, res.pvalue, res.zstatistic
     else:
@@ -3993,7 +3993,7 @@ def _circfuncs_common(samples, period, xp=None):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
     r"""Compute the circular mean of a sample of angle observations.
@@ -4086,7 +4086,7 @@ def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
     r"""Compute the circular variance of a sample of angle observations.
@@ -4180,7 +4180,7 @@ def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *,
             normalize=False):
scipy/stats/_stats_mstats_common.py CHANGED
@@ -21,7 +21,8 @@ def _n_samples_optional_x(kwargs):
 
 @_axis_nan_policy_factory(TheilslopesResult, default_axis=None, n_outputs=4,
                           n_samples=_n_samples_optional_x,
-                          result_to_tuple=tuple, paired=True, too_small=1)
+                          result_to_tuple=lambda x, _: tuple(x), paired=True,
+                          too_small=1)
 def theilslopes(y, x=None, alpha=0.95, method='separate'):
     r"""
     Computes the Theil-Sen estimator for a set of points (x, y).
@@ -204,7 +205,8 @@ def _find_repeats(arr):
 
 @_axis_nan_policy_factory(SiegelslopesResult, default_axis=None, n_outputs=2,
                           n_samples=_n_samples_optional_x,
-                          result_to_tuple=tuple, paired=True, too_small=1)
+                          result_to_tuple=lambda x, _: tuple(x), paired=True,
+                          too_small=1)
 def siegelslopes(y, x=None, method="hierarchical"):
     r"""
     Computes the Siegel estimator for a set of points (x, y).
scipy/stats/_stats_py.py CHANGED
@@ -160,7 +160,7 @@ def _pack_CorrelationResult(statistic, pvalue, correlation):
     return res
 
 
-def _unpack_CorrelationResult(res):
+def _unpack_CorrelationResult(res, _):
     return res.statistic, res.pvalue, res.correlation
 
 
@@ -168,7 +168,7 @@ def _unpack_CorrelationResult(res):
 @xp_capabilities()
 @_axis_nan_policy_factory(
     lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
-    result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+    result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
 def gmean(a, axis=0, dtype=None, weights=None):
     r"""Compute the weighted geometric mean along the specified axis.
 
@@ -252,7 +252,7 @@ def gmean(a, axis=0, dtype=None, weights=None):
 @xp_capabilities(jax_jit=False, allow_dask_compute=1)
 @_axis_nan_policy_factory(
     lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
-    result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+    result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
 def hmean(a, axis=0, dtype=None, *, weights=None):
     r"""Calculate the weighted harmonic mean along the specified axis.
 
@@ -353,7 +353,7 @@ def hmean(a, axis=0, dtype=None, *, weights=None):
 @xp_capabilities(jax_jit=False, allow_dask_compute=1)
 @_axis_nan_policy_factory(
     lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
-    result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+    result_to_tuple=lambda x, _: (x,), kwd_samples=['weights'])
 def pmean(a, p, *, axis=0, dtype=None, weights=None):
     r"""Calculate the weighted power mean along the specified axis.
 
@@ -634,7 +634,7 @@ def _put_val_to_limits(a, limits, inclusive, val=np.nan, xp=None):
 @xp_capabilities()
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def tmean(a, limits=None, inclusive=(True, True), axis=None):
     """Compute the trimmed mean.
@@ -689,7 +689,7 @@ def tmean(a, limits=None, inclusive=(True, True), axis=None):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
     """Compute the trimmed variance.
@@ -749,7 +749,7 @@ def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
     """Compute the trimmed minimum.
@@ -813,7 +813,7 @@ def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
     """Compute the trimmed maximum.
@@ -876,7 +876,7 @@ def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
     """Compute the trimmed sample standard deviation.
@@ -929,7 +929,7 @@ def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
     """Compute the trimmed standard error of the mean.
@@ -1265,7 +1265,7 @@ def _share_masks(*args, xp):
 
 @xp_capabilities(jax_jit=False, allow_dask_compute=2)
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1
 )
 # nan_policy handled by `_axis_nan_policy`, but needs to be left
 # in signature to preserve use as a positional argument
@@ -1366,7 +1366,7 @@ def skew(a, axis=0, bias=True, nan_policy='propagate'):
 
 @xp_capabilities(jax_jit=False, allow_dask_compute=2)
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1
 )
 # nan_policy handled by `_axis_nan_policy`, but needs to be left
 # in signature to preserve use as a positional argument
@@ -2602,7 +2602,7 @@ def obrientransform(*samples):
 
 @xp_capabilities()
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, too_small=1
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, too_small=1
 )
 def sem(a, axis=0, ddof=1, nan_policy='propagate'):
     """Compute standard error of the mean.
@@ -3070,7 +3070,7 @@ _scale_conversions = {'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1,
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1,
     default_axis=None, override={'nan_propagation': False}
 )
 def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
@@ -4068,7 +4068,7 @@ class AlexanderGovernResult:
 
 @_axis_nan_policy_factory(
     AlexanderGovernResult, n_samples=None,
-    result_to_tuple=lambda x: (x.statistic, x.pvalue),
+    result_to_tuple=lambda x, _: (x.statistic, x.pvalue),
     too_small=1
 )
 def alexandergovern(*samples, nan_policy='propagate', axis=0):
@@ -6027,7 +6027,7 @@ def pack_TtestResult(statistic, pvalue, df, alternative, standard_error,
                        standard_error=standard_error, estimate=estimate)
 
 
-def unpack_TtestResult(res):
+def unpack_TtestResult(res, _):
     return (res.statistic, res.pvalue, res.df, res._alternative,
             res._standard_error, res._estimate)
 
@@ -7633,7 +7633,7 @@ def _tuple_to_KstestResult(statistic, pvalue,
                         statistic_sign=statistic_sign)
 
 
-def _KstestResult_to_tuple(res):
+def _KstestResult_to_tuple(res, _):
     return *res, res.statistic_location, res.statistic_sign
 
 
@@ -10651,7 +10651,7 @@ def _pack_LinregressResult(slope, intercept, rvalue, pvalue, stderr, intercept_s
                             intercept_stderr=intercept_stderr)
 
 
-def _unpack_LinregressResult(res):
+def _unpack_LinregressResult(res, _):
     return tuple(res) + (res.intercept_stderr,)
 
 
scipy/stats/_variation.py CHANGED
@@ -7,7 +7,7 @@ from ._axis_nan_policy import _axis_nan_policy_factory
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,)
 )
 def variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False):
     """
scipy/version.py CHANGED
@@ -2,10 +2,10 @@
 """
 Module to expose more detailed version info for the installed `scipy`
 """
-version = "1.16.0rc1"
+version = "1.16.0rc2"
 full_version = version
 short_version = version.split('.dev')[0]
-git_revision = "f89e9488f1e80e6eb1982bb1a06bab88abc4b498"
+git_revision = "e0b3e3ff7842025c64b134de740680b8ba9951b9"
 release = 'dev' not in version and '+' not in version
 
 if not release:
scipy-1.16.0rc1.dist-info/LICENSE.txt → scipy-1.16.0rc2.dist-info/LICENSE.txt CHANGED
@@ -39,7 +39,7 @@ Name: OpenBLAS
 Files: scipy.libs/libscipy_openblas*.so
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause-Attribution
+License: BSD-3-Clause
   Copyright (c) 2011-2014, The OpenBLAS Project
   All rights reserved.
 
@@ -75,7 +75,7 @@ Name: LAPACK
 Files: scipy.libs/libscipy_openblas*.so
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause-Attribution
+License: BSD-3-Clause-Open-MPI
   Copyright (c) 1992-2013 The University of Tennessee and The University
                           of Tennessee Research Foundation. All rights
                           reserved.
@@ -130,7 +130,7 @@ Name: GCC runtime library
 Files: scipy.libs/libgfortran*.so
 Description: dynamically linked to files compiled with gcc
 Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
-License: GPL-3.0-with-GCC-exception
+License: GPL-3.0-or-later WITH GCC-exception-3.1
   Copyright (C) 2002-2017 Free Software Foundation, Inc.
 
   Libgfortran is free software; you can redistribute it and/or modify
scipy-1.16.0rc1.dist-info/METADATA → scipy-1.16.0rc2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: scipy
-Version: 1.16.0rc1
+Version: 1.16.0rc2
 Summary: Fundamental algorithms for scientific computing in Python
 Maintainer-Email: SciPy Developers <scipy-dev@python.org>
 License: Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
@@ -44,7 +44,7 @@ License: Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
         Files: scipy.libs/libscipy_openblas*.so
         Description: bundled as a dynamically linked library
         Availability: https://github.com/OpenMathLib/OpenBLAS/
-        License: BSD-3-Clause-Attribution
+        License: BSD-3-Clause
         Copyright (c) 2011-2014, The OpenBLAS Project
         All rights reserved.
 
@@ -80,7 +80,7 @@ License: Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
         Files: scipy.libs/libscipy_openblas*.so
         Description: bundled in OpenBLAS
         Availability: https://github.com/OpenMathLib/OpenBLAS/
-        License: BSD-3-Clause-Attribution
+        License: BSD-3-Clause-Open-MPI
         Copyright (c) 1992-2013 The University of Tennessee and The University
                                 of Tennessee Research Foundation. All rights
                                 reserved.
@@ -135,7 +135,7 @@ License: Copyright (c) 2001-2002 Enthought, Inc. 2003, SciPy Developers.
         Files: scipy.libs/libgfortran*.so
         Description: dynamically linked to files compiled with gcc
         Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
-        License: GPL-3.0-with-GCC-exception
+        License: GPL-3.0-or-later WITH GCC-exception-3.1
         Copyright (C) 2002-2017 Free Software Foundation, Inc.
 
         Libgfortran is free software; you can redistribute it and/or modify
scipy-1.16.0rc1.dist-info/RECORD → scipy-1.16.0rc2.dist-info/RECORD CHANGED
@@ -1,9 +1,9 @@
 scipy/__config__.py,sha256=g8l73aCA3tcHZ3PJjbLTPhzxbnmQXaOaXoHzv6L2nE8,5229
 scipy/__init__.py,sha256=pyQSpcYkQoduBzoB2hkQyEtyFNE4I4C3Goc7RwQg3Wg,4063
-scipy/_cyutility.cpython-313t-aarch64-linux-gnu.so,sha256=uxt-vvZx77NHEJiem-cGCQ8lV2ldmAmHujzFEIIkBeU,229704
+scipy/_cyutility.cpython-313t-aarch64-linux-gnu.so,sha256=DojXU2jj42yYW9Q_QlVTGCxDTqhFXJWWPPSCgwGeLZM,229704
 scipy/_distributor_init.py,sha256=zJThN3Fvof09h24804pNDPd2iN-lCHV3yPlZylSefgQ,611
 scipy/conftest.py,sha256=KCWKNGR_NypEJx4vM5FGN7PEepE87cob4Kh7dAjpG5s,26272
-scipy/version.py,sha256=I_00CrnwtEBX0cRV9hTq0nMzCau_MMR8juywqWTirAE,321
+scipy/version.py,sha256=4VHXVkCNQuJkllVz9r4-D4qherAjcRLNRXHFmYg4I9w,321
 scipy/_lib/__init__.py,sha256=CXrH_YBpZ-HImHHrqXIhQt_vevp4P5NXClp7hnFMVLM,353
 scipy/_lib/_array_api.py,sha256=oG1O6R_3DoFSaA2ZevUGoSaNtDXJMLVijiZ-KZn2LHQ,34594
 scipy/_lib/_array_api_compat_vendor.py,sha256=H8MxZuHSs4TtWXgfEUs0_y0BQy57j6rX330DVashpZ4,393
@@ -683,7 +683,7 @@ scipy/ndimage/__init__.py,sha256=KUbDfnLPN7B-U650Nh-XVf7fQ0bW8vkEDPssG8oZsi4,517
 scipy/ndimage/_ctest.cpython-313t-aarch64-linux-gnu.so,sha256=KlQ01dEH5y0QzYZmuK3jKKOKw_7uIb60B4Nhcozqkz0,70880
 scipy/ndimage/_cytest.cpython-313t-aarch64-linux-gnu.so,sha256=6Dv0_hDbZ0JLzT4u31_pwIMOVVYcfUZJOR540463auk,150536
 scipy/ndimage/_delegators.py,sha256=EI2Xsmw6GDL8MnLeQYZ6uK9dVkMv01WOd1fZTLS_BrU,9410
-scipy/ndimage/_filters.py,sha256=x8nj_Hhi_Ldjl06alLYDH54k3PP34sv1gyXR53mkhlE,90748
+scipy/ndimage/_filters.py,sha256=yk-opbujP4xB-9z_9GZ035sVgbcB4PXdAi0Xq4i5XFs,91742
 scipy/ndimage/_fourier.py,sha256=SoAYRx7ax7Tv51MyYzDlZ3fN682x4T6N8yReX2La4-I,11266
 scipy/ndimage/_interpolation.py,sha256=KKQMixU4VgfEprLNPUeLWNvzfBBJ_nhr9bnJYT3o7Nc,37740
 scipy/ndimage/_measurements.py,sha256=MCdbyKlILgfmne0qFFOAFCKv-oWqqnEmb83iG8Tlwuk,56248
@@ -704,7 +704,7 @@ scipy/ndimage/tests/__init__.py,sha256=GbIXCsLtZxgmuisjxfFsd3pj6-RQhmauc6AVy6syb
 scipy/ndimage/tests/dots.png,sha256=sgtW-tx0ccBpTT6BSNniioPXlnusFr-IUglK_qOVBBQ,2114
 scipy/ndimage/tests/test_c_api.py,sha256=7Gv-hR91MWpiGQ32yjXIBjFytuaYLqz3wYiCXcC8ZSk,3738
 scipy/ndimage/tests/test_datatypes.py,sha256=TYMiGyBcdOq3KVLzvjZPjerD1EXonyHFQYBLTWDwN7o,2819
-scipy/ndimage/tests/test_filters.py,sha256=xHXSMO_GO30aSCVgxrAimLSbTX4OtxFg3Mm6Dwuw11o,129649
+scipy/ndimage/tests/test_filters.py,sha256=hzFSE1befzDDiN36ws88p51_MgkiRpFHpIpM4k0Hyew,131187
 scipy/ndimage/tests/test_fourier.py,sha256=BDKXgdV5wCnd7MIkvQ_Fnk8fQ7163mJBtFv5Zod16f4,7618
 scipy/ndimage/tests/test_interpolation.py,sha256=mEq534rYzoVdfZ4fbiErDTWkzc_ntQDk7QVp7S_Ds0M,61116
 scipy/ndimage/tests/test_measurements.py,sha256=LYERZh0uIM3HRkYoK1Njq_h-nAByW2NrFxjayH0UCP0,58418
@@ -858,7 +858,7 @@ scipy/optimize/tests/test_linear_assignment.py,sha256=-IGbiBidLNWAgMo3LBsa1ak8v_
 scipy/optimize/tests/test_linesearch.py,sha256=xmK2zvgIbLMOWkb2B1ALBWiPHQyGGxzDG0MXaHjNlqA,11400
 scipy/optimize/tests/test_linprog.py,sha256=VWOkH9vfeXFBSAzDYaaGQ-0BMpY6x15uHb-Bc-So0UQ,102695
 scipy/optimize/tests/test_lsq_common.py,sha256=alCLPPQB4mrxLIAo_rn7eg9xrCEH7DerNBozSimOQRA,9500
-scipy/optimize/tests/test_lsq_linear.py,sha256=mPf3nqoH0EGclQWGm-LO0qGvhFIVxmvdexTIfr_H2QI,10984
+scipy/optimize/tests/test_lsq_linear.py,sha256=uVFSH6MFBg6JfqfA2Mrl3_Wzr2Aiyc6i0Y3kmnxrQyg,10974
 scipy/optimize/tests/test_milp.py,sha256=V4KeW9Z3CfCvCk_NT88yqvw9E_t2r-aIq-yJFwVIaWY,18302
 scipy/optimize/tests/test_minimize_constrained.py,sha256=avT1wMWHBXQnezIGrhq96k4R41qSXaJRB2dObeb5QAI,27940
 scipy/optimize/tests/test_minpack.py,sha256=H73NNF83gZBBX18Iw6uEev2GCnXFNafSCfwLoZhWamg,44845
@@ -894,7 +894,7 @@ scipy/signal/_signal_api.py,sha256=wp3qv0vBhANNCCq1S--VUWmHRdEDy5u5obbNDNUk_Cw,1
 scipy/signal/_signaltools.py,sha256=TWX2tIdmEsYdXE5oUFysM62jUKy20uNFZavWsdeCEuw,192737
 scipy/signal/_sigtools.cpython-313t-aarch64-linux-gnu.so,sha256=fG_9YrrvJkSrD04wojHmNXRYiPD61F_mOo5zm_xEQiA,143520
 scipy/signal/_sosfilt.cpython-313t-aarch64-linux-gnu.so,sha256=TIhgoBLclEd6S9ZTHrxTzgSOUjUd2SSP8pv9Jif3Q3k,159864
-scipy/signal/_spectral_py.py,sha256=mnoZ1aMfHD86o2012jwizg9wwBfshv1O0DkMdquJMCY,95620
+scipy/signal/_spectral_py.py,sha256=W0n2dT99wTc50fwxhTMCa2XL5tgYXxCqnPv734UPj44,96053
 scipy/signal/_spline.cpython-313t-aarch64-linux-gnu.so,sha256=0do6BCGUkinER0WcK8PCJzBZRL_uARwMebsm4PtihsI,77336
 scipy/signal/_spline.pyi,sha256=9tWZQCI7D84ONLwICZG6psBGtwKxAvLF7JaZ1tQUKoY,948
 scipy/signal/_spline_filters.py,sha256=meTSSe0pIRnutEufLvxJNWPpxbLEFhLnv8cenALplnI,25528
@@ -930,7 +930,7 @@ scipy/signal/tests/test_result_type.py,sha256=F48EQGbFfQfMwcnt-sMofHGNHVTbHntbMl
 scipy/signal/tests/test_savitzky_golay.py,sha256=afOF6B97cKQVR68D_u3NZdF6D0IvUFgmd_EzVZdk-C8,12470
 scipy/signal/tests/test_short_time_fft.py,sha256=pk_7fVP-ABL3PtLa6YYPl2lUMDjoszrF8HExrfCiTd8,47476
 scipy/signal/tests/test_signaltools.py,sha256=ZxiDH8kFjiKoNqQXYTqosjO7y65QpZs94yDt_fKExH4,190777
-scipy/signal/tests/test_spectral.py,sha256=EVDRvms0MNKrwHoO8NrxBpeF7q3XmdnQUUUDinOQE5Y,79345
+scipy/signal/tests/test_spectral.py,sha256=XVxAjs36AcMbCd_9Ekb5Vl2Ly4h99LEtUaAVMkIOPjs,80514
 scipy/signal/tests/test_splines.py,sha256=dP9Ua8FGgw_z_GUxGd_AxKAbH62Y0rvrGabVJwH9SMA,17078
 scipy/signal/tests/test_upfirdn.py,sha256=utXj0C32iwg_N3XPs32EGLEuQp4_YPCCUKB6_AzMQQQ,12602
 scipy/signal/tests/test_waveforms.py,sha256=HfyUh2X65Qfv0qNLGOk99XAtwy8ELEcIDJnCCoHR6WY,13554
@@ -940,7 +940,7 @@ scipy/signal/windows/__init__.py,sha256=BUSXzc_D5Agp59RacDdG6EE9QjkXXtlcfQrTop_I
 scipy/signal/windows/_windows.py,sha256=2Tw5gwXH5wp6RSj8bBbfLJc3zl6PBPTWZgCv0Oxu48Q,89501
 scipy/signal/windows/windows.py,sha256=FI6w8mt0V1221Rqv3Do3LuWRWrtKo3hYYTvpB_5UB1c,839
 scipy/sparse/__init__.py,sha256=7sYqDxLcEsW7lw3H19wOGn6m1KAysKQ8gn-l9oPyL7M,9950
-scipy/sparse/_base.py,sha256=EVaUh-oNnkZjSIfhHO2ycCTVJH6lai30HHslqdIby8c,58446
+scipy/sparse/_base.py,sha256=w-q3swuWQU9lWq_iNXYM1-Qm10-NAK_Ft-8Rqx7YpLM,58446
 scipy/sparse/_bsr.py,sha256=0OruL9evP-j9W_fKIkRM3zcc390c98iaUOI_mNsQzN8,30957
 scipy/sparse/_compressed.py,sha256=JJn2BE_li1dkeo5PtOR7GRd5c6JWcAw5UmxN9mlAgK4,51673
 scipy/sparse/_construct.py,sha256=iZSnx2FCeeJCOG98FULiMRt1rreane8b7hoXzpF9TA0,49620
@@ -1063,7 +1063,7 @@ scipy/sparse/linalg/tests/test_special_sparse_arrays.py,sha256=2Z7r1LPx7QTekuXNT
 scipy/sparse/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scipy/sparse/tests/test_arithmetic1d.py,sha256=4woi2qefAsFXR94Tjj_rlvOfdI-e_Vl6wGAFFw01HkI,11984
 scipy/sparse/tests/test_array_api.py,sha256=U8TBj4ZJ5Bc6sOsJ6Q8HgnGBhGJK-sLXS1QD_9pK-4c,14201
-scipy/sparse/tests/test_base.py,sha256=b0Li4n7NBPSFfGzS7lQrOelI73OBnekyudzqovBMHks,219941
+scipy/sparse/tests/test_base.py,sha256=r_WRjILr9FPPf1oApMvpQLuPqJ5zFgt0FyQ7PZnuNV0,220315
 scipy/sparse/tests/test_common1d.py,sha256=q1LHzO7HzGulvFrJCren3Vy3RMPXZNxO8aSxq68MUb8,15471
 scipy/sparse/tests/test_construct.py,sha256=lX0Yo17OkR2AwAYvlJvbjpHHOHdXNlLOCaPPJs_EcpE,38434
 scipy/sparse/tests/test_coo.py,sha256=v6mdl7NCT-hOm_ouIbWchdz13V5ssP40LuHCSVgHCzw,39632
@@ -1081,7 +1081,7 @@ scipy/sparse/tests/data/csc_py2.npz,sha256=usJ_Gj6x_dEC2uObfdYc6D6C8JY4jjROFChQc
 scipy/sparse/tests/data/csc_py3.npz,sha256=axuEMVxwd0F-cgUS0IalpiF8KHW4GNJ3BK6bcjfGnf4,851
 scipy/spatial/__init__.py,sha256=-FVg_WjbK0J0U2kyei6Fz6NgqEso5cipWZ5gHnqjErs,3731
 scipy/spatial/_ckdtree.cpython-313t-aarch64-linux-gnu.so,sha256=ad4rSJT8_6qEt49qFg2LyDLAzXxR8uWvgy-97cObLXM,778704
-scipy/spatial/_distance_pybind.cpython-313t-aarch64-linux-gnu.so,sha256=CT8XybfJkCZyX8x7Md9YpWonseA2ZEL_x6LJlLS8I70,650632
+scipy/spatial/_distance_pybind.cpython-313t-aarch64-linux-gnu.so,sha256=6Lk5ng_IkBXAv_LwQZmyB4aBKU5KsgqR4eXYTOfkkj8,650584
 scipy/spatial/_distance_wrap.cpython-313t-aarch64-linux-gnu.so,sha256=yNSVv8IvVbepKKVheVYBw58agAR0Hh2kboQeYRTllxk,142456
 scipy/spatial/_geometric_slerp.py,sha256=d3pavtaMuIIKjupWLwFLt7WrfqvtT18u7wcsBdnuOTs,7951
 scipy/spatial/_hausdorff.cpython-313t-aarch64-linux-gnu.so,sha256=HU6d6ttEwBhDauKjuKh249Pm03xa-wVn7DnHH3TgSwo,152232
@@ -1101,7 +1101,7 @@ scipy/spatial/qhull.py,sha256=aFE-KscuINt6QIhFC2dqhwFCYu3HSBkVXDH5exHH71s,622
 scipy/spatial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scipy/spatial/tests/test__plotutils.py,sha256=fASbg0i7iLiJIEj5vIkiDuTq3wU0z3mKJY019kzKrFk,3814
 scipy/spatial/tests/test__procrustes.py,sha256=wmmnUHRdw_oID0YLi404IEWPH6vEGhvHXSeGPY_idHo,4974
-scipy/spatial/tests/test_distance.py,sha256=AEno6aruOowVEsrBL0KEE--6Inj9U9c84UJDYgnGYsM,87973
+scipy/spatial/tests/test_distance.py,sha256=JcHbc8SbO1AM6jriyj9bkhQgqt81_6MByoUsaUrjIDA,88331
 scipy/spatial/tests/test_hausdorff.py,sha256=XcDEzwFuOR9BaLegIj-DPp5GrAi_RsvcW8oGqJf0xkg,8217
 scipy/spatial/tests/test_kdtree.py,sha256=dlSaXMAIXFS73SMM2Vl9UPEe8Vtbyyiz69zmdb8ddYA,49340
 scipy/spatial/tests/test_qhull.py,sha256=wf_jw289-0zv-fJmD8nk7cd68yoG8VE95My336NTovU,50183
@@ -1269,7 +1269,7 @@ scipy/special/tests/data/gsl.npz,sha256=y_Gv3SeZmAanECeZEKLrL59_VZAzx-y3lt6qEMRP
 scipy/special/tests/data/local.npz,sha256=bCnljOgnCE-E258bupYEWmHOafHT6j18gop5wTPPiPI,203438
 scipy/stats/__init__.py,sha256=mUrEnW9fiJ4memdw1jH_gSuar39XNq4IztecOF64a6g,18746
 scipy/stats/_ansari_swilk_statistics.cpython-313t-aarch64-linux-gnu.so,sha256=-FS-yUMkJhrhwbecpOroRuYUQds9-rkd27A2Pl7VWsw,155984
-scipy/stats/_axis_nan_policy.py,sha256=s-ijEpr2oLnQbdQPmmcR4j6eVen2h_L4ZG_VJ2n1uaE,31811
+scipy/stats/_axis_nan_policy.py,sha256=Iy98OVtoOmzhF3f4mFFcz1AbUomigpifHv47pUM8Sz8,31382
 scipy/stats/_biasedurn.cpython-313t-aarch64-linux-gnu.so,sha256=2tfL6-OUoTstbKVQHw3ONkAiHerqKdGUPbt_2xcCFhg,349584
 scipy/stats/_biasedurn.pxd,sha256=bQC6xG4RH1E5h2jCKXRMADfgGctiO5TgNlJegKrR7DY,1046
 scipy/stats/_binned_statistic.py,sha256=ATvrikTtX6zW8FKbjpV7O7IvAKSCBBLQSH1JKFR9R7Q,32702
@@ -1280,22 +1280,22 @@ scipy/stats/_common.py,sha256=4RqXT04Knp1CoOJuSBV6Uy_XmcmtVr0bImAbSk_VHlQ,172
 scipy/stats/_constants.py,sha256=mBeJgvWcDZBmPFStDNEjlzeZY3aMDMCHWoj7dCmgugQ,1002
 scipy/stats/_continued_fraction.py,sha256=2WyLuQWsx9aIHkYvTE4_VlepAfSKG4otiu_Y5wYbzKA,15508
 scipy/stats/_continuous_distns.py,sha256=G3AK1Yo64IVm19JOIzvxvq-DZXIBMbTPInfoagbHu_A,406271
-scipy/stats/_correlation.py,sha256=TKenq2UmJ6gMligjczL1nTIXgUShprfYyBc23lhTCuo,7911
+scipy/stats/_correlation.py,sha256=kj9EhgPYOnqwQkEgTwdj67iYEwDsntKgcUtQElgQpk0,7914
 scipy/stats/_covariance.py,sha256=SLFFrCly5UPu0d-nn2P_U-jdI71qa3w6AGVMnDsxvi0,22660
 scipy/stats/_crosstab.py,sha256=djdU7xCQ-513VlxFEOvLN8oaY4QyUPHDJHWlilhyEVA,7351
 scipy/stats/_discrete_distns.py,sha256=LZ_MakDbm14ygu24l-BqWT9k41lSptVu9OVq91bQ2K0,65473
 scipy/stats/_distn_infrastructure.py,sha256=AjUhOgqm-_R_3leAsqowo_hs0GZvvGg0jaLc7LbICC4,152345
 scipy/stats/_distr_params.py,sha256=bD2Sdq0etEh0NYfi3-vFM-C7PevQfH0dRLbNnXeOtYY,9052
 scipy/stats/_distribution_infrastructure.py,sha256=D8JiWabCT6gYL4uUtpufiAFtWuUXZTpGsfdrvo8KBsE,233033
-scipy/stats/_entropy.py,sha256=hzsUIk5EcOIe2lddUwYN2GKy37urRhRUTSdOs4_l1wo,15762
+scipy/stats/_entropy.py,sha256=lT10WPcnWF23Z9hsiY6cC82aC0MPMXFGu27dfsan5Tc,15768
 scipy/stats/_finite_differences.py,sha256=QaA5p36T0oDt4e_oMOE3QGBT8gB2C3E3ziZSWkkBF9g,4168
 scipy/stats/_fit.py,sha256=PmLg5oE25gnOIHVV-4U-nfUEsKdfgac4M9OaBSjKrow,59747
-scipy/stats/_hypotests.py,sha256=aqtaY8-iETkI-37d-80wRZYhKpIcA-AYt30FPzQdy5Y,81343
+scipy/stats/_hypotests.py,sha256=8wEEnCrNIs9Mroff0cCdbgzaUat4-kcPxOAGqLJ2rN0,81346
 scipy/stats/_kde.py,sha256=eLh5TP8UDJyKqQlx3-q27UyLvSibimDooZNPpcKLhDI,25678
 scipy/stats/_ksstats.py,sha256=8Oo_0BAAZnDkLgckkySAFGxUo51ksnDADzkBe4RdkmU,20140
 scipy/stats/_mannwhitneyu.py,sha256=LQII0f5CF4-OfWXqBuP4uPjNJ8IuVgPp04itqacy1EA,19330
 scipy/stats/_mgc.py,sha256=iImSUbFmYh_7Ouap70PFP6O6CVpUylf5y44z33j3obg,21359
-scipy/stats/_morestats.py,sha256=LbSF2NnQk4AVMnyOh4noNH9j5R1bQstASB-aVbOK3p8,172424
+scipy/stats/_morestats.py,sha256=kJp4WGWU7Nkrk_96-ZW8c5hbu0FfFih2DWk95d_MnMQ,172445
 scipy/stats/_mstats_basic.py,sha256=Thh1IkZUX3HwIumwUvN4SSLIsEGYTkv3hWysLufoEE4,122909
 scipy/stats/_mstats_extras.py,sha256=0LL3I-tOG17fI5CKPBK7a8e5-yrgX4XLjfsHOs5MMQs,16362
 scipy/stats/_multicomp.py,sha256=x9XBSCbTWl4V-hUZ_YaMYZ5smpE95qBCUic6yYygnpA,16836
@@ -1320,12 +1320,12 @@ scipy/stats/_sobol.pyi,sha256=TAywylI75AF9th9QZY8TYfHvIQ1cyM5QZi7eBOAkrbg,971
 scipy/stats/_sobol_direction_numbers.npz,sha256=SFmTEUfULORluGBcsnf5V9mLg50DGU_fBleTV5BtGTs,589334
 scipy/stats/_stats.cpython-313t-aarch64-linux-gnu.so,sha256=pupGK8pL9WHtnVsUSigXEF8Lj62ngqwHXAJXbBBYXX0,512256
 scipy/stats/_stats.pxd,sha256=T_7IrDqgIahKMECV5WAtxtsoV91XBVRM359kAXPIhww,709
-scipy/stats/_stats_mstats_common.py,sha256=BQwTh0AaQIX8dU1LRkCLK3KdCJZwr3wSdNTW791HXaw,12388
-scipy/stats/_stats_py.py,sha256=hPkEDf_ZF8mJ0oSzgdxIhNSNMZUZ9K70BX92-Xp9ifM,422733
+scipy/stats/_stats_mstats_common.py,sha256=f9B_XmuN2OTZei2CpWQnrvHO_rcdfdBXsvQgByISY4o,12472
+scipy/stats/_stats_py.py,sha256=bpc12XHfXxzyoBG0-Q_-ofothsCPevZB5kb6gedhPsQ,422787
 scipy/stats/_stats_pythran.cpython-313t-aarch64-linux-gnu.so,sha256=9DPaXeuaEq9m6QJ7VLPuNDd6SHHITiIHe23-D-OxTrA,165400
 scipy/stats/_survival.py,sha256=JexV_eUz0H_2QSwpido_M_LJr4mkODmhHVwjzFXjgj8,25939
 scipy/stats/_tukeylambda_stats.py,sha256=eodvo09rCVfcYa1Uh6BKHKvXyY8K5Zg2uGQX1phQ6Ew,6871
-scipy/stats/_variation.py,sha256=-skdZO1EjedA2PLWqkQVyFMAC-4SsTHyeE6XEMDPb98,4562
+scipy/stats/_variation.py,sha256=qUg6GOwwPytwDjpK5av_t-71U2UijPfST1J-QLbGgi4,4565
 scipy/stats/_warnings_errors.py,sha256=MpucxNFYEDytXh7vrZCMqTkRfuXTvvMpQ2W_Ak2OnPk,1196
 scipy/stats/_wilcoxon.py,sha256=1Biio5qRv9hhxE9cC6_2luBwCcaMS31tN5fJgAOtxQ8,9507
 scipy/stats/biasedurn.py,sha256=ECfilE4KrIhU2sK-KWtr8yxqthfVsyz_-o4F2TnMXU4,431
@@ -1409,7 +1409,7 @@ scipy/stats/tests/data/nist_linregress/Norris.dat,sha256=zD_RTRxfqJHVZTAAyddzLDD
 scipy.libs/libgfortran-daac5196-038a5e3c.so.5.0.0,sha256=jBP0S0Tk3QAFYy6kFz9fUBTSgp1QqhKyBUA0KXwH8jM,1671529
 scipy.libs/libgfortran-daac5196.so.5.0.0,sha256=c7Btx9EsXU43_oumeqmrRAu3lOea3_WK6JSz1FfnG9c,1484449
 scipy.libs/libscipy_openblas-9778f98e.so,sha256=cuV_hdBpKSghnUcWu0h0mWQFPvRFgksWDxtz3gkKD3Q,24715465
-scipy-1.16.0rc1.dist-info/LICENSE.txt,sha256=idHb33WU4v8pvbr-MWW4rRmaAizPEbxUrCEKpII3TUw,46840
-scipy-1.16.0rc1.dist-info/METADATA,sha256=zjgAv60fntC6MVTR2w6nbwV39s6w-aypyGl0GnnxFBg,61950
-scipy-1.16.0rc1.dist-info/WHEEL,sha256=jN5JNAg89R8qmE6EYw_AdRdGdSeOMflVbeU-jIdKDPs,141
-scipy-1.16.0rc1.dist-info/RECORD,,
+scipy-1.16.0rc2.dist-info/LICENSE.txt,sha256=Ta8U43Qy5wJhZZeey3o5kwcHo-ib70h2z6-drTbtNko,46838
+scipy-1.16.0rc2.dist-info/METADATA,sha256=w2Bt4VYI6_LYLka9Lr1XD_GDaVirm7vTFFc0SfLZ9b8,61948
+scipy-1.16.0rc2.dist-info/WHEEL,sha256=jN5JNAg89R8qmE6EYw_AdRdGdSeOMflVbeU-jIdKDPs,141
+scipy-1.16.0rc2.dist-info/RECORD,,