scipy-1.16.0rc1-cp313-cp313t-macosx_14_0_x86_64.whl → scipy-1.16.0rc2-cp313-cp313t-macosx_14_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scipy/__config__.py +2 -2
- scipy/_cyutility.cpython-313t-darwin.so +0 -0
- scipy/cluster/_hierarchy.cpython-313t-darwin.so +0 -0
- scipy/cluster/_optimal_leaf_ordering.cpython-313t-darwin.so +0 -0
- scipy/cluster/_vq.cpython-313t-darwin.so +0 -0
- scipy/fftpack/convolve.cpython-313t-darwin.so +0 -0
- scipy/integrate/_dop.cpython-313t-darwin.so +0 -0
- scipy/integrate/_lsoda.cpython-313t-darwin.so +0 -0
- scipy/integrate/_test_odeint_banded.cpython-313t-darwin.so +0 -0
- scipy/integrate/_vode.cpython-313t-darwin.so +0 -0
- scipy/interpolate/_dfitpack.cpython-313t-darwin.so +0 -0
- scipy/interpolate/_rgi_cython.cpython-313t-darwin.so +0 -0
- scipy/io/_test_fortran.cpython-313t-darwin.so +0 -0
- scipy/io/matlab/_mio5_utils.cpython-313t-darwin.so +0 -0
- scipy/io/matlab/_mio_utils.cpython-313t-darwin.so +0 -0
- scipy/linalg/_decomp_interpolative.cpython-313t-darwin.so +0 -0
- scipy/linalg/_decomp_lu_cython.cpython-313t-darwin.so +0 -0
- scipy/linalg/_decomp_update.cpython-313t-darwin.so +0 -0
- scipy/linalg/_fblas.cpython-313t-darwin.so +0 -0
- scipy/linalg/_flapack.cpython-313t-darwin.so +0 -0
- scipy/ndimage/_cytest.cpython-313t-darwin.so +0 -0
- scipy/ndimage/_filters.py +41 -21
- scipy/ndimage/_ni_label.cpython-313t-darwin.so +0 -0
- scipy/ndimage/tests/test_filters.py +42 -9
- scipy/optimize/_bglu_dense.cpython-313t-darwin.so +0 -0
- scipy/optimize/_moduleTNC.cpython-313t-darwin.so +0 -0
- scipy/optimize/_trlib/_trlib.cpython-313t-darwin.so +0 -0
- scipy/optimize/tests/test_lsq_linear.py +1 -1
- scipy/signal/_peak_finding_utils.cpython-313t-darwin.so +0 -0
- scipy/signal/_sosfilt.cpython-313t-darwin.so +0 -0
- scipy/signal/_spectral_py.py +9 -0
- scipy/signal/_upfirdn_apply.cpython-313t-darwin.so +0 -0
- scipy/signal/tests/test_spectral.py +31 -0
- scipy/sparse/_base.py +1 -1
- scipy/sparse/_csparsetools.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_flow.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_matching.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_min_spanning_tree.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_reordering.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_shortest_path.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_tools.cpython-313t-darwin.so +0 -0
- scipy/sparse/csgraph/_traversal.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_propack/_cpropack.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_propack/_dpropack.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_propack/_spropack.cpython-313t-darwin.so +0 -0
- scipy/sparse/linalg/_propack/_zpropack.cpython-313t-darwin.so +0 -0
- scipy/sparse/tests/test_base.py +7 -0
- scipy/spatial/_ckdtree.cpython-313t-darwin.so +0 -0
- scipy/spatial/_distance_pybind.cpython-313t-darwin.so +0 -0
- scipy/spatial/_hausdorff.cpython-313t-darwin.so +0 -0
- scipy/spatial/_qhull.cpython-313t-darwin.so +0 -0
- scipy/spatial/_voronoi.cpython-313t-darwin.so +0 -0
- scipy/spatial/tests/test_distance.py +12 -0
- scipy/spatial/transform/_rigid_transform.cpython-313t-darwin.so +0 -0
- scipy/spatial/transform/_rotation.cpython-313t-darwin.so +0 -0
- scipy/special/_ellip_harm_2.cpython-313t-darwin.so +0 -0
- scipy/special/_specfun.cpython-313t-darwin.so +0 -0
- scipy/special/_ufuncs.cpython-313t-darwin.so +0 -0
- scipy/special/_ufuncs_cxx.cpython-313t-darwin.so +0 -0
- scipy/stats/_ansari_swilk_statistics.cpython-313t-darwin.so +0 -0
- scipy/stats/_axis_nan_policy.py +1 -9
- scipy/stats/_biasedurn.cpython-313t-darwin.so +0 -0
- scipy/stats/_correlation.py +1 -1
- scipy/stats/_entropy.py +2 -2
- scipy/stats/_hypotests.py +1 -1
- scipy/stats/_morestats.py +7 -7
- scipy/stats/_qmc_cy.cpython-313t-darwin.so +0 -0
- scipy/stats/_rcont/rcont.cpython-313t-darwin.so +0 -0
- scipy/stats/_sobol.cpython-313t-darwin.so +0 -0
- scipy/stats/_stats.cpython-313t-darwin.so +0 -0
- scipy/stats/_stats_mstats_common.py +4 -2
- scipy/stats/_stats_py.py +18 -18
- scipy/stats/_unuran/unuran_wrapper.cpython-313t-darwin.so +0 -0
- scipy/stats/_variation.py +1 -1
- scipy/version.py +2 -2
- {scipy-1.16.0rc1.dist-info → scipy-1.16.0rc2.dist-info}/LICENSE.txt +3 -3
- {scipy-1.16.0rc1.dist-info → scipy-1.16.0rc2.dist-info}/METADATA +4 -4
- {scipy-1.16.0rc1.dist-info → scipy-1.16.0rc2.dist-info}/RECORD +80 -80
- {scipy-1.16.0rc1.dist-info → scipy-1.16.0rc2.dist-info}/WHEEL +0 -0
scipy/__config__.py
CHANGED
@@ -59,7 +59,7 @@ CONFIG = _cleanup(
             },
             "pythran": {
                 "version": r"0.18.0",
-                "include directory": r"../../../../../../private/var/folders/
+                "include directory": r"../../../../../../private/var/folders/vk/nx37ffx50hv5djclhltc26vw0000gn/T/cibw-run-vzyixur7/cp313t-macosx_x86_64/build/venv/lib/python3.13t/site-packages/pythran"
             },
         },
         "Machine Information": {
@@ -106,7 +106,7 @@ CONFIG = _cleanup(
             },
         },
         "Python Information": {
-            "path": r"/private/var/folders/
+            "path": r"/private/var/folders/vk/nx37ffx50hv5djclhltc26vw0000gn/T/cibw-run-vzyixur7/cp313t-macosx_x86_64/build/venv/bin/python",
             "version": "3.13",
         },
     }
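The two changed values above are build-time paths recorded in the wheel's config module; they only affect what introspection reports, not runtime behaviour. A quick way to inspect them from an installed wheel (standard SciPy API, shown here as a usage sketch):

import scipy

# Prints the CONFIG dict assembled in scipy/__config__.py, including the
# pythran "include directory" and the "Python Information" path edited above.
scipy.show_config()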
20 binary files changed (compiled extension modules; contents not shown)
scipy/ndimage/_filters.py
CHANGED
@@ -68,12 +68,23 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
     if size is not None and footprint is not None:
         raise ValueError("Either `size` or `footprint` may be provided, not both.")
 
-
-
+    if axes is None:
+        axes = tuple(range(-input.ndim, 0))
+    elif np.isscalar(axes):
+        axes = (axes,)
+    n_axes = len(axes)
+    n_batch = input.ndim - n_axes
+
+    if n_axes > input.ndim:
+        message = ("The length of `axes` may not exceed the dimensionality of `input`"
+                   "(`input.ndim`).")
+        raise ValueError(message)
+
+    # Either footprint or size must be provided
     footprinted_function = function
     if size is not None:
         # If provided, size must be an integer or tuple of integers.
-        size = (size,)*
+        size = (size,)*n_axes if np.isscalar(size) else tuple(size)
         valid = [xp.isdtype(xp.asarray(i).dtype, 'integral') and i > 0 for i in size]
         if not all(valid):
             raise ValueError("All elements of `size` must be positive integers.")
@@ -84,13 +95,10 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
         def footprinted_function(input, *args, axis=-1, **kwargs):
             return function(input[..., footprint], *args, axis=-1, **kwargs)
 
-
-
-
-
-    if n_axes > input.ndim:
-        message = ("The dimensionality of the window (`len(size)` or `footprint.ndim`) "
-                   "may not exceed the number of axes of `input` (`input.ndim`).")
+    # And by now, the dimensionality of the footprint must equal the number of axes
+    if n_axes != len(size):
+        message = ("`axes` must be compatible with the dimensionality "
+                   "of the window specified by `size` or `footprint`.")
         raise ValueError(message)
 
     # If this is not *equal* to the dimensionality of `input`, then `axes`
@@ -101,9 +109,10 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
                    "(`len(size)` or `footprint.ndim`) does not equal the number "
                    "of axes of `input` (`input.ndim`).")
         raise ValueError(message)
-        axes = (axes,) if np.isscalar(axes) else axes
     else:
-        axes = tuple(range(-n_axes, 0))
+        axes = tuple(range(-n_axes, 0)) if axes is None else axes
+
+    axes = (axes,) if np.isscalar(axes) else axes
 
     # If `origin` is provided, then it must be "broadcastable" to a tuple with length
     # equal to the core dimensionality.
@@ -150,10 +159,9 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
 
     # For simplicity, work with `axes` at the end.
     working_axes = tuple(range(-n_axes, 0))
-
-
-
-              if output is not None else output)
+    input = xp.moveaxis(input, axes, working_axes)
+    output = (xp.moveaxis(output, axes, working_axes)
+              if output is not None else output)
 
     # Wrap the function to limit maximum memory usage, deal with `footprint`,
     # and populate `output`. The latter requires some verbosity because we
@@ -190,8 +198,8 @@ def _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
                             **kwargs)
         return output
 
-    return (input, wrapped_function, size, mode, cval,
-
+    return (input, wrapped_function, size, mode, cval, origin,
+            working_axes, axes, n_axes, n_batch, xp)
 
 
 @_ni_docstrings.docfiller
@@ -211,7 +219,19 @@ def vectorized_filter(input, function, *, size=None, footprint=None, output=None
 
         where ``axis`` specifies the axis (or axes) of ``window`` along which
         the filter function is evaluated.
-
+    size : scalar or tuple, optional
+        See `footprint` below. Ignored if `footprint` is given.
+    footprint : array, optional
+        Either `size` or `footprint` must be defined. `size` gives
+        the shape that is taken from the input array, at every element
+        position, to define the input to the filter function.
+        `footprint` is a boolean array that specifies (implicitly) a
+        shape, but also which of the elements within this shape will get
+        passed to the filter function. Thus ``size=(n, m)`` is equivalent
+        to ``footprint=np.ones((n, m))``.
+        We adjust `size` to the number of dimensions indicated by `axes`.
+        For instance, if `axes` is ``(0, 2, 1)`` and ``n`` is passed for ``size``,
+        then the effective `size` is ``(n, n, n)``.
     %(output)s
     mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
         The `mode` parameter determines how the input array is extended
@@ -409,7 +429,7 @@ def vectorized_filter(input, function, *, size=None, footprint=None, output=None
 
     """  # noqa: E501
 
-    (input, function, size, mode, cval, origin, working_axes, n_axes, n_batch, xp
+    (input, function, size, mode, cval, origin, working_axes, axes, n_axes, n_batch, xp
     ) = _vectorized_filter_iv(input, function, size, footprint, output, mode, cval,
                               origin, axes, batch_memory)
 
@@ -455,7 +475,7 @@ def vectorized_filter(input, function, *, size=None, footprint=None, output=None
     res = function(view)
 
     # move working_axes back to original positions
-    return xp.moveaxis(res, working_axes, axes)
+    return xp.moveaxis(res, working_axes, axes)
 
 
 def _invalid_origin(origin, lenw):
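The interface-validation changes above move the `axes` normalization ahead of the `size`/`footprint` handling, so a scalar `size` is now expanded to `len(axes)` entries rather than to `input.ndim`. A minimal usage sketch of the behaviour the new tests below exercise (assumes SciPy >= 1.16.0rc2; array values are illustrative only):

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
img = rng.random((5, 5))

# Scalar `size` with explicit `axes`: size=2 expands to (2, 2) for two axes ...
res = ndimage.vectorized_filter(img, np.mean, size=2, axes=(0, 1))
ref = ndimage.vectorized_filter(img, np.mean, size=2)   # axes default to all axes
np.testing.assert_allclose(res, ref)

# ... and to (2,) when a single scalar axis is given.
res = ndimage.vectorized_filter(img, np.mean, size=2, axes=0)
ref = ndimage.vectorized_filter(img, np.mean, size=(2,), axes=(0,))
np.testing.assert_allclose(res, ref)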
1 binary file changed (compiled extension module; contents not shown)
scipy/ndimage/tests/test_filters.py
CHANGED
@@ -2571,7 +2571,7 @@ class TestThreading:
         for i in range(n):
             fun(*args, output=out[i, ...])
 
-    @xfail_xp_backends("cupy",
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_correlate1d(self, xp):
         d = np.random.randn(5000)
@@ -2585,7 +2585,7 @@ class TestThreading:
         self.check_func_thread(4, ndimage.correlate1d, (d, k), ot)
         xp_assert_equal(os, ot)
 
-    @xfail_xp_backends("cupy",
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_correlate(self, xp):
         d = xp.asarray(np.random.randn(500, 500))
@@ -2596,7 +2596,7 @@ class TestThreading:
         self.check_func_thread(4, ndimage.correlate, (d, k), ot)
         xp_assert_equal(os, ot)
 
-    @xfail_xp_backends("cupy",
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_median_filter(self, xp):
         d = xp.asarray(np.random.randn(500, 500))
@@ -2606,7 +2606,7 @@ class TestThreading:
         self.check_func_thread(4, ndimage.median_filter, (d, 3), ot)
         xp_assert_equal(os, ot)
 
-    @xfail_xp_backends("cupy",
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_uniform_filter1d(self, xp):
         d = np.random.randn(5000)
@@ -2619,7 +2619,7 @@ class TestThreading:
         self.check_func_thread(4, ndimage.uniform_filter1d, (d, 5), ot)
         xp_assert_equal(os, ot)
 
-    @xfail_xp_backends("cupy",
+    @xfail_xp_backends("cupy",
                        reason="XXX thread exception; cannot repro outside of pytest")
     def test_minmax_filter(self, xp):
         d = xp.asarray(np.random.randn(500, 500))
@@ -2908,15 +2908,19 @@ class TestVectorizedFilter:
         with pytest.raises(ValueError, match=message):
             ndimage.vectorized_filter(input, function, size=0)
 
-        message = "The
+        message = "The length of `axes` may not exceed "
+        axes = (0, 1, 2)
         with pytest.raises(ValueError, match=message):
-            ndimage.vectorized_filter(input, function, size=(1, 2,
+            ndimage.vectorized_filter(input, function, size=(1, 2), axes=axes)
         with pytest.raises(ValueError, match=message):
-            ndimage.vectorized_filter(input, function, footprint=xp.ones((2, 2
+            ndimage.vectorized_filter(input, function, footprint=xp.ones((2, 2)),
+                                      axes=axes)
 
-        message = "`axes` must be
+        message = "`axes` must be compatible with the dimensionality..."
         with pytest.raises(ValueError, match=message):
             ndimage.vectorized_filter(input, function, size=(1,))
+        with pytest.raises(ValueError, match=message):
+            ndimage.vectorized_filter(input, function, size=(2,), axes=(0,1))
 
         message = "All elements of `origin` must be integers"
         with pytest.raises(ValueError, match=message):
@@ -2986,6 +2990,35 @@ class TestVectorizedFilter:
         ref = ndimage.vectorized_filter(input, function, size=21)
         xp_assert_close(res, ref)
 
+    def test_gh23046_feature(self, xp):
+        # The intent of gh-23046 was to always allow `size` to be a scalar.
+        rng = np.random.default_rng(45982734597824)
+        img = xp.asarray(rng.random((5, 5)))
+
+        ref = ndimage.vectorized_filter(img, xp.mean, size=2)
+        res = ndimage.vectorized_filter(img, xp.mean, size=2, axes=(0, 1))
+        xp_assert_close(res, ref)
+
+        ref = ndimage.vectorized_filter(img, xp.mean, size=(2,), axes=(0,))
+        res = ndimage.vectorized_filter(img, xp.mean, size=2, axes=0)
+        xp_assert_close(res, ref)
+
+    def test_gh23046_fix(self, xp):
+        # While investigating the feasibility of gh-23046, I noticed a bug when the
+        # length of an `axes` tuple equals the dimensionality of the image.
+        rng = np.random.default_rng(45982734597824)
+        img = xp.asarray(rng.random((5, 5)))
+        size = (2, 3)
+        ref = ndimage.vectorized_filter(img.T, xp.mean, size=size).T
+        res = ndimage.vectorized_filter(img, xp.mean, size=size, axes=(1, 0))
+        xp_assert_close(res, ref)
+
+        ref = ndimage.vectorized_filter(img, xp.mean, size=size, mode='constant')
+        res = ndimage.vectorized_filter(img, xp.mean, size=size[::-1], axes=(1, 0),
+                                        mode='constant')
+        xp_assert_close(res, ref)
+
+
 
     @given(x=npst.arrays(dtype=np.float64,
                          shape=st.integers(min_value=1, max_value=1000)),
3 binary files changed (compiled extension modules; contents not shown)
scipy/optimize/tests/test_lsq_linear.py
CHANGED
@@ -239,7 +239,7 @@ class SparseMixin:
 
         # Default lsmr arguments should not fully converge the solution
         default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr')
-        with pytest.raises(AssertionError
+        with pytest.raises(AssertionError):
             assert_allclose(exact_sol.x, default_lsmr_sol.x)
 
         # By increasing the maximum lsmr iters, it will converge
2 binary files changed (compiled extension modules; contents not shown)
scipy/signal/_spectral_py.py
CHANGED
@@ -897,6 +897,15 @@ def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
     if np.iscomplexobj(x) and return_onesided:
         return_onesided = False
 
+    if x.shape[axis] < y.shape[axis]:  # zero-pad x to shape of y:
+        z_shape = list(y.shape)
+        z_shape[axis] = y.shape[axis] - x.shape[axis]
+        x = np.concatenate((x, np.zeros(z_shape)), axis=axis)
+    elif y.shape[axis] < x.shape[axis]:  # zero-pad y to shape of x:
+        z_shape = list(x.shape)
+        z_shape[axis] = x.shape[axis] - y.shape[axis]
+        y = np.concatenate((y, np.zeros(z_shape)), axis=axis)
+
     # using cast() to make mypy happy:
     fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided else 'twosided')
     if scaling not in (scales := {'spectrum': 'magnitude', 'density': 'psd'}):
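The added block zero-pads the shorter of `x` and `y` along `axis` before the spectra are computed, so `csd` accepts unequal-length inputs (gh-23036). A minimal sketch mirroring the new 1-D test below (assumes SciPy >= 1.16.0rc2):

import numpy as np
from scipy import signal

x = np.tile([4, 0, -4, 0], 4)                  # 16-sample test signal
kw = dict(fs=len(x), window='boxcar', nperseg=4)

_, Pxx = signal.csd(x, x.copy(), **kw)         # equal-length reference
_, Pxy = signal.csd(x, x[:8], **kw)            # shorter input is zero-padded to len(x)

# Half of the second signal's segments are zero, so the averaged
# cross-spectral density is halved relative to the reference.
np.testing.assert_allclose(Pxy, Pxx / 2)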
1 binary file changed (compiled extension module; contents not shown)
scipy/signal/tests/test_spectral.py
CHANGED
@@ -8,6 +8,7 @@ import pytest
 from pytest import raises as assert_raises
 
 from scipy import signal
+from scipy._lib._array_api import xp_assert_close
 from scipy.fft import fftfreq, rfftfreq, fft, irfft
 from scipy.integrate import trapezoid
 from scipy.signal import (periodogram, welch, lombscargle, coherence, csd,
@@ -591,6 +592,36 @@ class TestCSD:
         assert_allclose(f, f1)
         assert_allclose(c, c1)
 
+    def test_unequal_length_input_1D(self):
+        """Test zero-padding for input `x.shape[axis] != y.shape[axis]` for 1d arrays.
+
+        This test ensures that issue 23036 is fixed.
+        """
+        x = np.tile([4, 0, -4, 0], 4)
+
+        kw = dict(fs=len(x), window='boxcar', nperseg=4)
+        X0 = signal.csd(x, np.copy(x), **kw)[1]  # `x is x` must be False
+        X1 = signal.csd(x, x[:8], **kw)[1]
+        X2 = signal.csd(x[:8], x, **kw)[1]
+        xp_assert_close(X1, X0 / 2)
+        xp_assert_close(X2, X0 / 2)
+
+    def test_unequal_length_input_3D(self):
+        """Test zero-padding for input `x.shape[axis] != y.shape[axis]` for 3d arrays.
+
+        This test ensures that issue 23036 is fixed.
+        """
+        n = 8
+        x = np.zeros(2 * 3 * n).reshape(2, n, 3)
+        x[:, 0, :] = n
+
+        kw = dict(fs=n, window='boxcar', nperseg=n, detrend=None, axis=1)
+        X0 = signal.csd(x, x.copy(), **kw)[1]  # `x is x` must be False
+        X1 = signal.csd(x, x[:, :2, :], **kw)[1]
+        X2 = signal.csd(x[:, :2, :], x, **kw)[1]
+        xp_assert_close(X1, X0)
+        xp_assert_close(X2, X0)
+
     def test_real_onesided_even(self):
         x = np.zeros(16)
         x[0] = 1
scipy/sparse/_base.py
CHANGED
@@ -658,7 +658,7 @@ class _spbase(SparseABC):
             # eq and ne return True or False instead of an array when the shapes
             # don't match. Numpy doesn't do this. Is this what we want?
             if op in (operator.eq, operator.ne):
-                return op
+                return op is operator.ne
             raise ValueError("inconsistent shape")
 
         csr_self = (self if self.ndim < 3 else self.reshape(1, -1)).tocsr()
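With `return op is operator.ne`, comparing sparse arrays of mismatched shapes with `==` now yields the plain bool `False` and `!=` yields `True` (previously the branch returned the operator object itself). A minimal sketch of the behaviour the new test below asserts (assumes SciPy >= 1.16.0rc2; CSR/CSC/BSR formats only):

import numpy as np
from scipy import sparse

A = sparse.csr_array(np.array([[1, 0, 2],
                               [0, 3, 0]]))   # shape (2, 3)
B = A.T                                       # shape (3, 2)

# Shapes differ, so a plain bool is returned instead of an elementwise array.
assert (A == B) is False
assert (A != B) is True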
13 binary files changed (compiled extension modules; contents not shown)
scipy/sparse/tests/test_base.py
CHANGED
@@ -433,6 +433,13 @@ class _TestCommon:
         for dtype in self.checked_dtypes:
             check(dtype)
 
+    def test_eq_ne_different_shapes(self):
+        if self.datsp.format not in ['bsr', 'csc', 'csr']:
+            pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
+        # Is this what we want? numpy raises when shape differs. we return False.
+        assert (self.datsp == self.datsp.T) is False
+        assert (self.datsp != self.datsp.T) is True
+
     def test_lt(self):
         sup = suppress_warnings()
         sup.filter(SparseEfficiencyWarning)
5 binary files changed (compiled extension modules; contents not shown)
scipy/spatial/tests/test_distance.py
CHANGED
@@ -2231,6 +2231,18 @@ def test_immutable_input(metric):
     getattr(scipy.spatial.distance, metric)(x, x, w=x)
 
 
+def test_gh_23109():
+    a = np.array([0, 0, 1, 1])
+    b = np.array([0, 1, 1, 0])
+    w = np.asarray([1.5, 1.2, 0.7, 1.3])
+    expected = yule(a, b, w=w)
+    assert_allclose(expected, 1.1954022988505748)
+    actual = cdist(np.atleast_2d(a),
+                   np.atleast_2d(b),
+                   metric='yule', w=w)
+    assert_allclose(actual, expected)
+
+
 class TestJaccard:
 
     def test_pdist_jaccard_random(self):
7 binary files changed (compiled extension modules; contents not shown)
scipy/stats/_axis_nan_policy.py
CHANGED
@@ -403,17 +403,9 @@ def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
             override.update(temp)
 
     if result_to_tuple is None:
-        def result_to_tuple(res):
+        def result_to_tuple(res, _):
             return res
 
-    # The only `result_to_tuple` that needs the second argument (number of
-    # outputs) is the one for `moment`, and this was realized very late.
-    # Rather than changing all `result_to_tuple` definitions, we wrap them
-    # here to accept a second argument if they don't already.
-    if len(inspect.signature(result_to_tuple).parameters) == 1:
-        def result_to_tuple(res, _, f=result_to_tuple):
-            return f(res)
-
     if not callable(too_small):
         def is_too_small(samples, *ts_args, axis=-1, **ts_kwargs):
             for sample in samples:
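After this change every `result_to_tuple` callback handed to the private `_axis_nan_policy_factory` decorator must take two arguments (the result and the number of outputs), and the shim that adapted one-argument callbacks is removed; the `_entropy.py` and `_morestats.py` diffs below update SciPy's own callers accordingly. A minimal sketch of the new calling convention with a hypothetical statistic (private API, for illustration only):

import numpy as np
from scipy.stats._axis_nan_policy import _axis_nan_policy_factory

@_axis_nan_policy_factory(
    lambda x: x,                        # tuple_to_result: identity for one output
    n_outputs=1, default_axis=0,
    result_to_tuple=lambda x, _: (x,),  # second argument (number of outputs) now required
)
def my_range(sample, axis=0):
    # hypothetical reducing statistic, purely for illustration
    return np.max(sample, axis=axis) - np.min(sample, axis=axis)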
1 binary file changed (compiled extension module; contents not shown)
scipy/stats/_correlation.py
CHANGED
scipy/stats/_entropy.py
CHANGED
@@ -20,7 +20,7 @@ __all__ = ['entropy', 'differential_entropy']
         2 if ("qk" in kwgs and kwgs["qk"] is not None)
         else 1
     ),
-    n_outputs=1, result_to_tuple=lambda x: (x,), paired=True,
+    n_outputs=1, result_to_tuple=lambda x, _: (x,), paired=True,
     too_small=-1  # entropy doesn't have too small inputs
 )
 def entropy(pk: np.typing.ArrayLike,
@@ -170,7 +170,7 @@ def _differential_entropy_is_too_small(samples, kwargs, axis=-1):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,),
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,),
    too_small=_differential_entropy_is_too_small
 )
 def differential_entropy(
scipy/stats/_hypotests.py
CHANGED
scipy/stats/_morestats.py
CHANGED
@@ -222,7 +222,7 @@ def mvsdist(data):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, default_axis=None
 )
 def kstat(data, n=2, *, axis=None):
     r"""
@@ -327,7 +327,7 @@ def kstat(data, n=2, *, axis=None):
 
 
 @_axis_nan_policy_factory(
-    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
+    lambda x: x, result_to_tuple=lambda x, _: (x,), n_outputs=1, default_axis=None
 )
 def kstatvar(data, n=2, *, axis=None):
     r"""Return an unbiased estimator of the variance of the k-statistic.
@@ -984,7 +984,7 @@ def boxcox_llf(lmb, data, *, axis=0, keepdims=False, nan_policy='propagate'):
 
 
 @_axis_nan_policy_factory(lambda x: x, n_outputs=1, default_axis=0,
-                          result_to_tuple=lambda x: (x,))
+                          result_to_tuple=lambda x, _: (x,))
 def _boxcox_llf(data, axis=0, *, lmb):
     xp = array_namespace(data)
     lmb, data = xp_promote(lmb, data, force_floating=True, xp=xp)
@@ -3496,7 +3496,7 @@ def mood(x, y, axis=0, alternative="two-sided"):
 WilcoxonResult = _make_tuple_bunch('WilcoxonResult', ['statistic', 'pvalue'])
 
 
-def wilcoxon_result_unpacker(res):
+def wilcoxon_result_unpacker(res, _):
     if hasattr(res, 'zstatistic'):
         return res.statistic, res.pvalue, res.zstatistic
     else:
@@ -3993,7 +3993,7 @@ def _circfuncs_common(samples, period, xp=None):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
     r"""Compute the circular mean of a sample of angle observations.
@@ -4086,7 +4086,7 @@ def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
     r"""Compute the circular variance of a sample of angle observations.
@@ -4180,7 +4180,7 @@ def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
 
 @_axis_nan_policy_factory(
     lambda x: x, n_outputs=1, default_axis=None,
-    result_to_tuple=lambda x: (x,)
+    result_to_tuple=lambda x, _: (x,)
 )
 def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *,
             normalize=False):
4 binary files changed (compiled extension modules; contents not shown)