anndata 0.12.0rc4__py3-none-any.whl → 0.12.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- anndata/_core/aligned_df.py +1 -1
- anndata/_core/anndata.py +1 -1
- anndata/_core/merge.py +0 -6
- anndata/_core/sparse_dataset.py +5 -1
- anndata/_core/xarray.py +7 -4
- anndata/_io/h5ad.py +1 -1
- anndata/_io/read.py +5 -1
- anndata/_io/specs/lazy_methods.py +25 -15
- anndata/_io/specs/methods.py +18 -16
- anndata/_io/specs/registry.py +15 -4
- anndata/_io/zarr.py +0 -14
- anndata/_settings.py +5 -2
- anndata/_settings.pyi +49 -0
- anndata/compat/__init__.py +7 -7
- anndata/experimental/backed/_lazy_arrays.py +27 -5
- anndata/tests/helpers.py +8 -24
- anndata/utils.py +3 -4
- {anndata-0.12.0rc4.dist-info → anndata-0.12.1.dist-info}/METADATA +9 -10
- {anndata-0.12.0rc4.dist-info → anndata-0.12.1.dist-info}/RECORD +22 -21
- testing/anndata/_pytest.py +4 -0
- {anndata-0.12.0rc4.dist-info → anndata-0.12.1.dist-info}/WHEEL +0 -0
- {anndata-0.12.0rc4.dist-info → anndata-0.12.1.dist-info}/licenses/LICENSE +0 -0
anndata/_core/aligned_df.py
CHANGED
anndata/_core/anndata.py
CHANGED
anndata/_core/merge.py
CHANGED
@@ -904,12 +904,6 @@ def concat_arrays(  # noqa: PLR0911, PLR0912
             ],
             format="csr",
         )
-        scipy_version = Version(scipy.__version__)
-        # Bug where xstack produces a matrix not an array in 1.11.*
-        if use_sparse_array and (scipy_version.major, scipy_version.minor) == (1, 11):
-            if mat.format == "csc":
-                return sparse.csc_array(mat)
-            return sparse.csr_array(mat)
         return mat
     else:
         return np.concatenate(

anndata/_core/sparse_dataset.py
CHANGED
@@ -165,7 +165,11 @@ class BackedSparseMatrix(_cs_matrix):
     def _get_contiguous_compressed_slice(
         self, s: slice
     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-        new_indptr = self.indptr[s.start : s.stop + 1]
+        new_indptr = self.indptr[s.start : s.stop + 1]
+        # If indptr is cached, we need to make a copy of the subset
+        # so as not to alter the underlying cached data.
+        if isinstance(self.indptr, np.ndarray):
+            new_indptr = new_indptr.copy()

         start = new_indptr[0]
         stop = new_indptr[-1]

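The new comment spells out the motivation: slicing a cached in-memory indptr returns a NumPy view, so later in-place arithmetic on the slice would silently corrupt the cache. A minimal sketch of the failure mode and the fix, using hypothetical helper names rather than anndata internals:

    import numpy as np

    cached_indptr = np.array([0, 2, 5, 7, 9])  # stand-in for a cached CSR indptr

    def slice_without_copy(indptr: np.ndarray, start: int, stop: int) -> np.ndarray:
        sub = indptr[start : stop + 1]  # NumPy slicing returns a view
        sub -= sub[0]                   # in-place shift mutates the cached buffer
        return sub

    def slice_with_copy(indptr: np.ndarray, start: int, stop: int) -> np.ndarray:
        sub = indptr[start : stop + 1].copy()  # detach from the cached buffer
        sub -= sub[0]
        return sub

    before = cached_indptr.copy()
    slice_with_copy(cached_indptr, 1, 3)
    assert np.array_equal(cached_indptr, before)  # cache left untouched
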
anndata/_core/xarray.py
CHANGED
@@ -245,7 +245,7 @@ class Dataset2D:
         if df.index.name != index_key and index_key is not None:
             df = df.set_index(index_key)
         for col in set(self.columns) - non_nullable_string_cols:
-            df[col] =
+            df[col] = df[col].astype(dtype="string")
         df.index.name = None  # matches old AnnData object
         return df
@@ -389,9 +389,12 @@ class Dataset2D:
         }
         el = self.ds.drop_vars(extension_arrays.keys())
         el = el.reindex({index_dim: index}, method=None, fill_value=fill_value)
-        for col in
-            el[col] =
-
+        for col, data in extension_arrays.items():
+            el[col] = XDataArray.from_series(
+                pd.Series(data.data, index=self.index).reindex(
+                    index.rename(self.index.name) if index is not None else index,
+                    fill_value=fill_value,
+                )
             )
         return Dataset2D(el)

anndata/_io/h5ad.py
CHANGED
@@ -176,7 +176,7 @@ def read_h5ad_backed(

 def read_h5ad(
     filename: PathLike[str] | str,
-    backed: Literal["r", "r+"] | bool | None = None,
+    backed: Literal["r", "r+"] | bool | None = None,  # noqa: FBT001
     *,
     as_sparse: Sequence[str] = (),
     as_sparse_fmt: type[CSMatrix] = sparse.csr_matrix,

anndata/_io/read.py
CHANGED
@@ -22,9 +22,11 @@ if TYPE_CHECKING:
     from collections.abc import Generator, Iterable, Iterator, Mapping


+@old_positionals("first_column_names", "dtype")
 def read_csv(
     filename: PathLike[str] | str | Iterator[str],
     delimiter: str | None = ",",
+    *,
     first_column_names: bool | None = None,
     dtype: str = "float32",
 ) -> AnnData:
@@ -331,9 +333,11 @@ def read_mtx(filename: PathLike[str] | str, dtype: str = "float32") -> AnnData:
     return AnnData(X)


+@old_positionals("first_column_names", "dtype")
 def read_text(
     filename: PathLike[str] | str | Iterator[str],
     delimiter: str | None = None,
+    *,
     first_column_names: bool | None = None,
     dtype: str = "float32",
 ) -> AnnData:
@@ -381,7 +385,7 @@ def _iter_lines(file_like: Iterable[str]) -> Generator[str, None, None]:
 def _read_text(  # noqa: PLR0912, PLR0915
     f: Iterator[str],
     delimiter: str | None,
-    first_column_names: bool | None,
+    first_column_names: bool | None,  # noqa: FBT001
     dtype: str,
 ) -> AnnData:
     comments = []

anndata/_io/specs/lazy_methods.py
CHANGED
@@ -132,7 +132,7 @@ def read_sparse_as_dask(
     path_or_sparse_dataset = (
         Path(filename(elem))
         if isinstance(elem, H5Group)
-        else ad.io.sparse_dataset(elem)
+        else ad.io.sparse_dataset(elem, should_cache_indptr=False)
     )
     elem_name = get_elem_name(elem)
     shape: tuple[int, int] = tuple(elem.attrs["shape"])
@@ -177,21 +177,37 @@ def read_sparse_as_dask(
     return da_mtx


+def resolve_chunks(
+    elem: H5Array | ZarrArray,
+    chunks_arg: tuple[int, ...] | None,
+    shape: tuple[int, ...],
+) -> tuple[int, ...]:
+    shape = tuple(elem.shape)
+    if chunks_arg is not None:
+        # None and -1 on a given axis indicate that one should use the shape
+        # in `dask`'s semantics.
+        return tuple(
+            c if c not in {None, -1} else s
+            for c, s in zip(chunks_arg, shape, strict=True)
+        )
+    elif elem.chunks is None:  # h5 unchunked
+        return tuple(min(_DEFAULT_STRIDE, s) for s in shape)
+    return elem.chunks
+
+
 @_LAZY_REGISTRY.register_read(H5Array, IOSpec("string-array", "0.2.0"))
 def read_h5_string_array(
     elem: H5Array,
     *,
     _reader: LazyReader,
-    chunks: tuple[int
+    chunks: tuple[int] | None = None,
 ) -> DaskArray:
     import dask.array as da

     from anndata._io.h5ad import read_dataset

-
-
-        chunks=chunks if chunks is not None else (_DEFAULT_STRIDE,) * len(elem.shape),
-    )
+    chunks = resolve_chunks(elem, chunks, tuple(elem.shape))
+    return da.from_array(read_dataset(elem), chunks=chunks)


 @_LAZY_REGISTRY.register_read(H5Array, IOSpec("array", "0.2.0"))
@@ -204,13 +220,7 @@ def read_h5_array(
     elem_name: str = elem.name
     shape = tuple(elem.shape)
     dtype = elem.dtype
-    chunks = (
-        tuple(
-            c if c not in {None, -1} else s for c, s in zip(chunks, shape, strict=True)
-        )
-        if chunks is not None
-        else tuple(min(_DEFAULT_STRIDE, s) for s in shape)
-    )
+    chunks = resolve_chunks(elem, chunks, shape)

     chunk_layout = tuple(
         compute_chunk_layout_for_axis_size(chunks[i], shape[i])
@@ -228,7 +238,6 @@ def read_h5_array(
 def read_zarr_array(
     elem: ZarrArray, *, _reader: LazyReader, chunks: tuple[int, ...] | None = None
 ) -> DaskArray:
-    chunks: tuple[int, ...] = chunks if chunks is not None else elem.chunks
     import dask.array as da

     return da.from_zarr(elem, chunks=chunks)
@@ -284,9 +293,10 @@ def read_dataframe(
     *,
     _reader: LazyReader,
     use_range_index: bool = False,
+    chunks: tuple[int] | None = None,
 ) -> Dataset2D:
     elem_dict = {
-        k: _reader.read_elem(elem[k])
+        k: _reader.read_elem(elem[k], chunks=chunks)
         for k in [*elem.attrs["column-order"], elem.attrs["_index"]]
     }
     # If we use a range index, the coord axis needs to have the special dim name

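The new resolve_chunks helper centralizes the chunk-selection rule that read_h5_array previously inlined. A rough standalone sketch of that rule, with DEFAULT_STRIDE standing in for anndata's _DEFAULT_STRIDE (whose actual value may differ):

    DEFAULT_STRIDE = 1000  # assumption: placeholder for anndata's _DEFAULT_STRIDE

    def resolve(chunks_arg, shape, stored_chunks=None):
        if chunks_arg is not None:
            # dask semantics: None or -1 on an axis means "use the full axis"
            return tuple(
                c if c not in {None, -1} else s
                for c, s in zip(chunks_arg, shape, strict=True)
            )
        if stored_chunks is None:  # e.g. an unchunked HDF5 dataset
            return tuple(min(DEFAULT_STRIDE, s) for s in shape)
        return stored_chunks

    assert resolve((100, -1), (5000, 20)) == (100, 20)
    assert resolve(None, (5000, 20)) == (1000, 20)
    assert resolve(None, (5000, 20), stored_chunks=(256, 20)) == (256, 20)
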
anndata/_io/specs/methods.py
CHANGED
@@ -24,6 +24,7 @@ from anndata._core.sparse_dataset import _CSCDataset, _CSRDataset, sparse_datase
 from anndata._io.utils import H5PY_V3, check_key, zero_dim_array_as_scalar
 from anndata._warnings import OldFormatWarning
 from anndata.compat import (
+    NULLABLE_NUMPY_STRING_TYPE,
     AwkArray,
     CupyArray,
     CupyCSCMatrix,
@@ -431,7 +432,7 @@ def write_basic(
     dataset_kwargs = zarr_v3_compressor_compat(dataset_kwargs)
     f.create_array(k, shape=elem.shape, dtype=dtype, **dataset_kwargs)
     # see https://github.com/zarr-developers/zarr-python/discussions/2712
-    if isinstance(elem, ZarrArray):
+    if isinstance(elem, ZarrArray | H5Array):
         f[k][...] = elem[...]
     else:
         f[k][...] = elem
@@ -622,24 +623,20 @@ def write_vlen_string_array_zarr(
         f[k][:] = elem
     else:
         from numcodecs import VLenUTF8
+        from zarr.core.dtype import VariableLengthUTF8

         dataset_kwargs = dataset_kwargs.copy()
         dataset_kwargs = zarr_v3_compressor_compat(dataset_kwargs)
-
-
-
-
-            case 2, _:
-                filters, dtype = [VLenUTF8()], object
-            case 3, True:
-                filters, dtype = None, np.dtypes.StringDType()
-            case 3, False:
-                filters, dtype = None, np.dtypes.ObjectDType()
+        dtype = VariableLengthUTF8()
+        filters, fill_value = None, None
+        if ad.settings.zarr_write_format == 2:
+            filters, fill_value = [VLenUTF8()], ""
         f.create_array(
             k,
             shape=elem.shape,
             dtype=dtype,
             filters=filters,
+            fill_value=fill_value,
             **dataset_kwargs,
         )
         f[k][:] = elem
@@ -1041,7 +1038,7 @@ def read_dataframe_partial(
     df = pd.DataFrame(
         {k: read_elem_partial(elem[k], indices=indices[0]) for k in columns},
         index=read_elem_partial(elem[idx_key], indices=indices[0]),
-        columns=columns if
+        columns=columns if columns else None,
     )
     if idx_key != "_index":
         df.index.name = idx_key
@@ -1210,7 +1207,10 @@ def _string_array(
     values: np.ndarray, mask: np.ndarray
 ) -> pd.api.extensions.ExtensionArray:
     """Construct a string array from values and mask."""
-    arr = pd.array(
+    arr = pd.array(
+        values.astype(NULLABLE_NUMPY_STRING_TYPE),
+        dtype=pd.StringDtype(),
+    )
     arr[mask] = pd.NA
     return arr
@@ -1281,19 +1281,21 @@ def write_scalar_zarr(
         return f.create_dataset(key, data=np.array(value), shape=(), **dataset_kwargs)
     else:
         from numcodecs import VLenUTF8
+        from zarr.core.dtype import VariableLengthUTF8

         match ad.settings.zarr_write_format, value:
             case 2, str():
-                filters, dtype = [VLenUTF8()],
+                filters, dtype, fill_value = [VLenUTF8()], VariableLengthUTF8(), ""
             case 3, str():
-                filters, dtype = None,
+                filters, dtype, fill_value = None, VariableLengthUTF8(), None
             case _, _:
-                filters, dtype = None, np.array(value).dtype
+                filters, dtype, fill_value = None, np.array(value).dtype, None
         a = f.create_array(
             key,
             shape=(),
             dtype=dtype,
             filters=filters,
+            fill_value=fill_value,
             **dataset_kwargs,
         )
         a[...] = np.array(value)

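For the _string_array change, the end result is a pandas nullable string array built from decoded values plus a missing-value mask. A small illustrative sketch of that pattern, approximating NULLABLE_NUMPY_STRING_TYPE with plain object dtype:

    import numpy as np
    import pandas as pd

    values = np.array(["a", "b", "c"], dtype=object)  # decoded string values
    mask = np.array([False, True, False])             # True marks missing entries

    arr = pd.array(values, dtype=pd.StringDtype())
    arr[mask] = pd.NA
    print(arr)  # 'a', <NA>, 'c' as a nullable StringArray
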
anndata/_io/specs/registry.py
CHANGED
@@ -360,11 +360,22 @@ class Writer:
         dest_type = type(store)

         # Normalize k to absolute path
-        if (
-
-
+        if (
+            is_zarr_v2_store := (
+                (is_zarr_store := isinstance(store, ZarrGroup)) and is_zarr_v2()
+            )
+        ) or (isinstance(store, h5py.Group) and not PurePosixPath(k).is_absolute()):
             k = str(PurePosixPath(store.name) / k)
-
+        is_consolidated = False
+        if is_zarr_v2_store:
+            from zarr.storage import ConsolidatedMetadataStore
+
+            is_consolidated = isinstance(store.store, ConsolidatedMetadataStore)
+        elif is_zarr_store:
+            is_consolidated = store.metadata.consolidated_metadata is not None
+        if is_consolidated:
+            msg = "Cannot overwrite/edit a store with consolidated metadata"
+            raise ValueError(msg)
         if k == "/":
             if isinstance(store, ZarrGroup) and not is_zarr_v2():
                 from zarr.core.sync import sync

anndata/_io/zarr.py
CHANGED
@@ -27,19 +27,6 @@ if TYPE_CHECKING:
 T = TypeVar("T")


-def _check_rec_array(adata: AnnData) -> None:
-    if settings.zarr_write_format == 3 and (
-        structured_dtype_keys := {
-            k
-            for k, v in adata.uns.items()
-            if isinstance(v, np.recarray)
-            or (isinstance(v, np.ndarray) and v.dtype.fields)
-        }
-    ):
-        msg = f"zarr v3 does not support structured dtypes. Found keys {structured_dtype_keys}"
-        raise NotImplementedError(msg)
-
-
 @no_write_dataset_2d
 def write_zarr(
     store: StoreLike,
@@ -50,7 +37,6 @@ def write_zarr(
     **ds_kwargs,
 ) -> None:
     """See :meth:`~anndata.AnnData.write_zarr`."""
-    _check_rec_array(adata)
     if isinstance(store, Path):
         store = str(store)
     if convert_strings_to_categoricals:

anndata/_settings.py
CHANGED
@@ -13,7 +13,7 @@ from inspect import Parameter, signature
 from types import GenericAlias
 from typing import TYPE_CHECKING, Generic, NamedTuple, TypeVar, cast

-from .compat import old_positionals
+from .compat import is_zarr_v2, old_positionals

 if TYPE_CHECKING:
     from collections.abc import Callable, Sequence
@@ -333,7 +333,7 @@ class SettingsManager:
         raise AttributeError(msg)

     def __dir__(self) -> Iterable[str]:
-        return sorted((*
+        return sorted((*super().__dir__(), *self._config.keys()))

     def reset(self, option: Iterable[str] | str) -> None:
         """
@@ -439,6 +439,9 @@ def validate_zarr_write_format(format: int):
     if format not in {2, 3}:
         msg = "non-v2 zarr on-disk format not supported"
         raise ValueError(msg)
+    if format == 3 and is_zarr_v2():
+        msg = "Cannot write v3 format against v2 package"
+        raise ValueError(msg)


 settings.register(

anndata/_settings.pyi
ADDED
@@ -0,0 +1,49 @@
+from collections.abc import Callable as Callable
+from collections.abc import Generator, Iterable
+from contextlib import contextmanager
+from dataclasses import dataclass
+from typing import Literal, TypeVar
+
+_T = TypeVar("_T")
+
+@dataclass
+class SettingsManager:
+    __doc_tmpl__: str = ...
+    def describe(
+        self,
+        option: str | Iterable[str] | None = None,
+        *,
+        should_print_description: bool = True,
+        as_rst: bool = False,
+    ) -> str: ...
+    def deprecate(
+        self, option: str, removal_version: str, message: str | None = None
+    ) -> None: ...
+    def register(
+        self,
+        option: str,
+        *,
+        default_value: _T,
+        description: str,
+        validate: Callable[[_T], None],
+        option_type: object | None = None,
+        get_from_env: Callable[[str, _T], _T] = ...,
+    ) -> None: ...
+    def __setattr__(self, option: str, val: object) -> None: ...
+    def __getattr__(self, option: str) -> object: ...
+    def __dir__(self) -> Iterable[str]: ...
+    def reset(self, option: Iterable[str] | str) -> None: ...
+    @contextmanager
+    def override(self, **overrides) -> Generator[None]: ...
+    @property
+    def __doc__(self): ...
+
+class _AnnDataSettingsManager(SettingsManager):
+    remove_unused_categories: bool = True
+    check_uniqueness: bool = True
+    allow_write_nullable_strings: bool = False
+    zarr_write_format: Literal[2, 3] = 2
+    use_sparse_array_on_read: bool = False
+    min_rows_for_chunked_h5_copy: int = 1000
+
+settings: _AnnDataSettingsManager

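The new stub mirrors the runtime SettingsManager API, which should make options such as zarr_write_format visible to type checkers. Illustrative usage of the interface the stub declares (assuming the attributes listed above exist on anndata.settings at runtime):

    import anndata as ad

    print(ad.settings.zarr_write_format)  # current on-disk zarr format

    # temporary override, restored when the block exits
    with ad.settings.override(zarr_write_format=2):
        ...  # writes inside this block use the zarr v2 format

    # persistent change for the rest of the session
    ad.settings.remove_unused_categories = False
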
anndata/compat/__init__.py
CHANGED
@@ -190,6 +190,13 @@ else:
 #############################


+NULLABLE_NUMPY_STRING_TYPE = (
+    np.dtype("O")
+    if Version(np.__version__) < Version("2")
+    else np.dtypes.StringDType(na_object=pd.NA)
+)
+
+
 @singledispatch
 def _read_attr(attrs: Mapping, name: str, default: Any | None = Empty):
     if default is Empty:
@@ -404,10 +411,3 @@ def _map_cat_to_str(cat: pd.Categorical) -> pd.Categorical:
         return cat.map(str, na_action="ignore")
     else:
         return cat.map(str)
-
-
-NULLABLE_NUMPY_STRING_TYPE = (
-    np.dtype("O")
-    if Version(np.__version__) < Version("2")
-    else np.dtypes.StringDType(na_object=pd.NA)
-)

anndata/experimental/backed/_lazy_arrays.py
CHANGED
@@ -3,19 +3,21 @@ from __future__ import annotations
 from functools import cached_property
 from typing import TYPE_CHECKING, Generic, TypeVar

+import numpy as np
 import pandas as pd

 from anndata._core.index import _subset
 from anndata._core.views import as_view
 from anndata._io.specs.lazy_methods import get_chunksize
-from anndata.compat import H5Array, ZarrArray

 from ..._settings import settings
 from ...compat import (
     NULLABLE_NUMPY_STRING_TYPE,
+    H5Array,
     XBackendArray,
     XDataArray,
     XZarrArrayWrapper,
+    ZarrArray,
 )
 from ...compat import xarray as xr

@@ -23,8 +25,6 @@ if TYPE_CHECKING:
     from pathlib import Path
     from typing import Literal

-    import numpy as np
-
     from anndata._core.index import Index
     from anndata.compat import ZarrGroup

@@ -45,12 +45,34 @@ class ZarrOrHDF5Wrapper(XZarrArrayWrapper, Generic[K]):
     def __getitem__(self, key: xr.core.indexing.ExplicitIndexer):
         if isinstance(self._array, ZarrArray):
             return super().__getitem__(key)
-
+        res = xr.core.indexing.explicit_indexing_adapter(
             key,
             self.shape,
             xr.core.indexing.IndexingSupport.OUTER_1VECTOR,
-
+            self._getitem,
         )
+        return res
+
+    def _getitem(self, key: tuple[int | np.integer | slice | np.ndarray]):
+        if not isinstance(key, tuple):
+            msg = f"`xr.core.indexing.explicit_indexing_adapter` should have produced a tuple, got {type(key)} instead"
+            raise ValueError(msg)
+        if (n_key_dims := len(key)) != 1:
+            msg = f"Backed arrays currently only supported in 1d, got {n_key_dims} dims"
+            raise ValueError(msg)
+        key = key[0]
+        # See https://github.com/h5py/h5py/issues/293 for why we need to convert.
+        # See https://github.com/pydata/xarray/blob/fa03b5b4ae95a366f6de5b60f5cc4eb801cd51ec/xarray/core/indexing.py#L1259-L1263
+        # for why we can expect sorted/deduped indexers (which are needed for hdf5).
+        if (
+            isinstance(key, np.ndarray)
+            and np.issubdtype(key.dtype, np.integer)
+            and isinstance(self._array, H5Array)
+        ):
+            key_mask = np.zeros(self._array.shape).astype("bool")
+            key_mask[key] = True
+            return self._array[key_mask]
+        return self._array[key]


 class CategoricalArray(XBackendArray, Generic[K]):

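The comments added to _getitem explain the h5py constraint being worked around: integer fancy indexing on an HDF5 dataset must be sorted and duplicate-free, so the adapter converts integer keys into a boolean mask. A minimal sketch of why that conversion is equivalent for the sorted, de-duplicated keys xarray hands over (the helper name here is illustrative):

    import numpy as np

    def as_boolean_mask(key: np.ndarray, length: int) -> np.ndarray:
        mask = np.zeros(length, dtype=bool)
        mask[key] = True
        return mask

    data = np.arange(10)
    key = np.array([0, 2, 5])  # sorted, unique integer indexer
    assert np.array_equal(data[as_boolean_mask(key, data.size)], data[key])
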
anndata/tests/helpers.py
CHANGED
@@ -17,7 +17,6 @@ import pytest
 from pandas.api.types import is_numeric_dtype
 from scipy import sparse

-import anndata
 from anndata import AnnData, ExperimentalFeatureWarning, Raw
 from anndata._core.aligned_mapping import AlignedMappingBase
 from anndata._core.sparse_dataset import BaseCompressedSparseDataset
@@ -413,10 +412,6 @@ def gen_adata(  # noqa: PLR0913
         awkward_ragged=gen_awkward((12, None, None)),
         # U_recarray=gen_vstr_recarray(N, 5, "U4")
     )
-    # https://github.com/zarr-developers/zarr-python/issues/2134
-    # zarr v3 on-disk does not write structured dtypes
-    if anndata.settings.zarr_write_format == 3:
-        del uns["O_recarray"]
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", ExperimentalFeatureWarning)
         adata = AnnData(
@@ -1088,11 +1083,17 @@ class AccessTrackingStoreBase(LocalStore):
     _accessed_keys: defaultdict[str, list[str]]

     def __init__(self, *args, **kwargs):
+        # Needed for zarr v3 to prevent a read-only copy being made
+        # https://github.com/zarr-developers/zarr-python/pull/3156
+        if not is_zarr_v2() and "read_only" not in kwargs:
+            kwargs["read_only"] = True
         super().__init__(*args, **kwargs)
         self._access_count = Counter()
         self._accessed = defaultdict(set)
         self._accessed_keys = defaultdict(list)

+        self._read_only = True
+
     def _check_and_track_key(self, key: str):
         for tracked in self._access_count:
             if tracked in key:
@@ -1147,26 +1148,9 @@ if is_zarr_v2():
 else:

     class AccessTrackingStore(AccessTrackingStoreBase):
-
-
-            key: str,
-            prototype: BufferPrototype | None = None,
-            byte_range: ByteRequest | None = None,
-        ) -> object:
-            self._check_and_track_key(key)
-            return await super().get(key, prototype=prototype, byte_range=byte_range)
-
-
-    if is_zarr_v2():
-
-        class AccessTrackingStore(AccessTrackingStoreBase):
-            def __getitem__(self, key: str) -> bytes:
-                self._check_and_track_key(key)
-                return super().__getitem__(key)
+        def __init__(self, *args, **kwargs):
+            super().__init__(*args, **kwargs, read_only=True)

-    else:
-
-        class AccessTrackingStore(AccessTrackingStoreBase):
         async def get(
             self,
             key: str,

anndata/utils.py
CHANGED
@@ -266,10 +266,9 @@ def make_index_unique(index: pd.Index, join: str = "-"):

     if issue_interpretation_warning:
         msg = (
-            f"Suffix used ({join}[0-9]+) to deduplicate index values may make index "
-            "
-            "
-            "`join={delimiter}`"
+            f"Suffix used ({join}[0-9]+) to deduplicate index values may make index values difficult to interpret. "
+            "There values with a similar suffixes in the index. "
+            "Consider using a different delimiter by passing `join={delimiter}`. "
             "Example key collisions generated by the make_index_unique algorithm: "
             f"{example_colliding_values}"
         )

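The reworded warning describes the case where the deduplication suffix can collide with values already present in the index. A short illustrative call (output elided; the point is the collision scenario and the suggested fix of choosing another delimiter):

    import pandas as pd
    from anndata.utils import make_index_unique

    # "cell" is duplicated and a value ending in the default "-<int>" suffix
    # already exists, so generated suffixes can collide with existing names.
    idx = pd.Index(["cell", "cell", "cell-1"])
    print(list(make_index_unique(idx, join="-")))

    # Using a different delimiter, as the warning suggests, avoids the collision.
    print(list(make_index_unique(idx, join="_")))
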
{anndata-0.12.0rc4.dist-info → anndata-0.12.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: anndata
-Version: 0.12.
+Version: 0.12.1
 Summary: Annotated data.
 Project-URL: Documentation, https://anndata.readthedocs.io/
 Project-URL: Source, https://github.com/scverse/anndata
@@ -28,11 +28,11 @@ Requires-Dist: array-api-compat>=1.7.1
 Requires-Dist: h5py>=3.8
 Requires-Dist: legacy-api-wrap
 Requires-Dist: natsort
-Requires-Dist: numpy>=1.
+Requires-Dist: numpy>=1.26
 Requires-Dist: packaging>=24.2
-Requires-Dist: pandas!=2.1.
-Requires-Dist: scipy>=1.
-Requires-Dist: zarr!=3.0
+Requires-Dist: pandas!=2.1.2,>=2.1.0
+Requires-Dist: scipy>=1.12
+Requires-Dist: zarr!=3.0.*,>=2.18.7
 Provides-Extra: cu11
 Requires-Dist: cupy-cuda11x; extra == 'cu11'
 Provides-Extra: cu12
@@ -74,13 +74,12 @@ Requires-Dist: boltons; extra == 'test'
 Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,<2025.2.0,>=2023.5.1; extra == 'test'
 Requires-Dist: dask[distributed]; extra == 'test'
 Requires-Dist: filelock; extra == 'test'
-Requires-Dist: httpx; extra == 'test'
+Requires-Dist: httpx<1.0; extra == 'test'
 Requires-Dist: joblib; extra == 'test'
 Requires-Dist: loompy>=3.0.5; extra == 'test'
 Requires-Dist: matplotlib; extra == 'test'
 Requires-Dist: openpyxl; extra == 'test'
-Requires-Dist:
-Requires-Dist: pyarrow; extra == 'test'
+Requires-Dist: pyarrow<21; extra == 'test'
 Requires-Dist: pytest-cov; extra == 'test'
 Requires-Dist: pytest-memray; extra == 'test'
 Requires-Dist: pytest-mock; extra == 'test'
@@ -97,12 +96,12 @@ Requires-Dist: boltons; extra == 'test-min'
 Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,<2025.2.0,>=2023.5.1; extra == 'test-min'
 Requires-Dist: dask[distributed]; extra == 'test-min'
 Requires-Dist: filelock; extra == 'test-min'
-Requires-Dist: httpx; extra == 'test-min'
+Requires-Dist: httpx<1.0; extra == 'test-min'
 Requires-Dist: joblib; extra == 'test-min'
 Requires-Dist: loompy>=3.0.5; extra == 'test-min'
 Requires-Dist: matplotlib; extra == 'test-min'
 Requires-Dist: openpyxl; extra == 'test-min'
-Requires-Dist: pyarrow; extra == 'test-min'
+Requires-Dist: pyarrow<21; extra == 'test-min'
 Requires-Dist: pytest-cov; extra == 'test-min'
 Requires-Dist: pytest-memray; extra == 'test-min'
 Requires-Dist: pytest-mock; extra == 'test-min'

{anndata-0.12.0rc4.dist-info → anndata-0.12.1.dist-info}/RECORD
CHANGED
@@ -1,5 +1,6 @@
 anndata/__init__.py,sha256=GdrXtUOgciN34rNOl3CxpzvoBgl0yERqrDLWjlwF3RI,1468
-anndata/_settings.py,sha256=
+anndata/_settings.py,sha256=Le3ysSfuG0vuYCtnUN3DV4HOnyiGgC-k9Gv0JShosSs,15654
+anndata/_settings.pyi,sha256=KUJEheJy1zvCBN2NvNEXAGcjLPUQcdhk0STIcU4mM4I,1545
 anndata/_types.py,sha256=c71REP9wS7Vz2cYrNxuNjPYdnq8MJ5g04MNrSi85ATA,5427
 anndata/_version.py,sha256=qsRPKvJAzUrnf49DHZk-FYfXKOASg5nJV2qomuOttX0,2160
 anndata/_warnings.py,sha256=iFXa9EzPyuPbzRAzoG04oTXAyjnXhQa5zxAMZdsGLwM,702
@@ -8,50 +9,50 @@ anndata/io.py,sha256=DrIo-FU6qbrdk5aVKoUIBoMttZaO5QWP4bowS9xaebI,698
 anndata/logging.py,sha256=E6nlPl-Em0yBjL5p-EcQFmhHTIUirhnZbfXbQtSVSek,1662
 anndata/types.py,sha256=FF3wDkntl6Jq35l0r_kEET33ljj9L7pmIrUr5-MLAvE,698
 anndata/typing.py,sha256=sRiAg16asjnKyXk1L4BtKWggyHMPLoxXzxTDmX3i7MY,1555
-anndata/utils.py,sha256=
+anndata/utils.py,sha256=D4t_iQdTPeHukN4H7no0QZTIzWzclMYsWAHSBpubvCg,14758
 anndata/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 anndata/_core/access.py,sha256=pts7fGUKgGZANSsu_qAA7L10qHM-jT1zIehbl3441OY,873
-anndata/_core/aligned_df.py,sha256=
+anndata/_core/aligned_df.py,sha256=MrGdi1zNZZlkkv4IeS2yY-R5ldXpchTlMlJK7PKRa7A,3833
 anndata/_core/aligned_mapping.py,sha256=BYU1jslMWIhtFTtUMaXY8ZCyt0J4_ZsJTmj6J2yAXTQ,14257
-anndata/_core/anndata.py,sha256=
+anndata/_core/anndata.py,sha256=BADYc8TpS6yc1-bw5sh5cPzB4v63O5ZH0YFFChKO7TY,77747
 anndata/_core/extensions.py,sha256=9Rsho6qnr3PJHULrYGiZHCBinBZYJK6zyf3cFsl_gBY,10425
 anndata/_core/file_backing.py,sha256=kT71R_kZp_CiHImBK8IaZXsvYVtbX2Mg-7L2ldAWojM,5113
 anndata/_core/index.py,sha256=lyVuDfKvEeQYpgKjDsnuZNt4k4wV3adFowO_RrcohpE,9353
-anndata/_core/merge.py,sha256=
+anndata/_core/merge.py,sha256=v0PxVs49KUZx6X252EtGt7XUHbO3mytTPK_o3Vd1nuo,60302
 anndata/_core/raw.py,sha256=EfTLoizP_mLtfYn0BR1Rpya0iZoGF4CupojWyeLGWpc,7797
-anndata/_core/sparse_dataset.py,sha256=
+anndata/_core/sparse_dataset.py,sha256=syXVDdQsEPSBvEi4eyBiAxPiq2TSk7qGStP8QSBTwBg,26801
 anndata/_core/storage.py,sha256=mHzqp7YBJ-rGQFulMAx__D-Z7y4omHPyb1cP7YxfbFE,2555
 anndata/_core/views.py,sha256=rbmI7P4dEcefhjons2l42H9D509xhxyXKTeyWiIBdtw,14657
-anndata/_core/xarray.py,sha256=
+anndata/_core/xarray.py,sha256=pvAMSxrHmuz2d-xbalYmuNi56GrLPw_2Ehko8nss9Xg,16157
 anndata/_io/__init__.py,sha256=GTNeUZ8d8aA3sK4P33tyljIc60KapLbkqBC6J1y3l9U,346
-anndata/_io/h5ad.py,sha256=
-anndata/_io/read.py,sha256=
+anndata/_io/h5ad.py,sha256=ekfuxldN6LuhR3b1aXJKvjfivOBlRP_A4rhEqvej-8Q,13633
+anndata/_io/read.py,sha256=oc8Af3r9ieh0-SFN3GKRIFxenijrbng55-Ds-WUhbdE,15691
 anndata/_io/utils.py,sha256=dB2RRaWm9V-CMGI46KhqpmzBGA6LLBnsJc83Q85OcgM,9613
 anndata/_io/write.py,sha256=r55w6yPIIuUSLW9wyYL8GnkzHHQdAxy6xiCEw9cAC38,4811
-anndata/_io/zarr.py,sha256=
+anndata/_io/zarr.py,sha256=6ejnp9VNsxSihSd3HXAv2uVkrv7dRnuox4Jt1Y0yyGY,5261
 anndata/_io/specs/__init__.py,sha256=Z6l8xqa7B480U3pqrNIg4-fhUvpBW85w4xA3i3maAUM,427
-anndata/_io/specs/lazy_methods.py,sha256=
-anndata/_io/specs/methods.py,sha256=
-anndata/_io/specs/registry.py,sha256=
-anndata/compat/__init__.py,sha256=
+anndata/_io/specs/lazy_methods.py,sha256=64rr1C83aaiN-iO7m1R03dqd6TbiJQbhzOQI5lXHD0o,12270
+anndata/_io/specs/methods.py,sha256=qRRZfz4pqjaxRrkhiYiZe9_mXuWjAj4OLhbL-CO_VSM,46485
+anndata/_io/specs/registry.py,sha256=hno-mvefhcDHmACrBjcKmlf5T87FPoe-N1Gd3i9otYo,17459
+anndata/compat/__init__.py,sha256=FZZbV36BG0L0v-N5IXQbrCa5jNgBRsFY44vmdG-2y_4,12227
 anndata/experimental/__init__.py,sha256=polIxriEkby0iEqw-IXkUzp8k0wp92BpYY4zl4BsHH0,1648
 anndata/experimental/_dispatch_io.py,sha256=JzH8Uvewabc1gIF3L16RZnM9m2NAG28bQIQ57uP097k,1869
 anndata/experimental/merge.py,sha256=Y18C1AT9R1IF60Y7YxyprJ1hz-ySNb2okXHA6IR6fCU,21914
 anndata/experimental/backed/__init__.py,sha256=4dc9M_-_SlfUidDrbWt8PRyD_8bYjypHJ86IpdThHus,230
 anndata/experimental/backed/_compat.py,sha256=rM7CnSJEZCko5wPBFRfvZA9ZKUSpaOVcWFy5u09p1go,519
 anndata/experimental/backed/_io.py,sha256=7bFzn3h8ut49NzppUvsqAX1gjXxAVCFK55Ln0XWzZdY,5965
-anndata/experimental/backed/_lazy_arrays.py,sha256=
+anndata/experimental/backed/_lazy_arrays.py,sha256=FR-ZPCOhjPaha278KkhMuYYPYx4T_5IOxtjX3XYjDC4,7424
 anndata/experimental/multi_files/__init__.py,sha256=T7iNLlRbe-KnLT3o7Tb7_nE4Iy_hLkG66UjBOvj2Bj8,107
 anndata/experimental/multi_files/_anncollection.py,sha256=RQ79_7r8sdV_kx66UJUuY9ZgcdM92NxplMEDQUrZXvM,35259
 anndata/experimental/pytorch/__init__.py,sha256=4CkgrahLO8Kc-s2bmv6lVQfDxbO3IUyV0v4ygBDkttY,95
 anndata/experimental/pytorch/_annloader.py,sha256=7mpsFV5vBfxKIje1cPjahtDZ5afkU-H663XB4FJhmok,8075
 anndata/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-anndata/tests/helpers.py,sha256=
+anndata/tests/helpers.py,sha256=yVFdqB2PDDw8jNOkKAo-6VR7vk8yUlbafZbtDyibB8c,35111
 testing/anndata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 testing/anndata/_doctest.py,sha256=Qew0N0zLLNiPKN1CLunqY5cTinFLaEhY5GagiYfm6KI,344
-testing/anndata/_pytest.py,sha256=
+testing/anndata/_pytest.py,sha256=a1Qn5KR5mjCcQI05negkMEv0e2aJX2P_teyl0UMfoeQ,4160
 testing/anndata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-anndata-0.12.
-anndata-0.12.
-anndata-0.12.
-anndata-0.12.
+anndata-0.12.1.dist-info/METADATA,sha256=ah9uvJCDn4h1tspyYlcwiSC7mGBhwWHVs6irKQ8LzXY,9643
+anndata-0.12.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+anndata-0.12.1.dist-info/licenses/LICENSE,sha256=VcrXoEVMhtNuvMvKYGP-I5lMT8qZ_6dFf22fsL180qA,1575
+anndata-0.12.1.dist-info/RECORD,,

testing/anndata/_pytest.py
CHANGED
@@ -93,6 +93,10 @@ def pytest_addoption(parser: pytest.Parser) -> None:
 def pytest_collection_modifyitems(
     session: pytest.Session, config: pytest.Config, items: Iterable[pytest.Item]
 ):
+    for item in items:
+        if "zarr" in item.name:
+            item.add_marker("zarr_io")
+
     if not config.getoption("--strict-warnings"):
         return

{anndata-0.12.0rc4.dist-info → anndata-0.12.1.dist-info}/WHEEL
File without changes
{anndata-0.12.0rc4.dist-info → anndata-0.12.1.dist-info}/licenses/LICENSE
File without changes