anndata 0.12.2__py3-none-any.whl → 0.12.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- anndata/__init__.py +23 -18
- anndata/_core/anndata.py +8 -7
- anndata/_core/merge.py +28 -39
- anndata/_core/sparse_dataset.py +8 -8
- anndata/_core/views.py +1 -1
- anndata/_io/h5ad.py +18 -27
- anndata/_io/specs/lazy_methods.py +1 -1
- anndata/_io/specs/methods.py +33 -40
- anndata/_io/specs/registry.py +17 -20
- anndata/_io/utils.py +2 -7
- anndata/_io/zarr.py +16 -7
- anndata/_settings.py +8 -0
- anndata/_settings.pyi +1 -0
- anndata/compat/__init__.py +3 -11
- anndata/experimental/merge.py +86 -50
- anndata/experimental/multi_files/_anncollection.py +2 -2
- {anndata-0.12.2.dist-info → anndata-0.12.3.dist-info}/METADATA +6 -7
- {anndata-0.12.2.dist-info → anndata-0.12.3.dist-info}/RECORD +21 -22
- testing/anndata/_pytest.py +2 -6
- anndata/_version.py +0 -62
- {anndata-0.12.2.dist-info → anndata-0.12.3.dist-info}/WHEEL +0 -0
- {anndata-0.12.2.dist-info → anndata-0.12.3.dist-info}/licenses/LICENSE +0 -0
anndata/__init__.py
CHANGED
@@ -12,7 +12,6 @@ from ._core.extensions import register_anndata_namespace
 from ._core.merge import concat
 from ._core.raw import Raw
 from ._settings import settings
-from ._version import __version__
 from ._warnings import (
     ExperimentalFeatureWarning,
     ImplicitModificationWarning,
@@ -28,22 +27,6 @@ from . import abc, experimental, typing, io, types  # isort: skip
 # We use these in tests by attribute access
 from . import logging  # noqa: F401  # isort: skip

-_DEPRECATED_IO = (
-    "read_loom",
-    "read_hdf",
-    "read_excel",
-    "read_umi_tools",
-    "read_csv",
-    "read_text",
-    "read_mtx",
-)
-_DEPRECATED = {method: f"io.{method}" for method in _DEPRECATED_IO}
-
-
-def __getattr__(attr_name: str) -> Any:
-    return module_get_attr_redirect(attr_name, deprecated_mapping=_DEPRECATED)
-
-
 __all__ = [
     "AnnData",
     "ExperimentalFeatureWarning",
@@ -51,7 +34,6 @@ __all__ = [
     "OldFormatWarning",
     "Raw",
     "WriteWarning",
-    "__version__",
     "abc",
     "concat",
     "experimental",
@@ -63,3 +45,26 @@ __all__ = [
     "types",
     "typing",
 ]
+
+_DEPRECATED_IO = (
+    "read_loom",
+    "read_hdf",
+    "read_excel",
+    "read_umi_tools",
+    "read_csv",
+    "read_text",
+    "read_mtx",
+)
+_DEPRECATED = {method: f"io.{method}" for method in _DEPRECATED_IO}
+
+
+def __getattr__(attr_name: str) -> Any:
+    if attr_name == "__version__":
+        import warnings
+        from importlib.metadata import version
+
+        msg = "`__version__` is deprecated, use `importlib.metadata.version('anndata')` instead."
+        warnings.warn(msg, FutureWarning, stacklevel=2)
+        return version("anndata")
+
+    return module_get_attr_redirect(attr_name, deprecated_mapping=_DEPRECATED)
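Migration note for the change above: `anndata.__version__` is now resolved lazily through the module-level `__getattr__` and warns. A minimal sketch of the old and new ways to read the version (the printed value is illustrative):

from importlib.metadata import version

print(version("anndata"))   # preferred replacement, e.g. "0.12.3"

import anndata

print(anndata.__version__)  # still works in 0.12.3, but now emits a FutureWarning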
anndata/_core/anndata.py
CHANGED
@@ -42,11 +42,7 @@ from .index import _normalize_indices, _subset, get_vector
 from .raw import Raw
 from .sparse_dataset import BaseCompressedSparseDataset, sparse_dataset
 from .storage import coerce_array
-from .views import (
-    DictView,
-    _resolve_idxs,
-    as_view,
-)
+from .views import DictView, _resolve_idxs, as_view
 from .xarray import Dataset2D

 if TYPE_CHECKING:
@@ -940,22 +936,27 @@ class AnnData(metaclass=utils.DeprecationMixinMeta):  # noqa: PLW1641
     Is sliced with `data` and `var` but behaves otherwise like a :term:`mapping`.
     """

+    @deprecated("obs (e.g. `k in adata.obs` or `str(adata.obs.columns.tolist())`)")
     def obs_keys(self) -> list[str]:
         """List keys of observation annotation :attr:`obs`."""
         return self._obs.keys().tolist()

+    @deprecated("var (e.g. `k in adata.var` or `str(adata.var.columns.tolist())`)")
     def var_keys(self) -> list[str]:
         """List keys of variable annotation :attr:`var`."""
         return self._var.keys().tolist()

+    @deprecated("obsm (e.g. `k in adata.obsm` or `adata.obsm.keys() | {'u'}`)")
     def obsm_keys(self) -> list[str]:
         """List keys of observation annotation :attr:`obsm`."""
         return list(self.obsm.keys())

+    @deprecated("varm (e.g. `k in adata.varm` or `adata.varm.keys() | {'u'}`)")
     def varm_keys(self) -> list[str]:
         """List keys of variable annotation :attr:`varm`."""
         return list(self.varm.keys())

+    @deprecated("uns (e.g. `k in adata.uns` or `sorted(adata.uns)`)")
     def uns_keys(self) -> list[str]:
         """List keys of unstructured annotation."""
         return sorted(self._uns.keys())
@@ -1907,8 +1908,8 @@ class AnnData(metaclass=utils.DeprecationMixinMeta):  # noqa: PLW1641
             compression_opts=compression_opts,
             as_dense=as_dense,
         )
-
-        if self.isbacked:
+        # Only reset the filename if the AnnData object now points to a complete new copy
+        if self.isbacked and not self.is_view:
             self.file.filename = filename

     write = write_h5ad  # a shortcut and backwards compat
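The `*_keys()` helpers are now deprecated, and each `@deprecated` message points at the mapping-style replacement. A short sketch of the suggested migration; the `adata` object here is a toy example, not taken from the diff:

import anndata as ad
import numpy as np
import pandas as pd

adata = ad.AnnData(
    X=np.zeros((3, 2)),
    obs=pd.DataFrame({"group": list("abc")}, index=[f"cell{i}" for i in range(3)]),
)

# Deprecated:  adata.obs_keys()
# Replacement: membership tests and column listings on the mapping itself.
assert "group" in adata.obs
assert adata.obs.columns.tolist() == ["group"]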
anndata/_core/merge.py
CHANGED
@@ -14,9 +14,7 @@ from warnings import warn

 import numpy as np
 import pandas as pd
-import scipy
 from natsort import natsorted
-from packaging.version import Version
 from scipy import sparse

 from anndata._core.file_backing import to_memory
@@ -30,7 +28,6 @@ from ..compat import (
     CupyCSRMatrix,
     CupySparseMatrix,
     DaskArray,
-    _map_cat_to_str,
 )
 from ..utils import asarray, axis_len, warn_once
 from .anndata import AnnData
@@ -146,11 +143,16 @@ def equal_dask_array(a, b) -> bool:
         return False
     if isinstance(b, DaskArray) and tokenize(a) == tokenize(b):
         return True
-    if isinstance(a._meta, CupySparseMatrix | CSMatrix | CSArray):
+    if isinstance(a._meta, np.ndarray):
+        return da.equal(a, b, where=~(da.isnan(a) & da.isnan(b))).all().compute()
+    if a.chunksize == b.chunksize and isinstance(
+        a._meta, CupySparseMatrix | CSMatrix | CSArray
+    ):
         # TODO: Maybe also do this in the other case?
         return da.map_blocks(equal, a, b, drop_axis=(0, 1)).all()
-
-    return da.equal(a, b, where=~(da.isnan(a) & da.isnan(b))).all()
+    msg = "Misaligned chunks detected when checking for merge equality of dask arrays. Reading full arrays into memory."
+    warn(msg, UserWarning, stacklevel=3)
+    return equal(a.compute(), b.compute())


 @equal.register(np.ndarray)
@@ -185,15 +187,6 @@ def equal_sparse(a, b) -> bool:
         # Comparison broken for CSC matrices
         # https://github.com/cupy/cupy/issues/7757
         a, b = CupyCSRMatrix(a), CupyCSRMatrix(b)
-    if Version(scipy.__version__) >= Version("1.16.0rc1"):
-        # TODO: https://github.com/scipy/scipy/issues/23068
-        return bool(
-            a.format == b.format
-            and (a.shape == b.shape)
-            and np.all(a.indptr == b.indptr)
-            and np.all(a.indices == b.indices)
-            and np.all((a.data == b.data) | (np.isnan(a.data) & np.isnan(b.data)))
-        )
     comp = a != b
     if isinstance(comp, bool):
         return not comp
@@ -617,6 +610,9 @@ class Reindexer:
         sub_el = _subset(el, make_slice(indexer, axis, len(shape)))

         if any(indexer == -1):
+            # TODO: Remove this condition once https://github.com/dask/dask/pull/12078 is released
+            if isinstance(sub_el._meta, CSArray | CSMatrix) and np.isscalar(fill_value):
+                fill_value = np.array([[fill_value]])
             sub_el[make_slice(indexer == -1, axis, len(shape))] = fill_value

         return sub_el
@@ -1643,7 +1639,7 @@ def concat(  # noqa: PLR0912, PLR0913, PLR0915
     )
     if index_unique is not None:
         concat_indices = concat_indices.str.cat(
-            _map_cat_to_str(label_col), sep=index_unique
+            label_col.map(str, na_action="ignore"), sep=index_unique
         )
     concat_indices = pd.Index(concat_indices)
@@ -1748,15 +1744,10 @@ def concat(  # noqa: PLR0912, PLR0913, PLR0915
             for r, a in zip(reindexers, adatas, strict=True)
         ],
     )
-    alt_pairwise = merge(
-        [
-            {
-                k: r(r(v, axis=0), axis=1)
-                for k, v in getattr(a, f"{alt_axis_name}p").items()
-            }
-            for r, a in zip(reindexers, adatas, strict=True)
-        ]
-    )
+    alt_pairwise = merge([
+        {k: r(r(v, axis=0), axis=1) for k, v in getattr(a, f"{alt_axis_name}p").items()}
+        for r, a in zip(reindexers, adatas, strict=True)
+    ])
     uns = uns_merge([a.uns for a in adatas])

     raw = None
@@ -1785,17 +1776,15 @@ def concat(  # noqa: PLR0912, PLR0913, PLR0915
             "not concatenating `.raw` attributes."
         )
         warn(msg, UserWarning, stacklevel=2)
-    return AnnData(
-        **{
-            "X": X,
-            "layers": layers,
-            axis_name: concat_annot,
-            alt_axis_name: alt_annot,
-            f"{axis_name}m": concat_mapping,
-            f"{alt_axis_name}m": alt_mapping,
-            f"{axis_name}p": concat_pairwise,
-            f"{alt_axis_name}p": alt_pairwise,
-            "uns": uns,
-            "raw": raw,
-        }
-    )
+    return AnnData(**{
+        "X": X,
+        "layers": layers,
+        axis_name: concat_annot,
+        alt_axis_name: alt_annot,
+        f"{axis_name}m": concat_mapping,
+        f"{alt_axis_name}m": alt_mapping,
+        f"{axis_name}p": concat_pairwise,
+        f"{alt_axis_name}p": alt_pairwise,
+        "uns": uns,
+        "raw": raw,
+    })
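The reworked `equal_dask_array` only compares block-wise when both arrays share a chunk layout, and otherwise warns and materializes both sides. A minimal sketch of why alignment matters, using plain dask rather than anndata's internal `equal`:

import dask.array as da
import numpy as np

x = da.from_array(np.arange(12).reshape(3, 4), chunks=(3, 2))
y = da.from_array(np.arange(12).reshape(3, 4), chunks=(3, 4))

# Same values, different chunking: block-wise comparison would pair up
# differently shaped blocks, so check the chunk layout first and fall
# back to loading into memory on mismatch (as the code above now does).
if x.chunksize == y.chunksize:
    same = bool((x == y).all().compute())
else:
    same = bool(np.array_equal(x.compute(), y.compute()))
print(same)  # True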
anndata/_core/sparse_dataset.py
CHANGED
@@ -16,6 +16,7 @@ import warnings
 from abc import ABC
 from collections.abc import Iterable
 from functools import cached_property
+from importlib.metadata import version
 from itertools import accumulate, chain, pairwise
 from math import floor
 from pathlib import Path
@@ -23,7 +24,6 @@ from typing import TYPE_CHECKING, NamedTuple

 import h5py
 import numpy as np
-import scipy
 import scipy.sparse as ss
 from packaging.version import Version
 from scipy.sparse import _sparsetools
@@ -54,7 +54,7 @@ else:
     from scipy.sparse import spmatrix as _cs_matrix


-SCIPY_1_15 = Version(scipy.__version__) >= Version("1.15rc0")
+SCIPY_1_15 = Version(version("scipy")) >= Version("1.15rc0")


 class BackedFormat(NamedTuple):
@@ -278,9 +278,9 @@ def get_compressed_vectors(
     indptr_slices = [slice(*(x.indptr[i : i + 2])) for i in row_idxs]
     # HDF5 cannot handle out-of-order integer indexing
     if isinstance(x.data, ZarrArray):
-        as_np_indptr = np.concatenate(
-            [np.arange(s.start, s.stop) for s in indptr_slices]
-        )
+        as_np_indptr = np.concatenate([
+            np.arange(s.start, s.stop) for s in indptr_slices
+        ])
         data = x.data[as_np_indptr]
         indices = x.indices[as_np_indptr]
     else:
@@ -309,9 +309,9 @@ def get_compressed_vectors_for_slices(
     start_indptr = indptr_indices[0] - next(offsets)
     if len(slices) < 2:  # there is only one slice so no need to concatenate
         return data, indices, start_indptr
-    end_indptr = np.concatenate(
-        [s[1:] - o for s, o in zip(indptr_indices[1:], offsets, strict=True)]
-    )
+    end_indptr = np.concatenate([
+        s[1:] - o for s, o in zip(indptr_indices[1:], offsets, strict=True)
+    ])
     indptr = np.concatenate([start_indptr, end_indptr])
     return data, indices, indptr
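This release repeatedly swaps `module.__version__` sniffing for distribution metadata. A standalone sketch of the pattern used for `SCIPY_1_15` above; it reads the installed version from package metadata rather than the imported module:

from importlib.metadata import version

from packaging.version import Version

SCIPY_1_15 = Version(version("scipy")) >= Version("1.15rc0")
print(SCIPY_1_15)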
anndata/_core/views.py
CHANGED
@@ -100,7 +100,7 @@ class _ViewMixin(_SetItemMixin):

     # TODO: This makes `deepcopy(obj)` return `obj._view_args.parent._adata_ref`, fix it
     def __deepcopy__(self, memo):
-        parent, attrname, keys = self._view_args
+        parent, attrname, _keys = self._view_args
         return deepcopy(getattr(parent._adata_ref, attrname))
anndata/_io/h5ad.py
CHANGED
@@ -27,7 +27,6 @@ from ..experimental import read_dispatched
 from .specs import read_elem, write_elem
 from .specs.registry import IOSpec, write_spec
 from .utils import (
-    H5PY_V3,
     _read_legacy_raw,
     idx_chunks_along_axis,
     no_write_dataset_2d,
@@ -264,15 +263,13 @@ def read_h5ad(

     def callback(func, elem_name: str, elem, iospec):
         if iospec.encoding_type == "anndata" or elem_name.endswith("/"):
-            return AnnData(
-                **{
-                    # This is covering up backwards compat in the anndata initializer
-                    # In most cases we should be able to call `func(elen[k])` instead
-                    k: read_dispatched(elem[k], callback)
-                    for k in elem
-                    if not k.startswith("raw.")
-                }
-            )
+            return AnnData(**{
+                # This is covering up backwards compat in the anndata initializer
+                # In most cases we should be able to call `func(elen[k])` instead
+                k: read_dispatched(elem[k], callback)
+                for k in elem
+                if not k.startswith("raw.")
+            })
         elif elem_name.startswith("/raw."):
             return None
         elif elem_name == "/X" and "X" in as_sparse:
@@ -326,16 +323,12 @@ def read_dataframe_legacy(dataset: h5py.Dataset) -> pd.DataFrame:
         "Consider rewriting it."
     )
     warn(msg, OldFormatWarning, stacklevel=2)
-    if H5PY_V3:
-        df = pd.DataFrame(
-            _decode_structured_array(
-                _from_fixed_length_strings(dataset[()]), dtype=dataset.dtype
-            )
-        )
-    else:
-        df = pd.DataFrame(_from_fixed_length_strings(dataset[()]))
-    df.set_index(df.columns[0], inplace=True)
-    return df
+    df = pd.DataFrame(
+        _decode_structured_array(
+            _from_fixed_length_strings(dataset[()]), dtype=dataset.dtype
+        )
+    )
+    return df.set_index(df.columns[0])


 def read_dataframe(group: h5py.Group | h5py.Dataset) -> pd.DataFrame:
@@ -348,10 +341,9 @@ def read_dataframe(group: h5py.Group | h5py.Dataset) -> pd.DataFrame:

 @report_read_key_on_error
 def read_dataset(dataset: h5py.Dataset):
-    if H5PY_V3:
-        string_dtype = h5py.check_string_dtype(dataset.dtype)
-        if (string_dtype is not None) and (string_dtype.encoding == "utf-8"):
-            dataset = dataset.asstr()
+    string_dtype = h5py.check_string_dtype(dataset.dtype)
+    if (string_dtype is not None) and (string_dtype.encoding == "utf-8"):
+        dataset = dataset.asstr()
     value = dataset[()]
     if not hasattr(value, "dtype"):
         return value
@@ -364,10 +356,9 @@ def read_dataset(dataset: h5py.Dataset):
         return value[0]
     elif len(value.dtype.descr) > 1:  # Compound dtype
         # For backwards compat, now strings are written as variable length
-        dtype = value.dtype
-        value = _from_fixed_length_strings(value)
-        if H5PY_V3:
-            value = _decode_structured_array(value, dtype=dtype)
+        value = _decode_structured_array(
+            _from_fixed_length_strings(value), dtype=value.dtype
+        )
     if value.shape == ():
         value = value[()]
     return value
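With h5py ≥3 assumed, the `H5PY_V3` flag is gone and UTF-8 string datasets are detected directly. A self-contained sketch of the detection used in `read_dataset` above; the file name is hypothetical:

import h5py
import numpy as np

with h5py.File("example.h5", "w") as f:
    f.create_dataset("names", data=np.array(["a", "b"], dtype=h5py.string_dtype()))

with h5py.File("example.h5", "r") as f:
    dset = f["names"]
    info = h5py.check_string_dtype(dset.dtype)
    if info is not None and info.encoding == "utf-8":
        dset = dset.asstr()  # decode to str on read instead of bytes
    print(dset[()].tolist())  # ['a', 'b']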
anndata/_io/specs/methods.py
CHANGED
@@ -4,6 +4,7 @@ import warnings
 from collections.abc import Mapping
 from copy import copy
 from functools import partial
+from importlib.metadata import version
 from itertools import product
 from types import MappingProxyType
 from typing import TYPE_CHECKING
@@ -21,7 +22,7 @@ from anndata._core import views
 from anndata._core.index import _normalize_indices
 from anndata._core.merge import intersect_keys
 from anndata._core.sparse_dataset import _CSCDataset, _CSRDataset, sparse_dataset
-from anndata._io.utils import H5PY_V3, check_key, zero_dim_array_as_scalar
+from anndata._io.utils import check_key, zero_dim_array_as_scalar
 from anndata._warnings import OldFormatWarning
 from anndata.compat import (
     NULLABLE_NUMPY_STRING_TYPE,
@@ -492,6 +493,7 @@ _REGISTRY.register_write(ZarrGroup, CupyArray, IOSpec("array", "0.2.0"))(
 )


+@_REGISTRY.register_write(ZarrGroup, views.DaskArrayView, IOSpec("array", "0.2.0"))
 @_REGISTRY.register_write(ZarrGroup, DaskArray, IOSpec("array", "0.2.0"))
 def write_basic_dask_zarr(
     f: ZarrGroup,
@@ -514,6 +516,7 @@ def write_basic_dask_zarr(

 # Adding this separately because h5py isn't serializable
 # https://github.com/pydata/xarray/issues/4242
+@_REGISTRY.register_write(H5Group, views.DaskArrayView, IOSpec("array", "0.2.0"))
 @_REGISTRY.register_write(H5Group, DaskArray, IOSpec("array", "0.2.0"))
 def write_basic_dask_h5(
     f: H5Group,
@@ -607,7 +610,7 @@ def write_vlen_string_array_zarr(
     if is_zarr_v2():
         import numcodecs

-        if Version(numcodecs.__version__) < Version("0.13"):
+        if Version(version("numcodecs")) < Version("0.13"):
             msg = "Old numcodecs version detected. Please update for improved performance and stability."
             warnings.warn(msg, UserWarning, stacklevel=2)
         # Workaround for https://github.com/zarr-developers/numcodecs/issues/514
@@ -663,10 +666,9 @@ def _to_hdf5_vlen_strings(value: np.ndarray) -> np.ndarray:
 @_REGISTRY.register_read(ZarrArray, IOSpec("rec-array", "0.2.0"))
 def read_recarray(d: ArrayStorageType, *, _reader: Reader) -> np.recarray | npt.NDArray:
     value = d[()]
-    dtype = value.dtype
-    value = _from_fixed_length_strings(value)
-    if H5PY_V3:
-        value = _decode_structured_array(value, dtype=dtype)
+    value = _decode_structured_array(
+        _from_fixed_length_strings(value), dtype=value.dtype
+    )
     return value


@@ -778,10 +780,10 @@ for store_type, (cls, spec, func) in product(
     _REGISTRY.register_write(store_type, cls, spec)(func)


-@_REGISTRY.register_write(H5Group, _CSRDataset, IOSpec("", "0.1.0"))
-@_REGISTRY.register_write(H5Group, _CSCDataset, IOSpec("", "0.1.0"))
-@_REGISTRY.register_write(ZarrGroup, _CSRDataset, IOSpec("", "0.1.0"))
-@_REGISTRY.register_write(ZarrGroup, _CSCDataset, IOSpec("", "0.1.0"))
+@_REGISTRY.register_write(H5Group, _CSRDataset, IOSpec("csr_matrix", "0.1.0"))
+@_REGISTRY.register_write(H5Group, _CSCDataset, IOSpec("csc_matrix", "0.1.0"))
+@_REGISTRY.register_write(ZarrGroup, _CSRDataset, IOSpec("csr_matrix", "0.1.0"))
+@_REGISTRY.register_write(ZarrGroup, _CSCDataset, IOSpec("csc_matrix", "0.1.0"))
 def write_sparse_dataset(
     f: GroupStorageType,
     k: str,
@@ -798,26 +800,9 @@ def write_sparse_dataset(
         fmt=elem.format,
         dataset_kwargs=dataset_kwargs,
     )
-    # TODO: Cleaner way to do this
-    f[k].attrs["encoding-type"] = f"{elem.format}_matrix"
-    f[k].attrs["encoding-version"] = "0.1.0"


-@_REGISTRY.register_write(ZarrGroup, (DaskArray, CupyArray), IOSpec("array", "0.2.0"))
-@_REGISTRY.register_write(
-    H5Group, (DaskArray, CupyCSRMatrix), IOSpec("csr_matrix", "0.1.0")
-)
-@_REGISTRY.register_write(
-    H5Group, (DaskArray, CupyCSCMatrix), IOSpec("csc_matrix", "0.1.0")
-)
-@_REGISTRY.register_write(
-    ZarrGroup, (DaskArray, CupyCSRMatrix), IOSpec("csr_matrix", "0.1.0")
-)
-@_REGISTRY.register_write(
-    ZarrGroup, (DaskArray, CupyCSCMatrix), IOSpec("csc_matrix", "0.1.0")
-)
-def write_cupy_dask_sparse(f, k, elem, _writer, dataset_kwargs=MappingProxyType({})):
+def write_cupy_dask(f, k, elem, _writer, dataset_kwargs=MappingProxyType({})):
     _writer.write_elem(
         f,
         k,
@@ -826,18 +811,6 @@ def write_cupy_dask_sparse(f, k, elem, _writer, dataset_kwargs=MappingProxyType(
     )


-@_REGISTRY.register_write(
-    H5Group, (DaskArray, sparse.csr_matrix), IOSpec("csr_matrix", "0.1.0")
-)
-@_REGISTRY.register_write(
-    H5Group, (DaskArray, sparse.csc_matrix), IOSpec("csc_matrix", "0.1.0")
-)
-@_REGISTRY.register_write(
-    ZarrGroup, (DaskArray, sparse.csr_matrix), IOSpec("csr_matrix", "0.1.0")
-)
-@_REGISTRY.register_write(
-    ZarrGroup, (DaskArray, sparse.csc_matrix), IOSpec("csc_matrix", "0.1.0")
-)
 def write_dask_sparse(
     f: GroupStorageType,
     k: str,
@@ -886,6 +859,26 @@ def write_dask_sparse(
         disk_mtx.append(elem[chunk_slice(chunk_start, chunk_stop)].compute())


+for array_type, group_type in product(
+    [DaskArray, views.DaskArrayView], [H5Group, ZarrGroup]
+):
+    for cupy_array_type, spec in [
+        (CupyArray, IOSpec("array", "0.2.0")),
+        (CupyCSCMatrix, IOSpec("csc_matrix", "0.1.0")),
+        (CupyCSRMatrix, IOSpec("csr_matrix", "0.1.0")),
+    ]:
+        _REGISTRY.register_write(group_type, (array_type, cupy_array_type), spec)(
+            write_cupy_dask
+        )
+    for scipy_sparse_type, spec in [
+        (sparse.csr_matrix, IOSpec("csr_matrix", "0.1.0")),
+        (sparse.csc_matrix, IOSpec("csc_matrix", "0.1.0")),
+    ]:
+        _REGISTRY.register_write(group_type, (array_type, scipy_sparse_type), spec)(
+            write_dask_sparse
+        )
+
+
 @_REGISTRY.register_read(H5Group, IOSpec("csc_matrix", "0.1.0"))
 @_REGISTRY.register_read(H5Group, IOSpec("csr_matrix", "0.1.0"))
 @_REGISTRY.register_read(ZarrGroup, IOSpec("csc_matrix", "0.1.0"))
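The per-combination decorator stacks above are replaced by a loop over the cartesian product of store and array types. A generic, self-contained sketch of this registration pattern (toy registry, not anndata's `_REGISTRY`):

from itertools import product

registry: dict[tuple[str, str], object] = {}

def register(store: str, kind: str):
    def decorator(func):
        registry[(store, kind)] = func
        return func
    return decorator

def write_sparse(group, key, elem):
    ...  # one implementation shared by every registered combination

# One loop instead of a stack of near-identical decorators:
for store, kind in product(["h5", "zarr"], ["csr", "csc"]):
    register(store, kind)(write_sparse)

print(sorted(registry))  # [('h5', 'csc'), ('h5', 'csr'), ('zarr', 'csc'), ('zarr', 'csr')]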
anndata/_io/specs/registry.py
CHANGED
@@ -9,6 +9,7 @@ from types import MappingProxyType
 from typing import TYPE_CHECKING, Generic, TypeVar

 from anndata._io.utils import report_read_key_on_error, report_write_key_on_error
+from anndata._settings import settings
 from anndata._types import Read, ReadLazy, _ReadInternal, _ReadLazyInternal
 from anndata.compat import DaskArray, ZarrGroup, _read_attr, is_zarr_v2

@@ -240,12 +241,9 @@ def proc_spec_mapping(spec: Mapping[str, str]) -> IOSpec:
 def get_spec(
     elem: StorageType,
 ) -> IOSpec:
-    return proc_spec(
-        {
-            k: _read_attr(elem.attrs, k, "")
-            for k in ["encoding-type", "encoding-version"]
-        }
-    )
+    return proc_spec({
+        k: _read_attr(elem.attrs, k, "") for k in ["encoding-type", "encoding-version"]
+    })


 def _iter_patterns(
@@ -349,10 +347,17 @@ class Writer:

         import h5py

+        from anndata._io.zarr import is_group_consolidated
+
         # we allow stores to have a prefix like /uns which are then written to with keys like /uns/foo
+        is_zarr_group = isinstance(store, ZarrGroup)
         if "/" in k.split(store.name)[-1][1:]:
-            msg = f"Forward slashes are not allowed in keys in {type(store)}"
-            raise ValueError(msg)
+            if is_zarr_group or settings.disallow_forward_slash_in_h5ad:
+                msg = f"Forward slashes are not allowed in keys in {type(store)}"
+                raise ValueError(msg)
+            else:
+                msg = "Forward slashes will be disallowed in h5 stores in the next minor release"
+                warnings.warn(msg, FutureWarning, stacklevel=2)

         if isinstance(store, h5py.File):
             store = store["/"]
@@ -360,19 +365,11 @@ class Writer:
         dest_type = type(store)

         # Normalize k to absolute path
-        if (
-            is_zarr_v2_store := (
-                (is_zarr_store := isinstance(store, ZarrGroup)) and is_zarr_v2()
-            )
-        ) or (isinstance(store, h5py.Group) and not PurePosixPath(k).is_absolute()):
+        if (is_zarr_group and is_zarr_v2()) or (
+            isinstance(store, h5py.Group) and not PurePosixPath(k).is_absolute()
+        ):
             k = str(PurePosixPath(store.name) / k)
-        is_consolidated = False
-        if is_zarr_v2_store:
-            from zarr.storage import ConsolidatedMetadataStore
-
-            is_consolidated = isinstance(store.store, ConsolidatedMetadataStore)
-        elif is_zarr_store:
-            is_consolidated = store.metadata.consolidated_metadata is not None
+        is_consolidated = is_group_consolidated(store) if is_zarr_group else False
         if is_consolidated:
             msg = "Cannot overwrite/edit a store with consolidated metadata"
             raise ValueError(msg)
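Behavioral consequence of the change above: writing an h5ad key containing `/` now warns instead of erroring, unless the new setting is enabled. A hedged sketch of both modes; the file name is hypothetical and the exact warning text is taken from the diff:

import warnings

import anndata as ad
import numpy as np

adata = ad.AnnData(X=np.ones((2, 2)))
adata.uns["a/b"] = 1  # key containing a forward slash

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    adata.write_h5ad("slash.h5ad")  # 0.12.3 default: emits a FutureWarning
print([str(w.message) for w in caught])

try:
    with ad.settings.override(disallow_forward_slash_in_h5ad=True):
        adata.write_h5ad("slash.h5ad")  # opt in to the future behavior
except ValueError as e:
    print(e)  # Forward slashes are not allowed in keys ...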
anndata/_io/utils.py
CHANGED
@@ -1,13 +1,11 @@
 from __future__ import annotations

+from collections.abc import Callable
 from functools import WRAPPER_ASSIGNMENTS, wraps
 from itertools import pairwise
-from typing import TYPE_CHECKING, cast
+from typing import TYPE_CHECKING, Literal, cast
 from warnings import warn

-import h5py
-from packaging.version import Version
-
 from .._core.sparse_dataset import BaseCompressedSparseDataset

 if TYPE_CHECKING:
@@ -21,9 +19,6 @@ if TYPE_CHECKING:

 Storage = StorageType | BaseCompressedSparseDataset

-# For allowing h5py v3
-# https://github.com/scverse/anndata/issues/442
-H5PY_V3 = Version(h5py.__version__).major >= 3

 # -------------------------------------------------------------------------------
 # Type conversion
anndata/_io/zarr.py
CHANGED
@@ -77,13 +77,11 @@ def read_zarr(store: PathLike[str] | str | MutableMapping | zarr.Group) -> AnnData
     # Read with handling for backwards compat
     def callback(func, elem_name: str, elem, iospec):
         if iospec.encoding_type == "anndata" or elem_name.endswith("/"):
-            return AnnData(
-                **{
-                    k: read_dispatched(v, callback)
-                    for k, v in dict(elem).items()
-                    if not k.startswith("raw.")
-                }
-            )
+            return AnnData(**{
+                k: read_dispatched(v, callback)
+                for k, v in dict(elem).items()
+                if not k.startswith("raw.")
+            })
         elif elem_name.startswith("/raw."):
             return None
         elif elem_name in {"/obs", "/var"}:
@@ -155,3 +153,14 @@ def open_write_group(
     if not is_zarr_v2() and "zarr_format" not in kwargs:
         kwargs["zarr_format"] = settings.zarr_write_format
     return zarr.open_group(store, mode=mode, **kwargs)
+
+
+def is_group_consolidated(group: zarr.Group) -> bool:
+    if not isinstance(group, zarr.Group):
+        msg = f"Expected zarr.Group, got {type(group)}"
+        raise TypeError(msg)
+    if is_zarr_v2():
+        from zarr.storage import ConsolidatedMetadataStore
+
+        return isinstance(group.store, ConsolidatedMetadataStore)
+    return group.metadata.consolidated_metadata is not None
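The new `is_group_consolidated` helper centralizes the zarr-v2/v3 split for detecting consolidated metadata, which anndata refuses to overwrite. A short sketch of what "consolidated" means in plain zarr; it assumes zarr-python v3 and a hypothetical local store path:

import zarr

g = zarr.open_group("example.zarr", mode="w")
g.create_group("uns")
zarr.consolidate_metadata("example.zarr")  # writes merged metadata for fast listing

reopened = zarr.open_group("example.zarr", mode="r")
# On zarr v3 this is the check the helper performs:
print(reopened.metadata.consolidated_metadata is not None)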
anndata/_settings.py
CHANGED
@@ -478,6 +478,14 @@ settings.register(
     get_from_env=check_and_get_int,
 )

+settings.register(
+    "disallow_forward_slash_in_h5ad",
+    default_value=False,
+    description="Whether or not to disallow the `/` character in keys for h5ad files",
+    validate=validate_bool,
+    get_from_env=check_and_get_bool,
+)
+

 ##################################################################################
 ##################################################################################
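Like other registered options, the new flag can be toggled in code; registered options are also conventionally readable from an `ANNDATA_`-prefixed environment variable (treat the exact variable name as an assumption). A minimal sketch:

import anndata as ad

print(ad.settings.disallow_forward_slash_in_h5ad)  # False by default in 0.12.3

# Enable for a block of code only:
with ad.settings.override(disallow_forward_slash_in_h5ad=True):
    assert ad.settings.disallow_forward_slash_in_h5ad

# Or set process-wide:
ad.settings.disallow_forward_slash_in_h5ad = True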
anndata/_settings.pyi
CHANGED
anndata/compat/__init__.py
CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 from codecs import decode
 from collections.abc import Mapping, Sequence
 from functools import cache, partial, singledispatch
+from importlib.metadata import version
 from importlib.util import find_spec
 from types import EllipsisType
 from typing import TYPE_CHECKING, TypeVar
@@ -75,10 +76,9 @@ H5File = h5py.File
 #############################
 @cache
 def is_zarr_v2() -> bool:
-    import zarr
     from packaging.version import Version

-    return Version(zarr.__version__) < Version("3.0.0")
+    return Version(version("zarr")) < Version("3.0.0")


 if is_zarr_v2():
@@ -213,7 +213,7 @@ else:

 NULLABLE_NUMPY_STRING_TYPE = (
     np.dtype("O")
-    if Version(np.__version__) < Version("2")
+    if Version(version("numpy")) < Version("2")
     else np.dtypes.StringDType(na_object=pd.NA)
 )

@@ -428,11 +428,3 @@ def _safe_transpose(x):
         return _transpose_by_block(x)
     else:
         return x.T
-
-
-def _map_cat_to_str(cat: pd.Categorical) -> pd.Categorical:
-    if Version(pd.__version__) >= Version("2.1"):
-        # Argument added in pandas 2.1
-        return cat.map(str, na_action="ignore")
-    else:
-        return cat.map(str)
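With pandas ≥2.1 assumed, the `_map_cat_to_str` shim is removed and call sites use `Categorical.map(str, na_action="ignore")` directly. A quick sketch of what that call does:

import pandas as pd

cat = pd.Categorical([1, 2, None, 2])
# Map categories to their string form while leaving missing values missing:
print(cat.map(str, na_action="ignore"))
# ['1', '2', NaN, '2']; categories: ['1', '2']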
anndata/experimental/merge.py
CHANGED
@@ -26,8 +26,8 @@ from .._core.merge import (
 )
 from .._core.sparse_dataset import BaseCompressedSparseDataset, sparse_dataset
 from .._io.specs import read_elem, write_elem
-from ..compat import H5Array, H5Group, ZarrArray, ZarrGroup
-from . import read_dispatched
+from ..compat import H5Array, H5Group, ZarrArray, ZarrGroup
+from . import read_dispatched, read_elem_lazy

 if TYPE_CHECKING:
     from collections.abc import Callable, Collection, Iterable, Sequence
@@ -173,7 +173,7 @@ def write_concat_dense(  # noqa: PLR0917
     output_path: ZarrGroup | H5Group,
     axis: Literal[0, 1] = 0,
     reindexers: Reindexer | None = None,
-    fill_value=None,
+    fill_value: Any = None,
 ):
     """
     Writes the concatenation of given dense arrays to disk using dask.
@@ -193,9 +193,10 @@ def write_concat_dense(  # noqa: PLR0917
         axis=axis,
     )
     write_elem(output_group, output_path, res)
-    output_group[output_path].attrs.update(
-        {"encoding-type": "array", "encoding-version": "0.2.0"}
-    )
+    output_group[output_path].attrs.update({
+        "encoding-type": "array",
+        "encoding-version": "0.2.0",
+    })
@@ -205,7 +206,7 @@ def write_concat_sparse(  # noqa: PLR0917
     max_loaded_elems: int,
     axis: Literal[0, 1] = 0,
     reindexers: Reindexer | None = None,
-    fill_value=None,
+    fill_value: Any = None,
 ):
     """
     Writes and concatenates sparse datasets into a single output dataset.
@@ -245,26 +246,24 @@ def write_concat_sparse(  # noqa: PLR0917


 def _write_concat_mappings(  # noqa: PLR0913, PLR0917
-    mappings,
+    mappings: Collection[dict],
     output_group: ZarrGroup | H5Group,
-    keys,
-    output_path,
-    max_loaded_elems,
-    axis=0,
-    index=None,
-    reindexers=None,
-    fill_value=None,
+    keys: Collection[str],
+    output_path: str | Path,
+    max_loaded_elems: int,
+    axis: Literal[0, 1] = 0,
+    index: pd.Index = None,
+    reindexers: list[Reindexer] | None = None,
+    fill_value: Any = None,
 ):
     """
     Write a list of mappings to a zarr/h5 group.
     """
-    mapping_group = output_group.create_group(output_path)
-    mapping_group.attrs.update(
-        {
-            "encoding-type": "dict",
-            "encoding-version": "0.1.0",
-        }
-    )
+    mapping_group = output_group.create_group(output_path)
+    mapping_group.attrs.update({
+        "encoding-type": "dict",
+        "encoding-version": "0.1.0",
+    })
     for k in keys:
         elems = [m[k] for m in mappings]
         _write_concat_sequence(
@@ -281,13 +280,13 @@ def _write_concat_mappings(  # noqa: PLR0913, PLR0917

 def _write_concat_arrays(  # noqa: PLR0913, PLR0917
     arrays: Sequence[ZarrArray | H5Array | BaseCompressedSparseDataset],
-    output_group,
-    output_path,
-    max_loaded_elems,
-    axis=0,
-    reindexers=None,
-    fill_value=None,
-    join="inner",
+    output_group: ZarrGroup | H5Group,
+    output_path: str | Path,
+    max_loaded_elems: int,
+    axis: Literal[0, 1] = 0,
+    reindexers: list[Reindexer] | None = None,
+    fill_value: Any = None,
+    join: Literal["inner", "outer"] = "inner",
 ):
     init_elem = arrays[0]
     init_type = type(init_elem)
@@ -325,14 +324,14 @@ def _write_concat_arrays(  # noqa: PLR0913, PLR0917

 def _write_concat_sequence(  # noqa: PLR0913, PLR0917
     arrays: Sequence[pd.DataFrame | BaseCompressedSparseDataset | H5Array | ZarrArray],
-    output_group,
-    output_path,
-    max_loaded_elems,
-    axis=0,
-    index=None,
-    reindexers=None,
-    fill_value=None,
-    join="inner",
+    output_group: ZarrGroup | H5Group,
+    output_path: str | Path,
+    max_loaded_elems: int,
+    axis: Literal[0, 1] = 0,
+    index: pd.Index = None,
+    reindexers: list[Reindexer] | None = None,
+    fill_value: Any = None,
+    join: Literal["inner", "outer"] = "inner",
 ):
     """
     array, dataframe, csc_matrix, csc_matrix
@@ -377,17 +376,27 @@ def _write_concat_sequence(  # noqa: PLR0913, PLR0917
     raise NotImplementedError(msg)


-def _write_alt_mapping(groups, output_group, alt_axis_name, alt_indices, merge):
-    alt_mapping = merge([read_elem(g[f"{alt_axis_name}m"]) for g in groups])
+def _write_alt_mapping(
+    groups: Collection[H5Group, ZarrGroup],
+    output_group: ZarrGroup | H5Group,
+    alt_axis_name: Literal["obs", "var"],
+    merge: Callable,
+    reindexers: list[Reindexer],
+):
+    alt_mapping = merge([
+        {k: r(read_elem(v), axis=0) for k, v in dict(g[f"{alt_axis_name}m"]).items()}
+        for r, g in zip(reindexers, groups, strict=True)
+    ])
     write_elem(output_group, f"{alt_axis_name}m", alt_mapping)


-def _write_alt_annot(groups, output_group, alt_axis_name, alt_indices, merge):
+def _write_alt_annot(
+    groups: Collection[H5Group, ZarrGroup],
+    output_group: ZarrGroup | H5Group,
+    alt_axis_name: Literal["obs", "var"],
+    alt_indices: pd.Index,
+    merge: Callable,
+):
     # Annotation for other axis
     alt_annot = merge_dataframes(
         [read_elem(g[alt_axis_name]) for g in groups], alt_indices, merge
@@ -396,7 +405,13 @@ def _write_alt_annot(groups, output_group, alt_axis_name, alt_indices, merge):


 def _write_axis_annot(  # noqa: PLR0917
-    groups, output_group, axis_name, concat_indices, label, label_col, join
+    groups: Collection[H5Group, ZarrGroup],
+    output_group: ZarrGroup | H5Group,
+    axis_name: Literal["obs", "var"],
+    concat_indices: pd.Index,
+    label: str,
+    label_col: str,
+    join: Literal["inner", "outer"],
 ):
     concat_annot = pd.concat(
         unify_dtypes(read_elem(g[axis_name]) for g in groups),
@@ -409,6 +424,23 @@ def _write_axis_annot(  # noqa: PLR0917
     write_elem(output_group, axis_name, concat_annot)


+def _write_alt_pairwise(
+    groups: Collection[H5Group, ZarrGroup],
+    output_group: ZarrGroup | H5Group,
+    alt_axis_name: Literal["obs", "var"],
+    merge: Callable,
+    reindexers: list[Reindexer],
+):
+    alt_pairwise = merge([
+        {
+            k: r(r(read_elem_lazy(v), axis=0), axis=1)
+            for k, v in dict(g[f"{alt_axis_name}p"]).items()
+        }
+        for r, g in zip(reindexers, groups, strict=True)
+    ])
+    write_elem(output_group, f"{alt_axis_name}p", alt_pairwise)
+
+
 def concat_on_disk(  # noqa: PLR0912, PLR0913, PLR0915
     in_files: Collection[PathLike[str] | str] | Mapping[str, PathLike[str] | str],
     out_file: PathLike[str] | str,
@@ -491,7 +523,8 @@ def concat_on_disk(  # noqa: PLR0912, PLR0913, PLR0915
         DataFrames are padded with missing values.
     pairwise
         Whether pairwise elements along the concatenated dimension should be included.
-        This is False by default, since the resulting arrays are often not meaningful.
+        This is False by default, since the resulting arrays are often not meaningful, and raises {class}`NotImplementedError` when True.
+        If you are interested in this feature, please open an issue.

     Notes
     -----
@@ -610,7 +643,7 @@ def concat_on_disk(  # noqa: PLR0912, PLR0913, PLR0915
     )
     if index_unique is not None:
         concat_indices = concat_indices.str.cat(
-            _map_cat_to_str(label_col), sep=index_unique
+            label_col.map(str, na_action="ignore"), sep=index_unique
         )

     # Resulting indices for {axis_name} and {alt_axis_name}
@@ -635,7 +668,10 @@ def concat_on_disk(  # noqa: PLR0912, PLR0913, PLR0915
     _write_alt_annot(groups, output_group, alt_axis_name, alt_index, merge)

     # Write {alt_axis_name}m
-    _write_alt_mapping(groups, output_group, alt_axis_name, alt_indices, merge)
+    _write_alt_mapping(groups, output_group, alt_axis_name, merge, reindexers)
+
+    # Write {alt_axis_name}p
+    _write_alt_pairwise(groups, output_group, alt_axis_name, merge, reindexers)

     # Write X
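`concat_on_disk` now reindexes the alternate-axis mappings and also writes alternate-axis pairwise elements. A hedged usage sketch; the file names are hypothetical and both inputs must already exist on disk in the same format:

from anndata.experimental import concat_on_disk

# Concatenate two .h5ad files along obs without loading either fully into memory.
concat_on_disk(
    ["a.h5ad", "b.h5ad"],
    "merged.h5ad",
    join="outer",
    label="batch",
)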
anndata/experimental/multi_files/_anncollection.py
CHANGED
@@ -16,7 +16,7 @@ from ..._core.index import _normalize_index, _normalize_indices
 from ..._core.merge import concat_arrays, inner_concat_aligned_mapping
 from ..._core.sparse_dataset import BaseCompressedSparseDataset
 from ..._core.views import _resolve_idx
-from ...compat import _map_cat_to_str, old_positionals
+from ...compat import old_positionals

 if TYPE_CHECKING:
     from collections.abc import Iterable, Sequence
@@ -731,7 +731,7 @@ class AnnCollection(_ConcatViewMixin, _IterateViewMixin):
     )
     if index_unique is not None:
         concat_indices = concat_indices.str.cat(
-            _map_cat_to_str(label_col), sep=index_unique
+            label_col.map(str, na_action="ignore"), sep=index_unique
         )
     self.obs_names = pd.Index(concat_indices)
{anndata-0.12.2.dist-info → anndata-0.12.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: anndata
-Version: 0.12.2
+Version: 0.12.3
 Summary: Annotated data.
 Project-URL: Documentation, https://anndata.readthedocs.io/
 Project-URL: Source, https://github.com/scverse/anndata
@@ -38,15 +38,14 @@ Requires-Dist: cupy-cuda11x; extra == 'cu11'
 Provides-Extra: cu12
 Requires-Dist: cupy-cuda12x; extra == 'cu12'
 Provides-Extra: dask
-Requires-Dist: dask[array]!=2024.8.*,!=2024.9…
+Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,!=2025.2.*,!=2025.3.*,!=2025.4.*,!=2025.5.*,!=2025.6.*,!=2025.7.*,!=2025.8.*,>=2023.5.1; extra == 'dask'
 Provides-Extra: dev
-Requires-Dist: hatch-vcs; extra == 'dev'
 Requires-Dist: towncrier>=24.8.0; extra == 'dev'
 Provides-Extra: dev-doc
 Requires-Dist: towncrier>=24.8.0; extra == 'dev-doc'
 Provides-Extra: doc
 Requires-Dist: awkward>=2.3; extra == 'doc'
-Requires-Dist: dask[array]!=2024.8.*,!=2024.9…
+Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,!=2025.2.*,!=2025.3.*,!=2025.4.*,!=2025.5.*,!=2025.6.*,!=2025.7.*,!=2025.8.*,>=2023.5.1; extra == 'doc'
 Requires-Dist: ipython; extra == 'doc'
 Requires-Dist: myst-nb; extra == 'doc'
 Requires-Dist: myst-parser; extra == 'doc'
@@ -64,14 +63,14 @@ Provides-Extra: gpu
 Requires-Dist: cupy; extra == 'gpu'
 Provides-Extra: lazy
 Requires-Dist: aiohttp; extra == 'lazy'
-Requires-Dist: dask[array]!=2024.8.*,!=2024.9…
+Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,!=2025.2.*,!=2025.3.*,!=2025.4.*,!=2025.5.*,!=2025.6.*,!=2025.7.*,!=2025.8.*,>=2023.5.1; extra == 'lazy'
 Requires-Dist: requests; extra == 'lazy'
 Requires-Dist: xarray>=2025.06.1; extra == 'lazy'
 Provides-Extra: test
 Requires-Dist: aiohttp; extra == 'test'
 Requires-Dist: awkward>=2.3.2; extra == 'test'
 Requires-Dist: boltons; extra == 'test'
-Requires-Dist: dask[array]!=2024.8.*,!=2024.9…
+Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,!=2025.2.*,!=2025.3.*,!=2025.4.*,!=2025.5.*,!=2025.6.*,!=2025.7.*,!=2025.8.*,>=2023.5.1; extra == 'test'
 Requires-Dist: dask[distributed]; extra == 'test'
 Requires-Dist: filelock; extra == 'test'
 Requires-Dist: httpx<1.0; extra == 'test'
@@ -93,7 +92,7 @@ Requires-Dist: xarray>=2025.06.1; extra == 'test'
 Provides-Extra: test-min
 Requires-Dist: awkward>=2.3.2; extra == 'test-min'
 Requires-Dist: boltons; extra == 'test-min'
-Requires-Dist: dask[array]!=2024.8.*,!=2024.9…
+Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,!=2025.2.*,!=2025.3.*,!=2025.4.*,!=2025.5.*,!=2025.6.*,!=2025.7.*,!=2025.8.*,>=2023.5.1; extra == 'test-min'
 Requires-Dist: dask[distributed]; extra == 'test-min'
 Requires-Dist: filelock; extra == 'test-min'
 Requires-Dist: httpx<1.0; extra == 'test-min'
{anndata-0.12.2.dist-info → anndata-0.12.3.dist-info}/RECORD
CHANGED
@@ -1,8 +1,7 @@
-anndata/__init__.py,sha256=…
-anndata/_settings.py,sha256=…
-anndata/_settings.pyi,sha256=…
+anndata/__init__.py,sha256=daAzY8GGouJxCe30Lcr2pl9Jwo2dcGXHPi7WxnHpuOE,1710
+anndata/_settings.py,sha256=Y6d28jAF7qnZLfDdg-0L_-Z7zXZR8zjSYlmvWRU_HO4,15949
+anndata/_settings.pyi,sha256=88wa1EG5Bz3ZKIJ68iw41nwGPA8asnpqYsnATsC5mpc,1594
 anndata/_types.py,sha256=c71REP9wS7Vz2cYrNxuNjPYdnq8MJ5g04MNrSi85ATA,5427
-anndata/_version.py,sha256=qsRPKvJAzUrnf49DHZk-FYfXKOASg5nJV2qomuOttX0,2160
 anndata/_warnings.py,sha256=iFXa9EzPyuPbzRAzoG04oTXAyjnXhQa5zxAMZdsGLwM,702
 anndata/abc.py,sha256=jG64k59ZZ9Hfn-QWt_btZLuF7eGv_YNYwH91WdbR240,1645
 anndata/io.py,sha256=DrIo-FU6qbrdk5aVKoUIBoMttZaO5QWP4bowS9xaebI,698
@@ -14,45 +13,45 @@ anndata/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 anndata/_core/access.py,sha256=pts7fGUKgGZANSsu_qAA7L10qHM-jT1zIehbl3441OY,873
 anndata/_core/aligned_df.py,sha256=MrGdi1zNZZlkkv4IeS2yY-R5ldXpchTlMlJK7PKRa7A,3833
 anndata/_core/aligned_mapping.py,sha256=BYU1jslMWIhtFTtUMaXY8ZCyt0J4_ZsJTmj6J2yAXTQ,14257
-anndata/_core/anndata.py,sha256=…
+anndata/_core/anndata.py,sha256=e9ISy2CI7QaG6mievs8Aw33sho8ZN1CMgqPQZ-ZfbSw,78782
 anndata/_core/extensions.py,sha256=9Rsho6qnr3PJHULrYGiZHCBinBZYJK6zyf3cFsl_gBY,10425
 anndata/_core/file_backing.py,sha256=kT71R_kZp_CiHImBK8IaZXsvYVtbX2Mg-7L2ldAWojM,5113
 anndata/_core/index.py,sha256=6oED8kjTFKXnZSJXbkGFwscRtqV346h05Dx_Spd68WY,9298
-anndata/_core/merge.py,sha256=…
+anndata/_core/merge.py,sha256=q9eDTQNdqvEJhnkft2UP9gXrmYLQf6CzWgj4YXHIn-s,60242
 anndata/_core/raw.py,sha256=x_PwwaDQscVQOFJ38kF7sNQ47LxowpS38h2RQfU5Zwo,7925
-anndata/_core/sparse_dataset.py,sha256=…
+anndata/_core/sparse_dataset.py,sha256=9ZVzH7cPdom-bsWEYgbqQvCucc3erhEIeqK2Z_wzDwA,26826
 anndata/_core/storage.py,sha256=mHzqp7YBJ-rGQFulMAx__D-Z7y4omHPyb1cP7YxfbFE,2555
-anndata/_core/views.py,sha256=…
+anndata/_core/views.py,sha256=DIJgnqPvh07wbLousjZbGBsMC55oyBsMbSeybQC5sIY,15019
 anndata/_core/xarray.py,sha256=JeQjTuSQEiZF8cryKDYf9d7yt-ufQEVo9x94YaczuPQ,16078
 anndata/_io/__init__.py,sha256=GTNeUZ8d8aA3sK4P33tyljIc60KapLbkqBC6J1y3l9U,346
-anndata/_io/h5ad.py,sha256=…
+anndata/_io/h5ad.py,sha256=BwBEYU_SZWn3KDD3RuxltDSkyqHxE3xXUfkiPh8OG-Y,13908
 anndata/_io/read.py,sha256=MuTR6dR2WItV2y0sKYvxSO2fu7OlDjaCRYJuT5UbuBo,15933
-anndata/_io/utils.py,sha256=…
+anndata/_io/utils.py,sha256=3Lg27Q0Uo3HYlz980bG2Y02_VFIt0PiXMNIj_o-mgC4,9490
 anndata/_io/write.py,sha256=r55w6yPIIuUSLW9wyYL8GnkzHHQdAxy6xiCEw9cAC38,4811
-anndata/_io/zarr.py,sha256=…
+anndata/_io/zarr.py,sha256=Z996SZ8LV1Fpa_q8o70vHnBzNLOLlVjhf_Rs5EM_Slo,5461
 anndata/_io/specs/__init__.py,sha256=Z6l8xqa7B480U3pqrNIg4-fhUvpBW85w4xA3i3maAUM,427
-anndata/_io/specs/lazy_methods.py,sha256=…
-anndata/_io/specs/methods.py,sha256=…
-anndata/_io/specs/registry.py,sha256=…
-anndata/compat/__init__.py,sha256=…
+anndata/_io/specs/lazy_methods.py,sha256=k-s-YwOtwXpdjsyrM0IAsGSadPFxDsVDA3d_Nbpb7Ak,12261
+anndata/_io/specs/methods.py,sha256=rAxRyMphd7DOI7gf2jpUloRFFYA5XgBxQ4mvX4Wcsk0,46171
+anndata/_io/specs/registry.py,sha256=6Z_ffk3uOIagzRPcDCvEoszcgD-U3n8wYnGiPA71ZeI,17539
+anndata/compat/__init__.py,sha256=lsLHB7je0SHSePi9noY3p7kRbOAHhZzmMT1hs_ZSXys,12702
 anndata/experimental/__init__.py,sha256=polIxriEkby0iEqw-IXkUzp8k0wp92BpYY4zl4BsHH0,1648
 anndata/experimental/_dispatch_io.py,sha256=JzH8Uvewabc1gIF3L16RZnM9m2NAG28bQIQ57uP097k,1869
-anndata/experimental/merge.py,sha256=…
+anndata/experimental/merge.py,sha256=pl4MtDs_M76cTEqrJ_YJ8zyB6ID7QGzjntlAL7vp_qk,23303
 anndata/experimental/backed/__init__.py,sha256=4dc9M_-_SlfUidDrbWt8PRyD_8bYjypHJ86IpdThHus,230
 anndata/experimental/backed/_compat.py,sha256=rM7CnSJEZCko5wPBFRfvZA9ZKUSpaOVcWFy5u09p1go,519
 anndata/experimental/backed/_io.py,sha256=7bFzn3h8ut49NzppUvsqAX1gjXxAVCFK55Ln0XWzZdY,5965
 anndata/experimental/backed/_lazy_arrays.py,sha256=FR-ZPCOhjPaha278KkhMuYYPYx4T_5IOxtjX3XYjDC4,7424
 anndata/experimental/multi_files/__init__.py,sha256=T7iNLlRbe-KnLT3o7Tb7_nE4Iy_hLkG66UjBOvj2Bj8,107
-anndata/experimental/multi_files/_anncollection.py,sha256=…
+anndata/experimental/multi_files/_anncollection.py,sha256=d_d-v8X2WJTGNjAJoo2Mdykd-woSTM_oXEf2PUIqS6A,35254
 anndata/experimental/pytorch/__init__.py,sha256=4CkgrahLO8Kc-s2bmv6lVQfDxbO3IUyV0v4ygBDkttY,95
 anndata/experimental/pytorch/_annloader.py,sha256=7mpsFV5vBfxKIje1cPjahtDZ5afkU-H663XB4FJhmok,8075
 anndata/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 anndata/tests/helpers.py,sha256=27p_Nc5vFIiW-7EhV85g3QiE0dStMnUg0uFBRyroZUg,36072
 testing/anndata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 testing/anndata/_doctest.py,sha256=Qew0N0zLLNiPKN1CLunqY5cTinFLaEhY5GagiYfm6KI,344
-testing/anndata/_pytest.py,sha256=…
+testing/anndata/_pytest.py,sha256=cg4oWbtH9J1sRNul0n2oOraU1h7cprugr27EUPGDaN0,3997
 testing/anndata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-anndata-0.12.2.dist-info/METADATA,sha256=…
-anndata-0.12.2.dist-info/WHEEL,sha256=…
-anndata-0.12.2.dist-info/licenses/LICENSE,sha256=…
-anndata-0.12.2.dist-info/RECORD,,
+anndata-0.12.3.dist-info/METADATA,sha256=6MqCFe54oaFQd9YKcXv8ctz2irw1mLOhFBnq9YgDA3s,9937
+anndata-0.12.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+anndata-0.12.3.dist-info/licenses/LICENSE,sha256=VcrXoEVMhtNuvMvKYGP-I5lMT8qZ_6dFf22fsL180qA,1575
+anndata-0.12.3.dist-info/RECORD,,
testing/anndata/_pytest.py
CHANGED
@@ -35,14 +35,10 @@ def _anndata_test_env(request: pytest.FixtureRequest) -> None:
 def _doctest_env(
     request: pytest.FixtureRequest, cache: pytest.Cache, tmp_path: Path
 ) -> Generator[None, None, None]:
-    with warnings.catch_warnings():
-        warnings.filterwarnings(
-            "ignore", message=r"Importing read_.* from `anndata` is deprecated"
-        )
-        from scanpy import settings
-
     from contextlib import chdir

+    from scanpy import settings
+
     from anndata.utils import import_name

     assert isinstance(request.node.parent, pytest.Module)
anndata/_version.py
DELETED
@@ -1,62 +0,0 @@
-"""Get version from VCS in a dev environment or from package metadata in production.
-
-See <https://github.com/maresb/hatch-vcs-footgun-example>.
-"""
-
-from __future__ import annotations
-
-import warnings
-from pathlib import Path
-
-__all__ = ["__version__"]
-
-_PROJECT_NAME = "anndata"
-
-
-class GetVersionError(Exception):
-    pass
-
-
-def _get_version_from_vcs(project_name: str) -> str:  # pragma: no cover
-    from hatchling.metadata.core import ProjectMetadata
-    from hatchling.plugin.exceptions import UnknownPluginError
-    from hatchling.plugin.manager import PluginManager
-    from hatchling.utils.fs import locate_file
-
-    if (pyproject_toml := locate_file(__file__, "pyproject.toml")) is None:
-        msg = "pyproject.toml not found although hatchling is installed"
-        raise LookupError(msg)
-    root = Path(pyproject_toml).parent
-    metadata = ProjectMetadata(root=str(root), plugin_manager=PluginManager())
-    try:
-        # Version can be either statically set in pyproject.toml or computed dynamically:
-        version = metadata.core.version or metadata.hatch.version.cached
-    except UnknownPluginError as e:
-        msg = "Unable to import hatch plugin."
-        raise ImportError(msg) from e
-    except ValueError as e:
-        msg = f"Could not find hatchling project data in TOML file, {pyproject_toml}"
-        raise GetVersionError(msg) from e
-    except TypeError as e:
-        msg = "Could not parse build configuration."
-        raise GetVersionError(msg) from e
-    except Exception as e:
-        msg = (
-            f"Unknown error getting version from hatchling config for '{project_name}'."
-        )
-        warnings.warn(f"{msg}: {e}", stacklevel=1)
-        raise GetVersionError(msg) from e
-
-    # We found a hatchling environment, but is it ours?
-    if metadata.core.name != project_name:
-        msg = f"Data in pyproject.toml is not related to {project_name}."
-        raise GetVersionError(msg)
-    return version
-
-
-try:
-    __version__ = _get_version_from_vcs(_PROJECT_NAME)
-except (ImportError, LookupError, GetVersionError):
-    import importlib.metadata
-
-    __version__ = importlib.metadata.version(_PROJECT_NAME)
{anndata-0.12.2.dist-info → anndata-0.12.3.dist-info}/WHEEL
File without changes
{anndata-0.12.2.dist-info → anndata-0.12.3.dist-info}/licenses/LICENSE
File without changes