anndata 0.12.3__py3-none-any.whl → 0.12.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
anndata/_core/aligned_df.py CHANGED
@@ -78,6 +78,13 @@ def _gen_dataframe_df(
     attr: Literal["obs", "var"],
     length: int | None = None,
 ):
+    if isinstance(anno.index, pd.MultiIndex):
+        msg = (
+            "pandas.MultiIndex not supported as index for obs or var on declaration.\n\
+You can set `obs_names` manually although most operations after will error or convert to str.\n\
+This behavior will likely be clarified in a future breaking release."
+        )
+        raise ValueError(msg)
     if length is not None and length != len(anno):
         raise _mk_df_error(source, attr, length, len(anno))
     anno = anno.copy(deep=False)
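A minimal sketch of what the new guard rejects (the toy 2×3 AnnData and index names below are hypothetical):

    import anndata as ad
    import numpy as np
    import pandas as pd

    obs = pd.DataFrame(
        index=pd.MultiIndex.from_tuples([("s1", 0), ("s1", 1)], names=["sample", "cell"])
    )
    try:
        ad.AnnData(X=np.zeros((2, 3)), obs=obs)  # raises on declaration as of 0.12.4
    except ValueError as e:
        print(e)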
anndata/_core/index.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 from collections.abc import Iterable, Sequence
 from functools import singledispatch
 from itertools import repeat
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, cast, overload

 import h5py
 import numpy as np
@@ -14,6 +14,8 @@ from ..compat import AwkArray, CSArray, CSMatrix, DaskArray, XDataArray
 from .xarray import Dataset2D

 if TYPE_CHECKING:
+    from numpy.typing import NDArray
+
     from ..compat import Index, Index1D, Index1DNorm


@@ -161,7 +163,10 @@ def unpack_index(index: Index) -> tuple[Index1D, Index1D]:


 @singledispatch
-def _subset(a: np.ndarray | pd.DataFrame, subset_idx: Index):
+def _subset(
+    a: np.ndarray | pd.DataFrame,
+    subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm],
+):
     # Select as combination of indexes, not coordinates
     # Correcting for indexing behaviour of np.ndarray
     if all(isinstance(x, Iterable) for x in subset_idx):
@@ -170,7 +175,9 @@ def _subset(a: np.ndarray | pd.DataFrame, subset_idx: Index):


 @_subset.register(DaskArray)
-def _subset_dask(a: DaskArray, subset_idx: Index):
+def _subset_dask(
+    a: DaskArray, subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm]
+):
     if len(subset_idx) > 1 and all(isinstance(x, Iterable) for x in subset_idx):
         if issparse(a._meta) and a._meta.format == "csc":
             return a[:, subset_idx[1]][subset_idx[0], :]
@@ -180,24 +187,32 @@ def _subset_dask(a: DaskArray, subset_idx: Index):

 @_subset.register(CSMatrix)
 @_subset.register(CSArray)
-def _subset_sparse(a: CSMatrix | CSArray, subset_idx: Index):
+def _subset_sparse(
+    a: CSMatrix | CSArray,
+    subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm],
+):
     # Correcting for indexing behaviour of sparse.spmatrix
     if len(subset_idx) > 1 and all(isinstance(x, Iterable) for x in subset_idx):
         first_idx = subset_idx[0]
         if issubclass(first_idx.dtype.type, np.bool_):
-            first_idx = np.where(first_idx)[0]
+            first_idx = np.flatnonzero(first_idx)
         subset_idx = (first_idx.reshape(-1, 1), *subset_idx[1:])
     return a[subset_idx]


 @_subset.register(pd.DataFrame)
 @_subset.register(Dataset2D)
-def _subset_df(df: pd.DataFrame | Dataset2D, subset_idx: Index):
+def _subset_df(
+    df: pd.DataFrame | Dataset2D,
+    subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm],
+):
     return df.iloc[subset_idx]


 @_subset.register(AwkArray)
-def _subset_awkarray(a: AwkArray, subset_idx: Index):
+def _subset_awkarray(
+    a: AwkArray, subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm]
+):
     if all(isinstance(x, Iterable) for x in subset_idx):
         subset_idx = np.ix_(*subset_idx)
     return a[subset_idx]
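The `np.flatnonzero` swap is behavior-preserving: for a 1-D mask, `np.flatnonzero(m)` returns the same positions as `np.where(m)[0]`. A small sketch, with made-up data, of the column-vector trick `_subset_sparse` uses to select rows and columns in one fancy-indexing step:

    import numpy as np
    from scipy import sparse

    a = sparse.random(5, 4, density=0.5, format="csr")
    rows = np.array([True, False, True, False, True])
    cols = np.array([0, 2])

    r = np.flatnonzero(rows)         # same positions as np.where(rows)[0]
    sub = a[r.reshape(-1, 1), cols]  # column vector broadcasts against cols
    print(sub.toarray().shape)       # (3, 2)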
@@ -205,23 +220,121 @@ def _subset_awkarray(a: AwkArray, subset_idx: Index):

 # Registration for SparseDataset occurs in sparse_dataset.py
 @_subset.register(h5py.Dataset)
-def _subset_dataset(d: h5py.Dataset, subset_idx: Index):
-    if not isinstance(subset_idx, tuple):
-        subset_idx = (subset_idx,)
-    ordered = list(subset_idx)
-    rev_order = [slice(None) for _ in range(len(subset_idx))]
-    for axis, axis_idx in enumerate(ordered.copy()):
-        if isinstance(axis_idx, np.ndarray):
-            if axis_idx.dtype == bool:
-                axis_idx = np.where(axis_idx)[0]
-            order = np.argsort(axis_idx)
-            ordered[axis] = axis_idx[order]
-            rev_order[axis] = np.argsort(order)
+def _subset_dataset(
+    d: h5py.Dataset, subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm]
+):
+    order: tuple[NDArray[np.integer] | slice, ...]
+    inv_order: tuple[NDArray[np.integer] | slice, ...]
+    order, inv_order = zip(*map(_index_order_and_inverse, subset_idx), strict=True)
+    # check for duplicates or multi-dimensional fancy indexing
+    array_dims = [i for i in order if isinstance(i, np.ndarray)]
+    has_duplicates = any(len(np.unique(i)) != len(i) for i in array_dims)
+    # Use safe indexing if there are duplicates OR multiple array dimensions
+    # (h5py doesn't support multi-dimensional fancy indexing natively)
+    if has_duplicates or len(array_dims) > 1:
+        # For multi-dimensional indexing, bypass the sorting logic and use original indices
+        return _safe_fancy_index_h5py(d, subset_idx)
     # from hdf5, then to real order
-    return d[tuple(ordered)][tuple(rev_order)]
-
-
-def make_slice(idx, dimidx, n=2):
+    return d[order][inv_order]
+
+
+@overload
+def _index_order_and_inverse(
+    axis_idx: NDArray[np.integer] | NDArray[np.bool_],
+) -> tuple[NDArray[np.integer], NDArray[np.integer]]: ...
+@overload
+def _index_order_and_inverse(axis_idx: slice) -> tuple[slice, slice]: ...
+def _index_order_and_inverse(
+    axis_idx: Index1DNorm,
+) -> tuple[Index1DNorm, NDArray[np.integer] | slice]:
+    """Order and get inverse index array."""
+    if not isinstance(axis_idx, np.ndarray):
+        return axis_idx, slice(None)
+    if axis_idx.dtype == bool:
+        axis_idx = np.flatnonzero(axis_idx)
+    order = np.argsort(axis_idx)
+    return axis_idx[order], np.argsort(order)
+
+
+@overload
+def _process_index_for_h5py(
+    idx: NDArray[np.integer] | NDArray[np.bool_],
+) -> tuple[NDArray[np.integer], NDArray[np.integer]]: ...
+@overload
+def _process_index_for_h5py(idx: slice) -> tuple[slice, None]: ...
+def _process_index_for_h5py(
+    idx: Index1DNorm,
+) -> tuple[Index1DNorm, NDArray[np.integer] | None]:
+    """Process a single index for h5py compatibility, handling sorting and duplicates."""
+    if not isinstance(idx, np.ndarray):
+        # Not an array (slice, integer, list) - no special processing needed
+        return idx, None
+
+    if idx.dtype == bool:
+        idx = np.flatnonzero(idx)
+
+    # For h5py fancy indexing, we need sorted indices
+    # But we also need to track how to reverse the sorting
+    unique, inverse = np.unique(idx, return_inverse=True)
+    return (
+        # Has duplicates - use unique + inverse mapping approach
+        (unique, inverse)
+        if len(unique) != len(idx)
+        # No duplicates - just sort and track reverse mapping
+        else _index_order_and_inverse(idx)
+    )
+
+
+def _safe_fancy_index_h5py(
+    dataset: h5py.Dataset,
+    subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm],
+) -> h5py.Dataset:
+    # Handle multi-dimensional indexing of h5py dataset
+    # This avoids h5py's limitation with multi-dimensional fancy indexing
+    # without loading the entire dataset into memory
+
+    # Convert boolean arrays to integer arrays and handle sorting for h5py
+    processed_indices: tuple[NDArray[np.integer] | slice, ...]
+    reverse_indices: tuple[NDArray[np.integer] | None, ...]
+    processed_indices, reverse_indices = zip(
+        *map(_process_index_for_h5py, subset_idx), strict=True
+    )
+
+    # First find the index that reduces the size of the dataset the most
+    i_min = np.argmin([
+        _get_index_size(inds, dataset.shape[i]) / dataset.shape[i]
+        for i, inds in enumerate(processed_indices)
+    ])
+
+    # Apply the most selective index first to h5py dataset
+    first_index = [slice(None)] * len(processed_indices)
+    first_index[i_min] = processed_indices[i_min]
+    in_memory_array = cast("np.ndarray", dataset[tuple(first_index)])
+
+    # Apply remaining indices to the numpy array
+    remaining_indices = list(processed_indices)
+    remaining_indices[i_min] = slice(None)  # Already applied
+    result = in_memory_array[tuple(remaining_indices)]
+
+    # Now apply reverse mappings to get the original order
+    for dim, reverse_map in enumerate(reverse_indices):
+        if reverse_map is not None:
+            result = result.take(reverse_map, axis=dim)
+
+    return result
+
+
+def _get_index_size(idx: Index1DNorm, dim_size: int) -> int:
+    """Get size for any index type."""
+    if isinstance(idx, slice):
+        return len(range(*idx.indices(dim_size)))
+    elif isinstance(idx, int):
+        return 1
+    else:  # For other types, try to get length
+        return len(idx)
+
+
+def make_slice(idx, dimidx: int, n: int = 2) -> tuple[slice, ...]:
     mut = list(repeat(slice(None), n))
     mut[dimidx] = idx
     return tuple(mut)
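To make the ordering logic concrete: h5py point selections must be increasing and duplicate-free, so these helpers read in sorted (or unique) order and undo the permutation afterwards. A plain-numpy sketch of both tricks, with an in-memory array standing in for the on-disk dataset:

    import numpy as np

    data = np.arange(10) * 10  # stand-in for an h5py dataset

    # Out-of-order request: read sorted, then invert the permutation.
    idx = np.array([4, 1, 3])
    order = np.argsort(idx)    # h5py-legal read order
    inv = np.argsort(order)
    assert (data[idx[order]][inv] == data[idx]).all()

    # Duplicated request: read unique values, then expand via the inverse map.
    dup = np.array([2, 2, 5])
    uniq, inverse = np.unique(dup, return_inverse=True)
    assert (data[uniq][inverse] == data[dup]).all()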
anndata/_core/merge.py CHANGED
@@ -38,6 +38,7 @@ if TYPE_CHECKING:
     from collections.abc import Collection, Generator, Iterable, Sequence
     from typing import Any

+    from numpy.typing import NDArray
     from pandas.api.extensions import ExtensionDtype

     from anndata._types import Join_T
@@ -553,7 +554,7 @@ class Reindexer:
     Together with `old_pos` this forms a mapping.
     """

-    def __init__(self, old_idx, new_idx):
+    def __init__(self, old_idx: pd.Index, new_idx: pd.Index) -> None:
        self.old_idx = old_idx
        self.new_idx = new_idx
        self.no_change = new_idx.equals(old_idx)
@@ -753,7 +754,7 @@ class Reindexer:
         return el[self.idx]

     @property
-    def idx(self):
+    def idx(self) -> NDArray[np.intp]:
         return self.old_idx.get_indexer(self.new_idx)


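The new `NDArray[np.intp]` annotation matches what `pandas.Index.get_indexer` returns: integer positions of the new labels within the old index, with `-1` marking labels that must be filled during reindexing. For example:

    import pandas as pd

    old = pd.Index(["gene1", "gene2", "gene3"])
    new = pd.Index(["gene2", "gene4"])
    print(old.get_indexer(new))  # [ 1 -1] -> gene4 gets the fill value on reindex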
@@ -782,7 +783,7 @@ def default_fill_value(els):
     return np.nan


-def gen_reindexer(new_var: pd.Index, cur_var: pd.Index):
+def gen_reindexer(new_var: pd.Index, cur_var: pd.Index) -> Reindexer:
     """
     Given a new set of var_names, and a current set, generates a function which will reindex
     a matrix to be aligned with the new set.
@@ -939,7 +940,7 @@ def inner_concat_aligned_mapping(
     return result


-def gen_inner_reindexers(els, new_index, axis: Literal[0, 1] = 0):
+def gen_inner_reindexers(els, new_index, axis: Literal[0, 1] = 0) -> list[Reindexer]:
     alt_axis = 1 - axis
     if axis == 0:
         df_indices = lambda x: x.columns
@@ -1016,7 +1017,7 @@ def missing_element(
     axis: Literal[0, 1] = 0,
     fill_value: Any | None = None,
     off_axis_size: int = 0,
-) -> np.ndarray | DaskArray:
+) -> NDArray[np.bool_] | DaskArray:
     """Generates value to use when there is a missing element."""
     should_return_dask = any(isinstance(el, DaskArray) for el in els)
     # 0 sized array for in-memory prevents allocating unnecessary memory while preserving broadcasting.
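A generic illustration of the comment above (not anndata's exact code path): a zero-length array still carries shape and dtype metadata, but its data buffer costs nothing:

    import numpy as np

    placeholder = np.zeros((0, 4), dtype=bool)
    print(placeholder.nbytes)                    # 0
    print(placeholder.shape, placeholder.dtype)  # (0, 4) bool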
anndata/_core/sparse_dataset.py CHANGED
@@ -48,8 +48,7 @@ if TYPE_CHECKING:
     from scipy.sparse._compressed import _cs_matrix

     from .._types import GroupStorageType
-    from ..compat import H5Array
-    from .index import Index, Index1D
+    from ..compat import H5Array, Index, Index1D, Index1DNorm
 else:
     from scipy.sparse import spmatrix as _cs_matrix

@@ -738,5 +737,7 @@ def sparse_dataset(


 @_subset.register(BaseCompressedSparseDataset)
-def subset_sparsedataset(d, subset_idx):
+def subset_sparsedataset(
+    d, subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm]
+):
     return d[subset_idx]
anndata/_io/specs/methods.py CHANGED
@@ -495,31 +495,10 @@ _REGISTRY.register_write(ZarrGroup, CupyArray, IOSpec("array", "0.2.0"))(

 @_REGISTRY.register_write(ZarrGroup, views.DaskArrayView, IOSpec("array", "0.2.0"))
 @_REGISTRY.register_write(ZarrGroup, DaskArray, IOSpec("array", "0.2.0"))
-def write_basic_dask_zarr(
-    f: ZarrGroup,
-    k: str,
-    elem: DaskArray,
-    *,
-    _writer: Writer,
-    dataset_kwargs: Mapping[str, Any] = MappingProxyType({}),
-):
-    import dask.array as da
-
-    dataset_kwargs = dataset_kwargs.copy()
-    dataset_kwargs = zarr_v3_compressor_compat(dataset_kwargs)
-    if is_zarr_v2():
-        g = f.require_dataset(k, shape=elem.shape, dtype=elem.dtype, **dataset_kwargs)
-    else:
-        g = f.require_array(k, shape=elem.shape, dtype=elem.dtype, **dataset_kwargs)
-    da.store(elem, g, lock=GLOBAL_LOCK)
-
-
-# Adding this separately because h5py isn't serializable
-# https://github.com/pydata/xarray/issues/4242
 @_REGISTRY.register_write(H5Group, views.DaskArrayView, IOSpec("array", "0.2.0"))
 @_REGISTRY.register_write(H5Group, DaskArray, IOSpec("array", "0.2.0"))
-def write_basic_dask_h5(
-    f: H5Group,
+def write_basic_dask_dask_dense(
+    f: ZarrGroup | H5Group,
     k: str,
     elem: DaskArray,
     *,
@@ -529,11 +508,23 @@ def write_basic_dask_h5(
     import dask.array as da
     import dask.config as dc

-    if dc.get("scheduler", None) == "dask.distributed":
+    is_distributed = dc.get("scheduler", None) == "dask.distributed"
+    is_h5 = isinstance(f, H5Group)
+    if is_distributed and is_h5:
         msg = "Cannot write dask arrays to hdf5 when using distributed scheduler"
         raise ValueError(msg)

-    g = f.require_dataset(k, shape=elem.shape, dtype=elem.dtype, **dataset_kwargs)
+    dataset_kwargs = dataset_kwargs.copy()
+    if not is_h5:
+        dataset_kwargs = zarr_v3_compressor_compat(dataset_kwargs)
+        # See https://github.com/dask/dask/issues/12109
+        if Version(version("dask")) < Version("2025.4.0") and is_distributed:
+            msg = "Writing dense data with a distributed scheduler to zarr could produce corrupted data with a Lock and will error without one when dask is older than 2025.4.0: https://github.com/dask/dask/issues/12109"
+            raise RuntimeError(msg)
+    if is_zarr_v2() or is_h5:
+        g = f.require_dataset(k, shape=elem.shape, dtype=elem.dtype, **dataset_kwargs)
+    else:
+        g = f.require_array(k, shape=elem.shape, dtype=elem.dtype, **dataset_kwargs)
     da.store(elem, g)


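For orientation, a sketch of the code path this merged writer serves: writing a dask array through anndata's IO registry. The file name and array shape are hypothetical; `write_elem` is anndata's public entry point for this registry in current releases:

    import dask.array as da
    import h5py
    from anndata.io import write_elem

    x = da.ones((100, 50), chunks=(10, 50))
    with h5py.File("demo.h5", "w") as f:
        write_elem(f, "X", x)  # dispatches to write_basic_dask_dask_dense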
anndata/experimental/backed/_lazy_arrays.py CHANGED
@@ -25,9 +25,10 @@ if TYPE_CHECKING:
     from pathlib import Path
     from typing import Literal

-    from anndata._core.index import Index
     from anndata.compat import ZarrGroup

+    from ...compat import Index1DNorm
+

 K = TypeVar("K", H5Array, ZarrArray)

@@ -199,7 +200,9 @@ class MaskedArray(XBackendArray, Generic[K]):


 @_subset.register(XDataArray)
-def _subset_masked(a: XDataArray, subset_idx: Index):
+def _subset_masked(
+    a: XDataArray, subset_idx: tuple[Index1DNorm] | tuple[Index1DNorm, Index1DNorm]
+):
     return a[subset_idx]


anndata-0.12.3.dist-info/METADATA → anndata-0.12.4.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: anndata
-Version: 0.12.3
+Version: 0.12.4
 Summary: Annotated data.
 Project-URL: Documentation, https://anndata.readthedocs.io/
 Project-URL: Source, https://github.com/scverse/anndata
anndata-0.12.3.dist-info/RECORD → anndata-0.12.4.dist-info/RECORD RENAMED
@@ -11,15 +11,15 @@ anndata/typing.py,sha256=sRiAg16asjnKyXk1L4BtKWggyHMPLoxXzxTDmX3i7MY,1555
 anndata/utils.py,sha256=D4t_iQdTPeHukN4H7no0QZTIzWzclMYsWAHSBpubvCg,14758
 anndata/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 anndata/_core/access.py,sha256=pts7fGUKgGZANSsu_qAA7L10qHM-jT1zIehbl3441OY,873
-anndata/_core/aligned_df.py,sha256=MrGdi1zNZZlkkv4IeS2yY-R5ldXpchTlMlJK7PKRa7A,3833
+anndata/_core/aligned_df.py,sha256=EC01OveJ0tS5bQQHc_OprYSPprl-YtJQK-kIOY_4SX0,4214
 anndata/_core/aligned_mapping.py,sha256=BYU1jslMWIhtFTtUMaXY8ZCyt0J4_ZsJTmj6J2yAXTQ,14257
 anndata/_core/anndata.py,sha256=e9ISy2CI7QaG6mievs8Aw33sho8ZN1CMgqPQZ-ZfbSw,78782
 anndata/_core/extensions.py,sha256=9Rsho6qnr3PJHULrYGiZHCBinBZYJK6zyf3cFsl_gBY,10425
 anndata/_core/file_backing.py,sha256=kT71R_kZp_CiHImBK8IaZXsvYVtbX2Mg-7L2ldAWojM,5113
-anndata/_core/index.py,sha256=6oED8kjTFKXnZSJXbkGFwscRtqV346h05Dx_Spd68WY,9298
-anndata/_core/merge.py,sha256=q9eDTQNdqvEJhnkft2UP9gXrmYLQf6CzWgj4YXHIn-s,60242
+anndata/_core/index.py,sha256=F3TQBUbWpt09Pb4MpwB7xfCI9uPuv7jrqx8X74CwVDU,13472
+anndata/_core/merge.py,sha256=HVVLWEqk3PdU_U8UoOyKJaIp0ZQIfWy0cWM2iac4_H8,60366
 anndata/_core/raw.py,sha256=x_PwwaDQscVQOFJ38kF7sNQ47LxowpS38h2RQfU5Zwo,7925
-anndata/_core/sparse_dataset.py,sha256=9ZVzH7cPdom-bsWEYgbqQvCucc3erhEIeqK2Z_wzDwA,26826
+anndata/_core/sparse_dataset.py,sha256=mE-PRX4znkDyuum3BBBv7MJwyn4XL9C3nIQNRjZJ94w,26877
 anndata/_core/storage.py,sha256=mHzqp7YBJ-rGQFulMAx__D-Z7y4omHPyb1cP7YxfbFE,2555
 anndata/_core/views.py,sha256=DIJgnqPvh07wbLousjZbGBsMC55oyBsMbSeybQC5sIY,15019
 anndata/_core/xarray.py,sha256=JeQjTuSQEiZF8cryKDYf9d7yt-ufQEVo9x94YaczuPQ,16078
@@ -31,7 +31,7 @@ anndata/_io/write.py,sha256=r55w6yPIIuUSLW9wyYL8GnkzHHQdAxy6xiCEw9cAC38,4811
 anndata/_io/zarr.py,sha256=Z996SZ8LV1Fpa_q8o70vHnBzNLOLlVjhf_Rs5EM_Slo,5461
 anndata/_io/specs/__init__.py,sha256=Z6l8xqa7B480U3pqrNIg4-fhUvpBW85w4xA3i3maAUM,427
 anndata/_io/specs/lazy_methods.py,sha256=k-s-YwOtwXpdjsyrM0IAsGSadPFxDsVDA3d_Nbpb7Ak,12261
-anndata/_io/specs/methods.py,sha256=rAxRyMphd7DOI7gf2jpUloRFFYA5XgBxQ4mvX4Wcsk0,46171
+anndata/_io/specs/methods.py,sha256=T7926LkdvUJzvTAneRbDxM8YVIZNGz9GKgccEcsBumU,46264
 anndata/_io/specs/registry.py,sha256=6Z_ffk3uOIagzRPcDCvEoszcgD-U3n8wYnGiPA71ZeI,17539
 anndata/compat/__init__.py,sha256=lsLHB7je0SHSePi9noY3p7kRbOAHhZzmMT1hs_ZSXys,12702
 anndata/experimental/__init__.py,sha256=polIxriEkby0iEqw-IXkUzp8k0wp92BpYY4zl4BsHH0,1648
@@ -40,7 +40,7 @@ anndata/experimental/merge.py,sha256=pl4MtDs_M76cTEqrJ_YJ8zyB6ID7QGzjntlAL7vp_qk
 anndata/experimental/backed/__init__.py,sha256=4dc9M_-_SlfUidDrbWt8PRyD_8bYjypHJ86IpdThHus,230
 anndata/experimental/backed/_compat.py,sha256=rM7CnSJEZCko5wPBFRfvZA9ZKUSpaOVcWFy5u09p1go,519
 anndata/experimental/backed/_io.py,sha256=7bFzn3h8ut49NzppUvsqAX1gjXxAVCFK55Ln0XWzZdY,5965
-anndata/experimental/backed/_lazy_arrays.py,sha256=FR-ZPCOhjPaha278KkhMuYYPYx4T_5IOxtjX3XYjDC4,7424
+anndata/experimental/backed/_lazy_arrays.py,sha256=9uuEgP4p5oT49qhkzWHxC8eNFJFE1gRlYwAjMKFRejI,7474
 anndata/experimental/multi_files/__init__.py,sha256=T7iNLlRbe-KnLT3o7Tb7_nE4Iy_hLkG66UjBOvj2Bj8,107
 anndata/experimental/multi_files/_anncollection.py,sha256=d_d-v8X2WJTGNjAJoo2Mdykd-woSTM_oXEf2PUIqS6A,35254
 anndata/experimental/pytorch/__init__.py,sha256=4CkgrahLO8Kc-s2bmv6lVQfDxbO3IUyV0v4ygBDkttY,95
@@ -51,7 +51,7 @@ testing/anndata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 testing/anndata/_doctest.py,sha256=Qew0N0zLLNiPKN1CLunqY5cTinFLaEhY5GagiYfm6KI,344
 testing/anndata/_pytest.py,sha256=cg4oWbtH9J1sRNul0n2oOraU1h7cprugr27EUPGDaN0,3997
 testing/anndata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-anndata-0.12.3.dist-info/METADATA,sha256=6MqCFe54oaFQd9YKcXv8ctz2irw1mLOhFBnq9YgDA3s,9937
-anndata-0.12.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-anndata-0.12.3.dist-info/licenses/LICENSE,sha256=VcrXoEVMhtNuvMvKYGP-I5lMT8qZ_6dFf22fsL180qA,1575
-anndata-0.12.3.dist-info/RECORD,,
+anndata-0.12.4.dist-info/METADATA,sha256=8SqT0BaH42j9XfqXxmF0_Z6GCk0pd6lN7h1Rn3V3FTg,9937
+anndata-0.12.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+anndata-0.12.4.dist-info/licenses/LICENSE,sha256=VcrXoEVMhtNuvMvKYGP-I5lMT8qZ_6dFf22fsL180qA,1575
+anndata-0.12.4.dist-info/RECORD,,