anndata 0.12.1__tar.gz → 0.12.2__tar.gz

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (207)
  1. {anndata-0.12.1 → anndata-0.12.2}/.github/workflows/test-cpu.yml +5 -2
  2. {anndata-0.12.1 → anndata-0.12.2}/PKG-INFO +1 -1
  3. anndata-0.12.2/docs/release-notes/0.12.2.md +6 -0
  4. {anndata-0.12.1 → anndata-0.12.2}/docs/tutorials/zarr-v3.md +2 -2
  5. {anndata-0.12.1 → anndata-0.12.2}/hatch.toml +1 -0
  6. {anndata-0.12.1 → anndata-0.12.2}/pyproject.toml +3 -0
  7. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/anndata.py +22 -5
  8. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/index.py +6 -13
  9. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/raw.py +5 -3
  10. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/views.py +20 -14
  11. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/xarray.py +13 -12
  12. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/h5ad.py +47 -22
  13. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/read.py +17 -6
  14. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/specs/methods.py +5 -5
  15. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/zarr.py +0 -6
  16. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_settings.py +1 -1
  17. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/compat/__init__.py +29 -4
  18. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/tests/helpers.py +44 -26
  19. {anndata-0.12.1 → anndata-0.12.2}/tests/conftest.py +2 -1
  20. anndata-0.12.2/tests/data/archives/v0.11.4/adata.h5ad +0 -0
  21. anndata-0.12.2/tests/data/archives/v0.11.4/adata.zarr.zip +0 -0
  22. anndata-0.12.2/tests/data/archives/v0.11.4/readme.md +10 -0
  23. {anndata-0.12.1 → anndata-0.12.2}/tests/test_backed_hdf5.py +1 -1
  24. {anndata-0.12.1 → anndata-0.12.2}/tests/test_backed_sparse.py +9 -5
  25. {anndata-0.12.1 → anndata-0.12.2}/tests/test_base.py +1 -1
  26. anndata-0.12.2/tests/test_io_backwards_compat.py +77 -0
  27. {anndata-0.12.1 → anndata-0.12.2}/tests/test_io_elementwise.py +0 -12
  28. {anndata-0.12.1 → anndata-0.12.2}/tests/test_io_partial.py +0 -6
  29. {anndata-0.12.1 → anndata-0.12.2}/tests/test_readwrite.py +45 -10
  30. {anndata-0.12.1 → anndata-0.12.2}/tests/test_structured_arrays.py +10 -1
  31. {anndata-0.12.1 → anndata-0.12.2}/tests/test_views.py +33 -15
  32. anndata-0.12.1/tests/test_io_backwards_compat.py +0 -54
  33. {anndata-0.12.1 → anndata-0.12.2}/.cirun.yml +0 -0
  34. {anndata-0.12.1 → anndata-0.12.2}/.codecov.yml +0 -0
  35. {anndata-0.12.1 → anndata-0.12.2}/.editorconfig +0 -0
  36. {anndata-0.12.1 → anndata-0.12.2}/.github/ISSUE_TEMPLATE/bug-report.yml +0 -0
  37. {anndata-0.12.1 → anndata-0.12.2}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  38. {anndata-0.12.1 → anndata-0.12.2}/.github/ISSUE_TEMPLATE/enhancement-request.yml +0 -0
  39. {anndata-0.12.1 → anndata-0.12.2}/.github/ISSUE_TEMPLATE/question.yml +0 -0
  40. {anndata-0.12.1 → anndata-0.12.2}/.github/PULL_REQUEST_TEMPLATE.md +0 -0
  41. {anndata-0.12.1 → anndata-0.12.2}/.github/workflows/benchmark.yml +0 -0
  42. {anndata-0.12.1 → anndata-0.12.2}/.github/workflows/check-pr-milestoned.yml +0 -0
  43. {anndata-0.12.1 → anndata-0.12.2}/.github/workflows/close-stale.yml +0 -0
  44. {anndata-0.12.1 → anndata-0.12.2}/.github/workflows/codespell.yml +0 -0
  45. {anndata-0.12.1 → anndata-0.12.2}/.github/workflows/label-stale.yml +0 -0
  46. {anndata-0.12.1 → anndata-0.12.2}/.github/workflows/publish.yml +0 -0
  47. {anndata-0.12.1 → anndata-0.12.2}/.github/workflows/test-gpu.yml +0 -0
  48. {anndata-0.12.1 → anndata-0.12.2}/.gitignore +0 -0
  49. {anndata-0.12.1 → anndata-0.12.2}/.gitmodules +0 -0
  50. {anndata-0.12.1 → anndata-0.12.2}/.pre-commit-config.yaml +0 -0
  51. {anndata-0.12.1 → anndata-0.12.2}/.prettierignore +0 -0
  52. {anndata-0.12.1 → anndata-0.12.2}/.prettierrc.yaml +0 -0
  53. {anndata-0.12.1 → anndata-0.12.2}/.readthedocs.yml +0 -0
  54. {anndata-0.12.1 → anndata-0.12.2}/.taplo.toml +0 -0
  55. {anndata-0.12.1 → anndata-0.12.2}/.vscode/launch.json +0 -0
  56. {anndata-0.12.1 → anndata-0.12.2}/.vscode/settings.json +0 -0
  57. {anndata-0.12.1 → anndata-0.12.2}/LICENSE +0 -0
  58. {anndata-0.12.1 → anndata-0.12.2}/README.md +0 -0
  59. {anndata-0.12.1 → anndata-0.12.2}/benchmarks/README.md +0 -0
  60. {anndata-0.12.1 → anndata-0.12.2}/benchmarks/asv.conf.json +0 -0
  61. {anndata-0.12.1 → anndata-0.12.2}/benchmarks/benchmarks/__init__.py +0 -0
  62. {anndata-0.12.1 → anndata-0.12.2}/benchmarks/benchmarks/anndata.py +0 -0
  63. {anndata-0.12.1 → anndata-0.12.2}/benchmarks/benchmarks/dataset2d.py +0 -0
  64. {anndata-0.12.1 → anndata-0.12.2}/benchmarks/benchmarks/readwrite.py +0 -0
  65. {anndata-0.12.1 → anndata-0.12.2}/benchmarks/benchmarks/sparse_dataset.py +0 -0
  66. {anndata-0.12.1 → anndata-0.12.2}/benchmarks/benchmarks/utils.py +0 -0
  67. {anndata-0.12.1 → anndata-0.12.2}/biome.jsonc +0 -0
  68. {anndata-0.12.1 → anndata-0.12.2}/ci/constraints.txt +0 -0
  69. {anndata-0.12.1 → anndata-0.12.2}/ci/scripts/min-deps.py +0 -0
  70. {anndata-0.12.1 → anndata-0.12.2}/ci/scripts/towncrier_automation.py +0 -0
  71. {anndata-0.12.1 → anndata-0.12.2}/docs/Makefile +0 -0
  72. {anndata-0.12.1 → anndata-0.12.2}/docs/_key_contributors.rst +0 -0
  73. {anndata-0.12.1 → anndata-0.12.2}/docs/_static/img/anndata_schema.svg +0 -0
  74. {anndata-0.12.1 → anndata-0.12.2}/docs/_templates/autosummary/class.rst +0 -0
  75. {anndata-0.12.1 → anndata-0.12.2}/docs/api.md +0 -0
  76. {anndata-0.12.1 → anndata-0.12.2}/docs/benchmark-read-write.ipynb +0 -0
  77. {anndata-0.12.1 → anndata-0.12.2}/docs/benchmarks.md +0 -0
  78. {anndata-0.12.1 → anndata-0.12.2}/docs/concatenation.rst +0 -0
  79. {anndata-0.12.1 → anndata-0.12.2}/docs/conf.py +0 -0
  80. {anndata-0.12.1 → anndata-0.12.2}/docs/contributing.md +0 -0
  81. {anndata-0.12.1 → anndata-0.12.2}/docs/extensions/autosummary_skip_inherited.py +0 -0
  82. {anndata-0.12.1 → anndata-0.12.2}/docs/extensions/no_skip_abc_members.py +0 -0
  83. {anndata-0.12.1 → anndata-0.12.2}/docs/extensions/patch_myst_cite.py +0 -0
  84. {anndata-0.12.1 → anndata-0.12.2}/docs/fileformat-prose.md +0 -0
  85. {anndata-0.12.1 → anndata-0.12.2}/docs/index.md +0 -0
  86. {anndata-0.12.1 → anndata-0.12.2}/docs/interoperability.md +0 -0
  87. {anndata-0.12.1 → anndata-0.12.2}/docs/news.md +0 -0
  88. {anndata-0.12.1 → anndata-0.12.2}/docs/references.rst +0 -0
  89. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.0.md +0 -0
  90. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.1.md +0 -0
  91. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.2.md +0 -0
  92. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.3.md +0 -0
  93. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.4.md +0 -0
  94. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.5.md +0 -0
  95. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.6.md +0 -0
  96. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.7.md +0 -0
  97. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.8.md +0 -0
  98. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.10.9.md +0 -0
  99. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.11.0.md +0 -0
  100. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.11.1.md +0 -0
  101. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.11.2.md +0 -0
  102. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.11.3.md +0 -0
  103. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.11.4.md +0 -0
  104. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.12.0.md +0 -0
  105. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.12.1.md +0 -0
  106. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.4.0.md +0 -0
  107. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.5.0.md +0 -0
  108. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.6.0.md +0 -0
  109. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.6.x.md +0 -0
  110. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.7.0.md +0 -0
  111. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.7.2.md +0 -0
  112. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.7.3.md +0 -0
  113. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.7.4.md +0 -0
  114. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.7.5.md +0 -0
  115. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.7.6.md +0 -0
  116. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.7.7.md +0 -0
  117. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.7.8.md +0 -0
  118. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.8.0.md +0 -0
  119. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.9.0.md +0 -0
  120. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.9.1.md +0 -0
  121. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/0.9.2.md +0 -0
  122. {anndata-0.12.1 → anndata-0.12.2}/docs/release-notes/index.md +0 -0
  123. {anndata-0.12.1 → anndata-0.12.2}/docs/tutorials/index.md +0 -0
  124. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/__init__.py +0 -0
  125. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/__init__.py +0 -0
  126. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/access.py +0 -0
  127. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/aligned_df.py +0 -0
  128. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/aligned_mapping.py +0 -0
  129. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/extensions.py +0 -0
  130. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/file_backing.py +0 -0
  131. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/merge.py +0 -0
  132. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/sparse_dataset.py +0 -0
  133. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/storage.py +0 -0
  134. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/__init__.py +0 -0
  135. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/specs/__init__.py +0 -0
  136. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/specs/lazy_methods.py +0 -0
  137. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/specs/registry.py +0 -0
  138. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/utils.py +0 -0
  139. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/write.py +0 -0
  140. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_settings.pyi +0 -0
  141. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_types.py +0 -0
  142. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_version.py +0 -0
  143. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/_warnings.py +0 -0
  144. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/abc.py +0 -0
  145. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/__init__.py +0 -0
  146. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/_dispatch_io.py +0 -0
  147. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/backed/__init__.py +0 -0
  148. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/backed/_compat.py +0 -0
  149. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/backed/_io.py +0 -0
  150. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/backed/_lazy_arrays.py +0 -0
  151. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/merge.py +0 -0
  152. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/multi_files/__init__.py +0 -0
  153. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/multi_files/_anncollection.py +0 -0
  154. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/pytorch/__init__.py +0 -0
  155. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/experimental/pytorch/_annloader.py +0 -0
  156. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/io.py +0 -0
  157. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/logging.py +0 -0
  158. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/tests/__init__.py +0 -0
  159. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/types.py +0 -0
  160. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/typing.py +0 -0
  161. {anndata-0.12.1 → anndata-0.12.2}/src/anndata/utils.py +0 -0
  162. {anndata-0.12.1 → anndata-0.12.2}/src/testing/anndata/__init__.py +0 -0
  163. {anndata-0.12.1 → anndata-0.12.2}/src/testing/anndata/_doctest.py +0 -0
  164. {anndata-0.12.1 → anndata-0.12.2}/src/testing/anndata/_pytest.py +0 -0
  165. {anndata-0.12.1 → anndata-0.12.2}/src/testing/anndata/py.typed +0 -0
  166. {anndata-0.12.1 → anndata-0.12.2}/tests/data/adata-comments.tsv +0 -0
  167. {anndata-0.12.1 → anndata-0.12.2}/tests/data/adata.csv +0 -0
  168. {anndata-0.12.1 → anndata-0.12.2}/tests/data/archives/readme.md +0 -0
  169. {anndata-0.12.1 → anndata-0.12.2}/tests/data/archives/v0.7.0/adata.h5ad +0 -0
  170. {anndata-0.12.1 → anndata-0.12.2}/tests/data/archives/v0.7.0/adata.zarr.zip +0 -0
  171. {anndata-0.12.1 → anndata-0.12.2}/tests/data/archives/v0.7.8/adata.h5ad +0 -0
  172. {anndata-0.12.1 → anndata-0.12.2}/tests/data/archives/v0.7.8/adata.zarr.zip +0 -0
  173. {anndata-0.12.1 → anndata-0.12.2}/tests/data/excel.xlsx +0 -0
  174. {anndata-0.12.1 → anndata-0.12.2}/tests/data/umi_tools.tsv.gz +0 -0
  175. {anndata-0.12.1 → anndata-0.12.2}/tests/lazy/conftest.py +0 -0
  176. {anndata-0.12.1 → anndata-0.12.2}/tests/lazy/test_concat.py +0 -0
  177. {anndata-0.12.1 → anndata-0.12.2}/tests/lazy/test_read.py +0 -0
  178. {anndata-0.12.1 → anndata-0.12.2}/tests/lazy/test_write.py +0 -0
  179. {anndata-0.12.1 → anndata-0.12.2}/tests/test_anncollection.py +0 -0
  180. {anndata-0.12.1 → anndata-0.12.2}/tests/test_annot.py +0 -0
  181. {anndata-0.12.1 → anndata-0.12.2}/tests/test_awkward.py +0 -0
  182. {anndata-0.12.1 → anndata-0.12.2}/tests/test_backed_dense.py +0 -0
  183. {anndata-0.12.1 → anndata-0.12.2}/tests/test_concatenate.py +0 -0
  184. {anndata-0.12.1 → anndata-0.12.2}/tests/test_concatenate_disk.py +0 -0
  185. {anndata-0.12.1 → anndata-0.12.2}/tests/test_dask.py +0 -0
  186. {anndata-0.12.1 → anndata-0.12.2}/tests/test_dask_view_mem.py +0 -0
  187. {anndata-0.12.1 → anndata-0.12.2}/tests/test_deprecations.py +0 -0
  188. {anndata-0.12.1 → anndata-0.12.2}/tests/test_extensions.py +0 -0
  189. {anndata-0.12.1 → anndata-0.12.2}/tests/test_get_vector.py +0 -0
  190. {anndata-0.12.1 → anndata-0.12.2}/tests/test_gpu.py +0 -0
  191. {anndata-0.12.1 → anndata-0.12.2}/tests/test_helpers.py +0 -0
  192. {anndata-0.12.1 → anndata-0.12.2}/tests/test_inplace_subset.py +0 -0
  193. {anndata-0.12.1 → anndata-0.12.2}/tests/test_io_conversion.py +0 -0
  194. {anndata-0.12.1 → anndata-0.12.2}/tests/test_io_dispatched.py +0 -0
  195. {anndata-0.12.1 → anndata-0.12.2}/tests/test_io_utils.py +0 -0
  196. {anndata-0.12.1 → anndata-0.12.2}/tests/test_io_warnings.py +0 -0
  197. {anndata-0.12.1 → anndata-0.12.2}/tests/test_layers.py +0 -0
  198. {anndata-0.12.1 → anndata-0.12.2}/tests/test_obsmvarm.py +0 -0
  199. {anndata-0.12.1 → anndata-0.12.2}/tests/test_obspvarp.py +0 -0
  200. {anndata-0.12.1 → anndata-0.12.2}/tests/test_raw.py +0 -0
  201. {anndata-0.12.1 → anndata-0.12.2}/tests/test_repr.py +0 -0
  202. {anndata-0.12.1 → anndata-0.12.2}/tests/test_settings.py +0 -0
  203. {anndata-0.12.1 → anndata-0.12.2}/tests/test_transpose.py +0 -0
  204. {anndata-0.12.1 → anndata-0.12.2}/tests/test_uns.py +0 -0
  205. {anndata-0.12.1 → anndata-0.12.2}/tests/test_utils.py +0 -0
  206. {anndata-0.12.1 → anndata-0.12.2}/tests/test_x.py +0 -0
  207. {anndata-0.12.1 → anndata-0.12.2}/tests/test_xarray.py +0 -0

{anndata-0.12.1 → anndata-0.12.2}/.github/workflows/test-cpu.yml

@@ -38,7 +38,7 @@ jobs:
  ENVS_JSON=$(NO_COLOR=1 uvx hatch env show --json | jq -c 'to_entries
  | map(
  select(.key | startswith("hatch-test"))
- | { name: .key, python: .value.python }
+ | { name: .key, python: .value.python, args: (.value."extra-args" // [] | join(" ")) }
  )')
  echo "envs=${ENVS_JSON}" | tee $GITHUB_OUTPUT
  test:
@@ -56,6 +56,9 @@ jobs:
  fetch-depth: 0
  filter: blob:none

+ - name: Install system dependencies
+ run: sudo apt install -y hdf5-tools
+
  - name: Set up Python ${{ matrix.env.python }}
  uses: actions/setup-python@v5
  with:
@@ -71,7 +74,7 @@
  run: uvx hatch -v env create ${{ matrix.env.name }}

  - name: Run tests
- run: uvx hatch run ${{ matrix.env.name }}:run-cov -v --color=yes -n auto --cov --cov-report=xml --junitxml=test-data/test-results.xml -m "${{matrix.io_mark}}"
+ run: uvx hatch run ${{ matrix.env.name }}:run-cov -v --color=yes -n auto --cov --cov-report=xml --junitxml=test-data/test-results.xml -m "${{ matrix.io_mark }}" ${{ matrix.env.args }}

  - name: Upload coverage data
  uses: codecov/codecov-action@v5

{anndata-0.12.1 → anndata-0.12.2}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: anndata
- Version: 0.12.1
+ Version: 0.12.2
  Summary: Annotated data.
  Project-URL: Documentation, https://anndata.readthedocs.io/
  Project-URL: Source, https://github.com/scverse/anndata

anndata-0.12.2/docs/release-notes/0.12.2.md

@@ -0,0 +1,6 @@
+ (v0.12.2)=
+ ### 0.12.2 {small}`2025-08-11`
+
+ ### Bug fixes
+
+ - Revert accidental change where {attr}`~anndata.AnnData.X` got written to disk when it was `None` {user}`flying-sheep` ({pr}`2054`)
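
A minimal sketch of the behavior this fix restores (file name and assertions are illustrative, not part of the release note): writing an AnnData whose `X` is `None` should again leave no `X` element in the store.

import anndata as ad
import h5py
import pandas as pd

# an AnnData carrying only annotations, no data matrix
adata = ad.AnnData(
    obs=pd.DataFrame(index=[f"cell_{i}" for i in range(3)]),
    var=pd.DataFrame(index=[f"gene_{i}" for i in range(2)]),
)
assert adata.X is None

adata.write_h5ad("no_x.h5ad")  # hypothetical path
with h5py.File("no_x.h5ad", "r") as f:
    assert "X" not in f  # with 0.12.2, a None X is no longer written to disk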

{anndata-0.12.1 → anndata-0.12.2}/docs/tutorials/zarr-v3.md

@@ -1,6 +1,6 @@
  # zarr-v3 Guide/Roadmap

- `anndata` now uses the much improved {mod}`zarr` v3 package and also allows writing of datasets in the v3 format via {attr}`anndata.settings.zarr_write_format`, with the exception of structured arrays.
+ `anndata` now uses the much improved {mod}`zarr` v3 package and also allows writing of datasets in the v3 format via {attr}`anndata.settings.zarr_write_format` via {func}`anndata.io.write_zarr` or {meth}`anndata.AnnData.write_zarr`, with the exception of structured arrays.
  Users should notice a significant performance improvement, especially for cloud data, but also likely for local data as well.
  Here is a quick guide on some of our learnings so far:

@@ -48,7 +48,7 @@ import anndata as ad
  from collections.abc import Mapping
  from typing import Any

- ad.settings.zarr_write_format = 3 # Absolutely crucial! Sharding is only for the v3 file format!
+ g = zarr.open_group(orig_path, mode="a", use_consolidated=False, zarr_version=3) # zarr_version 3 is default but note that sharding only works with v3!

  def write_sharded(group: zarr.Group, adata: ad.AnnData):
  def callback(
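
The tutorial now routes zarr v3 output through {attr}`anndata.settings.zarr_write_format` together with `write_zarr`; a brief hedged sketch of that setting in use (store path is illustrative):

import anndata as ad
import numpy as np

ad.settings.zarr_write_format = 3  # write new zarr stores in the v3 on-disk format
adata = ad.AnnData(X=np.ones((4, 3), dtype="float32"))
adata.write_zarr("example_v3.zarr")  # hypothetical path; anndata.io.write_zarr is the functional form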

{anndata-0.12.1 → anndata-0.12.2}/hatch.toml

@@ -36,6 +36,7 @@ overrides.matrix.deps.python = [
  overrides.matrix.deps.features = [
  { if = [ "stable", "pre" ], value = "test" },
  ]
+ overrides.matrix.deps.extra-args = { if = [ "stable", "pre" ], value = [ "--strict-warnings" ] }

  [[envs.hatch-test.matrix]]
  deps = [ "stable", "pre", "min" ]

{anndata-0.12.1 → anndata-0.12.2}/pyproject.toml

@@ -146,6 +146,7 @@ addopts = [
  filterwarnings = [
  "ignore::anndata._warnings.OldFormatWarning",
  "ignore::anndata._warnings.ExperimentalFeatureWarning",
+ "ignore:.*first_column_names:FutureWarning:scanpy", # scanpy 1.10.x
  ]
  # When `--strict-warnings` is used, all warnings are treated as errors, except those:
  filterwarnings_when_strict = [
@@ -158,6 +159,8 @@ filterwarnings_when_strict = [
  "default:The codec `vlen-utf8:UserWarning",
  "default:The dtype `StringDType():UserWarning",
  "default:Consolidated metadata is:UserWarning",
+ "default:.*Structured:zarr.core.dtype.common.UnstableSpecificationWarning",
+ "default:.*FixedLengthUTF32:zarr.core.dtype.common.UnstableSpecificationWarning",
  ]
  python_files = "test_*.py"
  testpaths = [

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/anndata.py

@@ -56,7 +56,7 @@ if TYPE_CHECKING:

  from zarr.storage import StoreLike

- from ..compat import Index1D, XDataset
+ from ..compat import Index1D, Index1DNorm, XDataset
  from ..typing import XDataType
  from .aligned_mapping import AxisArraysView, LayersView, PairwiseArraysView
  from .index import Index
@@ -197,6 +197,11 @@ class AnnData(metaclass=utils.DeprecationMixinMeta): # noqa: PLW1641

  _accessors: ClassVar[set[str]] = set()

+ # view attributes
+ _adata_ref: AnnData | None
+ _oidx: Index1DNorm | None
+ _vidx: Index1DNorm | None
+
  @old_positionals(
  "obsm",
  "varm",
@@ -226,8 +231,8 @@ class AnnData(metaclass=utils.DeprecationMixinMeta): # noqa: PLW1641
  asview: bool = False,
  obsp: np.ndarray | Mapping[str, Sequence[Any]] | None = None,
  varp: np.ndarray | Mapping[str, Sequence[Any]] | None = None,
- oidx: Index1D | None = None,
- vidx: Index1D | None = None,
+ oidx: Index1DNorm | int | np.integer | None = None,
+ vidx: Index1DNorm | int | np.integer | None = None,
  ):
  # check for any multi-indices that aren’t later checked in coerce_array
  for attr, key in [(obs, "obs"), (var, "var"), (X, "X")]:
@@ -237,6 +242,8 @@ class AnnData(metaclass=utils.DeprecationMixinMeta): # noqa: PLW1641
  if not isinstance(X, AnnData):
  msg = "`X` has to be an AnnData object."
  raise ValueError(msg)
+ assert oidx is not None
+ assert vidx is not None
  self._init_as_view(X, oidx, vidx)
  else:
  self._init_as_actual(
@@ -256,7 +263,12 @@ class AnnData(metaclass=utils.DeprecationMixinMeta): # noqa: PLW1641
  filemode=filemode,
  )

- def _init_as_view(self, adata_ref: AnnData, oidx: Index, vidx: Index):
+ def _init_as_view(
+ self,
+ adata_ref: AnnData,
+ oidx: Index1DNorm | int | np.integer,
+ vidx: Index1DNorm | int | np.integer,
+ ):
  if adata_ref.isbacked and adata_ref.is_view:
  msg = (
  "Currently, you cannot index repeatedly into a backed AnnData, "
@@ -277,6 +289,9 @@ class AnnData(metaclass=utils.DeprecationMixinMeta): # noqa: PLW1641
  vidx += adata_ref.n_vars * (vidx < 0)
  vidx = slice(vidx, vidx + 1, 1)
  if adata_ref.is_view:
+ assert adata_ref._adata_ref is not None
+ assert adata_ref._oidx is not None
+ assert adata_ref._vidx is not None
  prev_oidx, prev_vidx = adata_ref._oidx, adata_ref._vidx
  adata_ref = adata_ref._adata_ref
  oidx, vidx = _resolve_idxs((prev_oidx, prev_vidx), (oidx, vidx), adata_ref)
@@ -1004,7 +1019,9 @@ class AnnData(metaclass=utils.DeprecationMixinMeta): # noqa: PLW1641

  write_attribute(self.file._file, attr, value)

- def _normalize_indices(self, index: Index | None) -> tuple[slice, slice]:
+ def _normalize_indices(
+ self, index: Index | None
+ ) -> tuple[Index1DNorm | int | np.integer, Index1DNorm | int | np.integer]:
  return _normalize_indices(index, self.obs_names, self.var_names)

  # TODO: this is not quite complete...

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/index.py

@@ -14,18 +14,18 @@ from ..compat import AwkArray, CSArray, CSMatrix, DaskArray, XDataArray
  from .xarray import Dataset2D

  if TYPE_CHECKING:
- from ..compat import Index, Index1D
+ from ..compat import Index, Index1D, Index1DNorm


  def _normalize_indices(
  index: Index | None, names0: pd.Index, names1: pd.Index
- ) -> tuple[slice, slice]:
+ ) -> tuple[Index1DNorm | int | np.integer, Index1DNorm | int | np.integer]:
  # deal with tuples of length 1
  if isinstance(index, tuple) and len(index) == 1:
  index = index[0]
  # deal with pd.Series
  if isinstance(index, pd.Series):
- index: Index = index.values
+ index = index.values
  if isinstance(index, tuple):
  # TODO: The series should probably be aligned first
  index = tuple(i.values if isinstance(i, pd.Series) else i for i in index)
@@ -36,15 +36,8 @@ def _normalize_indices(


  def _normalize_index( # noqa: PLR0911, PLR0912
- indexer: slice
- | np.integer
- | int
- | str
- | Sequence[bool | int | np.integer]
- | np.ndarray
- | pd.Index,
- index: pd.Index,
- ) -> slice | int | np.ndarray: # ndarray of int or bool
+ indexer: Index1D, index: pd.Index
+ ) -> Index1DNorm | int | np.integer:
  # TODO: why is this here? All tests pass without it and it seems at the minimum not strict enough.
  if not isinstance(index, pd.RangeIndex) and index.dtype in (np.float64, np.int64):
  msg = f"Don’t call _normalize_index with non-categorical/string names and non-range index {index}"
@@ -212,7 +205,7 @@ def _subset_awkarray(a: AwkArray, subset_idx: Index):

  # Registration for SparseDataset occurs in sparse_dataset.py
  @_subset.register(h5py.Dataset)
- def _subset_dataset(d, subset_idx):
+ def _subset_dataset(d: h5py.Dataset, subset_idx: Index):
  if not isinstance(subset_idx, tuple):
  subset_idx = (subset_idx,)
  ordered = list(subset_idx)

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/raw.py

@@ -17,7 +17,7 @@ if TYPE_CHECKING:
  from collections.abc import Mapping, Sequence
  from typing import ClassVar

- from ..compat import CSMatrix
+ from ..compat import CSMatrix, Index, Index1DNorm
  from .aligned_mapping import AxisArraysView
  from .anndata import AnnData
  from .sparse_dataset import BaseCompressedSparseDataset
@@ -121,7 +121,7 @@ class Raw:
  def obs_names(self) -> pd.Index[str]:
  return self._adata.obs_names

- def __getitem__(self, index):
+ def __getitem__(self, index: Index) -> Raw:
  oidx, vidx = self._normalize_indices(index)

  # To preserve two dimensional shape
@@ -169,7 +169,9 @@ class Raw:
  uns=self._adata.uns.copy(),
  )

- def _normalize_indices(self, packed_index):
+ def _normalize_indices(
+ self, packed_index: Index
+ ) -> tuple[Index1DNorm | int | np.integer, Index1DNorm | int | np.integer]:
  # deal with slicing with pd.Series
  if isinstance(packed_index, pd.Series):
  packed_index = packed_index.values

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/views.py

@@ -29,8 +29,12 @@ if TYPE_CHECKING:
  from collections.abc import Callable, Iterable, KeysView, Sequence
  from typing import Any, ClassVar

+ from numpy.typing import NDArray
+
  from anndata import AnnData

+ from ..compat import Index1DNorm
+

  @contextmanager
  def view_update(adata_view: AnnData, attr_name: str, keys: tuple[str, ...]):
@@ -433,18 +437,24 @@ except ImportError:
  pass


- def _resolve_idxs(old, new, adata):
- t = tuple(_resolve_idx(old[i], new[i], adata.shape[i]) for i in (0, 1))
- return t
+ def _resolve_idxs(
+ old: tuple[Index1DNorm, Index1DNorm],
+ new: tuple[Index1DNorm, Index1DNorm],
+ adata: AnnData,
+ ) -> tuple[Index1DNorm, Index1DNorm]:
+ o, v = (_resolve_idx(old[i], new[i], adata.shape[i]) for i in (0, 1))
+ return o, v


  @singledispatch
- def _resolve_idx(old, new, l):
- return old[new]
+ def _resolve_idx(old: Index1DNorm, new: Index1DNorm, l: Literal[0, 1]) -> Index1DNorm:
+ raise NotImplementedError


  @_resolve_idx.register(np.ndarray)
- def _resolve_idx_ndarray(old, new, l):
+ def _resolve_idx_ndarray(
+ old: NDArray[np.bool_] | NDArray[np.integer], new: Index1DNorm, l: Literal[0, 1]
+ ) -> NDArray[np.bool_] | NDArray[np.integer]:
  if is_bool_dtype(old) and is_bool_dtype(new):
  mask_new = np.zeros_like(old)
  mask_new[np.flatnonzero(old)[new]] = True
@@ -454,21 +464,17 @@ def _resolve_idx_ndarray(old, new, l):
  return old[new]


- @_resolve_idx.register(np.integer)
- @_resolve_idx.register(int)
- def _resolve_idx_scalar(old, new, l):
- return np.array([old])[new]
-
-
  @_resolve_idx.register(slice)
- def _resolve_idx_slice(old, new, l):
+ def _resolve_idx_slice(
+ old: slice, new: Index1DNorm, l: Literal[0, 1]
+ ) -> slice | NDArray[np.integer]:
  if isinstance(new, slice):
  return _resolve_idx_slice_slice(old, new, l)
  else:
  return np.arange(*old.indices(l))[new]


- def _resolve_idx_slice_slice(old, new, l):
+ def _resolve_idx_slice_slice(old: slice, new: slice, l: Literal[0, 1]) -> slice:
  r = range(*old.indices(l))[new]
  # Convert back to slice
  start, stop, step = r.start, r.stop, r.step
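
A quick worked illustration of the slice-of-slice resolution that `_resolve_idx_slice_slice` performs above (values are arbitrary):

# resolving `new` applied on top of `old` over an axis of length 10
old, new, length = slice(0, 10, 2), slice(1, 4), 10
r = range(*old.indices(length))[new]       # range(2, 8, 2)
resolved = slice(r.start, r.stop, r.step)  # slice(2, 8, 2)
assert list(range(*resolved.indices(length))) == [2, 4, 6]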

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_core/xarray.py

@@ -184,18 +184,6 @@ class Dataset2D:
  Handler class for doing the iloc-style indexing using :meth:`~xarray.Dataset.isel`.
  """

- @dataclass(frozen=True)
- class IlocGetter:
- _ds: XDataset
- _coord: str
-
- def __getitem__(self, idx) -> Dataset2D:
- # xarray seems to have some code looking for a second entry in tuples,
- # so we unpack the tuple
- if isinstance(idx, tuple) and len(idx) == 1:
- idx = idx[0]
- return Dataset2D(self._ds.isel(**{self._coord: idx}))
-
  return IlocGetter(self.ds, self.index_dim)

  # See https://github.com/pydata/xarray/blob/568f3c1638d2d34373408ce2869028faa3949446/xarray/core/dataset.py#L1239-L1248
@@ -402,3 +390,16 @@ class Dataset2D:
  def _items(self):
  for col in self:
  yield col, self[col]
+
+
+ @dataclass(frozen=True)
+ class IlocGetter:
+ _ds: XDataset
+ _coord: str
+
+ def __getitem__(self, idx) -> Dataset2D:
+ # xarray seems to have some code looking for a second entry in tuples,
+ # so we unpack the tuple
+ if isinstance(idx, tuple) and len(idx) == 1:
+ idx = idx[0]
+ return Dataset2D(self._ds.isel(**{self._coord: idx}))

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/h5ad.py

@@ -4,7 +4,7 @@ import re
  from functools import partial
  from pathlib import Path
  from types import MappingProxyType
- from typing import TYPE_CHECKING, TypeVar
+ from typing import TYPE_CHECKING, TypeVar, cast
  from warnings import warn

  import h5py
@@ -36,11 +36,12 @@ from .utils import (
  )

  if TYPE_CHECKING:
- from collections.abc import Callable, Collection, Mapping, Sequence
+ from collections.abc import Callable, Collection, Container, Mapping, Sequence
  from os import PathLike
  from typing import Any, Literal

  from .._core.file_backing import AnnDataFileManager
+ from .._core.raw import Raw

  T = TypeVar("T")

@@ -82,29 +83,18 @@ def write_h5ad(
  # TODO: Use spec writing system for this
  # Currently can't use write_dispatched here because this function is also called to do an
  # inplace update of a backed object, which would delete "/"
- f = f["/"]
+ f = cast("h5py.Group", f["/"])
  f.attrs.setdefault("encoding-type", "anndata")
  f.attrs.setdefault("encoding-version", "0.1.0")

- if "X" in as_dense and isinstance(
- adata.X, CSMatrix | BaseCompressedSparseDataset
- ):
- write_sparse_as_dense(f, "X", adata.X, dataset_kwargs=dataset_kwargs)
- elif not (adata.isbacked and Path(adata.filename) == Path(filepath)):
- # If adata.isbacked, X should already be up to date
- write_elem(f, "X", adata.X, dataset_kwargs=dataset_kwargs)
- if "raw/X" in as_dense and isinstance(
- adata.raw.X, CSMatrix | BaseCompressedSparseDataset
- ):
- write_sparse_as_dense(
- f, "raw/X", adata.raw.X, dataset_kwargs=dataset_kwargs
- )
- write_elem(f, "raw/var", adata.raw.var, dataset_kwargs=dataset_kwargs)
- write_elem(
- f, "raw/varm", dict(adata.raw.varm), dataset_kwargs=dataset_kwargs
- )
- elif adata.raw is not None:
- write_elem(f, "raw", adata.raw, dataset_kwargs=dataset_kwargs)
+ _write_x(
+ f,
+ adata, # accessing adata.X reopens adata.file if it’s backed
+ is_backed=adata.isbacked and adata.filename == filepath,
+ as_dense=as_dense,
+ dataset_kwargs=dataset_kwargs,
+ )
+ _write_raw(f, adata.raw, as_dense=as_dense, dataset_kwargs=dataset_kwargs)
  write_elem(f, "obs", adata.obs, dataset_kwargs=dataset_kwargs)
  write_elem(f, "var", adata.var, dataset_kwargs=dataset_kwargs)
  write_elem(f, "obsm", dict(adata.obsm), dataset_kwargs=dataset_kwargs)
@@ -115,6 +105,41 @@ def write_h5ad(
  write_elem(f, "uns", dict(adata.uns), dataset_kwargs=dataset_kwargs)


+ def _write_x(
+ f: h5py.Group,
+ adata: AnnData,
+ *,
+ is_backed: bool,
+ as_dense: Container[str],
+ dataset_kwargs: Mapping[str, Any],
+ ) -> None:
+ if "X" in as_dense and isinstance(adata.X, CSMatrix | BaseCompressedSparseDataset):
+ write_sparse_as_dense(f, "X", adata.X, dataset_kwargs=dataset_kwargs)
+ elif is_backed:
+ pass # If adata.isbacked, X should already be up to date
+ elif adata.X is None:
+ f.pop("X", None)
+ else:
+ write_elem(f, "X", adata.X, dataset_kwargs=dataset_kwargs)
+
+
+ def _write_raw(
+ f: h5py.Group,
+ raw: Raw,
+ *,
+ as_dense: Container[str],
+ dataset_kwargs: Mapping[str, Any],
+ ) -> None:
+ if "raw/X" in as_dense and isinstance(
+ raw.X, CSMatrix | BaseCompressedSparseDataset
+ ):
+ write_sparse_as_dense(f, "raw/X", raw.X, dataset_kwargs=dataset_kwargs)
+ write_elem(f, "raw/var", raw.var, dataset_kwargs=dataset_kwargs)
+ write_elem(f, "raw/varm", dict(raw.varm), dataset_kwargs=dataset_kwargs)
+ elif raw is not None:
+ write_elem(f, "raw", raw, dataset_kwargs=dataset_kwargs)
+
+
  @report_write_key_on_error
  @write_spec(IOSpec("array", "0.2.0"))
  def write_sparse_as_dense(
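
The refactor keeps the `as_dense` behavior of `write_h5ad` intact; a hedged usage sketch (file name is illustrative):

import anndata as ad
from scipy import sparse

adata = ad.AnnData(X=sparse.random(50, 20, density=0.1, format="csr"))
# store the sparse X as a dense HDF5 dataset; "raw/X" is accepted too when .raw is set
adata.write_h5ad("dense_x.h5ad", as_dense=["X"])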

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/read.py

@@ -48,7 +48,9 @@ def read_csv(
  dtype
  Numpy data type.
  """
- return read_text(filename, delimiter, first_column_names, dtype)
+ return read_text(
+ filename, delimiter, first_column_names=first_column_names, dtype=dtype
+ )


  def read_excel(
@@ -360,18 +362,26 @@ def read_text(
  Numpy data type.
  """
  if not isinstance(filename, PathLike | str | bytes):
- return _read_text(filename, delimiter, first_column_names, dtype)
+ return _read_text(
+ filename, delimiter, first_column_names=first_column_names, dtype=dtype
+ )

  filename = Path(filename)
  if filename.suffix == ".gz":
  with gzip.open(str(filename), mode="rt") as f:
- return _read_text(f, delimiter, first_column_names, dtype)
+ return _read_text(
+ f, delimiter, first_column_names=first_column_names, dtype=dtype
+ )
  elif filename.suffix == ".bz2":
  with bz2.open(str(filename), mode="rt") as f:
- return _read_text(f, delimiter, first_column_names, dtype)
+ return _read_text(
+ f, delimiter, first_column_names=first_column_names, dtype=dtype
+ )
  else:
  with filename.open() as f:
- return _read_text(f, delimiter, first_column_names, dtype)
+ return _read_text(
+ f, delimiter, first_column_names=first_column_names, dtype=dtype
+ )


  def _iter_lines(file_like: Iterable[str]) -> Generator[str, None, None]:
@@ -385,7 +395,8 @@ def _iter_lines(file_like: Iterable[str]) -> Generator[str, None, None]:
  def _read_text( # noqa: PLR0912, PLR0915
  f: Iterator[str],
  delimiter: str | None,
- first_column_names: bool | None, # noqa: FBT001
+ *,
+ first_column_names: bool | None,
  dtype: str,
  ) -> AnnData:
  comments = []
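
These readers now forward `first_column_names` and `dtype` by keyword (with `_read_text` making the flag keyword-only); the public API is unchanged. A hedged usage sketch (file name is illustrative):

from anndata.io import read_csv

# first_column_names=True treats the first column as the obs names
adata = read_csv("counts.csv", first_column_names=True)  # hypothetical file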

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/specs/methods.py

@@ -275,7 +275,8 @@ def write_anndata(
  dataset_kwargs: Mapping[str, Any] = MappingProxyType({}),
  ):
  g = f.require_group(k)
- _writer.write_elem(g, "X", adata.X, dataset_kwargs=dataset_kwargs)
+ if adata.X is not None:
+ _writer.write_elem(g, "X", adata.X, dataset_kwargs=dataset_kwargs)
  _writer.write_elem(g, "obs", adata.obs, dataset_kwargs=dataset_kwargs)
  _writer.write_elem(g, "var", adata.var, dataset_kwargs=dataset_kwargs)
  _writer.write_elem(g, "obsm", dict(adata.obsm), dataset_kwargs=dataset_kwargs)
@@ -629,7 +630,7 @@ def write_vlen_string_array_zarr(
  dataset_kwargs = zarr_v3_compressor_compat(dataset_kwargs)
  dtype = VariableLengthUTF8()
  filters, fill_value = None, None
- if ad.settings.zarr_write_format == 2:
+ if f.metadata.zarr_format == 2:
  filters, fill_value = [VLenUTF8()], ""
  f.create_array(
  k,
@@ -695,12 +696,11 @@ def write_recarray_zarr(
  from anndata.compat import _to_fixed_length_strings

  elem = _to_fixed_length_strings(elem)
- if isinstance(f, H5Group) or is_zarr_v2():
+ if is_zarr_v2():
  f.create_dataset(k, data=elem, shape=elem.shape, **dataset_kwargs)
  else:
  dataset_kwargs = dataset_kwargs.copy()
  dataset_kwargs = zarr_v3_compressor_compat(dataset_kwargs)
- # TODO: zarr’s on-disk format v3 doesn’t support this dtype
  f.create_array(k, shape=elem.shape, dtype=elem.dtype, **dataset_kwargs)
  f[k][...] = elem

@@ -1283,7 +1283,7 @@ def write_scalar_zarr(
  from numcodecs import VLenUTF8
  from zarr.core.dtype import VariableLengthUTF8

- match ad.settings.zarr_write_format, value:
+ match f.metadata.zarr_format, value:
  case 2, str():
  filters, dtype, fill_value = [VLenUTF8()], VariableLengthUTF8(), ""
  case 3, str():
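
The zarr writers above now consult the format of the group being written to rather than the global setting; a hedged sketch of where that value lives in zarr-python 3 (store path is illustrative):

import zarr

g = zarr.open_group("example.zarr", mode="w", zarr_format=2)  # hypothetical store
assert g.metadata.zarr_format == 2  # the value write_vlen_string_array_zarr / write_scalar_zarr now check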

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_io/zarr.py

@@ -1,6 +1,5 @@
  from __future__ import annotations

- from pathlib import Path
  from typing import TYPE_CHECKING, TypeVar
  from warnings import warn

@@ -37,8 +36,6 @@ def write_zarr(
  **ds_kwargs,
  ) -> None:
  """See :meth:`~anndata.AnnData.write_zarr`."""
- if isinstance(store, Path):
- store = str(store)
  if convert_strings_to_categoricals:
  adata.strings_to_categoricals()
  if adata.raw is not None:
@@ -75,9 +72,6 @@ def read_zarr(store: PathLike[str] | str | MutableMapping | zarr.Group) -> AnnDa
  store
  The filename, a :class:`~typing.MutableMapping`, or a Zarr storage class.
  """
- if isinstance(store, Path):
- store = str(store)
-
  f = store if isinstance(store, zarr.Group) else zarr.open(store, mode="r")

  # Read with handling for backwards compat

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/_settings.py

@@ -447,7 +447,7 @@ def validate_zarr_write_format(format: int):
  settings.register(
  "zarr_write_format",
  default_value=2,
- description="Which version of zarr to write to.",
+ description="Which version of zarr to write to when anndata must internally open a write-able zarr group.",
  validate=validate_zarr_write_format,
  get_from_env=lambda name, default: check_and_get_environ_var(
  f"ANNDATA_{name.upper()}",

{anndata-0.12.1 → anndata-0.12.2}/src/anndata/compat/__init__.py

@@ -1,7 +1,7 @@
  from __future__ import annotations

  from codecs import decode
- from collections.abc import Mapping
+ from collections.abc import Mapping, Sequence
  from functools import cache, partial, singledispatch
  from importlib.util import find_spec
  from types import EllipsisType
@@ -12,6 +12,7 @@ import h5py
  import numpy as np
  import pandas as pd
  import scipy
+ from numpy.typing import NDArray
  from packaging.version import Version
  from zarr import Array as ZarrArray # noqa: F401
  from zarr import Group as ZarrGroup
@@ -19,6 +20,7 @@ from zarr import Group as ZarrGroup
  if TYPE_CHECKING:
  from typing import Any

+
  #############################
  # scipy sparse array comapt #
  #############################
@@ -32,7 +34,26 @@ class Empty:
  pass


- Index1D = slice | int | str | np.int64 | np.ndarray | pd.Series
+ Index1DNorm = slice | NDArray[np.bool_] | NDArray[np.integer]
+ # TODO: pd.Index[???]
+ Index1D = (
+ # 0D index
+ int
+ | str
+ | np.int64
+ # normalized 1D idex
+ | Index1DNorm
+ # different containers for mask, obs/varnames, or numerical index
+ | Sequence[int]
+ | Sequence[str]
+ | Sequence[bool]
+ | pd.Series # bool, int, str
+ | pd.Index
+ | NDArray[np.str_]
+ | np.matrix # bool
+ | CSMatrix # bool
+ | CSArray # bool
+ )
  IndexRest = Index1D | EllipsisType
  Index = (
  IndexRest
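
A hedged illustration of the index forms these aliases describe — `Index1DNorm` is what indexing gets normalized to, `Index1D` what callers may pass (shapes and names are arbitrary):

import anndata as ad
import numpy as np

adata = ad.AnnData(X=np.arange(12, dtype="float32").reshape(4, 3))

adata[1:3]                                   # slice: already in Index1DNorm form
adata[np.array([True, False, True, False])]  # boolean mask array (Index1DNorm)
adata[np.array([0, 2])]                      # integer array (Index1DNorm)
adata[["0", "2"]]                            # sequence of obs names, normalized internally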

@@ -286,8 +307,12 @@ def _to_fixed_length_strings(value: np.ndarray) -> np.ndarray:
  """\
  Convert variable length strings to fixed length.

- Currently a workaround for
- https://github.com/zarr-developers/zarr-python/pull/422
+ Formerly a workaround for
+ https://github.com/zarr-developers/zarr-python/pull/422,
+ resolved in https://github.com/zarr-developers/zarr-python/pull/813.
+
+ But if we didn't do this conversion, we would have to use a special codec in v2
+ for objects and v3 doesn't support objects at all. So we leave this function as-is.
  """
  new_dtype = []
  for dt_name, (dt_type, dt_offset) in value.dtype.fields.items():