anndata 0.12.7.tar.gz → 0.12.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (217)
  1. {anndata-0.12.7 → anndata-0.12.9}/.github/workflows/test-cpu.yml +9 -5
  2. {anndata-0.12.7 → anndata-0.12.9}/.github/workflows/test-gpu.yml +3 -2
  3. {anndata-0.12.7 → anndata-0.12.9}/.readthedocs.yml +1 -1
  4. {anndata-0.12.7 → anndata-0.12.9}/PKG-INFO +4 -4
  5. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/benchmarks/dataset2d.py +17 -4
  6. {anndata-0.12.7 → anndata-0.12.9}/docs/conf.py +4 -4
  7. anndata-0.12.9/docs/release-notes/0.12.8.md +10 -0
  8. anndata-0.12.9/docs/release-notes/0.12.9.md +6 -0
  9. {anndata-0.12.7 → anndata-0.12.9}/hatch.toml +1 -4
  10. {anndata-0.12.7 → anndata-0.12.9}/pyproject.toml +3 -5
  11. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/anndata.py +6 -1
  12. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/index.py +14 -12
  13. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/merge.py +65 -90
  14. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/xarray.py +44 -16
  15. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/h5ad.py +2 -2
  16. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/specs/lazy_methods.py +18 -22
  17. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/specs/methods.py +37 -4
  18. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/utils.py +30 -1
  19. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/zarr.py +6 -2
  20. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_settings.py +8 -0
  21. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_settings.pyi +1 -0
  22. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/compat/__init__.py +23 -3
  23. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/backed/_io.py +17 -11
  24. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/backed/_lazy_arrays.py +31 -33
  25. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/merge.py +19 -13
  26. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/tests/helpers.py +14 -18
  27. {anndata-0.12.7 → anndata-0.12.9}/tests/lazy/conftest.py +7 -4
  28. {anndata-0.12.7 → anndata-0.12.9}/tests/lazy/test_concat.py +11 -7
  29. {anndata-0.12.7 → anndata-0.12.9}/tests/lazy/test_read.py +43 -4
  30. {anndata-0.12.7 → anndata-0.12.9}/tests/test_concatenate.py +1 -1
  31. {anndata-0.12.7 → anndata-0.12.9}/tests/test_concatenate_disk.py +4 -0
  32. {anndata-0.12.7 → anndata-0.12.9}/tests/test_dask_view_mem.py +8 -4
  33. {anndata-0.12.7 → anndata-0.12.9}/tests/test_helpers.py +10 -2
  34. {anndata-0.12.7 → anndata-0.12.9}/tests/test_io_elementwise.py +107 -0
  35. {anndata-0.12.7 → anndata-0.12.9}/tests/test_readwrite.py +30 -17
  36. {anndata-0.12.7 → anndata-0.12.9}/tests/test_views.py +58 -3
  37. {anndata-0.12.7 → anndata-0.12.9}/tests/test_xarray.py +65 -21
  38. {anndata-0.12.7 → anndata-0.12.9}/.cirun.yml +0 -0
  39. {anndata-0.12.7 → anndata-0.12.9}/.codecov.yml +0 -0
  40. {anndata-0.12.7 → anndata-0.12.9}/.cruft.json +0 -0
  41. {anndata-0.12.7 → anndata-0.12.9}/.editorconfig +0 -0
  42. {anndata-0.12.7 → anndata-0.12.9}/.github/ISSUE_TEMPLATE/bug-report.yml +0 -0
  43. {anndata-0.12.7 → anndata-0.12.9}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  44. {anndata-0.12.7 → anndata-0.12.9}/.github/ISSUE_TEMPLATE/enhancement-request.yml +0 -0
  45. {anndata-0.12.7 → anndata-0.12.9}/.github/ISSUE_TEMPLATE/question.yml +0 -0
  46. {anndata-0.12.7 → anndata-0.12.9}/.github/PULL_REQUEST_TEMPLATE.md +0 -0
  47. {anndata-0.12.7 → anndata-0.12.9}/.github/dependabot.yml +0 -0
  48. {anndata-0.12.7 → anndata-0.12.9}/.github/workflows/benchmark.yml +0 -0
  49. {anndata-0.12.7 → anndata-0.12.9}/.github/workflows/check-pr.yml +0 -0
  50. {anndata-0.12.7 → anndata-0.12.9}/.github/workflows/close-stale.yml +0 -0
  51. {anndata-0.12.7 → anndata-0.12.9}/.github/workflows/codespell.yml +0 -0
  52. {anndata-0.12.7 → anndata-0.12.9}/.github/workflows/label-stale.yml +0 -0
  53. {anndata-0.12.7 → anndata-0.12.9}/.github/workflows/publish.yml +0 -0
  54. {anndata-0.12.7 → anndata-0.12.9}/.gitignore +0 -0
  55. {anndata-0.12.7 → anndata-0.12.9}/.gitmodules +0 -0
  56. {anndata-0.12.7 → anndata-0.12.9}/.pre-commit-config.yaml +0 -0
  57. {anndata-0.12.7 → anndata-0.12.9}/.taplo.toml +0 -0
  58. {anndata-0.12.7 → anndata-0.12.9}/.vscode/launch.json +0 -0
  59. {anndata-0.12.7 → anndata-0.12.9}/.vscode/settings.json +0 -0
  60. {anndata-0.12.7 → anndata-0.12.9}/LICENSE +0 -0
  61. {anndata-0.12.7 → anndata-0.12.9}/README.md +0 -0
  62. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/README.md +0 -0
  63. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/asv.conf.json +0 -0
  64. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/benchmarks/__init__.py +0 -0
  65. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/benchmarks/anndata.py +0 -0
  66. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/benchmarks/backed_hdf5.py +0 -0
  67. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/benchmarks/readwrite.py +0 -0
  68. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/benchmarks/sparse_dataset.py +0 -0
  69. {anndata-0.12.7 → anndata-0.12.9}/benchmarks/benchmarks/utils.py +0 -0
  70. {anndata-0.12.7 → anndata-0.12.9}/biome.jsonc +0 -0
  71. {anndata-0.12.7 → anndata-0.12.9}/ci/constraints.txt +0 -0
  72. {anndata-0.12.7 → anndata-0.12.9}/ci/min-constraints.txt +0 -0
  73. {anndata-0.12.7 → anndata-0.12.9}/ci/scripts/min-deps.py +0 -0
  74. {anndata-0.12.7 → anndata-0.12.9}/ci/scripts/towncrier_automation.py +0 -0
  75. {anndata-0.12.7 → anndata-0.12.9}/docs/Makefile +0 -0
  76. {anndata-0.12.7 → anndata-0.12.9}/docs/_key_contributors.rst +0 -0
  77. {anndata-0.12.7 → anndata-0.12.9}/docs/_static/img/anndata_schema.svg +0 -0
  78. {anndata-0.12.7 → anndata-0.12.9}/docs/_templates/autosummary/class.rst +0 -0
  79. {anndata-0.12.7 → anndata-0.12.9}/docs/api.md +0 -0
  80. {anndata-0.12.7 → anndata-0.12.9}/docs/benchmark-read-write.ipynb +0 -0
  81. {anndata-0.12.7 → anndata-0.12.9}/docs/benchmarks.md +0 -0
  82. {anndata-0.12.7 → anndata-0.12.9}/docs/concatenation.rst +0 -0
  83. {anndata-0.12.7 → anndata-0.12.9}/docs/contributing.md +0 -0
  84. {anndata-0.12.7 → anndata-0.12.9}/docs/extensions/autosummary_skip_inherited.py +0 -0
  85. {anndata-0.12.7 → anndata-0.12.9}/docs/extensions/no_skip_abc_members.py +0 -0
  86. {anndata-0.12.7 → anndata-0.12.9}/docs/extensions/patch_myst_cite.py +0 -0
  87. {anndata-0.12.7 → anndata-0.12.9}/docs/fileformat-prose.md +0 -0
  88. {anndata-0.12.7 → anndata-0.12.9}/docs/index.md +0 -0
  89. {anndata-0.12.7 → anndata-0.12.9}/docs/interoperability.md +0 -0
  90. {anndata-0.12.7 → anndata-0.12.9}/docs/references.rst +0 -0
  91. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.0.md +0 -0
  92. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.1.md +0 -0
  93. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.2.md +0 -0
  94. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.3.md +0 -0
  95. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.4.md +0 -0
  96. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.5.md +0 -0
  97. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.6.md +0 -0
  98. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.7.md +0 -0
  99. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.8.md +0 -0
  100. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.10.9.md +0 -0
  101. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.11.0.md +0 -0
  102. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.11.1.md +0 -0
  103. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.11.2.md +0 -0
  104. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.11.3.md +0 -0
  105. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.11.4.md +0 -0
  106. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.12.0.md +0 -0
  107. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.12.1.md +0 -0
  108. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.12.2.md +0 -0
  109. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.12.3.md +0 -0
  110. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.12.4.md +0 -0
  111. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.12.5.md +0 -0
  112. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.12.6.md +0 -0
  113. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.12.7.md +0 -0
  114. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.4.0.md +0 -0
  115. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.5.0.md +0 -0
  116. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.6.0.md +0 -0
  117. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.6.x.md +0 -0
  118. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.7.0.md +0 -0
  119. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.7.2.md +0 -0
  120. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.7.3.md +0 -0
  121. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.7.4.md +0 -0
  122. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.7.5.md +0 -0
  123. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.7.6.md +0 -0
  124. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.7.7.md +0 -0
  125. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.7.8.md +0 -0
  126. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.8.0.md +0 -0
  127. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.9.0.md +0 -0
  128. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.9.1.md +0 -0
  129. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/0.9.2.md +0 -0
  130. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/2172.bug.md +0 -0
  131. {anndata-0.12.7 → anndata-0.12.9}/docs/release-notes/index.md +0 -0
  132. {anndata-0.12.7 → anndata-0.12.9}/docs/tutorials/index.md +0 -0
  133. {anndata-0.12.7 → anndata-0.12.9}/docs/tutorials/zarr-v3.md +0 -0
  134. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/__init__.py +0 -0
  135. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/__init__.py +0 -0
  136. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/access.py +0 -0
  137. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/aligned_df.py +0 -0
  138. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/aligned_mapping.py +0 -0
  139. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/extensions.py +0 -0
  140. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/file_backing.py +0 -0
  141. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/raw.py +0 -0
  142. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/sparse_dataset.py +0 -0
  143. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/storage.py +0 -0
  144. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_core/views.py +0 -0
  145. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/__init__.py +0 -0
  146. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/read.py +0 -0
  147. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/specs/__init__.py +0 -0
  148. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/specs/registry.py +0 -0
  149. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_io/write.py +0 -0
  150. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_types.py +0 -0
  151. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/_warnings.py +0 -0
  152. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/abc.py +0 -0
  153. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/__init__.py +0 -0
  154. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/_dispatch_io.py +0 -0
  155. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/backed/__init__.py +0 -0
  156. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/backed/_compat.py +0 -0
  157. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/multi_files/__init__.py +0 -0
  158. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/multi_files/_anncollection.py +0 -0
  159. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/pytorch/__init__.py +0 -0
  160. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/experimental/pytorch/_annloader.py +0 -0
  161. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/io.py +0 -0
  162. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/logging.py +0 -0
  163. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/tests/__init__.py +0 -0
  164. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/types.py +0 -0
  165. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/typing.py +0 -0
  166. {anndata-0.12.7 → anndata-0.12.9}/src/anndata/utils.py +0 -0
  167. {anndata-0.12.7 → anndata-0.12.9}/src/testing/anndata/__init__.py +0 -0
  168. {anndata-0.12.7 → anndata-0.12.9}/src/testing/anndata/_doctest.py +0 -0
  169. {anndata-0.12.7 → anndata-0.12.9}/src/testing/anndata/_pytest.py +0 -0
  170. {anndata-0.12.7 → anndata-0.12.9}/src/testing/anndata/py.typed +0 -0
  171. {anndata-0.12.7 → anndata-0.12.9}/tests/conftest.py +0 -0
  172. {anndata-0.12.7 → anndata-0.12.9}/tests/data/adata-comments.tsv +0 -0
  173. {anndata-0.12.7 → anndata-0.12.9}/tests/data/adata.csv +0 -0
  174. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/readme.md +0 -0
  175. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.11.4/adata.h5ad +0 -0
  176. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.11.4/adata.zarr.zip +0 -0
  177. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.11.4/readme.md +0 -0
  178. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.5.0/adata.h5ad +0 -0
  179. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.5.0/readme.md +0 -0
  180. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.7.0/adata.h5ad +0 -0
  181. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.7.0/adata.zarr.zip +0 -0
  182. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.7.8/adata.h5ad +0 -0
  183. {anndata-0.12.7 → anndata-0.12.9}/tests/data/archives/v0.7.8/adata.zarr.zip +0 -0
  184. {anndata-0.12.7 → anndata-0.12.9}/tests/data/excel.xlsx +0 -0
  185. {anndata-0.12.7 → anndata-0.12.9}/tests/data/umi_tools.tsv.gz +0 -0
  186. {anndata-0.12.7 → anndata-0.12.9}/tests/lazy/__init__.py +0 -0
  187. {anndata-0.12.7 → anndata-0.12.9}/tests/lazy/test_write.py +0 -0
  188. {anndata-0.12.7 → anndata-0.12.9}/tests/test_anncollection.py +0 -0
  189. {anndata-0.12.7 → anndata-0.12.9}/tests/test_annot.py +0 -0
  190. {anndata-0.12.7 → anndata-0.12.9}/tests/test_awkward.py +0 -0
  191. {anndata-0.12.7 → anndata-0.12.9}/tests/test_backed_dense.py +0 -0
  192. {anndata-0.12.7 → anndata-0.12.9}/tests/test_backed_hdf5.py +0 -0
  193. {anndata-0.12.7 → anndata-0.12.9}/tests/test_backed_sparse.py +0 -0
  194. {anndata-0.12.7 → anndata-0.12.9}/tests/test_base.py +0 -0
  195. {anndata-0.12.7 → anndata-0.12.9}/tests/test_dask.py +0 -0
  196. {anndata-0.12.7 → anndata-0.12.9}/tests/test_deprecations.py +0 -0
  197. {anndata-0.12.7 → anndata-0.12.9}/tests/test_extensions.py +0 -0
  198. {anndata-0.12.7 → anndata-0.12.9}/tests/test_get_vector.py +0 -0
  199. {anndata-0.12.7 → anndata-0.12.9}/tests/test_gpu.py +0 -0
  200. {anndata-0.12.7 → anndata-0.12.9}/tests/test_inplace_subset.py +0 -0
  201. {anndata-0.12.7 → anndata-0.12.9}/tests/test_io_backwards_compat.py +0 -0
  202. {anndata-0.12.7 → anndata-0.12.9}/tests/test_io_conversion.py +0 -0
  203. {anndata-0.12.7 → anndata-0.12.9}/tests/test_io_dispatched.py +0 -0
  204. {anndata-0.12.7 → anndata-0.12.9}/tests/test_io_partial.py +0 -0
  205. {anndata-0.12.7 → anndata-0.12.9}/tests/test_io_utils.py +0 -0
  206. {anndata-0.12.7 → anndata-0.12.9}/tests/test_io_warnings.py +0 -0
  207. {anndata-0.12.7 → anndata-0.12.9}/tests/test_layers.py +0 -0
  208. {anndata-0.12.7 → anndata-0.12.9}/tests/test_obsmvarm.py +0 -0
  209. {anndata-0.12.7 → anndata-0.12.9}/tests/test_obspvarp.py +0 -0
  210. {anndata-0.12.7 → anndata-0.12.9}/tests/test_raw.py +0 -0
  211. {anndata-0.12.7 → anndata-0.12.9}/tests/test_repr.py +0 -0
  212. {anndata-0.12.7 → anndata-0.12.9}/tests/test_settings.py +0 -0
  213. {anndata-0.12.7 → anndata-0.12.9}/tests/test_structured_arrays.py +0 -0
  214. {anndata-0.12.7 → anndata-0.12.9}/tests/test_transpose.py +0 -0
  215. {anndata-0.12.7 → anndata-0.12.9}/tests/test_uns.py +0 -0
  216. {anndata-0.12.7 → anndata-0.12.9}/tests/test_utils.py +0 -0
  217. {anndata-0.12.7 → anndata-0.12.9}/tests/test_x.py +0 -0

--- anndata-0.12.7/.github/workflows/test-cpu.yml
+++ anndata-0.12.9/.github/workflows/test-cpu.yml
@@ -44,6 +44,8 @@ jobs:
       matrix:
         env: ${{ fromJSON(needs.get-environments.outputs.envs) }}
         io_mark: ["zarr_io", "not zarr_io", "dask_distributed"] # dask_distributed should not be run with -n auto as it uses a client with processes
+    permissions:
+      id-token: write # for codecov OIDC
     env: # environment variables for use in codecov’s env_vars tagging
       ENV_NAME: ${{ matrix.env.name }}
       IO_MARK: ${{ matrix.io_mark }}
@@ -72,23 +74,25 @@ jobs:
           hatch run ${{ matrix.env.name }}:run-cov -v --color=yes ${{ matrix.io_mark != 'dask_distributed' && '-n auto' || '' }} --junitxml=test-data/test-results.xml -m "${{ matrix.io_mark }}" ${{ matrix.env.args }}
           hatch run ${{ matrix.env.name }}:cov-combine
           hatch run ${{ matrix.env.name }}:coverage xml
+          hatch run ${{ matrix.env.name }}:cov-report

       - name: Upload test results
         if: ${{ !cancelled() }}
-        uses: codecov/test-results-action@v1
+        uses: codecov/codecov-action@v5
         with:
-          token: ${{ secrets.CODECOV_TOKEN }}
+          report_type: test_results
           env_vars: ENV_NAME,IO_MARK
+          files: test-data/test-results.xml
+          use_oidc: true
           fail_ci_if_error: true
-          file: test-data/test-results.xml

       - name: Upload coverage data
         uses: codecov/codecov-action@v5
         with:
-          token: ${{ secrets.CODECOV_TOKEN }}
           env_vars: ENV_NAME,IO_MARK
-          fail_ci_if_error: true
           files: test-data/coverage.xml
+          use_oidc: true
+          fail_ci_if_error: true

   build:
     runs-on: ubuntu-24.04

--- anndata-0.12.7/.github/workflows/test-gpu.yml
+++ anndata-0.12.9/.github/workflows/test-gpu.yml
@@ -52,9 +52,10 @@ jobs:
       - name: Nvidia SMI sanity check
         run: nvidia-smi

-      - name: Install yq
+      - name: Install yq # https://cirun.slack.com/archives/C09SNDRB3A8/p1766512487317849?thread_ts=1766512112.938459&cid=C09SNDRB3A8
         run: |
-          sudo snap install yq
+          sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
+          sudo chmod +x /usr/local/bin/yq

       - name: Extract max Python version from classifiers
         run: |

--- anndata-0.12.7/.readthedocs.yml
+++ anndata-0.12.9/.readthedocs.yml
@@ -2,7 +2,7 @@ version: 2
 build:
   os: ubuntu-24.04
   tools:
-    python: "3.13"
+    python: "3.14"
   jobs:
     post_checkout:
       # unshallow so version can be derived from tag

--- anndata-0.12.7/PKG-INFO
+++ anndata-0.12.9/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: anndata
-Version: 0.12.7
+Version: 0.12.9
 Summary: Annotated data.
 Project-URL: Documentation, https://anndata.readthedocs.io/
 Project-URL: Source, https://github.com/scverse/anndata
@@ -32,7 +32,7 @@ Requires-Dist: natsort
 Requires-Dist: numpy>=1.26
 Requires-Dist: packaging>=24.2
 Requires-Dist: pandas!=2.1.2,<3,>=2.1.0
-Requires-Dist: scipy>=1.12
+Requires-Dist: scipy!=1.17.0,>=1.12
 Requires-Dist: zarr!=3.0.*,>=2.18.7
 Provides-Extra: cu11
 Requires-Dist: cupy-cuda11x; extra == 'cu11'
@@ -74,11 +74,11 @@ Requires-Dist: boltons; extra == 'test'
 Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,!=2025.2.*,!=2025.3.*,!=2025.4.*,!=2025.5.*,!=2025.6.*,!=2025.7.*,!=2025.8.*,>=2023.5.1; extra == 'test'
 Requires-Dist: dask[distributed]; extra == 'test'
 Requires-Dist: filelock; extra == 'test'
-Requires-Dist: httpx<1.0; extra == 'test'
 Requires-Dist: joblib; extra == 'test'
 Requires-Dist: loompy>=3.0.5; extra == 'test'
 Requires-Dist: matplotlib; extra == 'test'
 Requires-Dist: openpyxl; extra == 'test'
+Requires-Dist: pooch; extra == 'test'
 Requires-Dist: pyarrow; extra == 'test'
 Requires-Dist: pytest; extra == 'test'
 Requires-Dist: pytest-cov; extra == 'test'
@@ -96,11 +96,11 @@ Requires-Dist: boltons; extra == 'test-min'
 Requires-Dist: dask[array]!=2024.8.*,!=2024.9.*,!=2025.2.*,!=2025.3.*,!=2025.4.*,!=2025.5.*,!=2025.6.*,!=2025.7.*,!=2025.8.*,>=2023.5.1; extra == 'test-min'
 Requires-Dist: dask[distributed]; extra == 'test-min'
 Requires-Dist: filelock; extra == 'test-min'
-Requires-Dist: httpx<1.0; extra == 'test-min'
 Requires-Dist: joblib; extra == 'test-min'
 Requires-Dist: loompy>=3.0.5; extra == 'test-min'
 Requires-Dist: matplotlib; extra == 'test-min'
 Requires-Dist: openpyxl; extra == 'test-min'
+Requires-Dist: pooch; extra == 'test-min'
 Requires-Dist: pyarrow; extra == 'test-min'
 Requires-Dist: pytest; extra == 'test-min'
 Requires-Dist: pytest-cov; extra == 'test-min'

--- anndata-0.12.7/benchmarks/benchmarks/dataset2d.py
+++ anndata-0.12.9/benchmarks/benchmarks/dataset2d.py
@@ -18,11 +18,11 @@ class Dataset2D:
     params = (
         ("zarr", "h5ad"),
         ((-1,), None),
-        ("cat", "numeric", "string-array", "nullable-string-array"),
+        ("cat", "numeric", "string-array", "nullable-string-array", "all"),
     )

     def setup_cache(self):
-        n_obs = 10000
+        n_obs = 100_000
         array_types = {
             "numeric": np.arange(n_obs),
             "string-array": np.array(["a"] * n_obs),
@@ -40,17 +40,30 @@ class Dataset2D:
                 if writing_string_array_on_disk := (
                     isinstance(v, np.ndarray) and df["a"].dtype == "string"
                 ):
-                    df["a"] = df["a"].to_numpy()
+                    with pd.option_context("future.infer_string", False):  # noqa: FBT003
+                        df["a"] = df["a"].to_numpy()
                 with ad.settings.override(allow_write_nullable_strings=True):
                     ad.io.write_elem(store, "df", df)
                 if writing_string_array_on_disk:
                     assert store["df"]["a"].attrs["encoding-type"] == "string-array"
+        for store in [
+            h5py.File("data_all.h5ad", mode="w"),
+            zarr.open("data_all.zarr", mode="w", zarr_version=2),
+        ]:
+            df = pd.DataFrame(array_types, index=[f"cell{i}" for i in range(n_obs)])
+            # write a string array by triggering:
+            # https://github.com/scverse/anndata/blob/71966500949adcac4e49d2233f06e9f11f438e19/src/anndata/_io/specs/methods.py#L557-L559
+            df["string-array"] = df["string-array"].to_numpy().astype(object)
+            with ad.settings.override(allow_write_nullable_strings=True):
+                ad.io.write_elem(store, "df", df)

     def setup(
         self,
         store_type: Literal["zarr", "h5ad"],
         chunks: None | tuple[int],
-        array_type: Literal["cat", "numeric", "string-array", "nullable-string-array"],
+        array_type: Literal[
+            "cat", "numeric", "string-array", "nullable-string-array", "all"
+        ],
     ):
         self.store = (
             h5py.File(f"data_{array_type}.h5ad", mode="r")

--- anndata-0.12.7/docs/conf.py
+++ anndata-0.12.9/docs/conf.py
@@ -85,6 +85,7 @@ napoleon_use_rtype = True  # having a separate entry generally helps readability
 napoleon_use_param = True
 napoleon_custom_sections = [("Params", "Parameters")]
 typehints_defaults = "braces"
+always_use_bars_union = True  # use `|`, not `Union` in types even when on Python ≤3.14
 todo_include_todos = False
 nitpicky = True  # Report broken links
 nitpick_ignore = [  # APIs without an intersphinx entry
@@ -133,10 +134,8 @@ intersphinx_mapping = dict(
     loompy=("https://linnarssonlab.org/loompy", None),
     numpy=("https://numpy.org/doc/stable", None),
     obstore=("https://developmentseed.org/obstore/latest/", None),
-    pandas=("https://pandas.pydata.org/pandas-docs/stable", None),
-    # TODO: switch to `/3` once docs are built with Python 3.14
-    # https://github.com/readthedocs/readthedocs.org/issues/12523
-    python=("https://docs.python.org/3.13", None),
+    pandas=("https://pandas.pydata.org/pandas-docs/version/2.3", None),
+    python=("https://docs.python.org/3", None),
     scipy=("https://docs.scipy.org/doc/scipy", None),
     sklearn=("https://scikit-learn.org/stable", None),
     xarray=("https://docs.xarray.dev/en/stable", None),
@@ -174,6 +173,7 @@ qualname_overrides = {
     "numpy.dtypes.StringDType": ("py:attr", "numpy.dtypes.StringDType"),
     "pandas.DataFrame.iloc": ("py:attr", "pandas.DataFrame.iloc"),
     "pandas.DataFrame.loc": ("py:attr", "pandas.DataFrame.loc"),
+    "pandas.core.dtypes.dtypes.BaseMaskedDtype": "pandas.api.extensions.ExtensionDtype",
     # should be fixed soon: https://github.com/tox-dev/sphinx-autodoc-typehints/pull/516
     "types.EllipsisType": ("py:data", "types.EllipsisType"),
     "pathlib._local.Path": "pathlib.Path",

--- /dev/null
+++ anndata-0.12.9/docs/release-notes/0.12.8.md
@@ -0,0 +1,10 @@
+(v0.12.8)=
+### 0.12.8 {small}`2026-01-27`
+
+#### Bug fixes
+
+- Actually copy single zarr store input in {func}`anndata.experimental.concat_on_disk` {user}`ilan-gold` ({pr}`2267`)
+- Fix `compressor` kwarg handling when writing to zarr v3 {user}`ilan-gold` ({pr}`2270`)
+- Only open HDF5 file once in backed mode {user}`flying-sheep` ({pr}`2274`)
+- Fix {obj}`numpy.uint` support in {func}`anndata.experimental.read_lazy` and {func}`anndata.experimental.read_elem_lazy` {user}`flying-sheep` ({pr}`2287`)
+- `var` index is loaded into memory if it wasn't previously for {func}`anndata.concat` when {attr}`anndata.AnnData.var` is a {class}`anndata.experimental.backed.Dataset2D` {user}`ilan-gold` ({pr}`2299`)

--- /dev/null
+++ anndata-0.12.9/docs/release-notes/0.12.9.md
@@ -0,0 +1,6 @@
+(v0.12.9)=
+### 0.12.9 {small}`2026-01-29`
+
+#### Performance
+
+- Add a `write_csr_csc_indices_with_min_possible_dtype` option to {attr}`anndata.settings` to enable downcasting of the `indices` of csr and csc matrices to a smaller dtype when writing. For example, if your csr matrix only has 30000 columns, then you can write out the `indices` of that matrix as `uint16` instead of `int64`. {user}`ilan-gold` ({pr}`2159`)
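
A minimal sketch of how the new option could be used, assuming it is toggled like other settings via `ad.settings.override` (the matrix shape, density, and output path below are illustrative only):

    import anndata as ad
    import numpy as np
    import scipy.sparse as sp

    # A CSR matrix with 30,000 columns: every column index fits into uint16.
    adata = ad.AnnData(
        X=sp.random(1_000, 30_000, density=0.01, format="csr", dtype=np.float32)
    )

    # Opt in to writing the sparse `indices` with the smallest dtype that can hold them.
    with ad.settings.override(write_csr_csc_indices_with_min_possible_dtype=True):
        adata.write_zarr("downcast-indices.zarr")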

--- anndata-0.12.7/hatch.toml
+++ anndata-0.12.9/hatch.toml
@@ -14,6 +14,7 @@ scripts.build = "python3 ci/scripts/towncrier_automation.py {args}"
 scripts.clean = "git restore --source=HEAD --staged --worktree -- docs/release-notes"

 [envs.hatch-test]
+python = "3.14"
 default-args = [ ]
 features = [ "dev", "test-min" ]
 extra-dependencies = [ "ipykernel" ]
@@ -36,10 +37,6 @@ overrides.matrix.deps.pre-install-commands = [
 ]
 overrides.matrix.deps.python = [
   { if = [ "min" ], value = "3.11" },
-  # transitive test dep numba doesn’t support 3.14 in a stable release yet:
-  # https://github.com/numba/numba/issues/9957
-  { if = [ "stable" ], value = "3.13" },
-  { if = [ "pre" ], value = "3.14" },
 ]
 overrides.matrix.deps.features = [
   { if = [ "stable", "pre" ], value = "test" },

--- anndata-0.12.7/pyproject.toml
+++ anndata-0.12.9/pyproject.toml
@@ -40,7 +40,7 @@ dependencies = [
   "pandas >=2.1.0, !=2.1.2, <3",
   "numpy>=1.26",
   # https://github.com/scverse/anndata/issues/1434
-  "scipy >=1.12",
+  "scipy >=1.12,!=1.17.0",
   "h5py>=3.8",
   "natsort",
   "packaging>=24.2",
@@ -93,11 +93,10 @@ test-min = [
   "joblib",
   "boltons",
   "scanpy>=1.10",
-  # TODO: Is 1.0dev1 a real pre-release? https://pypi.org/project/httpx/#history
-  "httpx<1.0", # For data downloading
   "dask[distributed]",
   "awkward>=2.3.2",
   "pyarrow",
+  "pooch",
   "anndata[dask]",
 ]
 test = [ "anndata[test-min,lazy]" ]
@@ -159,12 +158,11 @@ filterwarnings_when_strict = [
   "default::scipy.sparse.SparseEfficiencyWarning",
   "default::dask.array.core.PerformanceWarning",
   "default:anndata will no longer support zarr v2:DeprecationWarning",
-  "default:The codec `vlen-utf8:UserWarning",
-  "default:The dtype `StringDType():UserWarning",
   "default:Consolidated metadata is:UserWarning",
   "default:.*Structured:zarr.core.dtype.common.UnstableSpecificationWarning",
   "default:.*FixedLengthUTF32:zarr.core.dtype.common.UnstableSpecificationWarning",
   "default:Automatic shard shape inference is experimental",
+  "default:Writing zarr v2:UserWarning",
 ]
 python_files = [ "test_*.py" ]
 testpaths = [

--- anndata-0.12.7/src/anndata/_core/anndata.py
+++ anndata-0.12.9/src/anndata/_core/anndata.py
@@ -362,7 +362,12 @@ class AnnData(metaclass=utils.DeprecationMixinMeta):  # noqa: PLW1641

         # init from file
         if filename is not None:
-            self.file = AnnDataFileManager(self, filename, filemode)
+            fileobj, filename = (
+                (filename, None)
+                if isinstance(filename, h5py.File)
+                else (None, filename)
+            )
+            self.file = AnnDataFileManager(self, filename, filemode, fileobj)
         else:
             self.file = AnnDataFileManager(self, None)


--- anndata-0.12.7/src/anndata/_core/index.py
+++ anndata-0.12.9/src/anndata/_core/index.py
@@ -25,12 +25,6 @@ def _normalize_indices(
     # deal with tuples of length 1
     if isinstance(index, tuple) and len(index) == 1:
         index = index[0]
-    # deal with pd.Series
-    if isinstance(index, pd.Series):
-        index = index.values
-    if isinstance(index, tuple):
-        # TODO: The series should probably be aligned first
-        index = tuple(i.values if isinstance(i, pd.Series) else i for i in index)
     ax0, ax1 = unpack_index(index)
     ax0 = _normalize_index(ax0, names0)
     ax1 = _normalize_index(ax1, names1)
@@ -45,6 +39,9 @@ def _normalize_index(  # noqa: PLR0911, PLR0912
         msg = f"Don’t call _normalize_index with non-categorical/string names and non-range index {index}"
         raise TypeError(msg)

+    if isinstance(indexer, pd.Index | pd.Series):
+        indexer = indexer.array
+
     # the following is insanely slow for sequences,
     # we replaced it using pandas below
     def name_idx(i):
@@ -65,16 +62,21 @@ def _normalize_index(  # noqa: PLR0911, PLR0912
     elif isinstance(indexer, str):
         return index.get_loc(indexer)  # int
     elif isinstance(
-        indexer, Sequence | np.ndarray | pd.Index | CSMatrix | np.matrix | CSArray
+        indexer,
+        Sequence
+        | np.ndarray
+        | pd.api.extensions.ExtensionArray
+        | CSMatrix
+        | np.matrix
+        | CSArray,
     ):
-        if hasattr(indexer, "shape") and (
-            (indexer.shape == (index.shape[0], 1))
-            or (indexer.shape == (1, index.shape[0]))
+        if (shape := getattr(indexer, "shape", None)) is not None and (
+            shape == (index.shape[0], 1) or shape == (1, index.shape[0])
         ):
             if isinstance(indexer, CSMatrix | CSArray):
                 indexer = indexer.toarray()
             indexer = np.ravel(indexer)
-        if not isinstance(indexer, np.ndarray | pd.Index):
+        if not isinstance(indexer, np.ndarray):
             indexer = np.array(indexer)
         if len(indexer) == 0:
             indexer = indexer.astype(int)
@@ -111,7 +113,7 @@ def _normalize_index(  # noqa: PLR0911, PLR0912
             return indexer.data.compute()
         return indexer.data
     msg = f"Unknown indexer {indexer!r} of type {type(indexer)}"
-    raise IndexError()
+    raise IndexError(msg)


 def _fix_slice_bounds(s: slice, length: int) -> slice:
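
The index-normalization changes above accept pandas objects (`Series`, `Index`, extension arrays) directly in `_normalize_index` instead of converting them up front. A minimal sketch of the user-level subsetting that exercises this path (toy data, not taken from the test suite):

    import anndata as ad
    import numpy as np
    import pandas as pd

    adata = ad.AnnData(
        X=np.arange(12, dtype=np.float32).reshape(4, 3),
        obs=pd.DataFrame(
            {"group": ["a", "a", "b", "b"]},
            index=[f"cell_{i}" for i in range(4)],
        ),
    )

    # A boolean pandas Series used as an obs mask; its `.array` is what the
    # normalizer now works with per the hunk above.
    mask = adata.obs["group"] == "a"
    print(adata[mask].shape)  # (2, 3)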

--- anndata-0.12.7/src/anndata/_core/merge.py
+++ anndata-0.12.9/src/anndata/_core/merge.py
@@ -10,7 +10,7 @@ from collections.abc import Callable, Mapping, MutableSet
 from functools import partial, reduce, singledispatch
 from itertools import repeat
 from operator import and_, or_, sub
-from typing import TYPE_CHECKING, Literal, TypeVar
+from typing import TYPE_CHECKING, Literal, TypeVar, cast
 from warnings import warn

 import numpy as np
@@ -44,7 +44,7 @@ if TYPE_CHECKING:

     from anndata._types import Join_T

-    from ..compat import XDataArray, XDataset
+    from ..compat import XDataArray

 T = TypeVar("T")

@@ -244,110 +244,89 @@ def as_cp_sparse(x) -> CupySparseMatrix:
 def unify_dtypes(
     dfs: Iterable[pd.DataFrame | Dataset2D],
 ) -> list[pd.DataFrame | Dataset2D]:
-    """
-    Attempts to unify datatypes from multiple dataframes.
+    """Attempt to unify datatypes from multiple dataframes.

     For catching cases where pandas would convert to object dtype.
     """
     dfs = list(dfs)
     # Get shared categorical columns
-    df_dtypes = [dict(df.dtypes) for df in dfs]
+    df_dtypes = [
+        cast("pd.Series[ExtensionDtype]", df.dtypes).to_dict()
+        if isinstance(df, pd.DataFrame)
+        else df.dtypes
+        for df in dfs
+    ]
     columns = reduce(lambda x, y: x.union(y), [df.columns for df in dfs])
-
-    dtypes: dict[str, list[np.dtype | ExtensionDtype]] = {col: [] for col in columns}
-    for col in columns:
-        for df in df_dtypes:
-            dtypes[col].append(df.get(col, None))
-
+    dtypes = {
+        col: (
+            [df[col] for df in df_dtypes if col in df],
+            any(col not in df for df in df_dtypes),
+        )
+        for col in columns
+    }
     if len(dtypes) == 0:
         return dfs
-    else:
-        dfs = [df.copy(deep=False) for df in dfs]

     new_dtypes = {
         col: target_dtype
-        for col, dtype in dtypes.items()
-        if (target_dtype := try_unifying_dtype(dtype)) is not None
+        for col, (dts, has_missing) in dtypes.items()
+        if (target_dtype := try_unifying_dtype(dts, has_missing=has_missing))
+        is not None
     }

+    dfs = [df.copy(deep=False) for df in dfs]
     for df in dfs:
         for col, dtype in new_dtypes.items():
             if col in df:
                 df[col] = df[col].astype(dtype)
-
     return dfs


-def try_unifying_dtype(  # noqa PLR0911, PLR0912
-    col: Sequence[np.dtype | ExtensionDtype],
-) -> pd.core.dtypes.base.ExtensionDtype | None:
-    """
-    If dtypes can be unified, returns the dtype they would be unified to.
+def try_unifying_dtype(
+    dtypes: Sequence[np.dtype | ExtensionDtype], *, has_missing: bool
+) -> ExtensionDtype | type[object] | None:
+    """Determine unified dtype if possible.

-    Returns None if they can't be unified, or if we can expect pandas to unify them for
-    us.
+    Returns None if they can’t be unified, or if we can expect pandas to unify them for us.

     Params
     ------
-    col:
-        A list of dtypes to unify. Can be numpy/ pandas dtypes, or None (which denotes
-        a missing value)
+    dtypes
+        A list of dtypes to unify. Can be numpy or pandas dtypes
+    has_missing
+        Whether the result needs to accommodate missing values
     """
-    dtypes: set[pd.CategoricalDtype] = set()
     # Categorical
-    if any(isinstance(dtype, pd.CategoricalDtype) for dtype in col):
-        ordered = False
-        for dtype in col:
-            if isinstance(dtype, pd.CategoricalDtype):
-                dtypes.add(dtype)
-                ordered = ordered | dtype.ordered
-            elif not pd.isnull(dtype):
-                return None
-        if len(dtypes) > 0:
-            categories = reduce(
-                lambda x, y: x.union(y),
-                (dtype.categories for dtype in dtypes if not pd.isnull(dtype)),
-            )
+    if any(isinstance(dtype, pd.CategoricalDtype) for dtype in dtypes):
+        if not all(isinstance(dtype, pd.CategoricalDtype) for dtype in dtypes):
+            return None
+        if TYPE_CHECKING:
+            dtypes = cast("Sequence[pd.CategoricalDtype]", dtypes)
+
+        all_categories = reduce(
+            lambda x, y: x.union(y), (dtype.categories for dtype in dtypes)
+        )
+        if not any(dtype.ordered for dtype in dtypes):
+            return pd.CategoricalDtype(natsorted(all_categories), ordered=False)
+
+        dtypes_with_categories = [
+            dtype for dtype in dtypes if len(dtype.categories) > 0
+        ]
+        if dtypes_with_categories and all(
+            len(dtype.categories) == len(all_categories)
+            and dtype.ordered
+            and np.all(all_categories == dtype.categories)
+            for dtype in dtypes_with_categories
+        ):
+            return dtypes_with_categories[0]
+
+        return object

-        if not ordered:
-            return pd.CategoricalDtype(natsorted(categories), ordered=False)
-        else:  # for xarray Datasets, see https://github.com/pydata/xarray/issues/10247
-            categories_intersection = reduce(
-                lambda x, y: x.intersection(y),
-                (
-                    dtype.categories
-                    for dtype in dtypes
-                    if not pd.isnull(dtype) and len(dtype.categories) > 0
-                ),
-            )
-            if len(categories_intersection) < len(categories):
-                return object
-            else:
-                same_orders = all(
-                    dtype.ordered
-                    for dtype in dtypes
-                    if not pd.isnull(dtype) and len(dtype.categories) > 0
-                )
-                same_orders &= all(
-                    np.all(categories == dtype.categories)
-                    for dtype in dtypes
-                    if not pd.isnull(dtype) and len(dtype.categories) > 0
-                )
-                if same_orders:
-                    return next(
-                        dtype
-                        for dtype in dtypes
-                        if not pd.isnull(dtype) and len(dtype.categories) > 0
-                    )
-                return object
     # Boolean
-    elif all(pd.api.types.is_bool_dtype(dtype) or dtype is None for dtype in col):
-        if any(dtype is None for dtype in col):
-            return pd.BooleanDtype()
-        else:
-            return None
-    else:
-        return None
+    if all(pd.api.types.is_bool_dtype(dtype) for dtype in dtypes) and has_missing:
+        return pd.BooleanDtype()
+
+    return None


 def check_combinable_cols(cols: list[pd.Index], join: Join_T):
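
The `unify_dtypes`/`try_unifying_dtype` rewrite above exists because, as the docstring notes, pandas silently falls back to `object` dtype when concatenating frames whose categorical columns disagree. A small illustration of that behavior in plain pandas (column and category names are made up; this does not call anndata's private helpers):

    import pandas as pd

    a = pd.DataFrame({"group": pd.Categorical(["x", "y"], categories=["x", "y"])})
    b = pd.DataFrame({"group": pd.Categorical(["z"], categories=["z"])})

    # Differing category sets: pandas drops the categorical dtype on concat.
    print(pd.concat([a, b])["group"].dtype)  # object

    # Casting both columns to one shared dtype first keeps the column categorical,
    # which is roughly the job of the unified dtype computed above.
    shared = pd.CategoricalDtype(categories=["x", "y", "z"], ordered=False)
    combined = pd.concat([a.astype({"group": shared}), b.astype({"group": shared})])
    print(combined["group"].dtype)  # category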
@@ -1207,15 +1186,13 @@ def make_dask_col_from_extension_dtype(
     A :class:`dask.Array`: representation of the column.
     """
     import dask.array as da
-    import xarray as xr
-    from xarray.core.indexing import LazilyIndexedArray

     from anndata._io.specs.lazy_methods import (
         compute_chunk_layout_for_axis_size,
         get_chunksize,
         maybe_open_h5,
     )
-    from anndata.compat import XDataArray
+    from anndata.compat import xarray as xr
     from anndata.experimental import read_elem_lazy

     base_path_or_zarr_group = col.attrs.get("base_path_or_zarr_group")
@@ -1224,7 +1201,6 @@ def make_dask_col_from_extension_dtype(
         base_path_or_zarr_group is not None and elem_name is not None
     ):  # lazy, backed by store
         dims = col.dims
-        coords = col.coords.copy()
         with maybe_open_h5(base_path_or_zarr_group, elem_name) as f:
             maybe_chunk_size = get_chunksize(read_elem_lazy(f))
         chunk_size = (
@@ -1238,17 +1214,14 @@ def make_dask_col_from_extension_dtype(
         # reopening is important to get around h5py's unserializable lock in processes
         with maybe_open_h5(base_path_or_zarr_group, elem_name) as f:
             v = read_elem_lazy(f)
-            variable = xr.Variable(data=LazilyIndexedArray(v), dims=dims)
-            data_array = XDataArray(
-                variable,
-                coords=coords,
-                dims=dims,
+            variable = xr.Variable(
+                data=xr.core.indexing.LazilyIndexedArray(v), dims=dims
             )
             idx = tuple(
                 slice(start, stop)
                 for start, stop in block_info[None]["array-location"]
             )
-            chunk = np.array(data_array.data[idx])
+            chunk = np.array(variable.data[idx])
             return chunk

     if col.dtype == "category" or col.dtype == "string" or use_only_object_dtype:  # noqa PLR1714
@@ -1268,7 +1241,7 @@ def make_dask_col_from_extension_dtype(


 def make_xarray_extension_dtypes_dask(
     annotations: Iterable[Dataset2D], *, use_only_object_dtype: bool = False
-) -> Generator[XDataset, None, None]:
+) -> Generator[Dataset2D, None, None]:
     """
     Creates a generator of Dataset2D objects with dask arrays in place of :class:`pandas.api.extensions.ExtensionArray` dtype columns.

@@ -1710,6 +1683,9 @@ def concat(  # noqa: PLR0912, PLR0913, PLR0915
                 alt_annotations, use_only_object_dtype=True
             )
         )
+        for a in annotations_with_only_dask:
+            if a.true_index_dim != a.index_dim:
+                a.index = a.true_index
         annotations_with_only_dask = [
             a.ds.rename({a.true_index_dim: "merge_index"})
             for a in annotations_with_only_dask
@@ -1717,7 +1693,6 @@ def concat(  # noqa: PLR0912, PLR0913, PLR0915
         alt_annot = Dataset2D(
             xr.merge(annotations_with_only_dask, join=join, compat="override")
         )
-        alt_annot.true_index_dim = "merge_index"

     X = concat_Xs(adatas, reindexers, axis=axis, fill_value=fill_value)