lsst-pipe-base 29.2025.1600.tar.gz → 29.2025.1800.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144)
  1. {lsst_pipe_base-29.2025.1600/python/lsst_pipe_base.egg-info → lsst_pipe_base-29.2025.1800}/PKG-INFO +1 -1
  2. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/_status.py +11 -1
  3. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/all_dimensions_quantum_graph_builder.py +43 -259
  4. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/caching_limited_butler.py +12 -0
  5. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/connections.py +90 -5
  6. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/quantum_graph_builder.py +3 -2
  7. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/quantum_graph_skeleton.py +14 -0
  8. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/mocks/_data_id_match.py +4 -0
  9. lsst_pipe_base-29.2025.1800/python/lsst/pipe/base/version.py +2 -0
  10. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800/python/lsst_pipe_base.egg-info}/PKG-INFO +1 -1
  11. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_adjust_all_quanta.py +16 -4
  12. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_caching_limited_butler.py +8 -2
  13. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_task.py +11 -0
  14. lsst_pipe_base-29.2025.1600/python/lsst/pipe/base/version.py +0 -2
  15. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/COPYRIGHT +0 -0
  16. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/LICENSE +0 -0
  17. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/MANIFEST.in +0 -0
  18. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/README.md +0 -0
  19. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/bsd_license.txt +0 -0
  20. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/CHANGES.rst +0 -0
  21. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/creating-a-pipeline.rst +0 -0
  22. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/creating-a-pipelinetask.rst +0 -0
  23. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/creating-a-task.rst +0 -0
  24. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/index.rst +0 -0
  25. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/task-framework-overview.rst +0 -0
  26. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/task-retargeting-howto.rst +0 -0
  27. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/testing-a-pipeline-task.rst +0 -0
  28. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/testing-pipelines-with-mocks.rst +0 -0
  29. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/doc/lsst.pipe.base/working-with-pipeline-graphs.rst +0 -0
  30. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/gpl-v3.0.txt +0 -0
  31. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/pyproject.toml +0 -0
  32. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/__init__.py +0 -0
  33. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/__init__.py +0 -0
  34. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/__init__.py +0 -0
  35. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/_datasetQueryConstraints.py +0 -0
  36. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/_dataset_handle.py +0 -0
  37. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/_instrument.py +0 -0
  38. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/_observation_dimension_packer.py +0 -0
  39. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/_quantumContext.py +0 -0
  40. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/_task_metadata.py +0 -0
  41. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/automatic_connection_constants.py +0 -0
  42. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/cli/__init__.py +0 -0
  43. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/cli/_get_cli_subcommands.py +0 -0
  44. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/cli/cmd/__init__.py +0 -0
  45. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/cli/cmd/commands.py +0 -0
  46. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/cli/opt/__init__.py +0 -0
  47. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/cli/opt/arguments.py +0 -0
  48. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/cli/opt/options.py +0 -0
  49. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/config.py +0 -0
  50. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/configOverrides.py +0 -0
  51. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/connectionTypes.py +0 -0
  52. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/dot_tools.py +0 -0
  53. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/executionButlerBuilder.py +0 -0
  54. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/execution_reports.py +0 -0
  55. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/formatters/__init__.py +0 -0
  56. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/formatters/pexConfig.py +0 -0
  57. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/graph/__init__.py +0 -0
  58. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/graph/_implDetails.py +0 -0
  59. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/graph/_loadHelpers.py +0 -0
  60. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/graph/_versionDeserializers.py +0 -0
  61. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/graph/graph.py +0 -0
  62. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/graph/graphSummary.py +0 -0
  63. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/graph/quantumNode.py +0 -0
  64. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/mermaid_tools.py +0 -0
  65. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline.py +0 -0
  66. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipelineIR.py +0 -0
  67. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipelineTask.py +0 -0
  68. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/__init__.py +0 -0
  69. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/__main__.py +0 -0
  70. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/_dataset_types.py +0 -0
  71. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/_edges.py +0 -0
  72. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/_exceptions.py +0 -0
  73. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/_mapping_views.py +0 -0
  74. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/_nodes.py +0 -0
  75. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/_pipeline_graph.py +0 -0
  76. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/_task_subsets.py +0 -0
  77. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/_tasks.py +0 -0
  78. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/expressions.py +0 -0
  79. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/io.py +0 -0
  80. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/__init__.py +0 -0
  81. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_dot.py +0 -0
  82. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_formatting.py +0 -0
  83. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_layout.py +0 -0
  84. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_merge.py +0 -0
  85. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_mermaid.py +0 -0
  86. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_options.py +0 -0
  87. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_printer.py +0 -0
  88. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_show.py +0 -0
  89. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/pipeline_graph/visualization/_status_annotator.py +0 -0
  90. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/prerequisite_helpers.py +0 -0
  91. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/py.typed +0 -0
  92. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/quantum_provenance_graph.py +0 -0
  93. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/script/__init__.py +0 -0
  94. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/script/register_instrument.py +0 -0
  95. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/script/retrieve_artifacts_for_quanta.py +0 -0
  96. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/script/transfer_from_graph.py +0 -0
  97. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/script/zip_from_graph.py +0 -0
  98. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/struct.py +0 -0
  99. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/task.py +0 -0
  100. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/taskFactory.py +0 -0
  101. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/testUtils.py +0 -0
  102. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/__init__.py +0 -0
  103. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/mocks/__init__.py +0 -0
  104. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/mocks/_pipeline_task.py +0 -0
  105. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/mocks/_storage_class.py +0 -0
  106. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/no_dimensions.py +0 -0
  107. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/pipelineStepTester.py +0 -0
  108. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/simpleQGraph.py +0 -0
  109. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/tests/util.py +0 -0
  110. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst/pipe/base/utils.py +0 -0
  111. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst_pipe_base.egg-info/SOURCES.txt +0 -0
  112. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst_pipe_base.egg-info/dependency_links.txt +0 -0
  113. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst_pipe_base.egg-info/entry_points.txt +0 -0
  114. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst_pipe_base.egg-info/requires.txt +0 -0
  115. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst_pipe_base.egg-info/top_level.txt +0 -0
  116. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/python/lsst_pipe_base.egg-info/zip-safe +0 -0
  117. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/setup.cfg +0 -0
  118. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_cliCmdRegisterInstrument.py +0 -0
  119. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_configOverrides.py +0 -0
  120. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_config_formatter.py +0 -0
  121. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_connections.py +0 -0
  122. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_dataid_match.py +0 -0
  123. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_dataset_handle.py +0 -0
  124. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_dot_tools.py +0 -0
  125. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_dynamic_connections.py +0 -0
  126. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_executionButler.py +0 -0
  127. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_execution_reports.py +0 -0
  128. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_graphBuilder.py +0 -0
  129. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_init_output_run.py +0 -0
  130. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_instrument.py +0 -0
  131. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_mermaid.py +0 -0
  132. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_pipeline.py +0 -0
  133. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_pipelineIR.py +0 -0
  134. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_pipelineLoadSubset.py +0 -0
  135. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_pipelineTask.py +0 -0
  136. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_pipeline_graph.py +0 -0
  137. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_pipeline_graph_expressions.py +0 -0
  138. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_quantumGraph.py +0 -0
  139. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_quantum_provenance_graph.py +0 -0
  140. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_quantum_success_caveats.py +0 -0
  141. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_struct.py +0 -0
  142. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_taskmetadata.py +0 -0
  143. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_testUtils.py +0 -0
  144. {lsst_pipe_base-29.2025.1600 → lsst_pipe_base-29.2025.1800}/tests/test_utils.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lsst-pipe-base
-Version: 29.2025.1600
+Version: 29.2025.1800
 Summary: Pipeline infrastructure for the Rubin Science Pipelines.
 Author-email: Rubin Observatory Data Management <dm-admin@lists.lsst.org>
 License: BSD 3-Clause License
@@ -30,7 +30,7 @@ from __future__ import annotations
 import abc
 import enum
 import logging
-from typing import TYPE_CHECKING, ClassVar, Protocol
+from typing import TYPE_CHECKING, Any, ClassVar, Protocol
 
 from lsst.utils import introspection
 
@@ -245,6 +245,16 @@ class AlgorithmError(RepeatableQuantumError, abc.ABC):
     (for example: number of data points in a fit vs. degrees of freedom).
     """
 
+    def __new__(cls, *args: Any, **kwargs: Any) -> AlgorithmError:
+        # Have to override __new__ because builtin subclasses aren't checked
+        # for abstract methods; see https://github.com/python/cpython/issues/50246
+        if cls.__abstractmethods__:
+            raise TypeError(
+                f"Can't instantiate abstract class {cls.__name__} with "
+                f"abstract methods: {','.join(sorted(cls.__abstractmethods__))}"
+            )
+        return super().__new__(cls, *args, **kwargs)
+
     @property
     @abc.abstractmethod
     def metadata(self) -> NestedMetadataDict | None:
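For context, this works around a long-standing CPython quirk (the issue linked above): abstract-method enforcement lives in object.__new__, which BaseException.__new__ never calls, so exception subclasses of an ABC are otherwise instantiable even with unimplemented abstract methods. A minimal, self-contained sketch of the problem and the fix; the class names here are illustrative, not from the package:

    import abc
    from typing import Any

    class Broken(Exception, abc.ABC):
        @abc.abstractmethod
        def metadata(self) -> dict: ...

    # Surprisingly, this does NOT raise TypeError: BaseException.__new__
    # skips the abstract-method check that object.__new__ would perform.
    Broken("oops")

    class Fixed(Exception, abc.ABC):
        def __new__(cls, *args: Any, **kwargs: Any) -> "Fixed":
            # Re-implement the check that object.__new__ would have done.
            if cls.__abstractmethods__:
                raise TypeError(
                    f"Can't instantiate abstract class {cls.__name__} with "
                    f"abstract methods: {','.join(sorted(cls.__abstractmethods__))}"
                )
            return super().__new__(cls, *args, **kwargs)

        @abc.abstractmethod
        def metadata(self) -> dict: ...

    try:
        Fixed("oops")
    except TypeError as err:
        print(err)  # Can't instantiate abstract class Fixed with abstract methods: metadata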
@@ -34,20 +34,18 @@ from __future__ import annotations
 __all__ = ("AllDimensionsQuantumGraphBuilder", "DatasetQueryConstraintVariant")
 
 import dataclasses
-import itertools
 from collections import defaultdict
 from collections.abc import Iterable, Mapping
-from typing import TYPE_CHECKING, Any, TypeAlias, final
+from typing import TYPE_CHECKING, Any, final
 
 import astropy.table
 
 from lsst.daf.butler import (
     Butler,
     DataCoordinate,
-    DataIdValue,
+    DimensionDataAttacher,
     DimensionGroup,
-    DimensionRecord,
-    DimensionUniverse,
+    DimensionRecordSet,
     MissingDatasetTypeError,
 )
 from lsst.utils.logging import LsstLogAdapter
@@ -61,9 +59,6 @@ if TYPE_CHECKING:
     from .pipeline_graph import DatasetTypeNode, PipelineGraph, TaskNode
 
 
-DimensionRecordsMap: TypeAlias = dict[str, dict[tuple[DataIdValue, ...], DimensionRecord]]
-
-
 @final
 class AllDimensionsQuantumGraphBuilder(QuantumGraphBuilder):
     """An implementation of `QuantumGraphBuilder` that uses a single large
@@ -145,9 +140,7 @@ class AllDimensionsQuantumGraphBuilder(QuantumGraphBuilder):
             return skeleton
         self._find_followup_datasets(tree, skeleton)
         dimension_records = self._fetch_most_dimension_records(tree)
-        leftovers = self._attach_most_dimension_records(skeleton, dimension_records)
-        self._fetch_leftover_dimension_records(leftovers, dimension_records)
-        self._attach_leftover_dimension_records(skeleton, leftovers, dimension_records)
+        self._attach_dimension_records(skeleton, dimension_records)
         return skeleton
 
     def _query_for_data_ids(self, tree: _DimensionGroupTree) -> None:
@@ -197,7 +190,14 @@ class AllDimensionsQuantumGraphBuilder(QuantumGraphBuilder):
             query_cmd.append(f" collections = {list(self.input_collections)}")
         for dataset_type_name in constraint_datasets:
             query_cmd.append(f" query = query.join_dataset_search({dataset_type_name!r}, collections)")
-            query = query.join_dataset_search(dataset_type_name, self.input_collections)
+            try:
+                query = query.join_dataset_search(dataset_type_name, self.input_collections)
+            except MissingDatasetTypeError:
+                raise QuantumGraphBuilderError(
+                    f"No datasets for overall-input {dataset_type_name!r} found (the dataset type is "
+                    "not even registered). This is probably a bug in either the pipeline definition or "
+                    "the dataset constraints passed to the quantum graph builder."
+                ) from None
         query_cmd.append(
             f" query = query.where({dict(tree.subgraph.data_id.mapping)}, "
             f"{self.where!r}, bind={self.bind!r})"
@@ -453,7 +453,7 @@ class AllDimensionsQuantumGraphBuilder(QuantumGraphBuilder):
         del branch.data_ids
 
     @timeMethod
-    def _fetch_most_dimension_records(self, tree: _DimensionGroupTree) -> DimensionRecordsMap:
+    def _fetch_most_dimension_records(self, tree: _DimensionGroupTree) -> list[DimensionRecordSet]:
         """Query for dimension records for all non-prerequisite data IDs (and
         possibly some prerequisite data IDs).
 
@@ -464,10 +464,8 @@ class AllDimensionsQuantumGraphBuilder(QuantumGraphBuilder):
 
         Returns
         -------
-        dimension_records : `dict`
-            Nested dictionary of dimension records, keyed first by dimension
-            element name and then by the `DataCoordinate.required_values`
-            tuple.
+        dimension_records : `list` [ `lsst.daf.butler.DimensionRecordSet` ]
+            List of sets of dimension records.
 
         Notes
         -----
@@ -476,7 +474,7 @@ class AllDimensionsQuantumGraphBuilder(QuantumGraphBuilder):
         can also be used to fetch dimension records for those data IDs.
         """
         self.log.verbose("Performing follow-up queries for dimension records.")
-        result: dict[str, dict[tuple[DataIdValue, ...], DimensionRecord]] = {}
+        result: list[DimensionRecordSet] = []
         for branch in tree.branches_by_dimensions.values():
             if not branch.record_elements:
                 continue
@@ -485,16 +483,17 @@ class AllDimensionsQuantumGraphBuilder(QuantumGraphBuilder):
             with self.butler.query() as butler_query:
                 butler_query = butler_query.join_data_coordinates(branch.data_ids)
                 for element in branch.record_elements:
-                    result[element] = {
-                        record.dataId.required_values: record
-                        for record in butler_query.dimension_records(element)
-                    }
+                    result.append(
+                        DimensionRecordSet(
+                            element, butler_query.dimension_records(element), universe=self.universe
+                        )
+                    )
         return result
 
     @timeMethod
-    def _attach_most_dimension_records(
-        self, skeleton: QuantumGraphSkeleton, dimension_records: DimensionRecordsMap
-    ) -> DataIdExpansionLeftovers:
+    def _attach_dimension_records(
+        self, skeleton: QuantumGraphSkeleton, dimension_records: Iterable[DimensionRecordSet]
+    ) -> None:
         """Attach dimension records to most data IDs in the in-progress graph,
         and return a data structure that records the rest.
 
@@ -502,108 +501,31 @@ class AllDimensionsQuantumGraphBuilder(QuantumGraphBuilder):
         ----------
         skeleton : `.quantum_graph_skeleton.QuantumGraphSkeleton`
             In-progress quantum graph to modify in place.
-        dimension_records : `dict`
-            Nested dictionary of dimension records, keyed first by dimension
-            element name and then by the `DataCoordinate.required_values`
-            tuple.
-
-        Returns
-        -------
-        leftovers : `DataIdExpansionLeftovers`
-            Struct recording data IDs in ``skeleton`` that were not expanded
-            and the data IDs of the dimension records that need to be fetched
-            in order to do so.
+        dimension_records : `~collections.abc.Iterable` [ \
+                `lsst.daf.butler.DimensionRecordSet` ]
+            Iterable of sets of dimension records.
         """
         # Group all nodes by data ID (and dimensions of data ID).
-        data_ids_to_expand: defaultdict[DimensionGroup, defaultdict[DataCoordinate, NodeKeysForDataId]] = (
-            defaultdict(NodeKeysForDataId.make_defaultdict)
+        data_ids_to_expand: defaultdict[DimensionGroup, defaultdict[DataCoordinate, list[Key]]] = defaultdict(
+            lambda: defaultdict(list)
         )
         data_id: DataCoordinate | None
         for node_key in skeleton:
             if data_id := skeleton[node_key].get("data_id"):
-                if isinstance(node_key, PrerequisiteDatasetKey):
-                    data_ids_to_expand[data_id.dimensions][data_id].prerequisites.append(node_key)
-                else:
-                    data_ids_to_expand[data_id.dimensions][data_id].others.append(node_key)
-        # Expand data IDs and track the records that are missing (which can
-        # only come from prerequisite data IDs).
-        leftovers = DataIdExpansionLeftovers()
+                data_ids_to_expand[data_id.dimensions][data_id].append(node_key)
+        attacher = DimensionDataAttacher(
+            records=dimension_records,
+            dimensions=DimensionGroup.union(*data_ids_to_expand.keys(), universe=self.universe),
+        )
         for dimensions, data_ids in data_ids_to_expand.items():
-            attacher = DimensionRecordAttacher(dimensions, dimension_records)
-            skipped_data_ids: dict[DataCoordinate, list[PrerequisiteDatasetKey]] = {}
-            for data_id, node_keys in data_ids.items():
-                expanded_data_id: DataCoordinate | None
-                if node_keys.others:
-                    # This data ID was used in at least one non-prerequisite
-                    # key, so we know we've got the records we need for it
-                    # already.
-                    expanded_data_id = attacher.apply(data_id)
-                else:
-                    # This key only appeared in prerequisites, so we might not
-                    # have all the records we need.
-                    if (expanded_data_id := attacher.maybe_apply(data_id, leftovers)) is None:
-                        skipped_data_ids[data_id] = node_keys.prerequisites
-                        continue
-                for node_key in itertools.chain(node_keys.others, node_keys.prerequisites):
-                    skeleton.set_data_id(node_key, expanded_data_id)
-            if skipped_data_ids:
-                leftovers.data_ids_to_expand[dimensions] = skipped_data_ids
-        return leftovers
-
-    @timeMethod
-    def _fetch_leftover_dimension_records(
-        self, leftovers: DataIdExpansionLeftovers, dimension_records: DimensionRecordsMap
-    ) -> None:
-        """Fetch additional dimension records whose data IDs were not included
-        in the initial common data ID query.
-
-        Parameters
-        ----------
-        leftovers : `DataIdExpansionLeftovers`
-            Struct recording data IDs in ``skeleton`` that were not expanded.
-        dimension_records : `dict`
-            Nested dictionary of dimension records, keyed first by dimension
-            element name and then by the `DataCoordinate.required_values`
-            tuple. Will be updated in place.
-        """
-        for element, data_id_values_set in leftovers.missing_record_data_ids.items():
-            dimensions = self.butler.dimensions[element].minimal_group
-            data_ids = [DataCoordinate.from_required_values(dimensions, v) for v in data_id_values_set]
-            with self.butler.query() as q:
-                new_records = {
-                    r.dataId.required_values: r
-                    for r in q.join_data_coordinates(data_ids).dimension_records(element)
-                }
-            dimension_records.setdefault(element, {}).update(new_records)
-
-    @timeMethod
-    def _attach_leftover_dimension_records(
-        self,
-        skeleton: QuantumGraphSkeleton,
-        leftovers: DataIdExpansionLeftovers,
-        dimension_records: DimensionRecordsMap,
-    ) -> None:
-        """Attach dimension records to any data IDs in the in-progress graph
-        that were not handled in the first pass.
-
-        Parameters
-        ----------
-        skeleton : `.quantum_graph_skeleton.QuantumGraphSkeleton`
-            In-progress quantum graph to modify in place.
-        leftovers : `DataIdExpansionLeftovers`
-            Struct recording data IDs in ``skeleton`` that were not expanded
-            and the data IDs of the dimension records that need to be fetched
-            in order to do so.
-        dimension_records : `dict`
-            Nested dictionary of dimension records, keyed first by dimension
-            element name and then by the `DataCoordinate.required_values`
-            tuple.
-        """
-        for dimensions, data_ids in leftovers.data_ids_to_expand.items():
-            attacher = DimensionRecordAttacher(dimensions, dimension_records)
-            for data_id, prerequisite_keys in data_ids.items():
-                expanded_data_id = attacher.apply(data_id)
-                for node_key in prerequisite_keys:
+            with self.butler.query() as query:
+                # Butler query will be used as-needed to get dimension records
+                # (from prerequisites) we didn't fetch in advance. These are
+                # cached in the attacher so we don't look them up multiple
+                # times.
+                expanded_data_ids = attacher.attach(dimensions, data_ids.keys(), query=query)
+                for expanded_data_id, node_keys in zip(expanded_data_ids, data_ids.values()):
+                    for node_key in node_keys:
                         skeleton.set_data_id(node_key, expanded_data_id)
 
 
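The rewritten _attach_dimension_records above hinges on grouping node keys by data ID with nested defaultdicts, so each distinct data ID is expanded exactly once and the expanded value is shared by every node that carries it. A toy sketch of that grouping step, with plain tuples standing in for DimensionGroup, DataCoordinate, and node keys:

    from collections import defaultdict

    # (dimensions, data_id, node_key) triples standing in for skeleton nodes.
    nodes = [
        (("visit",), ("v1",), "quantum:a"),
        (("visit",), ("v1",), "dataset:b"),
        (("tract",), ("t9",), "dataset:c"),
    ]

    grouped: defaultdict = defaultdict(lambda: defaultdict(list))
    for dimensions, data_id, node_key in nodes:
        grouped[dimensions][data_id].append(node_key)

    attached = {}
    for dimensions, data_ids in grouped.items():
        for data_id, node_keys in data_ids.items():
            expanded = ("expanded", *data_id)  # stand-in for record attachment
            for node_key in node_keys:
                attached[node_key] = expanded  # one expansion, many nodes

    print(attached["quantum:a"] is attached["dataset:b"])  # True: shared object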
@@ -970,7 +892,7 @@ class _DimensionGroupTree:
             dimensions: _DimensionGroupBranch(tasks, dataset_types)
             for dimensions, (tasks, dataset_types) in self.subgraph.group_by_dimensions().items()
         }
-        self.all_dimensions = _union_dimensions(self.branches_by_dimensions.keys(), universe)
+        self.all_dimensions = DimensionGroup.union(*self.branches_by_dimensions.keys(), universe=universe)
         _DimensionGroupBranch.populate_record_elements(self.all_dimensions, self.branches_by_dimensions)
         _DimensionGroupBranch.populate_edges(self.subgraph, self.branches_by_dimensions)
         self.trunk_branches = _DimensionGroupBranch.populate_branches(
@@ -997,141 +919,3 @@ class _DimensionGroupTree:
         for branch_dimensions, branch in self.trunk_branches.items():
             log.debug("Projecting query data IDs to %s.", branch_dimensions)
             branch.project_data_ids(log)
-
-
-class DimensionRecordAttacher:
-    """A helper class that expands data IDs by attaching dimension records.
-
-    Parameters
-    ----------
-    dimensions : `DimensionGroup`
-        Dimensions of the data IDs this instance will operate on.
-    dimension_records : `dict`
-        Nested dictionary of dimension records, keyed first by dimension
-        element name and then by the `DataCoordinate.required_values`
-        tuple.
-    """
-
-    def __init__(self, dimensions: DimensionGroup, dimension_records: DimensionRecordsMap):
-        self.dimensions = dimensions
-        self.indexers = {element: self._make_indexer(element) for element in dimensions.elements}
-        self.dimension_records = dimension_records
-
-    def _make_indexer(self, element: str) -> list[int]:
-        """Return a list of indexes into data ID full-values that extract the
-        `~DataCoordinate.required_values` for the ``element``
-        `DimensionElement.minimal_group` from the `~DataCoordinate.full_values`
-        for `dimensions`.
-        """
-        return [
-            self.dimensions._data_coordinate_indices[d]
-            for d in self.dimensions.universe[element].minimal_group.required
-        ]
-
-    def apply(self, data_id: DataCoordinate) -> DataCoordinate:
-        """Attach dimension records to the given data ID.
-
-        Parameters
-        ----------
-        data_id : `DataCoordinate`
-            Input data ID.
-
-        Returns
-        -------
-        expanded_data_id : `DataCoordinate`
-            Output data ID.
-        """
-        v = data_id.full_values
-        records_for_data_id = {}
-        for element, indexer in self.indexers.items():
-            records_for_element = self.dimension_records[element]
-            records_for_data_id[element] = records_for_element[tuple([v[i] for i in indexer])]
-        return data_id.expanded(records_for_data_id)
-
-    def maybe_apply(
-        self, data_id: DataCoordinate, leftovers: DataIdExpansionLeftovers
-    ) -> DataCoordinate | None:
-        """Attempt to attach dimension records to the given data ID, and record
-        the data IDs of missing dimension records on failure.
-
-        Parameters
-        ----------
-        data_id : `DataCoordinate`
-            Input data ID.
-        leftovers : `DataIdExpansionLeftovers`
-            Struct recording data IDs that were not expanded and the data IDs
-            of the dimension records that need to be fetched in order to do so.
-            The latter will be updated in place; callers are responsible for
-            updating the former when `None` is returned (since this method
-            doesn't have enough information to do so on its own).
-
-        Returns
-        -------
-        expanded_data_id : `DataCoordinate` or `None`
-            Output data ID, or `None` if one or more records were missing.
-        """
-        v = data_id.full_values
-        records_for_data_id = {}
-        failed = False
-        # Note that we need to process all elements even when we know we're
-        # going to fail, since we need to fully populate `leftovers` with what
-        # we still need to query for.
-        for element, indexer in self.indexers.items():
-            k = tuple([v[i] for i in indexer])
-            if (records_for_element := self.dimension_records.get(element)) is None:
-                leftovers.missing_record_data_ids[element].add(k)
-                failed = True
-            elif (r := records_for_element.get(k)) is None:
-                leftovers.missing_record_data_ids[element].add(k)
-                failed = True
-            else:
-                records_for_data_id[element] = r
-        if not failed:
-            return data_id.expanded(records_for_data_id)
-        else:
-            return None
-
-
-@dataclasses.dataclass
-class NodeKeysForDataId:
-    """Struct that holds the skeleton-graph node keys for a single data ID.
-
-    This is used when expanding (i.e. attaching dimension records to) data IDs,
-    where we group node keys by data ID in order to avoid repeatedly expanding
-    the same data ID and cut down on the number of equivalent data ID instances
-    alive in memory. We separate prerequisite nodes from all other nodes
-    because they're the only ones whose data IDs are not by construction a
-    subset of the data IDs in the big initial query.
-    """
-
-    prerequisites: list[PrerequisiteDatasetKey] = dataclasses.field(default_factory=list)
-    """Node keys that correspond to prerequisite input dataset types."""
-
-    others: list[Key] = dataclasses.field(default_factory=list)
-    """All other node keys."""
-
-    @classmethod
-    def make_defaultdict(cls) -> defaultdict[DataCoordinate, NodeKeysForDataId]:
-        return defaultdict(cls)
-
-
-DataIdExpansionToDoMap: TypeAlias = defaultdict[
-    DimensionGroup, defaultdict[DataCoordinate, NodeKeysForDataId]
-]
-
-
-@dataclasses.dataclass
-class DataIdExpansionLeftovers:
-    data_ids_to_expand: dict[DimensionGroup, dict[DataCoordinate, list[PrerequisiteDatasetKey]]] = (
-        dataclasses.field(default_factory=dict)
-    )
-    missing_record_data_ids: defaultdict[str, set[tuple[DataIdValue, ...]]] = dataclasses.field(
-        default_factory=lambda: defaultdict(set)
-    )
-
-
-def _union_dimensions(groups: Iterable[DimensionGroup], universe: DimensionUniverse) -> DimensionGroup:
-    dimension_names: set[str] = set()
-    for dimensions_for_group in groups:
-        dimension_names.update(dimensions_for_group.names)
-    return universe.conform(dimension_names)
@@ -34,6 +34,7 @@ from collections.abc import Iterable, Set
 from typing import Any
 
 from lsst.daf.butler import (
+    ButlerMetrics,
     DatasetId,
     DatasetProvenance,
     DatasetRef,
@@ -90,6 +91,17 @@ class CachingLimitedButler(LimitedButler):
         self._cache: dict[str, tuple[DatasetId, InMemoryDatasetHandle]] = {}
         self._no_copy_on_cache = no_copy_on_cache
 
+    @property
+    def _metrics(self) -> ButlerMetrics:
+        # Need to always forward from the wrapped metrics object.
+        return self._wrapped._metrics
+
+    @_metrics.setter
+    def _metrics(self, metrics: ButlerMetrics) -> None:
+        # Allow record_metrics() context manager to override the wrapped
+        # butler.
+        self._wrapped._metrics = metrics
+
     def get(
         self,
         ref: DatasetRef,
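The _metrics property pair above is a standard delegation idiom: reads and writes on the wrapper both pass through to the wrapped object, so a context manager that temporarily swaps the attribute on the wrapper transparently affects the inner butler too. A generic sketch with invented Inner/Wrapper names (not from the package):

    class Inner:
        def __init__(self) -> None:
            self.counter = 0

    class Wrapper:
        def __init__(self, inner: Inner) -> None:
            self._inner = inner

        @property
        def counter(self) -> int:
            # Reads always reflect the wrapped object's current value.
            return self._inner.counter

        @counter.setter
        def counter(self, value: int) -> None:
            # Writes pass straight through to the wrapped object.
            self._inner.counter = value

    inner = Inner()
    wrapper = Wrapper(inner)
    wrapper.counter = 5
    assert inner.counter == 5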
@@ -52,7 +52,15 @@ from dataclasses import dataclass
 from types import MappingProxyType, SimpleNamespace
 from typing import TYPE_CHECKING, Any
 
-from lsst.daf.butler import DataCoordinate, DatasetRef, DatasetType, NamedKeyDict, NamedKeyMapping, Quantum
+from lsst.daf.butler import (
+    Butler,
+    DataCoordinate,
+    DatasetRef,
+    DatasetType,
+    NamedKeyDict,
+    NamedKeyMapping,
+    Quantum,
+)
 
 from ._status import NoWorkFound
 from .connectionTypes import BaseConnection, BaseInput, Output, PrerequisiteInput
@@ -1165,13 +1173,19 @@ class QuantaAdjuster:
         Pipeline graph the quantum graph is being built from.
     skeleton : `quantum_graph_skeleton.QuantumGraphSkeleton`
         Under-construction quantum graph that will be modified in place.
+    butler : `lsst.daf.butler.Butler`
+        Read-only instance with its default collection search path set to the
+        input collections passed to the quantum-graph builder.
     """
 
-    def __init__(self, task_label: str, pipeline_graph: PipelineGraph, skeleton: QuantumGraphSkeleton):
+    def __init__(
+        self, task_label: str, pipeline_graph: PipelineGraph, skeleton: QuantumGraphSkeleton, butler: Butler
+    ):
         self._task_node = pipeline_graph.tasks[task_label]
         self._pipeline_graph = pipeline_graph
         self._skeleton = skeleton
         self._n_removed = 0
+        self._butler = butler
 
     @property
     def task_label(self) -> str:
@@ -1183,8 +1197,15 @@ class QuantaAdjuster:
         """The node for this task in the pipeline graph."""
         return self._task_node
 
+    @property
+    def butler(self) -> Butler:
+        """Read-only instance with its default collection search path set to
+        the input collections passed to the quantum-graph builder.
+        """
+        return self._butler
+
     def iter_data_ids(self) -> Iterator[DataCoordinate]:
-        """Iterate over the data IDs of all quanta for this task."
+        """Iterate over the data IDs of all quanta for this task.
 
         Returns
         -------
@@ -1218,7 +1239,7 @@ class QuantaAdjuster:
 
         Parameters
         ----------
-        data_id : `~lsst.daf.butler.DataCoordinate`
+        quantum_data_id : `~lsst.daf.butler.DataCoordinate`
             Data ID of the quantum to get the inputs of.
 
         Returns
@@ -1250,6 +1271,41 @@ class QuantaAdjuster:
             for edge in self._task_node.iter_all_inputs()
         }
 
+    def get_outputs(self, quantum_data_id: DataCoordinate) -> dict[str, list[DataCoordinate]]:
+        """Return the data IDs of all regular outputs to a quantum.
+
+        Parameters
+        ----------
+        quantum_data_id : `~lsst.daf.butler.DataCoordinate`
+            Data ID of the quantum to get the outputs of.
+
+        Returns
+        -------
+        inputs : `dict` [ `str`, `list` [ `~lsst.daf.butler.DataCoordinate` ] ]
+            Data IDs of inputs, keyed by the connection name (the internal task
+            name, not the dataset type name). This only contains regular
+            outputs, not init-outputs or log or metadata outputs.
+
+        Notes
+        -----
+        If two connections have the same dataset type, the current
+        implementation assumes the set of datasets is the same for the two
+        connections. This limitation may be removed in the future.
+        """
+        from .quantum_graph_skeleton import QuantumKey
+
+        by_dataset_type_name: defaultdict[str, list[DataCoordinate]] = defaultdict(list)
+        quantum_key = QuantumKey(self._task_node.label, quantum_data_id.required_values)
+        for dataset_key in self._skeleton.iter_outputs_of(quantum_key):
+            dataset_type_node = self._pipeline_graph.dataset_types[dataset_key.parent_dataset_type_name]
+            by_dataset_type_name[dataset_key.parent_dataset_type_name].append(
+                DataCoordinate.from_required_values(dataset_type_node.dimensions, dataset_key.data_id_values)
+            )
+        return {
+            edge.connection_name: by_dataset_type_name[edge.parent_dataset_type_name]
+            for edge in self._task_node.outputs.values()
+        }
+
     def add_input(
         self, quantum_data_id: DataCoordinate, connection_name: str, dataset_data_id: DataCoordinate
     ) -> None:
@@ -1284,13 +1340,42 @@ class QuantaAdjuster:
         )
         self._skeleton.add_input_edge(quantum_key, dataset_key)
 
+    def move_output(
+        self, quantum_data_id: DataCoordinate, connection_name: str, dataset_data_id: DataCoordinate
+    ) -> None:
+        """Remove an output of one quantum and make it a new output of another.
+
+        Parameters
+        ----------
+        quantum_data_id : `~lsst.daf.butler.DataCoordinate`
+            Data ID of the quantum to move the output to.
+        connection_name : `str`
+            Name of the connection (the task-internal name, not the butler
+            dataset type name).
+        dataset_data_id : `~lsst.daf.butler.DataCoordinate`
+            Data ID of the output dataset. Must already exist in the graph
+            as an output of a different quantum of this task.
+        """
+        from .quantum_graph_skeleton import DatasetKey, QuantumKey
+
+        quantum_key = QuantumKey(self._task_node.label, quantum_data_id.required_values)
+        write_edge = self._task_node.outputs[connection_name]
+        dataset_key = DatasetKey(write_edge.parent_dataset_type_name, dataset_data_id.required_values)
+        if dataset_key not in self._skeleton:
+            raise LookupError(
+                f"Dataset {write_edge.parent_dataset_type_name}@{dataset_data_id} is "
+                "not already in the graph."
+            )
+        self._skeleton.remove_output_edge(dataset_key)
+        self._skeleton.add_output_edge(quantum_key, dataset_key)
+
     def expand_quantum_data_id(self, data_id: DataCoordinate) -> DataCoordinate:
         """Expand a quantum data ID to include implied values and dimension
         records.
 
         Parameters
         ----------
-        quantum_data_id : `~lsst.daf.butler.DataCoordinate`
+        data_id : `~lsst.daf.butler.DataCoordinate`
             A data ID of a quantum already in the graph.
 
         Returns
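Taken together, get_outputs and move_output let a connections class rebalance outputs while the graph is being built, inside its adjust_all_quanta hook (the call site appears in the quantum_graph_builder.py hunk below). A hypothetical sketch; the connection name "coadd" and the funneling policy are invented for illustration, and only the adjuster calls come from this diff:

    def adjust_all_quanta(self, adjuster) -> None:
        # Invented policy: funnel every quantum's "coadd" outputs onto the
        # first quantum; the emptied quanta can then be dropped elsewhere.
        target, *rest = list(adjuster.iter_data_ids())
        for source in rest:
            for dataset_data_id in adjuster.get_outputs(source)["coadd"]:
                adjuster.move_output(target, "coadd", dataset_data_id)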
@@ -170,7 +170,6 @@ class QuantumGraphBuilder(ABC):
         self.log = getLogger(__name__)
         self.metadata = TaskMetadata()
         self._pipeline_graph = pipeline_graph
-        self.butler = butler
         if input_collections is None:
             input_collections = butler.collections.defaults
         if not input_collections:
@@ -180,6 +179,7 @@ class QuantumGraphBuilder(ABC):
             output_run = butler.run
         if not output_run:
             raise ValueError("No output RUN collection provided.")
+        self.butler = butler.clone(collections=input_collections)
         self.output_run = output_run
         self.skip_existing_in = skip_existing_in
         self.empty_data_id = DataCoordinate.make_empty(butler.dimensions)
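Storing the butler only after input collections are resolved lets the builder keep a clone whose default collection search path is exactly those collections. A brief sketch of the effect, assuming a recent daf_butler; the repository path and collection name here are hypothetical:

    from lsst.daf.butler import Butler

    butler = Butler.from_config("repo", writeable=False)  # hypothetical repo path
    clone = butler.clone(collections=["HSC/defaults"])    # hypothetical collection
    # Queries against `clone` now default to the given collections, while
    # the original butler's defaults are left untouched.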
@@ -494,7 +494,7 @@ class QuantumGraphBuilder(ABC):
         # Give the task a chance to adjust all quanta together. This
         # operates directly on the skeleton (via a the 'adjuster', which
         # is just an interface adapter).
-        adjuster = QuantaAdjuster(task_node.label, self._pipeline_graph, skeleton)
+        adjuster = QuantaAdjuster(task_node.label, self._pipeline_graph, skeleton, self.butler)
         task_node.get_connections().adjust_all_quanta(adjuster)
         # Loop over all quanta again, remembering those we get rid of in other
         # ways.
@@ -507,6 +507,7 @@ class QuantumGraphBuilder(ABC):
         # raise if one of the check conditions is not met, which is the
         # intended behavior.
         helper = AdjustQuantumHelper(inputs=adjusted_inputs, outputs=adjusted_outputs)
+        quantum_data_id = skeleton[quantum_key]["data_id"]
         try:
             helper.adjust_in_place(task_node.get_connections(), task_node.label, quantum_data_id)
         except NoWorkFound as err:
@@ -517,6 +517,20 @@ class QuantumGraphSkeleton:
         assert dataset_key in self._xgraph
         self._xgraph.add_edge(task_key, dataset_key)
 
+    def remove_output_edge(self, dataset_key: DatasetKey) -> None:
+        """Remove the edge connecting a dataset to the quantum that produces
+        it.
+
+        Parameters
+        ----------
+        dataset_key : `DatasetKey`
+            Identifier for the dataset node. Must identify a node already
+            present in the graph.
+        """
+        (task_key,) = self._xgraph.predecessors(dataset_key)
+        assert dataset_key in self._xgraph
+        self._xgraph.remove_edge(task_key, dataset_key)
+
     def remove_orphan_datasets(self) -> None:
         """Remove any dataset nodes that do not have any edges."""
         for orphan in list(networkx.isolates(self._xgraph)):
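The `(task_key,) = self._xgraph.predecessors(dataset_key)` line uses one-element tuple unpacking to fetch the single producing quantum and simultaneously assert there is exactly one: with zero or multiple predecessors the unpacking raises ValueError. A standalone illustration with networkx:

    import networkx

    g = networkx.DiGraph()
    g.add_edge("quantum_a", "dataset_x")

    # Exactly one producer: unpacking succeeds.
    (producer,) = g.predecessors("dataset_x")
    assert producer == "quantum_a"

    # A second producer makes the same unpacking raise ValueError.
    g.add_edge("quantum_b", "dataset_x")
    try:
        (producer,) = g.predecessors("dataset_x")
    except ValueError as err:
        print(err)  # too many values to unpack (expected 1)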
@@ -135,6 +135,10 @@ class _DataIdMatchTreeVisitor(TreeVisitor):
         # docstring is inherited from base class
         raise NotImplementedError()
 
+    def visitGlobNode(self, expression: Any, pattern: Any, node: Node) -> Any:
+        # docstring is inherited from base class
+        raise NotImplementedError()
+
 
 class DataIdMatch:
     """Class that can match DataId against the user-defined string expression.
@@ -0,0 +1,2 @@
+__all__ = ["__version__"]
+__version__ = "29.2025.1800"