lsst-pipe-base 30.0.1rc1__py3-none-any.whl → 30.2025.5200__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. lsst/pipe/base/_instrument.py +20 -31
  2. lsst/pipe/base/_quantumContext.py +3 -3
  3. lsst/pipe/base/_status.py +10 -43
  4. lsst/pipe/base/_task_metadata.py +2 -2
  5. lsst/pipe/base/all_dimensions_quantum_graph_builder.py +3 -8
  6. lsst/pipe/base/automatic_connection_constants.py +1 -20
  7. lsst/pipe/base/cli/cmd/__init__.py +2 -18
  8. lsst/pipe/base/cli/cmd/commands.py +4 -149
  9. lsst/pipe/base/connectionTypes.py +160 -72
  10. lsst/pipe/base/connections.py +9 -6
  11. lsst/pipe/base/execution_reports.py +5 -0
  12. lsst/pipe/base/graph/graph.py +10 -11
  13. lsst/pipe/base/graph/quantumNode.py +4 -4
  14. lsst/pipe/base/graph_walker.py +10 -8
  15. lsst/pipe/base/log_capture.py +5 -9
  16. lsst/pipe/base/mp_graph_executor.py +15 -51
  17. lsst/pipe/base/pipeline.py +6 -5
  18. lsst/pipe/base/pipelineIR.py +8 -2
  19. lsst/pipe/base/pipelineTask.py +7 -5
  20. lsst/pipe/base/pipeline_graph/_dataset_types.py +2 -2
  21. lsst/pipe/base/pipeline_graph/_edges.py +22 -32
  22. lsst/pipe/base/pipeline_graph/_mapping_views.py +7 -4
  23. lsst/pipe/base/pipeline_graph/_pipeline_graph.py +7 -14
  24. lsst/pipe/base/pipeline_graph/expressions.py +2 -2
  25. lsst/pipe/base/pipeline_graph/io.py +10 -7
  26. lsst/pipe/base/pipeline_graph/visualization/_dot.py +12 -13
  27. lsst/pipe/base/pipeline_graph/visualization/_layout.py +18 -16
  28. lsst/pipe/base/pipeline_graph/visualization/_merge.py +7 -4
  29. lsst/pipe/base/pipeline_graph/visualization/_printer.py +10 -10
  30. lsst/pipe/base/pipeline_graph/visualization/_status_annotator.py +0 -7
  31. lsst/pipe/base/prerequisite_helpers.py +1 -2
  32. lsst/pipe/base/quantum_graph/_common.py +20 -19
  33. lsst/pipe/base/quantum_graph/_multiblock.py +31 -37
  34. lsst/pipe/base/quantum_graph/_predicted.py +13 -111
  35. lsst/pipe/base/quantum_graph/_provenance.py +45 -1136
  36. lsst/pipe/base/quantum_graph/aggregator/__init__.py +1 -0
  37. lsst/pipe/base/quantum_graph/aggregator/_communicators.py +289 -204
  38. lsst/pipe/base/quantum_graph/aggregator/_config.py +9 -87
  39. lsst/pipe/base/quantum_graph/aggregator/_ingester.py +12 -13
  40. lsst/pipe/base/quantum_graph/aggregator/_scanner.py +235 -49
  41. lsst/pipe/base/quantum_graph/aggregator/_structs.py +116 -6
  42. lsst/pipe/base/quantum_graph/aggregator/_supervisor.py +39 -29
  43. lsst/pipe/base/quantum_graph/aggregator/_writer.py +351 -34
  44. lsst/pipe/base/quantum_graph/visualization.py +1 -5
  45. lsst/pipe/base/quantum_graph_builder.py +8 -21
  46. lsst/pipe/base/quantum_graph_executor.py +13 -116
  47. lsst/pipe/base/quantum_graph_skeleton.py +29 -31
  48. lsst/pipe/base/quantum_provenance_graph.py +12 -29
  49. lsst/pipe/base/separable_pipeline_executor.py +3 -19
  50. lsst/pipe/base/single_quantum_executor.py +42 -67
  51. lsst/pipe/base/struct.py +0 -4
  52. lsst/pipe/base/testUtils.py +3 -3
  53. lsst/pipe/base/tests/mocks/_storage_class.py +1 -2
  54. lsst/pipe/base/version.py +1 -1
  55. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/METADATA +3 -3
  56. lsst_pipe_base-30.2025.5200.dist-info/RECORD +125 -0
  57. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/WHEEL +1 -1
  58. lsst/pipe/base/log_on_close.py +0 -76
  59. lsst/pipe/base/quantum_graph/aggregator/_workers.py +0 -303
  60. lsst/pipe/base/quantum_graph/formatter.py +0 -171
  61. lsst/pipe/base/quantum_graph/ingest_graph.py +0 -413
  62. lsst_pipe_base-30.0.1rc1.dist-info/RECORD +0 -129
  63. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/entry_points.txt +0 -0
  64. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/licenses/COPYRIGHT +0 -0
  65. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/licenses/LICENSE +0 -0
  66. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/licenses/bsd_license.txt +0 -0
  67. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/licenses/gpl-v3.0.txt +0 -0
  68. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/top_level.txt +0 -0
  69. {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5200.dist-info}/zip-safe +0 -0
@@ -41,36 +41,35 @@ from lsst.utils.introspection import find_outside_stacklevel
 
 @dataclasses.dataclass(frozen=True)
 class BaseConnection:
-    """Base class used for declaring `PipelineTask` connections."""
+    """Base class used for declaring `PipelineTask` connections.
+
+    Attributes
+    ----------
+    name : `str`
+        The name used to identify the dataset type.
+    storageClass : `str`
+        The storage class used when (un)/persisting the dataset type.
+    multiple : `bool`
+        Indicates if this connection should expect to contain multiple objects
+        of the given dataset type. Tasks with more than one connection with
+        ``multiple=True`` with the same dimensions may want to implement
+        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
+        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum()` and
+        notify the execution system as early as possible of outputs that will
+        not be produced because the corresponding input is missing.
+    deprecated : `str`, optional
+        A description of why this connection is deprecated, including the
+        version after which it may be removed.
+
+        If not `None`, the string is appended to the docstring for this
+        connection and the corresponding config Field.
+    """
 
     name: str
-    """The name used to identify the dataset type."""
-
     storageClass: str
-    """The storage class used when (un)/persisting the dataset type."""
-
     doc: str = ""
-    """Documentation for this connection."""
-
     multiple: bool = False
-    """Indicates if this connection should expect to contain multiple objects
-    of the given dataset type.
-
-    Tasks with more than one connection with ``multiple=True`` with the same
-    dimensions may want to implement `.PipelineTaskConnections.adjustQuantum`
-    to ensure those datasets are consistent (i.e. zip-iterable) in
-    `PipelineTask.runQuantum()` and notify the execution system as early as
-    possible of outputs that will not be produced because the corresponding
-    input is missing.
-    """
-
     deprecated: str | None = dataclasses.field(default=None, kw_only=True)
-    """A description of why this connection is deprecated, including the
-    version after which it may be removed.
-
-    If not `None`, the string is appended to the docstring for this
-    connection and the corresponding config Field.
-    """
 
     _connection_type_set: ClassVar[str]
     _deprecation_context: str = ""
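
For orientation, connections like these are not instantiated on their own; they are declared as class attributes of a `PipelineTaskConnections` subclass. A minimal sketch of that pattern follows; the connection names, dataset type names, and dimensions are illustrative rather than taken from this diff:

import lsst.pipe.base.connectionTypes as cT
from lsst.pipe.base import PipelineTaskConnections


class ExampleConnections(
    PipelineTaskConnections, dimensions=("instrument", "visit", "detector")
):
    # Each class attribute becomes a connection; `name` is the butler
    # dataset type name and `doc` documents the connection and its config
    # Field.
    exposure = cT.Input(
        name="calexp",
        storageClass="ExposureF",
        doc="Calibrated exposure to process.",
        dimensions=("instrument", "visit", "detector"),
    )
    catalog = cT.Output(
        name="exampleCatalog",
        storageClass="SourceCatalog",
        doc="Measured catalog.",
        dimensions=("instrument", "visit", "detector"),
        # Appended to this connection's docstring and its config Field.
        deprecated="Deprecated in v30; will be removed after v31.",
    )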
@@ -111,15 +110,32 @@ class BaseConnection:
 class DimensionedConnection(BaseConnection):
     """Class used for declaring PipelineTask connections that includes
     dimensions.
+
+    Attributes
+    ----------
+    name : `str`
+        The name used to identify the dataset type.
+    storageClass : `str`
+        The storage class used when (un)/persisting the dataset type.
+    multiple : `bool`
+        Indicates if this connection should expect to contain multiple objects
+        of the given dataset type. Tasks with more than one connection with
+        ``multiple=True`` with the same dimensions may want to implement
+        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
+        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum` and notify
+        the execution system as early as possible of outputs that will not be
+        produced because the corresponding input is missing.
+    dimensions : iterable of `str`
+        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
+        to identify the dataset type identified by the specified name.
+    isCalibration : `bool`, optional
+        `True` if this dataset type may be included in CALIBRATION-type
+        collections to associate it with a validity range, `False` (default)
+        otherwise.
     """
 
     dimensions: Iterable[str] = ()
-    """The keys of the butler data coordinates for this dataset type."""
-
     isCalibration: bool = False
-    """ `True` if this dataset type may be included in
-    `~lsst.daf.butler.CollectionType.CALIBRATION` collections to associate it
-    with a validity range, `False` (default) otherwise."""
 
     def __post_init__(self):
         super().__post_init__()
@@ -135,6 +151,39 @@ class DimensionedConnection(BaseConnection):
 class BaseInput(DimensionedConnection):
     """Class used for declaring PipelineTask input connections.
 
+    Attributes
+    ----------
+    name : `str`
+        The default name used to identify the dataset type.
+    storageClass : `str`
+        The storage class used when (un)/persisting the dataset type.
+    multiple : `bool`
+        Indicates if this connection should expect to contain multiple objects
+        of the given dataset type. Tasks with more than one connection with
+        ``multiple=True`` with the same dimensions may want to implement
+        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
+        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum` and notify
+        the execution system as early as possible of outputs that will not be
+        produced because the corresponding input is missing.
+    dimensions : iterable of `str`
+        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
+        to identify the dataset type identified by the specified name.
+    deferLoad : `bool`
+        Indicates that this dataset type will be loaded as a
+        `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
+        object to load the object at a later time.
+    minimum : `int`
+        Minimum number of datasets required for this connection, per quantum.
+        This is checked in the base implementation of
+        `.PipelineTaskConnections.adjustQuantum`, which raises `NoWorkFound` if
+        the minimum is not met for `Input` connections (causing the quantum to
+        be pruned, skipped, or never created, depending on the context), and
+        `FileNotFoundError` for `PrerequisiteInput` connections (causing
+        QuantumGraph generation to fail). `PipelineTask` implementations may
+        provide custom `~.PipelineTaskConnections.adjustQuantum`
+        implementations for more fine-grained or configuration-driven
+        constraints, as long as they are compatible with this minimum.
+
     Raises
     ------
     TypeError
@@ -145,24 +194,7 @@ class BaseInput(DimensionedConnection):
     """
 
     deferLoad: bool = False
-    """Whether this dataset type will be loaded as a
-    `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
-    object to load the object at a later time.
-    """
-
     minimum: int = 1
-    """Minimum number of datasets required for this connection, per quantum.
-
-    This is checked in the base implementation of
-    `.PipelineTaskConnections.adjustQuantum`, which raises `NoWorkFound` if the
-    minimum is not met for `Input` connections (causing the quantum to be
-    pruned, skipped, or never created, depending on the context), and
-    `FileNotFoundError` for `PrerequisiteInput` connections (causing
-    QuantumGraph generation to fail). `PipelineTask` implementations may
-    provide custom `~.PipelineTaskConnections.adjustQuantum` implementations
-    for more fine-grained or configuration-driven constraints, as long as they
-    are compatible with this minimum.
-    """
 
     def __post_init__(self) -> None:
         super().__post_init__()
@@ -174,6 +206,56 @@ class BaseInput(DimensionedConnection):
 class Input(BaseInput):
     """Class used for declaring PipelineTask input connections.
 
+    Attributes
+    ----------
+    name : `str`
+        The default name used to identify the dataset type.
+    storageClass : `str`
+        The storage class used when (un)/persisting the dataset type.
+    multiple : `bool`
+        Indicates if this connection should expect to contain multiple objects
+        of the given dataset type. Tasks with more than one connection with
+        ``multiple=True`` with the same dimensions may want to implement
+        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
+        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum` and notify
+        the execution system as early as possible of outputs that will not be
+        produced because the corresponding input is missing.
+    dimensions : iterable of `str`
+        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
+        to identify the dataset type identified by the specified name.
+    deferLoad : `bool`
+        Indicates that this dataset type will be loaded as a
+        `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
+        object to load the object at a later time.
+    minimum : `int`
+        Minimum number of datasets required for this connection, per quantum.
+        This is checked in the base implementation of
+        `.PipelineTaskConnections.adjustQuantum`, which raises `NoWorkFound` if
+        the minimum is not met for `Input` connections (causing the quantum to
+        be pruned, skipped, or never created, depending on the context), and
+        `FileNotFoundError` for `PrerequisiteInput` connections (causing
+        QuantumGraph generation to fail). `PipelineTask` implementations may
+        provide custom `~.PipelineTaskConnections.adjustQuantum`
+        implementations for more fine-grained or configuration-driven
+        constraints, as long as they are compatible with this minimum.
+    deferGraphConstraint : `bool`, optional
+        If `True`, do not include this dataset type's existence in the initial
+        query that starts the QuantumGraph generation process. This can be
+        used to make QuantumGraph generation faster by avoiding redundant
+        datasets, and in certain cases it can (along with careful attention to
+        which tasks are included in the same QuantumGraph) be used to work
+        around the QuantumGraph generation algorithm's inflexible handling of
+        spatial overlaps. This option has no effect when the connection is not
+        an overall input of the pipeline (or subset thereof) for which a graph
+        is being created, and it never affects the ordering of quanta.
+    deferBinding : `bool`, optional
+        If `True`, the dataset will not be automatically included in the
+        pipeline graph (``deferGraphConstraint=True`` is implied). A custom
+        `~.quantum_graph_builder.QuantumGraphBuilder` is required to bind it
+        and add a corresponding edge to the pipeline graph. This option
+        allows the same dataset type to be used as both an input and an
+        output of a quantum.
+
     Raises
     ------
     TypeError
@@ -184,27 +266,8 @@ class Input(BaseInput):
     """
 
     deferGraphConstraint: bool = False
-    """If `True`, do not include this dataset type's existence in the initial
-    query that starts the QuantumGraph generation process.
-
-    This can be used to make QuantumGraph generation faster by avoiding
-    redundant datasets, and in certain cases it can (along with careful
-    attention to which tasks are included in the same QuantumGraph) be used to
-    work around the QuantumGraph generation algorithm's inflexible handling of
-    spatial overlaps. This option has no effect when the connection is not an
-    overall input of the pipeline (or subset thereof) for which a graph is
-    being created, and it never affects the ordering of quanta.
-    """
 
     deferBinding: bool = False
-    """If `True`, the dataset will not be automatically included in the
-    pipeline graph (``deferGraphConstraint=True`` is implied).
-
-    A custom `~.quantum_graph_builder.QuantumGraphBuilder` is required to bind
-    it and add a corresponding edge to the pipeline graph. This option allows
-    the same dataset type to be used as both an input and an output of a
-    quantum.
-    """
 
     _connection_type_set: ClassVar[str] = "inputs"
 
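
The `Input`-specific fields above combine as in the following hedged sketch; the connections class, dataset type names, and dimensions are hypothetical:

import lsst.pipe.base.connectionTypes as cT
from lsst.pipe.base import PipelineTaskConnections


class StackConnections(
    PipelineTaskConnections, dimensions=("tract", "patch", "band", "skymap")
):
    warps = cT.Input(
        name="exampleWarp",
        storageClass="ExposureF",
        doc="Warped exposures to stack.",
        dimensions=("tract", "patch", "visit", "skymap"),
        multiple=True,   # a single quantum receives a list of these
        deferLoad=True,  # receive DeferredDatasetHandles, load lazily
        minimum=1,       # base adjustQuantum raises NoWorkFound below this
        deferGraphConstraint=True,  # keep it out of the initial QG query
    )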
@@ -213,6 +276,38 @@ class Input(BaseInput):
 class PrerequisiteInput(BaseInput):
     """Class used for declaring PipelineTask prerequisite connections.
 
+    Attributes
+    ----------
+    name : `str`
+        The default name used to identify the dataset type.
+    storageClass : `str`
+        The storage class used when (un)/persisting the dataset type.
+    multiple : `bool`
+        Indicates if this connection should expect to contain multiple objects
+        of the given dataset type. Tasks with more than one connection with
+        ``multiple=True`` with the same dimensions may want to implement
+        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
+        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum` and notify
+        the execution system as early as possible of outputs that will not be
+        produced because the corresponding input is missing.
+    dimensions : iterable of `str`
+        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
+        to identify the dataset type identified by the specified name.
+    minimum : `int`
+        Minimum number of datasets required for this connection, per quantum.
+        This is checked in the base implementation of
+        `.PipelineTaskConnections.adjustQuantum`, which raises
+        `FileNotFoundError` (causing QuantumGraph generation to fail).
+        `PipelineTask` implementations may provide custom
+        `~.PipelineTaskConnections.adjustQuantum` implementations for more
+        fine-grained or configuration-driven constraints, as long as they are
+        compatible with this minimum.
+    lookupFunction : `typing.Callable`, optional
+        An optional callable function that will look up PrerequisiteInputs
+        using the DatasetType, registry, quantum dataId, and input collections
+        passed to it. If no function is specified, the default temporal/spatial
+        lookup will be used.
+
     Raises
     ------
     TypeError
@@ -247,13 +342,6 @@ class PrerequisiteInput(BaseInput):
     lookupFunction: (
         Callable[[DatasetType, Registry, DataCoordinate, Sequence[str]], Iterable[DatasetRef]] | None
     ) = None
-    """An optional callable function that will look up PrerequisiteInputs
-    using the DatasetType, registry, quantum dataId, and input collections
-    passed to it.
-
-    If no function is specified, the default temporal/spatial lookup will be
-    used.
-    """
 
     _connection_type_set: ClassVar[str] = "prerequisiteInputs"
 
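
A custom ``lookupFunction`` has to match the `Callable` signature shown in the context lines above. A sketch, assuming a plain find-first `Registry.queryDatasets` search is an acceptable stand-in for the default temporal/spatial lookup (the function name is invented here):

from collections.abc import Iterable, Sequence

from lsst.daf.butler import DataCoordinate, DatasetRef, DatasetType, Registry


def find_first_lookup(
    datasetType: DatasetType,
    registry: Registry,
    dataId: DataCoordinate,
    collections: Sequence[str],
) -> Iterable[DatasetRef]:
    # Search the given collections in order, constrained by the quantum
    # data ID, keeping only the first match for each data ID.
    return registry.queryDatasets(
        datasetType, collections=collections, dataId=dataId, findFirst=True
    )

Such a function would then be attached via ``lookupFunction=find_first_lookup`` on a `PrerequisiteInput` declaration.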
@@ -461,7 +461,7 @@ class QuantizedConnection(SimpleNamespace):
 
     def __iter__(
         self,
-    ) -> Generator[tuple[str, DatasetRef | list[DatasetRef]]]:
+    ) -> Generator[tuple[str, DatasetRef | list[DatasetRef]], None, None]:
         """Make an iterator for this `QuantizedConnection`.
 
         Iterating over a `QuantizedConnection` will yield a tuple with the name
@@ -471,7 +471,7 @@ class QuantizedConnection(SimpleNamespace):
         """
         yield from ((name, getattr(self, name)) for name in self._attributes)
 
-    def keys(self) -> Generator[str]:
+    def keys(self) -> Generator[str, None, None]:
         """Return an iterator over all the attributes added to a
         `QuantizedConnection` class.
         """
@@ -495,12 +495,15 @@ class DeferredDatasetRef:
     """A wrapper class for `~lsst.daf.butler.DatasetRef` that indicates that a
     `PipelineTask` should receive a `~lsst.daf.butler.DeferredDatasetHandle`
     instead of an in-memory dataset.
+
+    Attributes
+    ----------
+    datasetRef : `lsst.daf.butler.DatasetRef`
+        The `lsst.daf.butler.DatasetRef` that will be eventually used to
+        resolve a dataset.
     """
 
     datasetRef: DatasetRef
-    """The `lsst.daf.butler.DatasetRef` that will be eventually used to
-    resolve a dataset.
-    """
 
     def __getattr__(self, name: str) -> Any:
         # make sure reduce is called on DeferredDatasetRef and not on
@@ -1041,7 +1044,7 @@ class PipelineTaskConnections(metaclass=PipelineTaskConnectionsMetaclass):
 
 def iterConnections(
     connections: PipelineTaskConnections, connectionType: str | Iterable[str]
-) -> Generator[BaseConnection]:
+) -> Generator[BaseConnection, None, None]:
     """Create an iterator over the selected connections type which yields
     all the defined connections of that type.
 
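
Given that signature, a schematic use of `iterConnections` looks like the following, where ``connections`` is assumed to be an already constructed `PipelineTaskConnections` instance:

from lsst.pipe.base import iterConnections

# Dataset type names of every regular and prerequisite input.
input_names = [
    connection.name
    for connection in iterConnections(connections, ("inputs", "prerequisiteInputs"))
]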
@@ -299,6 +299,11 @@ class QuantumGraphExecutionReport:
     produced DatasetTypes for each task. This report can be output as a
     dictionary or a yaml file.
 
+    Attributes
+    ----------
+    tasks : `dict`
+        A dictionary of TaskExecutionReports by task label.
+
     See Also
     --------
     TaskExecutionReport : A task report.
@@ -136,14 +136,13 @@ class QuantumGraph:
         Maps tasks to their InitOutput dataset refs. Dataset refs can be either
         resolved or non-resolved. For intermediate resolved refs their dataset
         ID must match ``initInputs`` and Quantum ``initInputs``.
-    globalInitOutputs : `~collections.abc.Iterable` \
-        [ `~lsst.daf.butler.DatasetRef` ], optional
+    globalInitOutputs : iterable [ `~lsst.daf.butler.DatasetRef` ], optional
         Dataset refs for some global objects produced by pipeline. These
         objects include task configurations and package versions. Typically
         they have an empty DataId, but there is no real restriction on what
         can appear here.
-    registryDatasetTypes : `~collections.abc.Iterable` \
-        [ `~lsst.daf.butler.DatasetType` ], optional
+    registryDatasetTypes : iterable [ `~lsst.daf.butler.DatasetType` ], \
+        optional
         Dataset types which are used by this graph, their definitions must
         match registry. If registry does not define dataset type yet, then
         it should match one that will be created later.
@@ -489,7 +488,7 @@ class QuantumGraph:
 
         Returns
         -------
-        tasks : `~collections.abc.Iterable` [ `TaskDef` ]
+        tasks : iterable of `TaskDef`
            `TaskDef` objects that have the specified `DatasetTypeName` as an
            input, list will be empty if no tasks use specified
            `DatasetTypeName` as an input.
@@ -538,7 +537,7 @@ class QuantumGraph:
 
         Returns
         -------
-        result : `~collections.abc.Iterable` [`TaskDef`]
+        result : iterable of `TaskDef`
            `TaskDef` objects that are associated with the specified
            `DatasetTypeName`.
 
@@ -936,7 +935,7 @@ class QuantumGraph:
             saved structure. If supplied, the
             `~lsst.daf.butler.DimensionUniverse` from the loaded `QuantumGraph`
             will be validated against the supplied argument for compatibility.
-        nodes : `~collections.abc.Iterable` [ `uuid.UUID` | `str` ] or `None`
+        nodes : iterable of [ `uuid.UUID` | `str` ] or `None`
             UUIDs that correspond to nodes in the graph. If specified, only
             these nodes will be loaded. Defaults to None, in which case all
             nodes will be loaded.
@@ -1221,7 +1220,7 @@ class QuantumGraph:
             saved structure. If supplied, the
             `~lsst.daf.butler.DimensionUniverse` from the loaded `QuantumGraph`
             will be validated against the supplied argument for compatibility.
-        nodes : `~collections.abc.Iterable` [`uuid.UUID`] or `None`
+        nodes : iterable of `uuid.UUID` or `None`
             UUIDs that correspond to nodes in the graph. If specified, only
             these nodes will be loaded. Defaults to None, in which case all
             nodes will be loaded.
@@ -1257,7 +1256,7 @@ class QuantumGraph:
             raise TypeError(f"QuantumGraph file contains unexpected object type: {type(qgraph)}")
         return qgraph
 
-    def iterTaskGraph(self) -> Generator[TaskDef]:
+    def iterTaskGraph(self) -> Generator[TaskDef, None, None]:
         """Iterate over the `taskGraph` attribute in topological order.
 
         Yields
@@ -1375,7 +1374,7 @@ class QuantumGraph:
         """
         return self._universe
 
-    def __iter__(self) -> Generator[QuantumNode]:
+    def __iter__(self) -> Generator[QuantumNode, None, None]:
         yield from nx.topological_sort(self._connectedQuanta)
 
     def __len__(self) -> int:
@@ -1439,7 +1438,7 @@ class QuantumGraph:
         Returns
         -------
         summary : `QgraphSummary`
-             Summary of QuantumGraph.
+            Summary of QuantumGraph.
         """
         inCollection = self.metadata.get("input", None)
         if isinstance(inCollection, str):
@@ -54,7 +54,7 @@ def _hashDsRef(ref: DatasetRef) -> int:
 
 @dataclass(frozen=True, eq=True)
 class NodeId:
-    r"""Deprecated, this class is used with QuantumGraph save formats of
+    """Deprecated, this class is used with QuantumGraph save formats of
     1 and 2 when unpickling objects and must be retained until those formats
     are considered unloadable.
 
@@ -66,9 +66,9 @@ class NodeId:
     A `NodeId` will not be the same if a new graph is built containing the same
     information in a `QuantumNode`, or even built from exactly the same inputs.
 
-    `NodeId`\ s do not play any role in deciding the equality or identity
-    (hash) of a `QuantumNode`, and are mainly useful in debugging or working
-    with various subsets of the same graph.
+    `NodeId`s do not play any role in deciding the equality or identity (hash)
+    of a `QuantumNode`, and are mainly useful in debugging or working with
+    various subsets of the same graph.
 
     This interface is a convenience only, and no guarantees on long term
     stability are made. New implementations might change the `NodeId`, or
@@ -29,12 +29,14 @@ from __future__ import annotations
 
 __all__ = ("GraphWalker",)
 
-from typing import Self
+from typing import Generic, Self, TypeVar
 
 import networkx
 
+_T = TypeVar("_T")
 
-class GraphWalker[T]:
+
+class GraphWalker(Generic[_T]):
     """A helper for traversing directed acyclic graphs.
 
     Parameters
@@ -57,14 +59,14 @@ class GraphWalker[T]:
 
     def __init__(self, xgraph: networkx.DiGraph | networkx.MultiDiGraph):
         self._xgraph = xgraph
-        self._ready: set[T] = set(next(iter(networkx.dag.topological_generations(self._xgraph)), []))
-        self._active: set[T] = set()
-        self._incomplete: set[T] = set(self._xgraph)
+        self._ready: set[_T] = set(next(iter(networkx.dag.topological_generations(self._xgraph)), []))
+        self._active: set[_T] = set()
+        self._incomplete: set[_T] = set(self._xgraph)
 
     def __iter__(self) -> Self:
         return self
 
-    def __next__(self) -> frozenset[T]:
+    def __next__(self) -> frozenset[_T]:
         if not self._incomplete:
             raise StopIteration()
         new_active = frozenset(self._ready)
@@ -72,7 +74,7 @@ class GraphWalker[T]:
         self._ready.clear()
         return new_active
 
-    def finish(self, key: T) -> None:
+    def finish(self, key: _T) -> None:
         """Mark a node as successfully processed, unblocking (at least in part)
         iteration over successor nodes.
 
@@ -95,7 +97,7 @@ class GraphWalker[T]:
             ):
                 self._ready.add(successor)
 
-    def fail(self, key: T) -> list[T]:
+    def fail(self, key: _T) -> list[_T]:
         """Mark a node as unsuccessfully processed, permanently blocking all
         recursive descendants.
 
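
The ``class GraphWalker[T]`` to ``GraphWalker(Generic[_T])`` rewrite swaps PEP 695 type-parameter syntax (Python 3.12+) for the equivalent older `TypeVar`/`Generic` spelling. A usage sketch of the traversal contract the docstrings above describe; the graph and node names are illustrative, and `finish`/`fail` behaviour beyond those docstrings is assumed:

import networkx

from lsst.pipe.base.graph_walker import GraphWalker

xgraph = networkx.DiGraph([("a", "b"), ("a", "c"), ("b", "d")])
walker: GraphWalker[str] = GraphWalker(xgraph)
for ready in walker:  # frozensets of nodes whose predecessors are all done
    for node in ready:
        if node == "b":
            blocked = walker.fail(node)  # "d" is now permanently blocked
        else:
            walker.finish(node)  # unblocks this node's successors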
@@ -103,7 +103,7 @@ class _ExecutionLogRecordsExtra(pydantic.BaseModel):
 
         Parameters
         ----------
-        log_records : `lsst.daf.butler.ButlerLogRecords`
+        log_records : `ButlerLogRecords`
             Logs from a past attempt to run a quantum.
         """
         previous = self.model_validate(log_records.extra)
@@ -163,9 +163,7 @@ class LogCapture:
         return cls(butler, butler)
 
     @contextmanager
-    def capture_logging(
-        self, task_node: TaskNode, /, quantum: Quantum, records: ButlerLogRecords | None = None
-    ) -> Iterator[_LogCaptureContext]:
+    def capture_logging(self, task_node: TaskNode, /, quantum: Quantum) -> Iterator[_LogCaptureContext]:
         """Configure logging system to capture logs for execution of this task.
 
         Parameters
@@ -174,9 +172,6 @@ class LogCapture:
             The task definition.
         quantum : `~lsst.daf.butler.Quantum`
             Single Quantum instance.
-        records : `lsst.daf.butler.logging.ButlerLogRecords`, optional
-            Log record container to append to and save. If provided, streaming
-            mode is disabled (since we'll be saving logs in memory anyway).
 
         Notes
         -----
@@ -218,7 +213,7 @@ class LogCapture:
                 ) from exc
             # Either accumulate into ButlerLogRecords or stream JSON records to
             # file and ingest that (ingest is possible only with full butler).
-            if self.stream_json_logs and self.full_butler is not None and records is None:
+            if self.stream_json_logs and self.full_butler is not None:
                 with TemporaryForIngest(self.full_butler, ref) as temporary:
                     log_handler_file = FileHandler(temporary.ospath)
                     log_handler_file.setFormatter(JsonLogFormatter())
@@ -241,7 +236,7 @@ class LogCapture:
                     temporary.ingest()
 
             else:
-                log_handler_memory = ButlerLogRecordHandler(records)
+                log_handler_memory = ButlerLogRecordHandler()
                 logging.getLogger().addHandler(log_handler_memory)
 
                 try:
@@ -260,6 +255,7 @@ class LogCapture:
                     logging.getLogger().removeHandler(log_handler_memory)
                     if ctx.store:
                         self._store_log_records(quantum, log_dataset_name, log_handler_memory)
+                    log_handler_memory.records.clear()
 
         else:
             with ButlerMDC.set_mdc(mdc):
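
With the ``records`` parameter gone, `capture_logging` is driven entirely by the yielded context object. A schematic sketch, assuming a `LogCapture` built like the ``cls(butler, butler)`` call above and pre-existing ``butler``, ``task_node``, and ``quantum`` objects; the ``store`` attribute is shown because the code above tests ``ctx.store``:

from lsst.pipe.base.log_capture import LogCapture

capture = LogCapture(butler, butler)  # mirrors the cls(butler, butler) call above
with capture.capture_logging(task_node, quantum) as ctx:
    # Records logged here are streamed to a file for ingest, or held in
    # memory and stored afterwards unless the store flag is cleared.
    ctx.store = False  # hypothetical: skip persisting logs for this quantum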