lsst-pipe-base 30.0.0rc2__py3-none-any.whl → 30.0.1rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. lsst/pipe/base/_instrument.py +31 -20
  2. lsst/pipe/base/_quantumContext.py +3 -3
  3. lsst/pipe/base/_status.py +43 -10
  4. lsst/pipe/base/_task_metadata.py +2 -2
  5. lsst/pipe/base/all_dimensions_quantum_graph_builder.py +8 -3
  6. lsst/pipe/base/automatic_connection_constants.py +20 -1
  7. lsst/pipe/base/cli/cmd/__init__.py +18 -2
  8. lsst/pipe/base/cli/cmd/commands.py +149 -4
  9. lsst/pipe/base/connectionTypes.py +72 -160
  10. lsst/pipe/base/connections.py +6 -9
  11. lsst/pipe/base/execution_reports.py +0 -5
  12. lsst/pipe/base/graph/graph.py +11 -10
  13. lsst/pipe/base/graph/quantumNode.py +4 -4
  14. lsst/pipe/base/graph_walker.py +8 -10
  15. lsst/pipe/base/log_capture.py +40 -80
  16. lsst/pipe/base/log_on_close.py +76 -0
  17. lsst/pipe/base/mp_graph_executor.py +51 -15
  18. lsst/pipe/base/pipeline.py +5 -6
  19. lsst/pipe/base/pipelineIR.py +2 -8
  20. lsst/pipe/base/pipelineTask.py +5 -7
  21. lsst/pipe/base/pipeline_graph/_dataset_types.py +2 -2
  22. lsst/pipe/base/pipeline_graph/_edges.py +32 -22
  23. lsst/pipe/base/pipeline_graph/_mapping_views.py +4 -7
  24. lsst/pipe/base/pipeline_graph/_pipeline_graph.py +14 -7
  25. lsst/pipe/base/pipeline_graph/expressions.py +2 -2
  26. lsst/pipe/base/pipeline_graph/io.py +7 -10
  27. lsst/pipe/base/pipeline_graph/visualization/_dot.py +13 -12
  28. lsst/pipe/base/pipeline_graph/visualization/_layout.py +16 -18
  29. lsst/pipe/base/pipeline_graph/visualization/_merge.py +4 -7
  30. lsst/pipe/base/pipeline_graph/visualization/_printer.py +10 -10
  31. lsst/pipe/base/pipeline_graph/visualization/_status_annotator.py +7 -0
  32. lsst/pipe/base/prerequisite_helpers.py +2 -1
  33. lsst/pipe/base/quantum_graph/_common.py +19 -20
  34. lsst/pipe/base/quantum_graph/_multiblock.py +37 -31
  35. lsst/pipe/base/quantum_graph/_predicted.py +113 -15
  36. lsst/pipe/base/quantum_graph/_provenance.py +1136 -45
  37. lsst/pipe/base/quantum_graph/aggregator/__init__.py +0 -1
  38. lsst/pipe/base/quantum_graph/aggregator/_communicators.py +204 -289
  39. lsst/pipe/base/quantum_graph/aggregator/_config.py +87 -9
  40. lsst/pipe/base/quantum_graph/aggregator/_ingester.py +13 -12
  41. lsst/pipe/base/quantum_graph/aggregator/_scanner.py +49 -235
  42. lsst/pipe/base/quantum_graph/aggregator/_structs.py +6 -116
  43. lsst/pipe/base/quantum_graph/aggregator/_supervisor.py +29 -39
  44. lsst/pipe/base/quantum_graph/aggregator/_workers.py +303 -0
  45. lsst/pipe/base/quantum_graph/aggregator/_writer.py +34 -351
  46. lsst/pipe/base/quantum_graph/formatter.py +171 -0
  47. lsst/pipe/base/quantum_graph/ingest_graph.py +413 -0
  48. lsst/pipe/base/quantum_graph/visualization.py +5 -1
  49. lsst/pipe/base/quantum_graph_builder.py +33 -9
  50. lsst/pipe/base/quantum_graph_executor.py +116 -13
  51. lsst/pipe/base/quantum_graph_skeleton.py +31 -35
  52. lsst/pipe/base/quantum_provenance_graph.py +29 -12
  53. lsst/pipe/base/separable_pipeline_executor.py +19 -3
  54. lsst/pipe/base/single_quantum_executor.py +67 -42
  55. lsst/pipe/base/struct.py +4 -0
  56. lsst/pipe/base/testUtils.py +3 -3
  57. lsst/pipe/base/tests/mocks/_storage_class.py +2 -1
  58. lsst/pipe/base/version.py +1 -1
  59. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/METADATA +3 -3
  60. lsst_pipe_base-30.0.1rc1.dist-info/RECORD +129 -0
  61. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/WHEEL +1 -1
  62. lsst_pipe_base-30.0.0rc2.dist-info/RECORD +0 -125
  63. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/entry_points.txt +0 -0
  64. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/licenses/COPYRIGHT +0 -0
  65. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/licenses/LICENSE +0 -0
  66. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/licenses/bsd_license.txt +0 -0
  67. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/licenses/gpl-v3.0.txt +0 -0
  68. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/top_level.txt +0 -0
  69. {lsst_pipe_base-30.0.0rc2.dist-info → lsst_pipe_base-30.0.1rc1.dist-info}/zip-safe +0 -0
lsst/pipe/base/connectionTypes.py

@@ -41,35 +41,36 @@ from lsst.utils.introspection import find_outside_stacklevel
 
 @dataclasses.dataclass(frozen=True)
 class BaseConnection:
-    """Base class used for declaring `PipelineTask` connections.
-
-    Attributes
-    ----------
-    name : `str`
-        The name used to identify the dataset type.
-    storageClass : `str`
-        The storage class used when (un)/persisting the dataset type.
-    multiple : `bool`
-        Indicates if this connection should expect to contain multiple objects
-        of the given dataset type. Tasks with more than one connection with
-        ``multiple=True`` with the same dimensions may want to implement
-        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
-        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum()` and
-        notify the execution system as early as possible of outputs that will
-        not be produced because the corresponding input is missing.
-    deprecated : `str`, optional
-        A description of why this connection is deprecated, including the
-        version after which it may be removed.
-
-        If not `None`, the string is appended to the docstring for this
-        connection and the corresponding config Field.
-    """
+    """Base class used for declaring `PipelineTask` connections."""
 
     name: str
+    """The name used to identify the dataset type."""
+
     storageClass: str
+    """The storage class used when (un)/persisting the dataset type."""
+
     doc: str = ""
+    """Documentation for this connection."""
+
     multiple: bool = False
+    """Indicates if this connection should expect to contain multiple objects
+    of the given dataset type.
+
+    Tasks with more than one connection with ``multiple=True`` with the same
+    dimensions may want to implement `.PipelineTaskConnections.adjustQuantum`
+    to ensure those datasets are consistent (i.e. zip-iterable) in
+    `PipelineTask.runQuantum()` and notify the execution system as early as
+    possible of outputs that will not be produced because the corresponding
+    input is missing.
+    """
+
     deprecated: str | None = dataclasses.field(default=None, kw_only=True)
+    """A description of why this connection is deprecated, including the
+    version after which it may be removed.
+
+    If not `None`, the string is appended to the docstring for this
+    connection and the corresponding config Field.
+    """
 
     _connection_type_set: ClassVar[str]
     _deprecation_context: str = ""
@@ -110,32 +111,15 @@ class BaseConnection:
 class DimensionedConnection(BaseConnection):
     """Class used for declaring PipelineTask connections that includes
     dimensions.
-
-    Attributes
-    ----------
-    name : `str`
-        The name used to identify the dataset type.
-    storageClass : `str`
-        The storage class used when (un)/persisting the dataset type.
-    multiple : `bool`
-        Indicates if this connection should expect to contain multiple objects
-        of the given dataset type. Tasks with more than one connection with
-        ``multiple=True`` with the same dimensions may want to implement
-        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
-        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum` and notify
-        the execution system as early as possible of outputs that will not be
-        produced because the corresponding input is missing.
-    dimensions : iterable of `str`
-        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
-        to identify the dataset type identified by the specified name.
-    isCalibration : `bool`, optional
-        `True` if this dataset type may be included in CALIBRATION-type
-        collections to associate it with a validity range, `False` (default)
-        otherwise.
     """
 
     dimensions: Iterable[str] = ()
+    """The keys of the butler data coordinates for this dataset type."""
+
     isCalibration: bool = False
+    """ `True` if this dataset type may be included in
+    `~lsst.daf.butler.CollectionType.CALIBRATION` collections to associate it
+    with a validity range, `False` (default) otherwise."""
 
     def __post_init__(self):
         super().__post_init__()
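For context on the per-field docstrings added above, a minimal sketch of a connections class that uses them; the task dimensions, dataset type names, and storage classes below are illustrative, not taken from this package:

from lsst.pipe.base import PipelineTaskConnections
from lsst.pipe.base import connectionTypes as cT


class ExampleConnections(PipelineTaskConnections, dimensions=("visit", "detector")):
    # name/storageClass/doc/dimensions are the dataclass fields documented
    # in the diff above; the values here are hypothetical.
    exposure = cT.Input(
        name="calexp",
        storageClass="ExposureF",
        doc="Calibrated exposure to process.",
        dimensions=("visit", "detector"),
    )
    catalog = cT.Output(
        name="exampleCatalog",
        storageClass="SourceCatalog",
        doc="Catalog produced by the task.",
        dimensions=("visit", "detector"),
    )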
@@ -151,39 +135,6 @@ class DimensionedConnection(BaseConnection):
 class BaseInput(DimensionedConnection):
     """Class used for declaring PipelineTask input connections.
 
-    Attributes
-    ----------
-    name : `str`
-        The default name used to identify the dataset type.
-    storageClass : `str`
-        The storage class used when (un)/persisting the dataset type.
-    multiple : `bool`
-        Indicates if this connection should expect to contain multiple objects
-        of the given dataset type. Tasks with more than one connection with
-        ``multiple=True`` with the same dimensions may want to implement
-        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
-        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum` and notify
-        the execution system as early as possible of outputs that will not be
-        produced because the corresponding input is missing.
-    dimensions : iterable of `str`
-        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
-        to identify the dataset type identified by the specified name.
-    deferLoad : `bool`
-        Indicates that this dataset type will be loaded as a
-        `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
-        object to load the object at a later time.
-    minimum : `bool`
-        Minimum number of datasets required for this connection, per quantum.
-        This is checked in the base implementation of
-        `.PipelineTaskConnections.adjustQuantum`, which raises `NoWorkFound` if
-        the minimum is not met for `Input` connections (causing the quantum to
-        be pruned, skipped, or never created, depending on the context), and
-        `FileNotFoundError` for `PrerequisiteInput` connections (causing
-        QuantumGraph generation to fail). `PipelineTask` implementations may
-        provide custom `~.PipelineTaskConnections.adjustQuantum`
-        implementations for more fine-grained or configuration-driven
-        constraints, as long as they are compatible with this minium.
-
     Raises
     ------
     TypeError
@@ -194,7 +145,24 @@ class BaseInput(DimensionedConnection):
     """
 
     deferLoad: bool = False
+    """Whether this dataset type will be loaded as a
+    `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
+    object to load the object at a later time.
+    """
+
     minimum: int = 1
+    """Minimum number of datasets required for this connection, per quantum.
+
+    This is checked in the base implementation of
+    `.PipelineTaskConnections.adjustQuantum`, which raises `NoWorkFound` if the
+    minimum is not met for `Input` connections (causing the quantum to be
+    pruned, skipped, or never created, depending on the context), and
+    `FileNotFoundError` for `PrerequisiteInput` connections (causing
+    QuantumGraph generation to fail). `PipelineTask` implementations may
+    provide custom `~.PipelineTaskConnections.adjustQuantum` implementations
+    for more fine-grained or configuration-driven constraints, as long as they
+    are compatible with this minimum.
+    """
 
     def __post_init__(self) -> None:
         super().__post_init__()
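A hedged illustration of `deferLoad` and `minimum` as documented above; the dataset type name and dimensions are hypothetical, and the declaration would normally sit inside a `PipelineTaskConnections` class body:

from lsst.pipe.base import connectionTypes as cT

refCats = cT.PrerequisiteInput(
    name="ref_cat",                 # hypothetical reference catalog
    storageClass="SimpleCatalog",
    doc="Reference catalog shards, loaded on demand.",
    dimensions=("htm7",),
    multiple=True,
    deferLoad=True,   # runQuantum receives DeferredDatasetHandle objects
    minimum=1,        # unmet minimum on a PrerequisiteInput raises
                      # FileNotFoundError during QuantumGraph generation
)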
@@ -206,56 +174,6 @@ class BaseInput(DimensionedConnection):
 class Input(BaseInput):
     """Class used for declaring PipelineTask input connections.
 
-    Attributes
-    ----------
-    name : `str`
-        The default name used to identify the dataset type.
-    storageClass : `str`
-        The storage class used when (un)/persisting the dataset type.
-    multiple : `bool`
-        Indicates if this connection should expect to contain multiple objects
-        of the given dataset type. Tasks with more than one connection with
-        ``multiple=True`` with the same dimensions may want to implement
-        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
-        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum` and notify
-        the execution system as early as possible of outputs that will not be
-        produced because the corresponding input is missing.
-    dimensions : iterable of `str`
-        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
-        to identify the dataset type identified by the specified name.
-    deferLoad : `bool`
-        Indicates that this dataset type will be loaded as a
-        `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
-        object to load the object at a later time.
-    minimum : `bool`
-        Minimum number of datasets required for this connection, per quantum.
-        This is checked in the base implementation of
-        `.PipelineTaskConnections.adjustQuantum`, which raises `NoWorkFound` if
-        the minimum is not met for `Input` connections (causing the quantum to
-        be pruned, skipped, or never created, depending on the context), and
-        `FileNotFoundError` for `PrerequisiteInput` connections (causing
-        QuantumGraph generation to fail). `PipelineTask` implementations may
-        provide custom `~.PipelineTaskConnections.adjustQuantum`
-        implementations for more fine-grained or configuration-driven
-        constraints, as long as they are compatible with this minium.
-    deferGraphConstraint : `bool`, optional
-        If `True`, do not include this dataset type's existence in the initial
-        query that starts the QuantumGraph generation process. This can be
-        used to make QuantumGraph generation faster by avoiding redundant
-        datasets, and in certain cases it can (along with careful attention to
-        which tasks are included in the same QuantumGraph) be used to work
-        around the QuantumGraph generation algorithm's inflexible handling of
-        spatial overlaps. This option has no effect when the connection is not
-        an overall input of the pipeline (or subset thereof) for which a graph
-        is being created, and it never affects the ordering of quanta.
-    deferBinding : `bool`, optional
-        If `True`, the dataset will not be automatically included in
-        the pipeline graph, ``deferGraphConstraint`` is implied.
-        The custom QuantumGraphBuilder is required to bind it and add a
-        corresponding edge to the pipeline graph.
-        This option allows to have the same dataset type as both
-        input and output of a quantum.
-
     Raises
     ------
     TypeError
@@ -266,8 +184,27 @@ class Input(BaseInput):
     """
 
     deferGraphConstraint: bool = False
+    """If `True`, do not include this dataset type's existence in the initial
+    query that starts the QuantumGraph generation process.
+
+    This can be used to make QuantumGraph generation faster by avoiding
+    redundant datasets, and in certain cases it can (along with careful
+    attention to which tasks are included in the same QuantumGraph) be used to
+    work around the QuantumGraph generation algorithm's inflexible handling of
+    spatial overlaps. This option has no effect when the connection is not an
+    overall input of the pipeline (or subset thereof) for which a graph is
+    being created, and it never affects the ordering of quanta.
+    """
 
     deferBinding: bool = False
+    """If `True`, the dataset will not be automatically included in the
+    pipeline graph (``deferGraphConstraint=True`` is implied).
+
+    A custom `~.quantum_graph_builder.QuantumGraphBuilder` is required to bind
+    it and add a corresponding edge to the pipeline graph. This option allows
+    the same dataset type to be used as both an input and an output of a
+    quantum.
+    """
 
     _connection_type_set: ClassVar[str] = "inputs"
 
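A short sketch of `deferGraphConstraint` with hypothetical names (`deferBinding` is not shown, since it additionally requires a custom QuantumGraphBuilder to bind the connection):

from lsst.pipe.base import connectionTypes as cT

template = cT.Input(
    name="template_coadd",          # hypothetical overall-input dataset type
    storageClass="ExposureF",
    doc="Input that may be missing for some data IDs.",
    dimensions=("tract", "patch", "band"),
    deferGraphConstraint=True,  # existence is not required by the initial
                                # QuantumGraph query
)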
@@ -276,38 +213,6 @@
 class PrerequisiteInput(BaseInput):
     """Class used for declaring PipelineTask prerequisite connections.
 
-    Attributes
-    ----------
-    name : `str`
-        The default name used to identify the dataset type.
-    storageClass : `str`
-        The storage class used when (un)/persisting the dataset type.
-    multiple : `bool`
-        Indicates if this connection should expect to contain multiple objects
-        of the given dataset type. Tasks with more than one connection with
-        ``multiple=True`` with the same dimensions may want to implement
-        `.PipelineTaskConnections.adjustQuantum` to ensure those datasets are
-        consistent (i.e. zip-iterable) in `PipelineTask.runQuantum` and notify
-        the execution system as early as possible of outputs that will not be
-        produced because the corresponding input is missing.
-    dimensions : iterable of `str`
-        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
-        to identify the dataset type identified by the specified name.
-    minimum : `bool`
-        Minimum number of datasets required for this connection, per quantum.
-        This is checked in the base implementation of
-        `.PipelineTaskConnections.adjustQuantum`, which raises
-        `FileNotFoundError` (causing QuantumGraph generation to fail).
-        `PipelineTask` implementations may provide custom
-        `~.PipelineTaskConnections.adjustQuantum` implementations for more
-        fine-grained or configuration-driven constraints, as long as they are
-        compatible with this minium.
-    lookupFunction : `typing.Callable`, optional
-        An optional callable function that will look up PrerequisiteInputs
-        using the DatasetType, registry, quantum dataId, and input collections
-        passed to it. If no function is specified, the default temporal spatial
-        lookup will be used.
-
     Raises
     ------
     TypeError
@@ -342,6 +247,13 @@ class PrerequisiteInput(BaseInput):
     lookupFunction: (
         Callable[[DatasetType, Registry, DataCoordinate, Sequence[str]], Iterable[DatasetRef]] | None
     ) = None
+    """An optional callable function that will look up PrerequisiteInputs
+    using the DatasetType, registry, quantum dataId, and input collections
+    passed to it.
+
+    If no function is specified, the default temporal/spatial lookup will be
+    used.
+    """
 
     _connection_type_set: ClassVar[str] = "prerequisiteInputs"
 
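For reference, a hedged sketch of a callable matching the `lookupFunction` signature above; the find-first query strategy shown is illustrative, not the default temporal/spatial lookup:

from collections.abc import Iterable, Sequence

from lsst.daf.butler import DataCoordinate, DatasetRef, DatasetType, Registry


def lookup_find_first(
    datasetType: DatasetType,
    registry: Registry,
    dataId: DataCoordinate,
    collections: Sequence[str],
) -> Iterable[DatasetRef]:
    # Ignore temporal/spatial matching and simply take the first dataset
    # found in the ordered input collections.
    return registry.queryDatasets(
        datasetType, collections=collections, dataId=dataId, findFirst=True
    )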
lsst/pipe/base/connections.py

@@ -461,7 +461,7 @@ class QuantizedConnection(SimpleNamespace):
 
     def __iter__(
         self,
-    ) -> Generator[tuple[str, DatasetRef | list[DatasetRef]], None, None]:
+    ) -> Generator[tuple[str, DatasetRef | list[DatasetRef]]]:
         """Make an iterator for this `QuantizedConnection`.
 
         Iterating over a `QuantizedConnection` will yield a tuple with the name
@@ -471,7 +471,7 @@ class QuantizedConnection(SimpleNamespace):
         """
         yield from ((name, getattr(self, name)) for name in self._attributes)
 
-    def keys(self) -> Generator[str, None, None]:
+    def keys(self) -> Generator[str]:
         """Return an iterator over all the attributes added to a
         `QuantizedConnection` class.
         """
@@ -495,15 +495,12 @@ class DeferredDatasetRef:
     """A wrapper class for `~lsst.daf.butler.DatasetRef` that indicates that a
     `PipelineTask` should receive a `~lsst.daf.butler.DeferredDatasetHandle`
     instead of an in-memory dataset.
-
-    Attributes
-    ----------
-    datasetRef : `lsst.daf.butler.DatasetRef`
-        The `lsst.daf.butler.DatasetRef` that will be eventually used to
-        resolve a dataset.
     """
 
     datasetRef: DatasetRef
+    """The `lsst.daf.butler.DatasetRef` that will be eventually used to
+    resolve a dataset.
+    """
 
     def __getattr__(self, name: str) -> Any:
         # make sure reduce is called on DeferredDatasetRef and not on
@@ -1044,7 +1041,7 @@ class PipelineTaskConnections(metaclass=PipelineTaskConnectionsMetaclass):
 
 def iterConnections(
     connections: PipelineTaskConnections, connectionType: str | Iterable[str]
-) -> Generator[BaseConnection, None, None]:
+) -> Generator[BaseConnection]:
     """Create an iterator over the selected connections type which yields
     all the defined connections of that type.
 
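The `Generator[X, None, None]` to `Generator[X]` simplifications in this file (and in graph.py below) rely on the PEP 696 default type parameters that `collections.abc.Generator` carries in recent typeshed/Python 3.13+, where the send and return types default to `None`; a tiny standalone illustration:

from collections.abc import Generator


def letters() -> Generator[str]:  # equivalent to Generator[str, None, None]
    yield from "abc"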
lsst/pipe/base/execution_reports.py

@@ -299,11 +299,6 @@ class QuantumGraphExecutionReport:
     produced DatasetTypes for each task. This report can be output as a
     dictionary or a yaml file.
 
-    Attributes
-    ----------
-    tasks : `dict`
-        A dictionary of TaskExecutionReports by task label.
-
     See Also
     --------
     TaskExecutionReport : A task report.
lsst/pipe/base/graph/graph.py

@@ -136,13 +136,14 @@ class QuantumGraph:
         Maps tasks to their InitOutput dataset refs. Dataset refs can be either
         resolved or non-resolved. For intermediate resolved refs their dataset
         ID must match ``initInputs`` and Quantum ``initInputs``.
-    globalInitOutputs : iterable [ `~lsst.daf.butler.DatasetRef` ], optional
+    globalInitOutputs : `~collections.abc.Iterable` \
+            [ `~lsst.daf.butler.DatasetRef` ], optional
         Dataset refs for some global objects produced by pipeline. These
         objects include task configurations and package versions. Typically
         they have an empty DataId, but there is no real restriction on what
         can appear here.
-    registryDatasetTypes : iterable [ `~lsst.daf.butler.DatasetType` ], \
-            optional
+    registryDatasetTypes : `~collections.abc.Iterable` \
+            [ `~lsst.daf.butler.DatasetType` ], optional
         Dataset types which are used by this graph, their definitions must
         match registry. If registry does not define dataset type yet, then
         it should match one that will be created later.
@@ -488,7 +489,7 @@
 
         Returns
         -------
-        tasks : iterable of `TaskDef`
+        tasks : `~collections.abc.Iterable` [ `TaskDef` ]
             `TaskDef` objects that have the specified `DatasetTypeName` as an
             input, list will be empty if no tasks use specified
             `DatasetTypeName` as an input.
@@ -537,7 +538,7 @@
 
         Returns
         -------
-        result : iterable of `TaskDef`
+        result : `~collections.abc.Iterable` [`TaskDef`]
             `TaskDef` objects that are associated with the specified
             `DatasetTypeName`.
 
@@ -935,7 +936,7 @@
             saved structure. If supplied, the
             `~lsst.daf.butler.DimensionUniverse` from the loaded `QuantumGraph`
             will be validated against the supplied argument for compatibility.
-        nodes : iterable of [ `uuid.UUID` | `str` ] or `None`
+        nodes : `~collections.abc.Iterable` [ `uuid.UUID` | `str` ] or `None`
             UUIDs that correspond to nodes in the graph. If specified, only
             these nodes will be loaded. Defaults to None, in which case all
             nodes will be loaded.
@@ -1220,7 +1221,7 @@
             saved structure. If supplied, the
             `~lsst.daf.butler.DimensionUniverse` from the loaded `QuantumGraph`
             will be validated against the supplied argument for compatibility.
-        nodes : iterable of `uuid.UUID` or `None`
+        nodes : `~collections.abc.Iterable` [`uuid.UUID`] or `None`
             UUIDs that correspond to nodes in the graph. If specified, only
             these nodes will be loaded. Defaults to None, in which case all
             nodes will be loaded.
@@ -1256,7 +1257,7 @@
             raise TypeError(f"QuantumGraph file contains unexpected object type: {type(qgraph)}")
         return qgraph
 
-    def iterTaskGraph(self) -> Generator[TaskDef, None, None]:
+    def iterTaskGraph(self) -> Generator[TaskDef]:
         """Iterate over the `taskGraph` attribute in topological order.
 
         Yields
@@ -1374,7 +1375,7 @@
         """
         return self._universe
 
-    def __iter__(self) -> Generator[QuantumNode, None, None]:
+    def __iter__(self) -> Generator[QuantumNode]:
         yield from nx.topological_sort(self._connectedQuanta)
 
     def __len__(self) -> int:
@@ -1438,7 +1439,7 @@
         Returns
         -------
         summary : `QgraphSummary`
-           Summary of QuantumGraph.
+            Summary of QuantumGraph.
         """
         inCollection = self.metadata.get("input", None)
         if isinstance(inCollection, str):
lsst/pipe/base/graph/quantumNode.py

@@ -54,7 +54,7 @@ def _hashDsRef(ref: DatasetRef) -> int:
 
 @dataclass(frozen=True, eq=True)
 class NodeId:
-    """Deprecated, this class is used with QuantumGraph save formats of
+    r"""Deprecated, this class is used with QuantumGraph save formats of
     1 and 2 when unpicking objects and must be retained until those formats
     are considered unloadable.
 
@@ -66,9 +66,9 @@ class NodeId:
     A `NodeId` will not be the same if a new graph is built containing the same
     information in a `QuantumNode`, or even built from exactly the same inputs.
 
-    `NodeId`s do not play any role in deciding the equality or identity (hash)
-    of a `QuantumNode`, and are mainly useful in debugging or working with
-    various subsets of the same graph.
+    `NodeId`\ s do not play any role in deciding the equality or identity
+    (hash) of a `QuantumNode`, and are mainly useful in debugging or working
+    with various subsets of the same graph.
 
     This interface is a convenance only, and no guarantees on long term
     stability are made. New implementations might change the `NodeId`, or
lsst/pipe/base/graph_walker.py

@@ -29,14 +29,12 @@ from __future__ import annotations
 
 __all__ = ("GraphWalker",)
 
-from typing import Generic, Self, TypeVar
+from typing import Self
 
 import networkx
 
-_T = TypeVar("_T")
-
-
-class GraphWalker(Generic[_T]):
+class GraphWalker[T]:
     """A helper for traversing directed acyclic graphs.
 
     Parameters
@@ -59,14 +57,14 @@ class GraphWalker(Generic[_T]):
 
     def __init__(self, xgraph: networkx.DiGraph | networkx.MultiDiGraph):
        self._xgraph = xgraph
-        self._ready: set[_T] = set(next(iter(networkx.dag.topological_generations(self._xgraph)), []))
-        self._active: set[_T] = set()
-        self._incomplete: set[_T] = set(self._xgraph)
+        self._ready: set[T] = set(next(iter(networkx.dag.topological_generations(self._xgraph)), []))
+        self._active: set[T] = set()
+        self._incomplete: set[T] = set(self._xgraph)
 
     def __iter__(self) -> Self:
         return self
 
-    def __next__(self) -> frozenset[_T]:
+    def __next__(self) -> frozenset[T]:
         if not self._incomplete:
             raise StopIteration()
         new_active = frozenset(self._ready)
@@ -74,7 +72,7 @@ class GraphWalker(Generic[_T]):
         self._ready.clear()
         return new_active
 
-    def finish(self, key: _T) -> None:
+    def finish(self, key: T) -> None:
         """Mark a node as successfully processed, unblocking (at least in part)
         iteration over successor nodes.
 
@@ -97,7 +95,7 @@
         ):
             self._ready.add(successor)
 
-    def fail(self, key: _T) -> list[_T]:
+    def fail(self, key: T) -> list[T]:
         """Mark a node as unsuccessfully processed, permanently blocking all
         recursive descendants.
 
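From the interface visible in this diff (`__next__` yields batches of ready nodes; `finish` unblocks successors, `fail` permanently blocks descendants), a hedged usage sketch of this internal helper; the example graph and per-node work are illustrative:

import networkx

from lsst.pipe.base.graph_walker import GraphWalker


def process(node: str) -> None:
    print("processing", node)  # stand-in for real per-node work


xgraph = networkx.DiGraph([("a", "b"), ("a", "c"), ("b", "d"), ("c", "d")])
walker: GraphWalker[str] = GraphWalker(xgraph)
for ready in walker:
    for node in ready:
        try:
            process(node)
        except Exception:
            for skipped in walker.fail(node):
                print("blocked:", skipped)  # descendants that will never run
        else:
            walker.finish(node)  # successors may appear in a later batch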