lsst-ctrl-mpexec 29.2025.2400-py3-none-any.whl → 29.2025.3200-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. lsst/ctrl/mpexec/__init__.py +1 -2
  2. lsst/ctrl/mpexec/cli/butler_factory.py +464 -0
  3. lsst/ctrl/mpexec/cli/cmd/commands.py +7 -1
  4. lsst/ctrl/mpexec/cli/opt/optionGroups.py +0 -13
  5. lsst/ctrl/mpexec/cli/opt/options.py +0 -46
  6. lsst/ctrl/mpexec/cli/script/build.py +49 -36
  7. lsst/ctrl/mpexec/cli/script/pre_exec_init_qbb.py +3 -1
  8. lsst/ctrl/mpexec/cli/script/qgraph.py +0 -25
  9. lsst/ctrl/mpexec/cli/script/run.py +2 -1
  10. lsst/ctrl/mpexec/cli/script/run_qbb.py +2 -1
  11. lsst/ctrl/mpexec/cmdLineFwk.py +30 -556
  12. lsst/ctrl/mpexec/execFixupDataId.py +9 -101
  13. lsst/ctrl/mpexec/executionGraphFixup.py +12 -37
  14. lsst/ctrl/mpexec/log_capture.py +9 -195
  15. lsst/ctrl/mpexec/mpGraphExecutor.py +60 -696
  16. lsst/ctrl/mpexec/quantumGraphExecutor.py +20 -90
  17. lsst/ctrl/mpexec/reports.py +30 -206
  18. lsst/ctrl/mpexec/separablePipelineExecutor.py +12 -263
  19. lsst/ctrl/mpexec/showInfo.py +2 -2
  20. lsst/ctrl/mpexec/simple_pipeline_executor.py +11 -590
  21. lsst/ctrl/mpexec/singleQuantumExecutor.py +75 -532
  22. lsst/ctrl/mpexec/taskFactory.py +12 -38
  23. lsst/ctrl/mpexec/version.py +1 -1
  24. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/METADATA +1 -1
  25. lsst_ctrl_mpexec-29.2025.3200.dist-info/RECORD +51 -0
  26. lsst/ctrl/mpexec/dotTools.py +0 -100
  27. lsst_ctrl_mpexec-29.2025.2400.dist-info/RECORD +0 -51
  28. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/WHEEL +0 -0
  29. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/entry_points.txt +0 -0
  30. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/licenses/COPYRIGHT +0 -0
  31. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/licenses/LICENSE +0 -0
  32. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/licenses/bsd_license.txt +0 -0
  33. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/licenses/gpl-v3.0.txt +0 -0
  34. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/top_level.txt +0 -0
  35. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/zip-safe +0 -0
@@ -25,66 +25,38 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

-__all__ = ["SingleQuantumExecutor"]
+from __future__ import annotations
+
+__all__ = ("SingleQuantumExecutor",)

-# -------------------------------
-# Imports of standard modules --
-# -------------------------------
-import logging
-import time
 import uuid
-from collections import defaultdict
 from collections.abc import Callable, Mapping
-from itertools import chain
-from typing import Any, cast
-
-from lsst.daf.butler import (
-    Butler,
-    ButlerMetrics,
-    CollectionType,
-    DatasetRef,
-    DatasetType,
-    LimitedButler,
-    NamedKeyDict,
-    Quantum,
-)
-from lsst.daf.butler.registry.wildcards import CollectionWildcard
-from lsst.pipe.base import (
-    AdjustQuantumHelper,
-    AnnotatedPartialOutputsError,
-    ExecutionResources,
-    Instrument,
-    InvalidQuantumError,
-    NoWorkFound,
-    PipelineTask,
-    QuantumContext,
-    QuantumSuccessCaveats,
-    TaskFactory,
-)
-from lsst.pipe.base.pipeline_graph import TaskNode
-
-# During metadata transition phase, determine metadata class by
-# asking pipe_base
-from lsst.pipe.base.task import _TASK_FULL_METADATA_TYPE, _TASK_METADATA_TYPE
-from lsst.utils.timer import logInfo
+from typing import TYPE_CHECKING, Any

-# -----------------------------
-# Imports for other modules --
-# -----------------------------
-from .log_capture import LogCapture
-from .quantumGraphExecutor import QuantumExecutor
-from .reports import QuantumReport
+from deprecated.sphinx import deprecated

-# ----------------------------------
-# Local non-exported definitions --
-# ----------------------------------
+import lsst.pipe.base.single_quantum_executor

-_LOG = logging.getLogger(__name__)
+if TYPE_CHECKING:
+    from lsst.daf.butler import Butler, ButlerMetrics, LimitedButler, Quantum
+    from lsst.pipe.base import ExecutionResources, PipelineTask, QuantumSuccessCaveats, TaskFactory
+    from lsst.pipe.base.pipeline_graph import TaskNode


-class SingleQuantumExecutor(QuantumExecutor):
+# TODO[DM-51962]: Remove this module.
+@deprecated(
+    "The SingleQuantumExecutor class has moved to lsst.pipe.base.single_quantum_executor. "
+    "This forwarding shim will be removed after v30.",
+    version="v30",
+    category=FutureWarning,
+)
+class SingleQuantumExecutor(lsst.pipe.base.single_quantum_executor.SingleQuantumExecutor):
     """Executor class which runs one Quantum at a time.

+    This is a deprecated backwards-compatibility shim for
+    `lsst.pipe.base.single_quantum_executor.SingleQuantumExecutor`, which has
+    the same functionality with very minor interface changes.
+
     Parameters
     ----------
     butler : `~lsst.daf.butler.Butler` or `None`
@@ -149,349 +121,29 @@ class SingleQuantumExecutor(QuantumExecutor):
         raise_on_partial_outputs: bool = True,
         job_metadata: Mapping[str, int | str | float] | None = None,
     ):
-        self.butler = butler
-        self.taskFactory = taskFactory
-        self.enableLsstDebug = enableLsstDebug
-        self.clobberOutputs = clobberOutputs
-        self.limited_butler_factory = limited_butler_factory
-        self.resources = resources
-        self.assumeNoExistingOutputs = assumeNoExistingOutputs
-        self.raise_on_partial_outputs = raise_on_partial_outputs
-        self.job_metadata = job_metadata
-
-        if self.butler is None:
-            assert limited_butler_factory is not None, "limited_butler_factory is needed when butler is None"
-
-        # Find whether output run is in skipExistingIn.
-        # TODO: This duplicates logic in GraphBuilder, would be nice to have
-        # better abstraction for this some day.
-        self.skipExisting = skipExisting
-        if self.butler is not None and skipExistingIn:
-            skip_collections_wildcard = CollectionWildcard.from_expression(skipExistingIn)
-            # As optimization check in the explicit list of names first
-            self.skipExisting = self.butler.run in skip_collections_wildcard.strings
-            if not self.skipExisting:
-                # need to flatten it and check again
-                self.skipExisting = self.butler.run in self.butler.registry.queryCollections(
-                    skipExistingIn,
-                    collectionTypes=CollectionType.RUN,
-                )
-
-    def execute(
-        self, task_node: TaskNode, /, quantum: Quantum, quantum_id: uuid.UUID | None = None
-    ) -> tuple[Quantum, QuantumReport | None]:
-        # Docstring inherited from QuantumExecutor.execute
-        assert quantum.dataId is not None, "Quantum DataId cannot be None"
-
-        if self.butler is not None:
-            self.butler.registry.refresh()
-
-        result = self._execute(task_node, quantum, quantum_id=quantum_id)
-        report = QuantumReport(dataId=quantum.dataId, taskLabel=task_node.label)
-        return result, report
-
-    def _execute(
-        self, task_node: TaskNode, /, quantum: Quantum, quantum_id: uuid.UUID | None = None
-    ) -> Quantum:
-        """Execute the quantum.
-
-        Internal implementation of `execute()`.
-        """
-        startTime = time.time()
-
-        # Make a limited butler instance if needed (which should be QBB if full
-        # butler is not defined).
-        limited_butler: LimitedButler
-        if self.butler is not None:
-            limited_butler = self.butler
-        else:
-            # We check this in constructor, but mypy needs this check here.
-            assert self.limited_butler_factory is not None
-            limited_butler = self.limited_butler_factory(quantum)
-
-        if self.butler is not None:
-            log_capture = LogCapture.from_full(self.butler)
-        else:
-            log_capture = LogCapture.from_limited(limited_butler)
-        with log_capture.capture_logging(task_node, quantum) as captureLog:
-            # Save detailed resource usage before task start to metadata.
-            quantumMetadata = _TASK_METADATA_TYPE()
-            logInfo(None, "prep", metadata=quantumMetadata)  # type: ignore[arg-type]
-
-            _LOG.info(
-                "Preparing execution of quantum for label=%s dataId=%s.", task_node.label, quantum.dataId
-            )
-
-            # check whether to skip or delete old outputs, if it returns True
-            # or raises an exception do not try to store logs, as they may be
-            # already in butler.
-            captureLog.store = False
-            if self.checkExistingOutputs(quantum, task_node, limited_butler):
-                _LOG.info(
-                    "Skipping already-successful quantum for label=%s dataId=%s.",
-                    task_node.label,
-                    quantum.dataId,
-                )
-                return quantum
-            captureLog.store = True
-
-            try:
-                quantum = self.updatedQuantumInputs(quantum, task_node, limited_butler)
-            except NoWorkFound as exc:
-                _LOG.info(
-                    "Nothing to do for task '%s' on quantum %s; saving metadata and skipping: %s",
-                    task_node.label,
-                    quantum.dataId,
-                    str(exc),
-                )
-                quantumMetadata["caveats"] = QuantumSuccessCaveats.from_adjust_quantum_no_work().value
-                # Make empty metadata that looks something like what a
-                # do-nothing task would write (but we don't bother with empty
-                # nested PropertySets for subtasks). This is slightly
-                # duplicative with logic in pipe_base that we can't easily call
-                # from here; we'll fix this on DM-29761.
-                logInfo(None, "end", metadata=quantumMetadata)  # type: ignore[arg-type]
-                fullMetadata = _TASK_FULL_METADATA_TYPE()
-                fullMetadata[task_node.label] = _TASK_METADATA_TYPE()
-                fullMetadata["quantum"] = quantumMetadata
-                if self.job_metadata is not None:
-                    fullMetadata["job"] = self.job_metadata
-                self.writeMetadata(quantum, fullMetadata, task_node, limited_butler)
-                return quantum
-
-            # enable lsstDebug debugging
-            if self.enableLsstDebug:
-                try:
-                    _LOG.debug("Will try to import debug.py")
-                    import debug  # type: ignore # noqa:F401
-                except ImportError:
-                    _LOG.warning("No 'debug' module found.")
-
-            # initialize global state
-            self.initGlobals(quantum)
-
-            # Ensure that we are executing a frozen config
-            task_node.config.freeze()
-            logInfo(None, "init", metadata=quantumMetadata)  # type: ignore[arg-type]
-            init_input_refs = list(quantum.initInputs.values())
-
-            _LOG.info(
-                "Constructing task and executing quantum for label=%s dataId=%s.",
-                task_node.label,
-                quantum.dataId,
-            )
-            task = self.taskFactory.makeTask(task_node, limited_butler, init_input_refs)
-            logInfo(None, "start", metadata=quantumMetadata)  # type: ignore[arg-type]
-            try:
-                caveats, outputsPut, butler_metrics = self.runQuantum(
-                    task, quantum, task_node, limited_butler, quantum_id=quantum_id
-                )
-            except Exception as e:
-                _LOG.error(
-                    "Execution of task '%s' on quantum %s failed. Exception %s: %s",
-                    task_node.label,
-                    quantum.dataId,
-                    e.__class__.__name__,
-                    str(e),
-                )
-                raise
-            else:
-                quantumMetadata["butler_metrics"] = butler_metrics.model_dump()
-            quantumMetadata["caveats"] = caveats.value
-            # Stringify the UUID for easier compatibility with
-            # PropertyList.
-            quantumMetadata["outputs"] = [str(output) for output in outputsPut]
-            logInfo(None, "end", metadata=quantumMetadata)  # type: ignore[arg-type]
-            fullMetadata = task.getFullMetadata()
-            fullMetadata["quantum"] = quantumMetadata
-            if self.job_metadata is not None:
-                fullMetadata["job"] = self.job_metadata
-            self.writeMetadata(quantum, fullMetadata, task_node, limited_butler)
-            stopTime = time.time()
-            _LOG.info(
-                "Execution of task '%s' on quantum %s took %.3f seconds",
-                task_node.label,
-                quantum.dataId,
-                stopTime - startTime,
-            )
-        return quantum
+        super().__init__(
+            butler=butler,
+            task_factory=taskFactory,
+            skip_existing_in=skipExistingIn,
+            clobber_outputs=clobberOutputs,
+            enable_lsst_debug=enableLsstDebug,
+            limited_butler_factory=limited_butler_factory,
+            resources=resources,
+            skip_existing=skipExisting,
+            assume_no_existing_outputs=assumeNoExistingOutputs,
+            raise_on_partial_outputs=raise_on_partial_outputs,
+            job_metadata=job_metadata,
+        )

     def checkExistingOutputs(
         self, quantum: Quantum, task_node: TaskNode, /, limited_butler: LimitedButler
     ) -> bool:
-        """Decide whether this quantum needs to be executed.
-
-        If only partial outputs exist then they are removed if
-        ``clobberOutputs`` is True, otherwise an exception is raised.
-
-        The ``LimitedButler`` is used for everything, and should be set to
-        ``self.butler`` if no separate ``LimitedButler`` is available.
-
-        Parameters
-        ----------
-        quantum : `~lsst.daf.butler.Quantum`
-            Quantum to check for existing outputs.
-        task_node : `~lsst.pipe.base.pipeline_graph.TaskNode`
-            Task definition structure.
-        limited_butler : `~lsst.daf.butler.LimitedButler`
-            Butler to use for querying and clobbering.
-
-        Returns
-        -------
-        exist : `bool`
-            `True` if ``self.skipExisting`` is defined, and a previous
-            execution of this quanta appears to have completed successfully
-            (either because metadata was written or all datasets were written).
-            `False` otherwise.
-
-        Raises
-        ------
-        RuntimeError
-            Raised if some outputs exist and some not.
-        """
-        if self.assumeNoExistingOutputs:
-            return False
-
-        if self.skipExisting:
-            _LOG.debug(
-                "Checking existence of metadata from previous execution of label=%s dataId=%s.",
-                task_node.label,
-                quantum.dataId,
-            )
-            # Metadata output exists; this is sufficient to assume the previous
-            # run was successful and should be skipped.
-            [metadata_ref] = quantum.outputs[task_node.metadata_output.dataset_type_name]
-            if metadata_ref is not None:
-                if limited_butler.stored(metadata_ref):
-                    return True
-
-        # Find and prune (partial) outputs if `self.clobberOutputs` is set.
-        _LOG.debug(
-            "Looking for existing outputs in the way for label=%s dataId=%s.", task_node.label, quantum.dataId
-        )
-        ref_dict = limited_butler.stored_many(chain.from_iterable(quantum.outputs.values()))
-        existingRefs = [ref for ref, exists in ref_dict.items() if exists]
-        missingRefs = [ref for ref, exists in ref_dict.items() if not exists]
-        if existingRefs:
-            if not missingRefs:
-                # Full outputs exist.
-                if self.skipExisting:
-                    return True
-                elif self.clobberOutputs:
-                    _LOG.info("Removing complete outputs for quantum %s: %s", quantum, existingRefs)
-                    limited_butler.pruneDatasets(existingRefs, disassociate=True, unstore=True, purge=True)
-                else:
-                    raise RuntimeError(
-                        f"Complete outputs exists for a quantum {quantum} "
-                        "and neither clobberOutputs nor skipExisting is set: "
-                        f"existingRefs={existingRefs}"
-                    )
-            else:
-                # Partial outputs from a failed quantum.
-                _LOG.debug(
-                    "Partial outputs exist for quantum %s existingRefs=%s missingRefs=%s",
-                    quantum,
-                    existingRefs,
-                    missingRefs,
-                )
-                if self.clobberOutputs:
-                    # only prune
-                    _LOG.info("Removing partial outputs for task %s: %s", task_node.label, existingRefs)
-                    limited_butler.pruneDatasets(existingRefs, disassociate=True, unstore=True, purge=True)
-                    return False
-                else:
-                    raise RuntimeError(
-                        "Registry inconsistency while checking for existing quantum outputs:"
-                        f" quantum={quantum} existingRefs={existingRefs}"
-                        f" missingRefs={missingRefs}"
-                    )
-
-        # By default always execute.
-        return False
+        return super()._check_existing_outputs(quantum, task_node, limited_butler=limited_butler)

     def updatedQuantumInputs(
         self, quantum: Quantum, task_node: TaskNode, /, limited_butler: LimitedButler
     ) -> Quantum:
-        """Update quantum with extra information, returns a new updated
-        Quantum.
-
-        Some methods may require input DatasetRefs to have non-None
-        ``dataset_id``, but in case of intermediate dataset it may not be
-        filled during QuantumGraph construction. This method will retrieve
-        missing info from registry.
-
-        Parameters
-        ----------
-        quantum : `~lsst.daf.butler.Quantum`
-            Single Quantum instance.
-        task_node : `~lsst.pipe.base.pipeline_graph.TaskNode`
-            Task definition structure.
-        limited_butler : `~lsst.daf.butler.LimitedButler`
-            Butler to use for querying.
-
-        Returns
-        -------
-        update : `~lsst.daf.butler.Quantum`
-            Updated Quantum instance.
-        """
-        anyChanges = False
-        updatedInputs: defaultdict[DatasetType, list] = defaultdict(list)
-        for key, refsForDatasetType in quantum.inputs.items():
-            _LOG.debug(
-                "Checking existence of input '%s' for label=%s dataId=%s.",
-                key.name,
-                task_node.label,
-                quantum.dataId,
-            )
-            toCheck = []
-            newRefsForDatasetType = updatedInputs[key]
-            for ref in refsForDatasetType:
-                if self._should_assume_exists(quantum, ref):
-                    newRefsForDatasetType.append(ref)
-                else:
-                    toCheck.append(ref)
-            if not toCheck:
-                _LOG.debug(
-                    "Assuming overall input '%s' is present without checks for label=%s dataId=%s.",
-                    key.name,
-                    task_node.label,
-                    quantum.dataId,
-                )
-                continue
-            stored = limited_butler.stored_many(toCheck)
-            for ref in toCheck:
-                if stored[ref]:
-                    newRefsForDatasetType.append(ref)
-                else:
-                    # This should only happen if a predicted intermediate was
-                    # not actually produced upstream, but
-                    # datastore misconfigurations can unfortunately also land
-                    # us here.
-                    _LOG.info("No dataset artifact found for %s", ref)
-                    continue
-            if len(newRefsForDatasetType) != len(refsForDatasetType):
-                anyChanges = True
-        # If we removed any input datasets, let the task check if it has enough
-        # to proceed and/or prune related datasets that it also doesn't
-        # need/produce anymore. It will raise NoWorkFound if it can't run,
-        # which we'll let propagate up. This is exactly what we run during QG
-        # generation, because a task shouldn't care whether an input is missing
-        # because some previous task didn't produce it, or because it just
-        # wasn't there during QG generation.
-        namedUpdatedInputs = NamedKeyDict[DatasetType, list[DatasetRef]](updatedInputs.items())
-        helper = AdjustQuantumHelper(namedUpdatedInputs, quantum.outputs)
-        if anyChanges:
-            _LOG.debug("Running adjustQuantum for label=%s dataId=%s.", task_node.label, quantum.dataId)
-            assert quantum.dataId is not None, "Quantum DataId cannot be None"
-            helper.adjust_in_place(task_node.get_connections(), label=task_node.label, data_id=quantum.dataId)
-        return Quantum(
-            taskName=quantum.taskName,
-            taskClass=quantum.taskClass,
-            dataId=quantum.dataId,
-            initInputs=quantum.initInputs,
-            inputs=helper.inputs,
-            outputs=helper.outputs,
-        )
+        return super()._updated_quantum_inputs(quantum, task_node, limited_butler=limited_butler)

     def runQuantum(
         self,
@@ -502,163 +154,54 @@ class SingleQuantumExecutor(QuantumExecutor):
         limited_butler: LimitedButler,
         quantum_id: uuid.UUID | None = None,
     ) -> tuple[QuantumSuccessCaveats, list[uuid.UUID], ButlerMetrics]:
-        """Execute task on a single quantum.
-
-        Parameters
-        ----------
-        task : `~lsst.pipe.base.PipelineTask`
-            Task object.
-        quantum : `~lsst.daf.butler.Quantum`
-            Single Quantum instance.
-        task_node : `~lsst.pipe.base.pipeline_graph.TaskNode`
-            Task definition structure.
-        limited_butler : `~lsst.daf.butler.LimitedButler`
-            Butler to use for dataset I/O.
-        quantum_id : `uuid.UUID` or `None`, optional
-            ID of the quantum being executed.
-
-        Returns
-        -------
-        flags : `QuantumSuccessCaveats`
-            Flags that describe qualified successes.
-        ids_put : `list` [ `uuid.UUID` ]
-            Record of all the dataset IDs that were written by this quantum
-            being executed.
-        metrics : `lsst.daf.butler.ButlerMetrics`
-            Butler metrics recorded for this quantum.
-        """
-        flags = QuantumSuccessCaveats.NO_CAVEATS
-
-        # Create a butler that operates in the context of a quantum
-        butlerQC = QuantumContext(limited_butler, quantum, resources=self.resources, quantum_id=quantum_id)
-
-        # Get the input and output references for the task
-        inputRefs, outputRefs = task_node.get_connections().buildDatasetRefs(quantum)
-
-        # Call task runQuantum() method.
-        try:
-            with limited_butler.record_metrics() as butler_metrics:
-                task.runQuantum(butlerQC, inputRefs, outputRefs)
-        except NoWorkFound as err:
-            # Not an error, just an early exit.
-            _LOG.info(
-                "Task '%s' on quantum %s exited early with no work found: %s.",
-                task_node.label,
-                quantum.dataId,
-                str(err),
-            )
-            flags |= err.FLAGS
-        except AnnotatedPartialOutputsError as caught:
-            error: BaseException
-            if caught.__cause__ is None:
-                _LOG.error(
-                    "Incorrect use of AnnotatedPartialOutputsError: no chained exception found.",
-                    task_node.label,
-                    quantum.dataId,
-                )
-                error = caught
-            else:
-                error = caught.__cause__
-            if self.raise_on_partial_outputs:
-                # Note: this is a real edge case that required some
-                # experimentation: without 'from None' below, this raise would
-                # produce a "while one exception was being handled, another was
-                # raised" traceback involving AnnotatedPartialOutputsError.
-                # With the 'from None', we get just the error chained to it, as
-                # desired.
-                raise error from None
-            else:
-                _LOG.error(
-                    "Task '%s' on quantum %s exited with partial outputs; "
-                    "considering this a qualified success and proceeding.",
-                    task_node.label,
-                    quantum.dataId,
-                )
-                _LOG.error(error, exc_info=error)
-                flags |= caught.FLAGS
-        if not butlerQC.outputsPut:
-            flags |= QuantumSuccessCaveats.ALL_OUTPUTS_MISSING
-        if not butlerQC.outputsPut == butlerQC.allOutputs:
-            flags |= QuantumSuccessCaveats.ANY_OUTPUTS_MISSING
-        ids_put = [output[2] for output in butlerQC.outputsPut]
-        return flags, ids_put, butler_metrics
+        return super()._run_quantum(
+            task, quantum, task_node, limited_butler=limited_butler, quantum_id=quantum_id
+        )

     def writeMetadata(
         self, quantum: Quantum, metadata: Any, task_node: TaskNode, /, limited_butler: LimitedButler
     ) -> None:
-        # DatasetRef has to be in the Quantum outputs, can lookup by name
-        try:
-            [ref] = quantum.outputs[task_node.metadata_output.dataset_type_name]
-        except LookupError as exc:
-            raise InvalidQuantumError(
-                "Quantum outputs is missing metadata dataset type "
-                f"{task_node.metadata_output.dataset_type_name};"
-                " this could happen due to inconsistent options between QuantumGraph generation"
-                " and execution"
-            ) from exc
-        limited_butler.put(metadata, ref)
+        return super()._write_metadata(quantum, metadata, task_node, limited_butler=limited_butler)

     def initGlobals(self, quantum: Quantum) -> None:
-        """Initialize global state needed for task execution.
+        return super()._init_globals(quantum)
+
+    @property
+    def butler(self) -> Butler | None:
+        return self._butler
+
+    @property
+    def taskFactory(self) -> TaskFactory:
+        return self._task_factory
+
+    @property
+    def clobberOutputs(self) -> bool:
+        return self._clobber_outputs

-        Parameters
-        ----------
-        quantum : `~lsst.daf.butler.Quantum`
-            Single Quantum instance.
+    @property
+    def enableLsstDebug(self) -> bool:
+        return self._enable_lsst_debug

-        Notes
-        -----
-        There is an issue with initializing filters singleton which is done
-        by instrument, to avoid requiring tasks to do it in runQuantum()
-        we do it here when any dataId has an instrument dimension. Also for
-        now we only allow single instrument, verify that all instrument
-        names in all dataIds are identical.
+    @property
+    def limited_butler_factory(self) -> Callable[[Quantum], LimitedButler] | None:
+        return self._limited_butler_factory

-        This will need revision when filter singleton disappears.
-        """
-        # can only work for full butler
-        if self.butler is None:
-            return
-        oneInstrument = None
-        for datasetRefs in chain(quantum.inputs.values(), quantum.outputs.values()):
-            for datasetRef in datasetRefs:
-                dataId = datasetRef.dataId
-                instrument = cast(str, dataId.get("instrument"))
-                if instrument is not None:
-                    if oneInstrument is not None:
-                        assert (  # type: ignore
-                            instrument == oneInstrument
-                        ), "Currently require that only one instrument is used per graph"
-                    else:
-                        oneInstrument = instrument
-                        Instrument.fromName(instrument, self.butler.registry)
+    @property
+    def resources(self) -> ExecutionResources | None:
+        return self._resources

-    def _should_assume_exists(self, quantum: Quantum, ref: DatasetRef) -> bool | None:
-        """Report whether the given dataset can be assumed to exist because
-        some previous check reported that it did.
+    @property
+    def assumeNoExistingOutputs(self) -> bool:
+        return self._assume_no_existing_outputs

-        If this is `True` for a dataset does not in fact exist anymore, that's
-        an unexpected problem that we want to raise as an exception, and
-        definitely not a case where some predicted output just wasn't produced.
-        We can't always tell the difference, but in this case we can.
+    @property
+    def raise_on_partial_outputs(self) -> bool:
+        return self._raise_on_partial_outputs

-        Parameters
-        ----------
-        quantum : `Quantum`
-            Quantum being processed.
-        ref : `lsst.daf.butler.DatasetRef`
-            Reference to the input dataset.
+    @property
+    def job_metadata(self) -> Mapping[str, int | str | float] | None:
+        return self._job_metadata

-        Returns
-        -------
-        exists : `bool` or `None`
-            `True` if this dataset is definitely an overall input, `False` if
-            some other quantum in the graph is expected to produce it, and
-            `None` if the answer could not be determined.
-        """
-        if quantum.datastore_records:
-            for datastore_record_data in quantum.datastore_records.values():
-                if ref.id in datastore_record_data.records:
-                    return True
-            return False
-        return None
+    @property
+    def skipExisting(self) -> bool:
+        return self._skip_existing
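
In short, this release converts lsst/ctrl/mpexec/singleQuantumExecutor.py into a thin forwarding shim: the real implementation now lives in lsst.pipe.base.single_quantum_executor, the old camelCase constructor arguments are mapped onto the new snake_case interface, and the old attributes are re-exposed as read-only properties. Instantiating the shim emits a FutureWarning until its planned removal after v30 (DM-51962). Below is a hypothetical migration sketch; my_butler and my_task_factory are placeholders for objects the calling code already constructs, and only the import paths and keyword renames are taken from the shim's super().__init__() call in the diff above.

# Hypothetical migration sketch. "my_butler" and "my_task_factory" are
# placeholders, not names defined by this package.

# Before: still importable in 29.2025.3200, but instantiation now emits a
# FutureWarning via the @deprecated decorator shown in the diff.
from lsst.ctrl.mpexec.singleQuantumExecutor import SingleQuantumExecutor

executor = SingleQuantumExecutor(
    butler=my_butler,
    taskFactory=my_task_factory,
    skipExistingIn=None,
    clobberOutputs=False,
)

# After: import the class from its new home and switch to the snake_case
# keywords, following the mapping in the shim's super().__init__() call.
from lsst.pipe.base.single_quantum_executor import SingleQuantumExecutor

executor = SingleQuantumExecutor(
    butler=my_butler,              # unchanged
    task_factory=my_task_factory,  # was taskFactory
    skip_existing_in=None,         # was skipExistingIn
    clobber_outputs=False,         # was clobberOutputs
)

Note that the shim's camelCase attributes (butler, taskFactory, clobberOutputs, and so on) are now getter-only properties over the new underscore-prefixed fields, so code that read them keeps working, while code that assigned to them after construction will need to pass those values through the constructor instead.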