lsst-pipe-base 30.0.1rc1__py3-none-any.whl → 30.2025.5100__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lsst/pipe/base/_instrument.py +20 -31
- lsst/pipe/base/_quantumContext.py +3 -3
- lsst/pipe/base/_status.py +10 -43
- lsst/pipe/base/_task_metadata.py +2 -2
- lsst/pipe/base/all_dimensions_quantum_graph_builder.py +3 -8
- lsst/pipe/base/automatic_connection_constants.py +1 -20
- lsst/pipe/base/cli/cmd/__init__.py +2 -18
- lsst/pipe/base/cli/cmd/commands.py +4 -149
- lsst/pipe/base/connectionTypes.py +160 -72
- lsst/pipe/base/connections.py +9 -6
- lsst/pipe/base/execution_reports.py +5 -0
- lsst/pipe/base/graph/graph.py +10 -11
- lsst/pipe/base/graph/quantumNode.py +4 -4
- lsst/pipe/base/graph_walker.py +10 -8
- lsst/pipe/base/log_capture.py +80 -40
- lsst/pipe/base/mp_graph_executor.py +15 -51
- lsst/pipe/base/pipeline.py +6 -5
- lsst/pipe/base/pipelineIR.py +8 -2
- lsst/pipe/base/pipelineTask.py +7 -5
- lsst/pipe/base/pipeline_graph/_dataset_types.py +2 -2
- lsst/pipe/base/pipeline_graph/_edges.py +22 -32
- lsst/pipe/base/pipeline_graph/_mapping_views.py +7 -4
- lsst/pipe/base/pipeline_graph/_pipeline_graph.py +7 -14
- lsst/pipe/base/pipeline_graph/expressions.py +2 -2
- lsst/pipe/base/pipeline_graph/io.py +10 -7
- lsst/pipe/base/pipeline_graph/visualization/_dot.py +12 -13
- lsst/pipe/base/pipeline_graph/visualization/_layout.py +18 -16
- lsst/pipe/base/pipeline_graph/visualization/_merge.py +7 -4
- lsst/pipe/base/pipeline_graph/visualization/_printer.py +10 -10
- lsst/pipe/base/pipeline_graph/visualization/_status_annotator.py +0 -7
- lsst/pipe/base/prerequisite_helpers.py +1 -2
- lsst/pipe/base/quantum_graph/_common.py +20 -19
- lsst/pipe/base/quantum_graph/_multiblock.py +31 -37
- lsst/pipe/base/quantum_graph/_predicted.py +13 -111
- lsst/pipe/base/quantum_graph/_provenance.py +45 -1136
- lsst/pipe/base/quantum_graph/aggregator/__init__.py +1 -0
- lsst/pipe/base/quantum_graph/aggregator/_communicators.py +289 -204
- lsst/pipe/base/quantum_graph/aggregator/_config.py +9 -87
- lsst/pipe/base/quantum_graph/aggregator/_ingester.py +12 -13
- lsst/pipe/base/quantum_graph/aggregator/_scanner.py +235 -49
- lsst/pipe/base/quantum_graph/aggregator/_structs.py +116 -6
- lsst/pipe/base/quantum_graph/aggregator/_supervisor.py +39 -29
- lsst/pipe/base/quantum_graph/aggregator/_writer.py +351 -34
- lsst/pipe/base/quantum_graph/visualization.py +1 -5
- lsst/pipe/base/quantum_graph_builder.py +8 -21
- lsst/pipe/base/quantum_graph_executor.py +13 -116
- lsst/pipe/base/quantum_graph_skeleton.py +29 -31
- lsst/pipe/base/quantum_provenance_graph.py +12 -29
- lsst/pipe/base/separable_pipeline_executor.py +3 -19
- lsst/pipe/base/single_quantum_executor.py +42 -67
- lsst/pipe/base/struct.py +0 -4
- lsst/pipe/base/testUtils.py +3 -3
- lsst/pipe/base/tests/mocks/_storage_class.py +1 -2
- lsst/pipe/base/version.py +1 -1
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/METADATA +3 -3
- lsst_pipe_base-30.2025.5100.dist-info/RECORD +125 -0
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/WHEEL +1 -1
- lsst/pipe/base/log_on_close.py +0 -76
- lsst/pipe/base/quantum_graph/aggregator/_workers.py +0 -303
- lsst/pipe/base/quantum_graph/formatter.py +0 -171
- lsst/pipe/base/quantum_graph/ingest_graph.py +0 -413
- lsst_pipe_base-30.0.1rc1.dist-info/RECORD +0 -129
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/entry_points.txt +0 -0
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/licenses/COPYRIGHT +0 -0
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/licenses/LICENSE +0 -0
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/licenses/bsd_license.txt +0 -0
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/licenses/gpl-v3.0.txt +0 -0
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/top_level.txt +0 -0
- {lsst_pipe_base-30.0.1rc1.dist-info → lsst_pipe_base-30.2025.5100.dist-info}/zip-safe +0 -0
lsst/pipe/base/quantum_graph_executor.py +13 -116

@@ -27,113 +27,23 @@
 
 from __future__ import annotations
 
-__all__ = ["
+__all__ = ["QuantumExecutor", "QuantumGraphExecutor"]
 
 from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING
-
-from lsst.daf.butler import Quantum
+from typing import TYPE_CHECKING
 
 from .quantum_reports import QuantumReport, Report
 
 if TYPE_CHECKING:
     import uuid
 
-    from lsst.daf.butler
+    from lsst.daf.butler import Quantum
 
-    from ._task_metadata import TaskMetadata
     from .graph import QuantumGraph
     from .pipeline_graph import TaskNode
     from .quantum_graph import PredictedQuantumGraph
 
 
-class QuantumExecutionResult(tuple[Quantum, QuantumReport | None]):
-    """A result struct that captures information about a single quantum's
-    execution.
-
-    Parameters
-    ----------
-    quantum : `lsst.daf.butler.Quantum`
-        Quantum that was executed.
-    report : `.quantum_reports.QuantumReport`
-        Report with basic information about the execution.
-    task_metadata : `TaskMetadata`, optional
-        Metadata saved by the task and executor during execution.
-    skipped_existing : `bool`, optional
-        If `True`, this quantum was not executed because it appeared to have
-        already been executed successfully.
-    adjusted_no_work : `bool`, optional
-        If `True`, this quantum was not executed because the
-        `PipelineTaskConnections.adjustQuanta` hook raised `NoWorkFound`.
-
-    Notes
-    -----
-    For backwards compatibility, this class is a two-element tuple that allows
-    the ``quantum`` and ``report`` attributes to be unpacked. Additional
-    regular attributes may be added by executors (but the tuple must remain
-    only two elements to enable the current unpacking interface).
-    """
-
-    def __new__(
-        cls,
-        quantum: Quantum,
-        report: QuantumReport | None,
-        *,
-        task_metadata: TaskMetadata | None = None,
-        skipped_existing: bool | None = None,
-        adjusted_no_work: bool | None = None,
-    ) -> Self:
-        return super().__new__(cls, (quantum, report))
-
-    # We need to define both __init__ and __new__ because tuple inheritance
-    # requires __new__ and numpydoc requires __init__.
-
-    def __init__(
-        self,
-        quantum: Quantum,
-        report: QuantumReport | None,
-        *,
-        task_metadata: TaskMetadata | None = None,
-        skipped_existing: bool | None = None,
-        adjusted_no_work: bool | None = None,
-    ):
-        self._task_metadata = task_metadata
-        self._skipped_existing = skipped_existing
-        self._adjusted_no_work = adjusted_no_work
-
-    @property
-    def quantum(self) -> Quantum:
-        """The quantum actually executed."""
-        return self[0]
-
-    @property
-    def report(self) -> QuantumReport | None:
-        """Structure describing the status of the execution of a quantum.
-
-        This is `None` if the implementation does not support this feature.
-        """
-        return self[1]
-
-    @property
-    def task_metadata(self) -> TaskMetadata | None:
-        """Metadata saved by the task and executor during execution."""
-        return self._task_metadata
-
-    @property
-    def skipped_existing(self) -> bool | None:
-        """If `True`, this quantum was not executed because it appeared to have
-        already been executed successfully.
-        """
-        return self._skipped_existing
-
-    @property
-    def adjusted_no_work(self) -> bool | None:
-        """If `True`, this quantum was not executed because the
-        `PipelineTaskConnections.adjustQuanta` hook raised `NoWorkFound`.
-        """
-        return self._adjusted_no_work
-
-
 class QuantumExecutor(ABC):
     """Class which abstracts execution of a single Quantum.
 
@@ -145,14 +55,8 @@ class QuantumExecutor(ABC):
 
     @abstractmethod
    def execute(
-        self,
-        task_node: TaskNode,
-        /,
-        quantum: Quantum,
-        quantum_id: uuid.UUID | None = None,
-        *,
-        log_records: ButlerLogRecords | None = None,
-    ) -> QuantumExecutionResult:
+        self, task_node: TaskNode, /, quantum: Quantum, quantum_id: uuid.UUID | None = None
+    ) -> tuple[Quantum, QuantumReport | None]:
         """Execute single quantum.
 
         Parameters
@@ -163,18 +67,15 @@ class QuantumExecutor(ABC):
             Quantum for this execution.
         quantum_id : `uuid.UUID` or `None`, optional
             The ID of the quantum to be executed.
-        log_records : `lsst.daf.butler.ButlerLogRecords`, optional
-            Container that should be used to store logs in memory before
-            writing them to the butler. This disables streaming log (since
-            we'd have to store them in memory anyway), but it permits the
-            caller to prepend logs to be stored in the butler and allows task
-            logs to be inspected by the caller after execution is complete.
 
         Returns
         -------
-
-
-
+        quantum : `~lsst.daf.butler.Quantum`
+            The quantum actually executed.
+        report : `~.quantum_reports.QuantumReport`
+            Structure describing the status of the execution of a quantum.
+            `None` is returned if implementation does not support this
+            feature.
 
         Notes
         -----
@@ -192,9 +93,7 @@ class QuantumGraphExecutor(ABC):
     """
 
     @abstractmethod
-    def execute(
-        self, graph: QuantumGraph | PredictedQuantumGraph, *, provenance_graph_file: str | None = None
-    ) -> None:
+    def execute(self, graph: QuantumGraph | PredictedQuantumGraph) -> None:
         """Execute whole graph.
 
         Implementation of this method depends on particular execution model
@@ -204,10 +103,8 @@ class QuantumGraphExecutor(ABC):
 
         Parameters
         ----------
-        graph : `.QuantumGraph`
+        graph : `.QuantumGraph`
             Execution graph.
-        provenance_graph_file : `str`, optional
-            A filename to write provenance to.
         """
         raise NotImplementedError()
 
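The net effect of these hunks is that `QuantumExecutionResult`, the `log_records` keyword, and the `provenance_graph_file` keyword are gone in 30.2025.5100: a `QuantumExecutor` returns a plain `(quantum, report)` tuple and a `QuantumGraphExecutor` receives only the graph. A minimal sketch of a custom executor written against the new abstract signature shown above; the `NoOpQuantumExecutor` class and its record-and-return behaviour are illustrative only, not part of the package:

    import uuid

    from lsst.daf.butler import Quantum
    from lsst.pipe.base.pipeline_graph import TaskNode
    from lsst.pipe.base.quantum_graph_executor import QuantumExecutor
    from lsst.pipe.base.quantum_reports import QuantumReport


    class NoOpQuantumExecutor(QuantumExecutor):
        """Hypothetical executor that only records the quanta it is asked to run."""

        def __init__(self) -> None:
            self.seen: list[uuid.UUID | None] = []

        def execute(
            self, task_node: TaskNode, /, quantum: Quantum, quantum_id: uuid.UUID | None = None
        ) -> tuple[Quantum, QuantumReport | None]:
            # New contract: return the (possibly adjusted) quantum plus an optional
            # QuantumReport; there is no QuantumExecutionResult struct and no
            # log_records argument to fill in.
            self.seen.append(quantum_id)
            return quantum, None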
lsst/pipe/base/quantum_graph_skeleton.py +29 -31

@@ -42,7 +42,7 @@ __all__ = (
 import dataclasses
 from collections import defaultdict
 from collections.abc import Iterable, Iterator, MutableMapping, Set
-from typing import TYPE_CHECKING, Any, ClassVar, Literal
+from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypeAlias
 
 import networkx
 
@@ -145,7 +145,7 @@ class PrerequisiteDatasetKey:
     is_prerequisite: ClassVar[Literal[True]] = True
 
 
-
+Key: TypeAlias = QuantumKey | TaskInitKey | DatasetKey | PrerequisiteDatasetKey
 
 
 class QuantumGraphSkeleton:
@@ -571,13 +571,12 @@ class QuantumGraphSkeleton:
     def set_dataset_ref(
         self, ref: DatasetRef, key: DatasetKey | PrerequisiteDatasetKey | None = None
     ) -> None:
-        """Associate a dataset node with a
-        instance.
+        """Associate a dataset node with a `DatasetRef` instance.
 
         Parameters
         ----------
-        ref :
-
+        ref : `DatasetRef`
+            `DatasetRef` to associate with the node.
         key : `DatasetKey` or `PrerequisiteDatasetKey`, optional
             Identifier for the graph node. If not provided, a `DatasetKey`
             is constructed from the dataset type name and data ID of ``ref``.
@@ -587,33 +586,32 @@ class QuantumGraphSkeleton:
         self._xgraph.nodes[key]["ref"] = ref
 
     def set_output_for_skip(self, ref: DatasetRef) -> None:
-        """Associate a dataset node with a
-
-
+        """Associate a dataset node with a `DatasetRef` that represents an
+        existing output in a collection where such outputs can cause a quantum
+        to be skipped.
 
         Parameters
         ----------
-        ref :
-
+        ref : `DatasetRef`
+            `DatasetRef` to associate with the node.
         """
         key = DatasetKey(ref.datasetType.name, ref.dataId.required_values)
         self._xgraph.nodes[key]["output_for_skip"] = ref
 
     def set_output_in_the_way(self, ref: DatasetRef) -> None:
-        """Associate a dataset node with a
-
+        """Associate a dataset node with a `DatasetRef` that represents an
+        existing output in the output RUN collection.
 
         Parameters
         ----------
-        ref :
-
+        ref : `DatasetRef`
+            `DatasetRef` to associate with the node.
         """
         key = DatasetKey(ref.datasetType.name, ref.dataId.required_values)
         self._xgraph.nodes[key]["output_in_the_way"] = ref
 
     def get_dataset_ref(self, key: DatasetKey | PrerequisiteDatasetKey) -> DatasetRef | None:
-        """Return the
-        node.
+        """Return the `DatasetRef` associated with the given node.
 
         This does not return "output for skip" and "output in the way"
         datasets.
@@ -625,14 +623,14 @@ class QuantumGraphSkeleton:
 
         Returns
         -------
-        ref :
+        ref : `DatasetRef` or `None`
             Dataset reference associated with the node.
         """
         return self._xgraph.nodes[key].get("ref")
 
     def get_output_for_skip(self, key: DatasetKey) -> DatasetRef | None:
-        """Return the
-
+        """Return the `DatasetRef` associated with the given node in a
+        collection where it could lead to a quantum being skipped.
 
         Parameters
         ----------
@@ -641,14 +639,14 @@ class QuantumGraphSkeleton:
 
         Returns
         -------
-        ref :
+        ref : `DatasetRef` or `None`
             Dataset reference associated with the node.
         """
         return self._xgraph.nodes[key].get("output_for_skip")
 
     def get_output_in_the_way(self, key: DatasetKey) -> DatasetRef | None:
-        """Return the
-
+        """Return the `DatasetRef` associated with the given node in the
+        output RUN collection.
 
         Parameters
         ----------
@@ -657,16 +655,16 @@ class QuantumGraphSkeleton:
 
         Returns
         -------
-        ref :
+        ref : `DatasetRef` or `None`
             Dataset reference associated with the node.
         """
         return self._xgraph.nodes[key].get("output_in_the_way")
 
     def discard_output_in_the_way(self, key: DatasetKey) -> None:
-        """Drop any
-
+        """Drop any `DatasetRef` associated with this node in the output RUN
+        collection.
 
-        Does nothing if there is no such
+        Does nothing if there is no such `DatasetRef`.
 
         Parameters
         ----------
@@ -678,8 +676,8 @@ class QuantumGraphSkeleton:
     def set_data_id(self, key: Key, data_id: DataCoordinate) -> None:
         """Set the data ID associated with a node.
 
-        This updates the data ID in any
-
+        This updates the data ID in any `DatasetRef` objects associated with
+        the node via `set_ref`, `set_output_for_skip`, or
         `set_output_in_the_way` as well, assuming it is an expanded version
         of the original data ID.
 
@@ -687,7 +685,7 @@ class QuantumGraphSkeleton:
         ----------
         key : `Key`
             Identifier for the graph node.
-        data_id :
+        data_id : `DataCoordinate`
             Data ID for the node.
         """
         state: MutableMapping[str, Any] = self._xgraph.nodes[key]
@@ -712,7 +710,7 @@ class QuantumGraphSkeleton:
 
         Returns
         -------
-        data_id :
+        data_id : `DataCoordinate`
             Expanded data ID for the node, if one is available.
 
         Raises
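The node-key union that was previously only implied by the individual key classes is now a module-level `Key` alias, so helper code can annotate against any skeleton node. A small sketch of that usage; the `attach_data_id` helper is hypothetical, while `set_data_id` and its signature are taken from the hunks above, and importing `Key` from the module assumes the alias is public:

    from lsst.daf.butler import DataCoordinate
    from lsst.pipe.base.quantum_graph_skeleton import Key, QuantumGraphSkeleton


    def attach_data_id(skeleton: QuantumGraphSkeleton, key: Key, data_id: DataCoordinate) -> None:
        """Attach an expanded data ID to any kind of skeleton node."""
        # Key covers QuantumKey, TaskInitKey, DatasetKey, and PrerequisiteDatasetKey,
        # so one annotation serves every node flavour.
        skeleton.set_data_id(key, data_id)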
lsst/pipe/base/quantum_provenance_graph.py +12 -29

@@ -79,7 +79,6 @@ from .automatic_connection_constants import (
     METADATA_OUTPUT_CONNECTION_NAME,
     METADATA_OUTPUT_STORAGE_CLASS,
     METADATA_OUTPUT_TEMPLATE,
-    PROVENANCE_DATASET_TYPE_NAME,
 )
 from .graph import QuantumGraph, QuantumNode
 
@@ -587,8 +586,8 @@ class TaskSummary(pydantic.BaseModel):
 
         Unpack the `QuantumInfo` object, sorting quanta of each status into
         the correct place in the `TaskSummary`. If looking for error messages
-        in the `
-
+        in the `Butler` logs is desired, take special care to catch issues
+        with missing logs.
 
         Parameters
         ----------
@@ -867,7 +866,7 @@ class DatasetTypeSummary(pydantic.BaseModel):
 class Summary(pydantic.BaseModel):
     """A summary of the contents of the QuantumProvenanceGraph, including
     all information on the quanta for each task and the datasets of each
-
+    `DatasetType`.
     """
 
     tasks: dict[str, TaskSummary] = pydantic.Field(default_factory=dict)
@@ -886,7 +885,7 @@ class Summary(pydantic.BaseModel):
 
         Parameters
         ----------
-        summaries :
+        summaries : `Sequence[Summary]`
             Sequence of all `Summary` objects to aggregate.
         """
         result = cls()
@@ -1246,8 +1245,8 @@ class QuantumProvenanceGraph:
         Returns
         -------
         dataset_info : `DatasetInfo`
-            The `TypedDict` with information about the
-
+            The `TypedDict` with information about the `DatasetType`-dataID
+            pair across all runs.
         """
         return self._xgraph.nodes[key]
 
@@ -1263,7 +1262,6 @@ class QuantumProvenanceGraph:
         do_store_logs : `bool`
             Store the logs in the summary dictionary.
         n_cores : `int`, optional
-            Number of cores to use.
 
         Returns
         -------
@@ -1515,22 +1513,8 @@ class QuantumProvenanceGraph:
             len(self._datasets.keys()),
         )
         if use_qbb:
-
-
-                provenance_graph_ref = butler.find_dataset(
-                    PROVENANCE_DATASET_TYPE_NAME, collections=output_run
-                )
-            except MissingDatasetTypeError:
-                pass
-            if provenance_graph_ref is not None:
-                _LOG.warning(
-                    "Cannot use QBB for metadata/log reads after provenance has been ingested; "
-                    "falling back to full butler."
-                )
-                self._butler_wrappers[output_run] = _ThreadLocalButlerWrapper.wrap_full(butler)
-            else:
-                _LOG.verbose("Using quantum-backed butler for metadata loads.")
-                self._butler_wrappers[output_run] = _ThreadLocalButlerWrapper.wrap_qbb(butler, qgraph)
+            _LOG.verbose("Using quantum-backed butler for metadata loads.")
+            self._butler_wrappers[output_run] = _ThreadLocalButlerWrapper.wrap_qbb(butler, qgraph)
         else:
             _LOG.verbose("Using full butler for metadata loads.")
             self._butler_wrappers[output_run] = _ThreadLocalButlerWrapper.wrap_full(butler)
@@ -1793,10 +1777,9 @@ class QuantumProvenanceGraph:
            successes. If "exhaustive", all metadata files will be read. If
            "lazy", only metadata files where at least one predicted output is
            missing will be read.
-
-            The
-
-            Current list of futures. Will be modified.
+        butler : `lsst.daf.butler.Butler`
+            The Butler used for this report. This should match the Butler
+            used for the run associated with the executed quantum graph.
         """
         if read_caveats == "lazy" and all(
             self.get_dataset_info(dataset_key)["runs"][output_run].produced
@@ -2011,7 +1994,7 @@ class _ThreadLocalButlerWrapper:
         full_butler : `~lsst.daf.butler.Butler`
             Full butler to draw datastore and dimension configuration from.
         qg : `QuantumGraph`
-            Quantum graph
+            Quantum graph,
 
         Returns
        -------
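These docstring fixes spell out that summary aggregation operates on a sequence of `Summary` models. A sketch of combining per-run report files; the `aggregate` method name is an assumption based on the docstring context above, the file paths are hypothetical, and `model_validate_json` assumes the usual pydantic v2 API for these models:

    from pathlib import Path

    from lsst.pipe.base.quantum_provenance_graph import Summary

    report_paths = [Path("step1_report.json"), Path("step2_report.json")]  # hypothetical files
    summaries = [Summary.model_validate_json(p.read_text()) for p in report_paths]
    combined = Summary.aggregate(summaries)  # classmethod whose docstring appears above
    for task_label, task_summary in combined.tasks.items():
        print(task_label, task_summary)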
lsst/pipe/base/separable_pipeline_executor.py +3 -19

@@ -40,8 +40,7 @@ from collections.abc import Iterable
 from typing import Any
 
 import lsst.resources
-from lsst.daf.butler import Butler
-from lsst.daf.butler._rubin.temporary_for_ingest import TemporaryForIngest
+from lsst.daf.butler import Butler
 
 from ._quantumContext import ExecutionResources
 from .all_dimensions_quantum_graph_builder import AllDimensionsQuantumGraphBuilder
@@ -79,7 +78,7 @@ class SeparablePipelineExecutor:
     clobber_output : `bool`, optional
         If set, the pipeline execution overwrites existing output files.
         Otherwise, any conflict between existing and new outputs is an error.
-    skip_existing_in :
+    skip_existing_in : iterable [`str`], optional
        If not empty, the pipeline execution searches the listed collections
        for existing outputs, and skips any quanta that have run to completion
        (or have no work to do). Otherwise, all tasks are attempted (subject to
@@ -363,8 +362,6 @@ class SeparablePipelineExecutor:
         fail_fast: bool = False,
         graph_executor: QuantumGraphExecutor | None = None,
         num_proc: int = 1,
-        *,
-        provenance_dataset_ref: DatasetRef | None = None,
     ) -> None:
         """Run a pipeline in the form of a prepared quantum graph.
 
@@ -387,14 +384,6 @@ class SeparablePipelineExecutor:
             The number of processes that can be used to run the pipeline. The
             default value ensures that no subprocess is created. Only used with
             the default graph executor.
-        provenance_dataset_ref : `lsst.daf.butler.DatasetRef`, optional
-            Dataset that should be used to save provenance. Provenance is only
-            supported when running in a single process (at least for the
-            default quantum executor), and should not be used with
-            ``skip_existing_in=[output_run]`` when retrying a previous
-            execution attempt. The caller is responsible for registering the
-            dataset type and for ensuring that the dimensions of this dataset
-            do not lead to uniqueness conflicts.
         """
         if not graph_executor:
             quantum_executor = SingleQuantumExecutor(
@@ -415,9 +404,4 @@ class SeparablePipelineExecutor:
         # forked processes.
         self._butler.registry.resetConnectionPool()
 
-
-            with TemporaryForIngest(self._butler, provenance_dataset_ref) as temporary:
-                graph_executor.execute(graph, provenance_graph_file=temporary.ospath)
-                temporary.ingest()
-        else:
-            graph_executor.execute(graph)
+        graph_executor.execute(graph)
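With `provenance_dataset_ref` and the `TemporaryForIngest` branch removed, running a prepared graph reduces to a single `graph_executor.execute(graph)` call. A rough sketch of the simplified calling pattern; the `run_pipeline` method name and the Butler setup are assumptions drawn from the class docstring above rather than verified against the released API, and only `fail_fast`, `graph_executor`, and `num_proc` appear in the edited signature:

    from lsst.daf.butler import Butler
    from lsst.pipe.base import QuantumGraph
    from lsst.pipe.base.separable_pipeline_executor import SeparablePipelineExecutor


    def run_prepared_graph(qgraph: QuantumGraph, repo: str = "repo") -> None:
        """Run an already-built quantum graph with the simplified API."""
        butler = Butler(repo, writeable=True)  # hypothetical repository
        executor = SeparablePipelineExecutor(butler, skip_existing_in=["u/example/run"])
        # provenance_dataset_ref is no longer accepted; the graph executor gets only the graph.
        executor.run_pipeline(qgraph, fail_fast=True, num_proc=4)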