lsst-ctrl-mpexec 29.2025.2400__py3-none-any.whl → 29.2025.3200__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. lsst/ctrl/mpexec/__init__.py +1 -2
  2. lsst/ctrl/mpexec/cli/butler_factory.py +464 -0
  3. lsst/ctrl/mpexec/cli/cmd/commands.py +7 -1
  4. lsst/ctrl/mpexec/cli/opt/optionGroups.py +0 -13
  5. lsst/ctrl/mpexec/cli/opt/options.py +0 -46
  6. lsst/ctrl/mpexec/cli/script/build.py +49 -36
  7. lsst/ctrl/mpexec/cli/script/pre_exec_init_qbb.py +3 -1
  8. lsst/ctrl/mpexec/cli/script/qgraph.py +0 -25
  9. lsst/ctrl/mpexec/cli/script/run.py +2 -1
  10. lsst/ctrl/mpexec/cli/script/run_qbb.py +2 -1
  11. lsst/ctrl/mpexec/cmdLineFwk.py +30 -556
  12. lsst/ctrl/mpexec/execFixupDataId.py +9 -101
  13. lsst/ctrl/mpexec/executionGraphFixup.py +12 -37
  14. lsst/ctrl/mpexec/log_capture.py +9 -195
  15. lsst/ctrl/mpexec/mpGraphExecutor.py +60 -696
  16. lsst/ctrl/mpexec/quantumGraphExecutor.py +20 -90
  17. lsst/ctrl/mpexec/reports.py +30 -206
  18. lsst/ctrl/mpexec/separablePipelineExecutor.py +12 -263
  19. lsst/ctrl/mpexec/showInfo.py +2 -2
  20. lsst/ctrl/mpexec/simple_pipeline_executor.py +11 -590
  21. lsst/ctrl/mpexec/singleQuantumExecutor.py +75 -532
  22. lsst/ctrl/mpexec/taskFactory.py +12 -38
  23. lsst/ctrl/mpexec/version.py +1 -1
  24. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/METADATA +1 -1
  25. lsst_ctrl_mpexec-29.2025.3200.dist-info/RECORD +51 -0
  26. lsst/ctrl/mpexec/dotTools.py +0 -100
  27. lsst_ctrl_mpexec-29.2025.2400.dist-info/RECORD +0 -51
  28. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/WHEEL +0 -0
  29. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/entry_points.txt +0 -0
  30. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/licenses/COPYRIGHT +0 -0
  31. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/licenses/LICENSE +0 -0
  32. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/licenses/bsd_license.txt +0 -0
  33. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/licenses/gpl-v3.0.txt +0 -0
  34. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/top_level.txt +0 -0
  35. {lsst_ctrl_mpexec-29.2025.2400.dist-info → lsst_ctrl_mpexec-29.2025.3200.dist-info}/zip-safe +0 -0
@@ -25,108 +25,16 @@
25
25
  # You should have received a copy of the GNU General Public License
26
26
  # along with this program. If not, see <http://www.gnu.org/licenses/>.
27
27
 
28
- __all__ = ["ExecutionGraphFixup"]
28
+ __all__ = ("ExecFixupDataId",)
29
29
 
30
- import contextlib
31
- import itertools
32
- from collections import defaultdict
33
- from collections.abc import Sequence
34
- from typing import Any
30
+ import warnings
35
31
 
36
- import networkx as nx
32
+ from lsst.pipe.base.exec_fixup_data_id import ExecFixupDataId
37
33
 
38
- from lsst.pipe.base import QuantumGraph, QuantumNode
34
+ # TODO[DM-51962]: Remove this module.
39
35
 
40
- from .executionGraphFixup import ExecutionGraphFixup
41
-
42
-
43
- class ExecFixupDataId(ExecutionGraphFixup):
44
- """Implementation of ExecutionGraphFixup for ordering of tasks based
45
- on DataId values.
46
-
47
- This class is a trivial implementation mostly useful as an example,
48
- though it can be used to make actual fixup instances by defining
49
- a method that instantiates it, e.g.::
50
-
51
- # lsst/ap/verify/ci_fixup.py
52
-
53
- from lsst.ctrl.mpexec.execFixupDataId import ExecFixupDataId
54
-
55
-
56
- def assoc_fixup():
57
- return ExecFixupDataId(
58
- taskLabel="ap_assoc", dimensions=("visit", "detector")
59
- )
60
-
61
- and then executing pipetask::
62
-
63
- pipetask run --graph-fixup=lsst.ap.verify.ci_fixup.assoc_fixup ...
64
-
65
- This will add new dependencies between quanta executed by the task with
66
- label "ap_assoc". Quanta with higher visit number will depend on quanta
67
- with lower visit number and their execution will wait until lower visit
68
- number finishes.
69
-
70
- Parameters
71
- ----------
72
- taskLabel : `str`
73
- The label of the task for which to add dependencies.
74
- dimensions : `str` or sequence [`str`]
75
- One or more dimension names, quanta execution will be ordered
76
- according to values of these dimensions.
77
- reverse : `bool`, optional
78
- If `False` (default) then quanta with higher values of dimensions
79
- will be executed after quanta with lower values, otherwise the order
80
- is reversed.
81
- """
82
-
83
- def __init__(self, taskLabel: str, dimensions: str | Sequence[str], reverse: bool = False):
84
- self.taskLabel = taskLabel
85
- self.dimensions = dimensions
86
- self.reverse = reverse
87
- if isinstance(self.dimensions, str):
88
- self.dimensions = (self.dimensions,)
89
- else:
90
- self.dimensions = tuple(self.dimensions)
91
-
92
- def _key(self, qnode: QuantumNode) -> tuple[Any, ...]:
93
- """Produce comparison key for quantum data.
94
-
95
- Parameters
96
- ----------
97
- qnode : `QuantumNode`
98
- An individual node in a `~lsst.pipe.base.QuantumGraph`
99
-
100
- Returns
101
- -------
102
- key : `tuple`
103
- """
104
- dataId = qnode.quantum.dataId
105
- assert dataId is not None, "Quantum DataId cannot be None"
106
- key = tuple(dataId[dim] for dim in self.dimensions)
107
- return key
108
-
109
- def fixupQuanta(self, graph: QuantumGraph) -> QuantumGraph:
110
- taskDef = graph.findTaskDefByLabel(self.taskLabel)
111
- if taskDef is None:
112
- raise ValueError(f"Cannot find task with label {self.taskLabel}")
113
- quanta = list(graph.getNodesForTask(taskDef))
114
- keyQuanta = defaultdict(list)
115
- for q in quanta:
116
- key = self._key(q)
117
- keyQuanta[key].append(q)
118
- keys = sorted(keyQuanta.keys(), reverse=self.reverse)
119
- networkGraph = graph.graph
120
-
121
- for prev_key, key in itertools.pairwise(keys):
122
- for prev_node in keyQuanta[prev_key]:
123
- for node in keyQuanta[key]:
124
- # remove any existing edges between the two nodes, but
125
- # don't fail if there are not any. Both directions need
126
- tried because in a directed graph, order matters
127
- for edge in ((node, prev_node), (prev_node, node)):
128
- with contextlib.suppress(nx.NetworkXException):
129
- networkGraph.remove_edge(*edge)
130
-
131
- networkGraph.add_edge(prev_node, node)
132
- return graph
36
+ warnings.warn(
37
+ "The execFixupDataId module has moved to lsst.pipe.base.exec_fixup_data_id. "
38
+ "This forwarding shim will be removed after v30.",
39
+ category=FutureWarning,
40
+ )
@@ -25,45 +25,20 @@
25
25
  # You should have received a copy of the GNU General Public License
26
26
  # along with this program. If not, see <http://www.gnu.org/licenses/>.
27
27
 
28
- __all__ = ["ExecutionGraphFixup"]
28
+ __all__ = ("ExecutionGraphFixup",)
29
29
 
30
- from abc import ABC, abstractmethod
30
+ from deprecated.sphinx import deprecated
31
31
 
32
- from lsst.pipe.base import QuantumGraph
32
+ import lsst.pipe.base.execution_graph_fixup
33
33
 
34
+ # TODO[DM-51962]: Remove this module.
34
35
 
35
- class ExecutionGraphFixup(ABC):
36
- """Interface for classes which update quantum graphs before execution.
37
36
 
38
- Primary goal of this class is to modify quanta dependencies which may
39
- not be possible to reflect in a quantum graph using standard tools.
40
- One known use case for that is to guarantee particular execution order
41
- of visits in CI jobs for cases when outcome depends on the processing
42
- order of visits (e.g. AP association pipeline).
43
-
44
- Instances of this class receive pre-ordered sequence of quanta
45
- (`~lsst.pipe.base.QuantumGraph` instances) and they are allowed to
46
- modify quanta data in place, for example update ``dependencies`` field to
47
- add additional dependencies. Returned list of quanta will be re-ordered
48
- once again by the graph executor to reflect new dependencies.
49
- """
50
-
51
- @abstractmethod
52
- def fixupQuanta(self, graph: QuantumGraph) -> QuantumGraph:
53
- """Update quanta in a graph.
54
-
55
- Potentially anything in the graph could be changed if it does not
56
- break executor assumptions. If modifications result in a dependency
57
- cycle the executor will raise an exception.
58
-
59
- Parameters
60
- ----------
61
- graph : QuantumGraph
62
- Quantum Graph that will be executed by the executor.
63
-
64
- Returns
65
- -------
66
- graph : QuantumGraph
67
- Modified graph.
68
- """
69
- raise NotImplementedError
37
+ @deprecated(
38
+ "The ExecutionGraphFixup class has moved to lsst.pipe.base.execution_graph_fixup. "
39
+ "This forwarding shim will be removed after v30.",
40
+ version="v30",
41
+ category=FutureWarning,
42
+ )
43
+ class ExecutionGraphFixup(lsst.pipe.base.execution_graph_fixup.ExecutionGraphFixup): # noqa: D101
44
+ pass
@@ -25,202 +25,16 @@
25
25
  # You should have received a copy of the GNU General Public License
26
26
  # along with this program. If not, see <http://www.gnu.org/licenses/>.
27
27
 
28
- from __future__ import annotations
28
+ __all__ = ("LogCapture",)
29
29
 
30
- __all__ = ["LogCapture"]
30
+ import warnings
31
31
 
32
- import logging
33
- import os
34
- import shutil
35
- import tempfile
36
- from collections.abc import Iterator
37
- from contextlib import contextmanager, suppress
38
- from logging import FileHandler
32
+ from lsst.pipe.base.log_capture import LogCapture
39
33
 
40
- from lsst.daf.butler import Butler, FileDataset, LimitedButler, Quantum
41
- from lsst.daf.butler.logging import ButlerLogRecordHandler, ButlerLogRecords, ButlerMDC, JsonLogFormatter
42
- from lsst.pipe.base import InvalidQuantumError
43
- from lsst.pipe.base.pipeline_graph import TaskNode
34
+ # TODO[DM-51962]: Remove this module.
44
35
 
45
- _LOG = logging.getLogger(__name__)
46
-
47
-
48
- class _LogCaptureFlag:
49
- """Simple flag to enable/disable log-to-butler saving."""
50
-
51
- store: bool = True
52
-
53
-
54
- class LogCapture:
55
- """Class handling capture of logging messages and their export to butler.
56
-
57
- Parameters
58
- ----------
59
- butler : `~lsst.daf.butler.LimitedButler`
60
- Data butler with limited API.
61
- full_butler : `~lsst.daf.butler.Butler` or `None`
62
- Data butler with full API, or `None` if full Butler is not available.
63
- If not none, then this must be the same instance as ``butler``.
64
- """
65
-
66
- stream_json_logs = True
67
- """If True each log record is written to a temporary file and ingested
68
- when quantum completes. If False the records are accumulated in memory
69
- and stored in butler on quantum completion. If full butler is not available
70
- then temporary file is not used."""
71
-
72
- def __init__(
73
- self,
74
- butler: LimitedButler,
75
- full_butler: Butler | None,
76
- ):
77
- self.butler = butler
78
- self.full_butler = full_butler
79
-
80
- @classmethod
81
- def from_limited(cls, butler: LimitedButler) -> LogCapture:
82
- return cls(butler, None)
83
-
84
- @classmethod
85
- def from_full(cls, butler: Butler) -> LogCapture:
86
- return cls(butler, butler)
87
-
88
- @contextmanager
89
- def capture_logging(self, task_node: TaskNode, /, quantum: Quantum) -> Iterator[_LogCaptureFlag]:
90
- """Configure logging system to capture logs for execution of this task.
91
-
92
- Parameters
93
- ----------
94
- task_node : `~lsst.pipe.base.pipeline_graph.TaskNode`
95
- The task definition.
96
- quantum : `~lsst.daf.butler.Quantum`
97
- Single Quantum instance.
98
-
99
- Notes
100
- -----
101
- Expected to be used as a context manager to ensure that logging
102
- records are inserted into the butler once the quantum has been
103
- executed:
104
-
105
- .. code-block:: py
106
-
107
- with self.capture_logging(task_node, quantum):
108
- # Run quantum and capture logs.
109
-
110
- Ths method can also setup logging to attach task- or
111
- quantum-specific information to log messages. Potentially this can
112
- take into account some info from task configuration as well.
113
- """
114
- # include quantum dataId and task label into MDC
115
- mdc = {"LABEL": task_node.label, "RUN": ""}
116
- if quantum.dataId:
117
- mdc["LABEL"] += f":{quantum.dataId}"
118
- if self.full_butler is not None:
119
- mdc["RUN"] = self.full_butler.run or ""
120
- ctx = _LogCaptureFlag()
121
- log_dataset_name = (
122
- task_node.log_output.dataset_type_name if task_node.log_output is not None else None
123
- )
124
-
125
- # Add a handler to the root logger to capture execution log output.
126
- if log_dataset_name is not None:
127
- # Either accumulate into ButlerLogRecords or stream JSON records to
128
- # file and ingest that (ingest is possible only with full butler).
129
- if self.stream_json_logs and self.full_butler is not None:
130
- # Create the log file in a temporary directory rather than
131
- # creating a temporary file. This is necessary because
132
- # temporary files are created with restrictive permissions
133
- # and during file ingest these permissions persist in the
134
- # datastore. Using a temp directory allows us to create
135
- # a file with umask default permissions.
136
- tmpdir = tempfile.mkdtemp(prefix="butler-temp-logs-")
137
-
138
- # Construct a file to receive the log records and "touch" it.
139
- log_file = os.path.join(tmpdir, f"butler-log-{task_node.label}.json")
140
- with open(log_file, "w"):
141
- pass
142
- log_handler_file = FileHandler(log_file)
143
- log_handler_file.setFormatter(JsonLogFormatter())
144
- logging.getLogger().addHandler(log_handler_file)
145
-
146
- try:
147
- with ButlerMDC.set_mdc(mdc):
148
- yield ctx
149
- finally:
150
- # Ensure that the logs are stored in butler.
151
- logging.getLogger().removeHandler(log_handler_file)
152
- log_handler_file.close()
153
- if ctx.store:
154
- self._ingest_log_records(quantum, log_dataset_name, log_file)
155
- shutil.rmtree(tmpdir, ignore_errors=True)
156
-
157
- else:
158
- log_handler_memory = ButlerLogRecordHandler()
159
- logging.getLogger().addHandler(log_handler_memory)
160
-
161
- try:
162
- with ButlerMDC.set_mdc(mdc):
163
- yield ctx
164
- finally:
165
- # Ensure that the logs are stored in butler.
166
- logging.getLogger().removeHandler(log_handler_memory)
167
- if ctx.store:
168
- self._store_log_records(quantum, log_dataset_name, log_handler_memory)
169
- log_handler_memory.records.clear()
170
-
171
- else:
172
- with ButlerMDC.set_mdc(mdc):
173
- yield ctx
174
-
175
- def _store_log_records(
176
- self, quantum: Quantum, dataset_type: str, log_handler: ButlerLogRecordHandler
177
- ) -> None:
178
- # DatasetRef has to be in the Quantum outputs, can lookup by name.
179
- try:
180
- [ref] = quantum.outputs[dataset_type]
181
- except LookupError as exc:
182
- raise InvalidQuantumError(
183
- f"Quantum outputs is missing log output dataset type {dataset_type};"
184
- " this could happen due to inconsistent options between QuantumGraph generation"
185
- " and execution"
186
- ) from exc
187
-
188
- self.butler.put(log_handler.records, ref)
189
-
190
- def _ingest_log_records(self, quantum: Quantum, dataset_type: str, filename: str) -> None:
191
- # If we are logging to an external file we must always try to
192
- # close it.
193
- assert self.full_butler is not None, "Expected to have full butler for ingest"
194
- ingested = False
195
- try:
196
- # DatasetRef has to be in the Quantum outputs, can lookup by name.
197
- try:
198
- [ref] = quantum.outputs[dataset_type]
199
- except LookupError as exc:
200
- raise InvalidQuantumError(
201
- f"Quantum outputs is missing log output dataset type {dataset_type};"
202
- " this could happen due to inconsistent options between QuantumGraph generation"
203
- " and execution"
204
- ) from exc
205
-
206
- # Need to ingest this file directly into butler.
207
- dataset = FileDataset(path=filename, refs=ref)
208
- try:
209
- self.full_butler.ingest(dataset, transfer="move")
210
- ingested = True
211
- except NotImplementedError:
212
- # Some datastores can't receive files (e.g. in-memory datastore
213
- # when testing), we store empty list for those just to have a
214
- # dataset. Alternative is to read the file as a
215
- # ButlerLogRecords object and put it.
216
- _LOG.info(
217
- "Log records could not be stored in this butler because the"
218
- " datastore can not ingest files, empty record list is stored instead."
219
- )
220
- records = ButlerLogRecords.from_records([])
221
- self.full_butler.put(records, ref)
222
- finally:
223
- # remove file if it is not ingested
224
- if not ingested:
225
- with suppress(OSError):
226
- os.remove(filename)
36
+ warnings.warn(
37
+ "The log_capture module has moved to lsst.pipe.base.log_capture. "
38
+ "This forwarding shim will be removed after v30.",
39
+ category=FutureWarning,
40
+ )