lsst-ctrl-mpexec 29.2025.3400-py3-none-any.whl → 29.2025.3600-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lsst/ctrl/mpexec/__init__.py +0 -1
- lsst/ctrl/mpexec/cli/butler_factory.py +253 -95
- lsst/ctrl/mpexec/cli/cmd/commands.py +2 -2
- lsst/ctrl/mpexec/cli/opt/optionGroups.py +0 -1
- lsst/ctrl/mpexec/cli/opt/options.py +0 -7
- lsst/ctrl/mpexec/cli/script/pre_exec_init_qbb.py +25 -12
- lsst/ctrl/mpexec/cli/script/qgraph.py +177 -89
- lsst/ctrl/mpexec/cli/script/run.py +211 -99
- lsst/ctrl/mpexec/cli/script/run_qbb.py +166 -31
- lsst/ctrl/mpexec/cli/utils.py +49 -0
- lsst/ctrl/mpexec/showInfo.py +17 -15
- lsst/ctrl/mpexec/version.py +1 -1
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/METADATA +1 -1
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/RECORD +22 -23
- lsst/ctrl/mpexec/cmdLineFwk.py +0 -534
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/WHEEL +0 -0
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/entry_points.txt +0 -0
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/licenses/COPYRIGHT +0 -0
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/licenses/LICENSE +0 -0
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/licenses/bsd_license.txt +0 -0
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/licenses/gpl-v3.0.txt +0 -0
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/top_level.txt +0 -0
- {lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/zip-safe +0 -0
lsst/ctrl/mpexec/cmdLineFwk.py
DELETED
@@ -1,534 +0,0 @@
-# This file is part of ctrl_mpexec.
-#
-# Developed for the LSST Data Management System.
-# This product includes software developed by the LSST Project
-# (http://www.lsst.org).
-# See the COPYRIGHT file at the top-level directory of this distribution
-# for details of code ownership.
-#
-# This software is dual licensed under the GNU General Public License and also
-# under a 3-clause BSD license. Recipients may choose which of these licenses
-# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
-# respectively. If you choose the GPL option then the following text applies
-# (but note that there is still no warranty even if you opt for BSD instead):
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""Module defining CmdLineFwk class and related methods."""
-
-from __future__ import annotations
-
-__all__ = ["CmdLineFwk"]
-
-import logging
-import pickle
-from collections.abc import Mapping
-from types import SimpleNamespace
-
-import astropy.units as u
-from astropy.table import Table
-
-import lsst.utils.timer
-from lsst.daf.butler import (
-    Butler,
-    Config,
-    DatasetType,
-    DimensionConfig,
-    DimensionUniverse,
-    LimitedButler,
-    Quantum,
-    QuantumBackedButler,
-)
-from lsst.pipe.base import ExecutionResources, QuantumGraph, TaskFactory
-from lsst.pipe.base.all_dimensions_quantum_graph_builder import AllDimensionsQuantumGraphBuilder
-from lsst.pipe.base.dot_tools import graph2dot
-from lsst.pipe.base.execution_graph_fixup import ExecutionGraphFixup
-from lsst.pipe.base.mermaid_tools import graph2mermaid
-from lsst.pipe.base.mp_graph_executor import MPGraphExecutor
-from lsst.pipe.base.quantum_reports import Report
-from lsst.pipe.base.single_quantum_executor import SingleQuantumExecutor
-from lsst.resources import ResourcePath
-from lsst.utils import doImportType
-from lsst.utils.logging import VERBOSE, getLogger
-from lsst.utils.threads import disable_implicit_threading
-
-from ._pipeline_graph_factory import PipelineGraphFactory
-from .cli.butler_factory import ButlerFactory
-from .preExecInit import PreExecInit, PreExecInitLimited
-
-_LOG = getLogger(__name__)
-
-
-class _QBBFactory:
-    """Class which is a callable for making QBB instances.
-
-    This class is also responsible for reconstructing correct dimension
-    universe after unpickling. When pickling multiple things that require
-    dimension universe, this class must be unpickled first. The logic in
-    MPGraphExecutor ensures that SingleQuantumExecutor is unpickled first in
-    the subprocess, which causes unpickling of this class.
-    """
-
-    def __init__(
-        self, butler_config: Config, dimensions: DimensionUniverse, dataset_types: Mapping[str, DatasetType]
-    ):
-        self.butler_config = butler_config
-        self.dimensions = dimensions
-        self.dataset_types = dataset_types
-
-    def __call__(self, quantum: Quantum) -> LimitedButler:
-        """Return freshly initialized `~lsst.daf.butler.QuantumBackedButler`.
-
-        Factory method to create QuantumBackedButler instances.
-        """
-        return QuantumBackedButler.initialize(
-            config=self.butler_config,
-            quantum=quantum,
-            dimensions=self.dimensions,
-            dataset_types=self.dataset_types,
-        )
-
-    @classmethod
-    def _unpickle(
-        cls, butler_config: Config, dimensions_config: DimensionConfig | None, dataset_types_pickle: bytes
-    ) -> _QBBFactory:
-        universe = DimensionUniverse(dimensions_config)
-        dataset_types = pickle.loads(dataset_types_pickle)
-        return _QBBFactory(butler_config, universe, dataset_types)
-
-    def __reduce__(self) -> tuple:
-        # If dimension universe is not default one, we need to dump/restore
-        # its config.
-        config = self.dimensions.dimensionConfig
-        default = DimensionConfig()
-        # Only send configuration to other side if it is non-default, default
-        # will be instantiated from config=None.
-        if (config["namespace"], config["version"]) != (default["namespace"], default["version"]):
-            dimension_config = config
-        else:
-            dimension_config = None
-        # Dataset types need to be unpickled only after universe is made.
-        dataset_types_pickle = pickle.dumps(self.dataset_types)
-        return (self._unpickle, (self.butler_config, dimension_config, dataset_types_pickle))
-
-
-class CmdLineFwk:
-    """PipelineTask framework which executes tasks from command line.
-
-    In addition to executing tasks this activator provides additional methods
-    for task management like dumping configuration or execution chain.
-    """
-
-    MP_TIMEOUT = 3600 * 24 * 30  # Default timeout (sec) for multiprocessing
-
-    def makeGraph(
-        self, pipeline_graph_factory: PipelineGraphFactory | None, args: SimpleNamespace
-    ) -> QuantumGraph | None:
-        """Build a graph from command line arguments.
-
-        Parameters
-        ----------
-        pipeline_graph_factory : `PipelineGraphFactory`
-            Factory that holds a pipeline and can produce a pipeline graph.
-            Must be ``None`` if and only if graph is read from a file.
-        args : `types.SimpleNamespace`
-            Parsed command line.
-
-        Returns
-        -------
-        graph : `~lsst.pipe.base.QuantumGraph` or `None`
-            If resulting graph is empty then `None` is returned.
-        """
-        # make sure that --extend-run always enables --skip-existing
-        if args.extend_run:
-            args.skip_existing = True
-
-        butler, collections, run = ButlerFactory.make_butler_and_collections(args)
-
-        if args.skip_existing and run:
-            args.skip_existing_in += (run,)
-
-        if args.qgraph:
-            # click passes empty tuple as default value for qgraph_node_id
-            nodes = args.qgraph_node_id or None
-            qgraph = QuantumGraph.loadUri(args.qgraph, butler.dimensions, nodes=nodes, graphID=args.qgraph_id)
-
-            # pipeline can not be provided in this case
-            if pipeline_graph_factory:
-                raise ValueError(
-                    "Pipeline must not be given when quantum graph is read from "
-                    f"file: {bool(pipeline_graph_factory)}"
-                )
-            if args.show_qgraph_header:
-                print(QuantumGraph.readHeader(args.qgraph))
-        else:
-            if pipeline_graph_factory is None:
-                raise ValueError("Pipeline must be given when quantum graph is not read from file.")
-            # We can't resolve the pipeline graph if we're mocking until after
-            # we've done the mocking (and the QG build will resolve on its own
-            # anyway).
-            pipeline_graph = pipeline_graph_factory(resolve=False)
-            if args.mock:
-                from lsst.pipe.base.tests.mocks import mock_pipeline_graph
-
-                pipeline_graph = mock_pipeline_graph(
-                    pipeline_graph,
-                    unmocked_dataset_types=args.unmocked_dataset_types,
-                    force_failures=args.mock_failure,
-                )
-            data_id_tables = []
-            for table_file in args.data_id_table:
-                with ResourcePath(table_file).as_local() as local_path:
-                    table = Table.read(local_path.ospath)
-                    # Add the filename to the metadata for more logging
-                    # information down in the QG builder.
-                    table.meta["filename"] = table_file
-                    data_id_tables.append(table)
-            # make execution plan (a.k.a. DAG) for pipeline
-            graph_builder = AllDimensionsQuantumGraphBuilder(
-                pipeline_graph,
-                butler,
-                where=args.data_query or "",
-                skip_existing_in=args.skip_existing_in if args.skip_existing_in is not None else (),
-                clobber=args.clobber_outputs,
-                dataset_query_constraint=args.dataset_query_constraint,
-                input_collections=collections,
-                output_run=run,
-                data_id_tables=data_id_tables,
-            )
-            # accumulate metadata
-            metadata = {
-                "input": args.input,
-                "output": args.output,
-                "butler_argument": args.butler_config,
-                "output_run": run,
-                "extend_run": args.extend_run,
-                "skip_existing_in": args.skip_existing_in,
-                "skip_existing": args.skip_existing,
-                "data_query": args.data_query or "",
-            }
-            assert run is not None, "Butler output run collection must be defined"
-            qgraph = graph_builder.build(metadata, attach_datastore_records=args.qgraph_datastore_records)
-            if args.show_qgraph_header:
-                qgraph.buildAndPrintHeader()
-
-        if len(qgraph) == 0:
-            # Nothing to do.
-            return None
-        self._summarize_qgraph(qgraph)
-
-        if args.save_qgraph:
-            _LOG.verbose("Writing QuantumGraph to %r.", args.save_qgraph)
-            qgraph.saveUri(args.save_qgraph)
-
-        if args.qgraph_dot:
-            _LOG.verbose("Writing quantum graph DOT visualization to %r.", args.qgraph_dot)
-            graph2dot(qgraph, args.qgraph_dot)
-
-        if args.qgraph_mermaid:
-            _LOG.verbose("Writing quantum graph Mermaid visualization to %r.", args.qgraph_mermaid)
-            graph2mermaid(qgraph, args.qgraph_mermaid)
-
-        return qgraph
-
-    def _make_execution_resources(self, args: SimpleNamespace) -> ExecutionResources:
-        """Construct the execution resource class from arguments.
-
-        Parameters
-        ----------
-        args : `types.SimpleNamespace`
-            Parsed command line.
-
-        Returns
-        -------
-        resources : `~lsst.pipe.base.ExecutionResources`
-            The resources available to each quantum.
-        """
-        return ExecutionResources(
-            num_cores=args.cores_per_quantum, max_mem=args.memory_per_quantum, default_mem_units=u.MB
-        )
-
-    def runPipeline(
-        self,
-        graph: QuantumGraph,
-        taskFactory: TaskFactory,
-        args: SimpleNamespace,
-        butler: Butler | None = None,
-    ) -> None:
-        """Execute complete QuantumGraph.
-
-        Parameters
-        ----------
-        graph : `~lsst.pipe.base.QuantumGraph`
-            Execution graph.
-        taskFactory : `~lsst.pipe.base.TaskFactory`
-            Task factory.
-        args : `types.SimpleNamespace`
-            Parsed command line.
-        butler : `~lsst.daf.butler.Butler`, optional
-            Data Butler instance, if not defined then new instance is made
-            using command line options.
-        """
-        if not args.enable_implicit_threading:
-            disable_implicit_threading()
-
-        # Check that output run defined on command line is consistent with
-        # quantum graph.
-        if args.output_run and graph.metadata:
-            graph_output_run = graph.metadata.get("output_run", args.output_run)
-            if graph_output_run != args.output_run:
-                raise ValueError(
-                    f"Output run defined on command line ({args.output_run}) has to be "
-                    f"identical to graph metadata ({graph_output_run}). "
-                    "To update graph metadata run `pipetask update-graph-run` command."
-                )
-
-        # Make sure that --extend-run always enables --skip-existing,
-        # clobbering should be disabled if --extend-run is not specified.
-        if args.extend_run:
-            args.skip_existing = True
-        else:
-            args.clobber_outputs = False
-
-        # Make butler instance. QuantumGraph should have an output run defined,
-        # but we ignore it here and let command line decide actual output run.
-        if butler is None:
-            butler = ButlerFactory.make_write_butler(args, graph.pipeline_graph)
-
-        if args.skip_existing:
-            args.skip_existing_in += (butler.run,)
-
-        # Enable lsstDebug debugging. Note that this is done once in the
-        # main process before PreExecInit and it is also repeated before
-        # running each task in SingleQuantumExecutor (which may not be
-        # needed if `multiprocessing` always uses fork start method).
-        if args.enableLsstDebug:
-            try:
-                _LOG.debug("Will try to import debug.py")
-                import debug  # type: ignore # noqa:F401
-            except ImportError:
-                _LOG.warning("No 'debug' module found.")
-
-        # Save all InitOutputs, configs, etc.
-        preExecInit = PreExecInit(butler, taskFactory, extendRun=args.extend_run)
-        preExecInit.initialize(
-            graph,
-            saveInitOutputs=not args.skip_init_writes,
-            registerDatasetTypes=args.register_dataset_types,
-            saveVersions=not args.no_versions,
-        )
-
-        if not args.init_only:
-            graphFixup = self._importGraphFixup(args)
-            resources = self._make_execution_resources(args)
-            quantumExecutor = SingleQuantumExecutor(
-                butler=butler,
-                task_factory=taskFactory,
-                skip_existing_in=args.skip_existing_in,
-                clobber_outputs=args.clobber_outputs,
-                enable_lsst_debug=args.enableLsstDebug,
-                resources=resources,
-                raise_on_partial_outputs=args.raise_on_partial_outputs,
-            )
-
-            timeout = self.MP_TIMEOUT if args.timeout is None else args.timeout
-            executor = MPGraphExecutor(
-                num_proc=args.processes,
-                timeout=timeout,
-                start_method=args.start_method,
-                quantum_executor=quantumExecutor,
-                fail_fast=args.fail_fast,
-                pdb=args.pdb,
-                execution_graph_fixup=graphFixup,
-            )
-            # Have to reset connection pool to avoid sharing connections with
-            # forked processes.
-            butler.registry.resetConnectionPool()
-            try:
-                with lsst.utils.timer.profile(args.profile, _LOG):
-                    executor.execute(graph)
-            finally:
-                if args.summary:
-                    report = executor.getReport()
-                    if report:
-                        with open(args.summary, "w") as out:
-                            # Do not save fields that are not set.
-                            out.write(report.model_dump_json(exclude_none=True, indent=2))
-
-    def _generateTaskTable(self) -> Table:
-        """Generate astropy table listing the number of quanta per task for a
-        given quantum graph.
-
-        Returns
-        -------
-        qg_task_table : `astropy.table.table.Table`
-            An astropy table containing columns: Quanta and Tasks.
-        """
-        qg_quanta, qg_tasks = [], []
-        for task_label, task_info in self.report.qgraphSummary.qgraphTaskSummaries.items():
-            qg_tasks.append(task_label)
-            qg_quanta.append(task_info.numQuanta)
-
-        qg_task_table = Table(dict(Quanta=qg_quanta, Tasks=qg_tasks))
-        return qg_task_table
-
-    def _summarize_qgraph(self, qgraph: QuantumGraph) -> int:
-        """Report a summary of the quanta in the graph.
-
-        Parameters
-        ----------
-        qgraph : `lsst.pipe.base.QuantumGraph`
-            The graph to be summarized.
-
-        Returns
-        -------
-        n_quanta : `int`
-            The number of quanta in the graph.
-        """
-        n_quanta = len(qgraph)
-        if n_quanta == 0:
-            _LOG.info("QuantumGraph contains no quanta.")
-        else:
-            self.report = Report(qgraphSummary=qgraph.getSummary())
-            if _LOG.isEnabledFor(logging.INFO):
-                qg_task_table = self._generateTaskTable()
-                qg_task_table_formatted = "\n".join(qg_task_table.pformat())
-                quanta_str = "quantum" if n_quanta == 1 else "quanta"
-                n_tasks = len(qgraph.taskGraph)
-                n_tasks_plural = "" if n_tasks == 1 else "s"
-                _LOG.info(
-                    "QuantumGraph contains %d %s for %d task%s, graph ID: %r\n%s",
-                    n_quanta,
-                    quanta_str,
-                    n_tasks,
-                    n_tasks_plural,
-                    qgraph.graphID,
-                    qg_task_table_formatted,
-                )
-        return n_quanta
-
-    def _importGraphFixup(self, args: SimpleNamespace) -> ExecutionGraphFixup | None:
-        """Import/instantiate graph fixup object.
-
-        Parameters
-        ----------
-        args : `types.SimpleNamespace`
-            Parsed command line.
-
-        Returns
-        -------
-        fixup : `ExecutionGraphFixup` or `None`
-
-        Raises
-        ------
-        ValueError
-            Raised if import fails, method call raises exception, or returned
-            instance has unexpected type.
-        """
-        if args.graph_fixup:
-            try:
-                factory = doImportType(args.graph_fixup)
-            except Exception as exc:
-                raise ValueError("Failed to import graph fixup class/method") from exc
-            try:
-                fixup = factory()
-            except Exception as exc:
-                raise ValueError("Failed to make instance of graph fixup") from exc
-            if not isinstance(fixup, ExecutionGraphFixup):
-                raise ValueError("Graph fixup is not an instance of ExecutionGraphFixup class")
-            return fixup
-        return None
-
-    def preExecInitQBB(self, task_factory: TaskFactory, args: SimpleNamespace) -> None:
-        _LOG.verbose("Reading full quantum graph from %s.", args.qgraph)
-        # Load quantum graph. We do not really need individual Quanta here,
-        # but we need datastore records for initInputs, and those are only
-        # available from Quanta, so load the whole thing.
-        qgraph = QuantumGraph.loadUri(args.qgraph, graphID=args.qgraph_id)
-
-        # Ensure that QBB uses shared datastore cache for writes.
-        ButlerFactory.define_datastore_cache()
-
-        # Make QBB.
-        _LOG.verbose("Initializing quantum-backed butler.")
-        butler = qgraph.make_init_qbb(args.butler_config, config_search_paths=args.config_search_path)
-        # Save all InitOutputs, configs, etc.
-        _LOG.verbose("Instantiating tasks and saving init-outputs.")
-        preExecInit = PreExecInitLimited(butler, task_factory)
-        preExecInit.initialize(qgraph)
-
-    def runGraphQBB(self, task_factory: TaskFactory, args: SimpleNamespace) -> None:
-        if not args.enable_implicit_threading:
-            disable_implicit_threading()
-
-        # Load quantum graph.
-        nodes = args.qgraph_node_id or None
-        with lsst.utils.timer.time_this(
-            _LOG,
-            msg=f"Reading {str(len(nodes)) if nodes is not None else 'all'} quanta.",
-            level=VERBOSE,
-        ) as qg_read_time:
-            qgraph = QuantumGraph.loadUri(args.qgraph, nodes=nodes, graphID=args.qgraph_id)
-        job_metadata = {"qg_read_time": qg_read_time.duration, "qg_size": len(qgraph)}
-
-        if qgraph.metadata is None:
-            raise ValueError("QuantumGraph is missing metadata, cannot continue.")
-
-        self._summarize_qgraph(qgraph)
-
-        dataset_types = {dstype.name: dstype for dstype in qgraph.registryDatasetTypes()}
-
-        # Ensure that QBB uses shared datastore cache.
-        ButlerFactory.define_datastore_cache()
-
-        _butler_factory = _QBBFactory(
-            butler_config=args.butler_config,
-            dimensions=qgraph.universe,
-            dataset_types=dataset_types,
-        )
-
-        # make special quantum executor
-        resources = self._make_execution_resources(args)
-        quantumExecutor = SingleQuantumExecutor(
-            butler=None,
-            task_factory=task_factory,
-            enable_lsst_debug=args.enableLsstDebug,
-            limited_butler_factory=_butler_factory,
-            resources=resources,
-            assume_no_existing_outputs=args.no_existing_outputs,
-            skip_existing=True,
-            clobber_outputs=True,
-            raise_on_partial_outputs=args.raise_on_partial_outputs,
-            job_metadata=job_metadata,
-        )
-
-        timeout = self.MP_TIMEOUT if args.timeout is None else args.timeout
-        executor = MPGraphExecutor(
-            num_proc=args.processes,
-            timeout=timeout,
-            start_method=args.start_method,
-            quantum_executor=quantumExecutor,
-            fail_fast=args.fail_fast,
-            pdb=args.pdb,
-        )
-        try:
-            with lsst.utils.timer.profile(args.profile, _LOG):
-                executor.execute(qgraph)
-        finally:
-            if args.summary:
-                report = executor.getReport()
-                if report:
-                    with open(args.summary, "w") as out:
-                        # Do not save fields that are not set.
-                        out.write(report.model_dump_json(exclude_none=True, indent=2))
{lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/WHEEL
RENAMED
File without changes

{lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/entry_points.txt
RENAMED
File without changes

{lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/licenses/COPYRIGHT
RENAMED
File without changes

{lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/licenses/LICENSE
RENAMED
File without changes

{lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/licenses/bsd_license.txt
RENAMED
File without changes

{lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/licenses/gpl-v3.0.txt
RENAMED
File without changes

{lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/top_level.txt
RENAMED
File without changes

{lsst_ctrl_mpexec-29.2025.3400.dist-info → lsst_ctrl_mpexec-29.2025.3600.dist-info}/zip-safe
RENAMED
File without changes