lsst-pipe-base 29.2025.3000-py3-none-any.whl → 29.2025.3100-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lsst/pipe/base/_datasetQueryConstraints.py +1 -1
- lsst/pipe/base/all_dimensions_quantum_graph_builder.py +6 -4
- lsst/pipe/base/connectionTypes.py +19 -19
- lsst/pipe/base/connections.py +2 -2
- lsst/pipe/base/exec_fixup_data_id.py +131 -0
- lsst/pipe/base/execution_graph_fixup.py +69 -0
- lsst/pipe/base/log_capture.py +227 -0
- lsst/pipe/base/mp_graph_executor.py +774 -0
- lsst/pipe/base/quantum_graph_builder.py +43 -42
- lsst/pipe/base/quantum_graph_executor.py +125 -0
- lsst/pipe/base/quantum_reports.py +334 -0
- lsst/pipe/base/script/transfer_from_graph.py +4 -1
- lsst/pipe/base/separable_pipeline_executor.py +296 -0
- lsst/pipe/base/simple_pipeline_executor.py +674 -0
- lsst/pipe/base/single_quantum_executor.py +636 -0
- lsst/pipe/base/taskFactory.py +18 -12
- lsst/pipe/base/version.py +1 -1
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/METADATA +1 -1
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/RECORD +27 -18
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/WHEEL +0 -0
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/entry_points.txt +0 -0
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/licenses/COPYRIGHT +0 -0
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/licenses/LICENSE +0 -0
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/licenses/bsd_license.txt +0 -0
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/licenses/gpl-v3.0.txt +0 -0
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/top_level.txt +0 -0
- {lsst_pipe_base-29.2025.3000.dist-info → lsst_pipe_base-29.2025.3100.dist-info}/zip-safe +0 -0
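The bulk of this release is a new execution stack added to `lsst.pipe.base`: an abstract executor layer (`quantum_graph_executor.py`), a single-quantum runner (`single_quantum_executor.py`), the multiprocess graph executor (`mp_graph_executor.py`), and the pipeline executor, reporting, and log-capture modules built on top of them. The largest new module, `mp_graph_executor.py`, is reproduced in full below. As a rough orientation (a hypothetical sketch, not part of the diff), driving the new executor would look something like the following; `qgraph` (a `QuantumGraph`) and `single_quantum_executor` (a picklable `QuantumExecutor`, e.g. from the new `single_quantum_executor.py`) are assumed to already exist:

```python
# Hypothetical usage sketch; only names and signatures visible in the
# module below are used.
from lsst.pipe.base.mp_graph_executor import MPGraphExecutor, MPGraphExecutorError

executor = MPGraphExecutor(
    num_proc=4,      # run up to four quanta in parallel subprocesses
    timeout=3600.0,  # a quantum still running after this many seconds is killed
    quantum_executor=single_quantum_executor,  # must pickle when num_proc > 1
    fail_fast=False,  # keep executing remaining quanta after a failure
)
try:
    executor.execute(qgraph)
except MPGraphExecutorError:
    pass  # per-quantum details remain available in the report
report = executor.getReport()  # Report with one QuantumReport per quantum
for quantum_report in report.quantaReports:
    print(quantum_report.taskLabel, quantum_report.status)
```

Note that `MPTimeoutError` subclasses `MPGraphExecutorError`, so the single `except` clause above also covers timeouts, and with `num_proc=1` the same `execute()` call runs every quantum in the current process instead, which is also the only path that honors the `pdb` post-mortem option.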
@@ -0,0 +1,774 @@
+# This file is part of pipe_base.
+#
+# Developed for the LSST Data Management System.
+# This product includes software developed by the LSST Project
+# (http://www.lsst.org).
+# See the COPYRIGHT file at the top-level directory of this distribution
+# for details of code ownership.
+#
+# This software is dual licensed under the GNU General Public License and also
+# under a 3-clause BSD license. Recipients may choose which of these licenses
+# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
+# respectively. If you choose the GPL option then the following text applies
+# (but note that there is still no warranty even if you opt for BSD instead):
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import annotations
+
+__all__ = ["MPGraphExecutor", "MPGraphExecutorError", "MPTimeoutError"]
+
+import importlib
+import logging
+import multiprocessing
+import pickle
+import signal
+import sys
+import threading
+import time
+import uuid
+from collections.abc import Iterable
+from enum import Enum
+from typing import Literal
+
+from lsst.daf.butler.cli.cliLog import CliLog
+from lsst.utils.threads import disable_implicit_threading
+
+from ._status import InvalidQuantumError, RepeatableQuantumError
+from .execution_graph_fixup import ExecutionGraphFixup
+from .graph import QuantumGraph, QuantumNode
+from .pipeline_graph import TaskNode
+from .quantum_graph_executor import QuantumExecutor, QuantumGraphExecutor
+from .quantum_reports import ExecutionStatus, QuantumReport, Report
+
+_LOG = logging.getLogger(__name__)
+
+
+# Possible states for the executing task:
+# - PENDING: job has not started yet
+# - RUNNING: job is currently executing
+# - FINISHED: job finished successfully
+# - FAILED: job execution failed (process returned non-zero status)
+# - TIMED_OUT: job is killed due to too long execution time
+# - FAILED_DEP: one of the dependencies of this job has failed/timed out
+JobState = Enum("JobState", "PENDING RUNNING FINISHED FAILED TIMED_OUT FAILED_DEP")
+
+
+class _Job:
+    """Class representing a job running single task.
+
+    Parameters
+    ----------
+    qnode: `QuantumNode`
+        Quantum and some associated information.
+    """
+
+    def __init__(self, qnode: QuantumNode, fail_fast: bool = False):
+        self.qnode = qnode
+        self._fail_fast = fail_fast
+        self.process: multiprocessing.process.BaseProcess | None = None
+        self._state = JobState.PENDING
+        self.started: float = 0.0
+        self._rcv_conn: multiprocessing.connection.Connection | None = None
+        self._terminated = False
+
+    @property
+    def state(self) -> JobState:
+        """Job processing state (JobState)."""
+        return self._state
+
+    @property
+    def terminated(self) -> bool:
+        """Return `True` if job was killed by stop() method and negative exit
+        code is returned from child process (`bool`).
+        """
+        if self._terminated:
+            assert self.process is not None, "Process must be started"
+            if self.process.exitcode is not None:
+                return self.process.exitcode < 0
+        return False
+
+    def start(
+        self,
+        quantumExecutor: QuantumExecutor,
+        startMethod: Literal["spawn"] | Literal["forkserver"],
+    ) -> None:
+        """Start process which runs the task.
+
+        Parameters
+        ----------
+        quantumExecutor : `QuantumExecutor`
+            Executor for single quantum.
+        startMethod : `str`, optional
+            Start method from `multiprocessing` module.
+        """
+        # Unpickling of quantum has to happen after butler/executor, also we
+        # want to setup logging before unpickling anything that can generate
+        # messages, this is why things are pickled manually here.
+        qe_pickle = pickle.dumps(quantumExecutor)
+        task_node_pickle = pickle.dumps(self.qnode.task_node)
+        quantum_pickle = pickle.dumps(self.qnode.quantum)
+        self._rcv_conn, snd_conn = multiprocessing.Pipe(False)
+        logConfigState = CliLog.configState
+
+        mp_ctx = multiprocessing.get_context(startMethod)
+        self.process = mp_ctx.Process(  # type: ignore[attr-defined]
+            target=_Job._executeJob,
+            args=(
+                qe_pickle,
+                task_node_pickle,
+                quantum_pickle,
+                self.qnode.nodeId,
+                logConfigState,
+                snd_conn,
+                self._fail_fast,
+            ),
+            name=f"task-{self.qnode.quantum.dataId}",
+        )
+        # mypy is getting confused by multiprocessing.
+        assert self.process is not None
+        self.process.start()
+        self.started = time.time()
+        self._state = JobState.RUNNING
+
+    @staticmethod
+    def _executeJob(
+        quantumExecutor_pickle: bytes,
+        task_node_pickle: bytes,
+        quantum_pickle: bytes,
+        quantum_id: uuid.UUID | None,
+        logConfigState: list,
+        snd_conn: multiprocessing.connection.Connection,
+        fail_fast: bool,
+    ) -> None:
+        """Execute a job with arguments.
+
+        Parameters
+        ----------
+        quantumExecutor_pickle : `bytes`
+            Executor for single quantum, pickled.
+        task_node_pickle : `bytes`
+            Task definition structure, pickled.
+        quantum_pickle : `bytes`
+            Quantum for this task execution in pickled form.
+        logConfigState : `list`
+            Logging state from parent process.
+        snd_conn : `multiprocessing.Connection`
+            Connection to send job report to parent process.
+        fail_fast : `bool`
+            If `True` then kill subprocess on RepeatableQuantumError.
+        """
+        # This terrible hack is a workaround for Python threading bug:
+        # https://github.com/python/cpython/issues/102512. Should be removed
+        # when fix for that bug is deployed. Inspired by
+        # https://github.com/QubesOS/qubes-core-admin-client/pull/236/files.
+        thread = threading.current_thread()
+        if isinstance(thread, threading._DummyThread):
+            if getattr(thread, "_tstate_lock", "") is None:
+                thread._set_tstate_lock()  # type: ignore[attr-defined]
+
+        if logConfigState and not CliLog.configState:
+            # means that we are in a new spawned Python process and we have to
+            # re-initialize logging
+            CliLog.replayConfigState(logConfigState)
+
+        quantumExecutor: QuantumExecutor = pickle.loads(quantumExecutor_pickle)
+        task_node: TaskNode = pickle.loads(task_node_pickle)
+        quantum = pickle.loads(quantum_pickle)
+        report: QuantumReport | None = None
+        # Catch a few known failure modes and stop the process immediately,
+        # with exception-specific exit code.
+        try:
+            _, report = quantumExecutor.execute(task_node, quantum, quantum_id=quantum_id)
+        except RepeatableQuantumError as exc:
+            report = QuantumReport.from_exception(
+                exception=exc,
+                dataId=quantum.dataId,
+                taskLabel=task_node.label,
+                exitCode=exc.EXIT_CODE if fail_fast else None,
+            )
+            if fail_fast:
+                _LOG.warning("Caught repeatable quantum error for %s (%s):", task_node.label, quantum.dataId)
+                _LOG.warning(exc, exc_info=True)
+                sys.exit(exc.EXIT_CODE)
+            else:
+                raise
+        except InvalidQuantumError as exc:
+            _LOG.fatal("Invalid quantum error for %s (%s):", task_node.label, quantum.dataId)
+            _LOG.fatal(exc, exc_info=True)
+            report = QuantumReport.from_exception(
+                exception=exc,
+                dataId=quantum.dataId,
+                taskLabel=task_node.label,
+                exitCode=exc.EXIT_CODE,
+            )
+            sys.exit(exc.EXIT_CODE)
+        except Exception as exc:
+            _LOG.debug("exception from task %s dataId %s: %s", task_node.label, quantum.dataId, exc)
+            report = QuantumReport.from_exception(
+                exception=exc,
+                dataId=quantum.dataId,
+                taskLabel=task_node.label,
+            )
+            raise
+        finally:
+            if report is not None:
+                # If sending fails we do not want this new exception to be
+                # exposed.
+                try:
+                    _LOG.debug("sending report for task %s dataId %s", task_node.label, quantum.dataId)
+                    snd_conn.send(report)
+                except Exception:
+                    pass
+
+    def stop(self) -> None:
+        """Stop the process."""
+        assert self.process is not None, "Process must be started"
+        self.process.terminate()
+        # give it 1 second to finish or KILL
+        for _ in range(10):
+            time.sleep(0.1)
+            if not self.process.is_alive():
+                break
+        else:
+            _LOG.debug("Killing process %s", self.process.name)
+            self.process.kill()
+        self._terminated = True
+
+    def cleanup(self) -> None:
+        """Release processes resources, has to be called for each finished
+        process.
+        """
+        if self.process and not self.process.is_alive():
+            self.process.close()
+            self.process = None
+            self._rcv_conn = None
+
+    def report(self) -> QuantumReport:
+        """Return task report, should be called after process finishes and
+        before cleanup().
+        """
+        assert self.process is not None, "Process must be started"
+        assert self._rcv_conn is not None, "Process must be started"
+        try:
+            report = self._rcv_conn.recv()
+            report.exitCode = self.process.exitcode
+        except Exception:
+            # Likely due to the process killed, but there may be other reasons.
+            # Exit code should not be None, this is to keep mypy happy.
+            exitcode = self.process.exitcode if self.process.exitcode is not None else -1
+            assert self.qnode.quantum.dataId is not None, "Quantum DataId cannot be None"
+            report = QuantumReport.from_exit_code(
+                exitCode=exitcode,
+                dataId=self.qnode.quantum.dataId,
+                taskLabel=self.qnode.task_node.label,
+            )
+        if self.terminated:
+            # Means it was killed, assume it's due to timeout
+            report.status = ExecutionStatus.TIMEOUT
+        return report
+
+    def failMessage(self) -> str:
+        """Return a message describing task failure."""
+        assert self.process is not None, "Process must be started"
+        assert self.process.exitcode is not None, "Process has to finish"
+        exitcode = self.process.exitcode
+        if exitcode < 0:
+            # Negative exit code means it is killed by signal
+            signum = -exitcode
+            msg = f"Task {self} failed, killed by signal {signum}"
+            # Just in case this is some very odd signal, expect ValueError
+            try:
+                strsignal = signal.strsignal(signum)
+                msg = f"{msg} ({strsignal})"
+            except ValueError:
+                pass
+        elif exitcode > 0:
+            msg = f"Task {self} failed, exit code={exitcode}"
+        else:
+            msg = ""
+        return msg
+
+    def __str__(self) -> str:
+        return f"<{self.qnode.task_node.label} dataId={self.qnode.quantum.dataId}>"
+
+
+class _JobList:
+    """Simple list of _Job instances with few convenience methods.
+
+    Parameters
+    ----------
+    iterable : `~collections.abc.Iterable` [ `QuantumNode` ]
+        Sequence of Quanta to execute. This has to be ordered according to
+        task dependencies.
+    """
+
+    def __init__(self, iterable: Iterable[QuantumNode]):
+        self.jobs = [_Job(qnode) for qnode in iterable]
+        self.pending = self.jobs[:]
+        self.running: list[_Job] = []
+        self.finishedNodes: set[QuantumNode] = set()
+        self.failedNodes: set[QuantumNode] = set()
+        self.timedOutNodes: set[QuantumNode] = set()
+
+    def submit(
+        self,
+        job: _Job,
+        quantumExecutor: QuantumExecutor,
+        startMethod: Literal["spawn"] | Literal["forkserver"],
+    ) -> None:
+        """Submit one more job for execution.
+
+        Parameters
+        ----------
+        job : `_Job`
+            Job to submit.
+        quantumExecutor : `QuantumExecutor`
+            Executor for single quantum.
+        startMethod : `str`, optional
+            Start method from `multiprocessing` module.
+        """
+        # this will raise if job is not in pending list
+        self.pending.remove(job)
+        job.start(quantumExecutor, startMethod)
+        self.running.append(job)
+
+    def setJobState(self, job: _Job, state: JobState) -> None:
+        """Update job state.
+
+        Parameters
+        ----------
+        job : `_Job`
+            Job to submit.
+        state : `JobState`
+            New job state, note that only FINISHED, FAILED, TIMED_OUT, or
+            FAILED_DEP state is acceptable.
+        """
+        allowedStates = (JobState.FINISHED, JobState.FAILED, JobState.TIMED_OUT, JobState.FAILED_DEP)
+        assert state in allowedStates, f"State {state} not allowed here"
+
+        # remove job from pending/running lists
+        if job.state == JobState.PENDING:
+            self.pending.remove(job)
+        elif job.state == JobState.RUNNING:
+            self.running.remove(job)
+
+        qnode = job.qnode
+        # it should not be in any of these, but just in case
+        self.finishedNodes.discard(qnode)
+        self.failedNodes.discard(qnode)
+        self.timedOutNodes.discard(qnode)
+
+        job._state = state
+        if state == JobState.FINISHED:
+            self.finishedNodes.add(qnode)
+        elif state == JobState.FAILED:
+            self.failedNodes.add(qnode)
+        elif state == JobState.FAILED_DEP:
+            self.failedNodes.add(qnode)
+        elif state == JobState.TIMED_OUT:
+            self.failedNodes.add(qnode)
+            self.timedOutNodes.add(qnode)
+        else:
+            raise ValueError(f"Unexpected state value: {state}")
+
+    def cleanup(self) -> None:
+        """Do periodic cleanup for jobs that did not finish correctly.
+
+        If timed out jobs are killed but take too long to stop then regular
+        cleanup will not work for them. Here we check all timed out jobs
+        periodically and do cleanup if they managed to die by this time.
+        """
+        for job in self.jobs:
+            if job.state == JobState.TIMED_OUT and job.process is not None:
+                job.cleanup()
+
+
+class MPGraphExecutorError(Exception):
+    """Exception class for errors raised by MPGraphExecutor."""
+
+    pass
+
+
+class MPTimeoutError(MPGraphExecutorError):
+    """Exception raised when task execution times out."""
+
+    pass
+
+
+class MPGraphExecutor(QuantumGraphExecutor):
+    """Implementation of QuantumGraphExecutor using same-host multiprocess
+    execution of Quanta.
+
+    Parameters
+    ----------
+    num_proc : `int`
+        Number of processes to use for executing tasks.
+    timeout : `float`
+        Time in seconds to wait for tasks to finish.
+    quantum_executor : `.quantum_graph_executor.QuantumExecutor`
+        Executor for single quantum. For multiprocess-style execution when
+        ``num_proc`` is greater than one this instance must support pickle.
+    start_method : `str`, optional
+        Start method from `multiprocessing` module, `None` selects the best
+        one for current platform.
+    fail_fast : `bool`, optional
+        If set to ``True`` then stop processing on first error from any task.
+    pdb : `str`, optional
+        Debugger to import and use (via the ``post_mortem`` function) in the
+        event of an exception.
+    execution_graph_fixup : `.execution_graph_fixup.ExecutionGraphFixup`, \
+            optional
+        Instance used for modification of execution graph.
+    """
+
+    def __init__(
+        self,
+        *,
+        num_proc: int,
+        timeout: float,
+        quantum_executor: QuantumExecutor,
+        start_method: Literal["spawn"] | Literal["forkserver"] | None = None,
+        fail_fast: bool = False,
+        pdb: str | None = None,
+        execution_graph_fixup: ExecutionGraphFixup | None = None,
+    ):
+        self._num_proc = num_proc
+        self._timeout = timeout
+        self._quantum_executor = quantum_executor
+        self._fail_fast = fail_fast
+        self._pdb = pdb
+        self._execution_graph_fixup = execution_graph_fixup
+        self._report: Report | None = None
+
+        # We set default start method as spawn for all platforms.
+        if start_method is None:
+            start_method = "spawn"
+        self._start_method = start_method
+
+    def execute(self, graph: QuantumGraph) -> None:
+        # Docstring inherited from QuantumGraphExecutor.execute
+        graph = self._fixupQuanta(graph)
+        self._report = Report(qgraphSummary=graph.getSummary())
+        try:
+            if self._num_proc > 1:
+                self._executeQuantaMP(graph, self._report)
+            else:
+                self._executeQuantaInProcess(graph, self._report)
+        except Exception as exc:
+            self._report.set_exception(exc)
+            raise
+
+    def _fixupQuanta(self, graph: QuantumGraph) -> QuantumGraph:
+        """Call fixup code to modify execution graph.
+
+        Parameters
+        ----------
+        graph : `.QuantumGraph`
+            `.QuantumGraph` to modify.
+
+        Returns
+        -------
+        graph : `.QuantumGraph`
+            Modified `.QuantumGraph`.
+
+        Raises
+        ------
+        MPGraphExecutorError
+            Raised if execution graph cannot be ordered after modification,
+            i.e. it has dependency cycles.
+        """
+        if not self._execution_graph_fixup:
+            return graph
+
+        _LOG.debug("Call execution graph fixup method")
+        graph = self._execution_graph_fixup.fixupQuanta(graph)
+
+        # Detect if there is now a cycle created within the graph
+        if graph.findCycle():
+            raise MPGraphExecutorError("Updated execution graph has dependency cycle.")
+
+        return graph
+
+    def _executeQuantaInProcess(self, graph: QuantumGraph, report: Report) -> None:
+        """Execute all Quanta in current process.
+
+        Parameters
+        ----------
+        graph : `.QuantumGraph`
+            `.QuantumGraph` that is to be executed.
+        report : `Report`
+            Object for reporting execution status.
+        """
+        successCount, totalCount = 0, len(graph)
+        failedNodes: set[QuantumNode] = set()
+        for qnode in graph:
+            assert qnode.quantum.dataId is not None, "Quantum DataId cannot be None"
+            task_node = qnode.task_node
+
+            # Any failed inputs mean that the quantum has to be skipped.
+            inputNodes = graph.determineInputsToQuantumNode(qnode)
+            if inputNodes & failedNodes:
+                _LOG.error(
+                    "Upstream job failed for task <%s dataId=%s>, skipping this task.",
+                    task_node.label,
+                    qnode.quantum.dataId,
+                )
+                failedNodes.add(qnode)
+                failed_quantum_report = QuantumReport(
+                    status=ExecutionStatus.SKIPPED,
+                    dataId=qnode.quantum.dataId,
+                    taskLabel=task_node.label,
+                )
+                report.quantaReports.append(failed_quantum_report)
+                continue
+
+            _LOG.debug("Executing %s", qnode)
+            fail_exit_code: int | None = None
+            try:
+                # For some exception types we want to exit immediately with
+                # exception-specific exit code, but we still want to start
+                # debugger before exiting if debugging is enabled.
+                try:
+                    _, quantum_report = self._quantum_executor.execute(
+                        task_node, qnode.quantum, quantum_id=qnode.nodeId
+                    )
+                    if quantum_report:
+                        report.quantaReports.append(quantum_report)
+                    successCount += 1
+                except RepeatableQuantumError as exc:
+                    if self._fail_fast:
+                        _LOG.warning(
+                            "Caught repeatable quantum error for %s (%s):",
+                            task_node.label,
+                            qnode.quantum.dataId,
+                        )
+                        _LOG.warning(exc, exc_info=True)
+                        fail_exit_code = exc.EXIT_CODE
+                    raise
+                except InvalidQuantumError as exc:
+                    _LOG.fatal("Invalid quantum error for %s (%s):", task_node.label, qnode.quantum.dataId)
+                    _LOG.fatal(exc, exc_info=True)
+                    fail_exit_code = exc.EXIT_CODE
+                    raise
+            except Exception as exc:
+                quantum_report = QuantumReport.from_exception(
+                    exception=exc,
+                    dataId=qnode.quantum.dataId,
+                    taskLabel=task_node.label,
+                )
+                report.quantaReports.append(quantum_report)
+
+                if self._pdb and sys.stdin.isatty() and sys.stdout.isatty():
+                    _LOG.error(
+                        "Task <%s dataId=%s> failed; dropping into pdb.",
+                        task_node.label,
+                        qnode.quantum.dataId,
+                        exc_info=exc,
+                    )
+                    try:
+                        pdb = importlib.import_module(self._pdb)
+                    except ImportError as imp_exc:
+                        raise MPGraphExecutorError(
+                            f"Unable to import specified debugger module ({self._pdb}): {imp_exc}"
+                        ) from exc
+                    if not hasattr(pdb, "post_mortem"):
+                        raise MPGraphExecutorError(
+                            f"Specified debugger module ({self._pdb}) can't debug with post_mortem",
+                        ) from exc
+                    pdb.post_mortem(exc.__traceback__)
+                failedNodes.add(qnode)
+                report.status = ExecutionStatus.FAILURE
+
+                # If exception specified an exit code then just exit with that
+                # code, otherwise crash if fail-fast option is enabled.
+                if fail_exit_code is not None:
+                    sys.exit(fail_exit_code)
+                if self._fail_fast:
+                    raise MPGraphExecutorError(
+                        f"Task <{task_node.label} dataId={qnode.quantum.dataId}> failed."
+                    ) from exc
+                else:
+                    # Note that there could be exception safety issues, which
+                    # we presently ignore.
+                    _LOG.error(
+                        "Task <%s dataId=%s> failed; processing will continue for remaining tasks.",
+                        task_node.label,
+                        qnode.quantum.dataId,
+                        exc_info=exc,
+                    )
+
+        _LOG.info(
+            "Executed %d quanta successfully, %d failed and %d remain out of total %d quanta.",
+            successCount,
+            len(failedNodes),
+            totalCount - successCount - len(failedNodes),
+            totalCount,
+        )
+
+        # Raise an exception if there were any failures.
+        if failedNodes:
+            raise MPGraphExecutorError("One or more tasks failed during execution.")
+
+    def _executeQuantaMP(self, graph: QuantumGraph, report: Report) -> None:
+        """Execute all Quanta in separate processes.
+
+        Parameters
+        ----------
+        graph : `.QuantumGraph`
+            `.QuantumGraph` that is to be executed.
+        report : `Report`
+            Object for reporting execution status.
+        """
+        disable_implicit_threading()  # To prevent thread contention
+
+        _LOG.debug("Using %r for multiprocessing start method", self._start_method)
+
+        # re-pack input quantum data into jobs list
+        jobs = _JobList(graph)
+
+        # check that all tasks can run in sub-process
+        for job in jobs.jobs:
+            task_node = job.qnode.task_node
+            if not task_node.task_class.canMultiprocess:
+                raise MPGraphExecutorError(
+                    f"Task {task_node.label!r} does not support multiprocessing; use single process"
+                )
+
+        finishedCount, failedCount = 0, 0
+        while jobs.pending or jobs.running:
+            _LOG.debug("#pendingJobs: %s", len(jobs.pending))
+            _LOG.debug("#runningJobs: %s", len(jobs.running))
+
+            # See if any jobs have finished
+            for job in jobs.running:
+                assert job.process is not None, "Process cannot be None"
+                if not job.process.is_alive():
+                    _LOG.debug("finished: %s", job)
+                    # finished
+                    exitcode = job.process.exitcode
+                    quantum_report = job.report()
+                    report.quantaReports.append(quantum_report)
+                    if exitcode == 0:
+                        jobs.setJobState(job, JobState.FINISHED)
+                        job.cleanup()
+                        _LOG.debug("success: %s took %.3f seconds", job, time.time() - job.started)
+                    else:
+                        if job.terminated:
+                            # Was killed due to timeout.
+                            if report.status == ExecutionStatus.SUCCESS:
+                                # Do not override global FAILURE status
+                                report.status = ExecutionStatus.TIMEOUT
+                            message = f"Timeout ({self._timeout} sec) for task {job}, task is killed"
+                            jobs.setJobState(job, JobState.TIMED_OUT)
+                        else:
+                            report.status = ExecutionStatus.FAILURE
+                            # failMessage() has to be called before cleanup()
+                            message = job.failMessage()
+                            jobs.setJobState(job, JobState.FAILED)
+
+                        job.cleanup()
+                        _LOG.debug("failed: %s", job)
+                        if self._fail_fast or exitcode == InvalidQuantumError.EXIT_CODE:
+                            # stop all running jobs
+                            for stopJob in jobs.running:
+                                if stopJob is not job:
+                                    stopJob.stop()
+                            if job.state is JobState.TIMED_OUT:
+                                raise MPTimeoutError(f"Timeout ({self._timeout} sec) for task {job}.")
+                            else:
+                                raise MPGraphExecutorError(message)
+                        else:
+                            _LOG.error("%s; processing will continue for remaining tasks.", message)
+                else:
+                    # check for timeout
+                    now = time.time()
+                    if now - job.started > self._timeout:
+                        # Try to kill it, and there is a chance that it
+                        # finishes successfully before it gets killed. Exit
+                        # status is handled by the code above on next
+                        # iteration.
+                        _LOG.debug("Terminating job %s due to timeout", job)
+                        job.stop()
+
+            # Fail jobs whose inputs failed, this may need several iterations
+            # if the order is not right, will be done in the next loop.
+            if jobs.failedNodes:
+                for job in jobs.pending:
+                    jobInputNodes = graph.determineInputsToQuantumNode(job.qnode)
+                    assert job.qnode.quantum.dataId is not None, "Quantum DataId cannot be None"
+                    if jobInputNodes & jobs.failedNodes:
+                        quantum_report = QuantumReport(
+                            status=ExecutionStatus.SKIPPED,
+                            dataId=job.qnode.quantum.dataId,
+                            taskLabel=job.qnode.task_node.label,
+                        )
+                        report.quantaReports.append(quantum_report)
+                        jobs.setJobState(job, JobState.FAILED_DEP)
+                        _LOG.error("Upstream job failed for task %s, skipping this task.", job)
+
+            # see if we can start more jobs
+            if len(jobs.running) < self._num_proc:
+                for job in jobs.pending:
+                    jobInputNodes = graph.determineInputsToQuantumNode(job.qnode)
+                    if jobInputNodes <= jobs.finishedNodes:
+                        # all dependencies have completed, can start new job
+                        if len(jobs.running) < self._num_proc:
+                            _LOG.debug("Submitting %s", job)
+                            jobs.submit(job, self._quantum_executor, self._start_method)
+                        if len(jobs.running) >= self._num_proc:
+                            # Cannot start any more jobs, wait until something
+                            # finishes.
+                            break
+
+            # Do cleanup for timed out jobs if necessary.
+            jobs.cleanup()
+
+            # Print progress message if something changed.
+            newFinished, newFailed = len(jobs.finishedNodes), len(jobs.failedNodes)
+            if (finishedCount, failedCount) != (newFinished, newFailed):
+                finishedCount, failedCount = newFinished, newFailed
+                totalCount = len(jobs.jobs)
+                _LOG.info(
+                    "Executed %d quanta successfully, %d failed and %d remain out of total %d quanta.",
+                    finishedCount,
+                    failedCount,
+                    totalCount - finishedCount - failedCount,
+                    totalCount,
+                )
+
+            # Here we want to wait until one of the running jobs completes
+            # but multiprocessing does not provide an API for that, for now
+            # just sleep a little bit and go back to the loop.
+            if jobs.running:
+                time.sleep(0.1)
+
+        if jobs.failedNodes:
+            # print list of failed jobs
+            _LOG.error("Failed jobs:")
+            for job in jobs.jobs:
+                if job.state != JobState.FINISHED:
+                    _LOG.error(" - %s: %s", job.state.name, job)
+
+            # if any job failed raise an exception
+            if jobs.failedNodes == jobs.timedOutNodes:
+                raise MPTimeoutError("One or more tasks timed out during execution.")
+            else:
+                raise MPGraphExecutorError("One or more tasks failed or timed out during execution.")
+
+    def getReport(self) -> Report | None:
+        # Docstring inherited from base class
+        if self._report is None:
+            raise RuntimeError("getReport() called before execute()")
+        return self._report
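The executor's one extension hook is `execution_graph_fixup`: `_fixupQuanta()` hands the graph to the fixup object's `fixupQuanta()` method and rejects the result if `findCycle()` reports a dependency cycle. A minimal sketch of a custom fixup, assuming `fixupQuanta` is the override point of `ExecutionGraphFixup` (it is the only method this module calls on it):

```python
# Sketch under the stated assumption; ExecutionGraphFixup itself is defined
# in execution_graph_fixup.py, added in this same release.
from lsst.pipe.base.execution_graph_fixup import ExecutionGraphFixup
from lsst.pipe.base.graph import QuantumGraph


class NoOpFixup(ExecutionGraphFixup):
    """Fixup that returns the execution graph unchanged."""

    def fixupQuanta(self, graph: QuantumGraph) -> QuantumGraph:
        # Reorder or constrain quanta here; MPGraphExecutor raises
        # MPGraphExecutorError if the returned graph contains a cycle.
        return graph
```

The new `exec_fixup_data_id.py` in this release ships a concrete fixup of this kind.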