fractal-server 2.14.0a10__py3-none-any.whl → 2.14.0a11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/routes/api/v2/submit.py +1 -1
- fractal_server/app/runner/components.py +0 -3
- fractal_server/app/runner/exceptions.py +4 -0
- fractal_server/app/runner/executors/base_runner.py +16 -17
- fractal_server/app/runner/executors/local/{_local_config.py → get_local_config.py} +0 -7
- fractal_server/app/runner/executors/local/runner.py +117 -58
- fractal_server/app/runner/executors/slurm_common/_check_jobs_status.py +4 -0
- fractal_server/app/runner/executors/slurm_ssh/executor.py +7 -5
- fractal_server/app/runner/executors/slurm_ssh/runner.py +6 -10
- fractal_server/app/runner/executors/slurm_sudo/runner.py +201 -96
- fractal_server/app/runner/task_files.py +8 -0
- fractal_server/app/runner/v2/__init__.py +0 -366
- fractal_server/app/runner/v2/_local.py +2 -2
- fractal_server/app/runner/v2/_slurm_ssh.py +2 -2
- fractal_server/app/runner/v2/_slurm_sudo.py +2 -2
- fractal_server/app/runner/v2/db_tools.py +87 -0
- fractal_server/app/runner/v2/runner.py +77 -81
- fractal_server/app/runner/v2/runner_functions.py +274 -436
- fractal_server/app/runner/v2/runner_functions_low_level.py +37 -39
- fractal_server/app/runner/v2/submit_workflow.py +366 -0
- fractal_server/app/runner/v2/task_interface.py +31 -0
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a11.dist-info}/METADATA +1 -1
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a11.dist-info}/RECORD +27 -28
- fractal_server/app/runner/executors/local/_submit_setup.py +0 -46
- fractal_server/app/runner/executors/slurm_common/_submit_setup.py +0 -84
- fractal_server/app/runner/v2/_db_tools.py +0 -48
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a11.dist-info}/LICENSE +0 -0
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a11.dist-info}/WHEEL +0 -0
- {fractal_server-2.14.0a10.dist-info → fractal_server-2.14.0a11.dist-info}/entry_points.txt +0 -0
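Judging from the file list alone (not verified against callers), `submit_workflow` moves out of `fractal_server/app/runner/v2/__init__.py` (now empty) into its own module. A hedged sketch of the import-path change this implies:

```python
# Presumed import-path change implied by the file moves above; not verified.
# 2.14.0a10:
# from fractal_server.app.runner.v2 import submit_workflow
# 2.14.0a11:
from fractal_server.app.runner.v2.submit_workflow import submit_workflow
```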
`fractal_server/app/runner/v2/runner_functions_low_level.py` (the +37/-39 entry above; some removed lines appear truncated in the source diff):

```diff
@@ -2,16 +2,11 @@ import json
 import logging
 import shutil
 import subprocess # nosec
-from
-from shlex import split as shlex_split
+from shlex import split
 from typing import Any
-from typing import Optional
 
-from
-from
-from ..exceptions import TaskExecutionError
-from fractal_server.app.models.v2 import WorkflowTaskV2
-from fractal_server.app.runner.task_files import TaskFiles
+from fractal_server.app.runner.exceptions import JobExecutionError
+from fractal_server.app.runner.exceptions import TaskExecutionError
 from fractal_server.string_tools import validate_cmd
 
 
```
```diff
@@ -32,9 +27,9 @@ def _call_command_wrapper(cmd: str, log_path: str) -> None:
         raise TaskExecutionError(f"Invalid command. Original error: {str(e)}")
 
     # Verify that task command is executable
-    if shutil.which(
+    if shutil.which(split(cmd)[0]) is None:
         msg = (
-            f'Command "{
+            f'Command "{split(cmd)[0]}" is not valid. '
             "Hint: make sure that it is executable."
         )
         raise TaskExecutionError(msg)
```
```diff
@@ -42,7 +37,7 @@ def _call_command_wrapper(cmd: str, log_path: str) -> None:
     with open(log_path, "w") as fp_log:
         try:
             result = subprocess.run( # nosec
-
+                split(cmd),
                 stderr=fp_log,
                 stdout=fp_log,
             )
```
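For context on the `_call_command_wrapper` changes: the validation relies on shell-style tokenization, so the first token of the command is the executable that `shutil.which` looks up. A minimal standalone sketch of that pattern (the command string is invented for illustration):

```python
import shlex
import shutil

cmd = 'python3 my_task.py --message "hello world"'  # made-up example command

# shlex.split respects shell-style quoting, unlike a plain str.split()
tokens = shlex.split(cmd)
print(tokens)  # ['python3', 'my_task.py', '--message', 'hello world']

# The first token is the executable; shutil.which returns None if it is
# not found on PATH, which is the condition checked in the hunk above.
print(shutil.which(tokens[0]))
```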
```diff
@@ -60,58 +55,61 @@ def _call_command_wrapper(cmd: str, log_path: str) -> None:
 
 
 def run_single_task(
-
+    # COMMON to all parallel tasks
     command: str,
-
-
-
-
+    workflow_task_order: int,
+    workflow_task_id: int,
+    task_name: str,
+    # SPECIAL for each parallel task
+    parameters: dict[str, Any],
+    remote_files: dict[str, str],
 ) -> dict[str, Any]:
     """
     Runs within an executor (AKA on the SLURM cluster).
     """
 
-
-
-
-
-
-
-
+    try:
+        args_file_remote = remote_files["args_file_remote"]
+        metadiff_file_remote = remote_files["metadiff_file_remote"]
+        log_file_remote = remote_files["log_file_remote"]
+    except KeyError:
+        raise TaskExecutionError(
+            f"Invalid {remote_files=}",
+            workflow_task_order=workflow_task_order,
+            workflow_task_id=workflow_task_id,
+            task_name=task_name,
+        )
 
-
-
-        root_dir_local=root_dir_local,
-        root_dir_remote=root_dir_remote,
-        task_name=task_name,
-        task_order=wftask.order,
-        component=component,
-    )
+    logger = logging.getLogger(None)
+    logger.debug(f"Now start running {command=}")
 
     # Write arguments to args.json file
-
+    # FIXME: this could be done backend-side, with an additional
+    # file transfer if needed (e.g. on SSH)
+    with open(args_file_remote, "w") as f:
         json.dump(parameters, f, indent=2)
 
     # Assemble full command
+    # FIXME: this could be assembled backend-side
     full_command = (
         f"{command} "
-        f"--args-json {
-        f"--out-json {
+        f"--args-json {args_file_remote} "
+        f"--out-json {metadiff_file_remote}"
     )
 
     try:
         _call_command_wrapper(
             full_command,
-            log_path=
+            log_path=log_file_remote,
         )
     except TaskExecutionError as e:
-        e.workflow_task_order =
-        e.workflow_task_id =
-        e.task_name =
+        e.workflow_task_order = workflow_task_order
+        e.workflow_task_id = workflow_task_id
+        e.task_name = task_name
         raise e
 
     try:
-        with open(
+        with open(metadiff_file_remote, "r") as f:
             out_meta = json.load(f)
     except FileNotFoundError as e:
         logger.debug(
```
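A hypothetical call illustrating the new `run_single_task` signature, where per-task file paths now arrive as a plain `remote_files` mapping rather than a `TaskFiles` object; all paths, IDs and the task name below are invented for the example:

```python
from fractal_server.app.runner.v2.runner_functions_low_level import (
    run_single_task,
)

# Hypothetical call site; every value here is made up for illustration.
result = run_single_task(
    command="/usr/bin/python3 /tasks/my_task.py",
    workflow_task_order=0,
    workflow_task_id=123,
    task_name="my_task",
    parameters={"zarr_url": "/data/plate.zarr/B/03/0"},
    remote_files={
        "args_file_remote": "/tmp/job/0-args.json",
        "metadiff_file_remote": "/tmp/job/0-metadiff.json",
        "log_file_remote": "/tmp/job/0-log.txt",
    },
)
```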
`fractal_server/app/runner/v2/submit_workflow.py`, new in this release (the +366/-0 entry above):

```diff
@@ -0,0 +1,366 @@
+"""
+Runner backend subsystem root V2
+
+This module is the single entry point to the runner backend subsystem V2.
+Other subsystems should only import this module and not its submodules or
+the individual backends.
+"""
+import os
+import traceback
+from pathlib import Path
+from typing import Optional
+
+from sqlalchemy.orm import Session as DBSyncSession
+
+from ....config import get_settings
+from ....logger import get_logger
+from ....logger import reset_logger_handlers
+from ....logger import set_logger
+from ....ssh._fabric import FractalSSH
+from ....syringe import Inject
+from ....utils import get_timestamp
+from ....zip_tools import _zip_folder_to_file_and_remove
+from ...db import DB
+from ...models.v2 import DatasetV2
+from ...models.v2 import JobV2
+from ...models.v2 import WorkflowV2
+from ...schemas.v2 import JobStatusTypeV2
+from ..exceptions import JobExecutionError
+from ..exceptions import TaskExecutionError
+from ..executors.slurm_sudo._subprocess_run_as_user import _mkdir_as_user
+from ..filenames import WORKFLOW_LOG_FILENAME
+from ._local import process_workflow as local_process_workflow
+from ._slurm_ssh import process_workflow as slurm_ssh_process_workflow
+from ._slurm_sudo import process_workflow as slurm_sudo_process_workflow
+from fractal_server import __VERSION__
+from fractal_server.app.models import UserSettings
+
+
+_backends = {}
+_backends["local"] = local_process_workflow
+_backends["slurm"] = slurm_sudo_process_workflow
+_backends["slurm_ssh"] = slurm_ssh_process_workflow
+
+
+def fail_job(
+    *,
+    db: DBSyncSession,
+    job: JobV2,
+    log_msg: str,
+    logger_name: str,
+    emit_log: bool = False,
+) -> None:
+    logger = get_logger(logger_name=logger_name)
+    if emit_log:
+        logger.error(log_msg)
+    reset_logger_handlers(logger)
+    job.status = JobStatusTypeV2.FAILED
+    job.end_timestamp = get_timestamp()
+    job.log = log_msg
+    db.merge(job)
+    db.commit()
+    db.close()
+    return
+
+
+def submit_workflow(
+    *,
+    workflow_id: int,
+    dataset_id: int,
+    job_id: int,
+    user_id: int,
+    user_settings: UserSettings,
+    worker_init: Optional[str] = None,
+    slurm_user: Optional[str] = None,
+    user_cache_dir: Optional[str] = None,
+    fractal_ssh: Optional[FractalSSH] = None,
+) -> None:
+    """
+    Prepares a workflow and applies it to a dataset
+
+    This function wraps the process_workflow one, which is different for each
+    backend (e.g. local or slurm backend).
+
+    Args:
+        workflow_id:
+            ID of the workflow being applied
+        dataset_id:
+            Dataset ID
+        job_id:
+            Id of the job record which stores the state for the current
+            workflow application.
+        user_id:
+            User ID.
+        worker_init:
+            Custom executor parameters that get parsed before the execution of
+            each task.
+        user_cache_dir:
+            Cache directory (namely a path where the user can write); for the
+            slurm backend, this is used as a base directory for
+            `job.working_dir_user`.
+        slurm_user:
+            The username to impersonate for the workflow execution, for the
+            slurm backend.
+    """
+    # Declare runner backend and set `process_workflow` function
+    settings = Inject(get_settings)
+    FRACTAL_RUNNER_BACKEND = settings.FRACTAL_RUNNER_BACKEND
+    logger_name = f"WF{workflow_id}_job{job_id}"
+    logger = set_logger(logger_name=logger_name)
+
+    with next(DB.get_sync_db()) as db_sync:
+        try:
+            job: Optional[JobV2] = db_sync.get(JobV2, job_id)
+            dataset: Optional[DatasetV2] = db_sync.get(DatasetV2, dataset_id)
+            workflow: Optional[WorkflowV2] = db_sync.get(
+                WorkflowV2, workflow_id
+            )
+        except Exception as e:
+            logger.error(
+                f"Error connecting to the database. Original error: {str(e)}"
+            )
+            reset_logger_handlers(logger)
+            return
+
+        if job is None:
+            logger.error(f"JobV2 {job_id} does not exist")
+            reset_logger_handlers(logger)
+            return
+        if dataset is None or workflow is None:
+            log_msg = ""
+            if not dataset:
+                log_msg += f"Cannot fetch dataset {dataset_id} from database\n"
+            if not workflow:
+                log_msg += (
+                    f"Cannot fetch workflow {workflow_id} from database\n"
+                )
+            fail_job(
+                db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name
+            )
+            return
+
+        # Declare runner backend and set `process_workflow` function
+        settings = Inject(get_settings)
+        FRACTAL_RUNNER_BACKEND = settings.FRACTAL_RUNNER_BACKEND
+        try:
+            process_workflow = _backends[settings.FRACTAL_RUNNER_BACKEND]
+        except KeyError as e:
+            fail_job(
+                db=db_sync,
+                job=job,
+                log_msg=(
+                    f"Invalid {FRACTAL_RUNNER_BACKEND=}.\n"
+                    f"Original KeyError: {str(e)}"
+                ),
+                logger_name=logger_name,
+                emit_log=True,
+            )
+            return
+
+        # Define and create server-side working folder
+        WORKFLOW_DIR_LOCAL = Path(job.working_dir)
+        if WORKFLOW_DIR_LOCAL.exists():
+            fail_job(
+                db=db_sync,
+                job=job,
+                log_msg=f"Workflow dir {WORKFLOW_DIR_LOCAL} already exists.",
+                logger_name=logger_name,
+                emit_log=True,
+            )
+            return
+
+        try:
+            # Create WORKFLOW_DIR_LOCAL
+            if FRACTAL_RUNNER_BACKEND == "slurm":
+                original_umask = os.umask(0)
+                WORKFLOW_DIR_LOCAL.mkdir(parents=True, mode=0o755)
+                os.umask(original_umask)
+            else:
+                WORKFLOW_DIR_LOCAL.mkdir(parents=True)
+
+            # Define and create WORKFLOW_DIR_REMOTE
+            if FRACTAL_RUNNER_BACKEND == "local":
+                WORKFLOW_DIR_REMOTE = WORKFLOW_DIR_LOCAL
+            elif FRACTAL_RUNNER_BACKEND == "slurm":
+                WORKFLOW_DIR_REMOTE = (
+                    Path(user_cache_dir) / WORKFLOW_DIR_LOCAL.name
+                )
+                _mkdir_as_user(
+                    folder=str(WORKFLOW_DIR_REMOTE), user=slurm_user
+                )
+            elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
+                # Folder creation is deferred to _process_workflow
+                WORKFLOW_DIR_REMOTE = (
+                    Path(user_settings.ssh_jobs_dir) / WORKFLOW_DIR_LOCAL.name
+                )
+            else:
+                logger.error(
+                    "Invalid FRACTAL_RUNNER_BACKEND="
+                    f"{settings.FRACTAL_RUNNER_BACKEND}."
+                )
+
+        except Exception as e:
+            error_type = type(e).__name__
+            fail_job(
+                db=db_sync,
+                job=job,
+                log_msg=(
+                    f"{error_type} error occurred while creating job folder "
+                    f"and subfolders.\nOriginal error: {str(e)}"
+                ),
+                logger_name=logger_name,
+                emit_log=True,
+            )
+            return
+
+        # After Session.commit() is called, either explicitly or when using a
+        # context manager, all objects associated with the Session are expired.
+        # https://docs.sqlalchemy.org/en/14/orm/
+        # session_basics.html#opening-and-closing-a-session
+        # https://docs.sqlalchemy.org/en/14/orm/
+        # session_state_management.html#refreshing-expiring
+
+        # See issue #928:
+        # https://github.com/fractal-analytics-platform/
+        # fractal-server/issues/928
+
+        db_sync.refresh(dataset)
+        db_sync.refresh(workflow)
+        for wftask in workflow.task_list:
+            db_sync.refresh(wftask)
+
+        # Write logs
+        log_file_path = WORKFLOW_DIR_LOCAL / WORKFLOW_LOG_FILENAME
+        logger = set_logger(
+            logger_name=logger_name,
+            log_file_path=log_file_path,
+        )
+        logger.info(
+            f'Start execution of workflow "{workflow.name}"; '
+            f"more logs at {str(log_file_path)}"
+        )
+        logger.debug(f"fractal_server.__VERSION__: {__VERSION__}")
+        logger.debug(f"FRACTAL_RUNNER_BACKEND: {FRACTAL_RUNNER_BACKEND}")
+        if FRACTAL_RUNNER_BACKEND == "slurm":
+            logger.debug(f"slurm_user: {slurm_user}")
+            logger.debug(f"slurm_account: {job.slurm_account}")
+            logger.debug(f"worker_init: {worker_init}")
+        elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
+            logger.debug(f"ssh_user: {user_settings.ssh_username}")
+            logger.debug(f"base dir: {user_settings.ssh_tasks_dir}")
+            logger.debug(f"worker_init: {worker_init}")
+        logger.debug(f"job.id: {job.id}")
+        logger.debug(f"job.working_dir: {job.working_dir}")
+        logger.debug(f"job.working_dir_user: {job.working_dir_user}")
+        logger.debug(f"job.first_task_index: {job.first_task_index}")
+        logger.debug(f"job.last_task_index: {job.last_task_index}")
+        logger.debug(f'START workflow "{workflow.name}"')
+
+    try:
+        if FRACTAL_RUNNER_BACKEND == "local":
+            process_workflow = local_process_workflow
+            backend_specific_kwargs = {}
+        elif FRACTAL_RUNNER_BACKEND == "slurm":
+            process_workflow = slurm_sudo_process_workflow
+            backend_specific_kwargs = dict(
+                slurm_user=slurm_user,
+                slurm_account=job.slurm_account,
+                user_cache_dir=user_cache_dir,
+            )
+        elif FRACTAL_RUNNER_BACKEND == "slurm_ssh":
+            process_workflow = slurm_ssh_process_workflow
+            backend_specific_kwargs = dict(fractal_ssh=fractal_ssh)
+        else:
+            raise RuntimeError(
+                f"Invalid runner backend {FRACTAL_RUNNER_BACKEND=}"
+            )
+
+        # "The Session.close() method does not prevent the Session from being
+        # used again. The Session itself does not actually have a distinct
+        # “closed” state; it merely means the Session will release all database
+        # connections and ORM objects."
+        # (https://docs.sqlalchemy.org/en/20/orm/session_api.html#sqlalchemy.orm.Session.close).
+        #
+        # We close the session before the (possibly long) process_workflow
+        # call, to make sure all DB connections are released. The reason why we
+        # are not using a context manager within the try block is that we also
+        # need access to db_sync in the except branches.
+        db_sync = next(DB.get_sync_db())
+        db_sync.close()
+
+        process_workflow(
+            workflow=workflow,
+            dataset=dataset,
+            user_id=user_id,
+            workflow_dir_local=WORKFLOW_DIR_LOCAL,
+            workflow_dir_remote=WORKFLOW_DIR_REMOTE,
+            logger_name=logger_name,
+            worker_init=worker_init,
+            first_task_index=job.first_task_index,
+            last_task_index=job.last_task_index,
+            job_attribute_filters=job.attribute_filters,
+            job_type_filters=job.type_filters,
+            **backend_specific_kwargs,
+        )
+
+        logger.info(
+            f'End execution of workflow "{workflow.name}"; '
+            f"more logs at {str(log_file_path)}"
+        )
+        logger.debug(f'END workflow "{workflow.name}"')
+
+        # Update job DB entry
+        job.status = JobStatusTypeV2.DONE
+        job.end_timestamp = get_timestamp()
+        with log_file_path.open("r") as f:
+            logs = f.read()
+        job.log = logs
+        db_sync.merge(job)
+        db_sync.commit()
+
+    except TaskExecutionError as e:
+        logger.debug(f'FAILED workflow "{workflow.name}", TaskExecutionError.')
+        logger.info(f'Workflow "{workflow.name}" failed (TaskExecutionError).')
+
+        exception_args_string = "\n".join(e.args)
+        log_msg = (
+            f"TASK ERROR: "
+            f"Task name: {e.task_name}, "
+            f"position in Workflow: {e.workflow_task_order}\n"
+            f"TRACEBACK:\n{exception_args_string}"
+        )
+        fail_job(db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name)
+
+    except JobExecutionError as e:
+        logger.debug(f'FAILED workflow "{workflow.name}", JobExecutionError.')
+        logger.info(f'Workflow "{workflow.name}" failed (JobExecutionError).')
+
+        fail_job(
+            db=db_sync,
+            job=job,
+            log_msg=(
+                f"JOB ERROR in Fractal job {job.id}:\n"
+                f"TRACEBACK:\n{e.assemble_error()}"
+            ),
+            logger_name=logger_name,
+        )
+
+    except Exception:
+        logger.debug(f'FAILED workflow "{workflow.name}", unknown error.')
+        logger.info(f'Workflow "{workflow.name}" failed (unkwnon error).')
+
+        current_traceback = traceback.format_exc()
+        fail_job(
+            db=db_sync,
+            job=job,
+            log_msg=(
+                f"UNKNOWN ERROR in Fractal job {job.id}\n"
+                f"TRACEBACK:\n{current_traceback}"
+            ),
+            logger_name=logger_name,
+        )
+
+    finally:
+        reset_logger_handlers(logger)
+        db_sync.close()
+        _zip_folder_to_file_and_remove(folder=job.working_dir)
```
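One detail worth noting in `submit_workflow` above is the folder-creation step for the `slurm` backend: the process umask is temporarily cleared so that `mode=0o755` is applied exactly, then restored. A minimal standalone sketch of that pattern (the path is a placeholder, and the try/finally is added here for safety; the original restores the umask inline):

```python
import os
from pathlib import Path

workflow_dir = Path("/tmp/fractal-job-example")  # placeholder path

# Clear the umask so mkdir's mode argument is applied as-is (0o755),
# then restore the previous umask regardless of the outcome.
original_umask = os.umask(0)
try:
    workflow_dir.mkdir(parents=True, mode=0o755)
finally:
    os.umask(original_umask)
```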
`fractal_server/app/runner/v2/task_interface.py` (the +31/-0 entry above):

```diff
@@ -1,11 +1,14 @@
 from typing import Any
+from typing import Optional
 
 from pydantic import BaseModel
 from pydantic import ConfigDict
 from pydantic import Field
 from pydantic import field_validator
+from pydantic import ValidationError
 
 from ....images import SingleImageTaskOutput
+from fractal_server.app.runner.exceptions import TaskOutputValidationError
 from fractal_server.urls import normalize_url
 
 
@@ -61,3 +64,31 @@ class InitTaskOutput(BaseModel):
     model_config = ConfigDict(extra="forbid")
 
     parallelization_list: list[InitArgsModel] = Field(default_factory=list)
+
+
+def _cast_and_validate_TaskOutput(
+    task_output: dict[str, Any]
+) -> Optional[TaskOutput]:
+    try:
+        validated_task_output = TaskOutput(**task_output)
+        return validated_task_output
+    except ValidationError as e:
+        raise TaskOutputValidationError(
+            "Validation of task output failed.\n"
+            f"Original error: {str(e)}\n"
+            f"Original data: {task_output}."
+        )
+
+
+def _cast_and_validate_InitTaskOutput(
+    init_task_output: dict[str, Any],
+) -> Optional[InitTaskOutput]:
+    try:
+        validated_init_task_output = InitTaskOutput(**init_task_output)
+        return validated_init_task_output
+    except ValidationError as e:
+        raise TaskOutputValidationError(
+            "Validation of init-task output failed.\n"
+            f"Original error: {str(e)}\n"
+            f"Original data: {init_task_output}."
+        )
```
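The two helpers above follow a generic pydantic cast-and-validate pattern: build the model from a raw dict and convert `ValidationError` into a domain-specific exception. A self-contained sketch with a stand-in model (the real `TaskOutput`, `InitTaskOutput` and `TaskOutputValidationError` live in fractal-server itself):

```python
from typing import Any

from pydantic import BaseModel, ConfigDict, ValidationError


# Stand-in model, only to demonstrate the pattern; extra="forbid" mirrors
# the strict configuration of the real task-output models.
class ExampleTaskOutput(BaseModel):
    model_config = ConfigDict(extra="forbid")
    image_list_updates: list[dict[str, Any]] = []


def cast_and_validate(task_output: dict[str, Any]) -> ExampleTaskOutput:
    try:
        return ExampleTaskOutput(**task_output)
    except ValidationError as e:
        # The real code raises TaskOutputValidationError; ValueError is a stand-in.
        raise ValueError(
            "Validation of task output failed.\n"
            f"Original error: {str(e)}\n"
            f"Original data: {task_output}."
        )


print(cast_and_validate({"image_list_updates": []}))
# cast_and_validate({"unexpected": 1})  # would raise: extra keys are forbidden
```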
`fractal_server-2.14.0a11.dist-info/RECORD` (the +27/-28 entry above; removed lines with the old hashes appear truncated in the source diff):

```diff
@@ -1,4 +1,4 @@
-fractal_server/__init__.py,sha256=
+fractal_server/__init__.py,sha256=GtuXF0xAIDTX9AXSJY7o0G6jQtfGliAzZf-nkCVjhSg,26
 fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
 fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
 fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -41,7 +41,7 @@ fractal_server/app/routes/api/v2/images.py,sha256=R0ccsHMeMyvZ3Sh23zGXyfqniMFOTV
 fractal_server/app/routes/api/v2/job.py,sha256=MU1sHIKk_89WrD0TD44d4ufzqnywot7On_W71KjyUbQ,6500
 fractal_server/app/routes/api/v2/project.py,sha256=hMvL9QLPUcAAiPGy6ta2LBLTVRozJsfvBPl5D06_MHg,6666
 fractal_server/app/routes/api/v2/status_legacy.py,sha256=Q5ZWQNfeZKL8Xgtou2Xr80iaF1uO-r4oSKgq5H42V_8,6349
-fractal_server/app/routes/api/v2/submit.py,sha256=
+fractal_server/app/routes/api/v2/submit.py,sha256=hCwwC6bXP7EyhgGyVLv1ClybRH1YytDVoPunOzpsf0s,8822
 fractal_server/app/routes/api/v2/task.py,sha256=O7pquZhXIS4lRs5XqHvstiwe8BiCuS-B3ZKJI1g6EJU,6985
 fractal_server/app/routes/api/v2/task_collection.py,sha256=IDNF6sjDuU37HIQ0TuQA-TZIuf7nfHAQXUUNmkrlhLM,12706
 fractal_server/app/routes/api/v2/task_collection_custom.py,sha256=cctW61-C2QYF2KXluS15lLhZJS_kt30Ca6UGLFO32z0,6207
@@ -66,21 +66,19 @@ fractal_server/app/routes/aux/_runner.py,sha256=spNudutueHTBJPhm55RlOuYzb31Dhyhe
 fractal_server/app/routes/aux/validate_user_settings.py,sha256=FLVi__8YFcm_6c_K5uMQo7raWWXQLBcZtx8yaPO4jaE,2301
 fractal_server/app/routes/pagination.py,sha256=L8F5JqekF39qz-LpeScdlhb57MQnSRXjK4ZEtsZqYLk,1210
 fractal_server/app/runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fractal_server/app/runner/components.py,sha256
+fractal_server/app/runner/components.py,sha256=-Ii5l8d_V6f5DFOd-Zsr8VYmOsyqw0Hox9fEFQiuqxY,66
 fractal_server/app/runner/compress_folder.py,sha256=HSc1tv7x2DBjBoXwugZlC79rm9GNBIWtQKK9yWn5ZBI,3991
-fractal_server/app/runner/exceptions.py,sha256=
+fractal_server/app/runner/exceptions.py,sha256=JC5ufHyeA1hYD_rkZUscI30DD8D903ncag7Z3AArmUY,4215
 fractal_server/app/runner/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fractal_server/app/runner/executors/base_runner.py,sha256=
+fractal_server/app/runner/executors/base_runner.py,sha256=WhS7MLOkSHy8vjrGZGRHkounziJoYsqUlPkAdUQIs44,4511
 fractal_server/app/runner/executors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fractal_server/app/runner/executors/local/
-fractal_server/app/runner/executors/local/
-fractal_server/app/runner/executors/local/runner.py,sha256=LNql8q6M-Cn_hEV4IMkNP57XFPQJ6eaVd0YIDKJLk60,5621
+fractal_server/app/runner/executors/local/get_local_config.py,sha256=wbrIYuGOvABOStrE7jNrC4ULPhtBQ5Q7Y3aKm_icomg,3508
+fractal_server/app/runner/executors/local/runner.py,sha256=KHmG6tw1TCb1HLtUhUC4FYcK_OdJk-SDdXtLkX_G4-4,7939
 fractal_server/app/runner/executors/slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm_common/_batching.py,sha256=ZY020JZlDS5mfpgpWTChQkyHU7iLE5kx2HVd57_C6XA,8850
-fractal_server/app/runner/executors/slurm_common/_check_jobs_status.py,sha256=
+fractal_server/app/runner/executors/slurm_common/_check_jobs_status.py,sha256=bzpqx8mxT-2xRIGQK6YLMIvMN-SM6OeYWUs97W0iRno,2103
 fractal_server/app/runner/executors/slurm_common/_job_states.py,sha256=nuV-Zba38kDrRESOVB3gaGbrSPZc4q7YGichQaeqTW0,238
 fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=fZaFUUXqDH0p3DndCFUpFqTqyD2tMVCuSYgYLAycpVw,15897
-fractal_server/app/runner/executors/slurm_common/_submit_setup.py,sha256=crbfAAvXbxe_9PaokXkkVdPV65lSCFbInZ0RlT6uyHI,2746
 fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256=-fAX1DZMB5RZnyYanIJD72mWOJAPkh21jd4loDXKJw4,5994
 fractal_server/app/runner/executors/slurm_common/remote.py,sha256=iXLu4d-bWzn7qmDaOjKFkcuaSHLjPESAMSLcg6c99fc,5852
 fractal_server/app/runner/executors/slurm_common/utils_executors.py,sha256=naPyJI0I3lD-sYHbSXbMFGUBK4h_SggA5V91Z1Ch1Xg,1416
@@ -88,28 +86,29 @@ fractal_server/app/runner/executors/slurm_ssh/__init__.py,sha256=47DEQpj8HBSa-_T
 fractal_server/app/runner/executors/slurm_ssh/_check_job_status_ssh.py,sha256=81e4kYpnNlONUw0a3o0o5kTu35Dshelc61y6Gf63m2M,1899
 fractal_server/app/runner/executors/slurm_ssh/_executor_wait_thread.py,sha256=lnW8dNNPqqbpQvojVBQaNJm4wN3Qkw02RWBZ1w68Hyw,3755
 fractal_server/app/runner/executors/slurm_ssh/_slurm_job.py,sha256=IL1C52dezEiincVX2yKryNiPHi4YOMURNLdQO_QPdGw,4406
-fractal_server/app/runner/executors/slurm_ssh/executor.py,sha256=
-fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=
+fractal_server/app/runner/executors/slurm_ssh/executor.py,sha256=StEX6vN9jY79nTxqRDb5OEhkTVd3jYhT4X0_luZSqd4,53678
+fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=LON7H3RGPYwqmDj0gEcPg7CtBGQzClU8o0fubsXk-Rw,24482
 fractal_server/app/runner/executors/slurm_sudo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=O1bNg1DiSDJmQE0RmOk2Ii47DagiXp5ryd0R6KxO2OM,3177
-fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=
+fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=hjHn09Gvw1xZ-mmSRRaye-0-JInA-YhM45ohCZ_8WYY,27911
 fractal_server/app/runner/extract_archive.py,sha256=tLpjDrX47OjTNhhoWvm6iNukg8KoieWyTb7ZfvE9eWU,2483
 fractal_server/app/runner/filenames.py,sha256=lPnxKHtdRizr6FqG3zOdjDPyWA7GoaJGTtiuJV0gA8E,70
 fractal_server/app/runner/run_subprocess.py,sha256=c3JbYXq3hX2aaflQU19qJ5Xs6J6oXGNvnTEoAfv2bxc,959
 fractal_server/app/runner/set_start_and_last_task_index.py,sha256=-q4zVybAj8ek2XlbENKlfOAJ39hT_zoJoZkqzDqiAMY,1254
 fractal_server/app/runner/shutdown.py,sha256=9pfSKHDNdIcm0eY-opgRTi7y0HmvfPmYiu9JR6Idark,2082
-fractal_server/app/runner/task_files.py,sha256=
-fractal_server/app/runner/v2/__init__.py,sha256=
-fractal_server/app/runner/v2/
-fractal_server/app/runner/v2/
-fractal_server/app/runner/v2/
-fractal_server/app/runner/v2/
+fractal_server/app/runner/task_files.py,sha256=pGqaFsSXg-3kywf_FGqgjx_8pPLwIn0MpjQ3009fkus,2741
+fractal_server/app/runner/v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+fractal_server/app/runner/v2/_local.py,sha256=DK8yagbvd6HHjcDVhUzTy0f7MURlTkQha-NM6OZKgJc,3044
+fractal_server/app/runner/v2/_slurm_ssh.py,sha256=RhfV1quXGGU4bYioRF7UKAdrzOI_stgsFdXdcTegrv0,3310
+fractal_server/app/runner/v2/_slurm_sudo.py,sha256=vjvOA-6_0vSKp704wM1O0ubEDpKZZ0vJG5ZfUi9-wSA,2997
+fractal_server/app/runner/v2/db_tools.py,sha256=yPF2KgkrEdvAc380YL-wux68O0Ej9cY-usTSd-m_evE,2400
 fractal_server/app/runner/v2/deduplicate_list.py,sha256=IVTE4abBU1bUprFTkxrTfYKnvkNTanWQ-KWh_etiT08,645
 fractal_server/app/runner/v2/merge_outputs.py,sha256=D1L4Taieq9i71SPQyNc1kMokgHh-sV_MqF3bv7QMDBc,907
-fractal_server/app/runner/v2/runner.py,sha256=
-fractal_server/app/runner/v2/runner_functions.py,sha256=
-fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=
-fractal_server/app/runner/v2/
+fractal_server/app/runner/v2/runner.py,sha256=tCBGtUjExhlAplFLw3AlxEr7NZmpAj9QIwSh2xstIO8,15153
+fractal_server/app/runner/v2/runner_functions.py,sha256=xnc1rpuECwUEDuD4oLF7PpyenHUe3kTVD41xjZa2r_0,16490
+fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=9t1CHN3EyfsGRWfG257YPY5WjQ6zuztsw_KZrpEAFPo,3703
+fractal_server/app/runner/v2/submit_workflow.py,sha256=PTLXNT_ZoEd9GTZ5C4Ai0imOFJOfY4E9Yg1rB-gsl7I,13340
+fractal_server/app/runner/v2/task_interface.py,sha256=IXdQTI8rXFgXv1Ez0js4CjKFf3QwO2GCHRTuwiFtiTQ,2891
 fractal_server/app/runner/versions.py,sha256=dSaPRWqmFPHjg20kTCHmi_dmGNcCETflDtDLronNanU,852
 fractal_server/app/schemas/__init__.py,sha256=stURAU_t3AOBaH0HSUbV-GKhlPKngnnIMoqWc3orFyI,135
 fractal_server/app/schemas/_filter_validators.py,sha256=Gkf2USrkuxZx1TWeeMRmhgfmG60AAIDQfbaWslLsvJQ,1572
@@ -210,8 +209,8 @@ fractal_server/tasks/v2/utils_templates.py,sha256=Kc_nSzdlV6KIsO0CQSPs1w70zLyENP
 fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
 fractal_server/utils.py,sha256=PMwrxWFxRTQRl1b9h-NRIbFGPKqpH_hXnkAT3NfZdpY,3571
 fractal_server/zip_tools.py,sha256=GjDgo_sf6V_DDg6wWeBlZu5zypIxycn_l257p_YVKGc,4876
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
+fractal_server-2.14.0a11.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+fractal_server-2.14.0a11.dist-info/METADATA,sha256=_yK06nJ_yK0gkC42b3nVAtSD0_TQTlD7VJwUl9rlzf0,4563
+fractal_server-2.14.0a11.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+fractal_server-2.14.0a11.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+fractal_server-2.14.0a11.dist-info/RECORD,,
```