fractal-server 2.10.5__py3-none-any.whl → 2.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/models/v2/dataset.py +9 -6
- fractal_server/app/models/v2/job.py +5 -0
- fractal_server/app/models/v2/workflowtask.py +5 -8
- fractal_server/app/routes/api/v1/dataset.py +2 -2
- fractal_server/app/routes/api/v2/_aux_functions.py +3 -10
- fractal_server/app/routes/api/v2/_aux_functions_tasks.py +21 -0
- fractal_server/app/routes/api/v2/images.py +30 -7
- fractal_server/app/routes/api/v2/job.py +14 -1
- fractal_server/app/routes/api/v2/status.py +20 -20
- fractal_server/app/routes/api/v2/submit.py +11 -4
- fractal_server/app/routes/api/v2/workflow.py +95 -0
- fractal_server/app/routes/api/v2/workflow_import.py +8 -0
- fractal_server/app/routes/api/v2/workflowtask.py +45 -26
- fractal_server/app/runner/{async_wrap.py → async_wrap_v1.py} +1 -1
- fractal_server/app/runner/executors/slurm/_slurm_config.py +1 -1
- fractal_server/app/runner/executors/slurm/ssh/executor.py +2 -2
- fractal_server/app/runner/filenames.py +2 -4
- fractal_server/app/runner/v1/_common.py +4 -4
- fractal_server/app/runner/v1/_local/__init__.py +2 -2
- fractal_server/app/runner/v1/_slurm/__init__.py +2 -2
- fractal_server/app/runner/v1/handle_failed_job.py +4 -4
- fractal_server/app/runner/v2/__init__.py +12 -66
- fractal_server/app/runner/v2/_local/__init__.py +17 -47
- fractal_server/app/runner/v2/_local_experimental/__init__.py +27 -61
- fractal_server/app/runner/v2/_slurm_ssh/__init__.py +26 -65
- fractal_server/app/runner/v2/_slurm_sudo/__init__.py +24 -66
- fractal_server/app/runner/v2/handle_failed_job.py +31 -130
- fractal_server/app/runner/v2/merge_outputs.py +6 -17
- fractal_server/app/runner/v2/runner.py +51 -89
- fractal_server/app/runner/v2/task_interface.py +0 -2
- fractal_server/app/schemas/_filter_validators.py +43 -0
- fractal_server/app/schemas/_validators.py +13 -2
- fractal_server/app/schemas/v2/dataset.py +85 -12
- fractal_server/app/schemas/v2/dumps.py +6 -8
- fractal_server/app/schemas/v2/job.py +14 -0
- fractal_server/app/schemas/v2/task.py +9 -9
- fractal_server/app/schemas/v2/task_group.py +2 -2
- fractal_server/app/schemas/v2/workflowtask.py +69 -20
- fractal_server/data_migrations/2_11_0.py +168 -0
- fractal_server/images/__init__.py +0 -1
- fractal_server/images/models.py +12 -35
- fractal_server/images/tools.py +53 -14
- fractal_server/logger.py +4 -1
- fractal_server/migrations/versions/db09233ad13a_split_filters_and_keep_old_columns.py +96 -0
- fractal_server/tasks/v2/local/collect.py +2 -2
- fractal_server/tasks/v2/local/deactivate.py +2 -2
- fractal_server/tasks/v2/local/reactivate.py +2 -3
- fractal_server/tasks/v2/ssh/collect.py +2 -2
- fractal_server/tasks/v2/ssh/deactivate.py +2 -2
- fractal_server/tasks/v2/ssh/reactivate.py +2 -2
- fractal_server/utils.py +9 -7
- {fractal_server-2.10.5.dist-info → fractal_server-2.11.0.dist-info}/METADATA +1 -1
- {fractal_server-2.10.5.dist-info → fractal_server-2.11.0.dist-info}/RECORD +57 -54
- {fractal_server-2.10.5.dist-info → fractal_server-2.11.0.dist-info}/LICENSE +0 -0
- {fractal_server-2.10.5.dist-info → fractal_server-2.11.0.dist-info}/WHEEL +0 -0
- {fractal_server-2.10.5.dist-info → fractal_server-2.11.0.dist-info}/entry_points.txt +0 -0
@@ -12,6 +12,7 @@ from ....db import get_async_db
 from ._aux_functions import _get_workflow_check_owner
 from ._aux_functions import _get_workflow_task_check_owner
 from ._aux_functions import _workflow_insert_task
+from ._aux_functions_tasks import _check_type_filters_compatibility
 from ._aux_functions_tasks import _get_task_read_access
 from fractal_server.app.models import UserOAuth
 from fractal_server.app.models.v2 import WorkflowTaskV2
@@ -47,24 +48,29 @@ async def replace_workflowtask(
         db=db,
     )

-
+    new_task = await _get_task_read_access(
         task_id=task_id, user_id=user.id, db=db, require_active=True
     )

-    if
+    if new_task.type != old_workflow_task.task.type:
         raise HTTPException(
             status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
             detail=(
                 f"Cannot replace a Task '{old_workflow_task.task.type}' with a"
-                f" Task '{
+                f" Task '{new_task.type}'."
             ),
         )

+    _check_type_filters_compatibility(
+        task_input_types=new_task.input_types,
+        wftask_type_filters=old_workflow_task.type_filters,
+    )
+
     _args_non_parallel = old_workflow_task.args_non_parallel
     _args_parallel = old_workflow_task.args_parallel
     if replace is not None:
         if replace.args_non_parallel is not None:
-            if
+            if new_task.type == "parallel":
                 raise HTTPException(
                     status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
                     detail=(
@@ -76,7 +82,7 @@ async def replace_workflowtask(
             _args_non_parallel = replace.args_non_parallel

         if replace.args_parallel is not None:
-            if
+            if new_task.type == "non_parallel":
                 raise HTTPException(
                     status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
                     detail=(
@@ -92,24 +98,26 @@ async def replace_workflowtask(
     if (
         old_workflow_task.meta_non_parallel
         != old_workflow_task.task.meta_non_parallel
-    ) and (
+    ) and (
+        old_workflow_task.task.meta_non_parallel == new_task.meta_non_parallel
+    ):
         _meta_non_parallel = old_workflow_task.meta_non_parallel
     else:
-        _meta_non_parallel =
+        _meta_non_parallel = new_task.meta_non_parallel
     # Same for `meta_parallel`
     if (
         old_workflow_task.meta_parallel != old_workflow_task.task.meta_parallel
-    ) and (old_workflow_task.task.meta_parallel ==
+    ) and (old_workflow_task.task.meta_parallel == new_task.meta_parallel):
         _meta_parallel = old_workflow_task.meta_parallel
     else:
-        _meta_parallel =
+        _meta_parallel = new_task.meta_parallel

     new_workflow_task = WorkflowTaskV2(
-        task_id=
-        task_type=
-        task=
+        task_id=new_task.id,
+        task_type=new_task.type,
+        task=new_task,
         # old-task values
-
+        type_filters=old_workflow_task.type_filters,
         # possibly new values
         args_non_parallel=_args_non_parallel,
         args_parallel=_args_parallel,
@@ -134,7 +142,7 @@ async def create_workflowtask(
     project_id: int,
     workflow_id: int,
     task_id: int,
-
+    wftask: WorkflowTaskCreateV2,
     user: UserOAuth = Depends(current_active_user),
     db: AsyncSession = Depends(get_async_db),
 ) -> Optional[WorkflowTaskReadV2]:
@@ -152,8 +160,8 @@ async def create_workflowtask(

     if task.type == "parallel":
         if (
-
-            or
+            wftask.meta_non_parallel is not None
+            or wftask.args_non_parallel is not None
         ):
             raise HTTPException(
                 status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
@@ -165,8 +173,8 @@ async def create_workflowtask(
             )
     elif task.type == "non_parallel":
         if (
-
-            or
+            wftask.meta_parallel is not None
+            or wftask.args_parallel is not None
         ):
             raise HTTPException(
                 status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
@@ -176,20 +184,26 @@ async def create_workflowtask(
                     "is `non_parallel`."
                 ),
             )
-
+
+    _check_type_filters_compatibility(
+        task_input_types=task.input_types,
+        wftask_type_filters=wftask.type_filters,
+    )
+
+    wftask_db = await _workflow_insert_task(
         workflow_id=workflow.id,
         task_id=task_id,
-        meta_non_parallel=
-        meta_parallel=
-        args_non_parallel=
-        args_parallel=
-
+        meta_non_parallel=wftask.meta_non_parallel,
+        meta_parallel=wftask.meta_parallel,
+        args_non_parallel=wftask.args_non_parallel,
+        args_parallel=wftask.args_parallel,
+        type_filters=wftask.type_filters,
         db=db,
     )

     await db.close()

-    return
+    return wftask_db


 @router.get(
@@ -236,6 +250,11 @@ async def update_workflowtask(
         user_id=user.id,
         db=db,
     )
+    if workflow_task_update.type_filters is not None:
+        _check_type_filters_compatibility(
+            task_input_types=db_wf_task.task.input_types,
+            wftask_type_filters=workflow_task_update.type_filters,
+        )

     if db_wf_task.task_type == "parallel" and (
         workflow_task_update.args_non_parallel is not None
@@ -274,7 +293,7 @@ async def update_workflowtask(
             if not actual_args:
                 actual_args = None
             setattr(db_wf_task, key, actual_args)
-        elif key in ["meta_parallel", "meta_non_parallel", "
+        elif key in ["meta_parallel", "meta_non_parallel", "type_filters"]:
             setattr(db_wf_task, key, value)
         else:
             raise HTTPException(
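The workflow-task route changes above all funnel through the new `_check_type_filters_compatibility` helper (added in `_aux_functions_tasks.py`, `+21 -0` in the file list). Its body is not part of this excerpt; as a rough, hypothetical sketch of the kind of validation it performs (not the actual fractal-server implementation), it could look like this:

```python
from fastapi import HTTPException, status


def _check_type_filters_compatibility(
    *,
    task_input_types: dict[str, bool],
    wftask_type_filters: dict[str, bool],
) -> None:
    """Illustrative sketch: reject type filters that contradict the task's input types."""
    conflicts = {
        key: (value, task_input_types[key])
        for key, value in wftask_type_filters.items()
        if key in task_input_types and task_input_types[key] != value
    }
    if conflicts:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail=f"Type filters not compatible with task input types: {conflicts}",
        )
```

The real helper may differ in wording and edge-case handling; the visible contract is only that a workflow task's `type_filters` must not contradict the `input_types` advertised by its task, and that a violation yields a 422 response.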
@@ -966,8 +966,8 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
             # Retrieve job and future objects
             job = jobs[ind_job]
             future = futures[ind_job]
-            remaining_job_ids = job_ids[ind_job + 1 :]
-            remaining_futures = futures[ind_job + 1 :]
+            remaining_job_ids = job_ids[ind_job + 1 :]
+            remaining_futures = futures[ind_job + 1 :]

             outputs = []
@@ -1,6 +1,4 @@
-
-
-IMAGES_FILENAME = "images.json"
-METADATA_FILENAME = "metadata.json"
+HISTORY_FILENAME_V1 = "history.json"
+METADATA_FILENAME_V1 = "metadata.json"
 SHUTDOWN_FILENAME = "shutdown"
 WORKFLOW_LOG_FILENAME = "workflow.log"
@@ -28,8 +28,8 @@ from ..exceptions import JobExecutionError
 from ..exceptions import TaskExecutionError
 from .common import TaskParameters
 from .common import write_args_file
-from fractal_server.app.runner.filenames import
-from fractal_server.app.runner.filenames import
+from fractal_server.app.runner.filenames import HISTORY_FILENAME_V1
+from fractal_server.app.runner.filenames import METADATA_FILENAME_V1
 from fractal_server.app.runner.task_files import get_task_file_paths
 from fractal_server.string_tools import validate_cmd

@@ -610,11 +610,11 @@ def execute_tasks(
         )

         # Write most recent metadata to METADATA_FILENAME
-        with open(workflow_dir_local /
+        with open(workflow_dir_local / METADATA_FILENAME_V1, "w") as f:
             json.dump(current_task_pars.metadata, f, indent=2)

         # Write most recent metadata to HISTORY_FILENAME
-        with open(workflow_dir_local /
+        with open(workflow_dir_local / HISTORY_FILENAME_V1, "w") as f:
             json.dump(current_task_pars.history, f, indent=2)

     return current_task_pars
@@ -24,7 +24,7 @@ from typing import Any
 from typing import Optional

 from ....models.v1 import Workflow
-from ...
+from ...async_wrap_v1 import async_wrap_v1
 from ...set_start_and_last_task_index import set_start_and_last_task_index
 from .._common import execute_tasks
 from ..common import TaskParameters
@@ -172,7 +172,7 @@ async def process_workflow(
         last_task_index=last_task_index,
     )

-    output_dataset_metadata_history = await
+    output_dataset_metadata_history = await async_wrap_v1(_process_workflow)(
         workflow=workflow,
         input_paths=input_paths,
         output_path=output_path,
@@ -21,7 +21,7 @@ from typing import Any
 from typing import Optional
 from typing import Union

-from ...
+from ...async_wrap_v1 import async_wrap_v1
 from ...executors.slurm.sudo.executor import FractalSlurmExecutor
 from ...set_start_and_last_task_index import set_start_and_last_task_index
 from .._common import execute_tasks
@@ -145,7 +145,7 @@ async def process_workflow(
         last_task_index=last_task_index,
     )

-    output_dataset_metadata_history = await
+    output_dataset_metadata_history = await async_wrap_v1(_process_workflow)(
         workflow=workflow,
         input_paths=input_paths,
         output_path=output_path,
@@ -24,8 +24,8 @@ from ...models.v1 import Dataset
 from ...models.v1 import Workflow
 from ...models.v1 import WorkflowTask
 from ...schemas.v1 import WorkflowTaskStatusTypeV1
-from ..filenames import
-from ..filenames import
+from ..filenames import HISTORY_FILENAME_V1
+from ..filenames import METADATA_FILENAME_V1


 def assemble_history_failed_job(
@@ -64,7 +64,7 @@ def assemble_history_failed_job(
     new_history = output_dataset.history

     # Part 2: Extend history based on tmp_metadata_file
-    tmp_history_file = Path(job.working_dir) /
+    tmp_history_file = Path(job.working_dir) / HISTORY_FILENAME_V1
     try:
         with tmp_history_file.open("r") as f:
             tmp_file_history = json.load(f)
@@ -129,7 +129,7 @@ def assemble_meta_failed_job(
     """

     new_meta = deepcopy(output_dataset.meta)
-    metadata_file = Path(job.working_dir) /
+    metadata_file = Path(job.working_dir) / METADATA_FILENAME_V1
     try:
         with metadata_file.open("r") as f:
             metadata_update = json.load(f)
@@ -11,7 +11,6 @@ from pathlib import Path
 from typing import Optional

 from sqlalchemy.orm import Session as DBSyncSession
-from sqlalchemy.orm.attributes import flag_modified

 from ....config import get_settings
 from ....logger import get_logger
@@ -24,7 +23,6 @@ from ....zip_tools import _zip_folder_to_file_and_remove
 from ...db import DB
 from ...models.v2 import DatasetV2
 from ...models.v2 import JobV2
-from ...models.v2 import WorkflowTaskV2
 from ...models.v2 import WorkflowV2
 from ...schemas.v2 import JobStatusTypeV2
 from ..exceptions import JobExecutionError
@@ -38,12 +36,11 @@ from ._local_experimental import (
 )
 from ._slurm_ssh import process_workflow as slurm_ssh_process_workflow
 from ._slurm_sudo import process_workflow as slurm_sudo_process_workflow
-from .handle_failed_job import
-from .handle_failed_job import assemble_history_failed_job
-from .handle_failed_job import assemble_images_failed_job
+from .handle_failed_job import mark_last_wftask_as_failed
 from fractal_server import __VERSION__
 from fractal_server.app.models import UserSettings

+
 _backends = {}
 _backends["local"] = local_process_workflow
 _backends["slurm"] = slurm_sudo_process_workflow
@@ -72,7 +69,7 @@ def fail_job(
     return


-
+def submit_workflow(
     *,
     workflow_id: int,
     dataset_id: int,
@@ -115,7 +112,6 @@ async def submit_workflow(
     logger = set_logger(logger_name=logger_name)

     with next(DB.get_sync_db()) as db_sync:
-
         try:
             job: Optional[JobV2] = db_sync.get(JobV2, job_id)
             dataset: Optional[DatasetV2] = db_sync.get(DatasetV2, dataset_id)
@@ -322,7 +318,7 @@ async def submit_workflow(
         db_sync = next(DB.get_sync_db())
         db_sync.close()

-
+        process_workflow(
             workflow=workflow,
             dataset=dataset,
             workflow_dir_local=WORKFLOW_DIR_LOCAL,
@@ -331,6 +327,7 @@ async def submit_workflow(
             worker_init=worker_init,
             first_task_index=job.first_task_index,
             last_task_index=job.last_task_index,
+            job_attribute_filters=job.attribute_filters,
             **backend_specific_kwargs,
         )

@@ -340,14 +337,6 @@ async def submit_workflow(
         )
         logger.debug(f'END workflow "{workflow.name}"')

-        # Update dataset attributes, in case of successful execution
-        dataset.history.extend(new_dataset_attributes["history"])
-        dataset.filters = new_dataset_attributes["filters"]
-        dataset.images = new_dataset_attributes["images"]
-        for attribute_name in ["filters", "history", "images"]:
-            flag_modified(dataset, attribute_name)
-        db_sync.merge(dataset)
-
         # Update job DB entry
         job.status = JobStatusTypeV2.DONE
         job.end_timestamp = get_timestamp()
|
|
358
347
|
db_sync.commit()
|
359
348
|
|
360
349
|
except TaskExecutionError as e:
|
361
|
-
|
362
350
|
logger.debug(f'FAILED workflow "{workflow.name}", TaskExecutionError.')
|
363
351
|
logger.info(f'Workflow "{workflow.name}" failed (TaskExecutionError).')
|
364
352
|
|
365
|
-
|
366
|
-
|
367
|
-
failed_wftask = db_sync.get(WorkflowTaskV2, e.workflow_task_id)
|
368
|
-
dataset.history = assemble_history_failed_job(
|
369
|
-
job,
|
370
|
-
dataset,
|
371
|
-
workflow,
|
353
|
+
mark_last_wftask_as_failed(
|
354
|
+
dataset_id=dataset_id,
|
372
355
|
logger_name=logger_name,
|
373
|
-
failed_wftask=failed_wftask,
|
374
356
|
)
|
375
|
-
latest_filters = assemble_filters_failed_job(job)
|
376
|
-
if latest_filters is not None:
|
377
|
-
dataset.filters = latest_filters
|
378
|
-
latest_images = assemble_images_failed_job(job)
|
379
|
-
if latest_images is not None:
|
380
|
-
dataset.images = latest_images
|
381
|
-
db_sync.merge(dataset)
|
382
|
-
|
383
357
|
exception_args_string = "\n".join(e.args)
|
384
358
|
log_msg = (
|
385
359
|
f"TASK ERROR: "
|
@@ -390,26 +364,12 @@ async def submit_workflow(
|
|
390
364
|
fail_job(db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name)
|
391
365
|
|
392
366
|
except JobExecutionError as e:
|
393
|
-
|
394
367
|
logger.debug(f'FAILED workflow "{workflow.name}", JobExecutionError.')
|
395
368
|
logger.info(f'Workflow "{workflow.name}" failed (JobExecutionError).')
|
396
|
-
|
397
|
-
|
398
|
-
# update the DB dataset accordingly
|
399
|
-
dataset.history = assemble_history_failed_job(
|
400
|
-
job,
|
401
|
-
dataset,
|
402
|
-
workflow,
|
369
|
+
mark_last_wftask_as_failed(
|
370
|
+
dataset_id=dataset_id,
|
403
371
|
logger_name=logger_name,
|
404
372
|
)
|
405
|
-
latest_filters = assemble_filters_failed_job(job)
|
406
|
-
if latest_filters is not None:
|
407
|
-
dataset.filters = latest_filters
|
408
|
-
latest_images = assemble_images_failed_job(job)
|
409
|
-
if latest_images is not None:
|
410
|
-
dataset.images = latest_images
|
411
|
-
db_sync.merge(dataset)
|
412
|
-
|
413
373
|
fail_job(
|
414
374
|
db=db_sync,
|
415
375
|
job=job,
|
@@ -421,27 +381,13 @@ async def submit_workflow(
|
|
421
381
|
)
|
422
382
|
|
423
383
|
except Exception:
|
424
|
-
|
425
384
|
logger.debug(f'FAILED workflow "{workflow.name}", unknown error.')
|
426
385
|
logger.info(f'Workflow "{workflow.name}" failed (unkwnon error).')
|
427
|
-
|
428
|
-
|
429
|
-
|
430
|
-
# Read dataset attributes produced by the last successful task, and
|
431
|
-
# update the DB dataset accordingly
|
432
|
-
dataset.history = assemble_history_failed_job(
|
433
|
-
job,
|
434
|
-
dataset,
|
435
|
-
workflow,
|
386
|
+
mark_last_wftask_as_failed(
|
387
|
+
dataset_id=dataset_id,
|
436
388
|
logger_name=logger_name,
|
437
389
|
)
|
438
|
-
|
439
|
-
if latest_filters is not None:
|
440
|
-
dataset.filters = latest_filters
|
441
|
-
latest_images = assemble_images_failed_job(job)
|
442
|
-
if latest_images is not None:
|
443
|
-
dataset.images = latest_images
|
444
|
-
db_sync.merge(dataset)
|
390
|
+
current_traceback = traceback.format_exc()
|
445
391
|
fail_job(
|
446
392
|
db=db_sync,
|
447
393
|
job=job,
|
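In the three `except` blocks above, the old per-error bookkeeping (`assemble_history_failed_job`, `assemble_filters_failed_job`, `assemble_images_failed_job`, plus manual `db_sync.merge` calls) is replaced by a single call to `mark_last_wftask_as_failed`. Its implementation lives in `fractal_server/app/runner/v2/handle_failed_job.py` and is not shown in this diff; a minimal sketch of the pattern, assuming the helper simply flips the status of the dataset's most recent history entry, is:

```python
# Hypothetical sketch of the failure-marking pattern; the real helper may
# differ in detail. Only the call signature (dataset_id, logger_name) is
# visible in this diff.
import logging

from sqlalchemy.orm.attributes import flag_modified

from fractal_server.app.db import DB
from fractal_server.app.models.v2 import DatasetV2


def mark_last_wftask_as_failed(*, dataset_id: int, logger_name: str) -> None:
    logger = logging.getLogger(logger_name)
    with next(DB.get_sync_db()) as db:
        dataset = db.get(DatasetV2, dataset_id)
        if not dataset.history:
            # Nothing was recorded for this job; leave the dataset untouched.
            logger.warning(f"Dataset {dataset_id} has an empty history.")
            return
        # Flip the status of the most recent history entry (assumed layout).
        dataset.history[-1]["status"] = "failed"
        flag_modified(dataset, "history")
        db.merge(dataset)
        db.commit()
```

Treat the history-item layout and session handling here as assumptions; the point is that failure handling now updates the dataset in one place instead of being duplicated per exception type.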
@@ -24,44 +24,14 @@ from typing import Optional

 from ....models.v2 import DatasetV2
 from ....models.v2 import WorkflowV2
-from ...async_wrap import async_wrap
 from ...set_start_and_last_task_index import set_start_and_last_task_index
 from ..runner import execute_tasks_v2
 from ._submit_setup import _local_submit_setup
 from .executor import FractalThreadPoolExecutor
+from fractal_server.images.models import AttributeFiltersType


-def
-    *,
-    workflow: WorkflowV2,
-    dataset: DatasetV2,
-    logger_name: str,
-    workflow_dir_local: Path,
-    first_task_index: int,
-    last_task_index: int,
-) -> dict:
-    """
-    Internal processing routine
-
-    Schedules the workflow using a `FractalThreadPoolExecutor`.
-    """
-
-    with FractalThreadPoolExecutor() as executor:
-        new_dataset_attributes = execute_tasks_v2(
-            wf_task_list=workflow.task_list[
-                first_task_index : (last_task_index + 1) # noqa
-            ], # noqa
-            dataset=dataset,
-            executor=executor,
-            workflow_dir_local=workflow_dir_local,
-            workflow_dir_remote=workflow_dir_local,
-            logger_name=logger_name,
-            submit_setup_call=_local_submit_setup,
-        )
-    return new_dataset_attributes
-
-
-async def process_workflow(
+def process_workflow(
     *,
     workflow: WorkflowV2,
     dataset: DatasetV2,
@@ -70,12 +40,13 @@ async def process_workflow(
     first_task_index: Optional[int] = None,
     last_task_index: Optional[int] = None,
     logger_name: str,
+    job_attribute_filters: AttributeFiltersType,
     # Slurm-specific
     user_cache_dir: Optional[str] = None,
     slurm_user: Optional[str] = None,
     slurm_account: Optional[str] = None,
     worker_init: Optional[str] = None,
-) ->
+) -> None:
     """
     Run a workflow

@@ -127,11 +98,6 @@ async def process_workflow(
             (positive exit codes).
         JobExecutionError: wrapper for errors raised by the tasks' executors
             (negative exit codes).
-
-    Returns:
-        output_dataset_metadata:
-            The updated metadata for the dataset, as returned by the last task
-            of the workflow
     """

     if workflow_dir_remote and (workflow_dir_remote != workflow_dir_local):
@@ -148,12 +114,16 @@ async def process_workflow(
         last_task_index=last_task_index,
     )

-
-
-
-
-
-
-
-
-
+    with FractalThreadPoolExecutor() as executor:
+        execute_tasks_v2(
+            wf_task_list=workflow.task_list[
+                first_task_index : (last_task_index + 1)
+            ],
+            dataset=dataset,
+            executor=executor,
+            workflow_dir_local=workflow_dir_local,
+            workflow_dir_remote=workflow_dir_local,
+            logger_name=logger_name,
+            submit_setup_call=_local_submit_setup,
+            job_attribute_filters=job_attribute_filters,
+        )
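With this last hunk, the local backend's `process_workflow` becomes a plain synchronous function that returns `None` and receives the job's attribute filters explicitly instead of reading them from the dataset. A hypothetical call site, using placeholder `workflow`/`dataset` objects and an assumed `{attribute: list of allowed values}` shape for `job_attribute_filters`, might look like this:

```python
from pathlib import Path

from fractal_server.app.models.v2 import DatasetV2, WorkflowV2
from fractal_server.app.runner.v2._local import process_workflow


def run_locally(workflow: WorkflowV2, dataset: DatasetV2, job_dir: Path) -> None:
    # Call shape only: argument values are placeholders, and the
    # {attribute: list-of-values} filter layout is an assumption based on
    # AttributeFiltersType; only the keyword names visible in this diff are used.
    process_workflow(
        workflow=workflow,
        dataset=dataset,
        workflow_dir_local=job_dir,
        workflow_dir_remote=None,
        first_task_index=None,
        last_task_index=None,
        logger_name="fractal-job-0001",
        job_attribute_filters={"well": ["A01", "B03"]},
    )
```

This matches the broader shift in 2.11.0: dataset updates (history, filters, images) are committed by the runner itself rather than returned to `submit_workflow`, so the backend entry points no longer need to be awaited or to return dataset attributes.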