fractal-server 2.10.4__py3-none-any.whl → 2.11.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/routes/api/v1/dataset.py +2 -2
- fractal_server/app/routes/api/v2/status.py +20 -20
- fractal_server/app/runner/filenames.py +2 -4
- fractal_server/app/runner/v1/_common.py +4 -4
- fractal_server/app/runner/v1/handle_failed_job.py +4 -4
- fractal_server/app/runner/v2/__init__.py +10 -65
- fractal_server/app/runner/v2/_local/__init__.py +7 -17
- fractal_server/app/runner/v2/_local_experimental/__init__.py +6 -20
- fractal_server/app/runner/v2/_slurm_ssh/__init__.py +7 -13
- fractal_server/app/runner/v2/_slurm_sudo/__init__.py +7 -14
- fractal_server/app/runner/v2/handle_failed_job.py +31 -130
- fractal_server/app/runner/v2/runner.py +32 -40
- fractal_server/app/security/__init__.py +5 -0
- fractal_server/app/security/signup_email.py +1 -1
- {fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/METADATA +1 -1
- {fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/RECORD +20 -20
- {fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/LICENSE +0 -0
- {fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/WHEEL +0 -0
- {fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/entry_points.txt +0 -0
fractal_server/__init__.py
CHANGED
@@ -1 +1 @@
-__VERSION__ = "2.10.4"
+__VERSION__ = "2.11.0a0"
fractal_server/app/routes/api/v1/dataset.py
CHANGED
@@ -17,7 +17,7 @@ from ....models.v1 import ApplyWorkflow
 from ....models.v1 import Dataset
 from ....models.v1 import Project
 from ....models.v1 import Resource
-from ....runner.filenames import HISTORY_FILENAME
+from ....runner.filenames import HISTORY_FILENAME_V1
 from ....schemas.v1 import DatasetCreateV1
 from ....schemas.v1 import DatasetReadV1
 from ....schemas.v1 import DatasetStatusReadV1
@@ -511,7 +511,7 @@ async def get_workflowtask_status(
     # Highest priority: Read status updates coming from the running-job
     # temporary file. Note: this file only contains information on
     # WorkflowTask's that ran through successfully
-    tmp_file = Path(running_job.working_dir) / HISTORY_FILENAME
+    tmp_file = Path(running_job.working_dir) / HISTORY_FILENAME_V1
     try:
         with tmp_file.open("r") as f:
             history = json.load(f)
fractal_server/app/routes/api/v2/status.py
CHANGED
@@ -1,5 +1,3 @@
-import json
-from pathlib import Path
 from typing import Optional
 
 from fastapi import APIRouter
@@ -18,7 +16,6 @@ from ._aux_functions import _get_submitted_jobs_statement
 from ._aux_functions import _get_workflow_check_owner
 from fractal_server.app.models import UserOAuth
 from fractal_server.app.routes.auth import current_active_user
-from fractal_server.app.runner.filenames import HISTORY_FILENAME
 
 router = APIRouter()
 
@@ -98,8 +95,8 @@ async def get_workflowtask_status(
     if running_job is None:
         # If no job is running, the chronological-last history item is also the
         # positional-last workflow task to be included in the response.
-        if len(
-        last_valid_wftask_id =
+        if len(history) > 0:
+            last_valid_wftask_id = history[-1]["workflowtask"]["id"]
         else:
             last_valid_wftask_id = None
     else:
@@ -109,7 +106,24 @@ async def get_workflowtask_status(
         # as "submitted"
         start = running_job.first_task_index
         end = running_job.last_task_index + 1
-        for wftask in workflow.task_list[start:end]:
+
+        running_job_wftasks = workflow.task_list[start:end]
+        running_job_statuses = [
+            workflow_tasks_status_dict.get(wft.id, None)
+            for wft in running_job_wftasks
+        ]
+        try:
+            first_submitted_index = running_job_statuses.index(
+                WorkflowTaskStatusTypeV2.SUBMITTED
+            )
+        except ValueError:
+            logger.warning(
+                f"Job {running_job.id} is submitted but its task list does "
+                f"not contain a {WorkflowTaskStatusTypeV2.SUBMITTED} task."
+            )
+            first_submitted_index = 0
+
+        for wftask in running_job_wftasks[first_submitted_index:]:
             workflow_tasks_status_dict[
                 wftask.id
             ] = WorkflowTaskStatusTypeV2.SUBMITTED
@@ -133,20 +147,6 @@ async def get_workflowtask_status(
         last_valid_wftask_id = None
         logger.warning(f"Now setting {last_valid_wftask_id=}.")
 
-    # Highest priority: Read status updates coming from the running-job
-    # temporary file. Note: this file only contains information on
-    # WorkflowTask's that ran through successfully.
-    tmp_file = Path(running_job.working_dir) / HISTORY_FILENAME
-    try:
-        with tmp_file.open("r") as f:
-            history = json.load(f)
-    except FileNotFoundError:
-        history = []
-    for history_item in history:
-        wftask_id = history_item["workflowtask"]["id"]
-        wftask_status = history_item["status"]
-        workflow_tasks_status_dict[wftask_id] = wftask_status
-
     # Based on previously-gathered information, clean up the response body
     clean_workflow_tasks_status_dict = {}
     for wf_task in workflow.task_list:
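The rewritten status endpoint above no longer re-reads a temporary history file; it scans the statuses it has already collected and marks everything from the first SUBMITTED entry onwards. A minimal standalone sketch of that index-with-fallback pattern (plain strings stand in for `WorkflowTaskStatusTypeV2` members; this is not fractal-server code):

```python
def first_submitted_index(statuses: list) -> int:
    """Position of the first 'submitted' status, falling back to 0."""
    try:
        return statuses.index("submitted")
    except ValueError:
        # Same fallback as the endpoint: no SUBMITTED entry was found,
        # so treat the whole running-job range as submitted.
        return 0


assert first_submitted_index(["done", "done", "submitted", None]) == 2
assert first_submitted_index(["done", None]) == 0
```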
fractal_server/app/runner/filenames.py
CHANGED
@@ -1,6 +1,4 @@
-FILTERS_FILENAME = "filters.json"
-HISTORY_FILENAME = "history.json"
-IMAGES_FILENAME = "images.json"
-METADATA_FILENAME = "metadata.json"
+HISTORY_FILENAME_V1 = "history.json"
+METADATA_FILENAME_V1 = "metadata.json"
 SHUTDOWN_FILENAME = "shutdown"
 WORKFLOW_LOG_FILENAME = "workflow.log"
fractal_server/app/runner/v1/_common.py
CHANGED
@@ -28,8 +28,8 @@ from ..exceptions import JobExecutionError
 from ..exceptions import TaskExecutionError
 from .common import TaskParameters
 from .common import write_args_file
-from fractal_server.app.runner.filenames import HISTORY_FILENAME
-from fractal_server.app.runner.filenames import METADATA_FILENAME
+from fractal_server.app.runner.filenames import HISTORY_FILENAME_V1
+from fractal_server.app.runner.filenames import METADATA_FILENAME_V1
 from fractal_server.app.runner.task_files import get_task_file_paths
 from fractal_server.string_tools import validate_cmd
 
@@ -610,11 +610,11 @@ def execute_tasks(
     )
 
     # Write most recent metadata to METADATA_FILENAME
-    with open(workflow_dir_local / METADATA_FILENAME, "w") as f:
+    with open(workflow_dir_local / METADATA_FILENAME_V1, "w") as f:
         json.dump(current_task_pars.metadata, f, indent=2)
 
     # Write most recent metadata to HISTORY_FILENAME
-    with open(workflow_dir_local / HISTORY_FILENAME, "w") as f:
+    with open(workflow_dir_local / HISTORY_FILENAME_V1, "w") as f:
         json.dump(current_task_pars.history, f, indent=2)
 
     return current_task_pars
fractal_server/app/runner/v1/handle_failed_job.py
CHANGED
@@ -24,8 +24,8 @@ from ...models.v1 import Dataset
 from ...models.v1 import Workflow
 from ...models.v1 import WorkflowTask
 from ...schemas.v1 import WorkflowTaskStatusTypeV1
-from ..filenames import HISTORY_FILENAME
-from ..filenames import METADATA_FILENAME
+from ..filenames import HISTORY_FILENAME_V1
+from ..filenames import METADATA_FILENAME_V1
 
 
 def assemble_history_failed_job(
@@ -64,7 +64,7 @@ def assemble_history_failed_job(
     new_history = output_dataset.history
 
     # Part 2: Extend history based on tmp_metadata_file
-    tmp_history_file = Path(job.working_dir) / HISTORY_FILENAME
+    tmp_history_file = Path(job.working_dir) / HISTORY_FILENAME_V1
     try:
         with tmp_history_file.open("r") as f:
             tmp_file_history = json.load(f)
@@ -129,7 +129,7 @@ def assemble_meta_failed_job(
     """
 
     new_meta = deepcopy(output_dataset.meta)
-    metadata_file = Path(job.working_dir) / METADATA_FILENAME
+    metadata_file = Path(job.working_dir) / METADATA_FILENAME_V1
     try:
         with metadata_file.open("r") as f:
             metadata_update = json.load(f)
fractal_server/app/runner/v2/__init__.py
CHANGED
@@ -11,7 +11,6 @@ from pathlib import Path
 from typing import Optional
 
 from sqlalchemy.orm import Session as DBSyncSession
-from sqlalchemy.orm.attributes import flag_modified
 
 from ....config import get_settings
 from ....logger import get_logger
@@ -24,7 +23,6 @@ from ....zip_tools import _zip_folder_to_file_and_remove
 from ...db import DB
 from ...models.v2 import DatasetV2
 from ...models.v2 import JobV2
-from ...models.v2 import WorkflowTaskV2
 from ...models.v2 import WorkflowV2
 from ...schemas.v2 import JobStatusTypeV2
 from ..exceptions import JobExecutionError
@@ -38,12 +36,11 @@ from ._local_experimental import (
 )
 from ._slurm_ssh import process_workflow as slurm_ssh_process_workflow
 from ._slurm_sudo import process_workflow as slurm_sudo_process_workflow
-from .handle_failed_job import assemble_filters_failed_job
-from .handle_failed_job import assemble_history_failed_job
-from .handle_failed_job import assemble_images_failed_job
+from .handle_failed_job import mark_last_wftask_as_failed
 from fractal_server import __VERSION__
 from fractal_server.app.models import UserSettings
 
+
 _backends = {}
 _backends["local"] = local_process_workflow
 _backends["slurm"] = slurm_sudo_process_workflow
@@ -115,7 +112,6 @@ async def submit_workflow(
     logger = set_logger(logger_name=logger_name)
 
     with next(DB.get_sync_db()) as db_sync:
-
         try:
             job: Optional[JobV2] = db_sync.get(JobV2, job_id)
             dataset: Optional[DatasetV2] = db_sync.get(DatasetV2, dataset_id)
@@ -322,7 +318,7 @@ async def submit_workflow(
         db_sync = next(DB.get_sync_db())
         db_sync.close()
 
-        new_dataset_attributes = await process_workflow(
+        await process_workflow(
            workflow=workflow,
            dataset=dataset,
            workflow_dir_local=WORKFLOW_DIR_LOCAL,
@@ -340,14 +336,6 @@ async def submit_workflow(
        )
        logger.debug(f'END workflow "{workflow.name}"')
 
-        # Update dataset attributes, in case of successful execution
-        dataset.history.extend(new_dataset_attributes["history"])
-        dataset.filters = new_dataset_attributes["filters"]
-        dataset.images = new_dataset_attributes["images"]
-        for attribute_name in ["filters", "history", "images"]:
-            flag_modified(dataset, attribute_name)
-        db_sync.merge(dataset)
-
        # Update job DB entry
        job.status = JobStatusTypeV2.DONE
        job.end_timestamp = get_timestamp()
@@ -358,28 +346,13 @@ async def submit_workflow(
        db_sync.commit()
 
    except TaskExecutionError as e:
-
        logger.debug(f'FAILED workflow "{workflow.name}", TaskExecutionError.')
        logger.info(f'Workflow "{workflow.name}" failed (TaskExecutionError).')
 
-        # Read dataset attributes produced by the last successful task, and
-        # update the DB dataset accordingly
-        failed_wftask = db_sync.get(WorkflowTaskV2, e.workflow_task_id)
-        dataset.history = assemble_history_failed_job(
-            job,
-            dataset,
-            workflow,
+        mark_last_wftask_as_failed(
+            dataset_id=dataset_id,
            logger_name=logger_name,
-            failed_wftask=failed_wftask,
        )
-        latest_filters = assemble_filters_failed_job(job)
-        if latest_filters is not None:
-            dataset.filters = latest_filters
-        latest_images = assemble_images_failed_job(job)
-        if latest_images is not None:
-            dataset.images = latest_images
-        db_sync.merge(dataset)
-
        exception_args_string = "\n".join(e.args)
        log_msg = (
            f"TASK ERROR: "
@@ -390,26 +363,12 @@ async def submit_workflow(
        fail_job(db=db_sync, job=job, log_msg=log_msg, logger_name=logger_name)
 
    except JobExecutionError as e:
-
        logger.debug(f'FAILED workflow "{workflow.name}", JobExecutionError.')
        logger.info(f'Workflow "{workflow.name}" failed (JobExecutionError).')
-
-        # Read dataset attributes produced by the last successful task, and
-        # update the DB dataset accordingly
-        dataset.history = assemble_history_failed_job(
-            job,
-            dataset,
-            workflow,
+        mark_last_wftask_as_failed(
+            dataset_id=dataset_id,
            logger_name=logger_name,
        )
-        latest_filters = assemble_filters_failed_job(job)
-        if latest_filters is not None:
-            dataset.filters = latest_filters
-        latest_images = assemble_images_failed_job(job)
-        if latest_images is not None:
-            dataset.images = latest_images
-        db_sync.merge(dataset)
-
        fail_job(
            db=db_sync,
            job=job,
@@ -421,27 +380,13 @@ async def submit_workflow(
        )
 
    except Exception:
-
        logger.debug(f'FAILED workflow "{workflow.name}", unknown error.')
        logger.info(f'Workflow "{workflow.name}" failed (unkwnon error).')
-
-        current_traceback = traceback.format_exc()
-
-        # Read dataset attributes produced by the last successful task, and
-        # update the DB dataset accordingly
-        dataset.history = assemble_history_failed_job(
-            job,
-            dataset,
-            workflow,
+        mark_last_wftask_as_failed(
+            dataset_id=dataset_id,
            logger_name=logger_name,
        )
-        latest_filters = assemble_filters_failed_job(job)
-        if latest_filters is not None:
-            dataset.filters = latest_filters
-        latest_images = assemble_images_failed_job(job)
-        if latest_images is not None:
-            dataset.images = latest_images
-        db_sync.merge(dataset)
+        current_traceback = traceback.format_exc()
        fail_job(
            db=db_sync,
            job=job,
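All three except-branches of `submit_workflow` now converge on `mark_last_wftask_as_failed`, since the dataset history is kept current in the database while tasks run. A toy sketch of that control flow, with print statements standing in for logging and for `fail_job` (the stub names here are hypothetical, not fractal-server code):

```python
def mark_last_wftask_as_failed(dataset_id: int, logger_name: str) -> None:
    # Stand-in for the real helper, which flips the last history item
    # of the dataset to FAILED in the database.
    print(f"dataset {dataset_id}: last history item -> failed")


def submit_workflow_stub(run, dataset_id: int) -> None:
    try:
        run()
    except Exception as e:
        # Every failure branch performs the same two steps now.
        mark_last_wftask_as_failed(dataset_id=dataset_id, logger_name="job")
        print(f"job failed: {e}")  # stands in for fail_job(...)


def failing_run() -> None:
    raise RuntimeError("task crashed")


submit_workflow_stub(failing_run, dataset_id=7)
```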
fractal_server/app/runner/v2/_local/__init__.py
CHANGED
@@ -39,18 +39,15 @@ def _process_workflow(
     workflow_dir_local: Path,
     first_task_index: int,
     last_task_index: int,
-) ->
+) -> None:
     """
-
-
-    Schedules the workflow using a `FractalThreadPoolExecutor`.
+    Run the workflow using a `FractalThreadPoolExecutor`.
     """
-
     with FractalThreadPoolExecutor() as executor:
-        new_dataset_attributes = execute_tasks_v2(
+        execute_tasks_v2(
             wf_task_list=workflow.task_list[
-                first_task_index : (last_task_index + 1)
-            ],
+                first_task_index : (last_task_index + 1)
+            ],
             dataset=dataset,
             executor=executor,
             workflow_dir_local=workflow_dir_local,
@@ -58,7 +55,6 @@ def _process_workflow(
             logger_name=logger_name,
             submit_setup_call=_local_submit_setup,
         )
-    return new_dataset_attributes
 
 
 async def process_workflow(
@@ -75,7 +71,7 @@ async def process_workflow(
     slurm_user: Optional[str] = None,
     slurm_account: Optional[str] = None,
     worker_init: Optional[str] = None,
-) ->
+) -> None:
     """
     Run a workflow
 
@@ -127,11 +123,6 @@ async def process_workflow(
         (positive exit codes).
         JobExecutionError: wrapper for errors raised by the tasks' executors
             (negative exit codes).
-
-    Returns:
-        output_dataset_metadata:
-            The updated metadata for the dataset, as returned by the last task
-            of the workflow
     """
 
     if workflow_dir_remote and (workflow_dir_remote != workflow_dir_local):
@@ -148,7 +139,7 @@ async def process_workflow(
         last_task_index=last_task_index,
     )
 
-    new_dataset_attributes = await async_wrap(_process_workflow)(
+    await async_wrap(_process_workflow)(
         workflow=workflow,
         dataset=dataset,
         logger_name=logger_name,
@@ -156,4 +147,3 @@ async def process_workflow(
         first_task_index=first_task_index,
         last_task_index=last_task_index,
     )
-    return new_dataset_attributes
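Each backend's `process_workflow` awaits `async_wrap(_process_workflow)(...)`, i.e. it runs the blocking `_process_workflow` off the event loop. A common implementation of such a wrapper, shown here as a sketch (an assumption about what `async_wrap` does, not a copy of fractal-server's code); the same pattern applies to the local-experimental and SLURM backends below:

```python
import asyncio
import functools
from typing import Any, Callable


def async_wrap(func: Callable[..., Any]) -> Callable[..., Any]:
    """Wrap a blocking function so it can be awaited (runs in a thread)."""

    @functools.wraps(func)
    async def run(*args: Any, **kwargs: Any) -> Any:
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            None, functools.partial(func, *args, **kwargs)
        )

    return run


def _process_workflow_stub(workflow: str) -> None:
    print(f"processing {workflow} in a worker thread")


asyncio.run(async_wrap(_process_workflow_stub)(workflow="wf-1"))
```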
fractal_server/app/runner/v2/_local_experimental/__init__.py
CHANGED
@@ -21,23 +21,17 @@ def _process_workflow(
     workflow_dir_local: Path,
     first_task_index: int,
     last_task_index: int,
-) ->
+) -> None:
     """
-
-
-    Schedules the workflow using a `FractalProcessPoolExecutor`.
-
-    Cf.
-    [process_workflow][fractal_server.app.runner.v2._local_experimental.process_workflow]
-    for the call signature.
+    Run the workflow using a `FractalProcessPoolExecutor`.
     """
     with FractalProcessPoolExecutor(
         shutdown_file=workflow_dir_local / SHUTDOWN_FILENAME
     ) as executor:
         try:
-            new_dataset_attributes = execute_tasks_v2(
+            execute_tasks_v2(
                 wf_task_list=workflow.task_list[
-                    first_task_index : (last_task_index + 1)
+                    first_task_index : (last_task_index + 1)
                 ],
                 dataset=dataset,
                 executor=executor,
@@ -54,8 +48,6 @@ def _process_workflow(
             )
         )
 
-    return new_dataset_attributes
-
 
 async def process_workflow(
     *,
@@ -71,7 +63,7 @@ async def process_workflow(
     slurm_user: Optional[str] = None,
     slurm_account: Optional[str] = None,
     worker_init: Optional[str] = None,
-) ->
+) -> None:
     """
     Run a workflow
 
@@ -123,11 +115,6 @@ async def process_workflow(
         (positive exit codes).
         JobExecutionError: wrapper for errors raised by the tasks' executors
             (negative exit codes).
-
-    Returns:
-        output_dataset_metadata:
-            The updated metadata for the dataset, as returned by the last task
-            of the workflow
     """
 
     if workflow_dir_remote and (workflow_dir_remote != workflow_dir_local):
@@ -144,7 +131,7 @@ async def process_workflow(
         last_task_index=last_task_index,
     )
 
-    new_dataset_attributes = await async_wrap(_process_workflow)(
+    await async_wrap(_process_workflow)(
         workflow=workflow,
         dataset=dataset,
         logger_name=logger_name,
@@ -152,4 +139,3 @@ async def process_workflow(
         first_task_index=first_task_index,
         last_task_index=last_task_index,
     )
-    return new_dataset_attributes
fractal_server/app/runner/v2/_slurm_ssh/__init__.py
CHANGED
@@ -17,7 +17,6 @@ This backend runs fractal workflows in a SLURM cluster using Clusterfutures
 Executor objects.
 """
 from pathlib import Path
-from typing import Any
 from typing import Optional
 from typing import Union
 
@@ -47,16 +46,13 @@ def _process_workflow(
     last_task_index: int,
     fractal_ssh: FractalSSH,
     worker_init: Optional[Union[str, list[str]]] = None,
-) -> dict[str, Any]:
+) -> None:
     """
-
+    Run the workflow using a `FractalSlurmSSHExecutor`.
 
     This function initialises the a FractalSlurmExecutor, setting logging,
     workflow working dir and user to impersonate. It then schedules the
     workflow tasks and returns the new dataset attributes
-
-    Returns:
-        new_dataset_attributes:
     """
 
     if isinstance(worker_init, str):
@@ -80,10 +76,10 @@ def _process_workflow(
         workflow_dir_remote=workflow_dir_remote,
         common_script_lines=worker_init,
     ) as executor:
-        new_dataset_attributes = execute_tasks_v2(
+        execute_tasks_v2(
             wf_task_list=workflow.task_list[
-                first_task_index : (last_task_index + 1)
-            ],
+                first_task_index : (last_task_index + 1)
+            ],
             dataset=dataset,
             executor=executor,
             workflow_dir_local=workflow_dir_local,
@@ -91,7 +87,6 @@ def _process_workflow(
             logger_name=logger_name,
             submit_setup_call=_slurm_submit_setup,
         )
-    return new_dataset_attributes
 
 
 async def process_workflow(
@@ -109,7 +104,7 @@ async def process_workflow(
     slurm_user: Optional[str] = None,
     slurm_account: Optional[str] = None,
     worker_init: Optional[str] = None,
-) -> dict[str, Any]:
+) -> None:
     """
     Process workflow (SLURM backend public interface)
     """
@@ -122,7 +117,7 @@ async def process_workflow(
         last_task_index=last_task_index,
     )
 
-    new_dataset_attributes = await async_wrap(_process_workflow)(
+    await async_wrap(_process_workflow)(
         workflow=workflow,
         dataset=dataset,
         logger_name=logger_name,
@@ -133,4 +128,3 @@ async def process_workflow(
         worker_init=worker_init,
         fractal_ssh=fractal_ssh,
     )
-    return new_dataset_attributes
fractal_server/app/runner/v2/_slurm_sudo/__init__.py
CHANGED
@@ -17,7 +17,6 @@ This backend runs fractal workflows in a SLURM cluster using Clusterfutures
 Executor objects.
 """
 from pathlib import Path
-from typing import Any
 from typing import Optional
 from typing import Union
 
@@ -43,16 +42,13 @@ def _process_workflow(
     slurm_account: Optional[str] = None,
     user_cache_dir: str,
     worker_init: Optional[Union[str, list[str]]] = None,
-) -> dict[str, Any]:
+) -> None:
     """
-
+    Run the workflow using a `FractalSlurmExecutor`.
 
     This function initialises the a FractalSlurmExecutor, setting logging,
     workflow working dir and user to impersonate. It then schedules the
     workflow tasks and returns the new dataset attributes
-
-    Returns:
-        new_dataset_attributes:
     """
 
     if not slurm_user:
@@ -73,10 +69,10 @@ def _process_workflow(
         common_script_lines=worker_init,
         slurm_account=slurm_account,
     ) as executor:
-        new_dataset_attributes = execute_tasks_v2(
+        execute_tasks_v2(
             wf_task_list=workflow.task_list[
-                first_task_index : (last_task_index + 1)
-            ],
+                first_task_index : (last_task_index + 1)
+            ],
             dataset=dataset,
             executor=executor,
             workflow_dir_local=workflow_dir_local,
@@ -84,7 +80,6 @@ def _process_workflow(
             logger_name=logger_name,
             submit_setup_call=_slurm_submit_setup,
         )
-    return new_dataset_attributes
 
 
 async def process_workflow(
@@ -101,7 +96,7 @@ async def process_workflow(
     slurm_user: Optional[str] = None,
     slurm_account: Optional[str] = None,
     worker_init: Optional[str] = None,
-) -> dict[str, Any]:
+) -> None:
     """
     Process workflow (SLURM backend public interface).
     """
@@ -113,8 +108,7 @@ async def process_workflow(
         first_task_index=first_task_index,
         last_task_index=last_task_index,
     )
-
-    new_dataset_attributes = await async_wrap(_process_workflow)(
+    await async_wrap(_process_workflow)(
         workflow=workflow,
         dataset=dataset,
         logger_name=logger_name,
@@ -127,4 +121,3 @@ async def process_workflow(
         slurm_account=slurm_account,
         worker_init=worker_init,
     )
-    return new_dataset_attributes
fractal_server/app/runner/v2/handle_failed_job.py
CHANGED
@@ -12,147 +12,48 @@
 """
 Helper functions to handle Dataset history.
 """
-import json
 import logging
-from pathlib import Path
-from typing import Any
-from typing import Optional
+
+from sqlalchemy.orm.attributes import flag_modified
 
 from ...models.v2 import DatasetV2
-from ...models.v2 import JobV2
-from ...models.v2 import WorkflowTaskV2
-from ...models.v2 import WorkflowV2
 from ...schemas.v2 import WorkflowTaskStatusTypeV2
-from ..filenames import FILTERS_FILENAME
-from ..filenames import HISTORY_FILENAME
-from ..filenames import IMAGES_FILENAME
+from fractal_server.app.db import get_sync_db
 
 
-def assemble_history_failed_job(
-    job: JobV2,
-    dataset: DatasetV2,
-    workflow: WorkflowV2,
-    logger_name: Optional[str] = None,
-    failed_wftask: Optional[WorkflowTaskV2] = None,
-) -> list[dict[str, Any]]:
+def mark_last_wftask_as_failed(
+    dataset_id: int,
+    logger_name: str,
+) -> None:
     """
-
+    Edit dataset history, by marking last item as failed.
 
     Args:
-        job:
-            The failed `JobV2` object.
-        dataset:
-            The `DatasetV2` object associated to `job`.
-        workflow:
-            The `WorkflowV2` object associated to `job`.
+        dataset: The `DatasetV2` object
         logger_name: A logger name.
-        failed_wftask:
-            If set, append it to `history` during step 3; if `None`, infer
-            it by comparing the job task list and the one in
-            `HISTORY_FILENAME`.
-
-    Returns:
-        The new value of `history`, to be merged into
-        `dataset.meta`.
     """
 
     logger = logging.getLogger(logger_name)
-
-
-
-
-
-
-
-
-    # Part 2: Extend history based on temporary-file contents
-    tmp_history_file = Path(job.working_dir) / HISTORY_FILENAME
-    try:
-        with tmp_history_file.open("r") as f:
-            tmp_file_history = json.load(f)
-            new_history.extend(tmp_file_history)
-    except FileNotFoundError:
-        tmp_file_history = []
-
-    # Part 3/A: Identify failed task, if needed
-    if failed_wftask is None:
-        job_wftasks = workflow.task_list[
-            job.first_task_index : (job.last_task_index + 1)  # noqa
-        ]
-        tmp_file_wftasks = [
-            history_item["workflowtask"] for history_item in tmp_file_history
-        ]
-        if len(job_wftasks) <= len(tmp_file_wftasks):
-            n_tasks_job = len(job_wftasks)
-            n_tasks_tmp = len(tmp_file_wftasks)
-            logger.error(
-                "Cannot identify the failed task based on job task list "
-                f"(length {n_tasks_job}) and temporary-file task list "
-                f"(length {n_tasks_tmp})."
+    with next(get_sync_db()) as db:
+        db_dataset = db.get(DatasetV2, dataset_id)
+        if len(db_dataset.history) == 0:
+            logger.warning(
+                f"History for {dataset_id=} is empty. Likely reason: the job "
+                "failed before its first task was marked as SUBMITTED. "
+                "Continue."
             )
-
-
-
-
-
-
-
-
-
-
-
-
-            )
-
-
-    return new_history
-
-
-def assemble_images_failed_job(job: JobV2) -> Optional[dict[str, Any]]:
-    """
-    Assemble `DatasetV2.images` for a failed workflow-execution.
-
-    Assemble new value of `images` based on the last successful task, i.e.
-    based on the content of the temporary `IMAGES_FILENAME` file. If the file
-    is missing, return `None`.
-
-    Argumentss:
-        job:
-            The failed `JobV2` object.
-
-    Returns:
-        The new value of `dataset.images`, or `None` if `IMAGES_FILENAME`
-        is missing.
-    """
-    tmp_file = Path(job.working_dir) / IMAGES_FILENAME
-    try:
-        with tmp_file.open("r") as f:
-            new_images = json.load(f)
-        return new_images
-    except FileNotFoundError:
-        return None
-
-
-def assemble_filters_failed_job(job: JobV2) -> Optional[dict[str, Any]]:
-    """
-    Assemble `DatasetV2.filters` for a failed workflow-execution.
-
-    Assemble new value of `filters` based on the last successful task, i.e.
-    based on the content of the temporary `FILTERS_FILENAME` file. If the file
-    is missing, return `None`.
-
-    Argumentss:
-        job:
-            The failed `JobV2` object.
-
-    Returns:
-        The new value of `dataset.filters`, or `None` if `FILTERS_FILENAME`
-        is missing.
-    """
-    tmp_file = Path(job.working_dir) / FILTERS_FILENAME
-    try:
-        with tmp_file.open("r") as f:
-            new_filters = json.load(f)
-        return new_filters
-    except FileNotFoundError:
-        return None
+            return
+        workflowtask_id = db_dataset.history[-1]["workflowtask"]["id"]
+        last_item_status = db_dataset.history[-1]["status"]
+        if last_item_status != WorkflowTaskStatusTypeV2.SUBMITTED:
+            logger.warning(
+                "Unexpected branch: "
+                f"Last history item, for {workflowtask_id=}, "
+                f"has status {last_item_status}. Skip."
+            )
+            return
+        logger.info(f"Setting history item for {workflowtask_id=} to failed.")
+        db_dataset.history[-1]["status"] = WorkflowTaskStatusTypeV2.FAILED
+        flag_modified(db_dataset, "history")
+        db.merge(db_dataset)
+        db.commit()
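In plain-data terms, the new `mark_last_wftask_as_failed` applies a small, guarded edit to the history list. A pure-Python sketch of those semantics on toy data (the real function loads `DatasetV2` from the database and commits the change):

```python
SUBMITTED, FAILED = "submitted", "failed"


def mark_last_as_failed(history: list[dict]) -> None:
    if len(history) == 0:
        return  # job failed before any task was marked SUBMITTED
    if history[-1]["status"] != SUBMITTED:
        return  # unexpected branch: nothing to fix
    history[-1]["status"] = FAILED


history = [
    {"workflowtask": {"id": 1}, "status": "done"},
    {"workflowtask": {"id": 2}, "status": SUBMITTED},
]
mark_last_as_failed(history)
assert history[-1]["status"] == FAILED
```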
fractal_server/app/runner/v2/runner.py
CHANGED
@@ -1,4 +1,3 @@
-import json
 import logging
 from concurrent.futures import ThreadPoolExecutor
 from copy import copy
@@ -7,20 +6,20 @@ from pathlib import Path
 from typing import Callable
 from typing import Optional
 
+from sqlalchemy.orm.attributes import flag_modified
+
 from ....images import Filters
 from ....images import SingleImage
 from ....images.tools import filter_image_list
 from ....images.tools import find_image_by_zarr_url
 from ....images.tools import match_filter
 from ..exceptions import JobExecutionError
-from ..filenames import FILTERS_FILENAME
-from ..filenames import HISTORY_FILENAME
-from ..filenames import IMAGES_FILENAME
 from .runner_functions import no_op_submit_setup_call
 from .runner_functions import run_v2_task_compound
 from .runner_functions import run_v2_task_non_parallel
 from .runner_functions import run_v2_task_parallel
 from .task_interface import TaskOutput
+from fractal_server.app.db import get_sync_db
 from fractal_server.app.models.v2 import DatasetV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
 from fractal_server.app.schemas.v2.dataset import _DatasetHistoryItemV2
@@ -35,20 +34,20 @@ def execute_tasks_v2(
     workflow_dir_remote: Optional[Path] = None,
     logger_name: Optional[str] = None,
     submit_setup_call: Callable = no_op_submit_setup_call,
-) ->
-
+) -> None:
     logger = logging.getLogger(logger_name)
 
-    if (
-        not workflow_dir_local.exists()
-    ):
+    if not workflow_dir_local.exists():
+        logger.warning(
+            f"Now creating {workflow_dir_local}, "
+            "but it should have already happened."
+        )
         workflow_dir_local.mkdir()
 
     # Initialize local dataset attributes
     zarr_dir = dataset.zarr_dir
     tmp_images = deepcopy(dataset.images)
     tmp_filters = deepcopy(dataset.filters)
-    tmp_history = []
 
     for wftask in wf_task_list:
         task = wftask.task
@@ -77,7 +76,18 @@ def execute_tasks_v2(
                 f'Image zarr_url: {image["zarr_url"]}\n'
                 f'Image types: {image["types"]}\n'
             )
-
+        # First, set status SUBMITTED in dataset.history for each wftask
+        with next(get_sync_db()) as db:
+            db_dataset = db.get(DatasetV2, dataset.id)
+            new_history_item = _DatasetHistoryItemV2(
+                workflowtask=wftask,
+                status=WorkflowTaskStatusTypeV2.SUBMITTED,
+                parallelization=dict(),  # FIXME: re-include parallelization
+            ).dict()
+            db_dataset.history.append(new_history_item)
+            flag_modified(db_dataset, "history")
+            db.merge(db_dataset)
+            db.commit()
         # TASK EXECUTION (V2)
         if task.type == "non_parallel":
             current_task_output = run_v2_task_non_parallel(
@@ -282,36 +292,18 @@ def execute_tasks_v2(
         tmp_filters["types"].update(types_from_manifest)
         tmp_filters["types"].update(types_from_task)
 
-        #
-        history_item = _DatasetHistoryItemV2(
-            workflowtask=wftask,
-            status=WorkflowTaskStatusTypeV2.DONE,
-            parallelization=dict(
-                # task_type=wftask.task.type,  # FIXME: breaks for V1 tasks
-                # component_list=fil,  # FIXME
-            ),
-        ).dict()
-        tmp_history.append(history_item)
-
-        # Write current dataset attributes (history, images, filters) into
-        # temporary files which can be used (1) to retrieve the latest state
+        # Write current dataset attributes (history, images, filters) into the
+        # database. They can be used (1) to retrieve the latest state
         # when the job fails, (2) from within endpoints that need up-to-date
        # information
-        with open(workflow_dir_local / HISTORY_FILENAME, "w") as f:
-            json.dump(tmp_history, f, indent=2)
-        with open(workflow_dir_local / FILTERS_FILENAME, "w") as f:
-            json.dump(tmp_filters, f, indent=2)
-        with open(workflow_dir_local / IMAGES_FILENAME, "w") as f:
-            json.dump(tmp_images, f, indent=2)
+        with next(get_sync_db()) as db:
+            db_dataset = db.get(DatasetV2, dataset.id)
+            db_dataset.history[-1]["status"] = WorkflowTaskStatusTypeV2.DONE
+            db_dataset.filters = tmp_filters
+            db_dataset.images = tmp_images
+            for attribute_name in ["filters", "history", "images"]:
+                flag_modified(db_dataset, attribute_name)
+            db.merge(db_dataset)
+            db.commit()
 
         logger.debug(f'END {wftask.order}-th task (name="{task_name}")')
-
-    # NOTE: tmp_history only contains the newly-added history items (to be
-    # appended to the original history), while tmp_filters and tmp_images
-    # represent the new attributes (to replace the original ones)
-    result = dict(
-        history=tmp_history,
-        filters=tmp_filters,
-        images=tmp_images,
-    )
-    return result
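`flag_modified` shows up throughout this release because SQLAlchemy does not detect in-place mutation of JSON-typed columns: appending to `dataset.history` leaves the ORM unaware that anything changed. A self-contained sketch with a hypothetical `Dataset` mapped class (fractal-server's `DatasetV2` follows the same pattern):

```python
from sqlalchemy import JSON, Column, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.orm.attributes import flag_modified

Base = declarative_base()


class Dataset(Base):
    __tablename__ = "dataset"
    id = Column(Integer, primary_key=True)
    history = Column(JSON)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as db:
    db.add(Dataset(id=1, history=[]))
    db.commit()
    ds = db.get(Dataset, 1)
    ds.history.append({"status": "submitted"})  # in-place: not auto-tracked
    flag_modified(ds, "history")  # tell the ORM the column changed
    db.commit()

with Session(engine) as db:
    assert db.get(Dataset, 1).history == [{"status": "submitted"}]
```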
fractal_server/app/security/__init__.py
CHANGED
@@ -254,10 +254,15 @@ class UserManager(IntegerIDMixin, BaseUserManager[UserOAuth, int]):
 
         if this_user.oauth_accounts and settings.MAIL_SETTINGS is not None:
             try:
+                logger.info(
+                    "START sending email about new signup to "
+                    f"{settings.MAIL_SETTINGS.recipients}."
+                )
                 mail_new_oauth_signup(
                     msg=f"New user registered: '{this_user.email}'.",
                     mail_settings=settings.MAIL_SETTINGS,
                 )
+                logger.info("END sending email about new signup.")
             except Exception as e:
                 logger.error(
                     "ERROR sending notification email after oauth "
fractal_server/app/security/signup_email.py
CHANGED
@@ -13,7 +13,7 @@ def mail_new_oauth_signup(msg: str, mail_settings: MailSettings):
     mail_msg = EmailMessage()
     mail_msg.set_content(msg)
     mail_msg["From"] = formataddr((mail_settings.sender, mail_settings.sender))
-    mail_msg["To"] = ",".join(
+    mail_msg["To"] = ", ".join(
         [
             formataddr((recipient, recipient))
             for recipient in mail_settings.recipients
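The one-character fix above switches the recipient separator from `","` to `", "`, the conventional form for an RFC 5322 address list. Standalone illustration (the addresses are made up):

```python
from email.message import EmailMessage
from email.utils import formataddr

recipients = ["admin@example.org", "ops@example.org"]
msg = EmailMessage()
msg["To"] = ", ".join(
    formataddr((recipient, recipient)) for recipient in recipients
)
# Each entry renders as '"name" <address>'; entries are now separated
# by a comma plus space.
print(msg["To"])
```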
{fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-fractal_server/__init__.py,sha256=
+fractal_server/__init__.py,sha256=mDKrGJqt2VCe76rX51KUhjV5LzNikbT0m0A7-304kpM,25
 fractal_server/__main__.py,sha256=D2YTmSowmXNyvqOjW_HeItCZT2UliWlySl_owicaZg0,8026
 fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
 fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -35,7 +35,7 @@ fractal_server/app/routes/admin/v2/task_group_lifecycle.py,sha256=0e0ZJ_k75TVHaT
 fractal_server/app/routes/api/__init__.py,sha256=2IDheFi0OFdsUg7nbUiyahqybvpgXqeHUXIL2QtWrQQ,641
 fractal_server/app/routes/api/v1/__init__.py,sha256=Y2HQdG197J0a7DyQEE2jn53IfxD0EHGhzK1I2JZuEck,958
 fractal_server/app/routes/api/v1/_aux_functions.py,sha256=P9Q48thGH95w0h5cacYoibxqgiiLW4oqZ8rNJ2LIISY,13219
-fractal_server/app/routes/api/v1/dataset.py,sha256=
+fractal_server/app/routes/api/v1/dataset.py,sha256=7P2VugyaHDhAhUK2EUDksj7O2SgSxRsR7dMDMQB_dPI,17288
 fractal_server/app/routes/api/v1/job.py,sha256=0jGxvu0xNQnWuov2qnoo9yE7Oat37XbcVn4Ute-UsiE,5370
 fractal_server/app/routes/api/v1/project.py,sha256=3NsdNXLIsE8QiNgKP1Kp1-B0zYG0Zi5HKBzWA0LjlQg,15551
 fractal_server/app/routes/api/v1/task.py,sha256=eW89nMCjpD4G6tHXDo2qGBKqWaPirjH6M3hpdJQhfa0,6528
@@ -50,7 +50,7 @@ fractal_server/app/routes/api/v2/dataset.py,sha256=Y6uZz--YSEGgnPYu05rZ9sr1Ug08b
 fractal_server/app/routes/api/v2/images.py,sha256=JR1rR6qEs81nacjriOXAOBQjAbCXF4Ew7M7mkWdxBU0,7920
 fractal_server/app/routes/api/v2/job.py,sha256=Bga2Kz1OjvDIdxZObWaaXVhNIhC_5JKhKRjEH2_ayEE,5157
 fractal_server/app/routes/api/v2/project.py,sha256=eWYFJ7F2ZYQcpi-_n-rhPF-Q4gJhzYBsVGYFhHZZXAE,6653
-fractal_server/app/routes/api/v2/status.py,sha256=
+fractal_server/app/routes/api/v2/status.py,sha256=_cDZW-ESYw6zpf-lLFFqko5bLpKhqKrCM6yv1OfqxN4,6300
 fractal_server/app/routes/api/v2/submit.py,sha256=cQwt0oK8xjHMGA_bQrw4Um8jd_aCvgmWfoqSQDh12hQ,8246
 fractal_server/app/routes/api/v2/task.py,sha256=K0ik33t7vL8BAK5S7fqyJDNdRK4stGqb_73bSa8tvPE,7159
 fractal_server/app/routes/api/v2/task_collection.py,sha256=9p8w9UnN6RFszC1ohy9Uo3I4HIMVdfD8fYGWuQqzxMU,12682
@@ -94,13 +94,13 @@ fractal_server/app/runner/executors/slurm/sudo/_subprocess_run_as_user.py,sha256
 fractal_server/app/runner/executors/slurm/sudo/executor.py,sha256=FVgx2mxqCLOhSoH3UTAeNc0BT0eJaxHMglGzGYePGPM,47439
 fractal_server/app/runner/executors/slurm/utils_executors.py,sha256=naPyJI0I3lD-sYHbSXbMFGUBK4h_SggA5V91Z1Ch1Xg,1416
 fractal_server/app/runner/extract_archive.py,sha256=tLpjDrX47OjTNhhoWvm6iNukg8KoieWyTb7ZfvE9eWU,2483
-fractal_server/app/runner/filenames.py,sha256=
+fractal_server/app/runner/filenames.py,sha256=h4iOHD5z_f1y-bFJL-FXAws7k7SwmBmCr8y28m-EGcY,146
 fractal_server/app/runner/run_subprocess.py,sha256=c3JbYXq3hX2aaflQU19qJ5Xs6J6oXGNvnTEoAfv2bxc,959
 fractal_server/app/runner/set_start_and_last_task_index.py,sha256=-q4zVybAj8ek2XlbENKlfOAJ39hT_zoJoZkqzDqiAMY,1254
 fractal_server/app/runner/shutdown.py,sha256=I_o2iYKJwzku0L3E85ETjrve3QPECygR5xhhsAo5huM,2910
 fractal_server/app/runner/task_files.py,sha256=sd_MpJ01C8c9QTO8GzGMidFGdlq_hXX_ARDRhd_YMnI,3762
 fractal_server/app/runner/v1/__init__.py,sha256=VvJFk4agX2X3fQfDcoNmOB2ouNCaQU7dAqaFmpcdP8I,15063
-fractal_server/app/runner/v1/_common.py,sha256=
+fractal_server/app/runner/v1/_common.py,sha256=MALqlDwvvniBT_z2cuyEHdiOmzSp9lH4BEGGitFggAQ,21561
 fractal_server/app/runner/v1/_local/__init__.py,sha256=KlSML4LqF4p1IfhSd8tAkiu3aeDzifeanuNXjATDsYE,6929
 fractal_server/app/runner/v1/_local/_local_config.py,sha256=hM7SPxR07luXPcXdrWXRpEB2uOyjSSRUdqW3QBKJn9c,3147
 fractal_server/app/runner/v1/_local/_submit_setup.py,sha256=XyBDPb4IYdKEEnzLYdcYteIHWVWofJxKMmQCyRkn5Bc,1509
@@ -109,26 +109,26 @@ fractal_server/app/runner/v1/_slurm/__init__.py,sha256=9pZxqJYcLBk8eZkSgP-L_qvd9
 fractal_server/app/runner/v1/_slurm/_submit_setup.py,sha256=KO9c694d318adoPQh9UGwxLkw4fRIgybQ5h7QHQKLXQ,2828
 fractal_server/app/runner/v1/_slurm/get_slurm_config.py,sha256=6pQNNx997bLIfLp0guF09t_O0ZYRXnbEGLktSAcKnic,5999
 fractal_server/app/runner/v1/common.py,sha256=_L-vjLnWato80VdlB_BFN4G8P4jSM07u-5cnl1T3S34,3294
-fractal_server/app/runner/v1/handle_failed_job.py,sha256=
-fractal_server/app/runner/v2/__init__.py,sha256=
-fractal_server/app/runner/v2/_local/__init__.py,sha256=
+fractal_server/app/runner/v1/handle_failed_job.py,sha256=R8IsM_ucX0_lqFCly8BYuzf-VAVafE5wj_1JXapnxeQ,4696
+fractal_server/app/runner/v2/__init__.py,sha256=txN9kUugjxZZqLZ8CReXz5JHDK_0MJQvkVSlaMIPI_E,15184
+fractal_server/app/runner/v2/_local/__init__.py,sha256=bgN-fUYh6rgZSrBaCThNi2A5naOAIqeynQy_B6r7iu4,5529
 fractal_server/app/runner/v2/_local/_local_config.py,sha256=9oi209Dlp35ANfxb_DISqmMKKc6DPaMsmYVWbZLseME,3630
 fractal_server/app/runner/v2/_local/_submit_setup.py,sha256=MucNOo8Er0F5ZIwH7CnTeXgnFMc6d3pKPkv563QNVi0,1630
 fractal_server/app/runner/v2/_local/executor.py,sha256=QrJlD77G6q4WohoJQO7XXbvi2RlCUsNvMnPDEZIoAqA,3620
-fractal_server/app/runner/v2/_local_experimental/__init__.py,sha256=
+fractal_server/app/runner/v2/_local_experimental/__init__.py,sha256=Gb8AmDoZ1Q2b79eLNv8ZW4KNFqihuaiN5zBWbP619ZU,5247
 fractal_server/app/runner/v2/_local_experimental/_local_config.py,sha256=QiS5ODe-iGmUQdIT8QgpbyMc7-ZpIRv1V_f2q3qfPQ8,3211
 fractal_server/app/runner/v2/_local_experimental/_submit_setup.py,sha256=we7r-sQf0CJ9gxbfbgHcYdC6pKjx8eXweljIjthxkv8,1212
 fractal_server/app/runner/v2/_local_experimental/executor.py,sha256=plvEqqdcXOSohYsQoykYlyDwCING7OO5h-4XAZtwdPs,5503
 fractal_server/app/runner/v2/_slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/v2/_slurm_common/get_slurm_config.py,sha256=UdkoFF0HF_TdKbay-d9bjkxT2ltcOE5i8H_FoOu64HU,6202
-fractal_server/app/runner/v2/_slurm_ssh/__init__.py,sha256=
+fractal_server/app/runner/v2/_slurm_ssh/__init__.py,sha256=ZeiXWoex96FcvG8DZQ5PJPJ922-QZC_XWegJxWtm1rk,4147
 fractal_server/app/runner/v2/_slurm_ssh/_submit_setup.py,sha256=a5_FDPH_yxYmrjAjMRLgh_Y4DSG3mRslCLQodGM3-t4,2838
-fractal_server/app/runner/v2/_slurm_sudo/__init__.py,sha256=
+fractal_server/app/runner/v2/_slurm_sudo/__init__.py,sha256=dOwfDItdcM1pS99xxemwePpjxqy8zAU-pCmmIm4u1io,3930
 fractal_server/app/runner/v2/_slurm_sudo/_submit_setup.py,sha256=a5_FDPH_yxYmrjAjMRLgh_Y4DSG3mRslCLQodGM3-t4,2838
 fractal_server/app/runner/v2/deduplicate_list.py,sha256=-imwO7OB7ATADEnqVbTElUwoY0YIJCTf_SbWJNN9OZg,639
-fractal_server/app/runner/v2/handle_failed_job.py,sha256=
+fractal_server/app/runner/v2/handle_failed_job.py,sha256=-zFWw4d208bQEFUF_sAdH2LdHEARyg1FC8BENr1SjhU,2045
 fractal_server/app/runner/v2/merge_outputs.py,sha256=IHuHqbKmk97K35BFvTrKVBs60z3e_--OzXTnsvmA02c,1281
-fractal_server/app/runner/v2/runner.py,sha256=
+fractal_server/app/runner/v2/runner.py,sha256=u-AhvSrZ_ca18LEOgJTwnXapvNq0OnRjbwOsM7NAvcw,12964
 fractal_server/app/runner/v2/runner_functions.py,sha256=BLREIcQaE6FSc2AEJyZuiYk6rGazEz_9gprUqUZDljs,9488
 fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=1fWvQ6YZUUnDhO_mipXC5hnaT-zK-GHxg8ayoxZX82k,3648
 fractal_server/app/runner/v2/task_interface.py,sha256=hT3p-bRGsLNAR_dNv_PYFoqzIF_EQtSsGwl38j1haYA,1824
@@ -160,8 +160,8 @@ fractal_server/app/schemas/v2/task_collection.py,sha256=9c_yyFcVBXdAZpQQniy1bROh
 fractal_server/app/schemas/v2/task_group.py,sha256=EPQ1WHjIA8WDrpsTfvfRESjwUVzu6jKiaKZx45b36N4,3215
 fractal_server/app/schemas/v2/workflow.py,sha256=-KWvXnbHBFA3pj5n7mfSyLKJQSqkJmoziIEe7mpLl3M,1875
 fractal_server/app/schemas/v2/workflowtask.py,sha256=FthKErVgx3a-k7WVk3nqJe1G-fl_iHND4rVrDXJ0F84,5942
-fractal_server/app/security/__init__.py,sha256=
-fractal_server/app/security/signup_email.py,sha256=
+fractal_server/app/security/__init__.py,sha256=qn6idYgl-p5HWea0gTVnz4JnkoxGEkmQjPzvKpDWT0I,14035
+fractal_server/app/security/signup_email.py,sha256=DrL51UdTSrgjleynMD5CRZwTSOpPrZ96fasRV0fvxDE,1165
 fractal_server/app/user_settings.py,sha256=OP1yiYKtPadxwM51_Q0hdPk3z90TCN4z1BLpQsXyWiU,1316
 fractal_server/config.py,sha256=9rAzw7OO6ZeHEz-I8NJHuGoHf4xCHxfFLyRNZQD9ytY,27019
 fractal_server/data_migrations/README.md,sha256=_3AEFvDg9YkybDqCLlFPdDmGJvr6Tw7HRI14aZ3LOIw,398
@@ -238,8 +238,8 @@ fractal_server/tasks/v2/utils_templates.py,sha256=07TZpJ0Mh_A4lXVXrrH2o1VLFFGwxe
 fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
 fractal_server/utils.py,sha256=utvmBx8K9I8hRWFquxna2pBaOqe0JifDL_NVPmihEJI,3525
 fractal_server/zip_tools.py,sha256=GjDgo_sf6V_DDg6wWeBlZu5zypIxycn_l257p_YVKGc,4876
-fractal_server-2.10.4.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
-fractal_server-2.10.4.dist-info/METADATA,sha256=
-fractal_server-2.10.4.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
-fractal_server-2.10.4.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
-fractal_server-2.10.4.dist-info/RECORD,,
+fractal_server-2.11.0a0.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+fractal_server-2.11.0a0.dist-info/METADATA,sha256=aeiYrMcHVQT6PdF1ZBf1VXrLNlHNMjxDWQ_WJYNxElI,4564
+fractal_server-2.11.0a0.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
+fractal_server-2.11.0a0.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+fractal_server-2.11.0a0.dist-info/RECORD,,
{fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/LICENSE
File without changes
{fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/WHEEL
File without changes
{fractal_server-2.10.4.dist-info → fractal_server-2.11.0a0.dist-info}/entry_points.txt
File without changes