fractal-server 2.13.1__py3-none-any.whl → 2.14.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
- fractal_server/__init__.py +1 -1
- fractal_server/__main__.py +3 -1
- fractal_server/app/models/linkusergroup.py +6 -2
- fractal_server/app/models/v2/__init__.py +7 -1
- fractal_server/app/models/v2/dataset.py +1 -11
- fractal_server/app/models/v2/history.py +78 -0
- fractal_server/app/models/v2/job.py +10 -3
- fractal_server/app/models/v2/task_group.py +2 -2
- fractal_server/app/models/v2/workflow.py +1 -1
- fractal_server/app/models/v2/workflowtask.py +1 -1
- fractal_server/app/routes/admin/v2/accounting.py +18 -28
- fractal_server/app/routes/admin/v2/task.py +1 -1
- fractal_server/app/routes/admin/v2/task_group.py +0 -17
- fractal_server/app/routes/api/__init__.py +1 -1
- fractal_server/app/routes/api/v2/__init__.py +8 -2
- fractal_server/app/routes/api/v2/_aux_functions.py +66 -0
- fractal_server/app/routes/api/v2/_aux_functions_history.py +166 -0
- fractal_server/app/routes/api/v2/dataset.py +0 -17
- fractal_server/app/routes/api/v2/history.py +544 -0
- fractal_server/app/routes/api/v2/images.py +31 -43
- fractal_server/app/routes/api/v2/job.py +30 -0
- fractal_server/app/routes/api/v2/project.py +1 -53
- fractal_server/app/routes/api/v2/{status.py → status_legacy.py} +6 -6
- fractal_server/app/routes/api/v2/submit.py +16 -14
- fractal_server/app/routes/api/v2/task.py +3 -10
- fractal_server/app/routes/api/v2/task_collection_custom.py +4 -9
- fractal_server/app/routes/api/v2/task_group.py +0 -17
- fractal_server/app/routes/api/v2/verify_image_types.py +61 -0
- fractal_server/app/routes/api/v2/workflow.py +28 -69
- fractal_server/app/routes/api/v2/workflowtask.py +53 -50
- fractal_server/app/routes/auth/group.py +0 -16
- fractal_server/app/routes/auth/oauth.py +5 -3
- fractal_server/app/routes/pagination.py +47 -0
- fractal_server/app/runner/components.py +0 -3
- fractal_server/app/runner/compress_folder.py +57 -29
- fractal_server/app/runner/exceptions.py +4 -0
- fractal_server/app/runner/executors/base_runner.py +157 -0
- fractal_server/app/runner/{v2/_local/_local_config.py → executors/local/get_local_config.py} +7 -9
- fractal_server/app/runner/executors/local/runner.py +248 -0
- fractal_server/app/runner/executors/{slurm → slurm_common}/_batching.py +1 -1
- fractal_server/app/runner/executors/{slurm → slurm_common}/_slurm_config.py +9 -7
- fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py +868 -0
- fractal_server/app/runner/{v2/_slurm_common → executors/slurm_common}/get_slurm_config.py +48 -17
- fractal_server/app/runner/executors/{slurm → slurm_common}/remote.py +36 -47
- fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py +134 -0
- fractal_server/app/runner/executors/slurm_ssh/runner.py +268 -0
- fractal_server/app/runner/executors/slurm_sudo/__init__.py +0 -0
- fractal_server/app/runner/executors/{slurm/sudo → slurm_sudo}/_subprocess_run_as_user.py +2 -83
- fractal_server/app/runner/executors/slurm_sudo/runner.py +193 -0
- fractal_server/app/runner/extract_archive.py +1 -3
- fractal_server/app/runner/task_files.py +134 -87
- fractal_server/app/runner/v2/__init__.py +0 -399
- fractal_server/app/runner/v2/_local.py +88 -0
- fractal_server/app/runner/v2/{_slurm_ssh/__init__.py → _slurm_ssh.py} +20 -19
- fractal_server/app/runner/v2/{_slurm_sudo/__init__.py → _slurm_sudo.py} +17 -15
- fractal_server/app/runner/v2/db_tools.py +119 -0
- fractal_server/app/runner/v2/runner.py +206 -95
- fractal_server/app/runner/v2/runner_functions.py +488 -187
- fractal_server/app/runner/v2/runner_functions_low_level.py +40 -43
- fractal_server/app/runner/v2/submit_workflow.py +358 -0
- fractal_server/app/runner/v2/task_interface.py +31 -0
- fractal_server/app/schemas/_validators.py +13 -24
- fractal_server/app/schemas/user.py +10 -7
- fractal_server/app/schemas/user_settings.py +9 -21
- fractal_server/app/schemas/v2/__init__.py +9 -1
- fractal_server/app/schemas/v2/dataset.py +12 -94
- fractal_server/app/schemas/v2/dumps.py +26 -9
- fractal_server/app/schemas/v2/history.py +80 -0
- fractal_server/app/schemas/v2/job.py +15 -8
- fractal_server/app/schemas/v2/manifest.py +14 -7
- fractal_server/app/schemas/v2/project.py +9 -7
- fractal_server/app/schemas/v2/status_legacy.py +35 -0
- fractal_server/app/schemas/v2/task.py +72 -77
- fractal_server/app/schemas/v2/task_collection.py +14 -32
- fractal_server/app/schemas/v2/task_group.py +10 -9
- fractal_server/app/schemas/v2/workflow.py +10 -11
- fractal_server/app/schemas/v2/workflowtask.py +2 -21
- fractal_server/app/security/__init__.py +3 -3
- fractal_server/app/security/signup_email.py +2 -2
- fractal_server/config.py +41 -46
- fractal_server/images/tools.py +23 -0
- fractal_server/migrations/versions/47351f8c7ebc_drop_dataset_filters.py +50 -0
- fractal_server/migrations/versions/9db60297b8b2_set_ondelete.py +250 -0
- fractal_server/migrations/versions/c90a7c76e996_job_id_in_history_run.py +41 -0
- fractal_server/migrations/versions/e81103413827_add_job_type_filters.py +36 -0
- fractal_server/migrations/versions/f37aceb45062_make_historyunit_logfile_required.py +39 -0
- fractal_server/migrations/versions/fbce16ff4e47_new_history_items.py +120 -0
- fractal_server/ssh/_fabric.py +28 -14
- fractal_server/tasks/v2/local/collect.py +2 -2
- fractal_server/tasks/v2/ssh/collect.py +2 -2
- fractal_server/tasks/v2/templates/2_pip_install.sh +1 -1
- fractal_server/tasks/v2/templates/4_pip_show.sh +1 -1
- fractal_server/tasks/v2/utils_background.py +0 -19
- fractal_server/tasks/v2/utils_database.py +30 -17
- fractal_server/tasks/v2/utils_templates.py +6 -0
- {fractal_server-2.13.1.dist-info → fractal_server-2.14.0.dist-info}/METADATA +4 -4
- {fractal_server-2.13.1.dist-info → fractal_server-2.14.0.dist-info}/RECORD +106 -96
- {fractal_server-2.13.1.dist-info → fractal_server-2.14.0.dist-info}/WHEEL +1 -1
- fractal_server/app/runner/executors/slurm/ssh/_executor_wait_thread.py +0 -126
- fractal_server/app/runner/executors/slurm/ssh/_slurm_job.py +0 -116
- fractal_server/app/runner/executors/slurm/ssh/executor.py +0 -1386
- fractal_server/app/runner/executors/slurm/sudo/_check_jobs_status.py +0 -71
- fractal_server/app/runner/executors/slurm/sudo/_executor_wait_thread.py +0 -130
- fractal_server/app/runner/executors/slurm/sudo/executor.py +0 -1281
- fractal_server/app/runner/v2/_local/__init__.py +0 -132
- fractal_server/app/runner/v2/_local/_submit_setup.py +0 -52
- fractal_server/app/runner/v2/_local/executor.py +0 -100
- fractal_server/app/runner/v2/_slurm_ssh/_submit_setup.py +0 -83
- fractal_server/app/runner/v2/_slurm_sudo/_submit_setup.py +0 -83
- fractal_server/app/runner/v2/handle_failed_job.py +0 -59
- fractal_server/app/schemas/v2/status.py +0 -16
- /fractal_server/app/{runner/executors/slurm → history}/__init__.py +0 -0
- /fractal_server/app/runner/executors/{slurm/ssh → local}/__init__.py +0 -0
- /fractal_server/app/runner/executors/{slurm/sudo → slurm_common}/__init__.py +0 -0
- /fractal_server/app/runner/executors/{_job_states.py → slurm_common/_job_states.py} +0 -0
- /fractal_server/app/runner/executors/{slurm → slurm_common}/utils_executors.py +0 -0
- /fractal_server/app/runner/{v2/_slurm_common → executors/slurm_ssh}/__init__.py +0 -0
- {fractal_server-2.13.1.dist-info → fractal_server-2.14.0.dist-info}/LICENSE +0 -0
- {fractal_server-2.13.1.dist-info → fractal_server-2.14.0.dist-info}/entry_points.txt +0 -0
--- /dev/null
+++ b/fractal_server/app/runner/v2/db_tools.py
@@ -0,0 +1,119 @@
+from typing import Any
+
+from sqlalchemy.dialects.postgresql import insert as pg_insert
+from sqlalchemy.orm import Session
+from sqlmodel import update
+
+from fractal_server.app.models.v2 import HistoryImageCache
+from fractal_server.app.models.v2 import HistoryRun
+from fractal_server.app.models.v2 import HistoryUnit
+from fractal_server.app.schemas.v2 import HistoryUnitStatus
+from fractal_server.logger import set_logger
+
+
+_CHUNK_SIZE = 2_000
+
+logger = set_logger(__name__)
+
+
+def update_status_of_history_run(
+    *,
+    history_run_id: int,
+    status: HistoryUnitStatus,
+    db_sync: Session,
+) -> None:
+    run = db_sync.get(HistoryRun, history_run_id)
+    if run is None:
+        raise ValueError(f"HistoryRun {history_run_id} not found.")
+    run.status = status
+    db_sync.merge(run)
+    db_sync.commit()
+
+
+def update_status_of_history_unit(
+    *,
+    history_unit_id: int,
+    status: HistoryUnitStatus,
+    db_sync: Session,
+) -> None:
+    unit = db_sync.get(HistoryUnit, history_unit_id)
+    if unit is None:
+        raise ValueError(f"HistoryUnit {history_unit_id} not found.")
+    unit.status = status
+    db_sync.merge(unit)
+    db_sync.commit()
+
+
+def bulk_update_status_of_history_unit(
+    *,
+    history_unit_ids: list[int],
+    status: HistoryUnitStatus,
+    db_sync: Session,
+) -> None:
+
+    len_history_unit_ids = len(history_unit_ids)
+    logger.debug(
+        f"[bulk_update_status_of_history_unit] {len_history_unit_ids=}."
+    )
+    for ind in range(0, len_history_unit_ids, _CHUNK_SIZE):
+        db_sync.execute(
+            update(HistoryUnit)
+            .where(
+                HistoryUnit.id.in_(history_unit_ids[ind : ind + _CHUNK_SIZE])
+            )
+            .values(status=status)
+        )
+        # NOTE: keeping commit within the for loop is much more efficient
+        db_sync.commit()
+
+
+def bulk_upsert_image_cache_fast(
+    *,
+    list_upsert_objects: list[dict[str, Any]],
+    db: Session,
+) -> None:
+    """
+    Insert or update many objects into `HistoryImageCache` and commit
+
+    This function is an optimized version of
+
+    ```python
+    for obj in list_upsert_objects:
+        db.merge(**obj)
+    db.commit()
+    ```
+
+    See docs at
+    https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#insert-on-conflict-upsert
+
+    NOTE: we tried to replace `index_elements` with
+    `constraint="pk_historyimagecache"`, but it did not work as expected.
+
+    Arguments:
+        list_upsert_objects:
+            List of dictionaries for objects to be upsert-ed.
+        db: A sync database session
+    """
+    len_list_upsert_objects = len(list_upsert_objects)
+
+    logger.debug(f"[bulk_upsert_image_cache_fast] {len_list_upsert_objects=}.")
+
+    if len_list_upsert_objects == 0:
+        return None
+
+    for ind in range(0, len_list_upsert_objects, _CHUNK_SIZE):
+        stmt = pg_insert(HistoryImageCache).values(
+            list_upsert_objects[ind : ind + _CHUNK_SIZE]
+        )
+        stmt = stmt.on_conflict_do_update(
+            index_elements=[
+                HistoryImageCache.zarr_url,
+                HistoryImageCache.dataset_id,
+                HistoryImageCache.workflowtask_id,
+            ],
+            set_=dict(
+                latest_history_unit_id=stmt.excluded.latest_history_unit_id
+            ),
+        )
+        db.execute(stmt)
+    db.commit()
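The new `db_tools.py` module batches its writes in `_CHUNK_SIZE = 2_000` chunks and uses PostgreSQL's `INSERT ... ON CONFLICT DO UPDATE` for the image cache. A minimal usage sketch follows; the dataset/workflowtask IDs and zarr URLs are made up for illustration, and in a real database each `latest_history_unit_id` must reference an existing `HistoryUnit` row:

```python
# Hypothetical usage of bulk_upsert_image_cache_fast; IDs and zarr URLs are
# made up, and in a real database latest_history_unit_id must reference an
# existing HistoryUnit row.
from fractal_server.app.db import get_sync_db
from fractal_server.app.runner.v2.db_tools import bulk_upsert_image_cache_fast

cache_rows = [
    # One dict per (zarr_url, dataset_id, workflowtask_id) primary key;
    # on conflict, only latest_history_unit_id is overwritten.
    dict(
        zarr_url=f"/tmp/plate.zarr/A/01/{ind}",
        dataset_id=1,
        workflowtask_id=2,
        latest_history_unit_id=ind + 1,
    )
    for ind in range(5_000)  # processed in three 2_000-row chunks
]

with next(get_sync_db()) as db:
    bulk_upsert_image_cache_fast(list_upsert_objects=cache_rows, db=db)
```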
--- a/fractal_server/app/runner/v2/runner.py
+++ b/fractal_server/app/runner/v2/runner.py
@@ -1,28 +1,37 @@
 import logging
-from concurrent.futures import ThreadPoolExecutor
 from copy import copy
 from copy import deepcopy
 from pathlib import Path
+from typing import Any
 from typing import Callable
+from typing import Literal
 from typing import Optional
 
 from sqlalchemy.orm.attributes import flag_modified
+from sqlmodel import delete
 
 from ....images import SingleImage
 from ....images.tools import filter_image_list
 from ....images.tools import find_image_by_zarr_url
 from ..exceptions import JobExecutionError
-from .
+from .merge_outputs import merge_outputs
 from .runner_functions import run_v2_task_compound
 from .runner_functions import run_v2_task_non_parallel
 from .runner_functions import run_v2_task_parallel
+from .runner_functions import SubmissionOutcome
 from .task_interface import TaskOutput
 from fractal_server.app.db import get_sync_db
 from fractal_server.app.models.v2 import AccountingRecord
 from fractal_server.app.models.v2 import DatasetV2
+from fractal_server.app.models.v2 import HistoryImageCache
+from fractal_server.app.models.v2 import HistoryRun
+from fractal_server.app.models.v2 import TaskGroupV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
-from fractal_server.app.
-from fractal_server.app.
+from fractal_server.app.runner.executors.base_runner import BaseRunner
+from fractal_server.app.runner.v2.db_tools import update_status_of_history_run
+from fractal_server.app.schemas.v2 import HistoryUnitStatus
+from fractal_server.app.schemas.v2 import TaskDumpV2
+from fractal_server.app.schemas.v2 import TaskGroupDumpV2
 from fractal_server.images.models import AttributeFiltersType
 from fractal_server.images.tools import merge_type_filters
 
@@ -31,27 +40,40 @@ def execute_tasks_v2(
     *,
     wf_task_list: list[WorkflowTaskV2],
     dataset: DatasetV2,
-
+    runner: BaseRunner,
     user_id: int,
     workflow_dir_local: Path,
+    job_id: int,
     workflow_dir_remote: Optional[Path] = None,
     logger_name: Optional[str] = None,
-
+    get_runner_config: Callable[
+        [
+            WorkflowTaskV2,
+            Literal["non_parallel", "parallel"],
+            Optional[Path],
+        ],
+        Any,
+    ],
+    job_type_filters: dict[str, bool],
     job_attribute_filters: AttributeFiltersType,
 ) -> None:
     logger = logging.getLogger(logger_name)
 
     if not workflow_dir_local.exists():
         logger.warning(
-            f"Now creating {workflow_dir_local}, "
-            "
+            f"Now creating {workflow_dir_local}, but it "
+            "should have already happened."
         )
         workflow_dir_local.mkdir()
 
+    # For local backend, remote and local folders are the same
+    if workflow_dir_remote is None:
+        workflow_dir_remote = workflow_dir_local
+
     # Initialize local dataset attributes
     zarr_dir = dataset.zarr_dir
     tmp_images = deepcopy(dataset.images)
-    current_dataset_type_filters =
+    current_dataset_type_filters = copy(job_type_filters)
 
     for wftask in wf_task_list:
         task = wftask.task
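The new `get_runner_config` argument lets `execute_tasks_v2` ask the backend for per-task configuration instead of going through the removed `_submit_setup.py` modules. This release ships concrete implementations in `executors/local/get_local_config.py` and `executors/slurm_common/get_slurm_config.py` (see the file list above); the sketch below only illustrates the call shape implied by the annotation, and its parameter names and return value are assumptions:

```python
# Shape sketch for the new get_runner_config callable; parameter names and
# the return value are illustrative assumptions, not the actual helpers in
# executors/local/get_local_config.py or
# executors/slurm_common/get_slurm_config.py.
from pathlib import Path
from typing import Any, Literal, Optional


def get_runner_config_stub(
    wftask: Any,  # WorkflowTaskV2
    which_type: Literal["non_parallel", "parallel"],
    config_path: Optional[Path] = None,
) -> Any:
    # A real implementation reads per-task resource options from `wftask`
    # and returns a backend-specific configuration object.
    return {"which_type": which_type, "config_path": config_path}
```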
@@ -60,91 +82,143 @@ def execute_tasks_v2(
 
         # PRE TASK EXECUTION
 
-        #
-
-
-
-
-
-
-
-
-
-
-
+        # Filter images by types and attributes (in two steps)
+        if wftask.task_type in ["compound", "parallel", "non_parallel"]:
+            # Non-converter task
+            type_filters = copy(current_dataset_type_filters)
+            type_filters_patch = merge_type_filters(
+                task_input_types=task.input_types,
+                wftask_type_filters=wftask.type_filters,
+            )
+            type_filters.update(type_filters_patch)
+            type_filtered_images = filter_image_list(
+                images=tmp_images,
+                type_filters=type_filters,
+                attribute_filters=None,
+            )
+            num_available_images = len(type_filtered_images)
+            filtered_images = filter_image_list(
+                images=type_filtered_images,
+                type_filters=None,
+                attribute_filters=job_attribute_filters,
+            )
+        else:
+            # Converter task
+            filtered_images = []
+            num_available_images = 0
 
-        # First, set status SUBMITTED in dataset.history for each wftask
         with next(get_sync_db()) as db:
-
-
-
-
-
-
-
-
+            # Create dumps for workflowtask and taskgroup
+            workflowtask_dump = dict(
+                **wftask.model_dump(exclude={"task"}),
+                task=TaskDumpV2(**wftask.task.model_dump()).model_dump(),
+            )
+            task_group = db.get(TaskGroupV2, wftask.task.taskgroupv2_id)
+            task_group_dump = TaskGroupDumpV2(
+                **task_group.model_dump()
             ).model_dump()
-
-
-
+            # Create HistoryRun
+            history_run = HistoryRun(
+                dataset_id=dataset.id,
+                workflowtask_id=wftask.id,
+                job_id=job_id,
+                workflowtask_dump=workflowtask_dump,
+                task_group_dump=task_group_dump,
+                num_available_images=num_available_images,
+                status=HistoryUnitStatus.SUBMITTED,
+            )
+            db.add(history_run)
             db.commit()
+            db.refresh(history_run)
+            history_run_id = history_run.id
+
         # TASK EXECUTION (V2)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            if task.type in ["non_parallel", "converter_non_parallel"]:
+                outcomes_dict, num_tasks = run_v2_task_non_parallel(
+                    images=filtered_images,
+                    zarr_dir=zarr_dir,
+                    wftask=wftask,
+                    task=task,
+                    workflow_dir_local=workflow_dir_local,
+                    workflow_dir_remote=workflow_dir_remote,
+                    runner=runner,
+                    get_runner_config=get_runner_config,
+                    history_run_id=history_run_id,
+                    dataset_id=dataset.id,
+                    user_id=user_id,
+                    task_type=task.type,
+                )
+            elif task.type == "parallel":
+                outcomes_dict, num_tasks = run_v2_task_parallel(
+                    images=filtered_images,
+                    wftask=wftask,
+                    task=task,
+                    workflow_dir_local=workflow_dir_local,
+                    workflow_dir_remote=workflow_dir_remote,
+                    runner=runner,
+                    get_runner_config=get_runner_config,
+                    history_run_id=history_run_id,
+                    dataset_id=dataset.id,
+                    user_id=user_id,
+                )
+            elif task.type in ["compound", "converter_compound"]:
+                outcomes_dict, num_tasks = run_v2_task_compound(
+                    images=filtered_images,
+                    zarr_dir=zarr_dir,
+                    wftask=wftask,
+                    task=task,
+                    workflow_dir_local=workflow_dir_local,
+                    workflow_dir_remote=workflow_dir_remote,
+                    runner=runner,
+                    get_runner_config=get_runner_config,
+                    history_run_id=history_run_id,
+                    dataset_id=dataset.id,
+                    task_type=task.type,
+                    user_id=user_id,
+                )
+            else:
+                raise ValueError(f"Unexpected error: Invalid {task.type=}.")
+        except Exception as e:
+            outcomes_dict = {
+                0: SubmissionOutcome(
+                    result=None,
+                    exception=e,
+                )
+            }
+            num_tasks = 0
 
         # POST TASK EXECUTION
 
-
-
-
-
-
-
-
-        current_task_output
-
-
-
-            image_list_updates
-
-
-
+        non_failed_task_outputs = [
+            value.task_output
+            for value in outcomes_dict.values()
+            if value.task_output is not None
+        ]
+        if len(non_failed_task_outputs) > 0:
+            current_task_output = merge_outputs(non_failed_task_outputs)
+            # If `current_task_output` includes no images (to be created or
+            # removed), then flag all the input images as modified.
+            # See fractal-server issues #1374 and #2409.
+            if (
+                current_task_output.image_list_updates == []
+                and current_task_output.image_list_removals == []
+            ):
+                current_task_output = TaskOutput(
+                    image_list_updates=[
+                        dict(zarr_url=img["zarr_url"])
+                        for img in filtered_images
+                    ],
+                )
+        else:
+            current_task_output = TaskOutput()
 
         # Update image list
         num_new_images = 0
         current_task_output.check_zarr_urls_are_unique()
+        # NOTE: In principle we could make the task-output processing more
+        # granular, and also associate output-processing failures to history
+        # status.
         for image_obj in current_task_output.image_list_updates:
             image = image_obj.model_dump()
             # Edit existing image
@@ -264,23 +338,26 @@ def execute_tasks_v2(
         type_filters_from_task_manifest = task.output_types
         current_dataset_type_filters.update(type_filters_from_task_manifest)
 
-        # Write current dataset attributes (history, images, filters) into the
-        # database. They can be used (1) to retrieve the latest state
-        # when the job fails, (2) from within endpoints that need up-to-date
-        # information
         with next(get_sync_db()) as db:
+            # Write current dataset images into the database.
             db_dataset = db.get(DatasetV2, dataset.id)
-            db_dataset.history[-1]["status"] = WorkflowTaskStatusTypeV2.DONE
-            db_dataset.type_filters = current_dataset_type_filters
             db_dataset.images = tmp_images
-
-                "type_filters",
-                "history",
-                "images",
-            ]:
-                flag_modified(db_dataset, attribute_name)
+            flag_modified(db_dataset, "images")
             db.merge(db_dataset)
+
+            db.execute(
+                delete(HistoryImageCache)
+                .where(HistoryImageCache.dataset_id == dataset.id)
+                .where(HistoryImageCache.workflowtask_id == wftask.id)
+                .where(
+                    HistoryImageCache.zarr_url.in_(
+                        current_task_output.image_list_removals
+                    )
+                )
+            )
+
             db.commit()
+            db.close()  # NOTE: this is needed, but the reason is unclear
 
             # Create accounting record
             record = AccountingRecord(
@@ -291,4 +368,38 @@ def execute_tasks_v2(
             db.add(record)
             db.commit()
 
-
+            # Update `HistoryRun` entry, and raise an error if task failed
+            try:
+                first_exception = next(
+                    value.exception
+                    for value in outcomes_dict.values()
+                    if value.exception is not None
+                )
+                # An exception was found
+                update_status_of_history_run(
+                    history_run_id=history_run_id,
+                    status=HistoryUnitStatus.FAILED,
+                    db_sync=db,
+                )
+                logger.error(
+                    f'END {wftask.order}-th task (name="{task_name}") - '
+                    "ERROR."
+                )
+                # Raise first error
+                raise JobExecutionError(
+                    info=(
+                        f"An error occurred.\n"
+                        f"Original error:\n{first_exception}"
+                    )
+                )
+            except StopIteration:
+                # No exception was found
+                update_status_of_history_run(
+                    history_run_id=history_run_id,
+                    status=HistoryUnitStatus.DONE,
+                    db_sync=db,
+                )
+                db.commit()
+                logger.debug(
+                    f'END {wftask.order}-th task (name="{task_name}")'
+                )