fractal-server 2.14.0a3__py3-none-any.whl → 2.14.0a4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/__main__.py +3 -1
- fractal_server/app/history/__init__.py +4 -4
- fractal_server/app/history/image_updates.py +124 -143
- fractal_server/app/history/status_enum.py +2 -2
- fractal_server/app/models/v2/__init__.py +6 -4
- fractal_server/app/models/v2/history.py +44 -20
- fractal_server/app/routes/api/__init__.py +1 -1
- fractal_server/app/routes/api/v2/__init__.py +4 -0
- fractal_server/app/routes/api/v2/_aux_functions_history.py +49 -0
- fractal_server/app/routes/api/v2/dataset.py +0 -12
- fractal_server/app/routes/api/v2/history.py +301 -186
- fractal_server/app/routes/api/v2/project.py +0 -25
- fractal_server/app/routes/api/v2/status_legacy.py +168 -0
- fractal_server/app/routes/api/v2/workflow.py +2 -17
- fractal_server/app/routes/api/v2/workflowtask.py +41 -71
- fractal_server/app/routes/auth/oauth.py +5 -3
- fractal_server/app/runner/executors/local/runner.py +10 -55
- fractal_server/app/runner/executors/slurm_sudo/runner.py +171 -108
- fractal_server/app/runner/v2/__init__.py +0 -20
- fractal_server/app/runner/v2/runner.py +45 -58
- fractal_server/app/runner/v2/runner_functions.py +164 -22
- fractal_server/app/schemas/_validators.py +13 -24
- fractal_server/app/schemas/user.py +10 -7
- fractal_server/app/schemas/user_settings.py +9 -21
- fractal_server/app/schemas/v2/dataset.py +8 -6
- fractal_server/app/schemas/v2/job.py +9 -5
- fractal_server/app/schemas/v2/manifest.py +2 -6
- fractal_server/app/schemas/v2/project.py +9 -7
- fractal_server/app/schemas/v2/task.py +41 -77
- fractal_server/app/schemas/v2/task_collection.py +14 -32
- fractal_server/app/schemas/v2/task_group.py +10 -9
- fractal_server/app/schemas/v2/workflow.py +10 -11
- fractal_server/app/security/signup_email.py +2 -2
- fractal_server/config.py +31 -32
- fractal_server/migrations/versions/fbce16ff4e47_new_history_items.py +120 -0
- fractal_server/tasks/v2/templates/2_pip_install.sh +1 -1
- fractal_server/tasks/v2/utils_templates.py +6 -0
- {fractal_server-2.14.0a3.dist-info → fractal_server-2.14.0a4.dist-info}/METADATA +1 -1
- {fractal_server-2.14.0a3.dist-info → fractal_server-2.14.0a4.dist-info}/RECORD +43 -44
- fractal_server/app/runner/executors/slurm_sudo/_executor_wait_thread.py +0 -130
- fractal_server/app/schemas/v2/history.py +0 -23
- fractal_server/migrations/versions/87cd72a537a2_add_historyitem_table.py +0 -68
- fractal_server/migrations/versions/954ddc64425a_image_status.py +0 -63
- {fractal_server-2.14.0a3.dist-info → fractal_server-2.14.0a4.dist-info}/LICENSE +0 -0
- {fractal_server-2.14.0a3.dist-info → fractal_server-2.14.0a4.dist-info}/WHEEL +0 -0
- {fractal_server-2.14.0a3.dist-info → fractal_server-2.14.0a4.dist-info}/entry_points.txt +0 -0
fractal_server/app/runner/v2/runner.py

```diff
@@ -1,4 +1,3 @@
-import json
 import logging
 from copy import copy
 from copy import deepcopy
@@ -7,6 +6,7 @@ from typing import Callable
 from typing import Optional
 
 from sqlalchemy.orm.attributes import flag_modified
+from sqlmodel import update
 
 from ....images import SingleImage
 from ....images.tools import filter_image_list
@@ -18,11 +18,10 @@ from .runner_functions import run_v2_task_non_parallel
 from .runner_functions import run_v2_task_parallel
 from .task_interface import TaskOutput
 from fractal_server.app.db import get_sync_db
-from fractal_server.app.history.status_enum import HistoryItemImageStatus
+from fractal_server.app.history.status_enum import XXXStatus
 from fractal_server.app.models.v2 import AccountingRecord
 from fractal_server.app.models.v2 import DatasetV2
-from fractal_server.app.models.v2 import HistoryItemV2
-from fractal_server.app.models.v2 import ImageStatus
+from fractal_server.app.models.v2 import HistoryRun
 from fractal_server.app.models.v2 import TaskGroupV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
 from fractal_server.app.runner.executors.base_runner import BaseRunner
@@ -87,6 +86,7 @@ def execute_tasks_v2(
                 **wftask.model_dump(exclude={"task"}),
                 task=wftask.task.model_dump(),
             )
+
             # Exclude timestamps since they'd need to be serialized properly
             task_group = db.get(TaskGroupV2, wftask.task.taskgroupv2_id)
             task_group_dump = task_group.model_dump(
@@ -95,44 +95,18 @@ def execute_tasks_v2(
                     "timestamp_last_used",
                 }
             )
-            parameters_hash = str(
-                hash(
-                    json.dumps(
-                        [workflowtask_dump, task_group_dump],
-                        sort_keys=True,
-                        indent=None,
-                    ).encode("utf-8")
-                )
-            )
-            images = {
-                image["zarr_url"]: HistoryItemImageStatus.SUBMITTED
-                for image in filtered_images
-            }
-            history_item = HistoryItemV2(
+            history_run = HistoryRun(
                 dataset_id=dataset.id,
                 workflowtask_id=wftask.id,
                 workflowtask_dump=workflowtask_dump,
                 task_group_dump=task_group_dump,
-                parameters_hash=parameters_hash,
                 num_available_images=len(type_filtered_images),
-
-                images=images,
+                status=XXXStatus.SUBMITTED,
             )
-            db.add(history_item)
-            for image in filtered_images:
-                db.merge(
-                    ImageStatus(
-                        zarr_url=image["zarr_url"],
-                        workflowtask_id=wftask.id,
-                        dataset_id=dataset.id,
-                        parameters_hash=parameters_hash,
-                        status=HistoryItemImageStatus.SUBMITTED,
-                        logfile=None,
-                    )
-                )
+            db.add(history_run)
             db.commit()
-            db.refresh(history_item)
-            history_item_id = history_item.id
+            db.refresh(history_run)
+            history_run_id = history_run.id
 
         # TASK EXECUTION (V2)
         if task.type == "non_parallel":
@@ -149,7 +123,8 @@ def execute_tasks_v2(
                 workflow_dir_remote=workflow_dir_remote,
                 executor=runner,
                 submit_setup_call=submit_setup_call,
-                history_item_id=history_item_id,
+                history_run_id=history_run_id,
+                dataset_id=dataset.id,
             )
         elif task.type == "parallel":
             current_task_output, num_tasks, exceptions = run_v2_task_parallel(
@@ -160,7 +135,8 @@ def execute_tasks_v2(
                 workflow_dir_remote=workflow_dir_remote,
                 executor=runner,
                 submit_setup_call=submit_setup_call,
-                history_item_id=history_item_id,
+                history_run_id=history_run_id,
+                dataset_id=dataset.id,
            )
         elif task.type == "compound":
             current_task_output, num_tasks, exceptions = run_v2_task_compound(
@@ -172,7 +148,8 @@ def execute_tasks_v2(
                 workflow_dir_remote=workflow_dir_remote,
                 executor=runner,
                 submit_setup_call=submit_setup_call,
-                history_item_id=history_item_id,
+                history_run_id=history_run_id,
+                dataset_id=dataset.id,
             )
         else:
             raise ValueError(f"Unexpected error: Invalid {task.type=}.")
@@ -320,22 +297,17 @@ def execute_tasks_v2(
         type_filters_from_task_manifest = task.output_types
         current_dataset_type_filters.update(type_filters_from_task_manifest)
 
-        # Write current dataset attributes (history, images, filters) into the
-        # database. They can be used (1) to retrieve the latest state
-        # when the job fails, (2) from within endpoints that need up-to-date
-        # information
         with next(get_sync_db()) as db:
+            # Write current dataset attributes (history + filters) into the
+            # database.
             db_dataset = db.get(DatasetV2, dataset.id)
             db_dataset.type_filters = current_dataset_type_filters
             db_dataset.images = tmp_images
-            for attribute_name in [
-                "type_filters",
-                "history",
-                "images",
-            ]:
+            for attribute_name in ["type_filters", "images"]:
                 flag_modified(db_dataset, attribute_name)
             db.merge(db_dataset)
             db.commit()
+            db.close()  # FIXME: why is this needed?
 
             # Create accounting record
             record = AccountingRecord(
@@ -346,15 +318,30 @@ def execute_tasks_v2(
             db.add(record)
             db.commit()
 
-
-
-
-
-
-
-
-
-
+            # Update History tables, and raise an error if task failed
+            if exceptions == {}:
+                db.execute(
+                    update(HistoryRun)
+                    .where(HistoryRun.id == history_run_id)
+                    .values(status=XXXStatus.DONE)
+                )
+                db.commit()
+            else:
+                db.execute(
+                    update(HistoryRun)
+                    .where(HistoryRun.id == history_run_id)
+                    .values(status=XXXStatus.FAILED)
+                )
+                db.commit()
+                logger.error(
+                    f'END {wftask.order}-th task (name="{task_name}") - '
+                    "ERROR."
+                )
+                # Raise first error
+                for key, value in exceptions.items():
+                    raise JobExecutionError(
+                        info=(f"An error occurred.\nOriginal error:\n{value}")
+                    )
+            logger.debug(
+                f'END {wftask.order}-th task (name="{task_name}")'
             )
-
-        logger.debug(f'END {wftask.order}-th task (name="{task_name}")')
```
fractal_server/app/runner/v2/runner_functions.py

```diff
@@ -6,6 +6,7 @@ from typing import Literal
 from typing import Optional
 
 from pydantic import ValidationError
+from sqlmodel import update
 
 from ..exceptions import JobExecutionError
 from .deduplicate_list import deduplicate_list
@@ -13,6 +14,10 @@ from .merge_outputs import merge_outputs
 from .runner_functions_low_level import run_single_task
 from .task_interface import InitTaskOutput
 from .task_interface import TaskOutput
+from fractal_server.app.db import get_sync_db
+from fractal_server.app.history.status_enum import XXXStatus
+from fractal_server.app.models.v2 import HistoryImageCache
+from fractal_server.app.models.v2 import HistoryUnit
 from fractal_server.app.models.v2 import TaskV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
 from fractal_server.app.runner.components import _COMPONENT_KEY_
@@ -88,7 +93,8 @@ def run_v2_task_non_parallel(
     workflow_dir_remote: Optional[Path] = None,
     executor: BaseRunner,
     submit_setup_call: callable = no_op_submit_setup_call,
-    history_item_id: int,
+    dataset_id: int,
+    history_run_id: int,
 ) -> tuple[TaskOutput, int, dict[int, BaseException]]:
     """
     This runs server-side (see `executor` argument)
@@ -115,6 +121,29 @@ def run_v2_task_non_parallel(
     )
     function_kwargs[_COMPONENT_KEY_] = _index_to_component(0)
 
+    # Database History operations
+    with next(get_sync_db()) as db:
+        history_unit = HistoryUnit(
+            history_run_id=history_run_id,
+            status=XXXStatus.SUBMITTED,
+            logfile=None,  # FIXME
+            zarr_urls=function_kwargs["zarr_urls"],
+        )
+        db.add(history_unit)
+        db.commit()
+        db.refresh(history_unit)
+        history_unit_id = history_unit.id
+        for zarr_url in function_kwargs["zarr_urls"]:
+            db.merge(
+                HistoryImageCache(
+                    workflowtask_id=wftask.id,
+                    dataset_id=dataset_id,
+                    zarr_url=zarr_url,
+                    latest_history_unit_id=history_unit_id,
+                )
+            )
+        db.commit()
+
     result, exception = executor.submit(
         functools.partial(
             run_single_task,
@@ -124,18 +153,30 @@ def run_v2_task_non_parallel(
             root_dir_remote=workflow_dir_remote,
         ),
         parameters=function_kwargs,
-        history_item_id=history_item_id,
         **executor_options,
     )
 
     num_tasks = 1
-    if exception is None:
-        if result is None:
-            return (TaskOutput(), num_tasks, {})
+    with next(get_sync_db()) as db:
+        if exception is None:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=XXXStatus.DONE)
+            )
+            db.commit()
+            if result is None:
+                return (TaskOutput(), num_tasks, {})
+            else:
+                return (_cast_and_validate_TaskOutput(result), num_tasks, {})
         else:
-            return (_cast_and_validate_TaskOutput(result), num_tasks, {})
-    else:
-        return (TaskOutput(), num_tasks, {0: exception})
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=XXXStatus.FAILED)
+            )
+            db.commit()
+            return (TaskOutput(), num_tasks, {0: exception})
 
 
 def run_v2_task_parallel(
@@ -147,10 +188,12 @@ def run_v2_task_parallel(
     workflow_dir_local: Path,
     workflow_dir_remote: Optional[Path] = None,
     submit_setup_call: callable = no_op_submit_setup_call,
-    history_item_id: int,
+    dataset_id: int,
+    history_run_id: int,
 ) -> tuple[TaskOutput, int, dict[int, BaseException]]:
 
     if len(images) == 0:
+        # FIXME: Do something with history units/images?
        return (TaskOutput(), 0, {})
 
     _check_parallelization_list_size(images)
@@ -163,14 +206,38 @@ def run_v2_task_parallel(
     )
 
     list_function_kwargs = []
-    for ind, image in enumerate(images):
-        list_function_kwargs.append(
-            dict(
-                zarr_url=image["zarr_url"],
-                **(wftask.args_parallel or {}),
-            ),
-        )
-        list_function_kwargs[-1][_COMPONENT_KEY_] = _index_to_component(ind)
+    history_unit_ids = []
+    with next(get_sync_db()) as db:
+        for ind, image in enumerate(images):
+            list_function_kwargs.append(
+                dict(
+                    zarr_url=image["zarr_url"],
+                    **(wftask.args_parallel or {}),
+                ),
+            )
+            list_function_kwargs[-1][_COMPONENT_KEY_] = _index_to_component(
+                ind
+            )
+            history_unit = HistoryUnit(
+                history_run_id=history_run_id,
+                status=XXXStatus.SUBMITTED,
+                logfile=None,  # FIXME
+                zarr_urls=[image["zarr_url"]],
+            )
+            # FIXME: this should be a bulk operation
+            db.add(history_unit)
+            db.commit()
+            db.refresh(history_unit)
+            db.merge(
+                HistoryImageCache(
+                    workflowtask_id=wftask.id,
+                    dataset_id=dataset_id,
+                    zarr_url=image["zarr_url"],
+                    latest_history_unit_id=history_unit.id,
+                )
+            )
+            db.commit()
+            history_unit_ids.append(history_unit.id)
 
     results, exceptions = executor.multisubmit(
         functools.partial(
@@ -181,11 +248,12 @@ def run_v2_task_parallel(
             root_dir_remote=workflow_dir_remote,
         ),
         list_parameters=list_function_kwargs,
-        history_item_id=history_item_id,
         **executor_options,
     )
 
     outputs = []
+    history_unit_ids_done: list[int] = []
+    history_unit_ids_failed: list[int] = []
     for ind in range(len(list_function_kwargs)):
         if ind in results.keys():
             result = results[ind]
@@ -194,11 +262,26 @@ def run_v2_task_parallel(
             else:
                 output = _cast_and_validate_TaskOutput(result)
             outputs.append(output)
+            history_unit_ids_done.append(history_unit_ids[ind])
         elif ind in exceptions.keys():
             print(f"Bad: {exceptions[ind]}")
+            history_unit_ids_failed.append(history_unit_ids[ind])
         else:
             print("VERY BAD - should have not reached this point")
 
+    with next(get_sync_db()) as db:
+        db.execute(
+            update(HistoryUnit)
+            .where(HistoryUnit.id.in_(history_unit_ids_done))
+            .values(status=XXXStatus.DONE)
+        )
+        db.execute(
+            update(HistoryUnit)
+            .where(HistoryUnit.id.in_(history_unit_ids_failed))
+            .values(status=XXXStatus.FAILED)
+        )
+        db.commit()
+
     num_tasks = len(images)
     merged_output = merge_outputs(outputs)
     return (merged_output, num_tasks, exceptions)
```
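In the parallel branch above, per-image outcomes are collected into `history_unit_ids_done` and `history_unit_ids_failed`, and then written back with two bulk `UPDATE ... WHERE id IN (...)` statements rather than one statement per image. A self-contained sketch of that bulk pattern, using a toy `HistoryUnit` stand-in and made-up data (not the real model):

```python
from typing import Optional

from sqlmodel import Field, Session, SQLModel, create_engine, select, update


class HistoryUnit(SQLModel, table=True):
    # Toy stand-in; the real model also carries history_run_id, logfile, zarr_urls
    id: Optional[int] = Field(default=None, primary_key=True)
    status: str = "submitted"


engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)

with Session(engine) as db:
    units = [HistoryUnit() for _ in range(4)]
    db.add_all(units)
    db.commit()
    unit_ids = [unit.id for unit in units]

    # Pretend the first two images succeeded and the last two failed
    done_ids, failed_ids = unit_ids[:2], unit_ids[2:]

    db.execute(
        update(HistoryUnit)
        .where(HistoryUnit.id.in_(done_ids))
        .values(status="done")
    )
    db.execute(
        update(HistoryUnit)
        .where(HistoryUnit.id.in_(failed_ids))
        .values(status="failed")
    )
    db.commit()

    rows = db.exec(select(HistoryUnit).order_by(HistoryUnit.id)).all()
    print([row.status for row in rows])  # -> ['done', 'done', 'failed', 'failed']
```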
fractal_server/app/runner/v2/runner_functions.py (continued)

```diff
@@ -214,7 +297,8 @@ def run_v2_task_compound(
     workflow_dir_local: Path,
     workflow_dir_remote: Optional[Path] = None,
     submit_setup_call: callable = no_op_submit_setup_call,
-    history_item_id: int,
+    dataset_id: int,
+    history_run_id: int,
 ) -> tuple[TaskOutput, int, dict[int, BaseException]]:
 
     executor_options_init = submit_setup_call(
@@ -237,6 +321,33 @@ def run_v2_task_compound(
         **(wftask.args_non_parallel or {}),
     )
     function_kwargs[_COMPONENT_KEY_] = f"init_{_index_to_component(0)}"
+
+    # Create database History entries
+    input_image_zarr_urls = function_kwargs["zarr_urls"]
+    with next(get_sync_db()) as db:
+        # Create a single `HistoryUnit` for the whole compound task
+        history_unit = HistoryUnit(
+            history_run_id=history_run_id,
+            status=XXXStatus.SUBMITTED,
+            logfile=None,  # FIXME
+            zarr_urls=input_image_zarr_urls,
+        )
+        db.add(history_unit)
+        db.commit()
+        db.refresh(history_unit)
+        history_unit_id = history_unit.id
+        # Create one `HistoryImageCache` for each input image
+        for zarr_url in input_image_zarr_urls:
+            db.merge(
+                HistoryImageCache(
+                    workflowtask_id=wftask.id,
+                    dataset_id=dataset_id,
+                    zarr_url=zarr_url,
+                    latest_history_unit_id=history_unit_id,
+                )
+            )
+        db.commit()
+
     result, exception = executor.submit(
         functools.partial(
             run_single_task,
@@ -246,8 +357,6 @@ def run_v2_task_compound(
             root_dir_remote=workflow_dir_remote,
         ),
         parameters=function_kwargs,
-        history_item_id=history_item_id,
-        in_compound_task=True,
         **executor_options_init,
     )
 
@@ -258,6 +367,13 @@ def run_v2_task_compound(
         else:
             init_task_output = _cast_and_validate_InitTaskOutput(result)
     else:
+        with next(get_sync_db()) as db:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=XXXStatus.FAILED)
+            )
+            db.commit()
         return (TaskOutput(), num_tasks, {0: exception})
 
     parallelization_list = init_task_output.parallelization_list
@@ -269,6 +385,13 @@ def run_v2_task_compound(
     _check_parallelization_list_size(parallelization_list)
 
     if len(parallelization_list) == 0:
+        with next(get_sync_db()) as db:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=XXXStatus.DONE)
+            )
+            db.commit()
         return (TaskOutput(), 0, {})
 
     list_function_kwargs = []
@@ -293,12 +416,12 @@ def run_v2_task_compound(
             root_dir_remote=workflow_dir_remote,
         ),
         list_parameters=list_function_kwargs,
-        history_item_id=history_item_id,
         in_compound_task=True,
         **executor_options_compute,
     )
 
     outputs = []
+    failure = False
     for ind in range(len(list_function_kwargs)):
         if ind in results.keys():
             result = results[ind]
@@ -307,8 +430,27 @@ def run_v2_task_compound(
         else:
             output = _cast_and_validate_TaskOutput(result)
             outputs.append(output)
+
         elif ind in exceptions.keys():
             print(f"Bad: {exceptions[ind]}")
+            failure = True
+        else:
+            print("VERY BAD - should have not reached this point")
+
+    with next(get_sync_db()) as db:
+        if failure:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=XXXStatus.FAILED)
+            )
+        else:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=XXXStatus.DONE)
+            )
+        db.commit()
 
     merged_output = merge_outputs(outputs)
     return (merged_output, num_tasks, exceptions)
```
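The `db.merge(HistoryImageCache(...))` calls in the hunks above behave as an insert-or-update keyed on the image: the first run inserts a row, later runs only bump `latest_history_unit_id`. The sketch below assumes a composite primary key of `(dataset_id, workflowtask_id, zarr_url)`, which is what makes `merge()` act as an upsert; the real model lives in `fractal_server/app/models/v2/history.py`, which is not shown in this excerpt, and all ids and paths below are made up.

```python
from sqlmodel import Field, Session, SQLModel, create_engine


class HistoryImageCache(SQLModel, table=True):
    # Hypothetical stand-in; the composite primary key is an assumption that
    # makes merge() behave as insert-or-update, as in the runner code above.
    dataset_id: int = Field(primary_key=True)
    workflowtask_id: int = Field(primary_key=True)
    zarr_url: str = Field(primary_key=True)
    latest_history_unit_id: int


engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)

with Session(engine) as db:
    # First run over this image: no row exists yet, so merge() inserts one
    db.merge(
        HistoryImageCache(
            dataset_id=1,
            workflowtask_id=7,
            zarr_url="/somewhere/plate.zarr/A/01/0",
            latest_history_unit_id=100,
        )
    )
    db.commit()

    # A later run over the same image: merge() updates the existing row,
    # so the cache always points at the most recent HistoryUnit
    db.merge(
        HistoryImageCache(
            dataset_id=1,
            workflowtask_id=7,
            zarr_url="/somewhere/plate.zarr/A/01/0",
            latest_history_unit_id=101,
        )
    )
    db.commit()

    row = db.get(HistoryImageCache, (1, 7, "/somewhere/plate.zarr/A/01/0"))
    print(row.latest_history_unit_id)  # -> 101
```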
fractal_server/app/schemas/_validators.py

```diff
@@ -1,43 +1,32 @@
 import os
+from typing import Annotated
 from typing import Any
 from typing import Optional
 
+from pydantic.types import StringConstraints
 
-def valstr(attribute: str, accept_none: bool = False):
-    """
-    Check that a string attribute is not an empty string, and remove the
-    leading and trailing whitespace characters.
 
-    If `accept_none`, the validator also accepts `None`.
-    """
+def cant_set_none(value: Any) -> Any:
+    if value is None:
+        raise ValueError("Field cannot be set to 'None'.")
+    return value
 
-    def val(cls, string: Optional[str]) -> Optional[str]:
-        if string is None:
-            if accept_none:
-                return string
-            else:
-                raise ValueError(
-                    f"String attribute '{attribute}' cannot be None"
-                )
-        s = string.strip()
-        if not s:
-            raise ValueError(f"String attribute '{attribute}' cannot be empty")
-        return s
 
-    return val
+NonEmptyString = Annotated[
+    str, StringConstraints(min_length=1, strip_whitespace=True)
+]
 
 
 def valdict_keys(attribute: str):
     def val(cls, d: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]:
         """
-        Apply valstr to every key of the dictionary, and fail if there are
-        identical keys.
+        Strip every key of the dictionary, and fail if there are identical keys
         """
         if d is not None:
             old_keys = list(d.keys())
-            new_keys = [
-                valstr(f"{attribute}[{key}]")(cls, key) for key in old_keys
-            ]
+            new_keys = [key.strip() for key in old_keys]
+            if any(k == "" for k in new_keys):
+                raise ValueError(f"Empty string in {new_keys}.")
             if len(new_keys) != len(set(new_keys)):
                 raise ValueError(
                     f"Dictionary contains multiple identical keys: '{d}'."
```
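The new `NonEmptyString` alias moves the old `valstr` behaviour, stripping whitespace and rejecting empty strings, into the type annotation itself, so pydantic enforces it wherever the alias is used. A short usage sketch (the `Example` model is only for illustration, not part of fractal-server):

```python
from typing import Annotated, Optional

from pydantic import BaseModel, ValidationError
from pydantic.types import StringConstraints

NonEmptyString = Annotated[
    str, StringConstraints(min_length=1, strip_whitespace=True)
]


class Example(BaseModel):  # illustrative model
    username: Optional[NonEmptyString] = None


print(Example(username="  alice  ").username)  # -> "alice" (whitespace stripped)
try:
    Example(username="   ")  # stripped to "", then fails min_length=1
except ValidationError:
    print("rejected: empty after stripping")
```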
fractal_server/app/schemas/user.py

```diff
@@ -7,8 +7,8 @@ from pydantic import Field
 from pydantic import field_validator
 from pydantic import ValidationInfo
 
+from ._validators import NonEmptyString
 from ._validators import val_unique_list
-from ._validators import valstr
 
 __all__ = (
     "UserRead",
@@ -57,12 +57,12 @@ class UserUpdate(schemas.BaseUserUpdate):
 
     model_config = ConfigDict(extra="forbid")
 
-    username: Optional[str] = None
+    username: Optional[NonEmptyString] = None
 
     # Validators
-    _username = field_validator("username")(classmethod(valstr("username")))
 
     @field_validator(
+        "username",
         "is_active",
         "is_verified",
         "is_superuser",
@@ -94,11 +94,14 @@ class UserCreate(schemas.BaseUserCreate):
         username:
     """
 
-    username: Optional[str] = None
+    username: Optional[NonEmptyString] = None
 
-
-
-
+    @field_validator("username")
+    @classmethod
+    def cant_set_none(cls, v, info: ValidationInfo):
+        if v is None:
+            raise ValueError(f"Cannot set {info.field_name}=None")
+        return v
 
 
 class UserUpdateGroups(BaseModel):
```
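One subtlety of the new `UserCreate.cant_set_none` validator: pydantic v2 does not validate default values unless `validate_default=True`, so omitting `username` still yields the default `None`, while an explicit `username=None` is rejected. A minimal sketch of that behaviour with a stand-alone model:

```python
from typing import Optional

from pydantic import BaseModel, ValidationError, ValidationInfo, field_validator


class UserCreateSketch(BaseModel):  # stand-alone illustration
    username: Optional[str] = None

    @field_validator("username")
    @classmethod
    def cant_set_none(cls, v, info: ValidationInfo):
        if v is None:
            raise ValueError(f"Cannot set {info.field_name}=None")
        return v


print(UserCreateSketch().username)  # -> None (defaults are not validated)
try:
    UserCreateSketch(username=None)  # explicit None is rejected
except ValidationError:
    print("rejected: username=None was passed explicitly")
```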
fractal_server/app/schemas/user_settings.py

```diff
@@ -5,9 +5,9 @@ from pydantic import ConfigDict
 from pydantic import field_validator
 from pydantic.types import StrictStr
 
+from ._validators import NonEmptyString
 from ._validators import val_absolute_path
 from ._validators import val_unique_list
-from ._validators import valstr
 from fractal_server.string_tools import validate_cmd
 
 __all__ = (
@@ -48,21 +48,15 @@ class UserSettingsUpdate(BaseModel):
 
     model_config = ConfigDict(extra="forbid")
 
-    ssh_host: Optional[str] = None
-    ssh_username: Optional[str] = None
-    ssh_private_key_path: Optional[str] = None
-    ssh_tasks_dir: Optional[str] = None
-    ssh_jobs_dir: Optional[str] = None
-    slurm_user: Optional[str] = None
-    slurm_accounts: Optional[list[str]] = None
-    project_dir: Optional[str] = None
+    ssh_host: Optional[NonEmptyString] = None
+    ssh_username: Optional[NonEmptyString] = None
+    ssh_private_key_path: Optional[NonEmptyString] = None
+    ssh_tasks_dir: Optional[NonEmptyString] = None
+    ssh_jobs_dir: Optional[NonEmptyString] = None
+    slurm_user: Optional[NonEmptyString] = None
+    slurm_accounts: Optional[list[NonEmptyString]] = None
+    project_dir: Optional[NonEmptyString] = None
 
-    _ssh_host = field_validator("ssh_host")(
-        classmethod(valstr("ssh_host", accept_none=True))
-    )
-    _ssh_username = field_validator("ssh_username")(
-        classmethod(valstr("ssh_username", accept_none=True))
-    )
     _ssh_private_key_path = field_validator("ssh_private_key_path")(
         classmethod(
             val_absolute_path("ssh_private_key_path", accept_none=True)
@@ -76,17 +70,11 @@ class UserSettingsUpdate(BaseModel):
         classmethod(val_absolute_path("ssh_jobs_dir", accept_none=True))
     )
 
-    _slurm_user = field_validator("slurm_user")(
-        classmethod(valstr("slurm_user", accept_none=True))
-    )
-
     @field_validator("slurm_accounts")
     @classmethod
     def slurm_accounts_validator(cls, value):
         if value is None:
             return value
-        for i, item in enumerate(value):
-            value[i] = valstr(f"slurm_accounts[{i}]")(cls, item)
         return val_unique_list("slurm_accounts")(cls, value)
 
     @field_validator("project_dir")
```