fractal-server 2.14.0a6__py3-none-any.whl → 2.14.0a8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/history/__init__.py +0 -4
- fractal_server/app/routes/api/v2/_aux_functions.py +35 -45
- fractal_server/app/routes/api/v2/_aux_functions_history.py +109 -0
- fractal_server/app/routes/api/v2/history.py +42 -71
- fractal_server/app/routes/api/v2/job.py +30 -0
- fractal_server/app/routes/api/v2/status_legacy.py +5 -5
- fractal_server/app/routes/api/v2/task.py +3 -10
- fractal_server/app/routes/api/v2/workflowtask.py +13 -2
- fractal_server/app/runner/executors/base_runner.py +53 -26
- fractal_server/app/runner/executors/local/runner.py +6 -5
- fractal_server/app/runner/executors/slurm_ssh/executor.py +3 -4
- fractal_server/app/runner/executors/slurm_sudo/runner.py +5 -60
- fractal_server/app/runner/v2/runner.py +68 -33
- fractal_server/app/runner/v2/runner_functions.py +256 -14
- fractal_server/app/schemas/v2/__init__.py +7 -1
- fractal_server/app/schemas/v2/dumps.py +20 -4
- fractal_server/app/schemas/v2/history.py +54 -0
- fractal_server/app/schemas/v2/manifest.py +11 -0
- fractal_server/app/schemas/v2/status_legacy.py +35 -0
- fractal_server/app/schemas/v2/task.py +32 -1
- fractal_server/app/schemas/v2/workflowtask.py +2 -21
- fractal_server/tasks/v2/utils_database.py +1 -16
- {fractal_server-2.14.0a6.dist-info → fractal_server-2.14.0a8.dist-info}/METADATA +2 -2
- {fractal_server-2.14.0a6.dist-info → fractal_server-2.14.0a8.dist-info}/RECORD +28 -29
- {fractal_server-2.14.0a6.dist-info → fractal_server-2.14.0a8.dist-info}/WHEEL +1 -1
- fractal_server/app/history/image_updates.py +0 -124
- fractal_server/app/history/status_enum.py +0 -16
- fractal_server/app/schemas/v2/status.py +0 -16
- {fractal_server-2.14.0a6.dist-info → fractal_server-2.14.0a8.dist-info}/LICENSE +0 -0
- {fractal_server-2.14.0a6.dist-info → fractal_server-2.14.0a8.dist-info}/entry_points.txt +0 -0
fractal_server/app/runner/v2/runner_functions.py

@@ -15,7 +15,6 @@ from .runner_functions_low_level import run_single_task
 from .task_interface import InitTaskOutput
 from .task_interface import TaskOutput
 from fractal_server.app.db import get_sync_db
-from fractal_server.app.history.status_enum import XXXStatus
 from fractal_server.app.models.v2 import HistoryUnit
 from fractal_server.app.models.v2 import TaskV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
@@ -23,12 +22,15 @@ from fractal_server.app.runner.components import _COMPONENT_KEY_
 from fractal_server.app.runner.components import _index_to_component
 from fractal_server.app.runner.executors.base_runner import BaseRunner
 from fractal_server.app.runner.v2._db_tools import bulk_upsert_image_cache_fast
+from fractal_server.app.schemas.v2 import HistoryUnitStatus


 __all__ = [
-    "run_v2_task_non_parallel",
     "run_v2_task_parallel",
+    "run_v2_task_non_parallel",
     "run_v2_task_compound",
+    "run_v2_task_converter_non_parallel",
+    "run_v2_task_converter_compound",
 ]

 MAX_PARALLELIZATION_LIST_SIZE = 20_000
@@ -125,7 +127,7 @@ def run_v2_task_non_parallel(
     with next(get_sync_db()) as db:
         history_unit = HistoryUnit(
             history_run_id=history_run_id,
-            status=
+            status=HistoryUnitStatus.SUBMITTED,
             logfile=None,  # FIXME
             zarr_urls=function_kwargs["zarr_urls"],
         )
@@ -155,6 +157,92 @@ def run_v2_task_non_parallel(
             root_dir_remote=workflow_dir_remote,
         ),
         parameters=function_kwargs,
+        task_type="non_parallel",
+        **executor_options,
+    )
+
+    num_tasks = 1
+    with next(get_sync_db()) as db:
+        if exception is None:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=HistoryUnitStatus.DONE)
+            )
+            db.commit()
+            if result is None:
+                return (TaskOutput(), num_tasks, {})
+            else:
+                return (_cast_and_validate_TaskOutput(result), num_tasks, {})
+        else:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=HistoryUnitStatus.FAILED)
+            )
+            db.commit()
+            return (TaskOutput(), num_tasks, {0: exception})
+
+
+def run_v2_task_converter_non_parallel(
+    *,
+    zarr_dir: str,
+    task: TaskV2,
+    wftask: WorkflowTaskV2,
+    workflow_dir_local: Path,
+    workflow_dir_remote: Optional[Path] = None,
+    executor: BaseRunner,
+    submit_setup_call: callable = no_op_submit_setup_call,
+    dataset_id: int,
+    history_run_id: int,
+) -> tuple[TaskOutput, int, dict[int, BaseException]]:
+    """
+    This runs server-side (see `executor` argument)
+    """
+
+    if workflow_dir_remote is None:
+        workflow_dir_remote = workflow_dir_local
+        logging.warning(
+            "In `run_single_task`, workflow_dir_remote=None. Is this right?"
+        )
+        workflow_dir_remote = workflow_dir_local
+
+    executor_options = submit_setup_call(
+        wftask=wftask,
+        root_dir_local=workflow_dir_local,
+        root_dir_remote=workflow_dir_remote,
+        which_type="non_parallel",
+    )
+
+    function_kwargs = {
+        "zarr_dir": zarr_dir,
+        _COMPONENT_KEY_: _index_to_component(0),
+        **(wftask.args_non_parallel or {}),
+    }
+
+    # Database History operations
+    with next(get_sync_db()) as db:
+        history_unit = HistoryUnit(
+            history_run_id=history_run_id,
+            status=HistoryUnitStatus.SUBMITTED,
+            logfile=None,  # FIXME
+            zarr_urls=[],
+        )
+        db.add(history_unit)
+        db.commit()
+        db.refresh(history_unit)
+        history_unit_id = history_unit.id
+
+    result, exception = executor.submit(
+        functools.partial(
+            run_single_task,
+            wftask=wftask,
+            command=task.command_non_parallel,
+            root_dir_local=workflow_dir_local,
+            root_dir_remote=workflow_dir_remote,
+        ),
+        task_type="converter_non_parallel",
+        parameters=function_kwargs,
         **executor_options,
     )

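The submit-then-update pattern introduced above recurs in every runner function touched by this release (and in the hunks below). A minimal sketch of that pattern as a standalone helper; the helper name and its session handling are illustrative and not part of fractal-server, and `update` is assumed to come from SQLAlchemy as elsewhere in the package:

    from sqlalchemy import update

    from fractal_server.app.db import get_sync_db
    from fractal_server.app.models.v2 import HistoryUnit
    from fractal_server.app.schemas.v2 import HistoryUnitStatus


    def _set_history_unit_status(
        history_unit_id: int,
        status: HistoryUnitStatus,
    ) -> None:
        # Mark a single HistoryUnit as SUBMITTED/DONE/FAILED, mirroring the
        # repeated `db.execute(update(...)); db.commit()` blocks in this diff.
        with next(get_sync_db()) as db:
            db.execute(
                update(HistoryUnit)
                .where(HistoryUnit.id == history_unit_id)
                .values(status=status)
            )
            db.commit()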
@@ -164,7 +252,7 @@ def run_v2_task_non_parallel(
             db.execute(
                 update(HistoryUnit)
                 .where(HistoryUnit.id == history_unit_id)
-                .values(status=
+                .values(status=HistoryUnitStatus.DONE)
             )
             db.commit()
             if result is None:
@@ -175,7 +263,7 @@ def run_v2_task_non_parallel(
             db.execute(
                 update(HistoryUnit)
                 .where(HistoryUnit.id == history_unit_id)
-                .values(status=
+                .values(status=HistoryUnitStatus.FAILED)
             )
             db.commit()
             return (TaskOutput(), num_tasks, {0: exception})
@@ -217,7 +305,7 @@ def run_v2_task_parallel(
     history_units = [
         HistoryUnit(
             history_run_id=history_run_id,
-            status=
+            status=HistoryUnitStatus.SUBMITTED,
             logfile=None,  # FIXME
             zarr_urls=[image["zarr_url"]],
         )
@@ -255,6 +343,7 @@ def run_v2_task_parallel(
             root_dir_remote=workflow_dir_remote,
         ),
         list_parameters=list_function_kwargs,
+        task_type="parallel",
         **executor_options,
     )

@@ -280,12 +369,12 @@ def run_v2_task_parallel(
         db.execute(
             update(HistoryUnit)
             .where(HistoryUnit.id.in_(history_unit_ids_done))
-            .values(status=
+            .values(status=HistoryUnitStatus.DONE)
         )
         db.execute(
             update(HistoryUnit)
             .where(HistoryUnit.id.in_(history_unit_ids_failed))
-            .values(status=
+            .values(status=HistoryUnitStatus.FAILED)
         )
         db.commit()

@@ -334,7 +423,7 @@ def run_v2_task_compound(
         # Create a single `HistoryUnit` for the whole compound task
         history_unit = HistoryUnit(
             history_run_id=history_run_id,
-            status=
+            status=HistoryUnitStatus.SUBMITTED,
             logfile=None,  # FIXME
             zarr_urls=input_image_zarr_urls,
         )
@@ -365,6 +454,159 @@ def run_v2_task_compound(
             root_dir_remote=workflow_dir_remote,
         ),
         parameters=function_kwargs,
+        task_type="compound",
+        **executor_options_init,
+    )
+
+    num_tasks = 1
+    if exception is None:
+        if result is None:
+            init_task_output = InitTaskOutput()
+        else:
+            init_task_output = _cast_and_validate_InitTaskOutput(result)
+    else:
+        with next(get_sync_db()) as db:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=HistoryUnitStatus.FAILED)
+            )
+            db.commit()
+            return (TaskOutput(), num_tasks, {0: exception})
+
+    parallelization_list = init_task_output.parallelization_list
+    parallelization_list = deduplicate_list(parallelization_list)
+
+    num_tasks = 1 + len(parallelization_list)
+
+    # 3/B: parallel part of a compound task
+    _check_parallelization_list_size(parallelization_list)
+
+    if len(parallelization_list) == 0:
+        with next(get_sync_db()) as db:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=HistoryUnitStatus.DONE)
+            )
+            db.commit()
+            return (TaskOutput(), 0, {})
+
+    list_function_kwargs = [
+        {
+            "zarr_url": parallelization_item.zarr_url,
+            "init_args": parallelization_item.init_args,
+            _COMPONENT_KEY_: f"compute_{_index_to_component(ind)}",
+            **(wftask.args_parallel or {}),
+        }
+        for ind, parallelization_item in enumerate(parallelization_list)
+    ]
+
+    results, exceptions = executor.multisubmit(
+        functools.partial(
+            run_single_task,
+            wftask=wftask,
+            command=task.command_parallel,
+            root_dir_local=workflow_dir_local,
+            root_dir_remote=workflow_dir_remote,
+        ),
+        list_parameters=list_function_kwargs,
+        task_type="compound",
+        **executor_options_compute,
+    )
+
+    outputs = []
+    failure = False
+    for ind in range(len(list_function_kwargs)):
+        if ind in results.keys():
+            result = results[ind]
+            if result is None:
+                output = TaskOutput()
+            else:
+                output = _cast_and_validate_TaskOutput(result)
+            outputs.append(output)
+
+        elif ind in exceptions.keys():
+            print(f"Bad: {exceptions[ind]}")
+            failure = True
+        else:
+            print("VERY BAD - should have not reached this point")
+
+    with next(get_sync_db()) as db:
+        if failure:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=HistoryUnitStatus.FAILED)
+            )
+        else:
+            db.execute(
+                update(HistoryUnit)
+                .where(HistoryUnit.id == history_unit_id)
+                .values(status=HistoryUnitStatus.DONE)
+            )
+        db.commit()
+
+    merged_output = merge_outputs(outputs)
+    return (merged_output, num_tasks, exceptions)
+
+
+def run_v2_task_converter_compound(
+    *,
+    zarr_dir: str,
+    task: TaskV2,
+    wftask: WorkflowTaskV2,
+    executor: BaseRunner,
+    workflow_dir_local: Path,
+    workflow_dir_remote: Optional[Path] = None,
+    submit_setup_call: callable = no_op_submit_setup_call,
+    dataset_id: int,
+    history_run_id: int,
+) -> tuple[TaskOutput, int, dict[int, BaseException]]:
+    executor_options_init = submit_setup_call(
+        wftask=wftask,
+        root_dir_local=workflow_dir_local,
+        root_dir_remote=workflow_dir_remote,
+        which_type="non_parallel",
+    )
+    executor_options_compute = submit_setup_call(
+        wftask=wftask,
+        root_dir_local=workflow_dir_local,
+        root_dir_remote=workflow_dir_remote,
+        which_type="parallel",
+    )
+
+    # 3/A: non-parallel init task
+    function_kwargs = {
+        "zarr_dir": zarr_dir,
+        _COMPONENT_KEY_: f"init_{_index_to_component(0)}",
+        **(wftask.args_non_parallel or {}),
+    }
+
+    # Create database History entries
+    with next(get_sync_db()) as db:
+        # Create a single `HistoryUnit` for the whole compound task
+        history_unit = HistoryUnit(
+            history_run_id=history_run_id,
+            status=HistoryUnitStatus.SUBMITTED,
+            logfile=None,  # FIXME
+            zarr_urls=[],
+        )
+        db.add(history_unit)
+        db.commit()
+        db.refresh(history_unit)
+        history_unit_id = history_unit.id
+
+    result, exception = executor.submit(
+        functools.partial(
+            run_single_task,
+            wftask=wftask,
+            command=task.command_non_parallel,
+            root_dir_local=workflow_dir_local,
+            root_dir_remote=workflow_dir_remote,
+        ),
+        parameters=function_kwargs,
+        task_type="converter_compound",
         **executor_options_init,
     )

@@ -379,7 +621,7 @@ def run_v2_task_compound(
             db.execute(
                 update(HistoryUnit)
                 .where(HistoryUnit.id == history_unit_id)
-                .values(status=
+                .values(status=HistoryUnitStatus.FAILED)
             )
             db.commit()
             return (TaskOutput(), num_tasks, {0: exception})
@@ -397,7 +639,7 @@ def run_v2_task_compound(
             db.execute(
                 update(HistoryUnit)
                 .where(HistoryUnit.id == history_unit_id)
-                .values(status=
+                .values(status=HistoryUnitStatus.DONE)
             )
             db.commit()
             return (TaskOutput(), 0, {})
@@ -421,7 +663,7 @@ def run_v2_task_compound(
             root_dir_remote=workflow_dir_remote,
         ),
         list_parameters=list_function_kwargs,
-
+        task_type="converter_compound",
         **executor_options_compute,
     )

@@ -447,13 +689,13 @@ def run_v2_task_compound(
             db.execute(
                 update(HistoryUnit)
                 .where(HistoryUnit.id == history_unit_id)
-                .values(status=
+                .values(status=HistoryUnitStatus.FAILED)
             )
         else:
             db.execute(
                 update(HistoryUnit)
                 .where(HistoryUnit.id == history_unit_id)
-                .values(status=
+                .values(status=HistoryUnitStatus.DONE)
             )
         db.commit()

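The orchestration side in `fractal_server/app/runner/v2/runner.py` (+68 -33 in the file list) is not shown in this diff, but it presumably dispatches on the task type to the functions above. A rough, hypothetical sketch of such a dispatch; the table and the `run_wftask_by_type` wrapper are illustrative only and not the actual implementation:

    # Hypothetical dispatch table (illustrative only): maps the task-type
    # strings threaded through this diff to the exported runner functions.
    from fractal_server.app.runner.v2.runner_functions import (
        run_v2_task_compound,
        run_v2_task_converter_compound,
        run_v2_task_converter_non_parallel,
        run_v2_task_non_parallel,
        run_v2_task_parallel,
    )

    TASK_TYPE_TO_RUNNER = {
        "non_parallel": run_v2_task_non_parallel,
        "converter_non_parallel": run_v2_task_converter_non_parallel,
        "parallel": run_v2_task_parallel,
        "compound": run_v2_task_compound,
        "converter_compound": run_v2_task_converter_compound,
    }


    def run_wftask_by_type(task_type: str, **kwargs):
        # Pick the runner function matching the WorkflowTask's task type.
        try:
            runner_function = TASK_TYPE_TO_RUNNER[task_type]
        except KeyError:
            raise ValueError(f"Unsupported task type: {task_type!r}")
        return runner_function(**kwargs)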
fractal_server/app/schemas/v2/__init__.py

@@ -7,8 +7,14 @@ from .dataset import DatasetUpdateV2  # noqa F401
 from .dumps import DatasetDumpV2  # noqa F401
 from .dumps import ProjectDumpV2  # noqa F401
 from .dumps import TaskDumpV2  # noqa F401
+from .dumps import TaskGroupDumpV2  # noqa F401
 from .dumps import WorkflowDumpV2  # noqa F401
 from .dumps import WorkflowTaskDumpV2  # noqa F401
+from .history import HistoryRunReadAggregated  # noqa F401
+from .history import HistoryUnitRead  # noqa F401
+from .history import HistoryUnitStatus  # noqa F401
+from .history import ImageLogsRequest  # noqa F401
+from .history import ZarrUrlAndStatus  # noqa F401
 from .job import JobCreateV2  # noqa F401
 from .job import JobReadV2  # noqa F401
 from .job import JobStatusTypeV2  # noqa F401
@@ -18,6 +24,7 @@ from .manifest import TaskManifestV2  # noqa F401
 from .project import ProjectCreateV2  # noqa F401
 from .project import ProjectReadV2  # noqa F401
 from .project import ProjectUpdateV2  # noqa F401
+from .status_legacy import WorkflowTaskStatusTypeV2  # noqa F401
 from .task import TaskCreateV2  # noqa F401
 from .task import TaskExportV2  # noqa F401
 from .task import TaskImportV2  # noqa F401
@@ -47,5 +54,4 @@ from .workflowtask import WorkflowTaskImportV2  # noqa F401
 from .workflowtask import WorkflowTaskReadV2  # noqa F401
 from .workflowtask import WorkflowTaskReadV2WithWarning  # noqa F401
 from .workflowtask import WorkflowTaskReplaceV2  # noqa F401
-from .workflowtask import WorkflowTaskStatusTypeV2  # noqa F401
 from .workflowtask import WorkflowTaskUpdateV2  # noqa F401
fractal_server/app/schemas/v2/dumps.py

@@ -1,18 +1,20 @@
 """
-
 Dump models differ from their Read counterpart in that:
 * They are directly JSON-able, without any additional encoder.
-* They may only
+* They may include only a subset of the available fields.

 These models are used in at least two situations:
 1. In the "*_dump" attributes of Job models;
-2. In the
+2. In the history items, to trim their size.
 """
 from typing import Optional

 from pydantic import BaseModel
 from pydantic import ConfigDict
+from pydantic import Field

+from .task import TaskTypeType
+from .task_group import TaskGroupV2OriginEnum
 from fractal_server.images.models import AttributeFiltersType


@@ -26,7 +28,7 @@ class ProjectDumpV2(BaseModel):
 class TaskDumpV2(BaseModel):
     id: int
     name: str
-    type:
+    type: TaskTypeType

     command_non_parallel: Optional[str] = None
     command_parallel: Optional[str] = None
@@ -72,3 +74,17 @@ class DatasetDumpV2(BaseModel):
     zarr_dir: str
     type_filters: dict[str, bool]
     attribute_filters: AttributeFiltersType
+
+
+class TaskGroupDumpV2(BaseModel):
+    id: int
+    origin: TaskGroupV2OriginEnum
+    pkg_name: str
+    version: Optional[str] = None
+    python_version: Optional[str] = None
+    pip_extras: Optional[str] = None
+    pinned_package_versions: dict[str, str] = Field(default_factory=dict)
+
+    path: Optional[str] = None
+    venv_path: Optional[str] = None
+    wheel_path: Optional[str] = None
fractal_server/app/schemas/v2/history.py (new file)

@@ -0,0 +1,54 @@
+from datetime import datetime
+from enum import Enum
+from typing import Any
+from typing import Optional
+
+from pydantic import AwareDatetime
+from pydantic import BaseModel
+from pydantic import field_serializer
+
+
+class HistoryUnitStatus(str, Enum):
+    """
+    Available status for images
+
+    Attributes:
+        SUBMITTED:
+        DONE:
+        FAILED:
+    """
+
+    SUBMITTED = "submitted"
+    DONE = "done"
+    FAILED = "failed"
+
+
+class HistoryUnitRead(BaseModel):
+    id: int
+    logfile: Optional[str] = None
+    status: HistoryUnitStatus
+    zarr_urls: list[str]
+
+
+class HistoryRunReadAggregated(BaseModel):
+    id: int
+    timestamp_started: AwareDatetime
+    workflowtask_dump: dict[str, Any]
+    num_submitted_units: int
+    num_done_units: int
+    num_failed_units: int
+
+    @field_serializer("timestamp_started")
+    def serialize_datetime(v: datetime) -> str:
+        return v.isoformat()
+
+
+class ImageLogsRequest(BaseModel):
+    workflowtask_id: int
+    dataset_id: int
+    zarr_url: str
+
+
+class ZarrUrlAndStatus(BaseModel):
+    zarr_url: str
+    status: Optional[HistoryUnitStatus] = None
fractal_server/app/schemas/v2/manifest.py

@@ -1,4 +1,5 @@
 from typing import Any
+from typing import Literal
 from typing import Optional

 from pydantic import BaseModel
@@ -56,6 +57,16 @@ class TaskManifestV2(BaseModel):
     modality: Optional[str] = None
     tags: list[str] = Field(default_factory=list)

+    type: Optional[
+        Literal[
+            "compound",
+            "converter_compound",
+            "non_parallel",
+            "converter_non_parallel",
+            "parallel",
+        ]
+    ] = None
+
     @model_validator(mode="after")
     def validate_executable_args_meta(self):
         executable_non_parallel = self.executable_non_parallel
fractal_server/app/schemas/v2/status_legacy.py (new file)

@@ -0,0 +1,35 @@
+from enum import Enum
+
+from pydantic import BaseModel
+from pydantic import Field
+
+
+class WorkflowTaskStatusTypeV2(str, Enum):
+    """
+    Define the available values for the status of a `WorkflowTask`.
+
+    This model is used within the `Dataset.history` attribute, which is
+    constructed in the runner and then used in the API (e.g. in the
+    `api/v2/project/{project_id}/dataset/{dataset_id}/status` endpoint).
+
+    Attributes:
+        SUBMITTED: The `WorkflowTask` is part of a running job.
+        DONE: The most-recent execution of this `WorkflowTask` was successful.
+        FAILED: The most-recent execution of this `WorkflowTask` failed.
+    """
+
+    SUBMITTED = "submitted"
+    DONE = "done"
+    FAILED = "failed"
+
+
+class LegacyStatusReadV2(BaseModel):
+    """
+    Response type for the
+    `/project/{project_id}/status/` endpoint
+    """
+
+    status: dict[
+        str,
+        WorkflowTaskStatusTypeV2,
+    ] = Field(default_factory=dict)
fractal_server/app/schemas/v2/task.py

@@ -13,8 +13,20 @@ from .._validators import cant_set_none
 from fractal_server.app.schemas._validators import NonEmptyString
 from fractal_server.app.schemas._validators import val_unique_list
 from fractal_server.app.schemas._validators import valdict_keys
+from fractal_server.logger import set_logger
 from fractal_server.string_tools import validate_cmd

+TaskTypeType = Literal[
+    "compound",
+    "converter_compound",
+    "non_parallel",
+    "converter_non_parallel",
+    "parallel",
+]
+
+
+logger = set_logger(__name__)
+

 class TaskCreateV2(BaseModel):
     model_config = ConfigDict(extra="forbid")
@@ -41,6 +53,8 @@ class TaskCreateV2(BaseModel):
     tags: list[NonEmptyString] = Field(default_factory=list)
     authors: Optional[NonEmptyString] = None

+    type: Optional[TaskTypeType] = None
+
     # Validators

     @field_validator(
@@ -69,6 +83,23 @@ class TaskCreateV2(BaseModel):

         return self

+    @model_validator(mode="after")
+    def set_task_type(self):
+        if self.type is None:
+            logger.warning(
+                f"Task type is not set for task '{self.name}', "
+                "which will be deprecated in a future version. "
+                "Please move to `fractal-task-tools`."
+            )
+            if self.command_non_parallel is None:
+                self.type = "parallel"
+            elif self.command_parallel is None:
+                self.type = "non_parallel"
+            else:
+                self.type = "compound"
+
+        return self
+
     _meta_non_parallel = field_validator("meta_non_parallel")(
         classmethod(valdict_keys("meta_non_parallel"))
     )
@@ -104,7 +135,7 @@ class TaskCreateV2(BaseModel):
 class TaskReadV2(BaseModel):
     id: int
     name: str
-    type:
+    type: TaskTypeType
     source: Optional[str] = None
     version: Optional[str] = None

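When `type` is omitted, the `set_task_type` validator above infers it from which commands are present (and logs a deprecation warning). A small illustration, assuming the remaining `TaskCreateV2` fields are optional:

    from fractal_server.app.schemas.v2 import TaskCreateV2

    task = TaskCreateV2(name="my-task", command_parallel="python3 task.py")
    print(task.type)  # "parallel" (only a parallel command was given)

    task = TaskCreateV2(
        name="my-task",
        command_non_parallel="python3 init.py",
        command_parallel="python3 compute.py",
    )
    print(task.type)  # "compound" (both commands were given)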
fractal_server/app/schemas/v2/workflowtask.py

@@ -1,4 +1,3 @@
-from enum import Enum
 from typing import Any
 from typing import Optional
 from typing import Union
@@ -16,29 +15,11 @@ from .task import TaskExportV2
 from .task import TaskImportV2
 from .task import TaskImportV2Legacy
 from .task import TaskReadV2
+from .task import TaskTypeType

 RESERVED_ARGUMENTS = {"zarr_dir", "zarr_url", "zarr_urls", "init_args"}


-class WorkflowTaskStatusTypeV2(str, Enum):
-    """
-    Define the available values for the status of a `WorkflowTask`.
-
-    This model is used within the `Dataset.history` attribute, which is
-    constructed in the runner and then used in the API (e.g. in the
-    `api/v2/project/{project_id}/dataset/{dataset_id}/status` endpoint).
-
-    Attributes:
-        SUBMITTED: The `WorkflowTask` is part of a running job.
-        DONE: The most-recent execution of this `WorkflowTask` was successful.
-        FAILED: The most-recent execution of this `WorkflowTask` failed.
-    """
-
-    SUBMITTED = "submitted"
-    DONE = "done"
-    FAILED = "failed"
-
-
 class WorkflowTaskCreateV2(BaseModel):
     model_config = ConfigDict(extra="forbid")

@@ -113,7 +94,7 @@ class WorkflowTaskReadV2(BaseModel):

     type_filters: dict[str, bool]

-    task_type:
+    task_type: TaskTypeType
     task_id: int
     task: TaskReadV2
