fractal-server 2.14.0a24__py3-none-any.whl → 2.14.0a26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/models/v2/history.py +1 -1
- fractal_server/app/routes/api/v2/history.py +4 -2
- fractal_server/app/runner/executors/base_runner.py +20 -36
- fractal_server/app/runner/executors/local/get_local_config.py +5 -0
- fractal_server/app/runner/executors/local/runner.py +4 -47
- fractal_server/app/runner/executors/slurm_common/_slurm_config.py +4 -0
- fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py +27 -64
- fractal_server/app/runner/executors/slurm_common/get_slurm_config.py +38 -1
- fractal_server/app/runner/executors/slurm_ssh/runner.py +4 -3
- fractal_server/app/runner/task_files.py +29 -0
- fractal_server/app/runner/v2/db_tools.py +0 -15
- fractal_server/app/runner/v2/runner.py +4 -8
- fractal_server/app/runner/v2/runner_functions.py +73 -40
- fractal_server/migrations/versions/f37aceb45062_make_historyunit_logfile_required.py +39 -0
- {fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/METADATA +1 -1
- {fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/RECORD +20 -19
- {fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/LICENSE +0 -0
- {fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/WHEEL +0 -0
- {fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/entry_points.txt +0 -0
fractal_server/__init__.py
CHANGED
@@ -1 +1 @@
-__VERSION__ = "2.14.0a24"
+__VERSION__ = "2.14.0a26"
fractal_server/app/routes/api/v2/history.py
CHANGED
@@ -249,8 +249,10 @@ async def get_history_run_units(
     page_size = pagination.page_size or total_count

     # Query `HistoryUnit`s
-    stmt = select(HistoryUnit).where(
-        HistoryUnit.history_run_id == history_run_id
+    stmt = (
+        select(HistoryUnit)
+        .where(HistoryUnit.history_run_id == history_run_id)
+        .order_by(HistoryUnit.id)
     )
     if unit_status:
         stmt = stmt.where(HistoryUnit.status == unit_status)
fractal_server/app/runner/executors/base_runner.py
CHANGED
@@ -4,7 +4,6 @@ from fractal_server.app.runner.task_files import TaskFiles
 from fractal_server.app.schemas.v2.task import TaskTypeType
 from fractal_server.logger import set_logger

-
 TASK_TYPES_SUBMIT: list[TaskTypeType] = [
     "compound",
     "converter_compound",
@@ -103,29 +102,44 @@ class BaseRunner(object):

     def validate_multisubmit_parameters(
         self,
-        list_parameters: list[dict[str, Any]],
+        *,
         task_type: TaskTypeType,
+        list_parameters: list[dict[str, Any]],
         list_task_files: list[TaskFiles],
+        history_unit_ids: list[int],
     ) -> None:
         """
         Validate parameters for `multisubmit` method

         Args:
-            list_parameters: List of parameters dictionaries.
             task_type: Task type.
+            list_parameters: List of parameters dictionaries.
+            list_task_files:
+            history_unit_ids:
         """
         if task_type not in TASK_TYPES_MULTISUBMIT:
             raise ValueError(f"Invalid {task_type=} for `multisubmit`.")

+        if not isinstance(list_parameters, list):
+            raise ValueError("`parameters` must be a list.")
+
+        if len(list_parameters) != len(list_task_files):
+            raise ValueError(
+                f"{len(list_task_files)=} differs from "
+                f"{len(list_parameters)=}."
+            )
+        if len(history_unit_ids) != len(list_parameters):
+            raise ValueError(
+                f"{len(history_unit_ids)=} differs from "
+                f"{len(list_parameters)=}."
+            )
+
         subfolders = set(
             task_file.wftask_subfolder_local for task_file in list_task_files
         )
         if len(subfolders) != 1:
             raise ValueError(f"More than one subfolders: {subfolders}.")

-        if not isinstance(list_parameters, list):
-            raise ValueError("`parameters` must be a list.")
-
         for single_kwargs in list_parameters:
             if not isinstance(single_kwargs, dict):
                 raise ValueError("kwargs itemt must be a dictionary.")
@@ -137,33 +151,3 @@ class BaseRunner(object):
         zarr_urls = [kwargs["zarr_url"] for kwargs in list_parameters]
         if len(zarr_urls) != len(set(zarr_urls)):
             raise ValueError("Non-unique zarr_urls")
-
-    def validate_multisubmit_history_unit_ids(
-        self,
-        *,
-        history_unit_ids: list[int],
-        task_type: TaskTypeType,
-        list_parameters: list[dict[str, Any]],
-    ) -> None:
-        """
-        Run preliminary check for multisubmit inputs.
-
-        Args:
-            history_unit_ids:
-            task_type:
-            list_parameters:
-        """
-        if task_type in ["compound", "converter_compound"]:
-            if len(history_unit_ids) != 1:
-                raise NotImplementedError(
-                    "We are breaking the assumption that compound/multisubmit "
-                    "is associated to a single HistoryUnit. This is not "
-                    "supported."
-                )
-        elif task_type == "parallel" and len(history_unit_ids) != len(
-            list_parameters
-        ):
-            raise ValueError(
-                f"{len(history_unit_ids)=} differs from "
-                f"{len(list_parameters)=}."
-            )
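With `validate_multisubmit_history_unit_ids` folded into `validate_multisubmit_parameters`, the three per-task lists must now all have the same length. A minimal standalone sketch of the new length checks, with simplified stand-in inputs (this is not fractal-server code):

# Hypothetical inputs: two tasks' parameters, but only one history-unit id.
list_parameters = [{"zarr_url": "/tmp/a.zarr"}, {"zarr_url": "/tmp/b.zarr"}]
list_task_files = ["task-files-0", "task-files-1"]  # stand-ins for TaskFiles
history_unit_ids = [101]

if not isinstance(list_parameters, list):
    raise ValueError("`parameters` must be a list.")
if len(list_parameters) != len(list_task_files):
    raise ValueError(
        f"{len(list_task_files)=} differs from {len(list_parameters)=}."
    )
if len(history_unit_ids) != len(list_parameters):
    # This branch triggers here:
    # ValueError: len(history_unit_ids)=1 differs from len(list_parameters)=2.
    raise ValueError(
        f"{len(history_unit_ids)=} differs from {len(list_parameters)=}."
    )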
fractal_server/app/runner/executors/local/get_local_config.py
CHANGED
@@ -47,11 +47,16 @@ class LocalBackendConfig(BaseModel):
     model_config = ConfigDict(extra="forbid")
     parallel_tasks_per_job: Optional[int] = None

+    @property
+    def batch_size(self) -> int:
+        return self.parallel_tasks_per_job or 1
+

 def get_local_backend_config(
     wftask: WorkflowTaskV2,
     which_type: Literal["non_parallel", "parallel"],
    config_path: Optional[Path] = None,
+    tot_tasks: int = 1,
 ) -> LocalBackendConfig:
     """
     Prepare a `LocalBackendConfig` configuration object
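The new `batch_size` property falls back to 1 when `parallel_tasks_per_job` is unset. A runnable sketch of just this fallback (pydantic v2, mirroring the class above):

from typing import Optional
from pydantic import BaseModel, ConfigDict

class LocalBackendConfig(BaseModel):
    model_config = ConfigDict(extra="forbid")
    parallel_tasks_per_job: Optional[int] = None

    @property
    def batch_size(self) -> int:
        # `None` falls back to serial execution: one task per batch.
        return self.parallel_tasks_per_job or 1

print(LocalBackendConfig().batch_size)                          # 1
print(LocalBackendConfig(parallel_tasks_per_job=4).batch_size)  # 4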
fractal_server/app/runner/executors/local/runner.py
CHANGED
@@ -8,12 +8,7 @@ from .get_local_config import LocalBackendConfig
 from fractal_server.app.db import get_sync_db
 from fractal_server.app.runner.exceptions import TaskExecutionError
 from fractal_server.app.runner.executors.base_runner import BaseRunner
-from fractal_server.app.runner.task_files import MULTISUBMIT_PREFIX
-from fractal_server.app.runner.task_files import SUBMIT_PREFIX
 from fractal_server.app.runner.task_files import TaskFiles
-from fractal_server.app.runner.v2.db_tools import (
-    update_logfile_of_history_unit,
-)
 from fractal_server.app.runner.v2.db_tools import update_status_of_history_unit
 from fractal_server.app.schemas.v2 import HistoryUnitStatus
 from fractal_server.logger import set_logger
@@ -68,13 +63,6 @@ class LocalRunner(BaseRunner):
         workdir_local = task_files.wftask_subfolder_local
         workdir_local.mkdir()

-        # Add prefix to task_files object
-        task_files.prefix = SUBMIT_PREFIX
-        update_logfile_of_history_unit(
-            history_unit_id=history_unit_id,
-            logfile=task_files.log_file_local,
-        )
-
         # SUBMISSION PHASE
         future = self.executor.submit(
             func,
@@ -111,29 +99,18 @@ class LocalRunner(BaseRunner):
         list_task_files: list[TaskFiles],
         task_type: Literal["parallel", "compound", "converter_compound"],
         config: LocalBackendConfig,
-    ):
+    ) -> tuple[dict[int, Any], dict[int, BaseException]]:
         """
-        Note:
-
-
-        2. The number of `HistoryUnit`s is equal to `len(history_unit_ids)`.
-        3. For compound tasks, these two numbers are not the same.
-
-        For this reason, we defer database updates to the caller function,
-        when we are in one of the "compound" cases
-
+        Note: `list_parameters`, `list_task_files` and `history_unit_ids`
+        have the same size. For parallel tasks, this is also the number of
+        input images, while for compound tasks these can differ.
         """

         self.validate_multisubmit_parameters(
             list_parameters=list_parameters,
             task_type=task_type,
             list_task_files=list_task_files,
-        )
-
-        self.validate_multisubmit_history_unit_ids(
             history_unit_ids=history_unit_ids,
-            task_type=task_type,
-            list_parameters=list_parameters,
         )

         logger.debug(f"[multisubmit] START, {len(list_parameters)=}")
@@ -159,9 +136,6 @@ class LocalRunner(BaseRunner):
         active_futures: dict[int, Future] = {}
         for ind_within_chunk, kwargs in enumerate(list_parameters_chunk):
             positional_index = ind_chunk + ind_within_chunk
-            list_task_files[
-                positional_index
-            ].prefix = f"{MULTISUBMIT_PREFIX}-{positional_index:06d}"
             future = self.executor.submit(
                 func,
                 parameters=kwargs,
@@ -171,23 +145,6 @@ class LocalRunner(BaseRunner):
             )
             active_futures[positional_index] = future

-            if task_type == "parallel":
-                # FIXME: replace loop with a `bulk_update_history_unit`
-                # function
-                update_logfile_of_history_unit(
-                    history_unit_id=history_unit_ids[positional_index],
-                    logfile=list_task_files[
-                        positional_index
-                    ].log_file_local,
-                )
-            else:
-                logger.debug(
-                    f"Unclear what logfile to associate to {task_type=} "
-                    "within multisubmit (see issue #2382)."
-                )
-                # FIXME: Improve definition for compound tasks
-                pass
-
         while active_futures:
             finished_futures = [
                 index_and_future
fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py
CHANGED
@@ -12,7 +12,6 @@ import cloudpickle
 from ..slurm_common._slurm_config import SlurmConfig
 from ..slurm_common.slurm_job_task_models import SlurmJob
 from ..slurm_common.slurm_job_task_models import SlurmTask
-from ._batching import heuristics
 from ._job_states import STATES_FINISHED
 from fractal_server import __VERSION__
 from fractal_server.app.db import get_sync_db
@@ -20,15 +19,10 @@ from fractal_server.app.runner.exceptions import JobExecutionError
 from fractal_server.app.runner.exceptions import TaskExecutionError
 from fractal_server.app.runner.executors.base_runner import BaseRunner
 from fractal_server.app.runner.filenames import SHUTDOWN_FILENAME
-from fractal_server.app.runner.task_files import MULTISUBMIT_PREFIX
-from fractal_server.app.runner.task_files import SUBMIT_PREFIX
 from fractal_server.app.runner.task_files import TaskFiles
 from fractal_server.app.runner.v2.db_tools import (
     bulk_update_status_of_history_unit,
 )
-from fractal_server.app.runner.v2.db_tools import (
-    update_logfile_of_history_unit,
-)
 from fractal_server.app.runner.v2.db_tools import update_status_of_history_unit
 from fractal_server.app.schemas.v2 import HistoryUnitStatus
 from fractal_server.config import get_settings
@@ -84,9 +78,18 @@ class BaseSlurmRunner(BaseRunner):

         # Create job folders. Note that the local one may or may not exist
         # depending on whether it is a test or an actual run
-
-        self.
-
+        try:
+            if not self.root_dir_local.is_dir():
+                self._mkdir_local_folder(self.root_dir_local.as_posix())
+            self._mkdir_remote_folder(self.root_dir_remote.as_posix())
+        except Exception as e:
+            error_msg = (
+                f"Could not mkdir {self.root_dir_local.as_posix()} or "
+                f"{self.root_dir_remote.as_posix()}. "
+                f"Original error: {str(e)}."
+            )
+            logger.error(error_msg)
+            raise RuntimeError(error_msg)

         self.shutdown_file = self.root_dir_local / SHUTDOWN_FILENAME
         self.jobs = {}
@@ -178,7 +181,7 @@ class BaseSlurmRunner(BaseRunner):
             fractal_server=__VERSION__,
         )
         for task in slurm_job.tasks:
-            #
+            # Write input pickle
             _args = []
             _kwargs = dict(
                 parameters=task.parameters,
@@ -245,7 +248,7 @@ class BaseSlurmRunner(BaseRunner):

         # Always print output of `uname -n` and `pwd`
         script_lines.append('\necho "Hostname: $(uname -n)"')
-        script_lines.append('echo "Current directory
+        script_lines.append('echo "Current directory: $(pwd)"')
         script_lines.append(
             'echo "Start time: $(date +"%Y-%m-%dT%H:%M:%S%z")"'
         )
@@ -485,21 +488,14 @@ class BaseSlurmRunner(BaseRunner):
         self._mkdir_remote_folder(folder=workdir_remote.as_posix())
         logger.info("[submit] Create local/remote folders - END")

-        # Add prefix to task_files object
-        task_files.prefix = SUBMIT_PREFIX
-        update_logfile_of_history_unit(
-            history_unit_id=history_unit_id,
-            logfile=task_files.log_file_local,
-        )
-
         # Submission phase
         slurm_job = SlurmJob(
-            prefix=
+            prefix=task_files.prefix,
             workdir_local=workdir_local,
             workdir_remote=workdir_remote,
             tasks=[
                 SlurmTask(
-                    prefix=
+                    prefix=task_files.prefix,
                     index=0,
                     component=task_files.component,
                     parameters=parameters,
@@ -575,6 +571,11 @@ class BaseSlurmRunner(BaseRunner):
         task_type: Literal["parallel", "compound", "converter_compound"],
         config: SlurmConfig,
     ) -> tuple[dict[int, Any], dict[int, BaseException]]:
+        """
+        Note: `list_parameters`, `list_task_files` and `history_unit_ids`
+        have the same size. For parallel tasks, this is also the number of
+        input images, while for compound tasks these can differ.
+        """

         if len(self.jobs) > 0:
             raise RuntimeError(
@@ -599,11 +600,7 @@ class BaseSlurmRunner(BaseRunner):
             list_parameters=list_parameters,
             task_type=task_type,
             list_task_files=list_task_files,
-        )
-        self.validate_multisubmit_history_unit_ids(
             history_unit_ids=history_unit_ids,
-            task_type=task_type,
-            list_parameters=list_parameters,
         )

         logger.info(f"[multisubmit] START, {len(list_parameters)=}")
@@ -624,46 +621,28 @@ class BaseSlurmRunner(BaseRunner):

         tot_tasks = len(list_parameters)

-        # Set/validate parameters for task batching
-        tasks_per_job, parallel_tasks_per_job = heuristics(
-            # Number of parallel components (always known)
-            tot_tasks=tot_tasks,
-            # Optional WorkflowTask attributes:
-            tasks_per_job=config.tasks_per_job,
-            parallel_tasks_per_job=config.parallel_tasks_per_job,  # noqa
-            # Task requirements (multiple possible sources):
-            cpus_per_task=config.cpus_per_task,
-            mem_per_task=config.mem_per_task_MB,
-            # Fractal configuration variables (soft/hard limits):
-            target_cpus_per_job=config.target_cpus_per_job,
-            target_mem_per_job=config.target_mem_per_job,
-            target_num_jobs=config.target_num_jobs,
-            max_cpus_per_job=config.max_cpus_per_job,
-            max_mem_per_job=config.max_mem_per_job,
-            max_num_jobs=config.max_num_jobs,
-        )
-        config.parallel_tasks_per_job = parallel_tasks_per_job
-        config.tasks_per_job = tasks_per_job
+        # NOTE: chunking has already taken place in `get_slurm_config`,
+        # so that `config.tasks_per_job` is now set.

         # Divide arguments in batches of `tasks_per_job` tasks each
         args_batches = []
-        batch_size = tasks_per_job
+        batch_size = config.tasks_per_job
         for ind_chunk in range(0, tot_tasks, batch_size):
             args_batches.append(
                 list_parameters[ind_chunk : ind_chunk + batch_size]  # noqa
             )
-        if len(args_batches) != math.ceil(tot_tasks / tasks_per_job):
+        if len(args_batches) != math.ceil(tot_tasks / config.tasks_per_job):
             raise RuntimeError("Something wrong here while batching tasks")

         # Part 1/3: Iterate over chunks, prepare SlurmJob objects
         logger.info("[multisubmit] Prepare `SlurmJob`s.")
         jobs_to_submit = []
         for ind_batch, chunk in enumerate(args_batches):
-            prefix = f"{MULTISUBMIT_PREFIX}-{ind_batch:06d}"
+            # Read prefix based on the first task of this batch
+            prefix = list_task_files[ind_batch * batch_size].prefix
             tasks = []
             for ind_chunk, parameters in enumerate(chunk):
                 index = (ind_batch * batch_size) + ind_chunk
-                list_task_files[index].prefix = prefix
                 tasks.append(
                     SlurmTask(
                         prefix=prefix,
@@ -676,7 +655,6 @@ class BaseSlurmRunner(BaseRunner):
                         task_files=list_task_files[index],
                     ),
                 )
-
             jobs_to_submit.append(
                 SlurmJob(
                     prefix=prefix,
@@ -697,21 +675,6 @@ class BaseSlurmRunner(BaseRunner):
                 slurm_config=config,
             )

-        if task_type == "parallel":
-            # FIXME: replace loop with a `bulk_update_history_unit` function
-            for ind, task_files in enumerate(list_task_files):
-                update_logfile_of_history_unit(
-                    history_unit_id=history_unit_ids[ind],
-                    logfile=task_files.log_file_local,
-                )
-        else:
-            logger.debug(
-                f"Unclear what logfile to associate to {task_type=} "
-                "within multisubmit (see issue #2382)."
-            )
-            # FIXME: Improve definition for compound tasks
-            pass
-
         logger.info(f"END submission phase, {self.job_ids=}")

         # FIXME: replace this sleep with a more precise check
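A worked example of the batching arithmetic above, with hypothetical numbers (10 tasks and `config.tasks_per_job=3`); this is a standalone sketch, not fractal-server code:

import math

list_parameters = [{"index": i} for i in range(10)]
tot_tasks = len(list_parameters)
batch_size = 3  # stands in for config.tasks_per_job

# Divide arguments in batches of `batch_size` tasks each
args_batches = []
for ind_chunk in range(0, tot_tasks, batch_size):
    args_batches.append(list_parameters[ind_chunk : ind_chunk + batch_size])

# Consistency check, as in the diff above
assert len(args_batches) == math.ceil(tot_tasks / batch_size)
print([len(batch) for batch in args_batches])  # [3, 3, 3, 1]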
fractal_server/app/runner/executors/slurm_common/get_slurm_config.py
CHANGED
@@ -2,6 +2,7 @@ from pathlib import Path
 from typing import Literal
 from typing import Optional

+from ._batching import heuristics
 from ._slurm_config import _parse_mem_value
 from ._slurm_config import load_slurm_config_file
 from ._slurm_config import logger
@@ -10,7 +11,7 @@ from ._slurm_config import SlurmConfigError
 from fractal_server.app.models.v2 import WorkflowTaskV2


-def get_slurm_config(
+def get_slurm_config_internal(
     wftask: WorkflowTaskV2,
     which_type: Literal["non_parallel", "parallel"],
     config_path: Optional[Path] = None,
@@ -162,3 +163,39 @@ def get_slurm_config(
     slurm_config = SlurmConfig(**slurm_dict)

     return slurm_config
+
+
+def get_slurm_config(
+    wftask: WorkflowTaskV2,
+    which_type: Literal["non_parallel", "parallel"],
+    config_path: Optional[Path] = None,
+    tot_tasks: int = 1,
+) -> SlurmConfig:
+    config = get_slurm_config_internal(
+        wftask,
+        which_type,
+        config_path,
+    )
+
+    # Set/validate parameters for task batching
+    tasks_per_job, parallel_tasks_per_job = heuristics(
+        # Number of parallel components (always known)
+        tot_tasks=tot_tasks,
+        # Optional WorkflowTask attributes:
+        tasks_per_job=config.tasks_per_job,
+        parallel_tasks_per_job=config.parallel_tasks_per_job,  # noqa
+        # Task requirements (multiple possible sources):
+        cpus_per_task=config.cpus_per_task,
+        mem_per_task=config.mem_per_task_MB,
+        # Fractal configuration variables (soft/hard limits):
+        target_cpus_per_job=config.target_cpus_per_job,
+        target_mem_per_job=config.target_mem_per_job,
+        target_num_jobs=config.target_num_jobs,
+        max_cpus_per_job=config.max_cpus_per_job,
+        max_mem_per_job=config.max_mem_per_job,
+        max_num_jobs=config.max_num_jobs,
+    )
+    config.parallel_tasks_per_job = parallel_tasks_per_job
+    config.tasks_per_job = tasks_per_job
+
+    return config
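For reference, a minimal runnable sketch of the wrapper pattern introduced here, with stand-ins for `SlurmConfig` and `heuristics` (the real `heuristics` lives in `_batching.py` and takes the full argument set shown above):

from dataclasses import dataclass
from typing import Optional

@dataclass
class FakeSlurmConfig:
    # Stand-in for SlurmConfig, reduced to the two batching fields.
    tasks_per_job: Optional[int] = None
    parallel_tasks_per_job: Optional[int] = None

def fake_heuristics(
    *, tot_tasks: int, tasks_per_job: Optional[int]
) -> tuple[int, int]:
    # Stand-in for `_batching.heuristics`: honor an explicit setting,
    # otherwise put all tasks into a single job.
    n = tasks_per_job or tot_tasks
    return n, n

def get_config(tot_tasks: int = 1) -> FakeSlurmConfig:
    config = FakeSlurmConfig()  # would come from get_slurm_config_internal
    tasks_per_job, parallel_tasks_per_job = fake_heuristics(
        tot_tasks=tot_tasks, tasks_per_job=config.tasks_per_job
    )
    config.tasks_per_job = tasks_per_job
    config.parallel_tasks_per_job = parallel_tasks_per_job
    return config

print(get_config(tot_tasks=8))
# FakeSlurmConfig(tasks_per_job=8, parallel_tasks_per_job=8)

The point of the move: every caller of `get_slurm_config` now receives a config whose batching fields are already consistent with `tot_tasks`, instead of each runner re-running `heuristics` itself.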
fractal_server/app/runner/executors/slurm_ssh/runner.py
CHANGED
@@ -137,7 +137,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
         self.fractal_ssh.run_command(cmd=tar_command)
         t_1_tar = time.perf_counter()
         logger.info(
-            f"Remote archive {tarfile_path_remote} created"
+            f"[_fetch_artifacts] Remote archive {tarfile_path_remote} created"
             f" - elapsed: {t_1_tar - t_0_tar:.3f} s"
         )

@@ -149,7 +149,8 @@ class SlurmSSHRunner(BaseSlurmRunner):
         )
         t_1_get = time.perf_counter()
         logger.info(
-            f"Subfolder archive transferred back to {tarfile_path_local}"
+            "[_fetch_artifacts] Subfolder archive transferred back "
+            f"to {tarfile_path_local}"
             f" - elapsed: {t_1_get - t_0_get:.3f} s"
         )

@@ -160,7 +161,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
         Path(tarfile_path_local).unlink(missing_ok=True)

         t_1 = time.perf_counter()
-        logger.info(f"[
+        logger.info(f"[_fetch_artifacts] End - elapsed: {t_1 - t_0:.3f} s")

     def _send_inputs(self, jobs: list[SlurmJob]) -> None:
         """
fractal_server/app/runner/task_files.py
CHANGED
@@ -4,6 +4,7 @@ from typing import Union

 from pydantic import BaseModel

+from fractal_server.app.runner.components import _index_to_component
 from fractal_server.string_tools import sanitize_string

 SUBMIT_PREFIX = "non_par"
@@ -140,3 +141,31 @@ class TaskFiles(BaseModel):
             metadiff_file_remote=self.metadiff_file_remote,
             log_file_remote=self.log_file_remote,
         )
+
+
+def enrich_task_files_multisubmit(
+    *,
+    tot_tasks: int,
+    batch_size: int,
+    base_task_files: TaskFiles,
+) -> list[TaskFiles]:
+    """
+    Expand `TaskFiles` objects with `component` and `prefix`.
+    """
+
+    new_list_task_files: list[TaskFiles] = []
+    for absolute_index in range(tot_tasks):
+        ind_batch = absolute_index // batch_size
+        new_list_task_files.append(
+            TaskFiles(
+                **base_task_files.model_dump(
+                    exclude={
+                        "component",
+                        "prefix",
+                    }
+                ),
+                prefix=f"{MULTISUBMIT_PREFIX}-{ind_batch:06d}",
+                component=_index_to_component(absolute_index),
+            )
+        )
+    return new_list_task_files
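A worked example of the prefix/component assignment above, with hypothetical sizes (5 tasks in batches of 2); the `MULTISUBMIT_PREFIX` value and the `_index_to_component` body are stand-ins, not the real definitions:

MULTISUBMIT_PREFIX = "par"  # stand-in; the real constant lives in task_files.py

def _index_to_component(index: int) -> str:
    # Stand-in for fractal_server.app.runner.components._index_to_component
    return str(index)

tot_tasks, batch_size = 5, 2
for absolute_index in range(tot_tasks):
    ind_batch = absolute_index // batch_size
    prefix = f"{MULTISUBMIT_PREFIX}-{ind_batch:06d}"
    print(prefix, _index_to_component(absolute_index))
# par-000000 0
# par-000000 1
# par-000001 2
# par-000001 3
# par-000002 4

One prefix is shared by all tasks of a batch (which is what `base_slurm_runner.py` reads back via `list_task_files[ind_batch * batch_size].prefix`), while the component is unique per task.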
fractal_server/app/runner/v2/db_tools.py
CHANGED
@@ -4,7 +4,6 @@ from sqlalchemy.dialects.postgresql import insert as pg_insert
 from sqlalchemy.orm import Session
 from sqlmodel import update

-from fractal_server.app.db import get_sync_db
 from fractal_server.app.models.v2 import HistoryImageCache
 from fractal_server.app.models.v2 import HistoryRun
 from fractal_server.app.models.v2 import HistoryUnit
@@ -59,20 +58,6 @@ def bulk_update_status_of_history_unit(
         db_sync.commit()


-def update_logfile_of_history_unit(
-    *,
-    history_unit_id: int,
-    logfile: str,
-) -> None:
-    with next(get_sync_db()) as db_sync:
-        unit = db_sync.get(HistoryUnit, history_unit_id)
-        if unit is None:
-            raise ValueError(f"HistoryUnit {history_unit_id} not found.")
-        unit.logfile = logfile
-        db_sync.merge(unit)
-        db_sync.commit()
-
-
 def bulk_upsert_image_cache_fast(
     *,
     list_upsert_objects: list[dict[str, Any]],
fractal_server/app/runner/v2/runner.py
CHANGED
@@ -128,10 +128,6 @@ def execute_tasks_v2(
         db.commit()
         db.refresh(history_run)
         history_run_id = history_run.id
-        logger.debug(
-            f"Created {history_run_id=}, for "
-            f"{wftask.id=} and {dataset.id=}"
-        )

     # TASK EXECUTION (V2)
     if task.type in ["non_parallel", "converter_non_parallel"]:
@@ -205,9 +201,9 @@ def execute_tasks_v2(
     # Update image list
     num_new_images = 0
     current_task_output.check_zarr_urls_are_unique()
-    #
-    #
-    #
+    # NOTE: In principle we could make the task-output processing more
+    # granular, and also associate output-processing failures to history
+    # status.
     for image_obj in current_task_output.image_list_updates:
         image = image_obj.model_dump()
         # Edit existing image
@@ -346,7 +342,7 @@ def execute_tasks_v2(
     )

     db.commit()
-    db.close()  #
+    db.close()  # NOTE: this is needed, but the reason is unclear

     # Create accounting record
     record = AccountingRecord(
fractal_server/app/runner/v2/runner_functions.py
CHANGED
@@ -19,9 +19,13 @@ from fractal_server.app.db import get_sync_db
 from fractal_server.app.models.v2 import HistoryUnit
 from fractal_server.app.models.v2 import TaskV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
-from fractal_server.app.runner.components import _index_to_component
 from fractal_server.app.runner.executors.base_runner import BaseRunner
+from fractal_server.app.runner.task_files import enrich_task_files_multisubmit
+from fractal_server.app.runner.task_files import SUBMIT_PREFIX
 from fractal_server.app.runner.task_files import TaskFiles
+from fractal_server.app.runner.v2.db_tools import (
+    bulk_update_status_of_history_unit,
+)
 from fractal_server.app.runner.v2.db_tools import bulk_upsert_image_cache_fast
 from fractal_server.app.runner.v2.task_interface import (
     _cast_and_validate_InitTaskOutput,
@@ -128,6 +132,7 @@ def run_v2_task_non_parallel(
             WorkflowTaskV2,
             Literal["non_parallel", "parallel"],
             Optional[Path],
+            int,
         ],
         Any,
     ],
@@ -150,10 +155,14 @@ def run_v2_task_non_parallel(
         root_dir_remote=workflow_dir_remote,
         task_order=wftask.order,
         task_name=wftask.task.name,
-        component=
+        component="",
+        prefix=SUBMIT_PREFIX,
     )

-    runner_config = get_runner_config(wftask=wftask, which_type="non_parallel")
+    runner_config = get_runner_config(
+        wftask=wftask,
+        which_type="non_parallel",
+    )

     function_kwargs = {
         "zarr_dir": zarr_dir,
@@ -172,7 +181,7 @@ def run_v2_task_non_parallel(
     history_unit = HistoryUnit(
         history_run_id=history_run_id,
         status=HistoryUnitStatus.SUBMITTED,
-        logfile=
+        logfile=task_files.log_file_local,
         zarr_urls=zarr_urls,
     )
     db.add(history_unit)
@@ -232,6 +241,7 @@ def run_v2_task_parallel(
             WorkflowTaskV2,
             Literal["non_parallel", "parallel"],
             Optional[Path],
+            int,
         ],
         Any,
     ],
@@ -254,6 +264,7 @@ def run_v2_task_parallel(
     runner_config = get_runner_config(
         wftask=wftask,
         which_type="parallel",
+        tot_tasks=len(images),
     )

     list_function_kwargs = [
@@ -263,19 +274,18 @@ def run_v2_task_parallel(
         }
         for image in images
     ]
-
-
-
-
-
-
-    ]
+
+    list_task_files = enrich_task_files_multisubmit(
+        base_task_files=task_files,
+        tot_tasks=len(images),
+        batch_size=runner_config.batch_size,
+    )

     history_units = [
         HistoryUnit(
             history_run_id=history_run_id,
             status=HistoryUnitStatus.SUBMITTED,
-            logfile=
+            logfile=list_task_files[ind].log_file_local,
             zarr_urls=[image["zarr_url"]],
         )
         for ind, image in enumerate(images)
@@ -337,14 +347,6 @@ def run_v2_task_parallel(
     return outcome, num_tasks


-# FIXME: THIS FOR CONVERTERS:
-# if task_type in ["converter_non_parallel"]:
-#     run = db.get(HistoryRun, history_run_id)
-#     run.status = HistoryUnitStatus.DONE
-#     db.merge(run)
-#     db.commit()
-
-
 def run_v2_task_compound(
     *,
     images: list[dict[str, Any]],
@@ -359,6 +361,7 @@ def run_v2_task_compound(
             WorkflowTaskV2,
             Literal["non_parallel", "parallel"],
             Optional[Path],
+            int,
         ],
         Any,
     ],
@@ -372,18 +375,14 @@ def run_v2_task_compound(
         root_dir_remote=workflow_dir_remote,
         task_order=wftask.order,
         task_name=wftask.task.name,
-        component=
+        component="",
+        prefix=SUBMIT_PREFIX,
     )

     runner_config_init = get_runner_config(
         wftask=wftask,
         which_type="non_parallel",
     )
-    runner_config_compute = get_runner_config(
-        wftask=wftask,
-        which_type="parallel",
-    )
-
     # 3/A: non-parallel init task
     function_kwargs = {
         "zarr_dir": zarr_dir,
@@ -401,7 +400,7 @@ def run_v2_task_compound(
     history_unit = HistoryUnit(
         history_run_id=history_run_id,
         status=HistoryUnitStatus.SUBMITTED,
-        logfile=
+        logfile=task_files_init.log_file_local,
         zarr_urls=input_image_zarr_urls,
     )
     db.add(history_unit)
@@ -457,6 +456,14 @@ def run_v2_task_compound(

     num_tasks = 1 + len(parallelization_list)

+    # Mark the init-task `HistoryUnit` as "done"
+    with next(get_sync_db()) as db:
+        update_status_of_history_unit(
+            history_unit_id=history_unit_id,
+            status=HistoryUnitStatus.DONE,
+            db_sync=db,
+        )
+
     # 3/B: parallel part of a compound task
     _check_parallelization_list_size(parallelization_list)

@@ -476,16 +483,23 @@ def run_v2_task_compound(
         }
         return init_outcome, num_tasks

-
-
+    runner_config_compute = get_runner_config(
+        wftask=wftask,
+        which_type="parallel",
+        tot_tasks=len(parallelization_list),
+    )
+
+    list_task_files = enrich_task_files_multisubmit(
+        base_task_files=TaskFiles(
             root_dir_local=workflow_dir_local,
             root_dir_remote=workflow_dir_remote,
             task_order=wftask.order,
             task_name=wftask.task.name,
-
-        )
-
-
+        ),
+        tot_tasks=len(parallelization_list),
+        batch_size=runner_config_compute.batch_size,
+    )
+
     list_function_kwargs = [
         {
             "zarr_url": parallelization_item.zarr_url,
@@ -495,6 +509,23 @@ def run_v2_task_compound(
         for parallelization_item in parallelization_list
     ]

+    # Create one `HistoryUnit` per parallelization item
+    history_units = [
+        HistoryUnit(
+            history_run_id=history_run_id,
+            status=HistoryUnitStatus.SUBMITTED,
+            logfile=list_task_files[ind].log_file_local,
+            zarr_urls=[parallelization_item.zarr_url],
+        )
+        for ind, parallelization_item in enumerate(parallelization_list)
+    ]
+    with next(get_sync_db()) as db:
+        db.add_all(history_units)
+        db.commit()
+        for history_unit in history_units:
+            db.refresh(history_unit)
+        history_unit_ids = [history_unit.id for history_unit in history_units]
+
     results, exceptions = runner.multisubmit(
         functools.partial(
             run_single_task,
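The add_all/commit/refresh sequence above is the usual SQLModel pattern for bulk-inserting rows and reading back their database-generated ids. A self-contained sketch with a stand-in model and an in-memory SQLite database (not fractal-server code):

from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine

class Unit(SQLModel, table=True):
    # Stand-in for HistoryUnit, reduced to the fields needed here.
    id: Optional[int] = Field(default=None, primary_key=True)
    logfile: str

engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)

units = [Unit(logfile=f"/tmp/log-{ind}.log") for ind in range(3)]
with Session(engine) as db:
    db.add_all(units)
    db.commit()
    for unit in units:
        db.refresh(unit)  # populate the DB-generated primary key
    unit_ids = [unit.id for unit in units]
print(unit_ids)  # [1, 2, 3]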
@@ -506,7 +537,7 @@ def run_v2_task_compound(
         list_parameters=list_function_kwargs,
         task_type=task_type,
         list_task_files=list_task_files,
-        history_unit_ids=
+        history_unit_ids=history_unit_ids,
         config=runner_config_compute,
     )

@@ -526,18 +557,20 @@ def run_v2_task_compound(
             exception=exceptions.get(ind, None),
         )

-    #
-    # than
+    # NOTE: For compound tasks, we update `HistoryUnit.status` from here,
+    # rather than within the submit/multisubmit runner methods. This is
+    # to enforce the fact that either all units succeed or they all fail -
+    # at a difference with the parallel-task case.
     with next(get_sync_db()) as db:
         if failure:
-
-
+            bulk_update_status_of_history_unit(
+                history_unit_ids=history_unit_ids,
                 status=HistoryUnitStatus.FAILED,
                 db_sync=db,
             )
         else:
-
-
+            bulk_update_status_of_history_unit(
+                history_unit_ids=history_unit_ids,
                 status=HistoryUnitStatus.DONE,
                 db_sync=db,
             )
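A minimal sketch of this all-or-nothing status update, in plain Python with a dict standing in for the database (hypothetical values throughout):

exceptions = {2: RuntimeError("task 2 failed")}  # positional index -> error
failure = len(exceptions) > 0

history_unit_ids = [11, 12, 13]  # ids of the compound task's HistoryUnits
new_status = "failed" if failure else "done"
# Every unit of the compound task gets the same final status:
statuses = {unit_id: new_status for unit_id in history_unit_ids}
print(statuses)  # {11: 'failed', 12: 'failed', 13: 'failed'}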
fractal_server/migrations/versions/f37aceb45062_make_historyunit_logfile_required.py
ADDED
@@ -0,0 +1,39 @@
+"""Make HistoryUnit.logfile required
+
+Revision ID: f37aceb45062
+Revises: 9db60297b8b2
+Create Date: 2025-04-14 13:49:40.910342
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = "f37aceb45062"
+down_revision = "9db60297b8b2"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("historyunit", schema=None) as batch_op:
+        batch_op.alter_column(
+            "logfile",
+            existing_type=sa.VARCHAR(),
+            nullable=False,
+            server_default="__LOGFILE_PLACEHOLDER__",
+        )
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("historyunit", schema=None) as batch_op:
+        batch_op.alter_column(
+            "logfile", existing_type=sa.VARCHAR(), nullable=True
+        )
+
+    # ### end Alembic commands ###
{fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-fractal_server/__init__.py,sha256=
+fractal_server/__init__.py,sha256=5TMj3rq5zJN-KKD4tuCsVrhEOdTr3z_6ei5PgKuP2uM,26
 fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
 fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
 fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -12,7 +12,7 @@ fractal_server/app/models/user_settings.py,sha256=Y-ZV-uZAFLZqXxy8c5_Qeh_F7zQuZD
 fractal_server/app/models/v2/__init__.py,sha256=vjHwek7-IXmaZZL9VF0nD30YL9ca4wNc8P4RXJK_kDc,832
 fractal_server/app/models/v2/accounting.py,sha256=f2ALxfKKBNxFLJTtC2-YqRepVK253x68y7zkD2V_Nls,1115
 fractal_server/app/models/v2/dataset.py,sha256=Xa3YLmqvSChBJoqlSsjmt-5x0zC-6rSx2eafFnMukfo,1240
-fractal_server/app/models/v2/history.py,sha256=
+fractal_server/app/models/v2/history.py,sha256=u4i0NZko8eX5YKAk3MvVIIxU3owJ7D9tEPS_uJT9rrQ,2034
 fractal_server/app/models/v2/job.py,sha256=JWrEjX_E4iRFr5MbmtV_aY28J-5D469awLr0rfa5Kig,2052
 fractal_server/app/models/v2/project.py,sha256=rAHoh5KfYwIaW7rTX0_O0jvWmxEvfo1BafvmcXuSSRk,786
 fractal_server/app/models/v2/task.py,sha256=8KEROaadgccXRZIP7EriBp2j1FgzYkgiirOi5_fG79M,1494
@@ -36,7 +36,7 @@ fractal_server/app/routes/api/v2/_aux_functions_history.py,sha256=ZlI6nwzB5r9AiY
 fractal_server/app/routes/api/v2/_aux_functions_task_lifecycle.py,sha256=qdXCb6IP8-qPEAxGZKljtjIqNzIAyRaAsQSRi5VqFHM,6773
 fractal_server/app/routes/api/v2/_aux_functions_tasks.py,sha256=uhNSs-jcS7ndIUFKiOC1yrDiViw3uvKEXi9UL04BMks,11642
 fractal_server/app/routes/api/v2/dataset.py,sha256=h5AhE0sdhQ20ZlIbEJsFnHIOUW0S1VHFpoflpBkVScs,8936
-fractal_server/app/routes/api/v2/history.py,sha256=
+fractal_server/app/routes/api/v2/history.py,sha256=pDztvwQFOh3JChtSk9GIG3H17yg4G5pk1mq14qXF4Ck,17793
 fractal_server/app/routes/api/v2/images.py,sha256=BGpO94gVd8BTpCN6Mun2RXmjrPmfkIp73m8RN7uiGW4,8361
 fractal_server/app/routes/api/v2/job.py,sha256=MU1sHIKk_89WrD0TD44d4ufzqnywot7On_W71KjyUbQ,6500
 fractal_server/app/routes/api/v2/project.py,sha256=uAZgATiHcOvbnRX-vv1D3HoaEUvLUd7vzVmGcqOP8ZY,4602
@@ -70,21 +70,21 @@ fractal_server/app/runner/components.py,sha256=-Ii5l8d_V6f5DFOd-Zsr8VYmOsyqw0Hox
 fractal_server/app/runner/compress_folder.py,sha256=DX-4IYlSXlMd0EmXDD8M8FxisfKLbooSTrdNtzYAQAM,4876
 fractal_server/app/runner/exceptions.py,sha256=JC5ufHyeA1hYD_rkZUscI30DD8D903ncag7Z3AArmUY,4215
 fractal_server/app/runner/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fractal_server/app/runner/executors/base_runner.py,sha256=
+fractal_server/app/runner/executors/base_runner.py,sha256=knWOERUwRLhsd9eq5GwGxH2ZVsvPOZRRjQPGbiExqcU,5052
 fractal_server/app/runner/executors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fractal_server/app/runner/executors/local/get_local_config.py,sha256=
-fractal_server/app/runner/executors/local/runner.py,sha256=
+fractal_server/app/runner/executors/local/get_local_config.py,sha256=KiakXxOahaLgWvQJ1LVGYGXht6DMGR9x8Xu-TuT9aY4,3628
+fractal_server/app/runner/executors/local/runner.py,sha256=5SVNWnCfj2D5hIw_KNf8VchC0czLhmfqmqdHM0kWsuY,7159
 fractal_server/app/runner/executors/slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm_common/_batching.py,sha256=ZY020JZlDS5mfpgpWTChQkyHU7iLE5kx2HVd57_C6XA,8850
 fractal_server/app/runner/executors/slurm_common/_job_states.py,sha256=nuV-Zba38kDrRESOVB3gaGbrSPZc4q7YGichQaeqTW0,238
-fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=
-fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=
-fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256
+fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=qzWsMFUbcgxo2p5BltTlxDBLgGa6Z4gDKDdZioK3MB0,15979
+fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=vF2lAUgO7vbK9pR1Jd2dFsimO45ccw2OeJTJ0z1YWwQ,30729
+fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256=BW6fDpPyB0VH5leVxvwzkVH3r3hC7DuSyoWmRzHITWg,7305
 fractal_server/app/runner/executors/slurm_common/remote.py,sha256=FS_F8EaPp-A5eQT5_ZH3ICCHt0-C8b_2OSYcyRkXnb4,5851
 fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py,sha256=RoxHLKOn0_wGjnY0Sv0a9nDSiqxYZHKRoMkT3p9_G1E,3607
 fractal_server/app/runner/executors/slurm_common/utils_executors.py,sha256=naPyJI0I3lD-sYHbSXbMFGUBK4h_SggA5V91Z1Ch1Xg,1416
 fractal_server/app/runner/executors/slurm_ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=
+fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=ULQYW8A12BwC4GK2_2RhS33DFOFJewZoxS6vn_80z8c,7187
 fractal_server/app/runner/executors/slurm_sudo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=O1bNg1DiSDJmQE0RmOk2Ii47DagiXp5ryd0R6KxO2OM,3177
 fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=WGGVHX_juqyC6OVhln9yg-YKjLiuAoWZhAGxBjhNkWw,5873
@@ -93,16 +93,16 @@ fractal_server/app/runner/filenames.py,sha256=lPnxKHtdRizr6FqG3zOdjDPyWA7GoaJGTt
 fractal_server/app/runner/run_subprocess.py,sha256=c3JbYXq3hX2aaflQU19qJ5Xs6J6oXGNvnTEoAfv2bxc,959
 fractal_server/app/runner/set_start_and_last_task_index.py,sha256=-q4zVybAj8ek2XlbENKlfOAJ39hT_zoJoZkqzDqiAMY,1254
 fractal_server/app/runner/shutdown.py,sha256=9pfSKHDNdIcm0eY-opgRTi7y0HmvfPmYiu9JR6Idark,2082
-fractal_server/app/runner/task_files.py,sha256=
+fractal_server/app/runner/task_files.py,sha256=27xFuPzSJc1Pw912CfSMPOhOIpvNwpkyLCnycqdo9lw,4365
 fractal_server/app/runner/v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/v2/_local.py,sha256=DK8yagbvd6HHjcDVhUzTy0f7MURlTkQha-NM6OZKgJc,3044
 fractal_server/app/runner/v2/_slurm_ssh.py,sha256=_bytOf8z9sdrhI03D6eqg-aQPnJ7V2-qnqpcHAYizns,3278
 fractal_server/app/runner/v2/_slurm_sudo.py,sha256=DBCNxifXmMkpu71Wnk5u9-wKT7PV1WROQuY_4DYoZRI,2993
-fractal_server/app/runner/v2/db_tools.py,sha256=
+fractal_server/app/runner/v2/db_tools.py,sha256=Ots6-Da7A_5yetSYrUGi-_yV-2r21Nc6XUBK3bv2mTM,2967
 fractal_server/app/runner/v2/deduplicate_list.py,sha256=IVTE4abBU1bUprFTkxrTfYKnvkNTanWQ-KWh_etiT08,645
 fractal_server/app/runner/v2/merge_outputs.py,sha256=D1L4Taieq9i71SPQyNc1kMokgHh-sV_MqF3bv7QMDBc,907
-fractal_server/app/runner/v2/runner.py,sha256=
-fractal_server/app/runner/v2/runner_functions.py,sha256=
+fractal_server/app/runner/v2/runner.py,sha256=sbBOH5gCErxK0fCPPGBWtLtqsSwtmrhTth5OLUGMeZQ,15658
+fractal_server/app/runner/v2/runner_functions.py,sha256=gi5M_JlFMV3DP6ismF7eObs2cTglAcVdgsRDKSAQRc8,17632
 fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=9t1CHN3EyfsGRWfG257YPY5WjQ6zuztsw_KZrpEAFPo,3703
 fractal_server/app/runner/v2/submit_workflow.py,sha256=EDUyUuIPwZHb2zm7SCRRoFsGq2cN-b5OKw6CYkZ8kWk,13048
 fractal_server/app/runner/v2/task_interface.py,sha256=IXdQTI8rXFgXv1Ez0js4CjKFf3QwO2GCHRTuwiFtiTQ,2891
@@ -173,6 +173,7 @@ fractal_server/migrations/versions/db09233ad13a_split_filters_and_keep_old_colum
 fractal_server/migrations/versions/e75cac726012_make_applyworkflow_start_timestamp_not_.py,sha256=lOggSvzGWqQvnxxFuSM6W50Ui49R918A-uBuiZJ0pNM,963
 fractal_server/migrations/versions/e81103413827_add_job_type_filters.py,sha256=t4ImlKNHx5JMgBL2sTpLWunv1gwY8OCFOKd3G338mdE,890
 fractal_server/migrations/versions/efa89c30e0a4_add_project_timestamp_created.py,sha256=jilQW3QIqYQ4Q6hCnUiG7UtNMpA41ujqrB3tPFiPM1Q,1221
+fractal_server/migrations/versions/f37aceb45062_make_historyunit_logfile_required.py,sha256=jLHcVq9z0Ou20u-mwPf6EICDKY4dwFAzBgbRRx9_xDw,1007
 fractal_server/migrations/versions/f384e1c0cf5d_drop_task_default_args_columns.py,sha256=9BwqUS9Gf7UW_KjrzHbtViC880qhD452KAytkHWWZyk,746
 fractal_server/migrations/versions/fbce16ff4e47_new_history_items.py,sha256=TDWCaIoM0Q4SpRWmR9zr_rdp3lJXhCfBPTMhtrP5xYE,3950
 fractal_server/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -207,8 +208,8 @@ fractal_server/tasks/v2/utils_templates.py,sha256=Kc_nSzdlV6KIsO0CQSPs1w70zLyENP
 fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
 fractal_server/utils.py,sha256=PMwrxWFxRTQRl1b9h-NRIbFGPKqpH_hXnkAT3NfZdpY,3571
 fractal_server/zip_tools.py,sha256=GjDgo_sf6V_DDg6wWeBlZu5zypIxycn_l257p_YVKGc,4876
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
+fractal_server-2.14.0a26.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+fractal_server-2.14.0a26.dist-info/METADATA,sha256=5sAI97ML9ViHIX8EvykTdZBa8B8PSri-pjDXzsYG4gM,4563
+fractal_server-2.14.0a26.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+fractal_server-2.14.0a26.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+fractal_server-2.14.0a26.dist-info/RECORD,,
{fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/LICENSE
File without changes
{fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/WHEEL
File without changes
{fractal_server-2.14.0a24.dist-info → fractal_server-2.14.0a26.dist-info}/entry_points.txt
File without changes