fractal-server 2.14.0a15__py3-none-any.whl → 2.14.0a17__py3-none-any.whl
This diff compares the contents of two publicly released package versions, as they appear in their public registries, and is provided for informational purposes only.
- fractal_server/__init__.py +1 -1
- fractal_server/app/runner/executors/local/runner.py +31 -5
- fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py +116 -31
- fractal_server/app/runner/executors/slurm_sudo/runner.py +2 -2
- fractal_server/app/runner/v2/db_tools.py +15 -1
- fractal_server/app/runner/v2/runner_functions.py +3 -4
- {fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/METADATA +1 -1
- {fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/RECORD +11 -11
- {fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/LICENSE +0 -0
- {fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/WHEEL +0 -0
- {fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/entry_points.txt +0 -0
fractal_server/__init__.py
CHANGED
@@ -1 +1 @@
-__VERSION__ = "2.14.0a15"
+__VERSION__ = "2.14.0a17"
fractal_server/app/runner/executors/local/runner.py
CHANGED
@@ -11,6 +11,9 @@ from fractal_server.app.runner.executors.base_runner import BaseRunner
 from fractal_server.app.runner.task_files import MULTISUBMIT_PREFIX
 from fractal_server.app.runner.task_files import SUBMIT_PREFIX
 from fractal_server.app.runner.task_files import TaskFiles
+from fractal_server.app.runner.v2.db_tools import (
+    update_logfile_of_history_unit,
+)
 from fractal_server.app.runner.v2.db_tools import update_status_of_history_unit
 from fractal_server.app.schemas.v2 import HistoryUnitStatus
 from fractal_server.logger import set_logger
@@ -69,7 +72,12 @@ class LocalRunner(BaseRunner):
         workdir_local = task_files.wftask_subfolder_local
         workdir_local.mkdir()
 
+        # Add prefix to task_files object
         task_files.prefix = SUBMIT_PREFIX
+        update_logfile_of_history_unit(
+            history_unit_id=history_unit_id,
+            logfile=task_files.log_file_local,
+        )
 
         # SUBMISSION PHASE
         future = self.executor.submit(
@@ -155,17 +163,35 @@ class LocalRunner(BaseRunner):
             active_futures: dict[int, Future] = {}
             for ind_within_chunk, kwargs in enumerate(list_parameters_chunk):
                 positional_index = ind_chunk + ind_within_chunk
-
-
-
-                )
+                list_task_files[
+                    positional_index
+                ].prefix = f"{MULTISUBMIT_PREFIX}-{positional_index:06d}"
                 future = self.executor.submit(
                     func,
                     parameters=kwargs,
-                    remote_files=
+                    remote_files=list_task_files[
+                        positional_index
+                    ].remote_files_dict,
                 )
                 active_futures[positional_index] = future
 
+                if task_type == "parallel":
+                    # FIXME: replace loop with a `bulk_update_history_unit`
+                    # function
+                    update_logfile_of_history_unit(
+                        history_unit_id=history_unit_ids[positional_index],
+                        logfile=list_task_files[
+                            positional_index
+                        ].log_file_local,
+                    )
+                else:
+                    logger.debug(
+                        f"Unclear what logfile to associate to {task_type=} "
+                        "within multisubmit (see issue #2382)."
+                    )
+                    # FIXME: Improve definition for compound tasks
+                    pass
+
             while active_futures:
                 # FIXME: add shutdown detection
                 # if file exists: cancel all futures, and raise
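Note: the multisubmit changes above amount to a small bookkeeping pattern: each task gets a zero-padded per-index prefix, and, for `parallel` tasks only, its `HistoryUnit` is pointed at the corresponding logfile (for compound tasks the association is still open, see issue #2382). A minimal sketch of that pattern, assuming fractal-server ≥ 2.14.0a17 is importable; `register_multisubmit_bookkeeping` is a made-up helper name, and the real logic lives inline in the runner's submission loop:

```python
from fractal_server.app.runner.task_files import MULTISUBMIT_PREFIX, TaskFiles
from fractal_server.app.runner.v2.db_tools import update_logfile_of_history_unit


def register_multisubmit_bookkeeping(
    list_task_files: list[TaskFiles],
    history_unit_ids: list[int],
    task_type: str,
) -> None:
    """Illustrative only: mirrors the per-task bookkeeping added in 2.14.0a17."""
    for positional_index, task_files in enumerate(list_task_files):
        # Zero-padded, per-index prefix for the task's files.
        task_files.prefix = f"{MULTISUBMIT_PREFIX}-{positional_index:06d}"
        if task_type == "parallel":
            # One HistoryUnit per task: record its own logfile.
            update_logfile_of_history_unit(
                history_unit_id=history_unit_ids[positional_index],
                logfile=task_files.log_file_local,
            )
        # For compound tasks, the logfile association is still open
        # (see issue #2382, referenced in the diff).
```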
fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py
CHANGED
@@ -23,12 +23,18 @@ from fractal_server.app.runner.filenames import SHUTDOWN_FILENAME
 from fractal_server.app.runner.task_files import MULTISUBMIT_PREFIX
 from fractal_server.app.runner.task_files import SUBMIT_PREFIX
 from fractal_server.app.runner.task_files import TaskFiles
+from fractal_server.app.runner.v2.db_tools import (
+    update_logfile_of_history_unit,
+)
 from fractal_server.app.runner.v2.db_tools import update_status_of_history_unit
 from fractal_server.app.schemas.v2 import HistoryUnitStatus
 from fractal_server.config import get_settings
 from fractal_server.logger import set_logger
 from fractal_server.syringe import Inject
 
+SHUTDOWN_ERROR_MESSAGE = "Failed due to job-execution shutdown."
+SHUTDOWN_EXCEPTION = JobExecutionError(SHUTDOWN_ERROR_MESSAGE)
+
 logger = set_logger(__name__)
 
 # FIXME: Transform several logger.info into logger.debug.
@@ -91,6 +97,7 @@ class BaseSlurmRunner(BaseRunner):
 
     def run_squeue(self, job_ids: list[str]) -> tuple[bool, str]:
         # FIXME: review different cases (exception vs no job found)
+        # FIXME: Fail for empty list
         job_id_single_str = ",".join([str(j) for j in job_ids])
         cmd = (
             f"squeue --noheader --format='%i %T' --jobs {job_id_single_str}"
@@ -225,9 +232,11 @@ class BaseSlurmRunner(BaseRunner):
         logger.info(script_lines)
 
         # Always print output of `uname -n` and `pwd`
-        script_lines.append("Hostname: $(uname -n)
-        script_lines.append("Current directory : $(pwd)
-        script_lines.append(
+        script_lines.append('\necho "Hostname: $(uname -n)"')
+        script_lines.append('echo "Current directory : $(pwd)"')
+        script_lines.append(
+            'echo "Start time: $(date +"%Y-%m-%dT%H:%M:%S%z")"'
+        )
 
         # Complete script preamble
         script_lines.append("\n")
@@ -242,7 +251,9 @@ class BaseSlurmRunner(BaseRunner):
         )
         script_lines.append("wait\n")
         script = "\n".join(script_lines)
-        script_lines.append(
+        script_lines.append(
+            'echo "End time: $(date +"%Y-%m-%dT%H:%M:%S%z")"'
+        )
 
         # Write submission script
         with open(slurm_job.slurm_submission_script_local, "w") as f:
@@ -332,7 +343,10 @@ class BaseSlurmRunner(BaseRunner):
         pass
 
     def _postprocess_single_task(
-        self,
+        self,
+        *,
+        task: SlurmTask,
+        was_job_scancelled: bool = False,
     ) -> tuple[Any, Exception]:
         try:
             with open(task.output_pickle_file_local, "rb") as f:
@@ -344,17 +358,24 @@ class BaseSlurmRunner(BaseRunner):
             else:
                 exception = _handle_exception_proxy(output)
                 return None, exception
+
         except Exception as e:
             exception = JobExecutionError(f"ERROR, {str(e)}")
+            # If job was scancelled and task failed, replace
+            # exception with a shutdown-related one.
+            if was_job_scancelled:
+                logger.debug(
+                    "Replacing exception with a shutdown-related one, "
+                    f"for {task.index=}."
+                )
+                exception = SHUTDOWN_EXCEPTION
+
             return None, exception
         finally:
-
-
-            # Path(task.input_pickle_file_local).unlink(missing_ok=True)
-            # Path(task.output_pickle_file_local).unlink(missing_ok=True)
+            Path(task.input_pickle_file_local).unlink(missing_ok=True)
+            Path(task.output_pickle_file_local).unlink(missing_ok=True)
 
     def is_shutdown(self) -> bool:
-        # FIXME: shutdown is not implemented
         return self.shutdown_file.exists()
 
     @property
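Note: the `_postprocess_single_task` changes have two effects: a task failure in a job that was scancelled during shutdown is reported as the uniform shutdown error, and the temporary pickle files are now always removed in the `finally` block (the unlink calls were previously commented out). A self-contained sketch of the same error-handling shape; `postprocess_output` is a made-up name, the local `JobExecutionError` is a stand-in for fractal-server's exception class, and the unpickling step is stubbed out:

```python
from pathlib import Path


class JobExecutionError(RuntimeError):
    # Local stand-in for fractal-server's JobExecutionError.
    pass


SHUTDOWN_EXCEPTION = JobExecutionError("Failed due to job-execution shutdown.")


def postprocess_output(
    output_pickle_file: Path,
    input_pickle_file: Path,
    *,
    was_job_scancelled: bool = False,
):
    """Illustrative only: mirrors the error-handling shape of
    _postprocess_single_task, with unpickling stubbed out."""
    try:
        result = output_pickle_file.read_bytes()  # stand-in for unpickling
        return result, None
    except Exception as e:
        exception: Exception = JobExecutionError(f"ERROR, {str(e)}")
        # A failure in a scancelled job is reported as a shutdown error.
        if was_job_scancelled:
            exception = SHUTDOWN_EXCEPTION
        return None, exception
    finally:
        # Temporary pickle files are always cleaned up.
        input_pickle_file.unlink(missing_ok=True)
        output_pickle_file.unlink(missing_ok=True)
```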
@@ -385,7 +406,14 @@ class BaseSlurmRunner(BaseRunner):
             raise JobExecutionError("Unexpected branch: jobs should be empty.")
 
         if self.is_shutdown():
-
+            with next(get_sync_db()) as db:
+                update_status_of_history_unit(
+                    history_unit_id=history_unit_id,
+                    status=HistoryUnitStatus.FAILED,
+                    db_sync=db,
+                )
+
+            return None, SHUTDOWN_EXCEPTION
 
         # Validation phase
         self.validate_submit_parameters(
@@ -401,6 +429,10 @@ class BaseSlurmRunner(BaseRunner):
 
         # Add prefix to task_files object
         task_files.prefix = SUBMIT_PREFIX
+        update_logfile_of_history_unit(
+            history_unit_id=history_unit_id,
+            logfile=task_files.log_file_local,
+        )
 
         # Submission phase
         slurm_job = SlurmJob(
@@ -437,19 +469,29 @@ class BaseSlurmRunner(BaseRunner):
         # Retrieval phase
         logger.info("[submit] START retrieval phase")
         while len(self.jobs) > 0:
+
+            # Handle shutdown
+            scancelled_job_ids = []
             if self.is_shutdown():
-
+                logger.info("[submit] Shutdown file detected")
+                scancelled_job_ids = self.scancel_jobs()
+                logger.info(f"[submit] {scancelled_job_ids=}")
+
+            # Look for finished jobs
             finished_job_ids = self._get_finished_jobs(job_ids=self.job_ids)
-            logger.
+            logger.debug(f"[submit] {finished_job_ids=}")
+
             with next(get_sync_db()) as db:
                 for slurm_job_id in finished_job_ids:
-                    logger.
+                    logger.debug(f"[submit] Now process {slurm_job_id=}")
                     slurm_job = self.jobs.pop(slurm_job_id)
-
                     self._copy_files_from_remote_to_local(slurm_job)
+                    was_job_scancelled = slurm_job_id in scancelled_job_ids
                     result, exception = self._postprocess_single_task(
-                        task=slurm_job.tasks[0]
+                        task=slurm_job.tasks[0],
+                        was_job_scancelled=was_job_scancelled,
                     )
+
                     if exception is not None:
                         update_status_of_history_unit(
                             history_unit_id=history_unit_id,
@@ -477,13 +519,29 @@ class BaseSlurmRunner(BaseRunner):
         list_task_files: list[TaskFiles],
         task_type: Literal["parallel", "compound", "converter_compound"],
         config: SlurmConfig,
-    ):
+    ) -> tuple[dict[int, Any], dict[int, BaseException]]:
 
         if len(self.jobs) > 0:
             raise RuntimeError(
-                f"Cannot run
+                f"Cannot run `multisubmit` when {len(self.jobs)=}"
             )
 
+        if self.is_shutdown():
+            if task_type == "parallel":
+                with next(get_sync_db()) as db:
+                    # FIXME: Replace with bulk function
+                    for history_unit_id in history_unit_ids:
+                        update_status_of_history_unit(
+                            history_unit_id=history_unit_id,
+                            status=HistoryUnitStatus.FAILED,
+                            db_sync=db,
+                        )
+            results = {}
+            exceptions = {
+                ind: SHUTDOWN_EXCEPTION for ind in range(len(list_parameters))
+            }
+            return results, exceptions
+
         self.validate_multisubmit_parameters(
             list_parameters=list_parameters,
             task_type=task_type,
@@ -550,18 +608,17 @@ class BaseSlurmRunner(BaseRunner):
             tasks = []
             for ind_chunk, parameters in enumerate(chunk):
                 index = (ind_batch * batch_size) + ind_chunk
-
-                current_task_files.prefix = prefix
+                list_task_files[index].prefix = prefix
                 tasks.append(
                     SlurmTask(
                         prefix=prefix,
                         index=index,
-                        component=
+                        component=list_task_files[index].component,
                         workdir_local=workdir_local,
                         workdir_remote=workdir_remote,
                         parameters=parameters,
                         zarr_url=parameters["zarr_url"],
-                        task_files=
+                        task_files=list_task_files[index],
                     ),
                 )
 
@@ -576,6 +633,21 @@ class BaseSlurmRunner(BaseRunner):
                 slurm_job=slurm_job,
                 slurm_config=config,
             )
+        if task_type == "parallel":
+            # FIXME: replace loop with a `bulk_update_history_unit` function
+            for ind, task_files in enumerate(list_task_files):
+                update_logfile_of_history_unit(
+                    history_unit_id=history_unit_ids[ind],
+                    logfile=task_files.log_file_local,
+                )
+        else:
+            logger.debug(
+                f"Unclear what logfile to associate to {task_type=} "
+                "within multisubmit (see issue #2382)."
+            )
+            # FIXME: Improve definition for compound tasks
+            pass
+
         logger.info(f"END submission phase, {self.job_ids=}")
 
         # FIXME: replace this sleep a more precise check
@@ -584,22 +656,34 @@ class BaseSlurmRunner(BaseRunner):
             logger.warning(f"[submit] Now sleep {sleep_time} (FIXME)")
             time.sleep(sleep_time)
 
+        # FIXME: Could we merge the submit/multisubmit retrieval phases?
+
         # Retrieval phase
-        logger.info("START retrieval phase")
+        logger.info("[multisubmit] START retrieval phase")
         while len(self.jobs) > 0:
+
+            # Handle shutdown
+            scancelled_job_ids = []
             if self.is_shutdown():
-
+                logger.info("[multisubmit] Shutdown file detected")
+                scancelled_job_ids = self.scancel_jobs()
+                logger.info(f"[multisubmit] {scancelled_job_ids=}")
+
+            # Look for finished jobs
             finished_job_ids = self._get_finished_jobs(job_ids=self.job_ids)
-            logger.
+            logger.debug(f"[multisubmit] {finished_job_ids=}")
+
             with next(get_sync_db()) as db:
                 for slurm_job_id in finished_job_ids:
-                    logger.info(f"Now
+                    logger.info(f"[multisubmit] Now process {slurm_job_id=}")
                     slurm_job = self.jobs.pop(slurm_job_id)
                     self._copy_files_from_remote_to_local(slurm_job)
                     for task in slurm_job.tasks:
-                        logger.info(f"Now
+                        logger.info(f"[multisubmit] Now process {task.index=}")
+                        was_job_scancelled = slurm_job_id in scancelled_job_ids
                         result, exception = self._postprocess_single_task(
-                            task=task
+                            task=task,
+                            was_job_scancelled=was_job_scancelled,
                         )
 
                         # Note: the relevant done/failed check is based on
@@ -659,11 +743,12 @@ class BaseSlurmRunner(BaseRunner):
             logger.error(error_msg)
             raise RuntimeError(error_msg)
 
-    def scancel_jobs(self) ->
+    def scancel_jobs(self) -> list[str]:
         logger.info("[scancel_jobs] START")
 
         if self.jobs:
-
+            scancelled_job_ids = self.job_ids
+            scancel_string = " ".join(scancelled_job_ids)
             scancel_cmd = f"scancel {scancel_string}"
             logger.warning(f"Now scancel-ing SLURM jobs {scancel_string}")
             try:
@@ -673,5 +758,5 @@ class BaseSlurmRunner(BaseRunner):
                 "[scancel_jobs] `scancel` command failed. "
                 f"Original error:\n{str(e)}"
             )
-
         logger.info("[scancel_jobs] END")
+        return scancelled_job_ids
fractal_server/app/runner/executors/slurm_sudo/runner.py
CHANGED
@@ -140,7 +140,7 @@ class SudoSlurmRunner(BaseSlurmRunner):
             f"Original error: {str(e)}"
         )
 
-    def _run_remote_cmd(self, cmd: str):
+    def _run_remote_cmd(self, cmd: str) -> str:
         res = _run_command_as_user(
             cmd=cmd,
             user=self.slurm_user,
@@ -149,6 +149,6 @@ class SudoSlurmRunner(BaseSlurmRunner):
         )
         return res.stdout
 
-    def _run_local_cmd(self, cmd: str):
+    def _run_local_cmd(self, cmd: str) -> str:
         res = _subprocess_run_or_raise(cmd)
         return res.stdout
fractal_server/app/runner/v2/db_tools.py
CHANGED
@@ -3,11 +3,14 @@ from typing import Any
 from sqlalchemy.dialects.postgresql import insert as pg_insert
 from sqlalchemy.orm import Session
 
+from fractal_server.app.db import get_sync_db
 from fractal_server.app.models.v2 import HistoryImageCache
 from fractal_server.app.models.v2 import HistoryRun
 from fractal_server.app.models.v2 import HistoryUnit
 from fractal_server.app.schemas.v2 import HistoryUnitStatus
 
+_CHUNK_SIZE = 2_000
+
 
 def update_status_of_history_run(
     *,
@@ -37,7 +40,18 @@ def update_status_of_history_unit(
     db_sync.commit()
 
 
-
+def update_logfile_of_history_unit(
+    *,
+    history_unit_id: int,
+    logfile: str,
+) -> None:
+    with next(get_sync_db()) as db_sync:
+        unit = db_sync.get(HistoryUnit, history_unit_id)
+        if unit is None:
+            raise ValueError(f"HistoryUnit {history_unit_id} not found.")
+        unit.logfile = logfile
+        db_sync.merge(unit)
+        db_sync.commit()
 
 
 def bulk_upsert_image_cache_fast(
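Note: `update_logfile_of_history_unit` opens its own synchronous DB session and raises `ValueError` if the unit does not exist, so the runners can call it outside any existing session. A hedged usage sketch; the id and path below are purely illustrative:

```python
from fractal_server.app.runner.v2.db_tools import update_logfile_of_history_unit

# Called by the runner once the task's local log path is known
# (values below are made up for illustration).
update_logfile_of_history_unit(
    history_unit_id=42,
    logfile="/path/to/job/workdir/task.log",
)
```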
fractal_server/app/runner/v2/runner_functions.py
CHANGED
@@ -172,7 +172,7 @@ def run_v2_task_non_parallel(
     history_unit = HistoryUnit(
         history_run_id=history_run_id,
         status=HistoryUnitStatus.SUBMITTED,
-        logfile=
+        logfile=None,
         zarr_urls=zarr_urls,
     )
     db.add(history_unit)
@@ -275,7 +275,7 @@ def run_v2_task_parallel(
         HistoryUnit(
             history_run_id=history_run_id,
             status=HistoryUnitStatus.SUBMITTED,
-            logfile=
+            logfile=None,
             zarr_urls=[image["zarr_url"]],
         )
         for ind, image in enumerate(images)
@@ -401,8 +401,7 @@ def run_v2_task_compound(
     history_unit = HistoryUnit(
         history_run_id=history_run_id,
         status=HistoryUnitStatus.SUBMITTED,
-
-        logfile=task_files_init.log_file_local,
+        logfile=None,
         zarr_urls=input_image_zarr_urls,
     )
     db.add(history_unit)
{fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-fractal_server/__init__.py,sha256=
+fractal_server/__init__.py,sha256=Kv1hRPs4otMnorymxtQd2kL3M7DCvXZqfxY3-m9qJ3U,26
 fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
 fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
 fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -73,13 +73,13 @@ fractal_server/app/runner/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 fractal_server/app/runner/executors/base_runner.py,sha256=s5aZLDPzC565FadaqFxrCLIlQzBn2D9iOpEjnBZROkk,5541
 fractal_server/app/runner/executors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/local/get_local_config.py,sha256=wbrIYuGOvABOStrE7jNrC4ULPhtBQ5Q7Y3aKm_icomg,3508
-fractal_server/app/runner/executors/local/runner.py,sha256=
+fractal_server/app/runner/executors/local/runner.py,sha256=Kv_ZTRARAg-lAhh-4tbSE1JcwukqGzgeQk0RAMLFgGk,8963
 fractal_server/app/runner/executors/slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm_common/_batching.py,sha256=ZY020JZlDS5mfpgpWTChQkyHU7iLE5kx2HVd57_C6XA,8850
 fractal_server/app/runner/executors/slurm_common/_handle_exception_proxy.py,sha256=jU2N4vMafdcDPqVXwSApu4zxskCqhHmsXF3hBpOAAFA,577
 fractal_server/app/runner/executors/slurm_common/_job_states.py,sha256=nuV-Zba38kDrRESOVB3gaGbrSPZc4q7YGichQaeqTW0,238
 fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=fZaFUUXqDH0p3DndCFUpFqTqyD2tMVCuSYgYLAycpVw,15897
-fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=
+fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=4U3ZAN6QbBd_ukNapU8DHQ6Qughf7HmfNdi9Q2Nmd6g,29132
 fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256=-fAX1DZMB5RZnyYanIJD72mWOJAPkh21jd4loDXKJw4,5994
 fractal_server/app/runner/executors/slurm_common/remote.py,sha256=iXLu4d-bWzn7qmDaOjKFkcuaSHLjPESAMSLcg6c99fc,5852
 fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py,sha256=YGgzTspkK9ItSMzwuYv_1tY7_1g89Qpeny5Auinxk1E,2708
@@ -88,7 +88,7 @@ fractal_server/app/runner/executors/slurm_ssh/__init__.py,sha256=47DEQpj8HBSa-_T
 fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=n9DAn2Rn_4gUphJCJk4CaQH7WQP4nBNZxqF9dj0H5cw,5768
 fractal_server/app/runner/executors/slurm_sudo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=O1bNg1DiSDJmQE0RmOk2Ii47DagiXp5ryd0R6KxO2OM,3177
-fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=
+fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=6E1Pl2k9DFekZkZgU3rpp_v8wrJG7qGt4rDaUCFlnXI,5059
 fractal_server/app/runner/extract_archive.py,sha256=tLpjDrX47OjTNhhoWvm6iNukg8KoieWyTb7ZfvE9eWU,2483
 fractal_server/app/runner/filenames.py,sha256=lPnxKHtdRizr6FqG3zOdjDPyWA7GoaJGTtiuJV0gA8E,70
 fractal_server/app/runner/run_subprocess.py,sha256=c3JbYXq3hX2aaflQU19qJ5Xs6J6oXGNvnTEoAfv2bxc,959
@@ -99,11 +99,11 @@ fractal_server/app/runner/v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
 fractal_server/app/runner/v2/_local.py,sha256=DK8yagbvd6HHjcDVhUzTy0f7MURlTkQha-NM6OZKgJc,3044
 fractal_server/app/runner/v2/_slurm_ssh.py,sha256=_bytOf8z9sdrhI03D6eqg-aQPnJ7V2-qnqpcHAYizns,3278
 fractal_server/app/runner/v2/_slurm_sudo.py,sha256=DBCNxifXmMkpu71Wnk5u9-wKT7PV1WROQuY_4DYoZRI,2993
-fractal_server/app/runner/v2/db_tools.py,sha256=
+fractal_server/app/runner/v2/db_tools.py,sha256=tyLGY-g4ISZSJzk6ootEuHocdVwMg3taBL0oIivXV7M,2846
 fractal_server/app/runner/v2/deduplicate_list.py,sha256=IVTE4abBU1bUprFTkxrTfYKnvkNTanWQ-KWh_etiT08,645
 fractal_server/app/runner/v2/merge_outputs.py,sha256=D1L4Taieq9i71SPQyNc1kMokgHh-sV_MqF3bv7QMDBc,907
 fractal_server/app/runner/v2/runner.py,sha256=SsKEZAsB8sPV8W3khTkAqaGdDwoTm_lav-fx6DdCwyA,15294
-fractal_server/app/runner/v2/runner_functions.py,sha256=
+fractal_server/app/runner/v2/runner_functions.py,sha256=5cK5O2rTrCsCxMTVN3iNPRwZ_891BC9_RMo64a8ZGYw,16338
 fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=9t1CHN3EyfsGRWfG257YPY5WjQ6zuztsw_KZrpEAFPo,3703
 fractal_server/app/runner/v2/submit_workflow.py,sha256=EDUyUuIPwZHb2zm7SCRRoFsGq2cN-b5OKw6CYkZ8kWk,13048
 fractal_server/app/runner/v2/task_interface.py,sha256=IXdQTI8rXFgXv1Ez0js4CjKFf3QwO2GCHRTuwiFtiTQ,2891
@@ -208,8 +208,8 @@ fractal_server/tasks/v2/utils_templates.py,sha256=Kc_nSzdlV6KIsO0CQSPs1w70zLyENP
 fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
 fractal_server/utils.py,sha256=PMwrxWFxRTQRl1b9h-NRIbFGPKqpH_hXnkAT3NfZdpY,3571
 fractal_server/zip_tools.py,sha256=GjDgo_sf6V_DDg6wWeBlZu5zypIxycn_l257p_YVKGc,4876
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
-fractal_server-2.14.
+fractal_server-2.14.0a17.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+fractal_server-2.14.0a17.dist-info/METADATA,sha256=Pv9LhU977x4Xvcet2GgaVMTnlU1ZQyKARhRTwH6_GkQ,4563
+fractal_server-2.14.0a17.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+fractal_server-2.14.0a17.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+fractal_server-2.14.0a17.dist-info/RECORD,,
{fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/LICENSE
File without changes
{fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/WHEEL
File without changes
{fractal_server-2.14.0a15.dist-info → fractal_server-2.14.0a17.dist-info}/entry_points.txt
File without changes