fractal-server 2.14.0a16__py3-none-any.whl → 2.14.0a18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
- __VERSION__ = "2.14.0a16"
+ __VERSION__ = "2.14.0a18"
@@ -32,6 +32,9 @@ from fractal_server.config import get_settings
  from fractal_server.logger import set_logger
  from fractal_server.syringe import Inject

+ SHUTDOWN_ERROR_MESSAGE = "Failed due to job-execution shutdown."
+ SHUTDOWN_EXCEPTION = JobExecutionError(SHUTDOWN_ERROR_MESSAGE)
+
  logger = set_logger(__name__)

  # FIXME: Transform several logger.info into logger.debug.
@@ -94,6 +97,7 @@ class BaseSlurmRunner(BaseRunner):

  def run_squeue(self, job_ids: list[str]) -> tuple[bool, str]:
  # FIXME: review different cases (exception vs no job found)
+ # FIXME: Fail for empty list
  job_id_single_str = ",".join([str(j) for j in job_ids])
  cmd = (
  f"squeue --noheader --format='%i %T' --jobs {job_id_single_str}"
@@ -228,9 +232,11 @@ class BaseSlurmRunner(BaseRunner):
  logger.info(script_lines)

  # Always print output of `uname -n` and `pwd`
- script_lines.append("Hostname: $(uname -n)\n")
- script_lines.append("Current directory : $(pwd)\n")
- script_lines.append('Start time: $(date + "%Y-%m-%dT%H:%M:%S%z")\n')
+ script_lines.append('\necho "Hostname: $(uname -n)"')
+ script_lines.append('echo "Current directory : $(pwd)"')
+ script_lines.append(
+ 'echo "Start time: $(date +"%Y-%m-%dT%H:%M:%S%z")"'
+ )

  # Complete script preamble
  script_lines.append("\n")
@@ -245,7 +251,9 @@ class BaseSlurmRunner(BaseRunner):
  )
  script_lines.append("wait\n")
  script = "\n".join(script_lines)
- script_lines.append('End time: $(date + "%Y-%m-%dT%H:%M:%S%z")\n')
+ script_lines.append(
+ 'echo "End time: $(date +"%Y-%m-%dT%H:%M:%S%z")"'
+ )

  # Write submission script
  with open(slurm_job.slurm_submission_script_local, "w") as f:
@@ -335,7 +343,10 @@ class BaseSlurmRunner(BaseRunner):
  pass

  def _postprocess_single_task(
- self, *, task: SlurmTask
+ self,
+ *,
+ task: SlurmTask,
+ was_job_scancelled: bool = False,
  ) -> tuple[Any, Exception]:
  try:
  with open(task.output_pickle_file_local, "rb") as f:
@@ -347,17 +358,24 @@ class BaseSlurmRunner(BaseRunner):
  else:
  exception = _handle_exception_proxy(output)
  return None, exception
+
  except Exception as e:
  exception = JobExecutionError(f"ERROR, {str(e)}")
+ # If job was scancelled and task failed, replace
+ # exception with a shutdown-related one.
+ if was_job_scancelled:
+ logger.debug(
+ "Replacing exception with a shutdown-related one, "
+ f"for {task.index=}."
+ )
+ exception = SHUTDOWN_EXCEPTION
+
  return None, exception
  finally:
- pass
- # FIXME: Re-include unlinks of pickle files
- # Path(task.input_pickle_file_local).unlink(missing_ok=True)
- # Path(task.output_pickle_file_local).unlink(missing_ok=True)
+ Path(task.input_pickle_file_local).unlink(missing_ok=True)
+ Path(task.output_pickle_file_local).unlink(missing_ok=True)

  def is_shutdown(self) -> bool:
- # FIXME: shutdown is not implemented
  return self.shutdown_file.exists()

  @property
@@ -388,7 +406,14 @@ class BaseSlurmRunner(BaseRunner):
  raise JobExecutionError("Unexpected branch: jobs should be empty.")

  if self.is_shutdown():
- raise JobExecutionError("Cannot continue after shutdown.")
+ with next(get_sync_db()) as db:
+ update_status_of_history_unit(
+ history_unit_id=history_unit_id,
+ status=HistoryUnitStatus.FAILED,
+ db_sync=db,
+ )
+
+ return None, SHUTDOWN_EXCEPTION

  # Validation phase
  self.validate_submit_parameters(
@@ -444,19 +469,29 @@ class BaseSlurmRunner(BaseRunner):
  # Retrieval phase
  logger.info("[submit] START retrieval phase")
  while len(self.jobs) > 0:
+
+ # Handle shutdown
+ scancelled_job_ids = []
  if self.is_shutdown():
- self.scancel_jobs()
+ logger.info("[submit] Shutdown file detected")
+ scancelled_job_ids = self.scancel_jobs()
+ logger.info(f"[submit] {scancelled_job_ids=}")
+
+ # Look for finished jobs
  finished_job_ids = self._get_finished_jobs(job_ids=self.job_ids)
- logger.info(f"{finished_job_ids=}")
+ logger.debug(f"[submit] {finished_job_ids=}")
+
  with next(get_sync_db()) as db:
  for slurm_job_id in finished_job_ids:
- logger.info(f"Now process {slurm_job_id=}")
+ logger.debug(f"[submit] Now process {slurm_job_id=}")
  slurm_job = self.jobs.pop(slurm_job_id)
-
  self._copy_files_from_remote_to_local(slurm_job)
+ was_job_scancelled = slurm_job_id in scancelled_job_ids
  result, exception = self._postprocess_single_task(
- task=slurm_job.tasks[0]
+ task=slurm_job.tasks[0],
+ was_job_scancelled=was_job_scancelled,
  )
+
  if exception is not None:
  update_status_of_history_unit(
  history_unit_id=history_unit_id,
@@ -484,13 +519,29 @@ class BaseSlurmRunner(BaseRunner):
  list_task_files: list[TaskFiles],
  task_type: Literal["parallel", "compound", "converter_compound"],
  config: SlurmConfig,
- ):
+ ) -> tuple[dict[int, Any], dict[int, BaseException]]:

  if len(self.jobs) > 0:
  raise RuntimeError(
- f"Cannot run .multisubmit when {len(self.jobs)=}"
+ f"Cannot run `multisubmit` when {len(self.jobs)=}"
  )

+ if self.is_shutdown():
+ if task_type == "parallel":
+ with next(get_sync_db()) as db:
+ # FIXME: Replace with bulk function
+ for history_unit_id in history_unit_ids:
+ update_status_of_history_unit(
+ history_unit_id=history_unit_id,
+ status=HistoryUnitStatus.FAILED,
+ db_sync=db,
+ )
+ results = {}
+ exceptions = {
+ ind: SHUTDOWN_EXCEPTION for ind in range(len(list_parameters))
+ }
+ return results, exceptions
+
  self.validate_multisubmit_parameters(
  list_parameters=list_parameters,
  task_type=task_type,
@@ -605,22 +656,34 @@ class BaseSlurmRunner(BaseRunner):
  logger.warning(f"[submit] Now sleep {sleep_time} (FIXME)")
  time.sleep(sleep_time)

+ # FIXME: Could we merge the submit/multisubmit retrieval phases?
+
  # Retrieval phase
- logger.info("START retrieval phase")
+ logger.info("[multisubmit] START retrieval phase")
  while len(self.jobs) > 0:
+
+ # Handle shutdown
+ scancelled_job_ids = []
  if self.is_shutdown():
- self.scancel_jobs()
+ logger.info("[multisubmit] Shutdown file detected")
+ scancelled_job_ids = self.scancel_jobs()
+ logger.info(f"[multisubmit] {scancelled_job_ids=}")
+
+ # Look for finished jobs
  finished_job_ids = self._get_finished_jobs(job_ids=self.job_ids)
- logger.info(f"{finished_job_ids=}")
+ logger.debug(f"[multisubmit] {finished_job_ids=}")
+
  with next(get_sync_db()) as db:
  for slurm_job_id in finished_job_ids:
- logger.info(f"Now processing {slurm_job_id=}")
+ logger.info(f"[multisubmit] Now process {slurm_job_id=}")
  slurm_job = self.jobs.pop(slurm_job_id)
  self._copy_files_from_remote_to_local(slurm_job)
  for task in slurm_job.tasks:
- logger.info(f"Now processing {task.index=}")
+ logger.info(f"[multisubmit] Now process {task.index=}")
+ was_job_scancelled = slurm_job_id in scancelled_job_ids
  result, exception = self._postprocess_single_task(
- task=task
+ task=task,
+ was_job_scancelled=was_job_scancelled,
  )

  # Note: the relevant done/failed check is based on
@@ -680,11 +743,12 @@ class BaseSlurmRunner(BaseRunner):
  logger.error(error_msg)
  raise RuntimeError(error_msg)

- def scancel_jobs(self) -> None:
+ def scancel_jobs(self) -> list[str]:
  logger.info("[scancel_jobs] START")

  if self.jobs:
- scancel_string = " ".join(self.job_ids)
+ scancelled_job_ids = self.job_ids
+ scancel_string = " ".join(scancelled_job_ids)
  scancel_cmd = f"scancel {scancel_string}"
  logger.warning(f"Now scancel-ing SLURM jobs {scancel_string}")
  try:
@@ -694,5 +758,5 @@ class BaseSlurmRunner(BaseRunner):
  "[scancel_jobs] `scancel` command failed. "
  f"Original error:\n{str(e)}"
  )
-
  logger.info("[scancel_jobs] END")
+ return scancelled_job_ids
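Note on the BaseSlurmRunner hunks above: together they add shutdown-aware retrieval. `scancel_jobs` now returns the cancelled job IDs, and `_postprocess_single_task` maps failures of scancelled jobs onto `SHUTDOWN_EXCEPTION` instead of a generic `JobExecutionError`. The following minimal sketch illustrates that flow in isolation; it is not code from the package, and `MiniRunner` and its helpers are hypothetical names.

from pathlib import Path


class JobExecutionError(RuntimeError):
    pass


SHUTDOWN_ERROR_MESSAGE = "Failed due to job-execution shutdown."
SHUTDOWN_EXCEPTION = JobExecutionError(SHUTDOWN_ERROR_MESSAGE)


class MiniRunner:
    """Hypothetical runner, only to illustrate the shutdown-handling flow."""

    def __init__(self, shutdown_file: Path, jobs: dict[str, bool]):
        # `jobs` maps a job ID to whether its output was produced successfully.
        self.shutdown_file = shutdown_file
        self.jobs = jobs

    def is_shutdown(self) -> bool:
        # Shutdown is signalled through a sentinel file, as in the diff above.
        return self.shutdown_file.exists()

    def scancel_jobs(self) -> list[str]:
        # Return the cancelled IDs so the caller can tell shutdown-related
        # failures apart from ordinary ones.
        return list(self.jobs.keys())

    def postprocess(self, ok: bool, was_job_scancelled: bool):
        if ok:
            return "result", None
        exception: Exception = JobExecutionError("ERROR, missing job output")
        if was_job_scancelled:
            # Replace the generic failure with the shutdown-related exception.
            exception = SHUTDOWN_EXCEPTION
        return None, exception

    def wait_all(self) -> dict[str, Exception | None]:
        exceptions: dict[str, Exception | None] = {}
        while self.jobs:
            scancelled_job_ids: list[str] = []
            if self.is_shutdown():
                scancelled_job_ids = self.scancel_jobs()
            # For the sketch, pretend every remaining job finished this round.
            for job_id in list(self.jobs.keys()):
                ok = self.jobs.pop(job_id)
                _, exc = self.postprocess(ok, job_id in scancelled_job_ids)
                exceptions[job_id] = exc
        return exceptions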
@@ -140,7 +140,7 @@ class SudoSlurmRunner(BaseSlurmRunner):
  f"Original error: {str(e)}"
  )

- def _run_remote_cmd(self, cmd: str):
+ def _run_remote_cmd(self, cmd: str) -> str:
  res = _run_command_as_user(
  cmd=cmd,
  user=self.slurm_user,
@@ -149,6 +149,6 @@ class SudoSlurmRunner(BaseSlurmRunner):
  )
  return res.stdout

- def _run_local_cmd(self, cmd: str):
+ def _run_local_cmd(self, cmd: str) -> str:
  res = _subprocess_run_or_raise(cmd)
  return res.stdout
@@ -1,16 +1,16 @@
- """on cascade
+ """Set ondelete

- Revision ID: 5b6007027595
- Revises: af1ef1c83c9b
- Create Date: 2025-04-02 17:03:59.542921
+ Revision ID: 9db60297b8b2
+ Revises: e81103413827
+ Create Date: 2025-04-07 13:13:14.596394

  """
  from alembic import op


  # revision identifiers, used by Alembic.
- revision = "5b6007027595"
- down_revision = "af1ef1c83c9b"
+ revision = "9db60297b8b2"
+ down_revision = "e81103413827"
  branch_labels = None
  depends_on = None

@@ -34,15 +34,15 @@ def upgrade() -> None:
  "fk_jobv2_dataset_id_datasetv2", type_="foreignkey"
  )
  batch_op.drop_constraint(
- "fk_jobv2_workflow_id_workflowv2", type_="foreignkey"
+ "fk_jobv2_project_id_projectv2", type_="foreignkey"
  )
  batch_op.drop_constraint(
- "fk_jobv2_project_id_projectv2", type_="foreignkey"
+ "fk_jobv2_workflow_id_workflowv2", type_="foreignkey"
  )
  batch_op.create_foreign_key(
- batch_op.f("fk_jobv2_project_id_projectv2"),
- "projectv2",
- ["project_id"],
+ batch_op.f("fk_jobv2_workflow_id_workflowv2"),
+ "workflowv2",
+ ["workflow_id"],
  ["id"],
  ondelete="SET NULL",
  )
@@ -54,19 +54,19 @@ def upgrade() -> None:
  ondelete="SET NULL",
  )
  batch_op.create_foreign_key(
- batch_op.f("fk_jobv2_workflow_id_workflowv2"),
- "workflowv2",
- ["workflow_id"],
+ batch_op.f("fk_jobv2_project_id_projectv2"),
+ "projectv2",
+ ["project_id"],
  ["id"],
  ondelete="SET NULL",
  )

  with op.batch_alter_table("linkusergroup", schema=None) as batch_op:
  batch_op.drop_constraint(
- "fk_linkusergroup_group_id_usergroup", type_="foreignkey"
+ "fk_linkusergroup_user_id_user_oauth", type_="foreignkey"
  )
  batch_op.drop_constraint(
- "fk_linkusergroup_user_id_user_oauth", type_="foreignkey"
+ "fk_linkusergroup_group_id_usergroup", type_="foreignkey"
  )
  batch_op.create_foreign_key(
  batch_op.f("fk_linkusergroup_group_id_usergroup"),
@@ -194,34 +194,28 @@ def downgrade() -> None:
  batch_op.f("fk_linkusergroup_group_id_usergroup"),
  type_="foreignkey",
  )
- batch_op.create_foreign_key(
- "fk_linkusergroup_user_id_user_oauth",
- "user_oauth",
- ["user_id"],
- ["id"],
- )
  batch_op.create_foreign_key(
  "fk_linkusergroup_group_id_usergroup",
  "usergroup",
  ["group_id"],
  ["id"],
  )
+ batch_op.create_foreign_key(
+ "fk_linkusergroup_user_id_user_oauth",
+ "user_oauth",
+ ["user_id"],
+ ["id"],
+ )

  with op.batch_alter_table("jobv2", schema=None) as batch_op:
  batch_op.drop_constraint(
- batch_op.f("fk_jobv2_workflow_id_workflowv2"), type_="foreignkey"
+ batch_op.f("fk_jobv2_project_id_projectv2"), type_="foreignkey"
  )
  batch_op.drop_constraint(
  batch_op.f("fk_jobv2_dataset_id_datasetv2"), type_="foreignkey"
  )
  batch_op.drop_constraint(
- batch_op.f("fk_jobv2_project_id_projectv2"), type_="foreignkey"
- )
- batch_op.create_foreign_key(
- "fk_jobv2_project_id_projectv2",
- "projectv2",
- ["project_id"],
- ["id"],
+ batch_op.f("fk_jobv2_workflow_id_workflowv2"), type_="foreignkey"
  )
  batch_op.create_foreign_key(
  "fk_jobv2_workflow_id_workflowv2",
@@ -229,6 +223,12 @@ def downgrade() -> None:
  ["workflow_id"],
  ["id"],
  )
+ batch_op.create_foreign_key(
+ "fk_jobv2_project_id_projectv2",
+ "projectv2",
+ ["project_id"],
+ ["id"],
+ )
  batch_op.create_foreign_key(
  "fk_jobv2_dataset_id_datasetv2",
  "datasetv2",
@@ -1,7 +1,7 @@
  """new history items

  Revision ID: fbce16ff4e47
- Revises: 5b6007027595
+ Revises: af1ef1c83c9b
  Create Date: 2025-03-14 15:25:01.083619

  """
@@ -12,7 +12,7 @@ from sqlalchemy.dialects import postgresql

  # revision identifiers, used by Alembic.
  revision = "fbce16ff4e47"
- down_revision = "5b6007027595"
+ down_revision = "af1ef1c83c9b"
  branch_labels = None
  depends_on = None

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: fractal-server
- Version: 2.14.0a16
+ Version: 2.14.0a18
  Summary: Backend component of the Fractal analytics platform
  License: BSD-3-Clause
  Author: Tommaso Comparin
@@ -20,11 +20,11 @@ Requires-Dist: fastapi-users[oauth] (>=14,<15)
  Requires-Dist: gunicorn (>=23.0,<24.0)
  Requires-Dist: packaging (>=24.0.0,<25.0.0)
  Requires-Dist: psycopg[binary] (>=3.1.0,<4.0.0)
- Requires-Dist: pydantic (>=2.10.0,<2.11.0)
+ Requires-Dist: pydantic (>=2.11.0,<2.12.0)
  Requires-Dist: pydantic-settings (>=2.7.0)
  Requires-Dist: python-dotenv (>=1.0.0,<1.1.0)
  Requires-Dist: sqlalchemy[asyncio] (>=2.0.23,<2.1)
- Requires-Dist: sqlmodel (==0.0.22)
+ Requires-Dist: sqlmodel (==0.0.24)
  Requires-Dist: uvicorn (>=0.29.0,<0.35.0)
  Requires-Dist: uvicorn-worker (==0.3.0)
  Project-URL: Documentation, https://fractal-analytics-platform.github.io/fractal-server
@@ -1,4 +1,4 @@
- fractal_server/__init__.py,sha256=ewSoBgac6DlCJh5FbTCwJqfzkPFKTK2DqqhRuiv9G4k,26
+ fractal_server/__init__.py,sha256=PbuUW1BZbTi_QK9mBU8I6Y8UD2lrNGqSZddYzzIogbg,26
  fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
  fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
  fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -79,7 +79,7 @@ fractal_server/app/runner/executors/slurm_common/_batching.py,sha256=ZY020JZlDS5
  fractal_server/app/runner/executors/slurm_common/_handle_exception_proxy.py,sha256=jU2N4vMafdcDPqVXwSApu4zxskCqhHmsXF3hBpOAAFA,577
  fractal_server/app/runner/executors/slurm_common/_job_states.py,sha256=nuV-Zba38kDrRESOVB3gaGbrSPZc4q7YGichQaeqTW0,238
  fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=fZaFUUXqDH0p3DndCFUpFqTqyD2tMVCuSYgYLAycpVw,15897
- fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=9UpCV8CyqOeLzfEiJRxm4ODeJRDqQjE2QFx8Oi9ZjOA,26629
+ fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=4U3ZAN6QbBd_ukNapU8DHQ6Qughf7HmfNdi9Q2Nmd6g,29132
  fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256=-fAX1DZMB5RZnyYanIJD72mWOJAPkh21jd4loDXKJw4,5994
  fractal_server/app/runner/executors/slurm_common/remote.py,sha256=iXLu4d-bWzn7qmDaOjKFkcuaSHLjPESAMSLcg6c99fc,5852
  fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py,sha256=YGgzTspkK9ItSMzwuYv_1tY7_1g89Qpeny5Auinxk1E,2708
@@ -88,7 +88,7 @@ fractal_server/app/runner/executors/slurm_ssh/__init__.py,sha256=47DEQpj8HBSa-_T
  fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=n9DAn2Rn_4gUphJCJk4CaQH7WQP4nBNZxqF9dj0H5cw,5768
  fractal_server/app/runner/executors/slurm_sudo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=O1bNg1DiSDJmQE0RmOk2Ii47DagiXp5ryd0R6KxO2OM,3177
- fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=1NoXMQH-JAzBHGmzlUdu6P1gLGJE5y17U3yLxJOyyHE,5045
+ fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=6E1Pl2k9DFekZkZgU3rpp_v8wrJG7qGt4rDaUCFlnXI,5059
  fractal_server/app/runner/extract_archive.py,sha256=tLpjDrX47OjTNhhoWvm6iNukg8KoieWyTb7ZfvE9eWU,2483
  fractal_server/app/runner/filenames.py,sha256=lPnxKHtdRizr6FqG3zOdjDPyWA7GoaJGTtiuJV0gA8E,70
  fractal_server/app/runner/run_subprocess.py,sha256=c3JbYXq3hX2aaflQU19qJ5Xs6J6oXGNvnTEoAfv2bxc,959
@@ -152,7 +152,6 @@ fractal_server/migrations/versions/4c308bcaea2b_add_task_args_schema_and_task_ar
  fractal_server/migrations/versions/4cedeb448a53_workflowtask_foreign_keys_not_nullables.py,sha256=Mob8McGYAcmgvrseyyYOa54E6Gsgr-4SiGdC-r9O4_A,1157
  fractal_server/migrations/versions/501961cfcd85_remove_link_between_v1_and_v2_tasks_.py,sha256=5ROUgcoZOdjf8kMt6cxuvPhzHmV6xaCxvZEbhUEyZM4,3271
  fractal_server/migrations/versions/50a13d6138fd_initial_schema.py,sha256=zwXegXs9J40eyCWi3w0c_iIBVJjXNn4VdVnQaT3KxDg,8770
- fractal_server/migrations/versions/5b6007027595_on_cascade.py,sha256=44EmzOkk5-FJwtAy4TQuj7EctKwz6ZERkQjh_ljdDJc,7926
  fractal_server/migrations/versions/5bf02391cfef_v2.py,sha256=axhNkr_H6R4rRbY7oGYazNbFvPXeSyBDWFVbKNmiqs8,8433
  fractal_server/migrations/versions/70e77f1c38b0_add_applyworkflow_first_task_index_and_.py,sha256=Q-DsMzG3IcUV2Ol1dhJWosDvKERamBE6QvA2zzS5zpQ,1632
  fractal_server/migrations/versions/71eefd1dd202_add_slurm_accounts.py,sha256=mbWuCkTpRAdGbRhW7lhXs_e5S6O37UAcCN6JfoY5H8A,1353
@@ -163,6 +162,7 @@ fractal_server/migrations/versions/94a47ea2d3ff_remove_cache_dir_slurm_user_and_
  fractal_server/migrations/versions/97f444d47249_add_applyworkflow_project_dump.py,sha256=eKTZm3EgUgapXBxO0RuHkEfTKic-TZG3ADaMpGLuc0k,1057
  fractal_server/migrations/versions/99ea79d9e5d2_add_dataset_history.py,sha256=0im6TxDr53sKKcjiPgeH4ftVRGnRXZSh2lPbRQ1Ir9w,883
  fractal_server/migrations/versions/9c5ae74c9b98_add_user_settings_table.py,sha256=syONdZNf4-OnAcWIsbzXpYwpXPsXZ4SsmjwVvmVG0PU,2256
+ fractal_server/migrations/versions/9db60297b8b2_set_ondelete.py,sha256=F0IdXk8vclViOGKe2SOHO3MsQsqe7SsZRSqz9cXhhrE,7928
  fractal_server/migrations/versions/9fd26a2b0de4_add_workflow_timestamp_created.py,sha256=4l1AHGUsa0ONoJVZlr3fTXw_xbbQ8O7wlD92Az2aRfM,1849
  fractal_server/migrations/versions/a7f4d6137b53_add_workflow_dump_to_applyworkflow.py,sha256=ekDUML7ILpmdoqEclKbEUdyLi4uw9HSG_sTjG2hp_JE,867
  fractal_server/migrations/versions/af1ef1c83c9b_add_accounting_tables.py,sha256=BftudWuSGvKGBzIL5AMb3yWkgTAuaKPBGsYcOzp_gLQ,1899
@@ -175,7 +175,7 @@ fractal_server/migrations/versions/e75cac726012_make_applyworkflow_start_timesta
  fractal_server/migrations/versions/e81103413827_add_job_type_filters.py,sha256=t4ImlKNHx5JMgBL2sTpLWunv1gwY8OCFOKd3G338mdE,890
  fractal_server/migrations/versions/efa89c30e0a4_add_project_timestamp_created.py,sha256=jilQW3QIqYQ4Q6hCnUiG7UtNMpA41ujqrB3tPFiPM1Q,1221
  fractal_server/migrations/versions/f384e1c0cf5d_drop_task_default_args_columns.py,sha256=9BwqUS9Gf7UW_KjrzHbtViC880qhD452KAytkHWWZyk,746
- fractal_server/migrations/versions/fbce16ff4e47_new_history_items.py,sha256=8Wh-t2-1J4pABxjfT2NYHQzpZkNbgNqHfUHwQ6yB1us,3950
+ fractal_server/migrations/versions/fbce16ff4e47_new_history_items.py,sha256=TDWCaIoM0Q4SpRWmR9zr_rdp3lJXhCfBPTMhtrP5xYE,3950
  fractal_server/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/ssh/__init__.py,sha256=sVUmzxf7_DuXG1xoLQ1_00fo5NPhi2LJipSmU5EAkPs,124
  fractal_server/ssh/_fabric.py,sha256=lNy4IX1I4We6VoWa4Bz4fUPuApLMSoejpyE6I3jDZeM,22869
@@ -208,8 +208,8 @@ fractal_server/tasks/v2/utils_templates.py,sha256=Kc_nSzdlV6KIsO0CQSPs1w70zLyENP
  fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
  fractal_server/utils.py,sha256=PMwrxWFxRTQRl1b9h-NRIbFGPKqpH_hXnkAT3NfZdpY,3571
  fractal_server/zip_tools.py,sha256=GjDgo_sf6V_DDg6wWeBlZu5zypIxycn_l257p_YVKGc,4876
- fractal_server-2.14.0a16.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
- fractal_server-2.14.0a16.dist-info/METADATA,sha256=cX3Og-LgceYk8wYfm6SlGSXr_5s-tDFI8VYJ48zPRWw,4563
- fractal_server-2.14.0a16.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
- fractal_server-2.14.0a16.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
- fractal_server-2.14.0a16.dist-info/RECORD,,
+ fractal_server-2.14.0a18.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+ fractal_server-2.14.0a18.dist-info/METADATA,sha256=2jm6YHfgiVwh6xoH69oBjtZ4ANiPC8cN-F4vHqeY0PA,4563
+ fractal_server-2.14.0a18.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+ fractal_server-2.14.0a18.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+ fractal_server-2.14.0a18.dist-info/RECORD,,