fractal-server 2.14.0a27__py3-none-any.whl → 2.14.0a29__py3-none-any.whl

This diff shows the changes between two package versions as published to a supported public registry. It is provided for informational purposes only.
Files changed (26)
  1. fractal_server/__init__.py +1 -1
  2. fractal_server/app/models/v2/history.py +1 -0
  3. fractal_server/app/routes/api/v2/history.py +5 -0
  4. fractal_server/app/routes/api/v2/verify_image_types.py +1 -1
  5. fractal_server/app/runner/executors/local/runner.py +1 -6
  6. fractal_server/app/runner/executors/slurm_common/_slurm_config.py +1 -3
  7. fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py +4 -13
  8. fractal_server/app/runner/executors/slurm_common/remote.py +1 -1
  9. fractal_server/app/runner/executors/slurm_ssh/runner.py +1 -2
  10. fractal_server/app/runner/executors/slurm_sudo/runner.py +4 -1
  11. fractal_server/app/runner/v2/_local.py +2 -0
  12. fractal_server/app/runner/v2/_slurm_ssh.py +2 -0
  13. fractal_server/app/runner/v2/_slurm_sudo.py +2 -0
  14. fractal_server/app/runner/v2/db_tools.py +17 -4
  15. fractal_server/app/runner/v2/runner.py +6 -0
  16. fractal_server/app/runner/v2/runner_functions.py +36 -8
  17. fractal_server/app/runner/v2/runner_functions_low_level.py +2 -3
  18. fractal_server/app/runner/v2/submit_workflow.py +1 -0
  19. fractal_server/app/schemas/v2/history.py +1 -0
  20. fractal_server/config.py +1 -1
  21. fractal_server/migrations/versions/c90a7c76e996_job_id_in_history_run.py +41 -0
  22. {fractal_server-2.14.0a27.dist-info → fractal_server-2.14.0a29.dist-info}/METADATA +1 -1
  23. {fractal_server-2.14.0a27.dist-info → fractal_server-2.14.0a29.dist-info}/RECORD +26 -25
  24. {fractal_server-2.14.0a27.dist-info → fractal_server-2.14.0a29.dist-info}/LICENSE +0 -0
  25. {fractal_server-2.14.0a27.dist-info → fractal_server-2.14.0a29.dist-info}/WHEEL +0 -0
  26. {fractal_server-2.14.0a27.dist-info → fractal_server-2.14.0a29.dist-info}/entry_points.txt +0 -0
fractal_server/__init__.py CHANGED
@@ -1 +1 @@
-__VERSION__ = "2.14.0a27"
+__VERSION__ = "2.14.0a29"
fractal_server/app/models/v2/history.py CHANGED
@@ -27,6 +27,7 @@ class HistoryRun(SQLModel, table=True):
         default=None,
         ondelete="SET NULL",
     )
+    job_id: int = Field(foreign_key="jobv2.id")

     workflowtask_dump: dict[str, Any] = Field(
         sa_column=Column(JSONB, nullable=False),
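The new `job_id` column ties each `HistoryRun` row to the `jobv2` record that produced it. A minimal query sketch (the engine URL and job id are placeholders; only the `HistoryRun.job_id` attribute comes from the hunk above):

```python
# Hypothetical usage sketch: list all HistoryRun rows created by one job.
from sqlmodel import Session, create_engine, select

from fractal_server.app.models.v2 import HistoryRun

engine = create_engine("postgresql+psycopg://localhost/fractal")  # placeholder URL

with Session(engine) as session:
    # Filter on the new foreign-key column introduced in this release.
    runs = session.exec(select(HistoryRun).where(HistoryRun.job_id == 1)).all()
    for run in runs:
        print(run.id, run.workflowtask_id)
```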
fractal_server/app/routes/api/v2/history.py CHANGED
@@ -142,6 +142,11 @@ async def get_workflow_tasks_statuses(
             value["num_available_images"] = None
         new_response[key] = value

+    for wftask in workflow.task_list:
+        logger.debug(
+            f"({dataset_id=}, {wftask.id=}): {new_response[wftask.id]}"
+        )
+
     return JSONResponse(content=new_response, status_code=200)

fractal_server/app/routes/api/v2/verify_image_types.py CHANGED
@@ -42,7 +42,7 @@ async def verify_unique_types(
         type_filters=query.type_filters,
     )

-    # Get all available types (#FIXME use aux function)
+    # NOTE: see issue 2486
     available_types = set(
         _type for _img in filtered_images for _type in _img["types"].keys()
     )
fractal_server/app/runner/executors/local/runner.py CHANGED
@@ -25,7 +25,6 @@ class LocalRunner(BaseRunner):
         self,
         root_dir_local: Path,
     ):
-
         self.root_dir_local = root_dir_local
         self.root_dir_local.mkdir(parents=True, exist_ok=True)
         self.executor = ThreadPoolExecutor()
@@ -182,10 +181,6 @@ class LocalRunner(BaseRunner):
                     db_sync=db,
                 )

-        # FIXME: what should happen here? Option 1: stop
-        # all existing tasks and shutdown runner (for the
-        # compound-task case)
-
-        logger.debug(f"[multisubmit] END, {results=}, {exceptions=}")
+        logger.debug(f"[multisubmit] END, {len(results)=}, {len(exceptions)=}")

         return results, exceptions
fractal_server/app/runner/executors/slurm_common/_slurm_config.py CHANGED
@@ -368,9 +368,7 @@ class SlurmConfig(BaseModel):
             if value is not None:
                 # Handle the `time` parameter
                 if key == "time" and self.parallel_tasks_per_job > 1:
-                    # FIXME SSH: time setting must be handled better. Right now
-                    # we simply propagate `time`, but this is not enough when
-                    # several `srun` are combined in a single script.
+                    # NOTE: see issue #1632
                     logger.warning(
                         f"`time` SLURM parameter is set to {self.time}, "
                         "but this does not take into account the number of "
fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py CHANGED
@@ -34,7 +34,7 @@ SHUTDOWN_EXCEPTION = JobExecutionError(SHUTDOWN_ERROR_MESSAGE)

 logger = set_logger(__name__)

-# FIXME: Transform several logger.info into logger.debug.
+# NOTE: see issue 2481.


 class BaseSlurmRunner(BaseRunner):
@@ -107,8 +107,7 @@ class BaseSlurmRunner(BaseRunner):
         raise NotImplementedError("Implement in child class.")

     def run_squeue(self, job_ids: list[str]) -> tuple[bool, str]:
-
-        # FIXME: review different cases (exception vs no job found)
+        # NOTE: see issue 2482

         if len(job_ids) == 0:
             return (False, "")
@@ -457,7 +456,6 @@
             "converter_compound",
         ],
     ) -> tuple[Any, Exception]:
-
         logger.info("[submit] START")

         workdir_local = task_files.wftask_subfolder_local
@@ -514,7 +512,7 @@
         )
         logger.info(f"[submit] END submission phase, {self.job_ids=}")

-        # FIXME: replace this sleep with a more precise check
+        # NOTE: see issue 2444
         settings = Inject(get_settings)
         sleep_time = settings.FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL
         logger.warning(f"[submit] Now sleep {sleep_time} seconds.")
@@ -524,7 +522,6 @@
         logger.info("[submit] START retrieval phase")
         scancelled_job_ids = []
         while len(self.jobs) > 0:
-
             # Look for finished jobs
             finished_job_ids = self._get_finished_jobs(job_ids=self.job_ids)
             logger.debug(f"[submit] {finished_job_ids=}")
@@ -664,9 +661,7 @@
                 )
             )

-        # FIXME: split parts 2 and 3
-        # Part 2/3. Transfer all relevant input files (for SSH)
-        # Part 3/3. Run all `sbatch`es and update `self.jobs`
+        # NOTE: see issue 2431
         logger.info("[multisubmit] Transfer files and submit jobs.")
         for slurm_job in jobs_to_submit:
             self._submit_single_sbatch(
@@ -677,19 +672,15 @@

         logger.info(f"END submission phase, {self.job_ids=}")

-        # FIXME: replace this sleep with a more precise check
         settings = Inject(get_settings)
         sleep_time = settings.FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL
         logger.warning(f"[submit] Now sleep {sleep_time} seconds.")
         time.sleep(sleep_time)

-        # FIXME: Could we merge the submit/multisubmit retrieval phases?
-
         # Retrieval phase
         logger.info("[multisubmit] START retrieval phase")
         scancelled_job_ids = []
         while len(self.jobs) > 0:
-
             # Look for finished jobs
             finished_job_ids = self._get_finished_jobs(job_ids=self.job_ids)
             logger.debug(f"[multisubmit] {finished_job_ids=}")
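Several of these hunks sit around the same submit-then-poll structure: submit the `sbatch` scripts, sleep for `FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL` seconds, then loop until `self.jobs` is empty, retrieving whatever `_get_finished_jobs` reports. A standalone sketch of that pattern (the callables are illustrative stand-ins, not the runner's actual API):

```python
import time


def retrieval_phase(jobs, get_finished_ids, fetch_one, interval=2.0):
    """Poll until every submitted job has been retrieved.

    jobs: dict mapping job id -> bookkeeping object (mutated in place)
    get_finished_ids: returns the subset of ids that have completed
    fetch_one: retrieves one job's outputs and removes it from `jobs`
    """
    # Grace period before the first poll, mirroring the
    # FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL sleep in the hunks above.
    time.sleep(interval)
    while jobs:
        for job_id in get_finished_ids(set(jobs)):
            fetch_one(job_id)
        if jobs:
            time.sleep(interval)
```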
fractal_server/app/runner/executors/slurm_common/remote.py CHANGED
@@ -59,7 +59,7 @@ def _check_versions_mismatch(
     if worker_python_version != server_python_version:
         # FIXME: turn this into an error, after fixing a broader CI issue, see
         # https://github.com/fractal-analytics-platform/fractal-server/issues/375
-        logging.critical(
+        logging.warning(
             f"{server_python_version=} but {worker_python_version=}. "
             "cloudpickle is not guaranteed to correctly load "
             "pickle files created with different python versions. "
fractal_server/app/runner/executors/slurm_ssh/runner.py CHANGED
@@ -90,7 +90,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
         ).as_posix()

         # Create file list
-        # # FIXME can we make this more efficient with iterators?
+        # NOTE: see issue 2483
         filelist = []
         for _slurm_job in finished_slurm_jobs:
             _single_job_filelist = [
@@ -168,7 +168,6 @@ class SlurmSSHRunner(BaseSlurmRunner):
         Transfer the jobs subfolder to the remote host.
         """
         for job in jobs:
-
             # Create local archive
             tarfile_path_local = compress_folder(
                 job.workdir_local,
fractal_server/app/runner/executors/slurm_sudo/runner.py CHANGED
@@ -133,7 +133,10 @@ class SudoSlurmRunner(BaseSlurmRunner):
             # Write local file
             with open(target, "wb") as f:
                 f.write(res.stdout)
-            logger.critical(f"Copied {source} into {target}")
+            logger.debug(
+                f"[_fetch_artifacts_single_job] Copied {source} into "
+                f"{target}"
+            )
         except RuntimeError as e:
             logger.warning(
                 f"SKIP copy {source} into {target}. "
fractal_server/app/runner/v2/_local.py CHANGED
@@ -15,6 +15,7 @@ def process_workflow(
     workflow: WorkflowV2,
     dataset: DatasetV2,
     workflow_dir_local: Path,
+    job_id: int,
     workflow_dir_remote: Optional[Path] = None,
     first_task_index: Optional[int] = None,
     last_task_index: Optional[int] = None,
@@ -75,6 +76,7 @@ def process_workflow(
             first_task_index : (last_task_index + 1)
         ],
         dataset=dataset,
+        job_id=job_id,
         runner=runner,
         workflow_dir_local=workflow_dir_local,
         workflow_dir_remote=workflow_dir_local,
fractal_server/app/runner/v2/_slurm_ssh.py CHANGED
@@ -38,6 +38,7 @@ def process_workflow(
     workflow: WorkflowV2,
     dataset: DatasetV2,
     workflow_dir_local: Path,
+    job_id: int,
     workflow_dir_remote: Optional[Path] = None,
     first_task_index: Optional[int] = None,
     last_task_index: Optional[int] = None,
@@ -87,6 +88,7 @@ def process_workflow(
             first_task_index : (last_task_index + 1)
         ],
         dataset=dataset,
+        job_id=job_id,
         runner=runner,
         workflow_dir_local=workflow_dir_local,
         workflow_dir_remote=workflow_dir_remote,
fractal_server/app/runner/v2/_slurm_sudo.py CHANGED
@@ -33,6 +33,7 @@ def process_workflow(
     workflow: WorkflowV2,
     dataset: DatasetV2,
     workflow_dir_local: Path,
+    job_id: int,
     workflow_dir_remote: Optional[Path] = None,
     first_task_index: Optional[int] = None,
     last_task_index: Optional[int] = None,
@@ -79,6 +80,7 @@ def process_workflow(
             first_task_index : (last_task_index + 1)
         ],
         dataset=dataset,
+        job_id=job_id,
         runner=runner,
         workflow_dir_local=workflow_dir_local,
         workflow_dir_remote=workflow_dir_remote,
fractal_server/app/runner/v2/db_tools.py CHANGED
@@ -8,9 +8,13 @@ from fractal_server.app.models.v2 import HistoryImageCache
 from fractal_server.app.models.v2 import HistoryRun
 from fractal_server.app.models.v2 import HistoryUnit
 from fractal_server.app.schemas.v2 import HistoryUnitStatus
+from fractal_server.logger import set_logger
+

 _CHUNK_SIZE = 2_000

+logger = set_logger(__name__)
+

 def update_status_of_history_run(
     *,
@@ -46,7 +50,12 @@ def bulk_update_status_of_history_unit(
     status: HistoryUnitStatus,
     db_sync: Session,
 ) -> None:
-    for ind in range(0, len(history_unit_ids), _CHUNK_SIZE):
+
+    len_history_unit_ids = len(history_unit_ids)
+    logger.debug(
+        f"[bulk_update_status_of_history_unit] {len_history_unit_ids=}."
+    )
+    for ind in range(0, len_history_unit_ids, _CHUNK_SIZE):
         db_sync.execute(
             update(HistoryUnit)
             .where(
@@ -77,7 +86,7 @@ def bulk_upsert_image_cache_fast(
     See docs at
     https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#insert-on-conflict-upsert

-    FIXME: we tried to replace `index_elements` with
+    NOTE: we tried to replace `index_elements` with
     `constraint="pk_historyimagecache"`, but it did not work as expected.

     Arguments:
@@ -85,10 +94,14 @@ def bulk_upsert_image_cache_fast(
         List of dictionaries for objects to be upsert-ed.
         db: A sync database session
     """
-    if len(list_upsert_objects) == 0:
+    len_list_upsert_objects = len(list_upsert_objects)
+
+    logger.debug(f"[bulk_upsert_image_cache_fast] {len_list_upsert_objects=}.")
+
+    if len_list_upsert_objects == 0:
         return None

-    for ind in range(0, len(list_upsert_objects), _CHUNK_SIZE):
+    for ind in range(0, len_list_upsert_objects, _CHUNK_SIZE):
         stmt = pg_insert(HistoryImageCache).values(
             list_upsert_objects[ind : ind + _CHUNK_SIZE]
         )
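For context, `bulk_upsert_image_cache_fast` combines the `_CHUNK_SIZE` batching shown above with PostgreSQL's `INSERT ... ON CONFLICT DO UPDATE`. A self-contained sketch of the same pattern (the table is a stand-in for `HistoryImageCache`; column names are illustrative):

```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.postgresql import insert as pg_insert
from sqlalchemy.orm import DeclarativeBase, Session

_CHUNK_SIZE = 2_000


class Base(DeclarativeBase):
    pass


class Cache(Base):  # stand-in for HistoryImageCache
    __tablename__ = "cache"
    key = Column(String, primary_key=True)
    value = Column(Integer, nullable=False)


def bulk_upsert(db: Session, rows: list[dict]) -> None:
    # Insert in chunks; on primary-key conflict, overwrite the payload.
    for ind in range(0, len(rows), _CHUNK_SIZE):
        stmt = pg_insert(Cache).values(rows[ind : ind + _CHUNK_SIZE])
        stmt = stmt.on_conflict_do_update(
            index_elements=[Cache.key],
            set_={"value": stmt.excluded.value},
        )
        db.execute(stmt)
    db.commit()
```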
fractal_server/app/runner/v2/runner.py CHANGED
@@ -42,6 +42,7 @@ def execute_tasks_v2(
     runner: BaseRunner,
     user_id: int,
     workflow_dir_local: Path,
+    job_id: int,
     workflow_dir_remote: Optional[Path] = None,
     logger_name: Optional[str] = None,
     get_runner_config: Callable[
@@ -119,6 +120,7 @@ def execute_tasks_v2(
         history_run = HistoryRun(
             dataset_id=dataset.id,
             workflowtask_id=wftask.id,
+            job_id=job_id,
             workflowtask_dump=workflowtask_dump,
             task_group_dump=task_group_dump,
             num_available_images=num_available_images,
@@ -128,6 +130,10 @@ def execute_tasks_v2(
         db.commit()
         db.refresh(history_run)
         history_run_id = history_run.id
+        logger.debug(
+            "[execute_tasks_v2] Created `HistoryRun` with "
+            f"{history_run_id=}."
+        )

         # TASK EXECUTION (V2)
         if task.type in ["non_parallel", "converter_non_parallel"]:
fractal_server/app/runner/v2/runner_functions.py CHANGED
@@ -50,6 +50,7 @@ class SubmissionOutcome(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
     task_output: TaskOutput | None = None
     exception: BaseException | None = None
+    invalid_output: bool = False


 class InitSubmissionOutcome(BaseModel):
@@ -66,6 +67,7 @@ def _process_task_output(
     result: dict[str, Any] | None = None,
     exception: BaseException | None = None,
 ) -> SubmissionOutcome:
+    invalid_output = False
     if exception is not None:
         task_output = None
     else:
@@ -75,13 +77,13 @@
         try:
             task_output = _cast_and_validate_TaskOutput(result)
         except TaskOutputValidationError as e:
-            # FIXME: This should correspond to some status="failed",
-            # but it does not
             task_output = None
             exception = e
+            invalid_output = True
     return SubmissionOutcome(
         task_output=task_output,
         exception=exception,
+        invalid_output=invalid_output,
     )


@@ -99,8 +101,6 @@
     try:
         task_output = _cast_and_validate_InitTaskOutput(result)
     except TaskOutputValidationError as e:
-        # FIXME: This should correspond to some status="failed",
-        # but it does not
         task_output = None
         exception = e
     return InitSubmissionOutcome(
@@ -187,6 +187,10 @@ def run_v2_task_non_parallel(
         db.add(history_unit)
         db.commit()
         db.refresh(history_unit)
+        logger.debug(
+            "[run_v2_task_non_parallel] Created `HistoryUnit` with "
+            f"{history_run_id=}."
+        )
         history_unit_id = history_unit.id
         bulk_upsert_image_cache_fast(
             db=db,
@@ -225,6 +229,13 @@
             exception=exception,
         )
     }
+    if outcome[0].invalid_output:
+        with next(get_sync_db()) as db:
+            update_status_of_history_unit(
+                history_unit_id=history_unit_id,
+                status=HistoryUnitStatus.FAILED,
+                db_sync=db,
+            )
     return outcome, num_tasks

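The net effect of the new `invalid_output` flag: a task whose return value fails `TaskOutput` validation now gets its `HistoryUnit` explicitly marked `FAILED`, instead of only carrying the exception in the outcome. A minimal sketch of the validate-and-flag step (using a stand-in pydantic model, not the real `TaskOutput` schema):

```python
from pydantic import BaseModel, ValidationError


class TaskOutput(BaseModel):  # stand-in, not the real schema
    image_list_updates: list[dict] = []


def cast_output(result: dict) -> tuple[TaskOutput | None, bool]:
    """Return (validated output, invalid_output flag)."""
    try:
        return TaskOutput(**result), False
    except ValidationError:
        # Mirrors the diff: keep the exception path, but also flag the
        # outcome so the HistoryUnit can be marked FAILED downstream.
        return None, True


print(cast_output({"image_list_updates": []}))   # (TaskOutput(...), False)
print(cast_output({"image_list_updates": 123}))  # (None, True)
```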
@@ -294,6 +305,10 @@
     with next(get_sync_db()) as db:
         db.add_all(history_units)
         db.commit()
+        logger.debug(
+            f"[run_v2_task_non_parallel] Created {len(history_units)} "
+            "`HistoryUnit`s."
+        )

         for history_unit in history_units:
             db.refresh(history_unit)
@@ -331,7 +346,6 @@
     outcome = {}
     for ind in range(len(list_function_kwargs)):
         if ind not in results.keys() and ind not in exceptions.keys():
-            # FIXME: Could we avoid this branch?
             error_msg = (
                 f"Invalid branch: {ind=} is not in `results.keys()` "
                 "nor in `exceptions.keys()`."
@@ -342,7 +356,13 @@
             result=results.get(ind, None),
             exception=exceptions.get(ind, None),
         )
-
+        if outcome[ind].invalid_output:
+            with next(get_sync_db()) as db:
+                update_status_of_history_unit(
+                    history_unit_id=history_unit_ids[ind],
+                    status=HistoryUnitStatus.FAILED,
+                    db_sync=db,
+                )
     num_tasks = len(images)
     return outcome, num_tasks

@@ -407,6 +427,10 @@ def run_v2_task_compound(
         db.commit()
         db.refresh(history_unit)
         init_history_unit_id = history_unit.id
+        logger.debug(
+            "[run_v2_task_compound] Created `HistoryUnit` with "
+            f"{init_history_unit_id=}."
+        )
         # Create one `HistoryImageCache` for each input image
         bulk_upsert_image_cache_fast(
             db=db,
@@ -524,6 +548,10 @@
         db.commit()
         for history_unit in history_units:
             db.refresh(history_unit)
+        logger.debug(
+            f"[run_v2_task_compound] Created {len(history_units)} "
+            "`HistoryUnit`s."
+        )
         history_unit_ids = [history_unit.id for history_unit in history_units]

     results, exceptions = runner.multisubmit(
@@ -545,7 +573,7 @@
     failure = False
     for ind in range(len(list_function_kwargs)):
         if ind not in results.keys() and ind not in exceptions.keys():
-            # FIXME: Could we avoid this branch?
+            # NOTE: see issue 2484
             error_msg = (
                 f"Invalid branch: {ind=} is not in `results.keys()` "
                 "nor in `exceptions.keys()`."
@@ -556,7 +584,7 @@
             result=results.get(ind, None),
             exception=exceptions.get(ind, None),
         )
-        if compute_outcomes[ind].exception is not None:
+        if compute_outcomes[ind].invalid_output:
             failure = True

     # NOTE: For compound tasks, we update `HistoryUnit.status` from here,
fractal_server/app/runner/v2/runner_functions_low_level.py CHANGED
@@ -84,13 +84,12 @@ def run_single_task(
     logger.debug(f"Now start running {command=}")

     # Write arguments to args.json file
-    # FIXME: this could be done backend-side, with an additional
-    # file transfer if needed (e.g. on SSH)
+    # NOTE: see issue 2346
     with open(args_file_remote, "w") as f:
         json.dump(parameters, f, indent=2)

     # Assemble full command
-    # FIXME: this could be assembled backend-side
+    # NOTE: this could be assembled backend-side
     full_command = (
         f"{command} "
         f"--args-json {args_file_remote} "
fractal_server/app/runner/v2/submit_workflow.py CHANGED
@@ -282,6 +282,7 @@ def submit_workflow(
         process_workflow(
             workflow=workflow,
             dataset=dataset,
+            job_id=job_id,
             user_id=user_id,
             workflow_dir_local=WORKFLOW_DIR_LOCAL,
             workflow_dir_remote=WORKFLOW_DIR_REMOTE,
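Taken together, the `job_id` parameter added in `_local.py`, `_slurm_ssh.py`, `_slurm_sudo.py`, `runner.py`, and `submit_workflow.py` threads the job's database id down to each new `HistoryRun`. Schematically (signatures heavily abbreviated; see the hunks above for the real parameter lists):

```python
# Schematic of the job_id plumbing added in this release.
def execute_tasks_v2(*, dataset, job_id: int, **kwargs):
    ...  # each HistoryRun(...) now receives job_id=job_id


def process_workflow(*, workflow, dataset, job_id: int, **kwargs):
    execute_tasks_v2(dataset=dataset, job_id=job_id, **kwargs)


def submit_workflow(*, workflow, dataset, job_id: int, **kwargs):
    process_workflow(workflow=workflow, dataset=dataset, job_id=job_id, **kwargs)
```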
fractal_server/app/schemas/v2/history.py CHANGED
@@ -45,6 +45,7 @@ class HistoryRunRead(BaseModel):
     id: int
     dataset_id: int
     workflowtask_id: Optional[int] = None
+    job_id: int
     workflowtask_dump: dict[str, Any]
     task_group_dump: dict[str, Any]
     timestamp_started: AwareDatetime
fractal_server/config.py CHANGED
@@ -494,7 +494,7 @@ class Settings(BaseSettings):

     FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL: int = 2
     """
-    FIXME: this is a workaround, we are still investigating.
+    NOTE: see issue 2444
     """

     FRACTAL_SLURM_SBATCH_SLEEP: float = 0
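`FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL` is the sleep used by the submit/multisubmit retrieval phases above. Assuming standard pydantic `BaseSettings` behavior (env lookup by field name), it can be overridden per deployment; a sketch with a stand-in settings class, since instantiating the real `fractal_server.config.Settings` requires a full configuration:

```python
import os

from pydantic_settings import BaseSettings

os.environ["FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL"] = "5"


class DemoSettings(BaseSettings):  # stand-in for fractal_server.config.Settings
    FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL: int = 2


print(DemoSettings().FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL)  # -> 5
```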
fractal_server/migrations/versions/c90a7c76e996_job_id_in_history_run.py ADDED
@@ -0,0 +1,41 @@
+"""job id in history run
+
+Revision ID: c90a7c76e996
+Revises: f37aceb45062
+Create Date: 2025-04-16 10:44:30.219309
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = "c90a7c76e996"
+down_revision = "f37aceb45062"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("historyrun", schema=None) as batch_op:
+        batch_op.add_column(sa.Column("job_id", sa.Integer(), nullable=False))
+        batch_op.create_foreign_key(
+            batch_op.f("fk_historyrun_job_id_jobv2"),
+            "jobv2",
+            ["job_id"],
+            ["id"],
+        )
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("historyrun", schema=None) as batch_op:
+        batch_op.drop_constraint(
+            batch_op.f("fk_historyrun_job_id_jobv2"), type_="foreignkey"
+        )
+        batch_op.drop_column("job_id")
+
+    # ### end Alembic commands ###
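Note that `upgrade()` adds `job_id` as `nullable=False` in a single step; on PostgreSQL this fails if `historyrun` already contains rows, since the new column has no default. If existing rows had to be preserved, a common Alembic pattern is add-nullable, backfill, then tighten; a hypothetical variant, not the released migration:

```python
# Hypothetical upgrade() variant for a populated `historyrun` table.
import sqlalchemy as sa
from alembic import op


def upgrade() -> None:
    with op.batch_alter_table("historyrun", schema=None) as batch_op:
        batch_op.add_column(sa.Column("job_id", sa.Integer(), nullable=True))

    # Backfill would go here, e.g. an op.execute(...) UPDATE deriving
    # job_id from whatever mapping to jobv2 the deployment can reconstruct.

    with op.batch_alter_table("historyrun", schema=None) as batch_op:
        batch_op.alter_column(
            "job_id", existing_type=sa.Integer(), nullable=False
        )
        batch_op.create_foreign_key(
            batch_op.f("fk_historyrun_job_id_jobv2"), "jobv2", ["job_id"], ["id"]
        )
```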
{fractal_server-2.14.0a27.dist-info → fractal_server-2.14.0a29.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: fractal-server
-Version: 2.14.0a27
+Version: 2.14.0a29
 Summary: Backend component of the Fractal analytics platform
 License: BSD-3-Clause
 Author: Tommaso Comparin
{fractal_server-2.14.0a27.dist-info → fractal_server-2.14.0a29.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
-fractal_server/__init__.py,sha256=CloicZW6sLyhonlS5tDSm-UfO9_zirGQq16Kn8P5ek0,26
+fractal_server/__init__.py,sha256=kUL0dbn6Xeh51ncedqHsdAUJ9AyGZJ6Jg5xDra2AQec,26
 fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
 fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
 fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -12,7 +12,7 @@ fractal_server/app/models/user_settings.py,sha256=Y-ZV-uZAFLZqXxy8c5_Qeh_F7zQuZD
 fractal_server/app/models/v2/__init__.py,sha256=vjHwek7-IXmaZZL9VF0nD30YL9ca4wNc8P4RXJK_kDc,832
 fractal_server/app/models/v2/accounting.py,sha256=f2ALxfKKBNxFLJTtC2-YqRepVK253x68y7zkD2V_Nls,1115
 fractal_server/app/models/v2/dataset.py,sha256=Xa3YLmqvSChBJoqlSsjmt-5x0zC-6rSx2eafFnMukfo,1240
-fractal_server/app/models/v2/history.py,sha256=u4i0NZko8eX5YKAk3MvVIIxU3owJ7D9tEPS_uJT9rrQ,2034
+fractal_server/app/models/v2/history.py,sha256=6yuYhsXgahHxv5FmDdv__aFndT228_rBFjTtkS-3Ohg,2082
 fractal_server/app/models/v2/job.py,sha256=JWrEjX_E4iRFr5MbmtV_aY28J-5D469awLr0rfa5Kig,2052
 fractal_server/app/models/v2/project.py,sha256=rAHoh5KfYwIaW7rTX0_O0jvWmxEvfo1BafvmcXuSSRk,786
 fractal_server/app/models/v2/task.py,sha256=8KEROaadgccXRZIP7EriBp2j1FgzYkgiirOi5_fG79M,1494
@@ -36,7 +36,7 @@ fractal_server/app/routes/api/v2/_aux_functions_history.py,sha256=ZlI6nwzB5r9AiY
 fractal_server/app/routes/api/v2/_aux_functions_task_lifecycle.py,sha256=qdXCb6IP8-qPEAxGZKljtjIqNzIAyRaAsQSRi5VqFHM,6773
 fractal_server/app/routes/api/v2/_aux_functions_tasks.py,sha256=uhNSs-jcS7ndIUFKiOC1yrDiViw3uvKEXi9UL04BMks,11642
 fractal_server/app/routes/api/v2/dataset.py,sha256=h5AhE0sdhQ20ZlIbEJsFnHIOUW0S1VHFpoflpBkVScs,8936
-fractal_server/app/routes/api/v2/history.py,sha256=pDztvwQFOh3JChtSk9GIG3H17yg4G5pk1mq14qXF4Ck,17793
+fractal_server/app/routes/api/v2/history.py,sha256=SljYnMClbSTz3Rs42ELYKHdtiAr9pxn8nkEHoXEOAnc,17936
 fractal_server/app/routes/api/v2/images.py,sha256=BGpO94gVd8BTpCN6Mun2RXmjrPmfkIp73m8RN7uiGW4,8361
 fractal_server/app/routes/api/v2/job.py,sha256=MU1sHIKk_89WrD0TD44d4ufzqnywot7On_W71KjyUbQ,6500
 fractal_server/app/routes/api/v2/project.py,sha256=uAZgATiHcOvbnRX-vv1D3HoaEUvLUd7vzVmGcqOP8ZY,4602
@@ -47,7 +47,7 @@ fractal_server/app/routes/api/v2/task_collection.py,sha256=IDNF6sjDuU37HIQ0TuQA-
 fractal_server/app/routes/api/v2/task_collection_custom.py,sha256=totsl0DOC2DFLw8vgqOFivvftpEk3KbFDeOHT0UVQUs,5997
 fractal_server/app/routes/api/v2/task_group.py,sha256=62zcVTdheXM5V3WmFuqisIqgETjXmZaRpNMcDX5bXS0,7408
 fractal_server/app/routes/api/v2/task_group_lifecycle.py,sha256=3o9bCC8ubMwffQPPaxQZy-CjH9IB2RkIReIecI6L2_w,9300
-fractal_server/app/routes/api/v2/verify_image_types.py,sha256=IOB96X3_FYBd9L_QiyVSEoV13ZP7YGS4WlBIDA1Op4I,1979
+fractal_server/app/routes/api/v2/verify_image_types.py,sha256=zGT1el58P-E7dVttyuo6MdCC0DtsxiP-NqMawl6EpGE,1950
 fractal_server/app/routes/api/v2/workflow.py,sha256=sW6Nm7dfzUY354hawyEkpQHy7rUvV2FCV8DPorH-TDU,10270
 fractal_server/app/routes/api/v2/workflow_import.py,sha256=INmnhlMEBJp-vHPR0f940DANPmIidts3OfcooeM_aNA,11205
 fractal_server/app/routes/api/v2/workflowtask.py,sha256=7_syX2EO7ibF6Xkm7HBPhsUYq6aYnKNeC5iSaafQhG4,11342
@@ -73,21 +73,21 @@ fractal_server/app/runner/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 fractal_server/app/runner/executors/base_runner.py,sha256=knWOERUwRLhsd9eq5GwGxH2ZVsvPOZRRjQPGbiExqcU,5052
 fractal_server/app/runner/executors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/local/get_local_config.py,sha256=KiakXxOahaLgWvQJ1LVGYGXht6DMGR9x8Xu-TuT9aY4,3628
-fractal_server/app/runner/executors/local/runner.py,sha256=5SVNWnCfj2D5hIw_KNf8VchC0czLhmfqmqdHM0kWsuY,7159
+fractal_server/app/runner/executors/local/runner.py,sha256=pcwQ-ow4pJk4mkUg6mODMmfzGiMWX18vPxybrly_evY,6962
 fractal_server/app/runner/executors/slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm_common/_batching.py,sha256=ZY020JZlDS5mfpgpWTChQkyHU7iLE5kx2HVd57_C6XA,8850
 fractal_server/app/runner/executors/slurm_common/_job_states.py,sha256=nuV-Zba38kDrRESOVB3gaGbrSPZc4q7YGichQaeqTW0,238
-fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=qzWsMFUbcgxo2p5BltTlxDBLgGa6Z4gDKDdZioK3MB0,15979
-fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=vF2lAUgO7vbK9pR1Jd2dFsimO45ccw2OeJTJ0z1YWwQ,30729
+fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=_feRRnVVnvQa3AsOQqfULfOgaoj2o6Ze0-fwXwic8p4,15795
+fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=3LHrNmJ8VuBSeFI07q4tq41DWtcYTzYJfHvsaezDyoI,30355
 fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256=BW6fDpPyB0VH5leVxvwzkVH3r3hC7DuSyoWmRzHITWg,7305
-fractal_server/app/runner/executors/slurm_common/remote.py,sha256=FS_F8EaPp-A5eQT5_ZH3ICCHt0-C8b_2OSYcyRkXnb4,5851
+fractal_server/app/runner/executors/slurm_common/remote.py,sha256=EB2uASKjrBIr25oc13XvSwf8x-TpTBr9WuaLMwNr2y4,5850
 fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py,sha256=RoxHLKOn0_wGjnY0Sv0a9nDSiqxYZHKRoMkT3p9_G1E,3607
 fractal_server/app/runner/executors/slurm_common/utils_executors.py,sha256=naPyJI0I3lD-sYHbSXbMFGUBK4h_SggA5V91Z1Ch1Xg,1416
 fractal_server/app/runner/executors/slurm_ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=ULQYW8A12BwC4GK2_2RhS33DFOFJewZoxS6vn_80z8c,7187
+fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=LeEt8a4knm9OCULxhhLkMPBanMW_65ZvL1O-HEA9QMw,7151
 fractal_server/app/runner/executors/slurm_sudo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=O1bNg1DiSDJmQE0RmOk2Ii47DagiXp5ryd0R6KxO2OM,3177
-fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=WGGVHX_juqyC6OVhln9yg-YKjLiuAoWZhAGxBjhNkWw,5873
+fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=iFaE3EMepbXzmKwqydwYMGJu7D1ak4RhbA43rkVUWZo,5962
 fractal_server/app/runner/extract_archive.py,sha256=I7UGIHXXuFvlgVPsP7GMWPu2-DiS1EiyBs7a1bvgkxI,2458
 fractal_server/app/runner/filenames.py,sha256=lPnxKHtdRizr6FqG3zOdjDPyWA7GoaJGTtiuJV0gA8E,70
 fractal_server/app/runner/run_subprocess.py,sha256=c3JbYXq3hX2aaflQU19qJ5Xs6J6oXGNvnTEoAfv2bxc,959
@@ -95,16 +95,16 @@ fractal_server/app/runner/set_start_and_last_task_index.py,sha256=-q4zVybAj8ek2X
 fractal_server/app/runner/shutdown.py,sha256=9pfSKHDNdIcm0eY-opgRTi7y0HmvfPmYiu9JR6Idark,2082
 fractal_server/app/runner/task_files.py,sha256=27xFuPzSJc1Pw912CfSMPOhOIpvNwpkyLCnycqdo9lw,4365
 fractal_server/app/runner/v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fractal_server/app/runner/v2/_local.py,sha256=DK8yagbvd6HHjcDVhUzTy0f7MURlTkQha-NM6OZKgJc,3044
-fractal_server/app/runner/v2/_slurm_ssh.py,sha256=_bytOf8z9sdrhI03D6eqg-aQPnJ7V2-qnqpcHAYizns,3278
-fractal_server/app/runner/v2/_slurm_sudo.py,sha256=DBCNxifXmMkpu71Wnk5u9-wKT7PV1WROQuY_4DYoZRI,2993
-fractal_server/app/runner/v2/db_tools.py,sha256=Ots6-Da7A_5yetSYrUGi-_yV-2r21Nc6XUBK3bv2mTM,2967
+fractal_server/app/runner/v2/_local.py,sha256=Ggdxx_XOlMya3bgXn_vGd2WMNVmLQaO3w9ZPaxYlRQk,3088
+fractal_server/app/runner/v2/_slurm_ssh.py,sha256=CEaJLajwdDjdpxY1_7aTLb9wqgzeOuxLlSewScMEx_Y,3322
+fractal_server/app/runner/v2/_slurm_sudo.py,sha256=TVihkQKMX6YWEWxXJjQo0WEQOjVy7FVVLmbM3MCulR0,3037
+fractal_server/app/runner/v2/db_tools.py,sha256=du5dKhMMFMErQXbGIgu9JvO_vtMensodyPsyDeqz1yQ,3324
 fractal_server/app/runner/v2/deduplicate_list.py,sha256=IVTE4abBU1bUprFTkxrTfYKnvkNTanWQ-KWh_etiT08,645
 fractal_server/app/runner/v2/merge_outputs.py,sha256=D1L4Taieq9i71SPQyNc1kMokgHh-sV_MqF3bv7QMDBc,907
-fractal_server/app/runner/v2/runner.py,sha256=sbBOH5gCErxK0fCPPGBWtLtqsSwtmrhTth5OLUGMeZQ,15658
-fractal_server/app/runner/v2/runner_functions.py,sha256=FsUkgnVC2Wb_YJzzMf_pAgVAwjfEGi6p6r4WLHtv-sw,17807
-fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=9t1CHN3EyfsGRWfG257YPY5WjQ6zuztsw_KZrpEAFPo,3703
-fractal_server/app/runner/v2/submit_workflow.py,sha256=EDUyUuIPwZHb2zm7SCRRoFsGq2cN-b5OKw6CYkZ8kWk,13048
+fractal_server/app/runner/v2/runner.py,sha256=aNMPABdTS9kJADL2JUeRNI6Ir-gDFTOnRI2tFRohjOU,15848
+fractal_server/app/runner/v2/runner_functions.py,sha256=X8veuvIxoEYEhQTOqGHv7FIYFiu8MbrH__YOjV_7WU4,18745
+fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=_h_OOffq3d7V0uHa8Uvs0mj31y1GSZBUXjDDF3WjVjY,3620
+fractal_server/app/runner/v2/submit_workflow.py,sha256=QywUGIoHAHnrWgfnyX8W9kVqKY-RvVyNLpzrbsXZOZ4,13075
 fractal_server/app/runner/v2/task_interface.py,sha256=IXdQTI8rXFgXv1Ez0js4CjKFf3QwO2GCHRTuwiFtiTQ,2891
 fractal_server/app/runner/versions.py,sha256=dSaPRWqmFPHjg20kTCHmi_dmGNcCETflDtDLronNanU,852
 fractal_server/app/schemas/__init__.py,sha256=stURAU_t3AOBaH0HSUbV-GKhlPKngnnIMoqWc3orFyI,135
@@ -117,7 +117,7 @@ fractal_server/app/schemas/v2/__init__.py,sha256=wXS4ZEzobWx5dh-XLjMZWpd-JMwWFPO
 fractal_server/app/schemas/v2/accounting.py,sha256=Wylt7uWTiDIFlHJOh4XEtYitk2FjFlmnodDrJDxcr0E,397
 fractal_server/app/schemas/v2/dataset.py,sha256=xNWdOW8hhL5Wx-iwyUPrZfWcC8fFuGDgdOHvZLbGVME,2782
 fractal_server/app/schemas/v2/dumps.py,sha256=uc9itXekO5IFfR6UucpQ5BX9NZZ8erE4hRR6S6aXlOc,2284
-fractal_server/app/schemas/v2/history.py,sha256=EhfTBYNVVWeWTj5QFrnuaD4zB73fThK1lyhxB2OBFoc,1659
+fractal_server/app/schemas/v2/history.py,sha256=Y3rc96DOPGQGZWJtBYVHiBjMQEhFtMq4WGkV4vs1oDE,1675
 fractal_server/app/schemas/v2/job.py,sha256=OXPB4oPiMVWYgZu0lGzM_LGACvhWBavsW7c3MmivdDM,4556
 fractal_server/app/schemas/v2/manifest.py,sha256=8mmB0QwxEgAeGgwKD_fT-o-wFy7lb6HxNXbp17IJqNY,7281
 fractal_server/app/schemas/v2/project.py,sha256=ulgCmUnX0w-0jrSjVYIT7sxeK95CSNGh2msXydhsgYI,885
@@ -130,7 +130,7 @@ fractal_server/app/schemas/v2/workflowtask.py,sha256=rVbmNihDAJL_Sckbt1hBK2JEcb-
 fractal_server/app/security/__init__.py,sha256=e2cveg5hQpieGD3bSPd5GTOMthvJ-HXH3buSb9WVfEU,14096
 fractal_server/app/security/signup_email.py,sha256=Xd6QYxcdmg0PHpDwmUE8XQmPcOj3Xjy5oROcIMhmltM,1472
 fractal_server/app/user_settings.py,sha256=OP1yiYKtPadxwM51_Q0hdPk3z90TCN4z1BLpQsXyWiU,1316
-fractal_server/config.py,sha256=RQmM9IGlT3K7jpWolwVPVe9KoDgFY3abk9IjEmmtvhs,28570
+fractal_server/config.py,sha256=83dHIuZMdMiu4LAtzVGBe_iD1nWEYOiKmeC-HHZ0nhw,28534
 fractal_server/data_migrations/README.md,sha256=_3AEFvDg9YkybDqCLlFPdDmGJvr6Tw7HRI14aZ3LOIw,398
 fractal_server/data_migrations/tools.py,sha256=LeMeASwYGtEqd-3wOLle6WARdTGAimoyMmRbbJl-hAM,572
 fractal_server/gunicorn_fractal.py,sha256=u6U01TLGlXgq1v8QmEpLih3QnsInZD7CqphgJ_GrGzc,1230
@@ -166,6 +166,7 @@ fractal_server/migrations/versions/9fd26a2b0de4_add_workflow_timestamp_created.p
 fractal_server/migrations/versions/a7f4d6137b53_add_workflow_dump_to_applyworkflow.py,sha256=ekDUML7ILpmdoqEclKbEUdyLi4uw9HSG_sTjG2hp_JE,867
 fractal_server/migrations/versions/af1ef1c83c9b_add_accounting_tables.py,sha256=BftudWuSGvKGBzIL5AMb3yWkgTAuaKPBGsYcOzp_gLQ,1899
 fractal_server/migrations/versions/af8673379a5c_drop_old_filter_columns.py,sha256=9sLd0F7nO5chHHm7RZ4wBA-9bvWomS-av_odKwODADM,1551
+fractal_server/migrations/versions/c90a7c76e996_job_id_in_history_run.py,sha256=Y1cPwmFOZ4mx3v2XZM6adgu8u0L0VD_R4ADURyMb2ro,1102
 fractal_server/migrations/versions/d256a7379ab8_taskgroup_activity_and_venv_info_to_.py,sha256=HN3_Pk8G81SzdYjg4K1RZAyjKSlsZGvcYE2nWOUbwxQ,3861
 fractal_server/migrations/versions/d4fe3708d309_make_applyworkflow_workflow_dump_non_.py,sha256=6cHEZFuTXiQg9yu32Y3RH1XAl71av141WQ6UMbiITIg,949
 fractal_server/migrations/versions/da2cb2ac4255_user_group_viewer_paths.py,sha256=yGWSA2HIHUybcVy66xBITk08opV2DFYSCIIrulaUZhI,901
@@ -208,8 +209,8 @@ fractal_server/tasks/v2/utils_templates.py,sha256=Kc_nSzdlV6KIsO0CQSPs1w70zLyENP
 fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
 fractal_server/utils.py,sha256=PMwrxWFxRTQRl1b9h-NRIbFGPKqpH_hXnkAT3NfZdpY,3571
 fractal_server/zip_tools.py,sha256=GjDgo_sf6V_DDg6wWeBlZu5zypIxycn_l257p_YVKGc,4876
-fractal_server-2.14.0a27.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
-fractal_server-2.14.0a27.dist-info/METADATA,sha256=C4cH5dZP3XZ1XpieeG5pOdYq24CGuSRrMkwXeUN6_BY,4563
-fractal_server-2.14.0a27.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
-fractal_server-2.14.0a27.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
-fractal_server-2.14.0a27.dist-info/RECORD,,
+fractal_server-2.14.0a29.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+fractal_server-2.14.0a29.dist-info/METADATA,sha256=3i4Ng70jNwtP8x-LM_n5OI02RsO6kqtfijb-iKYuVR8,4563
+fractal_server-2.14.0a29.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+fractal_server-2.14.0a29.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+fractal_server-2.14.0a29.dist-info/RECORD,,