fractal-server 2.14.0a26__py3-none-any.whl → 2.14.0a28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fractal_server/__init__.py CHANGED
@@ -1 +1 @@
- __VERSION__ = "2.14.0a26"
+ __VERSION__ = "2.14.0a28"
fractal_server/app/routes/api/v2/verify_image_types.py CHANGED
@@ -42,7 +42,7 @@ async def verify_unique_types(
  type_filters=query.type_filters,
  )

- # Get all available types (#FIXME use aux function)
+ # NOTE: see issue 2486
  available_types = set(
  _type for _img in filtered_images for _type in _img["types"].keys()
  )
fractal_server/app/runner/executors/local/runner.py CHANGED
@@ -25,7 +25,6 @@ class LocalRunner(BaseRunner):
  self,
  root_dir_local: Path,
  ):
-
  self.root_dir_local = root_dir_local
  self.root_dir_local.mkdir(parents=True, exist_ok=True)
  self.executor = ThreadPoolExecutor()
@@ -182,10 +181,6 @@ class LocalRunner(BaseRunner):
  db_sync=db,
  )

- # FIXME: what should happen here? Option 1: stop
- # all existing tasks and shutdown runner (for the
- # compound-task case)
-
- logger.debug(f"[multisubmit] END, {results=}, {exceptions=}")
+ logger.debug(f"[multisubmit] END, {len(results)=}, {len(exceptions)=}")

  return results, exceptions
fractal_server/app/runner/executors/slurm_common/_slurm_config.py CHANGED
@@ -368,9 +368,7 @@ class SlurmConfig(BaseModel):
  if value is not None:
  # Handle the `time` parameter
  if key == "time" and self.parallel_tasks_per_job > 1:
- # FIXME SSH: time setting must be handled better. Right now
- # we simply propagate `time`, but this is not enough when
- # several `srun` are combined in a single script.
+ # NOTE: see issue #1632
  logger.warning(
  f"`time` SLURM parameter is set to {self.time}, "
  "but this does not take into account the number of "
fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py CHANGED
@@ -34,7 +34,7 @@ SHUTDOWN_EXCEPTION = JobExecutionError(SHUTDOWN_ERROR_MESSAGE)

  logger = set_logger(__name__)

- # FIXME: Transform several logger.info into logger.debug.
+ # NOTE: see issue 2481.


  class BaseSlurmRunner(BaseRunner):
@@ -107,8 +107,7 @@ class BaseSlurmRunner(BaseRunner):
  raise NotImplementedError("Implement in child class.")

  def run_squeue(self, job_ids: list[str]) -> tuple[bool, str]:
-
- # FIXME: review different cases (exception vs no job found)
+ # NOTE: see issue 2482

  if len(job_ids) == 0:
  return (False, "")
@@ -457,7 +456,6 @@ class BaseSlurmRunner(BaseRunner):
  "converter_compound",
  ],
  ) -> tuple[Any, Exception]:
-
  logger.info("[submit] START")

  workdir_local = task_files.wftask_subfolder_local
@@ -514,7 +512,7 @@ class BaseSlurmRunner(BaseRunner):
  )
  logger.info(f"[submit] END submission phase, {self.job_ids=}")

- # FIXME: replace this sleep with a more precise check
+ # NOTE: see issue 2444
  settings = Inject(get_settings)
  sleep_time = settings.FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL
  logger.warning(f"[submit] Now sleep {sleep_time} seconds.")
@@ -524,7 +522,6 @@
  logger.info("[submit] START retrieval phase")
  scancelled_job_ids = []
  while len(self.jobs) > 0:
-
  # Look for finished jobs
  finished_job_ids = self._get_finished_jobs(job_ids=self.job_ids)
  logger.debug(f"[submit] {finished_job_ids=}")
@@ -664,9 +661,7 @@ class BaseSlurmRunner(BaseRunner):
  )
  )

- # FIXME: split parts 2 and 3
- # Part 2/3. Transfer all relevant input files (for SSH)
- # Part 3/3. Run all `sbatch`es and update `self.jobs`
+ # NOTE: see issue 2431
  logger.info("[multisubmit] Transfer files and submit jobs.")
  for slurm_job in jobs_to_submit:
  self._submit_single_sbatch(
@@ -677,19 +672,15 @@ class BaseSlurmRunner(BaseRunner):

  logger.info(f"END submission phase, {self.job_ids=}")

- # FIXME: replace this sleep with a more precise check
  settings = Inject(get_settings)
  sleep_time = settings.FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL
  logger.warning(f"[submit] Now sleep {sleep_time} seconds.")
  time.sleep(sleep_time)

- # FIXME: Could we merge the submit/multisubmit retrieval phases?
-
  # Retrieval phase
  logger.info("[multisubmit] START retrieval phase")
  scancelled_job_ids = []
  while len(self.jobs) > 0:
-
  # Look for finished jobs
  finished_job_ids = self._get_finished_jobs(job_ids=self.job_ids)
  logger.debug(f"[multisubmit] {finished_job_ids=}")
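Both `submit` and `multisubmit` in `BaseSlurmRunner` share the same shape: submit the SLURM jobs, sleep for `FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL`, then poll for finished jobs until none remain. Below is a minimal, self-contained sketch of that submit-then-poll loop; `submit_jobs`, `get_finished_jobs` and `fetch_outputs` are hypothetical stand-ins, not fractal-server functions.

import time

SLEEP_BEFORE_RETRIEVAL = 2  # cf. FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL
POLL_INTERVAL = 1  # seconds between status checks


def submit_jobs(jobs: dict[str, dict]) -> None:
    # Stand-in for the per-job sbatch submissions.
    for job_id in jobs:
        print(f"submitted {job_id}")


def get_finished_jobs(job_ids: list[str]) -> set[str]:
    # Stand-in for the squeue/sacct status check; here every job is "finished".
    return set(job_ids)


def fetch_outputs(job: dict) -> None:
    # Stand-in for retrieving result files and updating the database.
    print(f"retrieved outputs for {job}")


def run_and_wait(jobs: dict[str, dict]) -> None:
    submit_jobs(jobs)  # submission phase
    time.sleep(SLEEP_BEFORE_RETRIEVAL)  # settle before the first check
    while len(jobs) > 0:  # retrieval phase
        for job_id in get_finished_jobs(list(jobs.keys())):
            fetch_outputs(jobs.pop(job_id))
        time.sleep(POLL_INTERVAL)


run_and_wait({"123": {"name": "task-A"}, "124": {"name": "task-B"}})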
fractal_server/app/runner/executors/slurm_ssh/runner.py CHANGED
@@ -90,7 +90,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
  ).as_posix()

  # Create file list
- # # FIXME can we make this more efficient with iterators?
+ # NOTE: see issue 2483
  filelist = []
  for _slurm_job in finished_slurm_jobs:
  _single_job_filelist = [
@@ -168,7 +168,6 @@ class SlurmSSHRunner(BaseSlurmRunner):
  Transfer the jobs subfolder to the remote host.
  """
  for job in jobs:
-
  # Create local archive
  tarfile_path_local = compress_folder(
  job.workdir_local,
fractal_server/app/runner/v2/db_tools.py CHANGED
@@ -77,7 +77,7 @@ def bulk_upsert_image_cache_fast(
  See docs at
  https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#insert-on-conflict-upsert

- FIXME: we tried to replace `index_elements` with
+ NOTE: we tried to replace `index_elements` with
  `constraint="pk_historyimagecache"`, but it did not work as expected.

  Arguments:
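For reference, the `index_elements`-based upsert this docstring refers to follows the standard SQLAlchemy PostgreSQL ON CONFLICT pattern linked above. A minimal sketch with an illustrative table (not the actual HistoryImageCache model or its real primary key):

# Minimal ON CONFLICT upsert using `index_elements`.
# The table and columns here are illustrative only.
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class ImageCache(Base):
    __tablename__ = "image_cache"
    zarr_url = Column(String, primary_key=True)
    dataset_id = Column(Integer, primary_key=True)
    latest_history_unit_id = Column(Integer, nullable=False)


def bulk_upsert(db: Session, rows: list[dict]) -> None:
    stmt = insert(ImageCache).values(rows)
    stmt = stmt.on_conflict_do_update(
        # Conflict target: the primary-key columns of the cache table.
        index_elements=[ImageCache.zarr_url, ImageCache.dataset_id],
        # On conflict, only refresh the pointer to the latest history unit.
        set_={"latest_history_unit_id": stmt.excluded.latest_history_unit_id},
    )
    db.execute(stmt)
    db.commit()

# Usage sketch:
# bulk_upsert(session, [{"zarr_url": "...", "dataset_id": 1, "latest_history_unit_id": 7}])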
fractal_server/app/runner/v2/runner_functions.py CHANGED
@@ -50,6 +50,7 @@ class SubmissionOutcome(BaseModel):
  model_config = ConfigDict(arbitrary_types_allowed=True)
  task_output: TaskOutput | None = None
  exception: BaseException | None = None
+ invalid_output: bool = False


  class InitSubmissionOutcome(BaseModel):
@@ -66,6 +67,7 @@ def _process_task_output(
  result: dict[str, Any] | None = None,
  exception: BaseException | None = None,
  ) -> SubmissionOutcome:
+ invalid_output = False
  if exception is not None:
  task_output = None
  else:
@@ -75,13 +77,13 @@
  try:
  task_output = _cast_and_validate_TaskOutput(result)
  except TaskOutputValidationError as e:
- # FIXME: This should correspond to some status="failed",
- # but it does not
  task_output = None
  exception = e
+ invalid_output = True
  return SubmissionOutcome(
  task_output=task_output,
  exception=exception,
+ invalid_output=invalid_output,
  )


@@ -99,8 +101,6 @@ def _process_init_task_output(
  try:
  task_output = _cast_and_validate_InitTaskOutput(result)
  except TaskOutputValidationError as e:
- # FIXME: This should correspond to some status="failed",
- # but it does not
  task_output = None
  exception = e
  return InitSubmissionOutcome(
@@ -225,6 +225,13 @@ def run_v2_task_non_parallel(
  exception=exception,
  )
  }
+ if outcome[0].invalid_output:
+ with next(get_sync_db()) as db:
+ update_status_of_history_unit(
+ history_unit_id=history_unit_id,
+ status=HistoryUnitStatus.FAILED,
+ db_sync=db,
+ )
  return outcome, num_tasks


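The new `invalid_output` flag closes the gap the removed FIXME described: a task output that fails `TaskOutput` validation now ends up as a FAILED `HistoryUnit`. A minimal sketch of that control flow, using simplified stand-ins for the fractal-server models and database helpers:

# Sketch of the new failure path: an output that fails validation sets
# `invalid_output=True`, and the caller marks the HistoryUnit as FAILED.
# The models and helpers below are simplified stand-ins.
from pydantic import BaseModel, ValidationError


class TaskOutput(BaseModel):
    image_list_updates: list[dict] = []


class SubmissionOutcome(BaseModel):
    model_config = {"arbitrary_types_allowed": True}
    task_output: TaskOutput | None = None
    exception: BaseException | None = None
    invalid_output: bool = False


def process_task_output(result: dict | None) -> SubmissionOutcome:
    try:
        return SubmissionOutcome(task_output=TaskOutput(**(result or {})))
    except ValidationError as e:
        # Invalid output: keep the exception and raise the flag.
        return SubmissionOutcome(exception=e, invalid_output=True)


outcome = process_task_output({"image_list_updates": "not-a-list"})
if outcome.invalid_output:
    # In fractal-server this is where update_status_of_history_unit(...)
    # is called with status=HistoryUnitStatus.FAILED.
    print("history unit would be marked FAILED")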
@@ -331,7 +338,6 @@ def run_v2_task_parallel(
  outcome = {}
  for ind in range(len(list_function_kwargs)):
  if ind not in results.keys() and ind not in exceptions.keys():
- # FIXME: Could we avoid this branch?
  error_msg = (
  f"Invalid branch: {ind=} is not in `results.keys()` "
  "nor in `exceptions.keys()`."
@@ -342,7 +348,13 @@
  result=results.get(ind, None),
  exception=exceptions.get(ind, None),
  )
-
+ if outcome[ind].invalid_output:
+ with next(get_sync_db()) as db:
+ update_status_of_history_unit(
+ history_unit_id=history_unit_ids[ind],
+ status=HistoryUnitStatus.FAILED,
+ db_sync=db,
+ )
  num_tasks = len(images)
  return outcome, num_tasks

@@ -406,7 +418,7 @@ def run_v2_task_compound(
  db.add(history_unit)
  db.commit()
  db.refresh(history_unit)
- history_unit_id = history_unit.id
+ init_history_unit_id = history_unit.id
  # Create one `HistoryImageCache` for each input image
  bulk_upsert_image_cache_fast(
  db=db,
@@ -415,7 +427,7 @@
  workflowtask_id=wftask.id,
  dataset_id=dataset_id,
  zarr_url=zarr_url,
- latest_history_unit_id=history_unit_id,
+ latest_history_unit_id=init_history_unit_id,
  )
  for zarr_url in input_image_zarr_urls
  ],
@@ -431,7 +443,7 @@
  parameters=function_kwargs,
  task_type=task_type,
  task_files=task_files_init,
- history_unit_id=history_unit_id,
+ history_unit_id=init_history_unit_id,
  config=runner_config_init,
  )

@@ -459,7 +471,7 @@
  # Mark the init-task `HistoryUnit` as "done"
  with next(get_sync_db()) as db:
  update_status_of_history_unit(
- history_unit_id=history_unit_id,
+ history_unit_id=init_history_unit_id,
  status=HistoryUnitStatus.DONE,
  db_sync=db,
  )
@@ -470,7 +482,7 @@
  if len(parallelization_list) == 0:
  with next(get_sync_db()) as db:
  update_status_of_history_unit(
- history_unit_id=history_unit_id,
+ history_unit_id=init_history_unit_id,
  status=HistoryUnitStatus.DONE,
  db_sync=db,
  )
@@ -541,21 +553,23 @@
  config=runner_config_compute,
  )

- init_outcome = {}
+ compute_outcomes: dict[int, SubmissionOutcome] = {}
  failure = False
  for ind in range(len(list_function_kwargs)):
  if ind not in results.keys() and ind not in exceptions.keys():
- # FIXME: Could we avoid this branch?
+ # NOTE: see issue 2484
  error_msg = (
  f"Invalid branch: {ind=} is not in `results.keys()` "
  "nor in `exceptions.keys()`."
  )
  logger.error(error_msg)
  raise RuntimeError(error_msg)
- init_outcome[ind] = _process_task_output(
+ compute_outcomes[ind] = _process_task_output(
  result=results.get(ind, None),
  exception=exceptions.get(ind, None),
  )
+ if compute_outcomes[ind].invalid_output:
+ failure = True

  # NOTE: For compound tasks, we update `HistoryUnit.status` from here,
  # rather than within the submit/multisubmit runner methods. This is
@@ -564,7 +578,7 @@
  with next(get_sync_db()) as db:
  if failure:
  bulk_update_status_of_history_unit(
- history_unit_ids=history_unit_ids,
+ history_unit_ids=history_unit_ids + [init_history_unit_id],
  status=HistoryUnitStatus.FAILED,
  db_sync=db,
  )
@@ -575,4 +589,4 @@
  db_sync=db,
  )

- return init_outcome, num_tasks
+ return compute_outcomes, num_tasks
fractal_server/app/runner/v2/runner_functions_low_level.py CHANGED
@@ -84,13 +84,12 @@ def run_single_task(
  logger.debug(f"Now start running {command=}")

  # Write arguments to args.json file
- # FIXME: this could be done backend-side, with an additional
- # file transfer if needed (e.g. on SSH)
+ # NOTE: see issue 2346
  with open(args_file_remote, "w") as f:
  json.dump(parameters, f, indent=2)

  # Assemble full command
- # FIXME: this could be assembled backend-side
+ # NOTE: this could be assembled backend-side
  full_command = (
  f"{command} "
  f"--args-json {args_file_remote} "
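For context, `run_single_task` dumps the task parameters to an `args.json` file and then invokes the task command with `--args-json` pointing at it. A minimal sketch of that wrapper pattern; the paths, the example command, and the `--out-json` flag are illustrative stand-ins (only `--args-json` appears in the hunk above):

# Sketch: dump parameters to a JSON file, then build and run the task command.
import json
import shlex
import subprocess
from pathlib import Path

workdir = Path("/tmp/fractal-task-example")
workdir.mkdir(parents=True, exist_ok=True)
args_file = workdir / "args.json"
out_file = workdir / "metadiff.json"

parameters = {"zarr_url": "/tmp/plate.zarr/B/03/0", "overwrite": False}

# Write arguments to the args.json file
with open(args_file, "w") as f:
    json.dump(parameters, f, indent=2)

# Assemble the full command (task executable is a hypothetical module)
command = "python3 -m my_task_package.my_task"
full_command = f"{command} --args-json {args_file} --out-json {out_file}"
proc = subprocess.run(shlex.split(full_command), capture_output=True, text=True)
print(proc.returncode)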
fractal_server/config.py CHANGED
@@ -494,7 +494,7 @@ class Settings(BaseSettings):

  FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL: int = 2
  """
- FIXME: this is a workaround, we are still investigating.
+ NOTE: see issue 2444
  """

  FRACTAL_SLURM_SBATCH_SLEEP: float = 0
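Since `Settings` subclasses pydantic's `BaseSettings`, a deployment can presumably override this value through the environment rather than in code. A minimal sketch of that mechanism with a standalone toy model, not the actual fractal-server `Settings`:

# Toy model showing how a BaseSettings field such as
# FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL is typically overridden via env vars.
import os

from pydantic_settings import BaseSettings


class ToySettings(BaseSettings):
    FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL: int = 2


os.environ["FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL"] = "5"
print(ToySettings().FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL)  # -> 5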
fractal_server-2.14.0a26.dist-info/METADATA → fractal_server-2.14.0a28.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: fractal-server
- Version: 2.14.0a26
+ Version: 2.14.0a28
  Summary: Backend component of the Fractal analytics platform
  License: BSD-3-Clause
  Author: Tommaso Comparin
fractal_server-2.14.0a26.dist-info/RECORD → fractal_server-2.14.0a28.dist-info/RECORD
@@ -1,4 +1,4 @@
- fractal_server/__init__.py,sha256=5TMj3rq5zJN-KKD4tuCsVrhEOdTr3z_6ei5PgKuP2uM,26
+ fractal_server/__init__.py,sha256=7UElp_oL26Xzwy0fazO-_dZcH0gdX29TdHAqVARp7O8,26
  fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
  fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
  fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -47,7 +47,7 @@ fractal_server/app/routes/api/v2/task_collection.py,sha256=IDNF6sjDuU37HIQ0TuQA-
  fractal_server/app/routes/api/v2/task_collection_custom.py,sha256=totsl0DOC2DFLw8vgqOFivvftpEk3KbFDeOHT0UVQUs,5997
  fractal_server/app/routes/api/v2/task_group.py,sha256=62zcVTdheXM5V3WmFuqisIqgETjXmZaRpNMcDX5bXS0,7408
  fractal_server/app/routes/api/v2/task_group_lifecycle.py,sha256=3o9bCC8ubMwffQPPaxQZy-CjH9IB2RkIReIecI6L2_w,9300
- fractal_server/app/routes/api/v2/verify_image_types.py,sha256=IOB96X3_FYBd9L_QiyVSEoV13ZP7YGS4WlBIDA1Op4I,1979
+ fractal_server/app/routes/api/v2/verify_image_types.py,sha256=zGT1el58P-E7dVttyuo6MdCC0DtsxiP-NqMawl6EpGE,1950
  fractal_server/app/routes/api/v2/workflow.py,sha256=sW6Nm7dfzUY354hawyEkpQHy7rUvV2FCV8DPorH-TDU,10270
  fractal_server/app/routes/api/v2/workflow_import.py,sha256=INmnhlMEBJp-vHPR0f940DANPmIidts3OfcooeM_aNA,11205
  fractal_server/app/routes/api/v2/workflowtask.py,sha256=7_syX2EO7ibF6Xkm7HBPhsUYq6aYnKNeC5iSaafQhG4,11342
@@ -73,18 +73,18 @@ fractal_server/app/runner/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
  fractal_server/app/runner/executors/base_runner.py,sha256=knWOERUwRLhsd9eq5GwGxH2ZVsvPOZRRjQPGbiExqcU,5052
  fractal_server/app/runner/executors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/local/get_local_config.py,sha256=KiakXxOahaLgWvQJ1LVGYGXht6DMGR9x8Xu-TuT9aY4,3628
- fractal_server/app/runner/executors/local/runner.py,sha256=5SVNWnCfj2D5hIw_KNf8VchC0czLhmfqmqdHM0kWsuY,7159
+ fractal_server/app/runner/executors/local/runner.py,sha256=pcwQ-ow4pJk4mkUg6mODMmfzGiMWX18vPxybrly_evY,6962
  fractal_server/app/runner/executors/slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/slurm_common/_batching.py,sha256=ZY020JZlDS5mfpgpWTChQkyHU7iLE5kx2HVd57_C6XA,8850
  fractal_server/app/runner/executors/slurm_common/_job_states.py,sha256=nuV-Zba38kDrRESOVB3gaGbrSPZc4q7YGichQaeqTW0,238
- fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=qzWsMFUbcgxo2p5BltTlxDBLgGa6Z4gDKDdZioK3MB0,15979
- fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=vF2lAUgO7vbK9pR1Jd2dFsimO45ccw2OeJTJ0z1YWwQ,30729
+ fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=_feRRnVVnvQa3AsOQqfULfOgaoj2o6Ze0-fwXwic8p4,15795
+ fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=3LHrNmJ8VuBSeFI07q4tq41DWtcYTzYJfHvsaezDyoI,30355
  fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256=BW6fDpPyB0VH5leVxvwzkVH3r3hC7DuSyoWmRzHITWg,7305
  fractal_server/app/runner/executors/slurm_common/remote.py,sha256=FS_F8EaPp-A5eQT5_ZH3ICCHt0-C8b_2OSYcyRkXnb4,5851
  fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py,sha256=RoxHLKOn0_wGjnY0Sv0a9nDSiqxYZHKRoMkT3p9_G1E,3607
  fractal_server/app/runner/executors/slurm_common/utils_executors.py,sha256=naPyJI0I3lD-sYHbSXbMFGUBK4h_SggA5V91Z1Ch1Xg,1416
  fractal_server/app/runner/executors/slurm_ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=ULQYW8A12BwC4GK2_2RhS33DFOFJewZoxS6vn_80z8c,7187
+ fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=LeEt8a4knm9OCULxhhLkMPBanMW_65ZvL1O-HEA9QMw,7151
  fractal_server/app/runner/executors/slurm_sudo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=O1bNg1DiSDJmQE0RmOk2Ii47DagiXp5ryd0R6KxO2OM,3177
  fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=WGGVHX_juqyC6OVhln9yg-YKjLiuAoWZhAGxBjhNkWw,5873
@@ -98,12 +98,12 @@ fractal_server/app/runner/v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
  fractal_server/app/runner/v2/_local.py,sha256=DK8yagbvd6HHjcDVhUzTy0f7MURlTkQha-NM6OZKgJc,3044
  fractal_server/app/runner/v2/_slurm_ssh.py,sha256=_bytOf8z9sdrhI03D6eqg-aQPnJ7V2-qnqpcHAYizns,3278
  fractal_server/app/runner/v2/_slurm_sudo.py,sha256=DBCNxifXmMkpu71Wnk5u9-wKT7PV1WROQuY_4DYoZRI,2993
- fractal_server/app/runner/v2/db_tools.py,sha256=Ots6-Da7A_5yetSYrUGi-_yV-2r21Nc6XUBK3bv2mTM,2967
+ fractal_server/app/runner/v2/db_tools.py,sha256=BfwDhIDssBmEu6HDRj1RSvDYLaoLSWFByro1Ca70aA8,2966
  fractal_server/app/runner/v2/deduplicate_list.py,sha256=IVTE4abBU1bUprFTkxrTfYKnvkNTanWQ-KWh_etiT08,645
  fractal_server/app/runner/v2/merge_outputs.py,sha256=D1L4Taieq9i71SPQyNc1kMokgHh-sV_MqF3bv7QMDBc,907
  fractal_server/app/runner/v2/runner.py,sha256=sbBOH5gCErxK0fCPPGBWtLtqsSwtmrhTth5OLUGMeZQ,15658
- fractal_server/app/runner/v2/runner_functions.py,sha256=gi5M_JlFMV3DP6ismF7eObs2cTglAcVdgsRDKSAQRc8,17632
- fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=9t1CHN3EyfsGRWfG257YPY5WjQ6zuztsw_KZrpEAFPo,3703
+ fractal_server/app/runner/v2/runner_functions.py,sha256=2im4gskJRLN4SQ6jhfgbU-U0-nEz5r7YtrpC10S2aWg,18209
+ fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=_h_OOffq3d7V0uHa8Uvs0mj31y1GSZBUXjDDF3WjVjY,3620
  fractal_server/app/runner/v2/submit_workflow.py,sha256=EDUyUuIPwZHb2zm7SCRRoFsGq2cN-b5OKw6CYkZ8kWk,13048
  fractal_server/app/runner/v2/task_interface.py,sha256=IXdQTI8rXFgXv1Ez0js4CjKFf3QwO2GCHRTuwiFtiTQ,2891
  fractal_server/app/runner/versions.py,sha256=dSaPRWqmFPHjg20kTCHmi_dmGNcCETflDtDLronNanU,852
@@ -130,7 +130,7 @@ fractal_server/app/schemas/v2/workflowtask.py,sha256=rVbmNihDAJL_Sckbt1hBK2JEcb-
  fractal_server/app/security/__init__.py,sha256=e2cveg5hQpieGD3bSPd5GTOMthvJ-HXH3buSb9WVfEU,14096
  fractal_server/app/security/signup_email.py,sha256=Xd6QYxcdmg0PHpDwmUE8XQmPcOj3Xjy5oROcIMhmltM,1472
  fractal_server/app/user_settings.py,sha256=OP1yiYKtPadxwM51_Q0hdPk3z90TCN4z1BLpQsXyWiU,1316
- fractal_server/config.py,sha256=RQmM9IGlT3K7jpWolwVPVe9KoDgFY3abk9IjEmmtvhs,28570
+ fractal_server/config.py,sha256=83dHIuZMdMiu4LAtzVGBe_iD1nWEYOiKmeC-HHZ0nhw,28534
  fractal_server/data_migrations/README.md,sha256=_3AEFvDg9YkybDqCLlFPdDmGJvr6Tw7HRI14aZ3LOIw,398
  fractal_server/data_migrations/tools.py,sha256=LeMeASwYGtEqd-3wOLle6WARdTGAimoyMmRbbJl-hAM,572
  fractal_server/gunicorn_fractal.py,sha256=u6U01TLGlXgq1v8QmEpLih3QnsInZD7CqphgJ_GrGzc,1230
@@ -208,8 +208,8 @@ fractal_server/tasks/v2/utils_templates.py,sha256=Kc_nSzdlV6KIsO0CQSPs1w70zLyENP
  fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
  fractal_server/utils.py,sha256=PMwrxWFxRTQRl1b9h-NRIbFGPKqpH_hXnkAT3NfZdpY,3571
  fractal_server/zip_tools.py,sha256=GjDgo_sf6V_DDg6wWeBlZu5zypIxycn_l257p_YVKGc,4876
- fractal_server-2.14.0a26.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
- fractal_server-2.14.0a26.dist-info/METADATA,sha256=5sAI97ML9ViHIX8EvykTdZBa8B8PSri-pjDXzsYG4gM,4563
- fractal_server-2.14.0a26.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
- fractal_server-2.14.0a26.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
- fractal_server-2.14.0a26.dist-info/RECORD,,
+ fractal_server-2.14.0a28.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+ fractal_server-2.14.0a28.dist-info/METADATA,sha256=bSrvv3snGeMW_3fQ2zdtKvoGU2SZm5J0-EZPxEcy43c,4563
+ fractal_server-2.14.0a28.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+ fractal_server-2.14.0a28.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+ fractal_server-2.14.0a28.dist-info/RECORD,,