fractal-server 2.15.7__py3-none-any.whl → 2.15.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
- __VERSION__ = "2.15.7"
+ __VERSION__ = "2.15.8"
@@ -73,10 +73,9 @@ class UserOAuth(SQLModel, table=True):
  is_active:
  is_superuser:
  is_verified:
- slurm_user:
- slurm_accounts:
  username:
  oauth_accounts:
+ settings:
  """

  __tablename__ = "user_oauth"
@@ -109,7 +109,7 @@ async def view_job(

  @router.get("/{job_id}/", response_model=JobReadV2)
  async def view_single_job(
- job_id: int = None,
+ job_id: int,
  show_tmp_logs: bool = False,
  user: UserOAuth = Depends(current_active_superuser),
  db: AsyncSession = Depends(get_async_db),
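Note on the `job_id` change above: a FastAPI path parameter is always required, so the previous `= None` default had no practical effect and made the `int` annotation wrong. Dropping the default aligns the signature with the actual behavior. A minimal, self-contained sketch of that behavior (hypothetical toy app, not the fractal-server router):

```python
from fastapi import FastAPI

app = FastAPI()


@app.get("/{job_id}/")
async def view_single_job(job_id: int):
    # The path parameter is required and validated as an integer;
    # a request with a non-integer value gets a 422 response.
    return {"job_id": job_id}
```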
@@ -25,7 +25,6 @@ from fractal_server.app.routes.aux.validate_user_settings import (
  from fractal_server.app.schemas.v2 import TaskGroupActivityActionV2
  from fractal_server.app.schemas.v2 import TaskGroupActivityStatusV2
  from fractal_server.app.schemas.v2 import TaskGroupActivityV2Read
- from fractal_server.app.schemas.v2 import TaskGroupReadV2
  from fractal_server.app.schemas.v2 import TaskGroupV2OriginEnum
  from fractal_server.config import get_settings
  from fractal_server.logger import set_logger
@@ -52,7 +51,7 @@ async def deactivate_task_group(
  response: Response,
  superuser: UserOAuth = Depends(current_active_superuser),
  db: AsyncSession = Depends(get_async_db),
- ) -> TaskGroupReadV2:
+ ) -> TaskGroupActivityV2Read:
  """
  Deactivate task-group venv
  """
@@ -157,7 +156,7 @@ async def reactivate_task_group(
  response: Response,
  superuser: UserOAuth = Depends(current_active_superuser),
  db: AsyncSession = Depends(get_async_db),
- ) -> TaskGroupReadV2:
+ ) -> TaskGroupActivityV2Read:
  """
  Deactivate task-group venv
  """
@@ -6,7 +6,7 @@ def get_new_workflow_task_meta(
  old_workflow_task_meta: dict | None,
  old_task_meta: dict | None,
  new_task_meta: dict | None,
- ) -> dict[str, Any]:
+ ) -> dict[str, Any] | None:
  """
  Prepare new meta field based on old/new tasks and old workflow task.
  """
@@ -156,6 +156,28 @@ async def apply_workflow(
  if len(user_settings.slurm_accounts) > 0:
  job_create.slurm_account = user_settings.slurm_accounts[0]

+ # User appropriate FractalSSH object
+ if settings.FRACTAL_RUNNER_BACKEND == "slurm_ssh":
+ ssh_config = dict(
+ user=user_settings.ssh_username,
+ host=user_settings.ssh_host,
+ key_path=user_settings.ssh_private_key_path,
+ )
+ fractal_ssh_list = request.app.state.fractal_ssh_list
+ try:
+ fractal_ssh = fractal_ssh_list.get(**ssh_config)
+ except Exception as e:
+ logger.error(
+ "Could not get a valid SSH connection in the submit endpoint. "
+ f"Original error: '{str(e)}'."
+ )
+ raise HTTPException(
+ status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+ detail="Error in setting up the SSH connection.",
+ )
+ else:
+ fractal_ssh = None
+
  # Add new Job object to DB
  job = JobV2(
  project_id=project_id,
@@ -219,18 +241,6 @@ async def apply_workflow(
  await db.merge(job)
  await db.commit()

- # User appropriate FractalSSH object
- if settings.FRACTAL_RUNNER_BACKEND == "slurm_ssh":
- ssh_config = dict(
- user=user_settings.ssh_username,
- host=user_settings.ssh_host,
- key_path=user_settings.ssh_private_key_path,
- )
- fractal_ssh_list = request.app.state.fractal_ssh_list
- fractal_ssh = fractal_ssh_list.get(**ssh_config)
- else:
- fractal_ssh = None
-
  # Expunge user settings from db, to use in background task
  db.expunge(user_settings)

@@ -69,7 +69,7 @@ async def get_list_task(
  stm = stm.where(TaskV2.authors.icontains(author))

  res = await db.execute(stm)
- task_list = res.scalars().all()
+ task_list = list(res.scalars().all())
  await db.close()
  if args_schema is False:
  for task in task_list:
@@ -49,7 +49,7 @@ async def deactivate_task_group(
  response: Response,
  user: UserOAuth = Depends(current_active_user),
  db: AsyncSession = Depends(get_async_db),
- ) -> TaskGroupReadV2:
+ ) -> TaskGroupActivityV2Read:
  """
  Deactivate task-group venv
  """
@@ -66,7 +66,6 @@ class _SlurmConfigSet(BaseModel):
  time: str | None = None
  account: str | None = None
  extra_lines: list[str] | None = None
- pre_submission_commands: list[str] | None = None
  gpus: str | None = None


@@ -253,8 +252,6 @@ class SlurmConfig(BaseModel):
  Key-value pairs to be included as `export`-ed variables in SLURM
  submission script, after prepending values with the user's cache
  directory.
- pre_submission_commands: List of commands to be prepended to the sbatch
- command.
  """

  model_config = ConfigDict(extra="forbid")
@@ -294,8 +291,6 @@ class SlurmConfig(BaseModel):
  target_num_jobs: int
  max_num_jobs: int

- pre_submission_commands: list[str] = Field(default_factory=list)
-
  def _sorted_extra_lines(self) -> list[str]:
  """
  Return a copy of `self.extra_lines`, where lines starting with
@@ -137,7 +137,9 @@ class BaseSlurmRunner(BaseRunner):
  def run_squeue(self, *, job_ids: list[str], **kwargs) -> str:
  raise NotImplementedError("Implement in child class.")

- def _is_squeue_error_recoverable(self, exception: BaseException) -> True:
+ def _is_squeue_error_recoverable(
+ self, exception: BaseException
+ ) -> Literal[True]:
  """
  Determine whether a `squeue` error is considered recoverable.

@@ -262,14 +264,25 @@ class BaseSlurmRunner(BaseRunner):

  return new_slurm_config

- def _submit_single_sbatch(
+ def _prepare_single_slurm_job(
  self,
  *,
  base_command: str,
  slurm_job: SlurmJob,
  slurm_config: SlurmConfig,
  ) -> str:
- logger.debug("[_submit_single_sbatch] START")
+ """
+ Prepare submission script locally.
+
+ Args:
+ base_command: Base of task executable command.
+ slurm_job: `SlurmJob` object
+ slurm_config: Configuration for SLURM job
+
+ Returns:
+ Command to submit the SLURM job.
+ """
+ logger.debug("[_prepare_single_slurm_job] START")

  for task in slurm_job.tasks:
  # Write input file
@@ -299,24 +312,10 @@ class BaseSlurmRunner(BaseRunner):
  json.dump(task.parameters, f, indent=2)

  logger.debug(
- "[_submit_single_sbatch] Written " f"{task.input_file_local=}"
+ "[_prepare_single_slurm_job] Written "
+ f"{task.input_file_local=}"
  )

- if self.slurm_runner_type == "ssh":
- # Send input file (only relevant for SSH)
- self.fractal_ssh.send_file(
- local=task.input_file_local,
- remote=task.input_file_remote,
- )
- self.fractal_ssh.send_file(
- local=task.task_files.args_file_local,
- remote=task.task_files.args_file_remote,
- )
- logger.debug(
- "[_submit_single_sbatch] Transferred "
- f"{task.input_file_local=}"
- )
-
  # Prepare commands to be included in SLURM submission script
  cmdlines = []
  for task in slurm_job.tasks:
@@ -353,7 +352,7 @@ class BaseSlurmRunner(BaseRunner):
  ]
  )
  script_lines = slurm_config.sort_script_lines(script_lines)
- logger.debug(script_lines)
+ logger.debug(f"[_prepare_single_slurm_job] {script_lines=}")

  # Always print output of `uname -n` and `pwd`
  script_lines.append('\necho "Hostname: $(uname -n)"')
@@ -373,61 +372,64 @@ class BaseSlurmRunner(BaseRunner):
  f"--mem={mem_per_task_MB}MB "
  f"{cmd} &"
  )
- script_lines.append("wait\n")
- script = "\n".join(script_lines)
+ script_lines.append("wait\n\n")
  script_lines.append(
  'echo "End time: $(date +"%Y-%m-%dT%H:%M:%S%z")"'
  )
+ script = "\n".join(script_lines)

  # Write submission script
  with open(slurm_job.slurm_submission_script_local, "w") as f:
  f.write(script)
  logger.debug(
- "[_submit_single_sbatch] Written "
+ "[_prepare_single_slurm_job] Written "
  f"{slurm_job.slurm_submission_script_local=}"
  )

  if self.slurm_runner_type == "ssh":
- self.fractal_ssh.send_file(
- local=slurm_job.slurm_submission_script_local,
- remote=slurm_job.slurm_submission_script_remote,
- )
  submit_command = (
- "sbatch --parsable "
- f"{slurm_job.slurm_submission_script_remote}"
+ f"sbatch --parsable {slurm_job.slurm_submission_script_remote}"
  )
  else:
  submit_command = (
- "sbatch --parsable "
- f"{slurm_job.slurm_submission_script_local}"
+ f"sbatch --parsable {slurm_job.slurm_submission_script_local}"
  )
- # Run sbatch
- pre_submission_cmds = slurm_config.pre_submission_commands
- if len(pre_submission_cmds) == 0:
- logger.debug(f"Now run {submit_command=}")
- sbatch_stdout = self._run_remote_cmd(submit_command)
- else:
- logger.debug(f"Now using {pre_submission_cmds=}")
- script_lines = pre_submission_cmds + [submit_command]
- wrapper_script_contents = "\n".join(script_lines)
- wrapper_script_contents = f"{wrapper_script_contents}\n"
- if self.slurm_runner_type == "ssh":
- wrapper_script = (
- f"{slurm_job.slurm_submission_script_remote}_wrapper.sh"
- )
- self.fractal_ssh.write_remote_file(
- path=wrapper_script, content=wrapper_script_contents
- )
- else:
- wrapper_script = (
- f"{slurm_job.slurm_submission_script_local}_wrapper.sh"
- )
- with open(wrapper_script, "w") as f:
- f.write(wrapper_script_contents)
- logger.debug(f"Now run {wrapper_script=}")
- sbatch_stdout = self._run_remote_cmd(f"bash {wrapper_script}")
+ logger.debug("[_prepare_single_slurm_job] END")
+ return submit_command
+
+ def _send_many_job_inputs(
+ self, *, workdir_local: Path, workdir_remote: Path
+ ) -> None:
+ """
+ Placeholder method.
+
+ This method is intentionally left unimplemented in the base class.
+ Subclasses must override it to provide the logic for transferring
+ input data.
+ """
+ pass
+
+ def _submit_single_sbatch(
+ self,
+ *,
+ submit_command: str,
+ slurm_job: SlurmJob,
+ ) -> None:
+ """
+ Run `sbatch` and add the `slurm_job` to `self.jobs`.
+
+ Args:
+ submit_command:
+ The SLURM submission command prepared in
+ `self._prepare_single_slurm_job`.
+ slurm_job: The `SlurmJob` object.
+ """
+
+ logger.debug("[_submit_single_sbatch] START")

  # Submit SLURM job and retrieve job ID
+ logger.debug(f"[_submit_single_sbatch] Now run {submit_command=}")
+ sbatch_stdout = self._run_remote_cmd(submit_command)
  logger.info(f"[_submit_single_sbatch] {sbatch_stdout=}")
  stdout = sbatch_stdout.strip("\n")
  submitted_job_id = int(stdout)
@@ -623,11 +625,19 @@ class BaseSlurmRunner(BaseRunner):
  )

  config.parallel_tasks_per_job = 1
- self._submit_single_sbatch(
+ submit_command = self._prepare_single_slurm_job(
  base_command=base_command,
  slurm_job=slurm_job,
  slurm_config=config,
  )
+ self._send_many_job_inputs(
+ workdir_local=workdir_local,
+ workdir_remote=workdir_remote,
+ )
+ self._submit_single_sbatch(
+ submit_command=submit_command,
+ slurm_job=slurm_job,
+ )
  logger.debug(f"[submit] END submission phase, {self.job_ids=}")

  create_accounting_record_slurm(
@@ -726,8 +736,8 @@ class BaseSlurmRunner(BaseRunner):
  status=HistoryUnitStatus.FAILED,
  db_sync=db,
  )
- results = {}
- exceptions = {
+ results: dict[int, Any] = {}
+ exceptions: dict[int, BaseException] = {
  ind: SHUTDOWN_EXCEPTION
  for ind in range(len(list_parameters))
  }
@@ -801,13 +811,25 @@ class BaseSlurmRunner(BaseRunner):
  )
  )

- # NOTE: see issue 2431
- logger.debug("[multisubmit] Transfer files and submit jobs.")
+ submit_commands = []
  for slurm_job in jobs_to_submit:
+ submit_commands.append(
+ self._prepare_single_slurm_job(
+ base_command=base_command,
+ slurm_job=slurm_job,
+ slurm_config=config,
+ )
+ )
+ self._send_many_job_inputs(
+ workdir_local=workdir_local,
+ workdir_remote=workdir_remote,
+ )
+ for slurm_job, submit_command in zip(
+ jobs_to_submit, submit_commands
+ ):
  self._submit_single_sbatch(
- base_command=base_command,
+ submit_command=submit_command,
  slurm_job=slurm_job,
- slurm_config=config,
  )

  logger.info(f"[multisubmit] END submission phase, {self.job_ids=}")
@@ -830,8 +852,10 @@ class BaseSlurmRunner(BaseRunner):
  status=HistoryUnitStatus.FAILED,
  db_sync=db,
  )
- results = {}
- exceptions = {ind: e for ind in range(len(list_parameters))}
+ results: dict[int, Any] = {}
+ exceptions: dict[int, BaseException] = {
+ ind: e for ind in range(len(list_parameters))
+ }
  return results, exceptions

  # Retrieval phase
@@ -166,12 +166,69 @@ class SlurmSSHRunner(BaseSlurmRunner):
  stdout = self.fractal_ssh.run_command(cmd=cmd)
  return stdout

+ def _send_many_job_inputs(
+ self, *, workdir_local: Path, workdir_remote: Path
+ ) -> None:
+ """
+ Compress, transfer, and extract a local working directory onto a remote
+ host.
+
+ This method creates a temporary `.tar.gz` archive of the given
+ `workdir_local`, transfers it to the remote machine via the configured
+ SSH connection, extracts it into `workdir_remote`, and removes the
+ temporary archive from both local and remote filesystems.
+ """
+
+ logger.debug("[_send_many_job_inputs] START")
+
+ tar_path_local = workdir_local.with_suffix(".tar.gz")
+ tar_name = Path(tar_path_local).name
+ tar_path_remote = workdir_remote.parent / tar_name
+
+ tar_compression_cmd = get_tar_compression_cmd(
+ subfolder_path=workdir_local, filelist_path=None
+ )
+ _, tar_extraction_cmd = get_tar_extraction_cmd(
+ archive_path=tar_path_remote
+ )
+ rm_tar_cmd = f"rm {tar_path_remote.as_posix()}"
+
+ try:
+ run_subprocess(tar_compression_cmd, logger_name=logger.name)
+ logger.debug(
+ "[_send_many_job_inputs] "
+ f"{workdir_local=} compressed to {tar_path_local=}."
+ )
+ self.fractal_ssh.send_file(
+ local=tar_path_local.as_posix(),
+ remote=tar_path_remote.as_posix(),
+ )
+ logger.debug(
+ "[_send_many_job_inputs] "
+ f"{tar_path_local=} sent via SSH to {tar_path_remote=}."
+ )
+ self.fractal_ssh.run_command(cmd=tar_extraction_cmd)
+ logger.debug(
+ "[_send_many_job_inputs] "
+ f"{tar_path_remote=} extracted to {workdir_remote=}."
+ )
+ self.fractal_ssh.run_command(cmd=rm_tar_cmd)
+ logger.debug(
+ "[_send_many_job_inputs] "
+ f"{tar_path_remote=} removed from remote server."
+ )
+ except Exception as e:
+ raise e
+ finally:
+ Path(tar_path_local).unlink(missing_ok=True)
+ logger.debug(f"[_send_many_job_inputs] {tar_path_local=} removed.")
+
+ logger.debug("[_send_many_job_inputs] END.")
+
  def run_squeue(
  self,
  *,
  job_ids: list[str],
- base_interval: float = 2.0,
- max_attempts: int = 7,
  ) -> str:
  """
  Run `squeue` for a set of SLURM job IDs.
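Note: the new `SlurmSSHRunner._send_many_job_inputs` above replaces the earlier per-file `send_file` transfers (removed from the base runner) with a single archive transfer. A rough sketch of the shell-level flow this amounts to, using hypothetical paths (the exact flags produced by `get_tar_compression_cmd`/`get_tar_extraction_cmd` in `tar_commands.py` may differ):

```python
from pathlib import Path

workdir_local = Path("/srv/fractal/artifacts/job_0001")        # hypothetical local workdir
workdir_remote = Path("/home/user/fractal/artifacts/job_0001")  # hypothetical remote workdir

tar_path_local = workdir_local.with_suffix(".tar.gz")
tar_path_remote = workdir_remote.parent / tar_path_local.name

# 1. compress locally, 2. copy via SSH, 3. extract remotely, 4. remove the remote archive
compress_cmd = f"tar -czf {tar_path_local} -C {workdir_local.parent} {workdir_local.name}"
extract_cmd = f"tar -xzf {tar_path_remote} -C {workdir_remote.parent}"
cleanup_cmd = f"rm {tar_path_remote}"

print(compress_cmd, extract_cmd, cleanup_cmd, sep="\n")
```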
@@ -205,8 +262,6 @@ class SlurmSSHRunner(BaseSlurmRunner):
  try:
  stdout = self.fractal_ssh.run_command(
  cmd=cmd,
- base_interval=base_interval,
- max_attempts=max_attempts,
  )
  return stdout
  except FractalSSHCommandError as e:
@@ -167,7 +167,7 @@ class SudoSlurmRunner(BaseSlurmRunner):
  )
  return res.stdout

- def run_squeue(self, job_ids: list[str]) -> str:
+ def run_squeue(self, *, job_ids: list[str]) -> str:
  """
  Run `squeue` for a set of SLURM job IDs.
  """
@@ -47,7 +47,7 @@ def _remove_status_from_attributes(
  Drop attribute `IMAGE_STATUS_KEY` from all images.
  """
  images_copy = deepcopy(images)
- [img["attributes"].pop(IMAGE_STATUS_KEY) for img in images_copy]
+ [img["attributes"].pop(IMAGE_STATUS_KEY, None) for img in images_copy]
  return images_copy

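Note: the change above makes the status cleanup tolerant of images that never had the status attribute, since `dict.pop(key)` raises `KeyError` when the key is missing while `dict.pop(key, None)` simply returns the default. A minimal illustration (the literal value of `IMAGE_STATUS_KEY` is not shown in this diff, so a placeholder key is used):

```python
IMAGE_STATUS_KEY = "status"  # placeholder, not the real constant value

images = [
    {"attributes": {IMAGE_STATUS_KEY: "done", "well": "A01"}},
    {"attributes": {"well": "A02"}},  # no status attribute at all
]

for img in images:
    img["attributes"].pop(IMAGE_STATUS_KEY, None)  # no KeyError on the second image
```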
fractal_server/config.py CHANGED
@@ -66,37 +66,74 @@ class MailSettings(BaseModel):

  class PixiSettings(BaseModel):
  """
- Configuration for Pixi task collection
-
- See https://pixi.sh/latest/reference/cli/pixi/install/#config-options for
- `pixi install` concurrency options.
- See https://docs.rs/tokio/latest/tokio/#cpu-bound-tasks-and-blocking-code
- for `tokio` configuration.
-
- versions:
- Available `pixi` versions and their `PIXI_HOME` folders.
- default_version:
- Default `pixi` version to use for task collection - must be one
- of `versions` keys.
- PIXI_CONCURRENT_SOLVES:
- Value of `--concurrent-solves` for `pixi install`.
- PIXI_CONCURRENT_DOWNLOADS:
- Value of `--concurrent-downloads for `pixi install`.
- TOKIO_WORKER_THREADS:
- From tokio docs, "The core threads are where all asynchronous code
- runs, and Tokio will by default spawn one for each CPU core. You can
- use the environment variable TOKIO_WORKER_THREADS to override the
- default value."
+ Configuration for Pixi Task collection.
+
+ In order to use Pixi for Task collection, you must have one or more Pixi
+ binaries in your machine
+ (see
+ [example/get_pixi.sh](https://github.com/fractal-analytics-platform/fractal-server/blob/main/example/get_pixi.sh)
+ for installation example).
+
+ To let Fractal Server use these binaries for Task collection, a JSON file
+ must be prepared with the data to populate `PixiSettings` (arguments with
+ default values may be omitted).
+
+ The path to this JSON file must then be provided to Fractal via the
+ environment variable `FRACTAL_PIXI_CONFIG_FILE`.
  """

  versions: DictStrStr
+ """
+ A dictionary with Pixi versions as keys and paths to the corresponding
+ folder as values.
+
+ E.g. let's assume that you have Pixi v0.47.0 at
+ `/pixi-path/0.47.0/bin/pixi` and Pixi v0.48.2 at
+ `/pixi-path/0.48.2/bin/pixi`, then
+ ```json
+ "versions": {
+ "0.47.0": "/pixi-path/0.47.0",
+ "0.48.2": "/pixi-path/0.48.2"
+ }
+ ```
+ """
  default_version: str
+ """
+ Default Pixi version to be used for Task collection.

+ Must be a key of the `versions` dictionary.
+ """
  PIXI_CONCURRENT_SOLVES: int = 4
+ """
+ Value of
+ [`--concurrent-solves`](https://pixi.sh/latest/reference/cli/pixi/install/#arg---concurrent-solves)
+ for `pixi install`.
+ """
  PIXI_CONCURRENT_DOWNLOADS: int = 4
+ """
+ Value of
+ [`--concurrent-downloads`](https://pixi.sh/latest/reference/cli/pixi/install/#arg---concurrent-downloads)
+ for `pixi install`.
+ """
  TOKIO_WORKER_THREADS: int = 2
+ """
+ From
+ [Tokio documentation](
+ https://docs.rs/tokio/latest/tokio/#cpu-bound-tasks-and-blocking-code
+ )
+ :
+
+ The core threads are where all asynchronous code runs,
+ and Tokio will by default spawn one for each CPU core.
+ You can use the environment variable `TOKIO_WORKER_THREADS` to override
+ the default value.
+ """
  DEFAULT_ENVIRONMENT: str = "default"
+ """
+ """
  DEFAULT_PLATFORM: str = "linux-64"
+ """
+ """

  @model_validator(mode="after")
  def check_pixi_settings(self):
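Note: since the new `PixiSettings` docstring above describes a JSON configuration file, here is a sketch of a complete file matching those fields (install paths and the output location are hypothetical; fields with defaults may be omitted):

```python
import json

pixi_config = {
    "versions": {
        "0.47.0": "/pixi-path/0.47.0",
        "0.48.2": "/pixi-path/0.48.2",
    },
    "default_version": "0.48.2",
    # Optional fields, shown here with their default values
    "PIXI_CONCURRENT_SOLVES": 4,
    "PIXI_CONCURRENT_DOWNLOADS": 4,
    "TOKIO_WORKER_THREADS": 2,
    "DEFAULT_ENVIRONMENT": "default",
    "DEFAULT_PLATFORM": "linux-64",
}

with open("/etc/fractal/pixi_config.json", "w") as f:
    json.dump(pixi_config, f, indent=2)

# Point fractal-server at this file via the environment variable:
#   export FRACTAL_PIXI_CONFIG_FILE=/etc/fractal/pixi_config.json
```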
@@ -542,7 +579,7 @@ class Settings(BaseSettings):
  else:
  return "--no-cache-dir"

- FRACTAL_MAX_PIP_VERSION: str = "24.0"
+ FRACTAL_MAX_PIP_VERSION: str = "25.2"
  """
  Maximum value at which to update `pip` before performing task collection.
  """
@@ -578,6 +615,9 @@ class Settings(BaseSettings):
  """

  FRACTAL_PIXI_CONFIG_FILE: Path | None = None
+ """
+ Path to the Pixi configuration JSON file that will populate `PixiSettings`.
+ """

  pixi: PixiSettings | None = None

@@ -3,6 +3,7 @@ import logging
  import time
  from collections.abc import Generator
  from contextlib import contextmanager
+ from functools import wraps
  from pathlib import Path
  from threading import Lock
  from typing import Any
@@ -44,6 +45,32 @@ class SSHConfig(BaseModel):
  logger = set_logger(__name__)


+ def retry_if_socket_error(func):
+ @wraps(func)
+ def func_with_retry(*args, **kwargs):
+ self = args[0]
+ try:
+ return func(*args, **kwargs)
+ except NoValidConnectionsError as e:
+ self.logger.warning(
+ f"Socket error type: {e.__class__.__name__}, {e}"
+ )
+ self.logger.warning("Now refresh connection")
+ self.refresh_connection()
+ self.logger.warning(f"Now retry {func.__name__}")
+ return func(*args, **kwargs)
+ except OSError as e:
+ self.logger.warning(f"Something goes wrong,{e}")
+ if "Socket is closed" in str(e):
+ self.logger.warning("Now refresh connection")
+ self.refresh_connection()
+ self.logger.warning(f"Now retry {func.__name__}")
+ return func(*args, **kwargs)
+ raise e
+
+ return func_with_retry
+
+
  @contextmanager
  def _acquire_lock_with_timeout(
  lock: Lock,
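Note: the new `retry_if_socket_error` decorator above replaces the previous per-call retry loop in `run_command`: on a `NoValidConnectionsError`, or on an `OSError` mentioning a closed socket, it refreshes the SSH connection once and retries the wrapped method. A minimal behavioral sketch, assuming fractal-server 2.15.8 is installed (the demo class is hypothetical; in fractal-server the decorator wraps `FractalSSH` methods such as `run_command` and `send_file`):

```python
import logging

from fractal_server.ssh._fabric import retry_if_socket_error


class DemoSSH:
    """Toy object exposing the attributes the decorator relies on."""

    def __init__(self) -> None:
        self.logger = logging.getLogger("demo-ssh")
        self.calls = 0

    def refresh_connection(self) -> None:
        self.logger.warning("Reconnecting (toy implementation).")

    @retry_if_socket_error
    def flaky_command(self) -> str:
        self.calls += 1
        if self.calls == 1:
            raise OSError("Socket is closed")  # triggers refresh + one retry
        return "ok"


assert DemoSSH().flaky_command() == "ok"  # first attempt fails, retry succeeds
```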
@@ -92,8 +119,6 @@ class FractalSSH:
  _lock:
  _connection:
  default_lock_timeout:
- default_max_attempts:
- default_base_interval:
  sftp_get_prefetch:
  sftp_get_max_requests:
  logger_name:
@@ -102,8 +127,6 @@ class FractalSSH:
  _lock: Lock
  _connection: Connection
  default_lock_timeout: float
- default_max_attempts: int
- default_base_interval: float
  sftp_get_prefetch: bool
  sftp_get_max_requests: int
  logger_name: str
@@ -112,8 +135,6 @@ class FractalSSH:
  self,
  connection: Connection,
  default_timeout: float = 250,
- default_max_attempts: int = 5,
- default_base_interval: float = 3.0,
  sftp_get_prefetch: bool = False,
  sftp_get_max_requests: int = 64,
  logger_name: str = __name__,
@@ -121,8 +142,6 @@ class FractalSSH:
  self._lock = Lock()
  self._connection = connection
  self.default_lock_timeout = default_timeout
- self.default_base_interval = default_base_interval
- self.default_max_attempts = default_max_attempts
  self.sftp_get_prefetch = sftp_get_prefetch
  self.sftp_get_max_requests = sftp_get_max_requests
  self.logger_name = logger_name
@@ -188,6 +207,7 @@ class FractalSSH:
  """
  return self._connection.sftp()

+ @retry_if_socket_error
  def read_remote_json_file(self, filepath: str) -> dict[str, Any]:
  self.logger.info(f"START reading remote JSON file {filepath}.")
  with _acquire_lock_with_timeout(
@@ -208,6 +228,7 @@ class FractalSSH:
  self.logger.info(f"END reading remote JSON file {filepath}.")
  return data

+ @retry_if_socket_error
  def read_remote_text_file(self, filepath: str) -> dict[str, Any]:
  """
  Read a remote text file into a string.
@@ -270,6 +291,9 @@ class FractalSSH:
  )
  # Try opening the connection (if it was closed) or to re-open it (if
  # an error happened).
+ self.refresh_connection()
+
+ def refresh_connection(self) -> None:
  try:
  self.close()
  with _acquire_lock_with_timeout(
@@ -307,13 +331,12 @@ class FractalSSH:
  if self._connection.client is not None:
  self._connection.client.close()

+ @retry_if_socket_error
  def run_command(
  self,
  *,
  cmd: str,
  allow_char: str | None = None,
- max_attempts: int | None = None,
- base_interval: float | None = None,
  lock_timeout: int | None = None,
  ) -> str:
  """
@@ -332,81 +355,50 @@ class FractalSSH:

  validate_cmd(cmd, allow_char=allow_char)

- actual_max_attempts = self.default_max_attempts
- if max_attempts is not None:
- actual_max_attempts = max_attempts
-
- actual_base_interval = self.default_base_interval
- if base_interval is not None:
- actual_base_interval = base_interval
-
  actual_lock_timeout = self.default_lock_timeout
  if lock_timeout is not None:
  actual_lock_timeout = lock_timeout

  t_0 = time.perf_counter()
- ind_attempt = 0
- while ind_attempt <= actual_max_attempts:
- ind_attempt += 1
- prefix = f"[attempt {ind_attempt}/{actual_max_attempts}]"
- self.logger.info(f"{prefix} START running '{cmd}' over SSH.")
- try:
- # Case 1: Command runs successfully
- res = self._run(
- cmd,
- label=f"run {cmd}",
- lock_timeout=actual_lock_timeout,
- hide=True,
- in_stream=False,
- )
- t_1 = time.perf_counter()
- self.logger.info(
- f"{prefix} END running '{cmd}' over SSH, "
- f"elapsed={t_1 - t_0:.3f}"
- )
- self.logger.debug("STDOUT:")
- self.logger.debug(res.stdout)
- self.logger.debug("STDERR:")
- self.logger.debug(res.stderr)
- return res.stdout
- except NoValidConnectionsError as e:
- # Case 2: Command fails with a connection error
- self.logger.warning(
- f"{prefix} Running command `{cmd}` over SSH failed.\n"
- f"Original NoValidConnectionError:\n{str(e)}.\n"
- f"{e.errors=}\n"
- )
- if ind_attempt < actual_max_attempts:
- sleeptime = actual_base_interval**ind_attempt
- self.logger.warning(
- f"{prefix} Now sleep {sleeptime:.3f} "
- "seconds and retry."
- )
- time.sleep(sleeptime)
- else:
- self.logger.error(f"{prefix} Reached last attempt")
- raise FractalSSHConnectionError(
- f"Reached last attempt "
- f"({max_attempts=}) for running "
- f"'{cmd}' over SSH"
- )
- except UnexpectedExit as e:
- # Case 3: Command fails with an actual error
- error_msg = (
- f"{prefix} Running command `{cmd}` over SSH failed.\n"
- f"Original error:\n{str(e)}."
- )
- self.logger.error(error_msg)
- raise FractalSSHCommandError(error_msg)
- except FractalSSHTimeoutError as e:
- raise e
- except Exception as e:
- self.logger.error(
- f"Running command `{cmd}` over SSH failed.\n"
- f"Original Error:\n{str(e)}."
- )
- raise FractalSSHUnknownError(f"{type(e)}: {str(e)}")
+ try:
+ # Case 1: Command runs successfully
+ res = self._run(
+ cmd,
+ label=f"run {cmd}",
+ lock_timeout=actual_lock_timeout,
+ hide=True,
+ in_stream=False,
+ )
+ t_1 = time.perf_counter()
+ self.logger.info(
+ f"END running '{cmd}' over SSH, " f"elapsed={t_1 - t_0:.3f}"
+ )
+ self.logger.debug("STDOUT:")
+ self.logger.debug(res.stdout)
+ self.logger.debug("STDERR:")
+ self.logger.debug(res.stderr)
+ return res.stdout
+ # Case 2: Command fails with a connection error
+ except NoValidConnectionsError as e:
+ raise NoValidConnectionsError(errors=e.errors)
+ except UnexpectedExit as e:
+ # Case 3: Command fails with an actual error
+ error_msg = (
+ f"Running command `{cmd}` over SSH failed.\n"
+ f"Original error:\n{str(e)}."
+ )
+ self.logger.error(error_msg)
+ raise FractalSSHCommandError(error_msg)
+ except FractalSSHTimeoutError as e:
+ raise e
+ except Exception as e:
+ self.logger.error(
+ f"Running command `{cmd}` over SSH failed.\n"
+ f"Original Error:\n{str(e)}."
+ )
+ raise FractalSSHUnknownError(f"{type(e)}: {str(e)}")

+ @retry_if_socket_error
  def send_file(
  self,
  *,
@@ -447,6 +439,7 @@ class FractalSSH:
  ),
  )

+ @retry_if_socket_error
  def fetch_file(
  self,
  *,
@@ -538,6 +531,7 @@ class FractalSSH:
  cmd = f"rm -r {folder}"
  self.run_command(cmd=cmd)

+ @retry_if_socket_error
  def write_remote_file(
  self,
  *,
@@ -574,6 +568,7 @@ class FractalSSH:
  elapsed = time.perf_counter() - t_start
  self.logger.info(f"[write_remote_file] END, {elapsed=} s ({path}).")

+ @retry_if_socket_error
  def remote_exists(self, path: str) -> bool:
  """
  Return whether a remote file/folder exists
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: fractal-server
- Version: 2.15.7
+ Version: 2.15.8
  Summary: Backend component of the Fractal analytics platform
  License: BSD-3-Clause
  Author: Tommaso Comparin
@@ -1,4 +1,4 @@
- fractal_server/__init__.py,sha256=SIpD8yvbZydQsQo0H2fClLPf0pyYpjeHjRBmNRTNLKk,23
+ fractal_server/__init__.py,sha256=eybsCSXKOGknxAUjcGaSuqucfMoHoV6kgH5_eHJcYyQ,23
  fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
  fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
  fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -6,7 +6,7 @@ fractal_server/app/db/__init__.py,sha256=U2gwpNyy79iMsK1lg43LRl9z-MW8wiOaICJ7GGd
  fractal_server/app/models/__init__.py,sha256=xJWiGAwpXmCpnFMC4c_HTqoUCzMOXrakoGLUH_uMvdA,415
  fractal_server/app/models/linkusergroup.py,sha256=3KkkE4QIUAlTrBAZs_tVy0pGvAxUAq6yOEjflct_z2M,678
  fractal_server/app/models/linkuserproject.py,sha256=hvaxh3Lkiy2uUCwB8gvn8RorCpvxSSdzWdCS_U1GL7g,315
- fractal_server/app/models/security.py,sha256=NfR0I4dRbOEmCWOKeEHyFO-uqhSJ11dS0B6yWtZRqs4,3852
+ fractal_server/app/models/security.py,sha256=bwye5cLPUq4R2ArC2KKvUhi_XQtgliM7a-t5Ofnk_rw,3826
  fractal_server/app/models/user_settings.py,sha256=WdnrLOP2w8Nqh_3K-4-b-8a7XEC9ILrE6SfbYoTk-7Y,1279
  fractal_server/app/models/v2/__init__.py,sha256=vjHwek7-IXmaZZL9VF0nD30YL9ca4wNc8P4RXJK_kDc,832
  fractal_server/app/models/v2/accounting.py,sha256=i-2TsjqyuclxFQ21C-TeDoss7ZBTRuXdzIJfVr2UxwE,1081
@@ -23,17 +23,17 @@ fractal_server/app/routes/admin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
  fractal_server/app/routes/admin/v2/__init__.py,sha256=_5lqb6-M8-fZqE1HRMep6pAFYRUKMxrvbZOKs-RXWkw,933
  fractal_server/app/routes/admin/v2/accounting.py,sha256=-eLzBirENvdhjhrCf6bs1pTriIui19DAr_k-zuJB7Y8,3593
  fractal_server/app/routes/admin/v2/impersonate.py,sha256=gc4lshfEPFR6W2asH7aKu6hqE6chzusdhAUVV9p51eU,1131
- fractal_server/app/routes/admin/v2/job.py,sha256=ysfrmsaLUwdo3_YbXOhh6j2zZqKrj5dF2s4gJf1ngtQ,8604
+ fractal_server/app/routes/admin/v2/job.py,sha256=NJwAecyiQDRwLEqg1fKQsllxW1FdtbwnLJQHocBQinU,8597
  fractal_server/app/routes/admin/v2/project.py,sha256=MA_LdoEuSuisSGRO43TapMuJ080y5iaUGSAUgKuuKOg,1188
  fractal_server/app/routes/admin/v2/task.py,sha256=8njjq_zcvNW-Ewxn7WfsRQbs_aV5h-7pgcRIAegzTnc,4308
  fractal_server/app/routes/admin/v2/task_group.py,sha256=JUCxKnwSNmOKyh6lpqfG5jT5BuRE_U2X6HqD5VVhWPg,7096
- fractal_server/app/routes/admin/v2/task_group_lifecycle.py,sha256=2J3M9VXWD_0j9jRTZ5APuUXl9E-aVv0qF8K02vvcO3s,9150
+ fractal_server/app/routes/admin/v2/task_group_lifecycle.py,sha256=qHQbSD7EKSOLvt4KigbXIb2iJmI9sAEGROSrMS8Vwfg,9108
  fractal_server/app/routes/api/__init__.py,sha256=B8l6PSAhR10iZqHEiyTat-_0tkeKdrCigIE6DJGP5b8,638
  fractal_server/app/routes/api/v2/__init__.py,sha256=D3sRRsqkmZO6kBxUjg40q0aRDsnuXI4sOOfn0xF9JsM,2820
  fractal_server/app/routes/api/v2/_aux_functions.py,sha256=tfnxqbufKqd5dbFUzOFlRPc9EgURhpNa5V2V4agzndg,14259
  fractal_server/app/routes/api/v2/_aux_functions_history.py,sha256=ykTyiY3H-nGSgGfcVkb2xQqJd65YOY2aWVK9sfqTdIY,4439
  fractal_server/app/routes/api/v2/_aux_functions_task_lifecycle.py,sha256=GpKfw9yj01LmOAuNMTOreU1PFkCKpjK5oCt7_wp35-A,6741
- fractal_server/app/routes/api/v2/_aux_functions_task_version_update.py,sha256=WLDOYCnb6fnS5avKflyx6yN24Vo1n5kJk5ZyiKbzb8Y,1175
+ fractal_server/app/routes/api/v2/_aux_functions_task_version_update.py,sha256=PKjV7r8YsPRXoNiVSnOK4KBYVV3l_Yb_ZPrqAkMkXrQ,1182
  fractal_server/app/routes/api/v2/_aux_functions_tasks.py,sha256=sjlhv6LliDvhttVTjC2xhIFO-Vj3roE3bEv7_TZ1h0A,12494
  fractal_server/app/routes/api/v2/_aux_task_group_disambiguation.py,sha256=8x1_q9FyCzItnPmdSdLQuwUTy4B9xCsXscp97_lJcpM,4635
  fractal_server/app/routes/api/v2/dataset.py,sha256=6u4MFqJ3YZ0Zq6Xx8CRMrTPKW55ZaR63Uno21DqFr4Q,8889
@@ -43,13 +43,13 @@ fractal_server/app/routes/api/v2/job.py,sha256=8xRTwh_OCHmK9IfI_zUASa2ozewR0qu0z
  fractal_server/app/routes/api/v2/pre_submission_checks.py,sha256=AEN6w2X5kCkSpZydudvuTPhl_VZUQ4zUav6D3NiF6r8,4935
  fractal_server/app/routes/api/v2/project.py,sha256=ldMEyjtwGpX2teu85sCNWaubDFlw-En8U1SA7G1VaIw,4567
  fractal_server/app/routes/api/v2/status_legacy.py,sha256=ZckHeBy8y21cyQ_OLY-VmkapzMhd3g9ae-qg-r4-uVo,6317
- fractal_server/app/routes/api/v2/submit.py,sha256=3RPfpuMWEp-orB6iUQCQYmxXu9nRC0m1V5-dznsDrks,8780
- fractal_server/app/routes/api/v2/task.py,sha256=ptS47XtxnHzk9bPNZV24Wfroo5sP19RE0-LsfX0ZvOc,7018
+ fractal_server/app/routes/api/v2/submit.py,sha256=C9cLecHoVl_VXQFAHs2Y2LVE6S_v2PHpa-w5XQIaT-M,9174
+ fractal_server/app/routes/api/v2/task.py,sha256=WxkWRw-0fJkgmMt9fSDc6oikBFd6nSS-JdPwR0z24tg,7024
  fractal_server/app/routes/api/v2/task_collection.py,sha256=UcS7tb9RjiDimeI-iWwD0wqnXYQEdEZT56PnPa0zC9Q,12233
  fractal_server/app/routes/api/v2/task_collection_custom.py,sha256=j-vI1_FjoxU59VwkwUTYW5HhMVB5NObMkWLCjAKbers,6726
  fractal_server/app/routes/api/v2/task_collection_pixi.py,sha256=1rCBfKANT9zcKcOVMQTByVpP8GjdrzeaMzyQT05k0nY,7517
  fractal_server/app/routes/api/v2/task_group.py,sha256=DrXMc0D7IYqUsrlrpF5JO1zVKvx7RPdKPPOhz3FqqJE,9203
- fractal_server/app/routes/api/v2/task_group_lifecycle.py,sha256=itsldN_aYT_-wuCmwAoVKbH8D6eZpL_z89gCBZDM5cg,9996
+ fractal_server/app/routes/api/v2/task_group_lifecycle.py,sha256=9EF5o8ASGa-hMpL_PLekDNp1uOnJ13SM7x9YmSSLAbM,10004
  fractal_server/app/routes/api/v2/task_version_update.py,sha256=RoeN21kYvBwc8xLcJZ8t0hZtl32P3FhMqiofntHyrjQ,8233
  fractal_server/app/routes/api/v2/workflow.py,sha256=n0sRsDHGrjDAoo_0klqoYW4AuZ1LajWtlzEd6gKmvps,10697
  fractal_server/app/routes/api/v2/workflow_import.py,sha256=kOGDaCj0jCGK1WSYGbnUjtUg2U1YxUY9UMH-2ilqJg4,9027
@@ -80,18 +80,18 @@ fractal_server/app/runner/executors/local/runner.py,sha256=syJcUYkdKeGYbD33VzuX9
  fractal_server/app/runner/executors/slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/slurm_common/_batching.py,sha256=gbHZIxt90GjUwhB9_UInwVqpX-KdxRQMDeXzUagdL3U,8816
  fractal_server/app/runner/executors/slurm_common/_job_states.py,sha256=nuV-Zba38kDrRESOVB3gaGbrSPZc4q7YGichQaeqTW0,238
- fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=7L80pH2lOe6qAH-8VzmcUOMgA9KJoQLTPDpkonKwhH8,15945
- fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=1Sh56lb7NERVtsBMvVs4K7nVHhMy_KDbwquPl1ub8vE,37937
+ fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=FI2p4yQ16571C_YM3Vs_tJnb71urVykwxfIXDXa79I8,15721
+ fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=yWl71_rTp34t9KvB6jjCBQIJPEg-btsWm3oZC7ytRgI,38141
  fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256=KhQQJrGQZLX5u8fsMcIx7FN8jUKSGEeG68yO7Ay_LXg,7454
  fractal_server/app/runner/executors/slurm_common/remote.py,sha256=LHK2Ram8X8q6jNSCxnnwKUwmSJMsyQyRem_VjH53qdw,3811
  fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py,sha256=K4SdJOKsUWzDlnkb8Ug_UmTx6nBMsTqn9_oKqwE4XDI,3520
  fractal_server/app/runner/executors/slurm_ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/slurm_ssh/run_subprocess.py,sha256=SyW6t4egvbiARph2YkFjc88Hj94fCamZVi50L7ph8VM,996
- fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=Hzq01rLgOEzS6UPMzW59d4Ox-wwHlxyPF1KiKcGQvIM,7993
+ fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=3YEE3_CKQWqrgINA7LLn5Xnq510clskTP2KDSTc2orw,10098
  fractal_server/app/runner/executors/slurm_ssh/tar_commands.py,sha256=yqBjWTLmh_FzhCllt_KfbuCUGvRhsHLVWlPOZlRWLzY,1762
  fractal_server/app/runner/executors/slurm_sudo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=W-FxnVcHxMGpv4zGgJVttVQoweyGgR4uBxO22QIZkp0,2576
- fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=TGH6B1eRHgHjYjqN86F1V-WNAB_kp-nPo5TJ0FHwigY,5859
+ fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=pb2otlEQIHT9qaN-q0qlUq_BJG4-ozk-P2-C9drHbDU,5862
  fractal_server/app/runner/filenames.py,sha256=lPnxKHtdRizr6FqG3zOdjDPyWA7GoaJGTtiuJV0gA8E,70
  fractal_server/app/runner/set_start_and_last_task_index.py,sha256=NsioSzfEpGyo9ZKrV5KsbxeI7d5V3tE678Y3IAo5rHM,1218
  fractal_server/app/runner/shutdown.py,sha256=ViSNJyXWU_iWPSDOOMGNh_iQdUFrdPh_jvf8vVKLpAo,1950
@@ -103,7 +103,7 @@ fractal_server/app/runner/v2/_slurm_sudo.py,sha256=Gvsh4tUlc1_3KdF3B7zEqs-YIntC_
  fractal_server/app/runner/v2/db_tools.py,sha256=ozp4RFLB3LNI0rM0q0udi6ja8-5vooH_dVqrbmTPNDg,3323
  fractal_server/app/runner/v2/deduplicate_list.py,sha256=IVTE4abBU1bUprFTkxrTfYKnvkNTanWQ-KWh_etiT08,645
  fractal_server/app/runner/v2/merge_outputs.py,sha256=YOTKbGOM9s-uqY4KN2onoIxuHNm-v3hr5zv6Aa1KEtA,905
- fractal_server/app/runner/v2/runner.py,sha256=IsQhSxASVI-yPBO2G2uuVvZkRIoAlSY6Ev7gcSYmDOw,18989
+ fractal_server/app/runner/v2/runner.py,sha256=IE7Ups3LqMy4ijV427xugYtEz003xM7UzJD3G8Jw_T4,18995
  fractal_server/app/runner/v2/runner_functions.py,sha256=xteoDSXKxStl3ABEXyjrggPiXdXAPy9sJPfT3B8CQ3Y,19050
  fractal_server/app/runner/v2/submit_workflow.py,sha256=L6WqQC3Vm-tLe_CJXHIMbYLAeulAN2no_4HZD4cbhR0,12554
  fractal_server/app/runner/v2/task_interface.py,sha256=BRSKpitGproY48JQdCbfrghbDonA-EqPP1yIopohpPo,2525
@@ -129,7 +129,7 @@ fractal_server/app/schemas/v2/workflowtask.py,sha256=6eweAMyziwaoMT-7R1fVJYunIeZ
  fractal_server/app/security/__init__.py,sha256=oJ8RVglpOvWPQY4RokiE2YA72Nqo42dZEjywWTt8xr8,14032
  fractal_server/app/security/signup_email.py,sha256=Xd6QYxcdmg0PHpDwmUE8XQmPcOj3Xjy5oROcIMhmltM,1472
  fractal_server/app/user_settings.py,sha256=OP1yiYKtPadxwM51_Q0hdPk3z90TCN4z1BLpQsXyWiU,1316
- fractal_server/config.py,sha256=NQ4wHfxzbd3nlCLbuATimq-5L7OQ2-4Y9MnyX1DEup0,28484
+ fractal_server/config.py,sha256=vNaDkzjqOzQMQyNVCWJ9qNMvfXKDuk2L0mMTPGzX79U,29505
  fractal_server/data_migrations/2_14_10.py,sha256=jzMg2c1zNO8C_Nho_9_EZJD6kR1-gkFNpNrMR5Hr8hM,1598
  fractal_server/data_migrations/README.md,sha256=_3AEFvDg9YkybDqCLlFPdDmGJvr6Tw7HRI14aZ3LOIw,398
  fractal_server/data_migrations/tools.py,sha256=LeMeASwYGtEqd-3wOLle6WARdTGAimoyMmRbbJl-hAM,572
@@ -185,7 +185,7 @@ fractal_server/migrations/versions/f384e1c0cf5d_drop_task_default_args_columns.p
  fractal_server/migrations/versions/fbce16ff4e47_new_history_items.py,sha256=TDWCaIoM0Q4SpRWmR9zr_rdp3lJXhCfBPTMhtrP5xYE,3950
  fractal_server/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/ssh/__init__.py,sha256=sVUmzxf7_DuXG1xoLQ1_00fo5NPhi2LJipSmU5EAkPs,124
- fractal_server/ssh/_fabric.py,sha256=7fCxTYqkAOaTTm67trfYdYQenOsI4EfrRQoG6x3M5kk,25188
+ fractal_server/ssh/_fabric.py,sha256=I_94ha3xxVESoa8opuvidcO32E4cJZF1Tow4eXPPc8o,24450
  fractal_server/string_tools.py,sha256=UJFi8rhlI6QXxX5twycLjsvOQ6x4uG7L3JdxEVDhC5A,2592
  fractal_server/syringe.py,sha256=3YJeIALH-wibuJ9R5VMNYUWh7x1-MkWT0SqGcWG5MY8,2795
  fractal_server/tasks/__init__.py,sha256=kadmVUoIghl8s190_Tt-8f-WBqMi8u8oU4Pvw39NHE8,23
@@ -230,8 +230,8 @@ fractal_server/types/validators/_workflow_task_arguments_validators.py,sha256=HL
  fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
  fractal_server/utils.py,sha256=Vn35lApt1T1J8nc09sAVqd10Cy0sa3dLipcljI-hkuk,2185
  fractal_server/zip_tools.py,sha256=H0w7wS5yE4ebj7hw1_77YQ959dl2c-L0WX6J_ro1TY4,4884
- fractal_server-2.15.7.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
- fractal_server-2.15.7.dist-info/METADATA,sha256=8pzRt84EdeTR6rbYAlgbDw6cC-YErI_N_eBIeGUtvLs,4334
- fractal_server-2.15.7.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- fractal_server-2.15.7.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
- fractal_server-2.15.7.dist-info/RECORD,,
+ fractal_server-2.15.8.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+ fractal_server-2.15.8.dist-info/METADATA,sha256=rCOwXnSjjBclcc6-qtpSeGkHxWtfyaE3Iglq7grOi1Y,4334
+ fractal_server-2.15.8.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ fractal_server-2.15.8.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+ fractal_server-2.15.8.dist-info/RECORD,,