fractal-server 2.14.0a34__py3-none-any.whl → 2.14.0a36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
- __VERSION__ = "2.14.0a34"
+ __VERSION__ = "2.14.0a36"
@@ -32,6 +32,7 @@ class BaseRunner(object):
  task_type: TaskTypeType,
  task_files: TaskFiles,
  config: Any,
+ user_id: int,
  ) -> tuple[Any, BaseException]:
  """
  Run a single fractal task.
@@ -44,6 +45,7 @@ class BaseRunner(object):
  task_type: Task type.
  task_files: `TaskFiles` object.
  config: Runner-specific parameters.
+ user_id:
  """
  raise NotImplementedError()
 
@@ -55,6 +57,7 @@ class BaseRunner(object):
  list_task_files: list[TaskFiles],
  task_type: TaskTypeType,
  config: Any,
+ user_id: int,
  ) -> tuple[dict[int, Any], dict[int, BaseException]]:
  """
  Run a parallel fractal task.
@@ -68,6 +71,7 @@ class BaseRunner(object):
  task_type: Task type.
  task_files: `TaskFiles` object.
  config: Runner-specific parameters.
+ user_id
  """
  raise NotImplementedError()
 
@@ -58,6 +58,7 @@ class LocalRunner(BaseRunner):
  "converter_compound",
  ],
  config: LocalBackendConfig,
+ user_id: int,
  ) -> tuple[Any, Exception]:
  logger.debug("[submit] START")
 
@@ -116,6 +117,7 @@ class LocalRunner(BaseRunner):
  list_task_files: list[TaskFiles],
  task_type: Literal["parallel", "compound", "converter_compound"],
  config: LocalBackendConfig,
+ user_id: int,
  ) -> tuple[dict[int, Any], dict[int, BaseException]]:
  """
  Note: `list_parameters`, `list_task_files` and `history_unit_ids`
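Both `BaseRunner.submit` and `BaseRunner.multisubmit` now take a required `user_id: int`, and every concrete runner (the `LocalRunner` above, the SLURM runners below) adds the same argument to its overrides. A minimal sketch of a conforming subclass, with simplified placeholder types; the real overrides take the full argument lists shown in the surrounding hunks:

    from typing import Any


    class DummyRunner:
        # Placeholder overrides: only the new user_id argument matters here.
        def submit(
            self, *args: Any, user_id: int, **kwargs: Any
        ) -> tuple[Any, BaseException]:
            ...

        def multisubmit(
            self, *args: Any, user_id: int, **kwargs: Any
        ) -> tuple[dict[int, Any], dict[int, BaseException]]:
            ...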
@@ -15,6 +15,7 @@ from ..slurm_common.slurm_job_task_models import SlurmTask
  from ._job_states import STATES_FINISHED
  from fractal_server import __VERSION__
  from fractal_server.app.db import get_sync_db
+ from fractal_server.app.models.v2 import AccountingRecordSlurm
  from fractal_server.app.runner.exceptions import JobExecutionError
  from fractal_server.app.runner.exceptions import TaskExecutionError
  from fractal_server.app.runner.executors.base_runner import BaseRunner
@@ -34,7 +35,20 @@ SHUTDOWN_EXCEPTION = JobExecutionError(SHUTDOWN_ERROR_MESSAGE)
 
  logger = set_logger(__name__)
 
- # NOTE: see issue 2481.
+
+ def create_accounting_record_slurm(
+ *,
+ user_id: int,
+ slurm_job_ids: list[int],
+ ) -> None:
+ with next(get_sync_db()) as db:
+ db.add(
+ AccountingRecordSlurm(
+ user_id=user_id,
+ slurm_job_ids=slurm_job_ids,
+ )
+ )
+ db.commit()
 
 
  class BaseSlurmRunner(BaseRunner):
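The new `create_accounting_record_slurm` helper persists one `AccountingRecordSlurm` row per submission phase. As the `submit` and `multisubmit` hunks further down show, it is called right after the sbatch submission phase:

    # Excerpt from the submit/multisubmit hunks below (inside BaseSlurmRunner):
    create_accounting_record_slurm(
        user_id=user_id,             # forwarded from the caller of the runner
        slurm_job_ids=self.job_ids,  # SLURM job IDs created in this phase
    )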
@@ -158,7 +172,7 @@ class BaseSlurmRunner(BaseRunner):
  slurm_job: SlurmJob,
  slurm_config: SlurmConfig,
  ) -> str:
- logger.info("[_submit_single_sbatch] START")
+ logger.debug("[_submit_single_sbatch] START")
  # Prepare input pickle(s)
  versions = dict(
  python=sys.version_info[:3],
@@ -175,7 +189,7 @@ class BaseSlurmRunner(BaseRunner):
  funcser = cloudpickle.dumps((versions, func, _args, _kwargs))
  with open(task.input_pickle_file_local, "wb") as f:
  f.write(funcser)
- logger.info(
+ logger.debug(
  "[_submit_single_sbatch] Written "
  f"{task.input_pickle_file_local=}"
  )
@@ -186,7 +200,7 @@ class BaseSlurmRunner(BaseRunner):
  local=task.input_pickle_file_local,
  remote=task.input_pickle_file_remote,
  )
- logger.info(
+ logger.debug(
  "[_submit_single_sbatch] Transferred "
  f"{task.input_pickle_file_local=}"
  )
@@ -229,7 +243,7 @@ class BaseSlurmRunner(BaseRunner):
  ]
  )
  script_lines = slurm_config.sort_script_lines(script_lines)
- logger.info(script_lines)
+ logger.debug(script_lines)
 
  # Always print output of `uname -n` and `pwd`
  script_lines.append('\necho "Hostname: $(uname -n)"')
@@ -258,7 +272,7 @@ class BaseSlurmRunner(BaseRunner):
  # Write submission script
  with open(slurm_job.slurm_submission_script_local, "w") as f:
  f.write(script)
- logger.info(
+ logger.debug(
  "[_submit_single_sbatch] Written "
  f"{slurm_job.slurm_submission_script_local=}"
  )
@@ -280,10 +294,10 @@ class BaseSlurmRunner(BaseRunner):
  # Run sbatch
  pre_submission_cmds = slurm_config.pre_submission_commands
  if len(pre_submission_cmds) == 0:
- logger.info(f"Now run {submit_command=}")
+ logger.debug(f"Now run {submit_command=}")
  sbatch_stdout = self._run_remote_cmd(submit_command)
  else:
- logger.info(f"Now using {pre_submission_cmds=}")
+ logger.debug(f"Now using {pre_submission_cmds=}")
  script_lines = pre_submission_cmds + [submit_command]
  wrapper_script_contents = "\n".join(script_lines)
  wrapper_script_contents = f"{wrapper_script_contents}\n"
@@ -300,22 +314,22 @@ class BaseSlurmRunner(BaseRunner):
  )
  with open(wrapper_script, "w") as f:
  f.write(wrapper_script_contents)
- logger.info(f"Now run {wrapper_script=}")
+ logger.debug(f"Now run {wrapper_script=}")
  sbatch_stdout = self._run_remote_cmd(f"bash {wrapper_script}")
 
  # Submit SLURM job and retrieve job ID
- logger.info(f"[_submit_single_sbatc] {sbatch_stdout=}")
+ logger.info(f"[_submit_single_sbatch] {sbatch_stdout=}")
  stdout = sbatch_stdout.strip("\n")
  submitted_job_id = int(stdout)
  slurm_job.slurm_job_id = str(submitted_job_id)
 
  # Add job to self.jobs
  self.jobs[slurm_job.slurm_job_id] = slurm_job
- logger.info(
+ logger.debug(
  "[_submit_single_sbatch] Added "
  f"{slurm_job.slurm_job_id} to self.jobs."
  )
- logger.info("[_submit_single_sbatch] END")
+ logger.debug("[_submit_single_sbatch] END")
 
  def _fetch_artifacts(
  self,
@@ -448,8 +462,9 @@ class BaseSlurmRunner(BaseRunner):
  "compound",
  "converter_compound",
  ],
+ user_id: int,
  ) -> tuple[Any, Exception]:
- logger.info("[submit] START")
+ logger.debug("[submit] START")
  try:
  workdir_local = task_files.wftask_subfolder_local
  workdir_remote = task_files.wftask_subfolder_remote
@@ -473,10 +488,10 @@ class BaseSlurmRunner(BaseRunner):
  )
 
  # Create task subfolder
- logger.info("[submit] Create local/remote folders - START")
+ logger.debug("[submit] Create local/remote folders - START")
  self._mkdir_local_folder(folder=workdir_local.as_posix())
  self._mkdir_remote_folder(folder=workdir_remote.as_posix())
- logger.info("[submit] Create local/remote folders - END")
+ logger.debug("[submit] Create local/remote folders - END")
 
  # Submission phase
  slurm_job = SlurmJob(
@@ -502,7 +517,12 @@ class BaseSlurmRunner(BaseRunner):
  slurm_job=slurm_job,
  slurm_config=config,
  )
- logger.info(f"[submit] END submission phase, {self.job_ids=}")
+ logger.debug(f"[submit] END submission phase, {self.job_ids=}")
+
+ create_accounting_record_slurm(
+ user_id=user_id,
+ slurm_job_ids=self.job_ids,
+ )
 
  # NOTE: see issue 2444
  settings = Inject(get_settings)
@@ -511,7 +531,7 @@ class BaseSlurmRunner(BaseRunner):
  time.sleep(sleep_time)
 
  # Retrieval phase
- logger.info("[submit] START retrieval phase")
+ logger.debug("[submit] START retrieval phase")
  scancelled_job_ids = []
  while len(self.jobs) > 0:
  # Look for finished jobs
@@ -554,7 +574,7 @@ class BaseSlurmRunner(BaseRunner):
  if len(self.jobs) > 0:
  scancelled_job_ids = self.wait_and_check_shutdown()
 
- logger.info("[submit] END")
+ logger.debug("[submit] END")
  return result, exception
 
  except Exception as e:
@@ -578,6 +598,7 @@ class BaseSlurmRunner(BaseRunner):
  list_task_files: list[TaskFiles],
  task_type: Literal["parallel", "compound", "converter_compound"],
  config: SlurmConfig,
+ user_id: int,
  ) -> tuple[dict[int, Any], dict[int, BaseException]]:
  """
  Note: `list_parameters`, `list_task_files` and `history_unit_ids`
@@ -585,7 +606,7 @@ class BaseSlurmRunner(BaseRunner):
  input images, while for compound tasks these can differ.
  """
 
- logger.info(f"[multisubmit] START, {len(list_parameters)=}")
+ logger.debug(f"[multisubmit] START, {len(list_parameters)=}")
  try:
 
  if self.is_shutdown():
@@ -639,7 +660,7 @@ class BaseSlurmRunner(BaseRunner):
  raise RuntimeError("Something wrong here while batching tasks")
 
  # Part 1/3: Iterate over chunks, prepare SlurmJob objects
- logger.info("[multisubmit] Prepare `SlurmJob`s.")
+ logger.debug("[multisubmit] Prepare `SlurmJob`s.")
  jobs_to_submit = []
  for ind_batch, chunk in enumerate(args_batches):
  # Read prefix based on the first task of this batch
@@ -669,7 +690,7 @@ class BaseSlurmRunner(BaseRunner):
  )
 
  # NOTE: see issue 2431
- logger.info("[multisubmit] Transfer files and submit jobs.")
+ logger.debug("[multisubmit] Transfer files and submit jobs.")
  for slurm_job in jobs_to_submit:
  self._submit_single_sbatch(
  func,
@@ -677,11 +698,16 @@ class BaseSlurmRunner(BaseRunner):
  slurm_config=config,
  )
 
- logger.info(f"END submission phase, {self.job_ids=}")
+ logger.info(f"[multisubmit] END submission phase, {self.job_ids=}")
+
+ create_accounting_record_slurm(
+ user_id=user_id,
+ slurm_job_ids=self.job_ids,
+ )
 
  settings = Inject(get_settings)
  sleep_time = settings.FRACTAL_SLURM_INTERVAL_BEFORE_RETRIEVAL
- logger.warning(f"[submit] Now sleep {sleep_time} seconds.")
+ logger.warning(f"[multisubmit] Now sleep {sleep_time} seconds.")
  time.sleep(sleep_time)
  except Exception as e:
  logger.error(
@@ -701,7 +727,7 @@ class BaseSlurmRunner(BaseRunner):
  return results, exceptions
 
  # Retrieval phase
- logger.info("[multisubmit] START retrieval phase")
+ logger.debug("[multisubmit] START retrieval phase")
  scancelled_job_ids = []
  while len(self.jobs) > 0:
  # Look for finished jobs
@@ -723,10 +749,12 @@ class BaseSlurmRunner(BaseRunner):
 
  with next(get_sync_db()) as db:
  for slurm_job_id in finished_job_ids:
- logger.info(f"[multisubmit] Now process {slurm_job_id=}")
+ logger.debug(f"[multisubmit] Now process {slurm_job_id=}")
  slurm_job = self.jobs.pop(slurm_job_id)
  for task in slurm_job.tasks:
- logger.info(f"[multisubmit] Now process {task.index=}")
+ logger.debug(
+ f"[multisubmit] Now process {task.index=}"
+ )
  was_job_scancelled = slurm_job_id in scancelled_job_ids
  if fetch_artifacts_exception is not None:
  result = None
@@ -775,7 +803,7 @@ class BaseSlurmRunner(BaseRunner):
  if len(self.jobs) > 0:
  scancelled_job_ids = self.wait_and_check_shutdown()
 
- logger.info("[multisubmit] END")
+ logger.debug("[multisubmit] END")
  return results, exceptions
 
  def check_fractal_server_versions(self) -> None:
@@ -814,7 +842,7 @@ class BaseSlurmRunner(BaseRunner):
  if self.jobs:
  scancel_string = " ".join(scancelled_job_ids)
  scancel_cmd = f"scancel {scancel_string}"
- logger.warning(f"Now scancel-ing SLURM jobs {scancel_string}")
+ logger.warning(f"[scancel_jobs] {scancel_string}")
  try:
  self._run_remote_cmd(scancel_cmd)
  except Exception as e:
@@ -54,18 +54,21 @@ def _check_versions_mismatch(
  do not match with the ones on the server
  """
 
- server_python_version = server_versions["python"]
- worker_python_version = sys.version_info[:3]
+ server_python_version = list(server_versions["python"])
+ worker_python_version = list(sys.version_info[:3])
  if worker_python_version != server_python_version:
- # FIXME: turn this into an error, after fixing a broader CI issue, see
- # https://github.com/fractal-analytics-platform/fractal-server/issues/375
- logging.warning(
- f"{server_python_version=} but {worker_python_version=}. "
- "cloudpickle is not guaranteed to correctly load "
- "pickle files created with different python versions. "
- "Note, however, that if you reached this line it means that "
- "the pickle file was likely loaded correctly."
- )
+ if worker_python_version[:2] != server_python_version[:2]:
+ # FIXME: Turn this into an error, in some version post 2.14.
+ logging.error(
+ f"{server_python_version=} but {worker_python_version=}. "
+ "This configuration will be deprecated in a future version, "
+ "please contact the admin of this Fractal instance."
+ )
+ else:
+ # Major.minor versions match, patch versions differ
+ logging.warning(
+ f"{server_python_version=} but {worker_python_version=}."
+ )
 
  server_cloudpickle_version = server_versions["cloudpickle"]
  worker_cloudpickle_version = cloudpickle.__version__
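The Python-version check now distinguishes two cases: a major.minor mismatch between server and worker is logged as an error, while a patch-only mismatch is only a warning. A standalone sketch of the same rule (simplified function name and arguments, not the real `_check_versions_mismatch` signature):

    import logging


    def compare_python_versions(server: tuple[int, ...], worker: tuple[int, ...]) -> None:
        server_python_version = list(server)
        worker_python_version = list(worker)
        if worker_python_version != server_python_version:
            if worker_python_version[:2] != server_python_version[:2]:
                # Different major.minor: treated as an error
                logging.error(f"{server_python_version=} but {worker_python_version=}.")
            else:
                # Same major.minor, different patch: only a warning
                logging.warning(f"{server_python_version=} but {worker_python_version=}.")


    compare_python_versions((3, 11, 9), (3, 11, 7))  # patch-only difference -> warning
    compare_python_versions((3, 11, 9), (3, 12, 1))  # minor difference -> error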
@@ -113,7 +113,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
  elapsed = time.perf_counter() - t_0
  logger.debug(
  "[_fetch_artifacts] Created filelist "
- f"({len(filelist)=}, from start: {elapsed:.3f} s)."
+ f"({len(filelist)=}, from start: {elapsed=:.3f} s)."
  )
 
  # Write filelist to file remotely
@@ -125,7 +125,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
  elapsed = time.perf_counter() - t_0
  logger.debug(
  f"[_fetch_artifacts] File list written to {tmp_filelist_path} "
- f"(from start: {elapsed:.3f} s)."
+ f"(from start: {elapsed=:.3f} s)."
  )
 
  # Create remote tarfile
@@ -140,7 +140,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
  t_1_tar = time.perf_counter()
  logger.info(
  f"[_fetch_artifacts] Remote archive {tarfile_path_remote} created"
- f" - elapsed: {t_1_tar - t_0_tar:.3f} s"
+ f" - elapsed={t_1_tar - t_0_tar:.3f} s"
  )
 
  # Fetch tarfile
@@ -153,7 +153,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
  logger.info(
  "[_fetch_artifacts] Subfolder archive transferred back "
  f"to {tarfile_path_local}"
- f" - elapsed: {t_1_get - t_0_get:.3f} s"
+ f" - elapsed={t_1_get - t_0_get:.3f} s"
  )
 
  # Extract tarfile locally
@@ -163,7 +163,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
  Path(tarfile_path_local).unlink(missing_ok=True)
 
  t_1 = time.perf_counter()
- logger.info(f"[_fetch_artifacts] End - elapsed: {t_1 - t_0:.3f} s")
+ logger.info(f"[_fetch_artifacts] End - elapsed={t_1 - t_0:.3f} s")
 
  def _send_inputs(self, jobs: list[SlurmJob]) -> None:
  """
@@ -190,7 +190,7 @@ class SlurmSSHRunner(BaseSlurmRunner):
  t_1_put = time.perf_counter()
  logger.info(
  f"Subfolder archive transferred to {tarfile_path_remote}"
- f" - elapsed: {t_1_put - t_0_put:.3f} s"
+ f" - elapsed={t_1_put - t_0_put:.3f} s"
  )
 
  # Remove local archive
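In the first two hunks above the format changes from `{elapsed:.3f}` to `{elapsed=:.3f}`, using the f-string `=` specifier (Python 3.8+) so the variable name is printed along with its value; the remaining hunks only change the literal prefix from `elapsed: ` to `elapsed=`. For example:

    elapsed = 1.23456
    print(f"from start: {elapsed=:.3f} s")  # -> from start: elapsed=1.235 s
    print(f"from start: {elapsed:.3f} s")   # -> from start: 1.235 s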
@@ -88,18 +88,3 @@ def _mkdir_as_user(*, folder: str, user: str) -> None:
 
  cmd = f"mkdir -p {folder}"
  _run_command_as_user(cmd=cmd, user=user, check=True)
-
-
- def _path_exists_as_user(*, path: str, user: Optional[str] = None) -> bool:
- """
- Impersonate a user and check if `path` exists via `ls`
-
- Arguments:
- path: Absolute file/folder path
- user: If not `None`, user to be impersonated
- """
- res = _run_command_as_user(cmd=f"ls {path}", user=user)
- if res.returncode == 0:
- return True
- else:
- return False
@@ -18,6 +18,7 @@ from .merge_outputs import merge_outputs
  from .runner_functions import run_v2_task_compound
  from .runner_functions import run_v2_task_non_parallel
  from .runner_functions import run_v2_task_parallel
+ from .runner_functions import SubmissionOutcome
  from .task_interface import TaskOutput
  from fractal_server.app.db import get_sync_db
  from fractal_server.app.models.v2 import AccountingRecord
@@ -132,48 +133,60 @@ def execute_tasks_v2(
  history_run_id = history_run.id
 
  # TASK EXECUTION (V2)
- if task.type in ["non_parallel", "converter_non_parallel"]:
- outcomes_dict, num_tasks = run_v2_task_non_parallel(
- images=filtered_images,
- zarr_dir=zarr_dir,
- wftask=wftask,
- task=task,
- workflow_dir_local=workflow_dir_local,
- workflow_dir_remote=workflow_dir_remote,
- runner=runner,
- get_runner_config=get_runner_config,
- history_run_id=history_run_id,
- dataset_id=dataset.id,
- task_type=task.type,
- )
- elif task.type == "parallel":
- outcomes_dict, num_tasks = run_v2_task_parallel(
- images=filtered_images,
- wftask=wftask,
- task=task,
- workflow_dir_local=workflow_dir_local,
- workflow_dir_remote=workflow_dir_remote,
- runner=runner,
- get_runner_config=get_runner_config,
- history_run_id=history_run_id,
- dataset_id=dataset.id,
- )
- elif task.type in ["compound", "converter_compound"]:
- outcomes_dict, num_tasks = run_v2_task_compound(
- images=filtered_images,
- zarr_dir=zarr_dir,
- wftask=wftask,
- task=task,
- workflow_dir_local=workflow_dir_local,
- workflow_dir_remote=workflow_dir_remote,
- runner=runner,
- get_runner_config=get_runner_config,
- history_run_id=history_run_id,
- dataset_id=dataset.id,
- task_type=task.type,
- )
- else:
- raise ValueError(f"Unexpected error: Invalid {task.type=}.")
+ try:
+ if task.type in ["non_parallel", "converter_non_parallel"]:
+ outcomes_dict, num_tasks = run_v2_task_non_parallel(
+ images=filtered_images,
+ zarr_dir=zarr_dir,
+ wftask=wftask,
+ task=task,
+ workflow_dir_local=workflow_dir_local,
+ workflow_dir_remote=workflow_dir_remote,
+ runner=runner,
+ get_runner_config=get_runner_config,
+ history_run_id=history_run_id,
+ dataset_id=dataset.id,
+ user_id=user_id,
+ task_type=task.type,
+ )
+ elif task.type == "parallel":
+ outcomes_dict, num_tasks = run_v2_task_parallel(
+ images=filtered_images,
+ wftask=wftask,
+ task=task,
+ workflow_dir_local=workflow_dir_local,
+ workflow_dir_remote=workflow_dir_remote,
+ runner=runner,
+ get_runner_config=get_runner_config,
+ history_run_id=history_run_id,
+ dataset_id=dataset.id,
+ user_id=user_id,
+ )
+ elif task.type in ["compound", "converter_compound"]:
+ outcomes_dict, num_tasks = run_v2_task_compound(
+ images=filtered_images,
+ zarr_dir=zarr_dir,
+ wftask=wftask,
+ task=task,
+ workflow_dir_local=workflow_dir_local,
+ workflow_dir_remote=workflow_dir_remote,
+ runner=runner,
+ get_runner_config=get_runner_config,
+ history_run_id=history_run_id,
+ dataset_id=dataset.id,
+ task_type=task.type,
+ user_id=user_id,
+ )
+ else:
+ raise ValueError(f"Unexpected error: Invalid {task.type=}.")
+ except Exception as e:
+ outcomes_dict = {
+ 0: SubmissionOutcome(
+ result=None,
+ exception=e,
+ )
+ }
+ num_tasks = 0
 
  # POST TASK EXECUTION
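The task dispatch in `execute_tasks_v2` is now wrapped in a `try`/`except`, so a failure raised while submitting a task is captured as a failed outcome (with `num_tasks = 0`) and the post-task bookkeeping still runs. A self-contained sketch of the same pattern, where `SubmissionOutcome` is a simplified stand-in for the class imported from `runner_functions`:

    from dataclasses import dataclass
    from typing import Any, Optional


    @dataclass
    class SubmissionOutcome:
        result: Optional[Any] = None
        exception: Optional[BaseException] = None


    def dispatch(task_type: str) -> tuple[dict[int, SubmissionOutcome], int]:
        try:
            if task_type == "parallel":
                return {0: SubmissionOutcome(result="ok")}, 1
            raise ValueError(f"Unexpected error: Invalid {task_type=}.")
        except Exception as e:
            # Any dispatch-time failure becomes one failed outcome,
            # and no tasks are counted as submitted.
            return {0: SubmissionOutcome(result=None, exception=e)}, 0


    outcomes, num_tasks = dispatch("unknown-type")
    assert num_tasks == 0 and isinstance(outcomes[0].exception, ValueError)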
 
@@ -139,6 +139,7 @@ def run_v2_task_non_parallel(
  dataset_id: int,
  history_run_id: int,
  task_type: Literal["non_parallel", "converter_non_parallel"],
+ user_id: int,
  ) -> tuple[dict[int, SubmissionOutcome], int]:
  """
  This runs server-side (see `executor` argument)
@@ -218,6 +219,7 @@ def run_v2_task_non_parallel(
  task_files=task_files,
  history_unit_id=history_unit_id,
  config=runner_config,
+ user_id=user_id,
  )
 
  positional_index = 0
@@ -261,6 +263,7 @@ def run_v2_task_parallel(
  ],
  dataset_id: int,
  history_run_id: int,
+ user_id: int,
  ) -> tuple[dict[int, SubmissionOutcome], int]:
  if len(images) == 0:
  return {}, 0
@@ -344,6 +347,7 @@ run_v2_task_parallel
  list_task_files=list_task_files,
  history_unit_ids=history_unit_ids,
  config=runner_config,
+ user_id=user_id,
  )
 
  outcome = {}
@@ -394,6 +398,7 @@ def run_v2_task_compound(
  dataset_id: int,
  history_run_id: int,
  task_type: Literal["compound", "converter_compound"],
+ user_id: int,
  ) -> tuple[dict[int, SubmissionOutcome], int]:
  # Get TaskFiles object
  task_files_init = TaskFiles(
@@ -463,6 +468,7 @@ run_v2_task_compound
  task_files=task_files_init,
  history_unit_id=init_history_unit_id,
  config=runner_config_init,
+ user_id=user_id,
  )
 
  init_outcome = _process_init_task_output(
@@ -565,6 +571,7 @@ run_v2_task_compound
  list_task_files=list_task_files,
  history_unit_ids=history_unit_ids,
  config=runner_config_compute,
+ user_id=user_id,
  )
 
  compute_outcomes: dict[int, SubmissionOutcome] = {}
@@ -322,7 +322,7 @@ class FractalSSH(object):
  t_1 = time.perf_counter()
  self.logger.info(
  f"{prefix} END running '{cmd}' over SSH, "
- f"elapsed {t_1 - t_0:.3f}"
+ f"elapsed={t_1 - t_0:.3f}"
  )
  self.logger.debug("STDOUT:")
  self.logger.debug(res.stdout)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: fractal-server
- Version: 2.14.0a34
+ Version: 2.14.0a36
  Summary: Backend component of the Fractal analytics platform
  License: BSD-3-Clause
  Author: Tommaso Comparin
@@ -1,4 +1,4 @@
- fractal_server/__init__.py,sha256=B5mHrNKBuCS1_dfqSKK7a3mM57rWv7Sf9ODhxz6f23g,26
+ fractal_server/__init__.py,sha256=8fGTYA0v4yFRsbsrwIUxQDcJEeH1hYIY2CuXgnou2hM,26
  fractal_server/__main__.py,sha256=rkM8xjY1KeS3l63irB8yCrlVobR-73uDapC4wvrIlxI,6957
  fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
  fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -70,23 +70,23 @@ fractal_server/app/runner/components.py,sha256=-Ii5l8d_V6f5DFOd-Zsr8VYmOsyqw0Hox
  fractal_server/app/runner/compress_folder.py,sha256=DX-4IYlSXlMd0EmXDD8M8FxisfKLbooSTrdNtzYAQAM,4876
  fractal_server/app/runner/exceptions.py,sha256=JC5ufHyeA1hYD_rkZUscI30DD8D903ncag7Z3AArmUY,4215
  fractal_server/app/runner/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- fractal_server/app/runner/executors/base_runner.py,sha256=knWOERUwRLhsd9eq5GwGxH2ZVsvPOZRRjQPGbiExqcU,5052
+ fractal_server/app/runner/executors/base_runner.py,sha256=4xxMpYycIeAOz5niaJj2xtVW_Cq-shCxP1qk4g-KwOM,5137
  fractal_server/app/runner/executors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/local/get_local_config.py,sha256=KiakXxOahaLgWvQJ1LVGYGXht6DMGR9x8Xu-TuT9aY4,3628
- fractal_server/app/runner/executors/local/runner.py,sha256=dPEpjIfJQu-st_tYiaI8VhH3y1uvK6DgfQ2cXU0vhOU,9543
+ fractal_server/app/runner/executors/local/runner.py,sha256=AfJ2KDUBdLqkeJTdRzYCkfJh4LiGbdnsHROko_Pk9vA,9587
  fractal_server/app/runner/executors/slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/app/runner/executors/slurm_common/_batching.py,sha256=ZY020JZlDS5mfpgpWTChQkyHU7iLE5kx2HVd57_C6XA,8850
  fractal_server/app/runner/executors/slurm_common/_job_states.py,sha256=nuV-Zba38kDrRESOVB3gaGbrSPZc4q7YGichQaeqTW0,238
  fractal_server/app/runner/executors/slurm_common/_slurm_config.py,sha256=_feRRnVVnvQa3AsOQqfULfOgaoj2o6Ze0-fwXwic8p4,15795
- fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=S9BdLz7Enqx6hjH154LYas38b-t52mved0TUWCbMTyo,33118
+ fractal_server/app/runner/executors/slurm_common/base_slurm_runner.py,sha256=kmou-asQJ7SHBR0VPPiQrMLP9gv_NZG3s9t2yoszGhY,33870
  fractal_server/app/runner/executors/slurm_common/get_slurm_config.py,sha256=BW6fDpPyB0VH5leVxvwzkVH3r3hC7DuSyoWmRzHITWg,7305
- fractal_server/app/runner/executors/slurm_common/remote.py,sha256=EB2uASKjrBIr25oc13XvSwf8x-TpTBr9WuaLMwNr2y4,5850
+ fractal_server/app/runner/executors/slurm_common/remote.py,sha256=L5llMccL6ctdFpDQvynJl5KbxtATX2wzpq13_3ppw-I,5929
  fractal_server/app/runner/executors/slurm_common/slurm_job_task_models.py,sha256=RoxHLKOn0_wGjnY0Sv0a9nDSiqxYZHKRoMkT3p9_G1E,3607
  fractal_server/app/runner/executors/slurm_common/utils_executors.py,sha256=naPyJI0I3lD-sYHbSXbMFGUBK4h_SggA5V91Z1Ch1Xg,1416
  fractal_server/app/runner/executors/slurm_ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=5ppdV5D1N6v3T2QUGBn1Q7dswcUKIpI6ZjX_yIO_Z9A,9439
+ fractal_server/app/runner/executors/slurm_ssh/runner.py,sha256=yKK_cjskHDiasn_QQ-k14GhplP3tNaK7Kp4yiVn44Y0,9437
  fractal_server/app/runner/executors/slurm_sudo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=O1bNg1DiSDJmQE0RmOk2Ii47DagiXp5ryd0R6KxO2OM,3177
+ fractal_server/app/runner/executors/slurm_sudo/_subprocess_run_as_user.py,sha256=BlOz4NElv3v7rUYefyeki33uaJxcSDk6rPuVZx9ocdw,2776
  fractal_server/app/runner/executors/slurm_sudo/runner.py,sha256=lPWkRT499mChP3dNLrdDjMT-nw7-LWv6g58kdF_sMRw,6290
  fractal_server/app/runner/extract_archive.py,sha256=I7UGIHXXuFvlgVPsP7GMWPu2-DiS1EiyBs7a1bvgkxI,2458
  fractal_server/app/runner/filenames.py,sha256=lPnxKHtdRizr6FqG3zOdjDPyWA7GoaJGTtiuJV0gA8E,70
@@ -101,8 +101,8 @@ fractal_server/app/runner/v2/_slurm_sudo.py,sha256=TVihkQKMX6YWEWxXJjQo0WEQOjVy7
  fractal_server/app/runner/v2/db_tools.py,sha256=du5dKhMMFMErQXbGIgu9JvO_vtMensodyPsyDeqz1yQ,3324
  fractal_server/app/runner/v2/deduplicate_list.py,sha256=IVTE4abBU1bUprFTkxrTfYKnvkNTanWQ-KWh_etiT08,645
  fractal_server/app/runner/v2/merge_outputs.py,sha256=D1L4Taieq9i71SPQyNc1kMokgHh-sV_MqF3bv7QMDBc,907
- fractal_server/app/runner/v2/runner.py,sha256=B4kAF1S-zHf2PbyHedfuiaNpu4oslVDp33KgXYcoXIk,15706
- fractal_server/app/runner/v2/runner_functions.py,sha256=2W6CFkezUsQ_k8YuC2oOEMtB_-7M9ensyhwCFvlS2No,19096
+ fractal_server/app/runner/v2/runner.py,sha256=UmUhAOOcwAT-8b28o5bWn5S9APtr5EbEvulxWJPo6r4,16269
+ fractal_server/app/runner/v2/runner_functions.py,sha256=AzsE7VF6NMz_5qc0htQkfow5_2rr-wkx50vFJTndj8I,19250
  fractal_server/app/runner/v2/runner_functions_low_level.py,sha256=_h_OOffq3d7V0uHa8Uvs0mj31y1GSZBUXjDDF3WjVjY,3620
  fractal_server/app/runner/v2/submit_workflow.py,sha256=QywUGIoHAHnrWgfnyX8W9kVqKY-RvVyNLpzrbsXZOZ4,13075
  fractal_server/app/runner/v2/task_interface.py,sha256=IXdQTI8rXFgXv1Ez0js4CjKFf3QwO2GCHRTuwiFtiTQ,2891
@@ -179,7 +179,7 @@ fractal_server/migrations/versions/f384e1c0cf5d_drop_task_default_args_columns.p
  fractal_server/migrations/versions/fbce16ff4e47_new_history_items.py,sha256=TDWCaIoM0Q4SpRWmR9zr_rdp3lJXhCfBPTMhtrP5xYE,3950
  fractal_server/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  fractal_server/ssh/__init__.py,sha256=sVUmzxf7_DuXG1xoLQ1_00fo5NPhi2LJipSmU5EAkPs,124
- fractal_server/ssh/_fabric.py,sha256=Do7wX1xsV3Pjmwqg-Z_X1_QM05RN5-sAowO_Hh7-9bk,23324
+ fractal_server/ssh/_fabric.py,sha256=jF7Nny0r3_PL1WjM1Zlw1I73Uqerx-mTaDWQlOaOpa0,23324
  fractal_server/string_tools.py,sha256=niViRrrZAOo0y6pEFI9L_eUYS1PoOiQZUBtngiLc2_k,1877
  fractal_server/syringe.py,sha256=3qSMW3YaMKKnLdgnooAINOPxnCOxP7y2jeAQYB21Gdo,2786
  fractal_server/tasks/__init__.py,sha256=kadmVUoIghl8s190_Tt-8f-WBqMi8u8oU4Pvw39NHE8,23
@@ -209,8 +209,8 @@ fractal_server/tasks/v2/utils_templates.py,sha256=Kc_nSzdlV6KIsO0CQSPs1w70zLyENP
  fractal_server/urls.py,sha256=QjIKAC1a46bCdiPMu3AlpgFbcv6a4l3ABcd5xz190Og,471
  fractal_server/utils.py,sha256=PMwrxWFxRTQRl1b9h-NRIbFGPKqpH_hXnkAT3NfZdpY,3571
  fractal_server/zip_tools.py,sha256=GjDgo_sf6V_DDg6wWeBlZu5zypIxycn_l257p_YVKGc,4876
- fractal_server-2.14.0a34.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
- fractal_server-2.14.0a34.dist-info/METADATA,sha256=YljYi9W71066fSXY2MIAuZQ_P1AqIhfyTECxk78i4og,4563
- fractal_server-2.14.0a34.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
- fractal_server-2.14.0a34.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
- fractal_server-2.14.0a34.dist-info/RECORD,,
+ fractal_server-2.14.0a36.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+ fractal_server-2.14.0a36.dist-info/METADATA,sha256=5MOzziccWO5Ah9boFwgKLEMgJKoZbRLwHQvhcj4T0-w,4563
+ fractal_server-2.14.0a36.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+ fractal_server-2.14.0a36.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+ fractal_server-2.14.0a36.dist-info/RECORD,,