fractal-server 2.16.6__py3-none-any.whl → 2.17.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. fractal_server/__init__.py +1 -1
  2. fractal_server/__main__.py +129 -22
  3. fractal_server/app/db/__init__.py +9 -11
  4. fractal_server/app/models/security.py +7 -3
  5. fractal_server/app/models/user_settings.py +0 -4
  6. fractal_server/app/models/v2/__init__.py +4 -0
  7. fractal_server/app/models/v2/profile.py +16 -0
  8. fractal_server/app/models/v2/project.py +3 -0
  9. fractal_server/app/models/v2/resource.py +130 -0
  10. fractal_server/app/models/v2/task_group.py +3 -0
  11. fractal_server/app/routes/admin/v2/__init__.py +4 -0
  12. fractal_server/app/routes/admin/v2/_aux_functions.py +55 -0
  13. fractal_server/app/routes/admin/v2/profile.py +86 -0
  14. fractal_server/app/routes/admin/v2/resource.py +229 -0
  15. fractal_server/app/routes/admin/v2/task_group_lifecycle.py +48 -82
  16. fractal_server/app/routes/api/__init__.py +26 -7
  17. fractal_server/app/routes/api/v2/_aux_functions.py +27 -1
  18. fractal_server/app/routes/api/v2/_aux_functions_history.py +2 -2
  19. fractal_server/app/routes/api/v2/_aux_functions_task_lifecycle.py +3 -3
  20. fractal_server/app/routes/api/v2/_aux_functions_tasks.py +7 -7
  21. fractal_server/app/routes/api/v2/project.py +5 -1
  22. fractal_server/app/routes/api/v2/submit.py +32 -24
  23. fractal_server/app/routes/api/v2/task.py +5 -0
  24. fractal_server/app/routes/api/v2/task_collection.py +36 -47
  25. fractal_server/app/routes/api/v2/task_collection_custom.py +11 -5
  26. fractal_server/app/routes/api/v2/task_collection_pixi.py +34 -40
  27. fractal_server/app/routes/api/v2/task_group_lifecycle.py +39 -82
  28. fractal_server/app/routes/auth/_aux_auth.py +3 -3
  29. fractal_server/app/routes/auth/current_user.py +45 -7
  30. fractal_server/app/routes/auth/oauth.py +1 -1
  31. fractal_server/app/routes/auth/users.py +9 -0
  32. fractal_server/app/routes/aux/_runner.py +2 -1
  33. fractal_server/app/routes/aux/validate_user_profile.py +62 -0
  34. fractal_server/app/routes/aux/validate_user_settings.py +12 -9
  35. fractal_server/app/schemas/user.py +20 -13
  36. fractal_server/app/schemas/user_settings.py +0 -4
  37. fractal_server/app/schemas/v2/__init__.py +11 -0
  38. fractal_server/app/schemas/v2/profile.py +72 -0
  39. fractal_server/app/schemas/v2/resource.py +117 -0
  40. fractal_server/app/security/__init__.py +6 -13
  41. fractal_server/app/security/signup_email.py +2 -2
  42. fractal_server/app/user_settings.py +2 -12
  43. fractal_server/config/__init__.py +23 -0
  44. fractal_server/config/_database.py +58 -0
  45. fractal_server/config/_email.py +170 -0
  46. fractal_server/config/_init_data.py +27 -0
  47. fractal_server/config/_main.py +216 -0
  48. fractal_server/config/_settings_config.py +7 -0
  49. fractal_server/images/tools.py +3 -3
  50. fractal_server/logger.py +3 -3
  51. fractal_server/main.py +14 -21
  52. fractal_server/migrations/versions/90f6508c6379_drop_useroauth_username.py +36 -0
  53. fractal_server/migrations/versions/a80ac5a352bf_resource_profile.py +195 -0
  54. fractal_server/runner/config/__init__.py +2 -0
  55. fractal_server/runner/config/_local.py +21 -0
  56. fractal_server/runner/config/_slurm.py +128 -0
  57. fractal_server/runner/config/slurm_mem_to_MB.py +63 -0
  58. fractal_server/runner/exceptions.py +4 -0
  59. fractal_server/runner/executors/base_runner.py +17 -7
  60. fractal_server/runner/executors/local/get_local_config.py +21 -86
  61. fractal_server/runner/executors/local/runner.py +48 -5
  62. fractal_server/runner/executors/slurm_common/_batching.py +2 -2
  63. fractal_server/runner/executors/slurm_common/base_slurm_runner.py +59 -25
  64. fractal_server/runner/executors/slurm_common/get_slurm_config.py +38 -54
  65. fractal_server/runner/executors/slurm_common/remote.py +1 -1
  66. fractal_server/runner/executors/slurm_common/{_slurm_config.py → slurm_config.py} +3 -254
  67. fractal_server/runner/executors/slurm_common/slurm_job_task_models.py +1 -1
  68. fractal_server/runner/executors/slurm_ssh/runner.py +12 -14
  69. fractal_server/runner/executors/slurm_sudo/_subprocess_run_as_user.py +2 -2
  70. fractal_server/runner/executors/slurm_sudo/runner.py +12 -12
  71. fractal_server/runner/v2/_local.py +36 -21
  72. fractal_server/runner/v2/_slurm_ssh.py +40 -4
  73. fractal_server/runner/v2/_slurm_sudo.py +41 -11
  74. fractal_server/runner/v2/db_tools.py +1 -1
  75. fractal_server/runner/v2/runner.py +3 -11
  76. fractal_server/runner/v2/runner_functions.py +42 -28
  77. fractal_server/runner/v2/submit_workflow.py +87 -108
  78. fractal_server/runner/versions.py +8 -3
  79. fractal_server/ssh/_fabric.py +6 -6
  80. fractal_server/tasks/config/__init__.py +3 -0
  81. fractal_server/tasks/config/_pixi.py +127 -0
  82. fractal_server/tasks/config/_python.py +51 -0
  83. fractal_server/tasks/v2/local/_utils.py +7 -7
  84. fractal_server/tasks/v2/local/collect.py +13 -5
  85. fractal_server/tasks/v2/local/collect_pixi.py +26 -10
  86. fractal_server/tasks/v2/local/deactivate.py +7 -1
  87. fractal_server/tasks/v2/local/deactivate_pixi.py +5 -1
  88. fractal_server/tasks/v2/local/delete.py +4 -0
  89. fractal_server/tasks/v2/local/reactivate.py +13 -5
  90. fractal_server/tasks/v2/local/reactivate_pixi.py +27 -9
  91. fractal_server/tasks/v2/ssh/_pixi_slurm_ssh.py +11 -10
  92. fractal_server/tasks/v2/ssh/_utils.py +6 -7
  93. fractal_server/tasks/v2/ssh/collect.py +19 -12
  94. fractal_server/tasks/v2/ssh/collect_pixi.py +34 -16
  95. fractal_server/tasks/v2/ssh/deactivate.py +12 -8
  96. fractal_server/tasks/v2/ssh/deactivate_pixi.py +14 -10
  97. fractal_server/tasks/v2/ssh/delete.py +12 -9
  98. fractal_server/tasks/v2/ssh/reactivate.py +18 -12
  99. fractal_server/tasks/v2/ssh/reactivate_pixi.py +36 -17
  100. fractal_server/tasks/v2/templates/4_pip_show.sh +4 -6
  101. fractal_server/tasks/v2/utils_database.py +2 -2
  102. fractal_server/tasks/v2/utils_python_interpreter.py +8 -16
  103. fractal_server/tasks/v2/utils_templates.py +7 -10
  104. fractal_server/utils.py +1 -1
  105. {fractal_server-2.16.6.dist-info → fractal_server-2.17.0a0.dist-info}/METADATA +1 -1
  106. {fractal_server-2.16.6.dist-info → fractal_server-2.17.0a0.dist-info}/RECORD +110 -88
  107. fractal_server/config.py +0 -906
  108. /fractal_server/{runner → app}/shutdown.py +0 -0
  109. {fractal_server-2.16.6.dist-info → fractal_server-2.17.0a0.dist-info}/WHEEL +0 -0
  110. {fractal_server-2.16.6.dist-info → fractal_server-2.17.0a0.dist-info}/entry_points.txt +0 -0
  111. {fractal_server-2.16.6.dist-info → fractal_server-2.17.0a0.dist-info}/licenses/LICENSE +0 -0

fractal_server/runner/config/_slurm.py
@@ -0,0 +1,128 @@
+ from typing import Annotated
+
+ from pydantic import AfterValidator
+ from pydantic import BaseModel
+ from pydantic import ConfigDict
+ from pydantic.types import PositiveInt
+
+ from fractal_server.runner.config.slurm_mem_to_MB import slurm_mem_to_MB
+ from fractal_server.types import DictStrStr
+ from fractal_server.types import NonEmptyStr
+
+
+ MemMBType = Annotated[
+     PositiveInt | NonEmptyStr, AfterValidator(slurm_mem_to_MB)
+ ]
+
+
+ class _SlurmConfigSet(BaseModel):
+     """
+     Options for the default or gpu SLURM config.
+
+     Attributes:
+         partition:
+         cpus_per_task:
+         mem:
+             See `_parse_mem_value` for details on allowed values.
+         constraint:
+         gres:
+         time:
+         exclude:
+         nodelist:
+         account:
+         extra_lines:
+     """
+
+     model_config = ConfigDict(extra="forbid")
+
+     partition: NonEmptyStr | None = None
+     cpus_per_task: PositiveInt | None = None
+     mem: MemMBType | None = None
+     constraint: NonEmptyStr | None = None
+     gres: NonEmptyStr | None = None
+     exclude: NonEmptyStr | None = None
+     nodelist: NonEmptyStr | None = None
+     time: NonEmptyStr | None = None
+     account: NonEmptyStr | None = None
+     extra_lines: list[NonEmptyStr] | None = None
+     gpus: NonEmptyStr | None = None
+
+
+ class _BatchingConfigSet(BaseModel):
+     """
+     Options to configure the batching strategy (that is, how to combine
+     several tasks in a single SLURM job).
+
+     Attributes:
+         target_cpus_per_job:
+         max_cpus_per_job:
+         target_mem_per_job:
+             (see `_parse_mem_value` for details on allowed values)
+         max_mem_per_job:
+             (see `_parse_mem_value` for details on allowed values)
+         target_num_jobs:
+         max_num_jobs:
+     """
+
+     model_config = ConfigDict(extra="forbid")
+
+     target_num_jobs: PositiveInt
+     max_num_jobs: PositiveInt
+     target_cpus_per_job: PositiveInt
+     max_cpus_per_job: PositiveInt
+     target_mem_per_job: MemMBType
+     max_mem_per_job: MemMBType
+
+
+ class JobRunnerConfigSLURM(BaseModel):
+     """
+     Common SLURM configuration.
+
+     Note: this is a common and abstract class, which gets transformed into
+     more specific configuration objects during job execution.
+
+     Valid JSON example
+     ```JSON
+     {
+       "default_slurm_config": {
+         "partition": "main",
+         "cpus_per_task": 1
+       },
+       "gpu_slurm_config": {
+         "partition": "gpu",
+         "extra_lines": ["#SBATCH --gres=gpu:v100:1"]
+       },
+       "batching_config": {
+         "target_cpus_per_job": 1,
+         "max_cpus_per_job": 1,
+         "target_mem_per_job": 200,
+         "max_mem_per_job": 500,
+         "target_num_jobs": 2,
+         "max_num_jobs": 4
+       },
+       "user_local_exports": {
+         "CELLPOSE_LOCAL_MODELS_PATH": "CELLPOSE_LOCAL_MODELS_PATH",
+         "NAPARI_CONFIG": "napari_config.json"
+       }
+     }
+     ```
+
+     Attributes:
+         default_slurm_config:
+             Common default options for all tasks.
+         gpu_slurm_config:
+             Default configuration for all GPU tasks.
+         batching_config:
+             Configuration of the batching strategy.
+         user_local_exports:
+             Key-value pairs to be included as `export`-ed variables in SLURM
+             submission script, after prepending values with the user's cache
+             directory.
+     """
+
+     model_config = ConfigDict(extra="forbid")
+
+     default_slurm_config: _SlurmConfigSet
+     gpu_slurm_config: _SlurmConfigSet | None = None
+     batching_config: _BatchingConfigSet
+     user_local_exports: DictStrStr | None = None
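
The docstring's JSON example validates directly against `JobRunnerConfigSLURM`. The sketch below is illustrative only: it assumes fractal-server 2.17.0a0 is installed and uses the `fractal_server.runner.config` import path shown later in this diff; the `"mem": "4G"` entry is an added example meant to show the `MemMBType` validator normalizing a memory string to MB.

```python
# Sketch only: assumes fractal-server 2.17.0a0 and the import path shown in
# the base_runner.py hunk below; "mem": "4G" is an added illustrative entry.
from fractal_server.runner.config import JobRunnerConfigSLURM

example = {
    "default_slurm_config": {"partition": "main", "cpus_per_task": 1, "mem": "4G"},
    "gpu_slurm_config": {
        "partition": "gpu",
        "extra_lines": ["#SBATCH --gres=gpu:v100:1"],
    },
    "batching_config": {
        "target_cpus_per_job": 1,
        "max_cpus_per_job": 1,
        "target_mem_per_job": 200,
        "max_mem_per_job": 500,
        "target_num_jobs": 2,
        "max_num_jobs": 4,
    },
    "user_local_exports": {"NAPARI_CONFIG": "napari_config.json"},
}

config = JobRunnerConfigSLURM(**example)
# The AfterValidator attached to MemMBType converts "4G" into an integer MB value
assert config.default_slurm_config.mem == 4000
```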

fractal_server/runner/config/slurm_mem_to_MB.py
@@ -0,0 +1,63 @@
+ from fractal_server.logger import set_logger
+ from fractal_server.runner.exceptions import SlurmConfigError
+
+
+ logger = set_logger(__name__)
+
+
+ def slurm_mem_to_MB(raw_mem: str | int) -> int:
+     """
+     Convert a memory-specification string into an integer (in MB units), or
+     simply return the input if it is already an integer.
+
+     Supported units are `"M", "G", "T"`, with `"M"` being the default; some
+     parsing examples are: `"10M" -> 10`, `"3G" -> 3000`.
+
+     Args:
+         raw_mem:
+             A string (e.g. `"100M"`) or an integer (in MB).
+
+     Returns:
+         Integer value of memory in MB units.
+     """
+
+     info = f"[_parse_mem_value] {raw_mem=}"
+     error_msg = (
+         f"{info}, invalid specification of memory requirements "
+         "(valid examples: 93, 71M, 93G, 71T)."
+     )
+
+     # Handle integer argument
+     if type(raw_mem) is int:
+         return raw_mem
+
+     # Handle string argument
+     if not raw_mem[0].isdigit():  # fail e.g. for raw_mem="M100"
+         logger.error(error_msg)
+         raise SlurmConfigError(error_msg)
+     if raw_mem.isdigit():
+         mem_MB = int(raw_mem)
+     elif raw_mem.endswith("M"):
+         stripped_raw_mem = raw_mem.strip("M")
+         if not stripped_raw_mem.isdigit():
+             logger.error(error_msg)
+             raise SlurmConfigError(error_msg)
+         mem_MB = int(stripped_raw_mem)
+     elif raw_mem.endswith("G"):
+         stripped_raw_mem = raw_mem.strip("G")
+         if not stripped_raw_mem.isdigit():
+             logger.error(error_msg)
+             raise SlurmConfigError(error_msg)
+         mem_MB = int(stripped_raw_mem) * 10**3
+     elif raw_mem.endswith("T"):
+         stripped_raw_mem = raw_mem.strip("T")
+         if not stripped_raw_mem.isdigit():
+             logger.error(error_msg)
+             raise SlurmConfigError(error_msg)
+         mem_MB = int(stripped_raw_mem) * 10**6
+     else:
+         logger.error(error_msg)
+         raise SlurmConfigError(error_msg)
+
+     logger.debug(f"{info}, return {mem_MB}")
+     return mem_MB
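
Following the branches above, a few illustrative conversions and the failure mode (a sketch, using the module and exception paths introduced in this diff, assuming fractal-server 2.17.0a0 is installed):

```python
# Sketch of the conversion rules implemented in slurm_mem_to_MB above.
from fractal_server.runner.config.slurm_mem_to_MB import slurm_mem_to_MB
from fractal_server.runner.exceptions import SlurmConfigError

assert slurm_mem_to_MB(128) == 128          # integers pass through (already MB)
assert slurm_mem_to_MB("64") == 64          # bare digits default to MB
assert slurm_mem_to_MB("100M") == 100       # "M" suffix is also MB
assert slurm_mem_to_MB("4G") == 4_000       # "G" is counted as 10**3 MB
assert slurm_mem_to_MB("2T") == 2_000_000   # "T" is counted as 10**6 MB

try:
    slurm_mem_to_MB("M100")                 # unit before the digits is rejected
except SlurmConfigError as exc:
    print(f"Invalid memory specification: {exc}")
```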

fractal_server/runner/exceptions.py
@@ -63,3 +63,7 @@ class JobExecutionError(RuntimeError):
          content = str(self)
          message = f"JobExecutionError\n{content}"
          return message
+
+
+ class SlurmConfigError(ValueError):
+     pass

fractal_server/runner/executors/base_runner.py
@@ -3,6 +3,8 @@ from typing import Any

  from fractal_server.app.schemas.v2.task import TaskType
  from fractal_server.logger import set_logger
+ from fractal_server.runner.config import JobRunnerConfigLocal
+ from fractal_server.runner.config import JobRunnerConfigSLURM
  from fractal_server.runner.task_files import TaskFiles


@@ -39,21 +41,24 @@ class BaseRunner:
      Base class for Fractal runners.
      """

+     shared_config: JobRunnerConfigLocal | JobRunnerConfigSLURM
+
      executor_error_log: str | None = None

      def submit(
          self,
+         *,
          base_command: str,
          workflow_task_order: int,
          workflow_task_id: int,
          task_name: str,
          parameters: dict[str, Any],
          history_unit_id: int,
-         task_type: TaskType,
+         task_type: SubmitTaskType,
          task_files: TaskFiles,
-         config: Any,
          user_id: int,
-     ) -> tuple[Any, BaseException]:
+         config: Any,
+     ) -> tuple[Any, BaseException | None]:
          """
          Run a single fractal task.

@@ -74,20 +79,25 @@

      def multisubmit(
          self,
+         *,
          base_command: str,
          workflow_task_order: int,
          workflow_task_id: int,
          task_name: str,
-         list_parameters: list[dict[str, Any]],
+         list_parameters: list[dict],
          history_unit_ids: list[int],
          list_task_files: list[TaskFiles],
-         task_type: TaskType,
+         task_type: MultisubmitTaskType,
          config: Any,
          user_id: int,
      ) -> tuple[dict[int, Any], dict[int, BaseException]]:
          """
          Run a parallel fractal task.

+         Note: `list_parameters`, `list_task_files` and `history_unit_ids`
+         have the same size. For parallel tasks, this is also the number of
+         input images, while for compound tasks these can differ.
+
          Args:
              base_command:
              workflow_task_order:
@@ -108,7 +118,7 @@
      def validate_submit_parameters(
          self,
          parameters: dict[str, Any],
-         task_type: TaskType,
+         task_type: SubmitTaskType | MultisubmitTaskType,
      ) -> None:
          """
          Validate parameters for `submit` method
@@ -143,7 +153,7 @@
      def validate_multisubmit_parameters(
          self,
          *,
-         task_type: TaskType,
+         task_type: MultisubmitTaskType,
          list_parameters: list[dict[str, Any]],
          list_task_files: list[TaskFiles],
          history_unit_ids: list[int],

fractal_server/runner/executors/local/get_local_config.py
@@ -1,116 +1,51 @@
- # Copyright 2022 (C) Friedrich Miescher Institute for Biomedical Research and
- # University of Zurich
- #
- # Original authors:
- # Tommaso Comparin <tommaso.comparin@exact-lab.it>
- #
- # This file is part of Fractal and was originally developed by eXact lab S.r.l.
- # <exact-lab.it> under contract with Liberali Lab from the Friedrich Miescher
- # Institute for Biomedical Research and Pelkmans Lab from the University of
- # Zurich.
  """
  Submodule to handle the local-backend configuration for a WorkflowTask
  """
- import json
- from pathlib import Path
  from typing import Literal

- from pydantic import BaseModel
- from pydantic import ConfigDict
- from pydantic import ValidationError
-
  from fractal_server.app.models.v2 import WorkflowTaskV2
- from fractal_server.config import get_settings
- from fractal_server.syringe import Inject
-
-
- class LocalBackendConfigError(ValueError):
-     """
-     Local-backend configuration error
-     """
-
-     pass
-
-
- class LocalBackendConfig(BaseModel):
-     """
-     Specifications of the local-backend configuration
-
-     Attributes:
-         parallel_tasks_per_job:
-             Maximum number of tasks to be run in parallel as part of a call to
-             `FractalThreadPoolExecutor.map`; if `None`, then all tasks will
-             start at the same time.
-     """
-
-     model_config = ConfigDict(extra="forbid")
-     parallel_tasks_per_job: int | None = None
-
-     @property
-     def batch_size(self) -> int:
-         return self.parallel_tasks_per_job or 1
+ from fractal_server.runner.config import JobRunnerConfigLocal


  def get_local_backend_config(
+     shared_config: JobRunnerConfigLocal,
      wftask: WorkflowTaskV2,
      which_type: Literal["non_parallel", "parallel"],
-     config_path: Path | None = None,
      tot_tasks: int = 1,
- ) -> LocalBackendConfig:
+ ) -> JobRunnerConfigLocal:
      """
-     Prepare a `LocalBackendConfig` configuration object
+     Prepare a specific `LocalBackendConfig` configuration.

-     The sources for `parallel_tasks_per_job` attributes, starting from the
-     highest-priority one, are
+     The base configuration is the runner-level `shared_config` object, based
+     on `resource.jobs_runner_config`. We then incorporate attributes from
+     `wftask.meta_{non_parallel,parallel}` - with higher priority.

-     1. Properties in `wftask.meta_parallel` or `wftask.meta_non_parallel`
-        (depending on `which_type`);
-     2. The general content of the local-backend configuration file;
-     3. The default value (`None`).
-
-     Arguments:
+     Args:
+         shared_config:
+             Configuration object based on `resource.jobs_runner_config`.
          wftask:
             WorkflowTaskV2 for which the backend configuration should
            be prepared.
-         config_path:
-             Path of local-backend configuration file; if `None`, use
-             `FRACTAL_LOCAL_CONFIG_FILE` variable from settings.
+         which_type:
+             Whether we should look at the non-parallel or parallel part
+             of `wftask`.
+         tot_tasks: Not used here, only present as a common interface.

      Returns:
-         A local-backend configuration object
+         A ready-to-use local-backend configuration object.
      """

-     key = "parallel_tasks_per_job"
-     default_value = None
-
      if which_type == "non_parallel":
          wftask_meta = wftask.meta_non_parallel
      elif which_type == "parallel":
          wftask_meta = wftask.meta_parallel
      else:
          raise ValueError(
-             "`get_local_backend_config` received an invalid argument"
-             f" {which_type=}."
+             f"Invalid {which_type=} in `get_local_backend_config`."
          )

-     if wftask_meta and key in wftask_meta:
-         parallel_tasks_per_job = wftask_meta[key]
-     else:
-         if not config_path:
-             settings = Inject(get_settings)
-             config_path = settings.FRACTAL_LOCAL_CONFIG_FILE
-         if config_path is None:
-             parallel_tasks_per_job = default_value
-         else:
-             with config_path.open("r") as f:
-                 env = json.load(f)
-             try:
-                 _ = LocalBackendConfig(**env)
-             except ValidationError as e:
-                 raise LocalBackendConfigError(
-                     f"Error while loading {config_path=}. "
-                     f"Original error:\n{str(e)}"
-                 )
-
-             parallel_tasks_per_job = env.get(key, default_value)
-     return LocalBackendConfig(parallel_tasks_per_job=parallel_tasks_per_job)
+     __KEY__ = "parallel_tasks_per_job"
+     output = shared_config.model_copy(deep=True)
+     if wftask_meta and __KEY__ in wftask_meta:
+         output.parallel_tasks_per_job = wftask_meta[__KEY__]
+     return output
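
The replacement logic reduces to "deep-copy the shared config, then let the `wftask` meta entry win". Below is a self-contained sketch of that pattern using a stand-in pydantic model (`LocalConfigSketch` is hypothetical and mirrors only the one field shown in this hunk, not the real `JobRunnerConfigLocal`):

```python
# Stand-in sketch of the override pattern used by get_local_backend_config;
# LocalConfigSketch is hypothetical, not the real JobRunnerConfigLocal.
from pydantic import BaseModel


class LocalConfigSketch(BaseModel):
    parallel_tasks_per_job: int | None = None


shared_config = LocalConfigSketch(parallel_tasks_per_job=8)  # resource-level value
wftask_meta = {"parallel_tasks_per_job": 2}                  # wftask.meta_parallel

output = shared_config.model_copy(deep=True)
if wftask_meta and "parallel_tasks_per_job" in wftask_meta:
    output.parallel_tasks_per_job = wftask_meta["parallel_tasks_per_job"]

assert output.parallel_tasks_per_job == 2         # per-task meta takes priority
assert shared_config.parallel_tasks_per_job == 8  # shared config is untouched
```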

fractal_server/runner/executors/local/runner.py
@@ -5,11 +5,13 @@ from pathlib import Path
  from typing import Any

  from ..call_command_wrapper import call_command_wrapper
- from .get_local_config import LocalBackendConfig
  from fractal_server.app.db import get_sync_db
+ from fractal_server.app.models import Profile
+ from fractal_server.app.models import Resource
  from fractal_server.app.schemas.v2 import HistoryUnitStatus
  from fractal_server.app.schemas.v2 import TaskType
  from fractal_server.logger import set_logger
+ from fractal_server.runner.config import JobRunnerConfigLocal
  from fractal_server.runner.exceptions import TaskExecutionError
  from fractal_server.runner.executors.base_runner import BaseRunner
  from fractal_server.runner.executors.base_runner import MultisubmitTaskType
@@ -56,15 +58,21 @@ def run_single_task(
  class LocalRunner(BaseRunner):
      executor: ThreadPoolExecutor
      root_dir_local: Path
+     shared_config: JobRunnerConfigLocal

      def __init__(
          self,
          root_dir_local: Path,
+         resource: Resource,
+         profile: Profile,
      ):
          self.root_dir_local = root_dir_local
          self.root_dir_local.mkdir(parents=True, exist_ok=True)
          self.executor = ThreadPoolExecutor()
          logger.debug("Create LocalRunner")
+         self.shared_config = JobRunnerConfigLocal(
+             **resource.jobs_runner_config
+         )

      def __enter__(self):
          logger.debug("Enter LocalRunner")
@@ -80,6 +88,7 @@ class LocalRunner(BaseRunner):

      def submit(
          self,
+         *,
          base_command: str,
          workflow_task_order: int,
          workflow_task_id: int,
@@ -87,10 +96,26 @@
          parameters: dict[str, Any],
          history_unit_id: int,
          task_files: TaskFiles,
-         config: LocalBackendConfig,
+         config: JobRunnerConfigLocal,
          task_type: SubmitTaskType,
          user_id: int,
-     ) -> tuple[Any, Exception]:
+     ) -> tuple[Any, Exception | None]:
+         """
+         Run a single fractal task.
+
+         Args:
+             base_command:
+             workflow_task_order:
+             workflow_task_id:
+             task_name:
+             parameters: Dictionary of parameters.
+             history_unit_id:
+                 Database ID of the corresponding `HistoryUnit` entry.
+             task_type: Task type.
+             task_files: `TaskFiles` object.
+             config: Runner-specific parameters.
+             user_id:
+         """
          logger.debug("[submit] START")

          try:
@@ -146,21 +171,39 @@

      def multisubmit(
          self,
+         *,
          base_command: str,
          workflow_task_order: int,
          workflow_task_id: int,
          task_name: str,
-         list_parameters: list[dict],
+         list_parameters: list[dict[str, Any]],
          history_unit_ids: list[int],
          list_task_files: list[TaskFiles],
          task_type: MultisubmitTaskType,
-         config: LocalBackendConfig,
+         config: JobRunnerConfigLocal,
          user_id: int,
      ) -> tuple[dict[int, Any], dict[int, BaseException]]:
          """
+         Run a parallel fractal task.
+
          Note: `list_parameters`, `list_task_files` and `history_unit_ids`
          have the same size. For parallel tasks, this is also the number of
          input images, while for compound tasks these can differ.
+
+         Args:
+             base_command:
+             workflow_task_order:
+             workflow_task_id:
+             task_name:
+             list_parameters:
+                 List of dictionaries of parameters (each one must include
+                 `zarr_urls` key).
+             history_unit_ids:
+                 Database IDs of the corresponding `HistoryUnit` entries.
+             list_task_files: `TaskFiles` objects.
+             task_type: Task type.
+             config: Runner-specific parameters.
+             user_id:
          """

          logger.debug(f"[multisubmit] START, {len(list_parameters)=}")

fractal_server/runner/executors/slurm_common/_batching.py
@@ -36,7 +36,7 @@ def _estimate_parallel_tasks_per_job(
      assumes that further checks will be performed on the output of the current
      function, as is the case in the `heuristics` function below.

-     Arguments:
+     Args:
          cpus_per_task: Number of CPUs needed for one task.
          mem_per_task: Memory (in MB) needed for one task.
          max_cpus_per_job: Maximum number of CPUs available for one job.
@@ -91,7 +91,7 @@ def heuristics(
      introduce in-job queues to satisfy the hard constraint on the maximum
      number of jobs.

-     Arguments:
+     Args:
          tot_tasks:
              Total number of elements to be processed (e.g. number of images in
              a OME-NGFF array).