fractal-server 2.3.4__py3-none-any.whl → 2.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_server/__init__.py +1 -1
- fractal_server/app/models/v2/task.py +0 -52
- fractal_server/app/routes/api/v2/_aux_functions.py +4 -6
- fractal_server/app/routes/api/v2/workflowtask.py +6 -23
- fractal_server/app/runner/executors/slurm/_slurm_config.py +16 -1
- fractal_server/app/runner/executors/slurm/ssh/executor.py +19 -4
- fractal_server/app/runner/executors/slurm/sudo/executor.py +6 -0
- fractal_server/app/runner/v2/__init__.py +1 -1
- fractal_server/app/runner/v2/_slurm_common/__init__.py +0 -0
- fractal_server/app/runner/v2/{_slurm_ssh → _slurm_common}/get_slurm_config.py +4 -13
- fractal_server/app/runner/v2/_slurm_ssh/_submit_setup.py +3 -3
- fractal_server/app/runner/v2/{_slurm → _slurm_sudo}/_submit_setup.py +3 -3
- fractal_server/ssh/_fabric.py +22 -0
- {fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/METADATA +2 -2
- {fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/RECORD +19 -19
- fractal_server/app/runner/v2/_slurm/get_slurm_config.py +0 -182
- /fractal_server/app/runner/v2/{_slurm → _slurm_sudo}/__init__.py +0 -0
- {fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/LICENSE +0 -0
- {fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/WHEEL +0 -0
- {fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/entry_points.txt +0 -0
fractal_server/__init__.py
CHANGED
@@ -1 +1 @@
-__VERSION__ = "2.3.4"
+__VERSION__ = "2.3.6"
fractal_server/app/models/v2/task.py
CHANGED
@@ -1,5 +1,3 @@
-import json
-import logging
 from typing import Any
 from typing import Optional

@@ -41,53 +39,3 @@ class TaskV2(SQLModel, table=True):

     input_types: dict[str, bool] = Field(sa_column=Column(JSON), default={})
     output_types: dict[str, bool] = Field(sa_column=Column(JSON), default={})
-
-    @property
-    def default_args_non_parallel_from_args_schema(self) -> dict[str, Any]:
-        """
-        Extract default arguments from args_schema
-        """
-        # Return {} if there is no args_schema
-        if self.args_schema_non_parallel is None:
-            return {}
-        # Try to construct default_args
-        try:
-            default_args = {}
-            properties = self.args_schema_non_parallel["properties"]
-            for prop_name, prop_schema in properties.items():
-                default_value = prop_schema.get("default", None)
-                if default_value is not None:
-                    default_args[prop_name] = default_value
-            return default_args
-        except KeyError as e:
-            logging.warning(
-                "Cannot set default_args from args_schema_non_parallel="
-                f"{json.dumps(self.args_schema_non_parallel)}\n"
-                f"Original KeyError: {str(e)}"
-            )
-            return {}
-
-    @property
-    def default_args_parallel_from_args_schema(self) -> dict[str, Any]:
-        """
-        Extract default arguments from args_schema
-        """
-        # Return {} if there is no args_schema
-        if self.args_schema_parallel is None:
-            return {}
-        # Try to construct default_args
-        try:
-            default_args = {}
-            properties = self.args_schema_parallel["properties"]
-            for prop_name, prop_schema in properties.items():
-                default_value = prop_schema.get("default", None)
-                if default_value is not None:
-                    default_args[prop_name] = default_value
-            return default_args
-        except KeyError as e:
-            logging.warning(
-                "Cannot set default_args from args_schema_parallel="
-                f"{json.dumps(self.args_schema_parallel)}\n"
-                f"Original KeyError: {str(e)}"
-            )
-            return {}
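For context, the two deleted properties shared one pattern: walk the JSON-Schema `properties` block of an args schema and collect any `default` values. A condensed, self-contained sketch of that logic (the function name is illustrative; this is not part of 2.3.6 and is kept here only to document what was removed):

from typing import Any, Optional

def default_args_from_args_schema(args_schema: Optional[dict]) -> dict[str, Any]:
    """Collect `default` values from a JSON-Schema `properties` block."""
    if args_schema is None:
        return {}
    try:
        return {
            name: prop["default"]
            for name, prop in args_schema["properties"].items()
            if prop.get("default") is not None
        }
    except KeyError:
        # e.g. a schema without a `properties` key
        return {}

assert default_args_from_args_schema(
    {"properties": {"overwrite": {"type": "boolean", "default": True}}}
) == {"overwrite": True}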
fractal_server/app/routes/api/v2/_aux_functions.py
CHANGED
@@ -422,6 +422,8 @@ async def _workflow_insert_task(

     # Get task from db, and extract default arguments via a Task property
     # method
+    # NOTE: this logic remains there for V1 tasks only. When we deprecate V1
+    # tasks, we can simplify this block
     if is_legacy_task is True:
         db_task = await db.get(Task, task_id)
         if db_task is None:
@@ -439,12 +441,8 @@ async def _workflow_insert_task(
            raise ValueError(f"TaskV2 {task_id} not found.")
        task_type = db_task.type

-        final_args_non_parallel = (
-            db_task.default_args_non_parallel_from_args_schema.copy()
-        )
-        final_args_parallel = (
-            db_task.default_args_parallel_from_args_schema.copy()
-        )
+        final_args_non_parallel = {}
+        final_args_parallel = {}
        final_meta_parallel = (db_task.meta_parallel or {}).copy()
        final_meta_non_parallel = (db_task.meta_non_parallel or {}).copy()

fractal_server/app/routes/api/v2/workflowtask.py
CHANGED
@@ -186,34 +186,17 @@ async def update_workflowtask(
                default_args = (
                    db_wf_task.task_legacy.default_args_from_args_schema
                )
+                actual_args = deepcopy(default_args)
+                if value is not None:
+                    for k, v in value.items():
+                        actual_args[k] = v
            else:
-                default_args = deepcopy(
-                    db_wf_task.task.default_args_parallel_from_args_schema
-                )
-                # Override default_args with args value items
-                actual_args = deepcopy(default_args)
-                if value is not None:
-                    for k, v in value.items():
-                        actual_args[k] = v
+                actual_args = deepcopy(value)
            if not actual_args:
                actual_args = None
            setattr(db_wf_task, key, actual_args)
        elif key == "args_non_parallel":
-
-            if db_wf_task.is_legacy_task:
-                # This is only needed so that we don't have to modify the rest
-                # of this block, but legacy task cannot take any non-parallel
-                # args (see checks above).
-                default_args = {}
-            else:
-                default_args = deepcopy(
-                    db_wf_task.task.default_args_non_parallel_from_args_schema
-                )
-            # Override default_args with args value items
-            actual_args = default_args.copy()
-            if value is not None:
-                for k, v in value.items():
-                    actual_args[k] = v
+            actual_args = deepcopy(value)
            if not actual_args:
                actual_args = None
            setattr(db_wf_task, key, actual_args)
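The net effect for V2 tasks: PATCHing `args_parallel`/`args_non_parallel` no longer merges the payload into schema-derived defaults; the payload is taken as-is. A minimal sketch of the new semantics (the function name is illustrative, not a fractal-server API):

from copy import deepcopy

def updated_args(value):
    # Mirrors the 2.3.6 branch: `actual_args = deepcopy(value)`, and an
    # empty or missing payload clears the stored args entirely.
    actual_args = deepcopy(value)
    if not actual_args:
        actual_args = None
    return actual_args

assert updated_args({"threshold": 0.5}) == {"threshold": 0.5}
assert updated_args({}) is None
assert updated_args(None) is None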
fractal_server/app/runner/executors/slurm/_slurm_config.py
CHANGED
@@ -62,6 +62,8 @@ class _SlurmConfigSet(BaseModel, extra=Extra.forbid):
    time: Optional[str]
    account: Optional[str]
    extra_lines: Optional[list[str]]
+    pre_submission_commands: Optional[list[str]]
+    gpus: Optional[str]


class _BatchingConfigSet(BaseModel, extra=Extra.forbid):
@@ -219,6 +221,7 @@ class SlurmConfig(BaseModel, extra=Extra.forbid):
        constraint: Corresponds to SLURM option.
        gres: Corresponds to SLURM option.
        account: Corresponds to SLURM option.
+        gpus: Corresponds to SLURM option.
        time: Corresponds to SLURM option (WARNING: not fully supported).
        prefix: Prefix of configuration lines in SLURM submission scripts.
        shebang_line: Shebang line for SLURM submission scripts.
@@ -240,6 +243,8 @@ class SlurmConfig(BaseModel, extra=Extra.forbid):
            Key-value pairs to be included as `export`-ed variables in SLURM
            submission script, after prepending values with the user's cache
            directory.
+        pre_submission_commands: List of commands to be prepended to the sbatch
+            command.
    """

    # Required SLURM parameters (note that the integer attributes are those
@@ -254,6 +259,7 @@ class SlurmConfig(BaseModel, extra=Extra.forbid):
    job_name: Optional[str] = None
    constraint: Optional[str] = None
    gres: Optional[str] = None
+    gpus: Optional[str] = None
    time: Optional[str] = None
    account: Optional[str] = None

@@ -274,6 +280,8 @@ class SlurmConfig(BaseModel, extra=Extra.forbid):
    target_num_jobs: int
    max_num_jobs: int

+    pre_submission_commands: list[str] = Field(default_factory=list)
+
    def _sorted_extra_lines(self) -> list[str]:
        """
        Return a copy of `self.extra_lines`, where lines starting with
@@ -340,7 +348,14 @@ class SlurmConfig(BaseModel, extra=Extra.forbid):
            f"{self.prefix} --cpus-per-task={self.cpus_per_task}",
            f"{self.prefix} --mem={mem_per_job_MB}M",
        ]
-        for key in ["job_name", "constraint", "gres", "time", "account"]:
+        for key in [
+            "job_name",
+            "constraint",
+            "gres",
+            "gpus",
+            "time",
+            "account",
+        ]:
            value = getattr(self, key)
            if value is not None:
                # Handle the `time` parameter
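A runnable miniature of the two new fields (pydantic v1, per the `pydantic >=1.10.8,<2` pin in METADATA; the class name is made up and only mirrors the lines added above):

from typing import Optional
from pydantic import BaseModel, Extra, Field

class MiniSlurmConfig(BaseModel, extra=Extra.forbid):
    gres: Optional[str] = None
    gpus: Optional[str] = None  # new in 2.3.6, forwarded like gres/time/account
    pre_submission_commands: list[str] = Field(default_factory=list)  # new in 2.3.6

cfg = MiniSlurmConfig(gpus="1", pre_submission_commands=["module load cuda"])
assert cfg.gpus == "1"
assert MiniSlurmConfig().pre_submission_commands == []  # fresh list per instance

Using `Field(default_factory=list)` rather than `= []` gives each instance its own list, avoiding the shared-mutable-default pitfall.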
fractal_server/app/runner/executors/slurm/ssh/executor.py
CHANGED
@@ -869,9 +869,22 @@ class FractalSlurmSSHExecutor(SlurmExecutor):

        # Submit job to SLURM, and get jobid
        sbatch_command = f"sbatch --parsable {job.slurm_script_remote}"
-
-
-
+        pre_submission_cmds = job.slurm_config.pre_submission_commands
+        if len(pre_submission_cmds) == 0:
+            sbatch_stdout = self.fractal_ssh.run_command(cmd=sbatch_command)
+        else:
+            logger.debug(f"Now using {pre_submission_cmds=}")
+            script_lines = pre_submission_cmds + [sbatch_command]
+            script_content = "\n".join(script_lines)
+            script_content = f"{script_content}\n"
+            script_path_remote = (
+                f"{job.slurm_script_remote.as_posix()}_wrapper.sh"
+            )
+            self.fractal_ssh.write_remote_file(
+                path=script_path_remote, content=script_content
+            )
+            cmd = f"bash {script_path_remote}"
+            sbatch_stdout = self.fractal_ssh.run_command(cmd=cmd)

        # Extract SLURM job ID from stdout
        try:
@@ -881,7 +894,9 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
            error_msg = (
                f"Submit command `{sbatch_command}` returned "
                f"`{stdout=}` which cannot be cast to an integer "
-                f"SLURM-job ID."
+                f"SLURM-job ID.\n"
+                f"Note that {pre_submission_cmds=}.\n"
+                f"Original error:\n{str(e)}"
            )
            logger.error(error_msg)
            raise JobExecutionError(info=error_msg)
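Stripped of executor plumbing, the new submission flow reduces to: run `sbatch` directly when there are no pre-submission commands, otherwise write a small remote wrapper script and run that instead. A self-contained sketch under that assumption (the `ssh` object stands in for `FractalSSH`; only the `run_command` and `write_remote_file` calls from this diff are assumed):

def submit_with_pre_commands(ssh, slurm_script_remote: str,
                             pre_submission_cmds: list[str]) -> str:
    sbatch_command = f"sbatch --parsable {slurm_script_remote}"
    if len(pre_submission_cmds) == 0:
        # Fast path: no wrapper needed
        return ssh.run_command(cmd=sbatch_command)
    # Wrap the pre-submission commands and the sbatch call into one script
    script_content = "\n".join(pre_submission_cmds + [sbatch_command]) + "\n"
    script_path_remote = f"{slurm_script_remote}_wrapper.sh"
    ssh.write_remote_file(path=script_path_remote, content=script_content)
    return ssh.run_command(cmd=f"bash {script_path_remote}")

Because `sbatch --parsable` prints only the job ID, the wrapper's stdout can still be cast to an integer downstream, which is why the enriched error message now also reports `pre_submission_cmds`.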
fractal_server/app/runner/executors/slurm/sudo/executor.py
CHANGED
@@ -1121,6 +1121,12 @@ class FractalSlurmExecutor(SlurmExecutor):
            slurm_err_path=str(job.slurm_stderr),
        )

+        # Print warning for ignored parameter
+        if len(job.slurm_config.pre_submission_commands) > 0:
+            logger.warning(
+                f"Ignoring {job.slurm_config.pre_submission_commands=}."
+            )
+
        # Submit job via sbatch, and retrieve jobid

        # Write script content to a job.slurm_script
fractal_server/app/runner/v2/__init__.py
CHANGED
@@ -36,8 +36,8 @@ from ._local import process_workflow as local_process_workflow
 from ._local_experimental import (
     process_workflow as local_experimental_process_workflow,
 )
-from ._slurm import process_workflow as slurm_sudo_process_workflow
 from ._slurm_ssh import process_workflow as slurm_ssh_process_workflow
+from ._slurm_sudo import process_workflow as slurm_sudo_process_workflow
 from .handle_failed_job import assemble_filters_failed_job
 from .handle_failed_job import assemble_history_failed_job
 from .handle_failed_job import assemble_images_failed_job
fractal_server/app/runner/v2/_slurm_common/__init__.py
File without changes
fractal_server/app/runner/v2/{_slurm_ssh → _slurm_common}/get_slurm_config.py
RENAMED
@@ -18,8 +18,6 @@ from fractal_server.app.runner.executors.slurm._slurm_config import (


 def get_slurm_config(
     wftask: WorkflowTaskV2,
-    workflow_dir_local: Path,
-    workflow_dir_remote: Path,
     which_type: Literal["non_parallel", "parallel"],
     config_path: Optional[Path] = None,
 ) -> SlurmConfig:
@@ -43,13 +41,6 @@ def get_slurm_config(
        wftask:
            WorkflowTask for which the SLURM configuration is is to be
            prepared.
-        workflow_dir_local:
-            Server-owned directory to store all task-execution-related relevant
-            files (inputs, outputs, errors, and all meta files related to the
-            job execution). Note: users cannot write directly to this folder.
-        workflow_dir_remote:
-            User-side directory with the same scope as `workflow_dir_local`,
-            and where a user can write.
        config_path:
            Path of a Fractal SLURM configuration file; if `None`, use
            `FRACTAL_SLURM_CONFIG_FILE` variable from settings.
@@ -99,13 +90,13 @@ def get_slurm_config(
    # 1. This block of definitions takes priority over other definitions from
    #    slurm_env which are not under the `needs_gpu` subgroup
    # 2. This block of definitions has lower priority than whatever comes next
-    #    (i.e. from WorkflowTask.meta).
+    #    (i.e. from WorkflowTask.meta_parallel).
    if wftask_meta is not None:
        needs_gpu = wftask_meta.get("needs_gpu", False)
    else:
        needs_gpu = False
    logger.debug(f"[get_slurm_config] {needs_gpu=}")
-    if needs_gpu
+    if needs_gpu:
        for key, value in slurm_env.gpu_slurm_config.dict(
            exclude_unset=True, exclude={"mem"}
        ).items():
@@ -143,9 +134,9 @@ def get_slurm_config(
            )
            logger.error(error_msg)
            raise SlurmConfigError(error_msg)
-        for key in ["time", "gres", "constraint"]:
+        for key in ["time", "gres", "gpus", "constraint"]:
            value = wftask_meta.get(key, None)
-            if value:
+            if value is not None:
                slurm_dict[key] = value
    if wftask_meta is not None:
        extra_lines = wftask_meta.get("extra_lines", [])
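The switch from `if value:` to `if value is not None:` is a small but real semantic fix: a falsy-but-explicitly-set meta value is no longer dropped. Illustration with a hypothetical meta dict:

wftask_meta = {"time": "00:10:00", "gres": ""}  # "" is falsy but explicitly set
slurm_dict = {}
for key in ["time", "gres", "gpus", "constraint"]:
    value = wftask_meta.get(key, None)
    if value is not None:  # old code: `if value:` would skip the empty string
        slurm_dict[key] = value
assert slurm_dict == {"time": "00:10:00", "gres": ""}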
fractal_server/app/runner/v2/_slurm_ssh/_submit_setup.py
CHANGED
@@ -17,8 +17,10 @@ from pathlib import Path
 from typing import Literal

 from ...task_files import get_task_file_paths
-from .get_slurm_config import get_slurm_config
 from fractal_server.app.models.v2 import WorkflowTaskV2
+from fractal_server.app.runner.v2._slurm_common.get_slurm_config import (
+    get_slurm_config,
+)


 def _slurm_submit_setup(
@@ -62,8 +64,6 @@ def _slurm_submit_setup(
    # Get SlurmConfig object
    slurm_config = get_slurm_config(
        wftask=wftask,
-        workflow_dir_local=workflow_dir_local,
-        workflow_dir_remote=workflow_dir_remote,
        which_type=which_type,
    )

fractal_server/app/runner/v2/{_slurm → _slurm_sudo}/_submit_setup.py
RENAMED
@@ -17,8 +17,10 @@ from pathlib import Path
 from typing import Literal

 from ...task_files import get_task_file_paths
-from .get_slurm_config import get_slurm_config
 from fractal_server.app.models.v2 import WorkflowTaskV2
+from fractal_server.app.runner.v2._slurm_common.get_slurm_config import (
+    get_slurm_config,
+)


 def _slurm_submit_setup(
@@ -62,8 +64,6 @@ def _slurm_submit_setup(
    # Get SlurmConfig object
    slurm_config = get_slurm_config(
        wftask=wftask,
-        workflow_dir_local=workflow_dir_local,
-        workflow_dir_remote=workflow_dir_remote,
        which_type=which_type,
    )

fractal_server/ssh/_fabric.py
CHANGED
@@ -306,6 +306,28 @@ class FractalSSH(object):
        cmd = f"rm -r {folder}"
        self.run_command(cmd=cmd)

+    def write_remote_file(
+        self,
+        *,
+        path: str,
+        content: str,
+        lock_timeout: Optional[float] = None,
+    ) -> None:
+        """
+        Open a remote file via SFTP and write it.
+
+        Args:
+            path: Absolute path
+            contents: File contents
+            lock_timeout:
+        """
+        actual_lock_timeout = self.default_lock_timeout
+        if lock_timeout is not None:
+            actual_lock_timeout = lock_timeout
+        with self.acquire_timeout(timeout=actual_lock_timeout):
+            with self.sftp().open(filename=path, mode="w") as f:
+                f.write(content)
+

 def get_ssh_connection(
     *,
{fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: fractal-server
-Version: 2.3.4
+Version: 2.3.6
 Summary: Server component of the Fractal analytics platform
 Home-page: https://github.com/fractal-analytics-platform/fractal-server
 License: BSD-3-Clause
@@ -33,7 +33,7 @@ Requires-Dist: psycopg[binary] (>=3.1.0,<4.0.0) ; extra == "postgres-psycopg-bin
 Requires-Dist: pydantic (>=1.10.8,<2)
 Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
 Requires-Dist: sqlalchemy[asyncio] (>=2.0.23,<2.1)
-Requires-Dist: sqlmodel (>=0.0.
+Requires-Dist: sqlmodel (>=0.0.21,<0.0.22)
 Requires-Dist: uvicorn (>=0.29.0,<0.30.0)
 Project-URL: Changelog, https://github.com/fractal-analytics-platform/fractal-server/blob/main/CHANGELOG.md
 Project-URL: Documentation, https://fractal-analytics-platform.github.io/fractal-server
{fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-fractal_server/__init__.py,sha256=
+fractal_server/__init__.py,sha256=lCtPuZps1Hbf7xTBL9gN0BnZZFuDzV5N5mezQVbB9lA,22
 fractal_server/__main__.py,sha256=CocbzZooX1UtGqPi55GcHGNxnrJXFg5tUU5b3wyFCyo,4958
 fractal_server/alembic.ini,sha256=MWwi7GzjzawI9cCAK1LW7NxIBQDUqD12-ptJoq5JpP0,3153
 fractal_server/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -18,7 +18,7 @@ fractal_server/app/models/v2/collection_state.py,sha256=nxb042i8tt8rCpmgbFJoBCYW
 fractal_server/app/models/v2/dataset.py,sha256=-7sxHEw4IIAvF_uSan7tA3o8hvoakBkQ0SRvqS2iOQU,1455
 fractal_server/app/models/v2/job.py,sha256=ypJmN-qspkKBGhBG7Mt-HypSQqcQ2EmB4Bzzb2-y550,1535
 fractal_server/app/models/v2/project.py,sha256=CRBnZ8QITNp6u1f5bMxvi1_mcvEfXpWyitsWB5f7gn8,759
-fractal_server/app/models/v2/task.py,sha256=
+fractal_server/app/models/v2/task.py,sha256=Esf2j9c-0pGYjdbb__Ptpdx7NCAKVxqbQMoza524miU,1286
 fractal_server/app/models/v2/workflow.py,sha256=YBgFGCziUgU0aJ5EM3Svu9W2c46AewZO9VBlFCHiSps,1069
 fractal_server/app/models/v2/workflowtask.py,sha256=3jEkObsSnlI05Pur_dSsXYdJxRqPL60Z7tK5-EJLOks,1532
 fractal_server/app/routes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -36,7 +36,7 @@ fractal_server/app/routes/api/v1/task_collection.py,sha256=82XBsJHlPiDPCbpLa-16o
 fractal_server/app/routes/api/v1/workflow.py,sha256=7r9IoIevg_rvYCrerMOsIsUabSOQatxdPCfLdkP0dRs,10942
 fractal_server/app/routes/api/v1/workflowtask.py,sha256=qcHQlzlSFf_k8gtId-mA3tnyzgSR7i1m7pvR4R86blE,5582
 fractal_server/app/routes/api/v2/__init__.py,sha256=JrPWfKIJy9btRCP-zw2nZwLpSdBxEKY5emuCuJbqG0s,1813
-fractal_server/app/routes/api/v2/_aux_functions.py,sha256=
+fractal_server/app/routes/api/v2/_aux_functions.py,sha256=yeA0650pBk43M5ZQGpVQ17nH5D97NIGY-3tNNLQIW1M,14901
 fractal_server/app/routes/api/v2/dataset.py,sha256=_HjKNP9XsMGoqyubGdF2ZyeW7vXC3VdK_0_TaUxgIF0,8248
 fractal_server/app/routes/api/v2/images.py,sha256=4r_HblPWyuKSZSJZfn8mbDaLv1ncwZU0gWdKneZcNG4,7894
 fractal_server/app/routes/api/v2/job.py,sha256=RkIj7ANK-nkxUvcG9K2r4dFdPnvGomx7jdB6U9bqOVQ,5202
@@ -48,7 +48,7 @@ fractal_server/app/routes/api/v2/task_collection.py,sha256=BiZ5s6DwdQbM79s_dPivg
 fractal_server/app/routes/api/v2/task_collection_custom.py,sha256=cvZorN_Xt57Rj-2JATRrdtSw6I_5befB0ua3FWh6hW4,5988
 fractal_server/app/routes/api/v2/task_legacy.py,sha256=P_VJv9v0yzFUBuS-DQHhMVSOe20ecGJJcFBqiiFciOM,1628
 fractal_server/app/routes/api/v2/workflow.py,sha256=2GlcYNjpvCdjwC_Kn7y0UP16B3pOLSNXBvIVsVDtDKM,11863
-fractal_server/app/routes/api/v2/workflowtask.py,sha256=
+fractal_server/app/routes/api/v2/workflowtask.py,sha256=l_eQPniK1zR0u249bJj4c2hFlyDwsSJgsFR6hxJaOjs,8007
 fractal_server/app/routes/auth.py,sha256=Xv80iqdyfY3lyicYs2Y8B6zEDEnyUu_H6_6psYtv3R4,4885
 fractal_server/app/routes/aux/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/routes/aux/_job.py,sha256=HUItNm0SZFAYsyL1rXSjBre1-rXSe6x51qH9KAQWS1w,1361
@@ -62,17 +62,17 @@ fractal_server/app/runner/exceptions.py,sha256=_qZ_t8O4umAdJ1ikockiF5rDJuxnEskrG
 fractal_server/app/runner/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/app/runner/executors/slurm/_batching.py,sha256=3mfeFuYm3UA4EXh4VWuqZTF-dcINECZgTHoPOaOszDo,8840
-fractal_server/app/runner/executors/slurm/_slurm_config.py,sha256=
+fractal_server/app/runner/executors/slurm/_slurm_config.py,sha256=iyhtDi1qveqq7I4S1tycVKsp3VfyocvBgGugYDpOzAs,16069
 fractal_server/app/runner/executors/slurm/remote.py,sha256=wLziIsGdSMiO-jIXM8x77JRK82g_2hx0iBKTiMghuIo,5852
 fractal_server/app/runner/executors/slurm/ssh/__init__.py,sha256=Cjn1rYvljddi96tAwS-qqGkNfOcfPzjChdaEZEObCcM,65
 fractal_server/app/runner/executors/slurm/ssh/_executor_wait_thread.py,sha256=jM4G-wiHynZhNERusVGLtDTepJDiYjCDloWZyflaMV0,3482
 fractal_server/app/runner/executors/slurm/ssh/_slurm_job.py,sha256=rwlqZzoGo4SAb4nSlFjsQJdaCgfM1J6YGcjb8yYxlqc,4506
-fractal_server/app/runner/executors/slurm/ssh/executor.py,sha256=
+fractal_server/app/runner/executors/slurm/ssh/executor.py,sha256=rfLEO6mN3sZvZYHqs3lmYvPYFGLmXyMPWl1Bg0mq-6k,56109
 fractal_server/app/runner/executors/slurm/sudo/__init__.py,sha256=Cjn1rYvljddi96tAwS-qqGkNfOcfPzjChdaEZEObCcM,65
 fractal_server/app/runner/executors/slurm/sudo/_check_jobs_status.py,sha256=wAgwpVcr6JIslKHOuS0FhRa_6T1KCManyRJqA-fifzw,1909
 fractal_server/app/runner/executors/slurm/sudo/_executor_wait_thread.py,sha256=z5LlhaiqAb8pHsF1WwdzXN39C5anQmwjo1rSQgtRAYE,4422
 fractal_server/app/runner/executors/slurm/sudo/_subprocess_run_as_user.py,sha256=uZgmxP0ZneGpzTVt-GT-6EgNKUh1sW2-QH7LFYc1tNI,5132
-fractal_server/app/runner/executors/slurm/sudo/executor.py,sha256=
+fractal_server/app/runner/executors/slurm/sudo/executor.py,sha256=74jfNauDgQOdcILHcCLJM4Awm_SThsZYgc0Vwx0hnB8,48460
 fractal_server/app/runner/extract_archive.py,sha256=tLpjDrX47OjTNhhoWvm6iNukg8KoieWyTb7ZfvE9eWU,2483
 fractal_server/app/runner/filenames.py,sha256=9lwu3yB4C67yiijYw8XIKaLFn3mJUt6_TCyVFM_aZUQ,206
 fractal_server/app/runner/run_subprocess.py,sha256=KTkJnWLrLQdR2WRJ3jGu0RBu4330L3mtCAE_B0wDx3M,818
@@ -90,7 +90,7 @@ fractal_server/app/runner/v1/_slurm/_submit_setup.py,sha256=KO9c694d318adoPQh9UG
 fractal_server/app/runner/v1/_slurm/get_slurm_config.py,sha256=6pQNNx997bLIfLp0guF09t_O0ZYRXnbEGLktSAcKnic,5999
 fractal_server/app/runner/v1/common.py,sha256=_L-vjLnWato80VdlB_BFN4G8P4jSM07u-5cnl1T3S34,3294
 fractal_server/app/runner/v1/handle_failed_job.py,sha256=bHzScC_aIlU3q-bQxGW6rfWV4xbZ2tho_sktjsAs1no,4684
-fractal_server/app/runner/v2/__init__.py,sha256=
+fractal_server/app/runner/v2/__init__.py,sha256=nD4uFi-RGsN6JAmJNpV2dS603u8KqFuGwXZS8jIrf50,16917
 fractal_server/app/runner/v2/_local/__init__.py,sha256=KTj14K6jH8fXGUi5P7u5_RqEE1zF4aXtgPxCKzw46iw,5971
 fractal_server/app/runner/v2/_local/_local_config.py,sha256=9oi209Dlp35ANfxb_DISqmMKKc6DPaMsmYVWbZLseME,3630
 fractal_server/app/runner/v2/_local/_submit_setup.py,sha256=MucNOo8Er0F5ZIwH7CnTeXgnFMc6d3pKPkv563QNVi0,1630
@@ -99,12 +99,12 @@ fractal_server/app/runner/v2/_local_experimental/__init__.py,sha256=53yS8a-l0dMT
 fractal_server/app/runner/v2/_local_experimental/_local_config.py,sha256=QiS5ODe-iGmUQdIT8QgpbyMc7-ZpIRv1V_f2q3qfPQ8,3211
 fractal_server/app/runner/v2/_local_experimental/_submit_setup.py,sha256=we7r-sQf0CJ9gxbfbgHcYdC6pKjx8eXweljIjthxkv8,1212
 fractal_server/app/runner/v2/_local_experimental/executor.py,sha256=vcBKjireIIyF5WgIQLatD6ojlWEydbTwyIG0bcpIjys,5438
-fractal_server/app/runner/v2/
-fractal_server/app/runner/v2/
-fractal_server/app/runner/v2/_slurm/get_slurm_config.py,sha256=btGmbZB0fO6bg2WujFxbGEV2iWzaMKbHgV1r2hm_4a0,6748
+fractal_server/app/runner/v2/_slurm_common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+fractal_server/app/runner/v2/_slurm_common/get_slurm_config.py,sha256=V47uckqA4Vp-7m5esDnTitekc-yabLhaZSlPj4jN_D8,6307
 fractal_server/app/runner/v2/_slurm_ssh/__init__.py,sha256=1p6d_ppXBqRNPXPGxM8cmIOffEsfkEPEfvDeT-_90dE,3990
-fractal_server/app/runner/v2/_slurm_ssh/_submit_setup.py,sha256=
-fractal_server/app/runner/v2/
+fractal_server/app/runner/v2/_slurm_ssh/_submit_setup.py,sha256=a5_FDPH_yxYmrjAjMRLgh_Y4DSG3mRslCLQodGM3-t4,2838
+fractal_server/app/runner/v2/_slurm_sudo/__init__.py,sha256=q2fwiKqtNpXtfs5wUFQjwJxdYqKPPTbCy1ieBhhi-Bw,4316
+fractal_server/app/runner/v2/_slurm_sudo/_submit_setup.py,sha256=a5_FDPH_yxYmrjAjMRLgh_Y4DSG3mRslCLQodGM3-t4,2838
 fractal_server/app/runner/v2/deduplicate_list.py,sha256=-imwO7OB7ATADEnqVbTElUwoY0YIJCTf_SbWJNN9OZg,639
 fractal_server/app/runner/v2/handle_failed_job.py,sha256=M1r3dnrbUMo_AI2qjaVuGhieMAyLh5gcvB10YOBpjvI,5415
 fractal_server/app/runner/v2/merge_outputs.py,sha256=IHuHqbKmk97K35BFvTrKVBs60z3e_--OzXTnsvmA02c,1281
@@ -168,7 +168,7 @@ fractal_server/migrations/versions/efa89c30e0a4_add_project_timestamp_created.py
 fractal_server/migrations/versions/f384e1c0cf5d_drop_task_default_args_columns.py,sha256=9BwqUS9Gf7UW_KjrzHbtViC880qhD452KAytkHWWZyk,746
 fractal_server/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fractal_server/ssh/__init__.py,sha256=sVUmzxf7_DuXG1xoLQ1_00fo5NPhi2LJipSmU5EAkPs,124
-fractal_server/ssh/_fabric.py,sha256=
+fractal_server/ssh/_fabric.py,sha256=9xcsOEwbCgbJtupkIeG8OOtT8ct8c7_ruIehhNmD4wc,11379
 fractal_server/string_tools.py,sha256=KThgTLn_FHNSuEUGLabryJAP6DaFd7bpi-hF5FgkBjw,1268
 fractal_server/syringe.py,sha256=3qSMW3YaMKKnLdgnooAINOPxnCOxP7y2jeAQYB21Gdo,2786
 fractal_server/tasks/__init__.py,sha256=kadmVUoIghl8s190_Tt-8f-WBqMi8u8oU4Pvw39NHE8,23
@@ -193,8 +193,8 @@ fractal_server/tasks/v2/templates/_5_pip_show.sh,sha256=GrJ19uHYQxANEy9JaeNJZVTq
 fractal_server/tasks/v2/utils.py,sha256=JOyCacb6MNvrwfLNTyLwcz8y79J29YuJeJ2MK5kqXRM,1657
 fractal_server/urls.py,sha256=5o_qq7PzKKbwq12NHSQZDmDitn5RAOeQ4xufu-2v9Zk,448
 fractal_server/utils.py,sha256=b7WwFdcFZ8unyT65mloFToYuEDXpQoHRcmRNqrhd_dQ,2115
-fractal_server-2.3.
-fractal_server-2.3.
-fractal_server-2.3.
-fractal_server-2.3.
-fractal_server-2.3.
+fractal_server-2.3.6.dist-info/LICENSE,sha256=QKAharUuhxL58kSoLizKJeZE3mTCBnX6ucmz8W0lxlk,1576
+fractal_server-2.3.6.dist-info/METADATA,sha256=WrV14I1NR6DeXCeuHJTZqsnJfCf4EqdY-IZSeeGPAPM,4425
+fractal_server-2.3.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+fractal_server-2.3.6.dist-info/entry_points.txt,sha256=8tV2kynvFkjnhbtDnxAqImL6HMVKsopgGfew0DOp5UY,58
+fractal_server-2.3.6.dist-info/RECORD,,
fractal_server/app/runner/v2/_slurm/get_slurm_config.py
DELETED
@@ -1,182 +0,0 @@
-from pathlib import Path
-from typing import Literal
-from typing import Optional
-
-from fractal_server.app.models.v2 import WorkflowTaskV2
-from fractal_server.app.runner.executors.slurm._slurm_config import (
-    _parse_mem_value,
-)
-from fractal_server.app.runner.executors.slurm._slurm_config import (
-    load_slurm_config_file,
-)
-from fractal_server.app.runner.executors.slurm._slurm_config import logger
-from fractal_server.app.runner.executors.slurm._slurm_config import SlurmConfig
-from fractal_server.app.runner.executors.slurm._slurm_config import (
-    SlurmConfigError,
-)
-
-
-def get_slurm_config(
-    wftask: WorkflowTaskV2,
-    workflow_dir_local: Path,
-    workflow_dir_remote: Path,
-    which_type: Literal["non_parallel", "parallel"],
-    config_path: Optional[Path] = None,
-) -> SlurmConfig:
-    """
-    Prepare a `SlurmConfig` configuration object
-
-    The argument `which_type` determines whether we use `wftask.meta_parallel`
-    or `wftask.meta_non_parallel`. In the following descritpion, let us assume
-    that `which_type="parallel"`.
-
-    The sources for `SlurmConfig` attributes, in increasing priority order, are
-
-    1. The general content of the Fractal SLURM configuration file.
-    2. The GPU-specific content of the Fractal SLURM configuration file, if
-        appropriate.
-    3. Properties in `wftask.meta_parallel` (which typically include those in
-        `wftask.task.meta_parallel`). Note that `wftask.meta_parallel` may be
-        `None`.
-
-    Arguments:
-        wftask:
-            WorkflowTask for which the SLURM configuration is is to be
-            prepared.
-        workflow_dir_local:
-            Server-owned directory to store all task-execution-related relevant
-            files (inputs, outputs, errors, and all meta files related to the
-            job execution). Note: users cannot write directly to this folder.
-        workflow_dir_remote:
-            User-side directory with the same scope as `workflow_dir_local`,
-            and where a user can write.
-        config_path:
-            Path of a Fractal SLURM configuration file; if `None`, use
-            `FRACTAL_SLURM_CONFIG_FILE` variable from settings.
-        which_type:
-            Determines whether to use `meta_parallel` or `meta_non_parallel`.
-
-    Returns:
-        slurm_config:
-            The SlurmConfig object
-    """
-
-    if which_type == "non_parallel":
-        wftask_meta = wftask.meta_non_parallel
-    elif which_type == "parallel":
-        wftask_meta = wftask.meta_parallel
-    else:
-        raise ValueError(
-            f"get_slurm_config received invalid argument {which_type=}."
-        )
-
-    logger.debug(
-        "[get_slurm_config] WorkflowTask meta attribute: {wftask_meta=}"
-    )
-
-    # Incorporate slurm_env.default_slurm_config
-    slurm_env = load_slurm_config_file(config_path=config_path)
-    slurm_dict = slurm_env.default_slurm_config.dict(
-        exclude_unset=True, exclude={"mem"}
-    )
-    if slurm_env.default_slurm_config.mem:
-        slurm_dict["mem_per_task_MB"] = slurm_env.default_slurm_config.mem
-
-    # Incorporate slurm_env.batching_config
-    for key, value in slurm_env.batching_config.dict().items():
-        slurm_dict[key] = value
-
-    # Incorporate slurm_env.user_local_exports
-    slurm_dict["user_local_exports"] = slurm_env.user_local_exports
-
-    logger.debug(
-        "[get_slurm_config] Fractal SLURM configuration file: "
-        f"{slurm_env.dict()=}"
-    )
-
-    # GPU-related options
-    # Notes about priority:
-    # 1. This block of definitions takes priority over other definitions from
-    #    slurm_env which are not under the `needs_gpu` subgroup
-    # 2. This block of definitions has lower priority than whatever comes next
-    #    (i.e. from WorkflowTask.meta).
-    if wftask_meta is not None:
-        needs_gpu = wftask_meta.get("needs_gpu", False)
-    else:
-        needs_gpu = False
-    logger.debug(f"[get_slurm_config] {needs_gpu=}")
-    if needs_gpu:
-        for key, value in slurm_env.gpu_slurm_config.dict(
-            exclude_unset=True, exclude={"mem"}
-        ).items():
-            slurm_dict[key] = value
-        if slurm_env.gpu_slurm_config.mem:
-            slurm_dict["mem_per_task_MB"] = slurm_env.gpu_slurm_config.mem
-
-    # Number of CPUs per task, for multithreading
-    if wftask_meta is not None and "cpus_per_task" in wftask_meta:
-        cpus_per_task = int(wftask_meta["cpus_per_task"])
-        slurm_dict["cpus_per_task"] = cpus_per_task
-
-    # Required memory per task, in MB
-    if wftask_meta is not None and "mem" in wftask_meta:
-        raw_mem = wftask_meta["mem"]
-        mem_per_task_MB = _parse_mem_value(raw_mem)
-        slurm_dict["mem_per_task_MB"] = mem_per_task_MB
-
-    # Job name
-    if wftask.is_legacy_task:
-        job_name = wftask.task_legacy.name.replace(" ", "_")
-    else:
-        job_name = wftask.task.name.replace(" ", "_")
-    slurm_dict["job_name"] = job_name
-
-    # Optional SLURM arguments and extra lines
-    if wftask_meta is not None:
-        account = wftask_meta.get("account", None)
-        if account is not None:
-            error_msg = (
-                f"Invalid {account=} property in WorkflowTask `meta` "
-                "attribute.\n"
-                "SLURM account must be set in the request body of the "
-                "apply-workflow endpoint, or by modifying the user properties."
-            )
-            logger.error(error_msg)
-            raise SlurmConfigError(error_msg)
-        for key in ["time", "gres", "constraint"]:
-            value = wftask_meta.get(key, None)
-            if value:
-                slurm_dict[key] = value
-    if wftask_meta is not None:
-        extra_lines = wftask_meta.get("extra_lines", [])
-    else:
-        extra_lines = []
-    extra_lines = slurm_dict.get("extra_lines", []) + extra_lines
-    if len(set(extra_lines)) != len(extra_lines):
-        logger.debug(
-            "[get_slurm_config] Removing repeated elements "
-            f"from {extra_lines=}."
-        )
-        extra_lines = list(set(extra_lines))
-    slurm_dict["extra_lines"] = extra_lines
-
-    # Job-batching parameters (if None, they will be determined heuristically)
-    if wftask_meta is not None:
-        tasks_per_job = wftask_meta.get("tasks_per_job", None)
-        parallel_tasks_per_job = wftask_meta.get(
-            "parallel_tasks_per_job", None
-        )
-    else:
-        tasks_per_job = None
-        parallel_tasks_per_job = None
-    slurm_dict["tasks_per_job"] = tasks_per_job
-    slurm_dict["parallel_tasks_per_job"] = parallel_tasks_per_job
-
-    # Put everything together
-    logger.debug(
-        "[get_slurm_config] Now create a SlurmConfig object based "
-        f"on {slurm_dict=}"
-    )
-    slurm_config = SlurmConfig(**slurm_dict)
-
-    return slurm_config
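The docstring above spells out the precedence order (general config file < GPU subgroup < WorkflowTask meta). As a toy illustration with plain dicts and hypothetical keys, later sources simply overwrite earlier ones:

default_cfg = {"partition": "main", "mem": "1G"}    # 1. general config file
gpu_cfg = {"partition": "gpu", "gres": "gpu:1"}     # 2. GPU subgroup (if needs_gpu)
wftask_meta = {"mem": "8G"}                         # 3. WorkflowTask meta
merged = {**default_cfg, **gpu_cfg, **wftask_meta}  # rightmost wins
assert merged == {"partition": "gpu", "gres": "gpu:1", "mem": "8G"}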
/fractal_server/app/runner/v2/{_slurm → _slurm_sudo}/__init__.py
File without changes

{fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/LICENSE
File without changes

{fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/WHEEL
File without changes

{fractal_server-2.3.4.dist-info → fractal_server-2.3.6.dist-info}/entry_points.txt
File without changes