xmanager-slurm 0.4.7__py3-none-any.whl → 0.4.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xmanager-slurm might be problematic.
- xm_slurm/api/web/client.py +1 -1
- xm_slurm/config.py +2 -1
- xm_slurm/execution.py +24 -11
- xm_slurm/executors.py +5 -0
- xm_slurm/templates/slurm/fragments/monitor.bash.j2 +56 -17
- xm_slurm/templates/slurm/job.bash.j2 +1 -1
- xm_slurm/utils.py +30 -0
- {xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/METADATA +1 -1
- {xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/RECORD +12 -12
- {xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/WHEEL +0 -0
- {xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/entry_points.txt +0 -0
- {xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/licenses/LICENSE.md +0 -0
xm_slurm/api/web/client.py
CHANGED
@@ -33,7 +33,7 @@ class XManagerWebAPI(XManagerAPI):
         self.client = httpx.Client(headers={"Authorization": f"Bearer {token}"}, verify=False)

     def _make_url(self, path: str) -> str:
-        return f"{self.base_url}
+        return f"{self.base_url}{path}"

     @with_backoff
     def get_experiment(self, xid: int) -> models.Experiment:
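(Not part of the diff.) The hunk replaces the `_make_url` return line so the request path is appended directly to `base_url`; the removed line is truncated in this view. A minimal sketch of the resulting joining behaviour, with an assumed `base_url` value:

# Hypothetical illustration only; the base_url value is assumed.
base_url = "https://xmanager.example.com/api/v1"
path = "/experiments/42"
url = f"{base_url}{path}"  # "https://xmanager.example.com/api/v1/experiments/42"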
xm_slurm/config.py
CHANGED
@@ -119,8 +119,9 @@ class SlurmSSHConfig:
             config=None,
             kbdint_auth=False,
             disable_trivial_auth=True,
+            known_hosts=self.known_hosts,
         )
-        options.prepare(last_config=self.config
+        options.prepare(last_config=self.config)
         return options

     def serialize(self):
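(Not part of the diff.) The change threads the config's `known_hosts` value into the SSH connection options and closes a previously unbalanced `prepare(...)` call. A minimal sketch, outside the package, of how a known_hosts value is typically passed to asyncssh; the host name and key-file path are assumptions:

# Sketch only: asyncssh host-key checking driven by a known_hosts value.
import asyncio
import asyncssh

async def connect(host: str, known_hosts: str | None) -> None:
    # known_hosts may point at a known_hosts file; None disables host key checking.
    async with asyncssh.connect(host, known_hosts=known_hosts) as conn:
        result = await conn.run("hostname", check=True)
        print(result.stdout)

# asyncio.run(connect("login.cluster.example", "~/.ssh/known_hosts"))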
xm_slurm/execution.py
CHANGED
@@ -235,11 +235,10 @@ class SlurmHandle(_BatchedSlurmHandle, tp.Generic[SlurmJobT]):
         return await self._batched_get_state(self.ssh, self.slurm_job)

     async def logs(
-        self, *, num_lines: int, block_size: int, wait: bool, follow: bool
-    ) -> tp.AsyncGenerator[ConsoleRenderable, None]:
+        self, *, num_lines: int, block_size: int, wait: bool, follow: bool, raw: bool = False
+    ) -> tp.AsyncGenerator[tp.Union[str, ConsoleRenderable], None]:
         experiment_dir = await get_client().experiment_dir(self.ssh, self.experiment_id)
         file = experiment_dir / f"slurm-{self.slurm_job.job_id}.out"
-
         fs = await get_client().fs(self.ssh)

         if wait:
@@ -249,7 +248,7 @@ class SlurmHandle(_BatchedSlurmHandle, tp.Generic[SlurmJobT]):
         file_size = await fs.size(file)
         assert file_size is not None

-        async with await fs.open(file, "rb") as remote_file:
+        async with await fs.open(file, "rb") as remote_file:
            data = b""
            lines = []
            position = file_size
@@ -257,27 +256,40 @@ class SlurmHandle(_BatchedSlurmHandle, tp.Generic[SlurmJobT]):
            while len(lines) <= num_lines and position > 0:
                read_size = min(block_size, position)
                position -= read_size
-                await remote_file.seek(position)
+                await remote_file.seek(position)
                chunk = await remote_file.read(read_size)
                data = chunk + data
                lines = data.splitlines()

            if position <= 0:
-
+                if raw:
+                    yield "\033[31mBEGINNING OF FILE\033[0m\n"
+                else:
+                    yield Rule("[bold red]BEGINNING OF FILE[/bold red]")
            for line in lines[-num_lines:]:
-
+                if raw:
+                    yield line.decode("utf-8", errors="replace") + "\n"
+                else:
+                    yield Text.from_ansi(line.decode("utf-8", errors="replace"))

            if (await self.get_state()) not in status.SlurmActiveJobStates:
-
-
+                if raw:
+                    yield "\033[31mEND OF FILE\033[0m\n"
+                    return
+                else:
+                    yield Rule("[bold red]END OF FILE[/bold red]")
+                    return

            if not follow:
                return

-            await remote_file.seek(file_size)
+            await remote_file.seek(file_size)
            while True:
                if new_data := (await remote_file.read(block_size)):
-
+                    if raw:
+                        yield new_data.decode("utf-8", errors="replace")
+                    else:
+                        yield Text.from_ansi(new_data.decode("utf-8", errors="replace"))
                else:
                    await asyncio.sleep(0.25)

@@ -454,6 +466,7 @@ class SlurmExecutionClient:
         return False

     @functools.cache
+    @utils.reawaitable
     async def _state_dir(self, ssh_config: SlurmSSHConfig) -> pathlib.Path:
         state_dirs = [
             ("XM_SLURM_STATE_DIR", ""),
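(Not part of the diff.) `SlurmHandle.logs` now takes a `raw` flag: when true it yields plain `str` chunks (ANSI escapes intact) instead of Rich renderables, so the output can be written straight to a terminal or file. A minimal consumer sketch, assuming `handle` is a `SlurmHandle` obtained from a launched job:

# Sketch only: stream a job's log with the new raw mode.
import asyncio
import sys

async def stream_logs(handle) -> None:
    async for chunk in handle.logs(
        num_lines=50, block_size=4096, wait=True, follow=True, raw=True
    ):
        sys.stdout.write(chunk)
        sys.stdout.flush()

# asyncio.run(stream_logs(handle))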
xm_slurm/executors.py
CHANGED
@@ -57,8 +57,13 @@ class Slurm(xm.Executor):

     requeue: bool = True  # Is this job eligible for requeueing?
     requeue_on_exit_code: int = 42  # The exit code that triggers requeueing
+    requeue_on_timeout: bool = True  # Should the job requeue upon timeout (minus the grace period)?
     requeue_max_attempts: int = 5  # How many times to attempt requeueing

+    @property
+    def requeue_timeout(self) -> dt.timedelta:
+        return self.time - self.timeout_signal_grace_period
+
     def __post_init__(self) -> None:
         if not isinstance(self.time, dt.timedelta):
             raise TypeError(f"time must be a `datetime.timedelta`, got {type(self.time)}")
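(Not part of the diff.) The new `requeue_timeout` property is simply the job's wall-clock limit minus its signal grace period, i.e. when the watchdog should fire. A small illustration with assumed values:

# Illustration only; the time and grace-period values are assumed.
import datetime as dt

time = dt.timedelta(hours=3)                         # the Slurm job's --time limit
timeout_signal_grace_period = dt.timedelta(minutes=2)

requeue_timeout = time - timeout_signal_grace_period
print(requeue_timeout)          # 2:58:00
print(requeue_timeout.seconds)  # 10680, the value interpolated into the template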
xm_slurm/templates/slurm/fragments/monitor.bash.j2
CHANGED
@@ -1,4 +1,4 @@
-{% macro monitor(requeue_max_attempts, requeue_exit_code) -%}
+{% macro monitor(requeue_max_attempts, requeue_exit_code, requeue_on_timeout, requeue_timeout) -%}
 __xm_slurm_wait_for_children() {
     if [[ -n "${SLURM_ARRAY_JOB_ID:-}" ]]; then
         local -r JOB_ID="${SLURM_ARRAY_JOB_ID}_${SLURM_ARRAY_TASK_ID}"
@@ -7,30 +7,69 @@ __xm_slurm_wait_for_children() {
     fi

     # If there are no child jobs we should error out
-
+    children=( $(jobs -p) )
+    {% raw %}
+    if [ ${#children[@]} -eq 0 ]; then
+    {% endraw %}
         echo "ERROR: no child jobs exist..." >&2
-        exit
+        exit 1
     fi

-
-
-
-
-
-    local -r JOB_EXIT_CODE="${?}"
-    set -e
+    {% if requeue_on_timeout %}
+    # Start a watchdog process to signal timeout.
+    sleep {{ requeue_timeout }} &
+    timeout_pid=$!
+    {% endif %}

-
-
+    {% raw %}
+    while [ ${#children[@]} -gt 0 ]; do
+    {% endraw %}
+        echo "INFO: Waiting for child processes to finish..."
+        {% if requeue_on_timeout %}
+        # Wait on either one of the child processes or the timeout process.
+        wait -n -p child_pid "${children[@]}" "${timeout_pid}"
+        {% else %}
+        wait -n -p child_pid "${children[@]}"
+        {% endif %}
+        local child_exit_code=$?
+
+        {% if requeue_on_timeout %}
+        # If the finished process is the watchdog, trigger the timeout handling.
+        if [ "${child_pid}" = "${timeout_pid}" ]; then
+            echo "INFO: Timeout of {{ requeue_timeout }} seconds reached. Killing remaining processes: ${children[*]}" >&2
+            kill "${children[@]}" 2>/dev/null || true
             scontrol requeue "${JOB_ID}"
             exit {{ requeue_exit_code }}
-        elif [ "${JOB_EXIT_CODE}" -ne 0 ]; then
-            echo "ERROR: Job ${job} exited with code ${JOB_EXIT_CODE}." >&2
-            exit "${JOB_EXIT_CODE}"
-        else
-            echo "INFO: Job ${job} exited successfully." >&2
         fi
+        {% endif %}
+
+        echo "INFO: Process ${child_pid} finished with exit code ${child_exit_code}."
+
+        # Handle the exit code of the finished process.
+        if [ "${child_exit_code}" -eq "{{ requeue_exit_code }}" ] && [ "${SLURM_RESTART_COUNT:-0}" -le "{{ requeue_max_attempts }}" ]; then
+            echo "INFO: Received requeue exit code {{ requeue_exit_code }} from process ${child_pid}. Requeuing Slurm job ${JOB_ID} after ${SLURM_RESTART_COUNT-0} restarts." >&2
+            scontrol requeue "${JOB_ID}"
+            exit {{ requeue_exit_code }}
+        elif [ "${child_exit_code}" -ne 0 ]; then
+            echo "ERROR: Process ${child_pid} exited with code ${child_exit_code}." >&2
+            exit "${child_exit_code}"
+        fi
+
+        # Remove the finished PID from the array in a concise way.
+        for i in "${!children[@]}"; do
+            if [ "${children[i]}" = "$child_pid" ]; then
+                unset 'children[i]'
+                break
+            fi
+        done
+
+        # Reindex the array.
+        children=( "${children[@]}" )
     done
+
+    {% if requeue_on_timeout %}
+    kill "$timeout_pid" 2>/dev/null || true
+    {% endif %}
 }

 __xm_slurm_wait_for_children
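(Not part of the diff.) The macro now takes four arguments and arms a `sleep`-based watchdog that races the job's child processes via `wait -n`. A minimal sketch of rendering the macro on its own to inspect the generated bash; the template search path is assumed to be the package's templates directory in a source checkout:

# Sketch only: render the monitor macro standalone with jinja2.
import datetime as dt
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("xm_slurm/templates/slurm"))
template = env.get_template("fragments/monitor.bash.j2")

script = template.module.monitor(
    requeue_max_attempts=5,
    requeue_exit_code=42,
    requeue_on_timeout=True,
    requeue_timeout=int(dt.timedelta(hours=3).total_seconds()),
)
print(script)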
xm_slurm/templates/slurm/job.bash.j2
CHANGED
@@ -73,7 +73,7 @@ echo "[INFO] Start timestamp: $(date)"

 {% block monitor -%}
 {% from 'fragments/monitor.bash.j2' import monitor %}
-{{ monitor(job.executor.requeue_max_attempts, job.executor.requeue_on_exit_code) }}
+{{ monitor(job.executor.requeue_max_attempts, job.executor.requeue_on_exit_code, job.executor.requeue_on_timeout, job.executor.requeue_timeout.seconds) }}
 {%- endblock monitor %}

xm_slurm/utils.py
CHANGED
@@ -1,3 +1,4 @@
+import asyncio
 import functools
 import logging
 import os
@@ -13,10 +14,39 @@ import typing as tp
 from xmanager import xm

 T = tp.TypeVar("T")
+P = tp.ParamSpec("P")

 logger = logging.getLogger(__name__)


+class CachedAwaitable(tp.Awaitable[T]):
+    def __init__(self, awaitable: tp.Awaitable[T]):
+        self.awaitable = awaitable
+        self.result: asyncio.Future[T] | None = None
+
+    def __await__(self):
+        if not self.result:
+            future = asyncio.get_event_loop().create_future()
+            self.result = future
+            try:
+                result = yield from self.awaitable.__await__()
+                future.set_result(result)
+            except Exception as e:
+                future.set_exception(e)
+
+        if not self.result.done():
+            yield from self.result
+        return self.result.result()
+
+
+def reawaitable(f: tp.Callable[P, tp.Awaitable[T]]) -> tp.Callable[P, CachedAwaitable[T]]:
+    @functools.wraps(f)
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> CachedAwaitable[T]:
+        return CachedAwaitable(f(*args, **kwargs))
+
+    return wrapper
+
+
 @functools.cache
 def find_project_root() -> pathlib.Path:
     launch_script_path: pathlib.Path | None = None
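(Not part of the diff.) The new `reawaitable` decorator wraps an async function's return value in a `CachedAwaitable`, so the same call can be awaited more than once; that is what makes it safe to stack under `functools.cache`, as done for `_state_dir` in execution.py. A minimal usage sketch, assuming the helper is importable as `xm_slurm.utils.reawaitable`:

# Sketch only: cache an async lookup and await it repeatedly.
import asyncio
import functools

from xm_slurm.utils import reawaitable

@functools.cache   # returns the same CachedAwaitable for equal arguments
@reawaitable       # makes that awaitable safe to await more than once
async def lookup(key: str) -> str:
    print(f"computing {key!r} once")
    await asyncio.sleep(0.1)
    return key.upper()

async def main() -> None:
    first = await lookup("state-dir")
    second = await lookup("state-dir")  # coroutine does not run again; cached result is reused
    assert first == second == "STATE-DIR"

asyncio.run(main())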
{xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/RECORD
CHANGED
@@ -1,12 +1,12 @@
 xm_slurm/__init__.py,sha256=WgRn9HDYa5H3sfIH-HZu33liBOh98jM4GqcR349RaSY,1086
 xm_slurm/batching.py,sha256=GbKBsNz9w8gIc2fHLZpslC0e4K9YUfLXFHmjduRRCfQ,4385
-xm_slurm/config.py,sha256=
+xm_slurm/config.py,sha256=PvdLG6fSNfbABbtwELbnytx85vWLPtyVfEJtoQsLf94,7084
 xm_slurm/console.py,sha256=UpMqeJ0C8i0pkue1AHnnyyX0bFJ9zZeJ7HBR6yhuA8A,54
 xm_slurm/constants.py,sha256=zefVtlFdflgSolie5g_rVxWV-Zpydxapchm3y0a2FDc,999
 xm_slurm/dependencies.py,sha256=-5gN_tpfs3dOA7H5_MIHO2ratb7F5Pm_yjkR5rZcgI8,6421
 xm_slurm/executables.py,sha256=fGmrFBl-258bMn6ip5adYeM7xxUHAeIbDN9zD2FDGtY,6373
-xm_slurm/execution.py,sha256=
-xm_slurm/executors.py,sha256=
+xm_slurm/execution.py,sha256=c0aV1h2tKQFyAGM6JLd16MWFgpRLKAbcutZz17xPUSw,31400
+xm_slurm/executors.py,sha256=bUgKcgtvf-nPGjcuHRzUAqD1r3_vwea_h-Y9MAB-Kqo,4887
 xm_slurm/experiment.py,sha256=94r0mhtUPUzw4eaUEz0kpsufC25wEGqlDhV4Fcr1ukY,39883
 xm_slurm/filesystem.py,sha256=4rKtq3t-KDgxJbSGt6JVyRJT_3lCN_vIKTcwKHpTo3I,4389
 xm_slurm/job_blocks.py,sha256=_F8CKCs5BQFj40a2-mjG71HfacvWoBXBDPDKEaKTbXc,616
@@ -15,12 +15,12 @@ xm_slurm/packageables.py,sha256=fPUvqF2IvJ2Hn6hodDdQwtx1Ze3sJ8U-BUbxDHauW-g,1239
 xm_slurm/resources.py,sha256=tET3TPOQ8nXYE_SxAs2fiHt9UKJsCLW1vFktJTH0xG4,5722
 xm_slurm/status.py,sha256=WTWiDHi-ZHtwHRnDP0cGa-27zTSm6LkA-GCKsN-zBgg,6916
 xm_slurm/types.py,sha256=TsVykDm-LazVkrjeJrTwCMs4Q8APKhy7BTk0yKIhFNg,805
-xm_slurm/utils.py,sha256=
+xm_slurm/utils.py,sha256=xtFvktaxr0z65sTdu6HhOVfyo0OAB9t-EYXWcYrQQEU,5958
 xm_slurm/api/__init__.py,sha256=cyao3LZ3uLftu1wIv1aN7Qvsl6gYzYpkxeehTHZ0fA8,1089
 xm_slurm/api/abc.py,sha256=-lS2OndnOuEiwNdr8ccQKkwMd1iDmKMmkBOSTvo5H5w,1816
 xm_slurm/api/models.py,sha256=_INVh0j-4-rRs0WASyg4fNB6NF1L1nUeGgQ6-XnbwsM,1610
 xm_slurm/api/sqlite/client.py,sha256=WykSIO7b14rRLy9qebbkiLKXy7EHU61jtoebLX17HMM,14124
-xm_slurm/api/web/client.py,sha256=
+xm_slurm/api/web/client.py,sha256=uO67Y7fnQ-w__Vm_A5BEuy7Qi8wQcWk3vIsBGEBkyfk,6261
 xm_slurm/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 xm_slurm/contrib/clusters/__init__.py,sha256=XFCVnkThiU3_8uA_tUgDByOBanXNHrxDvfmuptmQ2KE,2214
 xm_slurm/contrib/clusters/drac.py,sha256=ViLYerYBMSuZXnWVbz9RDIPPV7JA8BgBpgTfj1wPP28,5881
@@ -39,13 +39,13 @@ xm_slurm/templates/docker/uv.Dockerfile,sha256=L2UJMX2c8waMdrRhiqPytQe3pTBu6u5Pp
 xm_slurm/templates/slurm/entrypoint.bash.j2,sha256=MRdSVwgGrgQdpEhqfkP35IidgsblrtVXB1YWzvE9hkk,666
 xm_slurm/templates/slurm/job-array.bash.j2,sha256=smxmSSzBEUHm6MJF-nYPVVjK6CLKrb1fRxF_tfrzAX8,552
 xm_slurm/templates/slurm/job-group.bash.j2,sha256=Cp8YhNOxYqaOkl4MFjQlcaLMGZwdDh97m8OGT5RWbAo,1101
-xm_slurm/templates/slurm/job.bash.j2,sha256=
-xm_slurm/templates/slurm/fragments/monitor.bash.j2,sha256=
+xm_slurm/templates/slurm/job.bash.j2,sha256=pNKir1tkmRTGDiGxlQ3DkUaW9Zos_gdkkXJC_xX5Cxo,1985
+xm_slurm/templates/slurm/fragments/monitor.bash.j2,sha256=BJ1brSjhESOe9VX_OYaPyy9-qE3uiFlzxp8ZkFcTw8Y,2504
 xm_slurm/templates/slurm/fragments/proxy.bash.j2,sha256=VJLglZo-Nvx9R-qe3rHTxr07CylTQ6Z9NwBzvIpAZrA,814
 xm_slurm/templates/slurm/runtimes/apptainer.bash.j2,sha256=lE2EWVCK2O-n08RL4_MJYIikVTvODjcYKuv7Eh73Q2w,1932
 xm_slurm/templates/slurm/runtimes/podman.bash.j2,sha256=3j7K5eyXt_WhXK0EoMlxnhlmFVJ2JyxRKbsMRaDqzSs,1148
-xmanager_slurm-0.4.
-xmanager_slurm-0.4.
-xmanager_slurm-0.4.
-xmanager_slurm-0.4.
-xmanager_slurm-0.4.
+xmanager_slurm-0.4.9.dist-info/METADATA,sha256=WWPRzVrTsK5t8kD732EIejSlNgQ8KP01Ln7eM1Mj4e4,1042
+xmanager_slurm-0.4.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+xmanager_slurm-0.4.9.dist-info/entry_points.txt,sha256=_HLGmLgxuQLOPmF2gOFYDVq2HqtMVD_SzigHvUh8TCY,49
+xmanager_slurm-0.4.9.dist-info/licenses/LICENSE.md,sha256=IxstXr3MPHwTJ5jMrByHrQsR1ZAGQ2U_uz_4qzI_15Y,11756
+xmanager_slurm-0.4.9.dist-info/RECORD,,
{xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/WHEEL
File without changes

{xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/entry_points.txt
File without changes

{xmanager_slurm-0.4.7.dist-info → xmanager_slurm-0.4.9.dist-info}/licenses/LICENSE.md
File without changes