torch-atomic-save 0.1.0 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- torch_atomic_save-0.1.0/LICENSE +21 -0
- torch_atomic_save-0.1.0/PKG-INFO +52 -0
- torch_atomic_save-0.1.0/README.md +36 -0
- torch_atomic_save-0.1.0/pyproject.toml +22 -0
- torch_atomic_save-0.1.0/setup.cfg +4 -0
- torch_atomic_save-0.1.0/src/torch_atomic_save/__init__.py +3 -0
- torch_atomic_save-0.1.0/src/torch_atomic_save/manager.py +172 -0
- torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/PKG-INFO +52 -0
- torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/SOURCES.txt +11 -0
- torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/dependency_links.txt +1 -0
- torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/requires.txt +6 -0
- torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/top_level.txt +1 -0
- torch_atomic_save-0.1.0/tests/test_manager.py +548 -0
@@ -0,0 +1,21 @@ torch_atomic_save-0.1.0/LICENSE
MIT License

Copyright (c) 2026 Patrick Carnahan

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,52 @@ torch_atomic_save-0.1.0/PKG-INFO
Metadata-Version: 2.4
Name: torch_atomic_save
Version: 0.1.0
Summary: Atomic, asynchronous checkpoint saving for PyTorch in Slurm/Lustre environments
Author-email: Patrick Carnahan <pcarnah@uwo.ca>
License: MIT
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch>=2.0.0
Requires-Dist: numpy
Provides-Extra: dev
Requires-Dist: pytest; extra == "dev"
Requires-Dist: pytest-cov; extra == "dev"
Dynamic: license-file

# torch-atomic-save

[](https://github.com/pcarnah/torch-atomic-save/actions)
[](https://opensource.org/licenses/MIT)

An asynchronous, atomic checkpointing utility for PyTorch, optimized for Slurm and Lustre/NFS environments.

## Key Features
* **Atomic Moves:** Prevents corrupted checkpoints during Slurm preemption.
* **Non-Blocking:** Offloads I/O to a background thread pool.
* **Race Condition Protection:** Automatically clones tensors to CPU before background saving.
* **Cross-FS Support:** Handles moves between local SSD and network storage safely.

## Installation
```bash
pip install torch-atomic-save
```

## Usage
```python
from torch_atomic_save import SlurmAtomicManager

# Initialize the manager
manager = SlurmAtomicManager(max_workers=4)

# In your training loop:
if epoch % save_interval == 0:
    manager.save(model, "path/to/checkpoints/model.pt", tmp_dir="/scratch/user/tmp")

# Ensure all I/O is finished before exiting
manager.wait_for_all()
```
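
The manager can also be used as a context manager, and `install_slurm_handler` (importable from `torch_atomic_save.manager`, as the test suite does) registers a SIGTERM handler that flushes pending I/O on Slurm preemption. A minimal sketch; the model and paths below are placeholders:

```python
import torch

from torch_atomic_save import SlurmAtomicManager
from torch_atomic_save.manager import install_slurm_handler

model = torch.nn.Linear(10, 1)  # placeholder model

# shutdown(wait=True) runs automatically when the block exits
with SlurmAtomicManager(max_workers=4) as manager:
    # Flush pending saves if Slurm preempts the job with SIGTERM
    install_slurm_handler(manager)
    manager.save(model, "path/to/checkpoints/model.pt", tmp_dir="/scratch/user/tmp")
```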

## Attribution
If you use this software in your research, please cite it using the "Cite this repository" button or the provided CITATION.cff.
@@ -0,0 +1,36 @@ torch_atomic_save-0.1.0/README.md
# torch-atomic-save

[](https://github.com/pcarnah/torch-atomic-save/actions)
[](https://opensource.org/licenses/MIT)

An asynchronous, atomic checkpointing utility for PyTorch, optimized for Slurm and Lustre/NFS environments.

## Key Features
* **Atomic Moves:** Prevents corrupted checkpoints during Slurm preemption.
* **Non-Blocking:** Offloads I/O to a background thread pool.
* **Race Condition Protection:** Automatically clones tensors to CPU before background saving.
* **Cross-FS Support:** Handles moves between local SSD and network storage safely.

## Installation
```bash
pip install torch-atomic-save
```

## Usage
```python
from torch_atomic_save import SlurmAtomicManager

# Initialize the manager
manager = SlurmAtomicManager(max_workers=4)

# In your training loop:
if epoch % save_interval == 0:
    manager.save(model, "path/to/checkpoints/model.pt", tmp_dir="/scratch/user/tmp")

# Ensure all I/O is finished before exiting
manager.wait_for_all()
```
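
The manager can also be used as a context manager, and `install_slurm_handler` (importable from `torch_atomic_save.manager`, as the test suite does) registers a SIGTERM handler that flushes pending I/O on Slurm preemption. A minimal sketch; the model and paths below are placeholders:

```python
import torch

from torch_atomic_save import SlurmAtomicManager
from torch_atomic_save.manager import install_slurm_handler

model = torch.nn.Linear(10, 1)  # placeholder model

# shutdown(wait=True) runs automatically when the block exits
with SlurmAtomicManager(max_workers=4) as manager:
    # Flush pending saves if Slurm preempts the job with SIGTERM
    install_slurm_handler(manager)
    manager.save(model, "path/to/checkpoints/model.pt", tmp_dir="/scratch/user/tmp")
```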

## Attribution
If you use this software in your research, please cite it using the "Cite this repository" button or the provided CITATION.cff.
@@ -0,0 +1,22 @@ torch_atomic_save-0.1.0/pyproject.toml
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "torch_atomic_save"
version = "0.1.0"
description = "Atomic, asynchronous checkpoint saving for PyTorch in Slurm/Lustre environments"
readme = "README.md"
authors = [{ name = "Patrick Carnahan", email = "pcarnah@uwo.ca" }]
requires-python = ">=3.9"
license = {text = "MIT"}
dependencies = [
    "torch>=2.0.0",
    "numpy",
]

[project.optional-dependencies]
dev = ["pytest", "pytest-cov"]

[tool.setuptools.packages.find]
where = ["src"]
@@ -0,0 +1,172 @@ torch_atomic_save-0.1.0/src/torch_atomic_save/manager.py
import os
import shutil
import tempfile
import threading
import concurrent.futures
import logging
import signal
import torch
from typing import Any, Optional, Dict, Tuple

logger = logging.getLogger(__name__)


class SlurmAtomicManager:
    """
    Manages asynchronous, atomic checkpoint saving across different filesystems.
    Optimized for Slurm/Lustre environments.
    """

    def __init__(self, max_workers: int = 4):
        self._executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=max_workers,
            thread_name_prefix="SlurmSaveWorker"
        )
        # path -> (future, cancellation_event)
        self._registry: Dict[str, Tuple[concurrent.futures.Future, threading.Event]] = {}
        self._lock = threading.Lock()
        self._shutdown_event = threading.Event()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    def wait_for_all(self, timeout: Optional[float] = None):
        """Blocks until all currently pending saves are complete."""
        with self._lock:
            futures = [f for f, _ in self._registry.values()]
        # Wait outside the lock so done-callbacks can clean up the registry
        concurrent.futures.wait(futures, timeout=timeout)

    def save(self, model: torch.nn.Module, path: str, tmp_dir: Optional[str] = None, half_prec: bool = False):
        """
        Main entry point for saving models.
        Moves tensors to CPU and clones them to prevent race conditions.
        """
        path = os.path.abspath(path)

        # Snapshot weights: move to CPU and clone, so later in-place updates
        # to the live model cannot affect the pending write
        if half_prec:
            state_dict = {k: v.half().cpu().clone() for k, v in model.state_dict().items()}
        else:
            state_dict = {k: v.cpu().clone() for k, v in model.state_dict().items()}

        if tmp_dir is None:
            # Synchronous fallback
            os.makedirs(os.path.dirname(path), exist_ok=True)
            torch.save(state_dict, path)
        else:
            self._atomic_save(state_dict, path, tmp_dir)

    def _atomic_save(self, obj: Any, path: str, tmp_dir: str) -> None:
        """
        Saves an object to a staging area, then schedules an atomic cross-FS move.
        """
        if self._shutdown_event.is_set():
            logger.warning(f"Manager is shutting down. Ignoring save request for {path}")
            return

        path = os.path.abspath(path)
        os.makedirs(os.path.dirname(path), exist_ok=True)

        if not os.path.isdir(tmp_dir):
            raise FileNotFoundError(f"Staging directory missing: {tmp_dir}")

        # 1. Set up the staging file
        fd, tmp_src = tempfile.mkstemp(dir=tmp_dir, suffix=".pt.staging")
        os.close(fd)

        # 2. Registry update & cancellation of any in-flight save to the same path
        cancelled = threading.Event()
        with self._lock:
            if path in self._registry:
                prev_f, prev_ev = self._registry[path]
                prev_ev.set()  # Signal background thread to skip the move
                prev_f.cancel()

            future = self._executor.submit(
                self._save_and_copy, obj, tmp_src, path, cancelled
            )
            self._registry[path] = (future, cancelled)

        # Register the cleanup callback outside the lock: if the future is
        # already done, add_done_callback runs it synchronously in this
        # thread, and _cleanup_registry needs to re-acquire the lock.
        future.add_done_callback(lambda f: self._cleanup_registry(path, f))

    def _cleanup_registry(self, path: str, future: concurrent.futures.Future) -> None:
        """Removes the future from the registry and logs errors."""
        with self._lock:
            current = self._registry.get(path)
            if current and current[0] is future:
                self._registry.pop(path, None)

        if not future.cancelled():
            exc = future.exception()
            if exc:
                logger.error(f"Async save to {path} failed: {exc}", exc_info=exc)

    def _save_and_copy(self, obj, tmp_src, dst, cancelled):
        """Handles both the initial write and the atomic move."""
        try:
            if not cancelled.is_set():
                torch.save(obj, tmp_src)

            self._atomic_copy_and_cleanup(tmp_src, dst, cancelled)
        except Exception:
            # Clean up tmp_src if torch.save failed
            if os.path.exists(tmp_src):
                os.unlink(tmp_src)
            raise

    def _atomic_copy_and_cleanup(self, src: str, dst: str, cancelled: threading.Event) -> None:
        """The background worker logic."""
        dst_dir = os.path.dirname(os.path.abspath(dst))
        try:
            # Same device (st_dev): a plain rename is already atomic
            if _same_filesystem(src, dst):
                if not cancelled.is_set():
                    os.replace(src, dst)
            else:
                # Cross-FS: copy into a sibling tmp in the destination's
                # parent, then rename within the destination filesystem
                fd, sibling_tmp = tempfile.mkstemp(dir=dst_dir, suffix=".atomic_tmp")
                os.close(fd)
                try:
                    shutil.copy2(src, sibling_tmp)
                    if not cancelled.is_set():
                        os.replace(sibling_tmp, dst)
                    else:
                        os.unlink(sibling_tmp)
                except Exception:
                    try:
                        os.unlink(sibling_tmp)
                    except OSError:
                        pass
                    raise
        finally:
            try:
                os.unlink(src)
            except OSError:
                pass

    def shutdown(self, wait: bool = True):
        """Cleanly shut down the executor."""
        self._shutdown_event.set()
        self._executor.shutdown(wait=wait)


def install_slurm_handler(manager: SlurmAtomicManager):
    """
    Installs a handler for SIGTERM (Slurm preemption) to ensure
    the manager flushes pending I/O before the process exits.
    """

    def handle_sigterm(signum, frame):
        logger.info("Received SIGTERM/Preemption signal. Flushing IO...")
        manager.shutdown(wait=True)
        # Optional: re-raise or exit
        # os._exit(0)

    signal.signal(signal.SIGTERM, handle_sigterm)


def _same_filesystem(path_a: str, path_b: str) -> bool:
    stat_b_target = path_b if os.path.exists(path_b) else os.path.dirname(os.path.abspath(path_b))
    return os.stat(path_a).st_dev == os.stat(stat_b_target).st_dev
@@ -0,0 +1,52 @@ torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/PKG-INFO
Metadata-Version: 2.4
Name: torch_atomic_save
Version: 0.1.0
Summary: Atomic, asynchronous checkpoint saving for PyTorch in Slurm/Lustre environments
Author-email: Patrick Carnahan <pcarnah@uwo.ca>
License: MIT
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch>=2.0.0
Requires-Dist: numpy
Provides-Extra: dev
Requires-Dist: pytest; extra == "dev"
Requires-Dist: pytest-cov; extra == "dev"
Dynamic: license-file

# torch-atomic-save

[](https://github.com/pcarnah/torch-atomic-save/actions)
[](https://opensource.org/licenses/MIT)

An asynchronous, atomic checkpointing utility for PyTorch, optimized for Slurm and Lustre/NFS environments.

## Key Features
* **Atomic Moves:** Prevents corrupted checkpoints during Slurm preemption.
* **Non-Blocking:** Offloads I/O to a background thread pool.
* **Race Condition Protection:** Automatically clones tensors to CPU before background saving.
* **Cross-FS Support:** Handles moves between local SSD and network storage safely.

## Installation
```bash
pip install torch-atomic-save
```

## Usage
```python
from torch_atomic_save import SlurmAtomicManager

# Initialize the manager
manager = SlurmAtomicManager(max_workers=4)

# In your training loop:
if epoch % save_interval == 0:
    manager.save(model, "path/to/checkpoints/model.pt", tmp_dir="/scratch/user/tmp")

# Ensure all I/O is finished before exiting
manager.wait_for_all()
```
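
The manager can also be used as a context manager, and `install_slurm_handler` (importable from `torch_atomic_save.manager`, as the test suite does) registers a SIGTERM handler that flushes pending I/O on Slurm preemption. A minimal sketch; the model and paths below are placeholders:

```python
import torch

from torch_atomic_save import SlurmAtomicManager
from torch_atomic_save.manager import install_slurm_handler

model = torch.nn.Linear(10, 1)  # placeholder model

# shutdown(wait=True) runs automatically when the block exits
with SlurmAtomicManager(max_workers=4) as manager:
    # Flush pending saves if Slurm preempts the job with SIGTERM
    install_slurm_handler(manager)
    manager.save(model, "path/to/checkpoints/model.pt", tmp_dir="/scratch/user/tmp")
```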

## Attribution
If you use this software in your research, please cite it using the "Cite this repository" button or the provided CITATION.cff.
@@ -0,0 +1,11 @@ torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/SOURCES.txt
LICENSE
README.md
pyproject.toml
src/torch_atomic_save/__init__.py
src/torch_atomic_save/manager.py
src/torch_atomic_save.egg-info/PKG-INFO
src/torch_atomic_save.egg-info/SOURCES.txt
src/torch_atomic_save.egg-info/dependency_links.txt
src/torch_atomic_save.egg-info/requires.txt
src/torch_atomic_save.egg-info/top_level.txt
tests/test_manager.py
@@ -0,0 +1 @@ torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/dependency_links.txt

@@ -0,0 +1 @@ torch_atomic_save-0.1.0/src/torch_atomic_save.egg-info/top_level.txt
torch_atomic_save
@@ -0,0 +1,548 @@ torch_atomic_save-0.1.0/tests/test_manager.py
"""
Comprehensive tests for SlurmAtomicManager.
Run with: pytest test_manager.py -v
"""

import logging
import os
import shutil
import tempfile
import threading
import time
import signal
from unittest.mock import MagicMock, patch

import pytest
import torch

# Adjust import to your actual package path
from torch_atomic_save.manager import SlurmAtomicManager, install_slurm_handler


# ---------------------------------------------------------------------------
# Helpers & Fixtures
# ---------------------------------------------------------------------------

def _make_model():
    return torch.nn.Linear(10, 1)


@pytest.fixture()
def manager():
    """Provides a fresh manager for every test, ensuring a clean thread pool."""
    mgr = SlurmAtomicManager(max_workers=4)
    yield mgr
    mgr.shutdown(wait=True)


@pytest.fixture()
def tmp(tmp_path):
    src_dir = tmp_path / "staging"
    dst_dir = tmp_path / "checkpoints"
    src_dir.mkdir()
    dst_dir.mkdir()
    return src_dir, dst_dir


# ===========================================================================
# 1. Basic Correctness & Serialization
# ===========================================================================

class TestBasicSave:
    def test_model_round_trip(self, manager, tmp):
        src_dir, dst_dir = tmp
        model = _make_model()
        dst = dst_dir / "ckpt.pt"

        manager.save(model, str(dst), str(src_dir))

        path_key = os.path.abspath(str(dst))
        manager._registry[path_key][0].result(timeout=10)

        assert dst.exists()
        loaded = torch.load(str(dst))
        # Check that weights match
        for k, v in model.state_dict().items():
            assert torch.equal(v.cpu(), loaded[k])

    def test_half_precision_save(self, manager, tmp):
        src_dir, dst_dir = tmp
        model = _make_model()
        dst = dst_dir / "ckpt_half.pt"

        manager.save(model, str(dst), str(src_dir), half_prec=True)

        path_key = os.path.abspath(str(dst))
        manager._registry[path_key][0].result(timeout=10)

        loaded = torch.load(str(dst))
        assert loaded['weight'].dtype == torch.float16

    def test_cpu_cloning_race_protection(self, manager, tmp):
        """Verify that modifying the model immediately after save() doesn't corrupt the checkpoint."""
        src_dir, dst_dir = tmp
        model = _make_model()
        dst = dst_dir / "race_test.pt"

        # Initialize with a known value
        with torch.no_grad():
            model.weight.fill_(1.0)

        # Trigger save
        manager.save(model, str(dst), str(src_dir))

        # IMMEDIATELY change weights in the main thread
        with torch.no_grad():
            model.weight.fill_(99.0)

        path_key = os.path.abspath(str(dst))
        manager._registry[path_key][0].result(timeout=10)

        loaded = torch.load(str(dst))
        # Loaded weights should be 1.0, NOT 99.0
        assert torch.all(loaded['weight'] == 1.0)

    def test_synchronous_fallback(self, manager, tmp_path):
        """Synchronous fallback when tmp_dir is None."""
        model = torch.nn.Linear(1, 1)
        dst = tmp_path / "sync_dir" / "model.pt"

        # Passing tmp_dir=None triggers the synchronous torch.save path
        manager.save(model, str(dst), tmp_dir=None)

        assert dst.exists()
        assert torch.load(str(dst))['weight'].shape == model.state_dict()['weight'].shape


# ===========================================================================
# 2. Filesystem & Atomicity
# ===========================================================================

class TestFilesystemLogic:
    def test_same_fs_uses_replace_not_copy(self, manager, tmp):
        """On the same filesystem os.replace is called; shutil.copy2 must NOT be."""
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"

        with patch("shutil.copy2") as mock_copy, \
                patch("os.replace", wraps=os.replace) as mock_replace:

            assert os.stat(src_dir).st_dev == os.stat(dst_dir).st_dev, \
                "Both dirs should be on the same FS for this test"

            manager.save(_make_model(), str(dst), str(src_dir))
            manager._registry[os.path.abspath(str(dst))][0].result(timeout=10)

            mock_copy.assert_not_called()
            mock_replace.assert_called_once()

    def test_cross_fs_uses_sibling_copy(self, manager, tmp):
        """When _same_filesystem returns False the sibling-copy path is taken."""
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"

        with patch("torch_atomic_save.manager._same_filesystem", return_value=False), \
                patch("shutil.copy2", wraps=shutil.copy2) as mock_copy, \
                patch("os.replace", wraps=os.replace) as mock_replace:

            manager.save(_make_model(), str(dst), str(src_dir))
            manager._registry[os.path.abspath(str(dst))][0].result(timeout=10)

            mock_copy.assert_called_once()
            mock_replace.assert_called_once()

    def test_cross_fs_sibling_tmp_cleaned_on_success(self, manager, tmp):
        """The .atomic_tmp sibling file must not linger after a successful copy."""
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"

        with patch("torch_atomic_save.manager._same_filesystem", return_value=False):
            manager.save(_make_model(), str(dst), str(src_dir))
            manager._registry[os.path.abspath(str(dst))][0].result(timeout=10)

        leftovers = list(dst_dir.glob("*.atomic_tmp"))
        assert leftovers == [], f"Leftover sibling tmp files: {leftovers}"

    def test_staging_file_cleaned_on_success(self, manager, tmp):
        """The .pt.staging file in tmp_dir must be removed after the copy."""
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"
        manager.save(_make_model(), str(dst), str(src_dir))
        manager._registry[os.path.abspath(str(dst))][0].result(timeout=10)

        leftovers = list(src_dir.glob("*.pt.staging"))
        assert leftovers == [], f"Leftover staging files: {leftovers}"


# ===========================================================================
# 3. Directory handling
# ===========================================================================

class TestDirectoryHandling:

    def test_missing_tmp_dir_raises(self, manager, tmp_path):
        """Verify that a non-existent staging directory raises FileNotFoundError immediately."""
        model = _make_model()
        dst = tmp_path / "ckpt.pt"
        staging = tmp_path / "nonexistent_staging"

        with pytest.raises(FileNotFoundError, match="Staging directory missing"):
            manager.save(model, str(dst), str(staging))

    def test_missing_dst_dir_is_created(self, manager, tmp_path):
        """Verify that deeply nested destination directories are created automatically."""
        src_dir = tmp_path / "staging"
        src_dir.mkdir()
        # Nested path that doesn't exist yet
        dst = tmp_path / "deep" / "nested" / "ckpt.pt"

        manager.save(_make_model(), str(dst), str(src_dir))

        # Wait for the background task to finish
        path_key = os.path.abspath(str(dst))
        manager._registry[path_key][0].result(timeout=10)

        assert dst.exists()

    def test_existing_dst_dir_is_fine(self, manager, tmp):
        """Verify that saving to a directory that already exists does not cause errors."""
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"

        # Should not raise even if dst_dir already exists from the fixture
        manager.save(_make_model(), str(dst), str(src_dir))

        path_key = os.path.abspath(str(dst))
        manager._registry[path_key][0].result(timeout=10)
        assert dst.exists()


# ===========================================================================
# 4. Cancellation & Registry
# ===========================================================================

class TestCancellation:

    def test_cancelled_copy_does_not_call_replace(self, manager, tmp):
        """A copy whose Event is pre-set must skip os.replace entirely."""
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"
        cancelled = threading.Event()
        cancelled.set()

        # Create a dummy staging file manually for the low-level call
        fd, tmp_src = tempfile.mkstemp(dir=str(src_dir), suffix=".pt.staging")
        os.close(fd)
        torch.save(_make_model().state_dict(), tmp_src)

        with patch("os.replace") as mock_replace:
            manager._atomic_copy_and_cleanup(tmp_src, str(dst), cancelled)
            mock_replace.assert_not_called()

        assert not dst.exists()

    def test_cancelled_sibling_tmp_is_cleaned_up(self, manager, tmp):
        """Even when cancelled after copy2, the sibling_tmp must be removed."""
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"
        cancelled = threading.Event()
        cancelled.set()

        fd, tmp_src = tempfile.mkstemp(dir=str(src_dir), suffix=".pt.staging")
        os.close(fd)
        torch.save(_make_model().state_dict(), tmp_src)

        with patch("torch_atomic_save.manager._same_filesystem", return_value=False):
            manager._atomic_copy_and_cleanup(tmp_src, str(dst), cancelled)

        leftovers = list(dst_dir.glob("*.atomic_tmp"))
        assert leftovers == [], "Sibling tmp not cleaned after cancellation"

    def test_registry_cleanup_on_done(self, manager, tmp):
        src_dir, dst_dir = tmp
        dst = str(dst_dir / "clean_registry.pt")

        manager.save(_make_model(), dst, str(src_dir))
        path_key = os.path.abspath(dst)

        future, _ = manager._registry[path_key]
        future.result(timeout=5)

        # The done callback may run a moment after result() returns,
        # so poll briefly before asserting the registry is empty
        deadline = time.time() + 5
        while path_key in manager._registry and time.time() < deadline:
            time.sleep(0.01)
        assert path_key not in manager._registry

    def test_stalled_copy_respects_cancellation(self, manager, tmp):
        src_dir, dst_dir = tmp
        dst = dst_dir / "stalled.pt"
        path_key = os.path.abspath(str(dst))
        barrier = threading.Barrier(2)

        original_copy2 = shutil.copy2
        stalled_once = False
        lock = threading.Lock()

        def stalling_copy2(src, dst_sibling):
            nonlocal stalled_once
            original_copy2(src, dst_sibling)

            should_stall = False
            with lock:
                if not stalled_once:
                    stalled_once = True
                    should_stall = True

            if should_stall:
                # Worker 1 waits here
                barrier.wait(timeout=5)
            # Worker 2 (and any others) bypass the barrier and finish immediately

        with patch("torch_atomic_save.manager._same_filesystem", return_value=False), \
                patch("shutil.copy2", side_effect=stalling_copy2), \
                patch("os.replace", wraps=os.replace) as mock_replace:
            # 1. Submit the first save (will stall at the barrier)
            manager.save(_make_model(), str(dst), str(src_dir))
            _, first_event = manager._registry[path_key]

            # 2. Submit the second save (sets first_event to cancelled)
            manager.save(_make_model(), str(dst), str(src_dir))
            assert first_event.is_set()

            # 3. Release worker 1
            barrier.wait(timeout=5)

            # Wait for the manager to finish
            manager._registry[path_key][0].result(timeout=10)

            # mock_replace should only be called for the second save,
            # because the first one should have seen the cancellation event
            assert mock_replace.call_count == 1


# ===========================================================================
# 5. Error handling and logging
# ===========================================================================

class TestErrorHandling:

    def test_copy_failure_does_not_raise_in_main_thread(self, manager, tmp, caplog):
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"
        model = _make_model()

        # Patch via the manager's package path
        with patch("torch_atomic_save.manager.shutil.copy2",
                   side_effect=OSError("Lustre exploded")), \
                patch("torch_atomic_save.manager._same_filesystem", return_value=False), \
                caplog.at_level(logging.ERROR):

            manager.save(model, str(dst), str(src_dir))
            path_key = os.path.abspath(str(dst))

            # Fetching the result surfaces the exception in the future,
            # but it must not crash the training loop
            try:
                manager._registry[path_key][0].result(timeout=10)
            except Exception:
                pass

            # Verify the background callback logged the error
            start = time.time()
            logged = False
            while time.time() - start < 5:
                if any("Lustre exploded" in r.message for r in caplog.records):
                    logged = True
                    break
                time.sleep(0.1)

            assert logged, "Error should be logged"

    def test_replace_failure_cleans_sibling_tmp(self, manager, tmp):
        """If os.replace fails the sibling_tmp should not be left behind."""
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"

        cancelled = threading.Event()
        fd, tmp_src = tempfile.mkstemp(dir=str(src_dir), suffix=".pt.staging")
        os.close(fd)
        torch.save(_make_model().state_dict(), tmp_src)

        with patch("torch_atomic_save.manager._same_filesystem", return_value=False), \
                patch("os.replace", side_effect=OSError("replace failed")):
            with pytest.raises(OSError):
                manager._atomic_copy_and_cleanup(tmp_src, str(dst), cancelled)

        leftovers = list(dst_dir.glob("*.atomic_tmp"))
        assert leftovers == [], "sibling_tmp must be cleaned up even on replace failure"

    def test_staging_file_cleaned_even_on_copy_failure(self, manager, tmp):
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"

        cancelled = threading.Event()
        fd, tmp_src = tempfile.mkstemp(dir=str(src_dir), suffix=".pt.staging")
        os.close(fd)
        torch.save(_make_model().state_dict(), tmp_src)

        with patch("torch_atomic_save.manager._same_filesystem", return_value=False), \
                patch("torch_atomic_save.manager.shutil.copy2", side_effect=OSError("disk full")):
            with pytest.raises(OSError):
                manager._atomic_copy_and_cleanup(tmp_src, str(dst), cancelled)

        assert not os.path.exists(tmp_src), "Staging file must be cleaned up after failure"


# ===========================================================================
# 6. Multiple independent paths
# ===========================================================================

class TestMultiplePaths:

    def test_independent_paths_tracked_separately(self, manager, tmp):
        src_dir, dst_dir = tmp
        dst_a = dst_dir / "model_a.pt"
        dst_b = dst_dir / "model_b.pt"

        # Use a barrier to force the background threads to wait.
        # 3 parties: Worker A, Worker B, and this test thread
        barrier = threading.Barrier(3)

        # Capture the real torch.save before patching, otherwise the
        # side_effect would recurse into the patched mock
        original_save = torch.save

        def blocked_save(*args, **kwargs):
            barrier.wait(timeout=5)
            # Use the real torch.save logic after the barrier
            original_save(*args, **kwargs)

        # Patch torch.save to hold the workers in the registry
        with patch("torch_atomic_save.manager.torch.save", side_effect=blocked_save):
            manager.save(_make_model(), str(dst_a), str(src_dir))
            manager.save(_make_model(), str(dst_b), str(src_dir))

            key_a = os.path.abspath(str(dst_a))
            key_b = os.path.abspath(str(dst_b))

            # 1. Verify they are both in the registry while "blocked"
            assert key_a in manager._registry
            assert key_b in manager._registry
            assert manager._registry[key_a] is not manager._registry[key_b]

            # 2. Release the workers
            barrier.wait(timeout=5)

            # 3. Now wait for completion normally
            manager.wait_for_all(timeout=10)

        assert dst_a.exists()
        assert dst_b.exists()

    def test_cancel_does_not_affect_other_paths(self, manager, tmp):
        src_dir, dst_dir = tmp
        dst_a = dst_dir / "model_a.pt"
        dst_b = dst_dir / "model_b.pt"

        # Save to B and wait
        manager.save(_make_model(), str(dst_b), str(src_dir))
        manager._registry[os.path.abspath(str(dst_b))][0].result(timeout=10)

        # Trigger a replacement save on A
        manager.save(_make_model(), str(dst_a), str(src_dir))
        manager.save(_make_model(), str(dst_a), str(src_dir))

        # Ensure B is unaffected and still exists
        assert dst_b.exists()


# ===========================================================================
# 7. Thread pool
# ===========================================================================

class TestThreadPool:

    def test_executor_is_reused_across_calls(self, manager, tmp):
        src_dir, dst_dir = tmp
        # Since the executor is instance-bound, verify it is the same
        # object across multiple .save calls on the same manager instance
        e1 = manager._executor
        manager.save(_make_model(), str(dst_dir / "a.pt"), str(src_dir))
        e2 = manager._executor
        assert e1 is e2, "Executor should be reused by the manager instance"

    def test_many_sequential_saves_all_land(self, manager, tmp):
        src_dir, dst_dir = tmp
        dst = dst_dir / "ckpt.pt"
        n = 10

        # Use a model with a single weight we can track
        model = _make_model()

        for i in range(1, n + 1):
            with torch.no_grad():
                model.weight.fill_(float(i))
            manager.save(model, str(dst), str(src_dir))

        # Wait for whichever future is current in the registry
        path_key = os.path.abspath(str(dst))
        manager._registry[path_key][0].result(timeout=30)

        loaded = torch.load(str(dst))
        # Check that the weight matches the final iteration
        assert torch.all(loaded['weight'] == float(n))

    def test_context_manager_usage(self, tmp):
        """__enter__ and __exit__ logic."""
        with SlurmAtomicManager(max_workers=1) as manager:
            assert manager._executor is not None

        # Verify the executor is shut down after context exit
        assert manager._executor._shutdown is True

    def test_shutdown_ignores_new_saves(self, manager, tmp, caplog):
        """Saves submitted after shutdown are ignored with a warning."""
        src_dir, dst_dir = tmp
        manager.shutdown(wait=True)

        with caplog.at_level(logging.WARNING):
            manager.save(torch.nn.Linear(1, 1), str(dst_dir / "late.pt"), str(src_dir))

        assert "Manager is shutting down" in caplog.text

    def test_cleanup_registry_safety(self, manager, tmp):
        """Ensure cleanup handles missing paths gracefully."""
        # Manually call private cleanup on a non-existent path to hit the 'if current' check
        future = MagicMock()
        manager._cleanup_registry("non_existent_path", future)
        # Should not raise any errors


# ===========================================================================
# 8. Slurm Signals
# ===========================================================================

class TestSignals:
    def test_sigterm_handler_logic(self, manager):
        """Verifies handler logic without killing the process (and the coverage report)."""
        install_slurm_handler(manager)

        # Retrieve the function actually registered with the OS
        handler_func = signal.getsignal(signal.SIGTERM)

        with patch.object(manager, 'shutdown') as mock_shutdown:
            # Manually trigger the handler to exercise its logic
            handler_func(signal.SIGTERM, None)

            # Confirms the handler does the right thing when the signal hits
            mock_shutdown.assert_called_once_with(wait=True)

    def test_sigterm_handler_manual_trigger(self, manager, caplog):
        """The SIGTERM handler must log the preemption and flush via shutdown()."""
        install_slurm_handler(manager)
        handler = signal.getsignal(signal.SIGTERM)

        with caplog.at_level(logging.INFO), patch.object(manager, 'shutdown') as mock_shutdown:
            # Simulate signal arrival
            handler(signal.SIGTERM, None)

        assert "Received SIGTERM" in caplog.text
        mock_shutdown.assert_called_once_with(wait=True)