gpu-memory-guard 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Dmytro Romanov
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,168 @@
+ Metadata-Version: 2.4
+ Name: gpu-memory-guard
+ Version: 0.1.0
+ Summary: CLI tool to check GPU VRAM before loading AI models
+ Author-email: Dmytro Romanov <casteldazur@gmail.com>
+ License: MIT
+ Project-URL: Homepage, https://github.com/CastelDazur/gpu-memory-guard
+ Project-URL: Repository, https://github.com/CastelDazur/gpu-memory-guard
+ Project-URL: Issues, https://github.com/CastelDazur/gpu-memory-guard/issues
+ Keywords: gpu,vram,memory,ai,llm,cuda,nvidia,oom
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: System :: Hardware
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Provides-Extra: pynvml
+ Requires-Dist: pynvml>=11.0.0; extra == "pynvml"
+ Dynamic: license-file
+
+ # GPU Memory Guard
+
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
+ [![GitHub stars](https://img.shields.io/github/stars/CastelDazur/gpu-memory-guard?style=social)](https://github.com/CastelDazur/gpu-memory-guard/stargazers)
+
+ A CLI utility that checks available GPU VRAM before you load AI models. Prevents OOM crashes that force a full system reboot.
+
+ ## Why?
+
+ If you run local inference on consumer GPUs, you know the pain:
+
+ | Without gpu-memory-guard | With gpu-memory-guard |
+ |---|---|
+ | Load 70B model on 24GB card | Check VRAM **before** loading |
+ | System freezes, GPU hangs | Get a clear warning in terminal |
+ | Force reboot, lose unsaved work | Pick a smaller model or free memory |
+ | Repeat next week | Zero OOM crashes |
+
+ One command saves you from constant reboots.
+
+ ## Quick Start
+
+ ```bash
+ git clone https://github.com/CastelDazur/gpu-memory-guard.git
+ cd gpu-memory-guard
+ pip install -e .
+ ```
+
+ ```bash
+ # Check current GPU status
+ gpu-guard
+
+ # Check if an 18GB model fits with 2GB safety buffer
+ gpu-guard --model-size 18 --buffer 2
+ ```
+
+ **Example output:**
+
+ ```
+ GPU 0: NVIDIA GeForce RTX 5090
+ Total: 32.00 GB
+ Used: 4.12 GB
+ Available: 27.88 GB
+
+ Model size: 18.00 GB (buffer: 2.00 GB)
+ Status: OK - model fits with 7.88 GB to spare
+ ```
+
+ ## Installation
+
+ ### From source (recommended)
+
+ ```bash
+ git clone https://github.com/CastelDazur/gpu-memory-guard.git
+ cd gpu-memory-guard
+ pip install -e .
+ ```
+
+ ### Requirements
+
+ - Python 3.8+
+ - NVIDIA GPU with `nvidia-smi` installed, OR
+ - `pynvml` Python package (`pip install pynvml`)
+
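Either GPU backend above is optional at install time, so it is worth confirming that one of them actually works on your machine. In the shipped module, `get_gpu_info()` tries `pynvml` first, falls back to `nvidia-smi`, and returns `None` when neither is usable. A minimal check, offered only as a sketch (not part of the package's own documentation):

```python
# Sketch: confirm gpu-memory-guard can detect a GPU on this machine.
# get_gpu_info() returns None when neither pynvml nor nvidia-smi is available.
from gpu_guard import get_gpu_info

gpus = get_gpu_info()
if gpus is None:
    print("No backend found: install pynvml or put nvidia-smi on PATH")
else:
    for gpu in gpus:
        print(f"GPU {gpu.device_id}: {gpu.name} ({gpu.total_memory_gb:.0f} GB total)")
```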
+ ## Usage
+
+ ### CLI
+
+ ```bash
+ # Basic VRAM check
+ gpu-guard
+
+ # Check if a model fits (size in GB)
+ gpu-guard --model-size 13
+
+ # Custom safety buffer (default: 0.5 GB)
+ gpu-guard --model-size 18 --buffer 2
+
+ # JSON output for scripting
+ gpu-guard --model-size 13 --json
+
+ # Quiet mode: exit code only (0 = fits, 1 = doesn't)
+ gpu-guard --model-size 7 --quiet
+ ```
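For automation beyond shell scripts, the `--json` report can be consumed directly. The field names used below (`can_fit`, `total_available_gb`, `total_required_gb`) come from the tool's `format_json_output()`, and exit code 2 is what `main()` uses when no GPU can be detected; the snippet itself is only an illustrative sketch:

```python
# Sketch: call the gpu-guard CLI from Python and act on its JSON report.
import json
import subprocess

proc = subprocess.run(
    ["gpu-guard", "--model-size", "13", "--buffer", "2", "--json"],
    capture_output=True,
    text=True,
)
if proc.returncode == 2:  # exit code 2 = GPU detection failed
    raise RuntimeError(proc.stderr.strip() or "GPU detection failed")

report = json.loads(proc.stdout)
print(f"Available: {report['total_available_gb']:.2f} GB")
if not report["can_fit"]:
    print(f"Need {report['total_required_gb']:.2f} GB total; pick a smaller model")
```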
+
+ ### As a Python library
+
+ ```python
+ from gpu_guard import check_vram, can_load_model, get_gpu_info
+
+ # Check current VRAM (returns None if no NVIDIA GPU is detected)
+ gpu_info = get_gpu_info() or []
+ for gpu in gpu_info:
+     print(f"GPU {gpu.device_id}: {gpu.available_memory_gb:.2f}GB available")
+
+ # Check if a model fits: check_vram returns a (fits, message) tuple,
+ # can_load_model returns just the boolean
+ fits, message = check_vram(model_size_gb=13.0, buffer_gb=2.0)
+ if fits:
+     print("Safe to load")
+ else:
+     print(message)
+ ```
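A typical integration is to gate the expensive load call on this check. The sketch below is not from the package: `load_model()` stands in for whatever loader your framework provides, and only `can_load_model` / `check_vram` come from `gpu_guard`:

```python
# Sketch: refuse to start loading when the model will not fit in VRAM.
import sys

from gpu_guard import can_load_model, check_vram


def load_model(name: str) -> object:
    # Placeholder for your framework's loader (transformers, llama.cpp, ...).
    print(f"loading {name} ...")
    return object()


MODEL_SIZE_GB = 13.0  # approximate weight size of the model you plan to load
BUFFER_GB = 2.0       # headroom for KV cache, activations, CUDA context

if not can_load_model(model_size_gb=MODEL_SIZE_GB, buffer_gb=BUFFER_GB):
    _, message = check_vram(MODEL_SIZE_GB, BUFFER_GB)
    sys.exit(f"Refusing to load: {message}")

model = load_model("llama-13b")
```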
+
+ ### Scripting example
+
+ ```bash
+ # Pre-check before launching inference
+ if gpu-guard --model-size 13 --quiet; then
+     python run_inference.py --model llama-13b
+ else
+     echo "Not enough VRAM, switching to 7B model"
+     python run_inference.py --model llama-7b
+ fi
+ ```
+
+ ## Common model sizes (approximate VRAM)
+
+ | Model | FP16 | Q4 (GGUF) |
+ |---|---|---|
+ | 7B params | ~14 GB | ~4 GB |
+ | 13B params | ~26 GB | ~7 GB |
+ | 33B params | ~66 GB | ~18 GB |
+ | 70B params | ~140 GB | ~35 GB |
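These figures follow roughly from parameter count times bytes per weight: about 2 bytes per parameter at FP16 and around 4-5 bits per parameter for Q4 GGUF, with KV cache and runtime overhead on top. A back-of-the-envelope estimator, offered only as a sketch (the bits-per-weight values are assumptions, not measurements):

```python
# Sketch: rough VRAM estimate from parameter count and bits per weight.
def estimate_weights_gb(params_billion: float, bits_per_weight: float) -> float:
    """Weight memory only; KV cache, activations and CUDA context are extra."""
    # billions of params * bits / 8 gives gigabytes (decimal GB) directly
    return params_billion * bits_per_weight / 8


# FP16 is 16 bits per weight; Q4 GGUF ends up around 4.5 bits effective.
for label, params, bits in [("7B FP16", 7, 16), ("7B Q4", 7, 4.5),
                            ("70B FP16", 70, 16), ("70B Q4", 70, 4.5)]:
    print(f"{label}: ~{estimate_weights_gb(params, bits):.0f} GB")
```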
+
+ ## Roadmap
+
+ - [ ] AMD ROCm support
+ - [ ] Memory estimation by model architecture
+ - [ ] Multi-GPU split recommendations
+ - [ ] PyPI package (`pip install gpu-memory-guard`)
+ - [ ] Integration with Ollama and vLLM
+
+ ## Contributing
+
+ PRs welcome. If you want to add AMD ROCm support or model-specific memory estimation, open an issue first so we can discuss the approach.
+
+ ## License
+
+ MIT
@@ -0,0 +1,309 @@
+ #!/usr/bin/env python3
+ """
+ GPU Memory Guard - CLI utility to check VRAM before loading AI models.
+
+ Prevents out-of-memory crashes by checking available GPU VRAM and
+ estimating whether a model will fit with a safety buffer.
+ """
+
+ import json
+ import sys
+ import argparse
+ from dataclasses import dataclass, asdict
+ from typing import Optional, List, Tuple
+
+
+ @dataclass
+ class GPUInfo:
+     """GPU memory information."""
+     device_id: int
+     name: str
+     total_memory_gb: float
+     used_memory_gb: float
+     available_memory_gb: float
+     utilization_percent: float
+
+
+ def get_nvidia_smi_info() -> Optional[List[GPUInfo]]:
+     """Query GPU info using nvidia-smi."""
+     import subprocess
+
+     try:
+         cmd = [
+             "nvidia-smi",
+             "--query-gpu=index,name,memory.total,memory.used,memory.free,"
+             "utilization.gpu",
+             "--format=csv,nounits,noheader",
+         ]
+         result = subprocess.run(
+             cmd,
+             capture_output=True,
+             text=True,
+             timeout=5,
+         )
+
+         if result.returncode != 0:
+             return None
+
+         gpus = []
+         for line in result.stdout.strip().split("\n"):
+             if not line.strip():
+                 continue
+
+             parts = [p.strip() for p in line.split(",")]
+             if len(parts) < 6:
+                 continue
+
+             try:
+                 device_id = int(parts[0])
+                 name = parts[1]
+                 total_mb = float(parts[2])
+                 used_mb = float(parts[3])
+                 free_mb = float(parts[4])
+                 util = float(parts[5])
+
+                 gpu = GPUInfo(
+                     device_id=device_id,
+                     name=name,
+                     total_memory_gb=total_mb / 1024.0,
+                     used_memory_gb=used_mb / 1024.0,
+                     available_memory_gb=free_mb / 1024.0,
+                     utilization_percent=util,
+                 )
+                 gpus.append(gpu)
+             except (ValueError, IndexError):
+                 continue
+
+         return gpus if gpus else None
+
+     except Exception:  # nvidia-smi missing, timed out, or otherwise failed
+         return None
+
+
+ def get_pynvml_info() -> Optional[List[GPUInfo]]:
+     """Query GPU info using pynvml."""
+     try:
+         import pynvml
+
+         pynvml.nvmlInit()
+         device_count = pynvml.nvmlDeviceGetCount()
+
+         gpus = []
+         for i in range(device_count):
+             handle = pynvml.nvmlDeviceGetHandleByIndex(i)
+             name = pynvml.nvmlDeviceGetName(handle)
+             # Older pynvml releases return bytes here, newer ones return str.
+             if isinstance(name, bytes):
+                 name = name.decode("utf-8")
+             mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
+             util = pynvml.nvmlDeviceGetUtilizationRates(handle)
+
+             gpu = GPUInfo(
+                 device_id=i,
+                 name=name,
+                 total_memory_gb=mem_info.total / (1024**3),
+                 used_memory_gb=mem_info.used / (1024**3),
+                 available_memory_gb=mem_info.free / (1024**3),
+                 utilization_percent=util.gpu,
+             )
+             gpus.append(gpu)
+
+         pynvml.nvmlShutdown()
+         return gpus
+
+     except Exception:  # pynvml not installed or an NVML call failed
+         return None
+
+
+ def get_gpu_info() -> Optional[List[GPUInfo]]:
+     """Get GPU information. Try pynvml first, fall back to nvidia-smi."""
+     info = get_pynvml_info()
+     if info is not None:
+         return info
+
+     info = get_nvidia_smi_info()
+     if info is not None:
+         return info
+
+     return None
+
+
+ def check_vram(model_size_gb: float, buffer_gb: float = 0.5) -> Tuple[bool, str]:
+     """
+     Check if sufficient VRAM is available for a model.
+
+     Args:
+         model_size_gb: Model size in GB
+         buffer_gb: Safety buffer in GB (default 0.5)
+
+     Returns:
+         (can_fit, message) tuple
+     """
+     gpu_info = get_gpu_info()
+
+     if gpu_info is None:
+         msg = "Unable to detect GPU. Ensure nvidia-smi or pynvml is available."
+         return (False, msg)
+
+     if not gpu_info:
+         return (False, "No GPUs detected.")
+
+     total_available = sum(gpu.available_memory_gb for gpu in gpu_info)
+     required = model_size_gb + buffer_gb
+
+     can_fit = total_available >= required
+
+     message = f"Total available: {total_available:.2f}GB, required: {required:.2f}GB"
+
+     return (can_fit, message)
+
+
+ def can_load_model(model_size_gb: float, buffer_gb: float = 0.5) -> bool:
+     """
+     Check if a model can be loaded without OOM.
+
+     Args:
+         model_size_gb: Model size in GB
+         buffer_gb: Safety buffer in GB
+
+     Returns:
+         True if the model fits, False otherwise
+     """
+     fits, _ = check_vram(model_size_gb, buffer_gb)
+     return fits
+
+
+ def format_human_output(gpu_info, model_size_gb=None, buffer_gb=0.5):
+     """Format GPU info for human-readable output."""
+     lines = []
+     lines.append("GPU Memory Status")
+     lines.append("=" * 60)
+
+     total_available = 0
+     for gpu in gpu_info:
+         lines.append(f"\nGPU {gpu.device_id}: {gpu.name}")
+         lines.append(f"  Total: {gpu.total_memory_gb:>7.2f}GB")
+         lines.append(f"  Used: {gpu.used_memory_gb:>7.2f}GB")
+         lines.append(f"  Available: {gpu.available_memory_gb:>7.2f}GB")
+         lines.append(f"  Util: {gpu.utilization_percent:>7.1f}%")
+         total_available += gpu.available_memory_gb
+
+     lines.append("\n" + "-" * 60)
+     lines.append(f"Total available across all GPUs: {total_available:.2f}GB")
+
+     if model_size_gb is not None:
+         required = model_size_gb + buffer_gb
+         lines.append(f"\nModel size: {model_size_gb:.2f}GB")
+         lines.append(f"Safety buffer: {buffer_gb:.2f}GB")
+         lines.append(f"Total required: {required:.2f}GB")
+         lines.append("-" * 60)
+
+         if total_available >= required:
+             margin = total_available - required
+             lines.append(f"\u2713 Model WILL fit ({margin:.2f}GB margin)")
+         else:
+             deficit = required - total_available
+             lines.append(f"\u2717 Model will NOT fit (need {deficit:.2f}GB more)")
+
+     return "\n".join(lines)
+
+
+ def format_json_output(gpu_info, model_size_gb=None, buffer_gb=0.5):
+     """Format output as JSON."""
+     total_available = sum(gpu.available_memory_gb for gpu in gpu_info)
+
+     output = {
+         "gpus": [asdict(gpu) for gpu in gpu_info],
+         "total_available_gb": total_available,
+     }
+
+     if model_size_gb is not None:
+         required = model_size_gb + buffer_gb
+         output.update({
+             "model_size_gb": model_size_gb,
+             "buffer_gb": buffer_gb,
+             "total_required_gb": required,
+             "can_fit": total_available >= required,
+         })
+
+     return json.dumps(output, indent=2)
+
+
+ def main():
+     """CLI entry point."""
+     parser = argparse.ArgumentParser(
+         description="GPU Memory Guard - Check VRAM before loading AI models",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+   # Check current GPU status
+   gpu_guard.py
+
+   # Check if an 18GB model fits with 2GB buffer
+   gpu_guard.py --model-size 18 --buffer 2
+
+   # JSON output for scripting
+   gpu_guard.py --model-size 13 --json
+
+   # Minimal output for shell scripts
+   gpu_guard.py --model-size 7 --quiet
+ """,
+     )
+
+     parser.add_argument(
+         "--model-size",
+         type=float,
+         help="Model size in GB to check",
+     )
+     parser.add_argument(
+         "--buffer",
+         type=float,
+         default=0.5,
+         help="Safety buffer in GB (default: 0.5)",
+     )
+     parser.add_argument(
+         "--json",
+         action="store_true",
+         help="Output as JSON",
+     )
+     parser.add_argument(
+         "--quiet",
+         action="store_true",
+         help="Minimal output (exit code only)",
+     )
+
+     args = parser.parse_args()
+
+     gpu_info = get_gpu_info()
+
+     if gpu_info is None:
+         if not args.quiet:
+             print("ERROR: Unable to detect GPU.", file=sys.stderr)
+             print(
+                 "Ensure nvidia-smi is installed or pynvml is available.",
+                 file=sys.stderr,
+             )
+         sys.exit(2)  # exit code 2: GPU detection failed
+
+     if not gpu_info:
+         if not args.quiet:
+             print("ERROR: No GPUs detected.", file=sys.stderr)
+         sys.exit(2)
+
+     if args.quiet and args.model_size is not None:
+         fits, _ = check_vram(args.model_size, args.buffer)
+         sys.exit(0 if fits else 1)  # 0 = fits, 1 = does not fit
+
+     if args.json:
+         output = format_json_output(gpu_info, args.model_size, args.buffer)
+         print(output)
+     else:
+         output = format_human_output(gpu_info, args.model_size, args.buffer)
+         print(output)
+
+     if args.model_size is not None:
+         fits, _ = check_vram(args.model_size, args.buffer)
+         sys.exit(0 if fits else 1)
+
+     sys.exit(0)
+
+
+ if __name__ == "__main__":
+     main()
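Because `check_vram()` looks up `get_gpu_info` at call time in the module namespace, it can be exercised without real hardware by stubbing that function. A minimal sketch, assuming a pytest-style test (the fixture values below are invented):

```python
# Sketch: exercise check_vram() with a stubbed GPU so no hardware is needed.
from unittest import mock

import gpu_guard

FAKE_GPU = gpu_guard.GPUInfo(
    device_id=0,
    name="Fake GPU",
    total_memory_gb=24.0,
    used_memory_gb=4.0,
    available_memory_gb=20.0,
    utilization_percent=5.0,
)


def test_check_vram_fits_and_refuses():
    with mock.patch.object(gpu_guard, "get_gpu_info", return_value=[FAKE_GPU]):
        fits, _ = gpu_guard.check_vram(model_size_gb=13.0, buffer_gb=2.0)
        assert fits  # 13 + 2 <= 20
        fits, _ = gpu_guard.check_vram(model_size_gb=19.9, buffer_gb=0.5)
        assert not fits  # 19.9 + 0.5 > 20
```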
@@ -0,0 +1,10 @@
+ LICENSE
+ README.md
+ gpu_guard.py
+ pyproject.toml
+ gpu_memory_guard.egg-info/PKG-INFO
+ gpu_memory_guard.egg-info/SOURCES.txt
+ gpu_memory_guard.egg-info/dependency_links.txt
+ gpu_memory_guard.egg-info/entry_points.txt
+ gpu_memory_guard.egg-info/requires.txt
+ gpu_memory_guard.egg-info/top_level.txt
@@ -0,0 +1,2 @@
+ [console_scripts]
+ gpu-guard = gpu_guard:main
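This generated file mirrors `[project.scripts]` in `pyproject.toml`: the installed `gpu-guard` command simply calls `main()` in the `gpu_guard` module. A sketch of what that wiring is equivalent to, useful before the package is installed (the example arguments are arbitrary):

```python
# Sketch: what the gpu-guard console script resolves to (gpu_guard:main).
import sys

import gpu_guard

if __name__ == "__main__":
    # Equivalent to running: gpu-guard --model-size 7
    sys.argv = ["gpu-guard", "--model-size", "7"]
    gpu_guard.main()
```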
@@ -0,0 +1,3 @@
+
+ [pynvml]
+ pynvml>=11.0.0
@@ -0,0 +1,39 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "gpu-memory-guard"
+ version = "0.1.0"
+ description = "CLI tool to check GPU VRAM before loading AI models"
+ readme = "README.md"
+ license = {text = "MIT"}
+ requires-python = ">=3.8"
+ authors = [
+     {name = "Dmytro Romanov", email = "casteldazur@gmail.com"}
+ ]
+ classifiers = [
+     "Development Status :: 3 - Alpha",
+     "Intended Audience :: Developers",
+     "License :: OSI Approved :: MIT License",
+     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.8",
+     "Programming Language :: Python :: 3.9",
+     "Programming Language :: Python :: 3.10",
+     "Programming Language :: Python :: 3.11",
+     "Programming Language :: Python :: 3.12",
+     "Topic :: System :: Hardware",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ ]
+ keywords = ["gpu", "vram", "memory", "ai", "llm", "cuda", "nvidia", "oom"]
+
+ [project.optional-dependencies]
+ pynvml = ["pynvml>=11.0.0"]
+
+ [project.scripts]
+ gpu-guard = "gpu_guard:main"
+
+ [project.urls]
+ Homepage = "https://github.com/CastelDazur/gpu-memory-guard"
+ Repository = "https://github.com/CastelDazur/gpu-memory-guard"
+ Issues = "https://github.com/CastelDazur/gpu-memory-guard/issues"
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+