torch_device_manager-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/__init__.py ADDED
File without changes
torch_device_manager/__init__.py ADDED
@@ -0,0 +1,119 @@
+ """
+ Torch Device Manager - Automatic hardware detection and memory optimization for PyTorch
+ """
+
+ import logging
+ from typing import Tuple
+ import torch
+
+ __version__ = "0.1.0"
+ __author__ = "Ali B.M."
+
+ # Set up logging
+ logger = logging.getLogger(__name__)
+
+ class DeviceManager:
+     """Manage device selection and memory optimization for different hardware"""
+
+     def __init__(self, device: str = "auto", mixed_precision: bool = True):
+         self.device = self._detect_device(device)
+         self.mixed_precision = mixed_precision
+         self.scaler = None
+
+         logger.info("Device Manager initialized:")
+         logger.info(f"  - Device: {self.device}")
+         logger.info(f"  - Mixed Precision: {self.mixed_precision}")
+
+         if self.mixed_precision and self.device == "cuda":  # GradScaler is CUDA-only; MPS/CPU run unscaled
+             self.scaler = torch.cuda.amp.GradScaler()
+             logger.info("  - Gradient Scaler: Enabled")
+
+     def _detect_device(self, device: str) -> str:
+         """Detect the best available device"""
+         if device == "auto":
+             if torch.cuda.is_available():
+                 device = "cuda"
+                 logger.info(f"CUDA detected: {torch.cuda.get_device_name()}")
+                 logger.info(f"CUDA memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
+             elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+                 device = "mps"
+                 logger.info("Apple Silicon MPS detected")
+             else:
+                 device = "cpu"
+                 logger.info("Using CPU")
+         else:
+             if device == "cuda" and not torch.cuda.is_available():
+                 logger.warning("CUDA requested but not available, falling back to CPU")
+                 device = "cpu"
+             elif device == "mps" and not (hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()):
+                 logger.warning("MPS requested but not available, falling back to CPU")
+                 device = "cpu"
+
+         return device
+
+     def get_device(self):
+         """Get the torch device object"""
+         return torch.device(self.device)
+
+     def get_memory_info(self):
+         """Get memory information for the current device"""
+         if self.device == "cuda":
+             allocated = torch.cuda.memory_allocated() / 1e9
+             reserved = torch.cuda.memory_reserved() / 1e9
+             total = torch.cuda.get_device_properties(0).total_memory / 1e9
+             return {
+                 "allocated_gb": allocated,
+                 "reserved_gb": reserved,
+                 "total_gb": total,
+                 "free_gb": total - reserved
+             }
+         elif self.device == "mps":
+             # MPS doesn't provide detailed memory info like CUDA
+             return {"device": "mps", "info": "Memory info not available for MPS"}
+         else:
+             return {"device": "cpu", "info": "Memory info not available for CPU"}
+
+     def log_memory_usage(self):
+         """Log current memory usage"""
+         memory_info = self.get_memory_info()
+         if "allocated_gb" in memory_info:
+             logger.info(f"Memory Usage: {memory_info['allocated_gb']:.2f}GB allocated, "
+                         f"{memory_info['free_gb']:.2f}GB free")
+
+     def optimize_for_memory(self, model, batch_size: int) -> Tuple[int, int]:
+         """Optimize batch size and gradient accumulation for available memory (model is accepted but not yet inspected)"""
+
+         if self.device == "cpu":
+             # CPU: Use smaller batches
+             optimized_batch_size = min(batch_size, 8)
+             gradient_steps = max(1, batch_size // optimized_batch_size)
+             logger.info(f"CPU optimization: batch_size={optimized_batch_size}, gradient_steps={gradient_steps}")
+
+         elif self.device == "mps":
+             # Apple Silicon: Conservative settings
+             optimized_batch_size = min(batch_size, 4)
+             gradient_steps = max(1, batch_size // optimized_batch_size)
+             logger.info(f"MPS optimization: batch_size={optimized_batch_size}, gradient_steps={gradient_steps}")
+
+         elif self.device == "cuda":
+             # CUDA: Check available memory
+             memory_info = self.get_memory_info()
+             total_memory = memory_info["total_gb"]
+
+             if total_memory < 8:  # Less than 8GB
+                 optimized_batch_size = min(batch_size, 4)
+                 gradient_steps = max(1, batch_size // optimized_batch_size)
+                 logger.info(f"CUDA <8GB optimization: batch_size={optimized_batch_size}, gradient_steps={gradient_steps}")
+             elif total_memory < 16:  # Less than 16GB
+                 optimized_batch_size = min(batch_size, 8)
+                 gradient_steps = max(1, batch_size // optimized_batch_size)
+                 logger.info(f"CUDA <16GB optimization: batch_size={optimized_batch_size}, gradient_steps={gradient_steps}")
+             else:  # 16GB or more
+                 optimized_batch_size = batch_size
+                 gradient_steps = 1
+                 logger.info(f"CUDA >=16GB: using full batch_size={optimized_batch_size}")
+
+         return optimized_batch_size, gradient_steps
+
+ # Make DeviceManager easily importable
+ __all__ = ["DeviceManager"]
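
Note that `tests/__init__.py` above ships empty. For illustration only (not part of the published wheel, and assuming pytest is installed), a minimal smoke test against the public API shown above might look like this:

```python
# Hypothetical smoke test for DeviceManager; not shipped with the package.
import torch
from torch_device_manager import DeviceManager


def test_auto_device_is_valid():
    dm = DeviceManager(device="auto", mixed_precision=False)
    assert dm.device in ("cuda", "mps", "cpu")
    assert isinstance(dm.get_device(), torch.device)


def test_optimize_for_memory_preserves_effective_batch():
    dm = DeviceManager(device="cpu")
    # model is currently unused by optimize_for_memory, so None suffices here
    batch, steps = dm.optimize_for_memory(model=None, batch_size=32)
    # On CPU the per-step batch is capped at 8; accumulation makes up the rest
    assert batch == 8 and steps == 4
```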
torch_device_manager/py.typed ADDED
File without changes
torch_device_manager-0.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,184 @@
+ Metadata-Version: 2.4
+ Name: torch-device-manager
+ Version: 0.1.0
+ Summary: A PyTorch device manager for automatic hardware detection and memory optimization
+ Home-page: https://github.com/TempCoder82/torch-device-manager
+ Author: Ali B.M.
+ Author-email: mainabukarali@gmail.com
+ Project-URL: Bug Reports, https://github.com/TempCoder82/torch-device-manager/issues
+ Project-URL: Source, https://github.com/TempCoder82/torch-device-manager
+ Keywords: pytorch,cuda,mps,device,memory,optimization,machine learning
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: torch>=2.1.1
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: keywords
+ Dynamic: license-file
+ Dynamic: project-url
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # Torch Device Manager
+
+ A lightweight PyTorch utility for automatic hardware detection and memory optimization across different devices (CPU, CUDA, MPS).
+
+ ## Features
+
+ - 🔍 **Automatic Device Detection**: Detects the best available hardware (CUDA, Apple Silicon MPS, or CPU)
+ - 🧠 **Memory Optimization**: Automatically adjusts batch sizes and gradient accumulation based on available memory
+ - ⚡ **Mixed Precision Support**: Optional automatic mixed precision with gradient scaling
+ - 📊 **Memory Monitoring**: Real-time memory usage tracking and logging
+ - 🛡️ **Fallback Protection**: Graceful fallback to CPU when requested devices aren't available
+
+ ## Installation
+
+ ```bash
+ pip install torch-device-manager
+ ```
+
+ ## Quick Start
+
+ ```python
+ from torch_device_manager import DeviceManager
+ import torch
+
+ # Initialize device manager (auto-detects best device)
+ device_manager = DeviceManager(device="auto", mixed_precision=True)
+
+ # Get the torch device
+ device = device_manager.get_device()
+
+ # Move your model to the optimal device
+ model = YourModel().to(device)
+
+ # Optimize batch size based on available memory
+ optimal_batch_size, gradient_steps = device_manager.optimize_for_memory(
+     model=model,
+     batch_size=32
+ )
+
+ print(f"Using device: {device}")
+ print(f"Optimized batch size: {optimal_batch_size}")
+ print(f"Gradient accumulation steps: {gradient_steps}")
+ ```
+
+ ## Usage in Training Scripts
+
+ ### Basic Integration
+
+ ```python
+ import torch
+ import torch.nn as nn
+ from torch_device_manager import DeviceManager
+
+ def train_model():
+     # Initialize device manager
+     device_manager = DeviceManager(device="auto", mixed_precision=True)
+     device = device_manager.get_device()
+
+     # Setup model
+     model = YourModel().to(device)
+     optimizer = torch.optim.Adam(model.parameters())
+
+     # Optimize memory usage
+     batch_size, gradient_steps = device_manager.optimize_for_memory(model, 32)
+
+     # Training loop
+     for epoch in range(num_epochs):
+         for batch_idx, (data, target) in enumerate(dataloader):
+             data, target = data.to(device), target.to(device)
+
+             # Use mixed precision if available
+             if device_manager.mixed_precision and device_manager.scaler:
+                 with torch.autocast(device_type="cuda"):
+                     output = model(data)
+                     loss = criterion(output, target) / gradient_steps  # normalize for accumulation
+
+                 device_manager.scaler.scale(loss).backward()
+
+                 if (batch_idx + 1) % gradient_steps == 0:
+                     device_manager.scaler.step(optimizer)
+                     device_manager.scaler.update()
+                     optimizer.zero_grad()
+             else:
+                 output = model(data)
+                 loss = criterion(output, target) / gradient_steps  # normalize for accumulation
+                 loss.backward()
+
+                 if (batch_idx + 1) % gradient_steps == 0:
+                     optimizer.step()
+                     optimizer.zero_grad()
+
+         # Log memory usage
+         device_manager.log_memory_usage()
+ ```
+
+ ### Advanced Usage
+
+ ```python
+ from torch_device_manager import DeviceManager
+
+ # Force specific device
+ device_manager = DeviceManager(device="cuda", mixed_precision=False)
+
+ # Check memory info
+ memory_info = device_manager.get_memory_info()
+ print(f"Available memory: {memory_info}")
+
+ # Manual memory optimization
+ if memory_info.get("free_gb", 0) < 2.0:
+     print("Low memory detected, reducing batch size")
+     batch_size = 4
+ ```
+
+ ## API Reference
+
+ ### DeviceManager
+
+ #### Constructor
+ - `device` (str, default="auto"): Device to use ("auto", "cuda", "mps", "cpu")
+ - `mixed_precision` (bool, default=True): Enable mixed precision training
+
+ #### Methods
+ - `get_device()`: Returns torch.device object
+ - `get_memory_info()`: Returns memory information dict
+ - `log_memory_usage()`: Logs current memory usage
+ - `optimize_for_memory(model, batch_size)`: Returns optimized (batch_size, gradient_steps)
+
+ ## Device Support
+
+ - **CUDA**: Full support with memory optimization and mixed precision
+ - **Apple Silicon (MPS)**: Basic support with conservative memory settings
+ - **CPU**: Fallback support with optimized batch sizes
+
+ ## Requirements
+
+ - Python >= 3.8
+ - PyTorch >= 2.1.1
+
+ ## License
+
+ MIT License
+
+ ## Contributing
+
+ Contributions are welcome!
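
One point worth spelling out from the API above: `optimize_for_memory` trades per-step batch size for gradient-accumulation steps, so the effective batch size stays at the requested value (exactly, whenever the device cap divides the request). A quick sketch of the arithmetic, using the MPS cap of 4 from this release:

```python
# Accumulation arithmetic mirroring optimize_for_memory's MPS branch
requested = 32
cap = 4                                 # per-step batch cap on MPS
optimized = min(requested, cap)         # 4 samples per forward pass
steps = max(1, requested // optimized)  # 8 accumulation steps
assert optimized * steps == requested   # effective batch size preserved
```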
torch_device_manager-0.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+ tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ torch_device_manager/__init__.py,sha256=XomGANg50MTegWdWJGZOpEdQDzzQuLkCb713pcEtm9k,5250
+ torch_device_manager/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ torch_device_manager-0.1.0.dist-info/licenses/LICENSE,sha256=ysBrFSSfXzwsvRopk2TpXewp3RIEe9MEBV_SJgWLCag,1064
+ torch_device_manager-0.1.0.dist-info/METADATA,sha256=ob2yfvkTiIbhysUUEm4GQY8A8kA4ZJCEI_Tl8W1cRDg,5767
+ torch_device_manager-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ torch_device_manager-0.1.0.dist-info/top_level.txt,sha256=TsbZ2eT7dMAY_OarI9zZwTcOojMdaB2c0CH1i7dOY_g,27
+ torch_device_manager-0.1.0.dist-info/RECORD,,
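
Each RECORD row has the form `path,sha256=<digest>,size`, where the digest is the unpadded urlsafe-base64 encoding of the file's SHA-256. A small standard-library sketch that reproduces the empty-file digest appearing twice above (for `tests/__init__.py` and `py.typed`):

```python
import base64
import hashlib


def record_digest(data: bytes) -> str:
    """Digest in wheel RECORD format: urlsafe base64 with padding stripped."""
    raw = hashlib.sha256(data).digest()
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")


# Empty files hash to the well-known empty digest seen in the RECORD above
assert record_digest(b"") == "47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"
```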
torch_device_manager-0.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
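
`Tag: py3-none-any` marks a pure-Python wheel with no ABI or platform constraint, which is why one wheel covers Python 3.8 through 3.13. If the third-party `packaging` library is available (an assumption; it is not a dependency of this package), the tags can be parsed straight from the filename:

```python
from packaging.utils import parse_wheel_filename

# Parse name, version, build tag, and compatibility tags from the wheel name
name, version, build, tags = parse_wheel_filename(
    "torch_device_manager-0.1.0-py3-none-any.whl"
)
print(name, version)                  # torch-device-manager 0.1.0
print(sorted(str(t) for t in tags))   # ['py3-none-any']
```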
torch_device_manager-0.1.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Ali B.M.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
torch_device_manager-0.1.0.dist-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ tests
+ torch_device_manager