comfy-env 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (51)
  1. comfy_env/__init__.py +115 -62
  2. comfy_env/cli.py +89 -319
  3. comfy_env/config/__init__.py +18 -8
  4. comfy_env/config/parser.py +21 -122
  5. comfy_env/config/types.py +37 -70
  6. comfy_env/detection/__init__.py +77 -0
  7. comfy_env/detection/cuda.py +61 -0
  8. comfy_env/detection/gpu.py +230 -0
  9. comfy_env/detection/platform.py +70 -0
  10. comfy_env/detection/runtime.py +103 -0
  11. comfy_env/environment/__init__.py +53 -0
  12. comfy_env/environment/cache.py +141 -0
  13. comfy_env/environment/libomp.py +41 -0
  14. comfy_env/environment/paths.py +38 -0
  15. comfy_env/environment/setup.py +88 -0
  16. comfy_env/install.py +163 -249
  17. comfy_env/isolation/__init__.py +33 -2
  18. comfy_env/isolation/tensor_utils.py +83 -0
  19. comfy_env/isolation/workers/__init__.py +16 -0
  20. comfy_env/{workers → isolation/workers}/mp.py +1 -1
  21. comfy_env/{workers → isolation/workers}/subprocess.py +2 -2
  22. comfy_env/isolation/wrap.py +149 -409
  23. comfy_env/packages/__init__.py +60 -0
  24. comfy_env/packages/apt.py +36 -0
  25. comfy_env/packages/cuda_wheels.py +97 -0
  26. comfy_env/packages/node_dependencies.py +77 -0
  27. comfy_env/packages/pixi.py +85 -0
  28. comfy_env/packages/toml_generator.py +88 -0
  29. comfy_env-0.1.16.dist-info/METADATA +279 -0
  30. comfy_env-0.1.16.dist-info/RECORD +36 -0
  31. comfy_env/cache.py +0 -331
  32. comfy_env/errors.py +0 -293
  33. comfy_env/nodes.py +0 -187
  34. comfy_env/pixi/__init__.py +0 -48
  35. comfy_env/pixi/core.py +0 -588
  36. comfy_env/pixi/cuda_detection.py +0 -303
  37. comfy_env/pixi/platform/__init__.py +0 -21
  38. comfy_env/pixi/platform/base.py +0 -96
  39. comfy_env/pixi/platform/darwin.py +0 -53
  40. comfy_env/pixi/platform/linux.py +0 -68
  41. comfy_env/pixi/platform/windows.py +0 -284
  42. comfy_env/pixi/resolver.py +0 -198
  43. comfy_env/prestartup.py +0 -192
  44. comfy_env/workers/__init__.py +0 -38
  45. comfy_env/workers/tensor_utils.py +0 -188
  46. comfy_env-0.1.14.dist-info/METADATA +0 -291
  47. comfy_env-0.1.14.dist-info/RECORD +0 -33
  48. comfy_env/{workers → isolation/workers}/base.py +0 -0
  49. {comfy_env-0.1.14.dist-info → comfy_env-0.1.16.dist-info}/WHEEL +0 -0
  50. {comfy_env-0.1.14.dist-info → comfy_env-0.1.16.dist-info}/entry_points.txt +0 -0
  51. {comfy_env-0.1.14.dist-info → comfy_env-0.1.16.dist-info}/licenses/LICENSE +0 -0
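
The file list shows that 0.1.16 breaks the old monolithic comfy_env.pixi and comfy_env.workers modules into focused subpackages (detection/, environment/, isolation/workers/, packages/). As a rough orientation, the sketch below contrasts an import that worked in 0.1.14 (taken from the deleted comfy_env/pixi/cuda_detection.py shown further down) with where the equivalent helper presumably lives in 0.1.16; the new path is only inferred from the file names above and is not verified against the 0.1.16 sources.

    # 0.1.14: GPU/CUDA detection lived under the pixi subpackage
    from comfy_env.pixi.cuda_detection import detect_cuda_environment

    # 0.1.16 (assumed): the file list suggests a dedicated detection package,
    # e.g. comfy_env/detection/gpu.py -- the exact module and function names are a guess
    # from comfy_env.detection.gpu import detect_cuda_environment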
comfy_env/pixi/cuda_detection.py
@@ -1,303 +0,0 @@
- """
- Robust CUDA/GPU detection with multiple fallback methods.
-
- Detection priority: NVML -> PyTorch -> nvidia-smi -> sysfs -> env vars
- """
-
- from __future__ import annotations
-
- import logging
- import os
- import re
- import subprocess
- import time
- from dataclasses import dataclass, field
- from pathlib import Path
-
- logger = logging.getLogger(__name__)
-
- CUDA_VERSION_ENV_VAR = "COMFY_ENV_CUDA_VERSION"
-
- _cache: tuple[float, "CUDAEnvironment | None"] = (0, None)
- CACHE_TTL = 60
-
-
- @dataclass
- class GPUInfo:
-     index: int
-     name: str
-     compute_capability: tuple[int, int]
-     architecture: str
-     vram_total_mb: int = 0
-     vram_free_mb: int = 0
-     uuid: str = ""
-     pci_bus_id: str = ""
-     driver_version: str = ""
-
-     def cc_str(self) -> str:
-         return f"{self.compute_capability[0]}.{self.compute_capability[1]}"
-
-     def sm_version(self) -> str:
-         return f"sm_{self.compute_capability[0]}{self.compute_capability[1]}"
-
-
- @dataclass
- class CUDAEnvironment:
-     gpus: list[GPUInfo] = field(default_factory=list)
-     driver_version: str = ""
-     cuda_runtime_version: str = ""
-     recommended_cuda: str = ""
-     detection_method: str = ""
-
-
- COMPUTE_TO_ARCH = {
-     (5, 0): "Maxwell", (5, 2): "Maxwell", (5, 3): "Maxwell",
-     (6, 0): "Pascal", (6, 1): "Pascal", (6, 2): "Pascal",
-     (7, 0): "Volta", (7, 2): "Volta", (7, 5): "Turing",
-     (8, 0): "Ampere", (8, 6): "Ampere", (8, 7): "Ampere", (8, 9): "Ada",
-     (9, 0): "Hopper",
-     (10, 0): "Blackwell", (10, 1): "Blackwell", (10, 2): "Blackwell",
- }
-
-
- def _cc_to_arch(major: int, minor: int) -> str:
-     if arch := COMPUTE_TO_ARCH.get((major, minor)):
-         return arch
-     if major >= 10: return "Blackwell"
-     if major == 9: return "Hopper"
-     if major == 8: return "Ada" if minor >= 9 else "Ampere"
-     if major == 7: return "Turing" if minor >= 5 else "Volta"
-     if major == 6: return "Pascal"
-     return "Maxwell" if major == 5 else "Unknown"
-
-
- def _parse_cc(s: str) -> tuple[int, int]:
-     try:
-         if "." in s:
-             p = s.split(".")
-             return (int(p[0]), int(p[1]))
-         if len(s) >= 2:
-             return (int(s[:-1]), int(s[-1]))
-     except (ValueError, IndexError):
-         pass
-     return (0, 0)
-
-
- def _detect_nvml() -> list[GPUInfo] | None:
-     try:
-         import pynvml
-         pynvml.nvmlInit()
-         try:
-             count = pynvml.nvmlDeviceGetCount()
-             if not count:
-                 return None
-             gpus = []
-             for i in range(count):
-                 h = pynvml.nvmlDeviceGetHandleByIndex(i)
-                 name = pynvml.nvmlDeviceGetName(h)
-                 if isinstance(name, bytes): name = name.decode()
-                 cc = pynvml.nvmlDeviceGetCudaComputeCapability(h)
-                 mem = pynvml.nvmlDeviceGetMemoryInfo(h)
-                 gpus.append(GPUInfo(
-                     index=i, name=name, compute_capability=cc,
-                     architecture=_cc_to_arch(*cc),
-                     vram_total_mb=mem.total // (1024*1024),
-                     vram_free_mb=mem.free // (1024*1024),
-                 ))
-             return gpus
-         finally:
-             pynvml.nvmlShutdown()
-     except Exception:
-         return None
-
-
- def _detect_torch() -> list[GPUInfo] | None:
-     try:
-         import torch
-         if not torch.cuda.is_available():
-             return None
-         gpus = []
-         for i in range(torch.cuda.device_count()):
-             p = torch.cuda.get_device_properties(i)
-             gpus.append(GPUInfo(
-                 index=i, name=p.name,
-                 compute_capability=(p.major, p.minor),
-                 architecture=_cc_to_arch(p.major, p.minor),
-                 vram_total_mb=p.total_memory // (1024*1024),
-             ))
-         return gpus if gpus else None
-     except Exception:
-         return None
-
-
- def _detect_smi() -> list[GPUInfo] | None:
-     try:
-         r = subprocess.run(
-             ["nvidia-smi", "--query-gpu=index,name,uuid,pci.bus_id,compute_cap,memory.total,memory.free,driver_version",
-              "--format=csv,noheader,nounits"],
-             capture_output=True, text=True, timeout=10
-         )
-         if r.returncode != 0:
-             return None
-         gpus = []
-         for line in r.stdout.strip().split("\n"):
-             if not line.strip():
-                 continue
-             p = [x.strip() for x in line.split(",")]
-             if len(p) < 5:
-                 continue
-             cc = _parse_cc(p[4])
-             gpus.append(GPUInfo(
-                 index=int(p[0]) if p[0].isdigit() else len(gpus),
-                 name=p[1], uuid=p[2] if len(p) > 2 else "",
-                 pci_bus_id=p[3] if len(p) > 3 else "",
-                 compute_capability=cc, architecture=_cc_to_arch(*cc),
-                 vram_total_mb=int(p[5]) if len(p) > 5 and p[5].isdigit() else 0,
-                 vram_free_mb=int(p[6]) if len(p) > 6 and p[6].isdigit() else 0,
-                 driver_version=p[7] if len(p) > 7 else "",
-             ))
-         return gpus if gpus else None
-     except Exception:
-         return None
-
-
- def _detect_sysfs() -> list[GPUInfo] | None:
-     try:
-         pci_path = Path("/sys/bus/pci/devices")
-         if not pci_path.exists():
-             return None
-         gpus = []
-         for d in sorted(pci_path.iterdir()):
-             vendor = (d / "vendor").read_text().strip().lower() if (d / "vendor").exists() else ""
-             if "10de" not in vendor:
-                 continue
-             cls = (d / "class").read_text().strip() if (d / "class").exists() else ""
-             if not (cls.startswith("0x0300") or cls.startswith("0x0302")):
-                 continue
-             gpus.append(GPUInfo(
-                 index=len(gpus), name=f"NVIDIA GPU", pci_bus_id=d.name,
-                 compute_capability=(0, 0), architecture="Unknown"
-             ))
-         return gpus if gpus else None
-     except Exception:
-         return None
-
-
- def _get_driver_version() -> str:
-     try:
-         import pynvml
-         pynvml.nvmlInit()
-         v = pynvml.nvmlSystemGetDriverVersion()
-         pynvml.nvmlShutdown()
-         return v.decode() if isinstance(v, bytes) else v
-     except Exception:
-         pass
-     try:
-         r = subprocess.run(["nvidia-smi", "--query-gpu=driver_version", "--format=csv,noheader"],
-                            capture_output=True, text=True, timeout=5)
-         if r.returncode == 0:
-             return r.stdout.strip().split("\n")[0]
-     except Exception:
-         pass
-     return ""
-
-
- def _get_cuda_version() -> str:
-     try:
-         import torch
-         if torch.cuda.is_available() and torch.version.cuda:
-             return torch.version.cuda
-     except Exception:
-         pass
-     try:
-         r = subprocess.run(["nvcc", "--version"], capture_output=True, text=True, timeout=5)
-         if m := re.search(r"release (\d+\.\d+)", r.stdout):
-             return m.group(1)
-     except Exception:
-         pass
-     return ""
-
-
- def _recommended_cuda(gpus: list[GPUInfo]) -> str:
-     if override := os.environ.get(CUDA_VERSION_ENV_VAR, "").strip():
-         if "." not in override and len(override) >= 2:
-             return f"{override[:-1]}.{override[-1]}"
-         return override
-     if not gpus:
-         return ""
-     for gpu in gpus:
-         if gpu.compute_capability[0] >= 10:
-             return "12.8"  # Blackwell requires 12.8
-     for gpu in gpus:
-         cc = gpu.compute_capability
-         if cc[0] < 7 or (cc[0] == 7 and cc[1] < 5):
-             return "12.4"  # Legacy (Pascal) uses 12.4
-     return "12.8"  # Modern GPUs use 12.8
-
-
- def detect_cuda_environment(force_refresh: bool = False) -> CUDAEnvironment:
-     global _cache
-     if not force_refresh and _cache[1] and time.time() - _cache[0] < CACHE_TTL:
-         return _cache[1]
-
-     gpus, method = None, "none"
-     for name, fn in [("nvml", _detect_nvml), ("torch", _detect_torch),
-                      ("smi", _detect_smi), ("sysfs", _detect_sysfs)]:
-         if gpus := fn():
-             method = name
-             break
-
-     env = CUDAEnvironment(
-         gpus=gpus or [],
-         driver_version=_get_driver_version(),
-         cuda_runtime_version=_get_cuda_version(),
-         recommended_cuda=_recommended_cuda(gpus or []),
-         detection_method=method,
-     )
-     _cache = (time.time(), env)
-     return env
-
-
- def get_recommended_cuda_version() -> str | None:
-     if override := os.environ.get(CUDA_VERSION_ENV_VAR, "").strip():
-         if "." not in override and len(override) >= 2:
-             return f"{override[:-1]}.{override[-1]}"
-         return override
-     env = detect_cuda_environment()
-     return env.recommended_cuda or None
-
-
- def detect_gpus() -> list[GPUInfo]:
-     return detect_cuda_environment().gpus
-
-
- def detect_gpu_info() -> list[dict]:
-     """Return GPU info as list of dicts."""
-     from dataclasses import asdict
-     return [asdict(gpu) for gpu in detect_gpus()]
-
-
- def get_gpu_summary() -> str:
-     """Human-readable GPU summary."""
-     env = detect_cuda_environment()
-     if not env.gpus:
-         override = os.environ.get(CUDA_VERSION_ENV_VAR)
-         if override:
-             return f"No NVIDIA GPU detected (using {CUDA_VERSION_ENV_VAR}={override})"
-         return f"No NVIDIA GPU detected (set {CUDA_VERSION_ENV_VAR} to override)"
-
-     lines = [f"Detection: {env.detection_method}"]
-     if env.driver_version:
-         lines.append(f"Driver: {env.driver_version}")
-     if env.cuda_runtime_version:
-         lines.append(f"CUDA: {env.cuda_runtime_version}")
-     lines.append(f"Recommended: CUDA {env.recommended_cuda}")
-     lines.append("")
-     for gpu in env.gpus:
-         vram = f"{gpu.vram_total_mb}MB" if gpu.vram_total_mb else "?"
-         lines.append(f" GPU {gpu.index}: {gpu.name} ({gpu.sm_version()}) [{gpu.architecture}] {vram}")
-     return "\n".join(lines)
-
-
- # Aliases
- detect_cuda_version = get_recommended_cuda_version
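
For reference, the public surface of the removed comfy_env/pixi/cuda_detection.py could be exercised as below under 0.1.14; this is a minimal usage sketch based only on the functions shown in the hunk above, and the printed values are illustrative.

    from comfy_env.pixi.cuda_detection import (
        detect_cuda_environment,
        get_gpu_summary,
        get_recommended_cuda_version,
    )

    env = detect_cuda_environment()        # tries NVML, then PyTorch, nvidia-smi, sysfs
    print(env.detection_method)            # e.g. "nvml"
    for gpu in env.gpus:
        print(gpu.index, gpu.name, gpu.sm_version(), gpu.architecture)

    print(get_recommended_cuda_version())  # "12.8" for Blackwell/modern GPUs, "12.4" for pre-Turing parts
    print(get_gpu_summary())               # multi-line human-readable report

Setting COMFY_ENV_CUDA_VERSION (e.g. "128" or "12.8") bypasses detection and forces the recommended CUDA version, per _recommended_cuda above.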
comfy_env/pixi/platform/__init__.py
@@ -1,21 +0,0 @@
- """Platform-specific providers for comfyui-isolation."""
-
- import sys
-
- from .base import PlatformProvider, PlatformPaths
-
- # Import platform-specific provider
- if sys.platform == 'win32':
-     from .windows import WindowsPlatformProvider as _Provider
- elif sys.platform == 'darwin':
-     from .darwin import DarwinPlatformProvider as _Provider
- else:
-     from .linux import LinuxPlatformProvider as _Provider
-
-
- def get_platform() -> PlatformProvider:
-     """Get the platform provider for the current system."""
-     return _Provider()
-
-
- __all__ = ["PlatformProvider", "PlatformPaths", "get_platform"]
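
The removed comfy_env/pixi/platform package selects a concrete provider at import time based on sys.platform. A short usage sketch under 0.1.14 (the ".venv" path is just an illustrative argument; the methods are those declared on PlatformProvider in base.py below):

    from pathlib import Path
    from comfy_env.pixi.platform import get_platform

    provider = get_platform()                  # Windows, Darwin, or Linux provider
    ok, err = provider.check_prerequisites()   # (True, None) when the platform is usable
    paths = provider.get_env_paths(Path(".venv"), python_version="3.10")
    print(provider.name, paths.python, paths.site_packages)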
comfy_env/pixi/platform/base.py
@@ -1,96 +0,0 @@
- """
- Abstract base class for platform-specific operations.
- """
-
- from abc import ABC, abstractmethod
- from dataclasses import dataclass
- from pathlib import Path
- from typing import Optional, List, Tuple
-
-
- @dataclass
- class PlatformPaths:
-     """Platform-specific paths within an environment."""
-     python: Path
-     pip: Path
-     site_packages: Path
-     bin_dir: Path
-
-
- class PlatformProvider(ABC):
-     """
-     Abstract base class for platform-specific operations.
-
-     Each platform (Linux, Windows, macOS) implements this interface
-     to provide consistent behavior across operating systems.
-     """
-
-     @property
-     @abstractmethod
-     def name(self) -> str:
-         """Platform name: 'linux', 'windows', 'darwin'."""
-         pass
-
-     @property
-     @abstractmethod
-     def executable_suffix(self) -> str:
-         """Executable suffix: '' for Unix, '.exe' for Windows."""
-         pass
-
-     @property
-     @abstractmethod
-     def shared_lib_extension(self) -> str:
-         """Shared library extension: '.so', '.dll', '.dylib'."""
-         pass
-
-     @abstractmethod
-     def get_env_paths(self, env_dir: Path, python_version: str = "3.10") -> PlatformPaths:
-         """
-         Get platform-specific paths for an environment.
-
-         Args:
-             env_dir: Root directory of the environment
-             python_version: Python version (e.g., "3.10")
-
-         Returns:
-             PlatformPaths with python, pip, site_packages, bin_dir
-         """
-         pass
-
-     @abstractmethod
-     def check_prerequisites(self) -> Tuple[bool, Optional[str]]:
-         """
-         Check platform-specific prerequisites.
-
-         Returns:
-             Tuple of (is_compatible, error_message)
-             error_message is None if compatible
-         """
-         pass
-
-     @abstractmethod
-     def make_executable(self, path: Path) -> None:
-         """
-         Make a file executable.
-
-         Args:
-             path: Path to the file
-         """
-         pass
-
-     @abstractmethod
-     def rmtree_robust(self, path: Path) -> bool:
-         """
-         Remove directory tree with platform-specific error handling.
-
-         Args:
-             path: Directory to remove
-
-         Returns:
-             True if successful
-         """
-         pass
-
-     def get_uv_exe_name(self) -> str:
-         """Get uv executable name for this platform."""
-         return f"uv{self.executable_suffix}"
comfy_env/pixi/platform/darwin.py
@@ -1,53 +0,0 @@
- """
- macOS (Darwin) platform provider implementation.
- """
-
- import os
- import stat
- import shutil
- from pathlib import Path
- from typing import Optional, Tuple
-
- from .base import PlatformProvider, PlatformPaths
-
-
- class DarwinPlatformProvider(PlatformProvider):
-     """Platform provider for macOS systems."""
-
-     @property
-     def name(self) -> str:
-         return 'darwin'
-
-     @property
-     def executable_suffix(self) -> str:
-         return ''
-
-     @property
-     def shared_lib_extension(self) -> str:
-         return '.dylib'
-
-     def get_env_paths(self, env_dir: Path, python_version: str = "3.10") -> PlatformPaths:
-         return PlatformPaths(
-             python=env_dir / "bin" / "python",
-             pip=env_dir / "bin" / "pip",
-             site_packages=env_dir / "lib" / f"python{python_version}" / "site-packages",
-             bin_dir=env_dir / "bin"
-         )
-
-     def check_prerequisites(self) -> Tuple[bool, Optional[str]]:
-         # macOS with Apple Silicon can use MPS (Metal Performance Shaders)
-         # but CUDA is not available
-         return (True, None)
-
-     def is_apple_silicon(self) -> bool:
-         """Check if running on Apple Silicon."""
-         import platform
-         return platform.machine() == 'arm64'
-
-     def make_executable(self, path: Path) -> None:
-         current = os.stat(path).st_mode
-         os.chmod(path, current | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-
-     def rmtree_robust(self, path: Path) -> bool:
-         shutil.rmtree(path)
-         return True
comfy_env/pixi/platform/linux.py
@@ -1,68 +0,0 @@
- """
- Linux platform provider implementation.
- """
-
- import os
- import stat
- import shutil
- from pathlib import Path
- from typing import Optional, Tuple
-
- from .base import PlatformProvider, PlatformPaths
-
-
- class LinuxPlatformProvider(PlatformProvider):
-     """Platform provider for Linux systems."""
-
-     @property
-     def name(self) -> str:
-         return 'linux'
-
-     @property
-     def executable_suffix(self) -> str:
-         return ''
-
-     @property
-     def shared_lib_extension(self) -> str:
-         return '.so'
-
-     def get_env_paths(self, env_dir: Path, python_version: str = "3.10") -> PlatformPaths:
-         return PlatformPaths(
-             python=env_dir / "bin" / "python",
-             pip=env_dir / "bin" / "pip",
-             site_packages=env_dir / "lib" / f"python{python_version}" / "site-packages",
-             bin_dir=env_dir / "bin"
-         )
-
-     def check_prerequisites(self) -> Tuple[bool, Optional[str]]:
-         # WSL2 with NVIDIA CUDA drivers is supported
-         return (True, None)
-
-     def is_wsl(self) -> bool:
-         """Detect if running under Windows Subsystem for Linux."""
-         # Method 1: Check /proc/sys/kernel/osrelease
-         try:
-             with open('/proc/sys/kernel/osrelease', 'r') as f:
-                 kernel_release = f.read().lower()
-                 if 'microsoft' in kernel_release or 'wsl' in kernel_release:
-                     return True
-         except (FileNotFoundError, PermissionError):
-             pass
-
-         # Method 2: Check for WSLInterop
-         if os.path.exists('/proc/sys/fs/binfmt_misc/WSLInterop'):
-             return True
-
-         # Method 3: Check environment variable
-         if 'WSL_DISTRO_NAME' in os.environ:
-             return True
-
-         return False
-
-     def make_executable(self, path: Path) -> None:
-         current = os.stat(path).st_mode
-         os.chmod(path, current | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-
-     def rmtree_robust(self, path: Path) -> bool:
-         shutil.rmtree(path)
-         return True
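
The Linux provider above layers three WSL heuristics (the kernel release string, the WSLInterop binfmt entry, and the WSL_DISTRO_NAME variable). A quick way to exercise it against 0.1.14, purely for illustration:

    from comfy_env.pixi.platform.linux import LinuxPlatformProvider

    provider = LinuxPlatformProvider()
    print(provider.shared_lib_extension)   # ".so"
    print(provider.is_wsl())               # True inside WSL, False on native Linux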