comfy-env 0.0.13__tar.gz → 0.0.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. {comfy_env-0.0.13 → comfy_env-0.0.15}/PKG-INFO +1 -1
  2. {comfy_env-0.0.13 → comfy_env-0.0.15}/pyproject.toml +1 -1
  3. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/__init__.py +16 -44
  4. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/__init__.py +19 -2
  5. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/config.py +10 -0
  6. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/config_file.py +39 -3
  7. comfy_env-0.0.15/src/comfy_env/env/cuda_gpu_detection.py +303 -0
  8. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/manager.py +3 -3
  9. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/platform/windows.py +1 -94
  10. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/install.py +44 -8
  11. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/resolver.py +4 -1
  12. comfy_env-0.0.15/src/comfy_env/tools.py +221 -0
  13. comfy_env-0.0.15/untitled.txt +0 -0
  14. comfy_env-0.0.13/src/comfy_env/env/detection.py +0 -183
  15. {comfy_env-0.0.13 → comfy_env-0.0.15}/.github/workflows/publish.yml +0 -0
  16. {comfy_env-0.0.13 → comfy_env-0.0.15}/.gitignore +0 -0
  17. {comfy_env-0.0.13 → comfy_env-0.0.15}/CLAUDE.md +0 -0
  18. {comfy_env-0.0.13 → comfy_env-0.0.15}/CRITICISM.md +0 -0
  19. {comfy_env-0.0.13 → comfy_env-0.0.15}/LICENSE +0 -0
  20. {comfy_env-0.0.13 → comfy_env-0.0.15}/README.md +0 -0
  21. {comfy_env-0.0.13 → comfy_env-0.0.15}/examples/basic_node/__init__.py +0 -0
  22. {comfy_env-0.0.13 → comfy_env-0.0.15}/examples/basic_node/comfy-env.toml +0 -0
  23. {comfy_env-0.0.13 → comfy_env-0.0.15}/examples/basic_node/nodes.py +0 -0
  24. {comfy_env-0.0.13 → comfy_env-0.0.15}/examples/basic_node/worker.py +0 -0
  25. {comfy_env-0.0.13 → comfy_env-0.0.15}/examples/decorator_node/__init__.py +0 -0
  26. {comfy_env-0.0.13 → comfy_env-0.0.15}/examples/decorator_node/nodes.py +0 -0
  27. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/cli.py +0 -0
  28. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/decorator.py +0 -0
  29. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/platform/__init__.py +0 -0
  30. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/platform/base.py +0 -0
  31. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/platform/darwin.py +0 -0
  32. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/platform/linux.py +0 -0
  33. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/env/security.py +0 -0
  34. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/errors.py +0 -0
  35. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/ipc/__init__.py +0 -0
  36. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/ipc/bridge.py +0 -0
  37. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/ipc/protocol.py +0 -0
  38. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/ipc/tensor.py +0 -0
  39. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/ipc/torch_bridge.py +0 -0
  40. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/ipc/transport.py +0 -0
  41. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/ipc/worker.py +0 -0
  42. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/registry.py +0 -0
  43. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/stubs/__init__.py +0 -0
  44. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/stubs/folder_paths.py +0 -0
  45. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/wheel_sources.yml +0 -0
  46. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/workers/__init__.py +0 -0
  47. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/workers/base.py +0 -0
  48. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/workers/pool.py +0 -0
  49. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/workers/tensor_utils.py +0 -0
  50. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/workers/torch_mp.py +0 -0
  51. {comfy_env-0.0.13 → comfy_env-0.0.15}/src/comfy_env/workers/venv.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: comfy-env
3
- Version: 0.0.13
3
+ Version: 0.0.15
4
4
  Summary: Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation
5
5
  Project-URL: Homepage, https://github.com/PozzettiAndrea/comfy-env
6
6
  Project-URL: Repository, https://github.com/PozzettiAndrea/comfy-env
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "comfy-env"
3
- version = "0.0.13"
3
+ version = "0.0.15"
4
4
  description = "Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation"
5
5
  readme = "README.md"
6
6
  license = {text = "MIT"}
@@ -1,46 +1,4 @@
1
- """
2
- comfy-env: Environment management for ComfyUI custom nodes.
3
-
4
- This package provides:
5
- - CUDA wheel resolution and in-place installation (Type 2 nodes)
6
- - Process isolation with separate venvs (Type 1 nodes)
7
-
8
- ## Quick Start - In-Place Installation
9
-
10
- from comfy_env import install
11
-
12
- # Auto-discover config and install CUDA wheels
13
- install()
14
-
15
- # Or with explicit config
16
- install(config="comfyui_env.toml")
17
-
18
- ## Quick Start - Process Isolation
19
-
20
- from comfy_env.workers import get_worker, TorchMPWorker
21
-
22
- # Same-venv isolation (zero-copy tensors)
23
- worker = TorchMPWorker()
24
- result = worker.call(my_gpu_function, image=tensor)
25
-
26
- # Cross-venv isolation
27
- from comfy_env.workers import PersistentVenvWorker
28
- worker = PersistentVenvWorker(python="/path/to/venv/bin/python")
29
- result = worker.call_module("my_module", "my_func", image=tensor)
30
-
31
- ## CLI
32
-
33
- comfy-env install # Install from config
34
- comfy-env info # Show environment info
35
- comfy-env resolve pkg==1.0 # Show resolved wheel URL
36
- comfy-env doctor # Verify installation
37
-
38
- ## Legacy APIs (still supported)
39
-
40
- The @isolated decorator and WorkerBridge are still available.
41
- """
42
-
43
- __version__ = "0.0.11"
1
+ __version__ = "0.0.14"
44
2
 
45
3
  from .env.config import IsolatedEnv, EnvManagerConfig, LocalConfig, NodeReq
46
4
  from .env.config_file import (
@@ -51,7 +9,16 @@ from .env.config_file import (
51
9
  CONFIG_FILE_NAMES,
52
10
  )
53
11
  from .env.manager import IsolatedEnvManager
54
- from .env.detection import detect_cuda_version, detect_gpu_info, get_gpu_summary
12
+ from .env.cuda_gpu_detection import (
13
+ GPUInfo,
14
+ CUDAEnvironment,
15
+ detect_cuda_environment,
16
+ detect_cuda_version,
17
+ detect_gpu_info,
18
+ detect_gpus,
19
+ get_gpu_summary,
20
+ get_recommended_cuda_version,
21
+ )
55
22
  from .env.security import (
56
23
  normalize_env_name,
57
24
  validate_dependency,
@@ -134,9 +101,14 @@ __all__ = [
134
101
  "discover_config",
135
102
  "CONFIG_FILE_NAMES",
136
103
  # Detection
104
+ "GPUInfo",
105
+ "CUDAEnvironment",
106
+ "detect_cuda_environment",
137
107
  "detect_cuda_version",
138
108
  "detect_gpu_info",
109
+ "detect_gpus",
139
110
  "get_gpu_summary",
111
+ "get_recommended_cuda_version",
140
112
  # Security validation
141
113
  "normalize_env_name",
142
114
  "validate_dependency",
@@ -1,8 +1,17 @@
1
1
  """Environment management for comfyui-isolation."""
2
2
 
3
- from .config import IsolatedEnv
3
+ from .config import IsolatedEnv, ToolConfig
4
4
  from .manager import IsolatedEnvManager
5
- from .detection import detect_cuda_version, detect_gpu_info, get_gpu_summary
5
+ from .cuda_gpu_detection import (
6
+ GPUInfo,
7
+ CUDAEnvironment,
8
+ detect_cuda_environment,
9
+ detect_cuda_version,
10
+ detect_gpu_info,
11
+ detect_gpus,
12
+ get_gpu_summary,
13
+ get_recommended_cuda_version,
14
+ )
6
15
  from .platform import get_platform, PlatformProvider, PlatformPaths
7
16
  from .security import (
8
17
  normalize_env_name,
@@ -15,9 +24,17 @@ from .security import (
15
24
  __all__ = [
16
25
  "IsolatedEnv",
17
26
  "IsolatedEnvManager",
27
+ "ToolConfig",
28
+ # GPU Detection
29
+ "GPUInfo",
30
+ "CUDAEnvironment",
31
+ "detect_cuda_environment",
18
32
  "detect_cuda_version",
19
33
  "detect_gpu_info",
34
+ "detect_gpus",
20
35
  "get_gpu_summary",
36
+ "get_recommended_cuda_version",
37
+ # Platform
21
38
  "get_platform",
22
39
  "PlatformProvider",
23
40
  "PlatformPaths",
@@ -23,6 +23,14 @@ class NodeReq:
23
23
  repo: str # GitHub repo path, e.g., "Kosinkadink/ComfyUI-VideoHelperSuite"
24
24
 
25
25
 
26
@dataclass
class ToolConfig:
    """Configuration for an external tool like Blender."""

    name: str                           # tool key from the [tools] table
    version: str = "latest"             # requested version; "latest" when unpinned
    install_dir: Optional[Path] = None  # explicit install-location override
32
+
33
+
26
34
  @dataclass
27
35
  class EnvManagerConfig:
28
36
  """
@@ -35,10 +43,12 @@ class EnvManagerConfig:
35
43
  [envname.cuda] - CUDA packages for isolated env
36
44
  [envname.packages] - Regular packages for isolated env
37
45
  [node_reqs] - Node dependencies
46
+ [tools] - External tools (e.g., blender = "4.2")
38
47
  """
39
48
  local: LocalConfig = field(default_factory=LocalConfig)
40
49
  envs: Dict[str, "IsolatedEnv"] = field(default_factory=dict)
41
50
  node_reqs: List[NodeReq] = field(default_factory=list)
51
+ tools: Dict[str, ToolConfig] = field(default_factory=dict)
42
52
 
43
53
  @property
44
54
  def has_local(self) -> bool:
@@ -63,8 +63,8 @@ else:
63
63
  except ImportError:
64
64
  tomllib = None # type: ignore
65
65
 
66
- from .config import IsolatedEnv, EnvManagerConfig, LocalConfig, NodeReq
67
- from .detection import detect_cuda_version
66
+ from .config import IsolatedEnv, EnvManagerConfig, LocalConfig, NodeReq, ToolConfig
67
+ from .cuda_gpu_detection import detect_cuda_version
68
68
 
69
69
 
70
70
  # Config file name
@@ -332,7 +332,7 @@ def _substitute_vars(s: str, variables: Dict[str, str]) -> str:
332
332
  # =============================================================================
333
333
 
334
334
  # Reserved table names that are NOT isolated environments
335
- RESERVED_TABLES = {"local", "node_reqs", "env", "packages", "sources", "cuda", "variables", "worker"}
335
+ RESERVED_TABLES = {"local", "node_reqs", "env", "packages", "sources", "cuda", "variables", "worker", "tools"}
336
336
 
337
337
 
338
338
  def load_config(
@@ -429,11 +429,13 @@ def _parse_full_config(data: Dict[str, Any], base_dir: Path) -> EnvManagerConfig
429
429
  local = _parse_local_section(data.get("local", {}))
430
430
  envs = _parse_env_sections(data, base_dir)
431
431
  node_reqs = _parse_node_reqs(data.get("node_reqs", {}))
432
+ tools = _parse_tools_section(data.get("tools", {}))
432
433
 
433
434
  return EnvManagerConfig(
434
435
  local=local,
435
436
  envs=envs,
436
437
  node_reqs=node_reqs,
438
+ tools=tools,
437
439
  )
438
440
 
439
441
 
@@ -582,6 +584,35 @@ def _parse_node_reqs(node_reqs_data: Dict[str, Any]) -> List[NodeReq]:
582
584
  return reqs
583
585
 
584
586
 
587
def _parse_tools_section(tools_data: Dict[str, Any]) -> Dict[str, ToolConfig]:
    """Parse [tools] section.

    Supports the simple form:

        [tools]
        blender = "4.2"

    and the extended form:

        [tools.blender]
        version = "4.2"
        install_dir = "/custom/path"

    Unquoted TOML numbers (blender = 4.2 parses as a float) are accepted
    and stringified instead of being silently dropped, since forgetting
    the quotes is a common user mistake.  Other value types are ignored,
    matching the section's best-effort parsing style.
    """
    tools: Dict[str, ToolConfig] = {}

    for name, value in tools_data.items():
        if isinstance(value, str):
            # Simple format: blender = "4.2"
            tools[name] = ToolConfig(name=name, version=value)
        elif isinstance(value, (int, float)) and not isinstance(value, bool):
            # Unquoted version number: blender = 4.2 -> "4.2"
            tools[name] = ToolConfig(name=name, version=str(value))
        elif isinstance(value, dict):
            # Extended format: [tools.blender] with version, install_dir
            version = value.get("version", "latest")
            install_dir = value.get("install_dir")
            if install_dir:
                install_dir = Path(install_dir)
            # str() also normalizes a numeric `version =` inside the table.
            tools[name] = ToolConfig(name=name, version=str(version), install_dir=install_dir)

    return tools
614
+
615
+
585
616
  def _convert_simple_to_full(data: Dict[str, Any], base_dir: Path) -> EnvManagerConfig:
586
617
  """Convert simple config format to full EnvManagerConfig.
587
618
 
@@ -591,6 +622,9 @@ def _convert_simple_to_full(data: Dict[str, Any], base_dir: Path) -> EnvManagerC
591
622
  # Parse using simple parser to get IsolatedEnv
592
623
  simple_env = _parse_config(data, base_dir)
593
624
 
625
+ # Parse tools section (shared between simple and full format)
626
+ tools = _parse_tools_section(data.get("tools", {}))
627
+
594
628
  # Check if this has explicit env settings (isolated venv) vs just CUDA packages (local install)
595
629
  env_section = data.get("env", {})
596
630
  has_explicit_env = bool(env_section.get("name") or env_section.get("python"))
@@ -601,6 +635,7 @@ def _convert_simple_to_full(data: Dict[str, Any], base_dir: Path) -> EnvManagerC
601
635
  local=LocalConfig(),
602
636
  envs={simple_env.name: simple_env},
603
637
  node_reqs=[],
638
+ tools=tools,
604
639
  )
605
640
  else:
606
641
  # Local CUDA packages only (no isolated venv)
@@ -619,4 +654,5 @@ def _convert_simple_to_full(data: Dict[str, Any], base_dir: Path) -> EnvManagerC
619
654
  ),
620
655
  envs={},
621
656
  node_reqs=[],
657
+ tools=tools,
622
658
  )
@@ -0,0 +1,303 @@
1
+ """
2
+ Robust CUDA/GPU detection with multiple fallback methods.
3
+
4
+ Detection priority: NVML → PyTorch → nvidia-smi → sysfs → env vars
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import logging
10
+ import os
11
+ import re
12
+ import subprocess
13
+ import time
14
+ from dataclasses import dataclass, field
15
+ from pathlib import Path
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+ CUDA_VERSION_ENV_VAR = "COMFY_ENV_CUDA_VERSION"
20
+
21
+ _cache: tuple[float, "CUDAEnvironment | None"] = (0, None)
22
+ CACHE_TTL = 60
23
+
24
+
25
@dataclass
class GPUInfo:
    """Properties of a single NVIDIA GPU as reported by one detection backend."""

    index: int                           # device ordinal
    name: str                            # device name, e.g. "NVIDIA GeForce RTX 4090"
    compute_capability: tuple[int, int]  # (major, minor); (0, 0) when unknown
    architecture: str                    # e.g. "Ampere"; "Unknown" when undetectable
    vram_total_mb: int = 0
    vram_free_mb: int = 0
    uuid: str = ""
    pci_bus_id: str = ""
    driver_version: str = ""

    def cc_str(self) -> str:
        """Compute capability as a dotted string, e.g. "8.6"."""
        major, minor = self.compute_capability
        return f"{major}.{minor}"

    def sm_version(self) -> str:
        """Compute capability in SM notation, e.g. "sm_86"."""
        major, minor = self.compute_capability
        return f"sm_{major}{minor}"
42
+
43
+
44
@dataclass
class CUDAEnvironment:
    """Aggregate snapshot of the host's NVIDIA/CUDA state."""

    gpus: list[GPUInfo] = field(default_factory=list)
    driver_version: str = ""        # e.g. "550.54.14"; "" when no driver was found
    cuda_runtime_version: str = ""  # from torch or nvcc; "" when unavailable
    recommended_cuda: str = ""      # wheel-selection hint, e.g. "12.8"
    detection_method: str = ""      # backend that produced `gpus` ("nvml", "torch", ...)
51
+
52
+
53
# Exact (major, minor) -> architecture-name mapping for known GPUs.
COMPUTE_TO_ARCH = {
    (5, 0): "Maxwell", (5, 2): "Maxwell", (5, 3): "Maxwell",
    (6, 0): "Pascal", (6, 1): "Pascal", (6, 2): "Pascal",
    (7, 0): "Volta", (7, 2): "Volta", (7, 5): "Turing",
    (8, 0): "Ampere", (8, 6): "Ampere", (8, 7): "Ampere", (8, 9): "Ada",
    (9, 0): "Hopper",
    (10, 0): "Blackwell", (10, 1): "Blackwell", (10, 2): "Blackwell",
}


def _cc_to_arch(major: int, minor: int) -> str:
    """Map a compute capability to its architecture name.

    Uses the exact table first, then falls back to per-generation ranges
    for capabilities that are not listed explicitly.
    """
    exact = COMPUTE_TO_ARCH.get((major, minor))
    if exact is not None:
        return exact
    if major >= 10:
        return "Blackwell"
    if major == 9:
        return "Hopper"
    if major == 8:
        return "Ada" if minor >= 9 else "Ampere"
    if major == 7:
        return "Turing" if minor >= 5 else "Volta"
    if major == 6:
        return "Pascal"
    if major == 5:
        return "Maxwell"
    return "Unknown"
72
+
73
+
74
def _parse_cc(s: str) -> tuple[int, int]:
    """Parse a compute-capability string into (major, minor).

    Accepts the dotted form ("8.6"), the compact form ("86"), and — as a
    generalization — a bare major version ("7" -> (7, 0)), which the
    original implementation silently mapped to (0, 0).  Returns (0, 0)
    for anything unparsable; downstream code treats that as "unknown".
    """
    s = s.strip()
    try:
        if "." in s:
            major, minor = s.split(".")[:2]
            return (int(major), int(minor))
        if len(s) >= 2:
            # Compact form: the last digit is the minor version.
            return (int(s[:-1]), int(s[-1]))
        if s:
            # Bare major version, e.g. "7".
            return (int(s), 0)
    except (ValueError, IndexError):
        pass
    return (0, 0)
84
+
85
+
86
def _detect_nvml() -> list[GPUInfo] | None:
    """Enumerate GPUs via NVML (pynvml).

    Returns None when pynvml is not installed, NVML cannot initialize,
    or no devices are present — signalling "try the next method".
    """
    try:
        import pynvml
        pynvml.nvmlInit()
        try:
            num_devices = pynvml.nvmlDeviceGetCount()
            if not num_devices:
                return None
            found = []
            for idx in range(num_devices):
                handle = pynvml.nvmlDeviceGetHandleByIndex(idx)
                dev_name = pynvml.nvmlDeviceGetName(handle)
                # Older pynvml returns bytes, newer returns str.
                if isinstance(dev_name, bytes):
                    dev_name = dev_name.decode()
                capability = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
                mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                mib = 1024 * 1024
                found.append(GPUInfo(
                    index=idx,
                    name=dev_name,
                    compute_capability=capability,
                    architecture=_cc_to_arch(*capability),
                    vram_total_mb=mem_info.total // mib,
                    vram_free_mb=mem_info.free // mib,
                ))
            return found
        finally:
            # Always release NVML, even when a per-device query raises.
            pynvml.nvmlShutdown()
    except Exception:
        return None
112
+
113
+
114
def _detect_torch() -> list[GPUInfo] | None:
    """Enumerate GPUs via an already-installed PyTorch build, if any.

    Returns None when torch is missing or reports no CUDA devices.
    """
    try:
        import torch
        if not torch.cuda.is_available():
            return None
        found = []
        for idx in range(torch.cuda.device_count()):
            props = torch.cuda.get_device_properties(idx)
            found.append(GPUInfo(
                index=idx,
                name=props.name,
                compute_capability=(props.major, props.minor),
                architecture=_cc_to_arch(props.major, props.minor),
                # torch exposes total memory only; free VRAM stays at the default 0.
                vram_total_mb=props.total_memory // (1024 * 1024),
            ))
        return found or None
    except Exception:
        return None
131
+
132
+
133
def _detect_smi() -> list[GPUInfo] | None:
    """Enumerate GPUs by parsing `nvidia-smi` CSV output.

    Returns None when nvidia-smi is absent, fails, or lists no devices.
    """
    def field_at(cols: list[str], i: int) -> str:
        # Tolerate short rows from older nvidia-smi builds.
        return cols[i] if len(cols) > i else ""

    try:
        proc = subprocess.run(
            ["nvidia-smi",
             "--query-gpu=index,name,uuid,pci.bus_id,compute_cap,memory.total,memory.free,driver_version",
             "--format=csv,noheader,nounits"],
            capture_output=True, text=True, timeout=10,
        )
        if proc.returncode != 0:
            return None
        found = []
        for raw in proc.stdout.strip().split("\n"):
            if not raw.strip():
                continue
            cols = [c.strip() for c in raw.split(",")]
            if len(cols) < 5:
                continue
            capability = _parse_cc(cols[4])
            total = field_at(cols, 5)
            free = field_at(cols, 6)
            found.append(GPUInfo(
                index=int(cols[0]) if cols[0].isdigit() else len(found),
                name=cols[1],
                uuid=field_at(cols, 2),
                pci_bus_id=field_at(cols, 3),
                compute_capability=capability,
                architecture=_cc_to_arch(*capability),
                vram_total_mb=int(total) if total.isdigit() else 0,
                vram_free_mb=int(free) if free.isdigit() else 0,
                driver_version=field_at(cols, 7),
            ))
        return found or None
    except Exception:
        return None
162
+
163
+
164
def _detect_sysfs() -> list[GPUInfo] | None:
    """Last-resort detection: scan Linux sysfs for NVIDIA PCI display devices.

    Yields no compute capability or VRAM information — only that NVIDIA
    display-class devices exist, plus their PCI bus IDs.  Returns None on
    non-Linux hosts or when nothing matches.
    """
    try:
        pci_root = Path("/sys/bus/pci/devices")
        if not pci_root.exists():
            return None
        found = []
        for dev in sorted(pci_root.iterdir()):
            try:
                vendor = (dev / "vendor").read_text().strip().lower() if (dev / "vendor").exists() else ""
                dev_class = (dev / "class").read_text().strip() if (dev / "class").exists() else ""
            except OSError:
                # Device node vanished or is unreadable; skip it.
                continue
            if "10de" not in vendor:  # 0x10de == NVIDIA's PCI vendor ID
                continue
            # 0x0300xx = VGA controller, 0x0302xx = 3D controller.
            if not (dev_class.startswith("0x0300") or dev_class.startswith("0x0302")):
                continue
            found.append(GPUInfo(
                index=len(found),
                name="NVIDIA GPU",  # fixed: was an f-string with no placeholders
                pci_bus_id=dev.name,
                compute_capability=(0, 0),
                architecture="Unknown",
            ))
        return found or None
    except Exception:
        return None
184
+
185
+
186
def _get_driver_version() -> str:
    """Return the NVIDIA driver version, or "" when it cannot be determined.

    Tries NVML first, then falls back to `nvidia-smi`.
    """
    try:
        import pynvml
        pynvml.nvmlInit()
        version = pynvml.nvmlSystemGetDriverVersion()
        pynvml.nvmlShutdown()
        if isinstance(version, bytes):
            return version.decode()
        return version
    except Exception:
        pass
    try:
        proc = subprocess.run(
            ["nvidia-smi", "--query-gpu=driver_version", "--format=csv,noheader"],
            capture_output=True, text=True, timeout=5,
        )
        if proc.returncode == 0:
            # One line per GPU; the driver is system-wide, so take the first.
            return proc.stdout.strip().split("\n")[0]
    except Exception:
        pass
    return ""
203
+
204
+
205
def _get_cuda_version() -> str:
    """Return the CUDA runtime version, or "" when none can be found.

    Prefers the version the installed torch build was compiled against,
    then falls back to parsing `nvcc --version`.
    """
    try:
        import torch
        if torch.cuda.is_available() and torch.version.cuda:
            return torch.version.cuda
    except Exception:
        pass
    try:
        proc = subprocess.run(["nvcc", "--version"],
                              capture_output=True, text=True, timeout=5)
        match = re.search(r"release (\d+\.\d+)", proc.stdout)
        if match:
            return match.group(1)
    except Exception:
        pass
    return ""
219
+
220
+
221
def _recommended_cuda(gpus: list[GPUInfo]) -> str:
    """Pick the CUDA toolkit version wheels should target for these GPUs.

    An explicit COMFY_ENV_CUDA_VERSION override always wins; a compact
    value like "128" is normalized to "12.8".  With no GPUs and no
    override, returns "".
    """
    override = os.environ.get(CUDA_VERSION_ENV_VAR, "").strip()
    if override:
        if "." not in override and len(override) >= 2:
            return f"{override[:-1]}.{override[-1]}"
        return override
    if not gpus:
        return ""
    capabilities = [gpu.compute_capability for gpu in gpus]
    # Blackwell (sm_100+) only works on CUDA >= 12.8.
    if any(major >= 10 for major, _ in capabilities):
        return "12.8"  # Blackwell requires 12.8
    # Anything older than Turing (sm_75) stays on the 12.4 wheel line.
    if any(cap < (7, 5) for cap in capabilities):
        return "12.4"  # Legacy (Pascal) uses 12.4
    return "12.8"  # Modern GPUs use 12.8
236
+
237
+
238
def detect_cuda_environment(force_refresh: bool = False) -> CUDAEnvironment:
    """Detect GPUs and CUDA versions, caching the result for CACHE_TTL seconds.

    Backends are tried in priority order (NVML -> PyTorch -> nvidia-smi ->
    sysfs); the first one that reports at least one GPU wins.  Pass
    ``force_refresh=True`` to bypass the module-level cache.
    """
    global _cache
    cached_at, cached_env = _cache
    if cached_env and not force_refresh and time.time() - cached_at < CACHE_TTL:
        return cached_env

    detectors = (
        ("nvml", _detect_nvml),
        ("torch", _detect_torch),
        ("smi", _detect_smi),
        ("sysfs", _detect_sysfs),
    )
    gpus = None
    method = "none"
    for label, detector in detectors:
        gpus = detector()
        if gpus:
            method = label
            break

    env = CUDAEnvironment(
        gpus=gpus or [],
        driver_version=_get_driver_version(),
        cuda_runtime_version=_get_cuda_version(),
        recommended_cuda=_recommended_cuda(gpus or []),
        detection_method=method,
    )
    _cache = (time.time(), env)
    return env
259
+
260
+
261
def get_recommended_cuda_version() -> str | None:
    """Return the recommended CUDA version (e.g. "12.8"), or None if unknown.

    The COMFY_ENV_CUDA_VERSION override is re-read on every call, so a
    change to the environment variable is honored even while the
    detection cache is still warm.
    """
    override = os.environ.get(CUDA_VERSION_ENV_VAR, "").strip()
    if override:
        # Normalize the compact "128" form to dotted "12.8".
        if "." not in override and len(override) >= 2:
            return f"{override[:-1]}.{override[-1]}"
        return override
    return detect_cuda_environment().recommended_cuda or None
268
+
269
+
270
def detect_gpus() -> list[GPUInfo]:
    """Return all detected GPUs (possibly empty), using the cached environment."""
    environment = detect_cuda_environment()
    return environment.gpus
272
+
273
+
274
def detect_gpu_info() -> list[dict]:
    """Return GPU info as list of dicts (a JSON-friendly view of detect_gpus())."""
    from dataclasses import asdict
    info = []
    for gpu in detect_gpus():
        info.append(asdict(gpu))
    return info
278
+
279
+
280
def get_gpu_summary() -> str:
    """Build a multi-line, human-readable summary of the CUDA environment.

    Shows detection method, driver/runtime versions, the recommended CUDA
    version, and one line per GPU; falls back to an override hint when no
    GPU is detected.
    """
    env = detect_cuda_environment()

    if not env.gpus:
        override = os.environ.get(CUDA_VERSION_ENV_VAR)
        if override:
            return f"No NVIDIA GPU detected (using {CUDA_VERSION_ENV_VAR}={override})"
        return f"No NVIDIA GPU detected (set {CUDA_VERSION_ENV_VAR} to override)"

    summary = [f"Detection: {env.detection_method}"]
    if env.driver_version:
        summary.append(f"Driver: {env.driver_version}")
    if env.cuda_runtime_version:
        summary.append(f"CUDA: {env.cuda_runtime_version}")
    summary.append(f"Recommended: CUDA {env.recommended_cuda}")
    summary.append("")
    for gpu in env.gpus:
        vram = f"{gpu.vram_total_mb}MB" if gpu.vram_total_mb else "?"
        summary.append(f" GPU {gpu.index}: {gpu.name} ({gpu.sm_version()}) [{gpu.architecture}] {vram}")
    return "\n".join(summary)
300
+
301
+
302
+ # Aliases
303
+ detect_cuda_version = get_recommended_cuda_version
@@ -12,7 +12,7 @@ from typing import Optional, Callable
12
12
 
13
13
  from .config import IsolatedEnv
14
14
  from .platform import get_platform, PlatformProvider
15
- from .detection import detect_cuda_version
15
+ from .cuda_gpu_detection import detect_cuda_version
16
16
  from .security import (
17
17
  normalize_env_name,
18
18
  validate_dependency,
@@ -475,7 +475,7 @@ class IsolatedEnvManager:
475
475
  url_template = source["url_template"]
476
476
  url = self._substitute_template(url_template, vars_dict)
477
477
 
478
- self.log(f" Trying {source.get('name', 'unknown')}: {url[:80]}...")
478
+ self.log(f" Trying {source.get('name', 'unknown')}: {url}")
479
479
  result = subprocess.run(
480
480
  pip_args + ["--no-deps", url],
481
481
  capture_output=True, text=True,
@@ -484,7 +484,7 @@ class IsolatedEnvManager:
484
484
  if result.returncode == 0:
485
485
  return # Success!
486
486
 
487
- errors.append(f"{source.get('name', 'unknown')}: {result.stderr[:100]}")
487
+ errors.append(f"{source.get('name', 'unknown')}: {result.stderr.strip()}")
488
488
 
489
489
  # All sources failed
490
490
  raise RuntimeError(
@@ -45,12 +45,7 @@ class WindowsPlatformProvider(PlatformProvider):
45
45
  f"Running in {shell_env.upper()} environment.\n"
46
46
  f"This package requires native Windows Python.\n"
47
47
  f"Please use PowerShell, Command Prompt, or native Windows terminal.")
48
-
49
- # Check Visual C++ Redistributable
50
- vc_ok, vc_error = self._check_vc_redistributable()
51
- if not vc_ok:
52
- return (False, vc_error)
53
-
48
+ # Note: VC++ runtime is handled by msvc-runtime package, no system check needed
54
49
  return (True, None)
55
50
 
56
51
  def _detect_shell_environment(self) -> str:
@@ -67,94 +62,6 @@ class WindowsPlatformProvider(PlatformProvider):
67
62
 
68
63
  return 'native-windows'
69
64
 
70
- def _find_vc_dlls(self) -> Dict[str, Optional[Path]]:
71
- """Find VC++ runtime DLLs in common locations."""
72
- required_dlls = ['vcruntime140.dll', 'msvcp140.dll']
73
- found = {}
74
-
75
- # Search locations in order of preference
76
- search_paths = []
77
-
78
- # 1. Current Python environment (conda/venv)
79
- if hasattr(sys, 'base_prefix'):
80
- search_paths.append(Path(sys.base_prefix) / 'Library' / 'bin')
81
- search_paths.append(Path(sys.base_prefix) / 'DLLs')
82
- if hasattr(sys, 'prefix'):
83
- search_paths.append(Path(sys.prefix) / 'Library' / 'bin')
84
- search_paths.append(Path(sys.prefix) / 'DLLs')
85
-
86
- # 2. System directories
87
- system_root = os.environ.get('SystemRoot', r'C:\Windows')
88
- search_paths.append(Path(system_root) / 'System32')
89
-
90
- # 3. Visual Studio redistributable directories
91
- program_files = os.environ.get('ProgramFiles', r'C:\Program Files')
92
- vc_redist = Path(program_files) / 'Microsoft Visual Studio' / '2022' / 'Community' / 'VC' / 'Redist' / 'MSVC'
93
- if vc_redist.exists():
94
- for version_dir in vc_redist.iterdir():
95
- search_paths.append(version_dir / 'x64' / 'Microsoft.VC143.CRT')
96
-
97
- for dll_name in required_dlls:
98
- found[dll_name] = None
99
- for search_path in search_paths:
100
- dll_path = search_path / dll_name
101
- if dll_path.exists():
102
- found[dll_name] = dll_path
103
- break
104
-
105
- return found
106
-
107
- def bundle_vc_dlls_to_env(self, env_dir: Path) -> Tuple[bool, Optional[str]]:
108
- """Bundle VC++ runtime DLLs into the isolated environment."""
109
- required_dlls = ['vcruntime140.dll', 'msvcp140.dll']
110
- found_dlls = self._find_vc_dlls()
111
-
112
- # Check which DLLs are missing
113
- missing = [dll for dll, path in found_dlls.items() if path is None]
114
-
115
- if missing:
116
- return (False,
117
- f"Could not find VC++ DLLs to bundle: {', '.join(missing)}\n\n"
118
- f"Please install Visual C++ Redistributable:\n"
119
- f" Download: https://aka.ms/vs/17/release/vc_redist.x64.exe\n"
120
- f"\nAfter installation, delete the environment and try again.")
121
-
122
- # Copy DLLs to the environment's Scripts directory
123
- scripts_dir = env_dir / 'Scripts'
124
-
125
- copied = []
126
- for dll_name, source_path in found_dlls.items():
127
- if source_path:
128
- try:
129
- if scripts_dir.exists():
130
- scripts_target = scripts_dir / dll_name
131
- if not scripts_target.exists():
132
- shutil.copy2(source_path, scripts_target)
133
- copied.append(f"{dll_name} -> Scripts/")
134
- except (OSError, IOError) as e:
135
- return (False, f"Failed to copy {dll_name}: {e}")
136
-
137
- return (True, None)
138
-
139
- def _check_vc_redistributable(self) -> Tuple[bool, Optional[str]]:
140
- """Check if Visual C++ Redistributable DLLs are available."""
141
- required_dlls = ['vcruntime140.dll', 'msvcp140.dll']
142
- found_dlls = self._find_vc_dlls()
143
-
144
- missing = [dll for dll, path in found_dlls.items() if path is None]
145
-
146
- if missing:
147
- error_msg = (
148
- f"Visual C++ Redistributable DLLs not found!\n"
149
- f"\nMissing: {', '.join(missing)}\n"
150
- f"\nPlease install Visual C++ Redistributable for Visual Studio 2015-2022:\n"
151
- f"\n Download (64-bit): https://aka.ms/vs/17/release/vc_redist.x64.exe\n"
152
- f"\nAfter installation, restart your terminal and try again."
153
- )
154
- return (False, error_msg)
155
-
156
- return (True, None)
157
-
158
65
  def make_executable(self, path: Path) -> None:
159
66
  # No-op on Windows - executables are determined by extension
160
67
  pass