comfy-env 0.0.8 → 0.0.16 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
comfy_env/env/cuda_gpu_detection.py ADDED
@@ -0,0 +1,303 @@
+ """
+ Robust CUDA/GPU detection with multiple fallback methods.
+
+ Detection priority: NVML → PyTorch → nvidia-smi → sysfs → env vars
+ """
+
+ from __future__ import annotations
+
+ import logging
+ import os
+ import re
+ import subprocess
+ import time
+ from dataclasses import dataclass, field
+ from pathlib import Path
+
+ logger = logging.getLogger(__name__)
+
+ CUDA_VERSION_ENV_VAR = "COMFY_ENV_CUDA_VERSION"
+
+ _cache: tuple[float, "CUDAEnvironment | None"] = (0, None)
+ CACHE_TTL = 60
+
+
+ @dataclass
+ class GPUInfo:
+     index: int
+     name: str
+     compute_capability: tuple[int, int]
+     architecture: str
+     vram_total_mb: int = 0
+     vram_free_mb: int = 0
+     uuid: str = ""
+     pci_bus_id: str = ""
+     driver_version: str = ""
+
+     def cc_str(self) -> str:
+         return f"{self.compute_capability[0]}.{self.compute_capability[1]}"
+
+     def sm_version(self) -> str:
+         return f"sm_{self.compute_capability[0]}{self.compute_capability[1]}"
+
+
+ @dataclass
+ class CUDAEnvironment:
+     gpus: list[GPUInfo] = field(default_factory=list)
+     driver_version: str = ""
+     cuda_runtime_version: str = ""
+     recommended_cuda: str = ""
+     detection_method: str = ""
+
+
+ COMPUTE_TO_ARCH = {
+     (5, 0): "Maxwell", (5, 2): "Maxwell", (5, 3): "Maxwell",
+     (6, 0): "Pascal", (6, 1): "Pascal", (6, 2): "Pascal",
+     (7, 0): "Volta", (7, 2): "Volta", (7, 5): "Turing",
+     (8, 0): "Ampere", (8, 6): "Ampere", (8, 7): "Ampere", (8, 9): "Ada",
+     (9, 0): "Hopper",
+     (10, 0): "Blackwell", (10, 1): "Blackwell", (10, 2): "Blackwell",
+ }
+
+
+ def _cc_to_arch(major: int, minor: int) -> str:
+     if arch := COMPUTE_TO_ARCH.get((major, minor)):
+         return arch
+     if major >= 10: return "Blackwell"
+     if major == 9: return "Hopper"
+     if major == 8: return "Ada" if minor >= 9 else "Ampere"
+     if major == 7: return "Turing" if minor >= 5 else "Volta"
+     if major == 6: return "Pascal"
+     return "Maxwell" if major == 5 else "Unknown"
+
+
+ def _parse_cc(s: str) -> tuple[int, int]:
+     try:
+         if "." in s:
+             p = s.split(".")
+             return (int(p[0]), int(p[1]))
+         if len(s) >= 2:
+             return (int(s[:-1]), int(s[-1]))
+     except (ValueError, IndexError):
+         pass
+     return (0, 0)
+
+
+ def _detect_nvml() -> list[GPUInfo] | None:
+     try:
+         import pynvml
+         pynvml.nvmlInit()
+         try:
+             count = pynvml.nvmlDeviceGetCount()
+             if not count:
+                 return None
+             gpus = []
+             for i in range(count):
+                 h = pynvml.nvmlDeviceGetHandleByIndex(i)
+                 name = pynvml.nvmlDeviceGetName(h)
+                 if isinstance(name, bytes): name = name.decode()
+                 cc = pynvml.nvmlDeviceGetCudaComputeCapability(h)
+                 mem = pynvml.nvmlDeviceGetMemoryInfo(h)
+                 gpus.append(GPUInfo(
+                     index=i, name=name, compute_capability=cc,
+                     architecture=_cc_to_arch(*cc),
+                     vram_total_mb=mem.total // (1024*1024),
+                     vram_free_mb=mem.free // (1024*1024),
+                 ))
+             return gpus
+         finally:
+             pynvml.nvmlShutdown()
+     except Exception:
+         return None
+
+
+ def _detect_torch() -> list[GPUInfo] | None:
+     try:
+         import torch
+         if not torch.cuda.is_available():
+             return None
+         gpus = []
+         for i in range(torch.cuda.device_count()):
+             p = torch.cuda.get_device_properties(i)
+             gpus.append(GPUInfo(
+                 index=i, name=p.name,
+                 compute_capability=(p.major, p.minor),
+                 architecture=_cc_to_arch(p.major, p.minor),
+                 vram_total_mb=p.total_memory // (1024*1024),
+             ))
+         return gpus if gpus else None
+     except Exception:
+         return None
+
+
+ def _detect_smi() -> list[GPUInfo] | None:
+     try:
+         r = subprocess.run(
+             ["nvidia-smi", "--query-gpu=index,name,uuid,pci.bus_id,compute_cap,memory.total,memory.free,driver_version",
+              "--format=csv,noheader,nounits"],
+             capture_output=True, text=True, timeout=10
+         )
+         if r.returncode != 0:
+             return None
+         gpus = []
+         for line in r.stdout.strip().split("\n"):
+             if not line.strip():
+                 continue
+             p = [x.strip() for x in line.split(",")]
+             if len(p) < 5:
+                 continue
+             cc = _parse_cc(p[4])
+             gpus.append(GPUInfo(
+                 index=int(p[0]) if p[0].isdigit() else len(gpus),
+                 name=p[1], uuid=p[2] if len(p) > 2 else "",
+                 pci_bus_id=p[3] if len(p) > 3 else "",
+                 compute_capability=cc, architecture=_cc_to_arch(*cc),
+                 vram_total_mb=int(p[5]) if len(p) > 5 and p[5].isdigit() else 0,
+                 vram_free_mb=int(p[6]) if len(p) > 6 and p[6].isdigit() else 0,
+                 driver_version=p[7] if len(p) > 7 else "",
+             ))
+         return gpus if gpus else None
+     except Exception:
+         return None
+
+
+ def _detect_sysfs() -> list[GPUInfo] | None:
+     try:
+         pci_path = Path("/sys/bus/pci/devices")
+         if not pci_path.exists():
+             return None
+         gpus = []
+         for d in sorted(pci_path.iterdir()):
+             vendor = (d / "vendor").read_text().strip().lower() if (d / "vendor").exists() else ""
+             if "10de" not in vendor:
+                 continue
+             cls = (d / "class").read_text().strip() if (d / "class").exists() else ""
+             if not (cls.startswith("0x0300") or cls.startswith("0x0302")):
+                 continue
+             gpus.append(GPUInfo(
+                 index=len(gpus), name=f"NVIDIA GPU", pci_bus_id=d.name,
+                 compute_capability=(0, 0), architecture="Unknown"
+             ))
+         return gpus if gpus else None
+     except Exception:
+         return None
+
+
+ def _get_driver_version() -> str:
+     try:
+         import pynvml
+         pynvml.nvmlInit()
+         v = pynvml.nvmlSystemGetDriverVersion()
+         pynvml.nvmlShutdown()
+         return v.decode() if isinstance(v, bytes) else v
+     except Exception:
+         pass
+     try:
+         r = subprocess.run(["nvidia-smi", "--query-gpu=driver_version", "--format=csv,noheader"],
+                            capture_output=True, text=True, timeout=5)
+         if r.returncode == 0:
+             return r.stdout.strip().split("\n")[0]
+     except Exception:
+         pass
+     return ""
+
+
+ def _get_cuda_version() -> str:
+     try:
+         import torch
+         if torch.cuda.is_available() and torch.version.cuda:
+             return torch.version.cuda
+     except Exception:
+         pass
+     try:
+         r = subprocess.run(["nvcc", "--version"], capture_output=True, text=True, timeout=5)
+         if m := re.search(r"release (\d+\.\d+)", r.stdout):
+             return m.group(1)
+     except Exception:
+         pass
+     return ""
+
+
+ def _recommended_cuda(gpus: list[GPUInfo]) -> str:
+     if override := os.environ.get(CUDA_VERSION_ENV_VAR, "").strip():
+         if "." not in override and len(override) >= 2:
+             return f"{override[:-1]}.{override[-1]}"
+         return override
+     if not gpus:
+         return ""
+     for gpu in gpus:
+         if gpu.compute_capability[0] >= 10:
+             return "12.8"  # Blackwell requires 12.8
+     for gpu in gpus:
+         cc = gpu.compute_capability
+         if cc[0] < 7 or (cc[0] == 7 and cc[1] < 5):
+             return "12.4"  # Legacy (Pascal) uses 12.4
+     return "12.8"  # Modern GPUs use 12.8
+
+
+ def detect_cuda_environment(force_refresh: bool = False) -> CUDAEnvironment:
+     global _cache
+     if not force_refresh and _cache[1] and time.time() - _cache[0] < CACHE_TTL:
+         return _cache[1]
+
+     gpus, method = None, "none"
+     for name, fn in [("nvml", _detect_nvml), ("torch", _detect_torch),
+                      ("smi", _detect_smi), ("sysfs", _detect_sysfs)]:
+         if gpus := fn():
+             method = name
+             break
+
+     env = CUDAEnvironment(
+         gpus=gpus or [],
+         driver_version=_get_driver_version(),
+         cuda_runtime_version=_get_cuda_version(),
+         recommended_cuda=_recommended_cuda(gpus or []),
+         detection_method=method,
+     )
+     _cache = (time.time(), env)
+     return env
+
+
+ def get_recommended_cuda_version() -> str | None:
+     if override := os.environ.get(CUDA_VERSION_ENV_VAR, "").strip():
+         if "." not in override and len(override) >= 2:
+             return f"{override[:-1]}.{override[-1]}"
+         return override
+     env = detect_cuda_environment()
+     return env.recommended_cuda or None
+
+
+ def detect_gpus() -> list[GPUInfo]:
+     return detect_cuda_environment().gpus
+
+
+ def detect_gpu_info() -> list[dict]:
+     """Return GPU info as list of dicts."""
+     from dataclasses import asdict
+     return [asdict(gpu) for gpu in detect_gpus()]
+
+
+ def get_gpu_summary() -> str:
+     """Human-readable GPU summary."""
+     env = detect_cuda_environment()
+     if not env.gpus:
+         override = os.environ.get(CUDA_VERSION_ENV_VAR)
+         if override:
+             return f"No NVIDIA GPU detected (using {CUDA_VERSION_ENV_VAR}={override})"
+         return f"No NVIDIA GPU detected (set {CUDA_VERSION_ENV_VAR} to override)"
+
+     lines = [f"Detection: {env.detection_method}"]
+     if env.driver_version:
+         lines.append(f"Driver: {env.driver_version}")
+     if env.cuda_runtime_version:
+         lines.append(f"CUDA: {env.cuda_runtime_version}")
+     lines.append(f"Recommended: CUDA {env.recommended_cuda}")
+     lines.append("")
+     for gpu in env.gpus:
+         vram = f"{gpu.vram_total_mb}MB" if gpu.vram_total_mb else "?"
+         lines.append(f" GPU {gpu.index}: {gpu.name} ({gpu.sm_version()}) [{gpu.architecture}] {vram}")
+     return "\n".join(lines)
+
+
+ # Aliases
+ detect_cuda_version = get_recommended_cuda_version
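The new module is self-contained, so its public entry points can be exercised directly. Below is a minimal usage sketch, assuming the file ships as comfy_env/env/cuda_gpu_detection.py (the path implied by the relative import in manager.py below); the printed values are illustrative.

# Usage sketch (assumed import path comfy_env.env.cuda_gpu_detection; see the manager.py import below).
from comfy_env.env.cuda_gpu_detection import (
    detect_cuda_environment,
    get_gpu_summary,
    get_recommended_cuda_version,
)

env = detect_cuda_environment()        # results are cached for CACHE_TTL (60 s); force_refresh=True re-probes
print(env.detection_method)            # one of "nvml", "torch", "smi", "sysfs", or "none"
print(get_gpu_summary())               # multi-line driver/GPU report
print(get_recommended_cuda_version())  # e.g. "12.8", "12.4", or None when nothing is detected

# Without a GPU, the COMFY_ENV_CUDA_VERSION override still yields a version,
# e.g. COMFY_ENV_CUDA_VERSION=128 is normalized to "12.8".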
comfy_env/env/manager.py CHANGED
@@ -6,12 +6,13 @@ Uses uv for fast environment creation and package installation.
 
  import subprocess
  import shutil
+ import sys
  from pathlib import Path
  from typing import Optional, Callable
 
  from .config import IsolatedEnv
  from .platform import get_platform, PlatformProvider
- from .detection import detect_cuda_version
+ from .cuda_gpu_detection import detect_cuda_version
  from .security import (
      normalize_env_name,
      validate_dependency,
@@ -263,13 +264,29 @@ class IsolatedEnvManager:
          """
          python_exe = self.get_python(env)
 
-         if not env.requirements and not env.requirements_file:
+         # Merge platform-specific requirements
+         if sys.platform == 'win32':
+             platform_reqs = env.windows_requirements
+             platform_name = 'Windows'
+         elif sys.platform == 'darwin':
+             platform_reqs = env.darwin_requirements
+             platform_name = 'macOS'
+         else:
+             platform_reqs = env.linux_requirements
+             platform_name = 'Linux'
+
+         all_requirements = list(env.requirements) + list(platform_reqs)
+
+         if platform_reqs:
+             self.log(f"Including {len(platform_reqs)} {platform_name}-specific packages")
+
+         if not all_requirements and not env.requirements_file:
              self.log("No requirements to install")
              return
 
          # Validate requirements for security
-         if env.requirements:
-             validate_dependencies(env.requirements)
+         if all_requirements:
+             validate_dependencies(all_requirements)
 
          # Validate wheel sources
          for wheel_source in env.wheel_sources:
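For reference, the platform merge added in the hunk above reduces to the selection sketched here. This is an illustrative stand-in, not the real IsolatedEnv class; only the field names (requirements, windows_requirements, darwin_requirements, linux_requirements) are taken from the diff.

# Illustrative stand-in for the merge above; _EnvStandIn is hypothetical,
# only the field names mirror IsolatedEnv.
import sys
from dataclasses import dataclass, field

@dataclass
class _EnvStandIn:
    requirements: list = field(default_factory=list)
    windows_requirements: list = field(default_factory=list)
    darwin_requirements: list = field(default_factory=list)
    linux_requirements: list = field(default_factory=list)

def merged_requirements(env: _EnvStandIn) -> list:
    # Same branch structure as the method changed above.
    if sys.platform == "win32":
        platform_reqs = env.windows_requirements
    elif sys.platform == "darwin":
        platform_reqs = env.darwin_requirements
    else:
        platform_reqs = env.linux_requirements
    return list(env.requirements) + list(platform_reqs)

# On Linux this prints ["numpy", "triton"] (hypothetical package names).
print(merged_requirements(_EnvStandIn(requirements=["numpy"], linux_requirements=["triton"])))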
@@ -311,11 +328,11 @@ class IsolatedEnvManager:
              self.log(f"Installing {len(env.no_deps_requirements)} CUDA packages")
              self._install_cuda_packages(env, pip_args)
 
-         # Install individual requirements
-         if env.requirements:
-             self.log(f"Installing {len(env.requirements)} packages")
+         # Install individual requirements (including platform-specific)
+         if all_requirements:
+             self.log(f"Installing {len(all_requirements)} packages")
              result = subprocess.run(
-                 pip_args + env.requirements,
+                 pip_args + all_requirements,
                  capture_output=True,
                  text=True,
              )
@@ -458,7 +475,7 @@ class IsolatedEnvManager:
              url_template = source["url_template"]
              url = self._substitute_template(url_template, vars_dict)
 
-             self.log(f" Trying {source.get('name', 'unknown')}: {url[:80]}...")
+             self.log(f" Trying {source.get('name', 'unknown')}: {url}")
              result = subprocess.run(
                  pip_args + ["--no-deps", url],
                  capture_output=True, text=True,
@@ -467,7 +484,7 @@ class IsolatedEnvManager:
              if result.returncode == 0:
                  return  # Success!
 
-             errors.append(f"{source.get('name', 'unknown')}: {result.stderr[:100]}")
+             errors.append(f"{source.get('name', 'unknown')}: {result.stderr.strip()}")
 
          # All sources failed
          raise RuntimeError(
@@ -45,12 +45,7 @@ class WindowsPlatformProvider(PlatformProvider):
                      f"Running in {shell_env.upper()} environment.\n"
                      f"This package requires native Windows Python.\n"
                      f"Please use PowerShell, Command Prompt, or native Windows terminal.")
-
-         # Check Visual C++ Redistributable
-         vc_ok, vc_error = self._check_vc_redistributable()
-         if not vc_ok:
-             return (False, vc_error)
-
+         # Note: VC++ runtime is handled by msvc-runtime package, no system check needed
          return (True, None)
 
      def _detect_shell_environment(self) -> str:
@@ -67,94 +62,6 @@ class WindowsPlatformProvider(PlatformProvider):
 
          return 'native-windows'
 
-     def _find_vc_dlls(self) -> Dict[str, Optional[Path]]:
-         """Find VC++ runtime DLLs in common locations."""
-         required_dlls = ['vcruntime140.dll', 'msvcp140.dll']
-         found = {}
-
-         # Search locations in order of preference
-         search_paths = []
-
-         # 1. Current Python environment (conda/venv)
-         if hasattr(sys, 'base_prefix'):
-             search_paths.append(Path(sys.base_prefix) / 'Library' / 'bin')
-             search_paths.append(Path(sys.base_prefix) / 'DLLs')
-         if hasattr(sys, 'prefix'):
-             search_paths.append(Path(sys.prefix) / 'Library' / 'bin')
-             search_paths.append(Path(sys.prefix) / 'DLLs')
-
-         # 2. System directories
-         system_root = os.environ.get('SystemRoot', r'C:\Windows')
-         search_paths.append(Path(system_root) / 'System32')
-
-         # 3. Visual Studio redistributable directories
-         program_files = os.environ.get('ProgramFiles', r'C:\Program Files')
-         vc_redist = Path(program_files) / 'Microsoft Visual Studio' / '2022' / 'Community' / 'VC' / 'Redist' / 'MSVC'
-         if vc_redist.exists():
-             for version_dir in vc_redist.iterdir():
-                 search_paths.append(version_dir / 'x64' / 'Microsoft.VC143.CRT')
-
-         for dll_name in required_dlls:
-             found[dll_name] = None
-             for search_path in search_paths:
-                 dll_path = search_path / dll_name
-                 if dll_path.exists():
-                     found[dll_name] = dll_path
-                     break
-
-         return found
-
-     def bundle_vc_dlls_to_env(self, env_dir: Path) -> Tuple[bool, Optional[str]]:
-         """Bundle VC++ runtime DLLs into the isolated environment."""
-         required_dlls = ['vcruntime140.dll', 'msvcp140.dll']
-         found_dlls = self._find_vc_dlls()
-
-         # Check which DLLs are missing
-         missing = [dll for dll, path in found_dlls.items() if path is None]
-
-         if missing:
-             return (False,
-                     f"Could not find VC++ DLLs to bundle: {', '.join(missing)}\n\n"
-                     f"Please install Visual C++ Redistributable:\n"
-                     f" Download: https://aka.ms/vs/17/release/vc_redist.x64.exe\n"
-                     f"\nAfter installation, delete the environment and try again.")
-
-         # Copy DLLs to the environment's Scripts directory
-         scripts_dir = env_dir / 'Scripts'
-
-         copied = []
-         for dll_name, source_path in found_dlls.items():
-             if source_path:
-                 try:
-                     if scripts_dir.exists():
-                         scripts_target = scripts_dir / dll_name
-                         if not scripts_target.exists():
-                             shutil.copy2(source_path, scripts_target)
-                             copied.append(f"{dll_name} -> Scripts/")
-                 except (OSError, IOError) as e:
-                     return (False, f"Failed to copy {dll_name}: {e}")
-
-         return (True, None)
-
-     def _check_vc_redistributable(self) -> Tuple[bool, Optional[str]]:
-         """Check if Visual C++ Redistributable DLLs are available."""
-         required_dlls = ['vcruntime140.dll', 'msvcp140.dll']
-         found_dlls = self._find_vc_dlls()
-
-         missing = [dll for dll, path in found_dlls.items() if path is None]
-
-         if missing:
-             error_msg = (
-                 f"Visual C++ Redistributable DLLs not found!\n"
-                 f"\nMissing: {', '.join(missing)}\n"
-                 f"\nPlease install Visual C++ Redistributable for Visual Studio 2015-2022:\n"
-                 f"\n Download (64-bit): https://aka.ms/vs/17/release/vc_redist.x64.exe\n"
-                 f"\nAfter installation, restart your terminal and try again."
-             )
-             return (False, error_msg)
-
-         return (True, None)
-
      def make_executable(self, path: Path) -> None:
          # No-op on Windows - executables are determined by extension
          pass
comfy_env/errors.py CHANGED
@@ -291,35 +291,3 @@ class InstallError(EnvManagerError):
 
          details = "\n".join(details_parts) if details_parts else None
          super().__init__(message, details)
-
-
- def format_environment_mismatch(
-     expected: "RuntimeEnv",
-     actual: "RuntimeEnv",
- ) -> str:
-     """
-     Format a message explaining environment mismatch.
-
-     Used when the current environment doesn't match what's needed.
-     """
-     mismatches = []
-
-     if expected.cuda_version != actual.cuda_version:
-         mismatches.append(
-             f" CUDA: expected {expected.cuda_version}, got {actual.cuda_version}"
-         )
-
-     if expected.torch_version != actual.torch_version:
-         mismatches.append(
-             f" PyTorch: expected {expected.torch_version}, got {actual.torch_version}"
-         )
-
-     if expected.python_version != actual.python_version:
-         mismatches.append(
-             f" Python: expected {expected.python_version}, got {actual.python_version}"
-         )
-
-     if not mismatches:
-         return "Environment matches expected configuration"
-
-     return "Environment mismatch:\n" + "\n".join(mismatches)
comfy_env/install.py CHANGED
@@ -24,12 +24,13 @@ import sys
  from pathlib import Path
  from typing import Any, Callable, Dict, List, Optional, Union
 
- from .env.config import IsolatedEnv
- from .env.config_file import discover_env_config, load_env_from_file
+ from .env.config import IsolatedEnv, ToolConfig
+ from .env.config_file import discover_env_config, load_env_from_file, load_config, discover_config
  from .env.manager import IsolatedEnvManager
  from .errors import CUDANotFoundError, DependencyError, InstallError, WheelNotFoundError
  from .registry import PACKAGE_REGISTRY, get_cuda_short2, is_registered
  from .resolver import RuntimeEnv, WheelResolver, parse_wheel_requirement
+ from .tools import install_tool
 
 
  def install(
@@ -77,20 +78,40 @@ def install(
      log = log_callback or print
      node_dir = Path(node_dir) if node_dir else Path.cwd()
 
-     # Load configuration
-     env_config = _load_config(config, node_dir)
-     if env_config is None:
+     # Load full configuration (includes tools)
+     full_config = _load_full_config(config, node_dir)
+     if full_config is None:
          raise FileNotFoundError(
              "No configuration file found. "
              "Create comfyui_env.toml or specify path explicitly."
          )
 
-     log(f"Found configuration: {env_config.name}")
+     # Install tools first (e.g., Blender)
+     # Tools are installed to ComfyUI root (shared across all nodes)
+     if full_config.tools:
+         log(f"Installing {len(full_config.tools)} tool(s)...")
+         comfyui_root = node_dir.parent.parent  # custom_nodes/../.. = ComfyUI/
+         for name, tool_config in full_config.tools.items():
+             if dry_run:
+                 log(f" Would install {name} {tool_config.version}")
+             else:
+                 install_tool(tool_config, log, comfyui_root)
+
+     # Get environment config
+     env_config = full_config.default_env
+     if env_config is None and not full_config.has_local:
+         log("No packages to install")
+         return True
+
+     if env_config:
+         log(f"Found configuration: {env_config.name}")
 
-     if mode == "isolated":
+     if mode == "isolated" and env_config:
          return _install_isolated(env_config, node_dir, log, dry_run)
-     else:
+     elif env_config:
          return _install_inplace(env_config, node_dir, log, dry_run, verify_wheels)
+     else:
+         return True
 
 
  def _load_config(
@@ -107,6 +128,17 @@ def _load_config(
      return discover_env_config(node_dir)
 
 
+ def _load_full_config(config: Optional[Union[str, Path]], node_dir: Path):
+     """Load full EnvManagerConfig (includes tools)."""
+     from .env.config import EnvManagerConfig
+     if config is not None:
+         config_path = Path(config)
+         if not config_path.is_absolute():
+             config_path = node_dir / config_path
+         return load_config(config_path, node_dir)
+     return discover_config(node_dir)
+
+
  def _install_isolated(
      env_config: IsolatedEnv,
      node_dir: Path,
@@ -140,6 +172,12 @@ def _install_inplace(
      """Install in-place into current environment using the package registry."""
      log("Installing in-place mode")
 
+     # Install MSVC runtime on Windows (required for CUDA/PyTorch native extensions)
+     if sys.platform == "win32":
+         log("Installing MSVC runtime for Windows...")
+         if not dry_run:
+             _pip_install(["msvc-runtime"], no_deps=False, log=log)
+
      # Detect runtime environment
      env = RuntimeEnv.detect()
      log(f"Detected environment: {env}")
@@ -421,6 +459,7 @@ def _install_from_github_release(
          url = url.replace(f"{{{key}}}", str(value))
 
      log(f" Trying {source_name}: {package}=={version}...")
+     log(f" Resolved wheel to: {url}")
 
      try:
          pip_cmd = _get_pip_command()
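Two details in this file are worth spelling out: the in-place path now pulls the msvc-runtime wheel early on Windows, and tool installs resolve the shared ComfyUI root from the node directory. A small sketch of that path arithmetic, with hypothetical paths:

# Path arithmetic behind comfyui_root = node_dir.parent.parent (paths are hypothetical).
from pathlib import Path

node_dir = Path("/opt/ComfyUI/custom_nodes/my_node")  # a custom node checkout
comfyui_root = node_dir.parent.parent                 # custom_nodes/../.. -> /opt/ComfyUI
print(comfyui_root)                                   # tools such as Blender install here, shared across nodes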
comfy_env/ipc/bridge.py CHANGED
@@ -48,10 +48,6 @@ class WorkerBridge:
          result = bridge.call("process", image=my_image)
      """
 
-     # Singleton instances by environment hash
-     _instances: Dict[str, "WorkerBridge"] = {}
-     _instances_lock = threading.Lock()
-
      def __init__(
          self,
          env: IsolatedEnv,
@@ -81,38 +77,6 @@ class WorkerBridge:
          self._process_lock = threading.Lock()
          self._stderr_thread: Optional[threading.Thread] = None
 
-     @classmethod
-     def get_instance(
-         cls,
-         env: IsolatedEnv,
-         worker_script: Path,
-         base_dir: Optional[Path] = None,
-         log_callback: Optional[Callable[[str], None]] = None,
-     ) -> "WorkerBridge":
-         """
-         Get or create a singleton bridge instance for an environment.
-
-         Args:
-             env: Isolated environment configuration
-             worker_script: Path to the worker Python script
-             base_dir: Base directory for environments
-             log_callback: Optional callback for logging
-
-         Returns:
-             WorkerBridge instance (reused if same env hash)
-         """
-         env_hash = env.get_env_hash()
-
-         with cls._instances_lock:
-             if env_hash not in cls._instances:
-                 cls._instances[env_hash] = cls(
-                     env=env,
-                     worker_script=worker_script,
-                     base_dir=base_dir,
-                     log_callback=log_callback,
-                 )
-             return cls._instances[env_hash]
-
      @classmethod
      def from_config_file(
          cls,
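With the per-environment singleton removed, callers construct WorkerBridge directly and own its lifetime; from_config_file() remains as the other constructor path. A hedged sketch of direct construction follows, with the keyword arguments taken from the removed get_instance() signature and hypothetical paths; the IsolatedEnv(...) call is an assumption, not the library's documented API.

# Direct construction sketch (argument names from the removed get_instance();
# paths and the IsolatedEnv(...) call are assumptions).
from pathlib import Path
from comfy_env.env.config import IsolatedEnv
from comfy_env.ipc.bridge import WorkerBridge

env = IsolatedEnv(name="my-node-env")            # hypothetical minimal config
bridge = WorkerBridge(
    env=env,
    worker_script=Path("workers/my_worker.py"),  # hypothetical worker script
    log_callback=print,
)
result = bridge.call("process", image=None)      # mirrors the class docstring example above

Callers that previously leaned on get_instance() for reuse now need to keep and share their own bridge reference.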