comfy-env 0.0.12__py3-none-any.whl → 0.0.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfy_env/cli.py +103 -5
- comfy_env/env/config_file.py +35 -32
- comfy_env/env/detection.py +0 -16
- comfy_env/errors.py +0 -32
- comfy_env/install.py +1 -0
- comfy_env/ipc/bridge.py +0 -36
- comfy_env/registry.py +17 -178
- comfy_env/resolver.py +3 -17
- comfy_env/wheel_sources.yml +141 -0
- {comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/METADATA +2 -1
- {comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/RECORD +14 -14
- comfy_env/runner.py +0 -273
- {comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/WHEEL +0 -0
- {comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/entry_points.txt +0 -0
- {comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/licenses/LICENSE +0 -0
comfy_env/cli.py
CHANGED
@@ -234,6 +234,7 @@ def cmd_info(args) -> int:
 def cmd_resolve(args) -> int:
     """Handle resolve command."""
     from .resolver import RuntimeEnv, WheelResolver, parse_wheel_requirement
+    from .registry import PACKAGE_REGISTRY
     from .env.config_file import discover_env_config, load_env_from_file
 
     env = RuntimeEnv.detect()
@@ -270,19 +271,116 @@ def cmd_resolve(args) -> int:
             print(f" {package}: No version specified, skipping")
             continue
 
+        pkg_lower = package.lower()
         try:
-
-
-
-
+            # Check if package is in registry with github_release method
+            if pkg_lower in PACKAGE_REGISTRY:
+                registry_config = PACKAGE_REGISTRY[pkg_lower]
+                method = registry_config.get("method")
+
+                if method == "github_release":
+                    # Resolve URL from registry sources
+                    url = _resolve_github_release_url(package, version, env, registry_config)
+                    status = "OK" if args.verify else "resolved"
+                    print(f" {package}=={version}: {status}")
+                    print(f" {url}")
+                else:
+                    # For other methods, just show what method will be used
+                    print(f" {package}=={version}: uses {method} method")
+                    if "index_url" in registry_config:
+                        index_url = _substitute_template(registry_config["index_url"], env)
+                        print(f" index: {index_url}")
+                    elif "package_template" in registry_config:
+                        pkg_name = _substitute_template(registry_config["package_template"], env)
+                        print(f" installs as: {pkg_name}")
+            else:
+                # Fall back to WheelResolver
+                url = resolver.resolve(package, version, env, verify=args.verify)
+                status = "OK" if args.verify else "resolved"
+                print(f" {package}=={version}: {status}")
+                print(f" {url}")
         except Exception as e:
             print(f" {package}=={version}: FAILED")
-
+            _print_wheel_not_found_error(package, version, env, e)
             all_ok = False
 
     return 0 if all_ok else 1
 
 
+def _substitute_template(template: str, env) -> str:
+    """Substitute environment variables into a URL template."""
+    vars_dict = env.as_dict()
+    result = template
+    for key, value in vars_dict.items():
+        if value is not None:
+            result = result.replace(f"{{{key}}}", str(value))
+    return result
+
+
+def _resolve_github_release_url(package: str, version: str, env, config: dict) -> str:
+    """Resolve URL for github_release method packages."""
+    sources = config.get("sources", [])
+    if not sources:
+        raise ValueError(f"No sources configured for {package}")
+
+    # Build template variables
+    vars_dict = env.as_dict()
+    vars_dict["version"] = version
+    vars_dict["py_tag"] = f"cp{env.python_short}"
+    if env.cuda_version:
+        vars_dict["cuda_major"] = env.cuda_version.split(".")[0]
+
+    # Filter sources by platform
+    current_platform = env.platform_tag
+    compatible_sources = [
+        s for s in sources
+        if current_platform in s.get("platforms", [])
+    ]
+
+    if not compatible_sources:
+        available = set()
+        for s in sources:
+            available.update(s.get("platforms", []))
+        raise ValueError(
+            f"No {package} wheels for platform {current_platform}. "
+            f"Available: {', '.join(sorted(available))}"
+        )
+
+    # Return URL from first compatible source
+    source = compatible_sources[0]
+    url_template = source.get("url_template", "")
+    url = url_template
+    for key, value in vars_dict.items():
+        if value is not None:
+            url = url.replace(f"{{{key}}}", str(value))
+
+    return url
+
+
+def _print_wheel_not_found_error(package: str, version: str, env, error: Exception) -> None:
+    """Print a formatted error message for wheel not found."""
+    from .errors import WheelNotFoundError
+
+    if isinstance(error, WheelNotFoundError):
+        print(f" CUDA wheel not found: {package}=={version}")
+        print()
+        print("+------------------------------------------------------------------+")
+        print("| CUDA Wheel Not Found |")
+        print("+------------------------------------------------------------------+")
+        print(f"| Package: {package}=={version:<46} |")
+        print(f"| Requested: cu{env.cuda_short}-torch{env.torch_mm}-{env.python_short}-{env.platform_tag:<17} |")
+        print("| |")
+        print(f"| Reason: {error.reason:<54} |")
+        print("| |")
+        print("| Suggestions: |")
+        print(f"| 1. Check if wheel exists: comfy-env resolve {package:<15} |")
+        print(f"| 2. Build wheel locally: comfy-env build {package:<18} |")
+        print("| |")
+        print("+------------------------------------------------------------------+")
+    else:
+        print(f" {error}")
+
+
 def cmd_doctor(args) -> int:
     """Handle doctor command."""
     from .install import verify_installation
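The core of the new resolve path is plain substitution of {placeholder} variables into a URL template plus a platform filter. A minimal standalone sketch of that substitution step, assuming made-up environment values and a placeholder host (example.invalid), not the comfy_env code itself:

# Hedged sketch: mirrors what _substitute_template / _resolve_github_release_url
# do with a template. The placeholder names come from the diff; the concrete
# values and the URL host are invented for illustration.
def substitute(template: str, variables: dict) -> str:
    """Replace every {key} whose value is not None."""
    result = template
    for key, value in variables.items():
        if value is not None:
            result = result.replace(f"{{{key}}}", str(value))
    return result

if __name__ == "__main__":
    template = (
        "https://example.invalid/wheels/cu{cuda_short}-torch{torch_mm}/"
        "pkg-{version}-{py_tag}-{py_tag}-linux_x86_64.whl"
    )
    env_vars = {"cuda_short": "128", "torch_mm": "28", "py_tag": "cp312", "version": "1.0.0"}
    print(substitute(template, env_vars))
    # -> https://example.invalid/wheels/cu128-torch28/pkg-1.0.0-cp312-cp312-linux_x86_64.whl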
comfy_env/env/config_file.py
CHANGED
@@ -168,7 +168,7 @@ def _get_default_pytorch_version(cuda_version: Optional[str]) -> str:
     - CUDA 12.8 (Turing+): PyTorch 2.8.0
     """
     if cuda_version == "12.4":
-        return "2.5.1"  #
+        return "2.5.1"  # Full: Pascal GPUs
     return "2.8.0"  # Modern: Turing through Blackwell
 
 
@@ -176,7 +176,7 @@ def _parse_config(data: Dict[str, Any], base_dir: Path) -> IsolatedEnv:
     """
     Parse TOML data into IsolatedEnv.
 
-    Supports both simplified and
+    Supports both simplified and full config formats:
 
     Simplified (CUDA packages only):
     [packages]
@@ -186,7 +186,7 @@ def _parse_config(data: Dict[str, Any], base_dir: Path) -> IsolatedEnv:
     Or as list:
     packages = ["torch-scatter==2.1.2", "torch-cluster==1.6.3"]
 
-
+    Full:
     [env]
     name = "my-node"
     [packages]
@@ -233,7 +233,7 @@ def _parse_config(data: Dict[str, Any], base_dir: Path) -> IsolatedEnv:
     variables.setdefault("pytorch_mm", pytorch_mm)
 
     # Parse CUDA packages - support multiple formats
-    # Priority: [cuda] section > cuda = [...] >
+    # Priority: [cuda] section > cuda = [...] > [packages] section
     no_deps_requirements = []
     requirements = []
 
@@ -258,12 +258,12 @@ def _parse_config(data: Dict[str, Any], base_dir: Path) -> IsolatedEnv:
 
     elif isinstance(packages_section, dict):
         # Check for simplified format: [packages] with key=value pairs
-        # vs
+        # vs old format: [packages] with requirements/no_deps lists
 
-
+        has_old_keys = any(k in packages_section for k in ["requirements", "no_deps", "requirements_file"])
 
-        if
-        #
+        if has_old_keys:
+            # Old format
             raw_requirements = packages_section.get("requirements", [])
             requirements = [_substitute_vars(req, variables) for req in raw_requirements]
 
@@ -340,9 +340,9 @@ def load_config(
     base_dir: Optional[Path] = None,
 ) -> EnvManagerConfig:
     """
-    Load full EnvManagerConfig from a TOML file
+    Load full EnvManagerConfig from a TOML file.
 
-    Supports both
+    Supports both full schema (named envs) and simple format (auto-detected).
 
     Args:
         path: Path to the TOML config file
@@ -373,7 +373,7 @@ def load_config(
     with open(path, "rb") as f:
         data = tomllib.load(f)
 
-    return
+    return _parse_full_config(data, base_dir)
 
 
 def discover_config(
@@ -404,9 +404,9 @@ def discover_config(
     return None
 
 
-def
+def _parse_full_config(data: Dict[str, Any], base_dir: Path) -> EnvManagerConfig:
     """
-    Parse TOML data into EnvManagerConfig
+    Parse TOML data into EnvManagerConfig.
 
     Schema:
     [local.cuda] - CUDA packages for host
@@ -416,16 +416,16 @@ def _parse_config_v2(data: Dict[str, Any], base_dir: Path) -> EnvManagerConfig:
     [envname.packages] - Packages for env
     [node_reqs] - Node dependencies
 
-    Also supports
+    Also supports simple format ([env] + [packages]) for backward compatibility.
     """
-    # Detect if this is
-
+    # Detect if this is full schema or simple format
+    is_full = "local" in data or _has_named_env(data)
 
-    if not
-    #
-    return
+    if not is_full:
+        # Simple format - convert to full structure
+        return _convert_simple_to_full(data, base_dir)
 
-    # Parse
+    # Parse full schema
     local = _parse_local_section(data.get("local", {}))
     envs = _parse_env_sections(data, base_dir)
     node_reqs = _parse_node_reqs(data.get("node_reqs", {}))
@@ -582,27 +582,30 @@ def _parse_node_reqs(node_reqs_data: Dict[str, Any]) -> List[NodeReq]:
     return reqs
 
 
-def
-    """Convert
-    # Parse using legacy parser to get IsolatedEnv
-    legacy_env = _parse_config(data, base_dir)
+def _convert_simple_to_full(data: Dict[str, Any], base_dir: Path) -> EnvManagerConfig:
+    """Convert simple config format to full EnvManagerConfig.
 
-
-
+    Simple configs have [env] and [packages] sections but no named environments.
+    This converts them to the full format with a single named environment.
+    """
+    # Parse using simple parser to get IsolatedEnv
+    simple_env = _parse_config(data, base_dir)
+
+    # Check if this has explicit env settings (isolated venv) vs just CUDA packages (local install)
     env_section = data.get("env", {})
     has_explicit_env = bool(env_section.get("name") or env_section.get("python"))
 
     if has_explicit_env:
-        #
+        # Isolated venv config
        return EnvManagerConfig(
             local=LocalConfig(),
-            envs={
+            envs={simple_env.name: simple_env},
             node_reqs=[],
         )
     else:
-        #
+        # Local CUDA packages only (no isolated venv)
         cuda_packages = {}
-        for req in
+        for req in simple_env.no_deps_requirements:
             if "==" in req:
                 pkg, ver = req.split("==", 1)
                 cuda_packages[pkg] = ver
@@ -612,8 +615,8 @@ def _convert_legacy_to_v2(data: Dict[str, Any], base_dir: Path) -> EnvManagerCon
         return EnvManagerConfig(
             local=LocalConfig(
                 cuda_packages=cuda_packages,
-                requirements=
+                requirements=simple_env.requirements,
             ),
             envs={},
             node_reqs=[],
-        )
+        )
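The new _parse_full_config decides between the simple format and the full schema before parsing. A standalone sketch of that detection, assuming a stand-in for _has_named_env (the real helper is not shown in this diff) and a made-up set of reserved section names:

# Hedged sketch: simple-vs-full detection as described in the diff above.
# RESERVED and looks_like_full_config are assumptions, not comfy_env code.
from typing import Any, Dict

RESERVED = {"env", "packages", "cuda", "local", "node_reqs"}

def looks_like_full_config(data: Dict[str, Any]) -> bool:
    """True if the TOML data uses the full schema ([local] or a named env table)."""
    if "local" in data:
        return True
    # Any other top-level table is treated as a named environment here.
    return any(isinstance(v, dict) and k not in RESERVED for k, v in data.items())

simple = {"packages": {"torch-scatter": "2.1.2"}}
full = {"local": {"cuda": {"spconv": "2.3.6"}}, "node_reqs": {}}
print(looks_like_full_config(simple))  # False -> converted to a single-env config
print(looks_like_full_config(full))    # True  -> parsed as the full schema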
comfy_env/env/detection.py
CHANGED
@@ -92,22 +92,6 @@ def is_blackwell_gpu(name: str, compute_cap: str) -> bool:
     return False
 
 
-def needs_cuda_128() -> bool:
-    """
-    Check if any detected GPU requires CUDA 12.8.
-
-    Returns:
-        True if Blackwell GPU detected, False otherwise.
-    """
-    gpus = detect_gpu_info()
-
-    for gpu in gpus:
-        if is_blackwell_gpu(gpu["name"], gpu["compute_cap"]):
-            return True
-
-    return False
-
-
 def is_legacy_gpu(compute_cap: str) -> bool:
     """
     Check if GPU is Pascal or older (requires legacy CUDA/PyTorch).
comfy_env/errors.py
CHANGED
@@ -291,35 +291,3 @@ class InstallError(EnvManagerError):
 
         details = "\n".join(details_parts) if details_parts else None
         super().__init__(message, details)
-
-
-def format_environment_mismatch(
-    expected: "RuntimeEnv",
-    actual: "RuntimeEnv",
-) -> str:
-    """
-    Format a message explaining environment mismatch.
-
-    Used when the current environment doesn't match what's needed.
-    """
-    mismatches = []
-
-    if expected.cuda_version != actual.cuda_version:
-        mismatches.append(
-            f" CUDA: expected {expected.cuda_version}, got {actual.cuda_version}"
-        )
-
-    if expected.torch_version != actual.torch_version:
-        mismatches.append(
-            f" PyTorch: expected {expected.torch_version}, got {actual.torch_version}"
-        )
-
-    if expected.python_version != actual.python_version:
-        mismatches.append(
-            f" Python: expected {expected.python_version}, got {actual.python_version}"
-        )
-
-    if not mismatches:
-        return "Environment matches expected configuration"
-
-    return "Environment mismatch:\n" + "\n".join(mismatches)
comfy_env/install.py
CHANGED
comfy_env/ipc/bridge.py
CHANGED
@@ -48,10 +48,6 @@ class WorkerBridge:
         result = bridge.call("process", image=my_image)
     """
 
-    # Singleton instances by environment hash
-    _instances: Dict[str, "WorkerBridge"] = {}
-    _instances_lock = threading.Lock()
-
     def __init__(
         self,
         env: IsolatedEnv,
@@ -81,38 +77,6 @@ class WorkerBridge:
         self._process_lock = threading.Lock()
         self._stderr_thread: Optional[threading.Thread] = None
 
-    @classmethod
-    def get_instance(
-        cls,
-        env: IsolatedEnv,
-        worker_script: Path,
-        base_dir: Optional[Path] = None,
-        log_callback: Optional[Callable[[str], None]] = None,
-    ) -> "WorkerBridge":
-        """
-        Get or create a singleton bridge instance for an environment.
-
-        Args:
-            env: Isolated environment configuration
-            worker_script: Path to the worker Python script
-            base_dir: Base directory for environments
-            log_callback: Optional callback for logging
-
-        Returns:
-            WorkerBridge instance (reused if same env hash)
-        """
-        env_hash = env.get_env_hash()
-
-        with cls._instances_lock:
-            if env_hash not in cls._instances:
-                cls._instances[env_hash] = cls(
-                    env=env,
-                    worker_script=worker_script,
-                    base_dir=base_dir,
-                    log_callback=log_callback,
-                )
-        return cls._instances[env_hash]
-
     @classmethod
     def from_config_file(
         cls,
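With WorkerBridge.get_instance() removed, a caller that still wants one bridge per environment has to keep its own cache. A hedged sketch of what that caller-side cache could look like; env.get_env_hash() and the constructor arguments are taken from the removed code above, while the cache itself and the lock are assumptions about caller code, not part of comfy-env:

# Hedged sketch: caller-side replacement for the removed singleton behaviour.
import threading

_bridges = {}
_bridges_lock = threading.Lock()

def bridge_for(env, worker_script, base_dir=None, log_callback=None):
    """Return a cached WorkerBridge for this environment, creating it on first use."""
    from comfy_env.ipc.bridge import WorkerBridge  # assumes comfy-env is installed

    key = env.get_env_hash()
    with _bridges_lock:
        if key not in _bridges:
            _bridges[key] = WorkerBridge(
                env=env,
                worker_script=worker_script,
                base_dir=base_dir,
                log_callback=log_callback,
            )
        return _bridges[key]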
comfy_env/registry.py
CHANGED
@@ -1,8 +1,7 @@
 """Built-in registry of CUDA packages and their wheel sources.
 
-This module
-
-wheel_sources in their comfyui_env.toml.
+This module loads package configurations from wheel_sources.yml and provides
+lookup functions for the install module.
 
 Install method types:
 - "index": Use pip --extra-index-url (PEP 503 simple repository)
@@ -10,9 +9,13 @@ Install method types:
 - "find_links": Use pip --find-links (for PyG, etc.)
 - "pypi_variant": Package name varies by CUDA version (e.g., spconv-cu124)
 - "github_release": Direct wheel URL from GitHub releases with fallback sources
+- "pypi": Standard PyPI install
 """
 
-from
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+import yaml
 
 
 def get_cuda_short2(cuda_version: str) -> str:
@@ -40,180 +43,16 @@ def get_cuda_short2(cuda_version: str) -> str:
     return f"{major}{minor}"
 
 
-
-
-
-
-
-
-
-
-#
-
-# {torch_short} - PyTorch without dots (e.g., "280")
-# {torch_mm} - PyTorch major.minor (e.g., "28")
-# {py_version} - Python version (e.g., "3.10")
-# {py_short} - Python without dot (e.g., "310")
-# {py_minor} - Python minor version only (e.g., "10")
-# {platform} - Platform tag (e.g., "linux_x86_64")
-# =============================================================================
-
-PACKAGE_REGISTRY: Dict[str, Dict[str, Any]] = {
-    # =========================================================================
-    # PyTorch Geometric (PyG) packages - official index
-    # https://pytorch-geometric.readthedocs.io/en/latest/install/installation.html
-    # Uses --find-links (not --extra-index-url) for proper wheel discovery
-    # =========================================================================
-    "torch-scatter": {
-        "method": "find_links",
-        "index_url": "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html",
-        "description": "Scatter operations for PyTorch",
-    },
-    "torch-cluster": {
-        "method": "find_links",
-        "index_url": "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html",
-        "description": "Clustering algorithms for PyTorch",
-    },
-    "torch-sparse": {
-        "method": "find_links",
-        "index_url": "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html",
-        "description": "Sparse tensor operations for PyTorch",
-    },
-    "torch-spline-conv": {
-        "method": "find_links",
-        "index_url": "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html",
-        "description": "Spline convolutions for PyTorch",
-    },
-
-    # =========================================================================
-    # pytorch3d - Facebook's official wheels
-    # https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md
-    # =========================================================================
-    "pytorch3d": {
-        "method": "index",
-        "index_url": "https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py3{py_minor}_cu{cuda_short}_pyt{torch_short}/download.html",
-        "description": "PyTorch3D - 3D deep learning library",
-    },
-
-    # =========================================================================
-    # PozzettiAndrea wheel repos (GitHub Pages indexes)
-    # =========================================================================
-    # nvdiffrast - wheels are now at cu{cuda}-torch{torch_short} releases
-    "nvdiffrast": {
-        "method": "github_index",
-        "index_url": "https://pozzettiandrea.github.io/nvdiffrast-full-wheels/cu{cuda_short}-torch{torch_short}/",
-        "description": "NVIDIA differentiable rasterizer",
-    },
-    # cumesh, o_voxel, flex_gemm, nvdiffrec_render use torch_short (3 digits: 280)
-    "cumesh": {
-        "method": "github_index",
-        "index_url": "https://pozzettiandrea.github.io/cumesh-wheels/cu{cuda_short}-torch{torch_short}/",
-        "description": "CUDA-accelerated mesh utilities",
-    },
-    "o_voxel": {
-        "method": "github_index",
-        "index_url": "https://pozzettiandrea.github.io/ovoxel-wheels/cu{cuda_short}-torch{torch_short}/",
-        "description": "O-Voxel CUDA extension for TRELLIS",
-    },
-    "flex_gemm": {
-        "method": "github_index",
-        "index_url": "https://pozzettiandrea.github.io/flexgemm-wheels/cu{cuda_short}-torch{torch_short}/",
-        "description": "Flexible GEMM operations",
-    },
-    "nvdiffrec_render": {
-        "method": "github_release",
-        "sources": [
-            {
-                "name": "PozzettiAndrea",
-                "url_template": "https://github.com/PozzettiAndrea/nvdiffrec_render-wheels/releases/download/cu{cuda_short}-torch{torch_short}/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-linux_x86_64.whl",
-                "platforms": ["linux_x86_64"],
-            },
-            {
-                "name": "PozzettiAndrea-windows",
-                "url_template": "https://github.com/PozzettiAndrea/nvdiffrec_render-wheels/releases/download/cu{cuda_short}-torch{torch_short}/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-win_amd64.whl",
-                "platforms": ["win_amd64", "windows_amd64"],
-            },
-        ],
-        "description": "NVDiffRec rendering utilities",
-    },
-
-    # =========================================================================
-    # spconv - PyPI with CUDA-versioned package names
-    # Package names: spconv-cu118, spconv-cu120, spconv-cu121, spconv-cu124, spconv-cu126
-    # Note: Max available is cu126 as of Jan 2026, use explicit spconv-cu126 in config
-    # =========================================================================
-    "spconv": {
-        "method": "pypi_variant",
-        "package_template": "spconv-cu{cuda_short2}",
-        "description": "Sparse convolution library (use spconv-cu126 for CUDA 12.6+)",
-    },
-
-    # =========================================================================
-    # sageattention - Fast quantized attention (2-5x faster than FlashAttention)
-    # Linux: Prebuilt wheels from Kijai/PrecompiledWheels (v2.2.0, cp312)
-    # Windows: Prebuilt wheels from woct0rdho (v2.2.0, cp39-abi3)
-    # =========================================================================
-    "sageattention": {
-        "method": "github_release",
-        "sources": [
-            # Linux: Kijai's precompiled wheels on HuggingFace (Python 3.12)
-            {
-                "name": "kijai-hf",
-                "url_template": "https://huggingface.co/Kijai/PrecompiledWheels/resolve/main/sageattention-{version}-cp312-cp312-linux_x86_64.whl",
-                "platforms": ["linux_x86_64"],
-            },
-            # Windows: woct0rdho prebuilt wheels (ABI3: Python >= 3.9)
-            # Format: sageattention-2.2.0+cu128torch2.8.0.post3-cp39-abi3-win_amd64.whl
-            {
-                "name": "woct0rdho",
-                "url_template": "https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0%2Bcu{cuda_short}torch{torch_version}.post3-cp39-abi3-win_amd64.whl",
-                "platforms": ["win_amd64"],
-            },
-        ],
-        "description": "SageAttention - 2-5x faster than FlashAttention with quantized kernels",
-    },
-
-    # =========================================================================
-    # triton - Required for sageattention on Linux (usually bundled with PyTorch)
-    # =========================================================================
-    "triton": {
-        "method": "pypi",
-        "description": "Triton compiler for custom CUDA kernels (required by sageattention)",
-    },
-
-    # =========================================================================
-    # flash-attn - Multi-source prebuilt wheels
-    # Required for UniRig and other transformer-based models
-    # Sources: Dao-AILab (official), mjun0812 (Linux), bdashore3 (Windows)
-    # =========================================================================
-    "flash-attn": {
-        "method": "github_release",
-        "sources": [
-            # Linux: Dao-AILab official wheels (CUDA 12.x, PyTorch 2.4-2.8)
-            # Format: flash_attn-2.8.3+cu12torch2.8cxx11abiTRUE-cp310-cp310-linux_x86_64.whl
-            {
-                "name": "Dao-AILab",
-                "url_template": "https://github.com/Dao-AILab/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_major}torch{torch_dotted_mm}cxx11abiTRUE-{py_tag}-{py_tag}-linux_x86_64.whl",
-                "platforms": ["linux_x86_64"],
-            },
-            # Linux: mjun0812 prebuilt wheels (CUDA 12.4-13.0, PyTorch 2.5-2.9)
-            # Format: flash_attn-2.8.3+cu128torch2.8-cp310-cp310-linux_x86_64.whl
-            # Note: Release v0.7.2 contains multiple flash_attn versions
-            {
-                "name": "mjun0812",
-                "url_template": "https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.7.2/flash_attn-{version}%2Bcu{cuda_short}torch{torch_dotted_mm}-{py_tag}-{py_tag}-linux_x86_64.whl",
-                "platforms": ["linux_x86_64"],
-            },
-            # Windows: bdashore3 prebuilt wheels (CUDA 12.4/12.8, PyTorch 2.6-2.8)
-            {
-                "name": "bdashore3",
-                "url_template": "https://github.com/bdashore3/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_short}torch{torch_version}cxx11abiFALSE-{py_tag}-{py_tag}-win_amd64.whl",
-                "platforms": ["win_amd64"],
-            },
-        ],
-        "description": "Flash Attention for fast transformer inference",
-    },
-}
+def _load_wheel_sources() -> Dict[str, Dict[str, Any]]:
+    """Load package registry from wheel_sources.yml."""
+    yml_path = Path(__file__).parent / "wheel_sources.yml"
+    with open(yml_path, "r") as f:
+        data = yaml.safe_load(f)
+    return data.get("packages", {})
+
+
+# Load registry at module import time
+PACKAGE_REGISTRY: Dict[str, Dict[str, Any]] = _load_wheel_sources()
 
 
 def get_package_info(package: str) -> Optional[Dict[str, Any]]:
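After this change PACKAGE_REGISTRY is just the parsed "packages" mapping from wheel_sources.yml. A hedged, self-contained sketch of the shape _load_wheel_sources() produces, using an inline trimmed excerpt in the same layout (the flash-attn URL below is a placeholder, not the real template):

# Hedged sketch: parses a miniature wheel_sources.yml-style document with pyyaml,
# which the new METADATA lists as a dependency. Only keys shown in the diff
# (method, index_url, sources, url_template, platforms) are used.
import yaml

SNIPPET = """
packages:
  torch-scatter:
    method: find_links
    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
  flash-attn:
    method: github_release
    sources:
      - name: Dao-AILab
        url_template: "https://example.invalid/flash_attn-{version}-{py_tag}-linux_x86_64.whl"
        platforms: [linux_x86_64]
"""

registry = yaml.safe_load(SNIPPET)["packages"]
print(registry["torch-scatter"]["method"])                # find_links
print(registry["flash-attn"]["sources"][0]["platforms"])  # ['linux_x86_64']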
comfy_env/resolver.py
CHANGED
@@ -218,21 +218,6 @@ class WheelSource:
         return package.lower() in [p.lower() for p in self.packages]
 
 
-# Default wheel sources for common CUDA packages
-DEFAULT_WHEEL_SOURCES = [
-    WheelSource(
-        name="nvdiffrast-wheels",
-        url_template="https://github.com/PozzettiAndrea/nvdiffrast-full-wheels/releases/download/v{version}/nvdiffrast-{version}%2Bcu{cuda_short}torch{torch_mm}-cp{py_short}-cp{py_short}-{platform}.whl",
-        packages=["nvdiffrast"],
-    ),
-    WheelSource(
-        name="cumesh-wheels",
-        url_template="https://github.com/PozzettiAndrea/cumesh-wheels/releases/download/v{version}/{package}-{version}%2Bcu{cuda_short}torch{torch_mm}-cp{py_short}-cp{py_short}-{platform}.whl",
-        packages=["pytorch3d", "torch-cluster", "torch-scatter", "torch-sparse"],
-    ),
-]
-
-
 class WheelResolver:
     """
     Resolves CUDA wheel URLs from package name and runtime environment.
@@ -255,10 +240,11 @@ class WheelResolver:
         Initialize resolver.
 
         Args:
-            sources: List of WheelSource configurations.
+            sources: List of WheelSource configurations. Defaults to empty
+                (use PACKAGE_REGISTRY in install.py for actual sources).
             overrides: Package-specific URL overrides (package -> template).
         """
-        self.sources = sources or
+        self.sources = sources or []
        self.overrides = overrides or {}
 
     def resolve(
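With DEFAULT_WHEEL_SOURCES gone, a WheelResolver only knows the sources it is handed. A hedged sketch of explicit construction; the WheelSource fields and the resolve() call are taken from this diff and from cmd_resolve above, but the example source is invented and the exact resolve() behaviour with a custom source is an assumption:

# Hedged sketch: assumes comfy-env 0.0.13 is installed.
from comfy_env.resolver import RuntimeEnv, WheelResolver, WheelSource

source = WheelSource(
    name="my-wheels",
    url_template="https://example.invalid/{package}-{version}-cp{py_short}-cp{py_short}-{platform}.whl",
    packages=["my_cuda_ext"],
)
resolver = WheelResolver(sources=[source])
env = RuntimeEnv.detect()
# verify=False just formats the URL; verify=True would check it, per cmd_resolve above.
print(resolver.resolve("my_cuda_ext", "0.1.0", env, verify=False))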
comfy_env/wheel_sources.yml
ADDED

@@ -0,0 +1,141 @@
+# Wheel sources registry for CUDA packages
+#
+# Template variables:
+# {cuda_version} - Full CUDA version (e.g., "12.8")
+# {cuda_short} - CUDA without dot (e.g., "128")
+# {cuda_short2} - CUDA short for spconv (e.g., "124" not "1240")
+# {cuda_major} - CUDA major version (e.g., "12")
+# {torch_version} - Full PyTorch version (e.g., "2.8.0")
+# {torch_short} - PyTorch without dots (e.g., "280")
+# {torch_mm} - PyTorch major.minor no dot (e.g., "28")
+# {torch_dotted_mm} - PyTorch major.minor with dot (e.g., "2.8")
+# {py_version} - Python version (e.g., "3.10")
+# {py_short} - Python without dot (e.g., "310")
+# {py_minor} - Python minor version only (e.g., "10")
+# {py_tag} - Python tag (e.g., "cp310")
+# {platform} - Platform tag (e.g., "linux_x86_64")
+#
+# Install methods:
+# index - pip --extra-index-url (PEP 503)
+# find_links - pip --find-links
+# github_index - GitHub Pages index (--find-links)
+# pypi_variant - Package name varies by CUDA (e.g., spconv-cu124)
+# github_release - Direct wheel URL from GitHub releases
+# pypi - Standard PyPI install
+
+packages:
+  # ===========================================================================
+  # PyTorch Geometric (PyG) - official index
+  # https://pytorch-geometric.readthedocs.io/en/latest/install/installation.html
+  # ===========================================================================
+  torch-scatter:
+    method: find_links
+    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
+    description: Scatter operations for PyTorch
+
+  torch-cluster:
+    method: find_links
+    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
+    description: Clustering algorithms for PyTorch
+
+  torch-sparse:
+    method: find_links
+    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
+    description: Sparse tensor operations for PyTorch
+
+  torch-spline-conv:
+    method: find_links
+    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
+    description: Spline convolutions for PyTorch
+
+  # ===========================================================================
+  # pytorch3d - Facebook's official wheels
+  # https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md
+  # ===========================================================================
+  pytorch3d:
+    method: index
+    index_url: "https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py3{py_minor}_cu{cuda_short}_pyt{torch_short}/download.html"
+    description: PyTorch3D - 3D deep learning library
+
+  # ===========================================================================
+  # PozzettiAndrea wheel repos (GitHub Pages indexes)
+  # ===========================================================================
+  nvdiffrast:
+    method: github_index
+    index_url: "https://pozzettiandrea.github.io/nvdiffrast-full-wheels/cu{cuda_short}-torch{torch_short}/"
+    description: NVIDIA differentiable rasterizer
+
+  cumesh:
+    method: github_index
+    index_url: "https://pozzettiandrea.github.io/cumesh-wheels/cu{cuda_short}-torch{torch_short}/"
+    description: CUDA-accelerated mesh utilities
+
+  o_voxel:
+    method: github_index
+    index_url: "https://pozzettiandrea.github.io/ovoxel-wheels/cu{cuda_short}-torch{torch_short}/"
+    description: O-Voxel CUDA extension for TRELLIS
+
+  flex_gemm:
+    method: github_index
+    index_url: "https://pozzettiandrea.github.io/flexgemm-wheels/cu{cuda_short}-torch{torch_short}/"
+    description: Flexible GEMM operations
+
+  nvdiffrec_render:
+    method: github_release
+    description: NVDiffRec rendering utilities
+    sources:
+      - name: PozzettiAndrea
+        url_template: "https://github.com/PozzettiAndrea/nvdiffrec_render-wheels/releases/download/cu{cuda_short}-torch{torch_short}/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-linux_x86_64.whl"
+        platforms: [linux_x86_64]
+      - name: PozzettiAndrea-windows
+        url_template: "https://github.com/PozzettiAndrea/nvdiffrec_render-wheels/releases/download/cu{cuda_short}-torch{torch_short}/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-win_amd64.whl"
+        platforms: [win_amd64]
+
+  # ===========================================================================
+  # spconv - PyPI with CUDA-versioned package names
+  # ===========================================================================
+  spconv:
+    method: pypi_variant
+    package_template: "spconv-cu{cuda_short2}"
+    description: Sparse convolution library (use spconv-cu126 for CUDA 12.6+)
+
+  # ===========================================================================
+  # sageattention - Fast quantized attention
+  # ===========================================================================
+  sageattention:
+    method: github_release
+    description: SageAttention - 2-5x faster than FlashAttention with quantized kernels
+    sources:
+      - name: kijai-hf
+        url_template: "https://huggingface.co/Kijai/PrecompiledWheels/resolve/main/sageattention-{version}-cp312-cp312-linux_x86_64.whl"
+        platforms: [linux_x86_64]
+      - name: woct0rdho
+        url_template: "https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0%2Bcu{cuda_short}torch{torch_version}.post3-cp39-abi3-win_amd64.whl"
+        platforms: [win_amd64]
+
+  # ===========================================================================
+  # triton - Required for sageattention on Linux
+  # ===========================================================================
+  triton:
+    method: pypi
+    description: Triton compiler for custom CUDA kernels
+
+  # ===========================================================================
+  # flash-attn - Multi-source prebuilt wheels
+  # ===========================================================================
+  flash-attn:
+    method: github_release
+    description: Flash Attention for fast transformer inference
+    sources:
+      - name: Dao-AILab
+        url_template: "https://github.com/Dao-AILab/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_major}torch{torch_dotted_mm}cxx11abiTRUE-{py_tag}-{py_tag}-linux_x86_64.whl"
+        platforms: [linux_x86_64]
+      - name: mjun0812
+        url_template: "https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.7.7/flash_attn-{version}%2Bcu{cuda_short}torch{torch_dotted_mm}-{py_tag}-{py_tag}-linux_x86_64.whl"
+        platforms: [linux_x86_64]
+      - name: mjun0812-windows
+        url_template: "https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.7.7/flash_attn-{version}%2Bcu{cuda_short}torch{torch_dotted_mm}-{py_tag}-{py_tag}-win_amd64.whl"
+        platforms: [win_amd64]
+      - name: bdashore3
+        url_template: "https://github.com/bdashore3/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_short}torch{torch_version}cxx11abiFALSE-{py_tag}-{py_tag}-win_amd64.whl"
+        platforms: [win_amd64]
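Since this YAML now drives the installer, a quick structural check can catch a malformed entry before it reaches resolve time. A hedged sketch; the per-method required keys are inferred from the file above, and the relative path is an assumption about where the check is run from, not part of comfy-env:

# Hedged sketch: sanity-check wheel_sources.yml.
import yaml

with open("comfy_env/wheel_sources.yml") as f:  # path assumed relative to the repo root
    packages = yaml.safe_load(f)["packages"]

for name, cfg in packages.items():
    method = cfg.get("method")
    if method in ("index", "find_links", "github_index"):
        assert "index_url" in cfg, f"{name}: {method} needs index_url"
    elif method == "pypi_variant":
        assert "package_template" in cfg, f"{name}: needs package_template"
    elif method == "github_release":
        for src in cfg.get("sources", []):
            assert "url_template" in src and "platforms" in src, f"{name}: bad source"
    elif method != "pypi":
        raise ValueError(f"{name}: unknown method {method!r}")

print(f"{len(packages)} packages validated")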
{comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: comfy-env
-Version: 0.0.12
+Version: 0.0.13
 Summary: Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation
 Project-URL: Homepage, https://github.com/PozzettiAndrea/comfy-env
 Project-URL: Repository, https://github.com/PozzettiAndrea/comfy-env
@@ -17,6 +17,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Python: >=3.10
+Requires-Dist: pyyaml>=6.0
 Requires-Dist: tomli>=2.0.0; python_version < '3.11'
 Requires-Dist: uv>=0.4.0
 Provides-Extra: dev
{comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/RECORD
CHANGED

@@ -1,15 +1,14 @@
 comfy_env/__init__.py,sha256=1EjSpAK20Z6RAXKn3onZwj2a_FIctG0E_LhWlE7SgIs,4250
-comfy_env/cli.py,sha256=
+comfy_env/cli.py,sha256=q4y_tlPyqKMZhge7XeO9VdbFVZ4dl9LZsgnnTVQYXD4,15979
 comfy_env/decorator.py,sha256=daFR5aLzshkmo5sRKhSGPcTUgIUWml7Gs6A1bfnDuyc,15718
-comfy_env/errors.py,sha256=
-comfy_env/install.py,sha256=
-comfy_env/registry.py,sha256=
-comfy_env/resolver.py,sha256=
-comfy_env/runner.py,sha256=0YUqzK93u--7pKV6_yVC564AJE9rS3y81t5ZhQi2t4Y,9696
+comfy_env/errors.py,sha256=8hN8NDlo8oBUdapc-eT3ZluigI5VBzfqsSBvQdfWlz4,9943
+comfy_env/install.py,sha256=q8x7sQmCWMQyUjeBcbrg39eAP3ZPars54iepupMe2pE,18922
+comfy_env/registry.py,sha256=uFCtGmWYvwGCqObXgzmArX7o5JsFNsHXxayofk3m6no,2569
+comfy_env/resolver.py,sha256=AC_xizQARVdmc8ZpGrIPQd-oA6Gv8w49OsHKltRAq5o,12251
 comfy_env/env/__init__.py,sha256=sybOBrxJCfL4Xry9NNd5xwn9hXIHudXlXDa7SpJkPCE,811
 comfy_env/env/config.py,sha256=R8JyE5iQLHKgnxXOGA8SAI7iu2eYSfXn-MsaqHoU2_A,5667
-comfy_env/env/config_file.py,sha256=
-comfy_env/env/detection.py,sha256=
+comfy_env/env/config_file.py,sha256=6Mh2VsNvyFJuTL4_vOX0sG3Q5SMHLI1P97aY8FFvwnk,20696
+comfy_env/env/detection.py,sha256=L4kXKGs9j7Vw4Qeh6Zw5PfVZW0dN2zuJqojDPgINIhA,5653
 comfy_env/env/manager.py,sha256=MRmqg3Qw0IYUwR6FtM3ZIcGZvWcnKDyO8xbzIAxnwCk,24782
 comfy_env/env/security.py,sha256=dNSitAnfBNVdvxgBBntYw33AJaCs_S1MHb7KJhAVYzM,8171
 comfy_env/env/platform/__init__.py,sha256=Nb5MPZIEeanSMEWwqU4p4bnEKTJn1tWcwobnhq9x9IY,614
@@ -18,7 +17,7 @@ comfy_env/env/platform/darwin.py,sha256=HK3VkLT6DfesAnIXwx2IaUFHTBclF0xTQnC7azWY
 comfy_env/env/platform/linux.py,sha256=xLp8FEbFqZLQrzIZBI9z3C4g23Ab1ASTHLsXDzsdCoA,2062
 comfy_env/env/platform/windows.py,sha256=nD1-bKU2rGmEJlS-cc5yWXMSA51YQtVupn-lQEO5UYA,14840
 comfy_env/ipc/__init__.py,sha256=pTjgJn5YJxLXmEvuKh3lkCEJQs-6W6_F01jfkFMUi0c,1375
-comfy_env/ipc/bridge.py,sha256=
+comfy_env/ipc/bridge.py,sha256=zcyN3xzV4WWBrBFNwCniPBR58dLCg46-k9TtyW5U000,16437
 comfy_env/ipc/protocol.py,sha256=gfWe5yEDUn4QWhcdWFcxn40GqxlW1Uf23j0edOzPPng,7951
 comfy_env/ipc/tensor.py,sha256=DyU28GymKkLPVwzZyKdm2Av222hdaycMgv3KdL5mtO0,12009
 comfy_env/ipc/torch_bridge.py,sha256=WzdwDJa3N_1fEl9OeZxikvMbwryO5P63o0WmEDpS18A,13206
@@ -32,8 +31,9 @@ comfy_env/workers/pool.py,sha256=MtjeOWfvHSCockq8j1gfnxIl-t01GSB79T5N4YB82Lg,695
 comfy_env/workers/tensor_utils.py,sha256=TCuOAjJymrSbkgfyvcKtQ_KbVWTqSwP9VH_bCaFLLq8,6409
 comfy_env/workers/torch_mp.py,sha256=DsfxE3LBAWEuGtk-p-YL0UhVQ7VDh73KT_TFRxYN4-Q,12563
 comfy_env/workers/venv.py,sha256=_ekHfZPqBIPY08DjqiXm6cTBQH4DrbxRWR3AAv3mit8,31589
-comfy_env
-comfy_env-0.0.
-comfy_env-0.0.
-comfy_env-0.0.
-comfy_env-0.0.
+comfy_env/wheel_sources.yml,sha256=ubVuQllCQGkZhLNQaG54divCwn0zLzYg4turzhnIZQ8,7150
+comfy_env-0.0.13.dist-info/METADATA,sha256=FRLYYBNxWwLm5I3vWRVxT1LYp5wvW13ibZ95PVdOaU4,5399
+comfy_env-0.0.13.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+comfy_env-0.0.13.dist-info/entry_points.txt,sha256=J4fXeqgxU_YenuW_Zxn_pEL7J-3R0--b6MS5t0QmAr0,49
+comfy_env-0.0.13.dist-info/licenses/LICENSE,sha256=E68QZMMpW4P2YKstTZ3QU54HRQO8ecew09XZ4_Vn870,1093
+comfy_env-0.0.13.dist-info/RECORD,,
comfy_env/runner.py
DELETED
@@ -1,273 +0,0 @@
-"""
-Generic runner for isolated subprocess execution.
-
-This module is the entry point for subprocess execution. The runner handles
-requests for ANY @isolated class in the environment, importing classes on demand.
-
-Usage (Unix Domain Socket - recommended):
-    python -m comfy_env.runner \
-        --node-dir /path/to/ComfyUI-SAM3DObjects/nodes \
-        --comfyui-base /path/to/ComfyUI \
-        --import-paths ".,../vendor" \
-        --socket /tmp/comfyui-isolation-myenv-12345.sock
-
-Usage (Legacy stdin/stdout):
-    python -m comfy_env.runner \
-        --node-dir /path/to/ComfyUI-SAM3DObjects/nodes \
-        --comfyui-base /path/to/ComfyUI \
-        --import-paths ".,../vendor"
-
-The runner:
-1. Sets COMFYUI_ISOLATION_WORKER=1 (so @isolated decorator becomes no-op)
-2. Adds paths to sys.path
-3. Connects to Unix Domain Socket (or uses stdin/stdout)
-4. Dynamically imports classes as needed (cached)
-5. Calls methods and returns responses
-"""
-
-import os
-import sys
-import json
-import argparse
-import traceback
-import warnings
-import logging
-import importlib
-from typing import Any, Dict, Optional
-
-# Suppress warnings that could interfere with JSON IPC
-warnings.filterwarnings("ignore")
-os.environ.setdefault("TF_CPP_MIN_LOG_LEVEL", "3")
-logging.disable(logging.WARNING)
-
-# Mark that we're in worker mode - this makes @isolated decorator a no-op
-os.environ["COMFYUI_ISOLATION_WORKER"] = "1"
-
-
-def setup_paths(node_dir: str, comfyui_base: Optional[str], import_paths: Optional[str]):
-    """Setup sys.path for imports."""
-    from pathlib import Path
-
-    node_path = Path(node_dir)
-
-    # Set COMFYUI_BASE env var for stubs to use
-    if comfyui_base:
-        os.environ["COMFYUI_BASE"] = comfyui_base
-
-    # Add comfyui-isolation stubs directory (provides folder_paths, etc.)
-    stubs_dir = Path(__file__).parent / "stubs"
-    sys.path.insert(0, str(stubs_dir))
-
-    # Add import paths
-    if import_paths:
-        for p in import_paths.split(","):
-            p = p.strip()
-            if p:
-                full_path = node_path / p
-                sys.path.insert(0, str(full_path))
-
-    # Add node_dir itself
-    sys.path.insert(0, str(node_path))
-
-
-def serialize_result(obj: Any) -> Any:
-    """Serialize result for JSON transport."""
-    from comfy_env.ipc.protocol import encode_object
-    return encode_object(obj)
-
-
-def deserialize_arg(obj: Any) -> Any:
-    """Deserialize argument from JSON transport."""
-    from comfy_env.ipc.protocol import decode_object
-    return decode_object(obj)
-
-
-# Cache for imported classes and instances
-_class_cache: Dict[str, type] = {}
-_instance_cache: Dict[str, object] = {}
-
-
-def get_instance(module_name: str, class_name: str) -> object:
-    """Get or create an instance of a class."""
-    cache_key = f"{module_name}.{class_name}"
-
-    if cache_key not in _instance_cache:
-        # Import the class if not cached
-        if cache_key not in _class_cache:
-            print(f"[Runner] Importing {class_name} from {module_name}...", file=sys.stderr)
-            module = importlib.import_module(module_name)
-            cls = getattr(module, class_name)
-            _class_cache[cache_key] = cls
-
-        # Create instance
-        cls = _class_cache[cache_key]
-        _instance_cache[cache_key] = cls()
-        print(f"[Runner] Created instance of {class_name}", file=sys.stderr)
-
-    return _instance_cache[cache_key]
-
-
-def run_worker(
-    node_dir: str,
-    comfyui_base: Optional[str],
-    import_paths: Optional[str],
-    socket_path: Optional[str] = None,
-):
-    """
-    Main worker loop - handles JSON-RPC requests via transport.
-
-    Args:
-        node_dir: Path to node package directory
-        comfyui_base: Path to ComfyUI base directory
-        import_paths: Comma-separated import paths
-        socket_path: Unix domain socket path (if None, uses stdin/stdout)
-    """
-    from comfy_env.ipc.transport import UnixSocketTransport, StdioTransport
-
-    # Setup paths first
-    setup_paths(node_dir, comfyui_base, import_paths)
-
-    # Create transport
-    if socket_path:
-        # Unix Domain Socket transport (recommended)
-        print(f"[Runner] Connecting to socket: {socket_path}", file=sys.stderr)
-        transport = UnixSocketTransport.connect(socket_path)
-        use_uds = True
-    else:
-        # Legacy stdin/stdout transport
-        print("[Runner] Using stdin/stdout transport", file=sys.stderr)
-        transport = StdioTransport()
-        use_uds = False
-
-    try:
-        # Send ready signal
-        transport.send({"status": "ready"})
-
-        # Main loop - read requests, execute, respond
-        while True:
-            response = {"jsonrpc": "2.0", "id": None}
-
-            try:
-                request = transport.recv()
-                response["id"] = request.get("id")
-
-                method_name = request.get("method")
-                params = request.get("params", {})
-
-                if method_name == "shutdown":
-                    # Clean shutdown
-                    response["result"] = {"status": "shutdown"}
-                    transport.send(response)
-                    break
-
-                # Get module/class from request
-                module_name = request.get("module")
-                class_name = request.get("class")
-
-                if not module_name or not class_name:
-                    response["error"] = {
-                        "code": -32602,
-                        "message": "Missing 'module' or 'class' in request",
-                    }
-                    transport.send(response)
-                    continue
-
-                # Get or create instance
-                try:
-                    instance = get_instance(module_name, class_name)
-                except Exception as e:
-                    response["error"] = {
-                        "code": -32000,
-                        "message": f"Failed to import {module_name}.{class_name}: {e}",
-                        "data": {"traceback": traceback.format_exc()}
-                    }
-                    transport.send(response)
-                    continue
-
-                # Get the method
-                method = getattr(instance, method_name, None)
-                if method is None:
-                    response["error"] = {
-                        "code": -32601,
-                        "message": f"Method not found: {method_name}",
-                    }
-                    transport.send(response)
-                    continue
-
-                # Deserialize arguments
-                deserialized_params = {}
-                for key, value in params.items():
-                    deserialized_params[key] = deserialize_arg(value)
-
-                # For legacy stdio transport, redirect stdout to stderr during execution
-                # This prevents print() in node code from corrupting JSON protocol
-                # (UDS transport doesn't need this since it uses a separate socket)
-                if not use_uds:
-                    original_stdout = sys.stdout
-                    sys.stdout = sys.stderr
-
-                    # Also redirect at file descriptor level for C libraries
-                    stdout_fd = original_stdout.fileno()
-                    stderr_fd = sys.stderr.fileno()
-                    stdout_fd_copy = os.dup(stdout_fd)
-                    os.dup2(stderr_fd, stdout_fd)
-
-                # Call the method
-                print(f"[Runner] Calling {class_name}.{method_name}...", file=sys.stderr)
-                try:
-                    result = method(**deserialized_params)
-                finally:
-                    if not use_uds:
-                        # Restore file descriptor first, then Python stdout
-                        os.dup2(stdout_fd_copy, stdout_fd)
-                        os.close(stdout_fd_copy)
-                        sys.stdout = original_stdout
-
-                # Serialize result
-                serialized_result = serialize_result(result)
-                response["result"] = serialized_result
-
-                print(f"[Runner] {class_name}.{method_name} completed", file=sys.stderr)
-
-            except ConnectionError as e:
-                # Socket closed - normal shutdown
-                print(f"[Runner] Connection closed: {e}", file=sys.stderr)
-                break
-            except Exception as e:
-                tb = traceback.format_exc()
-                print(f"[Runner] Error: {e}", file=sys.stderr)
-                print(tb, file=sys.stderr)
-                response["error"] = {
-                    "code": -32000,
-                    "message": str(e),
-                    "data": {"traceback": tb}
-                }
-
-            try:
-                transport.send(response)
-            except ConnectionError:
-                break
-
-    finally:
-        transport.close()
-
-
-def main():
-    parser = argparse.ArgumentParser(description="Isolated node runner")
-    parser.add_argument("--node-dir", required=True, help="Node package directory")
-    parser.add_argument("--comfyui-base", help="ComfyUI base directory")
-    parser.add_argument("--import-paths", help="Comma-separated import paths")
-    parser.add_argument("--socket", help="Unix domain socket path (if not provided, uses stdin/stdout)")
-
-    args = parser.parse_args()
-
-    run_worker(
-        node_dir=args.node_dir,
-        comfyui_base=args.comfyui_base,
-        import_paths=args.import_paths,
-        socket_path=args.socket,
-    )
-
-
-if __name__ == "__main__":
-    main()
{comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/WHEEL
File without changes

{comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/entry_points.txt
File without changes

{comfy_env-0.0.12.dist-info → comfy_env-0.0.13.dist-info}/licenses/LICENSE
File without changes