comfy_env-0.1.15-py3-none-any.whl → comfy_env-0.1.17-py3-none-any.whl
This diff compares the contents of two package versions that were publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
- comfy_env/__init__.py +117 -40
- comfy_env/cli.py +122 -311
- comfy_env/config/__init__.py +12 -4
- comfy_env/config/parser.py +30 -79
- comfy_env/config/types.py +37 -0
- comfy_env/detection/__init__.py +77 -0
- comfy_env/detection/cuda.py +61 -0
- comfy_env/detection/gpu.py +230 -0
- comfy_env/detection/platform.py +70 -0
- comfy_env/detection/runtime.py +103 -0
- comfy_env/environment/__init__.py +53 -0
- comfy_env/environment/cache.py +141 -0
- comfy_env/environment/libomp.py +41 -0
- comfy_env/environment/paths.py +38 -0
- comfy_env/environment/setup.py +91 -0
- comfy_env/install.py +134 -331
- comfy_env/isolation/__init__.py +32 -2
- comfy_env/isolation/tensor_utils.py +83 -0
- comfy_env/isolation/workers/__init__.py +16 -0
- comfy_env/{workers → isolation/workers}/mp.py +1 -1
- comfy_env/{workers → isolation/workers}/subprocess.py +1 -1
- comfy_env/isolation/wrap.py +128 -509
- comfy_env/packages/__init__.py +60 -0
- comfy_env/packages/apt.py +36 -0
- comfy_env/packages/cuda_wheels.py +97 -0
- comfy_env/packages/node_dependencies.py +77 -0
- comfy_env/packages/pixi.py +85 -0
- comfy_env/packages/toml_generator.py +88 -0
- comfy_env-0.1.17.dist-info/METADATA +225 -0
- comfy_env-0.1.17.dist-info/RECORD +36 -0
- comfy_env/cache.py +0 -203
- comfy_env/nodes.py +0 -187
- comfy_env/pixi/__init__.py +0 -48
- comfy_env/pixi/core.py +0 -587
- comfy_env/pixi/cuda_detection.py +0 -303
- comfy_env/pixi/platform/__init__.py +0 -21
- comfy_env/pixi/platform/base.py +0 -96
- comfy_env/pixi/platform/darwin.py +0 -53
- comfy_env/pixi/platform/linux.py +0 -68
- comfy_env/pixi/platform/windows.py +0 -284
- comfy_env/pixi/resolver.py +0 -198
- comfy_env/prestartup.py +0 -208
- comfy_env/workers/__init__.py +0 -38
- comfy_env/workers/tensor_utils.py +0 -188
- comfy_env-0.1.15.dist-info/METADATA +0 -291
- comfy_env-0.1.15.dist-info/RECORD +0 -31
- /comfy_env/{workers → isolation/workers}/base.py +0 -0
- {comfy_env-0.1.15.dist-info → comfy_env-0.1.17.dist-info}/WHEEL +0 -0
- {comfy_env-0.1.15.dist-info → comfy_env-0.1.17.dist-info}/entry_points.txt +0 -0
- {comfy_env-0.1.15.dist-info → comfy_env-0.1.17.dist-info}/licenses/LICENSE +0 -0

comfy_env/isolation/tensor_utils.py
@@ -0,0 +1,83 @@
+"""Tensor utilities for IPC - prevents GC races and handles CUDA re-share."""
+
+import collections
+import logging
+import threading
+import time
+from typing import Any
+
+logger = logging.getLogger("comfy_env")
+
+
+class TensorKeeper:
+    """Keep tensor references during IPC to prevent premature GC."""
+
+    def __init__(self, retention_seconds: float = 30.0):
+        self.retention_seconds = retention_seconds
+        self._keeper: collections.deque = collections.deque()
+        self._lock = threading.Lock()
+
+    def keep(self, t: Any) -> None:
+        try:
+            import torch
+            if not isinstance(t, torch.Tensor): return
+        except ImportError: return
+
+        now = time.time()
+        with self._lock:
+            self._keeper.append((now, t))
+            while self._keeper and now - self._keeper[0][0] > self.retention_seconds:
+                self._keeper.popleft()
+
+    def keep_recursive(self, obj: Any) -> None:
+        try:
+            import torch
+            if isinstance(obj, torch.Tensor): self.keep(obj)
+            elif isinstance(obj, (list, tuple)):
+                for item in obj: self.keep_recursive(item)
+            elif isinstance(obj, dict):
+                for v in obj.values(): self.keep_recursive(v)
+        except ImportError: pass
+
+    def __len__(self) -> int:
+        with self._lock: return len(self._keeper)
+
+
+_tensor_keeper = TensorKeeper()
+keep_tensor = lambda t: _tensor_keeper.keep(t)
+keep_tensors_recursive = lambda obj: _tensor_keeper.keep_recursive(obj)
+
+
+def prepare_tensor_for_ipc(t: Any) -> Any:
+    """Clone tensor if it was received via IPC (can't be re-shared)."""
+    try:
+        import torch
+        if not isinstance(t, torch.Tensor) or not t.is_cuda: return t
+
+        import torch.multiprocessing.reductions as reductions
+        try:
+            reductions.reduce_tensor(t)
+            return t
+        except RuntimeError as e:
+            if "received from another process" in str(e):
+                size_mb = t.numel() * t.element_size() / (1024 * 1024)
+                if size_mb > 100:
+                    logger.warning(f"Cloning large CUDA tensor ({size_mb:.1f}MB) for IPC")
+                return t.clone()
+            raise
+    except ImportError: return t
+
+
+def prepare_for_ipc_recursive(obj: Any) -> Any:
+    """Recursively prepare tensors for IPC and keep references."""
+    try:
+        import torch
+        if isinstance(obj, torch.Tensor):
+            prepared = prepare_tensor_for_ipc(obj)
+            keep_tensor(prepared)
+            return prepared
+        elif isinstance(obj, list): return [prepare_for_ipc_recursive(x) for x in obj]
+        elif isinstance(obj, tuple): return tuple(prepare_for_ipc_recursive(x) for x in obj)
+        elif isinstance(obj, dict): return {k: prepare_for_ipc_recursive(v) for k, v in obj.items()}
+    except ImportError: pass
+    return obj
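
The helpers above are what the relocated workers import (mp.py pulls in prepare_for_ipc_recursive and keep_tensors_recursive further down in this diff). As a rough, hypothetical sketch of the sending side of an IPC boundary; the result_queue and send_result names are illustrative only, not part of the package:

# Hypothetical usage sketch (not part of the diff). prepare_for_ipc_recursive
# clones CUDA tensors that were themselves received over IPC (PyTorch cannot
# re-share them) and registers every prepared tensor with the module-level
# TensorKeeper, so it is held for at least the 30-second default retention
# window instead of being GC'd before the peer process maps the shared storage.
from comfy_env.isolation.tensor_utils import prepare_for_ipc_recursive

def send_result(result_queue, result):
    # result may be any nesting of lists/tuples/dicts/tensors
    result_queue.put(prepare_for_ipc_recursive(result))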

comfy_env/isolation/workers/__init__.py
@@ -0,0 +1,16 @@
+"""
+Workers - Process isolation implementations.
+
+Provides multiprocessing and subprocess-based workers for isolated execution.
+"""
+
+from .base import Worker, WorkerError
+from .mp import MPWorker
+from .subprocess import SubprocessWorker
+
+__all__ = [
+    "Worker",
+    "WorkerError",
+    "MPWorker",
+    "SubprocessWorker",
+]
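
With the move from comfy_env/workers/ to comfy_env/isolation/workers/, the public names are re-exported from the new package path. A minimal import sketch (constructor signatures are not visible in this diff, so none are shown):

# New import path in 0.1.17; the same modules previously lived under
# comfy_env.workers. Only the names listed in __all__ above are assumed.
from comfy_env.isolation.workers import Worker, WorkerError, MPWorker, SubprocessWorker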

comfy_env/isolation/workers/mp.py
@@ -29,7 +29,7 @@ from queue import Empty as QueueEmpty
 from typing import Any, Callable, Optional
 
 from .base import Worker, WorkerError
-from
+from ..tensor_utils import prepare_for_ipc_recursive, keep_tensors_recursive
 
 logger = logging.getLogger("comfy_env")
 

comfy_env/isolation/workers/subprocess.py
@@ -42,7 +42,7 @@ from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
 from .base import Worker, WorkerError
-from
+from ...packages.pixi import get_pixi_path
 
 # Debug logging (set COMFY_ENV_DEBUG=1 to enable)
 _DEBUG = os.environ.get("COMFY_ENV_DEBUG", "").lower() in ("1", "true", "yes")