comfy_env-0.0.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
comfy_env/decorator.py ADDED
@@ -0,0 +1,422 @@
+ """
+ Decorator-based API for easy subprocess isolation.
+
+ This module provides the @isolated decorator that makes it simple to run
+ ComfyUI node methods in isolated subprocess environments.
+
+ Architecture:
+     The decorator wraps the node's FUNCTION method. When called in the HOST
+     process, it forwards the call to an isolated worker (TorchMPWorker for
+     same-venv, PersistentVenvWorker for different venv).
+
+     When imported in the WORKER subprocess (COMFYUI_ISOLATION_WORKER=1),
+     the decorator is a transparent no-op.
+
+ Example:
+     from comfy_env import isolated
+
+     @isolated(env="myenv")
+     class MyNode:
+         FUNCTION = "process"
+         RETURN_TYPES = ("IMAGE",)
+
+         def process(self, image):
+             # This code runs in isolated subprocess
+             import heavy_package
+             return (heavy_package.run(image),)
+
+ Implementation:
+     This decorator is thin sugar over the workers module. Internally it uses:
+     - TorchMPWorker: Same Python, zero-copy tensor transfer via torch.mp.Queue
+     - PersistentVenvWorker: Different venv, tensor transfer via torch.save/load
+ """
+
+ import os
+ import sys
+ import atexit
+ import inspect
+ import logging
+ import threading
+ import time
+ from dataclasses import dataclass
+ from functools import wraps
+ from pathlib import Path
+ from typing import Any, Callable, Dict, List, Optional, Union
+
+ logger = logging.getLogger("comfy_env")
+
+ # Enable verbose logging by default (can be disabled)
+ VERBOSE_LOGGING = os.environ.get("COMFYUI_ISOLATION_QUIET", "0") != "1"
+
+
+ def _log(env_name: str, msg: str):
+     """Log with environment prefix."""
+     if VERBOSE_LOGGING:
+         print(f"[{env_name}] {msg}")
+
+
+ def _is_worker_mode() -> bool:
+     """Check if we're running inside the worker subprocess."""
+     return os.environ.get("COMFYUI_ISOLATION_WORKER") == "1"
+
+
+ def _describe_tensor(t) -> str:
+     """Get human-readable tensor description."""
+     try:
+         import torch
+         if isinstance(t, torch.Tensor):
+             size_mb = t.numel() * t.element_size() / (1024 * 1024)
+             return f"Tensor({list(t.shape)}, {t.dtype}, {t.device}, {size_mb:.1f}MB)"
+     except Exception:
+         pass
+     return str(type(t).__name__)
+
+
+ def _describe_args(args: dict) -> str:
+     """Describe arguments for logging."""
+     parts = []
+     for k, v in args.items():
+         parts.append(f"{k}={_describe_tensor(v)}")
+     return ", ".join(parts) if parts else "(no args)"
+
+
+ def _clone_tensor_if_needed(obj: Any, smart_clone: bool = True) -> Any:
+     """
+     Defensively clone tensors to prevent mutation/re-share bugs.
+
+     This handles:
+     1. Input tensors that might be mutated in worker
+     2. Output tensors received via IPC that can't be re-shared
+
+     Args:
+         obj: Object to process (tensor or nested structure)
+         smart_clone: If True, use smart CUDA IPC detection (only clone
+             when necessary). If False, always clone.
+     """
+     if smart_clone:
+         # Use smart detection - only clones CUDA tensors that can't be re-shared
+         from .workers.tensor_utils import prepare_for_ipc_recursive
+         return prepare_for_ipc_recursive(obj)
+
+     # Fallback: always clone (original behavior)
+     try:
+         import torch
+         if isinstance(obj, torch.Tensor):
+             return obj.clone()
+         elif isinstance(obj, (list, tuple)):
+             cloned = [_clone_tensor_if_needed(x, smart_clone=False) for x in obj]
+             return type(obj)(cloned)
+         elif isinstance(obj, dict):
+             return {k: _clone_tensor_if_needed(v, smart_clone=False) for k, v in obj.items()}
+     except ImportError:
+         pass
+     return obj
+
+
+ # ---------------------------------------------------------------------------
+ # Worker Management
+ # ---------------------------------------------------------------------------
+
+ @dataclass
+ class WorkerConfig:
+     """Configuration for an isolated worker."""
+     env_name: str
+     python: Optional[str] = None  # None = same Python (TorchMPWorker)
+     working_dir: Optional[Path] = None
+     sys_path: Optional[List[str]] = None
+     timeout: float = 600.0
+
+
+ # Global worker cache
+ _workers: Dict[str, Any] = {}
+ _workers_lock = threading.Lock()
+
+
+ def _get_or_create_worker(config: WorkerConfig, log_fn: Callable):
+     """Get or create a worker for the given configuration.
+
+     Thread-safe: worker creation happens inside the lock to prevent
+     race conditions where multiple threads create duplicate workers.
+     """
+     cache_key = f"{config.env_name}:{config.python or 'same'}"
+
+     with _workers_lock:
+         if cache_key in _workers:
+             worker = _workers[cache_key]
+             if worker.is_alive():
+                 return worker
+             # Worker died, recreate
+             log_fn(f"Worker died, recreating...")
+
+         # Create new worker INSIDE the lock (fixes race condition)
+         if config.python is None:
+             # Same Python - use TorchMPWorker (fast, zero-copy)
+             from .workers import TorchMPWorker
+             log_fn(f"Creating TorchMPWorker (same Python, zero-copy tensors)")
+             worker = TorchMPWorker(name=config.env_name)
+         else:
+             # Different Python - use PersistentVenvWorker
+             from .workers.venv import PersistentVenvWorker
+             log_fn(f"Creating PersistentVenvWorker (python={config.python})")
+             worker = PersistentVenvWorker(
+                 python=config.python,
+                 working_dir=config.working_dir,
+                 sys_path=config.sys_path,
+                 name=config.env_name,
+             )
+
+         _workers[cache_key] = worker
+         return worker
+
+
+ def shutdown_all_processes():
+     """Shutdown all cached workers. Called at exit."""
+     with _workers_lock:
+         for name, worker in _workers.items():
+             try:
+                 worker.shutdown()
+             except Exception as e:
+                 logger.debug(f"Error shutting down {name}: {e}")
+         _workers.clear()
+
+
+ atexit.register(shutdown_all_processes)
+
+
+ # ---------------------------------------------------------------------------
+ # The @isolated Decorator
+ # ---------------------------------------------------------------------------
+
+ def isolated(
+     env: str,
+     requirements: Optional[List[str]] = None,
+     config: Optional[str] = None,
+     python: Optional[str] = None,
+     cuda: Optional[str] = "auto",
+     timeout: float = 600.0,
+     log_callback: Optional[Callable[[str], None]] = None,
+     import_paths: Optional[List[str]] = None,
+     clone_tensors: bool = True,
+     same_venv: bool = False,
+ ):
+     """
+     Class decorator that runs node methods in isolated subprocess.
+
+     The decorated class's FUNCTION method will be executed in an isolated
+     Python environment. Tensors are transferred efficiently via PyTorch's
+     native IPC mechanisms (CUDA IPC for GPU, shared memory for CPU).
+
+     By default, auto-discovers config file (comfy_env_reqs.toml) and
+     uses full venv isolation with PersistentVenvWorker. Use same_venv=True
+     for lightweight same-venv isolation with TorchMPWorker.
+
+     Args:
+         env: Name of the isolated environment (used for logging/caching)
+         requirements: [DEPRECATED] Use config file instead
+         config: Path to TOML config file. If None, auto-discovers in node directory.
+         python: Path to Python executable (overrides config-based detection)
+         cuda: [DEPRECATED] Detected automatically
+         timeout: Timeout for calls in seconds (default: 10 minutes)
+         log_callback: Optional callback for logging
+         import_paths: Paths to add to sys.path in worker
+         clone_tensors: Clone tensors at boundary to prevent mutation bugs (default: True)
+         same_venv: If True, use TorchMPWorker (same venv, just process isolation).
+             If False (default), use full venv isolation with auto-discovered config.
+
+     Example:
+         # Full venv isolation (default) - auto-discovers comfy_env_reqs.toml
+         @isolated(env="sam3d")
+         class MyNode:
+             FUNCTION = "process"
+
+             def process(self, image):
+                 import heavy_lib
+                 return heavy_lib.run(image)
+
+         # Lightweight same-venv isolation (opt-in)
+         @isolated(env="sam3d", same_venv=True)
+         class MyLightNode:
+             FUNCTION = "process"
+             ...
+     """
+     def decorator(cls):
+         # In worker mode, decorator is a no-op
+         if _is_worker_mode():
+             return cls
+
+         # --- HOST MODE: Wrap the FUNCTION method ---
+
+         func_name = getattr(cls, 'FUNCTION', None)
+         if not func_name:
+             raise ValueError(
+                 f"Node class {cls.__name__} must have FUNCTION attribute."
+             )
+
+         original_method = getattr(cls, func_name, None)
+         if original_method is None:
+             raise ValueError(
+                 f"Node class {cls.__name__} has FUNCTION='{func_name}' but "
+                 f"no method with that name."
+             )
+
+         # Get source file info for sys.path setup
+         source_file = Path(inspect.getfile(cls))
+         node_dir = source_file.parent
+         if node_dir.name == "nodes":
+             node_package_dir = node_dir.parent
+         else:
+             node_package_dir = node_dir
+
+         # Build sys.path for worker
+         sys_path_additions = [str(node_dir)]
+         if import_paths:
+             for p in import_paths:
+                 full_path = node_dir / p
+                 sys_path_additions.append(str(full_path.resolve()))
+
+         # Resolve python path for venv isolation
+         resolved_python = python
+         env_config = None
+
+         # If same_venv=True, skip venv isolation entirely
+         if same_venv:
+             _log(env, "Using same-venv isolation (TorchMPWorker)")
+             resolved_python = None
+
+         # Otherwise, try to get a venv python path
+         elif python:
+             # Explicit python path provided
+             resolved_python = python
+
+         else:
+             # Auto-discover or use explicit config
+             if config:
+                 # Explicit config file specified
+                 config_file = node_package_dir / config
+                 if config_file.exists():
+                     from .env.config_file import load_env_from_file
+                     env_config = load_env_from_file(config_file, node_package_dir)
+                 else:
+                     _log(env, f"Warning: Config file not found: {config_file}")
+             else:
+                 # Auto-discover config file - try v2 API first
+                 from .env.config_file import discover_config, discover_env_config
+                 v2_config = discover_config(node_package_dir)
+                 if v2_config and env in v2_config.envs:
+                     # v2 schema: get the named environment
+                     env_config = v2_config.envs[env]
+                     _log(env, f"Auto-discovered v2 config: {env_config.name}")
+                 else:
+                     # Fall back to v1 API
+                     env_config = discover_env_config(node_package_dir)
+                     if env_config:
+                         _log(env, f"Auto-discovered config: {env_config.name}")
+
+             # If we have a config, set up the venv
+             if env_config:
+                 from .env.manager import IsolatedEnvManager
+                 manager = IsolatedEnvManager(base_dir=node_package_dir)
+
+                 if not manager.is_ready(env_config):
+                     _log(env, f"Setting up isolated environment...")
+                     manager.setup(env_config)
+
+                 resolved_python = str(manager.get_python(env_config))
+             else:
+                 # No config found - fall back to same-venv isolation
+                 _log(env, "No config found, using same-venv isolation (TorchMPWorker)")
+                 resolved_python = None
+
+         # Create worker config
+         worker_config = WorkerConfig(
+             env_name=env,
+             python=resolved_python,
+             working_dir=node_dir,
+             sys_path=sys_path_additions,
+             timeout=timeout,
+         )
+
+         # Setup logging
+         log_fn = log_callback or (lambda msg: _log(env, msg))
+
+         # Create the proxy method
+         @wraps(original_method)
+         def proxy(self, *args, **kwargs):
+             # Get or create worker
+             worker = _get_or_create_worker(worker_config, log_fn)
+
+             # Bind arguments to get kwargs dict
+             sig = inspect.signature(original_method)
+             try:
+                 bound = sig.bind(self, *args, **kwargs)
+                 bound.apply_defaults()
+                 call_kwargs = dict(bound.arguments)
+                 del call_kwargs['self']
+             except TypeError:
+                 call_kwargs = kwargs
+
+             # Log entry with argument descriptions
+             if VERBOSE_LOGGING:
+                 log_fn(f"→ {cls.__name__}.{func_name}({_describe_args(call_kwargs)})")
+
+             start_time = time.time()
+
+             try:
+                 # Clone tensors defensively if enabled
+                 if clone_tensors:
+                     call_kwargs = {k: _clone_tensor_if_needed(v) for k, v in call_kwargs.items()}
+
+                 # Get module name for import in worker
+                 module_name = cls.__module__
+
+                 # Call worker using appropriate method
+                 if worker_config.python is None:
+                     # TorchMPWorker - use call_method protocol (avoids pickle issues)
+                     result = worker.call_method(
+                         module_name=module_name,
+                         class_name=cls.__name__,
+                         method_name=func_name,
+                         self_state=self.__dict__.copy(),
+                         kwargs=call_kwargs,
+                         timeout=timeout,
+                     )
+                 else:
+                     # PersistentVenvWorker - call by module/class/method path
+                     result = worker.call_method(
+                         module_name=source_file.stem,
+                         class_name=cls.__name__,
+                         method_name=func_name,
+                         self_state=self.__dict__.copy() if hasattr(self, '__dict__') else None,
+                         kwargs=call_kwargs,
+                         timeout=timeout,
+                     )
+
+                 # Clone result tensors defensively
+                 if clone_tensors:
+                     result = _clone_tensor_if_needed(result)
+
+                 elapsed = time.time() - start_time
+                 if VERBOSE_LOGGING:
+                     result_desc = _describe_tensor(result) if not isinstance(result, tuple) else f"tuple({len(result)} items)"
+                     log_fn(f"← {cls.__name__}.{func_name} returned {result_desc} [{elapsed:.2f}s]")
+
+                 return result
+
+             except Exception as e:
+                 elapsed = time.time() - start_time
+                 log_fn(f"✗ {cls.__name__}.{func_name} failed after {elapsed:.2f}s: {e}")
+                 raise
+
+         # Store original method before replacing (for worker to access)
+         cls._isolated_original_method = original_method
+
+         # Replace method with proxy
+         setattr(cls, func_name, proxy)
+
+         # Store metadata
+         cls._isolated_env = env
+         cls._isolated_node_dir = node_dir
+
+         return cls
+
+     return decorator
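
For reference, a minimal usage sketch of the decorator above that exercises the optional arguments (explicit config path, extra import paths, a logging callback). It is not part of the wheel; the node name, config filename, and the heavy_lib / vendored names are illustrative. Setting COMFYUI_ISOLATION_QUIET=1 suppresses the decorator's default console output.

import logging

from comfy_env import isolated

logger = logging.getLogger("my_node_pack")


@isolated(
    env="sam3d",                    # worker name, used for logging and caching
    config="comfy_env_reqs.toml",   # explicit TOML config (otherwise auto-discovered)
    import_paths=["vendored"],      # added to sys.path inside the worker
    timeout=1800.0,                 # allow long model loads
    log_callback=logger.info,       # route the decorator's messages into standard logging
)
class Sam3DNode:
    FUNCTION = "process"
    RETURN_TYPES = ("IMAGE",)

    def process(self, image):
        # Runs in the isolated subprocess; heavy imports never touch the host venv.
        import heavy_lib  # illustrative dependency
        return (heavy_lib.run(image),)
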
comfy_env/env/__init__.py ADDED
@@ -0,0 +1,30 @@
+ """Environment management for comfyui-isolation."""
+
+ from .config import IsolatedEnv
+ from .manager import IsolatedEnvManager
+ from .detection import detect_cuda_version, detect_gpu_info, get_gpu_summary
+ from .platform import get_platform, PlatformProvider, PlatformPaths
+ from .security import (
+     normalize_env_name,
+     validate_dependency,
+     validate_dependencies,
+     validate_path_within_root,
+     validate_wheel_url,
+ )
+
+ __all__ = [
+     "IsolatedEnv",
+     "IsolatedEnvManager",
+     "detect_cuda_version",
+     "detect_gpu_info",
+     "get_gpu_summary",
+     "get_platform",
+     "PlatformProvider",
+     "PlatformPaths",
+     # Security
+     "normalize_env_name",
+     "validate_dependency",
+     "validate_dependencies",
+     "validate_path_within_root",
+     "validate_wheel_url",
+ ]
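
These exports can also be driven without the decorator. Below is a minimal sketch that mirrors the calls decorator.py makes (IsolatedEnvManager(base_dir=...), is_ready, setup, get_python); the environment spec values are illustrative.

from pathlib import Path

from comfy_env.env import IsolatedEnv, IsolatedEnvManager

spec = IsolatedEnv(
    name="sam3d",
    python="3.10",
    cuda="12.8",
    requirements=["torch==2.8.0", "nvdiffrast"],
)

manager = IsolatedEnvManager(base_dir=Path(__file__).parent)
if not manager.is_ready(spec):         # venv missing or incomplete
    manager.setup(spec)                # create the venv and install requirements
python_exe = manager.get_python(spec)  # interpreter path handed to the worker
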
comfy_env/env/config.py ADDED
@@ -0,0 +1,144 @@
+ """Configuration for isolated environments."""
+
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Dict, List, Optional
+
+
+ @dataclass
+ class LocalConfig:
+     """Configuration for local (host environment) installs.
+
+     These packages are installed into ComfyUI's main environment,
+     not into an isolated venv.
+     """
+     cuda_packages: Dict[str, str] = field(default_factory=dict)  # package -> version
+     requirements: List[str] = field(default_factory=list)
+
+
+ @dataclass
+ class NodeReq:
+     """A node dependency (another ComfyUI node pack)."""
+     name: str
+     repo: str  # GitHub repo path, e.g., "Kosinkadink/ComfyUI-VideoHelperSuite"
+
+
+ @dataclass
+ class EnvManagerConfig:
+     """
+     Full configuration parsed from comfyui_env.toml.
+
+     Supports the v2 schema:
+         [local.cuda]       - CUDA packages for host environment
+         [local.packages]   - Regular packages for host environment
+         [envname]          - Isolated env definition
+         [envname.cuda]     - CUDA packages for isolated env
+         [envname.packages] - Regular packages for isolated env
+         [node_reqs]        - Node dependencies
+     """
+     local: LocalConfig = field(default_factory=LocalConfig)
+     envs: Dict[str, "IsolatedEnv"] = field(default_factory=dict)
+     node_reqs: List[NodeReq] = field(default_factory=list)
+
+     @property
+     def has_local(self) -> bool:
+         """Check if there are local packages to install."""
+         return bool(self.local.cuda_packages or self.local.requirements)
+
+     @property
+     def has_envs(self) -> bool:
+         """Check if there are isolated environments defined."""
+         return bool(self.envs)
+
+     def get_env(self, name: str) -> Optional["IsolatedEnv"]:
+         """Get an isolated environment by name."""
+         return self.envs.get(name)
+
+     @property
+     def default_env(self) -> Optional["IsolatedEnv"]:
+         """Get the first/only isolated environment, or None."""
+         if self.envs:
+             return next(iter(self.envs.values()))
+         return None
+
+
+ @dataclass
+ class IsolatedEnv:
+     """
+     Configuration for an isolated Python environment.
+
+     This defines what Python version, CUDA version, and dependencies
+     should be installed in the isolated environment.
+
+     Args:
+         name: Unique name for this environment (used for caching)
+         python: Python version (e.g., "3.10", "3.11")
+         cuda: CUDA version (e.g., "12.4", "12.8") or None for CPU-only
+         requirements: List of pip requirements (e.g., ["torch==2.8.0", "numpy"])
+         requirements_file: Path to requirements.txt file
+         wheel_sources: List of URLs for --find-links (custom wheel repos)
+         index_urls: List of URLs for --extra-index-url
+         env_dir: Custom directory for the venv (default: auto-generated)
+         pytorch_version: Specific PyTorch version (auto-detected if None)
+         worker_package: Worker package directory (e.g., "worker" -> worker/__main__.py)
+         worker_script: Worker script file (e.g., "worker.py")
+
+     Example:
+         env = IsolatedEnv(
+             name="my-node",
+             python="3.10",
+             cuda="12.8",
+             requirements=["torch==2.8.0", "nvdiffrast"],
+             wheel_sources=["https://my-wheels.github.io/"],
+         )
+     """
+
+     name: str
+     python: str = "3.10"
+     cuda: Optional[str] = None
+     requirements: list[str] = field(default_factory=list)
+     no_deps_requirements: list[str] = field(default_factory=list)  # Install with --no-deps
+     requirements_file: Optional[Path] = None
+     wheel_sources: list[str] = field(default_factory=list)
+     index_urls: list[str] = field(default_factory=list)
+     env_dir: Optional[Path] = None
+     pytorch_version: Optional[str] = None
+     # Worker configuration
+     worker_package: Optional[str] = None  # e.g., "worker" -> worker/__main__.py
+     worker_script: Optional[str] = None  # e.g., "worker.py" -> worker.py
+
+     def __post_init__(self):
+         """Validate and normalize configuration."""
+         # Normalize paths
+         if self.requirements_file is not None:
+             self.requirements_file = Path(self.requirements_file)
+         if self.env_dir is not None:
+             self.env_dir = Path(self.env_dir)
+
+         # Validate Python version
+         if not self.python.replace(".", "").isdigit():
+             raise ValueError(f"Invalid Python version: {self.python}")
+
+         # Validate CUDA version if specified
+         if self.cuda is not None:
+             cuda_clean = self.cuda.replace(".", "")
+             if not cuda_clean.isdigit():
+                 raise ValueError(f"Invalid CUDA version: {self.cuda}")
+
+     @property
+     def cuda_short(self) -> Optional[str]:
+         """Get CUDA version without dots (e.g., '128' for '12.8')."""
+         if self.cuda is None:
+             return None
+         return self.cuda.replace(".", "")
+
+     @property
+     def python_short(self) -> str:
+         """Get Python version without dots (e.g., '310' for '3.10')."""
+         return self.python.replace(".", "")
+
+     def get_default_env_dir(self, base_dir: Path) -> Path:
+         """Get the default environment directory path."""
+         if self.env_dir is not None:
+             return self.env_dir
+         return base_dir / f"_env_{self.name}"
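
A minimal sketch of how the v2 schema described in the EnvManagerConfig docstring maps onto these dataclasses, assuming the TOML loader populates them this way; the package names and versions are illustrative, and in practice the object comes from parsing the config file rather than being built by hand.

config = EnvManagerConfig(
    local=LocalConfig(
        cuda_packages={"torch": "2.8.0"},   # [local.cuda]
        requirements=["numpy"],             # [local.packages]
    ),
    envs={
        "sam3d": IsolatedEnv(               # [sam3d], [sam3d.cuda], [sam3d.packages]
            name="sam3d",
            python="3.10",
            cuda="12.8",
            requirements=["nvdiffrast"],
        ),
    },
    node_reqs=[                             # [node_reqs]
        NodeReq(
            name="ComfyUI-VideoHelperSuite",
            repo="Kosinkadink/ComfyUI-VideoHelperSuite",
        ),
    ],
)

assert config.has_envs
assert config.get_env("sam3d") is config.default_env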