comfy-env 0.0.64__py3-none-any.whl → 0.0.66__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in that public registry.
Files changed (55)
  1. comfy_env/__init__.py +70 -122
  2. comfy_env/cli.py +78 -7
  3. comfy_env/config/__init__.py +19 -0
  4. comfy_env/config/parser.py +151 -0
  5. comfy_env/config/types.py +64 -0
  6. comfy_env/install.py +83 -361
  7. comfy_env/isolation/__init__.py +9 -0
  8. comfy_env/isolation/wrap.py +351 -0
  9. comfy_env/nodes.py +2 -2
  10. comfy_env/pixi/__init__.py +48 -0
  11. comfy_env/pixi/core.py +356 -0
  12. comfy_env/{resolver.py → pixi/resolver.py} +1 -14
  13. comfy_env/prestartup.py +60 -0
  14. comfy_env/templates/comfy-env-instructions.txt +30 -87
  15. comfy_env/templates/comfy-env.toml +68 -136
  16. comfy_env/workers/__init__.py +21 -32
  17. comfy_env/workers/base.py +1 -1
  18. comfy_env/workers/{torch_mp.py → mp.py} +47 -14
  19. comfy_env/workers/{venv.py → subprocess.py} +405 -441
  20. {comfy_env-0.0.64.dist-info → comfy_env-0.0.66.dist-info}/METADATA +2 -1
  21. comfy_env-0.0.66.dist-info/RECORD +34 -0
  22. comfy_env/decorator.py +0 -700
  23. comfy_env/env/__init__.py +0 -47
  24. comfy_env/env/config.py +0 -201
  25. comfy_env/env/config_file.py +0 -740
  26. comfy_env/env/manager.py +0 -636
  27. comfy_env/env/security.py +0 -267
  28. comfy_env/ipc/__init__.py +0 -55
  29. comfy_env/ipc/bridge.py +0 -476
  30. comfy_env/ipc/protocol.py +0 -265
  31. comfy_env/ipc/tensor.py +0 -371
  32. comfy_env/ipc/torch_bridge.py +0 -401
  33. comfy_env/ipc/transport.py +0 -318
  34. comfy_env/ipc/worker.py +0 -221
  35. comfy_env/isolation.py +0 -310
  36. comfy_env/pixi.py +0 -760
  37. comfy_env/stub_imports.py +0 -270
  38. comfy_env/stubs/__init__.py +0 -1
  39. comfy_env/stubs/comfy/__init__.py +0 -6
  40. comfy_env/stubs/comfy/model_management.py +0 -58
  41. comfy_env/stubs/comfy/utils.py +0 -29
  42. comfy_env/stubs/folder_paths.py +0 -71
  43. comfy_env/workers/pool.py +0 -241
  44. comfy_env-0.0.64.dist-info/RECORD +0 -48
  45. /comfy_env/{env/cuda_gpu_detection.py → pixi/cuda_detection.py} +0 -0
  46. /comfy_env/{env → pixi}/platform/__init__.py +0 -0
  47. /comfy_env/{env → pixi}/platform/base.py +0 -0
  48. /comfy_env/{env → pixi}/platform/darwin.py +0 -0
  49. /comfy_env/{env → pixi}/platform/linux.py +0 -0
  50. /comfy_env/{env → pixi}/platform/windows.py +0 -0
  51. /comfy_env/{registry.py → pixi/registry.py} +0 -0
  52. /comfy_env/{wheel_sources.yml → pixi/wheel_sources.yml} +0 -0
  53. {comfy_env-0.0.64.dist-info → comfy_env-0.0.66.dist-info}/WHEEL +0 -0
  54. {comfy_env-0.0.64.dist-info → comfy_env-0.0.66.dist-info}/entry_points.txt +0 -0
  55. {comfy_env-0.0.64.dist-info → comfy_env-0.0.66.dist-info}/licenses/LICENSE +0 -0
comfy_env/ipc/transport.py DELETED
@@ -1,318 +0,0 @@
-"""
-Transport Layer - Pluggable IPC transports for host-worker communication.
-
-Supports:
-- QueueTransport: multiprocessing.Queue with zero-copy tensor support (recommended)
-- UnixSocketTransport: Unix Domain Sockets with length-prefixed JSON (Linux/macOS only)
-- StdioTransport: Legacy stdin/stdout JSON lines (fallback)
-
-The transport abstraction allows swapping IPC mechanisms without changing
-the serialization protocol.
-"""
-
-import json
-import os
-import socket
-import struct
-import sys
-import threading
-from abc import ABC, abstractmethod
-from pathlib import Path
-from typing import Any, Optional, Protocol, Tuple, runtime_checkable
-
-
-@runtime_checkable
-class Transport(Protocol):
-    """Protocol for IPC transport mechanisms."""
-
-    def send(self, obj: Any) -> None:
-        """Send a JSON-serializable object to the remote endpoint."""
-        ...
-
-    def recv(self) -> Any:
-        """Receive a JSON object from the remote endpoint. Blocks until available."""
-        ...
-
-    def close(self) -> None:
-        """Close the transport. Further send/recv calls may fail."""
-        ...
-
-
-def create_queue_pair(share_torch: bool = True):
-    """
-    Create a pair of queues for bidirectional IPC.
-
-    When share_torch=True, uses torch.multiprocessing for zero-copy tensor transfer.
-    This works cross-platform (Windows, Linux, macOS).
-
-    Args:
-        share_torch: If True, use torch.multiprocessing for zero-copy tensors.
-
-    Returns:
-        Tuple of (to_worker_queue, from_worker_queue, multiprocessing_module)
-    """
-    if share_torch:
-        try:
-            import torch.multiprocessing as mp
-        except ImportError:
-            import multiprocessing as mp
-    else:
-        import multiprocessing as mp
-
-    # Ensure spawn method for proper isolation
-    start_method = mp.get_start_method(allow_none=True)
-    if start_method is None:
-        mp.set_start_method("spawn")
-
-    to_worker = mp.Queue()
-    from_worker = mp.Queue()
-    return to_worker, from_worker, mp
-
-
-class QueueTransport:
-    """
-    Cross-platform transport using multiprocessing.Queue.
-
-    This is the recommended transport as it:
-    - Works on Windows, Linux, and macOS (no AF_UNIX dependency)
-    - Supports zero-copy tensor transfer when using torch.multiprocessing
-    - Has simpler code than socket-based transports
-    - Is battle-tested (used by pyisolate)
-
-    When share_torch=True, PyTorch tensors are transferred via:
-    - CPU tensors: shared memory (share_memory_())
-    - GPU tensors: CUDA IPC handles (zero-copy)
-    """
-
-    def __init__(self, send_queue, recv_queue):
-        """
-        Initialize with send and receive queues.
-
-        Args:
-            send_queue: Queue for sending messages (to remote)
-            recv_queue: Queue for receiving messages (from remote)
-        """
-        self._send = send_queue
-        self._recv = recv_queue
-
-    def send(self, obj: Any) -> None:
-        """Send an object to the remote endpoint."""
-        self._send.put(obj)
-
-    def recv(self, timeout: Optional[float] = None) -> Any:
-        """
-        Receive an object from the remote endpoint.
-
-        Args:
-            timeout: Timeout in seconds (None = block forever)
-
-        Returns:
-            Received object
-
-        Raises:
-            Empty: If timeout expires with no message
-        """
-        import queue
-        try:
-            return self._recv.get(timeout=timeout)
-        except queue.Empty:
-            raise
-
-    def close(self) -> None:
-        """Close the transport (no-op for queues, cleanup handled by creator)."""
-        pass
-
-
-class UnixSocketTransport:
-    """
-    Transport using Unix Domain Sockets with length-prefixed JSON messages.
-
-    This is the recommended transport as it:
-    - Doesn't interfere with stdout/stderr (no C library output issues)
-    - Supports binary-safe length-prefixed framing
-    - Is more efficient than line-based JSON
-
-    Message format: [4-byte big-endian length][JSON payload]
-    """

-    def __init__(self, sock: socket.socket):
-        """
-        Initialize with an already-connected socket.
-
-        Args:
-            sock: Connected Unix domain socket
-        """
-        self._sock = sock
-        self._send_lock = threading.Lock()
-        self._recv_lock = threading.Lock()
-
-    @classmethod
-    def create_server(cls, sock_path: str) -> tuple[socket.socket, "UnixSocketTransport"]:
-        """
-        Create a server socket, wait for one connection, return transport.
-
-        Args:
-            sock_path: Path for the Unix domain socket
-
-        Returns:
-            Tuple of (server_socket, transport) - caller should close server_socket
-        """
-        # Remove existing socket file if present
-        if os.path.exists(sock_path):
-            os.unlink(sock_path)
-
-        server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        server.bind(sock_path)
-        server.listen(1)
-
-        conn, _ = server.accept()
-        return server, cls(conn)
-
-    @classmethod
-    def connect(cls, sock_path: str, timeout: float = 30.0) -> "UnixSocketTransport":
-        """
-        Connect to an existing Unix domain socket.
-
-        Args:
-            sock_path: Path to the Unix domain socket
-            timeout: Connection timeout in seconds
-
-        Returns:
-            Connected transport
-        """
-        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        sock.settimeout(timeout)
-        sock.connect(sock_path)
-        sock.settimeout(None)  # Back to blocking mode
-        return cls(sock)
-
-    def send(self, obj: Any) -> None:
-        """Send a JSON-serializable object with length prefix."""
-        data = json.dumps(obj).encode('utf-8')
-        header = struct.pack('>I', len(data))
-
-        with self._send_lock:
-            self._sock.sendall(header + data)
-
-    def recv(self) -> Any:
-        """Receive a length-prefixed JSON message."""
-        with self._recv_lock:
-            # Read 4-byte length header
-            header = self._recvall(4)
-            if not header or len(header) < 4:
-                raise ConnectionError("Socket closed or incomplete header")
-
-            msg_len = struct.unpack('>I', header)[0]
-
-            # Sanity check - 100MB limit
-            if msg_len > 100 * 1024 * 1024:
-                raise ValueError(f"Message too large: {msg_len} bytes")
-
-            # Read payload
-            data = self._recvall(msg_len)
-            if len(data) < msg_len:
-                raise ConnectionError(f"Incomplete message: {len(data)}/{msg_len} bytes")
-
-            return json.loads(data.decode('utf-8'))
-
-    def _recvall(self, n: int) -> bytes:
-        """Receive exactly n bytes from the socket."""
-        chunks = []
-        remaining = n
-        while remaining > 0:
-            chunk = self._sock.recv(min(remaining, 65536))
-            if not chunk:
-                break
-            chunks.append(chunk)
-            remaining -= len(chunk)
-        return b''.join(chunks)
-
-    def close(self) -> None:
-        """Close the socket."""
-        try:
-            self._sock.close()
-        except Exception:
-            pass
-
-    def fileno(self) -> int:
-        """Return socket file descriptor for select()."""
-        return self._sock.fileno()
-
-
-class StdioTransport:
-    """
-    Legacy transport using stdin/stdout with JSON lines.
-
-    This transport has issues with C libraries that print to stdout,
-    which is why UnixSocketTransport is preferred. Kept for compatibility.
-
-    Note: Requires fd-level redirection during method execution to prevent
-    C library output from corrupting the JSON stream.
-    """
-
-    def __init__(
-        self,
-        stdin: Any = None,
-        stdout: Any = None,
-    ):
-        """
-        Initialize with file handles.
-
-        Args:
-            stdin: Input stream (default: sys.stdin)
-            stdout: Output stream (default: sys.stdout)
-        """
-        self._stdin = stdin or sys.stdin
-        self._stdout = stdout or sys.stdout
-        self._send_lock = threading.Lock()
-
-    def send(self, obj: Any) -> None:
-        """Send a JSON object as a single line."""
-        with self._send_lock:
-            line = json.dumps(obj) + '\n'
-            self._stdout.write(line)
-            self._stdout.flush()
-
-    def recv(self) -> Any:
-        """Receive a JSON object from a single line."""
-        line = self._stdin.readline()
-        if not line:
-            raise ConnectionError("stdin closed")
-        return json.loads(line.strip())
-
-    def close(self) -> None:
-        """No-op for stdio transport."""
-        pass
-
-
-def get_socket_path(env_name: str, pid: Optional[int] = None) -> str:
-    """
-    Generate a unique socket path for an isolated environment.
-
-    Args:
-        env_name: Name of the isolated environment
-        pid: Process ID (default: current process)
-
-    Returns:
-        Path string for Unix domain socket
-    """
-    if pid is None:
-        pid = os.getpid()
-
-    # Use /tmp on Linux, or a temp directory on other platforms
-    if sys.platform == 'linux':
-        base = '/tmp'
-    else:
-        import tempfile
-        base = tempfile.gettempdir()
-
-    return str(Path(base) / f"comfyui-isolation-{env_name}-{pid}.sock")
-
-
-def cleanup_socket(sock_path: str) -> None:
-    """Remove a socket file if it exists."""
-    try:
-        if os.path.exists(sock_path):
-            os.unlink(sock_path)
-    except OSError:
-        pass
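
For reference, a minimal stdlib-only sketch of the wire format documented in the removed UnixSocketTransport above ([4-byte big-endian length][UTF-8 JSON payload]). The send_frame/recv_frame helper names are illustrative and are not part of comfy_env:

import json
import socket
import struct

def send_frame(sock: socket.socket, obj) -> None:
    # Frame = 4-byte big-endian length, then the UTF-8 JSON payload.
    data = json.dumps(obj).encode("utf-8")
    sock.sendall(struct.pack(">I", len(data)) + data)

def _recvall(sock: socket.socket, n: int) -> bytes:
    # Read exactly n bytes or fail if the peer closes mid-frame.
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed mid-frame")
        buf += chunk
    return buf

def recv_frame(sock: socket.socket):
    (length,) = struct.unpack(">I", _recvall(sock, 4))
    return json.loads(_recvall(sock, length).decode("utf-8"))

if __name__ == "__main__":
    a, b = socket.socketpair()              # stands in for the Unix-domain socket pair
    send_frame(a, {"method": "ping", "id": 1})
    print(recv_frame(b))                    # {'method': 'ping', 'id': 1}
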
comfy_env/ipc/worker.py DELETED
@@ -1,221 +0,0 @@
-"""
-BaseWorker - Base class for worker scripts that run in isolated environments.
-
-Node developers extend this class to define their worker's functionality.
-"""
-
-import sys
-import json
-import traceback
-from typing import Any, Callable, Dict, Optional
-from functools import wraps
-
-from .protocol import encode_object, decode_object
-
-
-# Global registry of methods
-_method_registry: Dict[str, Callable] = {}
-
-
-def register(name: Optional[str] = None):
-    """
-    Decorator to register a method as callable from the bridge.
-
-    Args:
-        name: Optional method name (defaults to function name)
-
-    Example:
-        class MyWorker(BaseWorker):
-            @register("process")
-            def process_image(self, image):
-                return processed_image
-
-            @register()  # Uses function name "do_something"
-            def do_something(self, x):
-                return x * 2
-    """
-    def decorator(func: Callable) -> Callable:
-        method_name = name if name else func.__name__
-        _method_registry[method_name] = func
-
-        @wraps(func)
-        def wrapper(*args, **kwargs):
-            return func(*args, **kwargs)
-        return wrapper
-
-    return decorator
-
-
-class BaseWorker:
-    """
-    Base class for isolated worker processes.
-
-    Subclass this to create a worker that handles requests from WorkerBridge.
-
-    The worker runs a main loop that:
-    1. Reads JSON requests from stdin
-    2. Dispatches to registered methods
-    3. Writes JSON responses to stdout
-
-    Example:
-        class MyWorker(BaseWorker):
-            def setup(self):
-                # Called once when worker starts
-                import torch
-                self.model = load_my_model()
-
-            @register("inference")
-            def run_inference(self, image, params):
-                return self.model(image, **params)
-
-        if __name__ == "__main__":
-            MyWorker().run()
-    """
-
-    def __init__(self):
-        """Initialize worker."""
-        self._methods: Dict[str, Callable] = {}
-        self._running = True
-
-        # Register decorated methods
-        for method_name, func in _method_registry.items():
-            # Bind method to this instance
-            self._methods[method_name] = lambda *args, f=func, **kwargs: f(self, *args, **kwargs)
-
-        # Also check instance methods decorated with @register
-        for name in dir(self):
-            method = getattr(self, name)
-            if hasattr(method, '__wrapped__') and name in _method_registry:
-                self._methods[name] = method
-
-    def setup(self) -> None:
-        """
-        Called once when the worker starts, before processing any requests.
-
-        Override this to load models, initialize state, etc.
-        """
-        pass
-
-    def teardown(self) -> None:
-        """
-        Called when the worker is shutting down.
-
-        Override this to cleanup resources.
-        """
-        pass
-
-    def log(self, message: str) -> None:
-        """
-        Log a message to stderr (visible in main process).
-
-        Args:
-            message: Message to log
-        """
-        print(f"[Worker] {message}", file=sys.stderr, flush=True)
-
-    def handle_request(self, request: Dict[str, Any]) -> Dict[str, Any]:
-        """
-        Handle a single request.
-
-        Args:
-            request: Request dict with 'method' and 'args' keys
-
-        Returns:
-            Response dict with 'result' or 'error' key
-        """
-        method_name = request.get("method")
-        args = request.get("args", {})
-        request_id = request.get("id", "unknown")
-
-        # Handle built-in commands
-        if method_name == "ping":
-            return {"id": request_id, "result": "pong"}
-
-        if method_name == "shutdown":
-            self._running = False
-            return {"id": request_id, "result": "shutting_down"}
-
-        if method_name == "list_methods":
-            return {"id": request_id, "result": list(self._methods.keys())}
-
-        # Find and call registered method
-        if method_name not in self._methods:
-            return {
-                "id": request_id,
-                "error": f"Unknown method: {method_name}",
-                "traceback": f"Available methods: {list(self._methods.keys())}",
-            }
-
-        try:
-            # Decode any encoded objects in args
-            decoded_args = decode_object(args)
-
-            # Call method
-            result = self._methods[method_name](**decoded_args)
-
-            # Encode result for JSON
-            encoded_result = encode_object(result)
-
-            return {"id": request_id, "result": encoded_result}
-
-        except Exception as e:
-            return {
-                "id": request_id,
-                "error": str(e),
-                "traceback": traceback.format_exc(),
-            }
-
-    def run(self) -> None:
-        """
-        Main worker loop - reads from stdin, writes to stdout.
-
-        This method blocks until shutdown is requested.
-        """
-        # Suppress library output that could interfere with JSON protocol
-        import warnings
-        import logging
-        import os
-
-        warnings.filterwarnings("ignore")
-        os.environ.setdefault('TF_CPP_MIN_LOG_LEVEL', '3')
-        logging.disable(logging.WARNING)
-
-        self.log("Worker starting...")
-
-        # Run setup
-        try:
-            self.setup()
-            self.log("Setup complete, ready for requests")
-        except Exception as e:
-            self.log(f"Setup failed: {e}")
-            traceback.print_exc(file=sys.stderr)
-            sys.exit(1)
-
-        # Main loop
-        try:
-            for line in sys.stdin:
-                line = line.strip()
-                if not line:
-                    continue
-
-                try:
-                    request = json.loads(line)
-                    response = self.handle_request(request)
-                    print(json.dumps(response), flush=True)
-
-                    if not self._running:
-                        break
-
-                except json.JSONDecodeError as e:
-                    error_response = {
-                        "id": "unknown",
-                        "error": f"Invalid JSON: {e}",
-                    }
-                    print(json.dumps(error_response), flush=True)
-
-        except KeyboardInterrupt:
-            self.log("Interrupted")
-        finally:
-            self.log("Shutting down...")
-            self.teardown()
-            self.log("Goodbye")
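
For reference, a hedged sketch of the host side of the JSON-lines protocol the removed worker loop implemented (one request object per line on the worker's stdin, one response per line on its stdout). The my_worker.py script name and the call() helper are illustrative; the real host-side bridge lived in the removed comfy_env/ipc/bridge.py:

import json
import subprocess

# Launch a hypothetical script that subclasses the removed BaseWorker and calls run().
proc = subprocess.Popen(
    ["python", "my_worker.py"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,
)

def call(method, args=None, request_id="1"):
    """Write one request line, read back one response line."""
    request = {"id": request_id, "method": method, "args": args or {}}
    proc.stdin.write(json.dumps(request) + "\n")
    proc.stdin.flush()
    return json.loads(proc.stdout.readline())

print(call("ping"))            # expected: {"id": "1", "result": "pong"}
print(call("list_methods"))    # names registered with @register(...)
call("shutdown")               # asks the worker loop to exit
proc.wait()
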