comfy-env 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. comfy_env/__init__.py +115 -62
  2. comfy_env/cli.py +89 -319
  3. comfy_env/config/__init__.py +18 -8
  4. comfy_env/config/parser.py +21 -122
  5. comfy_env/config/types.py +37 -70
  6. comfy_env/detection/__init__.py +77 -0
  7. comfy_env/detection/cuda.py +61 -0
  8. comfy_env/detection/gpu.py +230 -0
  9. comfy_env/detection/platform.py +70 -0
  10. comfy_env/detection/runtime.py +103 -0
  11. comfy_env/environment/__init__.py +53 -0
  12. comfy_env/environment/cache.py +141 -0
  13. comfy_env/environment/libomp.py +41 -0
  14. comfy_env/environment/paths.py +38 -0
  15. comfy_env/environment/setup.py +88 -0
  16. comfy_env/install.py +163 -249
  17. comfy_env/isolation/__init__.py +33 -2
  18. comfy_env/isolation/tensor_utils.py +83 -0
  19. comfy_env/isolation/workers/__init__.py +16 -0
  20. comfy_env/{workers → isolation/workers}/mp.py +1 -1
  21. comfy_env/{workers → isolation/workers}/subprocess.py +2 -2
  22. comfy_env/isolation/wrap.py +149 -409
  23. comfy_env/packages/__init__.py +60 -0
  24. comfy_env/packages/apt.py +36 -0
  25. comfy_env/packages/cuda_wheels.py +97 -0
  26. comfy_env/packages/node_dependencies.py +77 -0
  27. comfy_env/packages/pixi.py +85 -0
  28. comfy_env/packages/toml_generator.py +88 -0
  29. comfy_env-0.1.16.dist-info/METADATA +279 -0
  30. comfy_env-0.1.16.dist-info/RECORD +36 -0
  31. comfy_env/cache.py +0 -331
  32. comfy_env/errors.py +0 -293
  33. comfy_env/nodes.py +0 -187
  34. comfy_env/pixi/__init__.py +0 -48
  35. comfy_env/pixi/core.py +0 -588
  36. comfy_env/pixi/cuda_detection.py +0 -303
  37. comfy_env/pixi/platform/__init__.py +0 -21
  38. comfy_env/pixi/platform/base.py +0 -96
  39. comfy_env/pixi/platform/darwin.py +0 -53
  40. comfy_env/pixi/platform/linux.py +0 -68
  41. comfy_env/pixi/platform/windows.py +0 -284
  42. comfy_env/pixi/resolver.py +0 -198
  43. comfy_env/prestartup.py +0 -192
  44. comfy_env/workers/__init__.py +0 -38
  45. comfy_env/workers/tensor_utils.py +0 -188
  46. comfy_env-0.1.14.dist-info/METADATA +0 -291
  47. comfy_env-0.1.14.dist-info/RECORD +0 -33
  48. /comfy_env/{workers → isolation/workers}/base.py +0 -0
  49. {comfy_env-0.1.14.dist-info → comfy_env-0.1.16.dist-info}/WHEEL +0 -0
  50. {comfy_env-0.1.14.dist-info → comfy_env-0.1.16.dist-info}/entry_points.txt +0 -0
  51. {comfy_env-0.1.14.dist-info → comfy_env-0.1.16.dist-info}/licenses/LICENSE +0 -0
@@ -1,38 +0,0 @@
1
- """
2
- Workers - Process isolation for ComfyUI nodes.
3
-
4
- This module provides two isolation tiers:
5
-
6
- Tier 1: MPWorker (same Python, fresh CUDA context)
7
- - Uses multiprocessing.Queue
8
- - Zero-copy tensor transfer via shared memory
9
- - ~30ms overhead per call
10
- - Use for: Memory isolation, fresh CUDA context
11
-
12
- Tier 2: SubprocessWorker (different Python/venv)
13
- - Persistent subprocess + socket IPC
14
- - ~50-100ms overhead per call
15
- - Use for: Different PyTorch versions, incompatible deps
16
-
17
- Usage:
18
- from comfy_env.workers import MPWorker, SubprocessWorker
19
-
20
- # Create worker directly
21
- worker = MPWorker()
22
- result = worker.call(my_function, arg1, arg2)
23
-
24
- # Or use SubprocessWorker for isolated Python
25
- worker = SubprocessWorker(python="/path/to/venv/bin/python")
26
- result = worker.call(my_function, image=tensor)
27
- """
28
-
29
- from .base import Worker, WorkerError
30
- from .mp import MPWorker
31
- from .subprocess import SubprocessWorker
32
-
33
- __all__ = [
34
- "Worker",
35
- "WorkerError",
36
- "MPWorker",
37
- "SubprocessWorker",
38
- ]
@@ -1,188 +0,0 @@
1
- """
2
- Tensor utilities for robust IPC handling.
3
-
4
- Patterns borrowed from pyisolate (MIT licensed):
5
- - TensorKeeper: Prevents GC race conditions
6
- - CUDA IPC re-share detection: Graceful handling of received tensors
7
- """
8
-
9
- import collections
10
- import logging
11
- import threading
12
- import time
13
- from typing import Any
14
-
15
- logger = logging.getLogger("comfy_env")
16
-
17
-
18
- # ---------------------------------------------------------------------------
19
- # TensorKeeper - Prevents GC Race Conditions
20
- # ---------------------------------------------------------------------------
21
-
22
- class TensorKeeper:
23
- """
24
- Keeps strong references to tensors during IPC to prevent premature GC.
25
-
26
- Problem this solves:
27
- When a tensor is serialized for IPC, the serialization returns
28
- immediately but the receiving process may not have opened the
29
- shared memory yet. If the sending process's tensor gets garbage
30
- collected, the shared memory file is deleted, causing
31
- "No such file or directory" errors on the receiver.
32
-
33
- Solution:
34
- Keep strong references to tensors for a configurable window
35
- (default 30 seconds) to ensure the receiver has time to open them.
36
-
37
- Usage:
38
- keeper = TensorKeeper()
39
- keeper.keep(tensor) # Call before putting on queue
40
- """
41
-
42
- def __init__(self, retention_seconds: float = 30.0):
43
- """
44
- Args:
45
- retention_seconds: How long to keep tensor references.
46
- 30s is safe for slow systems.
47
- """
48
- self.retention_seconds = retention_seconds
49
- self._keeper: collections.deque = collections.deque()
50
- self._lock = threading.Lock()
51
-
52
- def keep(self, t: Any) -> None:
53
- """Keep a strong reference to tensor for retention_seconds."""
54
- # Only keep torch tensors
55
- try:
56
- import torch
57
- if not isinstance(t, torch.Tensor):
58
- return
59
- except ImportError:
60
- return
61
-
62
- now = time.time()
63
- with self._lock:
64
- self._keeper.append((now, t))
65
-
66
- # Cleanup old entries
67
- while self._keeper:
68
- timestamp, _ = self._keeper[0]
69
- if now - timestamp > self.retention_seconds:
70
- self._keeper.popleft()
71
- else:
72
- break
73
-
74
- def keep_recursive(self, obj: Any) -> None:
75
- """Recursively keep all tensors in a nested structure."""
76
- try:
77
- import torch
78
- if isinstance(obj, torch.Tensor):
79
- self.keep(obj)
80
- elif isinstance(obj, (list, tuple)):
81
- for item in obj:
82
- self.keep_recursive(item)
83
- elif isinstance(obj, dict):
84
- for v in obj.values():
85
- self.keep_recursive(v)
86
- except ImportError:
87
- pass
88
-
89
- def __len__(self) -> int:
90
- """Return number of tensors currently being kept."""
91
- with self._lock:
92
- return len(self._keeper)
93
-
94
-
95
- # Global instance
96
- _tensor_keeper = TensorKeeper()
97
-
98
-
99
- def keep_tensor(t: Any) -> None:
100
- """Keep a tensor reference to prevent GC during IPC."""
101
- _tensor_keeper.keep(t)
102
-
103
-
104
- def keep_tensors_recursive(obj: Any) -> None:
105
- """Keep all tensor references in a nested structure."""
106
- _tensor_keeper.keep_recursive(obj)
107
-
108
-
109
- # ---------------------------------------------------------------------------
110
- # CUDA IPC Re-share Detection
111
- # ---------------------------------------------------------------------------
112
-
113
- def prepare_tensor_for_ipc(t: Any) -> Any:
114
- """
115
- Prepare a tensor for IPC, handling CUDA IPC re-share limitation.
116
-
117
- Problem this solves:
118
- Tensors received via CUDA IPC cannot be re-shared. If a node
119
- receives a tensor via IPC and tries to return it, you get:
120
- "RuntimeError: Attempted to send CUDA tensor received from
121
- another process; this is not currently supported."
122
-
123
- Solution:
124
- Detect this situation and clone the tensor. Log a warning for
125
- large tensors so users can optimize their pipelines.
126
-
127
- Args:
128
- t: A tensor (or non-tensor, which is returned as-is)
129
-
130
- Returns:
131
- The tensor, possibly cloned if it was received via IPC.
132
- """
133
- try:
134
- import torch
135
- if not isinstance(t, torch.Tensor):
136
- return t
137
-
138
- if not t.is_cuda:
139
- # CPU tensors don't have this limitation
140
- return t
141
-
142
- # Test if tensor can be shared
143
- import torch.multiprocessing.reductions as reductions
144
- try:
145
- func, args = reductions.reduce_tensor(t)
146
- return t # Can be shared as-is
147
- except RuntimeError as e:
148
- if "received from another process" in str(e):
149
- # This tensor was received via IPC and can't be re-shared
150
- tensor_size_mb = t.numel() * t.element_size() / (1024 * 1024)
151
- if tensor_size_mb > 100:
152
- logger.warning(
153
- f"PERFORMANCE: Cloning large CUDA tensor ({tensor_size_mb:.1f}MB) "
154
- "received from another process. Consider modifying the node "
155
- "to avoid returning unmodified input tensors."
156
- )
157
- else:
158
- logger.debug(
159
- f"Cloning CUDA tensor ({tensor_size_mb:.2f}MB) received from another process"
160
- )
161
- return t.clone()
162
- raise
163
-
164
- except ImportError:
165
- return t
166
-
167
-
168
- def prepare_for_ipc_recursive(obj: Any) -> Any:
169
- """
170
- Recursively prepare all tensors in a nested structure for IPC.
171
-
172
- Also keeps tensor references to prevent GC.
173
- """
174
- try:
175
- import torch
176
- if isinstance(obj, torch.Tensor):
177
- prepared = prepare_tensor_for_ipc(obj)
178
- keep_tensor(prepared)
179
- return prepared
180
- elif isinstance(obj, list):
181
- return [prepare_for_ipc_recursive(x) for x in obj]
182
- elif isinstance(obj, tuple):
183
- return tuple(prepare_for_ipc_recursive(x) for x in obj)
184
- elif isinstance(obj, dict):
185
- return {k: prepare_for_ipc_recursive(v) for k, v in obj.items()}
186
- except ImportError:
187
- pass
188
- return obj
@@ -1,291 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: comfy-env
3
- Version: 0.1.14
4
- Summary: Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation
5
- Project-URL: Homepage, https://github.com/PozzettiAndrea/comfy-env
6
- Project-URL: Repository, https://github.com/PozzettiAndrea/comfy-env
7
- Project-URL: Issues, https://github.com/PozzettiAndrea/comfy-env/issues
8
- Author: Andrea Pozzetti
9
- License: MIT
10
- License-File: LICENSE
11
- Keywords: comfyui,cuda,environment,isolation,process,venv,wheels
12
- Classifier: Development Status :: 3 - Alpha
13
- Classifier: Intended Audience :: Developers
14
- Classifier: License :: OSI Approved :: MIT License
15
- Classifier: Programming Language :: Python :: 3.10
16
- Classifier: Programming Language :: Python :: 3.11
17
- Classifier: Programming Language :: Python :: 3.12
18
- Classifier: Programming Language :: Python :: 3.13
19
- Requires-Python: >=3.10
20
- Requires-Dist: pip>=21.0
21
- Requires-Dist: tomli-w>=1.0.0
22
- Requires-Dist: tomli>=2.0.0; python_version < '3.11'
23
- Requires-Dist: uv>=0.4.0
24
- Provides-Extra: dev
25
- Requires-Dist: mypy; extra == 'dev'
26
- Requires-Dist: pytest; extra == 'dev'
27
- Requires-Dist: ruff; extra == 'dev'
28
- Description-Content-Type: text/markdown
29
-
30
- # comfy-env
31
-
32
- Environment management for ComfyUI custom nodes. Provides:
33
-
34
- 1. **CUDA Wheel Resolution** - Install pre-built CUDA wheels (nvdiffrast, pytorch3d) without compilation
35
- 2. **Process Isolation** - Run nodes in separate Python environments with their own dependencies
36
-
37
- ## Why?
38
-
39
- ComfyUI custom nodes face two challenges:
40
-
41
- **Type 1: Dependency Conflicts**
42
- - Node A needs `torch==2.1.0` with CUDA 11.8
43
- - Node B needs `torch==2.8.0` with CUDA 12.8
44
-
45
- **Type 2: CUDA Package Installation**
46
- - Users don't have compilers installed
47
- - Building from source takes forever
48
- - pip install fails with cryptic errors
49
-
50
- This package solves both problems.
51
-
52
- ## Installation
53
-
54
- ```bash
55
- pip install comfy-env
56
- ```
57
-
58
- Requires [uv](https://github.com/astral-sh/uv) for fast environment creation:
59
-
60
- ```bash
61
- curl -LsSf https://astral.sh/uv/install.sh | sh
62
- ```
63
-
64
- ## Quick Start
65
-
66
- ### In-Place Installation (Type 2 - CUDA Wheels)
67
-
68
- Create a `comfy-env.toml` in your node directory:
69
-
70
- ```toml
71
- [cuda]
72
- packages = ["nvdiffrast", "pytorch3d"]
73
-
74
- [packages]
75
- requirements = ["transformers>=4.56", "pillow"]
76
- ```
77
-
78
- Then in your `__init__.py`:
79
-
80
- ```python
81
- from comfy_env import install
82
-
83
- # Install CUDA wheels into current environment
84
- install()
85
- ```
86
-
87
- ### Process Isolation (Type 1 - Separate Environment)
88
-
89
- For nodes that need completely separate dependencies (different Python version, conda packages, conflicting libraries).
90
-
91
- #### Recommended: Pack-Wide Isolation
92
-
93
- For node packs where ALL nodes run in the same isolated environment:
94
-
95
- **Step 1: Configure comfy-env.toml**
96
-
97
- ```toml
98
- [mypack]
99
- python = "3.11"
100
- isolated = true # All nodes run in this env
101
-
102
- [mypack.conda]
103
- packages = ["cgal"] # Conda packages (uses pixi)
104
-
105
- [mypack.packages]
106
- requirements = ["trimesh[easy]>=4.0", "bpy>=4.2"]
107
- ```
108
-
109
- **Step 2: Enable in __init__.py**
110
-
111
- ```python
112
- from comfy_env import setup_isolated_imports, enable_isolation
113
-
114
- # Setup import stubs BEFORE importing nodes
115
- setup_isolated_imports(__file__)
116
-
117
- from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
118
-
119
- # Enable isolation for all nodes
120
- enable_isolation(NODE_CLASS_MAPPINGS)
121
- ```
122
-
123
- **That's it!** All nodes run in an isolated Python 3.11 environment with their own dependencies.
124
-
125
- #### Alternative: Per-Node Isolation
126
-
127
- For cases where different nodes need different environments:
128
-
129
- ```python
130
- from comfy_env import isolated
131
-
132
- @isolated(env="my-node")
133
- class MyNode:
134
- FUNCTION = "process"
135
- RETURN_TYPES = ("IMAGE",)
136
-
137
- def process(self, image):
138
- # Runs in isolated subprocess with its own venv
139
- import conflicting_package
140
- return (result,)
141
- ```
142
-
143
- ## CLI
144
-
145
- ```bash
146
- # Show detected environment
147
- comfy-env info
148
-
149
- # Install from config
150
- comfy-env install
151
-
152
- # Dry run (show what would be installed)
153
- comfy-env install --dry-run
154
-
155
- # Verify installation
156
- comfy-env doctor
157
- ```
158
-
159
- ## Configuration
160
-
161
- ### Simple Format (comfy-env.toml)
162
-
163
- ```toml
164
- # CUDA packages from https://pozzettiandrea.github.io/cuda-wheels/
165
- [cuda]
166
- packages = ["nvdiffrast", "pytorch3d", "torch-scatter"]
167
-
168
- # Regular pip packages
169
- [packages]
170
- requirements = ["transformers>=4.56", "pillow"]
171
- ```
172
-
173
- ### Full Format
174
-
175
- ```toml
176
- [system]
177
- linux = ["libgl1", "libopengl0"] # apt packages
178
-
179
- [local.cuda]
180
- packages = ["nvdiffrast"]
181
-
182
- [local.packages]
183
- requirements = ["pillow", "numpy"]
184
-
185
- # For isolated environments (creates separate venv)
186
- [myenv]
187
- python = "3.10"
188
- cuda = "12.8"
189
-
190
- [myenv.cuda]
191
- packages = ["torch-scatter"]
192
-
193
- [myenv.packages]
194
- requirements = ["transformers>=4.56"]
195
- ```
196
-
197
- ## CUDA Wheels Index
198
-
199
- CUDA packages are installed from the [cuda-wheels](https://pozzettiandrea.github.io/cuda-wheels/) index, which provides pre-built wheels for:
200
-
201
- - **PyTorch Geometric**: torch-scatter, torch-cluster, torch-sparse, torch-spline-conv
202
- - **NVIDIA**: nvdiffrast, pytorch3d, gsplat
203
- - **Attention**: flash-attn, sageattention
204
- - **Mesh Processing**: cumesh, cubvh
205
- - **Others**: spconv, detectron2, lietorch, and more
206
-
207
- Wheels are automatically selected based on your GPU, CUDA version, PyTorch version, and Python version.
208
-
209
- ### Supported Configurations
210
-
211
- | GPU Architecture | CUDA | PyTorch |
212
- |-----------------|------|---------|
213
- | Blackwell (sm_100+) | 12.8 | 2.8+ |
214
- | Ada/Hopper/Ampere (sm_80+) | 12.8 | 2.8 |
215
- | Turing (sm_75) | 12.8 | 2.8 |
216
- | Pascal (sm_60) | 12.4 | 2.4 |
217
-
218
- ## API Reference
219
-
220
- ### install()
221
-
222
- ```python
223
- from comfy_env import install
224
-
225
- # Auto-discover config
226
- install()
227
-
228
- # Explicit config
229
- install(config="comfy-env.toml")
230
-
231
- # Dry run
232
- install(dry_run=True)
233
- ```
234
-
235
- ### RuntimeEnv
236
-
237
- ```python
238
- from comfy_env import RuntimeEnv
239
-
240
- env = RuntimeEnv.detect()
241
- print(env)
242
- # Python 3.10, CUDA 12.8, PyTorch 2.8.0, GPU: NVIDIA GeForce RTX 4090
243
-
244
- # Get environment variables
245
- vars_dict = env.as_dict()
246
- # {'cuda_version': '12.8', 'cuda_short': '128', 'torch_mm': '28', ...}
247
- ```
248
-
249
- ### enable_isolation()
250
-
251
- ```python
252
- from comfy_env import enable_isolation
253
-
254
- enable_isolation(NODE_CLASS_MAPPINGS)
255
- ```
256
-
257
- Wraps all node classes so their FUNCTION methods run in the isolated environment specified in comfy-env.toml. Requires `isolated = true` in the environment config.
258
-
259
- ### setup_isolated_imports()
260
-
261
- ```python
262
- from comfy_env import setup_isolated_imports
263
-
264
- setup_isolated_imports(__file__)
265
- ```
266
-
267
- Sets up import stubs for packages that exist only in the isolated pixi environment. Call this BEFORE importing your nodes module. Packages available in both host and isolated environment are not stubbed.
268
-
269
- ### Workers (for custom isolation)
270
-
271
- ```python
272
- from comfy_env import TorchMPWorker
273
-
274
- # Same-venv isolation (zero-copy tensors)
275
- worker = TorchMPWorker()
276
- result = worker.call(my_function, image=tensor)
277
- ```
278
-
279
- ## GPU Detection
280
-
281
- ```python
282
- from comfy_env import detect_cuda_version, get_gpu_summary
283
-
284
- cuda = detect_cuda_version() # "12.8", "12.4", or None
285
- print(get_gpu_summary())
286
- # GPU 0: NVIDIA GeForce RTX 5090 (sm_120) [Blackwell - CUDA 12.8]
287
- ```
288
-
289
- ## License
290
-
291
- MIT - see LICENSE file.
@@ -1,33 +0,0 @@
1
- comfy_env/__init__.py,sha256=xEbM77Oc7TKI-gu87_Lto8GF6Odjie-BKov4sr1XQ_k,2790
2
- comfy_env/cache.py,sha256=x705DNxvvY8GUQ40TLhIDk9vX3a7EFJOaskULybVWeM,9068
3
- comfy_env/cli.py,sha256=ty4HYlzollCUCS0o6Sha6eczPAsW_gHRVgvck3IfA2w,12723
4
- comfy_env/errors.py,sha256=q-C3vyrPa_kk_Ao8l17mIGfJiG2IR0hCFV0GFcNLmcI,9924
5
- comfy_env/install.py,sha256=N7eBj8wB2DrGepVYk-Hks2mSf6UuGzj34pfVLNYJgQ4,10357
6
- comfy_env/nodes.py,sha256=tUbsUdjnJCUUoxM7NpsdUuawuIz1exfOmWdsLGILXiY,5391
7
- comfy_env/prestartup.py,sha256=FjuQLfDs3ggc47siak33K4dhQzNRhCIEHshX7u1XQXc,6734
8
- comfy_env/config/__init__.py,sha256=4Guylkb-FV8QxhFwschzpzbr2eu8y-KNgNT3_JOm9jc,403
9
- comfy_env/config/parser.py,sha256=dA1lX5ExBEfCqUJwe4V5i_jn2NJ69bMq3c3ji3lMSV8,4295
10
- comfy_env/config/types.py,sha256=Sb8HO34xsSZu5YAc2K4M7Gb3QNevJlngf12hHiwuU0w,2140
11
- comfy_env/isolation/__init__.py,sha256=vw9a4mpJ2CFjy-PLe_A3zQ6umBQklgqWNxwn9beNw3g,175
12
- comfy_env/isolation/wrap.py,sha256=Z4JuQeLm4MIY9rzO7b2FJ5Q3p4onVswdklsYIYHAbwc,16182
13
- comfy_env/pixi/__init__.py,sha256=BUrq7AQf3WDm0cHWh72B2xZbURNnDu2dCuELWiQCUiM,997
14
- comfy_env/pixi/core.py,sha256=wXNy31deOI7QpRLPt9i1QfLrWGvKwg2j_kva-kaUEjs,21789
15
- comfy_env/pixi/cuda_detection.py,sha256=sqB3LjvGNdV4eFqiARQGfyecBM3ZiUmeh6nG0YCRYQw,9751
16
- comfy_env/pixi/resolver.py,sha256=U_A8rBDxCj4gUlJt2YJQniP4cCKqxJEiVFgXOoH7vM8,6339
17
- comfy_env/pixi/platform/__init__.py,sha256=Nb5MPZIEeanSMEWwqU4p4bnEKTJn1tWcwobnhq9x9IY,614
18
- comfy_env/pixi/platform/base.py,sha256=iS0ptTTVjXRwPU4qWUdvHI7jteuzxGSjWr5BUQ7hGiU,2453
19
- comfy_env/pixi/platform/darwin.py,sha256=HK3VkLT6DfesAnIXwx2IaUFHTBclF0xTQnC7azWY6Kc,1552
20
- comfy_env/pixi/platform/linux.py,sha256=xLp8FEbFqZLQrzIZBI9z3C4g23Ab1ASTHLsXDzsdCoA,2062
21
- comfy_env/pixi/platform/windows.py,sha256=FCOCgpzGzorY9-HueMlJUR8DxM2eH-cj9iZk6K026Is,10891
22
- comfy_env/templates/comfy-env-instructions.txt,sha256=ve1RAthW7ouumU9h6DM7mIRX1MS8_Tyonq2U4tcrFu8,1031
23
- comfy_env/templates/comfy-env.toml,sha256=ROIqi4BlPL1MEdL1VgebfTHpdwPNYGHwWeigI9Kw-1I,4831
24
- comfy_env/workers/__init__.py,sha256=TMVG55d2XLP1mJ3x1d16H0SBDJZtk2kMC5P4HLk9TrA,1073
25
- comfy_env/workers/base.py,sha256=4ZYTaQ4J0kBHCoO_OfZnsowm4rJCoqinZUaOtgkOPbw,2307
26
- comfy_env/workers/mp.py,sha256=R0XWsiHv8gswxa_-iNHU14o_9Og0RFG0QnY9DRZzn2c,34060
27
- comfy_env/workers/subprocess.py,sha256=B0PsHLuywPIJSRpuA4E8Dg6tGzuro6wqIr8czqMqtPE,57128
28
- comfy_env/workers/tensor_utils.py,sha256=TCuOAjJymrSbkgfyvcKtQ_KbVWTqSwP9VH_bCaFLLq8,6409
29
- comfy_env-0.1.14.dist-info/METADATA,sha256=o-SWEjHCqQWW2CcCWNGCbNcCtbYRAU70lAiNwXo41U8,6971
30
- comfy_env-0.1.14.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
31
- comfy_env-0.1.14.dist-info/entry_points.txt,sha256=J4fXeqgxU_YenuW_Zxn_pEL7J-3R0--b6MS5t0QmAr0,49
32
- comfy_env-0.1.14.dist-info/licenses/LICENSE,sha256=E68QZMMpW4P2YKstTZ3QU54HRQO8ecew09XZ4_Vn870,1093
33
- comfy_env-0.1.14.dist-info/RECORD,,
File without changes