comfy-env 0.0.8__py3-none-any.whl → 0.0.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
comfy_env/registry.py CHANGED
@@ -1,8 +1,7 @@
 """Built-in registry of CUDA packages and their wheel sources.
 
-This module provides a mapping of well-known CUDA packages to their
-installation sources, eliminating the need for users to specify
-wheel_sources in their comfyui_env.toml.
+This module loads package configurations from wheel_sources.yml and provides
+lookup functions for the install module.
 
 Install method types:
 - "index": Use pip --extra-index-url (PEP 503 simple repository)
@@ -10,9 +9,13 @@ Install method types:
 - "find_links": Use pip --find-links (for PyG, etc.)
 - "pypi_variant": Package name varies by CUDA version (e.g., spconv-cu124)
 - "github_release": Direct wheel URL from GitHub releases with fallback sources
+- "pypi": Standard PyPI install
 """
 
-from typing import Dict, Any, Optional
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+import yaml
 
 
 def get_cuda_short2(cuda_version: str) -> str:
@@ -40,180 +43,16 @@ def get_cuda_short2(cuda_version: str) -> str:
     return f"{major}{minor}"
 
 
-# =============================================================================
-# Package Registry
-# =============================================================================
-# Maps package names to their installation configuration.
-#
-# Template variables available:
-#   {cuda_version}  - Full CUDA version (e.g., "12.8")
-#   {cuda_short}    - CUDA without dot (e.g., "128")
-#   {cuda_short2}   - CUDA short for spconv (e.g., "124" not "1240")
-#   {torch_version} - Full PyTorch version (e.g., "2.8.0")
-#   {torch_short}   - PyTorch without dots (e.g., "280")
-#   {torch_mm}      - PyTorch major.minor (e.g., "28")
-#   {py_version}    - Python version (e.g., "3.10")
-#   {py_short}      - Python without dot (e.g., "310")
-#   {py_minor}      - Python minor version only (e.g., "10")
-#   {platform}      - Platform tag (e.g., "linux_x86_64")
-# =============================================================================
-
-PACKAGE_REGISTRY: Dict[str, Dict[str, Any]] = {
-    # =========================================================================
-    # PyTorch Geometric (PyG) packages - official index
-    # https://pytorch-geometric.readthedocs.io/en/latest/install/installation.html
-    # Uses --find-links (not --extra-index-url) for proper wheel discovery
-    # =========================================================================
-    "torch-scatter": {
-        "method": "find_links",
-        "index_url": "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html",
-        "description": "Scatter operations for PyTorch",
-    },
-    "torch-cluster": {
-        "method": "find_links",
-        "index_url": "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html",
-        "description": "Clustering algorithms for PyTorch",
-    },
-    "torch-sparse": {
-        "method": "find_links",
-        "index_url": "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html",
-        "description": "Sparse tensor operations for PyTorch",
-    },
-    "torch-spline-conv": {
-        "method": "find_links",
-        "index_url": "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html",
-        "description": "Spline convolutions for PyTorch",
-    },
-
-    # =========================================================================
-    # pytorch3d - Facebook's official wheels
-    # https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md
-    # =========================================================================
-    "pytorch3d": {
-        "method": "index",
-        "index_url": "https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py3{py_minor}_cu{cuda_short}_pyt{torch_short}/download.html",
-        "description": "PyTorch3D - 3D deep learning library",
-    },
-
-    # =========================================================================
-    # PozzettiAndrea wheel repos (GitHub Pages indexes)
-    # =========================================================================
-    # nvdiffrast - wheels are now at cu{cuda}-torch{torch_short} releases
-    "nvdiffrast": {
-        "method": "github_index",
-        "index_url": "https://pozzettiandrea.github.io/nvdiffrast-full-wheels/cu{cuda_short}-torch{torch_short}/",
-        "description": "NVIDIA differentiable rasterizer",
-    },
-    # cumesh, o_voxel, flex_gemm, nvdiffrec_render use torch_short (3 digits: 280)
-    "cumesh": {
-        "method": "github_index",
-        "index_url": "https://pozzettiandrea.github.io/cumesh-wheels/cu{cuda_short}-torch{torch_short}/",
-        "description": "CUDA-accelerated mesh utilities",
-    },
-    "o_voxel": {
-        "method": "github_index",
-        "index_url": "https://pozzettiandrea.github.io/ovoxel-wheels/cu{cuda_short}-torch{torch_short}/",
-        "description": "O-Voxel CUDA extension for TRELLIS",
-    },
-    "flex_gemm": {
-        "method": "github_index",
-        "index_url": "https://pozzettiandrea.github.io/flexgemm-wheels/cu{cuda_short}-torch{torch_short}/",
-        "description": "Flexible GEMM operations",
-    },
-    "nvdiffrec_render": {
-        "method": "github_release",
-        "sources": [
-            {
-                "name": "PozzettiAndrea",
-                "url_template": "https://github.com/PozzettiAndrea/nvdiffrec_render-wheels/releases/download/cu{cuda_short}-torch{torch_short}/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-linux_x86_64.whl",
-                "platforms": ["linux_x86_64"],
-            },
-            {
-                "name": "PozzettiAndrea-windows",
-                "url_template": "https://github.com/PozzettiAndrea/nvdiffrec_render-wheels/releases/download/cu{cuda_short}-torch{torch_short}/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-win_amd64.whl",
-                "platforms": ["win_amd64", "windows_amd64"],
-            },
-        ],
-        "description": "NVDiffRec rendering utilities",
-    },
-
-    # =========================================================================
-    # spconv - PyPI with CUDA-versioned package names
-    # Package names: spconv-cu118, spconv-cu120, spconv-cu121, spconv-cu124, spconv-cu126
-    # Note: Max available is cu126 as of Jan 2026, use explicit spconv-cu126 in config
-    # =========================================================================
-    "spconv": {
-        "method": "pypi_variant",
-        "package_template": "spconv-cu{cuda_short2}",
-        "description": "Sparse convolution library (use spconv-cu126 for CUDA 12.6+)",
-    },
-
-    # =========================================================================
-    # sageattention - Fast quantized attention (2-5x faster than FlashAttention)
-    # Linux: Prebuilt wheels from Kijai/PrecompiledWheels (v2.2.0, cp312)
-    # Windows: Prebuilt wheels from woct0rdho (v2.2.0, cp39-abi3)
-    # =========================================================================
-    "sageattention": {
-        "method": "github_release",
-        "sources": [
-            # Linux: Kijai's precompiled wheels on HuggingFace (Python 3.12)
-            {
-                "name": "kijai-hf",
-                "url_template": "https://huggingface.co/Kijai/PrecompiledWheels/resolve/main/sageattention-{version}-cp312-cp312-linux_x86_64.whl",
-                "platforms": ["linux_x86_64"],
-            },
-            # Windows: woct0rdho prebuilt wheels (ABI3: Python >= 3.9)
-            # Format: sageattention-2.2.0+cu128torch2.8.0.post3-cp39-abi3-win_amd64.whl
-            {
-                "name": "woct0rdho",
-                "url_template": "https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0%2Bcu{cuda_short}torch{torch_version}.post3-cp39-abi3-win_amd64.whl",
-                "platforms": ["win_amd64"],
-            },
-        ],
-        "description": "SageAttention - 2-5x faster than FlashAttention with quantized kernels",
-    },
-
-    # =========================================================================
-    # triton - Required for sageattention on Linux (usually bundled with PyTorch)
-    # =========================================================================
-    "triton": {
-        "method": "pypi",
-        "description": "Triton compiler for custom CUDA kernels (required by sageattention)",
-    },
-
-    # =========================================================================
-    # flash-attn - Multi-source prebuilt wheels
-    # Required for UniRig and other transformer-based models
-    # Sources: Dao-AILab (official), mjun0812 (Linux), bdashore3 (Windows)
-    # =========================================================================
-    "flash-attn": {
-        "method": "github_release",
-        "sources": [
-            # Linux: Dao-AILab official wheels (CUDA 12.x, PyTorch 2.4-2.8)
-            # Format: flash_attn-2.8.3+cu12torch2.8cxx11abiTRUE-cp310-cp310-linux_x86_64.whl
-            {
-                "name": "Dao-AILab",
-                "url_template": "https://github.com/Dao-AILab/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_major}torch{torch_dotted_mm}cxx11abiTRUE-{py_tag}-{py_tag}-linux_x86_64.whl",
-                "platforms": ["linux_x86_64"],
-            },
-            # Linux: mjun0812 prebuilt wheels (CUDA 12.4-13.0, PyTorch 2.5-2.9)
-            # Format: flash_attn-2.8.3+cu128torch2.8-cp310-cp310-linux_x86_64.whl
-            # Note: Release v0.7.2 contains multiple flash_attn versions
-            {
-                "name": "mjun0812",
-                "url_template": "https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.7.2/flash_attn-{version}%2Bcu{cuda_short}torch{torch_dotted_mm}-{py_tag}-{py_tag}-linux_x86_64.whl",
-                "platforms": ["linux_x86_64"],
-            },
-            # Windows: bdashore3 prebuilt wheels (CUDA 12.4/12.8, PyTorch 2.6-2.8)
-            {
-                "name": "bdashore3",
-                "url_template": "https://github.com/bdashore3/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_short}torch{torch_version}cxx11abiFALSE-{py_tag}-{py_tag}-win_amd64.whl",
-                "platforms": ["win_amd64"],
-            },
-        ],
-        "description": "Flash Attention for fast transformer inference",
-    },
-}
+def _load_wheel_sources() -> Dict[str, Dict[str, Any]]:
+    """Load package registry from wheel_sources.yml."""
+    yml_path = Path(__file__).parent / "wheel_sources.yml"
+    with open(yml_path, "r") as f:
+        data = yaml.safe_load(f)
+    return data.get("packages", {})
+
+
+# Load registry at module import time
+PACKAGE_REGISTRY: Dict[str, Dict[str, Any]] = _load_wheel_sources()
 
 
 def get_package_info(package: str) -> Optional[Dict[str, Any]]:
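
For reference, a minimal sketch of how the YAML-backed registry above can be exercised, assuming wheel_sources.yml sits next to registry.py as the new _load_wheel_sources() expects; load_packages is a hypothetical stand-in written for this illustration, not part of comfy-env's public API.

    # Hypothetical usage sketch, not comfy-env's API: read wheel_sources.yml
    # and look up one entry, mirroring _load_wheel_sources() above.
    from pathlib import Path
    import yaml

    def load_packages(yml_path: Path) -> dict:
        with open(yml_path, "r") as f:
            data = yaml.safe_load(f)
        return data.get("packages", {})

    packages = load_packages(Path("comfy_env/wheel_sources.yml"))
    entry = packages.get("flash-attn", {})
    print(entry.get("method"))       # "github_release" per the YAML added below
    print(entry.get("description"))
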
comfy_env/resolver.py CHANGED
@@ -137,11 +137,14 @@ class RuntimeEnv:
             "py_version": self.python_version,
             "py_short": self.python_short,
             "py_minor": py_minor,
+            "py_tag": f"cp{self.python_short}",  # e.g., cp310, cp311
         }
 
         if self.cuda_version:
             result["cuda_version"] = self.cuda_version
             result["cuda_short"] = self.cuda_short
+            # cuda_major: just the major version (e.g., "12" from "12.8")
+            result["cuda_major"] = self.cuda_version.split(".")[0]
 
         if self.torch_version:
             result["torch_version"] = self.torch_version
@@ -218,21 +221,6 @@ class WheelSource:
         return package.lower() in [p.lower() for p in self.packages]
 
 
-# Default wheel sources for common CUDA packages
-DEFAULT_WHEEL_SOURCES = [
-    WheelSource(
-        name="nvdiffrast-wheels",
-        url_template="https://github.com/PozzettiAndrea/nvdiffrast-full-wheels/releases/download/v{version}/nvdiffrast-{version}%2Bcu{cuda_short}torch{torch_mm}-cp{py_short}-cp{py_short}-{platform}.whl",
-        packages=["nvdiffrast"],
-    ),
-    WheelSource(
-        name="cumesh-wheels",
-        url_template="https://github.com/PozzettiAndrea/cumesh-wheels/releases/download/v{version}/{package}-{version}%2Bcu{cuda_short}torch{torch_mm}-cp{py_short}-cp{py_short}-{platform}.whl",
-        packages=["pytorch3d", "torch-cluster", "torch-scatter", "torch-sparse"],
-    ),
-]
-
-
 class WheelResolver:
     """
     Resolves CUDA wheel URLs from package name and runtime environment.
@@ -255,10 +243,11 @@ class WheelResolver:
         Initialize resolver.
 
         Args:
-            sources: List of WheelSource configurations.
+            sources: List of WheelSource configurations. Defaults to empty
+                (use PACKAGE_REGISTRY in install.py for actual sources).
             overrides: Package-specific URL overrides (package -> template).
         """
-        self.sources = sources or DEFAULT_WHEEL_SOURCES
+        self.sources = sources or []
         self.overrides = overrides or {}
 
     def resolve(
comfy_env/stubs/folder_paths.py CHANGED
@@ -52,6 +52,10 @@ class _ModuleProxy:
         base = _find_comfyui_base()
         return str(base / "input") if base else None
 
+    def get_temp_directory(self):
+        base = _find_comfyui_base()
+        return str(base / "temp") if base else None
+
 # Replace module with proxy instance
 import sys
 sys.modules[__name__] = _ModuleProxy()
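
The new py_tag and cuda_major template variables exist so url_templates such as the Dao-AILab flash-attn one can be filled by plain string formatting. A small illustration with assumed runtime values (the real values come from RuntimeEnv, and the actual resolution logic lives in the resolver/install modules):

    # Illustration only: expanding a registry url_template with the template
    # variables documented above; the runtime values here are made up.
    template = (
        "https://github.com/Dao-AILab/flash-attention/releases/download/"
        "v{version}/flash_attn-{version}%2Bcu{cuda_major}torch{torch_dotted_mm}"
        "cxx11abiTRUE-{py_tag}-{py_tag}-linux_x86_64.whl"
    )
    variables = {
        "version": "2.8.3",        # assumed flash-attn version
        "cuda_major": "12",        # from cuda_version "12.8"
        "torch_dotted_mm": "2.8",  # PyTorch major.minor
        "py_tag": "cp310",         # f"cp{python_short}" as added above
    }
    print(template.format(**variables))
    # .../v2.8.3/flash_attn-2.8.3%2Bcu12torch2.8cxx11abiTRUE-cp310-cp310-linux_x86_64.whl
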
comfy_env/wheel_sources.yml ADDED
@@ -0,0 +1,141 @@
+# Wheel sources registry for CUDA packages
+#
+# Template variables:
+#   {cuda_version}    - Full CUDA version (e.g., "12.8")
+#   {cuda_short}      - CUDA without dot (e.g., "128")
+#   {cuda_short2}     - CUDA short for spconv (e.g., "124" not "1240")
+#   {cuda_major}      - CUDA major version (e.g., "12")
+#   {torch_version}   - Full PyTorch version (e.g., "2.8.0")
+#   {torch_short}     - PyTorch without dots (e.g., "280")
+#   {torch_mm}        - PyTorch major.minor no dot (e.g., "28")
+#   {torch_dotted_mm} - PyTorch major.minor with dot (e.g., "2.8")
+#   {py_version}      - Python version (e.g., "3.10")
+#   {py_short}        - Python without dot (e.g., "310")
+#   {py_minor}        - Python minor version only (e.g., "10")
+#   {py_tag}          - Python tag (e.g., "cp310")
+#   {platform}        - Platform tag (e.g., "linux_x86_64")
+#
+# Install methods:
+#   index          - pip --extra-index-url (PEP 503)
+#   find_links     - pip --find-links
+#   github_index   - GitHub Pages index (--find-links)
+#   pypi_variant   - Package name varies by CUDA (e.g., spconv-cu124)
+#   github_release - Direct wheel URL from GitHub releases
+#   pypi           - Standard PyPI install
+
+packages:
+  # ===========================================================================
+  # PyTorch Geometric (PyG) - official index
+  # https://pytorch-geometric.readthedocs.io/en/latest/install/installation.html
+  # ===========================================================================
+  torch-scatter:
+    method: find_links
+    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
+    description: Scatter operations for PyTorch
+
+  torch-cluster:
+    method: find_links
+    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
+    description: Clustering algorithms for PyTorch
+
+  torch-sparse:
+    method: find_links
+    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
+    description: Sparse tensor operations for PyTorch
+
+  torch-spline-conv:
+    method: find_links
+    index_url: "https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_short}.html"
+    description: Spline convolutions for PyTorch
+
+  # ===========================================================================
+  # pytorch3d - Facebook's official wheels
+  # https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md
+  # ===========================================================================
+  pytorch3d:
+    method: index
+    index_url: "https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py3{py_minor}_cu{cuda_short}_pyt{torch_short}/download.html"
+    description: PyTorch3D - 3D deep learning library
+
+  # ===========================================================================
+  # PozzettiAndrea wheel repos (GitHub Pages indexes)
+  # ===========================================================================
+  nvdiffrast:
+    method: github_index
+    index_url: "https://pozzettiandrea.github.io/nvdiffrast-full-wheels/cu{cuda_short}-torch{torch_short}/"
+    description: NVIDIA differentiable rasterizer
+
+  cumesh:
+    method: github_index
+    index_url: "https://pozzettiandrea.github.io/cumesh-wheels/cu{cuda_short}-torch{torch_short}/"
+    description: CUDA-accelerated mesh utilities
+
+  o_voxel:
+    method: github_index
+    index_url: "https://pozzettiandrea.github.io/ovoxel-wheels/cu{cuda_short}-torch{torch_short}/"
+    description: O-Voxel CUDA extension for TRELLIS
+
+  flex_gemm:
+    method: github_index
+    index_url: "https://pozzettiandrea.github.io/flexgemm-wheels/cu{cuda_short}-torch{torch_short}/"
+    description: Flexible GEMM operations
+
+  nvdiffrec_render:
+    method: github_release
+    description: NVDiffRec rendering utilities
+    sources:
+      - name: PozzettiAndrea
+        url_template: "https://github.com/PozzettiAndrea/nvdiffrec_render-wheels/releases/download/cu{cuda_short}-torch{torch_short}/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-linux_x86_64.whl"
+        platforms: [linux_x86_64]
+      - name: PozzettiAndrea-windows
+        url_template: "https://github.com/PozzettiAndrea/nvdiffrec_render-wheels/releases/download/cu{cuda_short}-torch{torch_short}/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-win_amd64.whl"
+        platforms: [win_amd64]
+
+  # ===========================================================================
+  # spconv - PyPI with CUDA-versioned package names
+  # ===========================================================================
+  spconv:
+    method: pypi_variant
+    package_template: "spconv-cu{cuda_short2}"
+    description: Sparse convolution library (use spconv-cu126 for CUDA 12.6+)
+
+  # ===========================================================================
+  # sageattention - Fast quantized attention
+  # ===========================================================================
+  sageattention:
+    method: github_release
+    description: SageAttention - 2-5x faster than FlashAttention with quantized kernels
+    sources:
+      - name: kijai-hf
+        url_template: "https://huggingface.co/Kijai/PrecompiledWheels/resolve/main/sageattention-{version}-cp312-cp312-linux_x86_64.whl"
+        platforms: [linux_x86_64]
+      - name: woct0rdho
+        url_template: "https://github.com/woct0rdho/SageAttention/releases/download/v2.2.0-windows.post3/sageattention-2.2.0%2Bcu{cuda_short}torch{torch_version}.post3-cp39-abi3-win_amd64.whl"
+        platforms: [win_amd64]
+
+  # ===========================================================================
+  # triton - Required for sageattention on Linux
+  # ===========================================================================
+  triton:
+    method: pypi
+    description: Triton compiler for custom CUDA kernels
+
+  # ===========================================================================
+  # flash-attn - Multi-source prebuilt wheels
+  # ===========================================================================
+  flash-attn:
+    method: github_release
+    description: Flash Attention for fast transformer inference
+    sources:
+      - name: Dao-AILab
+        url_template: "https://github.com/Dao-AILab/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_major}torch{torch_dotted_mm}cxx11abiTRUE-{py_tag}-{py_tag}-linux_x86_64.whl"
+        platforms: [linux_x86_64]
+      - name: mjun0812
+        url_template: "https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.7.7/flash_attn-{version}%2Bcu{cuda_short}torch{torch_dotted_mm}-{py_tag}-{py_tag}-linux_x86_64.whl"
+        platforms: [linux_x86_64]
+      - name: mjun0812-windows
+        url_template: "https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.7.7/flash_attn-{version}%2Bcu{cuda_short}torch{torch_dotted_mm}-{py_tag}-{py_tag}-win_amd64.whl"
+        platforms: [win_amd64]
+      - name: bdashore3
+        url_template: "https://github.com/bdashore3/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_short}torch{torch_version}cxx11abiFALSE-{py_tag}-{py_tag}-win_amd64.whl"
+        platforms: [win_amd64]
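
Of the install methods, pypi_variant is the only one that rewrites the package name rather than a URL. A minimal sketch of how the spconv entry above might resolve under an assumed CUDA 12.4 environment (get_cuda_short2() is the package's real helper; the glue code here is illustrative only):

    # Illustration only: turning the spconv "pypi_variant" entry into a concrete
    # PyPI package name for an assumed CUDA 12.4 environment.
    entry = {"method": "pypi_variant", "package_template": "spconv-cu{cuda_short2}"}
    cuda_short2 = "124"  # e.g. get_cuda_short2("12.4")
    package_name = entry["package_template"].format(cuda_short2=cuda_short2)
    print(package_name)  # spconv-cu124, installed with a normal pip/uv install
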
comfy_env-0.0.8.dist-info/METADATA → comfy_env-0.0.14.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: comfy-env
-Version: 0.0.8
+Version: 0.0.14
 Summary: Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation
 Project-URL: Homepage, https://github.com/PozzettiAndrea/comfy-env
 Project-URL: Repository, https://github.com/PozzettiAndrea/comfy-env
@@ -17,6 +17,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Python: >=3.10
+Requires-Dist: pyyaml>=6.0
 Requires-Dist: tomli>=2.0.0; python_version < '3.11'
 Requires-Dist: uv>=0.4.0
 Provides-Extra: dev
comfy_env-0.0.8.dist-info/RECORD → comfy_env-0.0.14.dist-info/RECORD CHANGED
@@ -1,16 +1,15 @@
-comfy_env/__init__.py,sha256=U5XtB2UeXZi8JPBhlh0XjQprLsAuVslPoJNWOSvCFhs,4249
-comfy_env/cli.py,sha256=9GvQYrXlJRl0ZaCuFHvRtVxWQ34Axd5Brgu5FWRONp4,11424
+comfy_env/__init__.py,sha256=1EjSpAK20Z6RAXKn3onZwj2a_FIctG0E_LhWlE7SgIs,4250
+comfy_env/cli.py,sha256=q4y_tlPyqKMZhge7XeO9VdbFVZ4dl9LZsgnnTVQYXD4,15979
 comfy_env/decorator.py,sha256=daFR5aLzshkmo5sRKhSGPcTUgIUWml7Gs6A1bfnDuyc,15718
-comfy_env/errors.py,sha256=egeyXY-j7KpxyA0s67TcJLEfJX23LCAD3v1P4FgQIGE,10917
-comfy_env/install.py,sha256=JaatBM5PyY4Ua6E2HgkLAlFZ2TSEjlI-lAwJ-Hl7wU8,18876
-comfy_env/registry.py,sha256=iZprw2iIAJl-Vvotsf_B_PofzqE-6IiVfNPBHYUYg6g,11577
-comfy_env/resolver.py,sha256=xz7GPlxy02iwwpocIzzbdGnrwnSpi-D5IzpL8SQSgvI,12893
-comfy_env/runner.py,sha256=0YUqzK93u--7pKV6_yVC564AJE9rS3y81t5ZhQi2t4Y,9696
+comfy_env/errors.py,sha256=8hN8NDlo8oBUdapc-eT3ZluigI5VBzfqsSBvQdfWlz4,9943
+comfy_env/install.py,sha256=q8x7sQmCWMQyUjeBcbrg39eAP3ZPars54iepupMe2pE,18922
+comfy_env/registry.py,sha256=uFCtGmWYvwGCqObXgzmArX7o5JsFNsHXxayofk3m6no,2569
+comfy_env/resolver.py,sha256=Z-INLhmIxZBXpETYlxUgUIndEVJCswOEHQQQdj3eC-o,12465
 comfy_env/env/__init__.py,sha256=sybOBrxJCfL4Xry9NNd5xwn9hXIHudXlXDa7SpJkPCE,811
-comfy_env/env/config.py,sha256=fL2P0ScoioPktZEHQnoo1dy-VB5intJEYLHm5fuOmF8,5406
-comfy_env/env/config_file.py,sha256=QLf3WPCqx46v1kVzM4jV_QAz919zmkk0_FRtLD3jO8s,19507
-comfy_env/env/detection.py,sha256=Co8BJmTRCq1ZHDsm6832jF87za0GRAhH7zF04-5QwcE,4949
-comfy_env/env/manager.py,sha256=ysIVlpxRD5x-5X47ESmfly2Vz6jv-9yA6863szHf7-8,24131
+comfy_env/env/config.py,sha256=R8JyE5iQLHKgnxXOGA8SAI7iu2eYSfXn-MsaqHoU2_A,5667
+comfy_env/env/config_file.py,sha256=6Mh2VsNvyFJuTL4_vOX0sG3Q5SMHLI1P97aY8FFvwnk,20696
+comfy_env/env/detection.py,sha256=L4kXKGs9j7Vw4Qeh6Zw5PfVZW0dN2zuJqojDPgINIhA,5653
+comfy_env/env/manager.py,sha256=cmHd9Lc9cxvL3nLvgBbIxdIuyMb7519n0emjhUzJ3oI,24776
 comfy_env/env/security.py,sha256=dNSitAnfBNVdvxgBBntYw33AJaCs_S1MHb7KJhAVYzM,8171
 comfy_env/env/platform/__init__.py,sha256=Nb5MPZIEeanSMEWwqU4p4bnEKTJn1tWcwobnhq9x9IY,614
 comfy_env/env/platform/base.py,sha256=iS0ptTTVjXRwPU4qWUdvHI7jteuzxGSjWr5BUQ7hGiU,2453
@@ -18,22 +17,23 @@ comfy_env/env/platform/darwin.py,sha256=HK3VkLT6DfesAnIXwx2IaUFHTBclF0xTQnC7azWY
 comfy_env/env/platform/linux.py,sha256=xLp8FEbFqZLQrzIZBI9z3C4g23Ab1ASTHLsXDzsdCoA,2062
 comfy_env/env/platform/windows.py,sha256=nD1-bKU2rGmEJlS-cc5yWXMSA51YQtVupn-lQEO5UYA,14840
 comfy_env/ipc/__init__.py,sha256=pTjgJn5YJxLXmEvuKh3lkCEJQs-6W6_F01jfkFMUi0c,1375
-comfy_env/ipc/bridge.py,sha256=kEy__kco8FVQNj5MyadF5k00YEivcGmifAJAOfr643U,17645
+comfy_env/ipc/bridge.py,sha256=zcyN3xzV4WWBrBFNwCniPBR58dLCg46-k9TtyW5U000,16437
 comfy_env/ipc/protocol.py,sha256=gfWe5yEDUn4QWhcdWFcxn40GqxlW1Uf23j0edOzPPng,7951
 comfy_env/ipc/tensor.py,sha256=DyU28GymKkLPVwzZyKdm2Av222hdaycMgv3KdL5mtO0,12009
 comfy_env/ipc/torch_bridge.py,sha256=WzdwDJa3N_1fEl9OeZxikvMbwryO5P63o0WmEDpS18A,13206
 comfy_env/ipc/transport.py,sha256=XQlRcfQsd4nd909KIYnZKvsS3ksGpGjyVucn8jvmLIU,9698
 comfy_env/ipc/worker.py,sha256=oxTLF9xXrl8CRx_JVNBdkxZh35NuzfkdxhaUtUuXogs,6661
 comfy_env/stubs/__init__.py,sha256=jMeWEKY30y8QqYX9AUyuZbmm607erQTc4N7YaDoAH00,38
-comfy_env/stubs/folder_paths.py,sha256=KEH9ntMH6HOgE595G5dBL9kSTEUvf_shYtQKIyiiDbk,1586
+comfy_env/stubs/folder_paths.py,sha256=Pv30qHeHBmfK5icIpmXnTGvZkRMJHDrTh9mQ1WMclLg,1713
 comfy_env/workers/__init__.py,sha256=IKZwOvrWOGqBLDUIFAalg4CdqzJ_YnAdxo2Ha7gZTJ0,1467
 comfy_env/workers/base.py,sha256=ZILYXlvGCWuCZXmjKqfG8VeD19ihdYaASdlbasl2BMo,2312
 comfy_env/workers/pool.py,sha256=MtjeOWfvHSCockq8j1gfnxIl-t01GSB79T5N4YB82Lg,6956
 comfy_env/workers/tensor_utils.py,sha256=TCuOAjJymrSbkgfyvcKtQ_KbVWTqSwP9VH_bCaFLLq8,6409
 comfy_env/workers/torch_mp.py,sha256=DsfxE3LBAWEuGtk-p-YL0UhVQ7VDh73KT_TFRxYN4-Q,12563
 comfy_env/workers/venv.py,sha256=_ekHfZPqBIPY08DjqiXm6cTBQH4DrbxRWR3AAv3mit8,31589
-comfy_env-0.0.8.dist-info/METADATA,sha256=mir9pZymbCPcnI6yc1YrjxyMMnX3qm3pUR7NILB5iCE,5371
-comfy_env-0.0.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-comfy_env-0.0.8.dist-info/entry_points.txt,sha256=J4fXeqgxU_YenuW_Zxn_pEL7J-3R0--b6MS5t0QmAr0,49
-comfy_env-0.0.8.dist-info/licenses/LICENSE,sha256=E68QZMMpW4P2YKstTZ3QU54HRQO8ecew09XZ4_Vn870,1093
-comfy_env-0.0.8.dist-info/RECORD,,
+comfy_env/wheel_sources.yml,sha256=ubVuQllCQGkZhLNQaG54divCwn0zLzYg4turzhnIZQ8,7150
+comfy_env-0.0.14.dist-info/METADATA,sha256=DAAZ96UOmA7GyMRPkMBNkYBjICnR8cBkUEKvA9mledo,5399
+comfy_env-0.0.14.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+comfy_env-0.0.14.dist-info/entry_points.txt,sha256=J4fXeqgxU_YenuW_Zxn_pEL7J-3R0--b6MS5t0QmAr0,49
+comfy_env-0.0.14.dist-info/licenses/LICENSE,sha256=E68QZMMpW4P2YKstTZ3QU54HRQO8ecew09XZ4_Vn870,1093
+comfy_env-0.0.14.dist-info/RECORD,,