comfy-env 0.0.66__tar.gz → 0.0.68__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {comfy_env-0.0.66 → comfy_env-0.0.68}/PKG-INFO +22 -92
  2. {comfy_env-0.0.66 → comfy_env-0.0.68}/README.md +21 -90
  3. {comfy_env-0.0.66 → comfy_env-0.0.68}/pyproject.toml +1 -5
  4. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/__init__.py +0 -2
  5. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/cli.py +1 -202
  6. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/__init__.py +3 -3
  7. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/core.py +21 -27
  8. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/workers/mp.py +60 -12
  9. comfy_env-0.0.66/src/comfy_env/pixi/registry.py +0 -130
  10. comfy_env-0.0.66/src/comfy_env/pixi/wheel_sources.yml +0 -141
  11. {comfy_env-0.0.66 → comfy_env-0.0.68}/.github/workflows/publish.yml +0 -0
  12. {comfy_env-0.0.66 → comfy_env-0.0.68}/.gitignore +0 -0
  13. {comfy_env-0.0.66 → comfy_env-0.0.68}/LICENSE +0 -0
  14. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/config/__init__.py +0 -0
  15. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/config/parser.py +0 -0
  16. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/config/types.py +0 -0
  17. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/errors.py +0 -0
  18. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/install.py +0 -0
  19. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/isolation/__init__.py +0 -0
  20. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/isolation/wrap.py +0 -0
  21. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/nodes.py +0 -0
  22. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/cuda_detection.py +0 -0
  23. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/platform/__init__.py +0 -0
  24. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/platform/base.py +0 -0
  25. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/platform/darwin.py +0 -0
  26. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/platform/linux.py +0 -0
  27. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/platform/windows.py +0 -0
  28. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/resolver.py +0 -0
  29. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/prestartup.py +0 -0
  30. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/templates/comfy-env-instructions.txt +0 -0
  31. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/templates/comfy-env.toml +0 -0
  32. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/workers/__init__.py +0 -0
  33. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/workers/base.py +0 -0
  34. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/workers/subprocess.py +0 -0
  35. {comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/workers/tensor_utils.py +0 -0
{comfy_env-0.0.66 → comfy_env-0.0.68}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: comfy-env
- Version: 0.0.66
+ Version: 0.0.68
  Summary: Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation
  Project-URL: Homepage, https://github.com/PozzettiAndrea/comfy-env
  Project-URL: Repository, https://github.com/PozzettiAndrea/comfy-env
@@ -17,7 +17,6 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Requires-Python: >=3.10
- Requires-Dist: pyyaml>=6.0
  Requires-Dist: tomli-w>=1.0.0
  Requires-Dist: tomli>=2.0.0; python_version < '3.11'
  Requires-Dist: uv>=0.4.0
@@ -69,8 +68,7 @@ Create a `comfy-env.toml` in your node directory:

  ```toml
  [cuda]
- nvdiffrast = "0.4.0"
- pytorch3d = "0.7.9"
+ packages = ["nvdiffrast", "pytorch3d"]

  [packages]
  requirements = ["transformers>=4.56", "pillow"]
@@ -153,12 +151,6 @@ comfy-env install
  # Dry run (show what would be installed)
  comfy-env install --dry-run

- # Resolve wheel URLs without installing
- comfy-env resolve nvdiffrast==0.4.0
-
- # List all packages in the built-in registry
- comfy-env list-packages
-
  # Verify installation
  comfy-env doctor
  ```
@@ -168,11 +160,9 @@ comfy-env doctor
  ### Simple Format (comfy-env.toml)

  ```toml
- # CUDA packages (uses built-in registry)
+ # CUDA packages from https://pozzettiandrea.github.io/cuda-wheels/
  [cuda]
- nvdiffrast = "0.4.0"
- pytorch3d = "0.7.9"
- torch-scatter = "2.1.2"
+ packages = ["nvdiffrast", "pytorch3d", "torch-scatter"]

  # Regular pip packages
  [packages]
@@ -186,7 +176,7 @@ requirements = ["transformers>=4.56", "pillow"]
  linux = ["libgl1", "libopengl0"] # apt packages

  [local.cuda]
- nvdiffrast = "0.4.0"
+ packages = ["nvdiffrast"]

  [local.packages]
  requirements = ["pillow", "numpy"]
@@ -197,82 +187,32 @@ python = "3.10"
  cuda = "12.8"

  [myenv.cuda]
- torch-scatter = "2.1.2"
+ packages = ["torch-scatter"]

  [myenv.packages]
  requirements = ["transformers>=4.56"]
-
- # Custom wheel templates (override built-in registry)
- [wheel_sources]
- my-custom-pkg = "https://my-server.com/my-pkg-{version}+cu{cuda_short}-{py_tag}-{platform}.whl"
- ```
-
- ## Writing Wheel Templates
-
- ### Template Variables
-
- | Variable | Example | Description |
- |----------|---------|-------------|
- | `{version}` | `0.4.0` | Package version |
- | `{cuda_version}` | `12.8` | Full CUDA version |
- | `{cuda_short}` | `128` | CUDA without dot |
- | `{cuda_major}` | `12` | CUDA major only |
- | `{torch_version}` | `2.8.0` | Full PyTorch version |
- | `{torch_mm}` | `28` | PyTorch major.minor no dot |
- | `{torch_dotted_mm}` | `2.8` | PyTorch major.minor with dot |
- | `{py_version}` | `3.10` | Python version |
- | `{py_short}` | `310` | Python without dot |
- | `{py_tag}` | `cp310` | Python wheel tag |
- | `{platform}` | `linux_x86_64` | Platform tag |
-
- ### Common Wheel URL Patterns
-
- **Pattern 1: Simple CUDA + Python**
- ```
- https://example.com/{package}-{version}+cu{cuda_short}-{py_tag}-{py_tag}-{platform}.whl
- ```
-
- **Pattern 2: CUDA + PyTorch**
- ```
- https://example.com/{package}-{version}+cu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl
- ```
-
- **Pattern 3: GitHub Releases**
- ```
- https://github.com/org/repo/releases/download/v{version}/{package}-{version}+cu{cuda_short}-{py_tag}-{platform}.whl
- ```
-
- ### How to Find the Right Template
-
- 1. Download a wheel manually from the source
- 2. Look at the filename pattern: `nvdiffrast-0.4.0+cu128torch28-cp310-cp310-linux_x86_64.whl`
- 3. Replace values with variables: `nvdiffrast-{version}+cu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl`
- 4. Prepend the base URL
-
- ### Testing Your Template
-
- ```bash
- comfy-env resolve my-package==1.0.0
  ```

- This shows the resolved URL without installing.
+ ## CUDA Wheels Index

- ### Adding Custom Wheel Sources
+ CUDA packages are installed from the [cuda-wheels](https://pozzettiandrea.github.io/cuda-wheels/) index, which provides pre-built wheels for:

- If a package isn't in the built-in registry, add it to your `comfy-env.toml`:
+ - **PyTorch Geometric**: torch-scatter, torch-cluster, torch-sparse, torch-spline-conv
+ - **NVIDIA**: nvdiffrast, pytorch3d, gsplat
+ - **Attention**: flash-attn, sageattention
+ - **Mesh Processing**: cumesh, cubvh
+ - **Others**: spconv, detectron2, lietorch, and more

- ```toml
- [cuda]
- my-custom-pkg = "1.0.0"
+ Wheels are automatically selected based on your GPU, CUDA version, PyTorch version, and Python version.

- [wheel_sources]
- my-custom-pkg = "https://my-server.com/my-custom-pkg-{version}+cu{cuda_short}-{py_tag}-{platform}.whl"
- ```
+ ### Supported Configurations

- Resolution order:
- 1. User's `[wheel_sources]` in comfy-env.toml (highest priority)
- 2. Built-in `wheel_sources.yml` registry
- 3. Error if not found
+ | GPU Architecture | CUDA | PyTorch |
+ |-----------------|------|---------|
+ | Blackwell (sm_100+) | 12.8 | 2.8+ |
+ | Ada/Hopper/Ampere (sm_80+) | 12.8 | 2.8 |
+ | Turing (sm_75) | 12.8 | 2.8 |
+ | Pascal (sm_60) | 12.4 | 2.4 |

  ## API Reference

@@ -300,7 +240,7 @@ env = RuntimeEnv.detect()
  print(env)
  # Python 3.10, CUDA 12.8, PyTorch 2.8.0, GPU: NVIDIA GeForce RTX 4090

- # Get template variables
+ # Get environment variables
  vars_dict = env.as_dict()
  # {'cuda_version': '12.8', 'cuda_short': '128', 'torch_mm': '28', ...}
  ```
@@ -345,16 +285,6 @@ print(get_gpu_summary())
  # GPU 0: NVIDIA GeForce RTX 5090 (sm_120) [Blackwell - CUDA 12.8]
  ```

- ## Built-in Package Registry
-
- Run `comfy-env list-packages` to see all packages in the built-in registry.
-
- The registry includes:
- - PyTorch Geometric packages (torch-scatter, torch-cluster, torch-sparse)
- - NVIDIA packages (nvdiffrast, pytorch3d, gsplat)
- - Flash Attention (flash-attn)
- - And more
-
  ## License

  MIT - see LICENSE file.
{comfy_env-0.0.66 → comfy_env-0.0.68}/README.md

@@ -40,8 +40,7 @@ Create a `comfy-env.toml` in your node directory:

  ```toml
  [cuda]
- nvdiffrast = "0.4.0"
- pytorch3d = "0.7.9"
+ packages = ["nvdiffrast", "pytorch3d"]

  [packages]
  requirements = ["transformers>=4.56", "pillow"]
@@ -124,12 +123,6 @@ comfy-env install
  # Dry run (show what would be installed)
  comfy-env install --dry-run

- # Resolve wheel URLs without installing
- comfy-env resolve nvdiffrast==0.4.0
-
- # List all packages in the built-in registry
- comfy-env list-packages
-
  # Verify installation
  comfy-env doctor
  ```
@@ -139,11 +132,9 @@ comfy-env doctor
  ### Simple Format (comfy-env.toml)

  ```toml
- # CUDA packages (uses built-in registry)
+ # CUDA packages from https://pozzettiandrea.github.io/cuda-wheels/
  [cuda]
- nvdiffrast = "0.4.0"
- pytorch3d = "0.7.9"
- torch-scatter = "2.1.2"
+ packages = ["nvdiffrast", "pytorch3d", "torch-scatter"]

  # Regular pip packages
  [packages]
@@ -157,7 +148,7 @@ requirements = ["transformers>=4.56", "pillow"]
  linux = ["libgl1", "libopengl0"] # apt packages

  [local.cuda]
- nvdiffrast = "0.4.0"
+ packages = ["nvdiffrast"]

  [local.packages]
  requirements = ["pillow", "numpy"]
@@ -168,82 +159,32 @@ python = "3.10"
  cuda = "12.8"

  [myenv.cuda]
- torch-scatter = "2.1.2"
+ packages = ["torch-scatter"]

  [myenv.packages]
  requirements = ["transformers>=4.56"]
-
- # Custom wheel templates (override built-in registry)
- [wheel_sources]
- my-custom-pkg = "https://my-server.com/my-pkg-{version}+cu{cuda_short}-{py_tag}-{platform}.whl"
- ```
-
- ## Writing Wheel Templates
-
- ### Template Variables
-
- | Variable | Example | Description |
- |----------|---------|-------------|
- | `{version}` | `0.4.0` | Package version |
- | `{cuda_version}` | `12.8` | Full CUDA version |
- | `{cuda_short}` | `128` | CUDA without dot |
- | `{cuda_major}` | `12` | CUDA major only |
- | `{torch_version}` | `2.8.0` | Full PyTorch version |
- | `{torch_mm}` | `28` | PyTorch major.minor no dot |
- | `{torch_dotted_mm}` | `2.8` | PyTorch major.minor with dot |
- | `{py_version}` | `3.10` | Python version |
- | `{py_short}` | `310` | Python without dot |
- | `{py_tag}` | `cp310` | Python wheel tag |
- | `{platform}` | `linux_x86_64` | Platform tag |
-
- ### Common Wheel URL Patterns
-
- **Pattern 1: Simple CUDA + Python**
- ```
- https://example.com/{package}-{version}+cu{cuda_short}-{py_tag}-{py_tag}-{platform}.whl
- ```
-
- **Pattern 2: CUDA + PyTorch**
- ```
- https://example.com/{package}-{version}+cu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl
- ```
-
- **Pattern 3: GitHub Releases**
- ```
- https://github.com/org/repo/releases/download/v{version}/{package}-{version}+cu{cuda_short}-{py_tag}-{platform}.whl
- ```
-
- ### How to Find the Right Template
-
- 1. Download a wheel manually from the source
- 2. Look at the filename pattern: `nvdiffrast-0.4.0+cu128torch28-cp310-cp310-linux_x86_64.whl`
- 3. Replace values with variables: `nvdiffrast-{version}+cu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl`
- 4. Prepend the base URL
-
- ### Testing Your Template
-
- ```bash
- comfy-env resolve my-package==1.0.0
  ```

- This shows the resolved URL without installing.
+ ## CUDA Wheels Index

- ### Adding Custom Wheel Sources
+ CUDA packages are installed from the [cuda-wheels](https://pozzettiandrea.github.io/cuda-wheels/) index, which provides pre-built wheels for:

- If a package isn't in the built-in registry, add it to your `comfy-env.toml`:
+ - **PyTorch Geometric**: torch-scatter, torch-cluster, torch-sparse, torch-spline-conv
+ - **NVIDIA**: nvdiffrast, pytorch3d, gsplat
+ - **Attention**: flash-attn, sageattention
+ - **Mesh Processing**: cumesh, cubvh
+ - **Others**: spconv, detectron2, lietorch, and more

- ```toml
- [cuda]
- my-custom-pkg = "1.0.0"
+ Wheels are automatically selected based on your GPU, CUDA version, PyTorch version, and Python version.

- [wheel_sources]
- my-custom-pkg = "https://my-server.com/my-custom-pkg-{version}+cu{cuda_short}-{py_tag}-{platform}.whl"
- ```
+ ### Supported Configurations

- Resolution order:
- 1. User's `[wheel_sources]` in comfy-env.toml (highest priority)
- 2. Built-in `wheel_sources.yml` registry
- 3. Error if not found
+ | GPU Architecture | CUDA | PyTorch |
+ |-----------------|------|---------|
+ | Blackwell (sm_100+) | 12.8 | 2.8+ |
+ | Ada/Hopper/Ampere (sm_80+) | 12.8 | 2.8 |
+ | Turing (sm_75) | 12.8 | 2.8 |
+ | Pascal (sm_60) | 12.4 | 2.4 |

  ## API Reference

@@ -271,7 +212,7 @@ env = RuntimeEnv.detect()
  print(env)
  # Python 3.10, CUDA 12.8, PyTorch 2.8.0, GPU: NVIDIA GeForce RTX 4090

- # Get template variables
+ # Get environment variables
  vars_dict = env.as_dict()
  # {'cuda_version': '12.8', 'cuda_short': '128', 'torch_mm': '28', ...}
  ```
@@ -316,16 +257,6 @@ print(get_gpu_summary())
  # GPU 0: NVIDIA GeForce RTX 5090 (sm_120) [Blackwell - CUDA 12.8]
  ```

- ## Built-in Package Registry
-
- Run `comfy-env list-packages` to see all packages in the built-in registry.
-
- The registry includes:
- - PyTorch Geometric packages (torch-scatter, torch-cluster, torch-sparse)
- - NVIDIA packages (nvdiffrast, pytorch3d, gsplat)
- - Flash Attention (flash-attn)
- - And more
-
  ## License

  MIT - see LICENSE file.
{comfy_env-0.0.66 → comfy_env-0.0.68}/pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "comfy-env"
- version = "0.0.66"
+ version = "0.0.68"
  description = "Environment management for ComfyUI custom nodes - CUDA wheel resolution and process isolation"
  readme = "README.md"
  license = {text = "MIT"}
@@ -22,7 +22,6 @@ dependencies = [
  "tomli>=2.0.0; python_version < '3.11'", # TOML parsing (built-in tomllib for 3.11+)
  "tomli-w>=1.0.0", # TOML writing (no stdlib equivalent)
  "uv>=0.4.0", # Fast Python package installer and venv creator
- "pyyaml>=6.0", # YAML parsing for wheel_sources.yml
  ]

  [project.optional-dependencies]
@@ -43,9 +42,6 @@ build-backend = "hatchling.build"
  [tool.hatch.build.targets.wheel]
  packages = ["src/comfy_env"]

- [tool.hatch.build.targets.wheel.force-include]
- "src/comfy_env/pixi/wheel_sources.yml" = "comfy_env/pixi/wheel_sources.yml"
-
  [tool.ruff]
  line-length = 100
  target-version = "py310"
{comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/__init__.py

@@ -32,7 +32,6 @@ from .pixi import (
  pixi_run,
  pixi_install,
  CUDA_WHEELS_INDEX,
- PACKAGE_REGISTRY,
  detect_cuda_version,
  detect_cuda_environment,
  get_recommended_cuda_version,
@@ -88,7 +87,6 @@ __all__ = [
  "pixi_run",
  "pixi_install",
  "CUDA_WHEELS_INDEX",
- "PACKAGE_REGISTRY",
  # CUDA detection
  "detect_cuda_version",
  "detect_cuda_environment",
{comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/cli.py

@@ -6,9 +6,7 @@ Provides the `comfy-env` command with subcommands:
  - generate: Generate pixi.toml from comfy-env.toml
  - install: Install dependencies from config
  - info: Show runtime environment information
- - resolve: Show resolved wheel URLs
  - doctor: Verify installation
- - list-packages: Show all packages in the built-in registry

  Usage:
  comfy-env init ---> creates template comfy-env.toml
@@ -18,12 +16,7 @@ Usage:

  comfy-env info

- comfy-env resolve nvdiffrast==0.4.0
- comfy-env resolve --all
-
  comfy-env doctor
-
- comfy-env list-packages
  """

  import argparse
@@ -109,28 +102,6 @@ def main(args: Optional[List[str]] = None) -> int:
  help="Output as JSON",
  )

- # resolve command
- resolve_parser = subparsers.add_parser(
- "resolve",
- help="Resolve wheel URLs for packages",
- description="Show resolved wheel URLs without installing",
- )
- resolve_parser.add_argument(
- "packages",
- nargs="*",
- help="Package specs (e.g., nvdiffrast==0.4.0)",
- )
- resolve_parser.add_argument(
- "--all", "-a",
- action="store_true",
- help="Resolve all packages from config",
- )
- resolve_parser.add_argument(
- "--config", "-c",
- type=str,
- help="Path to config file",
- )
-
  # doctor command
  doctor_parser = subparsers.add_parser(
  "doctor",
@@ -148,18 +119,6 @@ def main(args: Optional[List[str]] = None) -> int:
  help="Path to config file",
  )

- # list-packages command
- list_parser = subparsers.add_parser(
- "list-packages",
- help="Show all packages in the built-in registry",
- description="List CUDA packages that comfy-env knows how to install",
- )
- list_parser.add_argument(
- "--json",
- action="store_true",
- help="Output as JSON",
- )
-
  parsed = parser.parse_args(args)

  if parsed.command is None:
@@ -175,12 +134,8 @@ def main(args: Optional[List[str]] = None) -> int:
  return cmd_install(parsed)
  elif parsed.command == "info":
  return cmd_info(parsed)
- elif parsed.command == "resolve":
- return cmd_resolve(parsed)
  elif parsed.command == "doctor":
  return cmd_doctor(parsed)
- elif parsed.command == "list-packages":
- return cmd_list_packages(parsed)
  else:
  parser.print_help()
  return 1
@@ -206,7 +161,7 @@ cuda_version = "auto"
  pytorch_version = "auto"

  [environment.cuda]
- # CUDA packages from comfy-env registry
+ # CUDA packages from https://pozzettiandrea.github.io/cuda-wheels/
  # Example: nvdiffrast = "0.4.0"

  [environment.packages]
@@ -332,95 +287,6 @@ def cmd_info(args) -> int:
  return 0


- def cmd_resolve(args) -> int:
- """Handle resolve command."""
- from .pixi import RuntimeEnv, parse_wheel_requirement
- from .pixi import PACKAGE_REGISTRY
- from .pixi.registry import get_cuda_short2
- from .config.parser import discover_env_config, load_env_from_file
-
- env = RuntimeEnv.detect()
- packages = []
-
- # Get packages from args or config
- if args.all or (not args.packages and args.config):
- if args.config:
- config = load_env_from_file(Path(args.config))
- else:
- config = discover_env_config(Path.cwd())
-
- if config and config.no_deps_requirements:
- packages = config.no_deps_requirements
- else:
- print("No CUDA packages found in config", file=sys.stderr)
- return 1
- elif args.packages:
- packages = args.packages
- else:
- print("Specify packages or use --all with a config file", file=sys.stderr)
- return 1
-
- print(f"Resolving wheels for: {env}")
- print("=" * 60)
-
- # Build template variables
- vars_dict = env.as_dict()
- if env.cuda_version:
- vars_dict["cuda_short2"] = get_cuda_short2(env.cuda_version)
-
- all_ok = True
- for pkg_spec in packages:
- package, version = parse_wheel_requirement(pkg_spec)
- pkg_lower = package.lower()
-
- try:
- if pkg_lower in PACKAGE_REGISTRY:
- config = PACKAGE_REGISTRY[pkg_lower]
-
- if "wheel_template" in config:
- # Direct wheel URL template
- effective_version = version or config.get("default_version")
- if not effective_version:
- print(f" {package}: No version specified (no default in registry)")
- all_ok = False
- continue
-
- vars_dict["version"] = effective_version
- url = _substitute_template(config["wheel_template"], vars_dict)
- print(f" {package}=={effective_version}: resolved")
- print(f" {url}")
-
- elif "package_name" in config:
- # PyPI variant (e.g., spconv-cu124)
- pkg_name = _substitute_template(config["package_name"], vars_dict)
- pkg_spec = f"{pkg_name}=={version}" if version else pkg_name
- print(f" {package}: installs as {pkg_spec} from PyPI")
-
- else:
- print(f" {package}: no wheel_template or package_name in registry")
- all_ok = False
- else:
- print(f" {package}: NOT in registry")
- print(f" Add to [wheel_sources] in comfy-env.toml:")
- print(f' {package} = "https://example.com/{package}-{{version}}+cu{{cuda_short}}-{{py_tag}}-{{platform}}.whl"')
- all_ok = False
-
- except Exception as e:
- print(f" {package}: FAILED - {e}")
- all_ok = False
-
- return 0 if all_ok else 1
-
-
- def _substitute_template(template: str, vars_dict: dict) -> str:
- """Substitute {var} placeholders in template."""
- result = template
- for key, value in vars_dict.items():
- if value is not None:
- result = result.replace(f"{{{key}}}", str(value))
- return result
-
-
  def cmd_doctor(args) -> int:
  """Handle doctor command."""
  from .install import verify_installation
@@ -466,72 +332,5 @@ def cmd_doctor(args) -> int:
  return 0


- def cmd_list_packages(args) -> int:
- """Handle list-packages command."""
- from .pixi import PACKAGE_REGISTRY
-
- if args.json:
- import json
- result = {}
- for name, config in PACKAGE_REGISTRY.items():
- result[name] = {
- "description": config.get("description", ""),
- }
- if "wheel_template" in config:
- result[name]["wheel_template"] = config["wheel_template"]
- if "package_name" in config:
- result[name]["package_name"] = config["package_name"]
- if "default_version" in config:
- result[name]["default_version"] = config["default_version"]
- print(json.dumps(result, indent=2))
- return 0
-
- print("Built-in CUDA Package Registry")
- print("=" * 60)
- print()
- print("These packages can be installed by adding them to comfy-env.toml:")
- print()
- print(" [cuda]")
- print(' nvdiffrast = "0.4.0"')
- print(' torch-scatter = "2.1.2"')
- print()
- print("Or override with custom wheel source:")
- print()
- print(" [wheel_sources]")
- print(' nvdiffrast = "https://my-mirror.com/nvdiffrast-{version}+cu{cuda_short}-{py_tag}-{platform}.whl"')
- print()
- print("-" * 60)
-
- # Group by type
- wheel_template_packages = []
- package_name_packages = []
-
- for name, config in PACKAGE_REGISTRY.items():
- desc = config.get("description", "")
- default = config.get("default_version", "")
- if "wheel_template" in config:
- wheel_template_packages.append((name, desc, default))
- elif "package_name" in config:
- package_name_packages.append((name, desc, config["package_name"]))
-
- if wheel_template_packages:
- print("\nDirect wheel URL packages:")
- for name, desc, default in sorted(wheel_template_packages):
- version_info = f" (default: {default})" if default else ""
- print(f" {name:20} - {desc}{version_info}")
-
- if package_name_packages:
- print("\nPyPI variant packages:")
- for name, desc, pkg_template in sorted(package_name_packages):
- print(f" {name:20} - {desc}")
- print(f" installs as: {pkg_template}")
-
- print()
- print("Template variables: {version}, {cuda_short}, {torch_mm}, {py_tag}, {platform}")
- print("See README for full documentation on writing wheel templates.")
- print()
- return 0
-
-
  if __name__ == "__main__":
  sys.exit(main())
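After this change the CLI surface is `init`, `generate`, `install`, `info`, and `doctor`. A minimal sketch of driving the remaining subcommands programmatically, assuming the `src/` layout above maps to the import path `comfy_env.cli` (the hunk headers show the signature `main(args: Optional[List[str]] = None) -> int`):

```python
# Sketch only: exercises the surviving comfy-env subcommands via main(args).
from comfy_env.cli import main

for argv in (["info"], ["install", "--dry-run"], ["doctor"]):
    code = main(argv)
    print(f"comfy-env {' '.join(argv)} -> exit code {code}")
```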
{comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/__init__.py

@@ -13,7 +13,6 @@ from .core import (
  clean_pixi_artifacts,
  CUDA_WHEELS_INDEX,
  )
- from .registry import PACKAGE_REGISTRY
  from .cuda_detection import (
  detect_cuda_version,
  detect_cuda_environment,
@@ -24,6 +23,7 @@ from .cuda_detection import (
  GPUInfo,
  CUDAEnvironment,
  )
+ from .resolver import RuntimeEnv

  __all__ = [
  # Core pixi functions
@@ -34,8 +34,6 @@ __all__ = [
  "pixi_install",
  "clean_pixi_artifacts",
  "CUDA_WHEELS_INDEX",
- # Registry
- "PACKAGE_REGISTRY",
  # CUDA detection
  "detect_cuda_version",
  "detect_cuda_environment",
@@ -45,4 +43,6 @@ __all__ = [
  "get_recommended_cuda_version",
  "GPUInfo",
  "CUDAEnvironment",
+ # Resolver
+ "RuntimeEnv",
  ]
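With `PACKAGE_REGISTRY` gone and `RuntimeEnv` re-exported from `comfy_env.pixi`, the environment probe described in the README diff above is reachable as in this sketch (the `detect()`/`as_dict()` methods and the dictionary keys are taken from the README examples; treat anything else as an assumption):

```python
# Sketch of the new import surface after this change.
from comfy_env.pixi import CUDA_WHEELS_INDEX, RuntimeEnv

env = RuntimeEnv.detect()
print(env)  # e.g. "Python 3.10, CUDA 12.8, PyTorch 2.8.0, GPU: NVIDIA GeForce RTX 4090"

vars_dict = env.as_dict()
# Keys such as 'cuda_version', 'cuda_short' and 'torch_mm' are what drive wheel selection.
print(vars_dict.get("cuda_version"), vars_dict.get("torch_mm"))
print("CUDA wheels index:", CUDA_WHEELS_INDEX)
```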
{comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/pixi/core.py

@@ -252,24 +252,10 @@ def pixi_install(
  dependencies.setdefault("pip", "*") # Always include pip
  pixi_data["dependencies"] = dependencies

- # Add pypi-options for CUDA wheels
+ # Add pypi-options for PyTorch index (CUDA packages installed separately via pip)
  if cfg.has_cuda and cuda_version:
  pypi_options = pixi_data.get("pypi-options", {})
- # Merge find-links (pixi expects [{url: "..."}, ...] format)
- find_links = pypi_options.get("find-links", [])
- existing_urls = {
- entry.get("url") if isinstance(entry, dict) else entry
- for entry in find_links
- }
- if CUDA_WHEELS_INDEX not in existing_urls:
- find_links.append({"url": CUDA_WHEELS_INDEX})
- # Normalize any plain strings to {url: ...} format
- find_links = [
- {"url": entry} if isinstance(entry, str) else entry
- for entry in find_links
- ]
- pypi_options["find-links"] = find_links
- # Merge extra-index-urls
+ # Add PyTorch CUDA index for torch installation
  cuda_short = cuda_version.replace(".", "")[:3]
  pytorch_index = f"https://download.pytorch.org/whl/cu{cuda_short}"
  extra_urls = pypi_options.get("extra-index-urls", [])
@@ -311,23 +297,31 @@ def pixi_install(
  log(f"pixi install failed:\n{result.stderr}")
  raise RuntimeError(f"pixi install failed: {result.stderr}")

- # Install CUDA packages with --no-deps (avoids PyPI version conflicts)
+ # Install CUDA packages with --no-index --find-links (bypasses PyPI completely)
  if cfg.cuda_packages and cuda_version:
- log(f"Installing CUDA packages with --no-deps: {cfg.cuda_packages}")
+ log(f"Installing CUDA packages: {cfg.cuda_packages}")
  python_path = get_pixi_python(node_dir)
  if not python_path:
  raise RuntimeError("Could not find Python in pixi environment")

- pip_cmd = [
- str(python_path), "-m", "pip", "install",
- "--no-deps",
- "--find-links", CUDA_WHEELS_INDEX,
- ] + cfg.cuda_packages
+ for package in cfg.cuda_packages:
+ # Each package has its own find-links page at CUDA_WHEELS_INDEX/<package>/
+ find_links_url = f"{CUDA_WHEELS_INDEX}{package}/"
+ log(f" Installing {package} from {find_links_url}")
+
+ pip_cmd = [
+ str(python_path), "-m", "pip", "install",
+ "--no-index",
+ "--no-deps",
+ "--no-cache-dir",
+ "--find-links", find_links_url,
+ package,
+ ]
+ result = subprocess.run(pip_cmd, capture_output=True, text=True)
+ if result.returncode != 0:
+ log(f"CUDA package install failed for {package}:\n{result.stderr}")
+ raise RuntimeError(f"CUDA package install failed: {result.stderr}")

- result = subprocess.run(pip_cmd, capture_output=True, text=True)
- if result.returncode != 0:
- log(f"CUDA package install failed:\n{result.stderr}")
- raise RuntimeError(f"CUDA package install failed: {result.stderr}")
  log("CUDA packages installed")

  # Create symlink/junction to _env_<name> for discovery (only for isolated subdirs)
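For a single package, the new install path boils down to one pip invocation against a per-package find-links page. A standalone sketch of the equivalent command, assuming `CUDA_WHEELS_INDEX` is the https://pozzettiandrea.github.io/cuda-wheels/ index named in the README and carries a trailing slash (in `core.py` the interpreter is the pixi environment's Python, not `sys.executable`):

```python
import subprocess
import sys

# Assumed value; core.py uses its own CUDA_WHEELS_INDEX constant.
CUDA_WHEELS_INDEX = "https://pozzettiandrea.github.io/cuda-wheels/"
package = "nvdiffrast"

pip_cmd = [
    sys.executable, "-m", "pip", "install",
    "--no-index",        # never fall back to PyPI
    "--no-deps",         # torch and friends are already managed by pixi
    "--no-cache-dir",
    "--find-links", f"{CUDA_WHEELS_INDEX}{package}/",  # per-package wheel page
    package,
]
result = subprocess.run(pip_cmd, capture_output=True, text=True)
if result.returncode != 0:
    raise RuntimeError(f"CUDA package install failed: {result.stderr}")
```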
{comfy_env-0.0.66 → comfy_env-0.0.68}/src/comfy_env/workers/mp.py

@@ -430,20 +430,68 @@ class MPWorker(Worker):
  return

  # Import torch here to avoid import at module level
+ import os
+ import sys
+
+ # Clear conda/pixi environment variables FIRST, before importing multiprocessing
+ # These can cause the child process to pick up the wrong Python interpreter
+ # or stdlib, leading to sys.version mismatch errors in platform module
+ conda_env_vars = [
+ 'CONDA_PREFIX',
+ 'CONDA_DEFAULT_ENV',
+ 'CONDA_PYTHON_EXE',
+ 'CONDA_EXE',
+ 'CONDA_SHLVL',
+ 'PYTHONHOME',
+ 'PYTHONPATH', # Also clear PYTHONPATH to prevent pixi paths
+ '_CE_CONDA',
+ '_CE_M',
+ ]
+ saved_env = {}
+ for var in conda_env_vars:
+ if var in os.environ:
+ saved_env[var] = os.environ.pop(var)
+
+ # Also remove pixi paths from LD_LIBRARY_PATH
+ ld_lib = os.environ.get('LD_LIBRARY_PATH', '')
+ if '.pixi' in ld_lib:
+ saved_env['LD_LIBRARY_PATH'] = ld_lib
+ # Filter out pixi paths
+ new_ld_lib = ':'.join(p for p in ld_lib.split(':') if '.pixi' not in p)
+ if new_ld_lib:
+ os.environ['LD_LIBRARY_PATH'] = new_ld_lib
+ else:
+ os.environ.pop('LD_LIBRARY_PATH', None)
+
  import torch.multiprocessing as mp

- # Use spawn to get clean subprocess (no inherited CUDA context)
- ctx = mp.get_context('spawn')
-
- self._queue_in = ctx.Queue()
- self._queue_out = ctx.Queue()
- self._process = ctx.Process(
- target=_worker_loop,
- args=(self._queue_in, self._queue_out, self._sys_path, self._lib_path),
- daemon=True,
- )
- self._process.start()
- self._started = True
+ try:
+ # Use spawn to get clean subprocess (no inherited CUDA context)
+ ctx = mp.get_context('spawn')
+
+ # Explicitly set the spawn executable to the current Python
+ # This prevents pixi/conda from hijacking the spawn process
+ import multiprocessing.spawn as mp_spawn
+ original_exe = mp_spawn.get_executable()
+ if original_exe != sys.executable.encode() and original_exe != sys.executable:
+ print(f"[comfy-env] Warning: spawn executable was {original_exe}, forcing to {sys.executable}")
+ mp_spawn.set_executable(sys.executable)
+
+ self._queue_in = ctx.Queue()
+ self._queue_out = ctx.Queue()
+ self._process = ctx.Process(
+ target=_worker_loop,
+ args=(self._queue_in, self._queue_out, self._sys_path, self._lib_path),
+ daemon=True,
+ )
+ self._process.start()
+ self._started = True
+
+ # Restore original executable setting
+ mp_spawn.set_executable(original_exe)
+ finally:
+ # Restore env vars in parent process
+ os.environ.update(saved_env)

  def call(
  self,
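The new start-up sequence is: scrub conda/pixi variables from the parent environment, pin the spawn interpreter to `sys.executable`, start the worker, then restore everything. A condensed, self-contained sketch of that pattern using the stdlib `multiprocessing` rather than `torch.multiprocessing`; the `_worker_loop` below is a placeholder, the real target and queues live in `comfy_env.workers.mp`:

```python
import multiprocessing as mp
import multiprocessing.spawn as mp_spawn
import os
import sys


def _worker_loop(queue):
    # Placeholder body: report which interpreter the spawned child is using.
    queue.put(sys.executable)


if __name__ == "__main__":
    # 1. Temporarily drop variables that could point the child at a conda/pixi
    #    interpreter or stdlib (a subset of the list used in mp.py).
    saved_env = {}
    for var in ("CONDA_PREFIX", "PYTHONHOME", "PYTHONPATH"):
        if var in os.environ:
            saved_env[var] = os.environ.pop(var)
    try:
        # 2. Pin the spawn executable to the parent's interpreter.
        original_exe = mp_spawn.get_executable()
        mp_spawn.set_executable(sys.executable)

        ctx = mp.get_context("spawn")
        queue = ctx.Queue()
        proc = ctx.Process(target=_worker_loop, args=(queue,), daemon=True)
        proc.start()
        print("child interpreter:", queue.get())
        proc.join()

        mp_spawn.set_executable(original_exe)
    finally:
        # 3. Restore the parent environment.
        os.environ.update(saved_env)
```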
comfy_env-0.0.66/src/comfy_env/pixi/registry.py (deleted)

@@ -1,130 +0,0 @@
- """Built-in registry of CUDA packages and their wheel sources.
-
- This module loads package configurations from wheel_sources.yml and provides
- lookup functions for the install module.
-
- Each package has either:
- - wheel_template: Direct URL template for .whl file
- - package_name: PyPI package name template (for packages like spconv-cu124)
- """
-
- from pathlib import Path
- from typing import Any, Dict, Optional
-
- import yaml
-
-
- def get_cuda_short2(cuda_version: str) -> str:
- """Convert CUDA version to 2-3 digit format for spconv.
-
- spconv uses "cu124" not "cu1240" for CUDA 12.4.
-
- Args:
- cuda_version: CUDA version string (e.g., "12.4", "12.8")
-
- Returns:
- Short format string (e.g., "124", "128")
-
- Examples:
- >>> get_cuda_short2("12.4")
- '124'
- >>> get_cuda_short2("12.8")
- '128'
- """
- parts = cuda_version.split(".")
- major = parts[0]
- minor = parts[1] if len(parts) > 1 else "0"
- return f"{major}{minor}"
-
-
- def _load_wheel_sources() -> Dict[str, Dict[str, Any]]:
- """Load package registry from wheel_sources.yml."""
- yml_path = Path(__file__).parent / "wheel_sources.yml"
- with open(yml_path, "r") as f:
- data = yaml.safe_load(f)
- return data.get("packages", {})
-
-
- # Load registry at module import time
- PACKAGE_REGISTRY: Dict[str, Dict[str, Any]] = _load_wheel_sources()
-
-
- def get_package_info(package: str) -> Optional[Dict[str, Any]]:
- """Get registry info for a package.
-
- Args:
- package: Package name (case-insensitive)
-
- Returns:
- Registry entry dict or None if not found
- """
- return PACKAGE_REGISTRY.get(package.lower())
-
-
- def list_packages() -> Dict[str, str]:
- """List all registered packages with their descriptions.
-
- Returns:
- Dict mapping package name to description
- """
- return {
- name: info.get("description", "No description")
- for name, info in PACKAGE_REGISTRY.items()
- }
-
-
- def is_registered(package: str) -> bool:
- """Check if a package is in the registry.
-
- Args:
- package: Package name (case-insensitive)
-
- Returns:
- True if package is registered
- """
- return package.lower() in PACKAGE_REGISTRY
-
-
- def get_wheel_template(package: str) -> Optional[str]:
- """Get wheel_template for a package.
-
- Args:
- package: Package name (case-insensitive)
-
- Returns:
- wheel_template string or None if not found/not available
- """
- info = get_package_info(package)
- if info:
- return info.get("wheel_template")
- return None
-
-
- def get_package_name_template(package: str) -> Optional[str]:
- """Get package_name template for PyPI variant packages (like spconv).
-
- Args:
- package: Package name (case-insensitive)
-
- Returns:
- package_name template string or None if not found/not available
- """
- info = get_package_info(package)
- if info:
- return info.get("package_name")
- return None
-
-
- def get_default_version(package: str) -> Optional[str]:
- """Get default_version for a package.
-
- Args:
- package: Package name (case-insensitive)
-
- Returns:
- default_version string or None if not specified
- """
- info = get_package_info(package)
- if info:
- return info.get("default_version")
- return None
comfy_env-0.0.66/src/comfy_env/pixi/wheel_sources.yml (deleted)

@@ -1,141 +0,0 @@
- # Wheel sources registry for CUDA packages
- #
- # Each package has a single wheel_template that resolves to a direct .whl URL.
- # Users can override these in their comfy-env.toml [wheel_sources] section.
- #
- # Template variables:
- # {version} - Package version (e.g., "0.4.0")
- # {cuda_version} - Full CUDA version (e.g., "12.8")
- # {cuda_short} - CUDA without dot (e.g., "128")
- # {cuda_major} - CUDA major version (e.g., "12")
- # {torch_version} - Full PyTorch version (e.g., "2.8.0")
- # {torch_short} - PyTorch without dots (e.g., "280")
- # {torch_mm} - PyTorch major.minor no dot (e.g., "28")
- # {torch_dotted_mm}- PyTorch major.minor with dot (e.g., "2.8")
- # {py_version} - Python version (e.g., "3.10")
- # {py_short} - Python without dot (e.g., "310")
- # {py_minor} - Python minor version only (e.g., "10")
- # {py_tag} - Python tag (e.g., "cp310")
- # {platform} - Platform tag (e.g., "linux_x86_64")
-
- packages:
- # ===========================================================================
- # PyTorch Geometric (PyG) - official wheels
- # https://pytorch-geometric.readthedocs.io/en/latest/install/installation.html
- # ===========================================================================
- torch-scatter:
- wheel_template: "https://data.pyg.org/whl/torch-{torch_version}%2Bcu{cuda_short}/torch_scatter-{version}%2Bpt{torch_mm}cu{cuda_short}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "2.1.2"
- description: Scatter operations for PyTorch
-
- torch-cluster:
- wheel_template: "https://data.pyg.org/whl/torch-{torch_version}%2Bcu{cuda_short}/torch_cluster-{version}%2Bpt{torch_mm}cu{cuda_short}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "1.6.3"
- description: Clustering algorithms for PyTorch
-
- torch-sparse:
- wheel_template: "https://data.pyg.org/whl/torch-{torch_version}%2Bcu{cuda_short}/torch_sparse-{version}%2Bpt{torch_mm}cu{cuda_short}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.6.18"
- description: Sparse tensor operations for PyTorch
-
- torch-spline-conv:
- wheel_template: "https://data.pyg.org/whl/torch-{torch_version}%2Bcu{cuda_short}/torch_spline_conv-{version}%2Bpt{torch_mm}cu{cuda_short}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "1.2.2"
- description: Spline convolutions for PyTorch
-
- # ===========================================================================
- # PozzettiAndrea cuda-wheels (GitHub releases)
- # https://github.com/PozzettiAndrea/cuda-wheels/releases
- # ===========================================================================
- pytorch3d:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/pytorch3d-latest/pytorch3d-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.7.9"
- description: PyTorch3D - 3D deep learning library
-
- nvdiffrast:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/nvdiffrast-latest/nvdiffrast-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.4.0"
- description: NVIDIA differentiable rasterizer
-
- cumesh:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/cumesh-latest/cumesh-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.0.1"
- description: CUDA-accelerated mesh utilities
-
- cubvh:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/cubvh-latest/cubvh-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.1.2"
- description: CUDA BVH acceleration for mesh operations and marching cubes
-
- o_voxel:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/o_voxel-latest/o_voxel-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.0.1"
- description: O-Voxel CUDA extension for TRELLIS
-
- flex_gemm:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/flex_gemm-latest/flex_gemm-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "1.0.0"
- description: Flexible GEMM operations
-
- nvdiffrec_render:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/nvdiffrec_render-latest/nvdiffrec_render-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.0.1"
- description: NVDiffRec rendering utilities
-
- gsplat:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/gsplat-latest/gsplat-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "1.5.3"
- description: Gaussian splatting rasterization
-
- cc_torch:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/cc_torch-latest/cc_torch-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.2"
- description: GPU-accelerated connected components
-
- torch_generic_nms:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/torch_generic_nms-latest/torch_generic_nms-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.1"
- description: GPU-accelerated Non-Maximum Suppression
-
- lietorch:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/lietorch-latest/lietorch-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.3"
- description: Lie group operations for PyTorch (DPVO dependency)
-
- sageattention:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/sageattention-latest/sageattention-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "2.2.0"
- description: SageAttention - 2-5x faster than FlashAttention with quantized kernels
-
- dpvo-cuda:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/dpvo_cuda-latest/dpvo_cuda-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "0.0.0"
- description: DPVO CUDA extensions (cuda_corr, cuda_ba, lietorch_backends) - torch 2.4 only
-
- # ===========================================================================
- # detectron2 - Facebook's detection library
- # https://github.com/facebookresearch/detectron2
- # Prebuilt wheels from miropsota's torch_packages_builder
- # ===========================================================================
- detectron2:
- find_links: "https://miropsota.github.io/torch_packages_builder/detectron2/"
- default_version: "0.6"
- description: Detectron2 - Facebook's detection and segmentation library
-
- # ===========================================================================
- # flash-attn - Dao-AILab (Linux x86_64)
- # For other platforms, users should add custom wheel_source in their config
- # ===========================================================================
- flash-attn:
- wheel_template: "https://github.com/Dao-AILab/flash-attention/releases/download/v{version}/flash_attn-{version}%2Bcu{cuda_major}torch{torch_dotted_mm}cxx11abiTRUE-{py_tag}-{py_tag}-linux_x86_64.whl"
- description: Flash Attention for fast transformer inference (Linux x86_64)
-
- # ===========================================================================
- # spconv - Sparse convolution library
- # PyPI with CUDA-versioned package names (spconv-cu124, spconv-cu126, etc.)
- # Note: This uses a special package_name field, not wheel_template
- # ===========================================================================
- spconv:
- wheel_template: "https://github.com/PozzettiAndrea/cuda-wheels/releases/download/spconv_cu{cuda_short}-latest/spconv_cu{cuda_short}-{version}%2Bcu{cuda_short}torch{torch_mm}-{py_tag}-{py_tag}-{platform}.whl"
- default_version: "2.3.8"
- description: Sparse convolution library
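For reference, the now-removed resolver filled these templates with a plain string substitution (see the deleted `_substitute_template` in cli.py above). A sketch with illustrative values for the torch-scatter entry; on a real 0.0.66 system the values came from `RuntimeEnv.detect().as_dict()`:

```python
# Illustrative values only; the template string is the torch-scatter entry above.
template = (
    "https://data.pyg.org/whl/torch-{torch_version}%2Bcu{cuda_short}/"
    "torch_scatter-{version}%2Bpt{torch_mm}cu{cuda_short}-{py_tag}-{py_tag}-{platform}.whl"
)
vars_dict = {
    "version": "2.1.2",
    "torch_version": "2.8.0",
    "torch_mm": "28",
    "cuda_short": "128",
    "py_tag": "cp310",
    "platform": "linux_x86_64",
}

url = template
for key, value in vars_dict.items():
    url = url.replace(f"{{{key}}}", str(value))

print(url)
# -> https://data.pyg.org/whl/torch-2.8.0%2Bcu128/torch_scatter-2.1.2%2Bpt28cu128-cp310-cp310-linux_x86_64.whl
```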