comfyui-workflow-templates 0.1.28__py3-none-any.whl → 0.1.30__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the versions as they appear in the public registry.
- comfyui_workflow_templates/templates/api_recraft_image_gen_with_color_control.json +184 -175
- comfyui_workflow_templates/templates/flux_kontext_dev_basic-1.webp +0 -0
- comfyui_workflow_templates/templates/flux_kontext_dev_basic-2.webp +0 -0
- comfyui_workflow_templates/templates/flux_kontext_dev_basic.json +1114 -0
- comfyui_workflow_templates/templates/flux_kontext_dev_grouped-1.webp +0 -0
- comfyui_workflow_templates/templates/flux_kontext_dev_grouped-2.webp +0 -0
- comfyui_workflow_templates/templates/flux_kontext_dev_grouped.json +2296 -0
- comfyui_workflow_templates/templates/image_cosmos_predict2_2B_t2i-1.webp +0 -0
- comfyui_workflow_templates/templates/image_cosmos_predict2_2B_t2i.json +544 -0
- comfyui_workflow_templates/templates/index.json +111 -79
- comfyui_workflow_templates/templates/video_cosmos_predict2_2B_video2world_480p_16fps-1.webp +0 -0
- comfyui_workflow_templates/templates/video_cosmos_predict2_2B_video2world_480p_16fps.json +724 -0
- {comfyui_workflow_templates-0.1.28.dist-info → comfyui_workflow_templates-0.1.30.dist-info}/METADATA +1 -1
- {comfyui_workflow_templates-0.1.28.dist-info → comfyui_workflow_templates-0.1.30.dist-info}/RECORD +17 -7
- {comfyui_workflow_templates-0.1.28.dist-info → comfyui_workflow_templates-0.1.30.dist-info}/WHEEL +0 -0
- {comfyui_workflow_templates-0.1.28.dist-info → comfyui_workflow_templates-0.1.30.dist-info}/licenses/LICENSE +0 -0
- {comfyui_workflow_templates-0.1.28.dist-info → comfyui_workflow_templates-0.1.30.dist-info}/top_level.txt +0 -0
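All of the hunks below come from comfyui_workflow_templates/templates/index.json (+111 -79). As a quick orientation to the data being diffed, here is a minimal sketch of reading the bundled index from an installed wheel. The resource paths come from the RECORD entries above; the top-level shape (a list of category objects, each holding a "templates" array) is inferred from the hunks below, not from a documented API.

# Minimal sketch, assuming the wheel is installed and the index layout
# matches the hunks below (a list of categories, each with "templates").
import json
from importlib import resources

index_file = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
categories = json.loads(index_file.read_text(encoding="utf-8"))

for category in categories:
    for template in category.get("templates", []):
        # e.g. "Flux :: flux_kontext_dev_basic (image/webp)"
        print(f'{category.get("title")} :: {template["name"]} '
              f'({template.get("mediaType")}/{template.get("mediaSubtype")})')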
--- a/comfyui_workflow_templates/templates/index.json
+++ b/comfyui_workflow_templates/templates/index.json
@@ -9,7 +9,7 @@
       "title": "Image Generation",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate images from text
+      "description": "Generate images from text prompts."
     },
     {
       "name": "image2image",
@@ -24,7 +24,7 @@
       "title": "Lora",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images with LoRA models for specialized styles or subjects.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
     },
     {
@@ -32,7 +32,7 @@
       "title": "Lora Multiple",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images by combining multiple LoRA models.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
     },
     {
@@ -58,7 +58,7 @@
       "title": "Embedding",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images using textual inversion for consistent styles.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/"
     },
     {
@@ -66,7 +66,7 @@
       "title": "Gligen Textbox",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images with precise object placement using text boxes.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/"
     }
   ]
@@ -76,6 +76,22 @@
   "title": "Flux",
   "type": "image",
   "templates": [
+    {
+      "name": "flux_kontext_dev_basic",
+      "title": "Flux Kontext Dev(Basic)",
+      "mediaType": "image",
+      "mediaSubtype": "webp",
+      "thumbnailVariant": "hoverDissolve",
+      "description": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow."
+    },
+    {
+      "name": "flux_kontext_dev_grouped",
+      "title": "Flux Kontext Dev(Grouped)",
+      "mediaType": "image",
+      "mediaSubtype": "webp",
+      "thumbnailVariant": "hoverDissolve",
+      "description": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace."
+    },
     {
       "name": "flux_dev_checkpoint_example",
       "title": "Flux Dev fp8",
@@ -113,7 +129,7 @@
       "title": "Flux Inpaint",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Fill
+      "description": "Fill missing parts of images using Flux inpainting.",
       "thumbnailVariant": "compareSlider",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
     },
@@ -122,7 +138,7 @@
       "title": "Flux Outpaint",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Extend images using Flux outpainting.",
+      "description": "Extend images beyond boundaries using Flux outpainting.",
       "thumbnailVariant": "compareSlider",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
     },
@@ -131,7 +147,7 @@
       "title": "Flux Canny Model",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate images
+      "description": "Generate images guided by edge detection using Flux Canny.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
     },
@@ -140,7 +156,7 @@
       "title": "Flux Depth Lora",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images guided by depth information using Flux LoRA.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
     },
@@ -149,7 +165,7 @@
       "title": "Flux Redux Model",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images by transferring style from reference images using Flux Redux.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#redux"
     }
   ]
@@ -159,6 +175,14 @@
   "title": "Image",
   "type": "image",
   "templates": [
+    {
+      "name": "image_cosmos_predict2_2B_t2i",
+      "title": "Cosmos Predict2 2B T2I",
+      "mediaType": "image",
+      "mediaSubtype": "webp",
+      "description": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
+      "tutorialUrl": "http://docs.comfy.org/tutorials/image/cosmos/cosmos-predict2-t2i"
+    },
     {
       "name": "image_chroma_text_to_image",
       "title": "Chroma text to image",
@@ -200,7 +224,7 @@
       "title": "SD3.5 Simple",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate images
+      "description": "Generate images using SD 3.5.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35"
     },
     {
@@ -208,7 +232,7 @@
       "title": "SD3.5 Large Canny ControlNet",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
     },
@@ -217,7 +241,7 @@
       "title": "SD3.5 Large Depth",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images guided by depth information using SD 3.5.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
     },
@@ -226,7 +250,7 @@
       "title": "SD3.5 Large Blur",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate images
+      "description": "Generate images guided by blurred reference images using SD 3.5.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
     },
@@ -235,7 +259,7 @@
       "title": "SDXL Simple",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate high-quality images using SDXL.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
     },
     {
@@ -243,7 +267,7 @@
       "title": "SDXL Refiner Prompt",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Enhance SDXL
+      "description": "Enhance SDXL images using refiner models.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
     },
     {
@@ -251,7 +275,7 @@
       "title": "SDXL Revision Text Prompts",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
     },
     {
@@ -259,7 +283,7 @@
       "title": "SDXL Revision Zero Positive",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images using both text prompts and reference images with SDXL Revision.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
     },
     {
@@ -267,7 +291,7 @@
       "title": "SDXL Turbo",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate images in a single step
+      "description": "Generate images in a single step using SDXL Turbo.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/"
     },
     {
@@ -285,6 +309,14 @@
   "title": "Video",
   "type": "video",
   "templates": [
+    {
+      "name": "video_cosmos_predict2_2B_video2world_480p_16fps",
+      "title": "Cosmos Predict2 2B Video2World 480p 16fps",
+      "description": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
+      "mediaType": "image",
+      "mediaSubtype": "webp",
+      "tutorialUrl": "http://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world"
+    },
     {
       "name": "video_wan_vace_14B_t2v",
       "title": "Wan VACE Text to Video",
@@ -302,7 +334,7 @@
     },{
       "name": "video_wan_vace_14B_v2v",
       "title": "Wan VACE Control Video",
-      "description": "
+      "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "thumbnailVariant": "compareSlider",
@@ -310,7 +342,7 @@
     },{
       "name": "video_wan_vace_outpainting",
       "title": "Wan VACE Outpainting",
-      "description": "
+      "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "thumbnailVariant": "compareSlider",
@@ -333,7 +365,7 @@
     },{
       "name": "video_wan2.1_fun_camera_v1.1_1.3B",
       "title": "Wan 2.1 Fun Camera 1.3B",
-      "description": "
+      "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },{
@@ -346,7 +378,7 @@
     {
       "name": "text_to_video_wan",
       "title": "Wan 2.1 Text to Video",
-      "description": "
+      "description": "Generate videos from text prompts using Wan 2.1.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#text-to-video"
@@ -354,7 +386,7 @@
     {
       "name": "image_to_video_wan",
       "title": "Wan 2.1 Image to Video",
-      "description": "
+      "description": "Generate videos from images using Wan 2.1.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#image-to-video"
@@ -362,7 +394,7 @@
     {
       "name": "wan2.1_fun_inp",
       "title": "Wan 2.1 Inpainting",
-      "description": "
+      "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp"
@@ -370,7 +402,7 @@
     {
       "name": "wan2.1_fun_control",
       "title": "Wan 2.1 ControlNet",
-      "description": "
+      "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
      "mediaType": "image",
       "mediaSubtype": "webp",
       "thumbnailVariant": "hoverDissolve",
@@ -379,7 +411,7 @@
     {
       "name": "wan2.1_flf2v_720_f16",
       "title": "Wan 2.1 FLF2V 720p F16",
-      "description": "Generate
+      "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf"
@@ -389,7 +421,7 @@
       "title": "LTXV Text to Video",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate videos from text
+      "description": "Generate videos from text prompts.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#text-to-video"
     },
     {
@@ -397,7 +429,7 @@
       "title": "LTXV Image to Video",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate videos from still images.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#image-to-video"
     },
     {
@@ -405,7 +437,7 @@
       "title": "Mochi Text to Video",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate videos from text prompts using Mochi model.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/"
     },
     {
@@ -413,7 +445,7 @@
       "title": "Hunyuan Video Text to Video",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate videos using Hunyuan model.",
+      "description": "Generate videos from text prompts using Hunyuan model.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/"
     },
     {
@@ -421,7 +453,7 @@
       "title": "SVD Image to Video",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate videos from still images.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
     },
     {
@@ -429,7 +461,7 @@
       "title": "SVD Text to Image to Video",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate
+      "description": "Generate videos by first creating images from text prompts.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
     }
   ]
@@ -466,7 +498,7 @@
     {
       "name": "api_bfl_flux_pro_t2i",
       "title": "BFL Flux[Pro]: Text to Image",
-      "description": "
+      "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
@@ -481,7 +513,7 @@
     {
       "name": "api_luma_photon_style_ref",
       "title": "Luma Photon: Style Reference",
-      "description": "
+      "description": "Generate images by blending style references with precise control using Luma Photon.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "thumbnailVariant": "compareSlider"
@@ -489,7 +521,7 @@
     {
       "name": "api_recraft_image_gen_with_color_control",
       "title": "Recraft: Color Control Image Generation",
-      "description": "
+      "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
@@ -503,13 +535,13 @@
     {
       "name": "api_recraft_vector_gen",
       "title": "Recraft: Vector Generation",
-      "description": "
+      "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },{
       "name": "api_runway_text_to_image",
       "title": "Runway: Text to Image",
-      "description": "
+      "description": "Generate high-quality images from text prompts using Runway's AI model.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
@@ -531,7 +563,7 @@
     {
       "name": "api_stability_ai_i2i",
       "title": "Stability AI: Image to Image",
-      "description": "Transform
+      "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
       "mediaType": "image",
       "thumbnailVariant": "compareSlider",
       "mediaSubtype": "webp"
@@ -554,14 +586,14 @@
     {
       "name": "api_ideogram_v3_t2i",
       "title": "Ideogram V3: Text to Image",
-      "description": "Generate images with
+      "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
     {
       "name": "api_openai_image_1_t2i",
       "title": "OpenAI: GPT-Image-1 Text to Image",
-      "description": "
+      "description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
@@ -569,7 +601,7 @@
     {
       "name": "api_openai_image_1_i2i",
       "title": "OpenAI: GPT-Image-1 Image to Image",
-      "description": "
+      "description": "Generate images from input images using OpenAI GPT Image 1 API.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "thumbnailVariant": "compareSlider",
@@ -578,7 +610,7 @@
     {
       "name": "api_openai_image_1_inpaint",
       "title": "OpenAI: GPT-Image-1 Inpaint",
-      "description": "
+      "description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "thumbnailVariant": "compareSlider",
@@ -587,7 +619,7 @@
     {
       "name": "api_openai_image_1_multi_inputs",
       "title": "OpenAI: GPT-Image-1 Multi Inputs",
-      "description": "
+      "description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "thumbnailVariant": "compareSlider",
@@ -596,7 +628,7 @@
     {
       "name": "api_openai_dall_e_2_t2i",
       "title": "OpenAI: Dall-E 2 Text to Image",
-      "description": "
+      "description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
@@ -604,7 +636,7 @@
     {
       "name": "api_openai_dall_e_2_inpaint",
       "title": "OpenAI: Dall-E 2 Inpaint",
-      "description": "
+      "description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "thumbnailVariant": "compareSlider",
@@ -613,7 +645,7 @@
     {
       "name": "api_openai_dall_e_3_t2i",
       "title": "OpenAI: Dall-E 3 Text to Image",
-      "description": "
+      "description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
       "mediaType": "image",
       "mediaSubtype": "webp",
       "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3"
@@ -628,14 +660,14 @@
     {
       "name": "api_kling_i2v",
       "title": "Kling: Image to Video",
-      "description": "
+      "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
     {
       "name": "api_kling_effects",
       "title": "Kling: Video Effects",
-      "description": "
+      "description": "Generate dynamic videos by applying visual effects to images using Kling.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
@@ -670,21 +702,21 @@
     {
       "name": "api_hailuo_minimax_i2v",
       "title": "MiniMax: Image to Video",
-      "description": "
+      "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
     {
       "name": "api_pixverse_i2v",
       "title": "PixVerse: Image to Video",
-      "description": "
+      "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
     {
       "name": "api_pixverse_template_i2v",
       "title": "PixVerse Templates: Image to Video",
-      "description": "
+      "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
@@ -698,14 +730,14 @@
     {
       "name": "api_runway_gen3a_turbo_image_to_video",
       "title": "Runway: Gen3a Turbo Image to Video",
-      "description": "
+      "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
     {
       "name": "api_runway_gen4_turo_image_to_video",
       "title": "Runway: Gen4 Turbo Image to Video",
-      "description": "
+      "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
@@ -719,21 +751,21 @@
     {
       "name": "api_pika_i2v",
       "title": "Pika: Image to Video",
-      "description": "
+      "description": "Generate smooth animated videos from single static images using Pika AI.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
     {
       "name": "api_pika_scene",
       "title": "Pika Scenes: Images to Video",
-      "description": "
+      "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     },
     {
       "name": "api_veo2_i2v",
       "title": "Veo2: Image to Video",
-      "description": "
+      "description": "Generate videos from images using Google Veo2 API.",
       "mediaType": "image",
       "mediaSubtype": "webp"
     }
@@ -747,7 +779,7 @@
     {
       "name": "api_rodin_image_to_model",
       "title": "Rodin: Image to Model",
-      "description": "
+      "description": "Generate detailed 3D models from single photos using Rodin AI.",
       "mediaType": "image",
       "thumbnailVariant": "compareSlider",
       "mediaSubtype": "webp"
@@ -770,7 +802,7 @@
     {
       "name": "api_tripo_image_to_model",
       "title": "Tripo: Image to Model",
-      "description": "
+      "description": "Generate professional 3D assets from 2D images using Tripo engine.",
       "mediaType": "image",
       "thumbnailVariant": "compareSlider",
       "mediaSubtype": "webp"
@@ -816,7 +848,7 @@
       "title": "Upscale",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Upscale images by enhancing quality in latent space.",
       "thumbnailVariant": "zoomHover",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/"
     },
@@ -825,7 +857,7 @@
       "title": "ESRGAN",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Upscale images using ESRGAN models to enhance quality.",
       "thumbnailVariant": "zoomHover",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/"
     },
@@ -834,7 +866,7 @@
       "title": "HiresFix ESRGAN Workflow",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Upscale images using ESRGAN models during intermediate generation steps.",
       "thumbnailVariant": "zoomHover",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling"
     },
@@ -843,7 +875,7 @@
       "title": "Latent Upscale Different Prompt Model",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Upscale
+      "description": "Upscale images while changing prompts across generation passes.",
       "thumbnailVariant": "zoomHover",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples"
     }
@@ -859,7 +891,7 @@
       "title": "Scribble ControlNet",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images guided by scribble reference images using ControlNet.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/"
     },
@@ -868,7 +900,7 @@
       "title": "Pose ControlNet 2 Pass",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate images
+      "description": "Generate images guided by pose references using ControlNet.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet"
     },
@@ -877,7 +909,7 @@
       "title": "Depth ControlNet",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images guided by depth information using ControlNet.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
     },
@@ -886,7 +918,7 @@
       "title": "Depth T2I Adapter",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images guided by depth information using T2I adapter.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
     },
@@ -895,7 +927,7 @@
       "title": "Mixing ControlNets",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images by combining multiple ControlNet models.",
       "thumbnailVariant": "hoverDissolve",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets"
     }
@@ -911,7 +943,7 @@
       "title": "Area Composition",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images by controlling composition with defined areas.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
     },
     {
@@ -919,7 +951,7 @@
       "title": "Area Composition Reversed",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images using reverse area composition workflow.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
     },
     {
@@ -927,7 +959,7 @@
       "title": "Area Composition Square Area for Subject",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate images with consistent subject placement using area composition.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition"
     }
   ]
@@ -942,7 +974,7 @@
       "title": "Hunyuan3D 2.0",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
       "tutorialUrl": ""
     },
     {
@@ -950,7 +982,7 @@
       "title": "Hunyuan3D 2.0 MV",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
       "tutorialUrl": "",
       "thumbnailVariant": "compareSlider"
     },
@@ -959,7 +991,7 @@
       "title": "Hunyuan3D 2.0 MV Turbo",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "
+      "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
       "tutorialUrl": "",
       "thumbnailVariant": "compareSlider"
     },
@@ -968,7 +1000,7 @@
       "title": "Stable Zero123",
       "mediaType": "image",
       "mediaSubtype": "webp",
-      "description": "Generate 3D views from single images.",
+      "description": "Generate 3D views from single images using Stable Zero123.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/"
     }
   ]
@@ -983,7 +1015,7 @@
       "title": "Stable Audio",
       "mediaType": "audio",
       "mediaSubtype": "mp3",
-      "description": "Generate audio from text
+      "description": "Generate audio from text prompts using Stable Audio.",
       "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/"
     },
     {
@@ -991,7 +1023,7 @@
       "title": "ACE-Step v1 Text to Instrumentals Music",
       "mediaType": "audio",
       "mediaSubtype": "mp3",
-      "description": "
+      "description": "Generate instrumental music from text prompts using ACE-Step v1.",
       "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
     },
     {
@@ -999,7 +1031,7 @@
       "title": "ACE Step v1 Text to Song",
       "mediaType": "audio",
       "mediaSubtype": "mp3",
-      "description": "
+      "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
       "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
     },
     {
@@ -1007,7 +1039,7 @@
       "title": "ACE Step v1 M2M Editing",
       "mediaType": "audio",
       "mediaSubtype": "mp3",
-      "description": "
+      "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
       "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
     }
   ]
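The added thumbnail pairs in the file list (flux_kontext_dev_basic-1.webp, flux_kontext_dev_basic-2.webp, and so on) suggest that each template "<name>.json" ships with thumbnails named "<name>-<i>.<mediaSubtype>". Here is a hedged sketch under that assumption; the naming convention is inferred from this diff, not from documented behavior.

# Hedged sketch: locate a template's workflow JSON and its thumbnails in the
# installed wheel. The "<name>-1.webp"/"<name>-2.webp" naming is an
# assumption read off the files added in this diff.
from importlib import resources

def template_assets(name: str, media_subtype: str = "webp"):
    base = resources.files("comfyui_workflow_templates") / "templates"
    workflow = base / f"{name}.json"
    thumbnails = [thumb for i in (1, 2)
                  if (thumb := base / f"{name}-{i}.{media_subtype}").is_file()]
    return workflow, thumbnails

workflow, thumbnails = template_assets("flux_kontext_dev_basic")
print(workflow.is_file(), [t.name for t in thumbnails])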