comfyui_workflow_templates-0.1.28-py3-none-any.whl → comfyui_workflow_templates-0.1.29-py3-none-any.whl

@@ -9,7 +9,7 @@
  "title": "Image Generation",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from text descriptions."
+ "description": "Generate images from text prompts."
  },
  {
  "name": "image2image",
@@ -24,7 +24,7 @@
  "title": "Lora",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Apply LoRA models for specialized styles or subjects.",
+ "description": "Generate images with LoRA models for specialized styles or subjects.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
  },
  {
@@ -32,7 +32,7 @@
  "title": "Lora Multiple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Combine multiple LoRA models for unique results.",
+ "description": "Generate images by combining multiple LoRA models.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
  },
  {
@@ -58,7 +58,7 @@
  "title": "Embedding",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use textual inversion for consistent styles.",
+ "description": "Generate images using textual inversion for consistent styles.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/"
  },
  {
@@ -66,7 +66,7 @@
  "title": "Gligen Textbox",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Specify the location and size of objects.",
+ "description": "Generate images with precise object placement using text boxes.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/"
  }
  ]
@@ -113,7 +113,7 @@
  "title": "Flux Inpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Fill in missing parts of images.",
+ "description": "Fill missing parts of images using Flux inpainting.",
  "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
  },
@@ -122,7 +122,7 @@
  "title": "Flux Outpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Extend images using Flux outpainting.",
+ "description": "Extend images beyond boundaries using Flux outpainting.",
  "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
  },
@@ -131,7 +131,7 @@
  "title": "Flux Canny Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from edge detection.",
+ "description": "Generate images guided by edge detection using Flux Canny.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
  },
@@ -140,7 +140,7 @@
  "title": "Flux Depth Lora",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create images with depth-aware LoRA.",
+ "description": "Generate images guided by depth information using Flux LoRA.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
  },
@@ -149,7 +149,7 @@
  "title": "Flux Redux Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Transfer style from a reference image to guide image generation with Flux.",
+ "description": "Generate images by transferring style from reference images using Flux Redux.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#redux"
  }
  ]
@@ -159,6 +159,14 @@
  "title": "Image",
  "type": "image",
  "templates": [
+ {
+ "name": "image_cosmos_predict2_2B_t2i",
+ "title": "Cosmos Predict2 2B T2I",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
+ "tutorialUrl": "http://docs.comfy.org/tutorials/image/cosmos/cosmos-predict2-t2i"
+ },
  {
  "name": "image_chroma_text_to_image",
  "title": "Chroma text to image",
@@ -200,7 +208,7 @@
  "title": "SD3.5 Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with SD 3.5.",
+ "description": "Generate images using SD 3.5.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35"
  },
  {
@@ -208,7 +216,7 @@
  "title": "SD3.5 Large Canny ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use edge detection to guide image generation with SD 3.5.",
+ "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
  },
@@ -217,7 +225,7 @@
  "title": "SD3.5 Large Depth",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create depth-aware images with SD 3.5.",
+ "description": "Generate images guided by depth information using SD 3.5.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
  },
@@ -226,7 +234,7 @@
  "title": "SD3.5 Large Blur",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from blurred reference images with SD 3.5.",
+ "description": "Generate images guided by blurred reference images using SD 3.5.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
  },
@@ -235,7 +243,7 @@
  "title": "SDXL Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create high-quality images with SDXL.",
+ "description": "Generate high-quality images using SDXL.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
  },
  {
@@ -243,7 +251,7 @@
  "title": "SDXL Refiner Prompt",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Enhance SDXL outputs with refiners.",
+ "description": "Enhance SDXL images using refiner models.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
  },
  {
@@ -251,7 +259,7 @@
  "title": "SDXL Revision Text Prompts",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Transfer concepts from reference images to guide image generation with SDXL.",
+ "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
  },
  {
@@ -259,7 +267,7 @@
  "title": "SDXL Revision Zero Positive",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Add text prompts alongside reference images to guide image generation with SDXL.",
+ "description": "Generate images using both text prompts and reference images with SDXL Revision.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
  },
  {
@@ -267,7 +275,7 @@
  "title": "SDXL Turbo",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images in a single step with SDXL Turbo.",
+ "description": "Generate images in a single step using SDXL Turbo.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/"
  },
  {
@@ -285,6 +293,14 @@
  "title": "Video",
  "type": "video",
  "templates": [
+ {
+ "name": "video_cosmos_predict2_2B_video2world_480p_16fps",
+ "title": "Cosmos Predict2 2B Video2World 480p 16fps",
+ "description": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "http://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world"
+ },
  {
  "name": "video_wan_vace_14B_t2v",
  "title": "Wan VACE Text to Video",
@@ -302,7 +318,7 @@
  },{
  "name": "video_wan_vace_14B_v2v",
  "title": "Wan VACE Control Video",
- "description": "Guide video generation using both input videos and reference images for precise content control.",
+ "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -310,7 +326,7 @@
  },{
  "name": "video_wan_vace_outpainting",
  "title": "Wan VACE Outpainting",
- "description": "Extend video boundaries while maintaining visual consistency. Ideal for aspect ratio adjustments.",
+ "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -333,7 +349,7 @@
  },{
  "name": "video_wan2.1_fun_camera_v1.1_1.3B",
  "title": "Wan 2.1 Fun Camera 1.3B",
- "description": "Create dynamic videos with cinematic camera movements using the lightweight 1.3B model.",
+ "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },{
@@ -346,7 +362,7 @@
  {
  "name": "text_to_video_wan",
  "title": "Wan 2.1 Text to Video",
- "description": "Quickly Generate videos from text descriptions.",
+ "description": "Generate videos from text prompts using Wan 2.1.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#text-to-video"
@@ -354,7 +370,7 @@
  {
  "name": "image_to_video_wan",
  "title": "Wan 2.1 Image to Video",
- "description": "Quickly Generate videos from images.",
+ "description": "Generate videos from images using Wan 2.1.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#image-to-video"
@@ -362,7 +378,7 @@
  {
  "name": "wan2.1_fun_inp",
  "title": "Wan 2.1 Inpainting",
- "description": "Create videos from start and end frames.",
+ "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp"
@@ -370,7 +386,7 @@
  {
  "name": "wan2.1_fun_control",
  "title": "Wan 2.1 ControlNet",
- "description": "Guide video generation with pose, depth, edge controls and more.",
+ "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "hoverDissolve",
@@ -379,7 +395,7 @@
  {
  "name": "wan2.1_flf2v_720_f16",
  "title": "Wan 2.1 FLF2V 720p F16",
- "description": "Generate video through controlling the first and last frames.",
+ "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf"
@@ -389,7 +405,7 @@
  "title": "LTXV Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos from text descriptions.",
+ "description": "Generate videos from text prompts.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#text-to-video"
  },
  {
@@ -397,7 +413,7 @@
  "title": "LTXV Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Convert still images into videos.",
+ "description": "Generate videos from still images.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#image-to-video"
  },
  {
@@ -405,7 +421,7 @@
  "title": "Mochi Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create videos with Mochi model.",
+ "description": "Generate videos from text prompts using Mochi model.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/"
  },
  {
@@ -413,7 +429,7 @@
  "title": "Hunyuan Video Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos using Hunyuan model.",
+ "description": "Generate videos from text prompts using Hunyuan model.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/"
  },
  {
@@ -421,7 +437,7 @@
  "title": "SVD Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Transform images into animated videos.",
+ "description": "Generate videos from still images.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
  },
  {
@@ -429,7 +445,7 @@
  "title": "SVD Text to Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from text and then convert them into videos.",
+ "description": "Generate videos by first creating images from text prompts.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
  }
  ]
@@ -466,7 +482,7 @@
  {
  "name": "api_bfl_flux_pro_t2i",
  "title": "BFL Flux[Pro]: Text to Image",
- "description": "Create images with FLUX.1 [pro]'s excellent prompt following, visual quality, image detail and output diversity.",
+ "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -481,7 +497,7 @@
  {
  "name": "api_luma_photon_style_ref",
  "title": "Luma Photon: Style Reference",
- "description": "Apply and blend style references with exact control. Luma Photon captures the essence of each reference image, letting you combine distinct visual elements while maintaining professional quality.",
+ "description": "Generate images by blending style references with precise control using Luma Photon.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider"
@@ -489,7 +505,7 @@
  {
  "name": "api_recraft_image_gen_with_color_control",
  "title": "Recraft: Color Control Image Generation",
- "description": "Create a custom palette to reuse for multiple images or hand-pick colors for each photo. Match your brand's color palette and craft visuals that are distinctly yours.",
+ "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -503,13 +519,13 @@
  {
  "name": "api_recraft_vector_gen",
  "title": "Recraft: Vector Generation",
- "description": "Go from a text prompt to vector image with Recraft's AI vector generator. Produce the best-quality vector art for logos, posters, icon sets, ads, banners and mockups. Perfect your designs with sharp, high-quality SVG files. Create branded vector illustrations for your app or website in seconds.",
+ "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },{
  "name": "api_runway_text_to_image",
  "title": "Runway: Text to Image",
- "description": "Transform text prompts into high-quality images using Runway's cutting-edge AI model.",
+ "description": "Generate high-quality images from text prompts using Runway's AI model.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -531,7 +547,7 @@
  {
  "name": "api_stability_ai_i2i",
  "title": "Stability AI: Image to Image",
- "description": "Transform your images with high-quality image-to-image generation. Perfect for professional image editing and style transfer.",
+ "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp"
@@ -554,14 +570,14 @@
  {
  "name": "api_ideogram_v3_t2i",
  "title": "Ideogram V3: Text to Image",
- "description": "Generate images with high-quality image-prompt alignment, photorealism, and text rendering. Create professional-quality logos, promotional posters, landing page concepts, product photography, and more. Effortlessly craft sophisticated spatial compositions with intricate backgrounds, precise and nuanced lighting and colors, and lifelike environmental detail.",
+ "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_openai_image_1_t2i",
  "title": "OpenAI: GPT-Image-1 Text to Image",
- "description": "Use GPT Image 1 API to generate images from text descriptions.",
+ "description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
@@ -569,7 +585,7 @@
  {
  "name": "api_openai_image_1_i2i",
  "title": "OpenAI: GPT-Image-1 Image to Image",
- "description": "Use GPT Image 1 API to generate images from images.",
+ "description": "Generate images from input images using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -578,7 +594,7 @@
  {
  "name": "api_openai_image_1_inpaint",
  "title": "OpenAI: GPT-Image-1 Inpaint",
- "description": "Use GPT Image 1 API to inpaint images.",
+ "description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -587,7 +603,7 @@
  {
  "name": "api_openai_image_1_multi_inputs",
  "title": "OpenAI: GPT-Image-1 Multi Inputs",
- "description": "Use GPT Image 1 API with multiple inputs to generate images.",
+ "description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -596,7 +612,7 @@
  {
  "name": "api_openai_dall_e_2_t2i",
  "title": "OpenAI: Dall-E 2 Text to Image",
- "description": "Use Dall-E 2 API to generate images from text descriptions.",
+ "description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
@@ -604,7 +620,7 @@
  {
  "name": "api_openai_dall_e_2_inpaint",
  "title": "OpenAI: Dall-E 2 Inpaint",
- "description": "Use Dall-E 2 API to inpaint images.",
+ "description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -613,7 +629,7 @@
  {
  "name": "api_openai_dall_e_3_t2i",
  "title": "OpenAI: Dall-E 3 Text to Image",
- "description": "Use Dall-E 3 API to generate images from text descriptions.",
+ "description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3"
@@ -628,14 +644,14 @@
  {
  "name": "api_kling_i2v",
  "title": "Kling: Image to Video",
- "description": "Create videos with great prompt adherence for actions, expressions, and camera movements. Now supporting complex prompts with sequential actions for you to be the director of your scene.",
+ "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_kling_effects",
  "title": "Kling: Video Effects",
- "description": "Apply stunning visual effects to your images and transform them into dynamic videos. Choose from a variety of effects.",
+ "description": "Generate dynamic videos by applying visual effects to images using Kling.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -670,21 +686,21 @@
  {
  "name": "api_hailuo_minimax_i2v",
  "title": "MiniMax: Image to Video",
- "description": "Create refined videos from images and text, including CGI integration and trendy photo effects like viral AI hugging. Choose from a variety of video styles and themes to match your creative vision.",
+ "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_pixverse_i2v",
  "title": "PixVerse: Image to Video",
- "description": "Transforms static images into dynamic videos with motion and effects.",
+ "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_pixverse_template_i2v",
  "title": "PixVerse Templates: Image to Video",
- "description": "Transforms static images into dynamic videos with motion and effects.",
+ "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -698,14 +714,14 @@
  {
  "name": "api_runway_gen3a_turbo_image_to_video",
  "title": "Runway: Gen3a Turbo Image to Video",
- "description": "Create cinematic videos from static images with Runway's Gen3a Turbo speed.",
+ "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_runway_gen4_turo_image_to_video",
  "title": "Runway: Gen4 Turbo Image to Video",
- "description": "Transform images into dynamic videos using Runway's latest Gen4 technology.",
+ "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -719,21 +735,21 @@
  {
  "name": "api_pika_i2v",
  "title": "Pika: Image to Video",
- "description": "Transform a single static image into a smooth, animated video. Leverage Pika's AI technology to bring natural motion and life to your images.",
+ "description": "Generate smooth animated videos from single static images using Pika AI.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_pika_scene",
  "title": "Pika Scenes: Images to Video",
- "description": "Use multiple images as ingredients and generate videos that incorporate all of them.",
+ "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_veo2_i2v",
  "title": "Veo2: Image to Video",
- "description": "Use Google Veo2 API to generate videos from images.",
+ "description": "Generate videos from images using Google Veo2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  }
@@ -747,7 +763,7 @@
  {
  "name": "api_rodin_image_to_model",
  "title": "Rodin: Image to Model",
- "description": "Transform single photos into detailed 3D sculptures with Rodin's artistic AI.",
+ "description": "Generate detailed 3D models from single photos using Rodin AI.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp"
@@ -770,7 +786,7 @@
  {
  "name": "api_tripo_image_to_model",
  "title": "Tripo: Image to Model",
- "description": "Convert 2D images into professional 3D assets using Tripo's engine.",
+ "description": "Generate professional 3D assets from 2D images using Tripo engine.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp"
@@ -816,7 +832,7 @@
  "title": "Upscale",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Enhance image quality in latent space.",
+ "description": "Upscale images by enhancing quality in latent space.",
  "thumbnailVariant": "zoomHover",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/"
  },
@@ -825,7 +841,7 @@
  "title": "ESRGAN",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use upscale models to enhance image quality.",
+ "description": "Upscale images using ESRGAN models to enhance quality.",
  "thumbnailVariant": "zoomHover",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/"
  },
@@ -834,7 +850,7 @@
  "title": "HiresFix ESRGAN Workflow",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use upscale models during intermediate steps.",
+ "description": "Upscale images using ESRGAN models during intermediate generation steps.",
  "thumbnailVariant": "zoomHover",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling"
  },
@@ -843,7 +859,7 @@
  "title": "Latent Upscale Different Prompt Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Upscale and change prompt across passes.",
+ "description": "Upscale images while changing prompts across generation passes.",
  "thumbnailVariant": "zoomHover",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples"
  }
@@ -859,7 +875,7 @@
  "title": "Scribble ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Control image generation with reference images.",
+ "description": "Generate images guided by scribble reference images using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/"
  },
@@ -868,7 +884,7 @@
  "title": "Pose ControlNet 2 Pass",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from pose references.",
+ "description": "Generate images guided by pose references using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet"
  },
@@ -877,7 +893,7 @@
  "title": "Depth ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create images with depth-aware generation.",
+ "description": "Generate images guided by depth information using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
  },
@@ -886,7 +902,7 @@
  "title": "Depth T2I Adapter",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Quickly generate depth-aware images with a T2I adapter.",
+ "description": "Generate images guided by depth information using T2I adapter.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
  },
@@ -895,7 +911,7 @@
  "title": "Mixing ControlNets",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Combine multiple ControlNet models together.",
+ "description": "Generate images by combining multiple ControlNet models.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets"
  }
@@ -911,7 +927,7 @@
  "title": "Area Composition",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Control image composition with areas.",
+ "description": "Generate images by controlling composition with defined areas.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
  },
  {
@@ -919,7 +935,7 @@
  "title": "Area Composition Reversed",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Reverse area composition workflow.",
+ "description": "Generate images using reverse area composition workflow.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
  },
  {
@@ -927,7 +943,7 @@
  "title": "Area Composition Square Area for Subject",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create consistent subject placement.",
+ "description": "Generate images with consistent subject placement using area composition.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition"
  }
  ]
@@ -942,7 +958,7 @@
  "title": "Hunyuan3D 2.0",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use Hunyuan3D 2.0 to generate models from a single view.",
+ "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
  "tutorialUrl": ""
  },
  {
@@ -950,7 +966,7 @@
  "title": "Hunyuan3D 2.0 MV",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": " Use Hunyuan3D 2mv to generate models from multiple views.",
+ "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
  "tutorialUrl": "",
  "thumbnailVariant": "compareSlider"
  },
@@ -959,7 +975,7 @@
  "title": "Hunyuan3D 2.0 MV Turbo",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use Hunyuan3D 2mv turbo to generate models from multiple views.",
+ "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
  "tutorialUrl": "",
  "thumbnailVariant": "compareSlider"
  },
@@ -968,7 +984,7 @@
  "title": "Stable Zero123",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate 3D views from single images.",
+ "description": "Generate 3D views from single images using Stable Zero123.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/"
  }
  ]
@@ -983,7 +999,7 @@
  "title": "Stable Audio",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
- "description": "Generate audio from text descriptions.",
+ "description": "Generate audio from text prompts using Stable Audio.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/"
  },
  {
@@ -991,7 +1007,7 @@
  "title": "ACE-Step v1 Text to Instrumentals Music",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
- "description": "Input text/lyrics to generate Instrumentals Music.",
+ "description": "Generate instrumental music from text prompts using ACE-Step v1.",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  },
  {
@@ -999,7 +1015,7 @@
  "title": "ACE Step v1 Text to Song",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
- "description": "Input text/lyrics to generate song with human vocals, supporting multilingual & style customization.",
+ "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  },
  {
@@ -1007,7 +1023,7 @@
  "title": "ACE Step v1 M2M Editing",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
- "description": "Use M2M to edit existing song, change the style, lyrics, etc.",
+ "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  }
  ]
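
Note: every hunk above edits a single JSON template index. Each category object carries "title", "type", and a "templates" array, and each template entry carries "name", "title", "mediaType", "mediaSubtype", and "description", plus optional "tutorialUrl" and "thumbnailVariant". Below is a minimal sketch of how such an index could be consumed; the top-level structure (a JSON array of category objects) is inferred from the hunks, and INDEX_PATH is a hypothetical location that this diff does not confirm.

import json
from pathlib import Path

# Hypothetical index location; the actual path inside the wheel may differ.
INDEX_PATH = Path("comfyui_workflow_templates/templates/index.json")

def iter_templates(index_path: Path = INDEX_PATH):
    """Yield (category_title, entry) pairs from the template index.

    Field names match the entries visible in the diff above;
    "tutorialUrl" and "thumbnailVariant" are optional per entry.
    """
    categories = json.loads(index_path.read_text(encoding="utf-8"))
    for category in categories:
        for entry in category.get("templates", []):
            yield category.get("title", ""), entry

if __name__ == "__main__":
    # E.g. the entries added in 0.1.29 would print as
    # "[Image] Cosmos Predict2 2B T2I: ..." and
    # "[Video] Cosmos Predict2 2B Video2World 480p 16fps: ...".
    for category_title, entry in iter_templates():
        url = entry.get("tutorialUrl", "")
        line = f"[{category_title}] {entry['title']}: {entry['description']}"
        print(line + (f" ({url})" if url else ""))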