comfyui_workflow_templates-0.1.27-py3-none-any.whl → comfyui_workflow_templates-0.1.29-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
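Wheels are ordinary zip archives, so a diff like the one below can be reproduced locally. A minimal sketch in Python, assuming both versions are still downloadable from PyPI and were fetched first with `pip download comfyui-workflow-templates==0.1.27 --no-deps -d old` (and likewise `==0.1.29` into `new/`):

```python
import difflib
import zipfile

# Paths assume the wheels were fetched with `pip download` as noted above.
OLD = "old/comfyui_workflow_templates-0.1.27-py3-none-any.whl"
NEW = "new/comfyui_workflow_templates-0.1.29-py3-none-any.whl"
MEMBER = "comfyui_workflow_templates/templates/index.json"

def read_member(wheel_path: str, member: str) -> list[str]:
    """Read one text file out of a wheel (a wheel is just a zip archive)."""
    with zipfile.ZipFile(wheel_path) as wheel:
        return wheel.read(member).decode("utf-8").splitlines(keepends=True)

# Unified diff of the template index, the file carrying most of the changes.
diff = difflib.unified_diff(
    read_member(OLD, MEMBER), read_member(NEW, MEMBER),
    fromfile=f"0.1.27/{MEMBER}", tofile=f"0.1.29/{MEMBER}",
)
print("".join(diff))
```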
Files changed (105)
  1. comfyui_workflow_templates/templates/2_pass_pose_worship-1.webp +0 -0
  2. comfyui_workflow_templates/templates/2_pass_pose_worship-2.webp +0 -0
  3. comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model-1.webp +0 -0
  4. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model-1.webp +0 -0
  5. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model-2.webp +0 -0
  6. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo-1.webp +0 -0
  7. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo-2.webp +0 -0
  8. comfyui_workflow_templates/templates/area_composition-1.webp +0 -0
  9. comfyui_workflow_templates/templates/area_composition_reversed-1.webp +0 -0
  10. comfyui_workflow_templates/templates/area_composition_square_area_for_subject-1.webp +0 -0
  11. comfyui_workflow_templates/templates/controlnet_example-1.webp +0 -0
  12. comfyui_workflow_templates/templates/controlnet_example-2.webp +0 -0
  13. comfyui_workflow_templates/templates/default-1.webp +0 -0
  14. comfyui_workflow_templates/templates/depth_controlnet-1.webp +0 -0
  15. comfyui_workflow_templates/templates/depth_controlnet-2.webp +0 -0
  16. comfyui_workflow_templates/templates/depth_t2i_adapter-1.webp +0 -0
  17. comfyui_workflow_templates/templates/depth_t2i_adapter-2.webp +0 -0
  18. comfyui_workflow_templates/templates/embedding_example-1.webp +0 -0
  19. comfyui_workflow_templates/templates/esrgan_example-1.webp +0 -0
  20. comfyui_workflow_templates/templates/flux_canny_model_example-1.webp +0 -0
  21. comfyui_workflow_templates/templates/flux_canny_model_example-2.webp +0 -0
  22. comfyui_workflow_templates/templates/flux_depth_lora_example-1.webp +0 -0
  23. comfyui_workflow_templates/templates/flux_depth_lora_example-2.webp +0 -0
  24. comfyui_workflow_templates/templates/flux_dev_checkpoint_example-1.webp +0 -0
  25. comfyui_workflow_templates/templates/flux_dev_full_text_to_image-1.webp +0 -0
  26. comfyui_workflow_templates/templates/flux_dev_full_text_to_image.json +575 -0
  27. comfyui_workflow_templates/templates/flux_fill_inpaint_example-1.webp +0 -0
  28. comfyui_workflow_templates/templates/flux_fill_inpaint_example-2.webp +0 -0
  29. comfyui_workflow_templates/templates/flux_fill_outpaint_example-1.webp +0 -0
  30. comfyui_workflow_templates/templates/flux_fill_outpaint_example-2.webp +0 -0
  31. comfyui_workflow_templates/templates/flux_redux_model_example-1.webp +0 -0
  32. comfyui_workflow_templates/templates/flux_schnell-1.webp +0 -0
  33. comfyui_workflow_templates/templates/flux_schnell_full_text_to_image-1.webp +0 -0
  34. comfyui_workflow_templates/templates/flux_schnell_full_text_to_image.json +575 -0
  35. comfyui_workflow_templates/templates/gligen_textbox_example-1.webp +0 -0
  36. comfyui_workflow_templates/templates/hidream_e1_full-1.webp +0 -0
  37. comfyui_workflow_templates/templates/hidream_e1_full-2.webp +0 -0
  38. comfyui_workflow_templates/templates/hidream_i1_dev-1.webp +0 -0
  39. comfyui_workflow_templates/templates/hidream_i1_fast-1.webp +0 -0
  40. comfyui_workflow_templates/templates/hidream_i1_full-1.webp +0 -0
  41. comfyui_workflow_templates/templates/hiresfix_esrgan_workflow-1.webp +0 -0
  42. comfyui_workflow_templates/templates/hiresfix_latent_workflow-1.webp +0 -0
  43. comfyui_workflow_templates/templates/hunyuan_video_text_to_video-1.webp +0 -0
  44. comfyui_workflow_templates/templates/image2image-1.webp +0 -0
  45. comfyui_workflow_templates/templates/image_chroma_text_to_image-1.webp +0 -0
  46. comfyui_workflow_templates/templates/image_cosmos_predict2_2B_t2i-1.webp +0 -0
  47. comfyui_workflow_templates/templates/image_cosmos_predict2_2B_t2i.json +544 -0
  48. comfyui_workflow_templates/templates/image_lotus_depth_v1_1-1.webp +0 -0
  49. comfyui_workflow_templates/templates/image_lotus_depth_v1_1-2.webp +0 -0
  50. comfyui_workflow_templates/templates/image_to_video-1.webp +0 -0
  51. comfyui_workflow_templates/templates/image_to_video_wan-1.webp +0 -0
  52. comfyui_workflow_templates/templates/index.json +131 -89
  53. comfyui_workflow_templates/templates/inpaint_example-1.webp +0 -0
  54. comfyui_workflow_templates/templates/inpaint_example-2.webp +0 -0
  55. comfyui_workflow_templates/templates/inpaint_model_outpainting-1.webp +0 -0
  56. comfyui_workflow_templates/templates/inpaint_model_outpainting-2.webp +0 -0
  57. comfyui_workflow_templates/templates/inpaint_model_outpainting.json +57 -262
  58. comfyui_workflow_templates/templates/latent_upscale_different_prompt_model-1.webp +0 -0
  59. comfyui_workflow_templates/templates/lora-1.webp +0 -0
  60. comfyui_workflow_templates/templates/lora_multiple-1.webp +0 -0
  61. comfyui_workflow_templates/templates/ltxv_image_to_video-1.webp +0 -0
  62. comfyui_workflow_templates/templates/ltxv_text_to_video-1.webp +0 -0
  63. comfyui_workflow_templates/templates/mixing_controlnets-1.webp +0 -0
  64. comfyui_workflow_templates/templates/mixing_controlnets-2.webp +0 -0
  65. comfyui_workflow_templates/templates/mochi_text_to_video_example-1.webp +0 -0
  66. comfyui_workflow_templates/templates/sd3.5_large_blur-1.webp +0 -0
  67. comfyui_workflow_templates/templates/sd3.5_large_blur-2.webp +0 -0
  68. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example-1.webp +0 -0
  69. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example-2.webp +0 -0
  70. comfyui_workflow_templates/templates/sd3.5_large_depth-1.webp +0 -0
  71. comfyui_workflow_templates/templates/sd3.5_large_depth-2.webp +0 -0
  72. comfyui_workflow_templates/templates/sd3.5_simple_example-1.webp +0 -0
  73. comfyui_workflow_templates/templates/sdxl_refiner_prompt_example-1.webp +0 -0
  74. comfyui_workflow_templates/templates/sdxl_revision_text_prompts-1.webp +0 -0
  75. comfyui_workflow_templates/templates/sdxl_revision_zero_positive-1.webp +0 -0
  76. comfyui_workflow_templates/templates/sdxl_simple_example-1.webp +0 -0
  77. comfyui_workflow_templates/templates/sdxlturbo_example-1.webp +0 -0
  78. comfyui_workflow_templates/templates/text_to_video_wan-1.webp +0 -0
  79. comfyui_workflow_templates/templates/txt_to_image_to_video-1.webp +0 -0
  80. comfyui_workflow_templates/templates/video_cosmos_predict2_2B_video2world_480p_16fps-1.webp +0 -0
  81. comfyui_workflow_templates/templates/video_cosmos_predict2_2B_video2world_480p_16fps.json +724 -0
  82. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_1.3B-1.webp +0 -0
  83. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_1.3B.json +5 -5
  84. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_14B-1.webp +0 -0
  85. comfyui_workflow_templates/templates/video_wan_vace_14B_ref2v-1.webp +0 -0
  86. comfyui_workflow_templates/templates/video_wan_vace_14B_t2v-1.webp +0 -0
  87. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v-1.webp +0 -0
  88. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v-2.webp +0 -0
  89. comfyui_workflow_templates/templates/video_wan_vace_flf2v-1.webp +0 -0
  90. comfyui_workflow_templates/templates/video_wan_vace_inpainting-1.webp +0 -0
  91. comfyui_workflow_templates/templates/video_wan_vace_inpainting-2.webp +0 -0
  92. comfyui_workflow_templates/templates/video_wan_vace_inpainting.json +2139 -0
  93. comfyui_workflow_templates/templates/video_wan_vace_outpainting-1.webp +0 -0
  94. comfyui_workflow_templates/templates/video_wan_vace_outpainting-2.webp +0 -0
  95. comfyui_workflow_templates/templates/wan2.1_flf2v_720_f16-1.webp +0 -0
  96. comfyui_workflow_templates/templates/wan2.1_fun_control-1.webp +0 -0
  97. comfyui_workflow_templates/templates/wan2.1_fun_control-2.webp +0 -0
  98. comfyui_workflow_templates/templates/wan2.1_fun_inp-1.webp +0 -0
  99. {comfyui_workflow_templates-0.1.27.dist-info → comfyui_workflow_templates-0.1.29.dist-info}/METADATA +1 -1
  100. {comfyui_workflow_templates-0.1.27.dist-info → comfyui_workflow_templates-0.1.29.dist-info}/RECORD +103 -92
  101. comfyui_workflow_templates/templates/area_composition_reversed-1-2.webp +0 -0
  102. comfyui_workflow_templates/templates/inpain_model_outpainting.json +0 -375
  103. {comfyui_workflow_templates-0.1.27.dist-info → comfyui_workflow_templates-0.1.29.dist-info}/WHEEL +0 -0
  104. {comfyui_workflow_templates-0.1.27.dist-info → comfyui_workflow_templates-0.1.29.dist-info}/licenses/LICENSE +0 -0
  105. {comfyui_workflow_templates-0.1.27.dist-info → comfyui_workflow_templates-0.1.29.dist-info}/top_level.txt +0 -0
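Nearly all of the behavioral change sits in comfyui_workflow_templates/templates/index.json (+131 -89), the manifest that lists each bundled workflow with its title, description, media type, and tutorial link; the hunks below are from that file. As a rough sketch of how the installed package can be inspected, assuming index.json is a JSON array of category objects each holding a `templates` array (the shape the hunks suggest):

```python
import json
from importlib import resources

# Read the bundled manifest from the installed package (importlib.resources
# works here because the templates ship as package data).
index_text = (
    resources.files("comfyui_workflow_templates")
    .joinpath("templates/index.json")
    .read_text(encoding="utf-8")
)

# Assumption: a list of categories, each carrying a "templates" array.
for category in json.loads(index_text):
    print(category.get("title", "<untitled category>"))
    for entry in category.get("templates", []):
        print(f"  {entry['name']}: {entry.get('description', '')}")
```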
comfyui_workflow_templates/templates/index.json
@@ -9,7 +9,7 @@
  "title": "Image Generation",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from text descriptions."
+ "description": "Generate images from text prompts."
  },
  {
  "name": "image2image",
@@ -24,7 +24,7 @@
  "title": "Lora",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Apply LoRA models for specialized styles or subjects.",
+ "description": "Generate images with LoRA models for specialized styles or subjects.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
  },
  {
@@ -32,7 +32,7 @@
  "title": "Lora Multiple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Combine multiple LoRA models for unique results.",
+ "description": "Generate images by combining multiple LoRA models.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
  },
  {
@@ -58,7 +58,7 @@
  "title": "Embedding",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use textual inversion for consistent styles.",
+ "description": "Generate images using textual inversion for consistent styles.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/"
  },
  {
@@ -66,7 +66,7 @@
  "title": "Gligen Textbox",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Specify the location and size of objects.",
+ "description": "Generate images with precise object placement using text boxes.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/"
  }
  ]
@@ -78,18 +78,34 @@
  "templates": [
  {
  "name": "flux_dev_checkpoint_example",
- "title": "Flux Dev",
+ "title": "Flux Dev fp8",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create images using Flux development models.",
+ "description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-dev-1"
  },
  {
  "name": "flux_schnell",
- "title": "Flux Schnell",
+ "title": "Flux Schnell fp8",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images quickly with Flux Schnell.",
+ "description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-schnell-1"
+ },
+ {
+ "name": "flux_dev_full_text_to_image",
+ "title": "Flux Dev full text to image",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-dev-1"
+ },
+ {
+ "name": "flux_schnell_full_text_to_image",
+ "title": "Flux Schnell full text to image",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-schnell-1"
  },
  {
@@ -97,7 +113,7 @@
  "title": "Flux Inpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Fill in missing parts of images.",
+ "description": "Fill missing parts of images using Flux inpainting.",
  "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
  },
@@ -106,7 +122,7 @@
  "title": "Flux Outpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Extend images using Flux outpainting.",
+ "description": "Extend images beyond boundaries using Flux outpainting.",
  "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
  },
@@ -115,7 +131,7 @@
  "title": "Flux Canny Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from edge detection.",
+ "description": "Generate images guided by edge detection using Flux Canny.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
  },
@@ -124,7 +140,7 @@
  "title": "Flux Depth Lora",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create images with depth-aware LoRA.",
+ "description": "Generate images guided by depth information using Flux LoRA.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
  },
@@ -133,7 +149,7 @@
  "title": "Flux Redux Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Transfer style from a reference image to guide image generation with Flux.",
+ "description": "Generate images by transferring style from reference images using Flux Redux.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#redux"
  }
  ]
@@ -143,6 +159,14 @@
  "title": "Image",
  "type": "image",
  "templates": [
+ {
+ "name": "image_cosmos_predict2_2B_t2i",
+ "title": "Cosmos Predict2 2B T2I",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
+ "tutorialUrl": "http://docs.comfy.org/tutorials/image/cosmos/cosmos-predict2-t2i"
+ },
  {
  "name": "image_chroma_text_to_image",
  "title": "Chroma text to image",
@@ -184,7 +208,7 @@
  "title": "SD3.5 Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with SD 3.5.",
+ "description": "Generate images using SD 3.5.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35"
  },
  {
@@ -192,7 +216,7 @@
  "title": "SD3.5 Large Canny ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use edge detection to guide image generation with SD 3.5.",
+ "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
  },
@@ -201,7 +225,7 @@
  "title": "SD3.5 Large Depth",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create depth-aware images with SD 3.5.",
+ "description": "Generate images guided by depth information using SD 3.5.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
  },
@@ -210,7 +234,7 @@
  "title": "SD3.5 Large Blur",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from blurred reference images with SD 3.5.",
+ "description": "Generate images guided by blurred reference images using SD 3.5.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
  },
@@ -219,7 +243,7 @@
  "title": "SDXL Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create high-quality images with SDXL.",
+ "description": "Generate high-quality images using SDXL.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
  },
  {
@@ -227,7 +251,7 @@
  "title": "SDXL Refiner Prompt",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Enhance SDXL outputs with refiners.",
+ "description": "Enhance SDXL images using refiner models.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
  },
  {
@@ -235,7 +259,7 @@
  "title": "SDXL Revision Text Prompts",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Transfer concepts from reference images to guide image generation with SDXL.",
+ "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
  },
  {
@@ -243,7 +267,7 @@
  "title": "SDXL Revision Zero Positive",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Add text prompts alongside reference images to guide image generation with SDXL.",
+ "description": "Generate images using both text prompts and reference images with SDXL Revision.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
  },
  {
@@ -251,7 +275,7 @@
  "title": "SDXL Turbo",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images in a single step with SDXL Turbo.",
+ "description": "Generate images in a single step using SDXL Turbo.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/"
  },
  {
@@ -269,24 +293,32 @@
  "title": "Video",
  "type": "video",
  "templates": [
+ {
+ "name": "video_cosmos_predict2_2B_video2world_480p_16fps",
+ "title": "Cosmos Predict2 2B Video2World 480p 16fps",
+ "description": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "http://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world"
+ },
  {
  "name": "video_wan_vace_14B_t2v",
  "title": "Wan VACE Text to Video",
- "description": "Generate high-quality and coherent videos directly from natural language prompts.",
+ "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
  },{
  "name": "video_wan_vace_14B_ref2v",
  "title": "Wan VACE Reference to Video",
- "description": "Extend a reference image into a stylistically consistent and content-coherent video sequence.",
+ "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
  },{
  "name": "video_wan_vace_14B_v2v",
  "title": "Wan VACE Control Video",
- "description": "Create new videos by controlling input videos and reference images",
+ "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -294,22 +326,30 @@
  },{
  "name": "video_wan_vace_outpainting",
  "title": "Wan VACE Outpainting",
- "description": "Use the video extension capabilities of Wan VACE to expand the video size.",
+ "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
  },{
  "name": "video_wan_vace_flf2v",
- "title": "Wan VACE FLF2V",
- "description": "Generate videos through controlling the first and last frames.",
+ "title": "Wan VACE First-Last Frame",
+ "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
+ },{
+ "name": "video_wan_vace_inpainting",
+ "title": "Wan VACE Inpainting",
+ "description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
  "mediaType": "image",
  "mediaSubtype": "webp",
+ "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
  },{
  "name": "video_wan2.1_fun_camera_v1.1_1.3B",
  "title": "Wan 2.1 Fun Camera 1.3B",
- "description": "Create dynamic videos with cinematic camera movements using the lightweight 1.3B model.",
+ "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },{
@@ -322,7 +362,7 @@
  {
  "name": "text_to_video_wan",
  "title": "Wan 2.1 Text to Video",
- "description": "Quickly Generate videos from text descriptions.",
+ "description": "Generate videos from text prompts using Wan 2.1.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#text-to-video"
@@ -330,7 +370,7 @@
  {
  "name": "image_to_video_wan",
  "title": "Wan 2.1 Image to Video",
- "description": "Quickly Generate videos from images.",
+ "description": "Generate videos from images using Wan 2.1.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#image-to-video"
@@ -338,7 +378,7 @@
  {
  "name": "wan2.1_fun_inp",
  "title": "Wan 2.1 Inpainting",
- "description": "Create videos from start and end frames.",
+ "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp"
@@ -346,7 +386,7 @@
  {
  "name": "wan2.1_fun_control",
  "title": "Wan 2.1 ControlNet",
- "description": "Guide video generation with pose, depth, edge controls and more.",
+ "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "hoverDissolve",
@@ -355,7 +395,7 @@
  {
  "name": "wan2.1_flf2v_720_f16",
  "title": "Wan 2.1 FLF2V 720p F16",
- "description": "Generate video through controlling the first and last frames.",
+ "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf"
@@ -365,7 +405,7 @@
  "title": "LTXV Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos from text descriptions.",
+ "description": "Generate videos from text prompts.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#text-to-video"
  },
  {
@@ -373,7 +413,7 @@
  "title": "LTXV Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Convert still images into videos.",
+ "description": "Generate videos from still images.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#image-to-video"
  },
  {
@@ -381,7 +421,7 @@
  "title": "Mochi Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create videos with Mochi model.",
+ "description": "Generate videos from text prompts using Mochi model.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/"
  },
  {
@@ -389,7 +429,7 @@
  "title": "Hunyuan Video Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos using Hunyuan model.",
+ "description": "Generate videos from text prompts using Hunyuan model.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/"
  },
  {
@@ -397,7 +437,7 @@
  "title": "SVD Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Transform images into animated videos.",
+ "description": "Generate videos from still images.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
  },
  {
@@ -405,7 +445,7 @@
  "title": "SVD Text to Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from text and then convert them into videos.",
+ "description": "Generate videos by first creating images from text prompts.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
  }
  ]
@@ -442,7 +482,7 @@
  {
  "name": "api_bfl_flux_pro_t2i",
  "title": "BFL Flux[Pro]: Text to Image",
- "description": "Create images with FLUX.1 [pro]'s excellent prompt following, visual quality, image detail and output diversity.",
+ "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -457,7 +497,7 @@
  {
  "name": "api_luma_photon_style_ref",
  "title": "Luma Photon: Style Reference",
- "description": "Apply and blend style references with exact control. Luma Photon captures the essence of each reference image, letting you combine distinct visual elements while maintaining professional quality.",
+ "description": "Generate images by blending style references with precise control using Luma Photon.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider"
@@ -465,7 +505,7 @@
  {
  "name": "api_recraft_image_gen_with_color_control",
  "title": "Recraft: Color Control Image Generation",
- "description": "Create a custom palette to reuse for multiple images or hand-pick colors for each photo. Match your brand's color palette and craft visuals that are distinctly yours.",
+ "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -479,13 +519,13 @@
  {
  "name": "api_recraft_vector_gen",
  "title": "Recraft: Vector Generation",
- "description": "Go from a text prompt to vector image with Recraft's AI vector generator. Produce the best-quality vector art for logos, posters, icon sets, ads, banners and mockups. Perfect your designs with sharp, high-quality SVG files. Create branded vector illustrations for your app or website in seconds.",
+ "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },{
  "name": "api_runway_text_to_image",
  "title": "Runway: Text to Image",
- "description": "Transform text prompts into high-quality images using Runway's cutting-edge AI model.",
+ "description": "Generate high-quality images from text prompts using Runway's AI model.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -507,7 +547,7 @@
  {
  "name": "api_stability_ai_i2i",
  "title": "Stability AI: Image to Image",
- "description": "Transform your images with high-quality image-to-image generation. Perfect for professional image editing and style transfer.",
+ "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp"
@@ -530,14 +570,14 @@
  {
  "name": "api_ideogram_v3_t2i",
  "title": "Ideogram V3: Text to Image",
- "description": "Generate images with high-quality image-prompt alignment, photorealism, and text rendering. Create professional-quality logos, promotional posters, landing page concepts, product photography, and more. Effortlessly craft sophisticated spatial compositions with intricate backgrounds, precise and nuanced lighting and colors, and lifelike environmental detail.",
+ "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_openai_image_1_t2i",
  "title": "OpenAI: GPT-Image-1 Text to Image",
- "description": "Use GPT Image 1 API to generate images from text descriptions.",
+ "description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
@@ -545,7 +585,7 @@
  {
  "name": "api_openai_image_1_i2i",
  "title": "OpenAI: GPT-Image-1 Image to Image",
- "description": "Use GPT Image 1 API to generate images from images.",
+ "description": "Generate images from input images using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -554,7 +594,7 @@
  {
  "name": "api_openai_image_1_inpaint",
  "title": "OpenAI: GPT-Image-1 Inpaint",
- "description": "Use GPT Image 1 API to inpaint images.",
+ "description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -563,7 +603,7 @@
  {
  "name": "api_openai_image_1_multi_inputs",
  "title": "OpenAI: GPT-Image-1 Multi Inputs",
- "description": "Use GPT Image 1 API with multiple inputs to generate images.",
+ "description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -572,7 +612,7 @@
  {
  "name": "api_openai_dall_e_2_t2i",
  "title": "OpenAI: Dall-E 2 Text to Image",
- "description": "Use Dall-E 2 API to generate images from text descriptions.",
+ "description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
@@ -580,7 +620,7 @@
  {
  "name": "api_openai_dall_e_2_inpaint",
  "title": "OpenAI: Dall-E 2 Inpaint",
- "description": "Use Dall-E 2 API to inpaint images.",
+ "description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -589,7 +629,7 @@
  {
  "name": "api_openai_dall_e_3_t2i",
  "title": "OpenAI: Dall-E 3 Text to Image",
- "description": "Use Dall-E 3 API to generate images from text descriptions.",
+ "description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3"
@@ -604,14 +644,14 @@
  {
  "name": "api_kling_i2v",
  "title": "Kling: Image to Video",
- "description": "Create videos with great prompt adherence for actions, expressions, and camera movements. Now supporting complex prompts with sequential actions for you to be the director of your scene.",
+ "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_kling_effects",
  "title": "Kling: Video Effects",
- "description": "Apply stunning visual effects to your images and transform them into dynamic videos. Choose from a variety of effects.",
+ "description": "Generate dynamic videos by applying visual effects to images using Kling.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -646,21 +686,21 @@
  {
  "name": "api_hailuo_minimax_i2v",
  "title": "MiniMax: Image to Video",
- "description": "Create refined videos from images and text, including CGI integration and trendy photo effects like viral AI hugging. Choose from a variety of video styles and themes to match your creative vision.",
+ "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_pixverse_i2v",
  "title": "PixVerse: Image to Video",
- "description": "Transforms static images into dynamic videos with motion and effects.",
+ "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_pixverse_template_i2v",
  "title": "PixVerse Templates: Image to Video",
- "description": "Transforms static images into dynamic videos with motion and effects.",
+ "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -674,14 +714,14 @@
  {
  "name": "api_runway_gen3a_turbo_image_to_video",
  "title": "Runway: Gen3a Turbo Image to Video",
- "description": "Create cinematic videos from static images with Runway's Gen3a Turbo speed.",
+ "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_runway_gen4_turo_image_to_video",
  "title": "Runway: Gen4 Turbo Image to Video",
- "description": "Transform images into dynamic videos using Runway's latest Gen4 technology.",
+ "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
@@ -695,21 +735,21 @@
  {
  "name": "api_pika_i2v",
  "title": "Pika: Image to Video",
- "description": "Transform a single static image into a smooth, animated video. Leverage Pika's AI technology to bring natural motion and life to your images.",
+ "description": "Generate smooth animated videos from single static images using Pika AI.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_pika_scene",
  "title": "Pika Scenes: Images to Video",
- "description": "Use multiple images as ingredients and generate videos that incorporate all of them.",
+ "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  },
  {
  "name": "api_veo2_i2v",
  "title": "Veo2: Image to Video",
- "description": "Use Google Veo2 API to generate videos from images.",
+ "description": "Generate videos from images using Google Veo2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp"
  }
@@ -723,7 +763,7 @@
  {
  "name": "api_rodin_image_to_model",
  "title": "Rodin: Image to Model",
- "description": "Transform single photos into detailed 3D sculptures with Rodin's artistic AI.",
+ "description": "Generate detailed 3D models from single photos using Rodin AI.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp"
@@ -746,7 +786,7 @@
  {
  "name": "api_tripo_image_to_model",
  "title": "Tripo: Image to Model",
- "description": "Convert 2D images into professional 3D assets using Tripo's engine.",
+ "description": "Generate professional 3D assets from 2D images using Tripo engine.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp"
@@ -792,7 +832,7 @@
  "title": "Upscale",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Enhance image quality in latent space.",
+ "description": "Upscale images by enhancing quality in latent space.",
  "thumbnailVariant": "zoomHover",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/"
  },
@@ -801,7 +841,7 @@
  "title": "ESRGAN",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use upscale models to enhance image quality.",
+ "description": "Upscale images using ESRGAN models to enhance quality.",
  "thumbnailVariant": "zoomHover",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/"
  },
@@ -810,7 +850,7 @@
  "title": "HiresFix ESRGAN Workflow",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use upscale models during intermediate steps.",
+ "description": "Upscale images using ESRGAN models during intermediate generation steps.",
  "thumbnailVariant": "zoomHover",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling"
  },
@@ -819,7 +859,7 @@
  "title": "Latent Upscale Different Prompt Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Upscale and change prompt across passes.",
+ "description": "Upscale images while changing prompts across generation passes.",
  "thumbnailVariant": "zoomHover",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples"
  }
@@ -835,7 +875,7 @@
  "title": "Scribble ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Control image generation with reference images.",
+ "description": "Generate images guided by scribble reference images using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/"
  },
@@ -844,7 +884,7 @@
  "title": "Pose ControlNet 2 Pass",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from pose references.",
+ "description": "Generate images guided by pose references using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet"
  },
@@ -853,7 +893,7 @@
  "title": "Depth ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create images with depth-aware generation.",
+ "description": "Generate images guided by depth information using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
  },
@@ -862,7 +902,7 @@
  "title": "Depth T2I Adapter",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Quickly generate depth-aware images with a T2I adapter.",
+ "description": "Generate images guided by depth information using T2I adapter.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
  },
@@ -871,7 +911,7 @@
  "title": "Mixing ControlNets",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Combine multiple ControlNet models together.",
+ "description": "Generate images by combining multiple ControlNet models.",
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets"
  }
@@ -887,7 +927,7 @@
  "title": "Area Composition",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Control image composition with areas.",
+ "description": "Generate images by controlling composition with defined areas.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
  },
  {
@@ -895,7 +935,7 @@
  "title": "Area Composition Reversed",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Reverse area composition workflow.",
+ "description": "Generate images using reverse area composition workflow.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
  },
  {
@@ -903,7 +943,7 @@
  "title": "Area Composition Square Area for Subject",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Create consistent subject placement.",
+ "description": "Generate images with consistent subject placement using area composition.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition"
  }
  ]
@@ -918,7 +958,7 @@
  "title": "Hunyuan3D 2.0",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use Hunyuan3D 2.0 to generate models from a single view.",
+ "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
  "tutorialUrl": ""
  },
  {
@@ -926,23 +966,25 @@
  "title": "Hunyuan3D 2.0 MV",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": " Use Hunyuan3D 2mv to generate models from multiple views.",
- "tutorialUrl": ""
+ "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
+ "tutorialUrl": "",
+ "thumbnailVariant": "compareSlider"
  },
  {
  "name": "3d_hunyuan3d_multiview_to_model_turbo",
  "title": "Hunyuan3D 2.0 MV Turbo",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Use Hunyuan3D 2mv turbo to generate models from multiple views.",
- "tutorialUrl": ""
+ "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
+ "tutorialUrl": "",
+ "thumbnailVariant": "compareSlider"
  },
  {
  "name": "stable_zero123_example",
  "title": "Stable Zero123",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate 3D views from single images.",
+ "description": "Generate 3D views from single images using Stable Zero123.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/"
  }
  ]
@@ -957,7 +999,7 @@
  "title": "Stable Audio",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
- "description": "Generate audio from text descriptions.",
+ "description": "Generate audio from text prompts using Stable Audio.",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/"
  },
  {
@@ -965,7 +1007,7 @@
  "title": "ACE-Step v1 Text to Instrumentals Music",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
- "description": "Input text/lyrics to generate Instrumentals Music.",
+ "description": "Generate instrumental music from text prompts using ACE-Step v1.",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  },
  {
@@ -973,7 +1015,7 @@
  "title": "ACE Step v1 Text to Song",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
- "description": "Input text/lyrics to generate song with human vocals, supporting multilingual & style customization.",
+ "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  },
  {
@@ -981,7 +1023,7 @@
  "title": "ACE Step v1 M2M Editing",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
- "description": "Use M2M to edit existing song, change the style, lyrics, etc.",
+ "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  }
  ]
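Across every hunk the entry shape stays consistent: `name`, `title`, `mediaType`, `mediaSubtype`, and `description` are always present, with `tutorialUrl` and `thumbnailVariant` optional. A hedged sketch of that implied record, with field meanings inferred from the diff rather than from any published schema:

```python
from dataclasses import dataclass, fields
from typing import Any, Optional

@dataclass
class TemplateEntry:
    """Template-index entry shape inferred from the hunks above."""
    name: str                                # workflow file stem, e.g. "flux_schnell"
    title: str                               # display name in the template browser
    mediaType: str                           # "image" or "audio" in this diff
    mediaSubtype: str                        # preview extension: "webp", "mp3"
    description: str
    tutorialUrl: Optional[str] = None        # may also be an empty string
    thumbnailVariant: Optional[str] = None   # "compareSlider", "hoverDissolve", "zoomHover"

def parse_entry(raw: dict[str, Any]) -> TemplateEntry:
    # Ignore unknown keys so the sketch survives future manifest additions.
    known = {f.name for f in fields(TemplateEntry)}
    return TemplateEntry(**{k: v for k, v in raw.items() if k in known})
```

Note that `mediaType` stays `"image"` even for entries in the Video category, which is consistent with the file list above: the previews for video templates ship as `.webp` files.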