comfyui-workflow-templates 0.1.91__py3-none-any.whl → 0.1.93__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

This version of comfyui-workflow-templates has been flagged as potentially problematic.

Files changed (24):
  1. comfyui_workflow_templates/templates/api_bytedance_flf2v-1.webp +0 -0
  2. comfyui_workflow_templates/templates/api_bytedance_flf2v.json +269 -0
  3. comfyui_workflow_templates/templates/api_bytedance_image_to_video-1.webp +0 -0
  4. comfyui_workflow_templates/templates/api_bytedance_image_to_video.json +208 -0
  5. comfyui_workflow_templates/templates/api_bytedance_text_to_video-1.webp +0 -0
  6. comfyui_workflow_templates/templates/api_bytedance_text_to_video.json +144 -0
  7. comfyui_workflow_templates/templates/api_kling_i2v.json +66 -58
  8. comfyui_workflow_templates/templates/api_wan_text_to_image-1.webp +0 -0
  9. comfyui_workflow_templates/templates/index.es.json +501 -285
  10. comfyui_workflow_templates/templates/index.fr.json +501 -285
  11. comfyui_workflow_templates/templates/index.ja.json +501 -285
  12. comfyui_workflow_templates/templates/index.json +555 -339
  13. comfyui_workflow_templates/templates/index.ko.json +501 -285
  14. comfyui_workflow_templates/templates/index.ru.json +501 -285
  15. comfyui_workflow_templates/templates/index.schema.json +4 -0
  16. comfyui_workflow_templates/templates/index.zh-TW.json +501 -285
  17. comfyui_workflow_templates/templates/index.zh.json +501 -285
  18. comfyui_workflow_templates/templates/video_wan2.1_alpha_t2v_14B-1.webp +0 -0
  19. comfyui_workflow_templates/templates/video_wan2.1_alpha_t2v_14B.json +978 -0
  20. {comfyui_workflow_templates-0.1.91.dist-info → comfyui_workflow_templates-0.1.93.dist-info}/METADATA +1 -1
  21. {comfyui_workflow_templates-0.1.91.dist-info → comfyui_workflow_templates-0.1.93.dist-info}/RECORD +24 -15
  22. {comfyui_workflow_templates-0.1.91.dist-info → comfyui_workflow_templates-0.1.93.dist-info}/WHEEL +0 -0
  23. {comfyui_workflow_templates-0.1.91.dist-info → comfyui_workflow_templates-0.1.93.dist-info}/licenses/LICENSE +0 -0
  24. {comfyui_workflow_templates-0.1.91.dist-info → comfyui_workflow_templates-0.1.93.dist-info}/top_level.txt +0 -0
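The substantive change across every index*.json locale file (and index.schema.json) is a new per-template "vram" field next to the existing "size" field. Below is a minimal sketch of how downstream code might read the bundled index and filter on the new field. It assumes the resource layout shown in the file list above, that the top level of index.json is a list of category objects each holding a "templates" array (as the hunks below suggest), and that "size" and "vram" are measured in gigabytes; none of this is confirmed by package documentation.

import json
from importlib import resources  # resources.files() requires Python 3.9+

def templates_fitting_vram(max_vram_gb: float) -> list[str]:
    # index.json ships inside the wheel under comfyui_workflow_templates/templates/
    index_file = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
    index = json.loads(index_file.read_text(encoding="utf-8"))
    names = []
    for category in index:  # assumed: top-level list of category objects
        for tpl in category.get("templates", []):
            vram = tpl.get("vram")  # new in this release; absent on older entries
            if vram is not None and 0 < vram <= max_vram_gb:
                names.append(tpl["name"])
    return names

print(templates_fitting_vram(8.0))  # e.g. the SD1.5 entries below, with "vram" of 2.88-6.0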
@@ -13,9 +13,10 @@
13
13
  "description": "Generate images from text prompts.",
14
14
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
15
15
  "tags": ["Text to Image", "Image"],
16
- "models": ["SD1.5"],
16
+ "models": ["SD1.5", "Stability"],
17
17
  "date": "2025-03-01",
18
- "size": 1.99
18
+ "size": 1.99,
19
+ "vram": 2.88
19
20
  },
20
21
  {
21
22
  "name": "image2image",
@@ -25,9 +26,10 @@
25
26
  "description": "Transform existing images using text prompts.",
26
27
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
27
28
  "tags": ["Image to Image", "Image"],
28
- "models": ["SD1.5"],
29
+ "models": ["SD1.5", "Stability"],
29
30
  "date": "2025-03-01",
30
- "size": 1.99
31
+ "size": 1.99,
32
+ "vram": 2.88
31
33
  },
32
34
  {
33
35
  "name": "lora",
@@ -37,8 +39,10 @@
37
39
  "description": "Generate images with LoRA models for specialized styles or subjects.",
38
40
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
39
41
  "tags": ["Text to Image", "Image"],
40
- "models": ["SD1.5"],
41
- "date": "2025-03-01"
42
+ "models": ["SD1.5", "Stability"],
43
+ "date": "2025-03-01",
44
+ "size": 2.27,
45
+ "vram": 2.88
42
46
  },
43
47
  {
44
48
  "name": "lora_multiple",
@@ -47,9 +51,11 @@
47
51
  "mediaSubtype": "webp",
48
52
  "description": "Generate images by combining multiple LoRA models.",
49
53
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
50
- "tags": ["Text to Image", "Image", "LoRA"],
51
- "models": ["SD1.5"],
52
- "date": "2025-03-01"
54
+ "tags": ["Text to Image", "Image"],
55
+ "models": ["SD1.5", "Stability"],
56
+ "date": "2025-03-01",
57
+ "size": 2.27,
58
+ "vram": 3.12
53
59
  },
54
60
  {
55
61
  "name": "inpaint_example",
@@ -59,10 +65,11 @@
59
65
  "description": "Edit specific parts of images seamlessly.",
60
66
  "thumbnailVariant": "compareSlider",
61
67
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
62
- "tags": ["Inpaint", "Image"],
63
- "models": ["SD1.5"],
68
+ "tags": ["Inpainting", "Image"],
69
+ "models": ["SD1.5", "Stability"],
64
70
  "date": "2025-03-01",
65
- "size": 4.86
71
+ "size": 4.86,
72
+ "vram": 3.82
66
73
  },
67
74
  {
68
75
  "name": "inpaint_model_outpainting",
@@ -72,10 +79,11 @@
72
79
  "description": "Extend images beyond their original boundaries.",
73
80
  "thumbnailVariant": "compareSlider",
74
81
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
75
- "tags": ["Outpaint", "Image"],
76
- "models": ["SD1.5"],
82
+ "tags": ["Outpainting", "Image"],
83
+ "models": ["SD1.5", "Stability"],
77
84
  "date": "2025-03-01",
78
- "size": 4.86
85
+ "size": 4.86,
86
+ "vram": 3.82
79
87
  },
80
88
  {
81
89
  "name": "embedding_example",
@@ -84,10 +92,11 @@
84
92
  "mediaSubtype": "webp",
85
93
  "description": "Generate images using textual inversion for consistent styles.",
86
94
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
87
- "tags": ["Embedding", "Image"],
88
- "models": ["SD1.5"],
95
+ "tags": ["Text to Image", "Image"],
96
+ "models": ["SD1.5", "Stability"],
89
97
  "date": "2025-03-01",
90
- "size": 4.86
98
+ "size": 4.86,
99
+ "vram": 3.84
91
100
  },
92
101
  {
93
102
  "name": "gligen_textbox_example",
@@ -96,10 +105,11 @@
96
105
  "mediaSubtype": "webp",
97
106
  "description": "Generate images with precise object placement using text boxes.",
98
107
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
99
- "tags": ["Gligen", "Image"],
100
- "models": ["SD1.5"],
108
+ "tags": ["Image"],
109
+ "models": ["SD1.5", "Stability"],
101
110
  "date": "2025-03-01",
102
- "size": 2.77
111
+ "size": 2.77,
112
+ "vram": 3.8
103
113
  },
104
114
  {
105
115
  "name": "area_composition",
@@ -107,11 +117,12 @@
107
117
  "mediaType": "image",
108
118
  "mediaSubtype": "webp",
109
119
  "description": "Generate images by controlling composition with defined areas.",
110
- "tags": ["Area Composition", "Image"],
111
- "models": ["SD1.5"],
120
+ "tags": ["Text to Image", "Image"],
121
+ "models": ["SD1.5", "Stability"],
112
122
  "date": "2025-03-01",
113
123
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/",
114
- "size": 2.3
124
+ "size": 2.3,
125
+ "vram": 5.76
115
126
  },
116
127
  {
117
128
  "name": "area_composition_square_area_for_subject",
@@ -119,11 +130,12 @@
119
130
  "mediaType": "image",
120
131
  "mediaSubtype": "webp",
121
132
  "description": "Generate images with consistent subject placement using area composition.",
122
- "tags": ["Area Composition", "Image"],
123
- "models": ["SD1.5"],
133
+ "tags": ["Text to Image", "Image"],
134
+ "models": ["SD1.5", "Stability"],
124
135
  "date": "2025-03-01",
125
136
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition",
126
- "size": 2.3
137
+ "size": 2.3,
138
+ "vram": 5.52
127
139
  },
128
140
  {
129
141
  "name": "hiresfix_latent_workflow",
@@ -133,10 +145,11 @@
133
145
  "description": "Upscale images by enhancing quality in latent space.",
134
146
  "thumbnailVariant": "compareSlider",
135
147
  "tags": ["Upscale", "Image"],
136
- "models": ["SD1.5"],
148
+ "models": ["SD1.5", "Stability"],
137
149
  "date": "2025-03-01",
138
150
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/",
139
- "size": 1.99
151
+ "size": 1.99,
152
+ "vram": 3.66
140
153
  },
141
154
  {
142
155
  "name": "esrgan_example",
@@ -146,10 +159,11 @@
146
159
  "description": "Upscale images using ESRGAN models to enhance quality.",
147
160
  "thumbnailVariant": "compareSlider",
148
161
  "tags": ["Upscale", "Image"],
149
- "models": ["SD1.5"],
162
+ "models": ["SD1.5", "Stability"],
150
163
  "date": "2025-03-01",
151
164
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/",
152
- "size": 2.05
165
+ "size": 2.05,
166
+ "vram": 6.0
153
167
  },
154
168
  {
155
169
  "name": "hiresfix_esrgan_workflow",
@@ -159,10 +173,11 @@
159
173
  "description": "Upscale images using ESRGAN models during intermediate generation steps.",
160
174
  "thumbnailVariant": "compareSlider",
161
175
  "tags": ["Upscale", "Image"],
162
- "models": ["SD1.5"],
176
+ "models": ["SD1.5", "Stability"],
163
177
  "date": "2025-03-01",
164
178
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling",
165
- "size": 2.05
179
+ "size": 2.05,
180
+ "vram": 6.0
166
181
  },
167
182
  {
168
183
  "name": "latent_upscale_different_prompt_model",
@@ -172,10 +187,11 @@
172
187
  "description": "Upscale images while changing prompts across generation passes.",
173
188
  "thumbnailVariant": "zoomHover",
174
189
  "tags": ["Upscale", "Image"],
175
- "models": ["SD1.5"],
190
+ "models": ["SD1.5", "Stability"],
176
191
  "date": "2025-03-01",
177
192
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples",
178
- "size": 3.97
193
+ "size": 3.97,
194
+ "vram": 4.8
179
195
  },
180
196
  {
181
197
  "name": "controlnet_example",
@@ -185,10 +201,11 @@
185
201
  "description": "Generate images guided by scribble reference images using ControlNet.",
186
202
  "thumbnailVariant": "hoverDissolve",
187
203
  "tags": ["ControlNet", "Image"],
188
- "models": ["SD1.5"],
204
+ "models": ["SD1.5", "Stability"],
189
205
  "date": "2025-03-01",
190
206
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/",
191
- "size": 2.97
207
+ "size": 2.97,
208
+ "vram": 6.0
192
209
  },
193
210
  {
194
211
  "name": "2_pass_pose_worship",
@@ -198,10 +215,11 @@
198
215
  "description": "Generate images guided by pose references using ControlNet.",
199
216
  "thumbnailVariant": "hoverDissolve",
200
217
  "tags": ["ControlNet", "Image"],
201
- "models": ["SD1.5"],
218
+ "models": ["SD1.5", "Stability"],
202
219
  "date": "2025-03-01",
203
220
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet",
204
- "size": 0.98
221
+ "size": 4.34,
222
+ "vram": 6.0
205
223
  },
206
224
  {
207
225
  "name": "depth_controlnet",
@@ -210,11 +228,12 @@
210
228
  "mediaSubtype": "webp",
211
229
  "description": "Generate images guided by depth information using ControlNet.",
212
230
  "thumbnailVariant": "hoverDissolve",
213
- "tags": ["ControlNet", "Image"],
214
- "models": ["SD1.5"],
231
+ "tags": ["ControlNet", "Image", "Text to Image"],
232
+ "models": ["SD1.5", "Stability"],
215
233
  "date": "2025-03-01",
216
234
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
217
- "size": 0.67
235
+ "size": 2.69,
236
+ "vram": 6.0
218
237
  },
219
238
  {
220
239
  "name": "depth_t2i_adapter",
@@ -223,10 +242,12 @@
223
242
  "mediaSubtype": "webp",
224
243
  "description": "Generate images guided by depth information using T2I adapter.",
225
244
  "thumbnailVariant": "hoverDissolve",
226
- "tags": ["T2I Adapter", "Image"],
227
- "models": ["SD1.5"],
245
+ "tags": ["ControlNet", "Image", "Text to Image"],
246
+ "models": ["SD1.5", "Stability"],
228
247
  "date": "2025-03-01",
229
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
248
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
249
+ "size": 2.35,
250
+ "vram": 6.0
230
251
  },
231
252
  {
232
253
  "name": "mixing_controlnets",
@@ -235,42 +256,109 @@
235
256
  "mediaSubtype": "webp",
236
257
  "description": "Generate images by combining multiple ControlNet models.",
237
258
  "thumbnailVariant": "hoverDissolve",
238
- "tags": ["ControlNet", "Image"],
239
- "models": ["SD1.5"],
259
+ "tags": ["ControlNet", "Image", "Text to Image"],
260
+ "models": ["SD1.5", "Stability"],
240
261
  "date": "2025-03-01",
241
262
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets",
242
- "size": 1.66
263
+ "size": 3.1,
264
+ "vram": 6.0
243
265
  }
244
266
  ]
245
267
  },
246
268
  {
247
269
  "moduleName": "default",
248
270
  "category": "GENERATION TYPE",
249
- "icon": "icon-[lucide--volume-2]",
250
- "title": "Flux",
271
+ "icon": "icon-[lucide--image]",
272
+ "title": "Image",
251
273
  "type": "image",
252
274
  "templates": [
253
275
  {
254
- "name": "image_chroma1_radiance_text_to_image",
255
- "title": "Chroma1 Radiance text to image",
276
+ "name": "image_qwen_image",
277
+ "title": "Qwen-Image Text to Image",
256
278
  "mediaType": "image",
257
279
  "mediaSubtype": "webp",
258
- "description": "Chroma1-Radiance works directly with image pixels instead of compressed latents, delivering higher quality images with reduced artifacts and distortion.",
280
+ "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model..",
281
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
259
282
  "tags": ["Text to Image", "Image"],
260
- "models": ["Chroma1 Radiance"],
261
- "date": "2025-09-18",
262
- "size": 21.69
283
+ "models": ["Qwen-Image"],
284
+ "date": "2025-08-05",
285
+ "size": 29.59
263
286
  },
264
287
  {
265
- "name": "image_chroma_text_to_image",
266
- "title": "Chroma text to image",
288
+ "name": "image_qwen_image_instantx_controlnet",
289
+ "title": "Qwen-Image InstantX Union ControlNet",
267
290
  "mediaType": "image",
268
291
  "mediaSubtype": "webp",
269
- "description": "Chroma - enhanced Flux model with improved image quality and better prompt understanding for stunning text-to-image generation.",
270
- "tags": ["Text to Image", "Image"],
271
- "models": ["Chroma", "Flux"],
272
- "date": "2025-06-04",
273
- "size": 21.69
292
+ "description": "Generate images with Qwen-Image InstantX ControlNet, supporting canny, soft edge, depth, pose",
293
+ "tags": ["Image to Image", "Image", "ControlNet"],
294
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
295
+ "models": ["Qwen-Image"],
296
+ "date": "2025-08-23",
297
+ "size": 32.88
298
+ },
299
+ {
300
+ "name": "image_qwen_image_instantx_inpainting_controlnet",
301
+ "title": "Qwen-Image InstantX Inpainting ControlNet",
302
+ "mediaType": "image",
303
+ "mediaSubtype": "webp",
304
+ "thumbnailVariant": "compareSlider",
305
+ "description": "Professional inpainting and image editing with Qwen-Image InstantX ControlNet. Supports object replacement, text modification, background changes, and outpainting.",
306
+ "tags": ["Image to Image", "Image", "ControlNet", "Inpainting"],
307
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
308
+ "models": ["Qwen-Image"],
309
+ "date": "2025-09-12",
310
+ "size": 33.54
311
+ },
312
+ {
313
+ "name": "image_qwen_image_union_control_lora",
314
+ "title": "Qwen-Image Union Control",
315
+ "mediaType": "image",
316
+ "mediaSubtype": "webp",
317
+ "description": "Generate images with precise structural control using Qwen-Image's unified ControlNet LoRA. Supports multiple control types including canny, depth, lineart, softedge, normal, and openpose for diverse creative applications.",
318
+ "tags": ["Text to Image", "Image", "ControlNet"],
319
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
320
+ "models": ["Qwen-Image"],
321
+ "date": "2025-08-23",
322
+ "size": 30.47
323
+ },
324
+ {
325
+ "name": "image_qwen_image_controlnet_patch",
326
+ "title": "Qwen-Image ControlNet model patch",
327
+ "mediaType": "image",
328
+ "mediaSubtype": "webp",
329
+ "thumbnailVariant": "compareSlider",
330
+ "description": "Control image generation using Qwen-Image ControlNet models. Supports canny, depth, and inpainting controls through model patching.",
331
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
332
+ "tags": ["Text to Image", "Image", "ControlNet"],
333
+ "models": ["Qwen-Image"],
334
+ "date": "2025-08-24",
335
+ "size": 31.7
336
+ },
337
+ {
338
+ "name": "image_qwen_image_edit_2509",
339
+ "title": "Qwen Image Edit 2509",
340
+ "mediaType": "image",
341
+ "mediaSubtype": "webp",
342
+ "thumbnailVariant": "compareSlider",
343
+ "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
344
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
345
+ "tags": ["Image to Image", "Image Edit", "ControlNet"],
346
+ "models": ["Qwen-Image"],
347
+ "date": "2025-09-25",
348
+ "size": 29.59
349
+ },
350
+ {
351
+ "name": "image_qwen_image_edit",
352
+ "title": "Qwen Image Edit",
353
+ "mediaType": "image",
354
+ "mediaSubtype": "webp",
355
+ "thumbnailVariant": "compareSlider",
356
+ "description": "Edit images with precise bilingual text editing and dual semantic/appearance editing capabilities using Qwen-Image-Edit's 20B MMDiT model.",
357
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
358
+ "tags": ["Image to Image", "Image Edit"],
359
+ "models": ["Qwen-Image"],
360
+ "date": "2025-08-18",
361
+ "size": 29.59
274
362
  },
275
363
  {
276
364
  "name": "flux_kontext_dev_basic",
@@ -283,7 +371,32 @@
283
371
  "tags": ["Image Edit", "Image to Image"],
284
372
  "models": ["Flux"],
285
373
  "date": "2025-06-26",
286
- "size": 16.43
374
+ "size": 16.43,
375
+ "vram": 18.0
376
+ },
377
+ {
378
+ "name": "image_chroma1_radiance_text_to_image",
379
+ "title": "Chroma1 Radiance text to image",
380
+ "mediaType": "image",
381
+ "mediaSubtype": "webp",
382
+ "description": "Chroma1-Radiance works directly with image pixels instead of compressed latents, delivering higher quality images with reduced artifacts and distortion.",
383
+ "tags": ["Text to Image", "Image"],
384
+ "models": ["Chroma"],
385
+ "date": "2025-09-18",
386
+ "size": 22.0,
387
+ "vram": 22.0
388
+ },
389
+ {
390
+ "name": "image_chroma_text_to_image",
391
+ "title": "Chroma text to image",
392
+ "mediaType": "image",
393
+ "mediaSubtype": "webp",
394
+ "description": "Chroma - enhanced Flux model with improved image quality and better prompt understanding for stunning text-to-image generation.",
395
+ "tags": ["Text to Image", "Image"],
396
+ "models": ["Chroma", "Flux"],
397
+ "date": "2025-06-04",
398
+ "size": 21.69,
399
+ "vram": 14.5
287
400
  },
288
401
  {
289
402
  "name": "image_flux.1_fill_dev_OneReward",
@@ -295,7 +408,8 @@
295
408
  "tags": ["Inpainting", "Outpainting"],
296
409
  "models": ["Flux"],
297
410
  "date": "2025-09-21",
298
- "size": 27.01
411
+ "size": 27.01,
412
+ "vram": 20.0
299
413
  },
300
414
  {
301
415
  "name": "flux_dev_checkpoint_example",
@@ -307,7 +421,8 @@
307
421
  "tags": ["Text to Image", "Image"],
308
422
  "models": ["Flux"],
309
423
  "date": "2025-03-01",
310
- "size": 16.06
424
+ "size": 16.06,
425
+ "vram": 17.0
311
426
  },
312
427
  {
313
428
  "name": "flux1_dev_uso_reference_image_gen",
@@ -320,7 +435,8 @@
320
435
  "models": ["Flux"],
321
436
  "date": "2025-09-02",
322
437
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
323
- "size": 17.32
438
+ "size": 17.32,
439
+ "vram": 18.5
324
440
  },
325
441
  {
326
442
  "name": "flux_schnell",
@@ -332,7 +448,8 @@
332
448
  "tags": ["Text to Image", "Image"],
333
449
  "models": ["Flux"],
334
450
  "date": "2025-03-01",
335
- "size": 16.05
451
+ "size": 16.05,
452
+ "vram": 17.0
336
453
  },
337
454
  {
338
455
  "name": "flux1_krea_dev",
@@ -341,10 +458,11 @@
341
458
  "mediaSubtype": "webp",
342
459
  "description": "A fine-tuned FLUX model pushing photorealism to the max",
343
460
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
344
- "tags": ["Text to Image", "Image", "Photorealism"],
461
+ "tags": ["Text to Image", "Image"],
345
462
  "models": ["Flux"],
346
463
  "date": "2025-07-31",
347
- "size": 20.74
464
+ "size": 20.74,
465
+ "vram": 21.5
348
466
  },
349
467
  {
350
468
  "name": "flux_dev_full_text_to_image",
@@ -356,7 +474,8 @@
356
474
  "tags": ["Text to Image", "Image"],
357
475
  "models": ["Flux"],
358
476
  "date": "2025-03-01",
359
- "size": 31.83
477
+ "size": 31.83,
478
+ "vram": 22.0
360
479
  },
361
480
  {
362
481
  "name": "flux_schnell_full_text_to_image",
@@ -378,7 +497,7 @@
378
497
  "description": "Fill missing parts of images using Flux inpainting.",
379
498
  "thumbnailVariant": "compareSlider",
380
499
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
381
- "tags": ["Image to Image", "Inpaint", "Image"],
500
+ "tags": ["Image to Image", "Inpainting", "Image"],
382
501
  "models": ["Flux"],
383
502
  "date": "2025-03-01",
384
503
  "size": 9.66
@@ -391,7 +510,7 @@
391
510
  "description": "Extend images beyond boundaries using Flux outpainting.",
392
511
  "thumbnailVariant": "compareSlider",
393
512
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
394
- "tags": ["Outpaint", "Image", "Image to Image"],
513
+ "tags": ["Outpainting", "Image", "Image to Image"],
395
514
  "models": ["Flux"],
396
515
  "date": "2025-03-01",
397
516
  "size": 9.66
@@ -417,7 +536,7 @@
417
536
  "description": "Generate images guided by depth information using Flux LoRA.",
418
537
  "thumbnailVariant": "hoverDissolve",
419
538
  "tutorialUrl": "ttps://docs.comfy.org/tutorials/flux/flux-1-controlnet",
420
- "tags": ["Image to Image", "ControlNet", "Image", "LoRA"],
539
+ "tags": ["Image to Image", "ControlNet", "Image"],
421
540
  "models": ["Flux"],
422
541
  "date": "2025-03-01",
423
542
  "size": 32.98
@@ -429,107 +548,10 @@
429
548
  "mediaSubtype": "webp",
430
549
  "description": "Generate images by transferring style from reference images using Flux Redux.",
431
550
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
432
- "tags": ["Image to Image", "ControlNet", "Image", "LoRA"],
551
+ "tags": ["Image to Image", "ControlNet", "Image"],
433
552
  "models": ["Flux"],
434
553
  "date": "2025-03-01",
435
554
  "size": 32.74
436
- }
437
- ]
438
- },
439
- {
440
- "moduleName": "default",
441
- "category": "GENERATION TYPE",
442
- "icon": "icon-[lucide--image]",
443
- "title": "Image",
444
- "type": "image",
445
- "templates": [
446
- {
447
- "name": "image_qwen_image",
448
- "title": "Qwen-Image Text to Image",
449
- "mediaType": "image",
450
- "mediaSubtype": "webp",
451
- "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model..",
452
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
453
- "tags": ["Text to Image", "Image"],
454
- "models": ["Qwen-Image"],
455
- "date": "2025-08-05",
456
- "size": 29.59
457
- },
458
- {
459
- "name": "image_qwen_image_instantx_controlnet",
460
- "title": "Qwen-Image InstantX Union ControlNet",
461
- "mediaType": "image",
462
- "mediaSubtype": "webp",
463
- "description": "Generate images with Qwen-Image InstantX ControlNet, supporting canny, soft edge, depth, pose",
464
- "tags": ["Image to Image", "Image", "ControlNet"],
465
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
466
- "models": ["Qwen-Image"],
467
- "date": "2025-08-23",
468
- "size": 32.88
469
- },
470
- {
471
- "name": "image_qwen_image_instantx_inpainting_controlnet",
472
- "title": "Qwen-Image InstantX Inpainting ControlNet",
473
- "mediaType": "image",
474
- "mediaSubtype": "webp",
475
- "thumbnailVariant": "compareSlider",
476
- "description": "Professional inpainting and image editing with Qwen-Image InstantX ControlNet. Supports object replacement, text modification, background changes, and outpainting.",
477
- "tags": ["Image to Image", "Image", "ControlNet", "Inpainting"],
478
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
479
- "models": ["Qwen-Image"],
480
- "date": "2025-09-12",
481
- "size": 33.54
482
- },
483
- {
484
- "name": "image_qwen_image_union_control_lora",
485
- "title": "Qwen-Image Union Control",
486
- "mediaType": "image",
487
- "mediaSubtype": "webp",
488
- "description": "Generate images with precise structural control using Qwen-Image's unified ControlNet LoRA. Supports multiple control types including canny, depth, lineart, softedge, normal, and openpose for diverse creative applications.",
489
- "tags": ["Text to Image", "Image", "ControlNet"],
490
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
491
- "models": ["Qwen-Image"],
492
- "date": "2025-08-23",
493
- "size": 30.47
494
- },
495
- {
496
- "name": "image_qwen_image_controlnet_patch",
497
- "title": "Qwen-Image ControlNet model patch",
498
- "mediaType": "image",
499
- "mediaSubtype": "webp",
500
- "thumbnailVariant": "compareSlider",
501
- "description": "Control image generation using Qwen-Image ControlNet models. Supports canny, depth, and inpainting controls through model patching.",
502
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
503
- "tags": ["Text to Image", "Image", "ControlNet"],
504
- "models": ["Qwen-Image"],
505
- "date": "2025-08-24",
506
- "size": 31.7
507
- },
508
- {
509
- "name": "image_qwen_image_edit_2509",
510
- "title": "Qwen Image Edit 2509",
511
- "mediaType": "image",
512
- "mediaSubtype": "webp",
513
- "thumbnailVariant": "compareSlider",
514
- "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
515
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
516
- "tags": ["Image to Image", "Image Edit", "Multi-Image", "ControlNet"],
517
- "models": ["Qwen-Image"],
518
- "date": "2025-09-25",
519
- "size": 29.59
520
- },
521
- {
522
- "name": "image_qwen_image_edit",
523
- "title": "Qwen Image Edit",
524
- "mediaType": "image",
525
- "mediaSubtype": "webp",
526
- "thumbnailVariant": "compareSlider",
527
- "description": "Edit images with precise bilingual text editing and dual semantic/appearance editing capabilities using Qwen-Image-Edit's 20B MMDiT model.",
528
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
529
- "tags": ["Image to Image", "Image Edit"],
530
- "models": ["Qwen-Image"],
531
- "date": "2025-08-18",
532
- "size": 29.59
533
555
  },
534
556
  {
535
557
  "name": "image_omnigen2_t2i",
@@ -677,7 +699,7 @@
677
699
  "description": "Generate high-quality images using SDXL.",
678
700
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
679
701
  "tags": ["Text to Image", "Image"],
680
- "models": ["SDXL"],
702
+ "models": ["SDXL", "Stability"],
681
703
  "date": "2025-03-01",
682
704
  "size": 12.12
683
705
  },
@@ -689,7 +711,7 @@
689
711
  "description": "Enhance SDXL images using refiner models.",
690
712
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
691
713
  "tags": ["Text to Image", "Image"],
692
- "models": ["SDXL"],
714
+ "models": ["SDXL", "Stability"],
693
715
  "date": "2025-03-01",
694
716
  "size": 12.12
695
717
  },
@@ -701,7 +723,7 @@
701
723
  "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
702
724
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
703
725
  "tags": ["Text to Image", "Image"],
704
- "models": ["SDXL"],
726
+ "models": ["SDXL", "Stability"],
705
727
  "date": "2025-03-01",
706
728
  "size": 9.9
707
729
  },
@@ -713,7 +735,7 @@
713
735
  "description": "Generate images using both text prompts and reference images with SDXL Revision.",
714
736
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
715
737
  "tags": ["Text to Image", "Image"],
716
- "models": ["SDXL"],
738
+ "models": ["SDXL", "Stability"],
717
739
  "date": "2025-03-01",
718
740
  "size": 9.9
719
741
  },
@@ -725,7 +747,7 @@
725
747
  "description": "Generate images in a single step using SDXL Turbo.",
726
748
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
727
749
  "tags": ["Text to Image", "Image"],
728
- "models": ["SDXL Turbo"],
750
+ "models": ["SDXL", "Stability"],
729
751
  "date": "2025-03-01",
730
752
  "size": 6.46
731
753
  },
@@ -736,7 +758,7 @@
736
758
  "mediaSubtype": "webp",
737
759
  "thumbnailVariant": "compareSlider",
738
760
  "description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention.",
739
- "tags": ["Depth", "Image"],
761
+ "tags": ["Image", "Text to Image"],
740
762
  "models": ["SD1.5"],
741
763
  "date": "2025-05-21",
742
764
  "size": 1.93
@@ -758,7 +780,7 @@
758
780
  "mediaSubtype": "webp",
759
781
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
760
782
  "tags": ["Text to Video", "Video"],
761
- "models": ["Wan"],
783
+ "models": ["Wan2.2", "Wan"],
762
784
  "date": "2025-07-29",
763
785
  "size": 35.42
764
786
  },
@@ -771,7 +793,7 @@
771
793
  "thumbnailVariant": "hoverDissolve",
772
794
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
773
795
  "tags": ["Image to Video", "Video"],
774
- "models": ["Wan2.2"],
796
+ "models": ["Wan2.2", "Wan"],
775
797
  "date": "2025-07-29",
776
798
  "size": 35.42
777
799
  },
@@ -784,7 +806,7 @@
784
806
  "thumbnailVariant": "hoverDissolve",
785
807
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
786
808
  "tags": ["FLF2V", "Video"],
787
- "models": ["Wan2.2"],
809
+ "models": ["Wan2.2", "Wan"],
788
810
  "date": "2025-08-02",
789
811
  "size": 35.42
790
812
  },
@@ -796,7 +818,7 @@
796
818
  "mediaSubtype": "webp",
797
819
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
798
820
  "tags": ["Video", "Image to Video"],
799
- "models": ["Wan2.2"],
821
+ "models": ["Wan2.2", "Wan"],
800
822
  "date": "2025-09-22",
801
823
  "size": 25.535
802
824
  },
@@ -808,7 +830,7 @@
808
830
  "mediaSubtype": "webp",
809
831
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
810
832
  "tags": ["Video"],
811
- "models": ["Wan2.2"],
833
+ "models": ["Wan2.2", "Wan"],
812
834
  "date": "2025-08-02",
813
835
  "size": 23.52
814
836
  },
@@ -831,7 +853,7 @@
831
853
  "mediaSubtype": "webp",
832
854
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
833
855
  "tags": ["FLF2V", "Video"],
834
- "models": ["Wan2.2"],
856
+ "models": ["Wan2.2", "Wan"],
835
857
  "date": "2025-08-12",
836
858
  "size": 35.42
837
859
  },
@@ -843,7 +865,7 @@
843
865
  "mediaSubtype": "webp",
844
866
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
845
867
  "tags": ["Video to Video", "Video"],
846
- "models": ["Wan2.2"],
868
+ "models": ["Wan2.2", "Wan"],
847
869
  "date": "2025-08-12",
848
870
  "size": 35.42
849
871
  },
@@ -855,7 +877,7 @@
855
877
  "mediaSubtype": "webp",
856
878
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
857
879
  "tags": ["Video to Video", "Video"],
858
- "models": ["Wan2.2"],
880
+ "models": ["Wan2.2", "Wan"],
859
881
  "date": "2025-08-17",
860
882
  "size": 37.3
861
883
  },
@@ -866,7 +888,7 @@
866
888
  "mediaType": "image",
867
889
  "mediaSubtype": "webp",
868
890
  "tags": ["Text to Video", "Video"],
869
- "models": ["Wan2.2"],
891
+ "models": ["Wan2.2", "Wan"],
870
892
  "date": "2025-07-29",
871
893
  "size": 16.9
872
894
  },
@@ -877,7 +899,7 @@
877
899
  "mediaType": "image",
878
900
  "mediaSubtype": "webp",
879
901
  "tags": ["Text to Video", "Video"],
880
- "models": ["Wan2.2"],
902
+ "models": ["Wan2.2", "Wan"],
881
903
  "date": "2025-07-29",
882
904
  "size": 16.9
883
905
  },
@@ -888,95 +910,106 @@
888
910
  "mediaType": "image",
889
911
  "mediaSubtype": "webp",
890
912
  "tags": ["Text to Video", "Video"],
891
- "models": ["Wan2.2"],
913
+ "models": ["Wan2.2", "Wan"],
892
914
  "date": "2025-07-29",
893
915
  "size": 16.9
894
916
  },
895
917
  {
896
918
  "name": "video_wan_vace_14B_t2v",
897
- "title": "Wan VACE Text to Video",
919
+ "title": "Wan2.1 VACE Text to Video",
898
920
  "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
899
921
  "mediaType": "image",
900
922
  "mediaSubtype": "webp",
901
923
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
902
924
  "tags": ["Text to Video", "Video"],
903
- "models": ["Wan2.1"],
925
+ "models": ["Wan2.1", "Wan"],
904
926
  "date": "2025-05-21",
905
927
  "size": 53.79
906
928
  },
907
929
  {
908
930
  "name": "video_wan_vace_14B_ref2v",
909
- "title": "Wan VACE Reference to Video",
931
+ "title": "Wan2.1 VACE Reference to Video",
910
932
  "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
911
933
  "mediaType": "image",
912
934
  "mediaSubtype": "webp",
913
935
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
914
- "tags": ["Reference to Video", "Video"],
915
- "models": ["Wan2.1"],
936
+ "tags": ["Video", "Image to Video"],
937
+ "models": ["Wan2.1", "Wan"],
916
938
  "date": "2025-05-21",
917
939
  "size": 53.79
918
940
  },
919
941
  {
920
942
  "name": "video_wan_vace_14B_v2v",
921
- "title": "Wan VACE Control Video",
943
+ "title": "Wan2.1 VACE Control Video",
922
944
  "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
923
945
  "mediaType": "image",
924
946
  "mediaSubtype": "webp",
925
947
  "thumbnailVariant": "compareSlider",
926
948
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
927
949
  "tags": ["Video to Video", "Video"],
928
- "models": ["Wan2.1"],
950
+ "models": ["Wan2.1", "Wan"],
929
951
  "date": "2025-05-21",
930
952
  "size": 53.79
931
953
  },
932
954
  {
933
955
  "name": "video_wan_vace_outpainting",
934
- "title": "Wan VACE Outpainting",
956
+ "title": "Wan2.1 VACE Outpainting",
935
957
  "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
936
958
  "mediaType": "image",
937
959
  "mediaSubtype": "webp",
938
960
  "thumbnailVariant": "compareSlider",
939
961
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
940
962
  "tags": ["Outpainting", "Video"],
941
- "models": ["Wan2.1"],
963
+ "models": ["Wan2.1", "Wan"],
942
964
  "date": "2025-05-21",
943
965
  "size": 53.79
944
966
  },
945
967
  {
946
968
  "name": "video_wan_vace_flf2v",
947
- "title": "Wan VACE First-Last Frame",
969
+ "title": "Wan2.1 VACE First-Last Frame",
948
970
  "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
949
971
  "mediaType": "image",
950
972
  "mediaSubtype": "webp",
951
973
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
952
974
  "tags": ["FLF2V", "Video"],
953
- "models": ["Wan2.1"],
975
+ "models": ["Wan2.1", "Wan"],
954
976
  "date": "2025-05-21",
955
977
  "size": 53.79
956
978
  },
957
979
  {
958
980
  "name": "video_wan_vace_inpainting",
959
- "title": "Wan VACE Inpainting",
981
+ "title": "Wan2.1 VACE Inpainting",
960
982
  "description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
961
983
  "mediaType": "image",
962
984
  "mediaSubtype": "webp",
963
985
  "thumbnailVariant": "compareSlider",
964
986
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
965
987
  "tags": ["Inpainting", "Video"],
966
- "models": ["Wan2.1"],
988
+ "models": ["Wan2.1", "Wan"],
967
989
  "date": "2025-05-21",
968
990
  "size": 53.79
969
991
  },
992
+ {
993
+ "name": "video_wan2.1_alpha_t2v_14B",
994
+ "title": "Wan2.1 Alpha T2V",
995
+ "description": "Generate text-to-video with alpha channel support for transparent backgrounds and semi-transparent objects.",
996
+ "mediaType": "image",
997
+ "mediaSubtype": "webp",
998
+ "tags": ["Text to Video", "Video"],
999
+ "models": ["Wan2.1", "Wan"],
1000
+ "date": "2025-10-06",
1001
+ "size": 20.95
1002
+ },
970
1003
  {
971
1004
  "name": "video_wan_ati",
972
- "title": "Wan ATI",
1005
+ "title": "Wan2.1 ATI",
973
1006
  "description": "Trajectory-controlled video generation.",
974
1007
  "mediaType": "image",
975
1008
  "mediaSubtype": "webp",
976
1009
  "thumbnailVariant": "hoverDissolve",
977
1010
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
978
1011
  "tags": ["Video"],
979
- "models": ["Wan2.1"],
1012
+ "models": ["Wan2.1", "Wan"],
980
1013
  "date": "2025-05-21",
981
1014
  "size": 23.65
982
1015
  },
@@ -988,7 +1021,7 @@
988
1021
  "mediaSubtype": "webp",
989
1022
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
990
1023
  "tags": ["Video"],
991
- "models": ["Wan2.1"],
1024
+ "models": ["Wan2.1", "Wan"],
992
1025
  "date": "2025-04-15",
993
1026
  "size": 10.7
994
1027
  },
@@ -1000,7 +1033,7 @@
1000
1033
  "mediaSubtype": "webp",
1001
1034
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
1002
1035
  "tags": ["Video"],
1003
- "models": ["Wan2.1"],
1036
+ "models": ["Wan2.1", "Wan"],
1004
1037
  "date": "2025-04-15",
1005
1038
  "size": 39.16
1006
1039
  },
@@ -1012,7 +1045,7 @@
1012
1045
  "mediaSubtype": "webp",
1013
1046
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
1014
1047
  "tags": ["Text to Video", "Video"],
1015
- "models": ["Wan2.1"],
1048
+ "models": ["Wan2.1", "Wan"],
1016
1049
  "date": "2025-03-01",
1017
1050
  "size": 9.15
1018
1051
  },
@@ -1024,7 +1057,7 @@
1024
1057
  "mediaSubtype": "webp",
1025
1058
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
1026
1059
  "tags": ["Text to Video", "Video"],
1027
- "models": ["Wan2.1"],
1060
+ "models": ["Wan2.1", "Wan"],
1028
1061
  "date": "2025-03-01",
1029
1062
  "size": 38.23
1030
1063
  },
@@ -1035,8 +1068,8 @@
1035
1068
  "mediaType": "image",
1036
1069
  "mediaSubtype": "webp",
1037
1070
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
1038
- "tags": ["Inpaint", "Video"],
1039
- "models": ["Wan2.1"],
1071
+ "tags": ["Inpainting", "Video"],
1072
+ "models": ["Wan2.1", "Wan"],
1040
1073
  "date": "2025-04-15",
1041
1074
  "size": 10.6
1042
1075
  },
@@ -1049,7 +1082,7 @@
1049
1082
  "thumbnailVariant": "hoverDissolve",
1050
1083
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
1051
1084
  "tags": ["Video to Video", "Video"],
1052
- "models": ["Wan2.1"],
1085
+ "models": ["Wan2.1", "Wan"],
1053
1086
  "date": "2025-04-15",
1054
1087
  "size": 10.6
1055
1088
  },
@@ -1061,7 +1094,7 @@
1061
1094
  "mediaSubtype": "webp",
1062
1095
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
1063
1096
  "tags": ["FLF2V", "Video"],
1064
- "models": ["Wan2.1"],
1097
+ "models": ["Wan2.1", "Wan"],
1065
1098
  "date": "2025-04-15",
1066
1099
  "size": 38.23
1067
1100
  },
@@ -1164,8 +1197,8 @@
1164
1197
  "mediaType": "audio",
1165
1198
  "mediaSubtype": "mp3",
1166
1199
  "description": "Generate instrumental music from text prompts using ACE-Step v1.",
1167
- "tags": ["Text to Audio", "Audio", "Instrumentals"],
1168
- "models": ["ACE-Step v1"],
1200
+ "tags": ["Text to Audio", "Audio"],
1201
+ "models": ["ACE-Step"],
1169
1202
  "date": "2025-03-01",
1170
1203
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
1171
1204
  "size": 7.17
@@ -1176,8 +1209,8 @@
1176
1209
  "mediaType": "audio",
1177
1210
  "mediaSubtype": "mp3",
1178
1211
  "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
1179
- "tags": ["Text to Audio", "Audio", "Song"],
1180
- "models": ["ACE-Step v1"],
1212
+ "tags": ["Text to Audio", "Audio"],
1213
+ "models": ["ACE-Step"],
1181
1214
  "date": "2025-03-01",
1182
1215
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
1183
1216
  "size": 7.17
@@ -1189,7 +1222,7 @@
1189
1222
  "mediaSubtype": "mp3",
1190
1223
  "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
1191
1224
  "tags": ["Audio Editing", "Audio"],
1192
- "models": ["ACE-Step v1"],
1225
+ "models": ["ACE-Step"],
1193
1226
  "date": "2025-03-01",
1194
1227
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
1195
1228
  "size": 7.17
@@ -1209,8 +1242,8 @@
1209
1242
  "mediaType": "image",
1210
1243
  "mediaSubtype": "webp",
1211
1244
  "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
1212
- "tags": ["Image to Model", "3D"],
1213
- "models": ["Hunyuan3D 2.0"],
1245
+ "tags": ["Image to 3D", "3D"],
1246
+ "models": ["Hunyuan3D"],
1214
1247
  "date": "2025-03-01",
1215
1248
  "tutorialUrl": "",
1216
1249
  "size": 4.59
@@ -1221,8 +1254,8 @@
1221
1254
  "mediaType": "image",
1222
1255
  "mediaSubtype": "webp",
1223
1256
  "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
1224
- "tags": ["Image to Model", "3D"],
1225
- "models": ["Hunyuan3D 2.0"],
1257
+ "tags": ["Image to 3D", "3D"],
1258
+ "models": ["Hunyuan3D"],
1226
1259
  "date": "2025-03-01",
1227
1260
  "tutorialUrl": "",
1228
1261
  "size": 4.59
@@ -1233,8 +1266,8 @@
1233
1266
  "mediaType": "image",
1234
1267
  "mediaSubtype": "webp",
1235
1268
  "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
1236
- "tags": ["Multiview to Model", "3D"],
1237
- "models": ["Hunyuan3D 2.0 MV"],
1269
+ "tags": ["3D", "Image to 3D"],
1270
+ "models": ["Hunyuan3D"],
1238
1271
  "date": "2025-03-01",
1239
1272
  "tutorialUrl": "",
1240
1273
  "thumbnailVariant": "hoverDissolve",
@@ -1246,8 +1279,8 @@
1246
1279
  "mediaType": "image",
1247
1280
  "mediaSubtype": "webp",
1248
1281
  "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
1249
- "tags": ["Multiview to Model", "3D"],
1250
- "models": ["Hunyuan3D 2.0 MV Turbo"],
1282
+ "tags": ["Image to 3D", "3D"],
1283
+ "models": ["Hunyuan3D"],
1251
1284
  "date": "2025-03-01",
1252
1285
  "tutorialUrl": "",
1253
1286
  "thumbnailVariant": "hoverDissolve",
@@ -1281,9 +1314,11 @@
1281
1314
  "mediaType": "image",
1282
1315
  "mediaSubtype": "webp",
1283
1316
  "tags": ["Image Edit", "Image", "API", "Text-to-Image"],
1284
- "models": ["Seedream 4.0"],
1317
+ "models": ["Seedream 4.0", "ByteDance"],
1285
1318
  "date": "2025-09-11",
1286
- "OpenSource": false
1319
+ "OpenSource": false,
1320
+ "size": 0,
1321
+ "vram": 0
1287
1322
  },
1288
1323
  {
1289
1324
  "name": "api_google_gemini_image",
@@ -1292,9 +1327,11 @@
1292
1327
  "mediaType": "image",
1293
1328
  "mediaSubtype": "webp",
1294
1329
  "tags": ["Image Edit", "Image", "API", "Text-to-Image"],
1295
- "models": ["Gemini-2.5-Flash", "nano-banana"],
1330
+ "models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
1296
1331
  "date": "2025-08-27",
1297
- "OpenSource": false
1332
+ "OpenSource": false,
1333
+ "size": 0,
1334
+ "vram": 0
1298
1335
  },
1299
1336
  {
1300
1337
  "name": "api_bfl_flux_1_kontext_multiple_images_input",
@@ -1305,9 +1342,11 @@
1305
1342
  "thumbnailVariant": "compareSlider",
1306
1343
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1307
1344
  "tags": ["Image Edit", "Image"],
1308
- "models": ["Flux"],
1345
+ "models": ["Flux", "Kontext"],
1309
1346
  "date": "2025-05-29",
1310
- "OpenSource": false
1347
+ "OpenSource": false,
1348
+ "size": 0,
1349
+ "vram": 0
1311
1350
  },
1312
1351
  {
1313
1352
  "name": "api_bfl_flux_1_kontext_pro_image",
@@ -1318,9 +1357,11 @@
1318
1357
  "thumbnailVariant": "compareSlider",
1319
1358
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1320
1359
  "tags": ["Image Edit", "Image"],
1321
- "models": ["Flux"],
1360
+ "models": ["Flux", "Kontext", "BFL"],
1322
1361
  "date": "2025-05-29",
1323
- "OpenSource": false
1362
+ "OpenSource": false,
1363
+ "size": 0,
1364
+ "vram": 0
1324
1365
  },
1325
1366
  {
1326
1367
  "name": "api_bfl_flux_1_kontext_max_image",
@@ -1331,9 +1372,24 @@
1331
1372
  "thumbnailVariant": "compareSlider",
1332
1373
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1333
1374
  "tags": ["Image Edit", "Image"],
1334
- "models": ["Flux"],
1375
+ "models": ["Flux", "Kontext", "BFL"],
1335
1376
  "date": "2025-05-29",
1336
- "OpenSource": false
1377
+ "OpenSource": false,
1378
+ "size": 0,
1379
+ "vram": 0
1380
+ },
1381
+ {
1382
+ "name": "api_wan_text_to_image",
1383
+ "title": "Wan2.5: Text to Image",
1384
+ "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
1385
+ "mediaType": "image",
1386
+ "mediaSubtype": "webp",
1387
+ "tags": ["Text to Image", "Image", "API"],
1388
+ "models": ["Wan2.5"],
1389
+ "date": "2025-09-25",
1390
+ "OpenSource": false,
1391
+ "size": 0,
1392
+ "vram": 0
1337
1393
  },
1338
1394
  {
1339
1395
  "name": "api_bfl_flux_pro_t2i",
@@ -1343,9 +1399,11 @@
1343
1399
  "mediaSubtype": "webp",
1344
1400
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
1345
1401
  "tags": ["Image Edit", "Image"],
1346
- "models": ["Flux"],
1402
+ "models": ["Flux", "BFL"],
1347
1403
  "date": "2025-05-01",
1348
- "OpenSource": false
1404
+ "OpenSource": false,
1405
+ "size": 0,
1406
+ "vram": 0
1349
1407
  },
1350
1408
  {
1351
1409
  "name": "api_luma_photon_i2i",
@@ -1355,9 +1413,11 @@
1355
1413
  "mediaSubtype": "webp",
1356
1414
  "thumbnailVariant": "compareSlider",
1357
1415
  "tags": ["Image to Image", "Image", "API"],
1358
- "models": ["Luma Photon"],
1416
+ "models": ["Luma"],
1359
1417
  "date": "2025-03-01",
1360
- "OpenSource": false
1418
+ "OpenSource": false,
1419
+ "size": 0,
1420
+ "vram": 0
1361
1421
  },
1362
1422
  {
1363
1423
  "name": "api_luma_photon_style_ref",
@@ -1366,10 +1426,12 @@
1366
1426
  "mediaType": "image",
1367
1427
  "mediaSubtype": "webp",
1368
1428
  "thumbnailVariant": "compareSlider",
1369
- "tags": ["Text to Image", "Image", "API", "Style Transfer"],
1370
- "models": ["Luma Photon"],
1429
+ "tags": ["Text to Image", "Image", "API"],
1430
+ "models": ["Luma"],
1371
1431
  "date": "2025-03-01",
1372
- "OpenSource": false
1432
+ "OpenSource": false,
1433
+ "size": 0,
1434
+ "vram": 0
1373
1435
  },
1374
1436
  {
1375
1437
  "name": "api_recraft_image_gen_with_color_control",
@@ -1377,10 +1439,12 @@
1377
1439
  "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
1378
1440
  "mediaType": "image",
1379
1441
  "mediaSubtype": "webp",
1380
- "tags": ["Text to Image", "Image", "API", "Color Control"],
1442
+ "tags": ["Text to Image", "Image", "API"],
1381
1443
  "models": ["Recraft"],
1382
1444
  "date": "2025-03-01",
1383
- "OpenSource": false
1445
+ "OpenSource": false,
1446
+ "size": 0,
1447
+ "vram": 0
1384
1448
  },
1385
1449
  {
1386
1450
  "name": "api_recraft_image_gen_with_style_control",
@@ -1388,10 +1452,12 @@
1388
1452
  "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
1389
1453
  "mediaType": "image",
1390
1454
  "mediaSubtype": "webp",
1391
- "tags": ["Text to Image", "Image", "API", "Style Control"],
1455
+ "tags": ["Text to Image", "Image", "API"],
1392
1456
  "models": ["Recraft"],
1393
1457
  "date": "2025-03-01",
1394
- "OpenSource": false
1458
+ "OpenSource": false,
1459
+ "size": 0,
1460
+ "vram": 0
1395
1461
  },
1396
1462
  {
1397
1463
  "name": "api_recraft_vector_gen",
@@ -1402,7 +1468,9 @@
1402
1468
  "tags": ["Text to Image", "Image", "API", "Vector"],
1403
1469
  "models": ["Recraft"],
1404
1470
  "date": "2025-03-01",
1405
- "OpenSource": false
1471
+ "OpenSource": false,
1472
+ "size": 0,
1473
+ "vram": 0
1406
1474
  },
1407
1475
  {
1408
1476
  "name": "api_runway_text_to_image",
@@ -1413,7 +1481,9 @@
1413
1481
  "tags": ["Text to Image", "Image", "API"],
1414
1482
  "models": ["Runway"],
1415
1483
  "date": "2025-03-01",
1416
- "OpenSource": false
1484
+ "OpenSource": false,
1485
+ "size": 0,
1486
+ "vram": 0
1417
1487
  },
1418
1488
  {
1419
1489
  "name": "api_runway_reference_to_image",
@@ -1422,10 +1492,12 @@
1422
1492
  "mediaType": "image",
1423
1493
  "thumbnailVariant": "compareSlider",
1424
1494
  "mediaSubtype": "webp",
1425
- "tags": ["Image to Image", "Image", "API", "Style Transfer"],
1495
+ "tags": ["Image to Image", "Image", "API"],
1426
1496
  "models": ["Runway"],
1427
1497
  "date": "2025-03-01",
1428
- "OpenSource": false
1498
+ "OpenSource": false,
1499
+ "size": 0,
1500
+ "vram": 0
1429
1501
  },
1430
1502
  {
1431
1503
  "name": "api_stability_ai_stable_image_ultra_t2i",
@@ -1434,9 +1506,11 @@
1434
1506
  "mediaType": "image",
1435
1507
  "mediaSubtype": "webp",
1436
1508
  "tags": ["Text to Image", "Image", "API"],
1437
- "models": ["Stable Image Ultra"],
1509
+ "models": ["Stability"],
1438
1510
  "date": "2025-03-01",
1439
- "OpenSource": false
1511
+ "OpenSource": false,
1512
+ "size": 0,
1513
+ "vram": 0
1440
1514
  },
1441
1515
  {
1442
1516
  "name": "api_stability_ai_i2i",
@@ -1446,9 +1520,11 @@
1446
1520
  "thumbnailVariant": "compareSlider",
1447
1521
  "mediaSubtype": "webp",
1448
1522
  "tags": ["Image to Image", "Image", "API"],
1449
- "models": ["Stability AI"],
1523
+ "models": ["Stability"],
1450
1524
  "date": "2025-03-01",
1451
- "OpenSource": false
1525
+ "OpenSource": false,
1526
+ "size": 0,
1527
+ "vram": 0
1452
1528
  },
1453
1529
  {
1454
1530
  "name": "api_stability_ai_sd3.5_t2i",
@@ -1457,9 +1533,11 @@
1457
1533
  "mediaType": "image",
1458
1534
  "mediaSubtype": "webp",
1459
1535
  "tags": ["Text to Image", "Image", "API"],
1460
- "models": ["SD3.5"],
1536
+ "models": ["Stability"],
1461
1537
  "date": "2025-03-01",
1462
- "OpenSource": false
1538
+ "OpenSource": false,
1539
+ "size": 0,
1540
+ "vram": 0
1463
1541
  },
1464
1542
  {
1465
1543
  "name": "api_stability_ai_sd3.5_i2i",
@@ -1469,9 +1547,11 @@
1469
1547
  "thumbnailVariant": "compareSlider",
1470
1548
  "mediaSubtype": "webp",
1471
1549
  "tags": ["Image to Image", "Image", "API"],
1472
- "models": ["SD3.5"],
1550
+ "models": ["Stability"],
1473
1551
  "date": "2025-03-01",
1474
- "OpenSource": false
1552
+ "OpenSource": false,
1553
+ "size": 0,
1554
+ "vram": 0
1475
1555
  },
1476
1556
  {
1477
1557
  "name": "api_ideogram_v3_t2i",
@@ -1479,10 +1559,12 @@
1479
1559
  "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
1480
1560
  "mediaType": "image",
1481
1561
  "mediaSubtype": "webp",
1482
- "tags": ["Text to Image", "Image", "API", "Text Rendering"],
1483
- "models": ["Ideogram V3"],
1562
+ "tags": ["Text to Image", "Image", "API"],
1563
+ "models": ["Ideogram"],
1484
1564
  "date": "2025-03-01",
1485
- "OpenSource": false
1565
+ "OpenSource": false,
1566
+ "size": 0,
1567
+ "vram": 0
1486
1568
  },
1487
1569
  {
1488
1570
  "name": "api_openai_image_1_t2i",
@@ -1491,10 +1573,12 @@
1491
1573
  "mediaType": "image",
1492
1574
  "mediaSubtype": "webp",
1493
1575
  "tags": ["Text to Image", "Image", "API"],
1494
- "models": ["GPT-Image-1"],
1576
+ "models": ["GPT-Image-1", "OpenAI"],
1495
1577
  "date": "2025-03-01",
1496
1578
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
1497
- "OpenSource": false
1579
+ "OpenSource": false,
1580
+ "size": 0,
1581
+ "vram": 0
1498
1582
  },
1499
1583
  {
1500
1584
  "name": "api_openai_image_1_i2i",
@@ -1507,7 +1591,9 @@
1507
1591
  "models": ["GPT-Image-1"],
1508
1592
  "date": "2025-03-01",
1509
1593
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
1510
- "OpenSource": false
1594
+ "OpenSource": false,
1595
+ "size": 0,
1596
+ "vram": 0
1511
1597
  },
1512
1598
  {
1513
1599
  "name": "api_openai_image_1_inpaint",
@@ -1516,11 +1602,13 @@
1516
1602
  "mediaType": "image",
1517
1603
  "mediaSubtype": "webp",
1518
1604
  "thumbnailVariant": "compareSlider",
1519
- "tags": ["Inpaint", "Image", "API"],
1605
+ "tags": ["Inpainting", "Image", "API"],
1520
1606
  "models": ["GPT-Image-1"],
1521
1607
  "date": "2025-03-01",
1522
1608
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
1523
- "OpenSource": false
1609
+ "OpenSource": false,
1610
+ "size": 0,
1611
+ "vram": 0
1524
1612
  },
1525
1613
  {
1526
1614
  "name": "api_openai_image_1_multi_inputs",
@@ -1529,11 +1617,13 @@
1529
1617
  "mediaType": "image",
1530
1618
  "mediaSubtype": "webp",
1531
1619
  "thumbnailVariant": "compareSlider",
1532
- "tags": ["Text to Image", "Image", "API", "Multi Input"],
1620
+ "tags": ["Text to Image", "Image", "API"],
1533
1621
  "models": ["GPT-Image-1"],
1534
1622
  "date": "2025-03-01",
1535
1623
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
1536
- "OpenSource": false
1624
+ "OpenSource": false,
1625
+ "size": 0,
1626
+ "vram": 0
1537
1627
  },
1538
1628
  {
1539
1629
  "name": "api_openai_dall_e_2_t2i",
@@ -1542,10 +1632,12 @@
1542
1632
  "mediaType": "image",
1543
1633
  "mediaSubtype": "webp",
1544
1634
  "tags": ["Text to Image", "Image", "API"],
1545
- "models": ["Dall-E 2"],
1635
+ "models": ["Dall-E", "OpenAI"],
1546
1636
  "date": "2025-03-01",
1547
1637
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
1548
- "OpenSource": false
1638
+ "OpenSource": false,
1639
+ "size": 0,
1640
+ "vram": 0
1549
1641
  },
1550
1642
  {
1551
1643
  "name": "api_openai_dall_e_2_inpaint",
@@ -1554,11 +1646,13 @@
1554
1646
  "mediaType": "image",
1555
1647
  "mediaSubtype": "webp",
1556
1648
  "thumbnailVariant": "compareSlider",
1557
- "tags": ["Inpaint", "Image", "API"],
1558
- "models": ["Dall-E 2"],
1649
+ "tags": ["Inpainting", "Image", "API"],
1650
+ "models": ["Dall-E", "OpenAI"],
1559
1651
  "date": "2025-03-01",
1560
1652
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
1561
- "OpenSource": false
1653
+ "OpenSource": false,
1654
+ "size": 0,
1655
+ "vram": 0
1562
1656
  },
1563
1657
  {
1564
1658
  "name": "api_openai_dall_e_3_t2i",
@@ -1567,10 +1661,12 @@
1567
1661
  "mediaType": "image",
1568
1662
  "mediaSubtype": "webp",
1569
1663
  "tags": ["Text to Image", "Image", "API"],
1570
- "models": ["Dall-E 3"],
1664
+ "models": ["Dall-E", "OpenAI"],
1571
1665
  "date": "2025-03-01",
1572
1666
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3",
1573
- "OpenSource": false
1667
+ "OpenSource": false,
1668
+ "size": 0,
1669
+ "vram": 0
1574
1670
  }
1575
1671
  ]
1576
1672
  },
@@ -1591,7 +1687,9 @@
  "models": ["Wan2.5"],
  "date": "2025-09-27",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_wan_image_to_video",
@@ -1603,7 +1701,9 @@
  "models": ["Wan2.5"],
  "date": "2025-09-27",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_kling_i2v",
@@ -1615,7 +1715,9 @@
  "models": ["Kling"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_kling_effects",
@@ -1623,11 +1725,13 @@
  "description": "Generate dynamic videos by applying visual effects to images using Kling.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video Effects", "Video", "API"],
+ "tags": ["Video", "API"],
  "models": ["Kling"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_kling_flf",
@@ -1635,11 +1739,13 @@
  "description": "Generate videos through controlling the first and last frames.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video Generation", "Video", "API", "Frame Control"],
+ "tags": ["Video", "API", "FLF2V"],
  "models": ["Kling"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_vidu_text_to_video",
@@ -1651,7 +1757,9 @@
  "models": ["Vidu"],
  "date": "2025-08-23",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_vidu_image_to_video",
@@ -1663,7 +1771,9 @@
  "models": ["Vidu"],
  "date": "2025-08-23",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_vidu_reference_to_video",
@@ -1671,11 +1781,13 @@
  "description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Reference to Video", "Video", "API"],
+ "tags": ["Video", "Image to Video", "API"],
  "models": ["Vidu"],
  "date": "2025-08-23",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_vidu_start_end_to_video",
@@ -1683,11 +1795,55 @@
  "description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["FLF2V", "Video", "API"],
+ "tags": ["Video", "API", "FLF2V"],
  "models": ["Vidu"],
  "date": "2025-08-23",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
+ },
+ {
+ "name": "api_bytedance_text_to_video",
+ "title": "ByteDance: Text to Video",
+ "description": "Generate high-quality videos directly from text prompts using ByteDance's Seedance model. Supports multiple resolutions and aspect ratios with natural motion and cinematic quality.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Video", "API", "Text to Video"],
+ "models": ["ByteDance"],
+ "date": "2025-10-6",
+ "tutorialUrl": "",
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
+ },
+ {
+ "name": "api_bytedance_image_to_video",
+ "title": "ByteDance: Image to Video",
+ "description": "Transform static images into dynamic videos using ByteDance's Seedance model. Analyzes image structure and generates natural motion with consistent visual style and coherent video sequences.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Video", "API", "Image to Video"],
+ "models": ["ByteDance"],
+ "date": "2025-10-6",
+ "tutorialUrl": "",
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
+ },
+ {
+ "name": "api_bytedance_flf2v",
+ "title": "ByteDance: Start End to Video",
+ "description": "Generate cinematic video transitions between start and end frames with fluid motion, scene consistency, and professional polish using ByteDance's Seedance model.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Video", "API", "FLF2V"],
+ "models": ["ByteDance"],
+ "date": "2025-10-6",
+ "tutorialUrl": "",
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_luma_i2v",
@@ -1699,7 +1855,9 @@
  "models": ["Luma"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_luma_t2v",
@@ -1711,7 +1869,9 @@
  "models": ["Luma"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_moonvalley_text_to_video",
@@ -1723,7 +1883,9 @@
  "models": ["Moonvalley"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_moonvalley_image_to_video",
@@ -1735,7 +1897,9 @@
  "models": ["Moonvalley"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_moonvalley_video_to_video_motion_transfer",
@@ -1744,11 +1908,13 @@
  "mediaType": "image",
  "thumbnailVariant": "hoverDissolve",
  "mediaSubtype": "webp",
- "tags": ["Video to Video", "Video", "API", "Motion Transfer"],
+ "tags": ["Video to Video", "Video", "API"],
  "models": ["Moonvalley"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_moonvalley_video_to_video_pose_control",
@@ -1757,11 +1923,13 @@
  "mediaType": "image",
  "thumbnailVariant": "hoverDissolve",
  "mediaSubtype": "webp",
- "tags": ["Video to Video", "Video", "API", "Pose Control"],
+ "tags": ["Video to Video", "Video", "API"],
  "models": ["Moonvalley"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_hailuo_minimax_video",
@@ -1773,7 +1941,9 @@
  "models": ["MiniMax"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_hailuo_minimax_t2v",
@@ -1785,7 +1955,9 @@
  "models": ["MiniMax"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_hailuo_minimax_i2v",
@@ -1797,7 +1969,9 @@
  "models": ["MiniMax"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_pixverse_i2v",
@@ -1809,7 +1983,9 @@
  "models": ["PixVerse"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_pixverse_template_i2v",
@@ -1817,11 +1993,13 @@
  "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API", "Templates"],
+ "tags": ["Image to Video", "Video", "API"],
  "models": ["PixVerse"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_pixverse_t2v",
@@ -1833,7 +2011,9 @@
  "models": ["PixVerse"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_runway_gen3a_turbo_image_to_video",
@@ -1842,10 +2022,12 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Image to Video", "Video", "API"],
- "models": ["Runway Gen3a Turbo"],
+ "models": ["Runway"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_runway_gen4_turo_image_to_video",
@@ -1854,10 +2036,12 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Image to Video", "Video", "API"],
- "models": ["Runway Gen4 Turbo"],
+ "models": ["Runway"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_runway_first_last_frame",
@@ -1865,11 +2049,13 @@
  "description": "Generate smooth video transitions between two keyframes with Runway's precision.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video Generation", "Video", "API", "Frame Control"],
+ "tags": ["Video", "API", "FLF2V"],
  "models": ["Runway"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_pika_i2v",
@@ -1881,7 +2067,9 @@
  "models": ["Pika"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_pika_scene",
@@ -1889,11 +2077,13 @@
  "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API", "Multi Image"],
- "models": ["Pika Scenes"],
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Pika"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_veo2_i2v",
@@ -1902,10 +2092,12 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Image to Video", "Video", "API"],
- "models": ["Veo2"],
+ "models": ["Veo", "Google"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_veo3",
@@ -1914,10 +2106,12 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Image to Video", "Text to Video", "API"],
- "models": ["Veo3"],
+ "models": ["Veo", "Google"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  }
  ]
  },
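Beyond the field backfill, the video category gains three `api_bytedance_*` entries (text to video, image to video, and start/end frame), each pairing an index record with a bundled workflow JSON and webp thumbnail. A sketch of pulling one of the new workflow graphs out of the installed wheel; the `templates/<name>.json` layout inside the package is an assumption, and the node-count print only demonstrates that the file parsed:

```python
import json
from importlib import resources

def load_workflow(name: str) -> dict:
    """Read a bundled workflow graph by its index "name" field.

    Assumes workflows ship as templates/<name>.json inside the
    comfyui_workflow_templates package; adjust if the installed
    layout differs.
    """
    root = resources.files("comfyui_workflow_templates")
    text = (root / "templates" / f"{name}.json").read_text(encoding="utf-8")
    return json.loads(text)

# workflow = load_workflow("api_bytedance_text_to_video")
# print(len(workflow.get("nodes", [])), "nodes in the graph")
```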
@@ -1934,11 +2128,13 @@
  "description": "Generate detailed 4X mesh quality 3D models from photos using Rodin Gen2",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Model", "3D", "API"],
+ "tags": ["Image to 3D", "3D", "API"],
  "models": ["Rodin"],
  "date": "2025-09-27",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_rodin_image_to_model",
@@ -1946,11 +2142,13 @@
  "description": "Generate detailed 3D models from single photos using Rodin AI.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Model", "3D", "API"],
+ "tags": ["Image to 3D", "3D", "API"],
  "models": ["Rodin"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_rodin_multiview_to_model",
@@ -1958,11 +2156,13 @@
  "description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Multiview to Model", "3D", "API"],
+ "tags": ["Image to 3D", "3D", "API"],
  "models": ["Rodin"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_tripo_text_to_model",
@@ -1974,7 +2174,9 @@
  "models": ["Tripo"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_tripo_image_to_model",
@@ -1982,11 +2184,13 @@
  "description": "Generate professional 3D assets from 2D images using Tripo engine.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Model", "3D", "API"],
+ "tags": ["Image to 3D", "3D", "API"],
  "models": ["Tripo"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_tripo_multiview_to_model",
@@ -1994,11 +2198,13 @@
  "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Multiview to Model", "3D", "API"],
+ "tags": ["Image to 3D", "3D", "API"],
  "models": ["Tripo"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  }
  ]
  },
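These hunks also normalize the tag vocabulary: one-off tags such as "Video Effects", "Frame Control", "Multi Image", "Motion Transfer", "Pose Control", "Image to Model", and "Multiview to Model" collapse into a smaller shared set ("FLF2V", "Video to Video", "Image to 3D"), so a single facet now matches comparable templates across vendors. A sketch of the kind of tag index this enables, under the same list-of-categories assumption as the earlier sketch:

```python
import json
from collections import defaultdict

def tag_index(index_path: str) -> dict[str, list[str]]:
    """Map each tag to the template names carrying it.

    With the consolidated vocabulary, one key such as "FLF2V" covers
    the Kling, Vidu, Runway, and ByteDance first/last-frame workflows
    alike; "Image to 3D" covers the Rodin and Tripo entries.
    """
    with open(index_path, encoding="utf-8") as f:
        categories = json.load(f)

    index = defaultdict(list)
    for category in categories:
        for tpl in category.get("templates", []):
            for tag in tpl.get("tags", []):
                index[tag].append(tpl["name"])
    return dict(index)

# tag_index("index.json").get("Image to 3D")
# -> the six Rodin and Tripo template names after this release
```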
@@ -2017,8 +2223,10 @@
  "mediaSubtype": "mp3",
  "tags": ["Text to Audio", "Audio", "API"],
  "date": "2025-09-09",
- "models": ["Stable Audio 2.5"],
- "OpenSource": false
+ "models": ["Stability", "Stable Audio"],
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_stability_ai_audio_to_audio",
@@ -2028,8 +2236,10 @@
  "mediaSubtype": "mp3",
  "tags": ["Audio to Audio", "Audio", "API"],
  "date": "2025-09-09",
- "models": ["Stable Audio 2.5"],
- "OpenSource": false
+ "models": ["Stability", "Stable Audio"],
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_stability_ai_audio_inpaint",
@@ -2039,8 +2249,10 @@
  "mediaSubtype": "mp3",
  "tags": ["Audio to Audio", "Audio", "API"],
  "date": "2025-09-09",
- "models": ["Stable Audio 2.5"],
- "OpenSource": false
+ "models": ["Stability", "Stable Audio"],
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  }
  ]
  },
@@ -2057,11 +2269,13 @@
  "description": "Engage with OpenAI's advanced language models for intelligent conversations.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Chat", "LLM", "API"],
+ "tags": ["LLM", "API"],
  "models": ["OpenAI"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  },
  {
  "name": "api_google_gemini",
@@ -2069,11 +2283,13 @@
  "description": "Experience Google's multimodal AI with Gemini's reasoning capabilities.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Chat", "LLM", "API"],
- "models": ["Google Gemini"],
+ "tags": ["LLM", "API"],
+ "models": ["Google Gemini", "Google"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false
+ "OpenSource": false,
+ "size": 0,
+ "vram": 0
  }
  ]
  }
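The model lists get the same treatment as the tags, gaining vendor facets ("OpenAI", "Google", "Stability") alongside the product name. And since every entry touched here now carries `size` and `vram`, a consumer can reasonably treat the pair as required; a small check that would flag an entry missed by the backfill, under the same assumed layout as the sketches above:

```python
import json

def missing_size_vram(index_path: str) -> list[str]:
    """Report template entries lacking numeric "size"/"vram" values.

    This release backfills both fields across the index (API entries
    use 0), so anything flagged here is likely a regression. The
    list-of-categories layout is an assumption, as before.
    """
    problems = []
    with open(index_path, encoding="utf-8") as f:
        categories = json.load(f)

    for category in categories:
        for tpl in category.get("templates", []):
            for field in ("size", "vram"):
                value = tpl.get(field)
                # bool is an int subclass, so reject it explicitly
                if isinstance(value, bool) or not isinstance(value, (int, float)):
                    problems.append(f"{tpl.get('name', '?')}: {field}={value!r}")
    return problems

# assert not missing_size_vram("index.json")
```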