comfyui-workflow-templates 0.1.44__py3-none-any.whl → 0.1.46__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.

This version of comfyui-workflow-templates has been flagged as potentially problematic.

Files changed (25)
  1. comfyui_workflow_templates/templates/flux1_krea_dev-1.webp +0 -0
  2. comfyui_workflow_templates/templates/flux1_krea_dev.json +543 -0
  3. comfyui_workflow_templates/templates/hunyuan_video_text_to_video.json +689 -312
  4. comfyui_workflow_templates/templates/image_to_video.json +390 -161
  5. comfyui_workflow_templates/templates/image_to_video_wan.json +610 -271
  6. comfyui_workflow_templates/templates/index.json +564 -107
  7. comfyui_workflow_templates/templates/index.schema.json +19 -0
  8. comfyui_workflow_templates/templates/ltxv_image_to_video.json +676 -274
  9. comfyui_workflow_templates/templates/ltxv_text_to_video.json +547 -203
  10. comfyui_workflow_templates/templates/mochi_text_to_video_example.json +433 -170
  11. comfyui_workflow_templates/templates/text_to_video_wan.json +409 -169
  12. comfyui_workflow_templates/templates/txt_to_image_to_video.json +556 -223
  13. comfyui_workflow_templates/templates/video_wan2_2_14B_flf2v-1.webp +0 -0
  14. comfyui_workflow_templates/templates/video_wan2_2_14B_flf2v-2.webp +0 -0
  15. comfyui_workflow_templates/templates/video_wan2_2_14B_flf2v.json +1090 -0
  16. comfyui_workflow_templates/templates/video_wan2_2_14B_i2v.json +41 -17
  17. comfyui_workflow_templates/templates/video_wan2_2_14B_t2v.json +41 -17
  18. comfyui_workflow_templates/templates/wan2.1_flf2v_720_f16.json +567 -182
  19. comfyui_workflow_templates/templates/wan2.1_fun_control.json +168 -110
  20. comfyui_workflow_templates/templates/wan2.1_fun_inp.json +470 -418
  21. {comfyui_workflow_templates-0.1.44.dist-info → comfyui_workflow_templates-0.1.46.dist-info}/METADATA +1 -1
  22. {comfyui_workflow_templates-0.1.44.dist-info → comfyui_workflow_templates-0.1.46.dist-info}/RECORD +25 -20
  23. {comfyui_workflow_templates-0.1.44.dist-info → comfyui_workflow_templates-0.1.46.dist-info}/WHEEL +0 -0
  24. {comfyui_workflow_templates-0.1.44.dist-info → comfyui_workflow_templates-0.1.46.dist-info}/licenses/LICENSE +0 -0
  25. {comfyui_workflow_templates-0.1.44.dist-info → comfyui_workflow_templates-0.1.46.dist-info}/top_level.txt +0 -0
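The substantive change across the JSON files above is metadata: two new templates are added (flux1_krea_dev and video_wan2_2_14B_flf2v), each template entry in index.json gains "tags", "models", and "date" fields (index.schema.json grows by 19 lines, presumably defining them), and many tutorialUrl values move from comfyanonymous.github.io to docs.comfy.org. Below is a minimal sketch of how a consumer might read the new fields. It assumes the wheel is installed, that the top level of index.json is a list of category objects each holding a "templates" array (as the hunks below suggest), and the helper names are illustrative rather than part of the package's API:

# Minimal sketch (not part of the package's API): read the bundled
# templates/index.json via importlib.resources and filter on the new
# per-template metadata fields ("tags", "models", "date") added in 0.1.46.
import json
from importlib import resources


def load_index():
    # The wheel ships index.json as package data, so no filesystem
    # path needs to be hard-coded.
    index_file = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
    return json.loads(index_file.read_text(encoding="utf-8"))


def templates_with_tag(index, tag):
    # Assumption: index is a list of category dicts, each with a
    # "templates" list. Entries predating 0.1.46 may lack the new
    # fields, so every lookup defaults them.
    return [
        tpl
        for category in index
        for tpl in category.get("templates", [])
        if tag in tpl.get("tags", [])
    ]


if __name__ == "__main__":
    index = load_index()
    print([tpl["name"] for tpl in templates_with_tag(index, "Text to Video")])
    # "date" is an ISO-8601 string, so lexicographic comparison works.
    recent = [
        tpl["name"]
        for category in index
        for tpl in category.get("templates", [])
        if tpl.get("date", "") >= "2025-07-01"
    ]
    print(recent)

Defaulting each new field keeps the lookup compatible with entries that predate this release.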
@@ -9,7 +9,11 @@
9
9
  "title": "Image Generation",
10
10
  "mediaType": "image",
11
11
  "mediaSubtype": "webp",
12
- "description": "Generate images from text prompts."
12
+ "description": "Generate images from text prompts.",
13
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
14
+ "tags": ["Text to Image", "Image"],
15
+ "models": ["SD1.5"],
16
+ "date": "2025-03-01"
13
17
  },
14
18
  {
15
19
  "name": "image2image",
@@ -17,23 +21,32 @@
17
21
  "mediaType": "image",
18
22
  "mediaSubtype": "webp",
19
23
  "description": "Transform existing images using text prompts.",
20
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/img2img/"
24
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
25
+ "tags": ["Image to Image", "Image"],
26
+ "models": ["SD1.5"],
27
+ "date": "2025-03-01"
21
28
  },
22
29
  {
23
30
  "name": "lora",
24
- "title": "Lora",
31
+ "title": "LoRA",
25
32
  "mediaType": "image",
26
33
  "mediaSubtype": "webp",
27
34
  "description": "Generate images with LoRA models for specialized styles or subjects.",
28
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
35
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
36
+ "tags": ["Text to Image", "Image"],
37
+ "models": ["SD1.5"],
38
+ "date": "2025-03-01"
29
39
  },
30
40
  {
31
41
  "name": "lora_multiple",
32
- "title": "Lora Multiple",
42
+ "title": "LoRA Multiple",
33
43
  "mediaType": "image",
34
44
  "mediaSubtype": "webp",
35
45
  "description": "Generate images by combining multiple LoRA models.",
36
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
46
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
47
+ "tags": ["Text to Image", "Image", "LoRA"],
48
+ "models": ["SD1.5"],
49
+ "date": "2025-03-01"
37
50
  },
38
51
  {
39
52
  "name": "inpaint_example",
@@ -42,7 +55,10 @@
42
55
  "mediaSubtype": "webp",
43
56
  "description": "Edit specific parts of images seamlessly.",
44
57
  "thumbnailVariant": "compareSlider",
45
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/inpaint/"
58
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
59
+ "tags": ["Inpaint", "Image"],
60
+ "models": ["SD1.5"],
61
+ "date": "2025-03-01"
46
62
  },
47
63
  {
48
64
  "name": "inpaint_model_outpainting",
@@ -51,7 +67,10 @@
51
67
  "mediaSubtype": "webp",
52
68
  "description": "Extend images beyond their original boundaries.",
53
69
  "thumbnailVariant": "compareSlider",
54
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/inpaint/#outpainting"
70
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
71
+ "tags": ["Outpaint", "Image"],
72
+ "models": ["SD1.5"],
73
+ "date": "2025-03-01"
55
74
  },
56
75
  {
57
76
  "name": "embedding_example",
@@ -59,7 +78,10 @@
59
78
  "mediaType": "image",
60
79
  "mediaSubtype": "webp",
61
80
  "description": "Generate images using textual inversion for consistent styles.",
62
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/"
81
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
82
+ "tags": ["Embedding", "Image"],
83
+ "models": ["SD1.5"],
84
+ "date": "2025-03-01"
63
85
  },
64
86
  {
65
87
  "name": "gligen_textbox_example",
@@ -67,7 +89,10 @@
67
89
  "mediaType": "image",
68
90
  "mediaSubtype": "webp",
69
91
  "description": "Generate images with precise object placement using text boxes.",
70
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/"
92
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
93
+ "tags": ["Gligen", "Image"],
94
+ "models": ["SD1.5"],
95
+ "date": "2025-03-01"
71
96
  }
72
97
  ]
73
98
  },
@@ -76,13 +101,28 @@
76
101
  "title": "Flux",
77
102
  "type": "image",
78
103
  "templates": [
104
+ {
105
+ "name": "flux1_krea_dev",
106
+ "title": "Flux.1 Krea Dev",
107
+ "mediaType": "image",
108
+ "mediaSubtype": "webp",
109
+ "description": "A fine-tuned FLUX model pushing photorealism to the max",
110
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
111
+ "tags": ["Text to Image", "Image", "Photorealism"],
112
+ "models": ["Flux.1 Krea Dev"],
113
+ "date": "2025-07-31"
114
+ },
79
115
  {
80
116
  "name": "flux_kontext_dev_basic",
81
117
  "title": "Flux Kontext Dev(Basic)",
82
118
  "mediaType": "image",
83
119
  "mediaSubtype": "webp",
84
120
  "thumbnailVariant": "hoverDissolve",
85
- "description": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow."
121
+ "description": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow.",
122
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
123
+ "tags": ["Image Edit", "Image to Image"],
124
+ "models": ["Flux"],
125
+ "date": "2025-06-26"
86
126
  },
87
127
  {
88
128
  "name": "flux_kontext_dev_grouped",
@@ -90,7 +130,11 @@
90
130
  "mediaType": "image",
91
131
  "mediaSubtype": "webp",
92
132
  "thumbnailVariant": "hoverDissolve",
93
- "description": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace."
133
+ "description": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace.",
134
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
135
+ "tags": ["Image Edit", "Image to Image"],
136
+ "models": ["Flux"],
137
+ "date": "2025-06-26"
94
138
  },
95
139
  {
96
140
  "name": "flux_dev_checkpoint_example",
@@ -98,7 +142,10 @@
98
142
  "mediaType": "image",
99
143
  "mediaSubtype": "webp",
100
144
  "description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
101
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-dev-1"
145
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
146
+ "tags": ["Text to Image", "Image"],
147
+ "models": ["Flux"],
148
+ "date": "2025-03-01"
102
149
  },
103
150
  {
104
151
  "name": "flux_schnell",
@@ -106,7 +153,10 @@
106
153
  "mediaType": "image",
107
154
  "mediaSubtype": "webp",
108
155
  "description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
109
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-schnell-1"
156
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
157
+ "tags": ["Text to Image", "Image"],
158
+ "models": ["Flux"],
159
+ "date": "2025-03-01"
110
160
  },
111
161
  {
112
162
  "name": "flux_dev_full_text_to_image",
@@ -114,7 +164,10 @@
114
164
  "mediaType": "image",
115
165
  "mediaSubtype": "webp",
116
166
  "description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
117
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-dev-1"
167
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
168
+ "tags": ["Text to Image", "Image"],
169
+ "models": ["Flux"],
170
+ "date": "2025-03-01"
118
171
  },
119
172
  {
120
173
  "name": "flux_schnell_full_text_to_image",
@@ -122,7 +175,10 @@
122
175
  "mediaType": "image",
123
176
  "mediaSubtype": "webp",
124
177
  "description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
125
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-schnell-1"
178
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
179
+ "tags": ["Text to Image", "Image"],
180
+ "models": ["Flux"],
181
+ "date": "2025-03-01"
126
182
  },
127
183
  {
128
184
  "name": "flux_fill_inpaint_example",
@@ -131,7 +187,10 @@
131
187
  "mediaSubtype": "webp",
132
188
  "description": "Fill missing parts of images using Flux inpainting.",
133
189
  "thumbnailVariant": "compareSlider",
134
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
190
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
191
+ "tags": ["Image to Image", "Inpaint", "Image"],
192
+ "models": ["Flux"],
193
+ "date": "2025-03-01"
135
194
  },
136
195
  {
137
196
  "name": "flux_fill_outpaint_example",
@@ -140,7 +199,10 @@
140
199
  "mediaSubtype": "webp",
141
200
  "description": "Extend images beyond boundaries using Flux outpainting.",
142
201
  "thumbnailVariant": "compareSlider",
143
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
202
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
203
+ "tags": ["Outpaint", "Image", "Image to Image"],
204
+ "models": ["Flux"],
205
+ "date": "2025-03-01"
144
206
  },
145
207
  {
146
208
  "name": "flux_canny_model_example",
@@ -149,7 +211,10 @@
149
211
  "mediaSubtype": "webp",
150
212
  "description": "Generate images guided by edge detection using Flux Canny.",
151
213
  "thumbnailVariant": "hoverDissolve",
152
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
214
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
215
+ "tags": ["Image to Image", "ControlNet", "Image"],
216
+ "models": ["Flux"],
217
+ "date": "2025-03-01"
153
218
  },
154
219
  {
155
220
  "name": "flux_depth_lora_example",
@@ -158,7 +223,10 @@
158
223
  "mediaSubtype": "webp",
159
224
  "description": "Generate images guided by depth information using Flux LoRA.",
160
225
  "thumbnailVariant": "hoverDissolve",
161
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
226
+ "tutorialUrl": "ttps://docs.comfy.org/tutorials/flux/flux-1-controlnet",
227
+ "tags": ["Image to Image", "ControlNet", "Image", "LoRA"],
228
+ "models": ["Flux"],
229
+ "date": "2025-03-01"
162
230
  },
163
231
  {
164
232
  "name": "flux_redux_model_example",
@@ -166,7 +234,10 @@
166
234
  "mediaType": "image",
167
235
  "mediaSubtype": "webp",
168
236
  "description": "Generate images by transferring style from reference images using Flux Redux.",
169
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#redux"
237
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
238
+ "tags": ["Image to Image", "ControlNet", "Image", "LoRA"],
239
+ "models": ["Flux"],
240
+ "date": "2025-03-01"
170
241
  }
171
242
  ]
172
243
  },
@@ -181,7 +252,10 @@
181
252
  "mediaType": "image",
182
253
  "mediaSubtype": "webp",
183
254
  "description": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
184
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2"
255
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
256
+ "tags": ["Text to Image", "Image"],
257
+ "models": ["OmniGen"],
258
+ "date": "2025-06-30"
185
259
  },
186
260
  {
187
261
  "name": "image_omnigen2_image_edit",
@@ -190,7 +264,10 @@
190
264
  "mediaSubtype": "webp",
191
265
  "thumbnailVariant": "hoverDissolve",
192
266
  "description": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
193
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2"
267
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
268
+ "tags": ["Image Edit", "Image"],
269
+ "models": ["OmniGen"],
270
+ "date": "2025-06-30"
194
271
  },
195
272
  {
196
273
  "name": "image_cosmos_predict2_2B_t2i",
@@ -198,35 +275,53 @@
198
275
  "mediaType": "image",
199
276
  "mediaSubtype": "webp",
200
277
  "description": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
201
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2"
278
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/cosmos/cosmos-predict2-t2i",
279
+ "tags": ["Text to Image", "Image"],
280
+ "models": ["Cosmos"],
281
+ "date": "2025-06-16"
202
282
  },
203
283
  {
204
284
  "name": "image_chroma_text_to_image",
205
285
  "title": "Chroma text to image",
206
286
  "mediaType": "image",
207
287
  "mediaSubtype": "webp",
208
- "description": "Chroma is modified from flux and has some changes in the architecture."
288
+ "description": "Chroma is modified from flux and has some changes in the architecture.",
289
+ "tags": ["Text to Image", "Image"],
290
+ "models": ["Chroma", "Flux"],
291
+ "date": "2025-06-04"
209
292
  },
210
293
  {
211
294
  "name": "hidream_i1_dev",
212
295
  "title": "HiDream I1 Dev",
213
296
  "mediaType": "image",
214
297
  "mediaSubtype": "webp",
215
- "description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware."
298
+ "description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
299
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
300
+ "tags": ["Text to Image", "Image"],
301
+ "models": ["HiDream"],
302
+ "date": "2025-04-17"
216
303
  },
217
304
  {
218
305
  "name": "hidream_i1_fast",
219
306
  "title": "HiDream I1 Fast",
220
307
  "mediaType": "image",
221
308
  "mediaSubtype": "webp",
222
- "description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware."
309
+ "description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
310
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
311
+ "tags": ["Text to Image", "Image"],
312
+ "models": ["HiDream"],
313
+ "date": "2025-04-17"
223
314
  },
224
315
  {
225
316
  "name": "hidream_i1_full",
226
317
  "title": "HiDream I1 Full",
227
318
  "mediaType": "image",
228
319
  "mediaSubtype": "webp",
229
- "description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output."
320
+ "description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
321
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
322
+ "tags": ["Text to Image", "Image"],
323
+ "models": ["HiDream"],
324
+ "date": "2025-04-17"
230
325
  },
231
326
  {
232
327
  "name": "hidream_e1_1",
@@ -234,7 +329,11 @@
234
329
  "mediaType": "image",
235
330
  "mediaSubtype": "webp",
236
331
  "thumbnailVariant": "compareSlider",
237
- "description": "Edit images with HiDream E1.1 – it’s better in image quality and editing accuracy than HiDream-E1-Full."
332
+ "description": "Edit images with HiDream E1.1 – it’s better in image quality and editing accuracy than HiDream-E1-Full.",
333
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
334
+ "tags": ["Image Edit", "Image"],
335
+ "models": ["HiDream"],
336
+ "date": "2025-07-21"
238
337
  },
239
338
  {
240
339
  "name": "hidream_e1_full",
@@ -242,7 +341,11 @@
242
341
  "mediaType": "image",
243
342
  "mediaSubtype": "webp",
244
343
  "thumbnailVariant": "compareSlider",
245
- "description": "Edit images with HiDream E1 - Professional natural language image editing model."
344
+ "description": "Edit images with HiDream E1 - Professional natural language image editing model.",
345
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
346
+ "tags": ["Image Edit", "Image"],
347
+ "models": ["HiDream"],
348
+ "date": "2025-05-01"
246
349
  },
247
350
  {
248
351
  "name": "sd3.5_simple_example",
@@ -250,7 +353,10 @@
250
353
  "mediaType": "image",
251
354
  "mediaSubtype": "webp",
252
355
  "description": "Generate images using SD 3.5.",
253
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35"
356
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
357
+ "tags": ["Text to Image", "Image"],
358
+ "models": ["SD3.5"],
359
+ "date": "2025-03-01"
254
360
  },
255
361
  {
256
362
  "name": "sd3.5_large_canny_controlnet_example",
@@ -259,7 +365,10 @@
259
365
  "mediaSubtype": "webp",
260
366
  "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
261
367
  "thumbnailVariant": "hoverDissolve",
262
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
368
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
369
+ "tags": ["Image to Image", "Image", "ControlNet"],
370
+ "models": ["SD3.5"],
371
+ "date": "2025-03-01"
263
372
  },
264
373
  {
265
374
  "name": "sd3.5_large_depth",
@@ -268,7 +377,10 @@
268
377
  "mediaSubtype": "webp",
269
378
  "description": "Generate images guided by depth information using SD 3.5.",
270
379
  "thumbnailVariant": "hoverDissolve",
271
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
380
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
381
+ "tags": ["Image to Image", "Image", "ControlNet"],
382
+ "models": ["SD3.5"],
383
+ "date": "2025-03-01"
272
384
  },
273
385
  {
274
386
  "name": "sd3.5_large_blur",
@@ -277,7 +389,10 @@
277
389
  "mediaSubtype": "webp",
278
390
  "description": "Generate images guided by blurred reference images using SD 3.5.",
279
391
  "thumbnailVariant": "hoverDissolve",
280
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
392
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
393
+ "tags": ["Image to Image", "Image"],
394
+ "models": ["SD3.5"],
395
+ "date": "2025-03-01"
281
396
  },
282
397
  {
283
398
  "name": "sdxl_simple_example",
@@ -285,7 +400,10 @@
285
400
  "mediaType": "image",
286
401
  "mediaSubtype": "webp",
287
402
  "description": "Generate high-quality images using SDXL.",
288
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
403
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
404
+ "tags": ["Text to Image", "Image"],
405
+ "models": ["SDXL"],
406
+ "date": "2025-03-01"
289
407
  },
290
408
  {
291
409
  "name": "sdxl_refiner_prompt_example",
@@ -293,7 +411,10 @@
293
411
  "mediaType": "image",
294
412
  "mediaSubtype": "webp",
295
413
  "description": "Enhance SDXL images using refiner models.",
296
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
414
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
415
+ "tags": ["Text to Image", "Image"],
416
+ "models": ["SDXL"],
417
+ "date": "2025-03-01"
297
418
  },
298
419
  {
299
420
  "name": "sdxl_revision_text_prompts",
@@ -301,7 +422,10 @@
301
422
  "mediaType": "image",
302
423
  "mediaSubtype": "webp",
303
424
  "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
304
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
425
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
426
+ "tags": ["Text to Image", "Image"],
427
+ "models": ["SDXL"],
428
+ "date": "2025-03-01"
305
429
  },
306
430
  {
307
431
  "name": "sdxl_revision_zero_positive",
@@ -309,7 +433,10 @@
309
433
  "mediaType": "image",
310
434
  "mediaSubtype": "webp",
311
435
  "description": "Generate images using both text prompts and reference images with SDXL Revision.",
312
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
436
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
437
+ "tags": ["Text to Image", "Image"],
438
+ "models": ["SDXL"],
439
+ "date": "2025-03-01"
313
440
  },
314
441
  {
315
442
  "name": "sdxlturbo_example",
@@ -317,7 +444,10 @@
317
444
  "mediaType": "image",
318
445
  "mediaSubtype": "webp",
319
446
  "description": "Generate images in a single step using SDXL Turbo.",
320
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/"
447
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
448
+ "tags": ["Text to Image", "Image"],
449
+ "models": ["SDXL Turbo"],
450
+ "date": "2025-03-01"
321
451
  },
322
452
  {
323
453
  "name": "image_lotus_depth_v1_1",
@@ -325,7 +455,10 @@
325
455
  "mediaType": "image",
326
456
  "mediaSubtype": "webp",
327
457
  "thumbnailVariant": "compareSlider",
328
- "description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention."
458
+ "description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention.",
459
+ "tags": ["Depth", "Image"],
460
+ "models": ["SD1.5"],
461
+ "date": "2025-05-21"
329
462
  }
330
463
  ]
331
464
  },
@@ -340,7 +473,10 @@
340
473
  "description": "Generate high-quality videos from text prompts with cinematic aesthetic control and dynamic motion generation using Wan 2.2.",
341
474
  "mediaType": "image",
342
475
  "mediaSubtype": "webp",
343
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2"
476
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
477
+ "tags": ["Text to Video", "Video"],
478
+ "models": ["Wan"],
479
+ "date": "2025-07-29"
344
480
  },
345
481
  {
346
482
  "name": "video_wan2_2_14B_i2v",
@@ -349,7 +485,22 @@
349
485
  "mediaType": "image",
350
486
  "mediaSubtype": "webp",
351
487
  "thumbnailVariant": "hoverDissolve",
352
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2"
488
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
489
+ "tags": ["Text to Video", "Video"],
490
+ "models": ["Wan2.2"],
491
+ "date": "2025-07-29"
492
+ },
493
+ {
494
+ "name": "video_wan2_2_14B_flf2v",
495
+ "title": "Wan 2.2 14B First-Last Frame to Video",
496
+ "description": "Generate smooth video transitions by defining start and end frames.",
497
+ "mediaType": "image",
498
+ "mediaSubtype": "webp",
499
+ "thumbnailVariant": "hoverDissolve",
500
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
501
+ "tags": ["Text to Video", "Video"],
502
+ "models": ["Wan2.2"],
503
+ "date": "2025-08-02"
353
504
  },
354
505
  {
355
506
  "name": "video_wan2_2_5B_ti2v",
@@ -357,7 +508,10 @@
357
508
  "description": "Generate videos from text or images using Wan 2.2 5B hybrid model",
358
509
  "mediaType": "image",
359
510
  "mediaSubtype": "webp",
360
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2"
511
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
512
+ "tags": ["Text to Video", "Video"],
513
+ "models": ["Wan2.2"],
514
+ "date": "2025-07-29"
361
515
  },
362
516
  {
363
517
  "name": "video_wan_vace_14B_t2v",
@@ -365,7 +519,10 @@
365
519
  "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
366
520
  "mediaType": "image",
367
521
  "mediaSubtype": "webp",
368
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
522
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
523
+ "tags": ["Text to Video", "Video"],
524
+ "models": ["Wan2.1"],
525
+ "date": "2025-05-21"
369
526
  },
370
527
  {
371
528
  "name": "video_wan_vace_14B_ref2v",
@@ -373,7 +530,10 @@
373
530
  "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
374
531
  "mediaType": "image",
375
532
  "mediaSubtype": "webp",
376
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
533
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
534
+ "tags": ["Reference to Video", "Video"],
535
+ "models": ["Wan2.1"],
536
+ "date": "2025-05-21"
377
537
  },
378
538
  {
379
539
  "name": "video_wan_vace_14B_v2v",
@@ -382,7 +542,10 @@
382
542
  "mediaType": "image",
383
543
  "mediaSubtype": "webp",
384
544
  "thumbnailVariant": "compareSlider",
385
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
545
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
546
+ "tags": ["Video to Video", "Video"],
547
+ "models": ["Wan2.1"],
548
+ "date": "2025-05-21"
386
549
  },
387
550
  {
388
551
  "name": "video_wan_vace_outpainting",
@@ -391,7 +554,10 @@
391
554
  "mediaType": "image",
392
555
  "mediaSubtype": "webp",
393
556
  "thumbnailVariant": "compareSlider",
394
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
557
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
558
+ "tags": ["Outpainting", "Video"],
559
+ "models": ["Wan2.1"],
560
+ "date": "2025-05-21"
395
561
  },
396
562
  {
397
563
  "name": "video_wan_vace_flf2v",
@@ -399,7 +565,10 @@
399
565
  "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
400
566
  "mediaType": "image",
401
567
  "mediaSubtype": "webp",
402
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
568
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
569
+ "tags": ["FLF2V", "Video"],
570
+ "models": ["Wan2.1"],
571
+ "date": "2025-05-21"
403
572
  },
404
573
  {
405
574
  "name": "video_wan_vace_inpainting",
@@ -408,7 +577,10 @@
408
577
  "mediaType": "image",
409
578
  "mediaSubtype": "webp",
410
579
  "thumbnailVariant": "compareSlider",
411
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
580
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
581
+ "tags": ["Inpainting", "Video"],
582
+ "models": ["Wan2.1"],
583
+ "date": "2025-05-21"
412
584
  },
413
585
  {
414
586
  "name": "video_wan_ati",
@@ -417,21 +589,32 @@
417
589
  "mediaType": "image",
418
590
  "mediaSubtype": "webp",
419
591
  "thumbnailVariant": "hoverDissolve",
420
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati"
592
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
593
+ "tags": ["Video"],
594
+ "models": ["Wan2.1"],
595
+ "date": "2025-05-21"
421
596
  },
422
597
  {
423
598
  "name": "video_wan2.1_fun_camera_v1.1_1.3B",
424
599
  "title": "Wan 2.1 Fun Camera 1.3B",
425
600
  "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
426
601
  "mediaType": "image",
427
- "mediaSubtype": "webp"
602
+ "mediaSubtype": "webp",
603
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
604
+ "tags": ["Video"],
605
+ "models": ["Wan2.1"],
606
+ "date": "2025-04-15"
428
607
  },
429
608
  {
430
609
  "name": "video_wan2.1_fun_camera_v1.1_14B",
431
610
  "title": "Wan 2.1 Fun Camera 14B",
432
611
  "description": "Generate high-quality videos with advanced camera control using the full 14B model",
433
612
  "mediaType": "image",
434
- "mediaSubtype": "webp"
613
+ "mediaSubtype": "webp",
614
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
615
+ "tags": ["Video"],
616
+ "models": ["Wan2.1"],
617
+ "date": "2025-04-15"
435
618
  },
436
619
  {
437
620
  "name": "text_to_video_wan",
@@ -439,7 +622,10 @@
439
622
  "description": "Generate videos from text prompts using Wan 2.1.",
440
623
  "mediaType": "image",
441
624
  "mediaSubtype": "webp",
442
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#text-to-video"
625
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
626
+ "tags": ["Text to Video", "Video"],
627
+ "models": ["Wan2.1"],
628
+ "date": "2025-03-01"
443
629
  },
444
630
  {
445
631
  "name": "image_to_video_wan",
@@ -447,7 +633,10 @@
447
633
  "description": "Generate videos from images using Wan 2.1.",
448
634
  "mediaType": "image",
449
635
  "mediaSubtype": "webp",
450
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#image-to-video"
636
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
637
+ "tags": ["Text to Video", "Video"],
638
+ "models": ["Wan2.1"],
639
+ "date": "2025-03-01"
451
640
  },
452
641
  {
453
642
  "name": "wan2.1_fun_inp",
@@ -455,7 +644,10 @@
455
644
  "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
456
645
  "mediaType": "image",
457
646
  "mediaSubtype": "webp",
458
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp"
647
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
648
+ "tags": ["Inpaint", "Video"],
649
+ "models": ["Wan2.1"],
650
+ "date": "2025-04-15"
459
651
  },
460
652
  {
461
653
  "name": "wan2.1_fun_control",
@@ -464,7 +656,10 @@
464
656
  "mediaType": "image",
465
657
  "mediaSubtype": "webp",
466
658
  "thumbnailVariant": "hoverDissolve",
467
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control"
659
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
660
+ "tags": ["Video to Video", "Video"],
661
+ "models": ["Wan2.1"],
662
+ "date": "2025-04-15"
468
663
  },
469
664
  {
470
665
  "name": "wan2.1_flf2v_720_f16",
@@ -472,7 +667,10 @@
472
667
  "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
473
668
  "mediaType": "image",
474
669
  "mediaSubtype": "webp",
475
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf"
670
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
671
+ "tags": ["FLF2V", "Video"],
672
+ "models": ["Wan2.1"],
673
+ "date": "2025-04-15"
476
674
  },
477
675
  {
478
676
  "name": "video_cosmos_predict2_2B_video2world_480p_16fps",
@@ -480,7 +678,10 @@
480
678
  "description": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
481
679
  "mediaType": "image",
482
680
  "mediaSubtype": "webp",
483
- "tutorialUrl": "http://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world"
681
+ "tutorialUrl": "http://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world",
682
+ "tags": ["Video2World", "Video"],
683
+ "models": ["Cosmos"],
684
+ "date": "2025-06-16"
484
685
  },
485
686
  {
486
687
  "name": "ltxv_text_to_video",
@@ -488,7 +689,10 @@
488
689
  "mediaType": "image",
489
690
  "mediaSubtype": "webp",
490
691
  "description": "Generate videos from text prompts.",
491
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#text-to-video"
692
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
693
+ "tags": ["Text to Video", "Video"],
694
+ "models": ["LTXV"],
695
+ "date": "2025-03-01"
492
696
  },
493
697
  {
494
698
  "name": "ltxv_image_to_video",
@@ -496,7 +700,10 @@
496
700
  "mediaType": "image",
497
701
  "mediaSubtype": "webp",
498
702
  "description": "Generate videos from still images.",
499
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#image-to-video"
703
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
704
+ "tags": ["Image to Video", "Video"],
705
+ "models": ["LTXV"],
706
+ "date": "2025-03-01"
500
707
  },
501
708
  {
502
709
  "name": "mochi_text_to_video_example",
@@ -504,7 +711,10 @@
504
711
  "mediaType": "image",
505
712
  "mediaSubtype": "webp",
506
713
  "description": "Generate videos from text prompts using Mochi model.",
507
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/"
714
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
715
+ "tags": ["Text to Video", "Video"],
716
+ "models": ["Mochi"],
717
+ "date": "2025-03-01"
508
718
  },
509
719
  {
510
720
  "name": "hunyuan_video_text_to_video",
@@ -512,7 +722,10 @@
512
722
  "mediaType": "image",
513
723
  "mediaSubtype": "webp",
514
724
  "description": "Generate videos from text prompts using Hunyuan model.",
515
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/"
725
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
726
+ "tags": ["Text to Video", "Video"],
727
+ "models": ["Hunyuan Video"],
728
+ "date": "2025-03-01"
516
729
  },
517
730
  {
518
731
  "name": "image_to_video",
@@ -520,7 +733,10 @@
520
733
  "mediaType": "image",
521
734
  "mediaSubtype": "webp",
522
735
  "description": "Generate videos from still images.",
523
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
736
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
737
+ "tags": ["Image to Video", "Video"],
738
+ "models": ["SVD"],
739
+ "date": "2025-03-01"
524
740
  },
525
741
  {
526
742
  "name": "txt_to_image_to_video",
@@ -528,7 +744,10 @@
528
744
  "mediaType": "image",
529
745
  "mediaSubtype": "webp",
530
746
  "description": "Generate videos by first creating images from text prompts.",
531
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
747
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
748
+ "tags": ["Text to Video", "Video"],
749
+ "models": ["SVD"],
750
+ "date": "2025-03-01"
532
751
  }
533
752
  ]
534
753
  },
@@ -543,7 +762,11 @@
543
762
  "description": "Input multiple images and edit them with Flux.1 Kontext.",
544
763
  "mediaType": "image",
545
764
  "mediaSubtype": "webp",
546
- "thumbnailVariant": "compareSlider"
765
+ "thumbnailVariant": "compareSlider",
766
+ "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
767
+ "tags": ["Image Edit", "Image"],
768
+ "models": ["Flux"],
769
+ "date": "2025-05-29"
547
770
  },
548
771
  {
549
772
  "name": "api_bfl_flux_1_kontext_pro_image",
@@ -551,7 +774,11 @@
551
774
  "description": "Edit images with Flux.1 Kontext pro image.",
552
775
  "mediaType": "image",
553
776
  "mediaSubtype": "webp",
554
- "thumbnailVariant": "compareSlider"
777
+ "thumbnailVariant": "compareSlider",
778
+ "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
779
+ "tags": ["Image Edit", "Image"],
780
+ "models": ["Flux"],
781
+ "date": "2025-05-29"
555
782
  },
556
783
  {
557
784
  "name": "api_bfl_flux_1_kontext_max_image",
@@ -559,14 +786,22 @@
559
786
  "description": "Edit images with Flux.1 Kontext max image.",
560
787
  "mediaType": "image",
561
788
  "mediaSubtype": "webp",
562
- "thumbnailVariant": "compareSlider"
789
+ "thumbnailVariant": "compareSlider",
790
+ "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
791
+ "tags": ["Image Edit", "Image"],
792
+ "models": ["Flux"],
793
+ "date": "2025-05-29"
563
794
  },
564
795
  {
565
796
  "name": "api_bfl_flux_pro_t2i",
566
797
  "title": "BFL Flux[Pro]: Text to Image",
567
798
  "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
568
799
  "mediaType": "image",
569
- "mediaSubtype": "webp"
800
+ "mediaSubtype": "webp",
801
+ "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
802
+ "tags": ["Image Edit", "Image"],
803
+ "models": ["Flux"],
804
+ "date": "2025-05-01"
570
805
  },
571
806
  {
572
807
  "name": "api_luma_photon_i2i",
@@ -574,7 +809,10 @@
574
809
  "description": "Guide image generation using a combination of images and prompt.",
575
810
  "mediaType": "image",
576
811
  "mediaSubtype": "webp",
577
- "thumbnailVariant": "compareSlider"
812
+ "thumbnailVariant": "compareSlider",
813
+ "tags": ["Image to Image", "Image", "API"],
814
+ "models": ["Luma Photon"],
815
+ "date": "2025-03-01"
578
816
  },
579
817
  {
580
818
  "name": "api_luma_photon_style_ref",
@@ -582,35 +820,50 @@
582
820
  "description": "Generate images by blending style references with precise control using Luma Photon.",
583
821
  "mediaType": "image",
584
822
  "mediaSubtype": "webp",
585
- "thumbnailVariant": "compareSlider"
823
+ "thumbnailVariant": "compareSlider",
824
+ "tags": ["Text to Image", "Image", "API", "Style Transfer"],
825
+ "models": ["Luma Photon"],
826
+ "date": "2025-03-01"
586
827
  },
587
828
  {
588
829
  "name": "api_recraft_image_gen_with_color_control",
589
830
  "title": "Recraft: Color Control Image Generation",
590
831
  "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
591
832
  "mediaType": "image",
592
- "mediaSubtype": "webp"
833
+ "mediaSubtype": "webp",
834
+ "tags": ["Text to Image", "Image", "API", "Color Control"],
835
+ "models": ["Recraft"],
836
+ "date": "2025-03-01"
593
837
  },
594
838
  {
595
839
  "name": "api_recraft_image_gen_with_style_control",
596
840
  "title": "Recraft: Style Control Image Generation",
597
841
  "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
598
842
  "mediaType": "image",
599
- "mediaSubtype": "webp"
843
+ "mediaSubtype": "webp",
844
+ "tags": ["Text to Image", "Image", "API", "Style Control"],
845
+ "models": ["Recraft"],
846
+ "date": "2025-03-01"
600
847
  },
601
848
  {
602
849
  "name": "api_recraft_vector_gen",
603
850
  "title": "Recraft: Vector Generation",
604
851
  "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
605
852
  "mediaType": "image",
606
- "mediaSubtype": "webp"
853
+ "mediaSubtype": "webp",
854
+ "tags": ["Text to Image", "Image", "API", "Vector"],
855
+ "models": ["Recraft"],
856
+ "date": "2025-03-01"
607
857
  },
608
858
  {
609
859
  "name": "api_runway_text_to_image",
610
860
  "title": "Runway: Text to Image",
611
861
  "description": "Generate high-quality images from text prompts using Runway's AI model.",
612
862
  "mediaType": "image",
613
- "mediaSubtype": "webp"
863
+ "mediaSubtype": "webp",
864
+ "tags": ["Text to Image", "Image", "API"],
865
+ "models": ["Runway"],
866
+ "date": "2025-03-01"
614
867
  },
615
868
  {
616
869
  "name": "api_runway_reference_to_image",
@@ -618,14 +871,20 @@
618
871
  "description": "Generate new images based on reference styles and compositions with Runway's AI.",
619
872
  "mediaType": "image",
620
873
  "thumbnailVariant": "compareSlider",
621
- "mediaSubtype": "webp"
874
+ "mediaSubtype": "webp",
875
+ "tags": ["Image to Image", "Image", "API", "Style Transfer"],
876
+ "models": ["Runway"],
877
+ "date": "2025-03-01"
622
878
  },
623
879
  {
624
880
  "name": "api_stability_ai_stable_image_ultra_t2i",
625
881
  "title": "Stability AI: Stable Image Ultra Text to Image",
626
882
  "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
627
883
  "mediaType": "image",
628
- "mediaSubtype": "webp"
884
+ "mediaSubtype": "webp",
885
+ "tags": ["Text to Image", "Image", "API"],
886
+ "models": ["Stable Image Ultra"],
887
+ "date": "2025-03-01"
629
888
  },
630
889
  {
631
890
  "name": "api_stability_ai_i2i",
@@ -633,14 +892,20 @@
633
892
  "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
634
893
  "mediaType": "image",
635
894
  "thumbnailVariant": "compareSlider",
636
- "mediaSubtype": "webp"
895
+ "mediaSubtype": "webp",
896
+ "tags": ["Image to Image", "Image", "API"],
897
+ "models": ["Stability AI"],
898
+ "date": "2025-03-01"
637
899
  },
638
900
  {
639
901
  "name": "api_stability_ai_sd3.5_t2i",
640
902
  "title": "Stability AI: SD3.5 Text to Image",
641
903
  "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
642
904
  "mediaType": "image",
643
- "mediaSubtype": "webp"
905
+ "mediaSubtype": "webp",
906
+ "tags": ["Text to Image", "Image", "API"],
907
+ "models": ["SD3.5"],
908
+ "date": "2025-03-01"
644
909
  },
645
910
  {
646
911
  "name": "api_stability_ai_sd3.5_i2i",
@@ -648,14 +913,20 @@
648
913
  "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
649
914
  "mediaType": "image",
650
915
  "thumbnailVariant": "compareSlider",
651
- "mediaSubtype": "webp"
916
+ "mediaSubtype": "webp",
917
+ "tags": ["Image to Image", "Image", "API"],
918
+ "models": ["SD3.5"],
919
+ "date": "2025-03-01"
652
920
  },
653
921
  {
654
922
  "name": "api_ideogram_v3_t2i",
655
923
  "title": "Ideogram V3: Text to Image",
656
924
  "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
657
925
  "mediaType": "image",
658
- "mediaSubtype": "webp"
926
+ "mediaSubtype": "webp",
927
+ "tags": ["Text to Image", "Image", "API", "Text Rendering"],
928
+ "models": ["Ideogram V3"],
929
+ "date": "2025-03-01"
659
930
  },
660
931
  {
661
932
  "name": "api_openai_image_1_t2i",
@@ -663,6 +934,9 @@
663
934
  "description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
664
935
  "mediaType": "image",
665
936
  "mediaSubtype": "webp",
937
+ "tags": ["Text to Image", "Image", "API"],
938
+ "models": ["GPT-Image-1"],
939
+ "date": "2025-03-01",
666
940
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
667
941
  },
668
942
  {
@@ -672,6 +946,9 @@
672
946
  "mediaType": "image",
673
947
  "mediaSubtype": "webp",
674
948
  "thumbnailVariant": "compareSlider",
949
+ "tags": ["Image to Image", "Image", "API"],
950
+ "models": ["GPT-Image-1"],
951
+ "date": "2025-03-01",
675
952
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
676
953
  },
677
954
  {
@@ -681,6 +958,9 @@
681
958
  "mediaType": "image",
682
959
  "mediaSubtype": "webp",
683
960
  "thumbnailVariant": "compareSlider",
961
+ "tags": ["Inpaint", "Image", "API"],
962
+ "models": ["GPT-Image-1"],
963
+ "date": "2025-03-01",
684
964
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
685
965
  },
686
966
  {
@@ -690,6 +970,9 @@
690
970
  "mediaType": "image",
691
971
  "mediaSubtype": "webp",
692
972
  "thumbnailVariant": "compareSlider",
973
+ "tags": ["Text to Image", "Image", "API", "Multi Input"],
974
+ "models": ["GPT-Image-1"],
975
+ "date": "2025-03-01",
693
976
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
694
977
  },
695
978
  {
@@ -698,6 +981,9 @@
698
981
  "description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
699
982
  "mediaType": "image",
700
983
  "mediaSubtype": "webp",
984
+ "tags": ["Text to Image", "Image", "API"],
985
+ "models": ["Dall-E 2"],
986
+ "date": "2025-03-01",
701
987
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
702
988
  },
703
989
  {
@@ -707,6 +993,9 @@
707
993
  "mediaType": "image",
708
994
  "mediaSubtype": "webp",
709
995
  "thumbnailVariant": "compareSlider",
996
+ "tags": ["Inpaint", "Image", "API"],
997
+ "models": ["Dall-E 2"],
998
+ "date": "2025-03-01",
710
999
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
711
1000
  },
712
1001
  {
@@ -715,6 +1004,9 @@
715
1004
  "description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
716
1005
  "mediaType": "image",
717
1006
  "mediaSubtype": "webp",
1007
+ "tags": ["Text to Image", "Image", "API"],
1008
+ "models": ["Dall-E 3"],
1009
+ "date": "2025-03-01",
718
1010
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3"
719
1011
  }
720
1012
  ]
@@ -729,49 +1021,77 @@
729
1021
  "title": "Kling: Image to Video",
730
1022
  "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
731
1023
  "mediaType": "image",
732
- "mediaSubtype": "webp"
1024
+ "mediaSubtype": "webp",
1025
+ "tags": ["Image to Video", "Video", "API"],
1026
+ "models": ["Kling"],
1027
+ "date": "2025-03-01",
1028
+ "tutorialUrl": ""
733
1029
  },
734
1030
  {
735
1031
  "name": "api_kling_effects",
736
1032
  "title": "Kling: Video Effects",
737
1033
  "description": "Generate dynamic videos by applying visual effects to images using Kling.",
738
1034
  "mediaType": "image",
739
- "mediaSubtype": "webp"
1035
+ "mediaSubtype": "webp",
1036
+ "tags": ["Video Effects", "Video", "API"],
1037
+ "models": ["Kling"],
1038
+ "date": "2025-03-01",
1039
+ "tutorialUrl": ""
740
1040
  },
741
1041
  {
742
1042
  "name": "api_kling_flf",
743
1043
  "title": "Kling: FLF2V",
744
1044
  "description": "Generate videos through controlling the first and last frames.",
745
1045
  "mediaType": "image",
746
- "mediaSubtype": "webp"
1046
+ "mediaSubtype": "webp",
1047
+ "tags": ["Video Generation", "Video", "API", "Frame Control"],
1048
+ "models": ["Kling"],
1049
+ "date": "2025-03-01",
1050
+ "tutorialUrl": ""
747
1051
  },
748
1052
  {
749
1053
  "name": "api_luma_i2v",
750
1054
  "title": "Luma: Image to Video",
751
1055
  "description": "Take static images and instantly create magical high quality animations.",
752
1056
  "mediaType": "image",
753
- "mediaSubtype": "webp"
1057
+ "mediaSubtype": "webp",
1058
+ "tags": ["Image to Video", "Video", "API"],
1059
+ "models": ["Luma"],
1060
+ "date": "2025-03-01",
1061
+ "tutorialUrl": ""
754
1062
  },
755
1063
  {
756
1064
  "name": "api_luma_t2v",
757
1065
  "title": "Luma: Text to Video",
758
1066
  "description": "High-quality videos can be generated using simple prompts.",
759
1067
  "mediaType": "image",
760
- "mediaSubtype": "webp"
1068
+ "mediaSubtype": "webp",
1069
+ "tags": ["Text to Video", "Video", "API"],
1070
+ "models": ["Luma"],
1071
+ "date": "2025-03-01",
1072
+ "tutorialUrl": ""
761
1073
  },
762
1074
  {
763
1075
  "name": "api_moonvalley_text_to_video",
764
1076
  "title": "Moonvalley: Text to Video",
765
1077
  "description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
766
1078
  "mediaType": "image",
767
- "mediaSubtype": "webp"
1079
+ "mediaSubtype": "webp",
1080
+ "tags": ["Text to Video", "Video", "API"],
1081
+ "models": ["Moonvalley"],
1082
+ "date": "2025-03-01",
1083
+ "tutorialUrl": ""
768
1084
  },
769
1085
  {
770
1086
  "name": "api_moonvalley_image_to_video",
771
1087
  "title": "Moonvalley: Image to Video",
772
1088
  "description": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
773
1089
  "mediaType": "image",
774
- "mediaSubtype": "webp"
1090
+ "mediaSubtype": "webp",
1091
+ "tags": ["Image to Video", "Video", "API"],
1092
+ "models": ["Moonvalley"],
1093
+ "date": "2025-03-01",
1094
+ "tutorialUrl": ""
775
1095
  },
776
1096
  {
777
1097
  "name": "api_moonvalley_video_to_video_motion_transfer",
@@ -779,7 +1099,11 @@
779
1099
  "description": "Apply motion from one video to another.",
780
1100
  "mediaType": "image",
781
1101
  "thumbnailVariant": "hoverDissolve",
782
- "mediaSubtype": "webp"
1102
+ "mediaSubtype": "webp",
1103
+ "tags": ["Video to Video", "Video", "API", "Motion Transfer"],
1104
+ "models": ["Moonvalley"],
1105
+ "date": "2025-03-01",
1106
+ "tutorialUrl": ""
783
1107
  },
784
1108
  {
785
1109
  "name": "api_moonvalley_video_to_video_pose_control",
@@ -787,84 +1111,132 @@
787
1111
  "description": "Apply human pose and movement from one video to another.",
788
1112
  "mediaType": "image",
789
1113
  "thumbnailVariant": "hoverDissolve",
790
- "mediaSubtype": "webp"
1114
+ "mediaSubtype": "webp",
1115
+ "tags": ["Video to Video", "Video", "API", "Pose Control"],
1116
+ "models": ["Moonvalley"],
1117
+ "date": "2025-03-01",
1118
+ "tutorialUrl": ""
791
1119
  },
792
1120
  {
793
1121
  "name": "api_hailuo_minimax_t2v",
794
1122
  "title": "MiniMax: Text to Video",
795
1123
  "description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
796
1124
  "mediaType": "image",
797
- "mediaSubtype": "webp"
1125
+ "mediaSubtype": "webp",
1126
+ "tags": ["Text to Video", "Video", "API"],
1127
+ "models": ["MiniMax"],
1128
+ "date": "2025-03-01",
1129
+ "tutorialUrl": ""
798
1130
  },
799
1131
  {
800
1132
  "name": "api_hailuo_minimax_i2v",
801
1133
  "title": "MiniMax: Image to Video",
802
1134
  "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
803
1135
  "mediaType": "image",
804
- "mediaSubtype": "webp"
1136
+ "mediaSubtype": "webp",
1137
+ "tags": ["Image to Video", "Video", "API"],
1138
+ "models": ["MiniMax"],
1139
+ "date": "2025-03-01",
1140
+ "tutorialUrl": ""
805
1141
  },
806
1142
  {
807
1143
  "name": "api_pixverse_i2v",
808
1144
  "title": "PixVerse: Image to Video",
809
1145
  "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
810
1146
  "mediaType": "image",
811
- "mediaSubtype": "webp"
1147
+ "mediaSubtype": "webp",
1148
+ "tags": ["Image to Video", "Video", "API"],
1149
+ "models": ["PixVerse"],
1150
+ "date": "2025-03-01",
1151
+ "tutorialUrl": ""
812
1152
  },
813
1153
  {
814
1154
  "name": "api_pixverse_template_i2v",
815
1155
  "title": "PixVerse Templates: Image to Video",
816
1156
  "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
817
1157
  "mediaType": "image",
818
- "mediaSubtype": "webp"
1158
+ "mediaSubtype": "webp",
1159
+ "tags": ["Image to Video", "Video", "API", "Templates"],
1160
+ "models": ["PixVerse"],
1161
+ "date": "2025-03-01",
1162
+ "tutorialUrl": ""
819
1163
  },
820
1164
  {
821
1165
  "name": "api_pixverse_t2v",
822
1166
  "title": "PixVerse: Text to Video",
823
1167
  "description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
824
1168
  "mediaType": "image",
825
- "mediaSubtype": "webp"
1169
+ "mediaSubtype": "webp",
1170
+ "tags": ["Text to Video", "Video", "API"],
1171
+ "models": ["PixVerse"],
1172
+ "date": "2025-03-01",
1173
+ "tutorialUrl": ""
826
1174
  },
827
1175
  {
828
1176
  "name": "api_runway_gen3a_turbo_image_to_video",
829
1177
  "title": "Runway: Gen3a Turbo Image to Video",
830
1178
  "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
831
1179
  "mediaType": "image",
832
- "mediaSubtype": "webp"
1180
+ "mediaSubtype": "webp",
1181
+ "tags": ["Image to Video", "Video", "API"],
1182
+ "models": ["Runway Gen3a Turbo"],
1183
+ "date": "2025-03-01",
1184
+ "tutorialUrl": ""
833
1185
  },
834
1186
  {
835
1187
  "name": "api_runway_gen4_turo_image_to_video",
836
1188
  "title": "Runway: Gen4 Turbo Image to Video",
837
1189
  "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
838
1190
  "mediaType": "image",
839
- "mediaSubtype": "webp"
1191
+ "mediaSubtype": "webp",
1192
+ "tags": ["Image to Video", "Video", "API"],
1193
+ "models": ["Runway Gen4 Turbo"],
1194
+ "date": "2025-03-01",
1195
+ "tutorialUrl": ""
840
1196
  },
841
1197
  {
842
1198
  "name": "api_runway_first_last_frame",
843
1199
  "title": "Runway: First Last Frame to Video",
844
1200
  "description": "Generate smooth video transitions between two keyframes with Runway's precision.",
845
1201
  "mediaType": "image",
846
- "mediaSubtype": "webp"
1202
+ "mediaSubtype": "webp",
1203
+ "tags": ["Video Generation", "Video", "API", "Frame Control"],
1204
+ "models": ["Runway"],
1205
+ "date": "2025-03-01",
1206
+ "tutorialUrl": ""
847
1207
  },
848
1208
  {
849
1209
  "name": "api_pika_i2v",
850
1210
  "title": "Pika: Image to Video",
851
1211
  "description": "Generate smooth animated videos from single static images using Pika AI.",
852
1212
  "mediaType": "image",
853
- "mediaSubtype": "webp"
1213
+ "mediaSubtype": "webp",
1214
+ "tags": ["Image to Video", "Video", "API"],
1215
+ "models": ["Pika"],
1216
+ "date": "2025-03-01",
1217
+ "tutorialUrl": ""
854
1218
  },
855
1219
  {
856
1220
  "name": "api_pika_scene",
857
1221
  "title": "Pika Scenes: Images to Video",
858
1222
  "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
859
1223
  "mediaType": "image",
860
- "mediaSubtype": "webp"
1224
+ "mediaSubtype": "webp",
1225
+ "tags": ["Image to Video", "Video", "API", "Multi Image"],
1226
+ "models": ["Pika Scenes"],
1227
+ "date": "2025-03-01",
1228
+ "tutorialUrl": ""
861
1229
  },
862
1230
  {
863
1231
  "name": "api_veo2_i2v",
864
1232
  "title": "Veo2: Image to Video",
865
1233
  "description": "Generate videos from images using Google Veo2 API.",
866
1234
  "mediaType": "image",
867
- "mediaSubtype": "webp"
1235
+ "mediaSubtype": "webp",
1236
+ "tags": ["Image to Video", "Video", "API"],
1237
+ "models": ["Veo2"],
1238
+ "date": "2025-03-01",
1239
+ "tutorialUrl": ""
868
1240
  }
869
1241
  ]
870
1242
  },
@@ -879,7 +1251,11 @@
  "description": "Generate detailed 3D models from single photos using Rodin AI.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
- "mediaSubtype": "webp"
+ "mediaSubtype": "webp",
+ "tags": ["Image to Model", "3D", "API"],
+ "models": ["Rodin"],
+ "date": "2025-03-01",
+ "tutorialUrl": ""
  },
  {
  "name": "api_rodin_multiview_to_model",
@@ -887,14 +1263,22 @@
  "description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
- "mediaSubtype": "webp"
+ "mediaSubtype": "webp",
+ "tags": ["Multiview to Model", "3D", "API"],
+ "models": ["Rodin"],
+ "date": "2025-03-01",
+ "tutorialUrl": ""
  },
  {
  "name": "api_tripo_text_to_model",
  "title": "Tripo: Text to Model",
  "description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
  "mediaType": "image",
- "mediaSubtype": "webp"
+ "mediaSubtype": "webp",
+ "tags": ["Text to Model", "3D", "API"],
+ "models": ["Tripo"],
+ "date": "2025-03-01",
+ "tutorialUrl": ""
  },
  {
  "name": "api_tripo_image_to_model",
@@ -902,7 +1286,11 @@
  "description": "Generate professional 3D assets from 2D images using Tripo engine.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
- "mediaSubtype": "webp"
+ "mediaSubtype": "webp",
+ "tags": ["Image to Model", "3D", "API"],
+ "models": ["Tripo"],
+ "date": "2025-03-01",
+ "tutorialUrl": ""
  },
  {
  "name": "api_tripo_multiview_to_model",
@@ -910,7 +1298,11 @@
  "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
  "mediaType": "image",
  "thumbnailVariant": "compareSlider",
- "mediaSubtype": "webp"
+ "mediaSubtype": "webp",
+ "tags": ["Multiview to Model", "3D", "API"],
+ "models": ["Tripo"],
+ "date": "2025-03-01",
+ "tutorialUrl": ""
  }
  ]
  },
@@ -924,14 +1316,22 @@
  "title": "OpenAI: Chat",
  "description": "Engage with OpenAI's advanced language models for intelligent conversations.",
  "mediaType": "image",
- "mediaSubtype": "webp"
+ "mediaSubtype": "webp",
+ "tags": ["Chat", "LLM", "API"],
+ "models": ["OpenAI"],
+ "date": "2025-03-01",
+ "tutorialUrl": ""
  },
  {
  "name": "api_google_gemini",
  "title": "Google Gemini: Chat",
  "description": "Experience Google's multimodal AI with Gemini's reasoning capabilities.",
  "mediaType": "image",
- "mediaSubtype": "webp"
+ "mediaSubtype": "webp",
+ "tags": ["Chat", "LLM", "API"],
+ "models": ["Google Gemini"],
+ "date": "2025-03-01",
+ "tutorialUrl": ""
  }
  ]
  },
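The chat templates above complete the API categories, and every entry in this release stamps the same `"date": "2025-03-01"`. A sketch, under the same package-layout assumption as the previous snippet, of ordering templates by the new field; assuming `date` stays ISO-8601 (YYYY-MM-DD), lexicographic order equals chronological order, so no date parsing is needed.

```python
import json
from importlib import resources

# Same layout assumption as above: a list of categories, each with "templates".
index_file = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
categories = json.loads(index_file.read_text(encoding="utf-8"))

all_templates = [tpl for cat in categories for tpl in cat.get("templates", [])]
# ISO-8601 date strings sort chronologically as plain strings; entries
# without a "date" (older schema) sink to the bottom via the fallback.
newest_first = sorted(
    all_templates, key=lambda tpl: tpl.get("date", "1970-01-01"), reverse=True
)
for tpl in newest_first[:5]:
    print(tpl.get("date"), tpl["name"])
```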
@@ -947,6 +1347,9 @@
  "mediaSubtype": "webp",
  "description": "Upscale images by enhancing quality in latent space.",
  "thumbnailVariant": "compareSlider",
+ "tags": ["Upscale", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/"
  },
  {
@@ -956,6 +1359,9 @@
  "mediaSubtype": "webp",
  "description": "Upscale images using ESRGAN models to enhance quality.",
  "thumbnailVariant": "compareSlider",
+ "tags": ["Upscale", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/"
  },
  {
@@ -965,6 +1371,9 @@
  "mediaSubtype": "webp",
  "description": "Upscale images using ESRGAN models during intermediate generation steps.",
  "thumbnailVariant": "compareSlider",
+ "tags": ["Upscale", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling"
  },
  {
@@ -974,6 +1383,9 @@
  "mediaSubtype": "webp",
  "description": "Upscale images while changing prompts across generation passes.",
  "thumbnailVariant": "zoomHover",
+ "tags": ["Upscale", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples"
  }
  ]
@@ -990,6 +1402,9 @@
  "mediaSubtype": "webp",
  "description": "Generate images guided by scribble reference images using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
+ "tags": ["ControlNet", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/"
  },
  {
@@ -999,6 +1414,9 @@
  "mediaSubtype": "webp",
  "description": "Generate images guided by pose references using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
+ "tags": ["ControlNet", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet"
  },
  {
@@ -1008,6 +1426,9 @@
  "mediaSubtype": "webp",
  "description": "Generate images guided by depth information using ControlNet.",
  "thumbnailVariant": "hoverDissolve",
+ "tags": ["ControlNet", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
  },
  {
@@ -1017,6 +1438,9 @@
  "mediaSubtype": "webp",
  "description": "Generate images guided by depth information using T2I adapter.",
  "thumbnailVariant": "hoverDissolve",
+ "tags": ["T2I Adapter", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
  },
  {
@@ -1026,6 +1450,9 @@
  "mediaSubtype": "webp",
  "description": "Generate images by combining multiple ControlNet models.",
  "thumbnailVariant": "hoverDissolve",
+ "tags": ["ControlNet", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets"
  }
  ]
@@ -1041,6 +1468,9 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images by controlling composition with defined areas.",
+ "tags": ["Area Composition", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
  },
  {
@@ -1049,6 +1479,9 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images with consistent subject placement using area composition.",
+ "tags": ["Area Composition", "Image"],
+ "models": ["SD1.5"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition"
  }
  ]
@@ -1064,6 +1497,9 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
+ "tags": ["Image to Model", "3D"],
+ "models": ["Hunyuan3D 2.0"],
+ "date": "2025-03-01",
  "tutorialUrl": ""
  },
  {
@@ -1072,6 +1508,9 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
+ "tags": ["Multiview to Model", "3D"],
+ "models": ["Hunyuan3D 2.0 MV"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
  "thumbnailVariant": "hoverDissolve"
  },
@@ -1081,6 +1520,9 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
+ "tags": ["Multiview to Model", "3D"],
+ "models": ["Hunyuan3D 2.0 MV Turbo"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
  "thumbnailVariant": "hoverDissolve"
  },
@@ -1090,6 +1532,9 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate 3D views from single images using Stable Zero123.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Stable Zero123"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/"
  }
  ]
@@ -1105,6 +1550,9 @@
  "mediaType": "audio",
  "mediaSubtype": "mp3",
  "description": "Generate audio from text prompts using Stable Audio.",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["Stable Audio"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/"
  },
  {
@@ -1113,6 +1561,9 @@
  "mediaType": "audio",
  "mediaSubtype": "mp3",
  "description": "Generate instrumental music from text prompts using ACE-Step v1.",
+ "tags": ["Text to Audio", "Audio", "Instrumentals"],
+ "models": ["ACE-Step v1"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  },
  {
@@ -1121,6 +1572,9 @@
  "mediaType": "audio",
  "mediaSubtype": "mp3",
  "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
+ "tags": ["Text to Audio", "Audio", "Song"],
+ "models": ["ACE-Step v1"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  },
  {
@@ -1129,6 +1583,9 @@
  "mediaType": "audio",
  "mediaSubtype": "mp3",
  "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
+ "tags": ["Audio Editing", "Audio"],
+ "models": ["ACE-Step v1"],
+ "date": "2025-03-01",
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
  }
  ]
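Since this release touches every entry in index.json, a quick consistency check can confirm the new fields landed everywhere. A hedged sketch under the same layout assumptions as above; note that many API entries in this diff ship `tutorialUrl` as an empty string, so consumers should treat that field as present-but-possibly-blank rather than required content.

```python
import json
from importlib import resources

# The four fields this diff adds to every template entry.
REQUIRED_FIELDS = ("tags", "models", "date", "tutorialUrl")

index_file = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
categories = json.loads(index_file.read_text(encoding="utf-8"))

for cat in categories:
    for tpl in cat.get("templates", []):
        missing = [field for field in REQUIRED_FIELDS if field not in tpl]
        if missing:
            print(f"{tpl.get('name', '?')}: missing {missing}")
        elif not tpl["tutorialUrl"]:
            # Empty string is common in this release; flag rather than fail.
            print(f"{tpl.get('name', '?')}: empty tutorialUrl")
```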