comfyui-workflow-templates 0.1.43__py3-none-any.whl → 0.1.45__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of comfyui-workflow-templates might be problematic.
- comfyui_workflow_templates/templates/flux1_krea_dev-1.webp +0 -0
- comfyui_workflow_templates/templates/flux1_krea_dev.json +543 -0
- comfyui_workflow_templates/templates/index.json +552 -107
- comfyui_workflow_templates/templates/video_wan2_2_14B_i2v.json +4 -4
- comfyui_workflow_templates/templates/video_wan2_2_14B_t2v.json +42 -42
- {comfyui_workflow_templates-0.1.43.dist-info → comfyui_workflow_templates-0.1.45.dist-info}/METADATA +1 -1
- {comfyui_workflow_templates-0.1.43.dist-info → comfyui_workflow_templates-0.1.45.dist-info}/RECORD +10 -8
- {comfyui_workflow_templates-0.1.43.dist-info → comfyui_workflow_templates-0.1.45.dist-info}/WHEEL +0 -0
- {comfyui_workflow_templates-0.1.43.dist-info → comfyui_workflow_templates-0.1.45.dist-info}/licenses/LICENSE +0 -0
- {comfyui_workflow_templates-0.1.43.dist-info → comfyui_workflow_templates-0.1.45.dist-info}/top_level.txt +0 -0
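
The headline change in this release is that every entry in templates/index.json gains `tags`, `models`, and `date` metadata (plus filled-in `tutorialUrl` links), alongside the new flux1_krea_dev template. Below is a minimal sketch of how a consumer might browse the bundled index using the new fields; it assumes the wheel is installed, that index.json is a top-level JSON array of category objects each holding a "templates" array (as the diff below shows), and the `find_templates` helper and its parameters are illustrative, not part of the package API.

# Minimal sketch (not part of the package API): browse the bundled
# index.json using the metadata fields added in this release.
import json
from importlib import resources

def load_index():
    # Path matches the RECORD entry above:
    # comfyui_workflow_templates/templates/index.json
    ref = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
    return json.loads(ref.read_text(encoding="utf-8"))

def find_templates(tag=None, model=None, since=None):
    """Yield (category_title, template) pairs matching the new fields."""
    for category in load_index():  # assumed: top-level JSON array of categories
        for tpl in category.get("templates", []):
            if tag and tag not in tpl.get("tags", []):
                continue
            if model and model not in tpl.get("models", []):
                continue
            # "date" is an ISO YYYY-MM-DD string, so lexicographic
            # comparison doubles as chronological comparison.
            if since and tpl.get("date", "") < since:
                continue
            yield category.get("title", ""), tpl

# Example: templates tagged "Text to Image" added since July 2025.
for title, tpl in find_templates(tag="Text to Image", since="2025-07-01"):
    print(f"{title} / {tpl['name']} ({tpl.get('date')})")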
|
@@ -9,7 +9,11 @@
|
|
|
9
9
|
"title": "Image Generation",
|
|
10
10
|
"mediaType": "image",
|
|
11
11
|
"mediaSubtype": "webp",
|
|
12
|
-
"description": "Generate images from text prompts."
|
|
12
|
+
"description": "Generate images from text prompts.",
|
|
13
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
|
|
14
|
+
"tags": ["Text to Image", "Image"],
|
|
15
|
+
"models": ["SD1.5"],
|
|
16
|
+
"date": "2025-03-01"
|
|
13
17
|
},
|
|
14
18
|
{
|
|
15
19
|
"name": "image2image",
|
|
@@ -17,23 +21,32 @@
|
|
|
17
21
|
"mediaType": "image",
|
|
18
22
|
"mediaSubtype": "webp",
|
|
19
23
|
"description": "Transform existing images using text prompts.",
|
|
20
|
-
"tutorialUrl": "https://
|
|
24
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
|
|
25
|
+
"tags": ["Image to Image", "Image"],
|
|
26
|
+
"models": ["SD1.5"],
|
|
27
|
+
"date": "2025-03-01"
|
|
21
28
|
},
|
|
22
29
|
{
|
|
23
30
|
"name": "lora",
|
|
24
|
-
"title": "
|
|
31
|
+
"title": "LoRA",
|
|
25
32
|
"mediaType": "image",
|
|
26
33
|
"mediaSubtype": "webp",
|
|
27
34
|
"description": "Generate images with LoRA models for specialized styles or subjects.",
|
|
28
|
-
"tutorialUrl": "https://
|
|
35
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
|
|
36
|
+
"tags": ["Text to Image", "Image"],
|
|
37
|
+
"models": ["SD1.5"],
|
|
38
|
+
"date": "2025-03-01"
|
|
29
39
|
},
|
|
30
40
|
{
|
|
31
41
|
"name": "lora_multiple",
|
|
32
|
-
"title": "
|
|
42
|
+
"title": "LoRA Multiple",
|
|
33
43
|
"mediaType": "image",
|
|
34
44
|
"mediaSubtype": "webp",
|
|
35
45
|
"description": "Generate images by combining multiple LoRA models.",
|
|
36
|
-
"tutorialUrl": "https://
|
|
46
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
|
|
47
|
+
"tags": ["Text to Image", "Image", "LoRA"],
|
|
48
|
+
"models": ["SD1.5"],
|
|
49
|
+
"date": "2025-03-01"
|
|
37
50
|
},
|
|
38
51
|
{
|
|
39
52
|
"name": "inpaint_example",
|
|
@@ -42,7 +55,10 @@
|
|
|
42
55
|
"mediaSubtype": "webp",
|
|
43
56
|
"description": "Edit specific parts of images seamlessly.",
|
|
44
57
|
"thumbnailVariant": "compareSlider",
|
|
45
|
-
"tutorialUrl": "https://
|
|
58
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
|
|
59
|
+
"tags": ["Inpaint", "Image"],
|
|
60
|
+
"models": ["SD1.5"],
|
|
61
|
+
"date": "2025-03-01"
|
|
46
62
|
},
|
|
47
63
|
{
|
|
48
64
|
"name": "inpaint_model_outpainting",
|
|
@@ -51,7 +67,10 @@
|
|
|
51
67
|
"mediaSubtype": "webp",
|
|
52
68
|
"description": "Extend images beyond their original boundaries.",
|
|
53
69
|
"thumbnailVariant": "compareSlider",
|
|
54
|
-
"tutorialUrl": "https://
|
|
70
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
|
|
71
|
+
"tags": ["Outpaint", "Image"],
|
|
72
|
+
"models": ["SD1.5"],
|
|
73
|
+
"date": "2025-03-01"
|
|
55
74
|
},
|
|
56
75
|
{
|
|
57
76
|
"name": "embedding_example",
|
|
@@ -59,7 +78,10 @@
|
|
|
59
78
|
"mediaType": "image",
|
|
60
79
|
"mediaSubtype": "webp",
|
|
61
80
|
"description": "Generate images using textual inversion for consistent styles.",
|
|
62
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/"
|
|
81
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
|
|
82
|
+
"tags": ["Embedding", "Image"],
|
|
83
|
+
"models": ["SD1.5"],
|
|
84
|
+
"date": "2025-03-01"
|
|
63
85
|
},
|
|
64
86
|
{
|
|
65
87
|
"name": "gligen_textbox_example",
|
|
@@ -67,7 +89,10 @@
|
|
|
67
89
|
"mediaType": "image",
|
|
68
90
|
"mediaSubtype": "webp",
|
|
69
91
|
"description": "Generate images with precise object placement using text boxes.",
|
|
70
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/"
|
|
92
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
|
|
93
|
+
"tags": ["Gligen", "Image"],
|
|
94
|
+
"models": ["SD1.5"],
|
|
95
|
+
"date": "2025-03-01"
|
|
71
96
|
}
|
|
72
97
|
]
|
|
73
98
|
},
|
|
@@ -76,13 +101,28 @@
|
|
|
76
101
|
"title": "Flux",
|
|
77
102
|
"type": "image",
|
|
78
103
|
"templates": [
|
|
104
|
+
{
|
|
105
|
+
"name": "flux1_krea_dev",
|
|
106
|
+
"title": "Flux.1 Krea Dev",
|
|
107
|
+
"mediaType": "image",
|
|
108
|
+
"mediaSubtype": "webp",
|
|
109
|
+
"description": "A fine-tuned FLUX model pushing photorealism to the max",
|
|
110
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
|
|
111
|
+
"tags": ["Text to Image", "Image", "Photorealism"],
|
|
112
|
+
"models": ["Flux.1 Krea Dev"],
|
|
113
|
+
"date": "2025-07-31"
|
|
114
|
+
},
|
|
79
115
|
{
|
|
80
116
|
"name": "flux_kontext_dev_basic",
|
|
81
117
|
"title": "Flux Kontext Dev(Basic)",
|
|
82
118
|
"mediaType": "image",
|
|
83
119
|
"mediaSubtype": "webp",
|
|
84
120
|
"thumbnailVariant": "hoverDissolve",
|
|
85
|
-
"description": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow."
|
|
121
|
+
"description": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow.",
|
|
122
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
|
|
123
|
+
"tags": ["Image Edit", "Image to Image"],
|
|
124
|
+
"models": ["Flux"],
|
|
125
|
+
"date": "2025-06-26"
|
|
86
126
|
},
|
|
87
127
|
{
|
|
88
128
|
"name": "flux_kontext_dev_grouped",
|
|
@@ -90,7 +130,11 @@
|
|
|
90
130
|
"mediaType": "image",
|
|
91
131
|
"mediaSubtype": "webp",
|
|
92
132
|
"thumbnailVariant": "hoverDissolve",
|
|
93
|
-
"description": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace."
|
|
133
|
+
"description": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace.",
|
|
134
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
|
|
135
|
+
"tags": ["Image Edit", "Image to Image"],
|
|
136
|
+
"models": ["Flux"],
|
|
137
|
+
"date": "2025-06-26"
|
|
94
138
|
},
|
|
95
139
|
{
|
|
96
140
|
"name": "flux_dev_checkpoint_example",
|
|
@@ -98,7 +142,10 @@
|
|
|
98
142
|
"mediaType": "image",
|
|
99
143
|
"mediaSubtype": "webp",
|
|
100
144
|
"description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
|
|
101
|
-
"tutorialUrl": "https://
|
|
145
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
146
|
+
"tags": ["Text to Image", "Image"],
|
|
147
|
+
"models": ["Flux"],
|
|
148
|
+
"date": "2025-03-01"
|
|
102
149
|
},
|
|
103
150
|
{
|
|
104
151
|
"name": "flux_schnell",
|
|
@@ -106,7 +153,10 @@
|
|
|
106
153
|
"mediaType": "image",
|
|
107
154
|
"mediaSubtype": "webp",
|
|
108
155
|
"description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
|
|
109
|
-
"tutorialUrl": "https://
|
|
156
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
157
|
+
"tags": ["Text to Image", "Image"],
|
|
158
|
+
"models": ["Flux"],
|
|
159
|
+
"date": "2025-03-01"
|
|
110
160
|
},
|
|
111
161
|
{
|
|
112
162
|
"name": "flux_dev_full_text_to_image",
|
|
@@ -114,7 +164,10 @@
|
|
|
114
164
|
"mediaType": "image",
|
|
115
165
|
"mediaSubtype": "webp",
|
|
116
166
|
"description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
|
|
117
|
-
"tutorialUrl": "https://
|
|
167
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
168
|
+
"tags": ["Text to Image", "Image"],
|
|
169
|
+
"models": ["Flux"],
|
|
170
|
+
"date": "2025-03-01"
|
|
118
171
|
},
|
|
119
172
|
{
|
|
120
173
|
"name": "flux_schnell_full_text_to_image",
|
|
@@ -122,7 +175,10 @@
|
|
|
122
175
|
"mediaType": "image",
|
|
123
176
|
"mediaSubtype": "webp",
|
|
124
177
|
"description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
|
|
125
|
-
"tutorialUrl": "https://
|
|
178
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
179
|
+
"tags": ["Text to Image", "Image"],
|
|
180
|
+
"models": ["Flux"],
|
|
181
|
+
"date": "2025-03-01"
|
|
126
182
|
},
|
|
127
183
|
{
|
|
128
184
|
"name": "flux_fill_inpaint_example",
|
|
@@ -131,7 +187,10 @@
|
|
|
131
187
|
"mediaSubtype": "webp",
|
|
132
188
|
"description": "Fill missing parts of images using Flux inpainting.",
|
|
133
189
|
"thumbnailVariant": "compareSlider",
|
|
134
|
-
"tutorialUrl": "https://
|
|
190
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
191
|
+
"tags": ["Image to Image", "Inpaint", "Image"],
|
|
192
|
+
"models": ["Flux"],
|
|
193
|
+
"date": "2025-03-01"
|
|
135
194
|
},
|
|
136
195
|
{
|
|
137
196
|
"name": "flux_fill_outpaint_example",
|
|
@@ -140,7 +199,10 @@
|
|
|
140
199
|
"mediaSubtype": "webp",
|
|
141
200
|
"description": "Extend images beyond boundaries using Flux outpainting.",
|
|
142
201
|
"thumbnailVariant": "compareSlider",
|
|
143
|
-
"tutorialUrl": "https://
|
|
202
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
203
|
+
"tags": ["Outpaint", "Image", "Image to Image"],
|
|
204
|
+
"models": ["Flux"],
|
|
205
|
+
"date": "2025-03-01"
|
|
144
206
|
},
|
|
145
207
|
{
|
|
146
208
|
"name": "flux_canny_model_example",
|
|
@@ -149,7 +211,10 @@
|
|
|
149
211
|
"mediaSubtype": "webp",
|
|
150
212
|
"description": "Generate images guided by edge detection using Flux Canny.",
|
|
151
213
|
"thumbnailVariant": "hoverDissolve",
|
|
152
|
-
"tutorialUrl": "https://
|
|
214
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
215
|
+
"tags": ["Image to Image", "ControlNet", "Image"],
|
|
216
|
+
"models": ["Flux"],
|
|
217
|
+
"date": "2025-03-01"
|
|
153
218
|
},
|
|
154
219
|
{
|
|
155
220
|
"name": "flux_depth_lora_example",
|
|
@@ -158,7 +223,10 @@
|
|
|
158
223
|
"mediaSubtype": "webp",
|
|
159
224
|
"description": "Generate images guided by depth information using Flux LoRA.",
|
|
160
225
|
"thumbnailVariant": "hoverDissolve",
|
|
161
|
-
"tutorialUrl": "
|
|
226
|
+
"tutorialUrl": "ttps://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
227
|
+
"tags": ["Image to Image", "ControlNet", "Image", "LoRA"],
|
|
228
|
+
"models": ["Flux"],
|
|
229
|
+
"date": "2025-03-01"
|
|
162
230
|
},
|
|
163
231
|
{
|
|
164
232
|
"name": "flux_redux_model_example",
|
|
@@ -166,7 +234,10 @@
|
|
|
166
234
|
"mediaType": "image",
|
|
167
235
|
"mediaSubtype": "webp",
|
|
168
236
|
"description": "Generate images by transferring style from reference images using Flux Redux.",
|
|
169
|
-
"tutorialUrl": "https://
|
|
237
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
238
|
+
"tags": ["Image to Image", "ControlNet", "Image", "LoRA"],
|
|
239
|
+
"models": ["Flux"],
|
|
240
|
+
"date": "2025-03-01"
|
|
170
241
|
}
|
|
171
242
|
]
|
|
172
243
|
},
|
|
@@ -181,7 +252,10 @@
|
|
|
181
252
|
"mediaType": "image",
|
|
182
253
|
"mediaSubtype": "webp",
|
|
183
254
|
"description": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
|
|
184
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2"
|
|
255
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
256
|
+
"tags": ["Text to Image", "Image"],
|
|
257
|
+
"models": ["OmniGen"],
|
|
258
|
+
"date": "2025-06-30"
|
|
185
259
|
},
|
|
186
260
|
{
|
|
187
261
|
"name": "image_omnigen2_image_edit",
|
|
@@ -190,7 +264,10 @@
|
|
|
190
264
|
"mediaSubtype": "webp",
|
|
191
265
|
"thumbnailVariant": "hoverDissolve",
|
|
192
266
|
"description": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
|
|
193
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2"
|
|
267
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
268
|
+
"tags": ["Image Edit", "Image"],
|
|
269
|
+
"models": ["OmniGen"],
|
|
270
|
+
"date": "2025-06-30"
|
|
194
271
|
},
|
|
195
272
|
{
|
|
196
273
|
"name": "image_cosmos_predict2_2B_t2i",
|
|
@@ -198,35 +275,53 @@
|
|
|
198
275
|
"mediaType": "image",
|
|
199
276
|
"mediaSubtype": "webp",
|
|
200
277
|
"description": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
|
|
201
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/image/
|
|
278
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/cosmos/cosmos-predict2-t2i",
|
|
279
|
+
"tags": ["Text to Image", "Image"],
|
|
280
|
+
"models": ["Cosmos"],
|
|
281
|
+
"date": "2025-06-16"
|
|
202
282
|
},
|
|
203
283
|
{
|
|
204
284
|
"name": "image_chroma_text_to_image",
|
|
205
285
|
"title": "Chroma text to image",
|
|
206
286
|
"mediaType": "image",
|
|
207
287
|
"mediaSubtype": "webp",
|
|
208
|
-
"description": "Chroma is modified from flux and has some changes in the architecture."
|
|
288
|
+
"description": "Chroma is modified from flux and has some changes in the architecture.",
|
|
289
|
+
"tags": ["Text to Image", "Image"],
|
|
290
|
+
"models": ["Chroma", "Flux"],
|
|
291
|
+
"date": "2025-06-04"
|
|
209
292
|
},
|
|
210
293
|
{
|
|
211
294
|
"name": "hidream_i1_dev",
|
|
212
295
|
"title": "HiDream I1 Dev",
|
|
213
296
|
"mediaType": "image",
|
|
214
297
|
"mediaSubtype": "webp",
|
|
215
|
-
"description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware."
|
|
298
|
+
"description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
|
|
299
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
300
|
+
"tags": ["Text to Image", "Image"],
|
|
301
|
+
"models": ["HiDream"],
|
|
302
|
+
"date": "2025-04-17"
|
|
216
303
|
},
|
|
217
304
|
{
|
|
218
305
|
"name": "hidream_i1_fast",
|
|
219
306
|
"title": "HiDream I1 Fast",
|
|
220
307
|
"mediaType": "image",
|
|
221
308
|
"mediaSubtype": "webp",
|
|
222
|
-
"description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware."
|
|
309
|
+
"description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
|
|
310
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
311
|
+
"tags": ["Text to Image", "Image"],
|
|
312
|
+
"models": ["HiDream"],
|
|
313
|
+
"date": "2025-04-17"
|
|
223
314
|
},
|
|
224
315
|
{
|
|
225
316
|
"name": "hidream_i1_full",
|
|
226
317
|
"title": "HiDream I1 Full",
|
|
227
318
|
"mediaType": "image",
|
|
228
319
|
"mediaSubtype": "webp",
|
|
229
|
-
"description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output."
|
|
320
|
+
"description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
|
|
321
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
322
|
+
"tags": ["Text to Image", "Image"],
|
|
323
|
+
"models": ["HiDream"],
|
|
324
|
+
"date": "2025-04-17"
|
|
230
325
|
},
|
|
231
326
|
{
|
|
232
327
|
"name": "hidream_e1_1",
|
|
@@ -234,7 +329,11 @@
|
|
|
234
329
|
"mediaType": "image",
|
|
235
330
|
"mediaSubtype": "webp",
|
|
236
331
|
"thumbnailVariant": "compareSlider",
|
|
237
|
-
"description": "Edit images with HiDream E1.1 – it’s better in image quality and editing accuracy than HiDream-E1-Full."
|
|
332
|
+
"description": "Edit images with HiDream E1.1 – it’s better in image quality and editing accuracy than HiDream-E1-Full.",
|
|
333
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
334
|
+
"tags": ["Image Edit", "Image"],
|
|
335
|
+
"models": ["HiDream"],
|
|
336
|
+
"date": "2025-07-21"
|
|
238
337
|
},
|
|
239
338
|
{
|
|
240
339
|
"name": "hidream_e1_full",
|
|
@@ -242,7 +341,11 @@
|
|
|
242
341
|
"mediaType": "image",
|
|
243
342
|
"mediaSubtype": "webp",
|
|
244
343
|
"thumbnailVariant": "compareSlider",
|
|
245
|
-
"description": "Edit images with HiDream E1 - Professional natural language image editing model."
|
|
344
|
+
"description": "Edit images with HiDream E1 - Professional natural language image editing model.",
|
|
345
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
346
|
+
"tags": ["Image Edit", "Image"],
|
|
347
|
+
"models": ["HiDream"],
|
|
348
|
+
"date": "2025-05-01"
|
|
246
349
|
},
|
|
247
350
|
{
|
|
248
351
|
"name": "sd3.5_simple_example",
|
|
@@ -250,7 +353,10 @@
|
|
|
250
353
|
"mediaType": "image",
|
|
251
354
|
"mediaSubtype": "webp",
|
|
252
355
|
"description": "Generate images using SD 3.5.",
|
|
253
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35"
|
|
356
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
|
|
357
|
+
"tags": ["Text to Image", "Image"],
|
|
358
|
+
"models": ["SD3.5"],
|
|
359
|
+
"date": "2025-03-01"
|
|
254
360
|
},
|
|
255
361
|
{
|
|
256
362
|
"name": "sd3.5_large_canny_controlnet_example",
|
|
@@ -259,7 +365,10 @@
|
|
|
259
365
|
"mediaSubtype": "webp",
|
|
260
366
|
"description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
|
|
261
367
|
"thumbnailVariant": "hoverDissolve",
|
|
262
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
|
|
368
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
369
|
+
"tags": ["Image to Image", "Image", "ControlNet"],
|
|
370
|
+
"models": ["SD3.5"],
|
|
371
|
+
"date": "2025-03-01"
|
|
263
372
|
},
|
|
264
373
|
{
|
|
265
374
|
"name": "sd3.5_large_depth",
|
|
@@ -268,7 +377,10 @@
|
|
|
268
377
|
"mediaSubtype": "webp",
|
|
269
378
|
"description": "Generate images guided by depth information using SD 3.5.",
|
|
270
379
|
"thumbnailVariant": "hoverDissolve",
|
|
271
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
|
|
380
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
381
|
+
"tags": ["Image to Image", "Image", "ControlNet"],
|
|
382
|
+
"models": ["SD3.5"],
|
|
383
|
+
"date": "2025-03-01"
|
|
272
384
|
},
|
|
273
385
|
{
|
|
274
386
|
"name": "sd3.5_large_blur",
|
|
@@ -277,7 +389,10 @@
|
|
|
277
389
|
"mediaSubtype": "webp",
|
|
278
390
|
"description": "Generate images guided by blurred reference images using SD 3.5.",
|
|
279
391
|
"thumbnailVariant": "hoverDissolve",
|
|
280
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
|
|
392
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
393
|
+
"tags": ["Image to Image", "Image"],
|
|
394
|
+
"models": ["SD3.5"],
|
|
395
|
+
"date": "2025-03-01"
|
|
281
396
|
},
|
|
282
397
|
{
|
|
283
398
|
"name": "sdxl_simple_example",
|
|
@@ -285,7 +400,10 @@
|
|
|
285
400
|
"mediaType": "image",
|
|
286
401
|
"mediaSubtype": "webp",
|
|
287
402
|
"description": "Generate high-quality images using SDXL.",
|
|
288
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
|
|
403
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
404
|
+
"tags": ["Text to Image", "Image"],
|
|
405
|
+
"models": ["SDXL"],
|
|
406
|
+
"date": "2025-03-01"
|
|
289
407
|
},
|
|
290
408
|
{
|
|
291
409
|
"name": "sdxl_refiner_prompt_example",
|
|
@@ -293,7 +411,10 @@
|
|
|
293
411
|
"mediaType": "image",
|
|
294
412
|
"mediaSubtype": "webp",
|
|
295
413
|
"description": "Enhance SDXL images using refiner models.",
|
|
296
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
|
|
414
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
415
|
+
"tags": ["Text to Image", "Image"],
|
|
416
|
+
"models": ["SDXL"],
|
|
417
|
+
"date": "2025-03-01"
|
|
297
418
|
},
|
|
298
419
|
{
|
|
299
420
|
"name": "sdxl_revision_text_prompts",
|
|
@@ -301,7 +422,10 @@
|
|
|
301
422
|
"mediaType": "image",
|
|
302
423
|
"mediaSubtype": "webp",
|
|
303
424
|
"description": "Generate images by transferring concepts from reference images using SDXL Revision.",
|
|
304
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
|
|
425
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
|
|
426
|
+
"tags": ["Text to Image", "Image"],
|
|
427
|
+
"models": ["SDXL"],
|
|
428
|
+
"date": "2025-03-01"
|
|
305
429
|
},
|
|
306
430
|
{
|
|
307
431
|
"name": "sdxl_revision_zero_positive",
|
|
@@ -309,7 +433,10 @@
|
|
|
309
433
|
"mediaType": "image",
|
|
310
434
|
"mediaSubtype": "webp",
|
|
311
435
|
"description": "Generate images using both text prompts and reference images with SDXL Revision.",
|
|
312
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
|
|
436
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
|
|
437
|
+
"tags": ["Text to Image", "Image"],
|
|
438
|
+
"models": ["SDXL"],
|
|
439
|
+
"date": "2025-03-01"
|
|
313
440
|
},
|
|
314
441
|
{
|
|
315
442
|
"name": "sdxlturbo_example",
|
|
@@ -317,7 +444,10 @@
|
|
|
317
444
|
"mediaType": "image",
|
|
318
445
|
"mediaSubtype": "webp",
|
|
319
446
|
"description": "Generate images in a single step using SDXL Turbo.",
|
|
320
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/"
|
|
447
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
|
|
448
|
+
"tags": ["Text to Image", "Image"],
|
|
449
|
+
"models": ["SDXL Turbo"],
|
|
450
|
+
"date": "2025-03-01"
|
|
321
451
|
},
|
|
322
452
|
{
|
|
323
453
|
"name": "image_lotus_depth_v1_1",
|
|
@@ -325,7 +455,10 @@
|
|
|
325
455
|
"mediaType": "image",
|
|
326
456
|
"mediaSubtype": "webp",
|
|
327
457
|
"thumbnailVariant": "compareSlider",
|
|
328
|
-
"description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention."
|
|
458
|
+
"description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention.",
|
|
459
|
+
"tags": ["Depth", "Image"],
|
|
460
|
+
"models": ["SD1.5"],
|
|
461
|
+
"date": "2025-05-21"
|
|
329
462
|
}
|
|
330
463
|
]
|
|
331
464
|
},
|
|
@@ -340,7 +473,10 @@
|
|
|
340
473
|
"description": "Generate high-quality videos from text prompts with cinematic aesthetic control and dynamic motion generation using Wan 2.2.",
|
|
341
474
|
"mediaType": "image",
|
|
342
475
|
"mediaSubtype": "webp",
|
|
343
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2"
|
|
476
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
477
|
+
"tags": ["Text to Video", "Video"],
|
|
478
|
+
"models": ["Wan"],
|
|
479
|
+
"date": "2025-07-29"
|
|
344
480
|
},
|
|
345
481
|
{
|
|
346
482
|
"name": "video_wan2_2_14B_i2v",
|
|
@@ -349,7 +485,10 @@
|
|
|
349
485
|
"mediaType": "image",
|
|
350
486
|
"mediaSubtype": "webp",
|
|
351
487
|
"thumbnailVariant": "hoverDissolve",
|
|
352
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2"
|
|
488
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
489
|
+
"tags": ["Text to Video", "Video"],
|
|
490
|
+
"models": ["Wan2.2"],
|
|
491
|
+
"date": "2025-07-29"
|
|
353
492
|
},
|
|
354
493
|
{
|
|
355
494
|
"name": "video_wan2_2_5B_ti2v",
|
|
@@ -357,7 +496,10 @@
|
|
|
357
496
|
"description": "Generate videos from text or images using Wan 2.2 5B hybrid model",
|
|
358
497
|
"mediaType": "image",
|
|
359
498
|
"mediaSubtype": "webp",
|
|
360
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2"
|
|
499
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
500
|
+
"tags": ["Text to Video", "Video"],
|
|
501
|
+
"models": ["Wan2.2"],
|
|
502
|
+
"date": "2025-07-29"
|
|
361
503
|
},
|
|
362
504
|
{
|
|
363
505
|
"name": "video_wan_vace_14B_t2v",
|
|
@@ -365,7 +507,10 @@
|
|
|
365
507
|
"description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
|
|
366
508
|
"mediaType": "image",
|
|
367
509
|
"mediaSubtype": "webp",
|
|
368
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
|
|
510
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
511
|
+
"tags": ["Text to Video", "Video"],
|
|
512
|
+
"models": ["Wan2.1"],
|
|
513
|
+
"date": "2025-05-21"
|
|
369
514
|
},
|
|
370
515
|
{
|
|
371
516
|
"name": "video_wan_vace_14B_ref2v",
|
|
@@ -373,7 +518,10 @@
|
|
|
373
518
|
"description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
|
|
374
519
|
"mediaType": "image",
|
|
375
520
|
"mediaSubtype": "webp",
|
|
376
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
|
|
521
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
522
|
+
"tags": ["Reference to Video", "Video"],
|
|
523
|
+
"models": ["Wan2.1"],
|
|
524
|
+
"date": "2025-05-21"
|
|
377
525
|
},
|
|
378
526
|
{
|
|
379
527
|
"name": "video_wan_vace_14B_v2v",
|
|
@@ -382,7 +530,10 @@
|
|
|
382
530
|
"mediaType": "image",
|
|
383
531
|
"mediaSubtype": "webp",
|
|
384
532
|
"thumbnailVariant": "compareSlider",
|
|
385
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
|
|
533
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
534
|
+
"tags": ["Video to Video", "Video"],
|
|
535
|
+
"models": ["Wan2.1"],
|
|
536
|
+
"date": "2025-05-21"
|
|
386
537
|
},
|
|
387
538
|
{
|
|
388
539
|
"name": "video_wan_vace_outpainting",
|
|
@@ -391,7 +542,10 @@
|
|
|
391
542
|
"mediaType": "image",
|
|
392
543
|
"mediaSubtype": "webp",
|
|
393
544
|
"thumbnailVariant": "compareSlider",
|
|
394
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
|
|
545
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
546
|
+
"tags": ["Outpainting", "Video"],
|
|
547
|
+
"models": ["Wan2.1"],
|
|
548
|
+
"date": "2025-05-21"
|
|
395
549
|
},
|
|
396
550
|
{
|
|
397
551
|
"name": "video_wan_vace_flf2v",
|
|
@@ -399,7 +553,10 @@
|
|
|
399
553
|
"description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
|
|
400
554
|
"mediaType": "image",
|
|
401
555
|
"mediaSubtype": "webp",
|
|
402
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
|
|
556
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
557
|
+
"tags": ["FLF2V", "Video"],
|
|
558
|
+
"models": ["Wan2.1"],
|
|
559
|
+
"date": "2025-05-21"
|
|
403
560
|
},
|
|
404
561
|
{
|
|
405
562
|
"name": "video_wan_vace_inpainting",
|
|
@@ -408,7 +565,10 @@
|
|
|
408
565
|
"mediaType": "image",
|
|
409
566
|
"mediaSubtype": "webp",
|
|
410
567
|
"thumbnailVariant": "compareSlider",
|
|
411
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
|
|
568
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
569
|
+
"tags": ["Inpainting", "Video"],
|
|
570
|
+
"models": ["Wan2.1"],
|
|
571
|
+
"date": "2025-05-21"
|
|
412
572
|
},
|
|
413
573
|
{
|
|
414
574
|
"name": "video_wan_ati",
|
|
@@ -417,21 +577,32 @@
|
|
|
417
577
|
"mediaType": "image",
|
|
418
578
|
"mediaSubtype": "webp",
|
|
419
579
|
"thumbnailVariant": "hoverDissolve",
|
|
420
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati"
|
|
580
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
|
|
581
|
+
"tags": ["Video"],
|
|
582
|
+
"models": ["Wan2.1"],
|
|
583
|
+
"date": "2025-05-21"
|
|
421
584
|
},
|
|
422
585
|
{
|
|
423
586
|
"name": "video_wan2.1_fun_camera_v1.1_1.3B",
|
|
424
587
|
"title": "Wan 2.1 Fun Camera 1.3B",
|
|
425
588
|
"description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
|
|
426
589
|
"mediaType": "image",
|
|
427
|
-
"mediaSubtype": "webp"
|
|
590
|
+
"mediaSubtype": "webp",
|
|
591
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
592
|
+
"tags": ["Video"],
|
|
593
|
+
"models": ["Wan2.1"],
|
|
594
|
+
"date": "2025-04-15"
|
|
428
595
|
},
|
|
429
596
|
{
|
|
430
597
|
"name": "video_wan2.1_fun_camera_v1.1_14B",
|
|
431
598
|
"title": "Wan 2.1 Fun Camera 14B",
|
|
432
599
|
"description": "Generate high-quality videos with advanced camera control using the full 14B model",
|
|
433
600
|
"mediaType": "image",
|
|
434
|
-
"mediaSubtype": "webp"
|
|
601
|
+
"mediaSubtype": "webp",
|
|
602
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
603
|
+
"tags": ["Video"],
|
|
604
|
+
"models": ["Wan2.1"],
|
|
605
|
+
"date": "2025-04-15"
|
|
435
606
|
},
|
|
436
607
|
{
|
|
437
608
|
"name": "text_to_video_wan",
|
|
@@ -439,7 +610,10 @@
|
|
|
439
610
|
"description": "Generate videos from text prompts using Wan 2.1.",
|
|
440
611
|
"mediaType": "image",
|
|
441
612
|
"mediaSubtype": "webp",
|
|
442
|
-
"tutorialUrl": "https://
|
|
613
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
614
|
+
"tags": ["Text to Video", "Video"],
|
|
615
|
+
"models": ["Wan2.1"],
|
|
616
|
+
"date": "2025-03-01"
|
|
443
617
|
},
|
|
444
618
|
{
|
|
445
619
|
"name": "image_to_video_wan",
|
|
@@ -447,7 +621,10 @@
|
|
|
447
621
|
"description": "Generate videos from images using Wan 2.1.",
|
|
448
622
|
"mediaType": "image",
|
|
449
623
|
"mediaSubtype": "webp",
|
|
450
|
-
"tutorialUrl": "https://
|
|
624
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
625
|
+
"tags": ["Text to Video", "Video"],
|
|
626
|
+
"models": ["Wan2.1"],
|
|
627
|
+
"date": "2025-03-01"
|
|
451
628
|
},
|
|
452
629
|
{
|
|
453
630
|
"name": "wan2.1_fun_inp",
|
|
@@ -455,7 +632,10 @@
|
|
|
455
632
|
"description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
|
|
456
633
|
"mediaType": "image",
|
|
457
634
|
"mediaSubtype": "webp",
|
|
458
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp"
|
|
635
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
|
|
636
|
+
"tags": ["Inpaint", "Video"],
|
|
637
|
+
"models": ["Wan2.1"],
|
|
638
|
+
"date": "2025-04-15"
|
|
459
639
|
},
|
|
460
640
|
{
|
|
461
641
|
"name": "wan2.1_fun_control",
|
|
@@ -464,7 +644,10 @@
|
|
|
464
644
|
"mediaType": "image",
|
|
465
645
|
"mediaSubtype": "webp",
|
|
466
646
|
"thumbnailVariant": "hoverDissolve",
|
|
467
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control"
|
|
647
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
648
|
+
"tags": ["Video to Video", "Video"],
|
|
649
|
+
"models": ["Wan2.1"],
|
|
650
|
+
"date": "2025-04-15"
|
|
468
651
|
},
|
|
469
652
|
{
|
|
470
653
|
"name": "wan2.1_flf2v_720_f16",
|
|
@@ -472,7 +655,10 @@
|
|
|
472
655
|
"description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
|
|
473
656
|
"mediaType": "image",
|
|
474
657
|
"mediaSubtype": "webp",
|
|
475
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf"
|
|
658
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
659
|
+
"tags": ["FLF2V", "Video"],
|
|
660
|
+
"models": ["Wan2.1"],
|
|
661
|
+
"date": "2025-04-15"
|
|
476
662
|
},
|
|
477
663
|
{
|
|
478
664
|
"name": "video_cosmos_predict2_2B_video2world_480p_16fps",
|
|
@@ -480,7 +666,10 @@
|
|
|
480
666
|
"description": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
|
|
481
667
|
"mediaType": "image",
|
|
482
668
|
"mediaSubtype": "webp",
|
|
483
|
-
"tutorialUrl": "http://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world"
|
|
669
|
+
"tutorialUrl": "http://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world",
|
|
670
|
+
"tags": ["Video2World", "Video"],
|
|
671
|
+
"models": ["Cosmos"],
|
|
672
|
+
"date": "2025-06-16"
|
|
484
673
|
},
|
|
485
674
|
{
|
|
486
675
|
"name": "ltxv_text_to_video",
|
|
@@ -488,7 +677,10 @@
|
|
|
488
677
|
"mediaType": "image",
|
|
489
678
|
"mediaSubtype": "webp",
|
|
490
679
|
"description": "Generate videos from text prompts.",
|
|
491
|
-
"tutorialUrl": "https://
|
|
680
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
681
|
+
"tags": ["Text to Video", "Video"],
|
|
682
|
+
"models": ["LTXV"],
|
|
683
|
+
"date": "2025-03-01"
|
|
492
684
|
},
|
|
493
685
|
{
|
|
494
686
|
"name": "ltxv_image_to_video",
|
|
@@ -496,7 +688,10 @@
|
|
|
496
688
|
"mediaType": "image",
|
|
497
689
|
"mediaSubtype": "webp",
|
|
498
690
|
"description": "Generate videos from still images.",
|
|
499
|
-
"tutorialUrl": "https://
|
|
691
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
692
|
+
"tags": ["Image to Video", "Video"],
|
|
693
|
+
"models": ["LTXV"],
|
|
694
|
+
"date": "2025-03-01"
|
|
500
695
|
},
|
|
501
696
|
{
|
|
502
697
|
"name": "mochi_text_to_video_example",
|
|
@@ -504,7 +699,10 @@
|
|
|
504
699
|
"mediaType": "image",
|
|
505
700
|
"mediaSubtype": "webp",
|
|
506
701
|
"description": "Generate videos from text prompts using Mochi model.",
|
|
507
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/"
|
|
702
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
|
|
703
|
+
"tags": ["Text to Video", "Video"],
|
|
704
|
+
"models": ["Mochi"],
|
|
705
|
+
"date": "2025-03-01"
|
|
508
706
|
},
|
|
509
707
|
{
|
|
510
708
|
"name": "hunyuan_video_text_to_video",
|
|
@@ -512,7 +710,10 @@
|
|
|
512
710
|
"mediaType": "image",
|
|
513
711
|
"mediaSubtype": "webp",
|
|
514
712
|
"description": "Generate videos from text prompts using Hunyuan model.",
|
|
515
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/"
|
|
713
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
|
|
714
|
+
"tags": ["Text to Video", "Video"],
|
|
715
|
+
"models": ["Hunyuan Video"],
|
|
716
|
+
"date": "2025-03-01"
|
|
516
717
|
},
|
|
517
718
|
{
|
|
518
719
|
"name": "image_to_video",
|
|
@@ -520,7 +721,10 @@
|
|
|
520
721
|
"mediaType": "image",
|
|
521
722
|
"mediaSubtype": "webp",
|
|
522
723
|
"description": "Generate videos from still images.",
|
|
523
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
|
|
724
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
725
|
+
"tags": ["Image to Video", "Video"],
|
|
726
|
+
"models": ["SVD"],
|
|
727
|
+
"date": "2025-03-01"
|
|
524
728
|
},
|
|
525
729
|
{
|
|
526
730
|
"name": "txt_to_image_to_video",
|
|
@@ -528,7 +732,10 @@
|
|
|
528
732
|
"mediaType": "image",
|
|
529
733
|
"mediaSubtype": "webp",
|
|
530
734
|
"description": "Generate videos by first creating images from text prompts.",
|
|
531
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
|
|
735
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
736
|
+
"tags": ["Text to Video", "Video"],
|
|
737
|
+
"models": ["SVD"],
|
|
738
|
+
"date": "2025-03-01"
|
|
532
739
|
}
|
|
533
740
|
]
|
|
534
741
|
},
|
|
@@ -543,7 +750,11 @@
|
|
|
543
750
|
"description": "Input multiple images and edit them with Flux.1 Kontext.",
|
|
544
751
|
"mediaType": "image",
|
|
545
752
|
"mediaSubtype": "webp",
|
|
546
|
-
"thumbnailVariant": "compareSlider"
|
|
753
|
+
"thumbnailVariant": "compareSlider",
|
|
754
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
755
|
+
"tags": ["Image Edit", "Image"],
|
|
756
|
+
"models": ["Flux"],
|
|
757
|
+
"date": "2025-05-29"
|
|
547
758
|
},
|
|
548
759
|
{
|
|
549
760
|
"name": "api_bfl_flux_1_kontext_pro_image",
|
|
@@ -551,7 +762,11 @@
|
|
|
551
762
|
"description": "Edit images with Flux.1 Kontext pro image.",
|
|
552
763
|
"mediaType": "image",
|
|
553
764
|
"mediaSubtype": "webp",
|
|
554
|
-
"thumbnailVariant": "compareSlider"
|
|
765
|
+
"thumbnailVariant": "compareSlider",
|
|
766
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
767
|
+
"tags": ["Image Edit", "Image"],
|
|
768
|
+
"models": ["Flux"],
|
|
769
|
+
"date": "2025-05-29"
|
|
555
770
|
},
|
|
556
771
|
{
|
|
557
772
|
"name": "api_bfl_flux_1_kontext_max_image",
|
|
@@ -559,14 +774,22 @@
|
|
|
559
774
|
"description": "Edit images with Flux.1 Kontext max image.",
|
|
560
775
|
"mediaType": "image",
|
|
561
776
|
"mediaSubtype": "webp",
|
|
562
|
-
"thumbnailVariant": "compareSlider"
|
|
777
|
+
"thumbnailVariant": "compareSlider",
|
|
778
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
779
|
+
"tags": ["Image Edit", "Image"],
|
|
780
|
+
"models": ["Flux"],
|
|
781
|
+
"date": "2025-05-29"
|
|
563
782
|
},
|
|
564
783
|
{
|
|
565
784
|
"name": "api_bfl_flux_pro_t2i",
|
|
566
785
|
"title": "BFL Flux[Pro]: Text to Image",
|
|
567
786
|
"description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
|
|
568
787
|
"mediaType": "image",
|
|
569
|
-
"mediaSubtype": "webp"
|
|
788
|
+
"mediaSubtype": "webp",
|
|
789
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
|
|
790
|
+
"tags": ["Image Edit", "Image"],
|
|
791
|
+
"models": ["Flux"],
|
|
792
|
+
"date": "2025-05-01"
|
|
570
793
|
},
|
|
571
794
|
{
|
|
572
795
|
"name": "api_luma_photon_i2i",
|
|
@@ -574,7 +797,10 @@
|
|
|
574
797
|
"description": "Guide image generation using a combination of images and prompt.",
|
|
575
798
|
"mediaType": "image",
|
|
576
799
|
"mediaSubtype": "webp",
|
|
577
|
-
"thumbnailVariant": "compareSlider"
|
|
800
|
+
"thumbnailVariant": "compareSlider",
|
|
801
|
+
"tags": ["Image to Image", "Image", "API"],
|
|
802
|
+
"models": ["Luma Photon"],
|
|
803
|
+
"date": "2025-03-01"
|
|
578
804
|
},
|
|
579
805
|
{
|
|
580
806
|
"name": "api_luma_photon_style_ref",
|
|
@@ -582,35 +808,50 @@
|
|
|
582
808
|
"description": "Generate images by blending style references with precise control using Luma Photon.",
|
|
583
809
|
"mediaType": "image",
|
|
584
810
|
"mediaSubtype": "webp",
|
|
585
|
-
"thumbnailVariant": "compareSlider"
|
|
811
|
+
"thumbnailVariant": "compareSlider",
|
|
812
|
+
"tags": ["Text to Image", "Image", "API", "Style Transfer"],
|
|
813
|
+
"models": ["Luma Photon"],
|
|
814
|
+
"date": "2025-03-01"
|
|
586
815
|
},
|
|
587
816
|
{
|
|
588
817
|
"name": "api_recraft_image_gen_with_color_control",
|
|
589
818
|
"title": "Recraft: Color Control Image Generation",
|
|
590
819
|
"description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
|
|
591
820
|
"mediaType": "image",
|
|
592
|
-
"mediaSubtype": "webp"
|
|
821
|
+
"mediaSubtype": "webp",
|
|
822
|
+
"tags": ["Text to Image", "Image", "API", "Color Control"],
|
|
823
|
+
"models": ["Recraft"],
|
|
824
|
+
"date": "2025-03-01"
|
|
593
825
|
},
|
|
594
826
|
{
|
|
595
827
|
"name": "api_recraft_image_gen_with_style_control",
|
|
596
828
|
"title": "Recraft: Style Control Image Generation",
|
|
597
829
|
"description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
|
|
598
830
|
"mediaType": "image",
|
|
599
|
-
"mediaSubtype": "webp"
|
|
831
|
+
"mediaSubtype": "webp",
|
|
832
|
+
"tags": ["Text to Image", "Image", "API", "Style Control"],
|
|
833
|
+
"models": ["Recraft"],
|
|
834
|
+
"date": "2025-03-01"
|
|
600
835
|
},
|
|
601
836
|
{
|
|
602
837
|
"name": "api_recraft_vector_gen",
|
|
603
838
|
"title": "Recraft: Vector Generation",
|
|
604
839
|
"description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
|
|
605
840
|
"mediaType": "image",
|
|
606
|
-
"mediaSubtype": "webp"
|
|
841
|
+
"mediaSubtype": "webp",
|
|
842
|
+
"tags": ["Text to Image", "Image", "API", "Vector"],
|
|
843
|
+
"models": ["Recraft"],
|
|
844
|
+
"date": "2025-03-01"
|
|
607
845
|
},
|
|
608
846
|
{
|
|
609
847
|
"name": "api_runway_text_to_image",
|
|
610
848
|
"title": "Runway: Text to Image",
|
|
611
849
|
"description": "Generate high-quality images from text prompts using Runway's AI model.",
|
|
612
850
|
"mediaType": "image",
|
|
613
|
-
"mediaSubtype": "webp"
|
|
851
|
+
"mediaSubtype": "webp",
|
|
852
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
853
|
+
"models": ["Runway"],
|
|
854
|
+
"date": "2025-03-01"
|
|
614
855
|
},
|
|
615
856
|
{
|
|
616
857
|
"name": "api_runway_reference_to_image",
|
|
@@ -618,14 +859,20 @@
|
|
|
618
859
|
"description": "Generate new images based on reference styles and compositions with Runway's AI.",
|
|
619
860
|
"mediaType": "image",
|
|
620
861
|
"thumbnailVariant": "compareSlider",
|
|
621
|
-
"mediaSubtype": "webp"
|
|
862
|
+
"mediaSubtype": "webp",
|
|
863
|
+
"tags": ["Image to Image", "Image", "API", "Style Transfer"],
|
|
864
|
+
"models": ["Runway"],
|
|
865
|
+
"date": "2025-03-01"
|
|
622
866
|
},
|
|
623
867
|
{
|
|
624
868
|
"name": "api_stability_ai_stable_image_ultra_t2i",
|
|
625
869
|
"title": "Stability AI: Stable Image Ultra Text to Image",
|
|
626
870
|
"description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
|
627
871
|
"mediaType": "image",
|
|
628
|
-
"mediaSubtype": "webp"
|
|
872
|
+
"mediaSubtype": "webp",
|
|
873
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
874
|
+
"models": ["Stable Image Ultra"],
|
|
875
|
+
"date": "2025-03-01"
|
|
629
876
|
},
|
|
630
877
|
{
|
|
631
878
|
"name": "api_stability_ai_i2i",
|
|
@@ -633,14 +880,20 @@
|
|
|
633
880
|
"description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
|
|
634
881
|
"mediaType": "image",
|
|
635
882
|
"thumbnailVariant": "compareSlider",
|
|
636
|
-
"mediaSubtype": "webp"
|
|
883
|
+
"mediaSubtype": "webp",
|
|
884
|
+
"tags": ["Image to Image", "Image", "API"],
|
|
885
|
+
"models": ["Stability AI"],
|
|
886
|
+
"date": "2025-03-01"
|
|
637
887
|
},
|
|
638
888
|
{
|
|
639
889
|
"name": "api_stability_ai_sd3.5_t2i",
|
|
640
890
|
"title": "Stability AI: SD3.5 Text to Image",
|
|
641
891
|
"description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
|
642
892
|
"mediaType": "image",
|
|
643
|
-
"mediaSubtype": "webp"
|
|
893
|
+
"mediaSubtype": "webp",
|
|
894
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
895
|
+
"models": ["SD3.5"],
|
|
896
|
+
"date": "2025-03-01"
|
|
644
897
|
},
|
|
645
898
|
{
|
|
646
899
|
"name": "api_stability_ai_sd3.5_i2i",
|
|
@@ -648,14 +901,20 @@
|
|
|
648
901
|
"description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
|
649
902
|
"mediaType": "image",
|
|
650
903
|
"thumbnailVariant": "compareSlider",
|
|
651
|
-
"mediaSubtype": "webp"
|
|
904
|
+
"mediaSubtype": "webp",
|
|
905
|
+
"tags": ["Image to Image", "Image", "API"],
|
|
906
|
+
"models": ["SD3.5"],
|
|
907
|
+
"date": "2025-03-01"
|
|
652
908
|
},
|
|
653
909
|
{
|
|
654
910
|
"name": "api_ideogram_v3_t2i",
|
|
655
911
|
"title": "Ideogram V3: Text to Image",
|
|
656
912
|
"description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
|
|
657
913
|
"mediaType": "image",
|
|
658
|
-
"mediaSubtype": "webp"
|
|
914
|
+
"mediaSubtype": "webp",
|
|
915
|
+
"tags": ["Text to Image", "Image", "API", "Text Rendering"],
|
|
916
|
+
"models": ["Ideogram V3"],
|
|
917
|
+
"date": "2025-03-01"
|
|
659
918
|
},
|
|
660
919
|
{
|
|
661
920
|
"name": "api_openai_image_1_t2i",
|
|
@@ -663,6 +922,9 @@
|
|
|
663
922
|
"description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
|
|
664
923
|
"mediaType": "image",
|
|
665
924
|
"mediaSubtype": "webp",
|
|
925
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
926
|
+
"models": ["GPT-Image-1"],
|
|
927
|
+
"date": "2025-03-01",
|
|
666
928
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
|
|
667
929
|
},
|
|
668
930
|
{
|
|
@@ -672,6 +934,9 @@
|
|
|
672
934
|
"mediaType": "image",
|
|
673
935
|
"mediaSubtype": "webp",
|
|
674
936
|
"thumbnailVariant": "compareSlider",
|
|
937
|
+
"tags": ["Image to Image", "Image", "API"],
|
|
938
|
+
"models": ["GPT-Image-1"],
|
|
939
|
+
"date": "2025-03-01",
|
|
675
940
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
|
|
676
941
|
},
|
|
677
942
|
{
|
|
@@ -681,6 +946,9 @@
|
|
|
681
946
|
"mediaType": "image",
|
|
682
947
|
"mediaSubtype": "webp",
|
|
683
948
|
"thumbnailVariant": "compareSlider",
|
|
949
|
+
"tags": ["Inpaint", "Image", "API"],
|
|
950
|
+
"models": ["GPT-Image-1"],
|
|
951
|
+
"date": "2025-03-01",
|
|
684
952
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
|
|
685
953
|
},
|
|
686
954
|
{
|
|
@@ -690,6 +958,9 @@
|
|
|
690
958
|
"mediaType": "image",
|
|
691
959
|
"mediaSubtype": "webp",
|
|
692
960
|
"thumbnailVariant": "compareSlider",
|
|
961
|
+
"tags": ["Text to Image", "Image", "API", "Multi Input"],
|
|
962
|
+
"models": ["GPT-Image-1"],
|
|
963
|
+
"date": "2025-03-01",
|
|
693
964
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
|
|
694
965
|
},
|
|
695
966
|
{
|
|
@@ -698,6 +969,9 @@
|
|
|
698
969
|
"description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
|
|
699
970
|
"mediaType": "image",
|
|
700
971
|
"mediaSubtype": "webp",
|
|
972
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
973
|
+
"models": ["Dall-E 2"],
|
|
974
|
+
"date": "2025-03-01",
|
|
701
975
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
|
|
702
976
|
},
|
|
703
977
|
{
|
|
@@ -707,6 +981,9 @@
|
|
|
707
981
|
"mediaType": "image",
|
|
708
982
|
"mediaSubtype": "webp",
|
|
709
983
|
"thumbnailVariant": "compareSlider",
|
|
984
|
+
"tags": ["Inpaint", "Image", "API"],
|
|
985
|
+
"models": ["Dall-E 2"],
|
|
986
|
+
"date": "2025-03-01",
|
|
710
987
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
|
|
711
988
|
},
|
|
712
989
|
{
|
|
@@ -715,6 +992,9 @@
|
|
|
715
992
|
"description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
|
|
716
993
|
"mediaType": "image",
|
|
717
994
|
"mediaSubtype": "webp",
|
|
995
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
996
|
+
"models": ["Dall-E 3"],
|
|
997
|
+
"date": "2025-03-01",
|
|
718
998
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3"
|
|
719
999
|
}
|
|
720
1000
|
]
|
|
@@ -729,49 +1009,77 @@
|
|
|
729
1009
|
"title": "Kling: Image to Video",
|
|
730
1010
|
"description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
|
|
731
1011
|
"mediaType": "image",
|
|
732
|
-
"mediaSubtype": "webp"
|
|
1012
|
+
"mediaSubtype": "webp",
|
|
1013
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1014
|
+
"models": ["Kling"],
|
|
1015
|
+
"date": "2025-03-01",
|
|
1016
|
+
"tutorialUrl": ""
|
|
733
1017
|
},
|
|
734
1018
|
{
|
|
735
1019
|
"name": "api_kling_effects",
|
|
736
1020
|
"title": "Kling: Video Effects",
|
|
737
1021
|
"description": "Generate dynamic videos by applying visual effects to images using Kling.",
|
|
738
1022
|
"mediaType": "image",
|
|
739
|
-
"mediaSubtype": "webp"
|
|
1023
|
+
"mediaSubtype": "webp",
|
|
1024
|
+
"tags": ["Video Effects", "Video", "API"],
|
|
1025
|
+
"models": ["Kling"],
|
|
1026
|
+
"date": "2025-03-01",
|
|
1027
|
+
"tutorialUrl": ""
|
|
740
1028
|
},
|
|
741
1029
|
{
|
|
742
1030
|
"name": "api_kling_flf",
|
|
743
1031
|
"title": "Kling: FLF2V",
|
|
744
1032
|
"description": "Generate videos through controlling the first and last frames.",
|
|
745
1033
|
"mediaType": "image",
|
|
746
|
-
"mediaSubtype": "webp"
|
|
1034
|
+
"mediaSubtype": "webp",
|
|
1035
|
+
"tags": ["Video Generation", "Video", "API", "Frame Control"],
|
|
1036
|
+
"models": ["Kling"],
|
|
1037
|
+
"date": "2025-03-01",
|
|
1038
|
+
"tutorialUrl": ""
|
|
747
1039
|
},
|
|
748
1040
|
{
|
|
749
1041
|
"name": "api_luma_i2v",
|
|
750
1042
|
"title": "Luma: Image to Video",
|
|
751
1043
|
"description": "Take static images and instantly create magical high quality animations.",
|
|
752
1044
|
"mediaType": "image",
|
|
753
|
-
"mediaSubtype": "webp"
|
|
1045
|
+
"mediaSubtype": "webp",
|
|
1046
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1047
|
+
"models": ["Luma"],
|
|
1048
|
+
"date": "2025-03-01",
|
|
1049
|
+
"tutorialUrl": ""
|
|
754
1050
|
},
|
|
755
1051
|
{
|
|
756
1052
|
"name": "api_luma_t2v",
|
|
757
1053
|
"title": "Luma: Text to Video",
|
|
758
1054
|
"description": "High-quality videos can be generated using simple prompts.",
|
|
759
1055
|
"mediaType": "image",
|
|
760
|
-
"mediaSubtype": "webp"
|
|
1056
|
+
"mediaSubtype": "webp",
|
|
1057
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
1058
|
+
"models": ["Luma"],
|
|
1059
|
+
"date": "2025-03-01",
|
|
1060
|
+
"tutorialUrl": ""
|
|
761
1061
|
},
|
|
762
1062
|
{
|
|
763
1063
|
"name": "api_moonvalley_text_to_video",
|
|
764
1064
|
"title": "Moonvalley: Text to Video",
|
|
765
1065
|
"description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
|
|
766
1066
|
"mediaType": "image",
|
|
767
|
-
"mediaSubtype": "webp"
|
|
1067
|
+
"mediaSubtype": "webp",
|
|
1068
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
1069
|
+
"models": ["Moonvalley"],
|
|
1070
|
+
"date": "2025-03-01",
|
|
1071
|
+
"tutorialUrl": ""
|
|
768
1072
|
},
|
|
769
1073
|
{
|
|
770
1074
|
"name": "api_moonvalley_image_to_video",
|
|
771
1075
|
"title": "Moonvalley: Image to Video",
|
|
772
1076
|
"description": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
|
|
773
1077
|
"mediaType": "image",
|
|
774
|
-
"mediaSubtype": "webp"
|
|
1078
|
+
"mediaSubtype": "webp",
|
|
1079
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1080
|
+
"models": ["Moonvalley"],
|
|
1081
|
+
"date": "2025-03-01",
|
|
1082
|
+
"tutorialUrl": ""
|
|
775
1083
|
},
|
|
776
1084
|
{
|
|
777
1085
|
"name": "api_moonvalley_video_to_video_motion_transfer",
|
|
@@ -779,7 +1087,11 @@
|
|
|
779
1087
|
"description": "Apply motion from one video to another.",
|
|
780
1088
|
"mediaType": "image",
|
|
781
1089
|
"thumbnailVariant": "hoverDissolve",
|
|
782
|
-
"mediaSubtype": "webp"
|
|
1090
|
+
"mediaSubtype": "webp",
|
|
1091
|
+
"tags": ["Video to Video", "Video", "API", "Motion Transfer"],
|
|
1092
|
+
"models": ["Moonvalley"],
|
|
1093
|
+
"date": "2025-03-01",
|
|
1094
|
+
"tutorialUrl": ""
|
|
783
1095
|
},
|
|
784
1096
|
{
|
|
785
1097
|
"name": "api_moonvalley_video_to_video_pose_control",
|
|
@@ -787,84 +1099,132 @@
|
|
|
787
1099
|
"description": "Apply human pose and movement from one video to another.",
|
|
788
1100
|
"mediaType": "image",
|
|
789
1101
|
"thumbnailVariant": "hoverDissolve",
|
|
790
|
-
"mediaSubtype": "webp"
|
|
1102
|
+
"mediaSubtype": "webp",
|
|
1103
|
+
"tags": ["Video to Video", "Video", "API", "Pose Control"],
|
|
1104
|
+
"models": ["Moonvalley"],
|
|
1105
|
+
"date": "2025-03-01",
|
|
1106
|
+
"tutorialUrl": ""
|
|
791
1107
|
},
|
|
792
1108
|
{
|
|
793
1109
|
"name": "api_hailuo_minimax_t2v",
|
|
794
1110
|
"title": "MiniMax: Text to Video",
|
|
795
1111
|
"description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
|
|
796
1112
|
"mediaType": "image",
|
|
797
|
-
"mediaSubtype": "webp"
|
|
1113
|
+
"mediaSubtype": "webp",
|
|
1114
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
1115
|
+
"models": ["MiniMax"],
|
|
1116
|
+
"date": "2025-03-01",
|
|
1117
|
+
"tutorialUrl": ""
|
|
798
1118
|
},
|
|
799
1119
|
{
|
|
800
1120
|
"name": "api_hailuo_minimax_i2v",
|
|
801
1121
|
"title": "MiniMax: Image to Video",
|
|
802
1122
|
"description": "Generate refined videos from images and text with CGI integration using MiniMax.",
|
|
803
1123
|
"mediaType": "image",
|
|
804
|
-
"mediaSubtype": "webp"
|
|
1124
|
+
"mediaSubtype": "webp",
|
|
1125
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1126
|
+
"models": ["MiniMax"],
|
|
1127
|
+
"date": "2025-03-01",
|
|
1128
|
+
"tutorialUrl": ""
|
|
805
1129
|
},
|
|
806
1130
|
{
|
|
807
1131
|
"name": "api_pixverse_i2v",
|
|
808
1132
|
"title": "PixVerse: Image to Video",
|
|
809
1133
|
"description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
|
810
1134
|
"mediaType": "image",
|
|
811
|
-
"mediaSubtype": "webp"
|
|
1135
|
+
"mediaSubtype": "webp",
|
|
1136
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1137
|
+
"models": ["PixVerse"],
|
|
1138
|
+
"date": "2025-03-01",
|
|
1139
|
+
"tutorialUrl": ""
|
|
812
1140
|
},
|
|
813
1141
|
{
|
|
814
1142
|
"name": "api_pixverse_template_i2v",
|
|
815
1143
|
"title": "PixVerse Templates: Image to Video",
|
|
816
1144
|
"description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
|
817
1145
|
"mediaType": "image",
|
|
818
|
-
"mediaSubtype": "webp"
|
|
1146
|
+
"mediaSubtype": "webp",
|
|
1147
|
+
"tags": ["Image to Video", "Video", "API", "Templates"],
|
|
1148
|
+
"models": ["PixVerse"],
|
|
1149
|
+
"date": "2025-03-01",
|
|
1150
|
+
"tutorialUrl": ""
|
|
819
1151
|
},
|
|
820
1152
|
{
|
|
821
1153
|
"name": "api_pixverse_t2v",
|
|
822
1154
|
"title": "PixVerse: Text to Video",
|
|
823
1155
|
"description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
|
|
824
1156
|
"mediaType": "image",
|
|
825
|
-
"mediaSubtype": "webp"
|
|
1157
|
+
"mediaSubtype": "webp",
|
|
1158
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
1159
|
+
"models": ["PixVerse"],
|
|
1160
|
+
"date": "2025-03-01",
|
|
1161
|
+
"tutorialUrl": ""
|
|
826
1162
|
},
|
|
827
1163
|
{
|
|
828
1164
|
"name": "api_runway_gen3a_turbo_image_to_video",
|
|
829
1165
|
"title": "Runway: Gen3a Turbo Image to Video",
|
|
830
1166
|
"description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
|
|
831
1167
|
"mediaType": "image",
|
|
832
|
-
"mediaSubtype": "webp"
|
|
1168
|
+
"mediaSubtype": "webp",
|
|
1169
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1170
|
+
"models": ["Runway Gen3a Turbo"],
|
|
1171
|
+
"date": "2025-03-01",
|
|
1172
|
+
"tutorialUrl": ""
|
|
833
1173
|
},
|
|
834
1174
|
{
|
|
835
1175
|
"name": "api_runway_gen4_turo_image_to_video",
|
|
836
1176
|
"title": "Runway: Gen4 Turbo Image to Video",
|
|
837
1177
|
"description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
|
|
838
1178
|
"mediaType": "image",
|
|
839
|
-
"mediaSubtype": "webp"
|
|
1179
|
+
"mediaSubtype": "webp",
|
|
1180
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1181
|
+
"models": ["Runway Gen4 Turbo"],
|
|
1182
|
+
"date": "2025-03-01",
|
|
1183
|
+
"tutorialUrl": ""
|
|
840
1184
|
},
|
|
841
1185
|
{
|
|
842
1186
|
"name": "api_runway_first_last_frame",
|
|
843
1187
|
"title": "Runway: First Last Frame to Video",
|
|
844
1188
|
"description": "Generate smooth video transitions between two keyframes with Runway's precision.",
|
|
845
1189
|
"mediaType": "image",
|
|
846
|
-
"mediaSubtype": "webp"
|
|
1190
|
+
"mediaSubtype": "webp",
|
|
1191
|
+
"tags": ["Video Generation", "Video", "API", "Frame Control"],
|
|
1192
|
+
"models": ["Runway"],
|
|
1193
|
+
"date": "2025-03-01",
|
|
1194
|
+
"tutorialUrl": ""
|
|
847
1195
|
},
|
|
848
1196
|
{
|
|
849
1197
|
"name": "api_pika_i2v",
|
|
850
1198
|
"title": "Pika: Image to Video",
|
|
851
1199
|
"description": "Generate smooth animated videos from single static images using Pika AI.",
|
|
852
1200
|
"mediaType": "image",
|
|
853
|
-
"mediaSubtype": "webp"
|
|
1201
|
+
"mediaSubtype": "webp",
|
|
1202
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1203
|
+
"models": ["Pika"],
|
|
1204
|
+
"date": "2025-03-01",
|
|
1205
|
+
"tutorialUrl": ""
|
|
854
1206
|
},
|
|
855
1207
|
{
|
|
856
1208
|
"name": "api_pika_scene",
|
|
857
1209
|
"title": "Pika Scenes: Images to Video",
|
|
858
1210
|
"description": "Generate videos that incorporate multiple input images using Pika Scenes.",
|
|
859
1211
|
"mediaType": "image",
|
|
860
|
-
"mediaSubtype": "webp"
|
|
1212
|
+
"mediaSubtype": "webp",
|
|
1213
|
+
"tags": ["Image to Video", "Video", "API", "Multi Image"],
|
|
1214
|
+
"models": ["Pika Scenes"],
|
|
1215
|
+
"date": "2025-03-01",
|
|
1216
|
+
"tutorialUrl": ""
|
|
861
1217
|
},
|
|
862
1218
|
{
|
|
863
1219
|
"name": "api_veo2_i2v",
|
|
864
1220
|
"title": "Veo2: Image to Video",
|
|
865
1221
|
"description": "Generate videos from images using Google Veo2 API.",
|
|
866
1222
|
"mediaType": "image",
|
|
867
|
-
"mediaSubtype": "webp"
|
|
1223
|
+
"mediaSubtype": "webp",
|
|
1224
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1225
|
+
"models": ["Veo2"],
|
|
1226
|
+
"date": "2025-03-01",
|
|
1227
|
+
"tutorialUrl": ""
|
|
868
1228
|
}
|
|
869
1229
|
]
|
|
870
1230
|
},
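Every entry touched by the hunks above gains the same four fields: "tags", "models", "date", and "tutorialUrl". As a reading aid, here is a minimal sketch of the resulting entry shape as a Python TypedDict; the field list is taken from the "+" lines in this diff, while the value types and the optionality of "thumbnailVariant" are assumptions, not a documented schema.

# Sketch of one template entry after this release's metadata additions.
# Field names come from the "+" lines above; types/optionality are assumptions.
from typing import List, TypedDict

class _OptionalFields(TypedDict, total=False):
    thumbnailVariant: str  # e.g. "hoverDissolve", "compareSlider", "zoomHover"

class TemplateEntry(_OptionalFields):
    name: str          # workflow identifier, e.g. "api_pika_i2v"
    title: str
    description: str
    mediaType: str     # "image" or "audio" in the entries shown here
    mediaSubtype: str  # "webp", "mp3", ...
    tags: List[str]    # added in this release
    models: List[str]  # added in this release
    date: str          # added in this release, "YYYY-MM-DD"
    tutorialUrl: str   # often "" for API templates in this diff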
@@ -879,7 +1239,11 @@
 "description": "Generate detailed 3D models from single photos using Rodin AI.",
 "mediaType": "image",
 "thumbnailVariant": "compareSlider",
-"mediaSubtype": "webp"
+"mediaSubtype": "webp",
+"tags": ["Image to Model", "3D", "API"],
+"models": ["Rodin"],
+"date": "2025-03-01",
+"tutorialUrl": ""
 },
 {
 "name": "api_rodin_multiview_to_model",
@@ -887,14 +1251,22 @@
 "description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
 "mediaType": "image",
 "thumbnailVariant": "compareSlider",
-"mediaSubtype": "webp"
+"mediaSubtype": "webp",
+"tags": ["Multiview to Model", "3D", "API"],
+"models": ["Rodin"],
+"date": "2025-03-01",
+"tutorialUrl": ""
 },
 {
 "name": "api_tripo_text_to_model",
 "title": "Tripo: Text to Model",
 "description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
 "mediaType": "image",
-"mediaSubtype": "webp"
+"mediaSubtype": "webp",
+"tags": ["Text to Model", "3D", "API"],
+"models": ["Tripo"],
+"date": "2025-03-01",
+"tutorialUrl": ""
 },
 {
 "name": "api_tripo_image_to_model",
@@ -902,7 +1274,11 @@
 "description": "Generate professional 3D assets from 2D images using Tripo engine.",
 "mediaType": "image",
 "thumbnailVariant": "compareSlider",
-"mediaSubtype": "webp"
+"mediaSubtype": "webp",
+"tags": ["Image to Model", "3D", "API"],
+"models": ["Tripo"],
+"date": "2025-03-01",
+"tutorialUrl": ""
 },
 {
 "name": "api_tripo_multiview_to_model",
@@ -910,7 +1286,11 @@
 "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
 "mediaType": "image",
 "thumbnailVariant": "compareSlider",
-"mediaSubtype": "webp"
+"mediaSubtype": "webp",
+"tags": ["Multiview to Model", "3D", "API"],
+"models": ["Tripo"],
+"date": "2025-03-01",
+"tutorialUrl": ""
 }
 ]
 },
@@ -924,14 +1304,22 @@
 "title": "OpenAI: Chat",
 "description": "Engage with OpenAI's advanced language models for intelligent conversations.",
 "mediaType": "image",
-"mediaSubtype": "webp"
+"mediaSubtype": "webp",
+"tags": ["Chat", "LLM", "API"],
+"models": ["OpenAI"],
+"date": "2025-03-01",
+"tutorialUrl": ""
 },
 {
 "name": "api_google_gemini",
 "title": "Google Gemini: Chat",
 "description": "Experience Google's multimodal AI with Gemini's reasoning capabilities.",
 "mediaType": "image",
-"mediaSubtype": "webp"
+"mediaSubtype": "webp",
+"tags": ["Chat", "LLM", "API"],
+"models": ["Google Gemini"],
+"date": "2025-03-01",
+"tutorialUrl": ""
 }
 ]
 },
@@ -947,6 +1335,9 @@
 "mediaSubtype": "webp",
 "description": "Upscale images by enhancing quality in latent space.",
 "thumbnailVariant": "compareSlider",
+"tags": ["Upscale", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/"
 },
 {
@@ -956,6 +1347,9 @@
 "mediaSubtype": "webp",
 "description": "Upscale images using ESRGAN models to enhance quality.",
 "thumbnailVariant": "compareSlider",
+"tags": ["Upscale", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/"
 },
 {
@@ -965,6 +1359,9 @@
 "mediaSubtype": "webp",
 "description": "Upscale images using ESRGAN models during intermediate generation steps.",
 "thumbnailVariant": "compareSlider",
+"tags": ["Upscale", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling"
 },
 {
@@ -974,6 +1371,9 @@
 "mediaSubtype": "webp",
 "description": "Upscale images while changing prompts across generation passes.",
 "thumbnailVariant": "zoomHover",
+"tags": ["Upscale", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples"
 }
 ]
@@ -990,6 +1390,9 @@
 "mediaSubtype": "webp",
 "description": "Generate images guided by scribble reference images using ControlNet.",
 "thumbnailVariant": "hoverDissolve",
+"tags": ["ControlNet", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/"
 },
 {
@@ -999,6 +1402,9 @@
 "mediaSubtype": "webp",
 "description": "Generate images guided by pose references using ControlNet.",
 "thumbnailVariant": "hoverDissolve",
+"tags": ["ControlNet", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet"
 },
 {
@@ -1008,6 +1414,9 @@
 "mediaSubtype": "webp",
 "description": "Generate images guided by depth information using ControlNet.",
 "thumbnailVariant": "hoverDissolve",
+"tags": ["ControlNet", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
 },
 {
@@ -1017,6 +1426,9 @@
 "mediaSubtype": "webp",
 "description": "Generate images guided by depth information using T2I adapter.",
 "thumbnailVariant": "hoverDissolve",
+"tags": ["T2I Adapter", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
 },
 {
@@ -1026,6 +1438,9 @@
 "mediaSubtype": "webp",
 "description": "Generate images by combining multiple ControlNet models.",
 "thumbnailVariant": "hoverDissolve",
+"tags": ["ControlNet", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets"
 }
 ]
@@ -1041,6 +1456,9 @@
 "mediaType": "image",
 "mediaSubtype": "webp",
 "description": "Generate images by controlling composition with defined areas.",
+"tags": ["Area Composition", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
 },
 {
@@ -1049,6 +1467,9 @@
 "mediaType": "image",
 "mediaSubtype": "webp",
 "description": "Generate images with consistent subject placement using area composition.",
+"tags": ["Area Composition", "Image"],
+"models": ["SD1.5"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition"
 }
 ]
@@ -1064,6 +1485,9 @@
 "mediaType": "image",
 "mediaSubtype": "webp",
 "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
+"tags": ["Image to Model", "3D"],
+"models": ["Hunyuan3D 2.0"],
+"date": "2025-03-01",
 "tutorialUrl": ""
 },
 {
@@ -1072,6 +1496,9 @@
 "mediaType": "image",
 "mediaSubtype": "webp",
 "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
+"tags": ["Multiview to Model", "3D"],
+"models": ["Hunyuan3D 2.0 MV"],
+"date": "2025-03-01",
 "tutorialUrl": "",
 "thumbnailVariant": "hoverDissolve"
 },
@@ -1081,6 +1508,9 @@
 "mediaType": "image",
 "mediaSubtype": "webp",
 "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
+"tags": ["Multiview to Model", "3D"],
+"models": ["Hunyuan3D 2.0 MV Turbo"],
+"date": "2025-03-01",
 "tutorialUrl": "",
 "thumbnailVariant": "hoverDissolve"
 },
@@ -1090,6 +1520,9 @@
 "mediaType": "image",
 "mediaSubtype": "webp",
 "description": "Generate 3D views from single images using Stable Zero123.",
+"tags": ["Image to 3D", "3D"],
+"models": ["Stable Zero123"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/"
 }
 ]
@@ -1105,6 +1538,9 @@
 "mediaType": "audio",
 "mediaSubtype": "mp3",
 "description": "Generate audio from text prompts using Stable Audio.",
+"tags": ["Text to Audio", "Audio"],
+"models": ["Stable Audio"],
+"date": "2025-03-01",
 "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/"
 },
 {
@@ -1113,6 +1549,9 @@
 "mediaType": "audio",
 "mediaSubtype": "mp3",
 "description": "Generate instrumental music from text prompts using ACE-Step v1.",
+"tags": ["Text to Audio", "Audio", "Instrumentals"],
+"models": ["ACE-Step v1"],
+"date": "2025-03-01",
 "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
 },
 {
@@ -1121,6 +1560,9 @@
 "mediaType": "audio",
 "mediaSubtype": "mp3",
 "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
+"tags": ["Text to Audio", "Audio", "Song"],
+"models": ["ACE-Step v1"],
+"date": "2025-03-01",
 "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
 },
 {
@@ -1129,6 +1571,9 @@
 "mediaType": "audio",
 "mediaSubtype": "mp3",
 "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
+"tags": ["Audio Editing", "Audio"],
+"models": ["ACE-Step v1"],
+"date": "2025-03-01",
 "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
 }
 ]
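With "tags" and "models" now present on every entry shown in this diff, the index can be filtered programmatically. Below is a minimal sketch under two assumptions that the diff itself does not confirm: that the wheel keeps the index at templates/index.json inside the top-level package, and that the top level of the file is a list of category objects each holding a "templates" array. Neither is a documented API of the package.

# Minimal sketch: load the packaged template index and filter it by the
# new "tags" field. The package path and JSON layout are assumptions
# based on this diff, not a documented interface.
import json
from importlib import resources

def load_index() -> list:
    path = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
    return json.loads(path.read_text(encoding="utf-8"))

def filter_by_tag(index: list, tag: str):
    # Yield entries whose "tags" list (added in this release) contains tag.
    for category in index:
        for entry in category.get("templates", []):
            if tag in entry.get("tags", []):
                yield entry

if __name__ == "__main__":
    for entry in filter_by_tag(load_index(), "Upscale"):
        print(entry["name"], entry.get("models"), entry.get("date"))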