comfyui_workflow_templates_media_other-0.3.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfyui_workflow_templates_media_other/__init__.py +6 -0
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed.json +849 -0
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +1039 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship.json +1256 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1.json +618 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model.json +1101 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo.json +1149 -0
- comfyui_workflow_templates_media_other/templates/ByteDance-Seedance_00003_.json +210 -0
- comfyui_workflow_templates_media_other/templates/area_composition-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition.json +1626 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject.json +1114 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing.json +688 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals.json +650 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song.json +630 -0
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example.json +495 -0
- comfyui_workflow_templates_media_other/templates/default-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/default.json +547 -0
- comfyui_workflow_templates_media_other/templates/embedding_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/embedding_example.json +267 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example.json +635 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example.json +686 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1.json +1133 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full.json +1021 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev.json +700 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast.json +700 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_full-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_full.json +700 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow.json +1029 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow.json +772 -0
- comfyui_workflow_templates_media_other/templates/index.ar.json +2521 -0
- comfyui_workflow_templates_media_other/templates/index.es.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.fr.json +2527 -0
- comfyui_workflow_templates_media_other/templates/index.ja.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.json +2527 -0
- comfyui_workflow_templates_media_other/templates/index.ko.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.ru.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.schema.json +123 -0
- comfyui_workflow_templates_media_other/templates/index.tr.json +2521 -0
- comfyui_workflow_templates_media_other/templates/index.zh-TW.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.zh.json +2526 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model.json +929 -0
- comfyui_workflow_templates_media_other/templates/lora-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora.json +615 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple.json +656 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur.json +764 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth.json +1857 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example.json +278 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16.json +1038 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control.json +1284 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp.json +1142 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/METADATA +9 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/RECORD +92 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/WHEEL +5 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/top_level.txt +1 -0
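The diff body that follows is the new templates/index.json (+2527 lines), which catalogs each workflow template with its name, title, media type, tags, associated models, release date, and download size. For orientation only, here is a minimal sketch — not part of the package itself, assuming just the standard library and the file layout listed above — of how an installed copy of this index could be read:

    import json
    from importlib import resources

    # Locate templates/index.json inside the installed wheel
    # (package and file names per the listing above) and parse it.
    index_path = resources.files(
        "comfyui_workflow_templates_media_other"
    ).joinpath("templates/index.json")
    index = json.loads(index_path.read_text(encoding="utf-8"))

    # Each top-level entry is a template module with a "templates" array.
    for module in index:
        print(module["title"], "-", len(module.get("templates", [])), "templates")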
@@ -0,0 +1,2527 @@
+[
+  {
+    "moduleName": "default",
+    "isEssential": true,
+    "title": "Getting Started",
+    "type": "image",
+    "templates": [
+      {
+        "name": "01_qwen_t2i_subgraphed",
+        "title": "Text to Image (New)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images from text prompts using the Qwen-Image model.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Qwen-Image"],
+        "date": "2025-10-17",
+        "size": 31772020572
+      },
+      {
+        "name": "02_qwen_Image_edit_subgraphed",
+        "title": "Image Editing (New)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Edit your images with Qwen-Image-Edit, the latest OSS model",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+        "tags": ["Image to Image", "Image Edit", "ControlNet"],
+        "models": ["Qwen-Image"],
+        "date": "2025-10-17",
+        "size": 31772020572
+      },
+      {
+        "name": "03_video_wan2_2_14B_i2v_subgraphed",
+        "title": "Image to Video (New)",
+        "description": "Generate videos from an input image using Wan2.2 14B",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["Image to Video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-10-17",
+        "size": 38031935406
+      },
+      {
+        "name": "04_hunyuan_3d_2.1_subgraphed",
+        "title": "Image to 3D (New)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
+        "tags": ["Image to 3D", "3D"],
+        "models": ["Hunyuan3D"],
+        "date": "2025-10-17",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
+        "size": 4928474972
+      },
+      {
+        "name": "05_audio_ace_step_1_t2a_song_subgraphed",
+        "title": "Text to Song (New)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate songs from text prompts using ACE-Step v1",
+        "tags": ["Text to Audio", "Audio"],
+        "models": ["ACE-Step"],
+        "date": "2025-10-17",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+        "size": 7698728878
+      },
+      {
+        "name": "default",
+        "title": "Image Generation",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images from text prompts.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2136746230,
+        "vram": 3092376453
+      },
+      {
+        "name": "image2image",
+        "title": "Image to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "description": "Transform existing images using text prompts.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
+        "tags": ["Image to Image", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2136746230,
+        "vram": 3092376453
+      },
+      {
+        "name": "lora",
+        "title": "LoRA",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with LoRA models for specialized styles or subjects.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2437393940,
+        "vram": 3092376453
+      },
+      {
+        "name": "lora_multiple",
+        "title": "LoRA Multiple",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images by combining multiple LoRA models.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2437393940,
+        "vram": 3350074491
+      },
+      {
+        "name": "inpaint_example",
+        "title": "Inpaint",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Edit specific parts of images seamlessly.",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
+        "tags": ["Inpainting", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 5218385265,
+        "vram": 4101693768
+      },
+      {
+        "name": "inpaint_model_outpainting",
+        "title": "Outpaint",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Extend images beyond their original boundaries.",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
+        "tags": ["Outpainting", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 5218385265,
+        "vram": 4101693768
+      },
+      {
+        "name": "embedding_example",
+        "title": "Embedding",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images using textual inversion for consistent styles.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 5218385265,
+        "vram": 4123168604
+      },
+      {
+        "name": "gligen_textbox_example",
+        "title": "Gligen Textbox",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with precise object placement using text boxes.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
+        "tags": ["Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2974264852,
+        "vram": 4080218931
+      },
+      {
+        "name": "area_composition",
+        "title": "Area Composition",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images by controlling composition with defined areas.",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/",
+        "size": 2469606195,
+        "vram": 6184752906
+      },
+      {
+        "name": "area_composition_square_area_for_subject",
+        "title": "Area Composition Square Area for Subject",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with consistent subject placement using area composition.",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition",
+        "size": 2469606195,
+        "vram": 5927054868
+      },
+      {
+        "name": "hiresfix_latent_workflow",
+        "title": "Upscale",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Upscale images by enhancing quality in latent space.",
+        "thumbnailVariant": "compareSlider",
+        "tags": ["Upscale", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/",
+        "size": 2136746230,
+        "vram": 3929895076
+      },
+      {
+        "name": "esrgan_example",
+        "title": "ESRGAN",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Upscale images using ESRGAN models to enhance quality.",
+        "thumbnailVariant": "compareSlider",
+        "tags": ["Upscale", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/",
+        "size": 2201170739,
+        "vram": 6442450944
+      },
+      {
+        "name": "hiresfix_esrgan_workflow",
+        "title": "HiresFix ESRGAN Workflow",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Upscale images using ESRGAN models during intermediate generation steps.",
+        "thumbnailVariant": "compareSlider",
+        "tags": ["Upscale", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling",
+        "size": 2201170739,
+        "vram": 6442450944
+      },
+      {
+        "name": "latent_upscale_different_prompt_model",
+        "title": "Latent Upscale Different Prompt Model",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Upscale images while changing prompts across generation passes.",
+        "thumbnailVariant": "zoomHover",
+        "tags": ["Upscale", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples",
+        "size": 4262755041,
+        "vram": 5153960755
+      },
+      {
+        "name": "controlnet_example",
+        "title": "Scribble ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by scribble reference images using ControlNet.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/",
+        "size": 3189013217,
+        "vram": 6442450944
+      },
+      {
+        "name": "2_pass_pose_worship",
+        "title": "Pose ControlNet 2 Pass",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by pose references using ControlNet.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet",
+        "size": 4660039516,
+        "vram": 6442450944
+      },
+      {
+        "name": "depth_controlnet",
+        "title": "Depth ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by depth information using ControlNet.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Image", "Text to Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
+        "size": 2888365507,
+        "vram": 6442450944
+      },
+      {
+        "name": "depth_t2i_adapter",
+        "title": "Depth T2I Adapter",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by depth information using T2I adapter.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Image", "Text to Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
+        "size": 2523293286,
+        "vram": 6442450944
+      },
+      {
+        "name": "mixing_controlnets",
+        "title": "Mixing ControlNets",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images by combining multiple ControlNet models.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Image", "Text to Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets",
+        "size": 3328599654,
+        "vram": 6442450944
+      }
+    ]
+  },
+  {
+    "moduleName": "default",
+    "category": "GENERATION TYPE",
+    "icon": "icon-[lucide--image]",
+    "title": "Image",
+    "type": "image",
+    "templates": [
+      {
+        "name": "image_flux2",
+        "title": "Flux.2 Dev",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Generate up to 4MP photorealistic images with multi-reference consistency and professional text rendering.",
+        "tags": ["Text to Image", "Image", "Image Edit"],
+        "models": ["Flux.2 Dev", "BFL"],
+        "date": "2025-11-26",
+        "size": 71382356459,
+        "vram": 0
+      },
+      {
+        "name": "image_flux2_fp8",
+        "title": "Product Mockup (Flux.2 Dev FP8)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Create product mockups by applying design patterns to packaging, mugs, and other products using multi-reference consistency.",
+        "tags": [
+          "Text to Image",
+          "Image",
+          "Image Edit",
+          "Mockup",
+          "Product Design"
+        ],
+        "models": ["Flux.2 Dev", "BFL"],
+        "date": "2025-11-26",
+        "size": 53837415055,
+        "vram": 0
+      },
+      {
+        "name": "image_z_image_turbo",
+        "title": "Z-Image-Turbo Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "An Efficient Image Generation Foundation Model with Single-Stream Diffusion Transformer, supports English & Chinese.",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Z-Image-Turbo"],
+        "date": "2025-11-27",
+        "size": 35326050304
+      },
+      {
+        "name": "image_qwen_image",
+        "title": "Qwen-Image Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Qwen-Image"],
+        "date": "2025-08-05",
+        "size": 31772020572
+      },
+      {
+        "name": "image_qwen_image_instantx_controlnet",
+        "title": "Qwen-Image InstantX Union ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with Qwen-Image InstantX ControlNet, supporting canny, soft edge, depth, pose",
+        "tags": ["Image to Image", "Image", "ControlNet"],
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "models": ["Qwen-Image"],
+        "date": "2025-08-23",
+        "size": 35304631173
+      },
+      {
+        "name": "image_qwen_image_instantx_inpainting_controlnet",
+        "title": "Qwen-Image InstantX Inpainting ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Professional inpainting and image editing with Qwen-Image InstantX ControlNet. Supports object replacement, text modification, background changes, and outpainting.",
+        "tags": ["Image to Image", "Image", "ControlNet", "Inpainting"],
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "models": ["Qwen-Image"],
+        "date": "2025-09-12",
+        "size": 36013300777
+      },
+      {
+        "name": "image_qwen_image_union_control_lora",
+        "title": "Qwen-Image Union Control",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with precise structural control using Qwen-Image's unified ControlNet LoRA. Supports multiple control types including canny, depth, lineart, softedge, normal, and openpose for diverse creative applications.",
+        "tags": ["Text to Image", "Image", "ControlNet"],
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "models": ["Qwen-Image"],
+        "date": "2025-08-23",
+        "size": 32716913377
+      },
+      {
+        "name": "image_qwen_image_controlnet_patch",
+        "title": "Qwen-Image ControlNet model patch",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Control image generation using Qwen-Image ControlNet models. Supports canny, depth, and inpainting controls through model patching.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "tags": ["Text to Image", "Image", "ControlNet"],
+        "models": ["Qwen-Image"],
+        "date": "2025-08-24",
+        "size": 34037615821
+      },
+      {
+        "name": "image_qwen_image_edit_2509",
+        "title": "Qwen Image Edit 2509",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+        "tags": ["Image to Image", "Image Edit", "ControlNet"],
+        "models": ["Qwen-Image"],
+        "date": "2025-09-25",
+        "size": 31772020572
+      },
+      {
+        "name": "image_qwen_image_edit",
+        "title": "Qwen Image Edit",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Edit images with precise bilingual text editing and dual semantic/appearance editing capabilities using Qwen-Image-Edit's 20B MMDiT model.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+        "tags": ["Image to Image", "Image Edit"],
+        "models": ["Qwen-Image"],
+        "date": "2025-08-18",
+        "size": 31772020572
+      },
+      {
+        "name": "image_chrono_edit_14B",
+        "title": "ChronoEdit 14B",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Image editing powered by video models' dynamic understanding, creating physically plausible results while preserving character and style consistency.",
+        "tags": ["Image Edit", "Image to Image"],
+        "models": ["Wan2.1", "ChronoEdit", "Nvidia"],
+        "date": "2025-11-03",
+        "size": 40459304
+      },
+      {
+        "name": "flux_kontext_dev_basic",
+        "title": "Flux Kontext Dev Image Edit",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "description": "Smart image editing that keeps characters consistent, edits specific parts without affecting others, and preserves original styles.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
+        "tags": ["Image Edit", "Image to Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-06-26",
+        "size": 17641578168,
+        "vram": 19327352832
+      },
+      {
+        "name": "image_chroma1_radiance_text_to_image",
+        "title": "Chroma1 Radiance text to image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Chroma1-Radiance works directly with image pixels instead of compressed latents, delivering higher quality images with reduced artifacts and distortion.",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Chroma"],
+        "date": "2025-09-18",
+        "size": 23622320128,
+        "vram": 23622320128
+      },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+        "tags": ["Text to Image", "Image", "Anime"],
+        "models": ["OmniGen"],
+        "date": "2025-10-10",
+        "size": 10619306639
+      },
+      {
+        "name": "image_chroma_text_to_image",
+        "title": "Chroma text to image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Chroma - enhanced Flux model with improved image quality and better prompt understanding for stunning text-to-image generation.",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Chroma", "Flux"],
+        "date": "2025-06-04",
+        "size": 23289460163,
+        "vram": 15569256448
+      },
+      {
+        "name": "image_flux.1_fill_dev_OneReward",
+        "title": "Flux.1 Dev OneReward",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Supports various tasks such as image inpainting, outpainting, and object removal, from the bytedance-research team",
+        "tags": ["Inpainting", "Outpainting"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-09-21",
+        "size": 29001766666,
+        "vram": 21474836480
+      },
+      {
+        "name": "flux_dev_checkpoint_example",
+        "title": "Flux Dev fp8",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 17244293693,
+        "vram": 18253611008
+      },
+      {
+        "name": "flux1_dev_uso_reference_image_gen",
+        "title": "Flux.1 Dev USO Reference Image Generation",
+        "description": "Use reference images to control both style and subject - keep your character's face while changing artistic style, or apply artistic styles to new scenes",
+        "thumbnailVariant": "hoverDissolve",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to Image", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-09-02",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
+        "size": 18597208392,
+        "vram": 19864223744
+      },
+      {
+        "name": "flux_schnell",
+        "title": "Flux Schnell fp8",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 17233556275,
+        "vram": 18253611008
+      },
+      {
+        "name": "flux1_krea_dev",
+        "title": "Flux.1 Krea Dev",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "A fine-tuned FLUX model pushing photorealism to the max",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-07-31",
+        "size": 22269405430,
+        "vram": 23085449216
+      },
+      {
+        "name": "flux_dev_full_text_to_image",
+        "title": "Flux Dev full text to image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 34177202258,
+        "vram": 23622320128
+      },
+      {
+        "name": "flux_schnell_full_text_to_image",
+        "title": "Flux Schnell full text to image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 34155727421
+      },
+      {
+        "name": "flux_fill_inpaint_example",
+        "title": "Flux Inpaint",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Fill missing parts of images using Flux inpainting.",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
+        "tags": ["Image to Image", "Inpainting", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 10372346020
+      },
+      {
+        "name": "flux_fill_outpaint_example",
+        "title": "Flux Outpaint",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Extend images beyond boundaries using Flux outpainting.",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
+        "tags": ["Outpainting", "Image", "Image to Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 10372346020
+      },
+      {
+        "name": "flux_canny_model_example",
+        "title": "Flux Canny Model",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by edge detection using Flux Canny.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+        "tags": ["Image to Image", "ControlNet", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 34177202258
+      },
+      {
+        "name": "flux_depth_lora_example",
+        "title": "Flux Depth Lora",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by depth information using Flux LoRA.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+        "tags": ["Image to Image", "ControlNet", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 35412005356
+      },
+      {
+        "name": "flux_redux_model_example",
+        "title": "Flux Redux Model",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images by transferring style from reference images using Flux Redux.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+        "tags": ["Image to Image", "ControlNet", "Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 35154307318
+      },
+      {
+        "name": "image_omnigen2_t2i",
+        "title": "OmniGen2 Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
+        "tags": ["Text to Image", "Image"],
+        "models": ["OmniGen"],
+        "date": "2025-06-30",
+        "size": 15784004813
+      },
+      {
+        "name": "image_omnigen2_image_edit",
+        "title": "OmniGen2 Image Edit",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "description": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
+        "tags": ["Image Edit", "Image"],
+        "models": ["OmniGen"],
+        "date": "2025-06-30",
+        "size": 15784004813
+      },
+      {
+        "name": "hidream_i1_dev",
+        "title": "HiDream I1 Dev",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+        "tags": ["Text to Image", "Image"],
+        "models": ["HiDream"],
+        "date": "2025-04-17",
+        "size": 33318208799
+      },
+      {
+        "name": "hidream_i1_fast",
+        "title": "HiDream I1 Fast",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+        "tags": ["Text to Image", "Image"],
+        "models": ["HiDream"],
+        "date": "2025-04-17",
+        "size": 24234352968
+      },
+      {
+        "name": "hidream_i1_full",
+        "title": "HiDream I1 Full",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+        "tags": ["Text to Image", "Image"],
+        "models": ["HiDream"],
+        "date": "2025-04-17",
+        "size": 24234352968
+      },
+      {
+        "name": "hidream_e1_1",
+        "title": "HiDream E1.1 Image Edit",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Edit images with HiDream E1.1 – it’s better in image quality and editing accuracy than HiDream-E1-Full.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
+        "tags": ["Image Edit", "Image"],
+        "models": ["HiDream"],
+        "date": "2025-07-21",
+        "size": 50422916055
+      },
+      {
+        "name": "hidream_e1_full",
+        "title": "HiDream E1 Image Edit",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Edit images with HiDream E1 - Professional natural language image editing model.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
+        "tags": ["Image Edit", "Image"],
+        "models": ["HiDream"],
+        "date": "2025-05-01",
+        "size": 34209414513
+      },
+      {
+        "name": "sd3.5_simple_example",
+        "title": "SD3.5 Simple",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images using SD 3.5.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SD3.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 14935748772
+      },
+      {
+        "name": "sd3.5_large_canny_controlnet_example",
+        "title": "SD3.5 Large Canny ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+        "tags": ["Image to Image", "Image", "ControlNet"],
+        "models": ["SD3.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 23590107873
+      },
+      {
+        "name": "sd3.5_large_depth",
+        "title": "SD3.5 Large Depth",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by depth information using SD 3.5.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+        "tags": ["Image to Image", "Image", "ControlNet"],
+        "models": ["SD3.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 23590107873
+      },
+      {
+        "name": "sd3.5_large_blur",
+        "title": "SD3.5 Large Blur",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images guided by blurred reference images using SD 3.5.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+        "tags": ["Image to Image", "Image"],
+        "models": ["SD3.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 23590107873
+      },
+      {
+        "name": "sdxl_simple_example",
+        "title": "SDXL Simple",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate high-quality images using SDXL.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SDXL", "Stability"],
+        "date": "2025-03-01",
+        "size": 13013750907
+      },
+      {
+        "name": "sdxl_refiner_prompt_example",
+        "title": "SDXL Refiner Prompt",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Enhance SDXL images using refiner models.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SDXL", "Stability"],
+        "date": "2025-03-01",
+        "size": 13013750907
+      },
+      {
+        "name": "sdxl_revision_text_prompts",
+        "title": "SDXL Revision Text Prompts",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SDXL", "Stability"],
+        "date": "2025-03-01",
+        "size": 10630044058
+      },
+      {
+        "name": "sdxlturbo_example",
+        "title": "SDXL Turbo",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images in a single step using SDXL Turbo.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
+        "tags": ["Text to Image", "Image"],
+        "models": ["SDXL", "Stability"],
+        "date": "2025-03-01",
+        "size": 6936372183
+      },
+      {
+        "name": "image_lotus_depth_v1_1",
+        "title": "Lotus Depth",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention.",
+        "tags": ["Image", "Text to Image"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-05-21",
+        "size": 2072321720
+      }
+    ]
+  },
+  {
+    "moduleName": "default",
+    "category": "GENERATION TYPE",
+    "title": "Video",
+    "icon": "icon-[lucide--film]",
+    "type": "video",
+    "templates": [
+      {
+        "name": "video_wan2_2_14B_t2v",
+        "title": "Wan 2.2 14B Text to Video",
+        "description": "Generate high-quality videos from text prompts with cinematic aesthetic control and dynamic motion generation using Wan 2.2.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["Text to Video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_i2v",
+        "title": "Wan 2.2 14B Image to Video",
+        "description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["Image to Video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_flf2v",
+        "title": "Wan 2.2 14B First-Last Frame to Video",
+        "description": "Generate smooth video transitions by defining start and end frames.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["FLF2V", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-02",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_animate",
+        "title": "Wan2.2 Animate, character animation and replacement",
+        "description": "Unified character animation and replacement framework with precise motion and expression replication.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
+        "tags": ["Video", "Image to Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-09-22",
+        "size": 27417997476
+      },
+      {
+        "name": "video_hunyuan_video_1.5_720p_t2v",
+        "title": "Hunyuan Video 1.5 Text to Video",
+        "description": "Generate high-quality 720p videos from text prompts with cinematic camera control, emotional expressions, and physics simulation. Supports multiple styles including realistic, anime, and 3D with text rendering.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Text to Video", "Video"],
+        "models": ["Hunyuan Video"],
+        "date": "2025-11-21",
+        "size": 45384919416
+      },
+      {
+        "name": "video_hunyuan_video_1.5_720p_i2v",
+        "title": "Hunyuan Video 1.5 Image to Video",
+        "description": "Animate still images into dynamic videos with precise motion and camera control. Maintains visual consistency while bringing photos and illustrations to life with smooth, natural movements.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to Video", "Video"],
+        "models": ["Hunyuan Video"],
+        "date": "2025-11-21",
+        "size": 45384919416
+      },
+      {
+        "name": "video_wan2_2_14B_s2v",
+        "title": "Wan2.2-S2V Audio-Driven Video Generation",
+        "description": "Transform static images and audio into dynamic videos with perfect synchronization and minute-level generation.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
+        "tags": ["Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-02",
+        "size": 25254407700
+      },
+      {
+        "name": "video_humo",
+        "title": "HuMo Video Generation",
+        "description": "Generate videos based on audio, image, and text, keeping the character's lip sync.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Video"],
+        "models": ["HuMo"],
+        "date": "2025-09-21",
+        "size": 27895812588
+      },
+      {
+        "name": "video_wan2_2_14B_fun_inpaint",
+        "title": "Wan 2.2 14B Fun Inp",
+        "description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
+        "tags": ["FLF2V", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-12",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_fun_control",
+        "title": "Wan 2.2 14B Fun Control",
+        "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.2 Fun Control.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
+        "tags": ["Video to Video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-12",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_fun_camera",
+        "title": "Wan 2.2 14B Fun Camera Control",
+        "description": "Generate videos with camera motion controls including pan, zoom, and rotation using Wan 2.2 Fun Camera Control.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
+        "tags": ["Video to Video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-17",
+        "size": 40050570035
+      },
+      {
+        "name": "video_wan2_2_5B_ti2v",
+        "title": "Wan 2.2 5B Video Generation",
+        "description": "Fast text-to-video and image-to-video generation with 5B parameters. Optimized for rapid prototyping and creative exploration.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Text to Video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 18146236826
+      },
+      {
+        "name": "video_wan2_2_5B_fun_inpaint",
+        "title": "Wan 2.2 5B Fun Inpaint",
+        "description": "Efficient video inpainting from start and end frames. 5B model delivers quick iterations for testing workflows.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Text to Video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 18146236826
+      },
+      {
+        "name": "video_wan2_2_5B_fun_control",
+        "title": "Wan 2.2 5B Fun Control",
+        "description": "Multi-condition video control with pose, depth, and edge guidance. Compact 5B size for experimental development.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Text to Video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 18146236826
+      },
+      {
+        "name": "video_wan_vace_14B_t2v",
+        "title": "Wan2.1 VACE Text to Video",
+        "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Text to Video", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_14B_ref2v",
+        "title": "Wan2.1 VACE Reference to Video",
+        "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Video", "Image to Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_14B_v2v",
+        "title": "Wan2.1 VACE Control Video",
+        "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Video to Video", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_outpainting",
+        "title": "Wan2.1 VACE Outpainting",
+        "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Outpainting", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_flf2v",
+        "title": "Wan2.1 VACE First-Last Frame",
+        "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["FLF2V", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_inpainting",
+        "title": "Wan2.1 VACE Inpainting",
+        "description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Inpainting", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan2.1_alpha_t2v_14B",
+        "title": "Wan2.1 Alpha T2V",
+        "description": "Generate text-to-video with alpha channel support for transparent backgrounds and semi-transparent objects.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Text to Video", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-10-06",
+        "size": 22494891213
+      },
+      {
+        "name": "video_wan_ati",
+        "title": "Wan2.1 ATI",
+        "description": "Trajectory-controlled video generation.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
+        "tags": ["Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 25393994138
+      },
+      {
1153
|
+
"name": "video_wan2.1_fun_camera_v1.1_1.3B",
|
|
1154
|
+
"title": "Wan 2.1 Fun Camera 1.3B",
|
|
1155
|
+
"description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
|
|
1156
|
+
"mediaType": "image",
|
|
1157
|
+
"mediaSubtype": "webp",
|
|
1158
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1159
|
+
"tags": ["Video"],
|
|
1160
|
+
"models": ["Wan2.1", "Wan"],
|
|
1161
|
+
"date": "2025-04-15",
|
|
1162
|
+
"size": 11489037517
|
|
1163
|
+
},
|
|
1164
|
+
{
|
|
1165
|
+
"name": "video_wan2.1_fun_camera_v1.1_14B",
|
|
1166
|
+
"title": "Wan 2.1 Fun Camera 14B",
|
|
1167
|
+
"description": "Generate high-quality videos with advanced camera control using the full 14B model",
|
|
1168
|
+
"mediaType": "image",
|
|
1169
|
+
"mediaSubtype": "webp",
|
|
1170
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1171
|
+
"tags": ["Video"],
|
|
1172
|
+
"models": ["Wan2.1", "Wan"],
|
|
1173
|
+
"date": "2025-04-15",
|
|
1174
|
+
"size": 42047729828
|
|
1175
|
+
},
|
|
1176
|
+
{
|
|
1177
|
+
"name": "text_to_video_wan",
|
|
1178
|
+
"title": "Wan 2.1 Text to Video",
|
|
1179
|
+
"description": "Generate videos from text prompts using Wan 2.1.",
|
|
1180
|
+
"mediaType": "image",
|
|
1181
|
+
"mediaSubtype": "webp",
|
|
1182
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
1183
|
+
"tags": ["Text to Video", "Video"],
|
|
1184
|
+
"models": ["Wan2.1", "Wan"],
|
|
1185
|
+
"date": "2025-03-01",
|
|
1186
|
+
"size": 9824737690
|
|
1187
|
+
},
|
|
1188
|
+
{
|
|
1189
|
+
"name": "image_to_video_wan",
|
|
1190
|
+
"title": "Wan 2.1 Image to Video",
|
|
1191
|
+
"description": "Generate videos from images using Wan 2.1.",
|
|
1192
|
+
"mediaType": "image",
|
|
1193
|
+
"mediaSubtype": "webp",
|
|
1194
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
1195
|
+
"tags": ["Text to Video", "Video"],
|
|
1196
|
+
"models": ["Wan2.1", "Wan"],
|
|
1197
|
+
"date": "2025-03-01",
|
|
1198
|
+
"size": 41049149932
|
|
1199
|
+
},
|
|
1200
|
+
{
|
|
1201
|
+
"name": "wan2.1_fun_inp",
|
|
1202
|
+
"title": "Wan 2.1 Inpainting",
|
|
1203
|
+
"description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
|
|
1204
|
+
"mediaType": "image",
|
|
1205
|
+
"mediaSubtype": "webp",
|
|
1206
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
|
|
1207
|
+
"tags": ["Inpainting", "Video"],
|
|
1208
|
+
"models": ["Wan2.1", "Wan"],
|
|
1209
|
+
"date": "2025-04-15",
|
|
1210
|
+
"size": 11381663334
|
|
1211
|
+
},
|
|
1212
|
+
{
|
|
1213
|
+
"name": "wan2.1_fun_control",
|
|
1214
|
+
"title": "Wan 2.1 ControlNet",
|
|
1215
|
+
"description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
|
|
1216
|
+
"mediaType": "image",
|
|
1217
|
+
"mediaSubtype": "webp",
|
|
1218
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1219
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1220
|
+
"tags": ["Video to Video", "Video"],
|
|
1221
|
+
"models": ["Wan2.1", "Wan"],
|
|
1222
|
+
"date": "2025-04-15",
|
|
1223
|
+
"size": 11381663334
|
|
1224
|
+
},
|
|
1225
|
+
{
|
|
1226
|
+
"name": "wan2.1_flf2v_720_f16",
|
|
1227
|
+
"title": "Wan 2.1 FLF2V 720p F16",
|
|
1228
|
+
"description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
|
|
1229
|
+
"mediaType": "image",
|
|
1230
|
+
"mediaSubtype": "webp",
|
|
1231
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
1232
|
+
"tags": ["FLF2V", "Video"],
|
|
1233
|
+
"models": ["Wan2.1", "Wan"],
|
|
1234
|
+
"date": "2025-04-15",
|
|
1235
|
+
"size": 41049149932
|
|
1236
|
+
},
|
|
1237
|
+
{
|
|
1238
|
+
"name": "ltxv_text_to_video",
|
|
1239
|
+
"title": "LTXV Text to Video",
|
|
1240
|
+
"mediaType": "image",
|
|
1241
|
+
"mediaSubtype": "webp",
|
|
1242
|
+
"description": "Generate videos from text prompts.",
|
|
1243
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1244
|
+
"tags": ["Text to Video", "Video"],
|
|
1245
|
+
"models": ["LTXV"],
|
|
1246
|
+
"date": "2025-03-01",
|
|
1247
|
+
"size": 19155554140
|
|
1248
|
+
},
|
|
1249
|
+
{
|
|
1250
|
+
"name": "ltxv_image_to_video",
|
|
1251
|
+
"title": "LTXV Image to Video",
|
|
1252
|
+
"mediaType": "image",
|
|
1253
|
+
"mediaSubtype": "webp",
|
|
1254
|
+
"description": "Generate videos from still images.",
|
|
1255
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1256
|
+
"tags": ["Image to Video", "Video"],
|
|
1257
|
+
"models": ["LTXV"],
|
|
1258
|
+
"date": "2025-03-01",
|
|
1259
|
+
"size": 19155554140
|
|
1260
|
+
},
|
|
1261
|
+
{
|
|
1262
|
+
"name": "mochi_text_to_video_example",
|
|
1263
|
+
"title": "Mochi Text to Video",
|
|
1264
|
+
"mediaType": "image",
|
|
1265
|
+
"mediaSubtype": "webp",
|
|
1266
|
+
"description": "Generate videos from text prompts using Mochi model.",
|
|
1267
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
|
|
1268
|
+
"tags": ["Text to Video", "Video"],
|
|
1269
|
+
"models": ["Mochi"],
|
|
1270
|
+
"date": "2025-03-01",
|
|
1271
|
+
"size": 30762703258
|
|
1272
|
+
},
|
|
1273
|
+
{
|
|
1274
|
+
"name": "hunyuan_video_text_to_video",
|
|
1275
|
+
"title": "Hunyuan Video Text to Video",
|
|
1276
|
+
"mediaType": "image",
|
|
1277
|
+
"mediaSubtype": "webp",
|
|
1278
|
+
"description": "Generate videos from text prompts using Hunyuan model.",
|
|
1279
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
|
|
1280
|
+
"tags": ["Text to Video", "Video"],
|
|
1281
|
+
"models": ["Hunyuan Video", "Tencent"],
|
|
1282
|
+
"date": "2025-03-01",
|
|
1283
|
+
"size": 35476429865
|
|
1284
|
+
},
|
|
1285
|
+
{
|
|
1286
|
+
"name": "image_to_video",
|
|
1287
|
+
"title": "SVD Image to Video",
|
|
1288
|
+
"mediaType": "image",
|
|
1289
|
+
"mediaSubtype": "webp",
|
|
1290
|
+
"description": "Generate videos from still images.",
|
|
1291
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
1292
|
+
"tags": ["Image to Video", "Video"],
|
|
1293
|
+
"models": ["SVD", "Stability"],
|
|
1294
|
+
"date": "2025-03-01",
|
|
1295
|
+
"size": 9556302234
|
|
1296
|
+
},
|
|
1297
|
+
{
|
|
1298
|
+
"name": "txt_to_image_to_video",
|
|
1299
|
+
"title": "SVD Text to Image to Video",
|
|
1300
|
+
"mediaType": "image",
|
|
1301
|
+
"mediaSubtype": "webp",
|
|
1302
|
+
"description": "Generate videos by first creating images from text prompts.",
|
|
1303
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
1304
|
+
"tags": ["Text to Video", "Video"],
|
|
1305
|
+
"models": ["SVD", "Stability"],
|
|
1306
|
+
"date": "2025-03-01",
|
|
1307
|
+
"size": 16492674417
|
|
1308
|
+
}
|
|
1309
|
+
]
|
|
1310
|
+
},
|
|
1311
|
+
  {
    "moduleName": "default",
    "category": "GENERATION TYPE",
    "icon": "icon-[lucide--volume-2]",
    "title": "Audio",
    "type": "audio",
    "templates": [
      {
        "name": "audio_stable_audio_example",
        "title": "Stable Audio",
        "mediaType": "audio",
        "mediaSubtype": "mp3",
        "description": "Generate audio from text prompts using Stable Audio.",
        "tags": ["Text to Audio", "Audio"],
        "models": ["Stable Audio", "Stability"],
        "date": "2025-03-01",
        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
        "size": 5744518758
      },
      {
        "name": "audio_ace_step_1_t2a_instrumentals",
        "title": "ACE-Step v1 Text to Instrumental Music",
        "mediaType": "audio",
        "mediaSubtype": "mp3",
        "description": "Generate instrumental music from text prompts using ACE-Step v1.",
        "tags": ["Text to Audio", "Audio"],
        "models": ["ACE-Step"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
        "size": 7698728878
      },
      {
        "name": "audio_ace_step_1_t2a_song",
        "title": "ACE-Step v1 Text to Song",
        "mediaType": "audio",
        "mediaSubtype": "mp3",
        "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual lyrics and style customization.",
        "tags": ["Text to Audio", "Audio"],
        "models": ["ACE-Step"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
        "size": 7698728878
      },
      {
        "name": "audio_ace_step_1_m2m_editing",
        "title": "ACE-Step v1 M2M Editing",
        "mediaType": "audio",
        "mediaSubtype": "mp3",
        "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
        "tags": ["Audio Editing", "Audio"],
        "models": ["ACE-Step"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
        "size": 7698728878
      }
    ]
  },
  {
    "moduleName": "default",
    "category": "GENERATION TYPE",
    "icon": "icon-[lucide--box]",
    "title": "3D Model",
    "type": "3d",
    "templates": [
      {
        "name": "3d_hunyuan3d-v2.1",
        "title": "Hunyuan3D 2.1",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
        "tags": ["Image to 3D", "3D"],
        "models": ["Hunyuan3D", "Tencent"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "size": 4928474972
      },
      {
        "name": "3d_hunyuan3d_image_to_model",
        "title": "Hunyuan3D 2.0",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
        "tags": ["Image to 3D", "3D"],
        "models": ["Hunyuan3D", "Tencent"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "size": 4928474972
      },
      {
        "name": "3d_hunyuan3d_multiview_to_model",
        "title": "Hunyuan3D 2.0 MV",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
        "tags": ["3D", "Image to 3D"],
        "models": ["Hunyuan3D", "Tencent"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "thumbnailVariant": "hoverDissolve",
        "size": 4928474972
      },
      {
        "name": "3d_hunyuan3d_multiview_to_model_turbo",
        "title": "Hunyuan3D 2.0 MV Turbo",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
        "tags": ["Image to 3D", "3D"],
        "models": ["Hunyuan3D", "Tencent"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "thumbnailVariant": "hoverDissolve",
        "size": 4928474972
      }
    ]
  },
  {
    "moduleName": "default",
    "category": "CLOSED SOURCE MODELS",
    "title": "Image API",
    "icon": "icon-[lucide--hand-coins]",
    "type": "image",
    "templates": [
      {
        "name": "api_nano_banana_pro",
        "title": "Nano Banana Pro",
        "description": "Nano-banana Pro (Gemini 3.0 Pro Image) - Studio-quality 4K image generation and editing with enhanced text rendering and character consistency.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "hoverDissolve",
        "tags": ["Image Edit", "Image", "API"],
        "models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
        "date": "2025-11-21",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_from_photo_2_miniature",
        "title": "Photo to Blueprint to Model",
        "description": "Transform real building photos into architectural blueprints and then into detailed physical scale models. A complete architectural visualization pipeline from photo to miniature.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image Edit", "Image", "3D"],
        "models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
        "date": "2025-11-21",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_bytedance_seedream4",
        "title": "ByteDance Seedream 4.0",
        "description": "Multi-modal AI model for text-to-image generation and image editing. Generate 2K images in under 2 seconds with natural-language control.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image Edit", "Image", "API", "Text to Image"],
        "models": ["Seedream 4.0", "ByteDance"],
        "date": "2025-09-11",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_google_gemini_image",
        "title": "Nano Banana",
        "description": "Nano-banana (Gemini-2.5-Flash Image) - image editing with consistency.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image Edit", "Image", "API", "Text to Image"],
        "models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
        "date": "2025-08-27",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_flux2",
        "title": "Flux.2 Pro",
        "description": "Generate up to 4MP photorealistic images with multi-reference consistency and professional text rendering.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image Edit", "Image", "API", "Text to Image"],
        "models": ["Flux.2", "BFL"],
        "date": "2025-11-26",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_topaz_image_enhance",
        "title": "Topaz Image Enhance",
        "description": "Professional image enhancement using Topaz's Reimagine model with face enhancement and detail restoration.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tags": ["Image", "API", "Upscale"],
        "models": ["Topaz", "Reimagine"],
        "date": "2025-11-25",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_bfl_flux_1_kontext_multiple_images_input",
        "title": "BFL Flux.1 Kontext Multiple Image Input",
        "description": "Input multiple images and edit them with Flux.1 Kontext.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
        "tags": ["Image Edit", "Image"],
        "models": ["Flux", "Kontext", "BFL"],
        "date": "2025-05-29",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_bfl_flux_1_kontext_pro_image",
        "title": "BFL Flux.1 Kontext Pro",
        "description": "Edit images with Flux.1 Kontext Pro.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
        "tags": ["Image Edit", "Image"],
        "models": ["Flux", "Kontext", "BFL"],
        "date": "2025-05-29",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_bfl_flux_1_kontext_max_image",
        "title": "BFL Flux.1 Kontext Max",
        "description": "Edit images with Flux.1 Kontext Max.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
        "tags": ["Image Edit", "Image"],
        "models": ["Flux", "Kontext", "BFL"],
        "date": "2025-05-29",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_wan_text_to_image",
        "title": "Wan2.5: Text to Image",
        "description": "Generate images with excellent prompt following and visual quality using Wan2.5.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Wan2.5", "Wan"],
        "date": "2025-09-25",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_bfl_flux_pro_t2i",
        "title": "BFL Flux[Pro]: Text to Image",
        "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
        "tags": ["Image Edit", "Image"],
        "models": ["Flux", "BFL"],
        "date": "2025-05-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_luma_photon_i2i",
        "title": "Luma Photon: Image to Image",
        "description": "Guide image generation using a combination of images and a prompt.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tags": ["Image to Image", "Image", "API"],
        "models": ["Luma"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_luma_photon_style_ref",
        "title": "Luma Photon: Style Reference",
        "description": "Generate images by blending style references with precise control using Luma Photon.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Luma"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_recraft_image_gen_with_color_control",
        "title": "Recraft: Color Control Image Generation",
        "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Recraft"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_recraft_image_gen_with_style_control",
        "title": "Recraft: Style Control Image Generation",
        "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Recraft"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_recraft_vector_gen",
        "title": "Recraft: Vector Generation",
        "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API", "Vector"],
        "models": ["Recraft"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_runway_text_to_image",
        "title": "Runway: Text to Image",
        "description": "Generate high-quality images from text prompts using Runway's AI model.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Runway"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_runway_reference_to_image",
        "title": "Runway: Reference to Image",
        "description": "Generate new images based on reference styles and compositions with Runway's AI.",
        "mediaType": "image",
        "thumbnailVariant": "compareSlider",
        "mediaSubtype": "webp",
        "tags": ["Image to Image", "Image", "API"],
        "models": ["Runway"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_stability_ai_stable_image_ultra_t2i",
        "title": "Stability AI: Stable Image Ultra Text to Image",
        "description": "Generate high-quality images with excellent prompt adherence. Perfect for professional image generation at 1 megapixel resolution.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Stability"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_stability_ai_i2i",
        "title": "Stability AI: Image to Image",
        "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
        "mediaType": "image",
        "thumbnailVariant": "compareSlider",
        "mediaSubtype": "webp",
        "tags": ["Image to Image", "Image", "API"],
        "models": ["Stability"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_stability_ai_sd3.5_t2i",
        "title": "Stability AI: SD3.5 Text to Image",
        "description": "Generate high-quality images with excellent prompt adherence. Perfect for professional image generation at 1 megapixel resolution.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Stability"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_stability_ai_sd3.5_i2i",
        "title": "Stability AI: SD3.5 Image to Image",
        "description": "Generate high-quality images with excellent prompt adherence. Perfect for professional image generation at 1 megapixel resolution.",
        "mediaType": "image",
        "thumbnailVariant": "compareSlider",
        "mediaSubtype": "webp",
        "tags": ["Image to Image", "Image", "API"],
        "models": ["Stability"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_ideogram_v3_t2i",
        "title": "Ideogram V3: Text to Image",
        "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Ideogram"],
        "date": "2025-03-01",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_openai_image_1_t2i",
        "title": "OpenAI: GPT-Image-1 Text to Image",
        "description": "Generate images from text prompts using the OpenAI GPT-Image-1 API.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["GPT-Image-1", "OpenAI"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_openai_image_1_i2i",
        "title": "OpenAI: GPT-Image-1 Image to Image",
        "description": "Generate images from input images using the OpenAI GPT-Image-1 API.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tags": ["Image to Image", "Image", "API"],
        "models": ["GPT-Image-1", "OpenAI"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_openai_image_1_inpaint",
        "title": "OpenAI: GPT-Image-1 Inpaint",
        "description": "Edit images using inpainting with the OpenAI GPT-Image-1 API.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tags": ["Inpainting", "Image", "API"],
        "models": ["GPT-Image-1", "OpenAI"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_openai_image_1_multi_inputs",
        "title": "OpenAI: GPT-Image-1 Multi Inputs",
        "description": "Generate images from multiple inputs using the OpenAI GPT-Image-1 API.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["GPT-Image-1", "OpenAI"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_openai_dall_e_2_t2i",
        "title": "OpenAI: Dall-E 2 Text to Image",
        "description": "Generate images from text prompts using the OpenAI Dall-E 2 API.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Dall-E", "OpenAI"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_openai_dall_e_2_inpaint",
        "title": "OpenAI: Dall-E 2 Inpaint",
        "description": "Edit images using inpainting with the OpenAI Dall-E 2 API.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tags": ["Inpainting", "Image", "API"],
        "models": ["Dall-E", "OpenAI"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_openai_dall_e_3_t2i",
        "title": "OpenAI: Dall-E 3 Text to Image",
        "description": "Generate images from text prompts using the OpenAI Dall-E 3 API.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Image", "Image", "API"],
        "models": ["Dall-E", "OpenAI"],
        "date": "2025-03-01",
        "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      }
    ]
  },
  {
    "moduleName": "default",
    "category": "CLOSED SOURCE MODELS",
    "title": "Video API",
    "icon": "icon-[lucide--film]",
    "type": "video",
    "templates": [
      {
        "name": "api_openai_sora_video",
        "title": "Sora 2: Text & Image to Video",
        "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Text to Video", "API"],
        "models": ["OpenAI"],
        "date": "2025-10-08",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_ltxv_text_to_video",
        "title": "LTX-2: Text to Video",
        "description": "Generate high-quality videos from text prompts using Lightricks LTX-2 with synchronized audio. Supports up to 4K resolution at 50fps with Fast, Pro, and Ultra modes for various production needs.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Video", "Video", "API"],
        "models": ["LTX-2", "Lightricks"],
        "date": "2025-10-28",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_ltxv_image_to_video",
        "title": "LTX-2: Image to Video",
        "description": "Transform static images into dynamic videos with LTX-2 Pro. Generate cinematic video sequences with natural motion, synchronized audio, and support for up to 4K resolution at 50fps.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["LTX-2", "Lightricks"],
        "date": "2025-10-28",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_wan_text_to_video",
        "title": "Wan2.5: Text to Video",
        "description": "Generate videos with synchronized audio, enhanced motion, and superior quality.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Wan2.5", "Wan"],
        "date": "2025-09-27",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_wan_image_to_video",
        "title": "Wan2.5: Image to Video",
        "description": "Transform images into videos with synchronized audio, enhanced motion, and superior quality.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Wan2.5", "Wan"],
        "date": "2025-09-27",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_kling_i2v",
        "title": "Kling: Image to Video",
        "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Kling"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_kling_effects",
        "title": "Kling: Video Effects",
        "description": "Generate dynamic videos by applying visual effects to images using Kling.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Video", "API"],
        "models": ["Kling"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_kling_flf",
        "title": "Kling: FLF2V",
        "description": "Generate videos by controlling the first and last frames.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Video", "API", "FLF2V"],
        "models": ["Kling"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_vidu_text_to_video",
        "title": "Vidu: Text to Video",
        "description": "Generate high-quality 1080p videos from text prompts with adjustable movement amplitude and duration control using Vidu's advanced AI model.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Video", "Video", "API"],
        "models": ["Vidu"],
        "date": "2025-08-23",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_vidu_image_to_video",
        "title": "Vidu: Image to Video",
        "description": "Transform static images into dynamic 1080p videos with precise motion control and customizable movement amplitude using Vidu.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Vidu"],
        "date": "2025-08-23",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_vidu_reference_to_video",
        "title": "Vidu: Reference to Video",
        "description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Video", "Image to Video", "API"],
        "models": ["Vidu"],
        "date": "2025-08-23",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_vidu_start_end_to_video",
        "title": "Vidu: Start End to Video",
        "description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Video", "API", "FLF2V"],
        "models": ["Vidu"],
        "date": "2025-08-23",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_bytedance_text_to_video",
        "title": "ByteDance: Text to Video",
        "description": "Generate high-quality videos directly from text prompts using ByteDance's Seedance model. Supports multiple resolutions and aspect ratios with natural motion and cinematic quality.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Video", "API", "Text to Video"],
        "models": ["ByteDance"],
        "date": "2025-10-06",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_bytedance_image_to_video",
        "title": "ByteDance: Image to Video",
        "description": "Transform static images into dynamic videos using ByteDance's Seedance model. Analyzes image structure and generates natural motion with consistent visual style and coherent video sequences.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Video", "API", "Image to Video"],
        "models": ["ByteDance"],
        "date": "2025-10-06",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_bytedance_flf2v",
        "title": "ByteDance: Start End to Video",
        "description": "Generate cinematic video transitions between start and end frames with fluid motion, scene consistency, and professional polish using ByteDance's Seedance model.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Video", "API", "FLF2V"],
        "models": ["ByteDance"],
        "date": "2025-10-06",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_topaz_video_enhance",
        "title": "Topaz Video Enhance",
        "description": "Enhance videos with Topaz AI. Supports resolution upscaling with the Starlight (Astra) Fast model and frame interpolation with the apo-8 model.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "thumbnailVariant": "compareSlider",
        "tags": ["Video", "API", "Upscale"],
        "models": ["Topaz"],
        "date": "2025-11-25",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_luma_i2v",
        "title": "Luma: Image to Video",
        "description": "Take static images and instantly create magical, high-quality animations.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Luma"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_luma_t2v",
        "title": "Luma: Text to Video",
        "description": "Generate high-quality videos from simple prompts.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Video", "Video", "API"],
        "models": ["Luma"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_moonvalley_text_to_video",
        "title": "Moonvalley: Text to Video",
        "description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Video", "Video", "API"],
        "models": ["Moonvalley"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_moonvalley_image_to_video",
        "title": "Moonvalley: Image to Video",
        "description": "Generate cinematic, 1080p videos from an image through a model trained exclusively on licensed data.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Moonvalley"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_moonvalley_video_to_video_motion_transfer",
        "title": "Moonvalley: Motion Transfer",
        "description": "Apply motion from one video to another.",
        "mediaType": "image",
        "thumbnailVariant": "hoverDissolve",
        "mediaSubtype": "webp",
        "tags": ["Video to Video", "Video", "API"],
        "models": ["Moonvalley"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_moonvalley_video_to_video_pose_control",
        "title": "Moonvalley: Pose Control",
        "description": "Apply human pose and movement from one video to another.",
        "mediaType": "image",
        "thumbnailVariant": "hoverDissolve",
        "mediaSubtype": "webp",
        "tags": ["Video to Video", "Video", "API"],
        "models": ["Moonvalley"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_hailuo_minimax_video",
        "title": "MiniMax: Video",
        "description": "Generate high-quality videos from text prompts with optional first-frame control using the MiniMax Hailuo-02 model. Supports multiple resolutions (768P/1080P) and durations (6/10s) with intelligent prompt optimization.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Video", "Video", "API"],
        "models": ["MiniMax"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_hailuo_minimax_t2v",
        "title": "MiniMax: Text to Video",
        "description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives, with professional CGI effects and stylistic elements that bring your descriptions to life.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Video", "Video", "API"],
        "models": ["MiniMax"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_hailuo_minimax_i2v",
        "title": "MiniMax: Image to Video",
        "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["MiniMax"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_pixverse_i2v",
        "title": "PixVerse: Image to Video",
        "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["PixVerse"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_pixverse_template_i2v",
        "title": "PixVerse Templates: Image to Video",
        "description": "Generate dynamic videos from static images with motion and effects using PixVerse templates.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["PixVerse"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_pixverse_t2v",
        "title": "PixVerse: Text to Video",
        "description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Video", "Video", "API"],
        "models": ["PixVerse"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_runway_gen3a_turbo_image_to_video",
        "title": "Runway: Gen3a Turbo Image to Video",
        "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Runway"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_runway_gen4_turo_image_to_video",
        "title": "Runway: Gen4 Turbo Image to Video",
        "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Runway"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_runway_first_last_frame",
        "title": "Runway: First Last Frame to Video",
        "description": "Generate smooth video transitions between two keyframes with Runway's precision.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Video", "API", "FLF2V"],
        "models": ["Runway"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_pika_i2v",
        "title": "Pika: Image to Video",
        "description": "Generate smooth animated videos from single static images using Pika AI.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Pika"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_pika_scene",
        "title": "Pika Scenes: Images to Video",
        "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Pika"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_veo2_i2v",
        "title": "Veo2: Image to Video",
        "description": "Generate videos from images using the Google Veo2 API.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Video", "API"],
        "models": ["Veo", "Google"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_veo3",
        "title": "Veo3: Image to Video",
        "description": "Generate high-quality 8-second videos from text prompts or images using Google's advanced Veo 3 API. Features audio generation, prompt enhancement, and dual model options for speed or quality.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to Video", "Text to Video", "API"],
        "models": ["Veo", "Google"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      }
    ]
  },
  {
    "moduleName": "default",
    "category": "CLOSED SOURCE MODELS",
    "title": "3D API",
    "icon": "icon-[lucide--box]",
    "type": "image",
    "templates": [
      {
        "name": "api_rodin_gen2",
        "title": "Rodin: Gen-2 Image to Model",
        "description": "Generate detailed 3D models with 4x mesh quality from photos using Rodin Gen-2.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to 3D", "3D", "API"],
        "models": ["Rodin"],
        "date": "2025-09-27",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_rodin_image_to_model",
        "title": "Rodin: Image to Model",
        "description": "Generate detailed 3D models from single photos using Rodin AI.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to 3D", "3D", "API"],
        "models": ["Rodin"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_rodin_multiview_to_model",
        "title": "Rodin: Multiview to Model",
        "description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to 3D", "3D", "API"],
        "models": ["Rodin"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_tripo_text_to_model",
        "title": "Tripo: Text to Model",
        "description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Text to Model", "3D", "API"],
        "models": ["Tripo"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_tripo_image_to_model",
        "title": "Tripo: Image to Model",
        "description": "Generate professional 3D assets from 2D images using the Tripo engine.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to 3D", "3D", "API"],
        "models": ["Tripo"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      },
      {
        "name": "api_tripo_multiview_to_model",
        "title": "Tripo: Multiview to Model",
        "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
        "mediaType": "image",
        "mediaSubtype": "webp",
        "tags": ["Image to 3D", "3D", "API"],
        "models": ["Tripo"],
        "date": "2025-03-01",
        "tutorialUrl": "",
        "OpenSource": false,
        "size": 0,
        "vram": 0
      }
    ]
  },
{
|
|
2443
|
+
"moduleName": "default",
|
|
2444
|
+
"category": "CLOSED SOURCE MODELS",
|
|
2445
|
+
"title": "Audio API",
|
|
2446
|
+
"type": "audio",
|
|
2447
|
+
"icon": "icon-[lucide--volume-2]",
|
|
2448
|
+
"templates": [
|
|
2449
|
+
{
|
|
2450
|
+
"name": "api_stability_ai_text_to_audio",
|
|
2451
|
+
"title": "Stability AI: Text to Audio",
|
|
2452
|
+
"description": "Generate music from text using Stable Audio 2.5. Create minutes-long tracks in seconds.",
|
|
2453
|
+
"mediaType": "audio",
|
|
2454
|
+
"mediaSubtype": "mp3",
|
|
2455
|
+
"tags": ["Text to Audio", "Audio", "API"],
|
|
2456
|
+
"date": "2025-09-09",
|
|
2457
|
+
"models": ["Stability", "Stable Audio"],
|
|
2458
|
+
"OpenSource": false,
|
|
2459
|
+
"size": 0,
|
|
2460
|
+
"vram": 0
|
|
2461
|
+
},
|
|
2462
|
+
{
|
|
2463
|
+
"name": "api_stability_ai_audio_to_audio",
|
|
2464
|
+
"title": "Stability AI: Audio to Audio",
|
|
2465
|
+
"description": "Transform audio into new compositions using Stable Audio 2.5. Upload audio and AI creates complete tracks.",
|
|
2466
|
+
"mediaType": "audio",
|
|
2467
|
+
"mediaSubtype": "mp3",
|
|
2468
|
+
"tags": ["Audio to Audio", "Audio", "API"],
|
|
2469
|
+
"date": "2025-09-09",
|
|
2470
|
+
"models": ["Stability", "Stable Audio"],
|
|
2471
|
+
"OpenSource": false,
|
|
2472
|
+
"size": 0,
|
|
2473
|
+
"vram": 0
|
|
2474
|
+
},
|
|
2475
|
+
{
|
|
2476
|
+
"name": "api_stability_ai_audio_inpaint",
|
|
2477
|
+
"title": "Stability AI: Audio Inpainting",
|
|
2478
|
+
"description": "Complete or extend audio tracks using Stable Audio 2.5. Upload audio and AI generates the rest.",
|
|
2479
|
+
"mediaType": "audio",
|
|
2480
|
+
"mediaSubtype": "mp3",
|
|
2481
|
+
"tags": ["Audio to Audio", "Audio", "API"],
|
|
2482
|
+
"date": "2025-09-09",
|
|
2483
|
+
"models": ["Stability", "Stable Audio"],
|
|
2484
|
+
"OpenSource": false,
|
|
2485
|
+
"size": 0,
|
|
2486
|
+
"vram": 0
|
|
2487
|
+
}
|
|
2488
|
+
]
|
|
2489
|
+
},
|
|
2490
|
+
{
|
|
2491
|
+
"moduleName": "default",
|
|
2492
|
+
"category": "CLOSED SOURCE MODELS",
|
|
2493
|
+
"title": "LLM API",
|
|
2494
|
+
"icon": "icon-[lucide--message-square-text]",
|
|
2495
|
+
"type": "image",
|
|
2496
|
+
"templates": [
|
|
2497
|
+
{
|
|
2498
|
+
"name": "api_openai_chat",
|
|
2499
|
+
"title": "OpenAI: Chat",
|
|
2500
|
+
"description": "Engage with OpenAI's advanced language models for intelligent conversations.",
|
|
2501
|
+
"mediaType": "image",
|
|
2502
|
+
"mediaSubtype": "webp",
|
|
2503
|
+
"tags": ["LLM", "API"],
|
|
2504
|
+
"models": ["OpenAI"],
|
|
2505
|
+
"date": "2025-03-01",
|
|
2506
|
+
"tutorialUrl": "",
|
|
2507
|
+
"OpenSource": false,
|
|
2508
|
+
"size": 0,
|
|
2509
|
+
"vram": 0
|
|
2510
|
+
},
|
|
2511
|
+
{
|
|
2512
|
+
"name": "api_google_gemini",
|
|
2513
|
+
"title": "Google Gemini: Chat",
|
|
2514
|
+
"description": "Experience Google's multimodal AI with Gemini's reasoning capabilities.",
|
|
2515
|
+
"mediaType": "image",
|
|
2516
|
+
"mediaSubtype": "webp",
|
|
2517
|
+
"tags": ["LLM", "API"],
|
|
2518
|
+
"models": ["Google Gemini", "Google"],
|
|
2519
|
+
"date": "2025-03-01",
|
|
2520
|
+
"tutorialUrl": "",
|
|
2521
|
+
"OpenSource": false,
|
|
2522
|
+
"size": 0,
|
|
2523
|
+
"vram": 0
|
|
2524
|
+
}
|
|
2525
|
+
]
|
|
2526
|
+
}
|
|
2527
|
+
]
|
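The entries above close out the template index: an array of category objects ("Video API", "3D API", "Audio API", "LLM API"), each carrying a `templates` array whose items share the same scalar fields (`name`, `title`, `description`, `mediaType`, `mediaSubtype`, `tags`, `models`, `date`, `OpenSource`, `size`, `vram`). Below is a minimal sketch of how an index with this shape might be consumed; the `templates/index.json` path is an assumption for illustration, not a documented entry point of this package.

```python
import json
from collections import defaultdict
from pathlib import Path

# Load the template index; the file location is assumed for this sketch.
index = json.loads(Path("templates/index.json").read_text(encoding="utf-8"))

# Group template titles under their category title ("3D API", "Audio API", ...).
by_category = defaultdict(list)
for category in index:
    for template in category.get("templates", []):
        by_category[category["title"]].append(template["title"])

# Filter by tag: every template tagged "Image to 3D" in the entries above.
image_to_3d = [
    template["name"]
    for category in index
    for template in category.get("templates", [])
    if "Image to 3D" in template.get("tags", [])
]
print(image_to_3d)
# Expected from the entries above: api_rodin_gen2, api_rodin_image_to_model,
# api_rodin_multiview_to_model, api_tripo_image_to_model,
# api_tripo_multiview_to_model
```

Note that `tutorialUrl` is absent from the Stability AI entries, so lookups of optional fields should use `.get()` as above rather than direct indexing.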