comfyui-workflow-templates-media-other 0.3.10__py3-none-any.whl → 0.3.61__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed.json +6 -6
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +81 -60
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1.json +2 -2
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model.json +3 -3
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals.json +4 -4
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example.json +2 -2
- comfyui_workflow_templates_media_other/templates/gsc_starter_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_1.json +839 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_2-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_2.json +7037 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_3-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_3.json +2550 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_full.json +3 -3
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo.json +756 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo_fun_union_controlnet-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo_fun_union_controlnet-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/index.ar.json +2187 -1591
- comfyui_workflow_templates_media_other/templates/index.es.json +2189 -1598
- comfyui_workflow_templates_media_other/templates/index.fr.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.ja.json +2179 -1588
- comfyui_workflow_templates_media_other/templates/index.json +2182 -1592
- comfyui_workflow_templates_media_other/templates/index.ko.json +2179 -1588
- comfyui_workflow_templates_media_other/templates/index.pt-BR.json +3117 -0
- comfyui_workflow_templates_media_other/templates/index.ru.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.schema.json +36 -3
- comfyui_workflow_templates_media_other/templates/index.tr.json +2185 -1589
- comfyui_workflow_templates_media_other/templates/index.zh-TW.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.zh.json +2180 -1589
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur.json +3 -3
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth.json +4 -4
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example.json +181 -40
- comfyui_workflow_templates_media_other/templates/templates-color_illustration-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-color_illustration-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-color_illustration.json +176 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real.json +1195 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16.json +2 -2
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control.json +2 -2
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp.json +2 -2
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/METADATA +1 -1
- comfyui_workflow_templates_media_other-0.3.61.dist-info/RECORD +77 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship.json +0 -1256
- comfyui_workflow_templates_media_other/templates/ByteDance-Seedance_00003_.json +0 -210
- comfyui_workflow_templates_media_other/templates/area_composition-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition.json +0 -1626
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject.json +0 -1114
- comfyui_workflow_templates_media_other/templates/default-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/default.json +0 -547
- comfyui_workflow_templates_media_other/templates/embedding_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/embedding_example.json +0 -267
- comfyui_workflow_templates_media_other/templates/esrgan_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example.json +0 -635
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example.json +0 -686
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1.json +0 -1133
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow.json +0 -1029
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow.json +0 -772
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model.json +0 -929
- comfyui_workflow_templates_media_other/templates/lora-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora.json +0 -615
- comfyui_workflow_templates_media_other/templates/lora_multiple-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple.json +0 -656
- comfyui_workflow_templates_media_other-0.3.10.dist-info/RECORD +0 -92
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/WHEEL +0 -0
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/top_level.txt +0 -0
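Note on the entry shape: the index.json hunks below touch many per-template fields. As a reading aid, the fields visible in this diff can be sketched as a Python TypedDict. This is inferred from the diff alone; the authoritative definition is templates/index.schema.json (changed +36 -3 in this release), and treating every key as optional is an assumption.

    from typing import TypedDict

    class TemplateEntry(TypedDict, total=False):
        # Sketch of one object in the "templates" arrays below, inferred from
        # this diff only; total=False because the diff does not show which
        # keys index.schema.json actually requires.
        name: str                      # e.g. "image_z_image_turbo"
        title: str
        description: str
        mediaType: str                 # "image" throughout this diff
        mediaSubtype: str              # "webp" throughout this diff
        thumbnailVariant: str          # "compareSlider" or "hoverDissolve"
        tutorialUrl: str
        tags: list[str]
        models: list[str]
        requiresCustomNodes: list[str]
        openSource: bool               # false on the API-backed entries below
        date: str                      # e.g. "2025-12-20"
        searchRank: int
        size: int                      # bytes; 0 on API-backed entries
        vram: int                      # bytes; 0 on API-backed entries
        usage: int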
@@ -1,328 +1,392 @@
 [
   {
     "moduleName": "default",
-    "
-    "
+    "category": "GENERATION TYPE",
+    "icon": "icon-[lucide--star]",
+    "title": "Use cases",
     "type": "image",
     "templates": [
       {
-        "name": "
-        "title": "
+        "name": "templates-color_illustration",
+        "title": "Add Color to Line Art Illustration",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["
-        "models": ["
-        "
-        "
+        "thumbnailVariant": "compareSlider",
+        "description": "Input a black and white illustration and generate a colored output.",
+        "tags": ["API"],
+        "models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+        "openSource": false,
+        "date": "2025-12-20",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-image_to_real",
+        "title": "Illustration to Realism",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["
-        "models": ["Qwen-Image"],
-        "date": "2025-
-        "size":
+        "thumbnailVariant": "compareSlider",
+        "description": "Input an illustration and generate a hyper realistic version using Qwen Image Edit 2509.",
+        "tags": ["Style Transfer"],
+        "models": ["Qwen-Image-Edit"],
+        "date": "2025-12-20",
+        "size": 0,
+        "vram": 0
       },
       {
-        "name": "
-        "title": "
-        "description": "Generate videos from an input image using Wan2.2 14B",
+        "name": "templates-8x8_grid-pfp",
+        "title": "Profile Picture Stylized Variations",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "tags": ["
-        "models": ["
-        "
-        "
+        "description": "Upload your profile picture, enter a theme and generate 64 variations.",
+        "tags": ["API"],
+        "models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+        "openSource": false,
+        "date": "2025-12-18",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+        "usage": 51
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-subject_product_swap",
+        "title": "Swap Product in Character’s Hand, UGC Style",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "tags": ["
-        "models": ["
-        "
-        "
-        "
+        "description": "Upload a photo of a character holding a product and your brands product. Generate an image with the products swapped.",
+        "tags": ["Product", "Replacement", "API"],
+        "models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+        "openSource": false,
+        "date": "2025-12-18",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 63
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-subject_holding_product",
+        "title": "Add Product to Character’s Hand, AI UGC",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "Generate
-        "tags": ["
-        "models": ["
-        "
-        "
-        "
+        "description": "Upload a photo of your character and your product. Generate an image of that character holding the product.",
+        "tags": ["Product", "Portrait", "API"],
+        "models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+        "openSource": false,
+        "date": "2025-12-18",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 43
      },
       {
-        "name": "
-        "title": "Image
+        "name": "templates-car_product",
+        "title": "1 Image to Car Product Shotse",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "date": "2025-
-        "
-        "
+        "description": "Upload a photo of your vehicle and generate a studio quality video of the vehicle from multiple angles.",
+        "tags": ["Product", "Image to Video", "API", "FLF2V"],
+        "models": ["Seedream", "Kling"],
+        "openSource": false,
+        "date": "2025-12-18",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui-videohelpersuite"],
+        "usage": 70
       },
       {
-        "name": "
-        "title": "Image to
+        "name": "templates-photo_to_product_vid",
+        "title": "Phone Image to Product Video: Shoe",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
-        "size":
-        "vram":
+        "description": "Take a picture with your phone, upload it and generate a studio grade product video.",
+        "tags": ["Product", "Image to Video", "API"],
+        "models": ["Seedream", "Hailuo"],
+        "openSource": false,
+        "date": "2025-12-18",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui-videohelpersuite"],
+        "usage": 124
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-stitched_vid_contact_sheet",
+        "title": "Character & Outfit to Fashion Video",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "Generate
-        "
-        "
-        "
-        "date": "2025-
-        "
-        "
+        "description": "Upload your character and clothing items or accessories. Generate a fashion photograph base and use as a reference to 8x grid images, together with multi-KeyFrame Video Stitching ",
+        "tags": ["Fashion", "Image to Video", "FLF2V", "API"],
+        "models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Kling", "Kling O1", "OpenAI"],
+        "openSource": false,
+        "date": "2025-12-18",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+        "usage": 78
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-assemble_dieline",
+        "title": "Generate Brand Packaging from Dieline",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["
-        "models": ["
-        "
-        "
-        "
+        "thumbnailVariant": "hoverDissolve",
+        "description": "Upload a dieline of your product and assemble into a 3D package.",
+        "tags": ["Product", "Image Edit"],
+        "models": ["Google Gemini Image", "Nano Banana Pro", "Google"],
+        "openSource": false,
+        "date": "2025-12-15",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 12
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-fashion_shoot_vton",
+        "title": "Character + Clothing (OOTD) Flat Lay to Studio Photoshoot",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "size":
-        "vram":
+        "description": "Upload an image of your character and a flatlay image of your clothing items. Generate 4 fashion editorial photographs of your character in the outfit. Select which image to upscale and add back details.",
+        "tags": ["Fashion", "Image Edit"],
+        "models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+        "openSource": false,
+        "date": "2025-12-15",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+        "usage": 104
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-fashion_shoot_prompt_doodle",
+        "title": "Selfie + Text Prompt to Studio Photoshoot Doodle",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "size":
-        "vram":
+        "description": "Upload a selfie and describe your outfit in the prompt. Generate 4 fashion editorial photographs with fun doodle illustrations. Select which image to upscale and add back face details.",
+        "tags": ["Fashion", "Image Edit"],
+        "models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+        "openSource": false,
+        "date": "2025-12-15",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+        "usage": 20
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-poster_product_integration",
+        "title": "Generate Poster/Ad Asset with your Product",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["
-        "models": ["
-        "
-        "
-        "
+        "thumbnailVariant": "compareSlider",
+        "description": "Upload your product and a simple text prompt for the poster or ad design. Iterate on the look before swapping the product into the generate layout.",
+        "tags": ["Product", "Image Edit"],
+        "models": ["ByteDance", "Seedream", "Google Gemini"],
+        "openSource": false,
+        "date": "2025-12-15",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui_essentials"],
+        "usage": 37
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-3D_logo_texture_animation",
+        "title": "Dynamic 3D Logo Animations",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "Generate
-        "
-        "
-        "
-        "date": "2025-
-        "
-        "
+        "description": "Upload a vector image of your logo, and prompt your desired texture. Generate a textured 3D first and last frame with automated prompting for the final animation.",
+        "tags": ["Brand Design", "FLF2V"],
+        "models": ["ByteDance", "Seedream", "Google Gemini", "Nano Banana Pro"],
+        "openSource": false,
+        "date": "2025-12-15",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 42
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-product_scene_relight",
+        "title": "Composite your Product + Scene and Relight",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "thumbnailVariant": "compareSlider",
+        "description": "Upload an image of your product and background. Composite them and seamlessly relight and fuse together using Seedream 4.5.",
+        "tags": ["Product", "Image Edit", "Relight"],
+        "models": ["ByteDance", "Seedream"],
+        "openSource": false,
+        "date": "2025-12-15",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 11
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-textured_logo_elements",
+        "title": "Apply Texture + Elements to Logo",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "Generate
-        "tags": ["
-        "models": ["
-        "
-        "
-        "
-        "
+        "description": "Upload your logo, texture and elements. Generate a video of the textured logo for an on brand asset.",
+        "tags": ["Brand Design", "Image to Video"],
+        "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
+        "openSource": false,
+        "date": "2025-12-11",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 255
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-qwen_image_edit-crop_and_stitch-fusion",
+        "title": "Relight Composited Product",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "Upscale images by enhancing quality in latent space.",
         "thumbnailVariant": "compareSlider",
-        "
-        "
-        "
-        "
-        "
-        "
+        "description": "Upload a composited image of your product, draw a mask in the mask editor and relight your product into the scene.",
+        "tags": ["Image Edit", "Relight"],
+        "models": ["Qwen-Image-Edit"],
+        "date": "2025-12-11",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui-inpaint-cropandstitch"],
+        "usage": 361
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-textured_logotype-v2.1",
+        "title": "Apply Texture to Logo",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "size":
-        "vram":
+        "description": "Upload your logotype and apply a texture + elements for on brand asset",
+        "tags": ["Brand Design", "Image to Video", "FLF2V"],
+        "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
+        "date": "2025-12-03",
+        "openSource": false,
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 299
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-product_ad-v2.0",
+        "title": "Swap Product Into a Reference AD",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "size":
-        "vram":
+        "description": "Create static ads for your product in the style of a reference advertisement",
+        "tags": ["Style Reference"],
+        "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
+        "date": "2025-12-03",
+        "openSource": false,
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 222
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-6-key-frames",
+        "title": "Multi-Keyframe Video Stitching",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "description": "Creates a smooth video using 6 key frames.It auto fills in the motion between frames and stitchs the segments together seamlessly.",
+        "tags": ["Image to Video", "FLF2V"],
+        "models": ["Wan2.2"],
+        "date": "2025-12-03",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "usage": 1972
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-9grid_social_media-v2.0",
+        "title": "3x3 Grid For Product Ads",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "description": "Upload your product and enter a brief prompt for each grid position in a 3x3 grid. Generates 9 distinct images. Select the images you like and upscale to 4k using your product as reference.",
+        "tags": ["Image Edit", "Image"],
+        "models": ["Nano Banana Pro", "Google"],
+        "date": "2025-12-06",
+        "searchRank": 8,
+        "size": 0,
+        "vram": 0,
+        "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+        "usage": 466
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-poster_to_2x2_mockups-v2.0",
+        "title": "Poster Scene Mockups",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "size":
-        "
+        "description": "Upload a poster/ad design and with a short input about your brand, generate 4 mockups in multiple scenes.",
+        "tags": ["Image Edit", "Mockup"],
+        "models": ["Nano Banana Pro", "Google"],
+        "date": "2025-12-06",
+        "searchRank": 8,
+        "openSource": false,
+        "size": 0,
+        "requiresCustomNodes": ["comfyui_essentials"],
+        "vram": 0,
+        "usage": 61
       },
       {
-        "name": "
-        "title": "
+        "name": "template-multistyle-magazine-cover-nanobananapro",
+        "title": "Magazine Cover & Package Design",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "size":
-        "vram":
+        "description": "Design the text layout for your magazine cover photo, and explore packaging options for it.",
+        "tags": ["Image Edit", "Mockup", "Layout Design"],
+        "models": ["Nano Banana Pro", "Google"],
+        "date": "2025-12-06",
+        "searchRank": 8,
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 87
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-1_click_multiple_scene_angles-v1.0",
+        "title": "1 click Multiple Scene Angles",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "description": "Upload an image of your scene and generate multiple views of your input scene with 1 click.",
+        "tags": ["Image Eidt"],
+        "models": ["Qwen-Image-Edit"],
+        "date": "2025-12-08",
+        "searchRank": 8,
+        "size": 31198642438,
+        "vram": 31198642438,
+        "usage": 1508
       },
       {
-        "name": "
-        "title": "
+        "name": "templates-1_click_multiple_character_angles-v1.0",
+        "title": "1 Click Multiple Character Angles",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "description": "Upload an image of your character and get multiple views of that image from different angles",
+        "tags": ["Image Eidt"],
+        "models": ["Qwen-Image-Edit"],
+        "date": "2025-12-08",
+        "searchRank": 8,
+        "size": 31198642438,
+        "vram": 31198642438,
+        "usage": 3637
+      },
+      {
+        "name": "template-Animation_Trajectory_Control_Wan_ATI",
+        "title": "Animation Trajectory Control",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Draw the movement trajectory you want for the input image.",
+        "tags": ["Image to Video"],
+        "models": ["Wan2.1"],
+        "date": "2025-12-11",
+        "searchRank": 8,
+        "size": 31604570534,
+        "requiresCustomNodes": ["ComfyUI-WanVideoWrapper", "comfyui_fill-nodes"],
+        "vram": 31604570534,
+        "usage": 449
       }
     ]
   },
@@ -333,18 +397,101 @@
     "title": "Image",
     "type": "image",
     "templates": [
+      {
+        "name": "image_z_image_turbo",
+        "title": "Z-Image-Turbo Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "An Efficient Image Generation Foundation Model with Single-Stream Diffusion Transformer, supports English & Chinese.",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Z-Image-Turbo"],
+        "date": "2025-11-27",
+        "size": 20862803640,
+        "vram": 20862803640,
+        "usage": 27801
+      },
+      {
+        "name": "image_z_image_turbo_fun_union_controlnet",
+        "title": "Z-Image-Turbo Fun Union ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Multi-control ControlNet supporting Canny, HED, Depth, Pose, and MLSD for Z-Image-Turbo.",
+        "tags": ["Image", "ControlNet"],
+        "models": ["Z-Image-Turbo"],
+        "date": "2025-12-02",
+        "size": 23794118820,
+        "vram": 23794118820,
+        "usage": 3859
+      },
+      {
+        "name": "image_qwen_image_edit_2511",
+        "title": "Qwen Image Edit 2511 - Material Replacement",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Replace materials in objects (e.g., furniture) by combining reference images with Qwen-Image-Edit-2511.",
+        "tags": ["Image Edit"],
+        "models": ["Qwen-Image-Edit"],
+        "date": "2025-12-23",
+        "size": 51367808860,
+        "vram": 51367808860
+      },
+      {
+        "name": "image_qwen_image_edit_2509",
+        "title": "Qwen Image Edit 2509",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+        "tags": ["Image to Image", "Image Edit", "ControlNet"],
+        "models": ["Qwen-Image"],
+        "date": "2025-09-25",
+        "size": 31772020572,
+        "vram": 31772020572,
+        "usage": 9323
+      },
+      {
+        "name": "image_qwen_image_edit_2509_relight",
+        "title": "Image Relight",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Relight images using Qwen-Image-Edit with LoRA support.",
+        "tags": ["Image Edit", "Relight"],
+        "models": ["Qwen-Image-Edit"],
+        "date": "2025-12-15",
+        "size": 31772020572,
+        "vram": 31772020572,
+        "usage": 192
+      },
       {
         "name": "image_flux2",
         "title": "Flux.2 Dev",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "thumbnailVariant": "compareSlider",
-        "description": "Generate
+        "description": "Generate photorealistic images with multi-reference consistency and professional text rendering.",
         "tags": ["Text to Image", "Image", "Image Edit"],
-        "models": ["Flux.2 Dev", "BFL"],
+        "models": ["Flux.2 Dev", "BFL", "Flux"],
+        "date": "2025-11-26",
+        "size": 71781788416,
+        "vram": 71781788416,
+        "usage": 9538
+      },
+      {
+        "name": "image_flux2_text_to_image",
+        "title": "Flux.2 Dev Text to Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Text-to-image with enhanced lighting, materials, and realistic details.",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Flux.2 Dev", "BFL", "Flux"],
         "date": "2025-11-26",
         "size": 71382356459,
-        "vram":
+        "vram": 71382356459,
+        "usage": 4002
       },
       {
         "name": "image_flux2_fp8",
@@ -352,40 +499,38 @@
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Create product mockups by applying design patterns to packaging, mugs, and other products using multi-reference consistency.",
-        "tags": [
-
-          "Image",
-          "Image Edit",
-          "Mockup",
-          "Product Design"
-        ],
-        "models": ["Flux.2 Dev", "BFL"],
+        "tags": ["Text to Image", "Image", "Image Edit", "Mockup", "Product"],
+        "models": ["Flux.2 Dev", "BFL", "Flux"],
         "date": "2025-11-26",
         "size": 53837415055,
-        "vram":
+        "vram": 53837415055,
+        "usage": 436
       },
       {
-        "name": "
-        "title": "
+        "name": "image_qwen_image_layered",
+        "title": "Qwen-Image-Layered Decomposition",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "
-        "tags": ["
-        "models": ["
-        "date": "2025-
-        "size":
+        "description": "Decompose an image into editable RGBA layers for high-fidelity recolor, replace, resize, and reposition workflows using Qwen-Image-Layered.",
+        "tags": ["Layer Decompose"],
+        "models": ["Qwen-Image-Layered"],
+        "date": "2025-12-22",
+        "size": 50446538375,
+        "vram": 50446538375
       },
       {
         "name": "image_qwen_image",
         "title": "Qwen-Image Text to Image",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model
+        "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model.",
         "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
         "tags": ["Text to Image", "Image"],
         "models": ["Qwen-Image"],
         "date": "2025-08-05",
-        "size": 31772020572
+        "size": 31772020572,
+        "vram": 31772020572,
+        "usage": 1143
       },
       {
         "name": "image_qwen_image_instantx_controlnet",
@@ -397,7 +542,9 @@
         "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
         "models": ["Qwen-Image"],
         "date": "2025-08-23",
-        "size": 35304631173
+        "size": 35304631173,
+        "vram": 35304631173,
+        "usage": 472
       },
       {
         "name": "image_qwen_image_instantx_inpainting_controlnet",
@@ -410,23 +557,28 @@
         "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
         "models": ["Qwen-Image"],
         "date": "2025-09-12",
-        "size": 36013300777
+        "size": 36013300777,
+        "vram": 36013300777,
+        "usage": 515
       },
       {
         "name": "image_qwen_image_union_control_lora",
         "title": "Qwen-Image Union Control",
         "mediaType": "image",
         "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
         "description": "Generate images with precise structural control using Qwen-Image's unified ControlNet LoRA. Supports multiple control types including canny, depth, lineart, softedge, normal, and openpose for diverse creative applications.",
         "tags": ["Text to Image", "Image", "ControlNet"],
         "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
         "models": ["Qwen-Image"],
         "date": "2025-08-23",
-        "size": 32716913377
+        "size": 32716913377,
+        "vram": 32716913377,
+        "usage": 340
       },
       {
         "name": "image_qwen_image_controlnet_patch",
-        "title": "Qwen-Image ControlNet
+        "title": "Qwen-Image ControlNet Model Patch",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "thumbnailVariant": "compareSlider",
@@ -435,1924 +587,2255 @@
         "tags": ["Text to Image", "Image", "ControlNet"],
         "models": ["Qwen-Image"],
         "date": "2025-08-24",
-        "size": 34037615821
+        "size": 34037615821,
+        "vram": 34037615821,
+        "usage": 218
       },
       {
-        "name": "
-        "title": "
+        "name": "api_nano_banana_pro",
+        "title": "Nano Banana Pro",
+        "description": "Nano-banana Pro (Gemini 3.0 Pro Image) - Studio-quality 4K image generation and editing with enhanced text rendering and character consistency.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "thumbnailVariant": "
-        "
-        "
-        "
-        "
-        "
-        "
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["Image Edit", "Image", "API"],
+        "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google"],
+        "date": "2025-11-21",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 6749
       },
       {
-        "name": "
-        "title": "
+        "name": "api_from_photo_2_miniature",
+        "title": "Photo to Blueprint to Model",
+        "description": "Transform real building photos into architectural blueprints and then into detailed physical scale models. A complete architectural visualization pipeline from photo to miniature.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "tags": ["Image Edit", "Image", "3D"],
+        "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google"],
+        "date": "2025-11-21",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 288
       },
       {
-        "name": "
-        "title": "
+        "name": "api_openai_fashion_billboard_generator",
+        "title": "Fashion Billboard Generator",
+        "description": "**Transform clothing photos into professional mall billboard advertisements featuring realistic fashion models.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
+        "tags": ["Image Edit", "Image", "API", "Fashion", "Mockup"],
+        "models": ["GPT-Image-1.5", "OpenAI"],
+        "date": "2025-12-18",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 50
       },
       {
-        "name": "
-        "title": "
+        "name": "api_bytedance_seedream4",
+        "title": "ByteDance Seedream 4.0",
+        "description": "Multi-modal AI model for text-to-image and image editing. Generate 2K images in under 2 seconds with natural language control.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "vram": 19327352832
+        "tags": ["Image Edit", "Image", "API", "Text to Image"],
+        "models": ["Seedream 4.0", "ByteDance"],
+        "date": "2025-09-11",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 2117
       },
       {
-        "name": "
-        "title": "
+        "name": "api_bfl_flux2_max_sofa_swap",
+        "title": "FLUX.2 [max]: Object Swap",
+        "description": "Replace objects in images with unmatched quality using FLUX.2 [max]. Perfect for product photography, furniture swaps, and maintaining scene consistency with highest editing precision.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "tags": ["
-        "models": ["
-        "date": "2025-
-        "
-        "
+        "thumbnailVariant": "compareSlider",
+        "tags": ["Image Edit", "Image", "API"],
+        "models": ["Flux2", "Flux", "BFL"],
+        "date": "2025-12-22",
+        "searchRank": 7,
+        "openSource": false,
+        "size": 0,
+        "vram": 0
       },
       {
-        "name": "
-        "title": "
+        "name": "api_google_gemini_image",
+        "title": "Nano Banana",
+        "description": "Nano-banana (Gemini-2.5-Flash Image) - image editing with consistency.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "size":
+        "tags": ["Image Edit", "Image", "API", "Text to Image"],
+        "models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
+        "date": "2025-08-27",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 1657
       },
       {
-        "name": "
-        "title": "
+        "name": "api_flux2",
+        "title": "Flux.2 Pro",
+        "description": "Generate up to 4MP photorealistic images with multi-reference consistency and professional text rendering.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "size":
-        "vram":
+        "tags": ["Image Edit", "Image", "API", "Text to Image"],
+        "models": ["Flux.2", "BFL", "Flux"],
+        "date": "2025-11-26",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 852
       },
       {
-        "name": "
-        "title": "
+        "name": "api_topaz_image_enhance",
+        "title": "Topaz Image Enhance",
+        "description": "Professional image enhancement using Topaz's Reimagine model with face enhancement and detail restoration.",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "thumbnailVariant": "compareSlider",
-        "
-        "
-        "
-        "
-        "size":
-        "vram":
+        "tags": ["Image", "API", "Upscale"],
+        "models": ["Topaz", "Reimagine"],
+        "date": "2025-11-25",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 576
       },
       {
-        "name": "
-        "title": "Flux
+        "name": "api_bfl_flux_1_kontext_multiple_images_input",
+        "title": "BFL Flux.1 Kontext Multiple Image Input",
+        "description": "Input multiple images and edit them with Flux.1 Kontext.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "tutorialUrl": "https://docs.comfy.org/tutorials/
-        "tags": ["
-        "models": ["Flux", "BFL"],
-        "date": "2025-
-        "
-        "
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
+        "tags": ["Image Edit", "Image"],
+        "models": ["Flux", "Kontext", "BFL"],
+        "date": "2025-05-29",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 139
       },
       {
-        "name": "
-        "title": "Flux.1
-        "description": "
-        "thumbnailVariant": "hoverDissolve",
+        "name": "api_bfl_flux_1_kontext_pro_image",
+        "title": "BFL Flux.1 Kontext Pro",
+        "description": "Edit images with Flux.1 Kontext pro image.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
+        "tags": ["Image Edit", "Image"],
+        "models": ["Flux", "Kontext", "BFL"],
+        "date": "2025-05-29",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 403
       },
       {
-        "name": "
-        "title": "Flux
+        "name": "api_bfl_flux_1_kontext_max_image",
+        "title": "BFL Flux.1 Kontext Max",
+        "description": "Edit images with Flux.1 Kontext max image.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "tutorialUrl": "https://docs.comfy.org/tutorials/
-        "tags": ["
-        "models": ["Flux", "BFL"],
-        "date": "2025-
-        "
-        "
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
+        "tags": ["Image Edit", "Image"],
+        "models": ["Flux", "Kontext", "BFL"],
+        "date": "2025-05-29",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 74
       },
       {
-        "name": "
-        "title": "
+        "name": "api_wan_text_to_image",
+        "title": "Wan2.5: Text to Image",
+        "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "tags": ["Text to Image", "Image", "API"],
+        "models": ["Wan2.5", "Wan"],
+        "date": "2025-09-25",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 244
       },
       {
-        "name": "
-        "title": "Flux
+        "name": "api_bfl_flux_pro_t2i",
+        "title": "BFL Flux[Pro]: Text to Image",
+        "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["Text to Image", "Image"],
+        "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
+        "tags": ["Image Edit", "Image"],
         "models": ["Flux", "BFL"],
-        "date": "2025-
-        "
-        "
+        "date": "2025-05-01",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 117
       },
       {
-        "name": "
-        "title": "
+        "name": "api_runway_text_to_image",
+        "title": "Runway: Text to Image",
+        "description": "Generate high-quality images from text prompts using Runway's AI model.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["Text to Image", "Image"],
-        "models": ["Flux", "BFL"],
+        "tags": ["Text to Image", "Image", "API"],
+        "models": ["Runway"],
         "date": "2025-03-01",
-        "
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 37
       },
       {
-        "name": "
-        "title": "
+        "name": "api_runway_reference_to_image",
+        "title": "Runway: Reference to Image",
+        "description": "Generate new images based on reference styles and compositions with Runway's AI.",
         "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Fill missing parts of images using Flux inpainting.",
         "thumbnailVariant": "compareSlider",
-        "
-        "tags": ["Image to Image", "
-        "models": ["
+        "mediaSubtype": "webp",
+        "tags": ["Image to Image", "Image", "API"],
+        "models": ["Runway"],
         "date": "2025-03-01",
-        "
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 115
       },
       {
-        "name": "
-        "title": "
+        "name": "api_stability_ai_stable_image_ultra_t2i",
+        "title": "Stability AI: Stable Image Ultra Text to Image",
+        "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
-        "tags": ["Outpainting", "Image", "Image to Image"],
-        "models": ["Flux", "BFL"],
+        "tags": ["Text to Image", "Image", "API"],
+        "models": ["Stability"],
         "date": "2025-03-01",
-        "
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 27
       },
       {
-        "name": "
-        "title": "
+        "name": "api_stability_ai_i2i",
+        "title": "Stability AI: Image to Image",
+        "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
         "mediaType": "image",
+        "thumbnailVariant": "compareSlider",
         "mediaSubtype": "webp",
-        "
-        "
-        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
-        "tags": ["Image to Image", "ControlNet", "Image"],
-        "models": ["Flux", "BFL"],
+        "tags": ["Image to Image", "Image", "API"],
+        "models": ["Stability"],
         "date": "2025-03-01",
-        "
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 65
       },
       {
-        "name": "
-        "title": "
+        "name": "api_stability_ai_sd3.5_t2i",
+        "title": "Stability AI: SD3.5 Text to Image",
+        "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
-        "tags": ["Image to Image", "ControlNet", "Image"],
-        "models": ["Flux", "BFL"],
+        "tags": ["Text to Image", "Image", "API"],
+        "models": ["Stability"],
         "date": "2025-03-01",
-        "
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 18
       },
       {
-        "name": "
-        "title": "
+        "name": "api_stability_ai_sd3.5_i2i",
+        "title": "Stability AI: SD3.5 Image to Image",
+        "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
         "mediaType": "image",
+        "thumbnailVariant": "compareSlider",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["Image to Image", "ControlNet", "Image"],
-        "models": ["Flux", "BFL"],
+        "tags": ["Image to Image", "Image", "API"],
+        "models": ["Stability"],
         "date": "2025-03-01",
-        "
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 88
       },
       {
-        "name": "
-        "title": "
+        "name": "image_qwen_image_edit",
+        "title": "Qwen Image Edit",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
+        "thumbnailVariant": "compareSlider",
+        "description": "Edit images with precise bilingual text editing and dual semantic/appearance editing capabilities using Qwen-Image-Edit's 20B MMDiT model.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+        "tags": ["Image to Image", "Image Edit"],
+        "models": ["Qwen-Image-Edit"],
+        "date": "2025-08-18",
+        "size": 31772020572,
+        "vram": 31772020572,
+        "usage": 1556
       },
       {
-        "name": "
-        "title": "
+        "name": "image_ovis_text_to_image",
+        "title": "Ovis-Image Text to Image",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "description": "Ovis-Image is a 7B text-to-image model specifically optimized for high-quality text rendering in generated images. Designed to operate efficiently under computational constraints, it excels at accurately generating images containing text content.",
+        "tags": ["Text to Image", "Image"],
+        "models": ["Ovis-Image"],
+        "date": "2025-12-02",
+        "size": 20228222222,
+        "vram": 20228222222,
+        "usage": 1456
       },
       {
-        "name": "
-        "title": "
+        "name": "image_chrono_edit_14B",
+        "title": "ChronoEdit 14B",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["
-        "models": ["
-        "date": "2025-
-        "size":
+        "thumbnailVariant": "compareSlider",
+        "description": "Image editing powered by video models' dynamic understanding, creating physically plausible results while preserving character and style consistency.",
+        "tags": ["Image Edit", "Image to Image"],
+        "models": ["Wan2.1", "ChronoEdit", "Nvidia"],
+        "date": "2025-11-03",
+        "size": 41435696988,
+        "vram": 41435696988,
+        "usage": 611
       },
       {
-        "name": "
-        "title": "
+        "name": "flux_kontext_dev_basic",
+        "title": "Flux Kontext Dev Image Edit",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
+        "thumbnailVariant": "hoverDissolve",
+        "description": "Smart image editing that keeps characters consistent, edits specific parts without affecting others, and preserves original styles.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
+        "tags": ["Image Edit", "Image to Image"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-06-26",
+        "size": 17641578168,
+        "vram": 19327352832,
+        "usage": 866
       },
       {
-        "name": "
-        "title": "
+        "name": "api_luma_photon_i2i",
+        "title": "Luma Photon: Image to Image",
+        "description": "Guide image generation using a combination of images and prompt.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "size":
+        "thumbnailVariant": "compareSlider",
+        "tags": ["Image to Image", "Image", "API"],
+        "models": ["Luma"],
+        "date": "2025-03-01",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 101
       },
       {
-        "name": "
-        "title": "
+        "name": "api_luma_photon_style_ref",
+        "title": "Luma Photon: Style Reference",
+        "description": "Generate images by blending style references with precise control using Luma Photon.",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "thumbnailVariant": "compareSlider",
-        "
-        "
-        "
-        "
-        "
-        "
+        "tags": ["Text to Image", "Image", "API"],
+        "models": ["Luma"],
+        "date": "2025-03-01",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 79
       },
       {
-        "name": "
-        "title": "
+        "name": "api_recraft_image_gen_with_color_control",
+        "title": "Recraft: Color Control Image Generation",
+        "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "tags": ["Text to Image", "Image", "API"],
+        "models": ["Recraft"],
+        "date": "2025-03-01",
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 3
       },
       {
-        "name": "
-        "title": "
+        "name": "api_recraft_image_gen_with_style_control",
+        "title": "Recraft: Style Control Image Generation",
+        "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
-        "
-        "tags": ["Text to Image", "Image"],
-        "models": ["SD3.5", "Stability"],
+        "tags": ["Text to Image", "Image", "API"],
+        "models": ["Recraft"],
         "date": "2025-03-01",
-        "
+        "openSource": false,
+        "size": 0,
+        "vram": 0,
+        "usage": 6
       },
       {
-        "name": "
-        "title": "
+        "name": "api_recraft_vector_gen",
+        "title": "Recraft: Vector Generation",
+        "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
         "mediaType": "image",
         "mediaSubtype": "webp",
-        "
|
-
"
|
|
788
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
789
|
-
"tags": ["Image to Image", "Image", "ControlNet"],
|
|
790
|
-
"models": ["SD3.5", "Stability"],
|
|
994
|
+
"tags": ["Text to Image", "Image", "API", "Vector"],
|
|
995
|
+
"models": ["Recraft"],
|
|
791
996
|
"date": "2025-03-01",
|
|
792
|
-
"
|
|
997
|
+
"openSource": false,
|
|
998
|
+
"size": 0,
|
|
999
|
+
"vram": 0,
|
|
1000
|
+
"usage": 16
|
|
793
1001
|
},
|
|
794
1002
|
{
|
|
795
|
-
"name": "
|
|
796
|
-
"title": "
|
|
1003
|
+
"name": "api_ideogram_v3_t2i",
|
|
1004
|
+
"title": "Ideogram V3: Text to Image",
|
|
1005
|
+
"description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
|
|
797
1006
|
"mediaType": "image",
|
|
798
1007
|
"mediaSubtype": "webp",
|
|
799
|
-
"
|
|
800
|
-
"
|
|
801
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
802
|
-
"tags": ["Image to Image", "Image", "ControlNet"],
|
|
803
|
-
"models": ["SD3.5", "Stability"],
|
|
1008
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
1009
|
+
"models": ["Ideogram"],
|
|
804
1010
|
"date": "2025-03-01",
|
|
805
|
-
"
|
|
1011
|
+
"openSource": false,
|
|
1012
|
+
"size": 0,
|
|
1013
|
+
"vram": 0,
|
|
1014
|
+
"usage": 8
|
|
806
1015
|
},
|
|
807
1016
|
{
|
|
808
|
-
"name": "
|
|
809
|
-
"title": "
|
|
1017
|
+
"name": "api_openai_image_1_t2i",
|
|
1018
|
+
"title": "OpenAI: GPT-Image-1 Text to Image",
|
|
1019
|
+
"description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
|
|
810
1020
|
"mediaType": "image",
|
|
811
1021
|
"mediaSubtype": "webp",
|
|
812
|
-
"
|
|
813
|
-
"
|
|
814
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
815
|
-
"tags": ["Image to Image", "Image"],
|
|
816
|
-
"models": ["SD3.5", "Stability"],
|
|
1022
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
1023
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
817
1024
|
"date": "2025-03-01",
|
|
818
|
-
"
|
|
1025
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1026
|
+
"openSource": false,
|
|
1027
|
+
"size": 0,
|
|
1028
|
+
"vram": 0,
|
|
1029
|
+
"usage": 9
|
|
819
1030
|
},
|
|
820
1031
|
{
|
|
821
|
-
"name": "
|
|
822
|
-
"title": "
|
|
1032
|
+
"name": "api_openai_image_1_i2i",
|
|
1033
|
+
"title": "OpenAI: GPT-Image-1 Image to Image",
|
|
1034
|
+
"description": "Generate images from input images using OpenAI GPT Image 1 API.",
|
|
823
1035
|
"mediaType": "image",
|
|
824
1036
|
"mediaSubtype": "webp",
|
|
825
|
-
"
|
|
826
|
-
"
|
|
827
|
-
"
|
|
828
|
-
"models": ["SDXL", "Stability"],
|
|
1037
|
+
"thumbnailVariant": "compareSlider",
|
|
1038
|
+
"tags": ["Image to Image", "Image", "API"],
|
|
1039
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
829
1040
|
"date": "2025-03-01",
|
|
830
|
-
"
|
|
1041
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1042
|
+
"openSource": false,
|
|
1043
|
+
"size": 0,
|
|
1044
|
+
"vram": 0,
|
|
1045
|
+
"usage": 76
|
|
831
1046
|
},
|
|
832
1047
|
{
|
|
833
|
-
"name": "
|
|
834
|
-
"title": "
|
|
1048
|
+
"name": "api_openai_image_1_inpaint",
|
|
1049
|
+
"title": "OpenAI: GPT-Image-1 Inpaint",
|
|
1050
|
+
"description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
|
|
835
1051
|
"mediaType": "image",
|
|
836
1052
|
"mediaSubtype": "webp",
|
|
837
|
-
"
|
|
838
|
-
"
|
|
839
|
-
"
|
|
840
|
-
"models": ["SDXL", "Stability"],
|
|
1053
|
+
"thumbnailVariant": "compareSlider",
|
|
1054
|
+
"tags": ["Inpainting", "Image", "API"],
|
|
1055
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
841
1056
|
"date": "2025-03-01",
|
|
842
|
-
"
|
|
1057
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1058
|
+
"openSource": false,
|
|
1059
|
+
"size": 0,
|
|
1060
|
+
"vram": 0,
|
|
1061
|
+
"usage": 21
|
|
843
1062
|
},
|
|
844
1063
|
{
|
|
845
|
-
"name": "
|
|
846
|
-
"title": "
|
|
1064
|
+
"name": "api_openai_image_1_multi_inputs",
|
|
1065
|
+
"title": "OpenAI: GPT-Image-1 Multi Inputs",
|
|
1066
|
+
"description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
|
|
847
1067
|
"mediaType": "image",
|
|
848
1068
|
"mediaSubtype": "webp",
|
|
849
|
-
"
|
|
850
|
-
"
|
|
851
|
-
"
|
|
852
|
-
"models": ["SDXL", "Stability"],
|
|
1069
|
+
"thumbnailVariant": "compareSlider",
|
|
1070
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
1071
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
853
1072
|
"date": "2025-03-01",
|
|
854
|
-
"
|
|
1073
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1074
|
+
"openSource": false,
|
|
1075
|
+
"size": 0,
|
|
1076
|
+
"vram": 0,
|
|
1077
|
+
"usage": 5
|
|
855
1078
|
},
|
|
856
1079
|
{
|
|
857
|
-
"name": "
|
|
858
|
-
"title": "
|
|
1080
|
+
"name": "api_openai_dall_e_2_t2i",
|
|
1081
|
+
"title": "OpenAI: Dall-E 2 Text to Image",
|
|
1082
|
+
"description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
|
|
859
1083
|
"mediaType": "image",
|
|
860
1084
|
"mediaSubtype": "webp",
|
|
861
|
-
"
|
|
862
|
-
"
|
|
863
|
-
"tags": ["Text to Image", "Image"],
|
|
864
|
-
"models": ["SDXL", "Stability"],
|
|
1085
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
1086
|
+
"models": ["Dall-E", "OpenAI"],
|
|
865
1087
|
"date": "2025-03-01",
|
|
866
|
-
"
|
|
1088
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-2",
|
|
1089
|
+
"openSource": false,
|
|
1090
|
+
"size": 0,
|
|
1091
|
+
"vram": 0,
|
|
1092
|
+
"usage": 4
|
|
867
1093
|
},
|
|
868
1094
|
{
|
|
869
|
-
"name": "
|
|
870
|
-
"title": "
|
|
1095
|
+
"name": "api_openai_dall_e_2_inpaint",
|
|
1096
|
+
"title": "OpenAI: Dall-E 2 Inpaint",
|
|
1097
|
+
"description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
|
|
871
1098
|
"mediaType": "image",
|
|
872
1099
|
"mediaSubtype": "webp",
|
|
873
1100
|
"thumbnailVariant": "compareSlider",
|
|
874
|
-
"
|
|
875
|
-
"
|
|
876
|
-
"
|
|
877
|
-
"
|
|
878
|
-
"
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
"moduleName": "default",
|
|
884
|
-
"category": "GENERATION TYPE",
|
|
885
|
-
"title": "Video",
|
|
886
|
-
"icon": "icon-[lucide--film]",
|
|
887
|
-
"type": "video",
|
|
888
|
-
"templates": [
|
|
1101
|
+
"tags": ["Inpainting", "Image", "API"],
|
|
1102
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1103
|
+
"date": "2025-03-01",
|
|
1104
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-2",
|
|
1105
|
+
"openSource": false,
|
|
1106
|
+
"size": 0,
|
|
1107
|
+
"vram": 0,
|
|
1108
|
+
"usage": 12
|
|
1109
|
+
},
|
|
889
1110
|
{
|
|
890
|
-
"name": "
|
|
891
|
-
"title": "
|
|
892
|
-
"description": "Generate
|
|
1111
|
+
"name": "api_openai_dall_e_3_t2i",
|
|
1112
|
+
"title": "OpenAI: Dall-E 3 Text to Image",
|
|
1113
|
+
"description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
|
|
893
1114
|
"mediaType": "image",
|
|
894
1115
|
"mediaSubtype": "webp",
|
|
895
|
-
"
|
|
896
|
-
"
|
|
897
|
-
"
|
|
898
|
-
"
|
|
899
|
-
"
|
|
1116
|
+
"tags": ["Text to Image", "Image", "API"],
|
|
1117
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1118
|
+
"date": "2025-03-01",
|
|
1119
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-3",
|
|
1120
|
+
"openSource": false,
|
|
1121
|
+
"size": 0,
|
|
1122
|
+
"vram": 0,
|
|
1123
|
+
"usage": 33
|
|
900
1124
|
},
|
|
901
1125
|
{
|
|
902
|
-
"name": "
|
|
903
|
-
"title": "
|
|
904
|
-
"description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
|
|
1126
|
+
"name": "image_chroma1_radiance_text_to_image",
|
|
1127
|
+
"title": "Chroma1 Radiance Text to Image",
|
|
905
1128
|
"mediaType": "image",
|
|
906
1129
|
"mediaSubtype": "webp",
|
|
907
|
-
"
|
|
908
|
-
"
|
|
909
|
-
"
|
|
910
|
-
"
|
|
911
|
-
"
|
|
912
|
-
"
|
|
1130
|
+
"description": "Chroma1-Radiance works directly with image pixels instead of compressed latents, delivering higher quality images with reduced artifacts and distortion.",
|
|
1131
|
+
"tags": ["Text to Image", "Image"],
|
|
1132
|
+
"models": ["Chroma"],
|
|
1133
|
+
"date": "2025-09-18",
|
|
1134
|
+
"size": 23622320128,
|
|
1135
|
+
"vram": 23622320128,
|
|
1136
|
+
"usage": 1149
|
|
913
1137
|
},
|
|
914
1138
|
{
|
|
915
|
-
"name": "
|
|
916
|
-
"title": "
|
|
917
|
-
"description": "Generate smooth video transitions by defining start and end frames.",
|
|
1139
|
+
"name": "image_chroma_text_to_image",
|
|
1140
|
+
"title": "Chroma Text to Image",
|
|
918
1141
|
"mediaType": "image",
|
|
919
1142
|
"mediaSubtype": "webp",
|
|
920
|
-
"
|
|
921
|
-
"
|
|
922
|
-
"
|
|
923
|
-
"
|
|
924
|
-
"
|
|
925
|
-
"
|
|
1143
|
+
"description": "Chroma - enhanced Flux model with improved image quality and better prompt understanding for stunning text-to-image generation.",
|
|
1144
|
+
"tags": ["Text to Image", "Image"],
|
|
1145
|
+
"models": ["Chroma", "Flux"],
|
|
1146
|
+
"date": "2025-06-04",
|
|
1147
|
+
"size": 23289460163,
|
|
1148
|
+
"vram": 15569256448,
|
|
1149
|
+
"usage": 1423
|
|
926
1150
|
},
|
|
927
1151
|
{
|
|
928
|
-
"name": "
|
|
929
|
-
"title": "
|
|
930
|
-
"description": "Unified character animation and replacement framework with precise motion and expression replication.",
|
|
1152
|
+
"name": "image_newbieimage_exp0_1-t2i",
|
|
1153
|
+
"title": "NewBie Exp0.1: Anime Generation",
|
|
931
1154
|
"mediaType": "image",
|
|
932
1155
|
"mediaSubtype": "webp",
|
|
933
|
-
"
|
|
934
|
-
"tags": ["
|
|
935
|
-
"models": ["
|
|
936
|
-
"date": "2025-
|
|
937
|
-
"size":
|
|
1156
|
+
"description": "Generate detailed anime-style images with NewBie Exp0.1's Next-DiT architecture. Supports XML structured prompts for better multi-character scenes and attribute control.",
|
|
1157
|
+
"tags": ["Text to Image", "Image", "Anime"],
|
|
1158
|
+
"models": ["NewBie"],
|
|
1159
|
+
"date": "2025-12-19",
|
|
1160
|
+
"size": 16181289287,
|
|
1161
|
+
"vram": 16181289287
|
|
938
1162
|
},
|
|
939
1163
|
{
|
|
940
|
-
"name": "
|
|
941
|
-
"title": "
|
|
942
|
-
"description": "Generate high-quality 720p videos from text prompts with cinematic camera control, emotional expressions, and physics simulation. Supports multiple styles including realistic, anime, and 3D with text rendering.",
|
|
1164
|
+
"name": "image_netayume_lumina_t2i",
|
|
1165
|
+
"title": "NetaYume Lumina Text to Image",
|
|
943
1166
|
"mediaType": "image",
|
|
944
1167
|
"mediaSubtype": "webp",
|
|
945
|
-
"
|
|
946
|
-
"
|
|
947
|
-
"
|
|
948
|
-
"
|
|
1168
|
+
"description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
|
|
1169
|
+
"tags": ["Text to Image", "Image", "Anime"],
|
|
1170
|
+
"models": ["OmniGen"],
|
|
1171
|
+
"date": "2025-10-10",
|
|
1172
|
+
"size": 10619306639,
|
|
1173
|
+
"vram": 10619306639,
|
|
1174
|
+
"usage": 1536
|
|
949
1175
|
},
|
|
950
1176
|
{
|
|
951
|
-
"name": "
|
|
952
|
-
"title": "
|
|
953
|
-
"description": "Animate still images into dynamic videos with precise motion and camera control. Maintains visual consistency while bringing photos and illustrations to life with smooth, natural movements.",
|
|
1177
|
+
"name": "image_flux.1_fill_dev_OneReward",
|
|
1178
|
+
"title": "Flux.1 Dev OneReward",
|
|
954
1179
|
"mediaType": "image",
|
|
955
1180
|
"mediaSubtype": "webp",
|
|
956
|
-
"
|
|
957
|
-
"
|
|
958
|
-
"
|
|
959
|
-
"
|
|
1181
|
+
"thumbnailVariant": "compareSlider",
|
|
1182
|
+
"description": "Supports various tasks such as image inpainting, outpainting, and object removal by bytedance-research team",
|
|
1183
|
+
"tags": ["Inpainting", "Outpainting"],
|
|
1184
|
+
"models": ["Flux", "BFL"],
|
|
1185
|
+
"date": "2025-09-21",
|
|
1186
|
+
"size": 29001766666,
|
|
1187
|
+
"vram": 21474836480,
|
|
1188
|
+
"usage": 368
|
|
960
1189
|
},
|
|
961
1190
|
{
|
|
962
|
-
"name": "
|
|
963
|
-
"title": "
|
|
964
|
-
"description": "Transform static images and audio into dynamic videos with perfect synchronization and minute-level generation.",
|
|
1191
|
+
"name": "flux_dev_checkpoint_example",
|
|
1192
|
+
"title": "Flux Dev fp8",
|
|
965
1193
|
"mediaType": "image",
|
|
966
1194
|
"mediaSubtype": "webp",
|
|
967
|
-
"
|
|
968
|
-
"
|
|
969
|
-
"
|
|
970
|
-
"
|
|
971
|
-
"
|
|
1195
|
+
"description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
|
|
1196
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1197
|
+
"tags": ["Text to Image", "Image"],
|
|
1198
|
+
"models": ["Flux", "BFL"],
|
|
1199
|
+
"date": "2025-03-01",
|
|
1200
|
+
"size": 17244293693,
|
|
1201
|
+
"vram": 18253611008,
|
|
1202
|
+
"usage": 310
|
|
972
1203
|
},
|
|
973
1204
|
{
|
|
974
|
-
"name": "
|
|
975
|
-
"title": "
|
|
976
|
-
"description": "
|
|
1205
|
+
"name": "flux1_dev_uso_reference_image_gen",
|
|
1206
|
+
"title": "Flux.1 Dev USO Reference Image Generation",
|
|
1207
|
+
"description": "Use reference images to control both style and subject - keep your character's face while changing artistic style, or apply artistic styles to new scenes",
|
|
1208
|
+
"thumbnailVariant": "hoverDissolve",
|
|
977
1209
|
"mediaType": "image",
|
|
978
1210
|
"mediaSubtype": "webp",
|
|
979
|
-
"tags": ["
|
|
980
|
-
"models": ["
|
|
981
|
-
"date": "2025-09-
|
|
982
|
-
"
|
|
1211
|
+
"tags": ["Image to Image", "Image"],
|
|
1212
|
+
"models": ["Flux", "BFL"],
|
|
1213
|
+
"date": "2025-09-02",
|
|
1214
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
|
|
1215
|
+
"size": 18597208392,
|
|
1216
|
+
"vram": 19864223744,
|
|
1217
|
+
"usage": 1624
|
|
983
1218
|
},
|
|
984
1219
|
{
|
|
985
|
-
"name": "
|
|
986
|
-
"title": "
|
|
987
|
-
"description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
|
|
1220
|
+
"name": "flux_schnell",
|
|
1221
|
+
"title": "Flux Schnell FP8",
|
|
988
1222
|
"mediaType": "image",
|
|
989
1223
|
"mediaSubtype": "webp",
|
|
990
|
-
"
|
|
991
|
-
"
|
|
992
|
-
"
|
|
993
|
-
"
|
|
994
|
-
"
|
|
1224
|
+
"description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
|
|
1225
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1226
|
+
"tags": ["Text to Image", "Image"],
|
|
1227
|
+
"models": ["Flux", "BFL"],
|
|
1228
|
+
"date": "2025-03-01",
|
|
1229
|
+
"size": 17233556275,
|
|
1230
|
+
"vram": 18253611008,
|
|
1231
|
+
"usage": 99
|
|
995
1232
|
},
|
|
996
1233
|
{
|
|
997
|
-
"name": "
|
|
998
|
-
"title": "
|
|
999
|
-
"description": "Generate videos guided by pose, depth, and edge controls using Wan 2.2 Fun Control.",
|
|
1234
|
+
"name": "flux1_krea_dev",
|
|
1235
|
+
"title": "Flux.1 Krea Dev",
|
|
1000
1236
|
"mediaType": "image",
|
|
1001
1237
|
"mediaSubtype": "webp",
|
|
1002
|
-
"
|
|
1003
|
-
"
|
|
1004
|
-
"
|
|
1005
|
-
"
|
|
1006
|
-
"
|
|
1238
|
+
"description": "A fine-tuned FLUX model pushing photorealism to the max",
|
|
1239
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
|
|
1240
|
+
"tags": ["Text to Image", "Image"],
|
|
1241
|
+
"models": ["Flux", "BFL"],
|
|
1242
|
+
"date": "2025-07-31",
|
|
1243
|
+
"size": 22269405430,
|
|
1244
|
+
"vram": 23085449216,
|
|
1245
|
+
"usage": 1160
|
|
1007
1246
|
},
|
|
1008
1247
|
{
|
|
1009
|
-
"name": "
|
|
1010
|
-
"title": "
|
|
1011
|
-
"description": "Generate videos with camera motion controls including pan, zoom, and rotation using Wan 2.2 Fun Camera Control.",
|
|
1248
|
+
"name": "flux_dev_full_text_to_image",
|
|
1249
|
+
"title": "Flux Dev Full Text to Image",
|
|
1012
1250
|
"mediaType": "image",
|
|
1013
1251
|
"mediaSubtype": "webp",
|
|
1014
|
-
"
|
|
1015
|
-
"
|
|
1016
|
-
"
|
|
1017
|
-
"
|
|
1018
|
-
"
|
|
1252
|
+
"description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
|
|
1253
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1254
|
+
"tags": ["Text to Image", "Image"],
|
|
1255
|
+
"models": ["Flux", "BFL"],
|
|
1256
|
+
"date": "2025-03-01",
|
|
1257
|
+
"size": 34177202258,
|
|
1258
|
+
"vram": 23622320128,
|
|
1259
|
+
"usage": 309
|
|
1019
1260
|
},
|
|
1020
1261
|
{
|
|
1021
|
-
"name": "
|
|
1022
|
-
"title": "
|
|
1023
|
-
"description": "Fast text-to-video and image-to-video generation with 5B parameters. Optimized for rapid prototyping and creative exploration.",
|
|
1262
|
+
"name": "flux_schnell_full_text_to_image",
|
|
1263
|
+
"title": "Flux Schnell Full Text to Image",
|
|
1024
1264
|
"mediaType": "image",
|
|
1025
1265
|
"mediaSubtype": "webp",
|
|
1026
|
-
"
|
|
1027
|
-
"
|
|
1028
|
-
"
|
|
1029
|
-
"
|
|
1266
|
+
"description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
|
|
1267
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1268
|
+
"tags": ["Text to Image", "Image"],
|
|
1269
|
+
"models": ["Flux", "BFL"],
|
|
1270
|
+
"date": "2025-03-01",
|
|
1271
|
+
"size": 34155727421,
|
|
1272
|
+
"vram": 34155727421,
|
|
1273
|
+
"usage": 28
|
|
1030
1274
|
},
|
|
1031
1275
|
{
|
|
1032
|
-
"name": "
|
|
1033
|
-
"title": "
|
|
1034
|
-
"description": "Efficient video inpainting from start and end frames. 5B model delivers quick iterations for testing workflows.",
|
|
1276
|
+
"name": "flux_fill_inpaint_example",
|
|
1277
|
+
"title": "Flux Inpaint",
|
|
1035
1278
|
"mediaType": "image",
|
|
1036
1279
|
"mediaSubtype": "webp",
|
|
1037
|
-
"
|
|
1038
|
-
"
|
|
1039
|
-
"
|
|
1040
|
-
"
|
|
1280
|
+
"description": "Fill missing parts of images using Flux inpainting.",
|
|
1281
|
+
"thumbnailVariant": "compareSlider",
|
|
1282
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
1283
|
+
"tags": ["Image to Image", "Inpainting", "Image"],
|
|
1284
|
+
"models": ["Flux", "BFL"],
|
|
1285
|
+
"date": "2025-03-01",
|
|
1286
|
+
"size": 10372346020,
|
|
1287
|
+
"vram": 10372346020,
|
|
1288
|
+
"usage": 437
|
|
1041
1289
|
},
|
|
1042
1290
|
{
|
|
1043
|
-
"name": "
|
|
1044
|
-
"title": "
|
|
1045
|
-
"description": "Multi-condition video control with pose, depth, and edge guidance. Compact 5B size for experimental development.",
|
|
1291
|
+
"name": "flux_fill_outpaint_example",
|
|
1292
|
+
"title": "Flux Outpaint",
|
|
1046
1293
|
"mediaType": "image",
|
|
1047
1294
|
"mediaSubtype": "webp",
|
|
1048
|
-
"
|
|
1049
|
-
"
|
|
1050
|
-
"
|
|
1051
|
-
"
|
|
1295
|
+
"description": "Extend images beyond boundaries using Flux outpainting.",
|
|
1296
|
+
"thumbnailVariant": "compareSlider",
|
|
1297
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
1298
|
+
"tags": ["Outpainting", "Image", "Image to Image"],
|
|
1299
|
+
"models": ["Flux", "BFL"],
|
|
1300
|
+
"date": "2025-03-01",
|
|
1301
|
+
"size": 10372346020,
|
|
1302
|
+
"vram": 10372346020,
|
|
1303
|
+
"usage": 443
|
|
1052
1304
|
},
|
|
1053
1305
|
{
|
|
1054
|
-
"name": "
|
|
1055
|
-
"title": "
|
|
1056
|
-
"description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
|
|
1306
|
+
"name": "flux_canny_model_example",
|
|
1307
|
+
"title": "Flux Canny Model",
|
|
1057
1308
|
"mediaType": "image",
|
|
1058
1309
|
"mediaSubtype": "webp",
|
|
1059
|
-
"
|
|
1060
|
-
"
|
|
1061
|
-
"
|
|
1062
|
-
"
|
|
1063
|
-
"
|
|
1310
|
+
"description": "Generate images guided by edge detection using Flux Canny.",
|
|
1311
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1312
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1313
|
+
"tags": ["Image to Image", "ControlNet", "Image"],
|
|
1314
|
+
"models": ["Flux", "BFL"],
|
|
1315
|
+
"date": "2025-03-01",
|
|
1316
|
+
"size": 34177202258,
|
|
1317
|
+
"vram": 34177202258,
|
|
1318
|
+
"usage": 109
|
|
1064
1319
|
},
|
|
1065
1320
|
{
|
|
1066
|
-
"name": "
|
|
1067
|
-
"title": "
|
|
1068
|
-
"description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
|
|
1321
|
+
"name": "flux_depth_lora_example",
|
|
1322
|
+
"title": "Flux Depth Lora",
|
|
1069
1323
|
"mediaType": "image",
|
|
1070
1324
|
"mediaSubtype": "webp",
|
|
1071
|
-
"
|
|
1072
|
-
"
|
|
1073
|
-
"
|
|
1074
|
-
"
|
|
1075
|
-
"
|
|
1325
|
+
"description": "Generate images guided by depth information using Flux LoRA.",
|
|
1326
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1327
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1328
|
+
"tags": ["Image to Image", "ControlNet", "Image"],
|
|
1329
|
+
"models": ["Flux", "BFL"],
|
|
1330
|
+
"date": "2025-03-01",
|
|
1331
|
+
"size": 35412005356,
|
|
1332
|
+
"vram": 35412005356,
|
|
1333
|
+
"usage": 223
|
|
1076
1334
|
},
|
|
1077
1335
|
{
|
|
1078
|
-
"name": "
|
|
1079
|
-
"title": "
|
|
1080
|
-
"description": "Generate videos by controlling input videos and reference images using Wan VACE.",
|
|
1336
|
+
"name": "flux_redux_model_example",
|
|
1337
|
+
"title": "Flux Redux Model",
|
|
1081
1338
|
"mediaType": "image",
|
|
1082
1339
|
"mediaSubtype": "webp",
|
|
1083
|
-
"
|
|
1084
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
1085
|
-
"tags": ["
|
|
1086
|
-
"models": ["
|
|
1087
|
-
"date": "2025-
|
|
1088
|
-
"size":
|
|
1340
|
+
"description": "Generate images by transferring style from reference images using Flux Redux.",
|
|
1341
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1342
|
+
"tags": ["Image to Image", "ControlNet", "Image"],
|
|
1343
|
+
"models": ["Flux", "BFL"],
|
|
1344
|
+
"date": "2025-03-01",
|
|
1345
|
+
"size": 35154307318,
|
|
1346
|
+
"vram": 35154307318,
|
|
1347
|
+
"usage": 226
|
|
1089
1348
|
},
|
|
1090
1349
|
{
|
|
1091
|
-
"name": "
|
|
1092
|
-
"title": "
|
|
1093
|
-
"description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
|
|
1350
|
+
"name": "image_omnigen2_t2i",
|
|
1351
|
+
"title": "OmniGen2 Text to Image",
|
|
1094
1352
|
"mediaType": "image",
|
|
1095
1353
|
"mediaSubtype": "webp",
|
|
1096
|
-
"
|
|
1097
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
1098
|
-
"tags": ["
|
|
1099
|
-
"models": ["
|
|
1100
|
-
"date": "2025-
|
|
1101
|
-
"size":
|
|
1354
|
+
"description": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
|
|
1355
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
1356
|
+
"tags": ["Text to Image", "Image"],
|
|
1357
|
+
"models": ["OmniGen"],
|
|
1358
|
+
"date": "2025-06-30",
|
|
1359
|
+
"size": 15784004813,
|
|
1360
|
+
"vram": 15784004813,
|
|
1361
|
+
"usage": 165
|
|
1102
1362
|
},
|
|
1103
1363
|
{
|
|
1104
|
-
"name": "
|
|
1105
|
-
"title": "
|
|
1106
|
-
"description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
|
|
1364
|
+
"name": "image_omnigen2_image_edit",
|
|
1365
|
+
"title": "OmniGen2 Image Edit",
|
|
1107
1366
|
"mediaType": "image",
|
|
1108
1367
|
"mediaSubtype": "webp",
|
|
1109
|
-
"
|
|
1110
|
-
"
|
|
1111
|
-
"
|
|
1112
|
-
"
|
|
1113
|
-
"
|
|
1368
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1369
|
+
"description": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
|
|
1370
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
1371
|
+
"tags": ["Image Edit", "Image"],
|
|
1372
|
+
"models": ["OmniGen"],
|
|
1373
|
+
"date": "2025-06-30",
|
|
1374
|
+
"size": 15784004813,
|
|
1375
|
+
"vram": 15784004813,
|
|
1376
|
+
"usage": 145
|
|
1114
1377
|
},
|
|
1115
1378
|
{
|
|
1116
|
-
"name": "
|
|
1117
|
-
"title": "
|
|
1118
|
-
"description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
|
|
1379
|
+
"name": "hidream_i1_dev",
|
|
1380
|
+
"title": "HiDream I1 Dev",
|
|
1119
1381
|
"mediaType": "image",
|
|
1120
1382
|
"mediaSubtype": "webp",
|
|
1121
|
-
"
|
|
1122
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
1123
|
-
"tags": ["
|
|
1124
|
-
"models": ["
|
|
1125
|
-
"date": "2025-
|
|
1126
|
-
"size":
|
|
1383
|
+
"description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
|
|
1384
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1385
|
+
"tags": ["Text to Image", "Image"],
|
|
1386
|
+
"models": ["HiDream"],
|
|
1387
|
+
"date": "2025-04-17",
|
|
1388
|
+
"size": 33318208799,
|
|
1389
|
+
"vram": 33318208799,
|
|
1390
|
+
"usage": 92
|
|
1127
1391
|
},
|
|
1128
1392
|
{
|
|
1129
|
-
"name": "
|
|
1130
|
-
"title": "
|
|
1131
|
-
"description": "Generate text-to-video with alpha channel support for transparent backgrounds and semi-transparent objects.",
|
|
1393
|
+
"name": "hidream_i1_fast",
|
|
1394
|
+
"title": "HiDream I1 Fast",
|
|
1132
1395
|
"mediaType": "image",
|
|
1133
1396
|
"mediaSubtype": "webp",
|
|
1134
|
-
"
|
|
1135
|
-
"
|
|
1136
|
-
"
|
|
1137
|
-
"
|
|
1397
|
+
"description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
|
|
1398
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1399
|
+
"tags": ["Text to Image", "Image"],
|
|
1400
|
+
"models": ["HiDream"],
|
|
1401
|
+
"date": "2025-04-17",
|
|
1402
|
+
"size": 24234352968,
|
|
1403
|
+
"vram": 24234352968,
|
|
1404
|
+
"usage": 41
|
|
1138
1405
|
},
|
|
1139
1406
|
{
|
|
1140
|
-
"name": "
|
|
1141
|
-
"title": "
|
|
1142
|
-
"description": "Trajectory-controlled video generation.",
|
|
1407
|
+
"name": "hidream_i1_full",
|
|
1408
|
+
"title": "HiDream I1 Full",
|
|
1143
1409
|
"mediaType": "image",
|
|
1144
1410
|
"mediaSubtype": "webp",
|
|
1145
|
-
"
|
|
1146
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
1147
|
-
"tags": ["
|
|
1148
|
-
"models": ["
|
|
1149
|
-
"date": "2025-
|
|
1150
|
-
"size":
|
|
1411
|
+
"description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
|
|
1412
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1413
|
+
"tags": ["Text to Image", "Image"],
|
|
1414
|
+
"models": ["HiDream"],
|
|
1415
|
+
"date": "2025-04-17",
|
|
1416
|
+
"size": 24234352968,
|
|
1417
|
+
"vram": 24234352968,
|
|
1418
|
+
"usage": 218
|
|
1151
1419
|
},
|
|
1152
1420
|
{
|
|
1153
|
-
"name": "
|
|
1154
|
-
"title": "
|
|
1155
|
-
"description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
|
|
1421
|
+
"name": "hidream_e1_full",
|
|
1422
|
+
"title": "HiDream E1 Image Edit",
|
|
1156
1423
|
"mediaType": "image",
|
|
1157
1424
|
"mediaSubtype": "webp",
|
|
1158
|
-
"
|
|
1159
|
-
"
|
|
1160
|
-
"
|
|
1161
|
-
"
|
|
1162
|
-
"
|
|
1425
|
+
"thumbnailVariant": "compareSlider",
|
|
1426
|
+
"description": "Edit images with HiDream E1 - Professional natural language image editing model.",
|
|
1427
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
1428
|
+
"tags": ["Image Edit", "Image"],
|
|
1429
|
+
"models": ["HiDream"],
|
|
1430
|
+
"date": "2025-05-01",
|
|
1431
|
+
"size": 34209414513,
|
|
1432
|
+
"vram": 34209414513,
|
|
1433
|
+
"usage": 69
|
|
1163
1434
|
},
|
|
1164
1435
|
{
|
|
1165
|
-
"name": "
|
|
1166
|
-
"title": "
|
|
1167
|
-
"description": "Generate high-quality videos with advanced camera control using the full 14B model",
|
|
1436
|
+
"name": "sd3.5_simple_example",
|
|
1437
|
+
"title": "SD3.5 Simple",
|
|
1168
1438
|
"mediaType": "image",
|
|
1169
1439
|
"mediaSubtype": "webp",
|
|
1170
|
-
"
|
|
1171
|
-
"
|
|
1172
|
-
"
|
|
1173
|
-
"
|
|
1174
|
-
"
|
|
1440
|
+
"description": "Generate images using SD 3.5.",
|
|
1441
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
|
|
1442
|
+
"tags": ["Text to Image", "Image"],
|
|
1443
|
+
"models": ["SD3.5", "Stability"],
|
|
1444
|
+
"date": "2025-03-01",
|
|
1445
|
+
"size": 14935748772,
|
|
1446
|
+
"vram": 14935748772,
|
|
1447
|
+
"usage": 490
|
|
1175
1448
|
},
|
|
1176
1449
|
{
|
|
1177
|
-
"name": "
|
|
1178
|
-
"title": "
|
|
1179
|
-
"description": "Generate videos from text prompts using Wan 2.1.",
|
|
1450
|
+
"name": "sd3.5_large_canny_controlnet_example",
|
|
1451
|
+
"title": "SD3.5 Large Canny ControlNet",
|
|
1180
1452
|
"mediaType": "image",
|
|
1181
1453
|
"mediaSubtype": "webp",
|
|
1182
|
-
"
|
|
1183
|
-
"
|
|
1184
|
-
"
|
|
1454
|
+
"description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
|
|
1455
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1456
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1457
|
+
"tags": ["Image to Image", "Image", "ControlNet"],
|
|
1458
|
+
"models": ["SD3.5", "Stability"],
|
|
1185
1459
|
"date": "2025-03-01",
|
|
1186
|
-
"size":
|
|
1460
|
+
"size": 23590107873,
|
|
1461
|
+
"vram": 23590107873,
|
|
1462
|
+
"usage": 113
|
|
1187
1463
|
},
|
|
1188
1464
|
{
|
|
1189
|
-
"name": "
|
|
1190
|
-
"title": "
|
|
1191
|
-
"description": "Generate videos from images using Wan 2.1.",
|
|
1465
|
+
"name": "sd3.5_large_depth",
|
|
1466
|
+
"title": "SD3.5 Large Depth",
|
|
1192
1467
|
"mediaType": "image",
|
|
1193
1468
|
"mediaSubtype": "webp",
|
|
1194
|
-
"
|
|
1195
|
-
"
|
|
1196
|
-
"
|
|
1469
|
+
"description": "Generate images guided by depth information using SD 3.5.",
|
|
1470
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1471
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1472
|
+
"tags": ["Image to Image", "Image", "ControlNet"],
|
|
1473
|
+
"models": ["SD3.5", "Stability"],
|
|
1197
1474
|
"date": "2025-03-01",
|
|
1198
|
-
"size":
|
|
1475
|
+
"size": 23590107873,
|
|
1476
|
+
"vram": 23590107873,
|
|
1477
|
+
"usage": 95
|
|
1199
1478
|
},
|
|
1200
1479
|
{
|
|
1201
|
-
"name": "
|
|
1202
|
-
"title": "
|
|
1203
|
-
"description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
|
|
1204
|
-
"mediaType": "image",
|
|
1205
|
-
"mediaSubtype": "webp",
|
|
1206
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
|
|
1207
|
-
"tags": ["Inpainting", "Video"],
|
|
1208
|
-
"models": ["Wan2.1", "Wan"],
|
|
1209
|
-
"date": "2025-04-15",
|
|
1210
|
-
"size": 11381663334
|
|
1211
|
-
},
|
|
1212
|
-
{
|
|
1213
|
-
"name": "wan2.1_fun_control",
|
|
1214
|
-
"title": "Wan 2.1 ControlNet",
|
|
1215
|
-
"description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
|
|
1480
|
+
"name": "sd3.5_large_blur",
|
|
1481
|
+
"title": "SD3.5 Large Blur",
|
|
1216
1482
|
"mediaType": "image",
|
|
1217
1483
|
"mediaSubtype": "webp",
|
|
1484
|
+
"description": "Generate images guided by blurred reference images using SD 3.5.",
|
|
1218
1485
|
"thumbnailVariant": "hoverDissolve",
|
|
1219
|
-
"tutorialUrl": "https://
|
|
1220
|
-
"tags": ["
|
|
1221
|
-
"models": ["
|
|
1222
|
-
"date": "2025-04-15",
|
|
1223
|
-
"size": 11381663334
|
|
1224
|
-
},
|
|
1225
|
-
{
|
|
1226
|
-
"name": "wan2.1_flf2v_720_f16",
|
|
1227
|
-
"title": "Wan 2.1 FLF2V 720p F16",
|
|
1228
|
-
"description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
|
|
1229
|
-
"mediaType": "image",
|
|
1230
|
-
"mediaSubtype": "webp",
|
|
1231
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
1232
|
-
"tags": ["FLF2V", "Video"],
|
|
1233
|
-
"models": ["Wan2.1", "Wan"],
|
|
1234
|
-
"date": "2025-04-15",
|
|
1235
|
-
"size": 41049149932
|
|
1236
|
-
},
|
|
1237
|
-
{
|
|
1238
|
-
"name": "ltxv_text_to_video",
|
|
1239
|
-
"title": "LTXV Text to Video",
|
|
1240
|
-
"mediaType": "image",
|
|
1241
|
-
"mediaSubtype": "webp",
|
|
1242
|
-
"description": "Generate videos from text prompts.",
|
|
1243
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1244
|
-
"tags": ["Text to Video", "Video"],
|
|
1245
|
-
"models": ["LTXV"],
|
|
1486
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1487
|
+
"tags": ["Image to Image", "Image"],
|
|
1488
|
+
"models": ["SD3.5", "Stability"],
|
|
1246
1489
|
"date": "2025-03-01",
|
|
1247
|
-
"size":
|
|
1490
|
+
"size": 23590107873,
|
|
1491
|
+
"vram": 23590107873,
|
|
1492
|
+
"usage": 38
|
|
1248
1493
|
},
|
|
1249
1494
|
{
|
|
1250
|
-
"name": "
|
|
1251
|
-
"title": "
|
|
1495
|
+
"name": "sdxl_simple_example",
|
|
1496
|
+
"title": "SDXL Simple",
|
|
1252
1497
|
"mediaType": "image",
|
|
1253
1498
|
"mediaSubtype": "webp",
|
|
1254
|
-
"description": "Generate
|
|
1255
|
-
"tutorialUrl": "https://
|
|
1256
|
-
"tags": ["
|
|
1257
|
-
"models": ["
|
|
1499
|
+
"description": "Generate high-quality images using SDXL.",
|
|
1500
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
1501
|
+
"tags": ["Text to Image", "Image"],
|
|
1502
|
+
"models": ["SDXL", "Stability"],
|
|
1258
1503
|
"date": "2025-03-01",
|
|
1259
|
-
"size":
|
|
1504
|
+
"size": 13013750907,
|
|
1505
|
+
"vram": 13013750907,
|
|
1506
|
+
"usage": 278
|
|
1260
1507
|
},
|
|
1261
1508
|
{
|
|
1262
|
-
"name": "
|
|
1263
|
-
"title": "
|
|
1509
|
+
"name": "sdxl_refiner_prompt_example",
|
|
1510
|
+
"title": "SDXL Refiner Prompt",
|
|
1264
1511
|
"mediaType": "image",
|
|
1265
1512
|
"mediaSubtype": "webp",
|
|
1266
|
-
"description": "
|
|
1267
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1268
|
-
"tags": ["Text to
|
|
1269
|
-
"models": ["
|
|
1513
|
+
"description": "Enhance SDXL images using refiner models.",
|
|
1514
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
1515
|
+
"tags": ["Text to Image", "Image"],
|
|
1516
|
+
"models": ["SDXL", "Stability"],
|
|
1270
1517
|
"date": "2025-03-01",
|
|
1271
|
-
"size":
|
|
1518
|
+
"size": 13013750907,
|
|
1519
|
+
"vram": 13013750907,
|
|
1520
|
+
"usage": 59
|
|
1272
1521
|
},
|
|
1273
1522
|
{
|
|
1274
|
-
"name": "
|
|
1275
|
-
"title": "
|
|
1523
|
+
"name": "sdxl_revision_text_prompts",
|
|
1524
|
+
"title": "SDXL Revision Text Prompts",
|
|
1276
1525
|
"mediaType": "image",
|
|
1277
1526
|
"mediaSubtype": "webp",
|
|
1278
|
-
"description": "Generate
|
|
1279
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1280
|
-
"tags": ["Text to
|
|
1281
|
-
"models": ["
|
|
1527
|
+
"description": "Generate images by transferring concepts from reference images using SDXL Revision.",
|
|
1528
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
|
|
1529
|
+
"tags": ["Text to Image", "Image"],
|
|
1530
|
+
"models": ["SDXL", "Stability"],
|
|
1282
1531
|
"date": "2025-03-01",
|
|
1283
|
-
"size":
|
|
1532
|
+
"size": 10630044058,
|
|
1533
|
+
"vram": 10630044058,
|
|
1534
|
+
"usage": 67
|
|
1284
1535
|
},
|
|
1285
1536
|
{
|
|
1286
|
-
"name": "
|
|
1287
|
-
"title": "
|
|
1537
|
+
"name": "sdxlturbo_example",
|
|
1538
|
+
"title": "SDXL Turbo",
|
|
1288
1539
|
"mediaType": "image",
|
|
1289
1540
|
"mediaSubtype": "webp",
|
|
1290
|
-
"description": "Generate
|
|
1291
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1292
|
-
"tags": ["
|
|
1293
|
-
"models": ["
|
|
1541
|
+
"description": "Generate images in a single step using SDXL Turbo.",
|
|
1542
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
|
|
1543
|
+
"tags": ["Text to Image", "Image"],
|
|
1544
|
+
"models": ["SDXL", "Stability"],
|
|
1294
1545
|
"date": "2025-03-01",
|
|
1295
|
-
"size":
|
|
1546
|
+
"size": 6936372183,
|
|
1547
|
+
"vram": 6936372183,
|
|
1548
|
+
"usage": 452
|
|
1296
1549
|
},
|
|
1297
1550
|
{
|
|
1298
|
-
"name": "
|
|
1299
|
-
"title": "
|
|
1551
|
+
"name": "image_lotus_depth_v1_1",
|
|
1552
|
+
"title": "Lotus Depth",
|
|
1300
1553
|
"mediaType": "image",
|
|
1301
1554
|
"mediaSubtype": "webp",
|
|
1302
|
-
"
|
|
1303
|
-
"
|
|
1304
|
-
"tags": ["Text to
|
|
1305
|
-
"models": ["
|
|
1306
|
-
"date": "2025-
|
|
1307
|
-
"size":
|
|
1555
|
+
"thumbnailVariant": "compareSlider",
|
|
1556
|
+
"description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention.",
|
|
1557
|
+
"tags": ["Image", "Text to Image"],
|
|
1558
|
+
"models": ["SD1.5", "Stability"],
|
|
1559
|
+
"date": "2025-05-21",
|
|
1560
|
+
"size": 2072321720,
|
|
1561
|
+
"vram": 2072321720,
|
|
1562
|
+
"usage": 79
|
|
1308
1563
|
}
|
|
1309
1564
|
]
|
|
1310
1565
|
},
|
|
1311
1566
|
{
|
|
1312
1567
|
"moduleName": "default",
|
|
1313
1568
|
"category": "GENERATION TYPE",
|
|
1314
|
-
"
|
|
1315
|
-
"
|
|
1316
|
-
"type": "
|
|
1569
|
+
"title": "Video",
|
|
1570
|
+
"icon": "icon-[lucide--film]",
|
|
1571
|
+
"type": "video",
|
|
1317
1572
|
"templates": [
|
|
1318
1573
|
{
|
|
1319
|
-
"name": "
|
|
1320
|
-
"title": "
|
|
1321
|
-
"
|
|
1322
|
-
"
|
|
1323
|
-
"
|
|
1324
|
-
"
|
|
1325
|
-
"
|
|
1326
|
-
"
|
|
1327
|
-
"
|
|
1328
|
-
"size":
|
|
1574
|
+
"name": "video_wan2_2_14B_t2v",
|
|
1575
|
+
"title": "Wan 2.2 14B Text to Video",
|
|
1576
|
+
"description": "Generate high-quality videos from text prompts with cinematic aesthetic control and dynamic motion generation using Wan 2.2.",
|
|
1577
|
+
"mediaType": "image",
|
|
1578
|
+
"mediaSubtype": "webp",
|
|
1579
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1580
|
+
"tags": ["Text to Video", "Video"],
|
|
1581
|
+
"models": ["Wan2.2", "Wan"],
|
|
1582
|
+
"date": "2025-07-29",
|
|
1583
|
+
"size": 38031935406,
|
|
1584
|
+
"vram": 38031935406,
|
|
1585
|
+
"usage": 2369
|
|
1329
1586
|
},
|
|
1330
1587
|
{
|
|
1331
|
-
"name": "
|
|
1332
|
-
"title": "
|
|
1333
|
-
"
|
|
1334
|
-
"
|
|
1335
|
-
"
|
|
1336
|
-
"
|
|
1337
|
-
"
|
|
1338
|
-
"
|
|
1339
|
-
"
|
|
1340
|
-
"
|
|
1588
|
+
"name": "video_wan2_2_14B_i2v",
|
|
1589
|
+
"title": "Wan 2.2 14B Image to Video",
|
|
1590
|
+
"description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
|
|
1591
|
+
"mediaType": "image",
|
|
1592
|
+
"mediaSubtype": "webp",
|
|
1593
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1594
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1595
|
+
"tags": ["Image to Video", "Video"],
|
|
1596
|
+
"models": ["Wan2.2", "Wan"],
|
|
1597
|
+
"date": "2025-07-29",
|
|
1598
|
+
"size": 38031935406,
|
|
1599
|
+
"vram": 38031935406,
|
|
1600
|
+
"usage": 10317
|
|
1341
1601
|
},
|
|
1342
1602
|
{
|
|
1343
|
-
"name": "
|
|
1344
|
-
"title": "
|
|
1345
|
-
"
|
|
1346
|
-
"
|
|
1347
|
-
"
|
|
1348
|
-
"
|
|
1349
|
-
"
|
|
1350
|
-
"
|
|
1351
|
-
"
|
|
1352
|
-
"
|
|
1603
|
+
"name": "video_wan2_2_14B_flf2v",
|
|
1604
|
+
"title": "Wan 2.2 14B First-Last Frame to Video",
|
|
1605
|
+
"description": "Generate smooth video transitions by defining start and end frames.",
|
|
1606
|
+
"mediaType": "image",
|
|
1607
|
+
"mediaSubtype": "webp",
|
|
1608
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1609
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1610
|
+
"tags": ["FLF2V", "Video"],
|
|
1611
|
+
"models": ["Wan2.2", "Wan"],
|
|
1612
|
+
"date": "2025-08-02",
|
|
1613
|
+
"size": 38031935406,
|
|
1614
|
+
"vram": 38031935406,
|
|
1615
|
+
"usage": 1585
|
|
1353
1616
|
},
|
|
1354
1617
|
{
|
|
1355
|
-
"name": "
|
|
1356
|
-
"title": "
|
|
1357
|
-
"
|
|
1358
|
-
"
|
|
1359
|
-
"
|
|
1360
|
-
"
|
|
1361
|
-
"
|
|
1362
|
-
"
|
|
1363
|
-
"
|
|
1364
|
-
"size":
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
{
|
|
1369
|
-
"moduleName": "default",
|
|
1370
|
-
"category": "GENERATION TYPE",
|
|
1371
|
-
"icon": "icon-[lucide--box]",
|
|
1372
|
-
"title": "3D Model",
|
|
1373
|
-
"type": "3d",
|
|
1374
|
-
"templates": [
|
|
1618
|
+
"name": "video_wan2_2_14B_animate",
|
|
1619
|
+
"title": "Wan2.2 Animate, Character Animation and Replacement",
|
|
1620
|
+
"description": "Unified character animation and replacement framework with precise motion and expression replication.",
|
|
1621
|
+
"mediaType": "image",
|
|
1622
|
+
"mediaSubtype": "webp",
|
|
1623
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
|
|
1624
|
+
"tags": ["Video", "Image to Video"],
|
|
1625
|
+
"models": ["Wan2.2", "Wan"],
|
|
1626
|
+
"date": "2025-09-22",
|
|
1627
|
+
"size": 27417997476,
|
|
1628
|
+
"vram": 27417997476,
|
|
1629
|
+
"usage": 2141
|
|
1630
|
+
},
|
|
1375
1631
|
{
|
|
1376
|
-
"name": "
|
|
1377
|
-
"title": "
|
|
1632
|
+
"name": "video_hunyuan_video_1.5_720p_t2v",
|
|
1633
|
+
"title": "Hunyuan Video 1.5 Text to Video",
|
|
1634
|
+
"description": "Generate high-quality 720p videos from text prompts with cinematic camera control, emotional expressions, and physics simulation. Supports multiple styles including realistic, anime, and 3D with text rendering.",
|
|
1378
1635
|
"mediaType": "image",
|
|
1379
1636
|
"mediaSubtype": "webp",
|
|
1380
|
-
"
|
|
1381
|
-
"
|
|
1382
|
-
"
|
|
1383
|
-
"
|
|
1384
|
-
"
|
|
1385
|
-
"
|
|
1637
|
+
"tags": ["Text to Video", "Video"],
|
|
1638
|
+
"models": ["Hunyuan Video"],
|
|
1639
|
+
"date": "2025-11-21",
|
|
1640
|
+
"size": 45384919416,
|
|
1641
|
+
"vram": 45384919416,
|
|
1642
|
+
"usage": 451
|
|
1386
1643
|
},
|
|
1387
1644
|
{
|
|
1388
|
-
"name": "
|
|
1389
|
-
"title": "
|
|
1645
|
+
"name": "video_hunyuan_video_1.5_720p_i2v",
|
|
1646
|
+
"title": "Hunyuan Video 1.5 Image to Video",
|
|
1647
|
+
"description": "Animate still images into dynamic videos with precise motion and camera control. Maintains visual consistency while bringing photos and illustrations to life with smooth, natural movements.",
|
|
1390
1648
|
"mediaType": "image",
|
|
1391
1649
|
"mediaSubtype": "webp",
|
|
1392
|
-
"
|
|
1393
|
-
"
|
|
1394
|
-
"
|
|
1395
|
-
"
|
|
1396
|
-
"
|
|
1397
|
-
"
|
|
1650
|
+
"tags": ["Image to Video", "Video"],
|
|
1651
|
+
"models": ["Hunyuan Video"],
|
|
1652
|
+
"date": "2025-11-21",
|
|
1653
|
+
"size": 45384919416,
|
|
1654
|
+
"vram": 45384919416,
|
|
1655
|
+
"usage": 2150
|
|
1398
1656
|
},
|
|
1399
1657
|
{
|
|
1400
|
-
"name": "
|
|
1401
|
-
"title": "
|
|
1658
|
+
"name": "video_kandinsky5_i2v",
|
|
1659
|
+
"title": "Kandinsky 5.0 Video Lite Image to Video",
|
|
1660
|
+
"description": "A lightweight 2B model that generates videos from English and Russian prompts with high visual quality.",
|
|
1402
1661
|
"mediaType": "image",
|
|
1403
1662
|
"mediaSubtype": "webp",
|
|
1404
|
-
"
|
|
1405
|
-
"
|
|
1406
|
-
"
|
|
1407
|
-
"
|
|
1408
|
-
"
|
|
1409
|
-
"
|
|
1410
|
-
"size": 4928474972
|
|
1663
|
+
"tags": ["Image to Video", "Video"],
|
|
1664
|
+
"models": ["Kandinsky"],
|
|
1665
|
+
"date": "2025-12-09",
|
|
1666
|
+
"size": 14710262988,
|
|
1667
|
+
"vram": 14710262988,
|
|
1668
|
+
"usage": 1243
|
|
1411
1669
|
},
|
|
1412
1670
|
{
|
|
1413
|
-
"name": "
|
|
1414
|
-
"title": "
|
|
1671
|
+
"name": "video_kandinsky5_t2v",
|
|
1672
|
+
"title": "Kandinsky 5.0 Video Lite Text to Video",
|
|
1673
|
+
"description": "A lightweight 2B model that generates videos from English and Russian prompts with high visual quality.",
|
|
1415
1674
|
"mediaType": "image",
|
|
1416
1675
|
"mediaSubtype": "webp",
|
|
1417
|
-
"
|
|
1418
|
-
"
|
|
1419
|
-
"
|
|
1420
|
-
"
|
|
1421
|
-
"
|
|
1422
|
-
"
|
|
1423
|
-
|
|
1424
|
-
}
|
|
1425
|
-
]
|
|
1426
|
-
},
|
|
1427
|
-
{
|
|
1428
|
-
"moduleName": "default",
|
|
1429
|
-
"category": "CLOSED SOURCE MODELS",
|
|
1430
|
-
"title": "Image API",
|
|
1431
|
-
"icon": "icon-[lucide--hand-coins]",
|
|
1432
|
-
"type": "image",
|
|
1433
|
-
"templates": [
|
|
1676
|
+
"tags": ["Text to Video", "Video"],
|
|
1677
|
+
"models": ["Kandinsky"],
|
|
1678
|
+
"date": "2025-12-09",
|
|
1679
|
+
"size": 14710262988,
|
|
1680
|
+
"vram": 14710262988,
|
|
1681
|
+
"usage": 556
|
|
1682
|
+
},
|
|
1434
1683
|
{
|
|
1435
|
-
"name": "
|
|
1436
|
-
"title": "
|
|
1437
|
-
"description": "
|
|
1684
|
+
"name": "api_kling2_6_i2v",
|
|
1685
|
+
"title": "Kling2.6: Animate Images with Audio",
|
|
1686
|
+
"description": "Transform static images into dynamic videos with synchronized dialogue, singing, sound effects, and ambient audio.",
|
|
1438
1687
|
"mediaType": "image",
|
|
1439
1688
|
"mediaSubtype": "webp",
|
|
1440
|
-
"
|
|
1441
|
-
"
|
|
1442
|
-
"
|
|
1443
|
-
"
|
|
1444
|
-
"OpenSource": false,
|
|
1689
|
+
"tags": ["Image to Video", "Video", "API", "Audio"],
|
|
1690
|
+
"models": ["Kling"],
|
|
1691
|
+
"date": "2025-12-22",
|
|
1692
|
+
"openSource": false,
|
|
1445
1693
|
"size": 0,
|
|
1446
1694
|
"vram": 0
|
|
1447
1695
|
},
|
|
1448
1696
|
{
|
|
1449
|
-
"name": "
|
|
1450
|
-
"title": "
|
|
1451
|
-
"description": "
|
|
1697
|
+
"name": "api_kling2_6_t2v",
|
|
1698
|
+
"title": "Kling2.6: Storytelling Videos with Audio",
|
|
1699
|
+
"description": "Bring your stories to life with videos featuring synchronized dialogue, music, sound effects, and ambient audio from text prompts.",
|
|
1452
1700
|
"mediaType": "image",
|
|
1453
1701
|
"mediaSubtype": "webp",
|
|
1454
|
-
"tags": ["
|
|
1455
|
-
"models": ["
|
|
1456
|
-
"date": "2025-
|
|
1457
|
-
"
|
|
1702
|
+
"tags": ["Text to Video", "Video", "API", "Audio"],
|
|
1703
|
+
"models": ["Kling"],
|
|
1704
|
+
"date": "2025-12-22",
|
|
1705
|
+
"openSource": false,
|
|
1458
1706
|
"size": 0,
|
|
1459
1707
|
"vram": 0
|
|
1460
1708
|
},
|
|
1461
1709
|
{
|
|
1462
|
-
"name": "
|
|
1463
|
-
"title": "
|
|
1464
|
-
"description": "
|
|
1465
|
-
"mediaType": "image",
|
|
1466
|
-
"mediaSubtype": "webp",
|
|
1467
|
-
"tags": ["Image Edit", "Image", "API", "Text to Image"],
|
|
1468
|
-
"models": ["Seedream 4.0", "ByteDance"],
|
|
1469
|
-
"date": "2025-09-11",
|
|
1470
|
-
"OpenSource": false,
|
|
1471
|
-
"size": 0,
|
|
1472
|
-
"vram": 0
|
|
1473
|
-
},
|
|
1474
|
-
{
|
|
1475
|
-
"name": "api_google_gemini_image",
|
|
1476
|
-
"title": "Nano Banana",
|
|
1477
|
-
"description": "Nano-banana (Gemini-2.5-Flash Image) - image editing with consistency.",
|
|
1710
|
+
"name": "api_openai_sora_video",
|
|
1711
|
+
"title": "Sora 2: Text & Image to Video",
|
|
1712
|
+
"description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
|
|
1478
1713
|
"mediaType": "image",
|
|
1479
1714
|
"mediaSubtype": "webp",
|
|
1480
|
-
"tags": ["Image
|
|
1481
|
-
"models": ["
|
|
1482
|
-
"date": "2025-08
|
|
1483
|
-
"
|
|
1715
|
+
"tags": ["Image to Video", "Text to Video", "API"],
|
|
1716
|
+
"models": ["OpenAI"],
|
|
1717
|
+
"date": "2025-10-08",
|
|
1718
|
+
"openSource": false,
|
|
1484
1719
|
"size": 0,
|
|
1485
|
-
"vram": 0
|
|
1720
|
+
"vram": 0,
|
|
1721
|
+
"usage": 765
|
|
1486
1722
|
},
|
|
1487
1723
|
{
|
|
1488
|
-
"name": "
|
|
1489
|
-
"title": "
|
|
1490
|
-
"description": "Generate
|
|
1724
|
+
"name": "api_veo3",
|
|
1725
|
+
"title": "Veo3: Image to Video",
|
|
1726
|
+
"description": "Generate high-quality 8-second videos from text prompts or images using Google's advanced Veo 3 API. Features audio generation, prompt enhancement, and dual model options for speed or quality.",
|
|
1491
1727
|
"mediaType": "image",
|
|
1492
1728
|
"mediaSubtype": "webp",
|
|
1493
|
-
"tags": ["Image
|
|
1494
|
-
"models": ["
|
|
1495
|
-
"date": "2025-
|
|
1496
|
-
"
|
|
1729
|
+
"tags": ["Image to Video", "Text to Video", "API"],
|
|
1730
|
+
"models": ["Veo", "Google"],
|
|
1731
|
+
"date": "2025-03-01",
|
|
1732
|
+
"tutorialUrl": "",
|
|
1733
|
+
"openSource": false,
|
|
1497
1734
|
"size": 0,
|
|
1498
|
-
"vram": 0
|
|
1735
|
+
"vram": 0,
|
|
1736
|
+
"usage": 491
|
|
1499
1737
|
},
|
|
1500
1738
|
{
|
|
1501
|
-
"name": "
|
|
1502
|
-
"title": "Topaz
|
|
1503
|
-
"description": "
|
|
1739
|
+
"name": "api_topaz_video_enhance",
|
|
1740
|
+
"title": "Topaz Video Enhance",
|
|
1741
|
+
"description": "Enhance videos with Topaz AI. Supports resolution upscaling using Starlight (Astra) Fast model and frame interpolation with apo-8 model.",
|
|
1504
1742
|
"mediaType": "image",
|
|
1505
1743
|
"mediaSubtype": "webp",
|
|
1506
1744
|
"thumbnailVariant": "compareSlider",
|
|
1507
|
-
"tags": ["
|
|
1508
|
-
"models": ["Topaz"
|
|
1745
|
+
"tags": ["Video", "API", "Upscale"],
|
|
1746
|
+
"models": ["Topaz"],
|
|
1509
1747
|
"date": "2025-11-25",
|
|
1510
|
-
"
|
|
1748
|
+
"openSource": false,
|
|
1511
1749
|
"size": 0,
|
|
1512
|
-
"vram": 0
|
|
1750
|
+
"vram": 0,
|
|
1751
|
+
"usage": 471
|
|
1513
1752
|
},
|
|
1514
1753
|
{
|
|
1515
|
-
"name": "
|
|
1516
|
-
"title": "
|
|
1517
|
-
"description": "
|
|
1754
|
+
"name": "api_veo2_i2v",
|
|
1755
|
+
"title": "Veo2: Image to Video",
|
|
1756
|
+
"description": "Generate videos from images using Google Veo2 API.",
|
|
1518
1757
|
"mediaType": "image",
|
|
1519
1758
|
"mediaSubtype": "webp",
|
|
1520
|
-
"
|
|
1521
|
-
"
|
|
1522
|
-
"
|
|
1523
|
-
"
|
|
1524
|
-
"
|
|
1525
|
-
"OpenSource": false,
|
|
1759
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1760
|
+
"models": ["Veo", "Google"],
|
|
1761
|
+
"date": "2025-03-01",
|
|
1762
|
+
"tutorialUrl": "",
|
|
1763
|
+
"openSource": false,
|
|
1526
1764
|
"size": 0,
|
|
1527
|
-
"vram": 0
|
|
1765
|
+
"vram": 0,
|
|
1766
|
+
"usage": 61
|
|
1528
1767
|
},
|
|
1529
1768
|
{
|
|
1530
|
-
"name": "
|
|
1531
|
-
"title": "
|
|
1532
|
-
"description": "
|
|
1769
|
+
"name": "api_wan2_6_t2v",
|
|
1770
|
+
"title": "Wan2.6: Text to Video",
|
|
1771
|
+
"description": "Generate high-quality videos from text prompts with enhanced image quality, smoother motion, 1080P resolution support, and improved prompt understanding for natural, professional results.",
|
|
1533
1772
|
"mediaType": "image",
|
|
1534
1773
|
"mediaSubtype": "webp",
|
|
1535
|
-
"
|
|
1536
|
-
"
|
|
1537
|
-
"
|
|
1538
|
-
"
|
|
1539
|
-
"
|
|
1540
|
-
"OpenSource": false,
|
|
1774
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
1775
|
+
"models": ["Wan2.6", "Wan"],
|
|
1776
|
+
"date": "2025-12-20",
|
|
1777
|
+
"tutorialUrl": "",
|
|
1778
|
+
"openSource": false,
|
|
1541
1779
|
"size": 0,
|
|
1542
1780
|
"vram": 0
|
|
1543
1781
|
},
|
|
1544
1782
|
{
|
|
1545
|
-
"name": "
|
|
1546
|
-
"title": "
|
|
1547
|
-
"description": "
|
|
1783
|
+
"name": "api_wan2_6_i2v",
|
|
1784
|
+
"title": "Wan2.6: Image to Video",
|
|
1785
|
+
"description": "Transform images into high-quality videos with enhanced image quality, smoother motion, 1080P resolution support, and natural movement generation for professional results.",
|
|
1548
1786
|
"mediaType": "image",
|
|
1549
1787
|
"mediaSubtype": "webp",
|
|
1550
|
-
"
|
|
1551
|
-
"
|
|
1552
|
-
"
|
|
1553
|
-
"
|
|
1554
|
-
"
|
|
1555
|
-
"OpenSource": false,
|
|
1788
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1789
|
+
"models": ["Wan2.6", "Wan"],
|
|
1790
|
+
"date": "2025-12-20",
|
|
1791
|
+
"tutorialUrl": "",
|
|
1792
|
+
"openSource": false,
|
|
1556
1793
|
"size": 0,
|
|
1557
1794
|
"vram": 0
|
|
1558
1795
|
},
|
|
1559
1796
|
{
|
|
1560
|
-
"name": "
|
|
1561
|
-
"title": "Wan2.5: Text to
|
|
1562
|
-
"description": "Generate
|
|
1797
|
+
"name": "api_wan_text_to_video",
|
|
1798
|
+
"title": "Wan2.5: Text to Video",
|
|
1799
|
+
"description": "Generate videos with synchronized audio, enhanced motion, and superior quality.",
|
|
1563
1800
|
"mediaType": "image",
|
|
1564
1801
|
"mediaSubtype": "webp",
|
|
1565
|
-
"tags": ["
|
|
1802
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1566
1803
|
"models": ["Wan2.5", "Wan"],
|
|
1567
|
-
"date": "2025-09-
|
|
1568
|
-
"
|
|
1804
|
+
"date": "2025-09-27",
|
|
1805
|
+
"tutorialUrl": "",
|
|
1806
|
+
"openSource": false,
|
|
1569
1807
|
"size": 0,
|
|
1570
|
-
"vram": 0
|
|
1808
|
+
"vram": 0,
|
|
1809
|
+
"usage": 167
|
|
1571
1810
|
},
|
|
1572
1811
|
{
|
|
1573
|
-
"name": "
|
|
1574
|
-
"title": "
|
|
1575
|
-
"description": "
|
|
1812
|
+
"name": "api_wan_image_to_video",
|
|
1813
|
+
"title": "Wan2.5: Image to Video",
|
|
1814
|
+
"description": "Transform images into videos with synchronized audio, enhanced motion, and superior quality.",
|
|
1576
1815
|
"mediaType": "image",
|
|
1577
1816
|
"mediaSubtype": "webp",
|
|
1578
|
-
"
|
|
1579
|
-
"
|
|
1580
|
-
"
|
|
1581
|
-
"
|
|
1582
|
-
"
|
|
1817
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1818
|
+
"models": ["Wan2.5", "Wan"],
|
|
1819
|
+
"date": "2025-09-27",
|
|
1820
|
+
"tutorialUrl": "",
|
|
1821
|
+
"openSource": false,
|
|
1583
1822
|
"size": 0,
|
|
1584
|
-
"vram": 0
|
|
1823
|
+
"vram": 0,
|
|
1824
|
+
"usage": 1463
|
|
1585
1825
|
},
|
|
1586
1826
|
{
|
|
1587
|
-
"name": "
|
|
1588
|
-
"title": "
|
|
1589
|
-
"description": "
|
|
1827
|
+
"name": "api_kling_i2v",
|
|
1828
|
+
"title": "Kling: Image to Video",
|
|
1829
|
+
"description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
|
|
1590
1830
|
"mediaType": "image",
|
|
1591
1831
|
"mediaSubtype": "webp",
|
|
1592
|
-
"
|
|
1593
|
-
"
|
|
1594
|
-
"models": ["Luma"],
|
|
1832
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1833
|
+
"models": ["Kling"],
|
|
1595
1834
|
"date": "2025-03-01",
|
|
1596
|
-
"
|
|
1835
|
+
"tutorialUrl": "",
|
|
1836
|
+
"openSource": false,
|
|
1597
1837
|
"size": 0,
|
|
1598
|
-
"vram": 0
|
|
1838
|
+
"vram": 0,
|
|
1839
|
+
"usage": 418
|
|
1599
1840
|
},
|
|
1600
1841
|
{
|
|
1601
|
-
"name": "
|
|
1602
|
-
"title": "
|
|
1603
|
-
"description": "
|
|
1842
|
+
"name": "api_kling_omni_edit_video",
|
|
1843
|
+
"title": "Kling: O1",
|
|
1844
|
+
"description": "Edit videos with natural language commands, featuring video reference mode for quick generation of high-quality style transfers, element additions, and background modifications.",
|
|
1604
1845
|
"mediaType": "image",
|
|
1605
|
-
"mediaSubtype": "webp",
|
|
1606
1846
|
"thumbnailVariant": "compareSlider",
|
|
1607
|
-
"
|
|
1608
|
-
"
|
|
1609
|
-
"
|
|
1610
|
-
"
|
|
1847
|
+
"mediaSubtype": "webp",
|
|
1848
|
+
"tags": ["Video", "API", "Video Editing", "Text to Video", "Image to Video"],
|
|
1849
|
+
"models": ["Kling"],
|
|
1850
|
+
"date": "2025-12-02",
|
|
1851
|
+
"tutorialUrl": "",
|
|
1852
|
+
"openSource": false,
|
|
1611
1853
|
"size": 0,
|
|
1612
|
-
"vram": 0
|
|
1854
|
+
"vram": 0,
|
|
1855
|
+
"usage": 1007
|
|
1613
1856
|
},
|
|
1614
1857
|
{
|
|
1615
|
-
"name": "
|
|
1616
|
-
"title": "
|
|
1617
|
-
"description": "Generate
|
|
1858
|
+
"name": "api_kling_effects",
|
|
1859
|
+
"title": "Kling: Video Effects",
|
|
1860
|
+
"description": "Generate dynamic videos by applying visual effects to images using Kling.",
|
|
1618
1861
|
"mediaType": "image",
|
|
1619
1862
|
"mediaSubtype": "webp",
|
|
1620
|
-
"tags": ["
|
|
1621
|
-
"models": ["
|
|
1863
|
+
"tags": ["Video", "API"],
|
|
1864
|
+
"models": ["Kling"],
|
|
1622
1865
|
"date": "2025-03-01",
|
|
1623
|
-
"
|
|
1866
|
+
"tutorialUrl": "",
|
|
1867
|
+
"openSource": false,
|
|
1624
1868
|
"size": 0,
|
|
1625
|
-
"vram": 0
|
|
1869
|
+
"vram": 0,
|
|
1870
|
+
"usage": 5
|
|
1626
1871
|
},
|
|
1627
1872
|
{
|
|
1628
|
-
"name": "
|
|
1629
|
-
"title": "
|
|
1630
|
-
"description": "
|
|
1873
|
+
"name": "api_kling_flf",
|
|
1874
|
+
"title": "Kling: FLF2V",
|
|
1875
|
+
"description": "Generate videos through controlling the first and last frames.",
|
|
1631
1876
|
"mediaType": "image",
|
|
1632
1877
|
"mediaSubtype": "webp",
|
|
1633
|
-
"tags": ["
|
|
1634
|
-
"models": ["
|
|
1878
|
+
"tags": ["Video", "API", "FLF2V"],
|
|
1879
|
+
"models": ["Kling"],
|
|
1635
1880
|
"date": "2025-03-01",
|
|
1636
|
-
"
|
|
1881
|
+
"tutorialUrl": "",
|
|
1882
|
+
"openSource": false,
|
|
1637
1883
|
"size": 0,
|
|
1638
|
-
"vram": 0
|
|
1884
|
+
"vram": 0,
|
|
1885
|
+
"usage": 167
|
|
1639
1886
|
},
|
|
1640
1887
|
{
|
|
1641
|
-
"name": "
|
|
1642
|
-
"title": "
|
|
1643
|
-
"description": "Generate high-quality
|
|
1888
|
+
"name": "api_vidu_text_to_video",
|
|
1889
|
+
"title": "Vidu: Text to Video",
|
|
1890
|
+
"description": "Generate high-quality 1080p videos from text prompts with adjustable movement amplitude and duration control using Vidu's advanced AI model.",
|
|
1644
1891
|
"mediaType": "image",
|
|
1645
1892
|
"mediaSubtype": "webp",
|
|
1646
|
-
"tags": ["Text to
|
|
1647
|
-
"models": ["
|
|
1648
|
-
"date": "2025-
|
|
1649
|
-
"
|
|
1893
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
1894
|
+
"models": ["Vidu"],
|
|
1895
|
+
"date": "2025-08-23",
|
|
1896
|
+
"tutorialUrl": "",
|
|
1897
|
+
"openSource": false,
|
|
1650
1898
|
"size": 0,
|
|
1651
|
-
"vram": 0
|
|
1899
|
+
"vram": 0,
|
|
1900
|
+
"usage": 8
|
|
1652
1901
|
},
|
|
1653
1902
|
{
|
|
1654
|
-
"name": "
|
|
1655
|
-
"title": "
|
|
1656
|
-
"description": "
|
|
1903
|
+
"name": "api_vidu_image_to_video",
|
|
1904
|
+
"title": "Vidu: Image to Video",
|
|
1905
|
+
"description": "Transform static images into dynamic 1080p videos with precise motion control and customizable movement amplitude using Vidu.",
|
|
1657
1906
|
"mediaType": "image",
|
|
1658
1907
|
"mediaSubtype": "webp",
|
|
1659
|
-
"tags": ["
|
|
1660
|
-
"models": ["
|
|
1661
|
-
"date": "2025-
|
|
1662
|
-
"
|
|
1908
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
1909
|
+
"models": ["Vidu"],
|
|
1910
|
+
"date": "2025-08-23",
|
|
1911
|
+
"tutorialUrl": "",
|
|
1912
|
+
"openSource": false,
|
|
1663
1913
|
"size": 0,
|
|
1664
|
-
"vram": 0
|
|
1914
|
+
"vram": 0,
|
|
1915
|
+
"usage": 62
|
|
1665
1916
|
},
|
|
1666
1917
|
{
|
|
1667
|
-
"name": "
|
|
1668
|
-
"title": "
|
|
1669
|
-
"description": "Generate
|
|
1918
|
+
"name": "api_vidu_reference_to_video",
|
|
1919
|
+
"title": "Vidu: Reference to Video",
|
|
1920
|
+
"description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
|
|
1670
1921
|
"mediaType": "image",
|
|
1671
|
-
"thumbnailVariant": "compareSlider",
|
|
1672
1922
|
"mediaSubtype": "webp",
|
|
1673
|
-
"tags": ["
|
|
1674
|
-
"models": ["
|
|
1675
|
-
"date": "2025-
|
|
1676
|
-
"
|
|
1923
|
+
"tags": ["Video", "Image to Video", "API"],
|
|
1924
|
+
"models": ["Vidu"],
|
|
1925
|
+
"date": "2025-08-23",
|
|
1926
|
+
"tutorialUrl": "",
|
|
1927
|
+
"openSource": false,
|
|
1677
1928
|
"size": 0,
|
|
1678
|
-
"vram": 0
|
|
1929
|
+
"vram": 0,
|
|
1930
|
+
"usage": 69
|
|
1679
1931
|
},
|
|
1680
1932
|
{
|
|
1681
|
-
"name": "
|
|
1682
|
-
"title": "
|
|
1683
|
-
"description": "
|
|
1933
|
+
"name": "api_vidu_start_end_to_video",
|
|
1934
|
+
"title": "Vidu: Start End to Video",
|
|
1935
|
+
"description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
|
|
1684
1936
|
"mediaType": "image",
|
|
1685
1937
|
"mediaSubtype": "webp",
|
|
1686
|
-
"tags": ["
|
|
1687
|
-
"models": ["
|
|
1688
|
-
"date": "2025-
|
|
1689
|
-
"
|
|
1938
|
+
"tags": ["Video", "API", "FLF2V"],
|
|
1939
|
+
"models": ["Vidu"],
|
|
1940
|
+
"date": "2025-08-23",
|
|
1941
|
+
"tutorialUrl": "",
|
|
1942
|
+
"openSource": false,
|
|
1690
1943
|
"size": 0,
|
|
1691
|
-
"vram": 0
|
|
1944
|
+
"vram": 0,
|
|
1945
|
+
"usage": 85
|
|
1692
1946
|
},
|
|
1693
1947
|
{
|
|
1694
|
-
"name": "
|
|
1695
|
-
"title": "
|
|
1696
|
-
"description": "
|
|
1948
|
+
"name": "api_bytedance_text_to_video",
|
|
1949
|
+
"title": "ByteDance: Text to Video",
|
|
1950
|
+
"description": "Generate high-quality videos directly from text prompts using ByteDance's Seedance model. Supports multiple resolutions and aspect ratios with natural motion and cinematic quality.",
|
|
1697
1951
|
"mediaType": "image",
|
|
1698
|
-
"thumbnailVariant": "compareSlider",
|
|
1699
1952
|
"mediaSubtype": "webp",
|
|
1700
|
-
"tags": ["
|
|
1701
|
-
"models": ["
|
|
1702
|
-
"date": "2025-
|
|
1703
|
-
"
|
|
1953
|
+
"tags": ["Video", "API", "Text to Video"],
|
|
1954
|
+
"models": ["ByteDance"],
|
|
1955
|
+
"date": "2025-10-6",
|
|
1956
|
+
"tutorialUrl": "",
|
|
1957
|
+
"openSource": false,
|
|
1704
1958
|
"size": 0,
|
|
1705
|
-
"vram": 0
|
|
1959
|
+
"vram": 0,
|
|
1960
|
+
"usage": 75
|
|
1706
1961
|
},
|
|
1707
1962
|
{
|
|
1708
|
-
"name": "
|
|
1709
|
-
"title": "
|
|
1710
|
-
"description": "
|
|
1963
|
+
"name": "api_bytedance_image_to_video",
|
|
1964
|
+
"title": "ByteDance: Image to Video",
|
|
1965
|
+
"description": "Transform static images into dynamic videos using ByteDance's Seedance model. Analyzes image structure and generates natural motion with consistent visual style and coherent video sequences.",
|
|
1711
1966
|
"mediaType": "image",
|
|
1712
1967
|
"mediaSubtype": "webp",
|
|
1713
|
-
"tags": ["
|
|
1714
|
-
"models": ["
|
|
1715
|
-
"date": "2025-
|
|
1716
|
-
"
|
|
1968
|
+
"tags": ["Video", "API", "Image to Video"],
|
|
1969
|
+
"models": ["ByteDance"],
|
|
1970
|
+
"date": "2025-10-6",
|
|
1971
|
+
"tutorialUrl": "",
|
|
1972
|
+
"openSource": false,
|
|
1717
1973
|
"size": 0,
|
|
1718
|
-
"vram": 0
|
|
1974
|
+
"vram": 0,
|
|
1975
|
+
"usage": 2275
|
|
1719
1976
|
},
|
|
1720
1977
|
{
|
|
1721
|
-
"name": "
|
|
1722
|
-
"title": "
|
|
1723
|
-
"description": "Generate
|
|
1978
|
+
"name": "api_bytedance_flf2v",
|
|
1979
|
+
"title": "ByteDance: Start End to Video",
|
|
1980
|
+
"description": "Generate cinematic video transitions between start and end frames with fluid motion, scene consistency, and professional polish using ByteDance's Seedance model.",
|
|
1724
1981
|
"mediaType": "image",
|
|
1725
|
-
"thumbnailVariant": "compareSlider",
|
|
1726
1982
|
"mediaSubtype": "webp",
|
|
1727
|
-
"tags": ["
|
|
1728
|
-
"models": ["
|
|
1729
|
-
"date": "2025-
|
|
1730
|
-
"
|
|
1983
|
+
"tags": ["Video", "API", "FLF2V"],
|
|
1984
|
+
"models": ["ByteDance"],
|
|
1985
|
+
"date": "2025-10-6",
|
|
1986
|
+
"tutorialUrl": "",
|
|
1987
|
+
"openSource": false,
|
|
1731
1988
|
"size": 0,
|
|
1732
|
-
"vram": 0
|
|
1989
|
+
"vram": 0,
|
|
1990
|
+
"usage": 791
|
|
1733
1991
|
},
|
|
1734
1992
|
{
|
|
1735
|
-
"name": "
|
|
1736
|
-
"title": "
|
|
1737
|
-
"description": "
|
|
1738
|
-
"mediaType": "image",
|
|
1739
|
-
"mediaSubtype": "webp",
|
|
1740
|
-
"tags": ["Text to Image", "Image", "API"],
|
|
1741
|
-
"models": ["Ideogram"],
|
|
1742
|
-
"date": "2025-03-01",
|
|
1743
|
-
"OpenSource": false,
|
|
1744
|
-
"size": 0,
|
|
1745
|
-
"vram": 0
|
|
1746
|
-
},
|
|
1747
|
-
{
|
|
1748
|
-
"name": "api_openai_image_1_t2i",
|
|
1749
|
-
"title": "OpenAI: GPT-Image-1 Text to Image",
|
|
1750
|
-
"description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
|
|
1993
|
+
"name": "video_wan2_2_14B_s2v",
|
|
1994
|
+
"title": "Wan2.2-S2V Audio-Driven Video Generation",
|
|
1995
|
+
"description": "Transform static images and audio into dynamic videos with perfect synchronization and minute-level generation.",
|
|
1751
1996
|
"mediaType": "image",
|
|
1752
1997
|
"mediaSubtype": "webp",
|
|
1753
|
-
"
|
|
1754
|
-
"
|
|
1755
|
-
"
|
|
1756
|
-
"
|
|
1757
|
-
"
|
|
1758
|
-
"
|
|
1759
|
-
"
|
|
1998
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
|
|
1999
|
+
"tags": ["Video"],
|
|
2000
|
+
"models": ["Wan2.2", "Wan"],
|
|
2001
|
+
"date": "2025-08-02",
|
|
2002
|
+
"size": 25254407700,
|
|
2003
|
+
"vram": 25254407700,
|
|
2004
|
+
"usage": 648
|
|
1760
2005
|
},
|
|
1761
2006
|
{
|
|
1762
|
-
"name": "
|
|
1763
|
-
"title": "
|
|
1764
|
-
"description": "Generate
|
|
2007
|
+
"name": "api_ltxv_text_to_video",
|
|
2008
|
+
"title": "LTX-2: Text to Video",
|
|
2009
|
+
"description": "Generate high-quality videos from text prompts using Lightricks LTX-2 with synchronized audio. Supports up to 4K resolution at 50fps with Fast, Pro, and Ultra modes for various production needs.",
|
|
1765
2010
|
"mediaType": "image",
|
|
1766
2011
|
"mediaSubtype": "webp",
|
|
1767
|
-
"
|
|
1768
|
-
"
|
|
1769
|
-
"
|
|
1770
|
-
"
|
|
1771
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
|
|
1772
|
-
"OpenSource": false,
|
|
2012
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
2013
|
+
"models": ["LTX-2", "Lightricks"],
|
|
2014
|
+
"date": "2025-10-28",
|
|
2015
|
+
"openSource": false,
|
|
1773
2016
|
"size": 0,
|
|
1774
|
-
"vram": 0
|
|
2017
|
+
"vram": 0,
|
|
2018
|
+
"usage": 73
|
|
1775
2019
|
},
|
|
1776
2020
|
{
|
|
1777
|
-
"name": "
|
|
1778
|
-
"title": "
|
|
1779
|
-
"description": "
|
|
2021
|
+
"name": "api_ltxv_image_to_video",
|
|
2022
|
+
"title": "LTX-2: Image to Video",
|
|
2023
|
+
"description": "Transform static images into dynamic videos with LTX-2 Pro. Generate cinematic video sequences with natural motion, synchronized audio, and support for up to 4K resolution at 50fps.",
|
|
1780
2024
|
"mediaType": "image",
|
|
1781
2025
|
"mediaSubtype": "webp",
|
|
1782
|
-
"
|
|
1783
|
-
"
|
|
1784
|
-
"
|
|
1785
|
-
"
|
|
1786
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
|
|
1787
|
-
"OpenSource": false,
|
|
2026
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
2027
|
+
"models": ["LTX-2", "Lightricks"],
|
|
2028
|
+
"date": "2025-10-28",
|
|
2029
|
+
"openSource": false,
|
|
1788
2030
|
"size": 0,
|
|
1789
|
-
"vram": 0
|
|
2031
|
+
"vram": 0,
|
|
2032
|
+
"usage": 448
|
|
1790
2033
|
},
|
|
1791
2034
|
{
|
|
1792
|
-
"name": "
|
|
1793
|
-
"title": "
|
|
1794
|
-
"description": "Generate
|
|
2035
|
+
"name": "api_hailuo_minimax_video",
|
|
2036
|
+
"title": "MiniMax: Video",
|
|
2037
|
+
"description": "Generate high-quality videos from text prompts with optional first-frame control using MiniMax Hailuo-02 model. Supports multiple resolutions (768P/1080P) and durations (6/10s) with intelligent prompt optimization.",
|
|
1795
2038
|
"mediaType": "image",
|
|
1796
2039
|
"mediaSubtype": "webp",
|
|
1797
|
-
"
|
|
1798
|
-
"
|
|
1799
|
-
"models": ["GPT-Image-1", "OpenAI"],
|
|
2040
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
2041
|
+
"models": ["MiniMax"],
|
|
1800
2042
|
"date": "2025-03-01",
|
|
1801
|
-
"tutorialUrl": "
|
|
1802
|
-
"
|
|
2043
|
+
"tutorialUrl": "",
|
|
2044
|
+
"openSource": false,
|
|
1803
2045
|
"size": 0,
|
|
1804
|
-
"vram": 0
|
|
2046
|
+
"vram": 0,
|
|
2047
|
+
"usage": 9
|
|
1805
2048
|
},
|
|
1806
2049
|
{
|
|
1807
|
-
"name": "
|
|
1808
|
-
"title": "
|
|
1809
|
-
"description": "Generate
|
|
2050
|
+
"name": "api_hailuo_minimax_t2v",
|
|
2051
|
+
"title": "MiniMax: Text to Video",
|
|
2052
|
+
"description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
|
|
1810
2053
|
"mediaType": "image",
|
|
1811
2054
|
"mediaSubtype": "webp",
|
|
1812
|
-
"tags": ["Text to
|
|
1813
|
-
"models": ["
|
|
2055
|
+
"tags": ["Text to Video", "Video", "API"],
|
|
2056
|
+
"models": ["MiniMax"],
|
|
1814
2057
|
"date": "2025-03-01",
|
|
1815
|
-
"tutorialUrl": "
|
|
1816
|
-
"
|
|
2058
|
+
"tutorialUrl": "",
|
|
2059
|
+
"openSource": false,
|
|
1817
2060
|
"size": 0,
|
|
1818
|
-
"vram": 0
|
|
2061
|
+
"vram": 0,
|
|
2062
|
+
"usage": 1
|
|
1819
2063
|
},
|
|
1820
2064
|
{
|
|
1821
|
-
"name": "
|
|
1822
|
-
"title": "
|
|
1823
|
-
"description": "
|
|
2065
|
+
"name": "api_hailuo_minimax_i2v",
|
|
2066
|
+
"title": "MiniMax: Image to Video",
|
|
2067
|
+
"description": "Generate refined videos from images and text with CGI integration using MiniMax.",
|
|
1824
2068
|
"mediaType": "image",
|
|
1825
2069
|
"mediaSubtype": "webp",
|
|
1826
|
-
"
|
|
1827
|
-
"
|
|
1828
|
-
"models": ["Dall-E", "OpenAI"],
|
|
2070
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
2071
|
+
"models": ["MiniMax"],
|
|
1829
2072
|
"date": "2025-03-01",
|
|
1830
|
-
"tutorialUrl": "
|
|
1831
|
-
"
|
|
2073
|
+
"tutorialUrl": "",
|
|
2074
|
+
"openSource": false,
|
|
1832
2075
|
"size": 0,
|
|
1833
|
-
"vram": 0
|
|
2076
|
+
"vram": 0,
|
|
2077
|
+
"usage": 39
|
|
1834
2078
|
},
|
|
1835
2079
|
{
|
|
1836
|
-
"name": "
|
|
1837
|
-
"title": "
|
|
1838
|
-
"description": "
|
|
2080
|
+
"name": "api_luma_i2v",
|
|
2081
|
+
"title": "Luma: Image to Video",
|
|
2082
|
+
"description": "Take static images and instantly create magical high quality animations.",
|
|
1839
2083
|
"mediaType": "image",
|
|
1840
2084
|
"mediaSubtype": "webp",
|
|
1841
|
-
"tags": ["
|
|
1842
|
-
"models": ["
|
|
2085
|
+
"tags": ["Image to Video", "Video", "API"],
|
|
2086
|
+
"models": ["Luma"],
|
|
1843
2087
|
"date": "2025-03-01",
|
|
1844
|
-
"tutorialUrl": "
|
|
1845
|
-
"
|
|
1846
|
-
"size": 0,
|
|
1847
|
-
"vram": 0
|
|
1848
|
-
}
|
|
1849
|
-
]
|
|
1850
|
-
},
|
|
1851
|
-
{
|
|
1852
|
-
"moduleName": "default",
|
|
1853
|
-
"category": "CLOSED SOURCE MODELS",
|
|
1854
|
-
"title": "Video API",
|
|
1855
|
-
"icon": "icon-[lucide--film]",
|
|
1856
|
-
"type": "video",
|
|
1857
|
-
"templates": [
|
|
1858
|
-
{
|
|
1859
|
-
"name": "api_openai_sora_video",
|
|
1860
|
-
"title": "Sora 2: Text & Image to Video",
|
|
1861
|
-
"description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
|
|
1862
|
-
"mediaType": "image",
|
|
1863
|
-
"mediaSubtype": "webp",
|
|
1864
|
-
"tags": ["Image to Video", "Text to Video", "API"],
|
|
1865
|
-
"models": ["OpenAI"],
|
|
1866
|
-
"date": "2025-10-08",
|
|
1867
|
-
"OpenSource": false,
|
|
2088
|
+
"tutorialUrl": "",
|
|
2089
|
+
"openSource": false,
|
|
1868
2090
|
"size": 0,
|
|
1869
|
-
"vram": 0
|
|
2091
|
+
"vram": 0,
|
|
2092
|
+
"usage": 56
|
|
1870
2093
|
},
|
|
  {
- "name": "
- "title": "
- "description": "
+ "name": "api_luma_t2v",
+ "title": "Luma: Text to Video",
+ "description": "High-quality videos can be generated using simple prompts.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Text to Video", "Video", "API"],
- "models": ["
- "date": "2025-
- "
+ "models": ["Luma"],
+ "date": "2025-03-01",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 3
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "api_moonvalley_text_to_video",
+ "title": "Moonvalley: Text to Video",
+ "description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["
- "models": ["
- "date": "2025-
- "
+ "tags": ["Text to Video", "Video", "API"],
+ "models": ["Moonvalley"],
+ "date": "2025-03-01",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 4
  },
  {
- "name": "
- "title": "
- "description": "Generate videos with
+ "name": "api_moonvalley_image_to_video",
+ "title": "Moonvalley: Image to Video",
+ "description": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Image to Video", "Video", "API"],
- "models": ["
- "date": "2025-
+ "models": ["Moonvalley"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 29
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "api_moonvalley_video_to_video_motion_transfer",
+ "title": "Moonvalley: Motion Transfer",
+ "description": "Apply motion from one video to another.",
  "mediaType": "image",
+ "thumbnailVariant": "hoverDissolve",
  "mediaSubtype": "webp",
- "tags": ["
- "models": ["
- "date": "2025-
+ "tags": ["Video to Video", "Video", "API"],
+ "models": ["Moonvalley"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 22
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "api_moonvalley_video_to_video_pose_control",
+ "title": "Moonvalley: Pose Control",
+ "description": "Apply human pose and movement from one video to another.",
  "mediaType": "image",
+ "thumbnailVariant": "hoverDissolve",
  "mediaSubtype": "webp",
- "tags": ["
- "models": ["
+ "tags": ["Video to Video", "Video", "API"],
+ "models": ["Moonvalley"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 11
  },
  {
- "name": "
- "title": "
- "description": "Generate dynamic videos
+ "name": "api_pixverse_i2v",
+ "title": "PixVerse: Image to Video",
+ "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "API"],
- "models": ["
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["PixVerse"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 25
  },
  {
- "name": "
- "title": "
- "description": "Generate videos
+ "name": "api_pixverse_template_i2v",
+ "title": "PixVerse Templates: Image to Video",
+ "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "
- "models": ["
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["PixVerse"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 16
  },
  {
- "name": "
- "title": "
- "description": "Generate
+ "name": "api_pixverse_t2v",
+ "title": "PixVerse: Text to Video",
+ "description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Text to Video", "Video", "API"],
- "models": ["
- "date": "2025-
+ "models": ["PixVerse"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 3
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "api_runway_gen3a_turbo_image_to_video",
+ "title": "Runway: Gen3a Turbo Image to Video",
+ "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Image to Video", "Video", "API"],
- "models": ["
- "date": "2025-
+ "models": ["Runway"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 38
  },
  {
- "name": "
- "title": "
- "description": "Generate videos
+ "name": "api_runway_gen4_turo_image_to_video",
+ "title": "Runway: Gen4 Turbo Image to Video",
+ "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "
- "models": ["
- "date": "2025-
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Runway"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 97
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "api_runway_first_last_frame",
+ "title": "Runway: First Last Frame to Video",
+ "description": "Generate smooth video transitions between two keyframes with Runway's precision.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Video", "API", "FLF2V"],
- "models": ["
- "date": "2025-
+ "models": ["Runway"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 97
  },
  {
- "name": "
- "title": "
- "description": "Generate
- "mediaType": "image",
- "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "name": "video_wan2_2_14B_fun_inpaint",
+ "title": "Wan 2.2 14B Fun Inp",
+ "description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
+ "tags": ["FLF2V", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-08-12",
+ "size": 38031935406,
+ "vram": 38031935406,
+ "usage": 547
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "video_wan2_2_14B_fun_control",
+ "title": "Wan 2.2 14B Fun Control",
+ "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.2 Fun Control.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
+ "tags": ["Video to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-08-12",
+ "size": 38031935406,
+ "vram": 38031935406,
+ "usage": 305
  },
  {
- "name": "
- "title": "
- "description": "Generate
+ "name": "video_wan2_2_14B_fun_camera",
+ "title": "Wan 2.2 14B Fun Camera Control",
+ "description": "Generate videos with camera motion controls including pan, zoom, and rotation using Wan 2.2 Fun Camera Control.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
+ "tags": ["Video to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-08-17",
+ "size": 40050570035,
+ "vram": 40050570035,
+ "usage": 228
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "video_wan2_2_5B_ti2v",
+ "title": "Wan 2.2 5B Video Generation",
+ "description": "Fast text-to-video and image-to-video generation with 5B parameters. Optimized for rapid prototyping and creative exploration.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-07-29",
+ "size": 18146236826,
+ "vram": 18146236826,
+ "usage": 392
+ },
+ {
+ "name": "video_humo",
+ "title": "HuMo Video Generation",
+ "description": "Generate videos basic on audio, image, and text, keep the character's lip sync.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Video"],
+ "models": ["HuMo"],
+ "date": "2025-09-21",
+ "size": 27895812588,
+ "vram": 27895812588,
+ "usage": 424
+ },
+ {
+ "name": "video_wan2_2_5B_fun_inpaint",
+ "title": "Wan 2.2 5B Fun Inpaint",
+ "description": "Efficient video inpainting from start and end frames. 5B model delivers quick iterations for testing workflows.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-07-29",
+ "size": 18146236826,
+ "vram": 18146236826,
+ "usage": 53
+ },
+ {
+ "name": "video_wan2_2_5B_fun_control",
+ "title": "Wan 2.2 5B Fun Control",
+ "description": "Multi-condition video control with pose, depth, and edge guidance. Compact 5B size for experimental development.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-07-29",
+ "size": 18146236826,
+ "vram": 18146236826,
+ "usage": 110
+ },
+ {
+ "name": "video_wan_vace_14B_t2v",
+ "title": "Wan2.1 VACE Text to Video",
+ "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 162
+ },
+ {
+ "name": "video_wan_vace_14B_ref2v",
+ "title": "Wan2.1 VACE Reference to Video",
+ "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Video", "Image to Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 171
+ },
+ {
+ "name": "video_wan_vace_14B_v2v",
+ "title": "Wan2.1 VACE Control Video",
+ "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "
- "
- "
- "
- "size":
- "vram":
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Video to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 306
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "video_wan_vace_outpainting",
+ "title": "Wan2.1 VACE Outpainting",
+ "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "size":
- "vram":
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Outpainting", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 117
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "video_wan_vace_flf2v",
+ "title": "Wan2.1 VACE First-Last Frame",
+ "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["FLF2V", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 136
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "video_wan_vace_inpainting",
+ "title": "Wan2.1 VACE Inpainting",
+ "description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "size":
- "vram":
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Inpainting", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 261
  },
  {
- "name": "
- "title": "
- "description": "Generate
+ "name": "video_wan2.1_alpha_t2v_14B",
+ "title": "Wan2.1 Alpha T2V",
+ "description": "Generate text-to-video with alpha channel support for transparent backgrounds and semi-transparent objects.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["
- "models": ["
- "date": "2025-
- "
- "
- "
- "vram": 0
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.1", "Wan-Move", "Motion Control", "Wan"],
+ "date": "2025-10-06",
+ "size": 22494891213,
+ "vram": 22494891213,
+ "usage": 162
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "video_wanmove_480p",
+ "title": "Wan-Move Motion-Control Image to Video",
+ "description": "Generate videos from a single image using Wan-Move, with fine-grained point-level motion control via trajectory guidance.",
  "mediaType": "image",
- "thumbnailVariant": "hoverDissolve",
  "mediaSubtype": "webp",
- "tags": ["
- "models": ["
- "date": "2025-
- "
- "
- "
- "vram": 0
+ "tags": ["Image to Video", "Motion Control", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-12-15",
+ "size": 25420837683,
+ "vram": 25420837683,
+ "usage": 176
  },
  {
- "name": "
- "title": "
- "description": "
+ "name": "video_wanmove_480p_hallucination",
+ "title": "WanMove: Daydream Illusion",
+ "description": "Use WanMove to generate dynamic images from trajectories and create video dynamic effects with daydream illusion",
  "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Image to Video", "Motion Control", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-12-15",
+ "size": 25420837683,
+ "vram": 25420837683,
+ "usage": 176,
+ "requiresCustomNodes": ["comfyui_fill-nodes"]
+ },
+ {
+ "name": "video_wan_ati",
+ "title": "Wan2.1 ATI",
+ "description": "Trajectory-controlled Video Generation.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
  "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
+ "tags": ["Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 25393994138,
+ "vram": 25393994138,
+ "usage": 81
+ },
+ {
+ "name": "video_wan2.1_fun_camera_v1.1_1.3B",
+ "title": "Wan 2.1 Fun Camera 1.3B",
+ "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
+ "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
+ "tags": ["Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 11489037517,
+ "vram": 11489037517,
+ "usage": 22
  },
  {
- "name": "
- "title": "
- "description": "Generate high-quality videos
+ "name": "video_wan2.1_fun_camera_v1.1_14B",
+ "title": "Wan 2.1 Fun Camera 14B",
+ "description": "Generate high-quality videos with advanced camera control using the full 14B model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
+ "tags": ["Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 42047729828,
+ "vram": 42047729828,
+ "usage": 48
  },
  {
- "name": "
- "title": "
- "description": "Generate
+ "name": "text_to_video_wan",
+ "title": "Wan 2.1 Text to Video",
+ "description": "Generate videos from text prompts using Wan 2.1.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
  "date": "2025-03-01",
- "
- "
- "
- "vram": 0
+ "size": 9824737690,
+ "vram": 9824737690,
+ "usage": 119
  },
  {
- "name": "
- "title": "
- "description": "Generate
+ "name": "image_to_video_wan",
+ "title": "Wan 2.1 Image to Video",
+ "description": "Generate videos from images using Wan 2.1.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
  "date": "2025-03-01",
- "
- "
- "
- "vram": 0
+ "size": 41049149932,
+ "vram": 41049149932,
+ "usage": 143
  },
  {
- "name": "
- "title": "
- "description": "Generate
+ "name": "wan2.1_fun_inp",
+ "title": "Wan 2.1 Inpainting",
+ "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
+ "tags": ["Inpainting", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 11381663334,
+ "vram": 11381663334,
+ "usage": 13
  },
  {
- "name": "
- "title": "
- "description": "Generate
+ "name": "wan2.1_fun_control",
+ "title": "Wan 2.1 ControlNet",
+ "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
+ "tags": ["Video to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 11381663334,
+ "vram": 11381663334,
+ "usage": 115
+ },
+ {
+ "name": "wan2.1_flf2v_720_f16",
+ "title": "Wan 2.1 FLF2V 720p F16",
+ "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
+ "tags": ["FLF2V", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 41049149932,
+ "vram": 41049149932,
+ "usage": 43
+ },
+ {
+ "name": "ltxv_text_to_video",
+ "title": "LTXV Text to Video",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate videos from text prompts.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
+ "tags": ["Text to Video", "Video"],
+ "models": ["LTXV"],
  "date": "2025-03-01",
- "
- "
- "
- "vram": 0
+ "size": 19155554140,
+ "vram": 19155554140,
+ "usage": 68
  },
  {
- "name": "
- "title": "
- "description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
+ "name": "ltxv_image_to_video",
+ "title": "LTXV Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "description": "Generate videos from still images.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
+ "tags": ["Image to Video", "Video"],
+ "models": ["LTXV"],
  "date": "2025-03-01",
- "
- "
- "
- "vram": 0
+ "size": 19155554140,
+ "vram": 19155554140,
+ "usage": 108
  },
  {
- "name": "
- "title": "
- "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
+ "name": "hunyuan_video_text_to_video",
+ "title": "Hunyuan Video Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "description": "Generate videos from text prompts using Hunyuan model.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Hunyuan Video", "Tencent"],
  "date": "2025-03-01",
- "
- "
- "
- "vram": 0
+ "size": 35476429865,
+ "vram": 35476429865,
+ "usage": 52
  },
  {
- "name": "
- "title": "
- "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
+ "name": "txt_to_image_to_video",
+ "title": "SVD Text to Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "description": "Generate videos by first creating images from text prompts.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
+ "tags": ["Text to Video", "Video"],
+ "models": ["SVD", "Stability"],
+ "date": "2025-03-01",
+ "size": 16492674417,
+ "vram": 16492674417,
+ "usage": 14
+ }
+ ]
+ },
+ {
+ "moduleName": "default",
+ "category": "GENERATION TYPE",
+ "icon": "icon-[lucide--volume-2]",
+ "title": "Audio",
+ "type": "audio",
+ "templates": [
+ {
+ "name": "api_stability_ai_text_to_audio",
+ "title": "Stability AI: Text to Audio",
+ "description": "Generate music from text using Stable Audio 2.5. Create minutes-long tracks in seconds.",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "tags": ["Text to Audio", "Audio", "API"],
+ "date": "2025-09-09",
+ "models": ["Stability", "Stable Audio"],
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 119
+ },
+ {
+ "name": "api_stability_ai_audio_to_audio",
+ "title": "Stability AI: Audio to Audio",
+ "description": "Transform audio into new compositions using Stable Audio 2.5. Upload audio and AI creates complete tracks.",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "tags": ["Audio to Audio", "Audio", "API"],
+ "date": "2025-09-09",
+ "models": ["Stability", "Stable Audio"],
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 67
+ },
+ {
+ "name": "api_stability_ai_audio_inpaint",
+ "title": "Stability AI: Audio Inpainting",
+ "description": "Complete or extend audio tracks using Stable Audio 2.5. Upload audio and AI generates the rest.",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "tags": ["Audio to Audio", "Audio", "API"],
+ "date": "2025-09-09",
+ "models": ["Stability", "Stable Audio"],
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 17
+ },
+ {
+ "name": "audio_stable_audio_example",
+ "title": "Stable Audio",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "description": "Generate audio from text prompts using Stable Audio.",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["Stable Audio", "Stability"],
+ "date": "2025-03-01",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
+ "size": 5744518758,
+ "vram": 5744518758,
+ "usage": 270
+ },
+ {
+ "name": "audio_ace_step_1_t2a_instrumentals",
+ "title": "ACE-Step v1 Text to Instrumentals Music",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "description": "Generate instrumental music from text prompts using ACE-Step v1.",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["ACE-Step"],
+ "date": "2025-03-01",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+ "size": 7698728878,
+ "vram": 7698728878,
+ "usage": 139
+ },
+ {
+ "name": "audio_ace_step_1_t2a_song",
+ "title": "ACE Step v1 Text to Song",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["ACE-Step"],
  "date": "2025-03-01",
- "tutorialUrl": "",
- "
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+ "size": 7698728878,
+ "vram": 7698728878,
+ "usage": 123
  },
2277
2745
|
{
|
|
2278
|
-
"name": "
|
|
2279
|
-
"title": "
|
|
2280
|
-
"
|
|
2746
|
+
"name": "audio_ace_step_1_m2m_editing",
|
|
2747
|
+
"title": "ACE Step v1 M2M Editing",
|
|
2748
|
+
"mediaType": "audio",
|
|
2749
|
+
"mediaSubtype": "mp3",
|
|
2750
|
+
"description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
|
|
2751
|
+
"tags": ["Audio Editing", "Audio"],
|
|
2752
|
+
"models": ["ACE-Step"],
|
|
2753
|
+
"date": "2025-03-01",
|
|
2754
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
2755
|
+
"size": 7698728878,
|
|
2756
|
+
"vram": 7698728878,
|
|
2757
|
+
"usage": 138
|
|
2758
|
+
}
|
|
2759
|
+
]
|
|
2760
|
+
},
|
|
2761
|
+
{
|
|
2762
|
+
"moduleName": "default",
|
|
2763
|
+
"category": "GENERATION TYPE",
|
|
2764
|
+
"icon": "icon-[lucide--box]",
|
|
2765
|
+
"title": "3D Model",
|
|
2766
|
+
"type": "3d",
|
|
2767
|
+
"templates": [
|
|
2768
|
+
{
|
|
2769
|
+
"name": "api_tripo3_0_image_to_model",
|
|
2770
|
+
"title": "Tripo3.0: Image to Model",
|
|
2771
|
+
"description": "Transform images or sketches into 3D models with Tripo 3.0's sharp geometry and production-ready PBR textures.",
|
|
2281
2772
|
"mediaType": "image",
|
|
2282
2773
|
"mediaSubtype": "webp",
|
|
2283
|
-
"tags": ["
|
|
2284
|
-
"models": ["
|
|
2285
|
-
"date": "2025-
|
|
2286
|
-
"
|
|
2287
|
-
"OpenSource": false,
|
|
2774
|
+
"tags": ["Image to Model", "3D", "API"],
|
|
2775
|
+
"models": ["Tripo"],
|
|
2776
|
+
"date": "2025-12-23",
|
|
2777
|
+
"openSource": false,
|
|
2288
2778
|
"size": 0,
|
|
2289
2779
|
"vram": 0
|
|
2290
2780
|
},
|
|
2291
2781
|
{
|
|
2292
|
-
"name": "
|
|
2293
|
-
"title": "
|
|
2294
|
-
"description": "Generate
|
|
2782
|
+
"name": "api_tripo3_0_text_to_model",
|
|
2783
|
+
"title": "Tripo3.0: Text to Model",
|
|
2784
|
+
"description": "Generate precise 3D models from text with Tripo 3.0's ultra-high resolution geometry and realistic PBR materials.",
|
|
2295
2785
|
"mediaType": "image",
|
|
2296
2786
|
"mediaSubtype": "webp",
|
|
2297
|
-
"tags": ["
|
|
2298
|
-
"models": ["
|
|
2299
|
-
"date": "2025-
|
|
2300
|
-
"
|
|
2301
|
-
"OpenSource": false,
|
|
2787
|
+
"tags": ["Text to Model", "3D", "API"],
|
|
2788
|
+
"models": ["Tripo"],
|
|
2789
|
+
"date": "2025-12-23",
|
|
2790
|
+
"openSource": false,
|
|
2302
2791
|
"size": 0,
|
|
2303
2792
|
"vram": 0
|
|
2304
2793
|
},
|
|
2305
2794
|
{
|
|
2306
|
-
"name": "
|
|
2307
|
-
"title": "
|
|
2308
|
-
"description": "
|
|
2795
|
+
"name": "api_tripo_text_to_model",
|
|
2796
|
+
"title": "Tripo: Text to Model",
|
|
2797
|
+
"description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
|
|
2309
2798
|
"mediaType": "image",
|
|
2310
2799
|
"mediaSubtype": "webp",
|
|
2311
|
-
"tags": ["
|
|
2312
|
-
"models": ["
|
|
2800
|
+
"tags": ["Text to Model", "3D", "API"],
|
|
2801
|
+
"models": ["Tripo"],
|
|
2313
2802
|
"date": "2025-03-01",
|
|
2314
2803
|
"tutorialUrl": "",
|
|
2315
|
-
"
|
|
2804
|
+
"openSource": false,
|
|
2316
2805
|
"size": 0,
|
|
2317
|
-
"vram": 0
|
|
2806
|
+
"vram": 0,
|
|
2807
|
+
"usage": 48
|
|
2318
2808
|
},
|
|
2319
2809
|
{
|
|
2320
|
-
"name": "
|
|
2321
|
-
"title": "
|
|
2322
|
-
"description": "Generate
|
|
2810
|
+
"name": "api_tripo_image_to_model",
|
|
2811
|
+
"title": "Tripo: Image to Model",
|
|
2812
|
+
"description": "Generate professional 3D assets from 2D images using Tripo engine.",
|
|
2323
2813
|
"mediaType": "image",
|
|
2324
2814
|
"mediaSubtype": "webp",
|
|
2325
|
-
"tags": ["Image to
|
|
2326
|
-
"models": ["
|
|
2815
|
+
"tags": ["Image to 3D", "3D", "API"],
|
|
2816
|
+
"models": ["Tripo"],
|
|
2327
2817
|
"date": "2025-03-01",
|
|
2328
2818
|
"tutorialUrl": "",
|
|
2329
|
-
"
|
|
2819
|
+
"openSource": false,
|
|
2330
2820
|
"size": 0,
|
|
2331
|
-
"vram": 0
|
|
2821
|
+
"vram": 0,
|
|
2822
|
+
"usage": 50
|
|
2332
2823
|
},
|
|
2333
2824
|
{
|
|
2334
|
-
"name": "
|
|
2335
|
-
"title": "
|
|
2336
|
-
"description": "
|
|
2825
|
+
"name": "api_tripo_multiview_to_model",
|
|
2826
|
+
"title": "Tripo: Multiview to Model",
|
|
2827
|
+
"description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
|
|
2337
2828
|
"mediaType": "image",
|
|
2338
2829
|
"mediaSubtype": "webp",
|
|
2339
|
-
"tags": ["Image to
|
|
2340
|
-
"models": ["
|
|
2830
|
+
"tags": ["Image to 3D", "3D", "API"],
|
|
2831
|
+
"models": ["Tripo"],
|
|
2341
2832
|
"date": "2025-03-01",
|
|
2342
2833
|
"tutorialUrl": "",
|
|
2343
|
-
"
|
|
2834
|
+
"openSource": false,
|
|
2344
2835
|
"size": 0,
|
|
2345
|
-
"vram": 0
|
|
2346
|
-
|
|
2347
|
-
|
|
2348
|
-
},
|
|
2349
|
-
{
|
|
2350
|
-
"moduleName": "default",
|
|
2351
|
-
"category": "CLOSED SOURCE MODELS",
|
|
2352
|
-
"title": "3D API",
|
|
2353
|
-
"icon": "icon-[lucide--box]",
|
|
2354
|
-
"type": "image",
|
|
2355
|
-
"templates": [
|
|
2836
|
+
"vram": 0,
|
|
2837
|
+
"usage": 70
|
|
2838
|
+
},
|
|
2356
2839
|
{
|
|
2357
2840
|
"name": "api_rodin_gen2",
|
|
2358
2841
|
"title": "Rodin: Gen-2 Image to Model",
|
|
@@ -2363,9 +2846,10 @@
|
|
|
2363
2846
|
"models": ["Rodin"],
|
|
2364
2847
|
"date": "2025-09-27",
|
|
2365
2848
|
"tutorialUrl": "",
|
|
2366
|
-
"
|
|
2849
|
+
"openSource": false,
|
|
2367
2850
|
"size": 0,
|
|
2368
|
-
"vram": 0
|
|
2851
|
+
"vram": 0,
|
|
2852
|
+
"usage": 355
|
|
2369
2853
|
},
|
|
2370
2854
|
{
|
|
2371
2855
|
"name": "api_rodin_image_to_model",
|
|
@@ -2377,9 +2861,10 @@
|
|
|
2377
2861
|
"models": ["Rodin"],
|
|
2378
2862
|
"date": "2025-03-01",
|
|
2379
2863
|
"tutorialUrl": "",
|
|
2380
|
-
"
|
|
2864
|
+
"openSource": false,
|
|
2381
2865
|
"size": 0,
|
|
2382
|
-
"vram": 0
|
|
2866
|
+
"vram": 0,
|
|
2867
|
+
"usage": 25
|
|
2383
2868
|
},
|
|
2384
2869
|
{
|
|
2385
2870
|
"name": "api_rodin_multiview_to_model",
|
|
@@ -2391,106 +2876,75 @@
  "models": ["Rodin"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 47
  },
  {
- "name": "
- "title": "
- "description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
+ "name": "3d_hunyuan3d-v2.1",
+ "title": "Hunyuan3D 2.1",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
- "
- "
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 384
  },
  {
- "name": "
- "title": "
- "description": "Generate professional 3D assets from 2D images using Tripo engine.",
+ "name": "3d_hunyuan3d_image_to_model",
+ "title": "Hunyuan3D 2.0",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
- "
- "
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 69
  },
  {
- "name": "
- "title": "
- "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
+ "name": "3d_hunyuan3d_multiview_to_model",
+ "title": "Hunyuan3D 2.0 MV",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
+ "tags": ["3D", "Image to 3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
- "size":
- "vram":
-
- ]
- },
- {
- "moduleName": "default",
- "category": "CLOSED SOURCE MODELS",
- "title": "Audio API",
- "type": "audio",
- "icon": "icon-[lucide--volume-2]",
- "templates": [
- {
- "name": "api_stability_ai_text_to_audio",
- "title": "Stability AI: Text to Audio",
- "description": "Generate music from text using Stable Audio 2.5. Create minutes-long tracks in seconds.",
- "mediaType": "audio",
- "mediaSubtype": "mp3",
- "tags": ["Text to Audio", "Audio", "API"],
- "date": "2025-09-09",
- "models": ["Stability", "Stable Audio"],
- "OpenSource": false,
- "size": 0,
- "vram": 0
- },
- {
- "name": "api_stability_ai_audio_to_audio",
- "title": "Stability AI: Audio to Audio",
- "description": "Transform audio into new compositions using Stable Audio 2.5. Upload audio and AI creates complete tracks.",
- "mediaType": "audio",
- "mediaSubtype": "mp3",
- "tags": ["Audio to Audio", "Audio", "API"],
- "date": "2025-09-09",
- "models": ["Stability", "Stable Audio"],
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "thumbnailVariant": "hoverDissolve",
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 97
  },
  {
- "name": "
- "title": "
- "
- "
- "
- "tags": ["
- "
- "
- "
- "
- "
+ "name": "3d_hunyuan3d_multiview_to_model_turbo",
+ "title": "Hunyuan3D 2.0 MV Turbo",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Hunyuan3D", "Tencent"],
+ "date": "2025-03-01",
+ "tutorialUrl": "",
+ "thumbnailVariant": "hoverDissolve",
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 38
  }
  ]
  },
  {
  "moduleName": "default",
- "category": "
- "title": "LLM
+ "category": "GENERATION TYPE",
+ "title": "LLM",
  "icon": "icon-[lucide--message-square-text]",
  "type": "image",
  "templates": [
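Note that the removed Stability AI entries above carried a mis-cased `"OpenSource"` key, while every entry written by 0.3.61 uses `"openSource"`. The Hunyuan3D entries also gain a `"thumbnailVariant"` hint and concrete `"size"`/`"vram"` figures, which are raw byte counts. A hedged sketch (helper names are mine) of folding the old casing into the new one and formatting the byte counts for display:

```python
def normalize(tpl: dict) -> dict:
    # Older index versions used the mis-cased key seen in the removed
    # Stability AI entries; fold it into the 0.3.61 spelling.
    if "OpenSource" in tpl and "openSource" not in tpl:
        tpl["openSource"] = tpl.pop("OpenSource")
    return tpl

def gib(num_bytes: int) -> str:
    # "size" and "vram" are raw bytes in the diff above.
    return f"{num_bytes / 2**30:.2f} GiB"

# Values copied verbatim from the Hunyuan3D 2.1 entry above.
entry = normalize({"name": "3d_hunyuan3d-v2.1",
                   "size": 4928474972, "vram": 4928474972})
print(entry["name"], "download:", gib(entry["size"]), "vram:", gib(entry["vram"]))
# -> 3d_hunyuan3d-v2.1 download: 4.59 GiB vram: 4.59 GiB
```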
@@ -2504,9 +2958,10 @@
  "models": ["OpenAI"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 35
  },
  {
  "name": "api_google_gemini",
@@ -2518,10 +2973,145 @@
  "models": ["Google Gemini", "Google"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 130
+ }
+ ]
+ },
+ {
+ "moduleName": "default",
+ "isEssential": true,
+ "title": "Getting Started",
+ "type": "image",
+ "templates": [
+ {
+ "name": "gsc_starter_1",
+ "title": "Starter 1 – Text to Image",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Learn how to generate an image, connect nodes, run a workflow and download an image using Z-Image Turbo.",
+ "models": ["Z-Image-Turbo"],
+ "date": "2025-12-10",
+ "searchRank": 3,
+ "includeOnDistributions": ["cloud"],
  "size": 0,
  "vram": 0
+ },
+ {
+ "name": "gsc_starter_2",
+ "title": "Starter 2 – Image to Video",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Learn how to load images, generate a video and how to find a node using Wan 2.2.",
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-12-10",
+ "searchRank": 3,
+ "includeOnDistributions": ["cloud"],
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui_essentials"]
+ },
+ {
+ "name": "gsc_starter_3",
+ "title": "Starter 3 – Product Photography",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Learn how to create a product photography with image inputs, enter a subgraph, unbypass a node and get to know partner nodes using Nano Banana Pro.",
+ "models": ["Nano Banana Pro", "Google"],
+ "date": "2025-12-10",
+ "searchRank": 3,
+ "includeOnDistributions": ["cloud"],
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"]
+ },
+ {
+ "name": "01_get_started_text_to_image",
+ "title": "Text to Image (New)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate images from text prompts using the Z-Image-Turbo model.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/z-image/z-image-turbo",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Z-Image-Turbo"],
+ "date": "2025-10-17",
+ "size": 20862803640,
+ "vram": 20862803640,
+ "usage": 299
+ },
+ {
+ "name": "02_qwen_Image_edit_subgraphed",
+ "title": "Image Editing (New)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Edit your images with Qwen-Image-Edit, the latest OSS model",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+ "tags": ["Image to Image", "Image Edit", "ControlNet"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-10-17",
+ "size": 31772020572,
+ "vram": 31772020572,
+ "usage": 6436
+ },
+ {
+ "name": "03_video_wan2_2_14B_i2v_subgraphed",
+ "title": "Image to Video (New)",
+ "description": "Generate videos from an input image using Wan2.2 14B",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+ "tags": ["Image to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-10-17",
+ "size": 38031935406,
+ "vram": 38031935406,
+ "usage": 4084
+ },
+ {
+ "name": "04_hunyuan_3d_2.1_subgraphed",
+ "title": "Image to 3D (New)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Hunyuan3D"],
+ "date": "2025-10-17",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 152
+ },
+ {
+ "name": "05_audio_ace_step_1_t2a_song_subgraphed",
+ "title": "Text to Song (New)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate songs from text prompts using ACE-Step v1",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["ACE-Step"],
+ "date": "2025-10-17",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+ "size": 7698728878,
+ "vram": 7698728878,
+ "usage": 101
+ },
+ {
+ "name": "default",
+ "title": "Image Generation",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate images from text prompts.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
+ "tags": ["Text-to-Image", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 2136746230,
+ "vram": 3092376453,
+ "status": "active",
+ "usage": 168
  }
  ]
  }
- ]
+ ]
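The new Getting Started category introduces two gating fields: `"includeOnDistributions"` (all three starter entries are limited to "cloud") and `"requiresCustomNodes"`. A sketch of how a front end might apply them, assuming that an absent `includeOnDistributions` means the template shows everywhere (that default is an assumption, not something this diff states; the `installed` set is purely illustrative):

```python
def visible_on(tpl: dict, distribution: str) -> bool:
    # Absent "includeOnDistributions" is read as "show everywhere";
    # the diff only shows the restricted case, so this default is assumed.
    allowed = tpl.get("includeOnDistributions")
    return allowed is None or distribution in allowed

def missing_nodes(tpl: dict, installed: set) -> list:
    # Node packs the entry needs but the current install lacks.
    return [pack for pack in tpl.get("requiresCustomNodes", [])
            if pack not in installed]

# Fields copied from the gsc_starter_3 entry above.
starter = {
    "name": "gsc_starter_3",
    "includeOnDistributions": ["cloud"],
    "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
}
print(visible_on(starter, "cloud"))                    # True
print(visible_on(starter, "desktop"))                  # False
print(missing_nodes(starter, {"comfyui_essentials"}))  # ['comfyui-kjnodes']
```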