comfyui-workflow-templates-media-other 0.3.10__py3-none-any.whl → 0.3.61__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed.json +6 -6
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +81 -60
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1.json +2 -2
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model.json +3 -3
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals.json +4 -4
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example.json +2 -2
- comfyui_workflow_templates_media_other/templates/gsc_starter_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_1.json +839 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_2-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_2.json +7037 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_3-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_3.json +2550 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_full.json +3 -3
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo.json +756 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo_fun_union_controlnet-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo_fun_union_controlnet-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/index.ar.json +2187 -1591
- comfyui_workflow_templates_media_other/templates/index.es.json +2189 -1598
- comfyui_workflow_templates_media_other/templates/index.fr.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.ja.json +2179 -1588
- comfyui_workflow_templates_media_other/templates/index.json +2182 -1592
- comfyui_workflow_templates_media_other/templates/index.ko.json +2179 -1588
- comfyui_workflow_templates_media_other/templates/index.pt-BR.json +3117 -0
- comfyui_workflow_templates_media_other/templates/index.ru.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.schema.json +36 -3
- comfyui_workflow_templates_media_other/templates/index.tr.json +2185 -1589
- comfyui_workflow_templates_media_other/templates/index.zh-TW.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.zh.json +2180 -1589
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur.json +3 -3
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth.json +4 -4
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example.json +181 -40
- comfyui_workflow_templates_media_other/templates/templates-color_illustration-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-color_illustration-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-color_illustration.json +176 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real.json +1195 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16.json +2 -2
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control.json +2 -2
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp.json +2 -2
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/METADATA +1 -1
- comfyui_workflow_templates_media_other-0.3.61.dist-info/RECORD +77 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship.json +0 -1256
- comfyui_workflow_templates_media_other/templates/ByteDance-Seedance_00003_.json +0 -210
- comfyui_workflow_templates_media_other/templates/area_composition-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition.json +0 -1626
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject.json +0 -1114
- comfyui_workflow_templates_media_other/templates/default-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/default.json +0 -547
- comfyui_workflow_templates_media_other/templates/embedding_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/embedding_example.json +0 -267
- comfyui_workflow_templates_media_other/templates/esrgan_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example.json +0 -635
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example.json +0 -686
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1.json +0 -1133
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow.json +0 -1029
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow.json +0 -772
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model.json +0 -929
- comfyui_workflow_templates_media_other/templates/lora-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora.json +0 -615
- comfyui_workflow_templates_media_other/templates/lora_multiple-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple.json +0 -656
- comfyui_workflow_templates_media_other-0.3.10.dist-info/RECORD +0 -92
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/WHEEL +0 -0
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/top_level.txt +0 -0
|
@@ -2,337 +2,470 @@
|
|
|
2
2
|
{
|
|
3
3
|
"moduleName": "default",
|
|
4
4
|
"type": "image",
|
|
5
|
-
"
|
|
6
|
-
"
|
|
5
|
+
"category": "Type de génération",
|
|
6
|
+
"icon": "icon-[lucide--star]",
|
|
7
|
+
"title": "Cas d'utilisation",
|
|
7
8
|
"templates": [
|
|
8
9
|
{
|
|
9
|
-
"name": "
|
|
10
|
-
"title": "
|
|
10
|
+
"name": "templates-color_illustration",
|
|
11
|
+
"title": "Colorer un dessin au trait",
|
|
11
12
|
"mediaType": "image",
|
|
12
13
|
"mediaSubtype": "webp",
|
|
13
|
-
"
|
|
14
|
-
"
|
|
15
|
-
"tags": ["
|
|
16
|
-
"models": ["
|
|
17
|
-
"
|
|
18
|
-
"
|
|
14
|
+
"thumbnailVariant": "compareSlider",
|
|
15
|
+
"description": "Entrez un dessin au trait noir et blanc pour le coloriser.",
|
|
16
|
+
"tags": ["API"],
|
|
17
|
+
"models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
18
|
+
"openSource": false,
|
|
19
|
+
"date": "2025-12-20",
|
|
20
|
+
"size": 0,
|
|
21
|
+
"vram": 0,
|
|
22
|
+
"searchRank": 8
|
|
19
23
|
},
|
|
20
24
|
{
|
|
21
|
-
"name": "
|
|
22
|
-
"title": "
|
|
25
|
+
"name": "templates-image_to_real",
|
|
26
|
+
"title": "Illustration vers réalisme",
|
|
23
27
|
"mediaType": "image",
|
|
24
28
|
"mediaSubtype": "webp",
|
|
25
|
-
"
|
|
26
|
-
"
|
|
27
|
-
"tags": ["
|
|
28
|
-
"models": ["Qwen-Image"],
|
|
29
|
-
"date": "2025-
|
|
30
|
-
"size":
|
|
29
|
+
"thumbnailVariant": "compareSlider",
|
|
30
|
+
"description": "Saisissez une illustration et générez une version hyperréaliste avec Qwen Image Edit 2509.",
|
|
31
|
+
"tags": ["Transfert de style"],
|
|
32
|
+
"models": ["Qwen-Image-Edit"],
|
|
33
|
+
"date": "2025-12-20",
|
|
34
|
+
"size": 0,
|
|
35
|
+
"vram": 0
|
|
31
36
|
},
|
|
32
37
|
{
|
|
33
|
-
"name": "
|
|
34
|
-
"title": "
|
|
35
|
-
"description": "Générez des vidéos à partir d’une image avec Wan2.2 14B",
|
|
38
|
+
"name": "templates-8x8_grid-pfp",
|
|
39
|
+
"title": "Variations stylisées de photo de profil",
|
|
36
40
|
"mediaType": "image",
|
|
37
41
|
"mediaSubtype": "webp",
|
|
38
|
-
"
|
|
39
|
-
"tags": ["
|
|
40
|
-
"models": ["
|
|
41
|
-
"
|
|
42
|
-
"
|
|
42
|
+
"description": "Téléverse ta photo de profil, indique un thème et génère 64 variantes.",
|
|
43
|
+
"tags": ["API"],
|
|
44
|
+
"models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
45
|
+
"openSource": false,
|
|
46
|
+
"date": "2025-12-18",
|
|
47
|
+
"size": 0,
|
|
48
|
+
"vram": 0,
|
|
49
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
50
|
+
"usage": 51,
|
|
51
|
+
"searchRank": 8
|
|
43
52
|
},
|
|
44
53
|
{
|
|
45
|
-
"name": "
|
|
46
|
-
"title": "
|
|
54
|
+
"name": "templates-subject_product_swap",
|
|
55
|
+
"title": "Changer le produit en main (style UGC)",
|
|
47
56
|
"mediaType": "image",
|
|
48
57
|
"mediaSubtype": "webp",
|
|
49
|
-
"description": "
|
|
50
|
-
"tags": ["
|
|
51
|
-
"models": ["
|
|
52
|
-
"
|
|
53
|
-
"
|
|
54
|
-
"size":
|
|
58
|
+
"description": "Téléchargez une photo d'une personne tenant un produit et votre produit de marque. Générez une image avec les produits échangés.",
|
|
59
|
+
"tags": ["Produit", "Replacement", "API"],
|
|
60
|
+
"models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
61
|
+
"openSource": false,
|
|
62
|
+
"date": "2025-12-18",
|
|
63
|
+
"size": 0,
|
|
64
|
+
"vram": 0,
|
|
65
|
+
"usage": 63,
|
|
66
|
+
"searchRank": 8
|
|
55
67
|
},
|
|
56
68
|
{
|
|
57
|
-
"name": "
|
|
58
|
-
"title": "
|
|
69
|
+
"name": "templates-subject_holding_product",
|
|
70
|
+
"title": "Mannequin tenant le produit",
|
|
59
71
|
"mediaType": "image",
|
|
60
72
|
"mediaSubtype": "webp",
|
|
61
|
-
"description": "
|
|
62
|
-
"tags": ["
|
|
63
|
-
"models": ["
|
|
64
|
-
"
|
|
65
|
-
"
|
|
66
|
-
"size":
|
|
73
|
+
"description": "Téléchargez une photo de votre personnage et de votre produit. Générez une image du personnage tenant le produit.",
|
|
74
|
+
"tags": ["Produit", "Portrait", "API"],
|
|
75
|
+
"models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
76
|
+
"openSource": false,
|
|
77
|
+
"date": "2025-12-18",
|
|
78
|
+
"size": 0,
|
|
79
|
+
"vram": 0,
|
|
80
|
+
"usage": 43,
|
|
81
|
+
"searchRank": 8
|
|
67
82
|
},
|
|
68
83
|
{
|
|
69
|
-
"name": "
|
|
70
|
-
"title": "
|
|
84
|
+
"name": "templates-car_product",
|
|
85
|
+
"title": "Image vers vidéo auto",
|
|
71
86
|
"mediaType": "image",
|
|
72
87
|
"mediaSubtype": "webp",
|
|
73
|
-
"description": "
|
|
74
|
-
"
|
|
75
|
-
"
|
|
76
|
-
"
|
|
77
|
-
"date": "2025-
|
|
78
|
-
"size":
|
|
79
|
-
"vram":
|
|
88
|
+
"description": "Chargez une photo de votre véhicule et créez une vidéo multi-angles de qualité.",
|
|
89
|
+
"tags": ["Produit", "Image vers vidéo", "API", "FLF2V"],
|
|
90
|
+
"models": ["Seedream", "Kling"],
|
|
91
|
+
"openSource": false,
|
|
92
|
+
"date": "2025-12-18",
|
|
93
|
+
"size": 0,
|
|
94
|
+
"vram": 0,
|
|
95
|
+
"requiresCustomNodes": ["comfyui-videohelpersuite"],
|
|
96
|
+
"usage": 70,
|
|
97
|
+
"searchRank": 8
|
|
80
98
|
},
|
|
81
99
|
{
|
|
82
|
-
"name": "
|
|
83
|
-
"title": "Image
|
|
100
|
+
"name": "templates-photo_to_product_vid",
|
|
101
|
+
"title": "Image produit en vidéo : Chaussure",
|
|
84
102
|
"mediaType": "image",
|
|
85
103
|
"mediaSubtype": "webp",
|
|
86
|
-
"description": "
|
|
87
|
-
"
|
|
88
|
-
"
|
|
89
|
-
"
|
|
90
|
-
"date": "2025-
|
|
91
|
-
"size":
|
|
92
|
-
"vram":
|
|
93
|
-
"
|
|
104
|
+
"description": "Prenez une photo avec votre téléphone, téléchargez-la et créez une vidéo produit de qualité studio.",
|
|
105
|
+
"tags": ["Produit", "Image vers vidéo", "API"],
|
|
106
|
+
"models": ["Seedream", "Hailuo"],
|
|
107
|
+
"openSource": false,
|
|
108
|
+
"date": "2025-12-18",
|
|
109
|
+
"size": 0,
|
|
110
|
+
"vram": 0,
|
|
111
|
+
"requiresCustomNodes": ["comfyui-videohelpersuite"],
|
|
112
|
+
"usage": 124,
|
|
113
|
+
"searchRank": 8
|
|
94
114
|
},
|
|
95
115
|
{
|
|
96
|
-
"name": "
|
|
97
|
-
"title": "
|
|
116
|
+
"name": "templates-stitched_vid_contact_sheet",
|
|
117
|
+
"title": "Vidéo mode depuis personnage & accessoires",
|
|
98
118
|
"mediaType": "image",
|
|
99
119
|
"mediaSubtype": "webp",
|
|
100
|
-
"description": "
|
|
101
|
-
"
|
|
102
|
-
"
|
|
103
|
-
"
|
|
104
|
-
"date": "2025-
|
|
105
|
-
"size":
|
|
106
|
-
"vram":
|
|
120
|
+
"description": "Importez personnage et vêtements, obtenez photo et vidéo 8x.",
|
|
121
|
+
"tags": ["Fashion", "Image vers vidéo", "FLF2V", "API"],
|
|
122
|
+
"models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Kling", "Kling O1", "OpenAI"],
|
|
123
|
+
"openSource": false,
|
|
124
|
+
"date": "2025-12-18",
|
|
125
|
+
"size": 0,
|
|
126
|
+
"vram": 0,
|
|
127
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
128
|
+
"usage": 78,
|
|
129
|
+
"searchRank": 8
|
|
107
130
|
},
|
|
108
131
|
{
|
|
109
|
-
"name": "
|
|
110
|
-
"title": "
|
|
132
|
+
"name": "templates-assemble_dieline",
|
|
133
|
+
"title": "Générer un emballage de marque à partir du tracé",
|
|
111
134
|
"mediaType": "image",
|
|
112
135
|
"mediaSubtype": "webp",
|
|
113
|
-
"
|
|
114
|
-
"
|
|
115
|
-
"tags": ["
|
|
116
|
-
"models": ["
|
|
117
|
-
"
|
|
118
|
-
"
|
|
119
|
-
"
|
|
136
|
+
"thumbnailVariant": "hoverDissolve",
|
|
137
|
+
"description": "Téléchargez la découpe de votre produit et assemblez un emballage 3D.",
|
|
138
|
+
"tags": ["Produit", "Éd. image"],
|
|
139
|
+
"models": ["Google Gemini Image", "Nano Banana Pro", "Google"],
|
|
140
|
+
"openSource": false,
|
|
141
|
+
"date": "2025-12-15",
|
|
142
|
+
"size": 0,
|
|
143
|
+
"vram": 0,
|
|
144
|
+
"usage": 12,
|
|
145
|
+
"searchRank": 8
|
|
120
146
|
},
|
|
121
147
|
{
|
|
122
|
-
"name": "
|
|
123
|
-
"title": "
|
|
148
|
+
"name": "templates-fashion_shoot_vton",
|
|
149
|
+
"title": "Personnage + vêtement (OOTD) flat lay vers séance studio",
|
|
124
150
|
"mediaType": "image",
|
|
125
151
|
"mediaSubtype": "webp",
|
|
126
|
-
"description": "
|
|
127
|
-
"
|
|
128
|
-
"
|
|
129
|
-
"
|
|
130
|
-
"
|
|
131
|
-
"
|
|
132
|
-
"
|
|
133
|
-
"
|
|
152
|
+
"description": "Téléchargez une photo de personnage et de tenues à plat, obtenez 4 photos mode. Sélectionnez-en une à améliorer et ajoutez des détails.",
|
|
153
|
+
"tags": ["Fashion", "Éd. image"],
|
|
154
|
+
"models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
155
|
+
"openSource": false,
|
|
156
|
+
"date": "2025-12-15",
|
|
157
|
+
"size": 0,
|
|
158
|
+
"vram": 0,
|
|
159
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
160
|
+
"usage": 104,
|
|
161
|
+
"searchRank": 8
|
|
134
162
|
},
|
|
135
163
|
{
|
|
136
|
-
"name": "
|
|
137
|
-
"title": "
|
|
164
|
+
"name": "templates-fashion_shoot_prompt_doodle",
|
|
165
|
+
"title": "Selfie + texte : photos studio avec doodles",
|
|
138
166
|
"mediaType": "image",
|
|
139
167
|
"mediaSubtype": "webp",
|
|
140
|
-
"description": "
|
|
141
|
-
"
|
|
142
|
-
"
|
|
143
|
-
"
|
|
144
|
-
"
|
|
145
|
-
"
|
|
146
|
-
"
|
|
147
|
-
"
|
|
168
|
+
"description": "Télécharge un selfie et décris ta tenue. Génère 4 photos mode doodle, choisis-en une à améliorer.",
|
|
169
|
+
"tags": ["Fashion", "Éd. image"],
|
|
170
|
+
"models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
171
|
+
"openSource": false,
|
|
172
|
+
"date": "2025-12-15",
|
|
173
|
+
"size": 0,
|
|
174
|
+
"vram": 0,
|
|
175
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
176
|
+
"usage": 20,
|
|
177
|
+
"searchRank": 8
|
|
148
178
|
},
|
|
149
179
|
{
|
|
150
|
-
"name": "
|
|
151
|
-
"title": "
|
|
180
|
+
"name": "templates-poster_product_integration",
|
|
181
|
+
"title": "Créer une affiche/pub avec votre produit",
|
|
152
182
|
"mediaType": "image",
|
|
153
183
|
"mediaSubtype": "webp",
|
|
154
|
-
"
|
|
155
|
-
"
|
|
156
|
-
"tags": ["
|
|
157
|
-
"models": ["
|
|
158
|
-
"
|
|
159
|
-
"
|
|
160
|
-
"
|
|
184
|
+
"thumbnailVariant": "compareSlider",
|
|
185
|
+
"description": "Télécharge ton produit et une courte description. Crée une affiche, modifie le design avant d’intégrer le produit.",
|
|
186
|
+
"tags": ["Produit", "Éd. image"],
|
|
187
|
+
"models": ["ByteDance", "Seedream", "Google Gemini"],
|
|
188
|
+
"openSource": false,
|
|
189
|
+
"date": "2025-12-15",
|
|
190
|
+
"size": 0,
|
|
191
|
+
"vram": 0,
|
|
192
|
+
"requiresCustomNodes": ["comfyui_essentials"],
|
|
193
|
+
"usage": 37,
|
|
194
|
+
"searchRank": 8
|
|
161
195
|
},
|
|
162
196
|
{
|
|
163
|
-
"name": "
|
|
164
|
-
"title": "
|
|
197
|
+
"name": "templates-3D_logo_texture_animation",
|
|
198
|
+
"title": "Animations 3D dynamiques de logo",
|
|
165
199
|
"mediaType": "image",
|
|
166
200
|
"mediaSubtype": "webp",
|
|
167
|
-
"description": "
|
|
168
|
-
"
|
|
169
|
-
"
|
|
170
|
-
"
|
|
171
|
-
"date": "2025-
|
|
172
|
-
"size":
|
|
173
|
-
"vram":
|
|
201
|
+
"description": "Télécharge ton logo vectoriel et choisis une texture. Les premiers et derniers plans 3D et l’animation sont générés automatiquement.",
|
|
202
|
+
"tags": ["Conception de marque", "FLF2V"],
|
|
203
|
+
"models": ["ByteDance", "Seedream", "Google Gemini", "Nano Banana Pro"],
|
|
204
|
+
"openSource": false,
|
|
205
|
+
"date": "2025-12-15",
|
|
206
|
+
"size": 0,
|
|
207
|
+
"vram": 0,
|
|
208
|
+
"usage": 42,
|
|
209
|
+
"searchRank": 8
|
|
174
210
|
},
|
|
175
211
|
{
|
|
176
|
-
"name": "
|
|
177
|
-
"title": "
|
|
212
|
+
"name": "templates-product_scene_relight",
|
|
213
|
+
"title": "Fusion produit + scène et rééclairage",
|
|
178
214
|
"mediaType": "image",
|
|
179
215
|
"mediaSubtype": "webp",
|
|
180
|
-
"
|
|
181
|
-
"
|
|
182
|
-
"
|
|
183
|
-
"
|
|
184
|
-
"
|
|
185
|
-
"
|
|
186
|
-
"
|
|
216
|
+
"thumbnailVariant": "compareSlider",
|
|
217
|
+
"description": "Ajoute ton produit et un fond, fusionne-les et ajuste la lumière avec Seedream 4.5.",
|
|
218
|
+
"tags": ["Produit", "Éd. image", "Relumi."],
|
|
219
|
+
"models": ["ByteDance", "Seedream"],
|
|
220
|
+
"openSource": false,
|
|
221
|
+
"date": "2025-12-15",
|
|
222
|
+
"size": 0,
|
|
223
|
+
"vram": 0,
|
|
224
|
+
"usage": 11,
|
|
225
|
+
"searchRank": 8
|
|
187
226
|
},
|
|
188
227
|
{
|
|
189
|
-
"name": "
|
|
190
|
-
"title": "
|
|
228
|
+
"name": "templates-textured_logo_elements",
|
|
229
|
+
"title": "Ajouter texture et éléments à logo",
|
|
191
230
|
"mediaType": "image",
|
|
192
231
|
"mediaSubtype": "webp",
|
|
193
|
-
"description": "
|
|
194
|
-
"tags": ["
|
|
195
|
-
"models": ["
|
|
196
|
-
"date": "2025-
|
|
197
|
-
"
|
|
198
|
-
"size":
|
|
199
|
-
"vram":
|
|
232
|
+
"description": "Téléchargez votre logo, texture et éléments. Générez une vidéo du logo texturé comme actif de marque.",
|
|
233
|
+
"tags": ["Conception de marque", "Image vers vidéo"],
|
|
234
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
|
|
235
|
+
"date": "2025-12-11",
|
|
236
|
+
"openSource": false,
|
|
237
|
+
"size": 0,
|
|
238
|
+
"vram": 0,
|
|
239
|
+
"usage": 255,
|
|
240
|
+
"searchRank": 8
|
|
200
241
|
},
|
|
201
242
|
{
|
|
202
|
-
"name": "
|
|
203
|
-
"title": "
|
|
243
|
+
"name": "templates-qwen_image_edit-crop_and_stitch-fusion",
|
|
244
|
+
"title": "Reluminer le produit composé",
|
|
204
245
|
"mediaType": "image",
|
|
205
246
|
"mediaSubtype": "webp",
|
|
206
|
-
"description": "
|
|
247
|
+
"description": "Téléchargez une image composite de votre produit, dessinez un masque dans l'éditeur de masque et reluminez votre produit dans la scène.",
|
|
248
|
+
"tags": ["Éd. image", "Relumi."],
|
|
249
|
+
"models": ["Qwen-Image-Edit"],
|
|
250
|
+
"date": "2025-12-11",
|
|
251
|
+
"size": 0,
|
|
207
252
|
"thumbnailVariant": "compareSlider",
|
|
208
|
-
"
|
|
209
|
-
"
|
|
210
|
-
"
|
|
211
|
-
"
|
|
212
|
-
"size": 2136746230,
|
|
213
|
-
"vram": 3929895076
|
|
253
|
+
"vram": 0,
|
|
254
|
+
"requiresCustomNodes": ["comfyui-inpaint-cropandstitch"],
|
|
255
|
+
"usage": 361,
|
|
256
|
+
"searchRank": 8
|
|
214
257
|
},
|
|
215
258
|
{
|
|
216
|
-
"name": "
|
|
217
|
-
"title": "
|
|
259
|
+
"name": "templates-textured_logotype-v2.1",
|
|
260
|
+
"title": "Appliquer une texture au logo",
|
|
218
261
|
"mediaType": "image",
|
|
219
262
|
"mediaSubtype": "webp",
|
|
220
|
-
"description": "
|
|
221
|
-
"
|
|
222
|
-
"
|
|
223
|
-
"
|
|
224
|
-
"
|
|
225
|
-
"
|
|
226
|
-
"
|
|
227
|
-
"
|
|
263
|
+
"description": "Téléchargez votre logo et appliquez une texture et des éléments pour un asset de marque",
|
|
264
|
+
"tags": ["Conception de marque", "Image vers vidéo", "FLF2V"],
|
|
265
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
|
|
266
|
+
"date": "2025-12-03",
|
|
267
|
+
"openSource": false,
|
|
268
|
+
"size": 0,
|
|
269
|
+
"vram": 0,
|
|
270
|
+
"usage": 299,
|
|
271
|
+
"searchRank": 8
|
|
228
272
|
},
|
|
229
273
|
{
|
|
230
|
-
"name": "
|
|
231
|
-
"title": "
|
|
274
|
+
"name": "templates-product_ad-v2.0",
|
|
275
|
+
"title": "Remplacement produit pub",
|
|
232
276
|
"mediaType": "image",
|
|
233
277
|
"mediaSubtype": "webp",
|
|
234
|
-
"description": "
|
|
235
|
-
"
|
|
236
|
-
"
|
|
237
|
-
"
|
|
238
|
-
"
|
|
239
|
-
"
|
|
240
|
-
"
|
|
241
|
-
"
|
|
278
|
+
"description": "Créez des annonces statiques pour votre produit dans le style d'une publicité de référence",
|
|
279
|
+
"tags": ["Réf. style"],
|
|
280
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
|
|
281
|
+
"date": "2025-12-03",
|
|
282
|
+
"openSource": false,
|
|
283
|
+
"size": 0,
|
|
284
|
+
"vram": 0,
|
|
285
|
+
"usage": 222,
|
|
286
|
+
"searchRank": 8
|
|
242
287
|
},
|
|
243
288
|
{
|
|
244
|
-
"name": "
|
|
245
|
-
"title": "
|
|
289
|
+
"name": "templates-6-key-frames",
|
|
290
|
+
"title": "Vidéo reliée début-fin en continu",
|
|
246
291
|
"mediaType": "image",
|
|
247
292
|
"mediaSubtype": "webp",
|
|
248
|
-
"description": "
|
|
249
|
-
"
|
|
250
|
-
"
|
|
251
|
-
"
|
|
252
|
-
"
|
|
253
|
-
"
|
|
254
|
-
"
|
|
255
|
-
"
|
|
293
|
+
"description": "Vidéo fluide 6 images clés, lien continu",
|
|
294
|
+
"tags": ["Image vers vidéo", "FLF2V"],
|
|
295
|
+
"models": ["Wan2.2"],
|
|
296
|
+
"date": "2025-12-03",
|
|
297
|
+
"size": 0,
|
|
298
|
+
"vram": 0,
|
|
299
|
+
"usage": 1972,
|
|
300
|
+
"searchRank": 8
|
|
256
301
|
},
|
|
257
302
|
{
|
|
258
|
-
"name": "
|
|
259
|
-
"title": "
|
|
303
|
+
"name": "templates-9grid_social_media-v2.0",
|
|
304
|
+
"title": "Générateur pub 3x3",
|
|
260
305
|
"mediaType": "image",
|
|
261
306
|
"mediaSubtype": "webp",
|
|
262
|
-
"description": "
|
|
263
|
-
"
|
|
264
|
-
"
|
|
265
|
-
"
|
|
266
|
-
"
|
|
267
|
-
"
|
|
268
|
-
"
|
|
269
|
-
"
|
|
307
|
+
"description": "Téléchargez votre produit et saisissez un court prompt pour chaque position de la grille 3x3. 9 images uniques seront générées à partir de vos prompts. Sélectionnez ensuite vos images préférées et agrandissez-les en 4K en utilisant votre produit comme référence.",
|
|
308
|
+
"tags": ["Éd. image", "Image"],
|
|
309
|
+
"models": ["Nano Banana Pro", "Google"],
|
|
310
|
+
"date": "2025-12-06",
|
|
311
|
+
"size": 0,
|
|
312
|
+
"vram": 0,
|
|
313
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
314
|
+
"usage": 466,
|
|
315
|
+
"searchRank": 8
|
|
270
316
|
},
|
|
271
317
|
{
|
|
272
|
-
"name": "
|
|
273
|
-
"title": "
|
|
318
|
+
"name": "templates-poster_to_2x2_mockups-v2.0",
|
|
319
|
+
"title": "Maquettes de scène d'affiche",
|
|
274
320
|
"mediaType": "image",
|
|
275
321
|
"mediaSubtype": "webp",
|
|
276
|
-
"description": "
|
|
277
|
-
"
|
|
278
|
-
"
|
|
279
|
-
"
|
|
280
|
-
"
|
|
281
|
-
"
|
|
282
|
-
"
|
|
283
|
-
"vram":
|
|
322
|
+
"description": "Téléchargez une affiche ou une publicité et, avec une brève description de votre marque, générez 4 maquettes dans plusieurs scènes.",
|
|
323
|
+
"tags": ["Éd. image", "Maquette"],
|
|
324
|
+
"models": ["Nano Banana Pro", "Google"],
|
|
325
|
+
"date": "2025-12-06",
|
|
326
|
+
"openSource": false,
|
|
327
|
+
"size": 0,
|
|
328
|
+
"requiresCustomNodes": ["comfyui_essentials"],
|
|
329
|
+
"vram": 0,
|
|
330
|
+
"usage": 61,
|
|
331
|
+
"searchRank": 8
|
|
284
332
|
},
|
|
285
333
|
{
|
|
286
|
-
"name": "
|
|
287
|
-
"title": "
|
|
334
|
+
"name": "template-multistyle-magazine-cover-nanobananapro",
|
|
335
|
+
"title": "Couverture de magazine et design d'emballage",
|
|
288
336
|
"mediaType": "image",
|
|
289
337
|
"mediaSubtype": "webp",
|
|
290
|
-
"description": "
|
|
291
|
-
"
|
|
292
|
-
"
|
|
293
|
-
"
|
|
294
|
-
"
|
|
295
|
-
"
|
|
296
|
-
"
|
|
297
|
-
"
|
|
338
|
+
"description": "Concevez la mise en page du texte pour la couverture de votre magazine et explorez les options d’emballage.",
|
|
339
|
+
"tags": ["Éd. image", "Maquette", "Mise en page"],
|
|
340
|
+
"models": ["Nano Banana Pro", "Google"],
|
|
341
|
+
"date": "2025-12-06",
|
|
342
|
+
"openSource": false,
|
|
343
|
+
"size": 0,
|
|
344
|
+
"vram": 0,
|
|
345
|
+
"usage": 87,
|
|
346
|
+
"searchRank": 8
|
|
298
347
|
},
|
|
299
348
|
{
|
|
300
|
-
"name": "
|
|
301
|
-
"title": "
|
|
349
|
+
"name": "templates-1_click_multiple_scene_angles-v1.0",
|
|
350
|
+
"title": "Scène multi-angle 1 clic",
|
|
302
351
|
"mediaType": "image",
|
|
303
352
|
"mediaSubtype": "webp",
|
|
304
|
-
"description": "
|
|
305
|
-
"
|
|
306
|
-
"
|
|
307
|
-
"
|
|
308
|
-
"
|
|
309
|
-
"
|
|
310
|
-
"
|
|
311
|
-
"
|
|
353
|
+
"description": "Téléchargez votre scène, obtenez plusieurs vues d’un clic.",
|
|
354
|
+
"tags": ["Image Eidt"],
|
|
355
|
+
"models": ["Qwen-Image-Edit"],
|
|
356
|
+
"date": "2025-12-08",
|
|
357
|
+
"size": 31198642438,
|
|
358
|
+
"vram": 31198642438,
|
|
359
|
+
"usage": 1508,
|
|
360
|
+
"searchRank": 8
|
|
312
361
|
},
|
|
313
362
|
{
|
|
314
|
-
"name": "
|
|
315
|
-
"title": "
|
|
363
|
+
"name": "templates-1_click_multiple_character_angles-v1.0",
|
|
364
|
+
"title": "Multi-angle personnage",
|
|
316
365
|
"mediaType": "image",
|
|
317
366
|
"mediaSubtype": "webp",
|
|
318
|
-
"description": "
|
|
319
|
-
"
|
|
320
|
-
"
|
|
321
|
-
"
|
|
322
|
-
"
|
|
323
|
-
"
|
|
324
|
-
"
|
|
325
|
-
"
|
|
367
|
+
"description": "Téléchargez un perso, obtenez plusieurs vues",
|
|
368
|
+
"tags": ["Image Eidt"],
|
|
369
|
+
"models": ["Qwen-Image-Edit"],
|
|
370
|
+
"date": "2025-12-08",
|
|
371
|
+
"size": 31198642438,
|
|
372
|
+
"vram": 31198642438,
|
|
373
|
+
"usage": 3637,
|
|
374
|
+
"searchRank": 8
|
|
375
|
+
},
|
|
376
|
+
{
|
|
377
|
+
"name": "template-Animation_Trajectory_Control_Wan_ATI",
|
|
378
|
+
"title": "Contrôle de trajectoire d'animation",
|
|
379
|
+
"mediaType": "image",
|
|
380
|
+
"mediaSubtype": "webp",
|
|
381
|
+
"description": "Dessinez une trajectoire de mouvement pour animer l'image le long de celle-ci.",
|
|
382
|
+
"tags": ["Image vers vidéo"],
|
|
383
|
+
"models": ["Wan2.1"],
|
|
384
|
+
"date": "2025-12-11",
|
|
385
|
+
"size": 31604570534,
|
|
386
|
+
"requiresCustomNodes": ["ComfyUI-WanVideoWrapper", "comfyui_fill-nodes"],
|
|
387
|
+
"vram": 31604570534,
|
|
388
|
+
"usage": 449,
|
|
389
|
+
"searchRank": 8
|
|
326
390
|
}
|
|
327
391
|
]
|
|
328
392
|
},
|
|
329
393
|
{
|
|
330
394
|
"moduleName": "default",
|
|
331
395
|
"type": "image",
|
|
332
|
-
"category": "
|
|
396
|
+
"category": "Type de génération",
|
|
333
397
|
"icon": "icon-[lucide--image]",
|
|
334
398
|
"title": "Image",
|
|
335
399
|
"templates": [
|
|
400
|
+
{
|
|
401
|
+
"name": "image_z_image_turbo",
|
|
402
|
+
"title": "Z-Image-Turbo texte vers image",
|
|
403
|
+
"mediaType": "image",
|
|
404
|
+
"mediaSubtype": "webp",
|
|
405
|
+
"description": "Un modèle fondamental efficace de génération d’images utilisant un transformateur de diffusion à flux unique, compatible anglais et chinois.",
|
|
406
|
+
"tags": ["Texte vers image", "Image"],
|
|
407
|
+
"models": ["Z-Image-Turbo"],
|
|
408
|
+
"date": "2025-11-27",
|
|
409
|
+
"size": 20862803640,
|
|
410
|
+
"vram": 20862803640,
|
|
411
|
+
"usage": 27801
|
|
412
|
+
},
|
|
413
|
+
{
|
|
414
|
+
"name": "image_z_image_turbo_fun_union_controlnet",
|
|
415
|
+
"title": "Z-Image-Turbo contrôle vers image",
|
|
416
|
+
"mediaType": "image",
|
|
417
|
+
"mediaSubtype": "webp",
|
|
418
|
+
"description": "ControlNet pour Z-Image-Turbo prenant en charge plusieurs modes de contrôle, dont Canny, HED, profondeur, pose et MLSD.",
|
|
419
|
+
"tags": ["Image", "ControlNet"],
|
|
420
|
+
"models": ["Z-Image-Turbo"],
|
|
421
|
+
"date": "2025-12-02",
|
|
422
|
+
"size": 23794118820,
|
|
423
|
+
"thumbnailVariant": "compareSlider",
|
|
424
|
+
"vram": 23794118820,
|
|
425
|
+
"usage": 3859
|
|
426
|
+
},
|
|
427
|
+
{
|
|
428
|
+
"name": "image_qwen_image_edit_2511",
|
|
429
|
+
"title": "Qwen Image Edit 2511 - Remplacement de matériau",
|
|
430
|
+
"mediaType": "image",
|
|
431
|
+
"mediaSubtype": "webp",
|
|
432
|
+
"thumbnailVariant": "compareSlider",
|
|
433
|
+
"description": "Remplacez les matériaux d'objets (ex. meubles) en combinant des images de référence avec Qwen-Image-Edit-2511.",
|
|
434
|
+
"tags": ["Éd. image"],
|
|
435
|
+
"models": ["Qwen-Image-Edit"],
|
|
436
|
+
"date": "2025-12-23",
|
|
437
|
+
"size": 51367808860,
|
|
438
|
+
"vram": 51367808860
|
|
439
|
+
},
|
|
440
|
+
{
|
|
441
|
+
"name": "image_qwen_image_edit_2509",
|
|
442
|
+
"title": "Qwen Image Edit 2509",
|
|
443
|
+
"mediaType": "image",
|
|
444
|
+
"mediaSubtype": "webp",
|
|
445
|
+
"thumbnailVariant": "compareSlider",
|
|
446
|
+
"description": "Édition d'images avancée avec support multi-images, cohérence améliorée et intégration ControlNet.",
|
|
447
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
448
|
+
"tags": ["Image vers image", "Éd. image", "ControlNet"],
|
|
449
|
+
"models": ["Qwen-Image"],
|
|
450
|
+
"date": "2025-09-25",
|
|
451
|
+
"size": 31772020572,
|
|
452
|
+
"vram": 31772020572,
|
|
453
|
+
"usage": 9323
|
|
454
|
+
},
|
|
455
|
+
{
|
|
456
|
+
"name": "image_qwen_image_edit_2509_relight",
|
|
457
|
+
"title": "Rééclairage de photo",
|
|
458
|
+
"mediaType": "image",
|
|
459
|
+
"mediaSubtype": "webp",
|
|
460
|
+
"thumbnailVariant": "compareSlider",
|
|
461
|
+
"description": "Rééclaire des images avec Qwen-Image-Edit et LoRA.",
|
|
462
|
+
"tags": ["Éd. image", "Relumi."],
|
|
463
|
+
"models": ["Qwen-Image-Edit"],
|
|
464
|
+
"date": "2025-12-15",
|
|
465
|
+
"size": 31772020572,
|
|
466
|
+
"vram": 31772020572,
|
|
467
|
+
"usage": 192
|
|
468
|
+
},
|
|
336
469
|
{
|
|
337
470
|
"name": "image_flux2",
|
|
338
471
|
"title": "Flux.2 Dev",
|
|
@@ -340,11 +473,25 @@
|
|
|
340
473
|
"mediaSubtype": "webp",
|
|
341
474
|
"thumbnailVariant": "compareSlider",
|
|
342
475
|
"description": "Générez des images photoréalistes avec cohérence multi-référence et rendu de texte professionnel.",
|
|
343
|
-
"tags": ["Texte vers image", "Image", "
|
|
344
|
-
"models": ["Flux.2 Dev", "BFL"],
|
|
476
|
+
"tags": ["Texte vers image", "Image", "Éd. image"],
|
|
477
|
+
"models": ["Flux.2 Dev", "BFL", "Flux"],
|
|
478
|
+
"date": "2025-11-26",
|
|
479
|
+
"size": 71781788416,
|
|
480
|
+
"vram": 71781788416,
|
|
481
|
+
"usage": 9538
|
|
482
|
+
},
|
|
483
|
+
{
|
|
484
|
+
"name": "image_flux2_text_to_image",
|
|
485
|
+
"title": "Flux.2 Dev texte vers image",
|
|
486
|
+
"mediaType": "image",
|
|
487
|
+
"mediaSubtype": "webp",
|
|
488
|
+
"description": "Génération d'image à partir de texte avec éclairage, matériaux et détails réalistes.",
|
|
489
|
+
"tags": ["Texte vers image", "Image"],
|
|
490
|
+
"models": ["Flux.2 Dev", "BFL", "Flux"],
|
|
345
491
|
"date": "2025-11-26",
|
|
346
492
|
"size": 71382356459,
|
|
347
|
-
"vram":
|
|
493
|
+
"vram": 71382356459,
|
|
494
|
+
"usage": 4002
|
|
348
495
|
},
|
|
349
496
|
{
|
|
350
497
|
"name": "image_flux2_fp8",
|
|
@@ -352,22 +499,24 @@
|
|
|
352
499
|
"mediaType": "image",
|
|
353
500
|
"mediaSubtype": "webp",
|
|
354
501
|
"description": "Créez des maquettes de produits en appliquant des motifs de conception sur des emballages, des mugs et d'autres produits à l'aide de la cohérence multi-références.",
|
|
355
|
-
"tags": ["Texte vers image", "Image", "
|
|
356
|
-
"models": ["Flux.2 Dev", "BFL"],
|
|
502
|
+
"tags": ["Texte vers image", "Image", "Éd. image", "Maquette", "Produit"],
|
|
503
|
+
"models": ["Flux.2 Dev", "BFL", "Flux"],
|
|
357
504
|
"date": "2025-11-26",
|
|
358
505
|
"size": 53837415055,
|
|
359
|
-
"vram":
|
|
506
|
+
"vram": 53837415055,
|
|
507
|
+
"usage": 436
|
|
360
508
|
},
|
|
361
509
|
{
|
|
362
|
-
"name": "
|
|
363
|
-
"title": "
|
|
510
|
+
"name": "image_qwen_image_layered",
|
|
511
|
+
"title": "Qwen-Image-Layered : décomposition en calques",
|
|
364
512
|
"mediaType": "image",
|
|
365
513
|
"mediaSubtype": "webp",
|
|
366
|
-
"description": "
|
|
367
|
-
"tags": ["
|
|
368
|
-
"models": ["
|
|
369
|
-
"date": "2025-
|
|
370
|
-
"size":
|
|
514
|
+
"description": "Décompose en RGBA éditable",
|
|
515
|
+
"tags": ["Layer Decompose"],
|
|
516
|
+
"models": ["Qwen-Image-Layered"],
|
|
517
|
+
"date": "2025-12-22",
|
|
518
|
+
"size": 50446538375,
|
|
519
|
+
"vram": 50446538375
|
|
371
520
|
},
|
|
372
521
|
{
|
|
373
522
|
"name": "image_qwen_image",
|
|
@@ -379,7 +528,9 @@
|
|
|
379
528
|
"tags": ["Texte vers image", "Image"],
|
|
380
529
|
"models": ["Qwen-Image"],
|
|
381
530
|
"date": "2025-08-05",
|
|
382
|
-
"size": 31772020572
|
|
531
|
+
"size": 31772020572,
|
|
532
|
+
"vram": 31772020572,
|
|
533
|
+
"usage": 1143
|
|
383
534
|
},
|
|
384
535
|
{
|
|
385
536
|
"name": "image_qwen_image_instantx_controlnet",
|
|
@@ -391,7 +542,9 @@
|
|
|
391
542
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
392
543
|
"models": ["Qwen-Image"],
|
|
393
544
|
"date": "2025-08-23",
|
|
394
|
-
"size": 35304631173
|
|
545
|
+
"size": 35304631173,
|
|
546
|
+
"vram": 35304631173,
|
|
547
|
+
"usage": 472
|
|
395
548
|
},
|
|
396
549
|
{
|
|
397
550
|
"name": "image_qwen_image_instantx_inpainting_controlnet",
|
|
@@ -404,7 +557,9 @@
|
|
|
404
557
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
405
558
|
"models": ["Qwen-Image"],
|
|
406
559
|
"date": "2025-09-12",
|
|
407
|
-
"size": 36013300777
|
|
560
|
+
"size": 36013300777,
|
|
561
|
+
"vram": 36013300777,
|
|
562
|
+
"usage": 515
|
|
408
563
|
},
|
|
409
564
|
{
|
|
410
565
|
"name": "image_qwen_image_union_control_lora",
|
|
@@ -416,7 +571,10 @@
|
|
|
416
571
|
"models": ["Qwen-Image"],
|
|
417
572
|
"date": "2025-08-23",
|
|
418
573
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
419
|
-
"size": 32716913377
|
|
574
|
+
"size": 32716913377,
|
|
575
|
+
"thumbnailVariant": "compareSlider",
|
|
576
|
+
"vram": 32716913377,
|
|
577
|
+
"usage": 340
|
|
420
578
|
},
|
|
421
579
|
{
|
|
422
580
|
"name": "image_qwen_image_controlnet_patch",
|
|
@@ -429,1453 +587,1435 @@
|
|
|
429
587
|
"date": "2025-08-24",
|
|
430
588
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
431
589
|
"size": 34037615821,
|
|
432
|
-
"thumbnailVariant": "compareSlider"
|
|
590
|
+
"thumbnailVariant": "compareSlider",
|
|
591
|
+
"vram": 34037615821,
|
|
592
|
+
"usage": 218
|
|
433
593
|
},
|
|
434
594
|
{
|
|
435
|
-
"name": "
|
|
436
|
-
"title": "
|
|
595
|
+
"name": "api_nano_banana_pro",
|
|
596
|
+
"title": "Nano Banana Pro",
|
|
597
|
+
"description": "Nano-banana Pro (Gemini 3.0 Pro Image) - Génération et édition d'images 4K de qualité studio avec rendu de texte amélioré et cohérence des personnages.",
|
|
437
598
|
"mediaType": "image",
|
|
438
599
|
"mediaSubtype": "webp",
|
|
439
|
-
"thumbnailVariant": "
|
|
440
|
-
"
|
|
441
|
-
"
|
|
442
|
-
"
|
|
443
|
-
"
|
|
444
|
-
"
|
|
445
|
-
"
|
|
600
|
+
"thumbnailVariant": "hoverDissolve",
|
|
601
|
+
"tags": ["Éd. image", "Image", "API"],
|
|
602
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google"],
|
|
603
|
+
"date": "2025-11-21",
|
|
604
|
+
"openSource": false,
|
|
605
|
+
"size": 0,
|
|
606
|
+
"vram": 0,
|
|
607
|
+
"usage": 6749
|
|
446
608
|
},
|
|
447
609
|
{
|
|
448
|
-
"name": "
|
|
449
|
-
"title": "
|
|
610
|
+
"name": "api_from_photo_2_miniature",
|
|
611
|
+
"title": "Style photo vers modèle",
|
|
612
|
+
"description": "Transformez des photos réelles de bâtiments en plans architecturaux puis en maquettes physiques détaillées. Un pipeline complet de visualisation architecturale, de la photo à la miniature.",
|
|
450
613
|
"mediaType": "image",
|
|
451
614
|
"mediaSubtype": "webp",
|
|
452
|
-
"
|
|
453
|
-
"
|
|
454
|
-
"
|
|
455
|
-
"
|
|
456
|
-
"
|
|
457
|
-
"
|
|
458
|
-
"
|
|
615
|
+
"tags": ["Éd. image", "Image", "3D"],
|
|
616
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google"],
|
|
617
|
+
"date": "2025-11-21",
|
|
618
|
+
"openSource": false,
|
|
619
|
+
"size": 0,
|
|
620
|
+
"vram": 0,
|
|
621
|
+
"usage": 288
|
|
459
622
|
},
|
|
460
623
|
{
|
|
461
|
-
"name": "
|
|
462
|
-
"title": "
|
|
624
|
+
"name": "api_openai_fashion_billboard_generator",
|
|
625
|
+
"title": "Générateur de panneau publicitaire de mode",
|
|
626
|
+
"description": "Transforme des photos de vêtements en affiches publicitaires réalistes en centre commercial.",
|
|
463
627
|
"mediaType": "image",
|
|
464
628
|
"mediaSubtype": "webp",
|
|
465
|
-
"
|
|
466
|
-
"
|
|
467
|
-
"
|
|
468
|
-
"
|
|
469
|
-
"
|
|
470
|
-
"
|
|
629
|
+
"tags": ["Éd. image", "Image", "API", "Fashion", "Maquette"],
|
|
630
|
+
"models": ["GPT-Image-1.5", "OpenAI"],
|
|
631
|
+
"date": "2025-12-18",
|
|
632
|
+
"openSource": false,
|
|
633
|
+
"size": 0,
|
|
634
|
+
"vram": 0,
|
|
635
|
+
"usage": 50
|
|
471
636
|
},
|
|
472
637
|
{
|
|
473
|
-
"name": "
|
|
474
|
-
"title": "
|
|
638
|
+
"name": "api_bytedance_seedream4",
|
|
639
|
+
"title": "ByteDance Seedream 4.0",
|
|
640
|
+
"description": "Modèle d'IA multimodal pour la génération d'images à partir de texte et l'édition d'images. Générez des images 2K en moins de 2 secondes avec un contrôle en langage naturel.",
|
|
475
641
|
"mediaType": "image",
|
|
476
642
|
"mediaSubtype": "webp",
|
|
477
|
-
"
|
|
478
|
-
"
|
|
479
|
-
"
|
|
480
|
-
"
|
|
481
|
-
"
|
|
482
|
-
"
|
|
483
|
-
"
|
|
484
|
-
"vram": 19327352832
|
|
643
|
+
"tags": ["Éd. image", "Image", "API", "Texte vers image"],
|
|
644
|
+
"models": ["Seedream 4.0", "ByteDance"],
|
|
645
|
+
"date": "2025-09-11",
|
|
646
|
+
"openSource": false,
|
|
647
|
+
"size": 0,
|
|
648
|
+
"vram": 0,
|
|
649
|
+
"usage": 2117
|
|
485
650
|
},
|
|
486
651
|
{
|
|
487
|
-
"name": "
|
|
488
|
-
"title": "
|
|
652
|
+
"name": "api_bfl_flux2_max_sofa_swap",
|
|
653
|
+
"title": "BFL FLUX.2 [max] : Remplacement d'objet",
|
|
654
|
+
"description": "Remplacez des objets avec FLUX.2 [max], qualité supérieure. Idéal pour photos produit, échanges de meubles et cohérence de scène.",
|
|
489
655
|
"mediaType": "image",
|
|
490
656
|
"mediaSubtype": "webp",
|
|
491
|
-
"
|
|
492
|
-
"
|
|
493
|
-
"
|
|
494
|
-
"
|
|
495
|
-
"
|
|
496
|
-
"
|
|
657
|
+
"tags": ["Éd. image", "Image", "API"],
|
|
658
|
+
"models": ["Flux2", "Flux", "BFL"],
|
|
659
|
+
"date": "2025-12-22",
|
|
660
|
+
"searchRank": 7,
|
|
661
|
+
"openSource": false,
|
|
662
|
+
"size": 0,
|
|
663
|
+
"vram": 0,
|
|
664
|
+
"thumbnailVariant": "compareSlider"
|
|
497
665
|
},
|
|
498
666
|
{
|
|
499
|
-
"name": "
|
|
500
|
-
"title": "
|
|
667
|
+
"name": "api_google_gemini_image",
|
|
668
|
+
"title": "Google Gemini Image",
|
|
669
|
+
"description": "Nano-banana (Gemini-2.5-Flash Image) - édition d'images avec cohérence.",
|
|
501
670
|
"mediaType": "image",
|
|
502
671
|
"mediaSubtype": "webp",
|
|
503
|
-
"
|
|
504
|
-
"
|
|
505
|
-
"
|
|
506
|
-
"
|
|
507
|
-
"size":
|
|
672
|
+
"tags": ["Éd. image", "Image", "API", "Texte vers image"],
|
|
673
|
+
"models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
|
|
674
|
+
"date": "2025-08-27",
|
|
675
|
+
"openSource": false,
|
|
676
|
+
"size": 0,
|
|
677
|
+
"vram": 0,
|
|
678
|
+
"usage": 1657
|
|
508
679
|
},
|
|
509
680
|
{
|
|
510
|
-
"name": "
|
|
511
|
-
"title": "
|
|
681
|
+
"name": "api_flux2",
|
|
682
|
+
"title": "Flux.2 Pro",
|
|
683
|
+
"description": "Générez des images photoréalistes avec cohérence multi-références et rendu de texte professionnel.",
|
|
512
684
|
"mediaType": "image",
|
|
513
685
|
"mediaSubtype": "webp",
|
|
514
|
-
"
|
|
515
|
-
"
|
|
516
|
-
"
|
|
517
|
-
"
|
|
518
|
-
"size":
|
|
519
|
-
"vram":
|
|
520
|
-
|
|
686
|
+
"tags": ["Éd. image", "Image", "API", "Texte vers image"],
|
|
687
|
+
"models": ["Flux.2", "BFL", "Flux"],
|
|
688
|
+
"date": "2025-11-26",
|
|
689
|
+
"openSource": false,
|
|
690
|
+
"size": 0,
|
|
691
|
+
"vram": 0,
|
|
692
|
+
"usage": 852
|
|
693
|
+
},
|
|
521
694
|
{
|
|
522
|
-
"name": "
|
|
523
|
-
"title": "
|
|
695
|
+
"name": "api_topaz_image_enhance",
|
|
696
|
+
"title": "Amélioration d'image Topaz",
|
|
697
|
+
"description": "Amélioration d’image professionnelle avec le modèle Reimagine de Topaz, comprenant l’amélioration du visage et la restauration des détails.",
|
|
524
698
|
"mediaType": "image",
|
|
525
699
|
"mediaSubtype": "webp",
|
|
526
700
|
"thumbnailVariant": "compareSlider",
|
|
527
|
-
"
|
|
528
|
-
"
|
|
529
|
-
"
|
|
530
|
-
"
|
|
531
|
-
"size":
|
|
532
|
-
"vram":
|
|
701
|
+
"tags": ["Image", "API", "Amélioration"],
|
|
702
|
+
"models": ["Topaz", "Reimagine"],
|
|
703
|
+
"date": "2025-11-25",
|
|
704
|
+
"openSource": false,
|
|
705
|
+
"size": 0,
|
|
706
|
+
"vram": 0,
|
|
707
|
+
"usage": 576
|
|
533
708
|
},
|
|
534
709
|
{
|
|
535
|
-
"name": "
|
|
536
|
-
"title": "Flux
|
|
710
|
+
"name": "api_bfl_flux_1_kontext_multiple_images_input",
|
|
711
|
+
"title": "BFL Flux.1 Kontext Entrée Multi-Images",
|
|
712
|
+
"description": "Importer plusieurs images et les éditer avec Flux.1 Kontext.",
|
|
537
713
|
"mediaType": "image",
|
|
538
714
|
"mediaSubtype": "webp",
|
|
539
|
-
"
|
|
540
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
541
|
-
"tags": ["
|
|
542
|
-
"models": ["Flux", "BFL"],
|
|
543
|
-
"date": "2025-
|
|
544
|
-
"
|
|
545
|
-
"
|
|
715
|
+
"thumbnailVariant": "compareSlider",
|
|
716
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
|
|
717
|
+
"tags": ["Éd. image", "Image"],
|
|
718
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
719
|
+
"date": "2025-05-29",
|
|
720
|
+
"openSource": false,
|
|
721
|
+
"size": 0,
|
|
722
|
+
"vram": 0,
|
|
723
|
+
"usage": 139
|
|
546
724
|
},
|
|
547
725
|
{
|
|
548
|
-
"name": "
|
|
549
|
-
"title": "
|
|
550
|
-
"description": "
|
|
551
|
-
"thumbnailVariant": "hoverDissolve",
|
|
726
|
+
"name": "api_bfl_flux_1_kontext_pro_image",
|
|
727
|
+
"title": "BFL Flux.1 Kontext Pro",
|
|
728
|
+
"description": "Éditer des images avec Flux.1 Kontext pro image.",
|
|
552
729
|
"mediaType": "image",
|
|
553
730
|
"mediaSubtype": "webp",
|
|
554
|
-
"
|
|
555
|
-
"
|
|
556
|
-
"
|
|
557
|
-
"
|
|
558
|
-
"
|
|
559
|
-
"
|
|
731
|
+
"thumbnailVariant": "compareSlider",
|
|
732
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
|
|
733
|
+
"tags": ["Éd. image", "Image"],
|
|
734
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
735
|
+
"date": "2025-05-29",
|
|
736
|
+
"openSource": false,
|
|
737
|
+
"size": 0,
|
|
738
|
+
"vram": 0,
|
|
739
|
+
"usage": 403
|
|
560
740
|
},
|
|
561
741
|
{
|
|
562
|
-
"name": "
|
|
563
|
-
"title": "Flux
|
|
742
|
+
"name": "api_bfl_flux_1_kontext_max_image",
|
|
743
|
+
"title": "BFL Flux.1 Kontext Max",
|
|
744
|
+
"description": "Éditer des images avec Flux.1 Kontext max image.",
|
|
564
745
|
"mediaType": "image",
|
|
565
746
|
"mediaSubtype": "webp",
|
|
566
|
-
"
|
|
567
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
568
|
-
"tags": ["
|
|
569
|
-
"models": ["Flux", "BFL"],
|
|
570
|
-
"date": "2025-
|
|
571
|
-
"
|
|
572
|
-
"
|
|
747
|
+
"thumbnailVariant": "compareSlider",
|
|
748
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
|
|
749
|
+
"tags": ["Éd. image", "Image"],
|
|
750
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
751
|
+
"date": "2025-05-29",
|
|
752
|
+
"openSource": false,
|
|
753
|
+
"size": 0,
|
|
754
|
+
"vram": 0,
|
|
755
|
+
"usage": 74
|
|
573
756
|
},
|
|
574
757
|
{
|
|
575
|
-
"name": "
|
|
576
|
-
"title": "
|
|
758
|
+
"name": "api_wan_text_to_image",
|
|
759
|
+
"title": "Wan2.5: Texte vers Image",
|
|
760
|
+
"description": "Générez des images avec un excellent suivi des prompts et une qualité visuelle élevée avec FLUX.1 Pro.",
|
|
577
761
|
"mediaType": "image",
|
|
578
762
|
"mediaSubtype": "webp",
|
|
579
|
-
"
|
|
580
|
-
"
|
|
581
|
-
"
|
|
582
|
-
"
|
|
583
|
-
"
|
|
584
|
-
"
|
|
585
|
-
"
|
|
763
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
764
|
+
"models": ["Wan2.5", "Wan"],
|
|
765
|
+
"date": "2025-09-25",
|
|
766
|
+
"openSource": false,
|
|
767
|
+
"size": 0,
|
|
768
|
+
"vram": 0,
|
|
769
|
+
"usage": 244
|
|
586
770
|
},
|
|
587
771
|
{
|
|
588
|
-
"name": "
|
|
589
|
-
"title": "Flux
|
|
772
|
+
"name": "api_bfl_flux_pro_t2i",
|
|
773
|
+
"title": "BFL Flux[Pro]: Texte vers Image",
|
|
774
|
+
"description": "Générer des images avec un excellent suivi des prompts et une qualité visuelle en utilisant FLUX.1 Pro.",
|
|
590
775
|
"mediaType": "image",
|
|
591
776
|
"mediaSubtype": "webp",
|
|
592
|
-
"
|
|
593
|
-
"
|
|
594
|
-
"tags": ["Texte vers image", "Image"],
|
|
777
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
|
|
778
|
+
"tags": ["Éd. image", "Image"],
|
|
595
779
|
"models": ["Flux", "BFL"],
|
|
596
|
-
"date": "2025-
|
|
597
|
-
"
|
|
598
|
-
"
|
|
780
|
+
"date": "2025-05-01",
|
|
781
|
+
"openSource": false,
|
|
782
|
+
"size": 0,
|
|
783
|
+
"vram": 0,
|
|
784
|
+
"usage": 117
|
|
599
785
|
},
|
|
600
786
|
{
|
|
601
|
-
"name": "
|
|
602
|
-
"title": "
|
|
787
|
+
"name": "api_runway_text_to_image",
|
|
788
|
+
"title": "Runway: Texte vers Image",
|
|
789
|
+
"description": "Générer des images de haute qualité à partir de prompts textuels en utilisant le modèle AI de Runway.",
|
|
603
790
|
"mediaType": "image",
|
|
604
791
|
"mediaSubtype": "webp",
|
|
605
|
-
"
|
|
606
|
-
"
|
|
607
|
-
"tags": ["Texte vers image", "Image"],
|
|
608
|
-
"models": ["Flux", "BFL"],
|
|
792
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
793
|
+
"models": ["Runway"],
|
|
609
794
|
"date": "2025-03-01",
|
|
610
|
-
"
|
|
795
|
+
"openSource": false,
|
|
796
|
+
"size": 0,
|
|
797
|
+
"vram": 0,
|
|
798
|
+
"usage": 37
|
|
611
799
|
},
|
|
612
800
|
{
|
|
613
|
-
"name": "
|
|
614
|
-
"title": "
|
|
801
|
+
"name": "api_runway_reference_to_image",
|
|
802
|
+
"title": "Runway: Référence vers Image",
|
|
803
|
+
"description": "Générer de nouvelles images basées sur des styles et compositions de référence avec l'AI de Runway.",
|
|
615
804
|
"mediaType": "image",
|
|
616
|
-
"mediaSubtype": "webp",
|
|
617
|
-
"description": "Combler les parties manquantes des images en utilisant l'inpainting Flux.",
|
|
618
805
|
"thumbnailVariant": "compareSlider",
|
|
619
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
620
|
-
"tags": ["Image vers image", "Inpainting", "Image"],
|
|
621
|
-
"models": ["Flux", "BFL"],
|
|
622
|
-
"date": "2025-03-01",
|
|
623
|
-
"size": 10372346020
|
|
624
|
-
},
|
|
625
|
-
{
|
|
626
|
-
"name": "flux_fill_outpaint_example",
|
|
627
|
-
"title": "Flux Outpainting",
|
|
628
|
-
"mediaType": "image",
|
|
629
806
|
"mediaSubtype": "webp",
|
|
630
|
-
"
|
|
631
|
-
"
|
|
632
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
633
|
-
"tags": ["Outpainting", "Image", "Image vers image"],
|
|
634
|
-
"models": ["Flux", "BFL"],
|
|
807
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
808
|
+
"models": ["Runway"],
|
|
635
809
|
"date": "2025-03-01",
|
|
636
|
-
"
|
|
810
|
+
"openSource": false,
|
|
811
|
+
"size": 0,
|
|
812
|
+
"vram": 0,
|
|
813
|
+
"usage": 115
|
|
637
814
|
},
|
|
638
815
|
{
|
|
639
|
-
"name": "
|
|
640
|
-
"title": "
|
|
816
|
+
"name": "api_stability_ai_stable_image_ultra_t2i",
|
|
817
|
+
"title": "Stability AI: Stable Image Ultra Texte vers Image",
|
|
818
|
+
"description": "Générer des images de haute qualité avec un excellent respect des prompts. Parfait pour des cas d'utilisation professionnels à une résolution de 1 mégapixel.",
|
|
641
819
|
"mediaType": "image",
|
|
642
820
|
"mediaSubtype": "webp",
|
|
643
|
-
"
|
|
644
|
-
"
|
|
645
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
646
|
-
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
647
|
-
"models": ["Flux", "BFL"],
|
|
821
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
822
|
+
"models": ["Stability"],
|
|
648
823
|
"date": "2025-03-01",
|
|
649
|
-
"
|
|
824
|
+
"openSource": false,
|
|
825
|
+
"size": 0,
|
|
826
|
+
"vram": 0,
|
|
827
|
+
"usage": 27
|
|
650
828
|
},
|
|
651
829
|
{
|
|
652
|
-
"name": "
|
|
653
|
-
"title": "
|
|
830
|
+
"name": "api_stability_ai_i2i",
|
|
831
|
+
"title": "Stability AI: Image vers Image",
|
|
832
|
+
"description": "Transformer des images avec une génération de haute qualité en utilisant Stability AI, parfait pour l'édition professionnelle et le transfert de style.",
|
|
654
833
|
"mediaType": "image",
|
|
834
|
+
"thumbnailVariant": "compareSlider",
|
|
655
835
|
"mediaSubtype": "webp",
|
|
656
|
-
"
|
|
657
|
-
"
|
|
658
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
659
|
-
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
660
|
-
"models": ["Flux", "BFL"],
|
|
836
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
837
|
+
"models": ["Stability"],
|
|
661
838
|
"date": "2025-03-01",
|
|
662
|
-
"
|
|
839
|
+
"openSource": false,
|
|
840
|
+
"size": 0,
|
|
841
|
+
"vram": 0,
|
|
842
|
+
"usage": 65
|
|
663
843
|
},
|
|
664
844
|
{
|
|
665
|
-
"name": "
|
|
666
|
-
"title": "
|
|
845
|
+
"name": "api_stability_ai_sd3.5_t2i",
|
|
846
|
+
"title": "Stability AI: SD3.5 Texte vers Image",
|
|
847
|
+
"description": "Générer des images de haute qualité avec un excellent respect des prompts. Parfait pour des cas d'utilisation professionnels à une résolution de 1 mégapixel.",
|
|
667
848
|
"mediaType": "image",
|
|
668
849
|
"mediaSubtype": "webp",
|
|
669
|
-
"
|
|
670
|
-
"
|
|
671
|
-
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
672
|
-
"models": ["Flux", "BFL"],
|
|
850
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
851
|
+
"models": ["Stability"],
|
|
673
852
|
"date": "2025-03-01",
|
|
674
|
-
"
|
|
853
|
+
"openSource": false,
|
|
854
|
+
"size": 0,
|
|
855
|
+
"vram": 0,
|
|
856
|
+
"usage": 18
|
|
675
857
|
},
|
|
676
858
|
{
|
|
677
|
-
"name": "
|
|
678
|
-
"title": "
|
|
859
|
+
"name": "api_stability_ai_sd3.5_i2i",
|
|
860
|
+
"title": "Stability AI: SD3.5 Image vers Image",
|
|
861
|
+
"description": "Générer des images de haute qualité avec un excellent respect des prompts. Parfait pour des cas d'utilisation professionnels à une résolution de 1 mégapixel.",
|
|
679
862
|
"mediaType": "image",
|
|
863
|
+
"thumbnailVariant": "compareSlider",
|
|
680
864
|
"mediaSubtype": "webp",
|
|
681
|
-
"
|
|
682
|
-
"
|
|
683
|
-
"
|
|
684
|
-
"
|
|
685
|
-
"
|
|
686
|
-
"
|
|
865
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
866
|
+
"models": ["Stability"],
|
|
867
|
+
"date": "2025-03-01",
|
|
868
|
+
"openSource": false,
|
|
869
|
+
"size": 0,
|
|
870
|
+
"vram": 0,
|
|
871
|
+
"usage": 88
|
|
687
872
|
},
|
|
688
873
|
{
|
|
689
|
-
"name": "
|
|
690
|
-
"title": "Édition d'Image
|
|
874
|
+
"name": "image_qwen_image_edit",
|
|
875
|
+
"title": "Édition d'Image Qwen",
|
|
691
876
|
"mediaType": "image",
|
|
692
877
|
"mediaSubtype": "webp",
|
|
693
|
-
"thumbnailVariant": "
|
|
694
|
-
"description": "Éditer des images avec
|
|
695
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/image/
|
|
696
|
-
"tags": ["
|
|
697
|
-
"models": ["
|
|
698
|
-
"date": "2025-
|
|
699
|
-
"size":
|
|
878
|
+
"thumbnailVariant": "compareSlider",
|
|
879
|
+
"description": "Éditer des images avec une édition de texte bilingue précise et des capacités d'édition sémantique/apparence duales en utilisant le modèle MMDiT 20B de Qwen-Image-Edit.",
|
|
880
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
881
|
+
"tags": ["Image vers image", "Éd. image"],
|
|
882
|
+
"models": ["Qwen-Image-Edit"],
|
|
883
|
+
"date": "2025-08-18",
|
|
884
|
+
"size": 31772020572,
|
|
885
|
+
"vram": 31772020572,
|
|
886
|
+
"usage": 1556
|
|
700
887
|
},
|
|
701
888
|
{
|
|
702
|
-
"name": "
|
|
703
|
-
"title": "
|
|
889
|
+
"name": "image_ovis_text_to_image",
|
|
890
|
+
"title": "Ovis-Image texte vers image",
|
|
704
891
|
"mediaType": "image",
|
|
705
892
|
"mediaSubtype": "webp",
|
|
706
|
-
"description": "
|
|
707
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
893
|
+
"description": "Ovis-Image est un modèle texte-image de 7 milliards de paramètres spécifiquement optimisé pour le rendu de texte de haute qualité dans les images générées. Conçu pour fonctionner efficacement sous des contraintes de calcul, il excelle dans la génération précise d'images contenant du contenu textuel.",
|
|
708
894
|
"tags": ["Texte vers image", "Image"],
|
|
709
|
-
"models": ["
|
|
710
|
-
"date": "2025-
|
|
711
|
-
"size":
|
|
895
|
+
"models": ["Ovis-Image"],
|
|
896
|
+
"date": "2025-12-02",
|
|
897
|
+
"size": 20228222222,
|
|
898
|
+
"vram": 20228222222,
|
|
899
|
+
"usage": 1456
|
|
712
900
|
},
|
|
713
901
|
{
|
|
714
|
-
"name": "
|
|
715
|
-
"title": "
|
|
902
|
+
"name": "image_chrono_edit_14B",
|
|
903
|
+
"title": "ChronoEdit 14B",
|
|
716
904
|
"mediaType": "image",
|
|
717
905
|
"mediaSubtype": "webp",
|
|
718
|
-
"
|
|
719
|
-
"
|
|
720
|
-
"tags": ["
|
|
721
|
-
"models": ["
|
|
722
|
-
"date": "2025-
|
|
723
|
-
"size":
|
|
906
|
+
"thumbnailVariant": "compareSlider",
|
|
907
|
+
"description": "Édition d'images propulsée par la compréhension dynamique des modèles vidéo, créant des résultats physiquement plausibles tout en préservant la cohérence du personnage et du style.",
|
|
908
|
+
"tags": ["Éd. image", "Image vers image"],
|
|
909
|
+
"models": ["Wan2.1", "ChronoEdit", "Nvidia"],
|
|
910
|
+
"date": "2025-11-03",
|
|
911
|
+
"size": 41435696988,
|
|
912
|
+
"vram": 41435696988,
|
|
913
|
+
"usage": 611
|
|
724
914
|
},
|
|
725
915
|
{
|
|
726
|
-
"name": "
|
|
727
|
-
"title": "
|
|
916
|
+
"name": "flux_kontext_dev_basic",
|
|
917
|
+
"title": "Flux Kontext Dev (Basique)",
|
|
728
918
|
"mediaType": "image",
|
|
729
919
|
"mediaSubtype": "webp",
|
|
730
|
-
"
|
|
731
|
-
"
|
|
732
|
-
"
|
|
733
|
-
"
|
|
734
|
-
"
|
|
735
|
-
"
|
|
920
|
+
"thumbnailVariant": "hoverDissolve",
|
|
921
|
+
"description": "Éditer une image en utilisant Flux Kontext avec une visibilité complète des nœuds, parfait pour apprendre le flux de travail.",
|
|
922
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
|
|
923
|
+
"tags": ["Éd. image", "Image vers image"],
|
|
924
|
+
"models": ["Flux", "BFL"],
|
|
925
|
+
"date": "2025-06-26",
|
|
926
|
+
"size": 17641578168,
|
|
927
|
+
"vram": 19327352832,
|
|
928
|
+
"usage": 866
|
|
736
929
|
},
|
|
737
930
|
{
|
|
738
|
-
"name": "
|
|
739
|
-
"title": "
|
|
931
|
+
"name": "api_luma_photon_i2i",
|
|
932
|
+
"title": "Luma Photon: Image vers Image",
|
|
933
|
+
"description": "Guider la génération d'images en utilisant une combinaison d'images et de prompt.",
|
|
740
934
|
"mediaType": "image",
|
|
741
935
|
"mediaSubtype": "webp",
|
|
742
936
|
"thumbnailVariant": "compareSlider",
|
|
743
|
-
"
|
|
744
|
-
"
|
|
745
|
-
"
|
|
746
|
-
"
|
|
747
|
-
"
|
|
748
|
-
"
|
|
937
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
938
|
+
"models": ["Luma"],
|
|
939
|
+
"date": "2025-03-01",
|
|
940
|
+
"openSource": false,
|
|
941
|
+
"size": 0,
|
|
942
|
+
"vram": 0,
|
|
943
|
+
"usage": 101
|
|
749
944
|
},
|
|
750
945
|
{
|
|
751
|
-
"name": "
|
|
752
|
-
"title": "
|
|
946
|
+
"name": "api_luma_photon_style_ref",
|
|
947
|
+
"title": "Luma Photon: Référence de Style",
|
|
948
|
+
"description": "Générer des images en mélangeant des références de style avec un contrôle précis en utilisant Luma Photon.",
|
|
753
949
|
"mediaType": "image",
|
|
754
950
|
"mediaSubtype": "webp",
|
|
755
951
|
"thumbnailVariant": "compareSlider",
|
|
756
|
-
"
|
|
757
|
-
"
|
|
758
|
-
"
|
|
759
|
-
"
|
|
760
|
-
"
|
|
761
|
-
"
|
|
952
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
953
|
+
"models": ["Luma"],
|
|
954
|
+
"date": "2025-03-01",
|
|
955
|
+
"openSource": false,
|
|
956
|
+
"size": 0,
|
|
957
|
+
"vram": 0,
|
|
958
|
+
"usage": 79
|
|
762
959
|
},
|
|
763
960
|
{
|
|
764
|
-
"name": "
|
|
765
|
-
"title": "
|
|
961
|
+
"name": "api_recraft_image_gen_with_color_control",
|
|
962
|
+
"title": "Recraft: Génération d'Image avec Contrôle Couleur",
|
|
963
|
+
"description": "Générer des images avec des palettes de couleurs personnalisées et des visuels spécifiques à la marque en utilisant Recraft.",
|
|
766
964
|
"mediaType": "image",
|
|
767
965
|
"mediaSubtype": "webp",
|
|
768
|
-
"
|
|
769
|
-
"
|
|
770
|
-
"tags": ["Texte vers image", "Image"],
|
|
771
|
-
"models": ["SD3.5", "Stability"],
|
|
966
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
967
|
+
"models": ["Recraft"],
|
|
772
968
|
"date": "2025-03-01",
|
|
773
|
-
"
|
|
969
|
+
"openSource": false,
|
|
970
|
+
"size": 0,
|
|
971
|
+
"vram": 0,
|
|
972
|
+
"usage": 3
|
|
774
973
|
},
|
|
775
974
|
{
|
|
776
|
-
"name": "
|
|
777
|
-
"title": "
|
|
975
|
+
"name": "api_recraft_image_gen_with_style_control",
|
|
976
|
+
"title": "Recraft: Génération d'Image avec Contrôle Style",
|
|
977
|
+
"description": "Contrôler le style avec des exemples visuels, aligner le positionnement et affiner les objets. Stocker et partager des styles pour une cohérence de marque parfaite.",
|
|
778
978
|
"mediaType": "image",
|
|
779
979
|
"mediaSubtype": "webp",
|
|
780
|
-
"
|
|
781
|
-
"
|
|
782
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
783
|
-
"tags": ["Image vers image", "Image", "ControlNet"],
|
|
784
|
-
"models": ["SD3.5", "Stability"],
|
|
980
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
981
|
+
"models": ["Recraft"],
|
|
785
982
|
"date": "2025-03-01",
|
|
786
|
-
"
|
|
983
|
+
"openSource": false,
|
|
984
|
+
"size": 0,
|
|
985
|
+
"vram": 0,
|
|
986
|
+
"usage": 6
|
|
787
987
|
},
|
|
788
988
|
{
|
|
789
|
-
"name": "
|
|
790
|
-
"title": "
|
|
989
|
+
"name": "api_recraft_vector_gen",
|
|
990
|
+
"title": "Recraft: Génération Vectorielle",
|
|
991
|
+
"description": "Générer des images vectorielles de haute qualité à partir de prompts textuels en utilisant le générateur AI vectoriel de Recraft.",
|
|
791
992
|
"mediaType": "image",
|
|
792
993
|
"mediaSubtype": "webp",
|
|
793
|
-
"
|
|
794
|
-
"
|
|
795
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
796
|
-
"tags": ["Image vers image", "Image", "ControlNet"],
|
|
797
|
-
"models": ["SD3.5", "Stability"],
|
|
994
|
+
"tags": ["Texte vers image", "Image", "API", "Vectoriel"],
|
|
995
|
+
"models": ["Recraft"],
|
|
798
996
|
"date": "2025-03-01",
|
|
799
|
-
"
|
|
997
|
+
"openSource": false,
|
|
998
|
+
"size": 0,
|
|
999
|
+
"vram": 0,
|
|
1000
|
+
"usage": 16
|
|
800
1001
|
},
|
|
801
1002
|
{
|
|
802
|
-
"name": "
|
|
803
|
-
"title": "
|
|
1003
|
+
"name": "api_ideogram_v3_t2i",
|
|
1004
|
+
"title": "Ideogram V3: Texte vers Image",
|
|
1005
|
+
"description": "Générer des images de qualité professionnelle avec un excellent alignement des prompts, du photoréalisme et un rendu de texte en utilisant Ideogram V3.",
|
|
804
1006
|
"mediaType": "image",
|
|
805
1007
|
"mediaSubtype": "webp",
|
|
806
|
-
"
|
|
807
|
-
"
|
|
808
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
809
|
-
"tags": ["Image vers image", "Image"],
|
|
810
|
-
"models": ["SD3.5", "Stability"],
|
|
1008
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1009
|
+
"models": ["Ideogram"],
|
|
811
1010
|
"date": "2025-03-01",
|
|
812
|
-
"
|
|
1011
|
+
"openSource": false,
|
|
1012
|
+
"size": 0,
|
|
1013
|
+
"vram": 0,
|
|
1014
|
+
"usage": 8
|
|
813
1015
|
},
|
|
814
1016
|
{
|
|
815
|
-
"name": "
|
|
816
|
-
"title": "
|
|
1017
|
+
"name": "api_openai_image_1_t2i",
|
|
1018
|
+
"title": "OpenAI: GPT-Image-1 Texte vers Image",
|
|
1019
|
+
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI GPT Image 1.",
|
|
817
1020
|
"mediaType": "image",
|
|
818
1021
|
"mediaSubtype": "webp",
|
|
819
|
-
"
|
|
820
|
-
"
|
|
821
|
-
"tags": ["Texte vers image", "Image"],
|
|
822
|
-
"models": ["SDXL", "Stability"],
|
|
1022
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1023
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
823
1024
|
"date": "2025-03-01",
|
|
824
|
-
"
|
|
1025
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1026
|
+
"openSource": false,
|
|
1027
|
+
"size": 0,
|
|
1028
|
+
"vram": 0,
|
|
1029
|
+
"usage": 9
|
|
825
1030
|
},
|
|
826
1031
|
{
|
|
827
|
-
"name": "
|
|
828
|
-
"title": "
|
|
1032
|
+
"name": "api_openai_image_1_i2i",
|
|
1033
|
+
"title": "OpenAI: GPT-Image-1 Image vers Image",
|
|
1034
|
+
"description": "Générer des images à partir d'images d'entrée en utilisant l'API OpenAI GPT Image 1.",
|
|
829
1035
|
"mediaType": "image",
|
|
830
1036
|
"mediaSubtype": "webp",
|
|
831
|
-
"
|
|
832
|
-
"
|
|
833
|
-
"
|
|
834
|
-
"models": ["SDXL", "Stability"],
|
|
1037
|
+
"thumbnailVariant": "compareSlider",
|
|
1038
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
1039
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
835
1040
|
"date": "2025-03-01",
|
|
836
|
-
"
|
|
1041
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1042
|
+
"openSource": false,
|
|
1043
|
+
"size": 0,
|
|
1044
|
+
"vram": 0,
|
|
1045
|
+
"usage": 76
|
|
837
1046
|
},
|
|
838
1047
|
{
|
|
839
|
-
"name": "
|
|
840
|
-
"title": "
|
|
1048
|
+
"name": "api_openai_image_1_inpaint",
|
|
1049
|
+
"title": "OpenAI: GPT-Image-1 Inpainting",
|
|
1050
|
+
"description": "Éditer des images en utilisant l'inpainting avec l'API OpenAI GPT Image 1.",
|
|
841
1051
|
"mediaType": "image",
|
|
842
1052
|
"mediaSubtype": "webp",
|
|
843
|
-
"
|
|
844
|
-
"
|
|
845
|
-
"
|
|
846
|
-
"models": ["SDXL", "Stability"],
|
|
1053
|
+
"thumbnailVariant": "compareSlider",
|
|
1054
|
+
"tags": ["Inpainting", "Image", "API"],
|
|
1055
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
847
1056
|
"date": "2025-03-01",
|
|
848
|
-
"
|
|
1057
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1058
|
+
"openSource": false,
|
|
1059
|
+
"size": 0,
|
|
1060
|
+
"vram": 0,
|
|
1061
|
+
"usage": 21
|
|
849
1062
|
},
|
|
850
1063
|
{
|
|
851
|
-
"name": "
|
|
852
|
-
"title": "
|
|
1064
|
+
"name": "api_openai_image_1_multi_inputs",
|
|
1065
|
+
"title": "OpenAI: GPT-Image-1 Multi Entrées",
|
|
1066
|
+
"description": "Générer des images à partir de plusieurs entrées en utilisant l'API OpenAI GPT Image 1.",
|
|
853
1067
|
"mediaType": "image",
|
|
854
1068
|
"mediaSubtype": "webp",
|
|
855
|
-
"
|
|
856
|
-
"
|
|
857
|
-
"
|
|
858
|
-
"models": ["SDXL", "Stability"],
|
|
1069
|
+
"thumbnailVariant": "compareSlider",
|
|
1070
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1071
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
859
1072
|
"date": "2025-03-01",
|
|
860
|
-
"
|
|
1073
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1074
|
+
"openSource": false,
|
|
1075
|
+
"size": 0,
|
|
1076
|
+
"vram": 0,
|
|
1077
|
+
"usage": 5
|
|
861
1078
|
},
|
|
862
1079
|
{
|
|
863
|
-
"name": "
|
|
864
|
-
"title": "
|
|
865
|
-
"
|
|
866
|
-
"mediaSubtype": "webp",
|
|
867
|
-
"thumbnailVariant": "compareSlider",
|
|
868
|
-
"description": "Exécuter Lotus Depth dans ComfyUI pour une estimation de profondeur monoculaire efficace zero-shot avec une haute rétention de détails.",
|
|
869
|
-
"tags": ["Image", "Texte vers image"],
|
|
870
|
-
"models": ["SD1.5", "Stability"],
|
|
871
|
-
"date": "2025-05-21",
|
|
872
|
-
"size": 2072321720
|
|
873
|
-
}
|
|
874
|
-
]
|
|
875
|
-
},
|
|
876
|
-
{
|
|
877
|
-
"moduleName": "default",
|
|
878
|
-
"type": "video",
|
|
879
|
-
"category": "GENERATION TYPE",
|
|
880
|
-
"icon": "icon-[lucide--film]",
|
|
881
|
-
"title": "Video",
|
|
882
|
-
"templates": [
|
|
883
|
-
{
|
|
884
|
-
"name": "video_wan2_2_14B_t2v",
|
|
885
|
-
"title": "Wan 2.2 14B Texte vers Vidéo",
|
|
886
|
-
"description": "Générer des vidéos de haute qualité à partir de prompts textuels avec un contrôle esthétique cinématographique et une génération de mouvement dynamique en utilisant Wan 2.2.",
|
|
1080
|
+
"name": "api_openai_dall_e_2_t2i",
|
|
1081
|
+
"title": "OpenAI: Dall-E 2 Texte vers Image",
|
|
1082
|
+
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI Dall-E 2.",
|
|
887
1083
|
"mediaType": "image",
|
|
888
1084
|
"mediaSubtype": "webp",
|
|
889
|
-
"
|
|
890
|
-
"
|
|
891
|
-
"
|
|
892
|
-
"
|
|
893
|
-
"
|
|
1085
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1086
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1087
|
+
"date": "2025-03-01",
|
|
1088
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-2",
|
|
1089
|
+
"openSource": false,
|
|
1090
|
+
"size": 0,
|
|
1091
|
+
"vram": 0,
|
|
1092
|
+
"usage": 4
|
|
894
1093
|
},
|
|
895
1094
|
{
|
|
896
|
-
"name": "
|
|
897
|
-
"title": "
|
|
898
|
-
"description": "
|
|
1095
|
+
"name": "api_openai_dall_e_2_inpaint",
|
|
1096
|
+
"title": "OpenAI: Dall-E 2 Inpainting",
|
|
1097
|
+
"description": "Éditer des images en utilisant l'inpainting avec l'API OpenAI Dall-E 2.",
|
|
899
1098
|
"mediaType": "image",
|
|
900
1099
|
"mediaSubtype": "webp",
|
|
901
|
-
"thumbnailVariant": "
|
|
902
|
-
"
|
|
903
|
-
"
|
|
904
|
-
"
|
|
905
|
-
"
|
|
906
|
-
"
|
|
1100
|
+
"thumbnailVariant": "compareSlider",
|
|
1101
|
+
"tags": ["Inpainting", "Image", "API"],
|
|
1102
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1103
|
+
"date": "2025-03-01",
|
|
1104
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-2",
|
|
1105
|
+
"openSource": false,
|
|
1106
|
+
"size": 0,
|
|
1107
|
+
"vram": 0,
|
|
1108
|
+
"usage": 12
|
|
907
1109
|
},
|
|
908
1110
|
{
|
|
909
|
-
"name": "
|
|
910
|
-
"title": "
|
|
911
|
-
"description": "Générer des
|
|
1111
|
+
"name": "api_openai_dall_e_3_t2i",
|
|
1112
|
+
"title": "OpenAI: Dall-E 3 Texte vers Image",
|
|
1113
|
+
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI Dall-E 3.",
|
|
912
1114
|
"mediaType": "image",
|
|
913
1115
|
"mediaSubtype": "webp",
|
|
914
|
-
"
|
|
915
|
-
"
|
|
916
|
-
"
|
|
917
|
-
"
|
|
918
|
-
"
|
|
919
|
-
"size":
|
|
1116
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1117
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1118
|
+
"date": "2025-03-01",
|
|
1119
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-3",
|
|
1120
|
+
"openSource": false,
|
|
1121
|
+
"size": 0,
|
|
1122
|
+
"vram": 0,
|
|
1123
|
+
"usage": 33
|
|
920
1124
|
},
|
|
921
1125
|
{
|
|
922
|
-
"name": "
|
|
923
|
-
"title": "
|
|
924
|
-
"description": "Cadre unifié d'animation et de remplacement de personnages avec réplication précise des mouvements et expressions。",
|
|
1126
|
+
"name": "image_chroma1_radiance_text_to_image",
|
|
1127
|
+
"title": "Chroma1 Radiance Texte vers Image",
|
|
925
1128
|
"mediaType": "image",
|
|
926
1129
|
"mediaSubtype": "webp",
|
|
927
|
-
"
|
|
928
|
-
"tags": ["
|
|
929
|
-
"models": ["
|
|
930
|
-
"date": "2025-09-
|
|
931
|
-
"size":
|
|
1130
|
+
"description": "Chroma1-Radiance travaille directement avec les pixels d'image au lieu des latents compressés, offrant des images de meilleure qualité avec moins d'artefacts et de distorsion.",
|
|
1131
|
+
"tags": ["Texte vers image", "Image"],
|
|
1132
|
+
"models": ["Chroma"],
|
|
1133
|
+
"date": "2025-09-18",
|
|
1134
|
+
"size": 23622320128,
|
|
1135
|
+
"vram": 23622320128,
|
|
1136
|
+
"usage": 1149
|
|
932
1137
|
},
|
|
933
1138
|
{
|
|
934
|
-
"name": "
|
|
935
|
-
"title": "
|
|
936
|
-
"description": "Générez des vidéos 720p de haute qualité à partir de prompts textuels, avec un contrôle cinématographique de la caméra, des expressions émotionnelles et une simulation physique. Prend en charge plusieurs styles dont réaliste, anime et rendu 3D du texte.",
|
|
1139
|
+
"name": "image_chroma_text_to_image",
|
|
1140
|
+
"title": "Chroma texte vers image",
|
|
937
1141
|
"mediaType": "image",
|
|
938
1142
|
"mediaSubtype": "webp",
|
|
939
|
-
"
|
|
940
|
-
"
|
|
941
|
-
"
|
|
942
|
-
"
|
|
1143
|
+
"description": "Chroma est modifié à partir de Flux et présente quelques changements dans l'architecture.",
|
|
1144
|
+
"tags": ["Texte vers image", "Image"],
|
|
1145
|
+
"models": ["Chroma", "Flux"],
|
|
1146
|
+
"date": "2025-06-04",
|
|
1147
|
+
"size": 23289460163,
|
|
1148
|
+
"vram": 15569256448,
|
|
1149
|
+
"usage": 1423
|
|
943
1150
|
},
|
|
944
1151
|
{
|
|
945
|
-
"name": "
|
|
946
|
-
"title": "
|
|
947
|
-
"description": "Animez des images fixes en vidéos dynamiques avec des mouvements précis et un contrôle de caméra. Préserve la cohérence visuelle tout en donnant vie aux photos et illustrations grâce à des mouvements fluides et naturels.",
|
|
1152
|
+
"name": "image_newbieimage_exp0_1-t2i",
|
|
1153
|
+
"title": "NewBie Exp0.1 : Génération d’anime",
|
|
948
1154
|
"mediaType": "image",
|
|
949
1155
|
"mediaSubtype": "webp",
|
|
950
|
-
"
|
|
951
|
-
"
|
|
952
|
-
"
|
|
953
|
-
"
|
|
1156
|
+
"description": "Générez des images d’anime détaillées avec NewBie Exp0.1, prompts XML pour scènes multi-personnages et contrôle d’attributs.",
|
|
1157
|
+
"tags": ["Texte vers image", "Image", "Anime"],
|
|
1158
|
+
"models": ["NewBie"],
|
|
1159
|
+
"date": "2025-12-19",
|
|
1160
|
+
"size": 16181289287,
|
|
1161
|
+
"vram": 16181289287
|
|
954
1162
|
},
|
|
955
1163
|
{
|
|
956
|
-
"name": "
|
|
957
|
-
"title": "
|
|
958
|
-
"description": "Transformer des images statiques et de l'audio en vidéos dynamiques avec une synchronisation parfaite et une génération au niveau de la minute.",
|
|
1164
|
+
"name": "image_netayume_lumina_t2i",
|
|
1165
|
+
"title": "NetaYume Lumina Texte vers Image",
|
|
959
1166
|
"mediaType": "image",
|
|
960
1167
|
"mediaSubtype": "webp",
|
|
961
|
-
"
|
|
962
|
-
"tags": ["
|
|
963
|
-
"models": ["
|
|
964
|
-
"date": "2025-
|
|
965
|
-
"size":
|
|
1168
|
+
"description": "Génération d'images de style anime de haute qualité avec compréhension améliorée des personnages et textures détaillées. Affinée à partir de Neta Lumina sur l'ensemble de données Danbooru.",
|
|
1169
|
+
"tags": ["Texte vers image", "Image", "Anime"],
|
|
1170
|
+
"models": ["OmniGen"],
|
|
1171
|
+
"date": "2025-10-10",
|
|
1172
|
+
"size": 10619306639,
|
|
1173
|
+
"vram": 10619306639,
|
|
1174
|
+
"usage": 1536
|
|
966
1175
|
},
|
|
967
1176
|
{
|
|
968
|
-
"name": "
|
|
969
|
-
"title": "
|
|
970
|
-
"description": "Générez des vidéos basées sur l'audio, l'image et le texte, en préservant la synchronisation labiale des personnages.",
|
|
1177
|
+
"name": "image_flux.1_fill_dev_OneReward",
|
|
1178
|
+
"title": "Flux.1 Dev OneReward",
|
|
971
1179
|
"mediaType": "image",
|
|
972
1180
|
"mediaSubtype": "webp",
|
|
973
|
-
"
|
|
974
|
-
"
|
|
1181
|
+
"thumbnailVariant": "compareSlider",
|
|
1182
|
+
"description": "Supports various tasks such as image inpainting, outpainting, and object removal",
|
|
1183
|
+
"tags": ["Inpainting", "Outpainting"],
|
|
1184
|
+
"models": ["Flux", "BFL"],
|
|
975
1185
|
"date": "2025-09-21",
|
|
976
|
-
"size":
|
|
1186
|
+
"size": 29001766666,
|
|
1187
|
+
"vram": 21474836480,
|
|
1188
|
+
"usage": 368
|
|
977
1189
|
},
|
|
978
1190
|
{
|
|
979
|
-
"name": "
|
|
980
|
-
"title": "
|
|
981
|
-
"description": "Générez des vidéos à partir des images de début et de fin avec Wan 2.2 Fun Inp.",
|
|
1191
|
+
"name": "flux_dev_checkpoint_example",
|
|
1192
|
+
"title": "Flux Dev fp8",
|
|
982
1193
|
"mediaType": "image",
|
|
983
1194
|
"mediaSubtype": "webp",
|
|
984
|
-
"
|
|
985
|
-
"
|
|
986
|
-
"
|
|
987
|
-
"
|
|
988
|
-
"
|
|
1195
|
+
"description": "Générer des images en utilisant la version quantifiée Flux Dev fp8. Convient aux appareils avec une VRAM limitée, ne nécessite qu'un seul fichier de modèle, mais la qualité de l'image est légèrement inférieure à la version complète.",
|
|
1196
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1197
|
+
"tags": ["Texte vers image", "Image"],
|
|
1198
|
+
"models": ["Flux", "BFL"],
|
|
1199
|
+
"date": "2025-03-01",
|
|
1200
|
+
"size": 17244293693,
|
|
1201
|
+
"vram": 18253611008,
|
|
1202
|
+
"usage": 310
|
|
989
1203
|
},
|
|
990
1204
|
{
|
|
991
|
-
"name": "
|
|
992
|
-
"title": "
|
|
993
|
-
"description": "
|
|
1205
|
+
"name": "flux1_dev_uso_reference_image_gen",
|
|
1206
|
+
"title": "Génération d'images de référence Flux.1 Dev USO",
|
|
1207
|
+
"description": "Utilisez des images de référence pour contrôler à la fois le style et le sujet : conservez le visage de votre personnage tout en changeant de style artistique, ou appliquez des styles artistiques à de nouvelles scènes",
|
|
1208
|
+
"thumbnailVariant": "hoverDissolve",
|
|
994
1209
|
"mediaType": "image",
|
|
995
1210
|
"mediaSubtype": "webp",
|
|
996
|
-
"
|
|
997
|
-
"
|
|
998
|
-
"
|
|
999
|
-
"
|
|
1000
|
-
"size":
|
|
1211
|
+
"tags": ["Image vers image", "Image"],
|
|
1212
|
+
"models": ["Flux", "BFL"],
|
|
1213
|
+
"date": "2025-09-02",
|
|
1214
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
|
|
1215
|
+
"size": 18597208392,
|
|
1216
|
+
"vram": 19864223744,
|
|
1217
|
+
"usage": 1624
|
|
1001
1218
|
},
|
|
1002
1219
|
{
|
|
1003
|
-
"name": "
|
|
1004
|
-
"title": "
|
|
1005
|
-
"description": "Générer des vidéos avec des contrôles de mouvement de caméra incluant le panoramique, le zoom et la rotation en utilisant Wan 2.2 Fun Camera Control.",
|
|
1220
|
+
"name": "flux_schnell",
|
|
1221
|
+
"title": "Flux Schnell fp8",
|
|
1006
1222
|
"mediaType": "image",
|
|
1007
1223
|
"mediaSubtype": "webp",
|
|
1008
|
-
"
|
|
1009
|
-
"
|
|
1010
|
-
"
|
|
1011
|
-
"
|
|
1012
|
-
"
|
|
1224
|
+
"description": "Générer rapidement des images avec la version quantifiée Flux Schnell fp8. Idéal pour le matériel d'entrée de gamme, ne nécessite que 4 étapes pour générer des images.",
|
|
1225
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1226
|
+
"tags": ["Texte vers image", "Image"],
|
|
1227
|
+
"models": ["Flux", "BFL"],
|
|
1228
|
+
"date": "2025-03-01",
|
|
1229
|
+
"size": 17233556275,
|
|
1230
|
+
"vram": 18253611008,
|
|
1231
|
+
"usage": 99
|
|
1013
1232
|
},
|
|
1014
1233
|
{
|
|
1015
|
-
"name": "
|
|
1016
|
-
"title": "
|
|
1017
|
-
"description": "Générer des vidéos à partir de texte ou d'images en utilisant le modèle hybride Wan 2.2 5B",
|
|
1234
|
+
"name": "flux1_krea_dev",
|
|
1235
|
+
"title": "Flux.1 Krea Dev",
|
|
1018
1236
|
"mediaType": "image",
|
|
1019
1237
|
"mediaSubtype": "webp",
|
|
1020
|
-
"
|
|
1021
|
-
"
|
|
1022
|
-
"
|
|
1023
|
-
"
|
|
1024
|
-
"
|
|
1238
|
+
"description": "Un modèle FLUX affiné poussant le photoréalisme à son maximum",
|
|
1239
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
|
|
1240
|
+
"tags": ["Texte vers image", "Image"],
|
|
1241
|
+
"models": ["Flux", "BFL"],
|
|
1242
|
+
"date": "2025-07-31",
|
|
1243
|
+
"size": 22269405430,
|
|
1244
|
+
"vram": 23085449216,
|
|
1245
|
+
"usage": 1160
|
|
1025
1246
|
},
|
|
1026
1247
|
{
|
|
1027
|
-
"name": "
|
|
1028
|
-
"title": "
|
|
1029
|
-
"description": "Inpainting vidéo efficace à partir des images de début et de fin. Le modèle 5B offre des itérations rapides pour tester les flux de travail.",
|
|
1248
|
+
"name": "flux_dev_full_text_to_image",
|
|
1249
|
+
"title": "Flux Dev texte vers image complet",
|
|
1030
1250
|
"mediaType": "image",
|
|
1031
1251
|
"mediaSubtype": "webp",
|
|
1032
|
-
"
|
|
1033
|
-
"
|
|
1034
|
-
"
|
|
1035
|
-
"
|
|
1252
|
+
"description": "Générer des images de haute qualité avec la version complète de Flux Dev. Nécessite plus de VRAM et plusieurs fichiers de modèles, mais offre la meilleure capacité de suivi des prompts et la qualité d'image.",
|
|
1253
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1254
|
+
"tags": ["Texte vers image", "Image"],
|
|
1255
|
+
"models": ["Flux", "BFL"],
|
|
1256
|
+
"date": "2025-03-01",
|
|
1257
|
+
"size": 34177202258,
|
|
1258
|
+
"vram": 23622320128,
|
|
1259
|
+
"usage": 309
|
|
1036
1260
|
},
|
|
1037
1261
|
{
|
|
1038
|
-
"name": "
|
|
1039
|
-
"title": "
|
|
1040
|
-
"description": "Contrôle vidéo multi-conditions avec guidance par pose, profondeur et contours. Taille compacte 5B pour un développement expérimental.",
|
|
1262
|
+
"name": "flux_schnell_full_text_to_image",
|
|
1263
|
+
"title": "Flux Schnell texte vers image complet",
|
|
1041
1264
|
"mediaType": "image",
|
|
1042
1265
|
"mediaSubtype": "webp",
|
|
1043
|
-
"
|
|
1044
|
-
"
|
|
1045
|
-
"
|
|
1046
|
-
"
|
|
1266
|
+
"description": "Générer des images rapidement avec la version complète de Flux Schnell. Utilise la licence Apache2.0, ne nécessite que 4 étapes pour générer des images tout en maintenant une bonne qualité d'image.",
|
|
1267
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1268
|
+
"tags": ["Texte vers image", "Image"],
|
|
1269
|
+
"models": ["Flux", "BFL"],
|
|
1270
|
+
"date": "2025-03-01",
|
|
1271
|
+
"size": 34155727421,
|
|
1272
|
+
"vram": 34155727421,
|
|
1273
|
+
"usage": 28
|
|
1047
1274
|
},
|
|
1048
1275
|
{
|
|
1049
|
-
"name": "
|
|
1050
|
-
"title": "
|
|
1051
|
-
"description": "Transformer des descriptions textuelles en vidéos de haute qualité. Prend en charge à la fois 480p et 720p avec le modèle VACE-14B.",
|
|
1276
|
+
"name": "flux_fill_inpaint_example",
|
|
1277
|
+
"title": "Flux Inpainting",
|
|
1052
1278
|
"mediaType": "image",
|
|
1053
1279
|
"mediaSubtype": "webp",
|
|
1054
|
-
"
|
|
1055
|
-
"
|
|
1056
|
-
"
|
|
1057
|
-
"
|
|
1058
|
-
"
|
|
1280
|
+
"description": "Combler les parties manquantes des images en utilisant l'inpainting Flux.",
|
|
1281
|
+
"thumbnailVariant": "compareSlider",
|
|
1282
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
1283
|
+
"tags": ["Image vers image", "Inpainting", "Image"],
|
|
1284
|
+
"models": ["Flux", "BFL"],
|
|
1285
|
+
"date": "2025-03-01",
|
|
1286
|
+
"size": 10372346020,
|
|
1287
|
+
"vram": 10372346020,
|
|
1288
|
+
"usage": 437
|
|
1059
1289
|
},
|
|
1060
1290
|
{
|
|
1061
|
-
"name": "
|
|
1062
|
-
"title": "
|
|
1063
|
-
"description": "Créer des vidéos qui correspondent au style et au contenu d'une image de référence. Parfait pour la génération de vidéos cohérentes en style.",
|
|
1291
|
+
"name": "flux_fill_outpaint_example",
|
|
1292
|
+
"title": "Flux Outpainting",
|
|
1064
1293
|
"mediaType": "image",
|
|
1065
1294
|
"mediaSubtype": "webp",
|
|
1066
|
-
"
|
|
1067
|
-
"
|
|
1068
|
-
"
|
|
1069
|
-
"
|
|
1070
|
-
"
|
|
1295
|
+
"description": "Étendre les images au-delà des limites en utilisant l'outpainting Flux.",
|
|
1296
|
+
"thumbnailVariant": "compareSlider",
|
|
1297
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
1298
|
+
"tags": ["Outpainting", "Image", "Image vers image"],
|
|
1299
|
+
"models": ["Flux", "BFL"],
|
|
1300
|
+
"date": "2025-03-01",
|
|
1301
|
+
"size": 10372346020,
|
|
1302
|
+
"vram": 10372346020,
|
|
1303
|
+
"usage": 443
|
|
1071
1304
|
},
|
|
1072
1305
|
{
|
|
1073
|
-
"name": "
|
|
1074
|
-
"title": "
|
|
1075
|
-
"description": "Générer des vidéos en contrôlant les vidéos d'entrée et les images de référence en utilisant Wan VACE.",
|
|
1306
|
+
"name": "flux_canny_model_example",
|
|
1307
|
+
"title": "Modèle Flux Canny",
|
|
1076
1308
|
"mediaType": "image",
|
|
1077
1309
|
"mediaSubtype": "webp",
|
|
1078
|
-
"
|
|
1079
|
-
"
|
|
1080
|
-
"
|
|
1081
|
-
"
|
|
1082
|
-
"
|
|
1083
|
-
"
|
|
1310
|
+
"description": "Générer des images guidées par la détection de contours en utilisant Flux Canny.",
|
|
1311
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1312
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1313
|
+
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
1314
|
+
"models": ["Flux", "BFL"],
|
|
1315
|
+
"date": "2025-03-01",
|
|
1316
|
+
"size": 34177202258,
|
|
1317
|
+
"vram": 34177202258,
|
|
1318
|
+
"usage": 109
|
|
1084
1319
|
},
|
|
1085
1320
|
{
|
|
1086
|
-
"name": "
|
|
1087
|
-
"title": "
|
|
1088
|
-
"description": "Générer des vidéos étendues en agrandissant la taille de la vidéo en utilisant l'outpainting Wan VACE.",
|
|
1321
|
+
"name": "flux_depth_lora_example",
|
|
1322
|
+
"title": "Flux Depth LoRA",
|
|
1089
1323
|
"mediaType": "image",
|
|
1090
1324
|
"mediaSubtype": "webp",
|
|
1091
|
-
"
|
|
1092
|
-
"
|
|
1093
|
-
"
|
|
1094
|
-
"
|
|
1095
|
-
"
|
|
1096
|
-
"
|
|
1325
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant Flux LoRA.",
|
|
1326
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1327
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1328
|
+
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
1329
|
+
"models": ["Flux", "BFL"],
|
|
1330
|
+
"date": "2025-03-01",
|
|
1331
|
+
"size": 35412005356,
|
|
1332
|
+
"vram": 35412005356,
|
|
1333
|
+
"usage": 223
|
|
1097
1334
|
},
|
|
1098
1335
|
{
|
|
1099
|
-
"name": "
|
|
1100
|
-
"title": "
|
|
1101
|
-
"description": "Générer des transitions vidéo fluides en définissant les images de début et de fin. Prend en charge les séquences d'images clés personnalisées.",
|
|
1336
|
+
"name": "flux_redux_model_example",
|
|
1337
|
+
"title": "Modèle Flux Redux",
|
|
1102
1338
|
"mediaType": "image",
|
|
1103
1339
|
"mediaSubtype": "webp",
|
|
1104
|
-
"
|
|
1105
|
-
"
|
|
1106
|
-
"
|
|
1107
|
-
"
|
|
1108
|
-
"
|
|
1340
|
+
"description": "Générer des images en transférant le style à partir d'images de référence en utilisant Flux Redux.",
|
|
1341
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1342
|
+
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
1343
|
+
"models": ["Flux", "BFL"],
|
|
1344
|
+
"date": "2025-03-01",
|
|
1345
|
+
"size": 35154307318,
|
|
1346
|
+
"vram": 35154307318,
|
|
1347
|
+
"usage": 226
|
|
1109
1348
|
},
|
|
1110
1349
|
{
|
|
1111
|
-
"name": "
|
|
1112
|
-
"title": "
|
|
1113
|
-
"description": "Éditer des régions spécifiques dans les vidéos tout en préservant le contenu environnant. Idéal pour la suppression ou le remplacement d'objets.",
|
|
1350
|
+
"name": "image_omnigen2_t2i",
|
|
1351
|
+
"title": "OmniGen2 Texte vers Image",
|
|
1114
1352
|
"mediaType": "image",
|
|
1115
1353
|
"mediaSubtype": "webp",
|
|
1116
|
-
"
|
|
1117
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
1118
|
-
"tags": ["
|
|
1119
|
-
"models": ["
|
|
1120
|
-
"date": "2025-
|
|
1121
|
-
"size":
|
|
1354
|
+
"description": "Générer des images de haute qualité à partir de prompts textuels en utilisant le modèle multimodal unifié 7B d'OmniGen2 avec une architecture à double chemin.",
|
|
1355
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
1356
|
+
"tags": ["Texte vers image", "Image"],
|
|
1357
|
+
"models": ["OmniGen"],
|
|
1358
|
+
"date": "2025-06-30",
|
|
1359
|
+
"size": 15784004813,
|
|
1360
|
+
"vram": 15784004813,
|
|
1361
|
+
"usage": 165
|
|
1122
1362
|
},
|
|
1123
1363
|
{
|
|
1124
|
-
"name": "
|
|
1125
|
-
"title": "
|
|
1126
|
-
"description": "Générez des vidéos à partir de texte avec support de canal alpha pour des arrière-plans transparents et objets semi-transparents.",
|
|
1364
|
+
"name": "image_omnigen2_image_edit",
|
|
1365
|
+
"title": "Édition d'Image OmniGen2",
|
|
1127
1366
|
"mediaType": "image",
|
|
1128
1367
|
"mediaSubtype": "webp",
|
|
1129
|
-
"
|
|
1130
|
-
"
|
|
1131
|
-
"
|
|
1132
|
-
"
|
|
1368
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1369
|
+
"description": "Éditer des images avec des instructions en langage naturel en utilisant les capacités avancées d'édition d'images d'OmniGen2 et le support de rendu de texte.",
|
|
1370
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
1371
|
+
"tags": ["Éd. image", "Image"],
|
|
1372
|
+
"models": ["OmniGen"],
|
|
1373
|
+
"date": "2025-06-30",
|
|
1374
|
+
"size": 15784004813,
|
|
1375
|
+
"vram": 15784004813,
|
|
1376
|
+
"usage": 145
|
|
1133
1377
|
},
|
|
1134
1378
|
{
|
|
1135
|
-
"name": "
|
|
1136
|
-
"title": "
|
|
1137
|
-
"description": "Génération de vidéo contrôlée par trajectoire.",
|
|
1379
|
+
"name": "hidream_i1_dev",
|
|
1380
|
+
"title": "HiDream I1 Dev",
|
|
1138
1381
|
"mediaType": "image",
|
|
1139
1382
|
"mediaSubtype": "webp",
|
|
1140
|
-
"
|
|
1141
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
1142
|
-
"tags": ["
|
|
1143
|
-
"models": ["
|
|
1144
|
-
"date": "2025-
|
|
1145
|
-
"size":
|
|
1383
|
+
"description": "Générer des images avec HiDream I1 Dev - Version équilibrée avec 28 étapes d'inférence, adaptée au matériel de gamme moyenne.",
|
|
1384
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1385
|
+
"tags": ["Texte vers image", "Image"],
|
|
1386
|
+
"models": ["HiDream"],
|
|
1387
|
+
"date": "2025-04-17",
|
|
1388
|
+
"size": 33318208799,
|
|
1389
|
+
"vram": 33318208799,
|
|
1390
|
+
"usage": 92
|
|
1146
1391
|
},
|
|
1147
1392
|
{
|
|
1148
|
-
"name": "
|
|
1149
|
-
"title": "
|
|
1150
|
-
"description": "Générer des vidéos dynamiques avec des mouvements de caméra cinématographiques en utilisant le modèle Wan 2.1 Fun Camera 1.3B.",
|
|
1393
|
+
"name": "hidream_i1_fast",
|
|
1394
|
+
"title": "HiDream I1 Fast",
|
|
1151
1395
|
"mediaType": "image",
|
|
1152
1396
|
"mediaSubtype": "webp",
|
|
1153
|
-
"
|
|
1154
|
-
"
|
|
1155
|
-
"
|
|
1156
|
-
"
|
|
1157
|
-
"
|
|
1397
|
+
"description": "Générer des images rapidement avec HiDream I1 Fast - Version légère avec 16 étapes d'inférence, idéale pour des aperçus rapides sur du matériel d'entrée de gamme.",
|
|
1398
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1399
|
+
"tags": ["Texte vers image", "Image"],
|
|
1400
|
+
"models": ["HiDream"],
|
|
1401
|
+
"date": "2025-04-17",
|
|
1402
|
+
"size": 24234352968,
|
|
1403
|
+
"vram": 24234352968,
|
|
1404
|
+
"usage": 41
|
|
1158
1405
|
},
|
|
1159
1406
|
{
|
|
1160
|
-
"name": "
|
|
1161
|
-
"title": "
|
|
1162
|
-
"description": "Générer des vidéos de haute qualité avec un contrôle avancé de la caméra en utilisant le modèle 14B complet",
|
|
1407
|
+
"name": "hidream_i1_full",
|
|
1408
|
+
"title": "HiDream I1 Full",
|
|
1163
1409
|
"mediaType": "image",
|
|
1164
1410
|
"mediaSubtype": "webp",
|
|
1165
|
-
"
|
|
1166
|
-
"
|
|
1167
|
-
"
|
|
1168
|
-
"
|
|
1169
|
-
"
|
|
1411
|
+
"description": "Générer des images avec HiDream I1 Full - Version complète avec 50 étapes d'inférence pour une sortie de la plus haute qualité.",
|
|
1412
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1413
|
+
"tags": ["Texte vers image", "Image"],
|
|
1414
|
+
"models": ["HiDream"],
|
|
1415
|
+
"date": "2025-04-17",
|
|
1416
|
+
"size": 24234352968,
|
|
1417
|
+
"vram": 24234352968,
|
|
1418
|
+
"usage": 218
|
|
1170
1419
|
},
|
|
1171
1420
|
{
|
|
1172
|
-
"name": "
|
|
1173
|
-
"title": "
|
|
1174
|
-
"description": "Générer des vidéos à partir de prompts textuels en utilisant Wan 2.1.",
|
|
1421
|
+
"name": "hidream_e1_full",
|
|
1422
|
+
"title": "Édition d'Image HiDream E1",
|
|
1175
1423
|
"mediaType": "image",
|
|
1176
1424
|
"mediaSubtype": "webp",
|
|
1177
|
-
"
|
|
1178
|
-
"
|
|
1179
|
-
"
|
|
1180
|
-
"
|
|
1181
|
-
"
|
|
1425
|
+
"thumbnailVariant": "compareSlider",
|
|
1426
|
+
"description": "Éditer des images avec HiDream E1 - Modèle professionnel d'édition d'images en langage naturel.",
|
|
1427
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
1428
|
+
"tags": ["Éd. image", "Image"],
|
|
1429
|
+
"models": ["HiDream"],
|
|
1430
|
+
"date": "2025-05-01",
|
|
1431
|
+
"size": 34209414513,
|
|
1432
|
+
"vram": 34209414513,
|
|
1433
|
+
"usage": 69
|
|
1182
1434
|
},
|
|
1183
1435
|
{
|
|
1184
|
-
"name": "
|
|
1185
|
-
"title": "
|
|
1186
|
-
"description": "Générer des vidéos à partir d'images en utilisant Wan 2.1.",
|
|
1436
|
+
"name": "sd3.5_simple_example",
|
|
1437
|
+
"title": "SD3.5 Simple",
|
|
1187
1438
|
"mediaType": "image",
|
|
1188
1439
|
"mediaSubtype": "webp",
|
|
1189
|
-
"
|
|
1190
|
-
"
|
|
1191
|
-
"
|
|
1440
|
+
"description": "Générer des images en utilisant SD 3.5.",
|
|
1441
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
|
|
1442
|
+
"tags": ["Texte vers image", "Image"],
|
|
1443
|
+
"models": ["SD3.5", "Stability"],
|
|
1192
1444
|
"date": "2025-03-01",
|
|
1193
|
-
"size":
|
|
1445
|
+
"size": 14935748772,
|
|
1446
|
+
"vram": 14935748772,
|
|
1447
|
+
"usage": 490
|
|
1194
1448
|
},
|
|
1195
1449
|
{
|
|
1196
|
-
"name": "
|
|
1197
|
-
"title": "
|
|
1198
|
-
"description": "Générer des vidéos à partir des images de début et de fin en utilisant l'inpainting Wan 2.1.",
|
|
1450
|
+
"name": "sd3.5_large_canny_controlnet_example",
|
|
1451
|
+
"title": "SD3.5 Large Canny ControlNet",
|
|
1199
1452
|
"mediaType": "image",
|
|
1200
1453
|
"mediaSubtype": "webp",
|
|
1201
|
-
"
|
|
1202
|
-
"
|
|
1203
|
-
"
|
|
1204
|
-
"
|
|
1205
|
-
"
|
|
1454
|
+
"description": "Générer des images guidées par la détection de contours en utilisant SD 3.5 Canny ControlNet.",
|
|
1455
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1456
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1457
|
+
"tags": ["Image vers image", "Image", "ControlNet"],
|
|
1458
|
+
"models": ["SD3.5", "Stability"],
|
|
1459
|
+
"date": "2025-03-01",
|
|
1460
|
+
"size": 23590107873,
|
|
1461
|
+
"vram": 23590107873,
|
|
1462
|
+
"usage": 113
|
|
1206
1463
|
},
|
|
1207
1464
|
{
|
|
1208
|
-
"name": "
|
|
1209
|
-
"title": "
|
|
1210
|
-
"description": "Générer des vidéos guidées par des contrôles de pose, de profondeur et de contours en utilisant Wan 2.1 ControlNet.",
|
|
1465
|
+
"name": "sd3.5_large_depth",
|
|
1466
|
+
"title": "SD3.5 Large Profondeur",
|
|
1211
1467
|
"mediaType": "image",
|
|
1212
1468
|
"mediaSubtype": "webp",
|
|
1469
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant SD 3.5.",
|
|
1213
1470
|
"thumbnailVariant": "hoverDissolve",
|
|
1214
|
-
"tutorialUrl": "https://
|
|
1215
|
-
"tags": ["
|
|
1216
|
-
"models": ["
|
|
1217
|
-
"date": "2025-
|
|
1218
|
-
"size":
|
|
1471
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1472
|
+
"tags": ["Image vers image", "Image", "ControlNet"],
|
|
1473
|
+
"models": ["SD3.5", "Stability"],
|
|
1474
|
+
"date": "2025-03-01",
|
|
1475
|
+
"size": 23590107873,
|
|
1476
|
+
"vram": 23590107873,
|
|
1477
|
+
"usage": 95
|
|
1219
1478
|
},
|
|
1220
1479
|
{
|
|
1221
|
-
"name": "
|
|
1222
|
-
"title": "
|
|
1223
|
-
"description": "Générer des vidéos en contrôlant les première et dernière images en utilisant Wan 2.1 FLF2V.",
|
|
1480
|
+
"name": "sd3.5_large_blur",
|
|
1481
|
+
"title": "SD3.5 Large Flou",
|
|
1224
1482
|
"mediaType": "image",
|
|
1225
1483
|
"mediaSubtype": "webp",
|
|
1226
|
-
"
|
|
1227
|
-
"
|
|
1228
|
-
"
|
|
1229
|
-
"
|
|
1230
|
-
"
|
|
1231
|
-
},
|
|
1232
|
-
{
|
|
1233
|
-
"name": "ltxv_text_to_video",
|
|
1234
|
-
"title": "LTXV Texte vers Vidéo",
|
|
1235
|
-
"mediaType": "image",
|
|
1236
|
-
"mediaSubtype": "webp",
|
|
1237
|
-
"description": "Générer des vidéos à partir de prompts textuels.",
|
|
1238
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1239
|
-
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1240
|
-
"models": ["LTXV"],
|
|
1484
|
+
"description": "Générer des images guidées par des images de référence floues en utilisant SD 3.5.",
|
|
1485
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1486
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1487
|
+
"tags": ["Image vers image", "Image"],
|
|
1488
|
+
"models": ["SD3.5", "Stability"],
|
|
1241
1489
|
"date": "2025-03-01",
|
|
1242
|
-
"size":
|
|
1490
|
+
"size": 23590107873,
|
|
1491
|
+
"vram": 23590107873,
|
|
1492
|
+
"usage": 38
|
|
1243
1493
|
},
|
|
1244
1494
|
{
|
|
1245
|
-
"name": "
|
|
1246
|
-
"title": "
|
|
1495
|
+
"name": "sdxl_simple_example",
|
|
1496
|
+
"title": "SDXL Simple",
|
|
1247
1497
|
"mediaType": "image",
|
|
1248
1498
|
"mediaSubtype": "webp",
|
|
1249
|
-
"description": "Générer des
|
|
1250
|
-
"tutorialUrl": "https://
|
|
1251
|
-
"tags": ["
|
|
1252
|
-
"models": ["
|
|
1499
|
+
"description": "Générer des images de haute qualité en utilisant SDXL.",
|
|
1500
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
1501
|
+
"tags": ["Texte vers image", "Image"],
|
|
1502
|
+
"models": ["SDXL", "Stability"],
|
|
1253
1503
|
"date": "2025-03-01",
|
|
1254
|
-
"size":
|
|
1504
|
+
"size": 13013750907,
|
|
1505
|
+
"vram": 13013750907,
|
|
1506
|
+
"usage": 278
|
|
1255
1507
|
},
|
|
1256
1508
|
{
|
|
1257
|
-
"name": "
|
|
1258
|
-
"title": "
|
|
1509
|
+
"name": "sdxl_refiner_prompt_example",
|
|
1510
|
+
"title": "SDXL Affineur de Prompt",
|
|
1259
1511
|
"mediaType": "image",
|
|
1260
1512
|
"mediaSubtype": "webp",
|
|
1261
|
-
"description": "
|
|
1262
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1263
|
-
"tags": ["Texte vers
|
|
1264
|
-
"models": ["
|
|
1513
|
+
"description": "Améliorer les images SDXL en utilisant des modèles de raffinement.",
|
|
1514
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
1515
|
+
"tags": ["Texte vers image", "Image"],
|
|
1516
|
+
"models": ["SDXL", "Stability"],
|
|
1265
1517
|
"date": "2025-03-01",
|
|
1266
|
-
"size":
|
|
1518
|
+
"size": 13013750907,
|
|
1519
|
+
"vram": 13013750907,
|
|
1520
|
+
"usage": 59
|
|
1267
1521
|
},
|
|
1268
1522
|
{
|
|
1269
|
-
"name": "
|
|
1270
|
-
"title": "
|
|
1523
|
+
"name": "sdxl_revision_text_prompts",
|
|
1524
|
+
"title": "SDXL Révision Prompts Texte",
|
|
1271
1525
|
"mediaType": "image",
|
|
1272
1526
|
"mediaSubtype": "webp",
|
|
1273
|
-
"description": "Générer des
|
|
1274
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1275
|
-
"tags": ["Texte vers
|
|
1276
|
-
"models": ["
|
|
1527
|
+
"description": "Générer des images en transférant des concepts à partir d'images de référence en utilisant SDXL Revision.",
|
|
1528
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
|
|
1529
|
+
"tags": ["Texte vers image", "Image"],
|
|
1530
|
+
"models": ["SDXL", "Stability"],
|
|
1277
1531
|
"date": "2025-03-01",
|
|
1278
|
-
"size":
|
|
1532
|
+
"size": 10630044058,
|
|
1533
|
+
"vram": 10630044058,
|
|
1534
|
+
"usage": 67
|
|
1279
1535
|
},
|
|
1280
1536
|
{
|
|
1281
|
-
"name": "
|
|
1282
|
-
"title": "
|
|
1537
|
+
"name": "sdxlturbo_example",
|
|
1538
|
+
"title": "SDXL Turbo",
|
|
1283
1539
|
"mediaType": "image",
|
|
1284
1540
|
"mediaSubtype": "webp",
|
|
1285
|
-
"description": "Générer des
|
|
1286
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1287
|
-
"tags": ["
|
|
1288
|
-
"models": ["
|
|
1541
|
+
"description": "Générer des images en une seule étape en utilisant SDXL Turbo.",
|
|
1542
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
|
|
1543
|
+
"tags": ["Texte vers image", "Image"],
|
|
1544
|
+
"models": ["SDXL", "Stability"],
|
|
1289
1545
|
"date": "2025-03-01",
|
|
1290
|
-
"size":
|
|
1546
|
+
"size": 6936372183,
|
|
1547
|
+
"vram": 6936372183,
|
|
1548
|
+
"usage": 452
|
|
1291
1549
|
},
|
|
1292
1550
|
{
|
|
1293
|
-
"name": "
|
|
1294
|
-
"title": "
|
|
1551
|
+
"name": "image_lotus_depth_v1_1",
|
|
1552
|
+
"title": "Lotus Profondeur",
|
|
1295
1553
|
"mediaType": "image",
|
|
1296
1554
|
"mediaSubtype": "webp",
|
|
1297
|
-
"
|
|
1298
|
-
"
|
|
1299
|
-
"tags": ["Texte vers
|
|
1300
|
-
"models": ["
|
|
1301
|
-
"date": "2025-
|
|
1302
|
-
"size":
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
},
|
|
1306
|
-
{
|
|
1307
|
-
"moduleName": "default",
|
|
1308
|
-
"type": "audio",
|
|
1309
|
-
"category": "GENERATION TYPE",
|
|
1310
|
-
"icon": "icon-[lucide--volume-2]",
|
|
1311
|
-
"title": "Audio",
|
|
1312
|
-
"templates": [
|
|
1313
|
-
{
|
|
1314
|
-
"name": "audio_stable_audio_example",
|
|
1315
|
-
"title": "Audio Stable",
|
|
1316
|
-
"mediaType": "audio",
|
|
1317
|
-
"mediaSubtype": "mp3",
|
|
1318
|
-
"description": "Générer de l'audio à partir de prompts textuels en utilisant Stable Audio.",
|
|
1319
|
-
"tags": ["Texte vers audio", "Audio"],
|
|
1320
|
-
"models": ["Stable Audio", "Stability"],
|
|
1321
|
-
"date": "2025-03-01",
|
|
1322
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
|
|
1323
|
-
"size": 5744518758
|
|
1324
|
-
},
|
|
1325
|
-
{
|
|
1326
|
-
"name": "audio_ace_step_1_t2a_instrumentals",
|
|
1327
|
-
"title": "ACE-Step v1 Texte vers Musique Instrumentale",
|
|
1328
|
-
"mediaType": "audio",
|
|
1329
|
-
"mediaSubtype": "mp3",
|
|
1330
|
-
"description": "Générer de la musique instrumentale à partir de prompts textuels en utilisant ACE-Step v1.",
|
|
1331
|
-
"tags": ["Texte vers audio", "Audio"],
|
|
1332
|
-
"models": ["ACE-Step"],
|
|
1333
|
-
"date": "2025-03-01",
|
|
1334
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1335
|
-
"size": 7698728878
|
|
1336
|
-
},
|
|
1337
|
-
{
|
|
1338
|
-
"name": "audio_ace_step_1_t2a_song",
|
|
1339
|
-
"title": "ACE Step v1 Texte vers Chanson",
|
|
1340
|
-
"mediaType": "audio",
|
|
1341
|
-
"mediaSubtype": "mp3",
|
|
1342
|
-
"description": "Générer des chansons avec des voix à partir de prompts textuels en utilisant ACE-Step v1, prenant en charge la multilingue et la personnalisation du style.",
|
|
1343
|
-
"tags": ["Texte vers audio", "Audio"],
|
|
1344
|
-
"models": ["ACE-Step"],
|
|
1345
|
-
"date": "2025-03-01",
|
|
1346
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1347
|
-
"size": 7698728878
|
|
1348
|
-
},
|
|
1349
|
-
{
|
|
1350
|
-
"name": "audio_ace_step_1_m2m_editing",
|
|
1351
|
-
"title": "ACE Step v1 Édition M2M",
|
|
1352
|
-
"mediaType": "audio",
|
|
1353
|
-
"mediaSubtype": "mp3",
|
|
1354
|
-
"description": "Éditer des chansons existantes pour changer le style et les paroles en utilisant ACE-Step v1 M2M.",
|
|
1355
|
-
"tags": ["Édition audio", "Audio"],
|
|
1356
|
-
"models": ["ACE-Step"],
|
|
1357
|
-
"date": "2025-03-01",
|
|
1358
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1359
|
-
"size": 7698728878
|
|
1555
|
+
"thumbnailVariant": "compareSlider",
|
|
1556
|
+
"description": "Exécuter Lotus Depth dans ComfyUI pour une estimation de profondeur monoculaire efficace zero-shot avec une haute rétention de détails.",
|
|
1557
|
+
"tags": ["Image", "Texte vers image"],
|
|
1558
|
+
"models": ["SD1.5", "Stability"],
|
|
1559
|
+
"date": "2025-05-21",
|
|
1560
|
+
"size": 2072321720,
|
|
1561
|
+
"vram": 2072321720,
|
|
1562
|
+
"usage": 79
|
|
1360
1563
|
}
|
|
1361
1564
|
]
|
|
1362
1565
|
},
|
|
1363
1566
|
{
|
|
1364
1567
|
"moduleName": "default",
|
|
1365
|
-
"type": "
|
|
1366
|
-
"category": "
|
|
1367
|
-
"icon": "icon-[lucide--
|
|
1368
|
-
"title": "
|
|
1568
|
+
"type": "video",
|
|
1569
|
+
"category": "Type de génération",
|
|
1570
|
+
"icon": "icon-[lucide--film]",
|
|
1571
|
+
"title": "Vidéo",
|
|
1369
1572
|
"templates": [
|
|
1370
1573
|
{
|
|
1371
|
-
"name": "
|
|
1372
|
-
"title": "
|
|
1574
|
+
"name": "video_wan2_2_14B_t2v",
|
|
1575
|
+
"title": "Wan 2.2 14B Texte vers Vidéo",
|
|
1576
|
+
"description": "Générer des vidéos de haute qualité à partir de prompts textuels avec un contrôle esthétique cinématographique et une génération de mouvement dynamique en utilisant Wan 2.2.",
|
|
1373
1577
|
"mediaType": "image",
|
|
1374
1578
|
"mediaSubtype": "webp",
|
|
1375
|
-
"
|
|
1376
|
-
"tags": ["
|
|
1377
|
-
"models": ["
|
|
1378
|
-
"date": "2025-
|
|
1379
|
-
"
|
|
1380
|
-
"
|
|
1579
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1580
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1581
|
+
"models": ["Wan2.2", "Wan"],
|
|
1582
|
+
"date": "2025-07-29",
|
|
1583
|
+
"size": 38031935406,
|
|
1584
|
+
"vram": 38031935406,
|
|
1585
|
+
"usage": 2369
|
|
1381
1586
|
},
|
|
1382
1587
|
{
|
|
1383
|
-
"name": "
|
|
1384
|
-
"title": "
|
|
1588
|
+
"name": "video_wan2_2_14B_i2v",
|
|
1589
|
+
"title": "Wan 2.2 14B Image vers Vidéo",
|
|
1590
|
+
"description": "Transformer des images statiques en vidéos dynamiques avec un contrôle précis du mouvement et une préservation du style en utilisant Wan 2.2.",
|
|
1385
1591
|
"mediaType": "image",
|
|
1386
1592
|
"mediaSubtype": "webp",
|
|
1387
|
-
"
|
|
1388
|
-
"
|
|
1389
|
-
"
|
|
1390
|
-
"
|
|
1391
|
-
"
|
|
1392
|
-
"
|
|
1393
|
-
"
|
|
1593
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1594
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1595
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
1596
|
+
"models": ["Wan2.2", "Wan"],
|
|
1597
|
+
"date": "2025-07-29",
|
|
1598
|
+
"size": 38031935406,
|
|
1599
|
+
"vram": 38031935406,
|
|
1600
|
+
"usage": 10317
|
|
1394
1601
|
},
|
|
1395
1602
|
{
|
|
1396
|
-
"name": "
|
|
1397
|
-
"title": "
|
|
1603
|
+
"name": "video_wan2_2_14B_flf2v",
|
|
1604
|
+
"title": "Wan 2.2 14B Première-Dernière Image vers Vidéo",
|
|
1605
|
+
"description": "Générer des transitions vidéo fluides en définissant les images de début et de fin.",
|
|
1398
1606
|
"mediaType": "image",
|
|
1399
1607
|
"mediaSubtype": "webp",
|
|
1400
|
-
"description": "Générer des modèles 3D à partir de vues multiples en utilisant Hunyuan3D 2.0 MV.",
|
|
1401
|
-
"tags": ["3D", "Image vers 3D"],
|
|
1402
|
-
"models": ["Hunyuan3D", "Tencent"],
|
|
1403
|
-
"date": "2025-03-01",
|
|
1404
|
-
"tutorialUrl": "",
|
|
1405
1608
|
"thumbnailVariant": "hoverDissolve",
|
|
1406
|
-
"
|
|
1609
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1610
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
1611
|
+
"models": ["Wan2.2", "Wan"],
|
|
1612
|
+
"date": "2025-08-02",
|
|
1613
|
+
"size": 38031935406,
|
|
1614
|
+
"vram": 38031935406,
|
|
1615
|
+
"usage": 1585
|
|
1407
1616
|
},
|
|
1408
1617
|
{
|
|
1409
|
-
"name": "
|
|
1410
|
-
"title": "
|
|
1618
|
+
"name": "video_wan2_2_14B_animate",
|
|
1619
|
+
"title": "Wan2.2 Animate animation et remplacement de personnages",
|
|
1620
|
+
"description": "Cadre unifié d'animation et de remplacement de personnages avec réplication précise des mouvements et expressions.",
|
|
1411
1621
|
"mediaType": "image",
|
|
1412
1622
|
"mediaSubtype": "webp",
|
|
1413
|
-
"
|
|
1414
|
-
"tags": ["Image vers
|
|
1415
|
-
"models": ["
|
|
1416
|
-
"date": "2025-
|
|
1417
|
-
"
|
|
1418
|
-
"
|
|
1419
|
-
"
|
|
1420
|
-
}
|
|
1421
|
-
]
|
|
1422
|
-
},
|
|
1423
|
-
{
|
|
1424
|
-
"moduleName": "default",
|
|
1425
|
-
"type": "image",
|
|
1426
|
-
"category": "CLOSED SOURCE MODELS",
|
|
1427
|
-
"icon": "icon-[lucide--hand-coins]",
|
|
1428
|
-
"title": "Image API",
|
|
1429
|
-
"templates": [
|
|
1623
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
|
|
1624
|
+
"tags": ["Vidéo", "Image vers vidéo"],
|
|
1625
|
+
"models": ["Wan2.2", "Wan"],
|
|
1626
|
+
"date": "2025-09-22",
|
|
1627
|
+
"size": 27417997476,
|
|
1628
|
+
"vram": 27417997476,
|
|
1629
|
+
"usage": 2141
|
|
1630
|
+
},
|
|
1430
1631
|
{
|
|
1431
|
-
"name": "
|
|
1432
|
-
"title": "
|
|
1433
|
-
"description": "
|
|
1632
|
+
"name": "video_hunyuan_video_1.5_720p_t2v",
|
|
1633
|
+
"title": "Hunyuan Video 1.5 Texte en vidéo",
|
|
1634
|
+
"description": "Générez des vidéos 720p de haute qualité à partir de prompts textuels, avec un contrôle cinématographique de la caméra, des expressions émotionnelles et une simulation physique. Prend en charge plusieurs styles dont réaliste, anime et rendu 3D du texte.",
|
|
1434
1635
|
"mediaType": "image",
|
|
1435
1636
|
"mediaSubtype": "webp",
|
|
1436
|
-
"
|
|
1437
|
-
"
|
|
1438
|
-
"models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
|
|
1637
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1638
|
+
"models": ["Hunyuan Video"],
|
|
1439
1639
|
"date": "2025-11-21",
|
|
1440
|
-
"
|
|
1441
|
-
"
|
|
1442
|
-
"
|
|
1640
|
+
"size": 45384919416,
|
|
1641
|
+
"vram": 45384919416,
|
|
1642
|
+
"usage": 451
|
|
1443
1643
|
},
|
|
1444
1644
|
{
|
|
1445
|
-
"name": "
|
|
1446
|
-
"title": "
|
|
1447
|
-
"description": "
|
|
1645
|
+
"name": "video_hunyuan_video_1.5_720p_i2v",
|
|
1646
|
+
"title": "Hunyuan Video 1.5 Image vers Vidéo",
|
|
1647
|
+
"description": "Animez des images fixes en vidéos dynamiques avec des mouvements précis et un contrôle de caméra. Préserve la cohérence visuelle tout en donnant vie aux photos et illustrations grâce à des mouvements fluides et naturels.",
|
|
1448
1648
|
"mediaType": "image",
|
|
1449
1649
|
"mediaSubtype": "webp",
|
|
1450
|
-
"tags": ["
|
|
1451
|
-
"models": ["
|
|
1650
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
1651
|
+
"models": ["Hunyuan Video"],
|
|
1452
1652
|
"date": "2025-11-21",
|
|
1453
|
-
"
|
|
1454
|
-
"
|
|
1455
|
-
"
|
|
1653
|
+
"size": 45384919416,
|
|
1654
|
+
"vram": 45384919416,
|
|
1655
|
+
"usage": 2150
|
|
1456
1656
|
},
|
|
1457
1657
|
{
|
|
1458
|
-
"name": "
|
|
1459
|
-
"title": "
|
|
1460
|
-
"description": "
|
|
1658
|
+
"name": "video_kandinsky5_i2v",
|
|
1659
|
+
"title": "Kandinsky 5.0 Video Lite Image en Vidéo",
|
|
1660
|
+
"description": "Un modèle léger 2B qui génère des vidéos de haute qualité à partir de prompts en anglais et en russe.",
|
|
1461
1661
|
"mediaType": "image",
|
|
1462
1662
|
"mediaSubtype": "webp",
|
|
1463
|
-
"tags": ["
|
|
1464
|
-
"models": ["
|
|
1465
|
-
"date": "2025-09
|
|
1466
|
-
"
|
|
1467
|
-
"
|
|
1468
|
-
"
|
|
1663
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
1664
|
+
"models": ["Kandinsky"],
|
|
1665
|
+
"date": "2025-12-09",
|
|
1666
|
+
"size": 14710262988,
|
|
1667
|
+
"vram": 14710262988,
|
|
1668
|
+
"usage": 1243
|
|
1469
1669
|
},
|
|
1470
1670
|
{
|
|
1471
|
-
"name": "
|
|
1472
|
-
"title": "
|
|
1473
|
-
"description": "
|
|
1671
|
+
"name": "video_kandinsky5_t2v",
|
|
1672
|
+
"title": "Kandinsky 5.0 Video Lite Texte en Vidéo",
|
|
1673
|
+
"description": "Un modèle léger 2B qui génère des vidéos de haute qualité à partir de prompts en anglais et en russe.",
|
|
1474
1674
|
"mediaType": "image",
|
|
1475
1675
|
"mediaSubtype": "webp",
|
|
1476
|
-
"tags": ["
|
|
1477
|
-
"models": ["
|
|
1478
|
-
"date": "2025-
|
|
1479
|
-
"
|
|
1480
|
-
"
|
|
1481
|
-
"
|
|
1676
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1677
|
+
"models": ["Kandinsky"],
|
|
1678
|
+
"date": "2025-12-09",
|
|
1679
|
+
"size": 14710262988,
|
|
1680
|
+
"vram": 14710262988,
|
|
1681
|
+
"usage": 556
|
|
1482
1682
|
},
|
|
1483
1683
|
{
|
|
1484
|
-
"name": "
|
|
1485
|
-
"title": "
|
|
1486
|
-
"description": "
|
|
1684
|
+
"name": "api_kling2_6_i2v",
|
|
1685
|
+
"title": "Kling2.6 : Transformer des images en vidéo avec audio",
|
|
1686
|
+
"description": "Transformez des images statiques en vidéos dynamiques avec dialogues, chants, effets et ambiance sonore synchronisés",
|
|
1487
1687
|
"mediaType": "image",
|
|
1488
1688
|
"mediaSubtype": "webp",
|
|
1489
|
-
"tags": ["
|
|
1490
|
-
"models": ["
|
|
1491
|
-
"date": "2025-
|
|
1492
|
-
"
|
|
1689
|
+
"tags": ["Image vers vidéo", "Vidéo", "API", "Audio"],
|
|
1690
|
+
"models": ["Kling"],
|
|
1691
|
+
"date": "2025-12-22",
|
|
1692
|
+
"openSource": false,
|
|
1493
1693
|
"size": 0,
|
|
1494
1694
|
"vram": 0
|
|
1495
1695
|
},
|
|
1496
1696
|
{
|
|
1497
|
-
"name": "
|
|
1498
|
-
"title": "
|
|
1499
|
-
"description": "
|
|
1697
|
+
"name": "api_kling2_6_t2v",
|
|
1698
|
+
"title": "Kling2.6 : Générer des vidéos narratives avec audio depuis le texte",
|
|
1699
|
+
"description": "Créez des vidéos racontant vos histoires avec dialogues, musique, effets et ambiance sonore",
|
|
1500
1700
|
"mediaType": "image",
|
|
1501
1701
|
"mediaSubtype": "webp",
|
|
1502
|
-
"
|
|
1503
|
-
"
|
|
1504
|
-
"
|
|
1505
|
-
"
|
|
1506
|
-
"OpenSource": false,
|
|
1702
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API", "Audio"],
|
|
1703
|
+
"models": ["Kling"],
|
|
1704
|
+
"date": "2025-12-22",
|
|
1705
|
+
"openSource": false,
|
|
1507
1706
|
"size": 0,
|
|
1508
1707
|
"vram": 0
|
|
1509
1708
|
},
|
|
1510
1709
|
{
|
|
1511
|
-
"name": "
|
|
1512
|
-
"title": "
|
|
1513
|
-
"description": "
|
|
1710
|
+
"name": "api_openai_sora_video",
|
|
1711
|
+
"title": "Sora 2: Texte et Image vers Vidéo",
|
|
1712
|
+
"description": "Génération vidéo Sora-2 et Sora-2 Pro d'OpenAI avec audio synchronisé.",
|
|
1514
1713
|
"mediaType": "image",
|
|
1515
1714
|
"mediaSubtype": "webp",
|
|
1516
|
-
"
|
|
1517
|
-
"
|
|
1518
|
-
"
|
|
1519
|
-
"
|
|
1520
|
-
"date": "2025-05-29",
|
|
1521
|
-
"OpenSource": false,
|
|
1715
|
+
"tags": ["Image vers vidéo", "Texte vers vidéo", "API"],
|
|
1716
|
+
"models": ["OpenAI"],
|
|
1717
|
+
"date": "2025-10-08",
|
|
1718
|
+
"openSource": false,
|
|
1522
1719
|
"size": 0,
|
|
1523
|
-
"vram": 0
|
|
1720
|
+
"vram": 0,
|
|
1721
|
+
"usage": 765
|
|
1524
1722
|
},
|
|
1525
1723
|
{
|
|
1526
|
-
"name": "
|
|
1527
|
-
"title": "
|
|
1528
|
-
"description": "
|
|
1724
|
+
"name": "api_veo3",
|
|
1725
|
+
"title": "Veo3: Image vers Vidéo",
|
|
1726
|
+
"description": "Générez des vidéos de 8 secondes de haute qualité à partir de prompts textuels ou d'images en utilisant l'API avancée Veo 3 de Google. Inclut la génération audio, l'amélioration des prompts et deux options de modèle privilégiant la vitesse ou la qualité.",
|
|
1529
1727
|
"mediaType": "image",
|
|
1530
1728
|
"mediaSubtype": "webp",
|
|
1531
|
-
"
|
|
1532
|
-
"
|
|
1533
|
-
"
|
|
1534
|
-
"
|
|
1535
|
-
"
|
|
1536
|
-
"OpenSource": false,
|
|
1729
|
+
"tags": ["Image vers vidéo", "Texte vers vidéo", "API"],
|
|
1730
|
+
"models": ["Veo", "Google"],
|
|
1731
|
+
"date": "2025-03-01",
|
|
1732
|
+
"tutorialUrl": "",
|
|
1733
|
+
"openSource": false,
|
|
1537
1734
|
"size": 0,
|
|
1538
|
-
"vram": 0
|
|
1735
|
+
"vram": 0,
|
|
1736
|
+
"usage": 491
|
|
1539
1737
|
},
|
|
1540
1738
|
{
|
|
1541
|
-
"name": "
|
|
1542
|
-
"title": "
|
|
1543
|
-
"description": "
|
|
1739
|
+
"name": "api_topaz_video_enhance",
|
|
1740
|
+
"title": "Topaz Amélioration vidéo",
|
|
1741
|
+
"description": "Améliorez les vidéos avec Topaz AI. Prend en charge l’upscaling de résolution avec le modèle Starlight (Astra) Fast et l’interpolation d’images avec le modèle apo-8.",
|
|
1544
1742
|
"mediaType": "image",
|
|
1545
1743
|
"mediaSubtype": "webp",
|
|
1546
1744
|
"thumbnailVariant": "compareSlider",
|
|
1547
|
-
"
|
|
1548
|
-
"
|
|
1549
|
-
"
|
|
1550
|
-
"
|
|
1551
|
-
"OpenSource": false,
|
|
1745
|
+
"tags": ["Vidéo", "API", "Amélioration"],
|
|
1746
|
+
"models": ["Topaz"],
|
|
1747
|
+
"date": "2025-11-25",
|
|
1748
|
+
"openSource": false,
|
|
1552
1749
|
"size": 0,
|
|
1553
|
-
"vram": 0
|
|
1750
|
+
"vram": 0,
|
|
1751
|
+
"usage": 471
|
|
1554
1752
|
},
|
|
1555
1753
|
{
|
|
1556
|
-
"name": "
|
|
1557
|
-
"title": "
|
|
1558
|
-
"description": "
|
|
1754
|
+
"name": "api_veo2_i2v",
|
|
1755
|
+
"title": "Veo2: Image vers Vidéo",
|
|
1756
|
+
"description": "Générer des vidéos à partir d'images en utilisant l'API Google Veo2.",
|
|
1559
1757
|
"mediaType": "image",
|
|
1560
1758
|
"mediaSubtype": "webp",
|
|
1561
|
-
"tags": ["
|
|
1562
|
-
"models": ["
|
|
1563
|
-
"date": "2025-
|
|
1564
|
-
"
|
|
1759
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1760
|
+
"models": ["Veo", "Google"],
|
|
1761
|
+
"date": "2025-03-01",
|
|
1762
|
+
"tutorialUrl": "",
|
|
1763
|
+
"openSource": false,
|
|
1565
1764
|
"size": 0,
|
|
1566
|
-
"vram": 0
|
|
1765
|
+
"vram": 0,
|
|
1766
|
+
"usage": 61
|
|
1567
1767
|
},
|
|
1568
1768
|
{
|
|
1569
|
-
"name": "
|
|
1570
|
-
"title": "
|
|
1571
|
-
"description": "
|
|
1769
|
+
"name": "api_wan2_6_t2v",
|
|
1770
|
+
"title": "Wan2.6: Texte vers Vidéo",
|
|
1771
|
+
"description": "Générez des vidéos avec audio synchronisé, mouvement amélioré et qualité supérieure.",
|
|
1572
1772
|
"mediaType": "image",
|
|
1573
1773
|
"mediaSubtype": "webp",
|
|
1574
|
-
"
|
|
1575
|
-
"
|
|
1576
|
-
"
|
|
1577
|
-
"
|
|
1578
|
-
"
|
|
1774
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
1775
|
+
"models": ["Wan2.6", "Wan"],
|
|
1776
|
+
"date": "2025-12-20",
|
|
1777
|
+
"tutorialUrl": "",
|
|
1778
|
+
"openSource": false,
|
|
1579
1779
|
"size": 0,
|
|
1580
1780
|
"vram": 0
|
|
1581
1781
|
},
|
|
1582
1782
|
{
|
|
1583
|
-
"name": "
|
|
1584
|
-
"title": "
|
|
1585
|
-
"description": "
|
|
1783
|
+
"name": "api_wan2_6_i2v",
|
|
1784
|
+
"title": "Wan2.6: Image vers Vidéo",
|
|
1785
|
+
"description": "Transformez des images en vidéos de haute qualité avec une qualité d'image améliorée, un mouvement plus fluide, la prise en charge de la résolution 1080P et une génération de mouvements naturels pour des résultats professionnels.",
|
|
1586
1786
|
"mediaType": "image",
|
|
1587
1787
|
"mediaSubtype": "webp",
|
|
1588
|
-
"
|
|
1589
|
-
"
|
|
1590
|
-
"
|
|
1591
|
-
"
|
|
1592
|
-
"
|
|
1788
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1789
|
+
"models": ["Wan2.6", "Wan"],
|
|
1790
|
+
"date": "2025-12-20",
|
|
1791
|
+
"tutorialUrl": "",
|
|
1792
|
+
"openSource": false,
|
|
1593
1793
|
"size": 0,
|
|
1594
1794
|
"vram": 0
|
|
1595
1795
|
},
|
|
1596
1796
|
{
|
|
1597
|
-
"name": "
|
|
1598
|
-
"title": "
|
|
1599
|
-
"description": "
|
|
1797
|
+
"name": "api_wan_text_to_video",
|
|
1798
|
+
"title": "Wan2.5: Texte vers Vidéo",
|
|
1799
|
+
"description": "Générez des vidéos avec audio synchronisé, mouvement amélioré et qualité supérieure.",
|
|
1600
1800
|
"mediaType": "image",
|
|
1601
1801
|
"mediaSubtype": "webp",
|
|
1602
|
-
"
|
|
1603
|
-
"
|
|
1604
|
-
"
|
|
1605
|
-
"
|
|
1606
|
-
"
|
|
1802
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
1803
|
+
"models": ["Wan2.5", "Wan"],
|
|
1804
|
+
"date": "2025-09-27",
|
|
1805
|
+
"tutorialUrl": "",
|
|
1806
|
+
"openSource": false,
|
|
1607
1807
|
"size": 0,
|
|
1608
|
-
"vram": 0
|
|
1808
|
+
"vram": 0,
|
|
1809
|
+
"usage": 167
|
|
1609
1810
|
},
|
|
1610
1811
|
{
|
|
1611
|
-
"name": "
|
|
1612
|
-
"title": "
|
|
1613
|
-
"description": "
|
|
1812
|
+
"name": "api_wan_image_to_video",
|
|
1813
|
+
"title": "Wan2.5: Image vers Vidéo",
|
|
1814
|
+
"description": "Transformez des images en vidéos avec audio synchronisé, mouvement amélioré et qualité supérieure.",
|
|
1614
1815
|
"mediaType": "image",
|
|
1615
1816
|
"mediaSubtype": "webp",
|
|
1616
|
-
"tags": ["
|
|
1617
|
-
"models": ["
|
|
1618
|
-
"date": "2025-
|
|
1619
|
-
"
|
|
1817
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1818
|
+
"models": ["Wan2.5", "Wan"],
|
|
1819
|
+
"date": "2025-09-27",
|
|
1820
|
+
"tutorialUrl": "",
|
|
1821
|
+
"openSource": false,
|
|
1620
1822
|
"size": 0,
|
|
1621
|
-
"vram": 0
|
|
1823
|
+
"vram": 0,
|
|
1824
|
+
"usage": 1463
|
|
1622
1825
|
},
|
|
1623
1826
|
{
|
|
1624
|
-
"name": "
|
|
1625
|
-
"title": "
|
|
1626
|
-
"description": "
|
|
1827
|
+
"name": "api_kling_i2v",
|
|
1828
|
+
"title": "Kling: Image vers Vidéo",
|
|
1829
|
+
"description": "Générer des vidéos avec une excellente adhérence aux prompts pour les actions, expressions et mouvements de caméra en utilisant Kling.",
|
|
1627
1830
|
"mediaType": "image",
|
|
1628
1831
|
"mediaSubtype": "webp",
|
|
1629
|
-
"tags": ["
|
|
1630
|
-
"models": ["
|
|
1832
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1833
|
+
"models": ["Kling"],
|
|
1631
1834
|
"date": "2025-03-01",
|
|
1632
|
-
"
|
|
1835
|
+
"tutorialUrl": "",
|
|
1836
|
+
"openSource": false,
|
|
1633
1837
|
"size": 0,
|
|
1634
|
-
"vram": 0
|
|
1838
|
+
"vram": 0,
|
|
1839
|
+
"usage": 418
|
|
1635
1840
|
},
|
|
1636
1841
|
{
|
|
1637
|
-
"name": "
|
|
1638
|
-
"title": "
|
|
1639
|
-
"description": "
|
|
1842
|
+
"name": "api_kling_omni_edit_video",
|
|
1843
|
+
"title": "Kling: Omni Edit Video",
|
|
1844
|
+
"description": "Éditez des vidéos avec des commandes en langage naturel, avec un mode de référence vidéo pour générer rapidement des transferts de style, ajouts d'éléments et modifications d'arrière-plan de haute qualité.",
|
|
1640
1845
|
"mediaType": "image",
|
|
1846
|
+
"thumbnailVariant": "compareSlider",
|
|
1641
1847
|
"mediaSubtype": "webp",
|
|
1642
|
-
"tags": ["
|
|
1643
|
-
"models": ["
|
|
1644
|
-
"date": "2025-
|
|
1645
|
-
"
|
|
1848
|
+
"tags": ["Vidéo", "API", "Édition vidéo", "Texte vers vidéo", "Image vers vidéo"],
|
|
1849
|
+
"models": ["Kling"],
|
|
1850
|
+
"date": "2025-12-02",
|
|
1851
|
+
"tutorialUrl": "",
|
|
1852
|
+
"openSource": false,
|
|
1646
1853
|
"size": 0,
|
|
1647
|
-
"vram": 0
|
|
1854
|
+
"vram": 0,
|
|
1855
|
+
"usage": 1007
|
|
1648
1856
|
},
|
|
1649
1857
|
{
|
|
1650
|
-
"name": "
|
|
1651
|
-
"title": "
|
|
1652
|
-
"description": "Générer des
|
|
1858
|
+
"name": "api_kling_effects",
|
|
1859
|
+
"title": "Kling: Effets Vidéo",
|
|
1860
|
+
"description": "Générer des vidéos dynamiques en appliquant des effets visuels aux images en utilisant Kling.",
|
|
1653
1861
|
"mediaType": "image",
|
|
1654
1862
|
"mediaSubtype": "webp",
|
|
1655
|
-
"tags": ["
|
|
1656
|
-
"models": ["
|
|
1863
|
+
"tags": ["Vidéo", "API"],
|
|
1864
|
+
"models": ["Kling"],
|
|
1657
1865
|
"date": "2025-03-01",
|
|
1658
|
-
"
|
|
1866
|
+
"tutorialUrl": "",
|
|
1867
|
+
"openSource": false,
|
|
1659
1868
|
"size": 0,
|
|
1660
|
-
"vram": 0
|
|
1869
|
+
"vram": 0,
|
|
1870
|
+
"usage": 5
|
|
1661
1871
|
},
|
|
1662
1872
|
{
|
|
1663
|
-
"name": "
|
|
1664
|
-
"title": "
|
|
1665
|
-
"description": "Générer
|
|
1873
|
+
"name": "api_kling_flf",
|
|
1874
|
+
"title": "Kling: FLF2V",
|
|
1875
|
+
"description": "Générer des vidéos en contrôlant les première et dernière images.",
|
|
1666
1876
|
"mediaType": "image",
|
|
1667
|
-
"thumbnailVariant": "compareSlider",
|
|
1668
1877
|
"mediaSubtype": "webp",
|
|
1669
|
-
"tags": ["
|
|
1670
|
-
"models": ["
|
|
1878
|
+
"tags": ["Vidéo", "API", "FLF2V"],
|
|
1879
|
+
"models": ["Kling"],
|
|
1671
1880
|
"date": "2025-03-01",
|
|
1672
|
-
"
|
|
1881
|
+
"tutorialUrl": "",
|
|
1882
|
+
"openSource": false,
|
|
1673
1883
|
"size": 0,
|
|
1674
|
-
"vram": 0
|
|
1884
|
+
"vram": 0,
|
|
1885
|
+
"usage": 167
|
|
1675
1886
|
},
|
|
1676
1887
|
{
|
|
1677
|
-
"name": "
|
|
1678
|
-
"title": "
|
|
1679
|
-
"description": "Générer des
|
|
1888
|
+
"name": "api_vidu_text_to_video",
|
|
1889
|
+
"title": "Vidu: Texte vers Vidéo",
|
|
1890
|
+
"description": "Générer des vidéos 1080p de haute qualité à partir de prompts textuels avec un contrôle ajustable de l'amplitude des mouvements et de la durée en utilisant le modèle AI avancé de Vidu.",
|
|
1680
1891
|
"mediaType": "image",
|
|
1681
1892
|
"mediaSubtype": "webp",
|
|
1682
|
-
"tags": ["Texte vers
|
|
1683
|
-
"models": ["
|
|
1684
|
-
"date": "2025-
|
|
1685
|
-
"
|
|
1893
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
1894
|
+
"models": ["Vidu"],
|
|
1895
|
+
"date": "2025-08-23",
|
|
1896
|
+
"tutorialUrl": "",
|
|
1897
|
+
"openSource": false,
|
|
1686
1898
|
"size": 0,
|
|
1687
|
-
"vram": 0
|
|
1899
|
+
"vram": 0,
|
|
1900
|
+
"usage": 8
|
|
1688
1901
|
},
|
|
1689
1902
|
{
|
|
1690
|
-
"name": "
|
|
1691
|
-
"title": "
|
|
1692
|
-
"description": "Transformer des images
|
|
1903
|
+
"name": "api_vidu_image_to_video",
|
|
1904
|
+
"title": "Vidu: Image vers Vidéo",
|
|
1905
|
+
"description": "Transformer des images statiques en vidéos 1080p dynamiques avec un contrôle précis du mouvement et une amplitude de mouvement personnalisable en utilisant Vidu.",
|
|
1693
1906
|
"mediaType": "image",
|
|
1694
|
-
"thumbnailVariant": "compareSlider",
|
|
1695
1907
|
"mediaSubtype": "webp",
|
|
1696
|
-
"tags": ["Image vers
|
|
1697
|
-
"models": ["
|
|
1698
|
-
"date": "2025-
|
|
1699
|
-
"
|
|
1908
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1909
|
+
"models": ["Vidu"],
|
|
1910
|
+
"date": "2025-08-23",
|
|
1911
|
+
"tutorialUrl": "",
|
|
1912
|
+
"openSource": false,
|
|
1700
1913
|
"size": 0,
|
|
1701
|
-
"vram": 0
|
|
1914
|
+
"vram": 0,
|
|
1915
|
+
"usage": 62
|
|
1702
1916
|
},
|
|
1703
1917
|
{
|
|
1704
|
-
"name": "
|
|
1705
|
-
"title": "
|
|
1706
|
-
"description": "
|
|
1918
|
+
"name": "api_vidu_reference_to_video",
|
|
1919
|
+
"title": "Vidu: Référence vers Vidéo",
|
|
1920
|
+
"description": "Générez des vidéos avec des sujets cohérents en utilisant plusieurs images de référence (jusqu'à 7) pour assurer la continuité des personnages et du style tout au long de la séquence vidéo.",
|
|
1707
1921
|
"mediaType": "image",
|
|
1708
1922
|
"mediaSubtype": "webp",
|
|
1709
|
-
"tags": ["
|
|
1710
|
-
"models": ["
|
|
1711
|
-
"date": "2025-
|
|
1712
|
-
"
|
|
1923
|
+
"tags": ["Vidéo", "Image vers vidéo", "API"],
|
|
1924
|
+
"models": ["Vidu"],
|
|
1925
|
+
"date": "2025-08-23",
|
|
1926
|
+
"tutorialUrl": "",
|
|
1927
|
+
"openSource": false,
|
|
1713
1928
|
"size": 0,
|
|
1714
|
-
"vram": 0
|
|
1929
|
+
"vram": 0,
|
|
1930
|
+
"usage": 69
|
|
1715
1931
|
},
|
|
1716
1932
|
{
|
|
1717
|
-
"name": "
|
|
1718
|
-
"title": "
|
|
1719
|
-
"description": "
|
|
1933
|
+
"name": "api_vidu_start_end_to_video",
|
|
1934
|
+
"title": "Vidu: Début-Fin vers Vidéo",
|
|
1935
|
+
"description": "Créez des transitions vidéo fluides entre des images de début et de fin définies, avec une interpolation de mouvement naturelle et une qualité visuelle constante.",
|
|
1720
1936
|
"mediaType": "image",
|
|
1721
|
-
"thumbnailVariant": "compareSlider",
|
|
1722
1937
|
"mediaSubtype": "webp",
|
|
1723
|
-
"tags": ["
|
|
1724
|
-
"models": ["
|
|
1725
|
-
"date": "2025-
|
|
1726
|
-
"
|
|
1938
|
+
"tags": ["Vidéo", "API", "FLF2V"],
|
|
1939
|
+
"models": ["Vidu"],
|
|
1940
|
+
"date": "2025-08-23",
|
|
1941
|
+
"tutorialUrl": "",
|
|
1942
|
+
"openSource": false,
|
|
1727
1943
|
"size": 0,
|
|
1728
|
-
"vram": 0
|
|
1944
|
+
"vram": 0,
|
|
1945
|
+
"usage": 85
|
|
1729
1946
|
},
|
|
1730
1947
|
{
|
|
1731
|
-
"name": "
|
|
1732
|
-
"title": "
|
|
1733
|
-
"description": "
|
|
1948
|
+
"name": "api_bytedance_text_to_video",
|
|
1949
|
+
"title": "ByteDance: Texte vers Vidéo",
|
|
1950
|
+
"description": "Générez des vidéos de haute qualité directement à partir de prompts textuels avec le modèle Seedance de ByteDance. Prend en charge plusieurs résolutions et ratios d'aspect avec un mouvement naturel et une qualité cinématographique.",
|
|
1734
1951
|
"mediaType": "image",
|
|
1735
1952
|
"mediaSubtype": "webp",
|
|
1736
|
-
"tags": ["
|
|
1737
|
-
"models": ["
|
|
1738
|
-
"date": "2025-
|
|
1739
|
-
"
|
|
1953
|
+
"tags": ["Vidéo", "API", "Texte vers vidéo"],
|
|
1954
|
+
"models": ["ByteDance"],
|
|
1955
|
+
"date": "2025-10-06",
|
|
1956
|
+
"tutorialUrl": "",
|
|
1957
|
+
"openSource": false,
|
|
1740
1958
|
"size": 0,
|
|
1741
|
-
"vram": 0
|
|
1959
|
+
"vram": 0,
|
|
1960
|
+
"usage": 75
|
|
1742
1961
|
},
|
|
1743
1962
|
{
|
|
1744
|
-
"name": "
|
|
1745
|
-
"title": "
|
|
1746
|
-
"description": "
|
|
1963
|
+
"name": "api_bytedance_image_to_video",
|
|
1964
|
+
"title": "ByteDance: Image vers Vidéo",
|
|
1965
|
+
"description": "Transformez des images statiques en vidéos dynamiques avec le modèle Seedance de ByteDance. Analyse la structure de l'image et génère un mouvement naturel avec un style visuel cohérent et des séquences vidéo cohérentes.",
|
|
1747
1966
|
"mediaType": "image",
|
|
1748
1967
|
"mediaSubtype": "webp",
|
|
1749
|
-
"tags": ["
|
|
1750
|
-
"models": ["
|
|
1751
|
-
"date": "2025-
|
|
1752
|
-
"tutorialUrl": "
|
|
1753
|
-
"
|
|
1968
|
+
"tags": ["Vidéo", "API", "Image vers vidéo"],
|
|
1969
|
+
"models": ["ByteDance"],
|
|
1970
|
+
"date": "2025-10-06",
|
|
1971
|
+
"tutorialUrl": "",
|
|
1972
|
+
"openSource": false,
|
|
1754
1973
|
"size": 0,
|
|
1755
|
-
"vram": 0
|
|
1974
|
+
"vram": 0,
|
|
1975
|
+
"usage": 2275
|
|
1756
1976
|
},
|
|
1757
1977
|
{
|
|
1758
|
-
"name": "
|
|
1759
|
-
"title": "
|
|
1760
|
-
"description": "
|
|
1978
|
+
"name": "api_bytedance_flf2v",
|
|
1979
|
+
"title": "ByteDance: Début-Fin vers Vidéo",
|
|
1980
|
+
"description": "Générez des transitions vidéo cinématographiques entre les images de début et de fin avec un mouvement fluide, une cohérence de scène et une finition professionnelle avec le modèle Seedance de ByteDance.",
|
|
1761
1981
|
"mediaType": "image",
|
|
1762
1982
|
"mediaSubtype": "webp",
|
|
1763
|
-
"
|
|
1764
|
-
"
|
|
1765
|
-
"
|
|
1766
|
-
"
|
|
1767
|
-
"
|
|
1768
|
-
"OpenSource": false,
|
|
1983
|
+
"tags": ["Vidéo", "API", "FLF2V"],
|
|
1984
|
+
"models": ["ByteDance"],
|
|
1985
|
+
"date": "2025-10-06",
|
|
1986
|
+
"tutorialUrl": "",
|
|
1987
|
+
"openSource": false,
|
|
1769
1988
|
"size": 0,
|
|
1770
|
-
"vram": 0
|
|
1989
|
+
"vram": 0,
|
|
1990
|
+
"usage": 791
|
|
1771
1991
|
},
|
|
1772
1992
|
{
|
|
1773
|
-
"name": "
|
|
1774
|
-
"title": "
|
|
1775
|
-
"description": "
|
|
1993
|
+
"name": "video_wan2_2_14B_s2v",
|
|
1994
|
+
"title": "Wan2.2-S2V Génération de Vidéo Pilotée par l'Audio",
|
|
1995
|
+
"description": "Transformer des images statiques et de l'audio en vidéos dynamiques avec une synchronisation parfaite et une génération au niveau de la minute.",
|
|
1776
1996
|
"mediaType": "image",
|
|
1777
1997
|
"mediaSubtype": "webp",
|
|
1778
|
-
"
|
|
1779
|
-
"tags": ["
|
|
1780
|
-
"models": ["
|
|
1781
|
-
"date": "2025-
|
|
1782
|
-
"
|
|
1783
|
-
"
|
|
1784
|
-
"
|
|
1785
|
-
"vram": 0
|
|
1998
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
|
|
1999
|
+
"tags": ["Vidéo"],
|
|
2000
|
+
"models": ["Wan2.2", "Wan"],
|
|
2001
|
+
"date": "2025-08-02",
|
|
2002
|
+
"size": 25254407700,
|
|
2003
|
+
"vram": 25254407700,
|
|
2004
|
+
"usage": 648
|
|
1786
2005
|
},
|
|
1787
2006
|
{
|
|
1788
|
-
"name": "
|
|
1789
|
-
"title": "
|
|
1790
|
-
"description": "
|
|
1791
|
-
"mediaType": "image",
|
|
1792
|
-
"mediaSubtype": "webp",
|
|
1793
|
-
"thumbnailVariant": "compareSlider",
|
|
1794
|
-
"tags": ["Texte vers image", "Image", "API"],
|
|
1795
|
-
"models": ["GPT-Image-1", "OpenAI"],
|
|
1796
|
-
"date": "2025-03-01",
|
|
1797
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
|
|
1798
|
-
"OpenSource": false,
|
|
1799
|
-
"size": 0,
|
|
1800
|
-
"vram": 0
|
|
1801
|
-
},
|
|
1802
|
-
{
|
|
1803
|
-
"name": "api_openai_dall_e_2_t2i",
|
|
1804
|
-
"title": "OpenAI: Dall-E 2 Texte vers Image",
|
|
1805
|
-
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI Dall-E 2.",
|
|
1806
|
-
"mediaType": "image",
|
|
1807
|
-
"mediaSubtype": "webp",
|
|
1808
|
-
"tags": ["Texte vers image", "Image", "API"],
|
|
1809
|
-
"models": ["Dall-E", "OpenAI"],
|
|
1810
|
-
"date": "2025-03-01",
|
|
1811
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
|
|
1812
|
-
"OpenSource": false,
|
|
1813
|
-
"size": 0,
|
|
1814
|
-
"vram": 0
|
|
1815
|
-
},
|
|
1816
|
-
{
|
|
1817
|
-
"name": "api_openai_dall_e_2_inpaint",
|
|
1818
|
-
"title": "OpenAI: Dall-E 2 Inpainting",
|
|
1819
|
-
"description": "Éditer des images en utilisant l'inpainting avec l'API OpenAI Dall-E 2.",
|
|
1820
|
-
"mediaType": "image",
|
|
1821
|
-
"mediaSubtype": "webp",
|
|
1822
|
-
"thumbnailVariant": "compareSlider",
|
|
1823
|
-
"tags": ["Inpainting", "Image", "API"],
|
|
1824
|
-
"models": ["Dall-E", "OpenAI"],
|
|
1825
|
-
"date": "2025-03-01",
|
|
1826
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
|
|
1827
|
-
"OpenSource": false,
|
|
1828
|
-
"size": 0,
|
|
1829
|
-
"vram": 0
|
|
1830
|
-
},
|
|
1831
|
-
{
|
|
1832
|
-
"name": "api_openai_dall_e_3_t2i",
|
|
1833
|
-
"title": "OpenAI: Dall-E 3 Texte vers Image",
|
|
1834
|
-
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI Dall-E 3.",
|
|
1835
|
-
"mediaType": "image",
|
|
1836
|
-
"mediaSubtype": "webp",
|
|
1837
|
-
"tags": ["Texte vers image", "Image", "API"],
|
|
1838
|
-
"models": ["Dall-E", "OpenAI"],
|
|
1839
|
-
"date": "2025-03-01",
|
|
1840
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3",
|
|
1841
|
-
"OpenSource": false,
|
|
1842
|
-
"size": 0,
|
|
1843
|
-
"vram": 0
|
|
1844
|
-
}
|
|
1845
|
-
]
|
|
1846
|
-
},
|
|
1847
|
-
{
|
|
1848
|
-
"moduleName": "default",
|
|
1849
|
-
"type": "video",
|
|
1850
|
-
"category": "CLOSED SOURCE MODELS",
|
|
1851
|
-
"icon": "icon-[lucide--film]",
|
|
1852
|
-
"title": "Video API",
|
|
1853
|
-
"templates": [
|
|
1854
|
-
{
|
|
1855
|
-
"name": "api_openai_sora_video",
|
|
1856
|
-
"title": "Sora 2: Texte et Image vers Vidéo",
|
|
1857
|
-
"description": "Génération vidéo Sora-2 et Sora-2 Pro d'OpenAI avec audio synchronisé.",
|
|
1858
|
-
"mediaType": "image",
|
|
1859
|
-
"mediaSubtype": "webp",
|
|
1860
|
-
"tags": ["Image vers vidéo", "Texte vers vidéo", "API"],
|
|
1861
|
-
"models": ["OpenAI"],
|
|
1862
|
-
"date": "2025-10-08",
|
|
1863
|
-
"OpenSource": false,
|
|
1864
|
-
"size": 0,
|
|
1865
|
-
"vram": 0
|
|
1866
|
-
},
|
|
1867
|
-
{
|
|
1868
|
-
"name": "api_ltxv_text_to_video",
|
|
1869
|
-
"title": "LTX-2 : Texte en vidéo",
|
|
1870
|
-
"description": "Générez des vidéos de haute qualité à partir de suggestions textuelles avec Lightricks LTX-2 et audio synchronisé. Prise en charge jusqu'à 4K à 50fps avec les modes Rapide, Pro et Ultra pour divers besoins de production.",
|
|
2007
|
+
"name": "api_ltxv_text_to_video",
|
|
2008
|
+
"title": "LTX-2 : Texte en vidéo",
|
|
2009
|
+
"description": "Générez des vidéos de haute qualité à partir de suggestions textuelles avec Lightricks LTX-2 et audio synchronisé. Prise en charge jusqu'à 4K à 50fps avec les modes Rapide, Pro et Ultra pour divers besoins de production.",
|
|
1871
2010
|
"mediaType": "image",
|
|
1872
2011
|
"mediaSubtype": "webp",
|
|
1873
2012
|
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
1874
2013
|
"models": ["LTX-2", "Lightricks"],
|
|
1875
2014
|
"date": "2025-10-28",
|
|
1876
|
-
"
|
|
2015
|
+
"openSource": false,
|
|
1877
2016
|
"size": 0,
|
|
1878
|
-
"vram": 0
|
|
2017
|
+
"vram": 0,
|
|
2018
|
+
"usage": 73
|
|
1879
2019
|
},
|
|
1880
2020
|
{
|
|
1881
2021
|
"name": "api_ltxv_image_to_video",
|
|
@@ -1886,191 +2026,55 @@
|
|
|
1886
2026
|
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1887
2027
|
"models": ["LTX-2", "Lightricks"],
|
|
1888
2028
|
"date": "2025-10-28",
|
|
1889
|
-
"
|
|
1890
|
-
"size": 0,
|
|
1891
|
-
"vram": 0
|
|
1892
|
-
},
|
|
1893
|
-
{
|
|
1894
|
-
"name": "api_wan_text_to_video",
|
|
1895
|
-
"title": "Wan2.5: Texte vers Vidéo",
|
|
1896
|
-
"description": "Générez des vidéos avec audio synchronisé, mouvement amélioré et qualité supérieure.",
|
|
1897
|
-
"mediaType": "image",
|
|
1898
|
-
"mediaSubtype": "webp",
|
|
1899
|
-
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1900
|
-
"models": ["Wan2.5", "Wan"],
|
|
1901
|
-
"date": "2025-09-27",
|
|
1902
|
-
"tutorialUrl": "",
|
|
1903
|
-
"OpenSource": false,
|
|
1904
|
-
"size": 0,
|
|
1905
|
-
"vram": 0
|
|
1906
|
-
},
|
|
1907
|
-
{
|
|
1908
|
-
"name": "api_wan_image_to_video",
|
|
1909
|
-
"title": "Wan2.5: Image vers Vidéo",
|
|
1910
|
-
"description": "Transformez des images en vidéos avec audio synchronisé, mouvement amélioré et qualité supérieure.",
|
|
1911
|
-
"mediaType": "image",
|
|
1912
|
-
"mediaSubtype": "webp",
|
|
1913
|
-
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1914
|
-
"models": ["Wan2.5", "Wan"],
|
|
1915
|
-
"date": "2025-09-27",
|
|
1916
|
-
"tutorialUrl": "",
|
|
1917
|
-
"OpenSource": false,
|
|
2029
|
+
"openSource": false,
|
|
1918
2030
|
"size": 0,
|
|
1919
|
-
"vram": 0
|
|
1920
|
-
|
|
1921
|
-
{
|
|
1922
|
-
"name": "api_kling_i2v",
|
|
1923
|
-
"title": "Kling: Image vers Vidéo",
|
|
1924
|
-
"description": "Générer des vidéos avec une excellente adhérence aux prompts pour les actions, expressions et mouvements de caméra en utilisant Kling.",
|
|
1925
|
-
"mediaType": "image",
|
|
1926
|
-
"mediaSubtype": "webp",
|
|
1927
|
-
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1928
|
-
"models": ["Kling"],
|
|
1929
|
-
"date": "2025-03-01",
|
|
1930
|
-
"tutorialUrl": "",
|
|
1931
|
-
"OpenSource": false,
|
|
1932
|
-
"size": 0,
|
|
1933
|
-
"vram": 0
|
|
1934
|
-
},
|
|
1935
|
-
{
|
|
1936
|
-
"name": "api_kling_effects",
|
|
1937
|
-
"title": "Kling: Effets Vidéo",
|
|
1938
|
-
"description": "Générer des vidéos dynamiques en appliquant des effets visuels aux images en utilisant Kling.",
|
|
1939
|
-
"mediaType": "image",
|
|
1940
|
-
"mediaSubtype": "webp",
|
|
1941
|
-
"tags": ["Vidéo", "API"],
|
|
1942
|
-
"models": ["Kling"],
|
|
1943
|
-
"date": "2025-03-01",
|
|
1944
|
-
"tutorialUrl": "",
|
|
1945
|
-
"OpenSource": false,
|
|
1946
|
-
"size": 0,
|
|
1947
|
-
"vram": 0
|
|
2031
|
+
"vram": 0,
|
|
2032
|
+
"usage": 448
|
|
1948
2033
|
},
|
|
1949
2034
|
{
|
|
1950
|
-
"name": "
|
|
1951
|
-
"title": "
|
|
1952
|
-
"description": "
|
|
2035
|
+
"name": "api_hailuo_minimax_video",
|
|
2036
|
+
"title": "MiniMax: Vidéo",
|
|
2037
|
+
"description": "Générez des vidéos de haute qualité à partir de prompts textuels avec contrôle optionnel de la première image en utilisant le modèle MiniMax Hailuo-02. Prend en charge plusieurs résolutions (768P/1080P) et durées (6/10s) avec optimisation intelligente des prompts.",
|
|
1953
2038
|
"mediaType": "image",
|
|
1954
2039
|
"mediaSubtype": "webp",
|
|
1955
|
-
"tags": ["
|
|
1956
|
-
"models": ["
|
|
2040
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
2041
|
+
"models": ["MiniMax"],
|
|
1957
2042
|
"date": "2025-03-01",
|
|
1958
2043
|
"tutorialUrl": "",
|
|
1959
|
-
"
|
|
2044
|
+
"openSource": false,
|
|
1960
2045
|
"size": 0,
|
|
1961
|
-
"vram": 0
|
|
2046
|
+
"vram": 0,
|
|
2047
|
+
"usage": 9
|
|
1962
2048
|
},
|
|
1963
2049
|
{
|
|
1964
|
-
"name": "
|
|
1965
|
-
"title": "
|
|
1966
|
-
"description": "
|
|
2050
|
+
"name": "api_hailuo_minimax_t2v",
|
|
2051
|
+
"title": "MiniMax: Texte vers Vidéo",
|
|
2052
|
+
"description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
|
|
1967
2053
|
"mediaType": "image",
|
|
1968
2054
|
"mediaSubtype": "webp",
|
|
1969
2055
|
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
1970
|
-
"models": ["
|
|
1971
|
-
"date": "2025-
|
|
2056
|
+
"models": ["MiniMax"],
|
|
2057
|
+
"date": "2025-03-01",
|
|
1972
2058
|
"tutorialUrl": "",
|
|
1973
|
-
"
|
|
2059
|
+
"openSource": false,
|
|
1974
2060
|
"size": 0,
|
|
1975
|
-
"vram": 0
|
|
2061
|
+
"vram": 0,
|
|
2062
|
+
"usage": 1
|
|
1976
2063
|
},
|
|
1977
2064
|
{
|
|
1978
|
-
"name": "
|
|
1979
|
-
"title": "
|
|
1980
|
-
"description": "
|
|
2065
|
+
"name": "api_hailuo_minimax_i2v",
|
|
2066
|
+
"title": "MiniMax: Image vers Vidéo",
|
|
2067
|
+
"description": "Generate refined videos from images and text with CGI integration using MiniMax.",
|
|
1981
2068
|
"mediaType": "image",
|
|
1982
2069
|
"mediaSubtype": "webp",
|
|
1983
2070
|
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1984
|
-
"models": ["
|
|
1985
|
-
"date": "2025-
|
|
1986
|
-
"tutorialUrl": "",
|
|
1987
|
-
"OpenSource": false,
|
|
1988
|
-
"size": 0,
|
|
1989
|
-
"vram": 0
|
|
1990
|
-
},
|
|
1991
|
-
{
|
|
1992
|
-
"name": "api_vidu_reference_to_video",
|
|
1993
|
-
"title": "Vidu: Référence vers Vidéo",
|
|
1994
|
-
"description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
|
|
1995
|
-
"mediaType": "image",
|
|
1996
|
-
"mediaSubtype": "webp",
|
|
1997
|
-
"tags": ["Vidéo", "Image vers vidéo", "API"],
|
|
1998
|
-
"models": ["Vidu"],
|
|
1999
|
-
"date": "2025-08-23",
|
|
2000
|
-
"tutorialUrl": "",
|
|
2001
|
-
"OpenSource": false,
|
|
2002
|
-
"size": 0,
|
|
2003
|
-
"vram": 0
|
|
2004
|
-
},
|
|
2005
|
-
{
|
|
2006
|
-
"name": "api_vidu_start_end_to_video",
|
|
2007
|
-
"title": "Vidu: Début-Fin vers Vidéo",
|
|
2008
|
-
"description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
|
|
2009
|
-
"mediaType": "image",
|
|
2010
|
-
"mediaSubtype": "webp",
|
|
2011
|
-
"tags": ["Vidéo", "API", "FLF2V"],
|
|
2012
|
-
"models": ["Vidu"],
|
|
2013
|
-
"date": "2025-08-23",
|
|
2014
|
-
"tutorialUrl": "",
|
|
2015
|
-
"OpenSource": false,
|
|
2016
|
-
"size": 0,
|
|
2017
|
-
"vram": 0
|
|
2018
|
-
},
|
|
2019
|
-
{
|
|
2020
|
-
"name": "api_bytedance_text_to_video",
|
|
2021
|
-
"title": "ByteDance: Texte vers Vidéo",
|
|
2022
|
-
"description": "Générez des vidéos de haute qualité directement à partir de prompts textuels avec le modèle Seedance de ByteDance. Prend en charge plusieurs résolutions et ratios d'aspect avec un mouvement naturel et une qualité cinématographique.",
|
|
2023
|
-
"mediaType": "image",
|
|
2024
|
-
"mediaSubtype": "webp",
|
|
2025
|
-
"tags": ["Vidéo", "API", "Texte vers vidéo"],
|
|
2026
|
-
"models": ["ByteDance"],
|
|
2027
|
-
"date": "2025-10-6",
|
|
2028
|
-
"tutorialUrl": "",
|
|
2029
|
-
"OpenSource": false,
|
|
2030
|
-
"size": 0,
|
|
2031
|
-
"vram": 0
|
|
2032
|
-
},
|
|
2033
|
-
{
|
|
2034
|
-
"name": "api_bytedance_image_to_video",
|
|
2035
|
-
"title": "ByteDance: Image vers Vidéo",
|
|
2036
|
-
"description": "Transformez des images statiques en vidéos dynamiques avec le modèle Seedance de ByteDance. Analyse la structure de l'image et génère un mouvement naturel avec un style visuel cohérent et des séquences vidéo cohérentes.",
|
|
2037
|
-
"mediaType": "image",
|
|
2038
|
-
"mediaSubtype": "webp",
|
|
2039
|
-
"tags": ["Vidéo", "API", "Image vers vidéo"],
|
|
2040
|
-
"models": ["ByteDance"],
|
|
2041
|
-
"date": "2025-10-6",
|
|
2042
|
-
"tutorialUrl": "",
|
|
2043
|
-
"OpenSource": false,
|
|
2044
|
-
"size": 0,
|
|
2045
|
-
"vram": 0
|
|
2046
|
-
},
|
|
2047
|
-
{
|
|
2048
|
-
"name": "api_bytedance_flf2v",
|
|
2049
|
-
"title": "ByteDance: Début-Fin vers Vidéo",
|
|
2050
|
-
"description": "Générez des transitions vidéo cinématographiques entre les images de début et de fin avec un mouvement fluide, une cohérence de scène et une finition professionnelle avec le modèle Seedance de ByteDance.",
|
|
2051
|
-
"mediaType": "image",
|
|
2052
|
-
"mediaSubtype": "webp",
|
|
2053
|
-
"tags": ["Vidéo", "API", "FLF2V"],
|
|
2054
|
-
"models": ["ByteDance"],
|
|
2055
|
-
"date": "2025-10-6",
|
|
2071
|
+
"models": ["MiniMax"],
|
|
2072
|
+
"date": "2025-03-01",
|
|
2056
2073
|
"tutorialUrl": "",
|
|
2057
|
-
"
|
|
2058
|
-
"size": 0,
|
|
2059
|
-
"vram": 0
|
|
2060
|
-
},
|
|
2061
|
-
{
|
|
2062
|
-
"name": "api_topaz_video_enhance",
|
|
2063
|
-
"title": "Topaz Amélioration vidéo",
|
|
2064
|
-
"description": "Améliorez les vidéos avec Topaz AI. Prend en charge l’upscaling de résolution avec le modèle Starlight (Astra) Fast et l’interpolation d’images avec le modèle apo-8.",
|
|
2065
|
-
"mediaType": "image",
|
|
2066
|
-
"mediaSubtype": "webp",
|
|
2067
|
-
"thumbnailVariant": "compareSlider",
|
|
2068
|
-
"tags": ["Vidéo", "API", "Amélioration"],
|
|
2069
|
-
"models": ["Topaz"],
|
|
2070
|
-
"date": "2025-11-25",
|
|
2071
|
-
"OpenSource": false,
|
|
2074
|
+
"openSource": false,
|
|
2072
2075
|
"size": 0,
|
|
2073
|
-
"vram": 0
|
|
2076
|
+
"vram": 0,
|
|
2077
|
+
"usage": 39
|
|
2074
2078
|
},
|
|
2075
2079
|
{
|
|
2076
2080
|
"name": "api_luma_i2v",
|
|
@@ -2082,9 +2086,10 @@
|
|
|
2082
2086
|
"models": ["Luma"],
|
|
2083
2087
|
"date": "2025-03-01",
|
|
2084
2088
|
"tutorialUrl": "",
|
|
2085
|
-
"
|
|
2089
|
+
"openSource": false,
|
|
2086
2090
|
"size": 0,
|
|
2087
|
-
"vram": 0
|
|
2091
|
+
"vram": 0,
|
|
2092
|
+
"usage": 56
|
|
2088
2093
|
},
|
|
2089
2094
|
{
|
|
2090
2095
|
"name": "api_luma_t2v",
|
|
@@ -2096,9 +2101,10 @@
|
|
|
2096
2101
|
"models": ["Luma"],
|
|
2097
2102
|
"date": "2025-03-01",
|
|
2098
2103
|
"tutorialUrl": "",
|
|
2099
|
-
"
|
|
2104
|
+
"openSource": false,
|
|
2100
2105
|
"size": 0,
|
|
2101
|
-
"vram": 0
|
|
2106
|
+
"vram": 0,
|
|
2107
|
+
"usage": 3
|
|
2102
2108
|
},
|
|
2103
2109
|
{
|
|
2104
2110
|
"name": "api_moonvalley_text_to_video",
|
|
@@ -2110,9 +2116,10 @@
|
|
|
2110
2116
|
"models": ["Moonvalley"],
|
|
2111
2117
|
"date": "2025-03-01",
|
|
2112
2118
|
"tutorialUrl": "",
|
|
2113
|
-
"
|
|
2119
|
+
"openSource": false,
|
|
2114
2120
|
"size": 0,
|
|
2115
|
-
"vram": 0
|
|
2121
|
+
"vram": 0,
|
|
2122
|
+
"usage": 4
|
|
2116
2123
|
},
|
|
2117
2124
|
{
|
|
2118
2125
|
"name": "api_moonvalley_image_to_video",
|
|
@@ -2124,9 +2131,10 @@
|
|
|
2124
2131
|
"models": ["Moonvalley"],
|
|
2125
2132
|
"date": "2025-03-01",
|
|
2126
2133
|
"tutorialUrl": "",
|
|
2127
|
-
"
|
|
2134
|
+
"openSource": false,
|
|
2128
2135
|
"size": 0,
|
|
2129
|
-
"vram": 0
|
|
2136
|
+
"vram": 0,
|
|
2137
|
+
"usage": 29
|
|
2130
2138
|
},
|
|
2131
2139
|
{
|
|
2132
2140
|
"name": "api_moonvalley_video_to_video_motion_transfer",
|
|
@@ -2139,9 +2147,10 @@
|
|
|
2139
2147
|
"models": ["Moonvalley"],
|
|
2140
2148
|
"date": "2025-03-01",
|
|
2141
2149
|
"tutorialUrl": "",
|
|
2142
|
-
"
|
|
2150
|
+
"openSource": false,
|
|
2143
2151
|
"size": 0,
|
|
2144
|
-
"vram": 0
|
|
2152
|
+
"vram": 0,
|
|
2153
|
+
"usage": 22
|
|
2145
2154
|
},
|
|
2146
2155
|
{
|
|
2147
2156
|
"name": "api_moonvalley_video_to_video_pose_control",
|
|
@@ -2154,65 +2163,25 @@
|
|
|
2154
2163
|
"models": ["Moonvalley"],
|
|
2155
2164
|
"date": "2025-03-01",
|
|
2156
2165
|
"tutorialUrl": "",
|
|
2157
|
-
"
|
|
2166
|
+
"openSource": false,
|
|
2158
2167
|
"size": 0,
|
|
2159
|
-
"vram": 0
|
|
2168
|
+
"vram": 0,
|
|
2169
|
+
"usage": 11
|
|
2160
2170
|
},
|
|
2161
2171
|
{
|
|
2162
|
-
"name": "
|
|
2163
|
-
"title": "
|
|
2164
|
-
"description": "
|
|
2172
|
+
"name": "api_pixverse_i2v",
|
|
2173
|
+
"title": "PixVerse: Image vers Vidéo",
|
|
2174
|
+
"description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
|
2165
2175
|
"mediaType": "image",
|
|
2166
2176
|
"mediaSubtype": "webp",
|
|
2167
|
-
"tags": ["
|
|
2168
|
-
"models": ["
|
|
2177
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2178
|
+
"models": ["PixVerse"],
|
|
2169
2179
|
"date": "2025-03-01",
|
|
2170
2180
|
"tutorialUrl": "",
|
|
2171
|
-
"
|
|
2181
|
+
"openSource": false,
|
|
2172
2182
|
"size": 0,
|
|
2173
|
-
"vram": 0
|
|
2174
|
-
|
|
2175
|
-
{
|
|
2176
|
-
"name": "api_hailuo_minimax_t2v",
|
|
2177
|
-
"title": "MiniMax: Texte vers Vidéo",
|
|
2178
|
-
"description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
|
|
2179
|
-
"mediaType": "image",
|
|
2180
|
-
"mediaSubtype": "webp",
|
|
2181
|
-
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
2182
|
-
"models": ["MiniMax"],
|
|
2183
|
-
"date": "2025-03-01",
|
|
2184
|
-
"tutorialUrl": "",
|
|
2185
|
-
"OpenSource": false,
|
|
2186
|
-
"size": 0,
|
|
2187
|
-
"vram": 0
|
|
2188
|
-
},
|
|
2189
|
-
{
|
|
2190
|
-
"name": "api_hailuo_minimax_i2v",
|
|
2191
|
-
"title": "MiniMax: Image vers Vidéo",
|
|
2192
|
-
"description": "Generate refined videos from images and text with CGI integration using MiniMax.",
|
|
2193
|
-
"mediaType": "image",
|
|
2194
|
-
"mediaSubtype": "webp",
|
|
2195
|
-
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2196
|
-
"models": ["MiniMax"],
|
|
2197
|
-
"date": "2025-03-01",
|
|
2198
|
-
"tutorialUrl": "",
|
|
2199
|
-
"OpenSource": false,
|
|
2200
|
-
"size": 0,
|
|
2201
|
-
"vram": 0
|
|
2202
|
-
},
|
|
2203
|
-
{
|
|
2204
|
-
"name": "api_pixverse_i2v",
|
|
2205
|
-
"title": "PixVerse: Image vers Vidéo",
|
|
2206
|
-
"description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
|
2207
|
-
"mediaType": "image",
|
|
2208
|
-
"mediaSubtype": "webp",
|
|
2209
|
-
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2210
|
-
"models": ["PixVerse"],
|
|
2211
|
-
"date": "2025-03-01",
|
|
2212
|
-
"tutorialUrl": "",
|
|
2213
|
-
"OpenSource": false,
|
|
2214
|
-
"size": 0,
|
|
2215
|
-
"vram": 0
|
|
2183
|
+
"vram": 0,
|
|
2184
|
+
"usage": 25
|
|
2216
2185
|
},
|
|
2217
2186
|
{
|
|
2218
2187
|
"name": "api_pixverse_template_i2v",
|
|
@@ -2224,9 +2193,10 @@
|
|
|
2224
2193
|
"models": ["PixVerse"],
|
|
2225
2194
|
"date": "2025-03-01",
|
|
2226
2195
|
"tutorialUrl": "",
|
|
2227
|
-
"
|
|
2196
|
+
"openSource": false,
|
|
2228
2197
|
"size": 0,
|
|
2229
|
-
"vram": 0
|
|
2198
|
+
"vram": 0,
|
|
2199
|
+
"usage": 16
|
|
2230
2200
|
},
|
|
2231
2201
|
{
|
|
2232
2202
|
"name": "api_pixverse_t2v",
|
|
@@ -2238,9 +2208,10 @@
|
|
|
2238
2208
|
"models": ["PixVerse"],
|
|
2239
2209
|
"date": "2025-03-01",
|
|
2240
2210
|
"tutorialUrl": "",
|
|
2241
|
-
"
|
|
2211
|
+
"openSource": false,
|
|
2242
2212
|
"size": 0,
|
|
2243
|
-
"vram": 0
|
|
2213
|
+
"vram": 0,
|
|
2214
|
+
"usage": 3
|
|
2244
2215
|
},
|
|
2245
2216
|
{
|
|
2246
2217
|
"name": "api_runway_gen3a_turbo_image_to_video",
|
|
@@ -2252,9 +2223,10 @@
|
|
|
2252
2223
|
"models": ["Runway"],
|
|
2253
2224
|
"date": "2025-03-01",
|
|
2254
2225
|
"tutorialUrl": "",
|
|
2255
|
-
"
|
|
2226
|
+
"openSource": false,
|
|
2256
2227
|
"size": 0,
|
|
2257
|
-
"vram": 0
|
|
2228
|
+
"vram": 0,
|
|
2229
|
+
"usage": 38
|
|
2258
2230
|
},
|
|
2259
2231
|
{
|
|
2260
2232
|
"name": "api_runway_gen4_turo_image_to_video",
|
|
@@ -2265,90 +2237,605 @@
|
|
|
2265
2237
|
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2266
2238
|
"models": ["Runway"],
|
|
2267
2239
|
"date": "2025-03-01",
|
|
2268
|
-
"tutorialUrl": "",
|
|
2269
|
-
"
|
|
2270
|
-
"size": 0,
|
|
2271
|
-
"vram": 0
|
|
2240
|
+
"tutorialUrl": "",
|
|
2241
|
+
"openSource": false,
|
|
2242
|
+
"size": 0,
|
|
2243
|
+
"vram": 0,
|
|
2244
|
+
"usage": 97
|
|
2245
|
+
},
|
|
2246
|
+
{
|
|
2247
|
+
"name": "api_runway_first_last_frame",
|
|
2248
|
+
"title": "Runway: Première-Dernière Image vers Vidéo",
|
|
2249
|
+
"description": "Generate smooth video transitions between two keyframes with Runway's precision.",
|
|
2250
|
+
"mediaType": "image",
|
|
2251
|
+
"mediaSubtype": "webp",
|
|
2252
|
+
"tags": ["Vidéo", "API", "FLF2V"],
|
|
2253
|
+
"models": ["Runway"],
|
|
2254
|
+
"date": "2025-03-01",
|
|
2255
|
+
"tutorialUrl": "",
|
|
2256
|
+
"openSource": false,
|
|
2257
|
+
"size": 0,
|
|
2258
|
+
"vram": 0,
|
|
2259
|
+
"usage": 97
|
|
2260
|
+
},
|
|
2261
|
+
{
|
|
2262
|
+
"name": "video_wan2_2_14B_fun_inpaint",
|
|
2263
|
+
"title": "Wan 2.2 14B Fun Inpainting",
|
|
2264
|
+
"description": "Générez des vidéos à partir des images de début et de fin avec Wan 2.2 Fun Inp.",
|
|
2265
|
+
"mediaType": "image",
|
|
2266
|
+
"mediaSubtype": "webp",
|
|
2267
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
|
|
2268
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
2269
|
+
"models": ["Wan2.2", "Wan"],
|
|
2270
|
+
"date": "2025-08-12",
|
|
2271
|
+
"size": 38031935406,
|
|
2272
|
+
"vram": 38031935406,
|
|
2273
|
+
"usage": 547
|
|
2274
|
+
},
|
|
2275
|
+
{
|
|
2276
|
+
"name": "video_wan2_2_14B_fun_control",
|
|
2277
|
+
"title": "Wan 2.2 14B Fun Control",
|
|
2278
|
+
"description": "Générer des vidéos guidées par des contrôles de pose, de profondeur et de contours en utilisant Wan 2.2 Fun Control.",
|
|
2279
|
+
"mediaType": "image",
|
|
2280
|
+
"mediaSubtype": "webp",
|
|
2281
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
|
|
2282
|
+
"tags": ["Vidéo vers vidéo", "Vidéo"],
|
|
2283
|
+
"models": ["Wan2.2", "Wan"],
|
|
2284
|
+
"date": "2025-08-12",
|
|
2285
|
+
"size": 38031935406,
|
|
2286
|
+
"vram": 38031935406,
|
|
2287
|
+
"usage": 305
|
|
2288
|
+
},
|
|
2289
|
+
{
|
|
2290
|
+
"name": "video_wan2_2_14B_fun_camera",
|
|
2291
|
+
"title": "Wan 2.2 14B Contrôle Caméra Fun",
|
|
2292
|
+
"description": "Générer des vidéos avec des contrôles de mouvement de caméra incluant le panoramique, le zoom et la rotation en utilisant Wan 2.2 Fun Camera Control.",
|
|
2293
|
+
"mediaType": "image",
|
|
2294
|
+
"mediaSubtype": "webp",
|
|
2295
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
|
|
2296
|
+
"tags": ["Vidéo vers vidéo", "Vidéo"],
|
|
2297
|
+
"models": ["Wan2.2", "Wan"],
|
|
2298
|
+
"date": "2025-08-17",
|
|
2299
|
+
"size": 40050570035,
|
|
2300
|
+
"vram": 40050570035,
|
|
2301
|
+
"usage": 228
|
|
2302
|
+
},
|
|
2303
|
+
{
|
|
2304
|
+
"name": "video_wan2_2_5B_ti2v",
|
|
2305
|
+
"title": "Wan 2.2 5B Génération Vidéo",
|
|
2306
|
+
"description": "Générer des vidéos à partir de texte ou d'images en utilisant le modèle hybride Wan 2.2 5B",
|
|
2307
|
+
"mediaType": "image",
|
|
2308
|
+
"mediaSubtype": "webp",
|
|
2309
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2310
|
+
"models": ["Wan2.2", "Wan"],
|
|
2311
|
+
"date": "2025-07-29",
|
|
2312
|
+
"size": 18146236826,
|
|
2313
|
+
"vram": 18146236826,
|
|
2314
|
+
"usage": 392
|
|
2315
|
+
},
|
|
2316
|
+
{
|
|
2317
|
+
"name": "video_humo",
|
|
2318
|
+
"title": "HuMo Génération Vidéo",
|
|
2319
|
+
"description": "Générez des vidéos basées sur l'audio, l'image et le texte, en préservant la synchronisation labiale des personnages.",
|
|
2320
|
+
"mediaType": "image",
|
|
2321
|
+
"mediaSubtype": "webp",
|
|
2322
|
+
"tags": ["Vidéo"],
|
|
2323
|
+
"models": ["HuMo"],
|
|
2324
|
+
"date": "2025-09-21",
|
|
2325
|
+
"size": 27895812588,
|
|
2326
|
+
"vram": 27895812588,
|
|
2327
|
+
"usage": 424
|
|
2328
|
+
},
|
|
2329
|
+
{
|
|
2330
|
+
"name": "video_wan2_2_5B_fun_inpaint",
|
|
2331
|
+
"title": "Wan 2.2 5B Fun Inpainting",
|
|
2332
|
+
"description": "Inpainting vidéo efficace à partir des images de début et de fin. Le modèle 5B offre des itérations rapides pour tester les flux de travail.",
|
|
2333
|
+
"mediaType": "image",
|
|
2334
|
+
"mediaSubtype": "webp",
|
|
2335
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2336
|
+
"models": ["Wan2.2", "Wan"],
|
|
2337
|
+
"date": "2025-07-29",
|
|
2338
|
+
"size": 18146236826,
|
|
2339
|
+
"vram": 18146236826,
|
|
2340
|
+
"usage": 53
|
|
2341
|
+
},
|
|
2342
|
+
{
|
|
2343
|
+
"name": "video_wan2_2_5B_fun_control",
|
|
2344
|
+
"title": "Wan 2.2 5B Fun Control",
|
|
2345
|
+
"description": "Contrôle vidéo multi-conditions avec guidance par pose, profondeur et contours. Taille compacte 5B pour un développement expérimental.",
|
|
2346
|
+
"mediaType": "image",
|
|
2347
|
+
"mediaSubtype": "webp",
|
|
2348
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2349
|
+
"models": ["Wan2.2", "Wan"],
|
|
2350
|
+
"date": "2025-07-29",
|
|
2351
|
+
"size": 18146236826,
|
|
2352
|
+
"vram": 18146236826,
|
|
2353
|
+
"usage": 110
|
|
2354
|
+
},
|
|
2355
|
+
{
|
|
2356
|
+
"name": "video_wan_vace_14B_t2v",
|
|
2357
|
+
"title": "Wan VACE Text to Video",
|
|
2358
|
+
"description": "Transformer des descriptions textuelles en vidéos de haute qualité. Prend en charge à la fois 480p et 720p avec le modèle VACE-14B.",
|
|
2359
|
+
"mediaType": "image",
|
|
2360
|
+
"mediaSubtype": "webp",
|
|
2361
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2362
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2363
|
+
"models": ["Wan2.1", "Wan"],
|
|
2364
|
+
"date": "2025-05-21",
|
|
2365
|
+
"size": 57756572713,
|
|
2366
|
+
"vram": 57756572713,
|
|
2367
|
+
"usage": 162
|
|
2368
|
+
},
|
|
2369
|
+
{
|
|
2370
|
+
"name": "video_wan_vace_14B_ref2v",
|
|
2371
|
+
"title": "Wan VACE Reference to Video",
|
|
2372
|
+
"description": "Créer des vidéos qui correspondent au style et au contenu d'une image de référence. Parfait pour la génération de vidéos cohérentes en style.",
|
|
2373
|
+
"mediaType": "image",
|
|
2374
|
+
"mediaSubtype": "webp",
|
|
2375
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2376
|
+
"tags": ["Vidéo", "Image vers vidéo"],
|
|
2377
|
+
"models": ["Wan2.1", "Wan"],
|
|
2378
|
+
"date": "2025-05-21",
|
|
2379
|
+
"size": 57756572713,
|
|
2380
|
+
"vram": 57756572713,
|
|
2381
|
+
"usage": 171
|
|
2382
|
+
},
|
|
2383
|
+
{
|
|
2384
|
+
"name": "video_wan_vace_14B_v2v",
|
|
2385
|
+
"title": "Wan VACE Control Video",
|
|
2386
|
+
"description": "Générer des vidéos en contrôlant les vidéos d'entrée et les images de référence en utilisant Wan VACE.",
|
|
2387
|
+
"mediaType": "image",
|
|
2388
|
+
"mediaSubtype": "webp",
|
|
2389
|
+
"thumbnailVariant": "compareSlider",
|
|
2390
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2391
|
+
"tags": ["Vidéo vers vidéo", "Vidéo"],
|
|
2392
|
+
"models": ["Wan2.1", "Wan"],
|
|
2393
|
+
"date": "2025-05-21",
|
|
2394
|
+
"size": 57756572713,
|
|
2395
|
+
"vram": 57756572713,
|
|
2396
|
+
"usage": 306
|
|
2397
|
+
},
|
|
2398
|
+
{
|
|
2399
|
+
"name": "video_wan_vace_outpainting",
|
|
2400
|
+
"title": "Wan VACE Outpainting",
|
|
2401
|
+
"description": "Générer des vidéos étendues en agrandissant la taille de la vidéo en utilisant l'outpainting Wan VACE.",
|
|
2402
|
+
"mediaType": "image",
|
|
2403
|
+
"mediaSubtype": "webp",
|
|
2404
|
+
"thumbnailVariant": "compareSlider",
|
|
2405
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2406
|
+
"tags": ["Outpainting", "Vidéo"],
|
|
2407
|
+
"models": ["Wan2.1", "Wan"],
|
|
2408
|
+
"date": "2025-05-21",
|
|
2409
|
+
"size": 57756572713,
|
|
2410
|
+
"vram": 57756572713,
|
|
2411
|
+
"usage": 117
|
|
2412
|
+
},
|
|
2413
|
+
{
|
|
2414
|
+
"name": "video_wan_vace_flf2v",
|
|
2415
|
+
"title": "Wan VACE First-Last Frame",
|
|
2416
|
+
"description": "Générer des transitions vidéo fluides en définissant les images de début et de fin. Prend en charge les séquences d'images clés personnalisées.",
|
|
2417
|
+
"mediaType": "image",
|
|
2418
|
+
"mediaSubtype": "webp",
|
|
2419
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2420
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
2421
|
+
"models": ["Wan2.1", "Wan"],
|
|
2422
|
+
"date": "2025-05-21",
|
|
2423
|
+
"size": 57756572713,
|
|
2424
|
+
"vram": 57756572713,
|
|
2425
|
+
"usage": 136
|
|
2426
|
+
},
|
|
2427
|
+
{
|
|
2428
|
+
"name": "video_wan_vace_inpainting",
|
|
2429
|
+
"title": "Wan VACE Inpainting",
|
|
2430
|
+
"description": "Éditer des régions spécifiques dans les vidéos tout en préservant le contenu environnant. Idéal pour la suppression ou le remplacement d'objets.",
|
|
2431
|
+
"mediaType": "image",
|
|
2432
|
+
"mediaSubtype": "webp",
|
|
2433
|
+
"thumbnailVariant": "compareSlider",
|
|
2434
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2435
|
+
"tags": ["Inpainting", "Vidéo"],
|
|
2436
|
+
"models": ["Wan2.1", "Wan"],
|
|
2437
|
+
"date": "2025-05-21",
|
|
2438
|
+
"size": 57756572713,
|
|
2439
|
+
"vram": 57756572713,
|
|
2440
|
+
"usage": 261
|
|
2441
|
+
},
|
|
2442
|
+
{
|
|
2443
|
+
"name": "video_wan2.1_alpha_t2v_14B",
|
|
2444
|
+
"title": "Wan2.1 Alpha Texte vers Vidéo",
|
|
2445
|
+
"description": "Générez des vidéos à partir de texte avec support de canal alpha pour des arrière-plans transparents et objets semi-transparents.",
|
|
2446
|
+
"mediaType": "image",
|
|
2447
|
+
"mediaSubtype": "webp",
|
|
2448
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2449
|
+
"models": ["Wan2.1", "Wan-Move", "Motion Control", "Wan"],
|
|
2450
|
+
"date": "2025-10-06",
|
|
2451
|
+
"size": 22494891213,
|
|
2452
|
+
"vram": 22494891213,
|
|
2453
|
+
"usage": 162
|
|
2454
|
+
},
|
|
2455
|
+
{
|
|
2456
|
+
"name": "video_wanmove_480p",
|
|
2457
|
+
"title": "Wan-Move Image vers vidéo à contrôle de mouvement",
|
|
2458
|
+
"description": "Générez des vidéos à partir d'une seule image avec Wan-Move, en contrôlant précisément les mouvements au niveau des points grâce au guidage par trajectoire.",
|
|
2459
|
+
"mediaType": "image",
|
|
2460
|
+
"mediaSubtype": "webp",
|
|
2461
|
+
"tags": ["Image vers vidéo", "Contrôle dynamique", "Vidéo"],
|
|
2462
|
+
"models": ["Wan2.1", "Wan"],
|
|
2463
|
+
"date": "2025-12-15",
|
|
2464
|
+
"size": 25420837683,
|
|
2465
|
+
"vram": 25420837683,
|
|
2466
|
+
"usage": 176
|
|
2467
|
+
},
|
|
2468
|
+
{
|
|
2469
|
+
"name": "video_wanmove_480p_hallucination",
|
|
2470
|
+
"title": "WanMove : Illusion Rêve Éveillé",
|
|
2471
|
+
"description": "Générez des images dynamiques à partir de trajectoires et créez des effets vidéo oniriques avec WanMove",
|
|
2472
|
+
"mediaType": "image",
|
|
2473
|
+
"mediaSubtype": "webp",
|
|
2474
|
+
"tags": ["Image vers vidéo", "Contrôle dynamique", "Vidéo"],
|
|
2475
|
+
"models": ["Wan2.1", "Wan"],
|
|
2476
|
+
"date": "2025-12-15",
|
|
2477
|
+
"size": 25420837683,
|
|
2478
|
+
"vram": 25420837683,
|
|
2479
|
+
"usage": 176,
|
|
2480
|
+
"requiresCustomNodes": ["comfyui_fill-nodes"]
|
|
2481
|
+
},
|
|
2482
|
+
{
|
|
2483
|
+
"name": "video_wan_ati",
|
|
2484
|
+
"title": "Wan ATI",
|
|
2485
|
+
"description": "Génération de vidéo contrôlée par trajectoire.",
|
|
2486
|
+
"mediaType": "image",
|
|
2487
|
+
"mediaSubtype": "webp",
|
|
2488
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2489
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
|
|
2490
|
+
"tags": ["Vidéo"],
|
|
2491
|
+
"models": ["Wan2.1", "Wan"],
|
|
2492
|
+
"date": "2025-05-21",
|
|
2493
|
+
"size": 25393994138,
|
|
2494
|
+
"vram": 25393994138,
|
|
2495
|
+
"usage": 81
|
|
2496
|
+
},
|
|
2497
|
+
{
|
|
2498
|
+
"name": "video_wan2.1_fun_camera_v1.1_1.3B",
|
|
2499
|
+
"title": "Wan 2.1 Contrôle Caméra Fun 1.3B",
|
|
2500
|
+
"description": "Générer des vidéos dynamiques avec des mouvements de caméra cinématographiques en utilisant le modèle Wan 2.1 Fun Camera 1.3B.",
|
|
2501
|
+
"mediaType": "image",
|
|
2502
|
+
"mediaSubtype": "webp",
|
|
2503
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
2504
|
+
"tags": ["Vidéo"],
|
|
2505
|
+
"models": ["Wan2.1", "Wan"],
|
|
2506
|
+
"date": "2025-04-15",
|
|
2507
|
+
"size": 11489037517,
|
|
2508
|
+
"vram": 11489037517,
|
|
2509
|
+
"usage": 22
|
|
2510
|
+
},
|
|
2511
|
+
{
|
|
2512
|
+
"name": "video_wan2.1_fun_camera_v1.1_14B",
|
|
2513
|
+
"title": "Wan 2.1 Contrôle Caméra Fun 14B",
|
|
2514
|
+
"description": "Générer des vidéos de haute qualité avec un contrôle avancé de la caméra en utilisant le modèle 14B complet",
|
|
2515
|
+
"mediaType": "image",
|
|
2516
|
+
"mediaSubtype": "webp",
|
|
2517
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
2518
|
+
"tags": ["Vidéo"],
|
|
2519
|
+
"models": ["Wan2.1", "Wan"],
|
|
2520
|
+
"date": "2025-04-15",
|
|
2521
|
+
"size": 42047729828,
|
|
2522
|
+
"vram": 42047729828,
|
|
2523
|
+
"usage": 48
|
|
2524
|
+
},
|
|
2525
|
+
{
|
|
2526
|
+
"name": "text_to_video_wan",
|
|
2527
|
+
"title": "Wan 2.1 Texte vers Vidéo",
|
|
2528
|
+
"description": "Générer des vidéos à partir de prompts textuels en utilisant Wan 2.1.",
|
|
2529
|
+
"mediaType": "image",
|
|
2530
|
+
"mediaSubtype": "webp",
|
|
2531
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
2532
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2533
|
+
"models": ["Wan2.1", "Wan"],
|
|
2534
|
+
"date": "2025-03-01",
|
|
2535
|
+
"size": 9824737690,
|
|
2536
|
+
"vram": 9824737690,
|
|
2537
|
+
"usage": 119
|
|
2538
|
+
},
|
|
2539
|
+
{
|
|
2540
|
+
"name": "image_to_video_wan",
|
|
2541
|
+
"title": "Wan 2.1 Image vers Vidéo",
|
|
2542
|
+
"description": "Générer des vidéos à partir d'images en utilisant Wan 2.1.",
|
|
2543
|
+
"mediaType": "image",
|
|
2544
|
+
"mediaSubtype": "webp",
|
|
2545
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
2546
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2547
|
+
"models": ["Wan2.1", "Wan"],
|
|
2548
|
+
"date": "2025-03-01",
|
|
2549
|
+
"size": 41049149932,
|
|
2550
|
+
"vram": 41049149932,
|
|
2551
|
+
"usage": 143
|
|
2552
|
+
},
|
|
2553
|
+
{
|
|
2554
|
+
"name": "wan2.1_fun_inp",
|
|
2555
|
+
"title": "Wan 2.1 Inpainting",
|
|
2556
|
+
"description": "Générer des vidéos à partir des images de début et de fin en utilisant l'inpainting Wan 2.1.",
|
|
2557
|
+
"mediaType": "image",
|
|
2558
|
+
"mediaSubtype": "webp",
|
|
2559
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
|
|
2560
|
+
"tags": ["Inpainting", "Vidéo"],
|
|
2561
|
+
"models": ["Wan2.1", "Wan"],
|
|
2562
|
+
"date": "2025-04-15",
|
|
2563
|
+
"size": 11381663334,
|
|
2564
|
+
"vram": 11381663334,
|
|
2565
|
+
"usage": 13
|
|
2566
|
+
},
|
|
2567
|
+
{
|
|
2568
|
+
"name": "wan2.1_fun_control",
|
|
2569
|
+
"title": "Wan 2.1 ControlNet",
|
|
2570
|
+
"description": "Générer des vidéos guidées par des contrôles de pose, de profondeur et de contours en utilisant Wan 2.1 ControlNet.",
|
|
2571
|
+
"mediaType": "image",
|
|
2572
|
+
"mediaSubtype": "webp",
|
|
2573
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2574
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
2575
|
+
"tags": ["Vidéo vers vidéo", "Vidéo"],
|
|
2576
|
+
"models": ["Wan2.1", "Wan"],
|
|
2577
|
+
"date": "2025-04-15",
|
|
2578
|
+
"size": 11381663334,
|
|
2579
|
+
"vram": 11381663334,
|
|
2580
|
+
"usage": 115
|
|
2581
|
+
},
|
|
2582
|
+
{
|
|
2583
|
+
"name": "wan2.1_flf2v_720_f16",
|
|
2584
|
+
"title": "Wan 2.1 Vidéo Premier-Dernier Image 720p F16",
|
|
2585
|
+
"description": "Générer des vidéos en contrôlant les première et dernière images en utilisant Wan 2.1 FLF2V.",
|
|
2586
|
+
"mediaType": "image",
|
|
2587
|
+
"mediaSubtype": "webp",
|
|
2588
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
2589
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
2590
|
+
"models": ["Wan2.1", "Wan"],
|
|
2591
|
+
"date": "2025-04-15",
|
|
2592
|
+
"size": 41049149932,
|
|
2593
|
+
"vram": 41049149932,
|
|
2594
|
+
"usage": 43
|
|
2595
|
+
},
|
|
2596
|
+
{
|
|
2597
|
+
"name": "ltxv_text_to_video",
|
|
2598
|
+
"title": "LTXV Texte vers Vidéo",
|
|
2599
|
+
"mediaType": "image",
|
|
2600
|
+
"mediaSubtype": "webp",
|
|
2601
|
+
"description": "Générer des vidéos à partir de prompts textuels.",
|
|
2602
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
2603
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2604
|
+
"models": ["LTXV"],
|
|
2605
|
+
"date": "2025-03-01",
|
|
2606
|
+
"size": 19155554140,
|
|
2607
|
+
"vram": 19155554140,
|
|
2608
|
+
"usage": 68
|
|
2609
|
+
},
|
|
2610
|
+
{
|
|
2611
|
+
"name": "ltxv_image_to_video",
|
|
2612
|
+
"title": "LTXV Image vers Vidéo",
|
|
2613
|
+
"mediaType": "image",
|
|
2614
|
+
"mediaSubtype": "webp",
|
|
2615
|
+
"description": "Générer des vidéos à partir d'images fixes.",
|
|
2616
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
2617
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
2618
|
+
"models": ["LTXV"],
|
|
2619
|
+
"date": "2025-03-01",
|
|
2620
|
+
"size": 19155554140,
|
|
2621
|
+
"vram": 19155554140,
|
|
2622
|
+
"usage": 108
|
|
2623
|
+
},
|
|
2624
|
+
{
|
|
2625
|
+
"name": "hunyuan_video_text_to_video",
|
|
2626
|
+
"title": "Hunyuan Vidéo Texte vers Vidéo",
|
|
2627
|
+
"mediaType": "image",
|
|
2628
|
+
"mediaSubtype": "webp",
|
|
2629
|
+
"description": "Générer des vidéos à partir de prompts textuels en utilisant le modèle Hunyuan.",
|
|
2630
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
|
|
2631
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2632
|
+
"models": ["Hunyuan Video", "Tencent"],
|
|
2633
|
+
"date": "2025-03-01",
|
|
2634
|
+
"size": 35476429865,
|
|
2635
|
+
"vram": 35476429865,
|
|
2636
|
+
"usage": 52
|
|
2637
|
+
},
|
|
2638
|
+
{
|
|
2639
|
+
"name": "txt_to_image_to_video",
|
|
2640
|
+
"title": "SVD Texte à Image à Vidéo",
|
|
2641
|
+
"mediaType": "image",
|
|
2642
|
+
"mediaSubtype": "webp",
|
|
2643
|
+
"description": "Générer des vidéos en créant d'abord des images à partir de prompts textuels.",
|
|
2644
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
2645
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
2646
|
+
"models": ["SVD", "Stability"],
|
|
2647
|
+
"date": "2025-03-01",
|
|
2648
|
+
"size": 16492674417,
|
|
2649
|
+
"vram": 16492674417,
|
|
2650
|
+
"usage": 14
|
|
2651
|
+
}
|
|
2652
|
+
]
|
|
2653
|
+
},
|
|
2654
|
+
{
|
|
2655
|
+
"moduleName": "default",
|
|
2656
|
+
"type": "audio",
|
|
2657
|
+
"category": "Type de génération",
|
|
2658
|
+
"icon": "icon-[lucide--volume-2]",
|
|
2659
|
+
"title": "Audio",
|
|
2660
|
+
"templates": [
|
|
2661
|
+
{
|
|
2662
|
+
"name": "api_stability_ai_text_to_audio",
|
|
2663
|
+
"title": "Stability AI : Texte vers Audio",
|
|
2664
|
+
"description": "Générez de la musique à partir de texte avec Stable Audio 2.5. Créez des pistes de plusieurs minutes en quelques secondes.",
|
|
2665
|
+
"mediaType": "audio",
|
|
2666
|
+
"mediaSubtype": "mp3",
|
|
2667
|
+
"tags": ["Texte vers audio", "Audio", "API"],
|
|
2668
|
+
"date": "2025-09-09",
|
|
2669
|
+
"models": ["Stability", "Stable Audio"],
|
|
2670
|
+
"openSource": false,
|
|
2671
|
+
"size": 0,
|
|
2672
|
+
"vram": 0,
|
|
2673
|
+
"usage": 119
|
|
2674
|
+
},
|
|
2675
|
+
{
|
|
2676
|
+
"name": "api_stability_ai_audio_to_audio",
|
|
2677
|
+
"title": "Stability AI : Audio vers Audio",
|
|
2678
|
+
"description": "Transformez de l'audio en de nouvelles compositions avec Stable Audio 2.5. Téléversez un audio et l'IA crée des pistes complètes.",
|
|
2679
|
+
"mediaType": "audio",
|
|
2680
|
+
"mediaSubtype": "mp3",
|
|
2681
|
+
"tags": ["Audio vers audio", "Audio", "API"],
|
|
2682
|
+
"date": "2025-09-09",
|
|
2683
|
+
"models": ["Stability", "Stable Audio"],
|
|
2684
|
+
"openSource": false,
|
|
2685
|
+
"size": 0,
|
|
2686
|
+
"vram": 0,
|
|
2687
|
+
"usage": 67
|
|
2688
|
+
},
|
|
2689
|
+
{
|
|
2690
|
+
"name": "api_stability_ai_audio_inpaint",
|
|
2691
|
+
"title": "Stability AI : Inpainting Audio",
|
|
2692
|
+
"description": "Complétez ou prolongez des pistes audio avec Stable Audio 2.5. Téléversez un audio et l'IA génère le reste.",
|
|
2693
|
+
"mediaType": "audio",
|
|
2694
|
+
"mediaSubtype": "mp3",
|
|
2695
|
+
"tags": ["Audio vers audio", "Audio", "API"],
|
|
2696
|
+
"date": "2025-09-09",
|
|
2697
|
+
"models": ["Stability", "Stable Audio"],
|
|
2698
|
+
"openSource": false,
|
|
2699
|
+
"size": 0,
|
|
2700
|
+
"vram": 0,
|
|
2701
|
+
"usage": 17
|
|
2702
|
+
},
|
|
2703
|
+
{
|
|
2704
|
+
"name": "audio_stable_audio_example",
|
|
2705
|
+
"title": "Audio Stable",
|
|
2706
|
+
"mediaType": "audio",
|
|
2707
|
+
"mediaSubtype": "mp3",
|
|
2708
|
+
"description": "Générer de l'audio à partir de prompts textuels en utilisant Stable Audio.",
|
|
2709
|
+
"tags": ["Texte vers audio", "Audio"],
|
|
2710
|
+
"models": ["Stable Audio", "Stability"],
|
|
2711
|
+
"date": "2025-03-01",
|
|
2712
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
|
|
2713
|
+
"size": 5744518758,
|
|
2714
|
+
"vram": 5744518758,
|
|
2715
|
+
"usage": 270
|
|
2716
|
+
},
|
|
2717
|
+
{
|
|
2718
|
+
"name": "audio_ace_step_1_t2a_instrumentals",
|
|
2719
|
+
"title": "ACE-Step v1 Texte vers Musique Instrumentale",
|
|
2720
|
+
"mediaType": "audio",
|
|
2721
|
+
"mediaSubtype": "mp3",
|
|
2722
|
+
"description": "Générer de la musique instrumentale à partir de prompts textuels en utilisant ACE-Step v1.",
|
|
2723
|
+
"tags": ["Texte vers audio", "Audio"],
|
|
2724
|
+
"models": ["ACE-Step"],
|
|
2725
|
+
"date": "2025-03-01",
|
|
2726
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
2727
|
+
"size": 7698728878,
|
|
2728
|
+
"vram": 7698728878,
|
|
2729
|
+
"usage": 139
|
|
2730
|
+
},
|
|
2731
|
+
{
|
|
2732
|
+
"name": "audio_ace_step_1_t2a_song",
|
|
2733
|
+
"title": "ACE Step v1 Texte vers Chanson",
|
|
2734
|
+
"mediaType": "audio",
|
|
2735
|
+
"mediaSubtype": "mp3",
|
|
2736
|
+
"description": "Générer des chansons avec des voix à partir de prompts textuels en utilisant ACE-Step v1, prenant en charge la multilingue et la personnalisation du style.",
|
|
2737
|
+
"tags": ["Texte vers audio", "Audio"],
|
|
2738
|
+
"models": ["ACE-Step"],
|
|
2739
|
+
"date": "2025-03-01",
|
|
2740
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
2741
|
+
"size": 7698728878,
|
|
2742
|
+
"vram": 7698728878,
|
|
2743
|
+
"usage": 123
|
|
2272
2744
|
},
|
|
2273
2745
|
{
|
|
2274
|
-
"name": "
|
|
2275
|
-
"title": "
|
|
2276
|
-
"
|
|
2746
|
+
"name": "audio_ace_step_1_m2m_editing",
|
|
2747
|
+
"title": "ACE Step v1 Édition M2M",
|
|
2748
|
+
"mediaType": "audio",
|
|
2749
|
+
"mediaSubtype": "mp3",
|
|
2750
|
+
"description": "Éditer des chansons existantes pour changer le style et les paroles en utilisant ACE-Step v1 M2M.",
|
|
2751
|
+
"tags": ["Édition audio", "Audio"],
|
|
2752
|
+
"models": ["ACE-Step"],
|
|
2753
|
+
"date": "2025-03-01",
|
|
2754
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
2755
|
+
"size": 7698728878,
|
|
2756
|
+
"vram": 7698728878,
|
|
2757
|
+
"usage": 138
|
|
2758
|
+
}
|
|
2759
|
+
]
|
|
2760
|
+
},
|
|
2761
|
+
{
|
|
2762
|
+
"moduleName": "default",
|
|
2763
|
+
"type": "3d",
|
|
2764
|
+
"category": "Type de génération",
|
|
2765
|
+
"icon": "icon-[lucide--box]",
|
|
2766
|
+
"title": "Modèle 3D",
|
|
2767
|
+
"templates": [
|
|
2768
|
+
{
|
|
2769
|
+
"name": "api_tripo3_0_image_to_model",
|
|
2770
|
+
"title": "Tripo3.0 : Image vers Modèle",
|
|
2771
|
+
"description": "Transformez des images ou croquis en modèles 3D avec la géométrie précise et les textures PBR prêtes à la production de Tripo 3.0.",
|
|
2277
2772
|
"mediaType": "image",
|
|
2278
2773
|
"mediaSubtype": "webp",
|
|
2279
|
-
"tags": ["
|
|
2280
|
-
"models": ["
|
|
2281
|
-
"date": "2025-
|
|
2282
|
-
"
|
|
2283
|
-
"OpenSource": false,
|
|
2774
|
+
"tags": ["Image to Model", "3D", "API"],
|
|
2775
|
+
"models": ["Tripo"],
|
|
2776
|
+
"date": "2025-12-23",
|
|
2777
|
+
"openSource": false,
|
|
2284
2778
|
"size": 0,
|
|
2285
2779
|
"vram": 0
|
|
2286
2780
|
},
|
|
2287
2781
|
{
|
|
2288
|
-
"name": "
|
|
2289
|
-
"title": "
|
|
2290
|
-
"description": "
|
|
2782
|
+
"name": "api_tripo3_0_text_to_model",
|
|
2783
|
+
"title": "Tripo3.0 : Texte vers Modèle",
|
|
2784
|
+
"description": "Générez des modèles 3D précis à partir de texte, géométrie ultra-haute résolution et matériaux PBR réalistes.",
|
|
2291
2785
|
"mediaType": "image",
|
|
2292
2786
|
"mediaSubtype": "webp",
|
|
2293
|
-
"tags": ["
|
|
2294
|
-
"models": ["
|
|
2295
|
-
"date": "2025-
|
|
2296
|
-
"
|
|
2297
|
-
"OpenSource": false,
|
|
2787
|
+
"tags": ["Texte vers modèle", "3D", "API"],
|
|
2788
|
+
"models": ["Tripo"],
|
|
2789
|
+
"date": "2025-12-23",
|
|
2790
|
+
"openSource": false,
|
|
2298
2791
|
"size": 0,
|
|
2299
2792
|
"vram": 0
|
|
2300
2793
|
},
|
|
2301
2794
|
{
|
|
2302
|
-
"name": "
|
|
2303
|
-
"title": "
|
|
2304
|
-
"description": "
|
|
2795
|
+
"name": "api_tripo_text_to_model",
|
|
2796
|
+
"title": "Tripo: Texte vers Modèle",
|
|
2797
|
+
"description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
|
|
2305
2798
|
"mediaType": "image",
|
|
2306
2799
|
"mediaSubtype": "webp",
|
|
2307
|
-
"tags": ["
|
|
2308
|
-
"models": ["
|
|
2800
|
+
"tags": ["Texte vers modèle", "3D", "API"],
|
|
2801
|
+
"models": ["Tripo"],
|
|
2309
2802
|
"date": "2025-03-01",
|
|
2310
2803
|
"tutorialUrl": "",
|
|
2311
|
-
"
|
|
2804
|
+
"openSource": false,
|
|
2312
2805
|
"size": 0,
|
|
2313
|
-
"vram": 0
|
|
2806
|
+
"vram": 0,
|
|
2807
|
+
"usage": 48
|
|
2314
2808
|
},
|
|
2315
2809
|
{
|
|
2316
|
-
"name": "
|
|
2317
|
-
"title": "
|
|
2318
|
-
"description": "Generate
|
|
2810
|
+
"name": "api_tripo_image_to_model",
|
|
2811
|
+
"title": "Tripo: Image vers Modèle",
|
|
2812
|
+
"description": "Generate professional 3D assets from 2D images using Tripo engine.",
|
|
2319
2813
|
"mediaType": "image",
|
|
2320
2814
|
"mediaSubtype": "webp",
|
|
2321
|
-
"tags": ["Image vers
|
|
2322
|
-
"models": ["
|
|
2815
|
+
"tags": ["Image vers 3D", "3D", "API"],
|
|
2816
|
+
"models": ["Tripo"],
|
|
2323
2817
|
"date": "2025-03-01",
|
|
2324
2818
|
"tutorialUrl": "",
|
|
2325
|
-
"
|
|
2819
|
+
"openSource": false,
|
|
2326
2820
|
"size": 0,
|
|
2327
|
-
"vram": 0
|
|
2821
|
+
"vram": 0,
|
|
2822
|
+
"usage": 50
|
|
2328
2823
|
},
|
|
2329
2824
|
{
|
|
2330
|
-
"name": "
|
|
2331
|
-
"title": "
|
|
2332
|
-
"description": "
|
|
2825
|
+
"name": "api_tripo_multiview_to_model",
|
|
2826
|
+
"title": "Tripo: Multivue vers Modèle",
|
|
2827
|
+
"description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
|
|
2333
2828
|
"mediaType": "image",
|
|
2334
2829
|
"mediaSubtype": "webp",
|
|
2335
|
-
"tags": ["Image vers
|
|
2336
|
-
"models": ["
|
|
2830
|
+
"tags": ["Image vers 3D", "3D", "API"],
|
|
2831
|
+
"models": ["Tripo"],
|
|
2337
2832
|
"date": "2025-03-01",
|
|
2338
2833
|
"tutorialUrl": "",
|
|
2339
|
-
"
|
|
2834
|
+
"openSource": false,
|
|
2340
2835
|
"size": 0,
|
|
2341
|
-
"vram": 0
|
|
2342
|
-
|
|
2343
|
-
|
|
2344
|
-
},
|
|
2345
|
-
{
|
|
2346
|
-
"moduleName": "default",
|
|
2347
|
-
"type": "image",
|
|
2348
|
-
"category": "CLOSED SOURCE MODELS",
|
|
2349
|
-
"icon": "icon-[lucide--box]",
|
|
2350
|
-
"title": "3D API",
|
|
2351
|
-
"templates": [
|
|
2836
|
+
"vram": 0,
|
|
2837
|
+
"usage": 70
|
|
2838
|
+
},
|
|
2352
2839
|
{
|
|
2353
2840
|
"name": "api_rodin_gen2",
|
|
2354
2841
|
"title": "Rodin: Gen-2 Image vers Modèle",
|
|
@@ -2359,138 +2846,106 @@
|
|
|
2359
2846
|
"models": ["Rodin"],
|
|
2360
2847
|
"date": "2025-09-27",
|
|
2361
2848
|
"tutorialUrl": "",
|
|
2362
|
-
"
|
|
2849
|
+
"openSource": false,
|
|
2363
2850
|
"size": 0,
|
|
2364
|
-
"vram": 0
|
|
2851
|
+
"vram": 0,
|
|
2852
|
+
"usage": 355
|
|
2365
2853
|
},
|
|
2366
2854
|
{
|
|
2367
2855
|
"name": "api_rodin_image_to_model",
|
|
2368
2856
|
"title": "Rodin: Image vers Modèle",
|
|
2369
2857
|
"description": "Generate detailed 3D models from single photos using Rodin AI.",
|
|
2370
2858
|
"mediaType": "image",
|
|
2371
|
-
"thumbnailVariant": "compareSlider",
|
|
2372
2859
|
"mediaSubtype": "webp",
|
|
2373
2860
|
"tags": ["Image vers 3D", "3D", "API"],
|
|
2374
2861
|
"models": ["Rodin"],
|
|
2375
2862
|
"date": "2025-03-01",
|
|
2376
2863
|
"tutorialUrl": "",
|
|
2377
|
-
"
|
|
2864
|
+
"openSource": false,
|
|
2378
2865
|
"size": 0,
|
|
2379
|
-
"vram": 0
|
|
2866
|
+
"vram": 0,
|
|
2867
|
+
"usage": 25
|
|
2380
2868
|
},
|
|
2381
2869
|
{
|
|
2382
2870
|
"name": "api_rodin_multiview_to_model",
|
|
2383
2871
|
"title": "Rodin: Multivue vers Modèle",
|
|
2384
2872
|
"description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
|
|
2385
2873
|
"mediaType": "image",
|
|
2386
|
-
"thumbnailVariant": "compareSlider",
|
|
2387
2874
|
"mediaSubtype": "webp",
|
|
2388
2875
|
"tags": ["Image vers 3D", "3D", "API"],
|
|
2389
2876
|
"models": ["Rodin"],
|
|
2390
2877
|
"date": "2025-03-01",
|
|
2391
2878
|
"tutorialUrl": "",
|
|
2392
|
-
"
|
|
2879
|
+
"openSource": false,
|
|
2393
2880
|
"size": 0,
|
|
2394
|
-
"vram": 0
|
|
2881
|
+
"vram": 0,
|
|
2882
|
+
"usage": 47
|
|
2395
2883
|
},
|
|
2396
2884
|
{
|
|
2397
|
-
"name": "
|
|
2398
|
-
"title": "
|
|
2399
|
-
"description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
|
|
2885
|
+
"name": "3d_hunyuan3d-v2.1",
|
|
2886
|
+
"title": "Hunyuan3D 2.1",
|
|
2400
2887
|
"mediaType": "image",
|
|
2401
2888
|
"mediaSubtype": "webp",
|
|
2402
|
-
"
|
|
2403
|
-
"
|
|
2889
|
+
"description": "Générez des modèles 3D à partir d'images uniques avec Hunyuan3D 2.0.",
|
|
2890
|
+
"tags": ["Image vers 3D", "3D"],
|
|
2891
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
2404
2892
|
"date": "2025-03-01",
|
|
2405
2893
|
"tutorialUrl": "",
|
|
2406
|
-
"
|
|
2407
|
-
"
|
|
2408
|
-
"
|
|
2894
|
+
"size": 4928474972,
|
|
2895
|
+
"vram": 4928474972,
|
|
2896
|
+
"usage": 384
|
|
2409
2897
|
},
|
|
2410
2898
|
{
|
|
2411
|
-
"name": "
|
|
2412
|
-
"title": "
|
|
2413
|
-
"description": "Generate professional 3D assets from 2D images using Tripo engine.",
|
|
2899
|
+
"name": "3d_hunyuan3d_image_to_model",
|
|
2900
|
+
"title": "Hunyuan3D 2.0",
|
|
2414
2901
|
"mediaType": "image",
|
|
2415
|
-
"thumbnailVariant": "compareSlider",
|
|
2416
2902
|
"mediaSubtype": "webp",
|
|
2417
|
-
"
|
|
2418
|
-
"
|
|
2903
|
+
"description": "Générer des modèles 3D à partir d'images simples en utilisant Hunyuan3D 2.0.",
|
|
2904
|
+
"tags": ["Image vers 3D", "3D"],
|
|
2905
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
2419
2906
|
"date": "2025-03-01",
|
|
2420
2907
|
"tutorialUrl": "",
|
|
2421
|
-
"
|
|
2422
|
-
"size":
|
|
2423
|
-
"vram":
|
|
2908
|
+
"openSource": false,
|
|
2909
|
+
"size": 4928474972,
|
|
2910
|
+
"vram": 4928474972,
|
|
2911
|
+
"usage": 69
|
|
2424
2912
|
},
|
|
2425
2913
|
{
|
|
2426
|
-
"name": "
|
|
2427
|
-
"title": "
|
|
2428
|
-
"description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
|
|
2914
|
+
"name": "3d_hunyuan3d_multiview_to_model",
|
|
2915
|
+
"title": "Hunyuan3D 2.0 Multivue",
|
|
2429
2916
|
"mediaType": "image",
|
|
2430
|
-
"thumbnailVariant": "compareSlider",
|
|
2431
2917
|
"mediaSubtype": "webp",
|
|
2432
|
-
"
|
|
2433
|
-
"
|
|
2918
|
+
"description": "Générer des modèles 3D à partir de vues multiples en utilisant Hunyuan3D 2.0 MV.",
|
|
2919
|
+
"tags": ["3D", "Image vers 3D"],
|
|
2920
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
2434
2921
|
"date": "2025-03-01",
|
|
2435
2922
|
"tutorialUrl": "",
|
|
2436
|
-
"
|
|
2437
|
-
"size":
|
|
2438
|
-
"vram":
|
|
2439
|
-
|
|
2440
|
-
]
|
|
2441
|
-
},
|
|
2442
|
-
{
|
|
2443
|
-
"moduleName": "default",
|
|
2444
|
-
"type": "audio",
|
|
2445
|
-
"category": "CLOSED SOURCE MODELS",
|
|
2446
|
-
"icon": "icon-[lucide--volume-2]",
|
|
2447
|
-
"title": "Audio API",
|
|
2448
|
-
"templates": [
|
|
2449
|
-
{
|
|
2450
|
-
"name": "api_stability_ai_text_to_audio",
|
|
2451
|
-
"title": "Stability AI : Texte vers Audio",
|
|
2452
|
-
"description": "Générez de la musique à partir de texte avec Stable Audio 2.5. Créez des pistes de plusieurs minutes en quelques secondes.",
|
|
2453
|
-
"mediaType": "audio",
|
|
2454
|
-
"mediaSubtype": "mp3",
|
|
2455
|
-
"tags": ["Texte vers audio", "Audio", "API"],
|
|
2456
|
-
"date": "2025-09-09",
|
|
2457
|
-
"models": ["Stability", "Stable Audio"],
|
|
2458
|
-
"OpenSource": false,
|
|
2459
|
-
"size": 0,
|
|
2460
|
-
"vram": 0
|
|
2461
|
-
},
|
|
2462
|
-
{
|
|
2463
|
-
"name": "api_stability_ai_audio_to_audio",
|
|
2464
|
-
"title": "Stability AI : Audio vers Audio",
|
|
2465
|
-
"description": "Transformez de l'audio en de nouvelles compositions avec Stable Audio 2.5. Téléversez un audio et l'IA crée des pistes complètes.",
|
|
2466
|
-
"mediaType": "audio",
|
|
2467
|
-
"mediaSubtype": "mp3",
|
|
2468
|
-
"tags": ["Audio vers audio", "Audio", "API"],
|
|
2469
|
-
"date": "2025-09-09",
|
|
2470
|
-
"models": ["Stability", "Stable Audio"],
|
|
2471
|
-
"OpenSource": false,
|
|
2472
|
-
"size": 0,
|
|
2473
|
-
"vram": 0
|
|
2923
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2924
|
+
"size": 4928474972,
|
|
2925
|
+
"vram": 4928474972,
|
|
2926
|
+
"usage": 97
|
|
2474
2927
|
},
|
|
2475
2928
|
{
|
|
2476
|
-
"name": "
|
|
2477
|
-
"title": "
|
|
2478
|
-
"
|
|
2479
|
-
"
|
|
2480
|
-
"
|
|
2481
|
-
"tags": ["
|
|
2482
|
-
"
|
|
2483
|
-
"
|
|
2484
|
-
"
|
|
2485
|
-
"
|
|
2486
|
-
"
|
|
2929
|
+
"name": "3d_hunyuan3d_multiview_to_model_turbo",
|
|
2930
|
+
"title": "Hunyuan3D 2.0 Multivue Turbo",
|
|
2931
|
+
"mediaType": "image",
|
|
2932
|
+
"mediaSubtype": "webp",
|
|
2933
|
+
"description": "Générer des modèles 3D à partir de vues multiples en utilisant Hunyuan3D 2.0 MV Turbo.",
|
|
2934
|
+
"tags": ["Image vers 3D", "3D"],
|
|
2935
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
2936
|
+
"date": "2025-03-01",
|
|
2937
|
+
"tutorialUrl": "",
|
|
2938
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2939
|
+
"size": 4928474972,
|
|
2940
|
+
"vram": 4928474972,
|
|
2941
|
+
"usage": 38
|
|
2487
2942
|
}
|
|
2488
2943
|
]
|
|
2489
2944
|
},
|
|
2490
2945
|
{
|
|
2491
2946
|
"moduleName": "default",
|
|
2492
2947
|
"type": "image",
|
|
2493
|
-
"category": "
|
|
2948
|
+
"category": "Type de génération",
|
|
2494
2949
|
"icon": "icon-[lucide--message-square-text]",
|
|
2495
2950
|
"title": "LLM API",
|
|
2496
2951
|
"templates": [
|
|
@@ -2504,9 +2959,10 @@
|
|
|
2504
2959
|
"models": ["OpenAI"],
|
|
2505
2960
|
"date": "2025-03-01",
|
|
2506
2961
|
"tutorialUrl": "",
|
|
2507
|
-
"
|
|
2962
|
+
"openSource": false,
|
|
2508
2963
|
"size": 0,
|
|
2509
|
-
"vram": 0
|
|
2964
|
+
"vram": 0,
|
|
2965
|
+
"usage": 35
|
|
2510
2966
|
},
|
|
2511
2967
|
{
|
|
2512
2968
|
"name": "api_google_gemini",
|
|
@@ -2518,9 +2974,144 @@
|
|
|
2518
2974
|
"models": ["Google Gemini", "Google"],
|
|
2519
2975
|
"date": "2025-03-01",
|
|
2520
2976
|
"tutorialUrl": "",
|
|
2521
|
-
"
|
|
2977
|
+
"openSource": false,
|
|
2522
2978
|
"size": 0,
|
|
2523
|
-
"vram": 0
|
|
2979
|
+
"vram": 0,
|
|
2980
|
+
"usage": 130
|
|
2981
|
+
}
|
|
2982
|
+
]
|
|
2983
|
+
},
|
|
2984
|
+
{
|
|
2985
|
+
"moduleName": "default",
|
|
2986
|
+
"type": "image",
|
|
2987
|
+
"isEssential": true,
|
|
2988
|
+
"title": "Démarrage",
|
|
2989
|
+
"templates": [
|
|
2990
|
+
{
|
|
2991
|
+
"name": "gsc_starter_1",
|
|
2992
|
+
"title": "Débutant 1 : Texte en image",
|
|
2993
|
+
"mediaType": "image",
|
|
2994
|
+
"mediaSubtype": "webp",
|
|
2995
|
+
"description": "Générez une image, connectez les nœuds et téléchargez-la avec Z-Image Turbo",
|
|
2996
|
+
"models": ["Z-Image-Turbo"],
|
|
2997
|
+
"date": "2025-12-10",
|
|
2998
|
+
"searchRank": 3,
|
|
2999
|
+
"size": 0,
|
|
3000
|
+
"vram": 0,
|
|
3001
|
+
"includeOnDistributions": ["cloud"]
|
|
3002
|
+
},
|
|
3003
|
+
{
|
|
3004
|
+
"name": "gsc_starter_2",
|
|
3005
|
+
"title": "Débutant 2 : Image en vidéo",
|
|
3006
|
+
"mediaType": "image",
|
|
3007
|
+
"mediaSubtype": "webp",
|
|
3008
|
+
"description": "Découvrez comment charger des images, créer une vidéo et trouver un nœud avec Wan 2.2",
|
|
3009
|
+
"models": ["Wan2.2", "Wan"],
|
|
3010
|
+
"date": "2025-12-10",
|
|
3011
|
+
"searchRank": 3,
|
|
3012
|
+
"size": 0,
|
|
3013
|
+
"vram": 0,
|
|
3014
|
+
"requiresCustomNodes": ["comfyui_essentials"],
|
|
3015
|
+
"includeOnDistributions": ["cloud"]
|
|
3016
|
+
},
|
|
3017
|
+
{
|
|
3018
|
+
"name": "gsc_starter_3",
|
|
3019
|
+
"title": "Débutant 3 : Photo produit",
|
|
3020
|
+
"mediaType": "image",
|
|
3021
|
+
"mediaSubtype": "webp",
|
|
3022
|
+
"description": "Apprenez à réaliser une photo produit à partir d’images, entrer dans un sous-graphe, enlever le bypass d’un nœud et découvrir les nœuds partenaires (Nano Banana Pro)",
|
|
3023
|
+
"models": ["Nano Banana Pro", "Google"],
|
|
3024
|
+
"date": "2025-12-10",
|
|
3025
|
+
"searchRank": 3,
|
|
3026
|
+
"size": 0,
|
|
3027
|
+
"vram": 0,
|
|
3028
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
3029
|
+
"includeOnDistributions": ["cloud"]
|
|
3030
|
+
},
|
|
3031
|
+
{
|
|
3032
|
+
"name": "01_get_started_text_to_image",
|
|
3033
|
+
"title": "Texte en image (Nouveau)",
|
|
3034
|
+
"mediaType": "image",
|
|
3035
|
+
"mediaSubtype": "webp",
|
|
3036
|
+
"description": "Générez des images à partir d'invites textuelles avec le modèle z-image-turbo",
|
|
3037
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/z-image/z-image-turbo",
|
|
3038
|
+
"tags": ["Texte vers image", "Image"],
|
|
3039
|
+
"models": ["Z-Image-Turbo"],
|
|
3040
|
+
"date": "2025-10-17",
|
|
3041
|
+
"size": 20862803640,
|
|
3042
|
+
"vram": 20862803640,
|
|
3043
|
+
"usage": 299
|
|
3044
|
+
},
|
|
3045
|
+
{
|
|
3046
|
+
"name": "02_qwen_Image_edit_subgraphed",
|
|
3047
|
+
"title": "Édition d'image (Nouveau)",
|
|
3048
|
+
"mediaType": "image",
|
|
3049
|
+
"mediaSubtype": "webp",
|
|
3050
|
+
"description": "Éditez vos images avec Qwen-Image-Edit",
|
|
3051
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
3052
|
+
"tags": ["Image vers image", "Éd. image", "ControlNet"],
|
|
3053
|
+
"models": ["Qwen-Image-Edit"],
|
|
3054
|
+
"date": "2025-10-17",
|
|
3055
|
+
"size": 31772020572,
|
|
3056
|
+
"vram": 31772020572,
|
|
3057
|
+
"usage": 6436
|
|
3058
|
+
},
|
|
3059
|
+
{
|
|
3060
|
+
"name": "03_video_wan2_2_14B_i2v_subgraphed",
|
|
3061
|
+
"title": "Image en Vidéo (Nouveau)",
|
|
3062
|
+
"description": "Générez des vidéos à partir d’une image avec Wan2.2 14B",
|
|
3063
|
+
"mediaType": "image",
|
|
3064
|
+
"mediaSubtype": "webp",
|
|
3065
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
3066
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
3067
|
+
"models": ["Wan2.2", "Wan"],
|
|
3068
|
+
"date": "2025-10-17",
|
|
3069
|
+
"size": 38031935406,
|
|
3070
|
+
"vram": 38031935406,
|
|
3071
|
+
"usage": 4084
|
|
3072
|
+
},
|
|
3073
|
+
{
|
|
3074
|
+
"name": "04_hunyuan_3d_2.1_subgraphed",
|
|
3075
|
+
"title": "Image vers 3D (Nouveau)",
|
|
3076
|
+
"mediaType": "image",
|
|
3077
|
+
"mediaSubtype": "webp",
|
|
3078
|
+
"description": "Générez des modèles 3D à partir d'une seule image avec Hunyuan3D 2.0.",
|
|
3079
|
+
"tags": ["Image vers 3D", "3D"],
|
|
3080
|
+
"models": ["Hunyuan3D"],
|
|
3081
|
+
"date": "2025-10-17",
|
|
3082
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
|
|
3083
|
+
"size": 4928474972,
|
|
3084
|
+
"vram": 4928474972,
|
|
3085
|
+
"usage": 152
|
|
3086
|
+
},
|
|
3087
|
+
{
|
|
3088
|
+
"name": "05_audio_ace_step_1_t2a_song_subgraphed",
|
|
3089
|
+
"title": "Texte en audio (Nouveau)",
|
|
3090
|
+
"mediaType": "image",
|
|
3091
|
+
"mediaSubtype": "webp",
|
|
3092
|
+
"description": "Générez de l'audio à partir d'invites textuelles avec ACE-Step v1",
|
|
3093
|
+
"tags": ["Texte vers audio", "Audio"],
|
|
3094
|
+
"models": ["ACE-Step"],
|
|
3095
|
+
"date": "2025-10-17",
|
|
3096
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
3097
|
+
"size": 7698728878,
|
|
3098
|
+
"vram": 7698728878,
|
|
3099
|
+
"usage": 101
|
|
3100
|
+
},
|
|
3101
|
+
{
|
|
3102
|
+
"name": "default",
|
|
3103
|
+
"title": "Génération d'images",
|
|
3104
|
+
"mediaType": "image",
|
|
3105
|
+
"mediaSubtype": "webp",
|
|
3106
|
+
"description": "Générer des images à partir de prompts textuels.",
|
|
3107
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
|
|
3108
|
+
"tags": ["Texte vers image", "Image"],
|
|
3109
|
+
"models": ["SD1.5", "Stability"],
|
|
3110
|
+
"date": "2025-03-01",
|
|
3111
|
+
"size": 2136746230,
|
|
3112
|
+
"vram": 3092376453,
|
|
3113
|
+
"status": "active",
|
|
3114
|
+
"usage": 168
|
|
2524
3115
|
}
|
|
2525
3116
|
]
|
|
2526
3117
|
}
|