comfyui-workflow-templates-media-other 0.3.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfyui_workflow_templates_media_other/__init__.py +6 -0
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed.json +849 -0
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +1039 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship.json +1256 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1.json +618 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model.json +1101 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo.json +1149 -0
- comfyui_workflow_templates_media_other/templates/ByteDance-Seedance_00003_.json +210 -0
- comfyui_workflow_templates_media_other/templates/area_composition-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition.json +1626 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject.json +1114 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing.json +688 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals.json +650 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song.json +630 -0
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example.json +495 -0
- comfyui_workflow_templates_media_other/templates/default-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/default.json +547 -0
- comfyui_workflow_templates_media_other/templates/embedding_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/embedding_example.json +267 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example.json +635 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example.json +686 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1.json +1133 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full.json +1021 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev.json +700 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast.json +700 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_full-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_full.json +700 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow.json +1029 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow.json +772 -0
- comfyui_workflow_templates_media_other/templates/index.ar.json +2521 -0
- comfyui_workflow_templates_media_other/templates/index.es.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.fr.json +2527 -0
- comfyui_workflow_templates_media_other/templates/index.ja.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.json +2527 -0
- comfyui_workflow_templates_media_other/templates/index.ko.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.ru.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.schema.json +123 -0
- comfyui_workflow_templates_media_other/templates/index.tr.json +2521 -0
- comfyui_workflow_templates_media_other/templates/index.zh-TW.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.zh.json +2526 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model.json +929 -0
- comfyui_workflow_templates_media_other/templates/lora-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora.json +615 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple.json +656 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur.json +764 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth.json +1857 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example.json +278 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16.json +1038 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control.json +1284 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp.json +1142 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/METADATA +9 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/RECORD +92 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/WHEEL +5 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,2527 @@
|
|
|
1
|
+
[
|
|
2
|
+
{
|
|
3
|
+
"moduleName": "default",
|
|
4
|
+
"type": "image",
|
|
5
|
+
"isEssential": true,
|
|
6
|
+
"title": "Getting Started",
|
|
7
|
+
"templates": [
|
|
8
|
+
{
|
|
9
|
+
"name": "01_qwen_t2i_subgraphed",
|
|
10
|
+
"title": "Texte en image (Nouveau)",
|
|
11
|
+
"mediaType": "image",
|
|
12
|
+
"mediaSubtype": "webp",
|
|
13
|
+
"description": "Générez des images à partir d'invites textuelles avec le modèle Qwen-Image",
|
|
14
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
15
|
+
"tags": ["Texte vers image", "Image"],
|
|
16
|
+
"models": ["Qwen-Image"],
|
|
17
|
+
"date": "2025-10-17",
|
|
18
|
+
"size": 31772020572
|
|
19
|
+
},
|
|
20
|
+
{
|
|
21
|
+
"name": "02_qwen_Image_edit_subgraphed",
|
|
22
|
+
"title": "Édition d'image (Nouveau)",
|
|
23
|
+
"mediaType": "image",
|
|
24
|
+
"mediaSubtype": "webp",
|
|
25
|
+
"description": "Éditez vos images avec Qwen-Image-Edit",
|
|
26
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
27
|
+
"tags": ["Image vers image", "Édition d'image", "ControlNet"],
|
|
28
|
+
"models": ["Qwen-Image"],
|
|
29
|
+
"date": "2025-10-17",
|
|
30
|
+
"size": 31772020572
|
|
31
|
+
},
|
|
32
|
+
{
|
|
33
|
+
"name": "03_video_wan2_2_14B_i2v_subgraphed",
|
|
34
|
+
"title": "Image en Vidéo (Nouveau)",
|
|
35
|
+
"description": "Générez des vidéos à partir d’une image avec Wan2.2 14B",
|
|
36
|
+
"mediaType": "image",
|
|
37
|
+
"mediaSubtype": "webp",
|
|
38
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
39
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
40
|
+
"models": ["Wan2.2", "Wan"],
|
|
41
|
+
"date": "2025-10-17",
|
|
42
|
+
"size": 38031935406
|
|
43
|
+
},
|
|
44
|
+
{
|
|
45
|
+
"name": "04_hunyuan_3d_2.1_subgraphed",
|
|
46
|
+
"title": "Image vers 3D (Nouveau)",
|
|
47
|
+
"mediaType": "image",
|
|
48
|
+
"mediaSubtype": "webp",
|
|
49
|
+
"description": "Générez des modèles 3D à partir d'une seule image avec Hunyuan3D 2.0.",
|
|
50
|
+
"tags": ["Image vers 3D", "3D"],
|
|
51
|
+
"models": ["Hunyuan3D"],
|
|
52
|
+
"date": "2025-10-17",
|
|
53
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
|
|
54
|
+
"size": 4928474972
|
|
55
|
+
},
|
|
56
|
+
{
|
|
57
|
+
"name": "05_audio_ace_step_1_t2a_song_subgraphed",
|
|
58
|
+
"title": "Texte en audio (Nouveau)",
|
|
59
|
+
"mediaType": "image",
|
|
60
|
+
"mediaSubtype": "webp",
|
|
61
|
+
"description": "Générez de l'audio à partir d'invites textuelles avec ACE-Step v1",
|
|
62
|
+
"tags": ["Texte vers audio", "Audio"],
|
|
63
|
+
"models": ["ACE-Step"],
|
|
64
|
+
"date": "2025-10-17",
|
|
65
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
66
|
+
"size": 7698728878
|
|
67
|
+
},
|
|
68
|
+
{
|
|
69
|
+
"name": "default",
|
|
70
|
+
"title": "Génération d'images",
|
|
71
|
+
"mediaType": "image",
|
|
72
|
+
"mediaSubtype": "webp",
|
|
73
|
+
"description": "Générer des images à partir de prompts textuels.",
|
|
74
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
|
|
75
|
+
"tags": ["Texte vers image", "Image"],
|
|
76
|
+
"models": ["SD1.5", "Stability"],
|
|
77
|
+
"date": "2025-03-01",
|
|
78
|
+
"size": 2136746230,
|
|
79
|
+
"vram": 3092376453
|
|
80
|
+
},
|
|
81
|
+
{
|
|
82
|
+
"name": "image2image",
|
|
83
|
+
"title": "Image vers Image",
|
|
84
|
+
"mediaType": "image",
|
|
85
|
+
"mediaSubtype": "webp",
|
|
86
|
+
"description": "Transformer des images existantes en utilisant des prompts textuels.",
|
|
87
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
|
|
88
|
+
"tags": ["Image vers image", "Image"],
|
|
89
|
+
"models": ["SD1.5", "Stability"],
|
|
90
|
+
"date": "2025-03-01",
|
|
91
|
+
"size": 2136746230,
|
|
92
|
+
"vram": 3092376453,
|
|
93
|
+
"thumbnailVariant": "hoverDissolve"
|
|
94
|
+
},
|
|
95
|
+
{
|
|
96
|
+
"name": "lora",
|
|
97
|
+
"title": "LoRA",
|
|
98
|
+
"mediaType": "image",
|
|
99
|
+
"mediaSubtype": "webp",
|
|
100
|
+
"description": "Générer des images avec des modèles LoRA pour des styles ou sujets spécialisés.",
|
|
101
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
|
|
102
|
+
"tags": ["Texte vers image", "Image"],
|
|
103
|
+
"models": ["SD1.5", "Stability"],
|
|
104
|
+
"date": "2025-03-01",
|
|
105
|
+
"size": 2437393940,
|
|
106
|
+
"vram": 3092376453
|
|
107
|
+
},
|
|
108
|
+
{
|
|
109
|
+
"name": "lora_multiple",
|
|
110
|
+
"title": "LoRA Multiple",
|
|
111
|
+
"mediaType": "image",
|
|
112
|
+
"mediaSubtype": "webp",
|
|
113
|
+
"description": "Générer des images en combinant plusieurs modèles LoRA.",
|
|
114
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
|
|
115
|
+
"tags": ["Texte vers image", "Image"],
|
|
116
|
+
"models": ["SD1.5", "Stability"],
|
|
117
|
+
"date": "2025-03-01",
|
|
118
|
+
"size": 2437393940,
|
|
119
|
+
"vram": 3350074491
|
|
120
|
+
},
|
|
121
|
+
{
|
|
122
|
+
"name": "inpaint_example",
|
|
123
|
+
"title": "Inpainting",
|
|
124
|
+
"mediaType": "image",
|
|
125
|
+
"mediaSubtype": "webp",
|
|
126
|
+
"description": "Éditer des parties spécifiques d'images de manière transparente.",
|
|
127
|
+
"thumbnailVariant": "compareSlider",
|
|
128
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
|
|
129
|
+
"tags": ["Inpainting", "Image"],
|
|
130
|
+
"models": ["SD1.5", "Stability"],
|
|
131
|
+
"date": "2025-03-01",
|
|
132
|
+
"size": 5218385265,
|
|
133
|
+
"vram": 4101693768
|
|
134
|
+
},
|
|
135
|
+
{
|
|
136
|
+
"name": "inpaint_model_outpainting",
|
|
137
|
+
"title": "Outpainting",
|
|
138
|
+
"mediaType": "image",
|
|
139
|
+
"mediaSubtype": "webp",
|
|
140
|
+
"description": "Étendre les images au-delà de leurs limites d'origine.",
|
|
141
|
+
"thumbnailVariant": "compareSlider",
|
|
142
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
|
|
143
|
+
"tags": ["Outpainting", "Image"],
|
|
144
|
+
"models": ["SD1.5", "Stability"],
|
|
145
|
+
"date": "2025-03-01",
|
|
146
|
+
"size": 5218385265,
|
|
147
|
+
"vram": 4101693768
|
|
148
|
+
},
|
|
149
|
+
{
|
|
150
|
+
"name": "embedding_example",
|
|
151
|
+
"title": "Intégration",
|
|
152
|
+
"mediaType": "image",
|
|
153
|
+
"mediaSubtype": "webp",
|
|
154
|
+
"description": "Générer des images en utilisant l'inversion textuelle pour des styles cohérents.",
|
|
155
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
|
|
156
|
+
"tags": ["Texte vers image", "Image"],
|
|
157
|
+
"models": ["SD1.5", "Stability"],
|
|
158
|
+
"date": "2025-03-01",
|
|
159
|
+
"size": 5218385265,
|
|
160
|
+
"vram": 4123168604
|
|
161
|
+
},
|
|
162
|
+
{
|
|
163
|
+
"name": "gligen_textbox_example",
|
|
164
|
+
"title": "Boîte de Texte Gligen",
|
|
165
|
+
"mediaType": "image",
|
|
166
|
+
"mediaSubtype": "webp",
|
|
167
|
+
"description": "Générer des images avec un placement précis d'objets en utilisant des boîtes de texte.",
|
|
168
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
|
|
169
|
+
"tags": ["Image"],
|
|
170
|
+
"models": ["SD1.5", "Stability"],
|
|
171
|
+
"date": "2025-03-01",
|
|
172
|
+
"size": 2974264852,
|
|
173
|
+
"vram": 4080218931
|
|
174
|
+
},
|
|
175
|
+
{
|
|
176
|
+
"name": "area_composition",
|
|
177
|
+
"title": "Composition de Zone",
|
|
178
|
+
"mediaType": "image",
|
|
179
|
+
"mediaSubtype": "webp",
|
|
180
|
+
"description": "Générer des images en contrôlant la composition avec des zones définies.",
|
|
181
|
+
"tags": ["Texte vers image", "Image"],
|
|
182
|
+
"models": ["SD1.5", "Stability"],
|
|
183
|
+
"date": "2025-03-01",
|
|
184
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/",
|
|
185
|
+
"size": 2469606195,
|
|
186
|
+
"vram": 6184752906
|
|
187
|
+
},
|
|
188
|
+
{
|
|
189
|
+
"name": "area_composition_square_area_for_subject",
|
|
190
|
+
"title": "Composition Zone Carrée pour Sujet",
|
|
191
|
+
"mediaType": "image",
|
|
192
|
+
"mediaSubtype": "webp",
|
|
193
|
+
"description": "Générer des images avec un placement cohérent du sujet en utilisant la composition de zone.",
|
|
194
|
+
"tags": ["Texte vers image", "Image"],
|
|
195
|
+
"models": ["SD1.5", "Stability"],
|
|
196
|
+
"date": "2025-03-01",
|
|
197
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition",
|
|
198
|
+
"size": 2469606195,
|
|
199
|
+
"vram": 5927054868
|
|
200
|
+
},
|
|
201
|
+
{
|
|
202
|
+
"name": "hiresfix_latent_workflow",
|
|
203
|
+
"title": "Agrandissement",
|
|
204
|
+
"mediaType": "image",
|
|
205
|
+
"mediaSubtype": "webp",
|
|
206
|
+
"description": "Agrandir les images en améliorant la qualité dans l'espace latent.",
|
|
207
|
+
"thumbnailVariant": "compareSlider",
|
|
208
|
+
"tags": ["Amélioration", "Image"],
|
|
209
|
+
"models": ["SD1.5", "Stability"],
|
|
210
|
+
"date": "2025-03-01",
|
|
211
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/",
|
|
212
|
+
"size": 2136746230,
|
|
213
|
+
"vram": 3929895076
|
|
214
|
+
},
|
|
215
|
+
{
|
|
216
|
+
"name": "esrgan_example",
|
|
217
|
+
"title": "ESRGAN",
|
|
218
|
+
"mediaType": "image",
|
|
219
|
+
"mediaSubtype": "webp",
|
|
220
|
+
"description": "Agrandir les images en utilisant les modèles ESRGAN pour améliorer la qualité.",
|
|
221
|
+
"thumbnailVariant": "compareSlider",
|
|
222
|
+
"tags": ["Amélioration", "Image"],
|
|
223
|
+
"models": ["SD1.5", "Stability"],
|
|
224
|
+
"date": "2025-03-01",
|
|
225
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/",
|
|
226
|
+
"size": 2201170739,
|
|
227
|
+
"vram": 6442450944
|
|
228
|
+
},
|
|
229
|
+
{
|
|
230
|
+
"name": "hiresfix_esrgan_workflow",
|
|
231
|
+
"title": "Workflow HiresFix ESRGAN",
|
|
232
|
+
"mediaType": "image",
|
|
233
|
+
"mediaSubtype": "webp",
|
|
234
|
+
"description": "Agrandir les images en utilisant les modèles ESRGAN pendant les étapes de génération intermédiaires.",
|
|
235
|
+
"thumbnailVariant": "compareSlider",
|
|
236
|
+
"tags": ["Amélioration", "Image"],
|
|
237
|
+
"models": ["SD1.5", "Stability"],
|
|
238
|
+
"date": "2025-03-01",
|
|
239
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling",
|
|
240
|
+
"size": 2201170739,
|
|
241
|
+
"vram": 6442450944
|
|
242
|
+
},
|
|
243
|
+
{
|
|
244
|
+
"name": "latent_upscale_different_prompt_model",
|
|
245
|
+
"title": "Modèle Upscale Latent avec Prompt Différent",
|
|
246
|
+
"mediaType": "image",
|
|
247
|
+
"mediaSubtype": "webp",
|
|
248
|
+
"description": "Agrandir les images tout en changeant les prompts à travers les passes de génération.",
|
|
249
|
+
"thumbnailVariant": "zoomHover",
|
|
250
|
+
"tags": ["Amélioration", "Image"],
|
|
251
|
+
"models": ["SD1.5", "Stability"],
|
|
252
|
+
"date": "2025-03-01",
|
|
253
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples",
|
|
254
|
+
"size": 4262755041,
|
|
255
|
+
"vram": 5153960755
|
|
256
|
+
},
|
|
257
|
+
{
|
|
258
|
+
"name": "controlnet_example",
|
|
259
|
+
"title": "Scribble ControlNet",
|
|
260
|
+
"mediaType": "image",
|
|
261
|
+
"mediaSubtype": "webp",
|
|
262
|
+
"description": "Générer des images guidées par des images de référence griffonnées en utilisant ControlNet.",
|
|
263
|
+
"thumbnailVariant": "hoverDissolve",
|
|
264
|
+
"tags": ["ControlNet", "Image"],
|
|
265
|
+
"models": ["SD1.5", "Stability"],
|
|
266
|
+
"date": "2025-03-01",
|
|
267
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/",
|
|
268
|
+
"size": 3189013217,
|
|
269
|
+
"vram": 6442450944
|
|
270
|
+
},
|
|
271
|
+
{
|
|
272
|
+
"name": "2_pass_pose_worship",
|
|
273
|
+
"title": "Pose ControlNet 2 Passes",
|
|
274
|
+
"mediaType": "image",
|
|
275
|
+
"mediaSubtype": "webp",
|
|
276
|
+
"description": "Générer des images guidées par des références de pose en utilisant ControlNet.",
|
|
277
|
+
"thumbnailVariant": "hoverDissolve",
|
|
278
|
+
"tags": ["ControlNet", "Image"],
|
|
279
|
+
"models": ["SD1.5", "Stability"],
|
|
280
|
+
"date": "2025-03-01",
|
|
281
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet",
|
|
282
|
+
"size": 4660039516,
|
|
283
|
+
"vram": 6442450944
|
|
284
|
+
},
|
|
285
|
+
{
|
|
286
|
+
"name": "depth_controlnet",
|
|
287
|
+
"title": "Profondeur ControlNet",
|
|
288
|
+
"mediaType": "image",
|
|
289
|
+
"mediaSubtype": "webp",
|
|
290
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant ControlNet.",
|
|
291
|
+
"thumbnailVariant": "hoverDissolve",
|
|
292
|
+
"tags": ["ControlNet", "Image", "Texte vers image"],
|
|
293
|
+
"models": ["SD1.5", "Stability"],
|
|
294
|
+
"date": "2025-03-01",
|
|
295
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
|
|
296
|
+
"size": 2888365507,
|
|
297
|
+
"vram": 6442450944
|
|
298
|
+
},
|
|
299
|
+
{
|
|
300
|
+
"name": "depth_t2i_adapter",
|
|
301
|
+
"title": "Adaptateur T2I Profondeur",
|
|
302
|
+
"mediaType": "image",
|
|
303
|
+
"mediaSubtype": "webp",
|
|
304
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant l'adaptateur T2I.",
|
|
305
|
+
"thumbnailVariant": "hoverDissolve",
|
|
306
|
+
"tags": ["ControlNet", "Image", "Texte vers image"],
|
|
307
|
+
"models": ["SD1.5", "Stability"],
|
|
308
|
+
"date": "2025-03-01",
|
|
309
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
|
|
310
|
+
"size": 2523293286,
|
|
311
|
+
"vram": 6442450944
|
|
312
|
+
},
|
|
313
|
+
{
|
|
314
|
+
"name": "mixing_controlnets",
|
|
315
|
+
"title": "Mélange ControlNets",
|
|
316
|
+
"mediaType": "image",
|
|
317
|
+
"mediaSubtype": "webp",
|
|
318
|
+
"description": "Générer des images en combinant plusieurs modèles ControlNet.",
|
|
319
|
+
"thumbnailVariant": "hoverDissolve",
|
|
320
|
+
"tags": ["ControlNet", "Image", "Texte vers image"],
|
|
321
|
+
"models": ["SD1.5", "Stability"],
|
|
322
|
+
"date": "2025-03-01",
|
|
323
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets",
|
|
324
|
+
"size": 3328599654,
|
|
325
|
+
"vram": 6442450944
|
|
326
|
+
}
|
|
327
|
+
]
|
|
328
|
+
},
|
|
329
|
+
{
|
|
330
|
+
"moduleName": "default",
|
|
331
|
+
"type": "image",
|
|
332
|
+
"category": "GENERATION TYPE",
|
|
333
|
+
"icon": "icon-[lucide--image]",
|
|
334
|
+
"title": "Image",
|
|
335
|
+
"templates": [
|
|
336
|
+
{
|
|
337
|
+
"name": "image_flux2",
|
|
338
|
+
"title": "Flux.2 Dev",
|
|
339
|
+
"mediaType": "image",
|
|
340
|
+
"mediaSubtype": "webp",
|
|
341
|
+
"thumbnailVariant": "compareSlider",
|
|
342
|
+
"description": "Générez des images photoréalistes avec cohérence multi-référence et rendu de texte professionnel.",
|
|
343
|
+
"tags": ["Texte vers image", "Image", "Édition d'image"],
|
|
344
|
+
"models": ["Flux.2 Dev", "BFL"],
|
|
345
|
+
"date": "2025-11-26",
|
|
346
|
+
"size": 71382356459,
|
|
347
|
+
"vram": 0
|
|
348
|
+
},
|
|
349
|
+
{
|
|
350
|
+
"name": "image_flux2_fp8",
|
|
351
|
+
"title": "Maquette de produit (Flux.2 Dev FP8)",
|
|
352
|
+
"mediaType": "image",
|
|
353
|
+
"mediaSubtype": "webp",
|
|
354
|
+
"description": "Créez des maquettes de produits en appliquant des motifs de conception sur des emballages, des mugs et d'autres produits à l'aide de la cohérence multi-références.",
|
|
355
|
+
"tags": ["Texte vers image", "Image", "Édition d'image", "Maquette", "Design produit"],
|
|
356
|
+
"models": ["Flux.2 Dev", "BFL"],
|
|
357
|
+
"date": "2025-11-26",
|
|
358
|
+
"size": 53837415055,
|
|
359
|
+
"vram": 0
|
|
360
|
+
},
|
|
361
|
+
{
|
|
362
|
+
"name": "image_z_image_turbo",
|
|
363
|
+
"title": "Z-Image-Turbo texte vers image",
|
|
364
|
+
"mediaType": "image",
|
|
365
|
+
"mediaSubtype": "webp",
|
|
366
|
+
"description": "Un modèle fondamental efficace de génération d’images utilisant un transformateur de diffusion à flux unique, compatible anglais et chinois.",
|
|
367
|
+
"tags": ["Texte vers image", "Image"],
|
|
368
|
+
"models": ["Z-Image-Turbo"],
|
|
369
|
+
"date": "2025-11-27",
|
|
370
|
+
"size": 35326050304
|
|
371
|
+
},
|
|
372
|
+
{
|
|
373
|
+
"name": "image_qwen_image",
|
|
374
|
+
"title": "Qwen-Image Texte vers Image",
|
|
375
|
+
"mediaType": "image",
|
|
376
|
+
"mediaSubtype": "webp",
|
|
377
|
+
"description": "Générer des images avec des capacités exceptionnelles de rendu et d'édition de texte multilingue en utilisant le modèle MMDiT 20B de Qwen-Image.",
|
|
378
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
379
|
+
"tags": ["Texte vers image", "Image"],
|
|
380
|
+
"models": ["Qwen-Image"],
|
|
381
|
+
"date": "2025-08-05",
|
|
382
|
+
"size": 31772020572
|
|
383
|
+
},
|
|
384
|
+
{
|
|
385
|
+
"name": "image_qwen_image_instantx_controlnet",
|
|
386
|
+
"title": "Qwen-Image InstantX ControlNet",
|
|
387
|
+
"mediaType": "image",
|
|
388
|
+
"mediaSubtype": "webp",
|
|
389
|
+
"description": "Générer des images avec Qwen-Image InstantX ControlNet, prenant en charge canny, contours doux, profondeur, pose",
|
|
390
|
+
"tags": ["Image vers image", "Image", "ControlNet"],
|
|
391
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
392
|
+
"models": ["Qwen-Image"],
|
|
393
|
+
"date": "2025-08-23",
|
|
394
|
+
"size": 35304631173
|
|
395
|
+
},
|
|
396
|
+
{
|
|
397
|
+
"name": "image_qwen_image_instantx_inpainting_controlnet",
|
|
398
|
+
"title": "Qwen-Image InstantX ControlNet Inpainting",
|
|
399
|
+
"mediaType": "image",
|
|
400
|
+
"mediaSubtype": "webp",
|
|
401
|
+
"thumbnailVariant": "compareSlider",
|
|
402
|
+
"description": "Inpainting professionnel et édition d'images avec Qwen-Image InstantX ControlNet. Prend en charge le remplacement d'objets, la modification de texte, les changements d'arrière-plan et l'outpainting.",
|
|
403
|
+
"tags": ["Image vers image", "Image", "ControlNet", "Inpainting"],
|
|
404
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
405
|
+
"models": ["Qwen-Image"],
|
|
406
|
+
"date": "2025-09-12",
|
|
407
|
+
"size": 36013300777
|
|
408
|
+
},
|
|
409
|
+
{
|
|
410
|
+
"name": "image_qwen_image_union_control_lora",
|
|
411
|
+
"title": "Qwen-Image Contrôle Unifié",
|
|
412
|
+
"mediaType": "image",
|
|
413
|
+
"mediaSubtype": "webp",
|
|
414
|
+
"description": "Générer des images avec un contrôle structurel précis en utilisant le LoRA ControlNet unifié de Qwen-Image. Prend en charge plusieurs types de contrôle incluant canny, depth, lineart, softedge, normal et openpose pour diverses applications créatives.",
|
|
415
|
+
"tags": ["Texte vers image", "Image", "ControlNet"],
|
|
416
|
+
"models": ["Qwen-Image"],
|
|
417
|
+
"date": "2025-08-23",
|
|
418
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
419
|
+
"size": 32716913377
|
|
420
|
+
},
|
|
421
|
+
{
|
|
422
|
+
"name": "image_qwen_image_controlnet_patch",
|
|
423
|
+
"title": "Qwen-Image ControlNet Basique",
|
|
424
|
+
"mediaType": "image",
|
|
425
|
+
"mediaSubtype": "webp",
|
|
426
|
+
"description": "Contrôler la génération d'images en utilisant les modèles ControlNet de Qwen-Image. Prend en charge les contrôles canny, depth et inpainting via le patching de modèles.",
|
|
427
|
+
"tags": ["Texte vers image", "Image", "ControlNet"],
|
|
428
|
+
"models": ["Qwen-Image"],
|
|
429
|
+
"date": "2025-08-24",
|
|
430
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
431
|
+
"size": 34037615821,
|
|
432
|
+
"thumbnailVariant": "compareSlider"
|
|
433
|
+
},
|
|
434
|
+
{
|
|
435
|
+
"name": "image_qwen_image_edit_2509",
|
|
436
|
+
"title": "Qwen Édition d'Image 2509",
|
|
437
|
+
"mediaType": "image",
|
|
438
|
+
"mediaSubtype": "webp",
|
|
439
|
+
"thumbnailVariant": "compareSlider",
|
|
440
|
+
"description": "Édition d'images avancée avec support multi-images, cohérence améliorée et intégration ControlNet.",
|
|
441
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
442
|
+
"tags": ["Image vers image", "Édition d'image", "ControlNet"],
|
|
443
|
+
"models": ["Qwen-Image"],
|
|
444
|
+
"date": "2025-09-25",
|
|
445
|
+
"size": 31772020572
|
|
446
|
+
},
|
|
447
|
+
{
|
|
448
|
+
"name": "image_qwen_image_edit",
|
|
449
|
+
"title": "Édition d'Image Qwen",
|
|
450
|
+
"mediaType": "image",
|
|
451
|
+
"mediaSubtype": "webp",
|
|
452
|
+
"thumbnailVariant": "compareSlider",
|
|
453
|
+
"description": "Éditer des images avec une édition de texte bilingue précise et des capacités d'édition sémantique/apparence duales en utilisant le modèle MMDiT 20B de Qwen-Image-Edit.",
|
|
454
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
455
|
+
"tags": ["Image vers image", "Édition d'image"],
|
|
456
|
+
"models": ["Qwen-Image"],
|
|
457
|
+
"date": "2025-08-18",
|
|
458
|
+
"size": 31772020572
|
|
459
|
+
},
|
|
460
|
+
{
|
|
461
|
+
"name": "image_chrono_edit_14B",
|
|
462
|
+
"title": "ChronoEdit 14B",
|
|
463
|
+
"mediaType": "image",
|
|
464
|
+
"mediaSubtype": "webp",
|
|
465
|
+
"thumbnailVariant": "compareSlider",
|
|
466
|
+
"description": "Édition d'images propulsée par la compréhension dynamique des modèles vidéo, créant des résultats physiquement plausibles tout en préservant la cohérence du personnage et du style.",
|
|
467
|
+
"tags": ["Édition d'image", "Image vers image"],
|
|
468
|
+
"models": ["Wan2.1", "ChronoEdit", "Nvidia"],
|
|
469
|
+
"date": "2025-11-03",
|
|
470
|
+
"size": 40459304
|
|
471
|
+
},
|
|
472
|
+
{
|
|
473
|
+
"name": "flux_kontext_dev_basic",
|
|
474
|
+
"title": "Flux Kontext Dev (Basique)",
|
|
475
|
+
"mediaType": "image",
|
|
476
|
+
"mediaSubtype": "webp",
|
|
477
|
+
"thumbnailVariant": "hoverDissolve",
|
|
478
|
+
"description": "Éditer une image en utilisant Flux Kontext avec une visibilité complète des nœuds, parfait pour apprendre le flux de travail.",
|
|
479
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
|
|
480
|
+
"tags": ["Édition d'image", "Image vers image"],
|
|
481
|
+
"models": ["Flux", "BFL"],
|
|
482
|
+
"date": "2025-06-26",
|
|
483
|
+
"size": 17641578168,
|
|
484
|
+
"vram": 19327352832
|
|
485
|
+
},
|
|
486
|
+
{
|
|
487
|
+
"name": "image_chroma1_radiance_text_to_image",
|
|
488
|
+
"title": "Chroma1 Radiance Texte vers Image",
|
|
489
|
+
"mediaType": "image",
|
|
490
|
+
"mediaSubtype": "webp",
|
|
491
|
+
"description": "Chroma1-Radiance travaille directement avec les pixels d'image au lieu des latents compressés, offrant des images de meilleure qualité avec moins d'artefacts et de distorsion.",
|
|
492
|
+
"tags": ["Texte vers image", "Image"],
|
|
493
|
+
"models": ["Chroma"],
|
|
494
|
+
"date": "2025-09-18",
|
|
495
|
+
"size": 23622320128,
|
|
496
|
+
"vram": 23622320128
|
|
497
|
+
},
|
|
498
|
+
{
|
|
499
|
+
"name": "image_netayume_lumina_t2i",
|
|
500
|
+
"title": "NetaYume Lumina Texte vers Image",
|
|
501
|
+
"mediaType": "image",
|
|
502
|
+
"mediaSubtype": "webp",
|
|
503
|
+
"description": "Génération d'images de style anime de haute qualité avec compréhension améliorée des personnages et textures détaillées. Affinée à partir de Neta Lumina sur l'ensemble de données Danbooru.",
|
|
504
|
+
"tags": ["Texte vers image", "Image", "Anime"],
|
|
505
|
+
"models": ["OmniGen"],
|
|
506
|
+
"date": "2025-10-10",
|
|
507
|
+
"size": 10619306639
|
|
508
|
+
},
|
|
509
|
+
{
|
|
510
|
+
"name": "image_chroma_text_to_image",
|
|
511
|
+
"title": "Chroma texte vers image",
|
|
512
|
+
"mediaType": "image",
|
|
513
|
+
"mediaSubtype": "webp",
|
|
514
|
+
"description": "Chroma est modifié à partir de Flux et présente quelques changements dans l'architecture.",
|
|
515
|
+
"tags": ["Texte vers image", "Image"],
|
|
516
|
+
"models": ["Chroma", "Flux"],
|
|
517
|
+
"date": "2025-06-04",
|
|
518
|
+
"size": 23289460163,
|
|
519
|
+
"vram": 15569256448
|
|
520
|
+
},
|
|
521
|
+
{
|
|
522
|
+
"name": "image_flux.1_fill_dev_OneReward",
|
|
523
|
+
"title": "Flux.1 Dev OneReward",
|
|
524
|
+
"mediaType": "image",
|
|
525
|
+
"mediaSubtype": "webp",
|
|
526
|
+
"thumbnailVariant": "compareSlider",
|
|
527
|
+
"description": "Prend en charge diverses tâches telles que l'inpainting d'image, l'outpainting et la suppression d'objets",
|
|
528
|
+
"tags": ["Inpainting", "Outpainting"],
|
|
529
|
+
"models": ["Flux", "BFL"],
|
|
530
|
+
"date": "2025-09-21",
|
|
531
|
+
"size": 29001766666,
|
|
532
|
+
"vram": 21474836480
|
|
533
|
+
},
|
|
534
|
+
{
|
|
535
|
+
"name": "flux_dev_checkpoint_example",
|
|
536
|
+
"title": "Flux Dev fp8",
|
|
537
|
+
"mediaType": "image",
|
|
538
|
+
"mediaSubtype": "webp",
|
|
539
|
+
"description": "Générer des images en utilisant la version quantifiée Flux Dev fp8. Convient aux appareils avec une VRAM limitée, ne nécessite qu'un seul fichier de modèle, mais la qualité de l'image est légèrement inférieure à la version complète.",
|
|
540
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
541
|
+
"tags": ["Texte vers image", "Image"],
|
|
542
|
+
"models": ["Flux", "BFL"],
|
|
543
|
+
"date": "2025-03-01",
|
|
544
|
+
"size": 17244293693,
|
|
545
|
+
"vram": 18253611008
|
|
546
|
+
},
|
|
547
|
+
{
|
|
548
|
+
"name": "flux1_dev_uso_reference_image_gen",
|
|
549
|
+
"title": "Génération d'images de référence Flux.1 Dev USO",
|
|
550
|
+
"description": "Utilisez des images de référence pour contrôler à la fois le style et le sujet : conservez le visage de votre personnage tout en changeant de style artistique, ou appliquez des styles artistiques à de nouvelles scènes",
|
|
551
|
+
"thumbnailVariant": "hoverDissolve",
|
|
552
|
+
"mediaType": "image",
|
|
553
|
+
"mediaSubtype": "webp",
|
|
554
|
+
"tags": ["Image vers image", "Image"],
|
|
555
|
+
"models": ["Flux", "BFL"],
|
|
556
|
+
"date": "2025-09-02",
|
|
557
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
|
|
558
|
+
"size": 18597208392,
|
|
559
|
+
"vram": 19864223744
|
|
560
|
+
},
|
|
561
|
+
{
|
|
562
|
+
"name": "flux_schnell",
|
|
563
|
+
"title": "Flux Schnell fp8",
|
|
564
|
+
"mediaType": "image",
|
|
565
|
+
"mediaSubtype": "webp",
|
|
566
|
+
"description": "Générer rapidement des images avec la version quantifiée Flux Schnell fp8. Idéal pour le matériel d'entrée de gamme, ne nécessite que 4 étapes pour générer des images.",
|
|
567
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
568
|
+
"tags": ["Texte vers image", "Image"],
|
|
569
|
+
"models": ["Flux", "BFL"],
|
|
570
|
+
"date": "2025-03-01",
|
|
571
|
+
"size": 17233556275,
|
|
572
|
+
"vram": 18253611008
|
|
573
|
+
},
|
|
574
|
+
{
|
|
575
|
+
"name": "flux1_krea_dev",
|
|
576
|
+
"title": "Flux.1 Krea Dev",
|
|
577
|
+
"mediaType": "image",
|
|
578
|
+
"mediaSubtype": "webp",
|
|
579
|
+
"description": "Un modèle FLUX affiné poussant le photoréalisme à son maximum",
|
|
580
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
|
|
581
|
+
"tags": ["Texte vers image", "Image"],
|
|
582
|
+
"models": ["Flux", "BFL"],
|
|
583
|
+
"date": "2025-07-31",
|
|
584
|
+
"size": 22269405430,
|
|
585
|
+
"vram": 23085449216
|
|
586
|
+
},
|
|
587
|
+
{
|
|
588
|
+
"name": "flux_dev_full_text_to_image",
|
|
589
|
+
"title": "Flux Dev texte vers image complet",
|
|
590
|
+
"mediaType": "image",
|
|
591
|
+
"mediaSubtype": "webp",
|
|
592
|
+
"description": "Générer des images de haute qualité avec la version complète de Flux Dev. Nécessite plus de VRAM et plusieurs fichiers de modèles, mais offre la meilleure capacité de suivi des prompts et la qualité d'image.",
|
|
593
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
594
|
+
"tags": ["Texte vers image", "Image"],
|
|
595
|
+
"models": ["Flux", "BFL"],
|
|
596
|
+
"date": "2025-03-01",
|
|
597
|
+
"size": 34177202258,
|
|
598
|
+
"vram": 23622320128
|
|
599
|
+
},
|
|
600
|
+
{
|
|
601
|
+
"name": "flux_schnell_full_text_to_image",
|
|
602
|
+
"title": "Flux Schnell texte vers image complet",
|
|
603
|
+
"mediaType": "image",
|
|
604
|
+
"mediaSubtype": "webp",
|
|
605
|
+
"description": "Générer des images rapidement avec la version complète de Flux Schnell. Utilise la licence Apache2.0, ne nécessite que 4 étapes pour générer des images tout en maintenant une bonne qualité d'image.",
|
|
606
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
607
|
+
"tags": ["Texte vers image", "Image"],
|
|
608
|
+
"models": ["Flux", "BFL"],
|
|
609
|
+
"date": "2025-03-01",
|
|
610
|
+
"size": 34155727421
|
|
611
|
+
},
|
|
612
|
+
{
|
|
613
|
+
"name": "flux_fill_inpaint_example",
|
|
614
|
+
"title": "Flux Inpainting",
|
|
615
|
+
"mediaType": "image",
|
|
616
|
+
"mediaSubtype": "webp",
|
|
617
|
+
"description": "Combler les parties manquantes des images en utilisant l'inpainting Flux.",
|
|
618
|
+
"thumbnailVariant": "compareSlider",
|
|
619
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
620
|
+
"tags": ["Image vers image", "Inpainting", "Image"],
|
|
621
|
+
"models": ["Flux", "BFL"],
|
|
622
|
+
"date": "2025-03-01",
|
|
623
|
+
"size": 10372346020
|
|
624
|
+
},
|
|
625
|
+
{
|
|
626
|
+
"name": "flux_fill_outpaint_example",
|
|
627
|
+
"title": "Flux Outpainting",
|
|
628
|
+
"mediaType": "image",
|
|
629
|
+
"mediaSubtype": "webp",
|
|
630
|
+
"description": "Étendre les images au-delà des limites en utilisant l'outpainting Flux.",
|
|
631
|
+
"thumbnailVariant": "compareSlider",
|
|
632
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
633
|
+
"tags": ["Outpainting", "Image", "Image vers image"],
|
|
634
|
+
"models": ["Flux", "BFL"],
|
|
635
|
+
"date": "2025-03-01",
|
|
636
|
+
"size": 10372346020
|
|
637
|
+
},
|
|
638
|
+
{
|
|
639
|
+
"name": "flux_canny_model_example",
|
|
640
|
+
"title": "Modèle Flux Canny",
|
|
641
|
+
"mediaType": "image",
|
|
642
|
+
"mediaSubtype": "webp",
|
|
643
|
+
"description": "Générer des images guidées par la détection de contours en utilisant Flux Canny.",
|
|
644
|
+
"thumbnailVariant": "hoverDissolve",
|
|
645
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
646
|
+
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
647
|
+
"models": ["Flux", "BFL"],
|
|
648
|
+
"date": "2025-03-01",
|
|
649
|
+
"size": 34177202258
|
|
650
|
+
},
|
|
651
|
+
{
|
|
652
|
+
"name": "flux_depth_lora_example",
|
|
653
|
+
"title": "Flux Depth LoRA",
|
|
654
|
+
"mediaType": "image",
|
|
655
|
+
"mediaSubtype": "webp",
|
|
656
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant Flux LoRA.",
|
|
657
|
+
"thumbnailVariant": "hoverDissolve",
|
|
658
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
659
|
+
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
660
|
+
"models": ["Flux", "BFL"],
|
|
661
|
+
"date": "2025-03-01",
|
|
662
|
+
"size": 35412005356
|
|
663
|
+
},
|
|
664
|
+
{
|
|
665
|
+
"name": "flux_redux_model_example",
|
|
666
|
+
"title": "Modèle Flux Redux",
|
|
667
|
+
"mediaType": "image",
|
|
668
|
+
"mediaSubtype": "webp",
|
|
669
|
+
"description": "Générer des images en transférant le style à partir d'images de référence en utilisant Flux Redux.",
|
|
670
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
671
|
+
"tags": ["Image vers image", "ControlNet", "Image"],
|
|
672
|
+
"models": ["Flux", "BFL"],
|
|
673
|
+
"date": "2025-03-01",
|
|
674
|
+
"size": 35154307318
|
|
675
|
+
},
|
|
676
|
+
{
|
|
677
|
+
"name": "image_omnigen2_t2i",
|
|
678
|
+
"title": "OmniGen2 Texte vers Image",
|
|
679
|
+
"mediaType": "image",
|
|
680
|
+
"mediaSubtype": "webp",
|
|
681
|
+
"description": "Générer des images de haute qualité à partir de prompts textuels en utilisant le modèle multimodal unifié 7B d'OmniGen2 avec une architecture à double chemin.",
|
|
682
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
683
|
+
"tags": ["Texte vers image", "Image"],
|
|
684
|
+
"models": ["OmniGen"],
|
|
685
|
+
"date": "2025-06-30",
|
|
686
|
+
"size": 15784004813
|
|
687
|
+
},
|
|
688
|
+
{
|
|
689
|
+
"name": "image_omnigen2_image_edit",
|
|
690
|
+
"title": "Édition d'Image OmniGen2",
|
|
691
|
+
"mediaType": "image",
|
|
692
|
+
"mediaSubtype": "webp",
|
|
693
|
+
"thumbnailVariant": "hoverDissolve",
|
|
694
|
+
"description": "Éditer des images avec des instructions en langage naturel en utilisant les capacités avancées d'édition d'images d'OmniGen2 et le support de rendu de texte.",
|
|
695
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
696
|
+
"tags": ["Édition d'image", "Image"],
|
|
697
|
+
"models": ["OmniGen"],
|
|
698
|
+
"date": "2025-06-30",
|
|
699
|
+
"size": 15784004813
|
|
700
|
+
},
|
|
701
|
+
{
|
|
702
|
+
"name": "hidream_i1_dev",
|
|
703
|
+
"title": "HiDream I1 Dev",
|
|
704
|
+
"mediaType": "image",
|
|
705
|
+
"mediaSubtype": "webp",
|
|
706
|
+
"description": "Générer des images avec HiDream I1 Dev - Version équilibrée avec 28 étapes d'inférence, adaptée au matériel de gamme moyenne.",
|
|
707
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
708
|
+
"tags": ["Texte vers image", "Image"],
|
|
709
|
+
"models": ["HiDream"],
|
|
710
|
+
"date": "2025-04-17",
|
|
711
|
+
"size": 33318208799
|
|
712
|
+
},
|
|
713
|
+
{
|
|
714
|
+
"name": "hidream_i1_fast",
|
|
715
|
+
"title": "HiDream I1 Fast",
|
|
716
|
+
"mediaType": "image",
|
|
717
|
+
"mediaSubtype": "webp",
|
|
718
|
+
"description": "Générer des images rapidement avec HiDream I1 Fast - Version légère avec 16 étapes d'inférence, idéale pour des aperçus rapides sur du matériel d'entrée de gamme.",
|
|
719
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
720
|
+
"tags": ["Texte vers image", "Image"],
|
|
721
|
+
"models": ["HiDream"],
|
|
722
|
+
"date": "2025-04-17",
|
|
723
|
+
"size": 24234352968
|
|
724
|
+
},
|
|
725
|
+
{
|
|
726
|
+
"name": "hidream_i1_full",
|
|
727
|
+
"title": "HiDream I1 Full",
|
|
728
|
+
"mediaType": "image",
|
|
729
|
+
"mediaSubtype": "webp",
|
|
730
|
+
"description": "Générer des images avec HiDream I1 Full - Version complète avec 50 étapes d'inférence pour une sortie de la plus haute qualité.",
|
|
731
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
732
|
+
"tags": ["Texte vers image", "Image"],
|
|
733
|
+
"models": ["HiDream"],
|
|
734
|
+
"date": "2025-04-17",
|
|
735
|
+
"size": 24234352968
|
|
736
|
+
},
|
|
737
|
+
{
|
|
738
|
+
"name": "hidream_e1_1",
|
|
739
|
+
"title": "Édition d'Image HiDream E1.1",
|
|
740
|
+
"mediaType": "image",
|
|
741
|
+
"mediaSubtype": "webp",
|
|
742
|
+
"thumbnailVariant": "compareSlider",
|
|
743
|
+
"description": "Éditer des images avec HiDream E1.1 – il est meilleur en qualité d'image et en précision d'édition que HiDream-E1-Full.",
|
|
744
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
745
|
+
"tags": ["Édition d'image", "Image"],
|
|
746
|
+
"models": ["HiDream"],
|
|
747
|
+
"date": "2025-07-21",
|
|
748
|
+
"size": 50422916055
|
|
749
|
+
},
|
|
750
|
+
{
|
|
751
|
+
"name": "hidream_e1_full",
|
|
752
|
+
"title": "Édition d'Image HiDream E1",
|
|
753
|
+
"mediaType": "image",
|
|
754
|
+
"mediaSubtype": "webp",
|
|
755
|
+
"thumbnailVariant": "compareSlider",
|
|
756
|
+
"description": "Éditer des images avec HiDream E1 - Modèle professionnel d'édition d'images en langage naturel.",
|
|
757
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
758
|
+
"tags": ["Édition d'image", "Image"],
|
|
759
|
+
"models": ["HiDream"],
|
|
760
|
+
"date": "2025-05-01",
|
|
761
|
+
"size": 34209414513
|
|
762
|
+
},
|
|
763
|
+
{
|
|
764
|
+
"name": "sd3.5_simple_example",
|
|
765
|
+
"title": "SD3.5 Simple",
|
|
766
|
+
"mediaType": "image",
|
|
767
|
+
"mediaSubtype": "webp",
|
|
768
|
+
"description": "Générer des images en utilisant SD 3.5.",
|
|
769
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
|
|
770
|
+
"tags": ["Texte vers image", "Image"],
|
|
771
|
+
"models": ["SD3.5", "Stability"],
|
|
772
|
+
"date": "2025-03-01",
|
|
773
|
+
"size": 14935748772
|
|
774
|
+
},
|
|
775
|
+
{
|
|
776
|
+
"name": "sd3.5_large_canny_controlnet_example",
|
|
777
|
+
"title": "SD3.5 Large Canny ControlNet",
|
|
778
|
+
"mediaType": "image",
|
|
779
|
+
"mediaSubtype": "webp",
|
|
780
|
+
"description": "Générer des images guidées par la détection de contours en utilisant SD 3.5 Canny ControlNet.",
|
|
781
|
+
"thumbnailVariant": "hoverDissolve",
|
|
782
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
783
|
+
"tags": ["Image vers image", "Image", "ControlNet"],
|
|
784
|
+
"models": ["SD3.5", "Stability"],
|
|
785
|
+
"date": "2025-03-01",
|
|
786
|
+
"size": 23590107873
|
|
787
|
+
},
|
|
788
|
+
{
|
|
789
|
+
"name": "sd3.5_large_depth",
|
|
790
|
+
"title": "SD3.5 Large Profondeur",
|
|
791
|
+
"mediaType": "image",
|
|
792
|
+
"mediaSubtype": "webp",
|
|
793
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant SD 3.5.",
|
|
794
|
+
"thumbnailVariant": "hoverDissolve",
|
|
795
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
796
|
+
"tags": ["Image vers image", "Image", "ControlNet"],
|
|
797
|
+
"models": ["SD3.5", "Stability"],
|
|
798
|
+
"date": "2025-03-01",
|
|
799
|
+
"size": 23590107873
|
|
800
|
+
},
|
|
801
|
+
{
|
|
802
|
+
"name": "sd3.5_large_blur",
|
|
803
|
+
"title": "SD3.5 Large Flou",
|
|
804
|
+
"mediaType": "image",
|
|
805
|
+
"mediaSubtype": "webp",
|
|
806
|
+
"description": "Générer des images guidées par des images de référence floues en utilisant SD 3.5.",
|
|
807
|
+
"thumbnailVariant": "hoverDissolve",
|
|
808
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
809
|
+
"tags": ["Image vers image", "Image"],
|
|
810
|
+
"models": ["SD3.5", "Stability"],
|
|
811
|
+
"date": "2025-03-01",
|
|
812
|
+
"size": 23590107873
|
|
813
|
+
},
|
|
814
|
+
{
|
|
815
|
+
"name": "sdxl_simple_example",
|
|
816
|
+
"title": "SDXL Simple",
|
|
817
|
+
"mediaType": "image",
|
|
818
|
+
"mediaSubtype": "webp",
|
|
819
|
+
"description": "Générer des images de haute qualité en utilisant SDXL.",
|
|
820
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
821
|
+
"tags": ["Texte vers image", "Image"],
|
|
822
|
+
"models": ["SDXL", "Stability"],
|
|
823
|
+
"date": "2025-03-01",
|
|
824
|
+
"size": 13013750907
|
|
825
|
+
},
|
|
826
|
+
{
|
|
827
|
+
"name": "sdxl_refiner_prompt_example",
|
|
828
|
+
"title": "SDXL Affineur de Prompt",
|
|
829
|
+
"mediaType": "image",
|
|
830
|
+
"mediaSubtype": "webp",
|
|
831
|
+
"description": "Améliorer les images SDXL en utilisant des modèles de raffinement.",
|
|
832
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
833
|
+
"tags": ["Texte vers image", "Image"],
|
|
834
|
+
"models": ["SDXL", "Stability"],
|
|
835
|
+
"date": "2025-03-01",
|
|
836
|
+
"size": 13013750907
|
|
837
|
+
},
|
|
838
|
+
{
|
|
839
|
+
"name": "sdxl_revision_text_prompts",
|
|
840
|
+
"title": "SDXL Révision Prompts Texte",
|
|
841
|
+
"mediaType": "image",
|
|
842
|
+
"mediaSubtype": "webp",
|
|
843
|
+
"description": "Générer des images en transférant des concepts à partir d'images de référence en utilisant SDXL Revision.",
|
|
844
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
|
|
845
|
+
"tags": ["Texte vers image", "Image"],
|
|
846
|
+
"models": ["SDXL", "Stability"],
|
|
847
|
+
"date": "2025-03-01",
|
|
848
|
+
"size": 10630044058
|
|
849
|
+
},
|
|
850
|
+
{
|
|
851
|
+
"name": "sdxlturbo_example",
|
|
852
|
+
"title": "SDXL Turbo",
|
|
853
|
+
"mediaType": "image",
|
|
854
|
+
"mediaSubtype": "webp",
|
|
855
|
+
"description": "Générer des images en une seule étape en utilisant SDXL Turbo.",
|
|
856
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
|
|
857
|
+
"tags": ["Texte vers image", "Image"],
|
|
858
|
+
"models": ["SDXL", "Stability"],
|
|
859
|
+
"date": "2025-03-01",
|
|
860
|
+
"size": 6936372183
|
|
861
|
+
},
|
|
862
|
+
{
|
|
863
|
+
"name": "image_lotus_depth_v1_1",
|
|
864
|
+
"title": "Lotus Profondeur",
|
|
865
|
+
"mediaType": "image",
|
|
866
|
+
"mediaSubtype": "webp",
|
|
867
|
+
"thumbnailVariant": "compareSlider",
|
|
868
|
+
"description": "Exécuter Lotus Depth dans ComfyUI pour une estimation de profondeur monoculaire efficace zero-shot avec une haute rétention de détails.",
|
|
869
|
+
"tags": ["Image", "Texte vers image"],
|
|
870
|
+
"models": ["SD1.5", "Stability"],
|
|
871
|
+
"date": "2025-05-21",
|
|
872
|
+
"size": 2072321720
|
|
873
|
+
}
|
|
874
|
+
]
|
|
875
|
+
},
|
|
876
|
+
{
|
|
877
|
+
"moduleName": "default",
|
|
878
|
+
"type": "video",
|
|
879
|
+
"category": "GENERATION TYPE",
|
|
880
|
+
"icon": "icon-[lucide--film]",
|
|
881
|
+
"title": "Video",
|
|
882
|
+
"templates": [
|
|
883
|
+
{
|
|
884
|
+
"name": "video_wan2_2_14B_t2v",
|
|
885
|
+
"title": "Wan 2.2 14B Texte vers Vidéo",
|
|
886
|
+
"description": "Générer des vidéos de haute qualité à partir de prompts textuels avec un contrôle esthétique cinématographique et une génération de mouvement dynamique en utilisant Wan 2.2.",
|
|
887
|
+
"mediaType": "image",
|
|
888
|
+
"mediaSubtype": "webp",
|
|
889
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
890
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
891
|
+
"models": ["Wan2.2", "Wan"],
|
|
892
|
+
"date": "2025-07-29",
|
|
893
|
+
"size": 38031935406
|
|
894
|
+
},
|
|
895
|
+
{
|
|
896
|
+
"name": "video_wan2_2_14B_i2v",
|
|
897
|
+
"title": "Wan 2.2 14B Image vers Vidéo",
|
|
898
|
+
"description": "Transformer des images statiques en vidéos dynamiques avec un contrôle précis du mouvement et une préservation du style en utilisant Wan 2.2.",
|
|
899
|
+
"mediaType": "image",
|
|
900
|
+
"mediaSubtype": "webp",
|
|
901
|
+
"thumbnailVariant": "hoverDissolve",
|
|
902
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
903
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
904
|
+
"models": ["Wan2.2", "Wan"],
|
|
905
|
+
"date": "2025-07-29",
|
|
906
|
+
"size": 38031935406
|
|
907
|
+
},
|
|
908
|
+
{
|
|
909
|
+
"name": "video_wan2_2_14B_flf2v",
|
|
910
|
+
"title": "Wan 2.2 14B Première-Dernière Image vers Vidéo",
|
|
911
|
+
"description": "Générer des transitions vidéo fluides en définissant les images de début et de fin.",
|
|
912
|
+
"mediaType": "image",
|
|
913
|
+
"mediaSubtype": "webp",
|
|
914
|
+
"thumbnailVariant": "hoverDissolve",
|
|
915
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
916
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
917
|
+
"models": ["Wan2.2", "Wan"],
|
|
918
|
+
"date": "2025-08-02",
|
|
919
|
+
"size": 38031935406
|
|
920
|
+
},
|
|
921
|
+
{
|
|
922
|
+
"name": "video_wan2_2_14B_animate",
|
|
923
|
+
"title": "Wan2.2 Animate animation et remplacement de personnages",
|
|
924
|
+
"description": "Cadre unifié d'animation et de remplacement de personnages avec réplication précise des mouvements et expressions.",
|
|
925
|
+
"mediaType": "image",
|
|
926
|
+
"mediaSubtype": "webp",
|
|
927
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
|
|
928
|
+
"tags": ["Vidéo", "Image vers vidéo"],
|
|
929
|
+
"models": ["Wan2.2", "Wan"],
|
|
930
|
+
"date": "2025-09-22",
|
|
931
|
+
"size": 27417997476
|
|
932
|
+
},
|
|
933
|
+
{
|
|
934
|
+
"name": "video_hunyuan_video_1.5_720p_t2v",
|
|
935
|
+
"title": "Hunyuan Video 1.5 Texte en vidéo",
|
|
936
|
+
"description": "Générez des vidéos 720p de haute qualité à partir de prompts textuels, avec un contrôle cinématographique de la caméra, des expressions émotionnelles et une simulation physique. Prend en charge plusieurs styles dont réaliste, anime et rendu 3D du texte.",
|
|
937
|
+
"mediaType": "image",
|
|
938
|
+
"mediaSubtype": "webp",
|
|
939
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
940
|
+
"models": ["Hunyuan Video"],
|
|
941
|
+
"date": "2025-11-21",
|
|
942
|
+
"size": 45384919416
|
|
943
|
+
},
|
|
944
|
+
{
|
|
945
|
+
"name": "video_hunyuan_video_1.5_720p_i2v",
|
|
946
|
+
"title": "Hunyuan Video 1.5 Image vers Vidéo",
|
|
947
|
+
"description": "Animez des images fixes en vidéos dynamiques avec des mouvements précis et un contrôle de caméra. Préserve la cohérence visuelle tout en donnant vie aux photos et illustrations grâce à des mouvements fluides et naturels.",
|
|
948
|
+
"mediaType": "image",
|
|
949
|
+
"mediaSubtype": "webp",
|
|
950
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
951
|
+
"models": ["Hunyuan Video"],
|
|
952
|
+
"date": "2025-11-21",
|
|
953
|
+
"size": 45384919416
|
|
954
|
+
},
|
|
955
|
+
{
|
|
956
|
+
"name": "video_wan2_2_14B_s2v",
|
|
957
|
+
"title": "Wan2.2-S2V Génération de Vidéo Pilotée par l'Audio",
|
|
958
|
+
"description": "Transformer des images statiques et de l'audio en vidéos dynamiques avec une synchronisation parfaite et une génération au niveau de la minute.",
|
|
959
|
+
"mediaType": "image",
|
|
960
|
+
"mediaSubtype": "webp",
|
|
961
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
|
|
962
|
+
"tags": ["Vidéo"],
|
|
963
|
+
"models": ["Wan2.2", "Wan"],
|
|
964
|
+
"date": "2025-08-02",
|
|
965
|
+
"size": 25254407700
|
|
966
|
+
},
|
|
967
|
+
{
|
|
968
|
+
"name": "video_humo",
|
|
969
|
+
"title": "HuMo Génération Vidéo",
|
|
970
|
+
"description": "Générez des vidéos basées sur l'audio, l'image et le texte, en préservant la synchronisation labiale des personnages.",
|
|
971
|
+
"mediaType": "image",
|
|
972
|
+
"mediaSubtype": "webp",
|
|
973
|
+
"tags": ["Vidéo"],
|
|
974
|
+
"models": ["HuMo"],
|
|
975
|
+
"date": "2025-09-21",
|
|
976
|
+
"size": 27895812588
|
|
977
|
+
},
|
|
978
|
+
{
|
|
979
|
+
"name": "video_wan2_2_14B_fun_inpaint",
|
|
980
|
+
"title": "Wan 2.2 14B Fun Inpainting",
|
|
981
|
+
"description": "Générez des vidéos à partir des images de début et de fin avec Wan 2.2 Fun Inp.",
|
|
982
|
+
"mediaType": "image",
|
|
983
|
+
"mediaSubtype": "webp",
|
|
984
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
|
|
985
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
986
|
+
"models": ["Wan2.2", "Wan"],
|
|
987
|
+
"date": "2025-08-12",
|
|
988
|
+
"size": 38031935406
|
|
989
|
+
},
|
|
990
|
+
{
|
|
991
|
+
"name": "video_wan2_2_14B_fun_control",
|
|
992
|
+
"title": "Wan 2.2 14B Fun Control",
|
|
993
|
+
"description": "Générer des vidéos guidées par des contrôles de pose, de profondeur et de contours en utilisant Wan 2.2 Fun Control.",
|
|
994
|
+
"mediaType": "image",
|
|
995
|
+
"mediaSubtype": "webp",
|
|
996
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
|
|
997
|
+
"tags": ["Vidéo vers vidéo", "Vidéo"],
|
|
998
|
+
"models": ["Wan2.2", "Wan"],
|
|
999
|
+
"date": "2025-08-12",
|
|
1000
|
+
"size": 38031935406
|
|
1001
|
+
},
|
|
1002
|
+
{
|
|
1003
|
+
"name": "video_wan2_2_14B_fun_camera",
|
|
1004
|
+
"title": "Wan 2.2 14B Contrôle Caméra Fun",
|
|
1005
|
+
"description": "Générer des vidéos avec des contrôles de mouvement de caméra incluant le panoramique, le zoom et la rotation en utilisant Wan 2.2 Fun Camera Control.",
|
|
1006
|
+
"mediaType": "image",
|
|
1007
|
+
"mediaSubtype": "webp",
|
|
1008
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
|
|
1009
|
+
"tags": ["Vidéo vers vidéo", "Vidéo"],
|
|
1010
|
+
"models": ["Wan2.2", "Wan"],
|
|
1011
|
+
"date": "2025-08-17",
|
|
1012
|
+
"size": 40050570035
|
|
1013
|
+
},
|
|
1014
|
+
{
|
|
1015
|
+
"name": "video_wan2_2_5B_ti2v",
|
|
1016
|
+
"title": "Wan 2.2 5B Génération Vidéo",
|
|
1017
|
+
"description": "Générer des vidéos à partir de texte ou d'images en utilisant le modèle hybride Wan 2.2 5B",
|
|
1018
|
+
"mediaType": "image",
|
|
1019
|
+
"mediaSubtype": "webp",
|
|
1020
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1021
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1022
|
+
"models": ["Wan2.2", "Wan"],
|
|
1023
|
+
"date": "2025-07-29",
|
|
1024
|
+
"size": 18146236826
|
|
1025
|
+
},
|
|
1026
|
+
{
|
|
1027
|
+
"name": "video_wan2_2_5B_fun_inpaint",
|
|
1028
|
+
"title": "Wan 2.2 5B Fun Inpainting",
|
|
1029
|
+
"description": "Inpainting vidéo efficace à partir des images de début et de fin. Le modèle 5B offre des itérations rapides pour tester les flux de travail.",
|
|
1030
|
+
"mediaType": "image",
|
|
1031
|
+
"mediaSubtype": "webp",
|
|
1032
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1033
|
+
"models": ["Wan2.2", "Wan"],
|
|
1034
|
+
"date": "2025-07-29",
|
|
1035
|
+
"size": 18146236826
|
|
1036
|
+
},
|
|
1037
|
+
{
|
|
1038
|
+
"name": "video_wan2_2_5B_fun_control",
|
|
1039
|
+
"title": "Wan 2.2 5B Fun Control",
|
|
1040
|
+
"description": "Contrôle vidéo multi-conditions avec guidance par pose, profondeur et contours. Taille compacte 5B pour un développement expérimental.",
|
|
1041
|
+
"mediaType": "image",
|
|
1042
|
+
"mediaSubtype": "webp",
|
|
1043
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1044
|
+
"models": ["Wan2.2", "Wan"],
|
|
1045
|
+
"date": "2025-07-29",
|
|
1046
|
+
"size": 18146236826
|
|
1047
|
+
},
|
|
1048
|
+
{
|
|
1049
|
+
"name": "video_wan_vace_14B_t2v",
|
|
1050
|
+
"title": "Wan VACE Text to Video",
|
|
1051
|
+
"description": "Transformer des descriptions textuelles en vidéos de haute qualité. Prend en charge à la fois 480p et 720p avec le modèle VACE-14B.",
|
|
1052
|
+
"mediaType": "image",
|
|
1053
|
+
"mediaSubtype": "webp",
|
|
1054
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
1055
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1056
|
+
"models": ["Wan2.1", "Wan"],
|
|
1057
|
+
"date": "2025-05-21",
|
|
1058
|
+
"size": 57756572713
|
|
1059
|
+
},
|
|
1060
|
+
{
|
|
1061
|
+
"name": "video_wan_vace_14B_ref2v",
|
|
1062
|
+
"title": "Wan VACE Reference to Video",
|
|
1063
|
+
"description": "Créer des vidéos qui correspondent au style et au contenu d'une image de référence. Parfait pour la génération de vidéos cohérentes en style.",
|
|
1064
|
+
"mediaType": "image",
|
|
1065
|
+
"mediaSubtype": "webp",
|
|
1066
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
1067
|
+
"tags": ["Vidéo", "Image vers vidéo"],
|
|
1068
|
+
"models": ["Wan2.1", "Wan"],
|
|
1069
|
+
"date": "2025-05-21",
|
|
1070
|
+
"size": 57756572713
|
|
1071
|
+
},
|
|
1072
|
+
{
|
|
1073
|
+
"name": "video_wan_vace_14B_v2v",
|
|
1074
|
+
"title": "Wan VACE Control Video",
|
|
1075
|
+
"description": "Générer des vidéos en contrôlant les vidéos d'entrée et les images de référence en utilisant Wan VACE.",
|
|
1076
|
+
"mediaType": "image",
|
|
1077
|
+
"mediaSubtype": "webp",
|
|
1078
|
+
"thumbnailVariant": "compareSlider",
|
|
1079
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
1080
|
+
"tags": ["Vidéo vers vidéo", "Vidéo"],
|
|
1081
|
+
"models": ["Wan2.1", "Wan"],
|
|
1082
|
+
"date": "2025-05-21",
|
|
1083
|
+
"size": 57756572713
|
|
1084
|
+
},
|
|
1085
|
+
{
|
|
1086
|
+
"name": "video_wan_vace_outpainting",
|
|
1087
|
+
"title": "Wan VACE Outpainting",
|
|
1088
|
+
"description": "Générer des vidéos étendues en agrandissant la taille de la vidéo en utilisant l'outpainting Wan VACE.",
|
|
1089
|
+
"mediaType": "image",
|
|
1090
|
+
"mediaSubtype": "webp",
|
|
1091
|
+
"thumbnailVariant": "compareSlider",
|
|
1092
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
1093
|
+
"tags": ["Outpainting", "Vidéo"],
|
|
1094
|
+
"models": ["Wan2.1", "Wan"],
|
|
1095
|
+
"date": "2025-05-21",
|
|
1096
|
+
"size": 57756572713
|
|
1097
|
+
},
|
|
1098
|
+
{
|
|
1099
|
+
"name": "video_wan_vace_flf2v",
|
|
1100
|
+
"title": "Wan VACE First-Last Frame",
|
|
1101
|
+
"description": "Générer des transitions vidéo fluides en définissant les images de début et de fin. Prend en charge les séquences d'images clés personnalisées.",
|
|
1102
|
+
"mediaType": "image",
|
|
1103
|
+
"mediaSubtype": "webp",
|
|
1104
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
1105
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
1106
|
+
"models": ["Wan2.1", "Wan"],
|
|
1107
|
+
"date": "2025-05-21",
|
|
1108
|
+
"size": 57756572713
|
|
1109
|
+
},
|
|
1110
|
+
{
|
|
1111
|
+
"name": "video_wan_vace_inpainting",
|
|
1112
|
+
"title": "Wan VACE Inpainting",
|
|
1113
|
+
"description": "Éditer des régions spécifiques dans les vidéos tout en préservant le contenu environnant. Idéal pour la suppression ou le remplacement d'objets.",
|
|
1114
|
+
"mediaType": "image",
|
|
1115
|
+
"mediaSubtype": "webp",
|
|
1116
|
+
"thumbnailVariant": "compareSlider",
|
|
1117
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
1118
|
+
"tags": ["Inpainting", "Vidéo"],
|
|
1119
|
+
"models": ["Wan2.1", "Wan"],
|
|
1120
|
+
"date": "2025-05-21",
|
|
1121
|
+
"size": 57756572713
|
|
1122
|
+
},
|
|
1123
|
+
{
|
|
1124
|
+
"name": "video_wan2.1_alpha_t2v_14B",
|
|
1125
|
+
"title": "Wan2.1 Alpha Texte vers Vidéo",
|
|
1126
|
+
"description": "Générez des vidéos à partir de texte avec support de canal alpha pour des arrière-plans transparents et objets semi-transparents.",
|
|
1127
|
+
"mediaType": "image",
|
|
1128
|
+
"mediaSubtype": "webp",
|
|
1129
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1130
|
+
"models": ["Wan2.1", "Wan"],
|
|
1131
|
+
"date": "2025-10-06",
|
|
1132
|
+
"size": 22494891213
|
|
1133
|
+
},
|
|
1134
|
+
{
|
|
1135
|
+
"name": "video_wan_ati",
|
|
1136
|
+
"title": "Wan ATI",
|
|
1137
|
+
"description": "Génération de vidéo contrôlée par trajectoire.",
|
|
1138
|
+
"mediaType": "image",
|
|
1139
|
+
"mediaSubtype": "webp",
|
|
1140
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1141
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
|
|
1142
|
+
"tags": ["Vidéo"],
|
|
1143
|
+
"models": ["Wan2.1", "Wan"],
|
|
1144
|
+
"date": "2025-05-21",
|
|
1145
|
+
"size": 25393994138
|
|
1146
|
+
},
|
|
1147
|
+
{
|
|
1148
|
+
"name": "video_wan2.1_fun_camera_v1.1_1.3B",
|
|
1149
|
+
"title": "Wan 2.1 Contrôle Caméra Fun 1.3B",
|
|
1150
|
+
"description": "Générer des vidéos dynamiques avec des mouvements de caméra cinématographiques en utilisant le modèle Wan 2.1 Fun Camera 1.3B.",
|
|
1151
|
+
"mediaType": "image",
|
|
1152
|
+
"mediaSubtype": "webp",
|
|
1153
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1154
|
+
"tags": ["Vidéo"],
|
|
1155
|
+
"models": ["Wan2.1", "Wan"],
|
|
1156
|
+
"date": "2025-04-15",
|
|
1157
|
+
"size": 11489037517
|
|
1158
|
+
},
|
|
1159
|
+
{
|
|
1160
|
+
"name": "video_wan2.1_fun_camera_v1.1_14B",
|
|
1161
|
+
"title": "Wan 2.1 Contrôle Caméra Fun 14B",
|
|
1162
|
+
"description": "Générer des vidéos de haute qualité avec un contrôle avancé de la caméra en utilisant le modèle 14B complet",
|
|
1163
|
+
"mediaType": "image",
|
|
1164
|
+
"mediaSubtype": "webp",
|
|
1165
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1166
|
+
"tags": ["Vidéo"],
|
|
1167
|
+
"models": ["Wan2.1", "Wan"],
|
|
1168
|
+
"date": "2025-04-15",
|
|
1169
|
+
"size": 42047729828
|
|
1170
|
+
},
|
|
1171
|
+
{
|
|
1172
|
+
"name": "text_to_video_wan",
|
|
1173
|
+
"title": "Wan 2.1 Texte vers Vidéo",
|
|
1174
|
+
"description": "Générer des vidéos à partir de prompts textuels en utilisant Wan 2.1.",
|
|
1175
|
+
"mediaType": "image",
|
|
1176
|
+
"mediaSubtype": "webp",
|
|
1177
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
1178
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1179
|
+
"models": ["Wan2.1", "Wan"],
|
|
1180
|
+
"date": "2025-03-01",
|
|
1181
|
+
"size": 9824737690
|
|
1182
|
+
},
|
|
1183
|
+
{
|
|
1184
|
+
"name": "image_to_video_wan",
|
|
1185
|
+
"title": "Wan 2.1 Image vers Vidéo",
|
|
1186
|
+
"description": "Générer des vidéos à partir d'images en utilisant Wan 2.1.",
|
|
1187
|
+
"mediaType": "image",
|
|
1188
|
+
"mediaSubtype": "webp",
|
|
1189
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
1190
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1191
|
+
"models": ["Wan2.1", "Wan"],
|
|
1192
|
+
"date": "2025-03-01",
|
|
1193
|
+
"size": 41049149932
|
|
1194
|
+
},
|
|
1195
|
+
{
|
|
1196
|
+
"name": "wan2.1_fun_inp",
|
|
1197
|
+
"title": "Wan 2.1 Inpainting",
|
|
1198
|
+
"description": "Générer des vidéos à partir des images de début et de fin en utilisant l'inpainting Wan 2.1.",
|
|
1199
|
+
"mediaType": "image",
|
|
1200
|
+
"mediaSubtype": "webp",
|
|
1201
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
|
|
1202
|
+
"tags": ["Inpainting", "Vidéo"],
|
|
1203
|
+
"models": ["Wan2.1", "Wan"],
|
|
1204
|
+
"date": "2025-04-15",
|
|
1205
|
+
"size": 11381663334
|
|
1206
|
+
},
|
|
1207
|
+
{
|
|
1208
|
+
"name": "wan2.1_fun_control",
|
|
1209
|
+
"title": "Wan 2.1 Réseau de Contrôle",
|
|
1210
|
+
"description": "Générer des vidéos guidées par des contrôles de pose, de profondeur et de contours en utilisant Wan 2.1 ControlNet.",
|
|
1211
|
+
"mediaType": "image",
|
|
1212
|
+
"mediaSubtype": "webp",
|
|
1213
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1214
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1215
|
+
"tags": ["Vidéo vers vidéo", "Vidéo"],
|
|
1216
|
+
"models": ["Wan2.1", "Wan"],
|
|
1217
|
+
"date": "2025-04-15",
|
|
1218
|
+
"size": 11381663334
|
|
1219
|
+
},
|
|
1220
|
+
{
|
|
1221
|
+
"name": "wan2.1_flf2v_720_f16",
|
|
1222
|
+
"title": "Wan 2.1 Vidéo Premier-Dernier Image 720p F16",
|
|
1223
|
+
"description": "Générer des vidéos en contrôlant les première et dernière images en utilisant Wan 2.1 FLF2V.",
|
|
1224
|
+
"mediaType": "image",
|
|
1225
|
+
"mediaSubtype": "webp",
|
|
1226
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
1227
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
1228
|
+
"models": ["Wan2.1", "Wan"],
|
|
1229
|
+
"date": "2025-04-15",
|
|
1230
|
+
"size": 41049149932
|
|
1231
|
+
},
|
|
1232
|
+
{
|
|
1233
|
+
"name": "ltxv_text_to_video",
|
|
1234
|
+
"title": "LTXV Texte vers Vidéo",
|
|
1235
|
+
"mediaType": "image",
|
|
1236
|
+
"mediaSubtype": "webp",
|
|
1237
|
+
"description": "Générer des vidéos à partir de prompts textuels.",
|
|
1238
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1239
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1240
|
+
"models": ["LTXV"],
|
|
1241
|
+
"date": "2025-03-01",
|
|
1242
|
+
"size": 19155554140
|
|
1243
|
+
},
|
|
1244
|
+
{
|
|
1245
|
+
"name": "ltxv_image_to_video",
|
|
1246
|
+
"title": "LTXV Image vers Vidéo",
|
|
1247
|
+
"mediaType": "image",
|
|
1248
|
+
"mediaSubtype": "webp",
|
|
1249
|
+
"description": "Générer des vidéos à partir d'images fixes.",
|
|
1250
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1251
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
1252
|
+
"models": ["LTXV"],
|
|
1253
|
+
"date": "2025-03-01",
|
|
1254
|
+
"size": 19155554140
|
|
1255
|
+
},
|
|
1256
|
+
{
|
|
1257
|
+
"name": "mochi_text_to_video_example",
|
|
1258
|
+
"title": "Mochi Texte vers Vidéo",
|
|
1259
|
+
"mediaType": "image",
|
|
1260
|
+
"mediaSubtype": "webp",
|
|
1261
|
+
"description": "Générer des vidéos à partir de prompts textuels en utilisant le modèle Mochi.",
|
|
1262
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
|
|
1263
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1264
|
+
"models": ["Mochi"],
|
|
1265
|
+
"date": "2025-03-01",
|
|
1266
|
+
"size": 30762703258
|
|
1267
|
+
},
|
|
1268
|
+
{
|
|
1269
|
+
"name": "hunyuan_video_text_to_video",
|
|
1270
|
+
"title": "Hunyuan Vidéo Texte vers Vidéo",
|
|
1271
|
+
"mediaType": "image",
|
|
1272
|
+
"mediaSubtype": "webp",
|
|
1273
|
+
"description": "Générer des vidéos à partir de prompts textuels en utilisant le modèle Hunyuan.",
|
|
1274
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
|
|
1275
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1276
|
+
"models": ["Hunyuan Video", "Tencent"],
|
|
1277
|
+
"date": "2025-03-01",
|
|
1278
|
+
"size": 35476429865
|
|
1279
|
+
},
|
|
1280
|
+
{
|
|
1281
|
+
"name": "image_to_video",
|
|
1282
|
+
"title": "SVD Image vers Vidéo",
|
|
1283
|
+
"mediaType": "image",
|
|
1284
|
+
"mediaSubtype": "webp",
|
|
1285
|
+
"description": "Générer des vidéos à partir d'images fixes.",
|
|
1286
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
1287
|
+
"tags": ["Image vers vidéo", "Vidéo"],
|
|
1288
|
+
"models": ["SVD", "Stability"],
|
|
1289
|
+
"date": "2025-03-01",
|
|
1290
|
+
"size": 9556302234
|
|
1291
|
+
},
|
|
1292
|
+
{
|
|
1293
|
+
"name": "txt_to_image_to_video",
|
|
1294
|
+
"title": "SVD Texte à Image à Vidéo",
|
|
1295
|
+
"mediaType": "image",
|
|
1296
|
+
"mediaSubtype": "webp",
|
|
1297
|
+
"description": "Générer des vidéos en créant d'abord des images à partir de prompts textuels.",
|
|
1298
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
1299
|
+
"tags": ["Texte vers vidéo", "Vidéo"],
|
|
1300
|
+
"models": ["SVD", "Stability"],
|
|
1301
|
+
"date": "2025-03-01",
|
|
1302
|
+
"size": 16492674417
|
|
1303
|
+
}
|
|
1304
|
+
]
|
|
1305
|
+
},
|
|
1306
|
+
{
|
|
1307
|
+
"moduleName": "default",
|
|
1308
|
+
"type": "audio",
|
|
1309
|
+
"category": "GENERATION TYPE",
|
|
1310
|
+
"icon": "icon-[lucide--volume-2]",
|
|
1311
|
+
"title": "Audio",
|
|
1312
|
+
"templates": [
|
|
1313
|
+
{
|
|
1314
|
+
"name": "audio_stable_audio_example",
|
|
1315
|
+
"title": "Audio Stable",
|
|
1316
|
+
"mediaType": "audio",
|
|
1317
|
+
"mediaSubtype": "mp3",
|
|
1318
|
+
"description": "Générer de l'audio à partir de prompts textuels en utilisant Stable Audio.",
|
|
1319
|
+
"tags": ["Texte vers audio", "Audio"],
|
|
1320
|
+
"models": ["Stable Audio", "Stability"],
|
|
1321
|
+
"date": "2025-03-01",
|
|
1322
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
|
|
1323
|
+
"size": 5744518758
|
|
1324
|
+
},
|
|
1325
|
+
{
|
|
1326
|
+
"name": "audio_ace_step_1_t2a_instrumentals",
|
|
1327
|
+
"title": "ACE-Step v1 Texte vers Musique Instrumentale",
|
|
1328
|
+
"mediaType": "audio",
|
|
1329
|
+
"mediaSubtype": "mp3",
|
|
1330
|
+
"description": "Générer de la musique instrumentale à partir de prompts textuels en utilisant ACE-Step v1.",
|
|
1331
|
+
"tags": ["Texte vers audio", "Audio"],
|
|
1332
|
+
"models": ["ACE-Step"],
|
|
1333
|
+
"date": "2025-03-01",
|
|
1334
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1335
|
+
"size": 7698728878
|
|
1336
|
+
},
|
|
1337
|
+
{
|
|
1338
|
+
"name": "audio_ace_step_1_t2a_song",
|
|
1339
|
+
"title": "ACE Step v1 Texte vers Chanson",
|
|
1340
|
+
"mediaType": "audio",
|
|
1341
|
+
"mediaSubtype": "mp3",
|
|
1342
|
+
"description": "Générer des chansons avec des voix à partir de prompts textuels en utilisant ACE-Step v1, prenant en charge la multilingue et la personnalisation du style.",
|
|
1343
|
+
"tags": ["Texte vers audio", "Audio"],
|
|
1344
|
+
"models": ["ACE-Step"],
|
|
1345
|
+
"date": "2025-03-01",
|
|
1346
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1347
|
+
"size": 7698728878
|
|
1348
|
+
},
|
|
1349
|
+
{
|
|
1350
|
+
"name": "audio_ace_step_1_m2m_editing",
|
|
1351
|
+
"title": "ACE Step v1 Édition M2M",
|
|
1352
|
+
"mediaType": "audio",
|
|
1353
|
+
"mediaSubtype": "mp3",
|
|
1354
|
+
"description": "Éditer des chansons existantes pour changer le style et les paroles en utilisant ACE-Step v1 M2M.",
|
|
1355
|
+
"tags": ["Édition audio", "Audio"],
|
|
1356
|
+
"models": ["ACE-Step"],
|
|
1357
|
+
"date": "2025-03-01",
|
|
1358
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1359
|
+
"size": 7698728878
|
|
1360
|
+
}
|
|
1361
|
+
]
|
|
1362
|
+
},
|
|
1363
|
+
{
|
|
1364
|
+
"moduleName": "default",
|
|
1365
|
+
"type": "3d",
|
|
1366
|
+
"category": "GENERATION TYPE",
|
|
1367
|
+
"icon": "icon-[lucide--box]",
|
|
1368
|
+
"title": "3D Model",
|
|
1369
|
+
"templates": [
|
|
1370
|
+
{
|
|
1371
|
+
"name": "3d_hunyuan3d-v2.1",
|
|
1372
|
+
"title": "Hunyuan3D 2.1",
|
|
1373
|
+
"mediaType": "image",
|
|
1374
|
+
"mediaSubtype": "webp",
|
|
1375
|
+
"description": "Générez des modèles 3D à partir d'images uniques avec Hunyuan3D 2.1.",
|
|
1376
|
+
"tags": ["Image vers 3D", "3D"],
|
|
1377
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
1378
|
+
"date": "2025-03-01",
|
|
1379
|
+
"tutorialUrl": "",
|
|
1380
|
+
"size": 4928474972
|
|
1381
|
+
},
|
|
1382
|
+
{
|
|
1383
|
+
"name": "3d_hunyuan3d_image_to_model",
|
|
1384
|
+
"title": "Hunyuan3D 2.0",
|
|
1385
|
+
"mediaType": "image",
|
|
1386
|
+
"mediaSubtype": "webp",
|
|
1387
|
+
"description": "Générer des modèles 3D à partir d'images simples en utilisant Hunyuan3D 2.0.",
|
|
1388
|
+
"tags": ["Image vers 3D", "3D"],
|
|
1389
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
1390
|
+
"date": "2025-03-01",
|
|
1391
|
+
"tutorialUrl": "",
|
|
1392
|
+
"OpenSource": false,
|
|
1393
|
+
"size": 4928474972
|
|
1394
|
+
},
|
|
1395
|
+
{
|
|
1396
|
+
"name": "3d_hunyuan3d_multiview_to_model",
|
|
1397
|
+
"title": "Hunyuan3D 2.0 Multivue",
|
|
1398
|
+
"mediaType": "image",
|
|
1399
|
+
"mediaSubtype": "webp",
|
|
1400
|
+
"description": "Générer des modèles 3D à partir de vues multiples en utilisant Hunyuan3D 2.0 MV.",
|
|
1401
|
+
"tags": ["3D", "Image vers 3D"],
|
|
1402
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
1403
|
+
"date": "2025-03-01",
|
|
1404
|
+
"tutorialUrl": "",
|
|
1405
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1406
|
+
"size": 4928474972
|
|
1407
|
+
},
|
|
1408
|
+
{
|
|
1409
|
+
"name": "3d_hunyuan3d_multiview_to_model_turbo",
|
|
1410
|
+
"title": "Hunyuan3D 2.0 Multivue Turbo",
|
|
1411
|
+
"mediaType": "image",
|
|
1412
|
+
"mediaSubtype": "webp",
|
|
1413
|
+
"description": "Générer des modèles 3D à partir de vues multiples en utilisant Hunyuan3D 2.0 MV Turbo.",
|
|
1414
|
+
"tags": ["Image vers 3D", "3D"],
|
|
1415
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
1416
|
+
"date": "2025-03-01",
|
|
1417
|
+
"tutorialUrl": "",
|
|
1418
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1419
|
+
"size": 4928474972
|
|
1420
|
+
}
|
|
1421
|
+
]
|
|
1422
|
+
},
|
|
1423
|
+
{
|
|
1424
|
+
"moduleName": "default",
|
|
1425
|
+
"type": "image",
|
|
1426
|
+
"category": "CLOSED SOURCE MODELS",
|
|
1427
|
+
"icon": "icon-[lucide--hand-coins]",
|
|
1428
|
+
"title": "Image API",
|
|
1429
|
+
"templates": [
|
|
1430
|
+
{
|
|
1431
|
+
"name": "api_nano_banana_pro",
|
|
1432
|
+
"title": "Nano Banana Pro",
|
|
1433
|
+
"description": "Nano-banana Pro (Gemini 3.0 Pro Image) - Génération et édition d'images 4K de qualité studio avec rendu de texte amélioré et cohérence des personnages.",
|
|
1434
|
+
"mediaType": "image",
|
|
1435
|
+
"mediaSubtype": "webp",
|
|
1436
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1437
|
+
"tags": ["Édition d'image", "Image", "API"],
|
|
1438
|
+
"models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
|
|
1439
|
+
"date": "2025-11-21",
|
|
1440
|
+
"OpenSource": false,
|
|
1441
|
+
"size": 0,
|
|
1442
|
+
"vram": 0
|
|
1443
|
+
},
|
|
1444
|
+
{
|
|
1445
|
+
"name": "api_from_photo_2_miniature",
|
|
1446
|
+
"title": "Style photo vers modèle",
|
|
1447
|
+
"description": "Transformez des photos réelles de bâtiments en plans architecturaux puis en maquettes physiques détaillées. Un pipeline complet de visualisation architecturale, de la photo à la miniature.",
|
|
1448
|
+
"mediaType": "image",
|
|
1449
|
+
"mediaSubtype": "webp",
|
|
1450
|
+
"tags": ["Édition d'image", "Image", "3D"],
|
|
1451
|
+
"models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
|
|
1452
|
+
"date": "2025-11-21",
|
|
1453
|
+
"OpenSource": false,
|
|
1454
|
+
"size": 0,
|
|
1455
|
+
"vram": 0
|
|
1456
|
+
},
|
|
1457
|
+
{
|
|
1458
|
+
"name": "api_bytedance_seedream4",
|
|
1459
|
+
"title": "ByteDance Seedream 4.0",
|
|
1460
|
+
"description": "Modèle d'IA multimodal pour la génération d'images à partir de texte et l'édition d'images. Générez des images 2K en moins de 2 secondes avec un contrôle en langage naturel.",
|
|
1461
|
+
"mediaType": "image",
|
|
1462
|
+
"mediaSubtype": "webp",
|
|
1463
|
+
"tags": ["Édition d'image", "Image", "API", "Texte vers image"],
|
|
1464
|
+
"models": ["Seedream 4.0", "ByteDance"],
|
|
1465
|
+
"date": "2025-09-11",
|
|
1466
|
+
"OpenSource": false,
|
|
1467
|
+
"size": 0,
|
|
1468
|
+
"vram": 0
|
|
1469
|
+
},
|
|
1470
|
+
{
|
|
1471
|
+
"name": "api_google_gemini_image",
|
|
1472
|
+
"title": "Google Gemini Image",
|
|
1473
|
+
"description": "Nano-banana (Gemini-2.5-Flash Image) - édition d'images avec cohérence.",
|
|
1474
|
+
"mediaType": "image",
|
|
1475
|
+
"mediaSubtype": "webp",
|
|
1476
|
+
"tags": ["Édition d'image", "Image", "API", "Texte vers image"],
|
|
1477
|
+
"models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
|
|
1478
|
+
"date": "2025-08-27",
|
|
1479
|
+
"OpenSource": false,
|
|
1480
|
+
"size": 0,
|
|
1481
|
+
"vram": 0
|
|
1482
|
+
},
|
|
1483
|
+
{
|
|
1484
|
+
"name": "api_flux2",
|
|
1485
|
+
"title": "Flux.2 Pro",
|
|
1486
|
+
"description": "Générez des images photoréalistes jusqu'à 4MP avec cohérence multi-références et rendu de texte professionnel.",
|
|
1487
|
+
"mediaType": "image",
|
|
1488
|
+
"mediaSubtype": "webp",
|
|
1489
|
+
"tags": ["Édition d'image", "Image", "API", "Texte vers image"],
|
|
1490
|
+
"models": ["Flux.2", "BFL"],
|
|
1491
|
+
"date": "2025-11-26",
|
|
1492
|
+
"OpenSource": false,
|
|
1493
|
+
"size": 0,
|
|
1494
|
+
"vram": 0
|
|
1495
|
+
},
|
|
1496
|
+
{
|
|
1497
|
+
"name": "api_topaz_image_enhance",
|
|
1498
|
+
"title": "Amélioration d'image Topaz",
|
|
1499
|
+
"description": "Amélioration d’image professionnelle avec le modèle Reimagine de Topaz, comprenant l’amélioration du visage et la restauration des détails.",
|
|
1500
|
+
"mediaType": "image",
|
|
1501
|
+
"mediaSubtype": "webp",
|
|
1502
|
+
"thumbnailVariant": "compareSlider",
|
|
1503
|
+
"tags": ["Image", "API", "Amélioration"],
|
|
1504
|
+
"models": ["Topaz", "Reimagine"],
|
|
1505
|
+
"date": "2025-11-25",
|
|
1506
|
+
"OpenSource": false,
|
|
1507
|
+
"size": 0,
|
|
1508
|
+
"vram": 0
|
|
1509
|
+
},
|
|
1510
|
+
{
|
|
1511
|
+
"name": "api_bfl_flux_1_kontext_multiple_images_input",
|
|
1512
|
+
"title": "BFL Flux.1 Kontext Entrée Multi-Images",
|
|
1513
|
+
"description": "Importer plusieurs images et les éditer avec Flux.1 Kontext.",
|
|
1514
|
+
"mediaType": "image",
|
|
1515
|
+
"mediaSubtype": "webp",
|
|
1516
|
+
"thumbnailVariant": "compareSlider",
|
|
1517
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
1518
|
+
"tags": ["Édition d'image", "Image"],
|
|
1519
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
1520
|
+
"date": "2025-05-29",
|
|
1521
|
+
"OpenSource": false,
|
|
1522
|
+
"size": 0,
|
|
1523
|
+
"vram": 0
|
|
1524
|
+
},
|
|
1525
|
+
{
|
|
1526
|
+
"name": "api_bfl_flux_1_kontext_pro_image",
|
|
1527
|
+
"title": "BFL Flux.1 Kontext Pro",
|
|
1528
|
+
"description": "Éditer des images avec Flux.1 Kontext pro image.",
|
|
1529
|
+
"mediaType": "image",
|
|
1530
|
+
"mediaSubtype": "webp",
|
|
1531
|
+
"thumbnailVariant": "compareSlider",
|
|
1532
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
1533
|
+
"tags": ["Édition d'image", "Image"],
|
|
1534
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
1535
|
+
"date": "2025-05-29",
|
|
1536
|
+
"OpenSource": false,
|
|
1537
|
+
"size": 0,
|
|
1538
|
+
"vram": 0
|
|
1539
|
+
},
|
|
1540
|
+
{
|
|
1541
|
+
"name": "api_bfl_flux_1_kontext_max_image",
|
|
1542
|
+
"title": "BFL Flux.1 Kontext Max",
|
|
1543
|
+
"description": "Éditer des images avec Flux.1 Kontext max image.",
|
|
1544
|
+
"mediaType": "image",
|
|
1545
|
+
"mediaSubtype": "webp",
|
|
1546
|
+
"thumbnailVariant": "compareSlider",
|
|
1547
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
1548
|
+
"tags": ["Édition d'image", "Image"],
|
|
1549
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
1550
|
+
"date": "2025-05-29",
|
|
1551
|
+
"OpenSource": false,
|
|
1552
|
+
"size": 0,
|
|
1553
|
+
"vram": 0
|
|
1554
|
+
},
|
|
1555
|
+
{
|
|
1556
|
+
"name": "api_wan_text_to_image",
|
|
1557
|
+
"title": "Wan2.5: Texte vers Image",
|
|
1558
|
+
"description": "Générez des images avec un excellent suivi des prompts et une qualité visuelle élevée avec FLUX.1 Pro.",
|
|
1559
|
+
"mediaType": "image",
|
|
1560
|
+
"mediaSubtype": "webp",
|
|
1561
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1562
|
+
"models": ["Wan2.5", "Wan"],
|
|
1563
|
+
"date": "2025-09-25",
|
|
1564
|
+
"OpenSource": false,
|
|
1565
|
+
"size": 0,
|
|
1566
|
+
"vram": 0
|
|
1567
|
+
},
|
|
1568
|
+
{
|
|
1569
|
+
"name": "api_bfl_flux_pro_t2i",
|
|
1570
|
+
"title": "BFL Flux[Pro]: Texte vers Image",
|
|
1571
|
+
"description": "Générer des images avec un excellent suivi des prompts et une qualité visuelle en utilisant FLUX.1 Pro.",
|
|
1572
|
+
"mediaType": "image",
|
|
1573
|
+
"mediaSubtype": "webp",
|
|
1574
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
|
|
1575
|
+
"tags": ["Édition d'image", "Image"],
|
|
1576
|
+
"models": ["Flux", "BFL"],
|
|
1577
|
+
"date": "2025-05-01",
|
|
1578
|
+
"OpenSource": false,
|
|
1579
|
+
"size": 0,
|
|
1580
|
+
"vram": 0
|
|
1581
|
+
},
|
|
1582
|
+
{
|
|
1583
|
+
"name": "api_luma_photon_i2i",
|
|
1584
|
+
"title": "Luma Photon: Image vers Image",
|
|
1585
|
+
"description": "Guider la génération d'images en utilisant une combinaison d'images et de prompt.",
|
|
1586
|
+
"mediaType": "image",
|
|
1587
|
+
"mediaSubtype": "webp",
|
|
1588
|
+
"thumbnailVariant": "compareSlider",
|
|
1589
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
1590
|
+
"models": ["Luma"],
|
|
1591
|
+
"date": "2025-03-01",
|
|
1592
|
+
"OpenSource": false,
|
|
1593
|
+
"size": 0,
|
|
1594
|
+
"vram": 0
|
|
1595
|
+
},
|
|
1596
|
+
{
|
|
1597
|
+
"name": "api_luma_photon_style_ref",
|
|
1598
|
+
"title": "Luma Photon: Référence de Style",
|
|
1599
|
+
"description": "Générer des images en mélangeant des références de style avec un contrôle précis en utilisant Luma Photon.",
|
|
1600
|
+
"mediaType": "image",
|
|
1601
|
+
"mediaSubtype": "webp",
|
|
1602
|
+
"thumbnailVariant": "compareSlider",
|
|
1603
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1604
|
+
"models": ["Luma"],
|
|
1605
|
+
"date": "2025-03-01",
|
|
1606
|
+
"OpenSource": false,
|
|
1607
|
+
"size": 0,
|
|
1608
|
+
"vram": 0
|
|
1609
|
+
},
|
|
1610
|
+
{
|
|
1611
|
+
"name": "api_recraft_image_gen_with_color_control",
|
|
1612
|
+
"title": "Recraft: Génération d'Image avec Contrôle Couleur",
|
|
1613
|
+
"description": "Générer des images avec des palettes de couleurs personnalisées et des visuels spécifiques à la marque en utilisant Recraft.",
|
|
1614
|
+
"mediaType": "image",
|
|
1615
|
+
"mediaSubtype": "webp",
|
|
1616
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1617
|
+
"models": ["Recraft"],
|
|
1618
|
+
"date": "2025-03-01",
|
|
1619
|
+
"OpenSource": false,
|
|
1620
|
+
"size": 0,
|
|
1621
|
+
"vram": 0
|
|
1622
|
+
},
|
|
1623
|
+
{
|
|
1624
|
+
"name": "api_recraft_image_gen_with_style_control",
|
|
1625
|
+
"title": "Recraft: Génération d'Image avec Contrôle Style",
|
|
1626
|
+
"description": "Contrôler le style avec des exemples visuels, aligner le positionnement et affiner les objets. Stocker et partager des styles pour une cohérence de marque parfaite.",
|
|
1627
|
+
"mediaType": "image",
|
|
1628
|
+
"mediaSubtype": "webp",
|
|
1629
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1630
|
+
"models": ["Recraft"],
|
|
1631
|
+
"date": "2025-03-01",
|
|
1632
|
+
"OpenSource": false,
|
|
1633
|
+
"size": 0,
|
|
1634
|
+
"vram": 0
|
|
1635
|
+
},
|
|
1636
|
+
{
|
|
1637
|
+
"name": "api_recraft_vector_gen",
|
|
1638
|
+
"title": "Recraft: Génération Vectorielle",
|
|
1639
|
+
"description": "Générer des images vectorielles de haute qualité à partir de prompts textuels en utilisant le générateur AI vectoriel de Recraft.",
|
|
1640
|
+
"mediaType": "image",
|
|
1641
|
+
"mediaSubtype": "webp",
|
|
1642
|
+
"tags": ["Texte vers image", "Image", "API", "Vectoriel"],
|
|
1643
|
+
"models": ["Recraft"],
|
|
1644
|
+
"date": "2025-03-01",
|
|
1645
|
+
"OpenSource": false,
|
|
1646
|
+
"size": 0,
|
|
1647
|
+
"vram": 0
|
|
1648
|
+
},
|
|
1649
|
+
{
|
|
1650
|
+
"name": "api_runway_text_to_image",
|
|
1651
|
+
"title": "Runway: Texte vers Image",
|
|
1652
|
+
"description": "Générer des images de haute qualité à partir de prompts textuels en utilisant le modèle AI de Runway.",
|
|
1653
|
+
"mediaType": "image",
|
|
1654
|
+
"mediaSubtype": "webp",
|
|
1655
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1656
|
+
"models": ["Runway"],
|
|
1657
|
+
"date": "2025-03-01",
|
|
1658
|
+
"OpenSource": false,
|
|
1659
|
+
"size": 0,
|
|
1660
|
+
"vram": 0
|
|
1661
|
+
},
|
|
1662
|
+
{
|
|
1663
|
+
"name": "api_runway_reference_to_image",
|
|
1664
|
+
"title": "Runway: Référence vers Image",
|
|
1665
|
+
"description": "Générer de nouvelles images basées sur des styles et compositions de référence avec l'AI de Runway.",
|
|
1666
|
+
"mediaType": "image",
|
|
1667
|
+
"thumbnailVariant": "compareSlider",
|
|
1668
|
+
"mediaSubtype": "webp",
|
|
1669
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
1670
|
+
"models": ["Runway"],
|
|
1671
|
+
"date": "2025-03-01",
|
|
1672
|
+
"OpenSource": false,
|
|
1673
|
+
"size": 0,
|
|
1674
|
+
"vram": 0
|
|
1675
|
+
},
|
|
1676
|
+
{
|
|
1677
|
+
"name": "api_stability_ai_stable_image_ultra_t2i",
|
|
1678
|
+
"title": "Stability AI: Stable Image Ultra Texte vers Image",
|
|
1679
|
+
"description": "Générer des images de haute qualité avec un excellent respect des prompts. Parfait pour des cas d'utilisation professionnels à une résolution de 1 mégapixel.",
|
|
1680
|
+
"mediaType": "image",
|
|
1681
|
+
"mediaSubtype": "webp",
|
|
1682
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1683
|
+
"models": ["Stability"],
|
|
1684
|
+
"date": "2025-03-01",
|
|
1685
|
+
"OpenSource": false,
|
|
1686
|
+
"size": 0,
|
|
1687
|
+
"vram": 0
|
|
1688
|
+
},
|
|
1689
|
+
{
|
|
1690
|
+
"name": "api_stability_ai_i2i",
|
|
1691
|
+
"title": "Stability AI: Image vers Image",
|
|
1692
|
+
"description": "Transformer des images avec une génération de haute qualité en utilisant Stability AI, parfait pour l'édition professionnelle et le transfert de style.",
|
|
1693
|
+
"mediaType": "image",
|
|
1694
|
+
"thumbnailVariant": "compareSlider",
|
|
1695
|
+
"mediaSubtype": "webp",
|
|
1696
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
1697
|
+
"models": ["Stability"],
|
|
1698
|
+
"date": "2025-03-01",
|
|
1699
|
+
"OpenSource": false,
|
|
1700
|
+
"size": 0,
|
|
1701
|
+
"vram": 0
|
|
1702
|
+
},
|
|
1703
|
+
{
|
|
1704
|
+
"name": "api_stability_ai_sd3.5_t2i",
|
|
1705
|
+
"title": "Stability AI: SD3.5 Texte vers Image",
|
|
1706
|
+
"description": "Générer des images de haute qualité avec un excellent respect des prompts. Parfait pour des cas d'utilisation professionnels à une résolution de 1 mégapixel.",
|
|
1707
|
+
"mediaType": "image",
|
|
1708
|
+
"mediaSubtype": "webp",
|
|
1709
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1710
|
+
"models": ["Stability"],
|
|
1711
|
+
"date": "2025-03-01",
|
|
1712
|
+
"OpenSource": false,
|
|
1713
|
+
"size": 0,
|
|
1714
|
+
"vram": 0
|
|
1715
|
+
},
|
|
1716
|
+
{
|
|
1717
|
+
"name": "api_stability_ai_sd3.5_i2i",
|
|
1718
|
+
"title": "Stability AI: SD3.5 Image vers Image",
|
|
1719
|
+
"description": "Générer des images de haute qualité avec un excellent respect des prompts. Parfait pour des cas d'utilisation professionnels à une résolution de 1 mégapixel.",
|
|
1720
|
+
"mediaType": "image",
|
|
1721
|
+
"thumbnailVariant": "compareSlider",
|
|
1722
|
+
"mediaSubtype": "webp",
|
|
1723
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
1724
|
+
"models": ["Stability"],
|
|
1725
|
+
"date": "2025-03-01",
|
|
1726
|
+
"OpenSource": false,
|
|
1727
|
+
"size": 0,
|
|
1728
|
+
"vram": 0
|
|
1729
|
+
},
|
|
1730
|
+
{
|
|
1731
|
+
"name": "api_ideogram_v3_t2i",
|
|
1732
|
+
"title": "Ideogram V3: Texte vers Image",
|
|
1733
|
+
"description": "Générer des images de qualité professionnelle avec un excellent alignement des prompts, du photoréalisme et un rendu de texte en utilisant Ideogram V3.",
|
|
1734
|
+
"mediaType": "image",
|
|
1735
|
+
"mediaSubtype": "webp",
|
|
1736
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1737
|
+
"models": ["Ideogram"],
|
|
1738
|
+
"date": "2025-03-01",
|
|
1739
|
+
"OpenSource": false,
|
|
1740
|
+
"size": 0,
|
|
1741
|
+
"vram": 0
|
|
1742
|
+
},
|
|
1743
|
+
{
|
|
1744
|
+
"name": "api_openai_image_1_t2i",
|
|
1745
|
+
"title": "OpenAI: GPT-Image-1 Texte vers Image",
|
|
1746
|
+
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI GPT Image 1.",
|
|
1747
|
+
"mediaType": "image",
|
|
1748
|
+
"mediaSubtype": "webp",
|
|
1749
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1750
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
1751
|
+
"date": "2025-03-01",
|
|
1752
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
|
|
1753
|
+
"OpenSource": false,
|
|
1754
|
+
"size": 0,
|
|
1755
|
+
"vram": 0
|
|
1756
|
+
},
|
|
1757
|
+
{
|
|
1758
|
+
"name": "api_openai_image_1_i2i",
|
|
1759
|
+
"title": "OpenAI: GPT-Image-1 Image vers Image",
|
|
1760
|
+
"description": "Générer des images à partir d'images d'entrée en utilisant l'API OpenAI GPT Image 1.",
|
|
1761
|
+
"mediaType": "image",
|
|
1762
|
+
"mediaSubtype": "webp",
|
|
1763
|
+
"thumbnailVariant": "compareSlider",
|
|
1764
|
+
"tags": ["Image vers image", "Image", "API"],
|
|
1765
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
1766
|
+
"date": "2025-03-01",
|
|
1767
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
|
|
1768
|
+
"OpenSource": false,
|
|
1769
|
+
"size": 0,
|
|
1770
|
+
"vram": 0
|
|
1771
|
+
},
|
|
1772
|
+
{
|
|
1773
|
+
"name": "api_openai_image_1_inpaint",
|
|
1774
|
+
"title": "OpenAI: GPT-Image-1 Inpainting",
|
|
1775
|
+
"description": "Éditer des images en utilisant l'inpainting avec l'API OpenAI GPT Image 1.",
|
|
1776
|
+
"mediaType": "image",
|
|
1777
|
+
"mediaSubtype": "webp",
|
|
1778
|
+
"thumbnailVariant": "compareSlider",
|
|
1779
|
+
"tags": ["Inpainting", "Image", "API"],
|
|
1780
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
1781
|
+
"date": "2025-03-01",
|
|
1782
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
|
|
1783
|
+
"OpenSource": false,
|
|
1784
|
+
"size": 0,
|
|
1785
|
+
"vram": 0
|
|
1786
|
+
},
|
|
1787
|
+
{
|
|
1788
|
+
"name": "api_openai_image_1_multi_inputs",
|
|
1789
|
+
"title": "OpenAI: GPT-Image-1 Multi Entrées",
|
|
1790
|
+
"description": "Générer des images à partir de plusieurs entrées en utilisant l'API OpenAI GPT Image 1.",
|
|
1791
|
+
"mediaType": "image",
|
|
1792
|
+
"mediaSubtype": "webp",
|
|
1793
|
+
"thumbnailVariant": "compareSlider",
|
|
1794
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1795
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
1796
|
+
"date": "2025-03-01",
|
|
1797
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
|
|
1798
|
+
"OpenSource": false,
|
|
1799
|
+
"size": 0,
|
|
1800
|
+
"vram": 0
|
|
1801
|
+
},
|
|
1802
|
+
{
|
|
1803
|
+
"name": "api_openai_dall_e_2_t2i",
|
|
1804
|
+
"title": "OpenAI: Dall-E 2 Texte vers Image",
|
|
1805
|
+
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI Dall-E 2.",
|
|
1806
|
+
"mediaType": "image",
|
|
1807
|
+
"mediaSubtype": "webp",
|
|
1808
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1809
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1810
|
+
"date": "2025-03-01",
|
|
1811
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
|
|
1812
|
+
"OpenSource": false,
|
|
1813
|
+
"size": 0,
|
|
1814
|
+
"vram": 0
|
|
1815
|
+
},
|
|
1816
|
+
{
|
|
1817
|
+
"name": "api_openai_dall_e_2_inpaint",
|
|
1818
|
+
"title": "OpenAI: Dall-E 2 Inpainting",
|
|
1819
|
+
"description": "Éditer des images en utilisant l'inpainting avec l'API OpenAI Dall-E 2.",
|
|
1820
|
+
"mediaType": "image",
|
|
1821
|
+
"mediaSubtype": "webp",
|
|
1822
|
+
"thumbnailVariant": "compareSlider",
|
|
1823
|
+
"tags": ["Inpainting", "Image", "API"],
|
|
1824
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1825
|
+
"date": "2025-03-01",
|
|
1826
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
|
|
1827
|
+
"OpenSource": false,
|
|
1828
|
+
"size": 0,
|
|
1829
|
+
"vram": 0
|
|
1830
|
+
},
|
|
1831
|
+
{
|
|
1832
|
+
"name": "api_openai_dall_e_3_t2i",
|
|
1833
|
+
"title": "OpenAI: Dall-E 3 Texte vers Image",
|
|
1834
|
+
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI Dall-E 3.",
|
|
1835
|
+
"mediaType": "image",
|
|
1836
|
+
"mediaSubtype": "webp",
|
|
1837
|
+
"tags": ["Texte vers image", "Image", "API"],
|
|
1838
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1839
|
+
"date": "2025-03-01",
|
|
1840
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3",
|
|
1841
|
+
"OpenSource": false,
|
|
1842
|
+
"size": 0,
|
|
1843
|
+
"vram": 0
|
|
1844
|
+
}
|
|
1845
|
+
]
|
|
1846
|
+
},
|
|
1847
|
+
{
|
|
1848
|
+
"moduleName": "default",
|
|
1849
|
+
"type": "video",
|
|
1850
|
+
"category": "CLOSED SOURCE MODELS",
|
|
1851
|
+
"icon": "icon-[lucide--film]",
|
|
1852
|
+
"title": "Video API",
|
|
1853
|
+
"templates": [
|
|
1854
|
+
{
|
|
1855
|
+
"name": "api_openai_sora_video",
|
|
1856
|
+
"title": "Sora 2: Texte et Image vers Vidéo",
|
|
1857
|
+
"description": "Génération vidéo Sora-2 et Sora-2 Pro d'OpenAI avec audio synchronisé.",
|
|
1858
|
+
"mediaType": "image",
|
|
1859
|
+
"mediaSubtype": "webp",
|
|
1860
|
+
"tags": ["Image vers vidéo", "Texte vers vidéo", "API"],
|
|
1861
|
+
"models": ["OpenAI"],
|
|
1862
|
+
"date": "2025-10-08",
|
|
1863
|
+
"OpenSource": false,
|
|
1864
|
+
"size": 0,
|
|
1865
|
+
"vram": 0
|
|
1866
|
+
},
|
|
1867
|
+
{
|
|
1868
|
+
"name": "api_ltxv_text_to_video",
|
|
1869
|
+
"title": "LTX-2 : Texte en vidéo",
|
|
1870
|
+
"description": "Générez des vidéos de haute qualité à partir de suggestions textuelles avec Lightricks LTX-2 et audio synchronisé. Prise en charge jusqu'à 4K à 50fps avec les modes Rapide, Pro et Ultra pour divers besoins de production.",
|
|
1871
|
+
"mediaType": "image",
|
|
1872
|
+
"mediaSubtype": "webp",
|
|
1873
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
1874
|
+
"models": ["LTX-2", "Lightricks"],
|
|
1875
|
+
"date": "2025-10-28",
|
|
1876
|
+
"OpenSource": false,
|
|
1877
|
+
"size": 0,
|
|
1878
|
+
"vram": 0
|
|
1879
|
+
},
|
|
1880
|
+
{
|
|
1881
|
+
"name": "api_ltxv_image_to_video",
|
|
1882
|
+
"title": "LTX-2 : Image vers Vidéo",
|
|
1883
|
+
"description": "Transformez des images statiques en vidéos dynamiques avec LTX-2 Pro. Générez des séquences cinématographiques avec mouvement naturel, audio synchronisé et prise en charge jusqu'à 4K à 50 ips.",
|
|
1884
|
+
"mediaType": "image",
|
|
1885
|
+
"mediaSubtype": "webp",
|
|
1886
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1887
|
+
"models": ["LTX-2", "Lightricks"],
|
|
1888
|
+
"date": "2025-10-28",
|
|
1889
|
+
"OpenSource": false,
|
|
1890
|
+
"size": 0,
|
|
1891
|
+
"vram": 0
|
|
1892
|
+
},
|
|
1893
|
+
{
|
|
1894
|
+
"name": "api_wan_text_to_video",
|
|
1895
|
+
"title": "Wan2.5: Texte vers Vidéo",
|
|
1896
|
+
"description": "Générez des vidéos avec audio synchronisé, mouvement amélioré et qualité supérieure.",
|
|
1897
|
+
"mediaType": "image",
|
|
1898
|
+
"mediaSubtype": "webp",
|
|
1899
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1900
|
+
"models": ["Wan2.5", "Wan"],
|
|
1901
|
+
"date": "2025-09-27",
|
|
1902
|
+
"tutorialUrl": "",
|
|
1903
|
+
"OpenSource": false,
|
|
1904
|
+
"size": 0,
|
|
1905
|
+
"vram": 0
|
|
1906
|
+
},
|
|
1907
|
+
{
|
|
1908
|
+
"name": "api_wan_image_to_video",
|
|
1909
|
+
"title": "Wan2.5: Image vers Vidéo",
|
|
1910
|
+
"description": "Transformez des images en vidéos avec audio synchronisé, mouvement amélioré et qualité supérieure.",
|
|
1911
|
+
"mediaType": "image",
|
|
1912
|
+
"mediaSubtype": "webp",
|
|
1913
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1914
|
+
"models": ["Wan2.5", "Wan"],
|
|
1915
|
+
"date": "2025-09-27",
|
|
1916
|
+
"tutorialUrl": "",
|
|
1917
|
+
"OpenSource": false,
|
|
1918
|
+
"size": 0,
|
|
1919
|
+
"vram": 0
|
|
1920
|
+
},
|
|
1921
|
+
{
|
|
1922
|
+
"name": "api_kling_i2v",
|
|
1923
|
+
"title": "Kling: Image vers Vidéo",
|
|
1924
|
+
"description": "Générer des vidéos avec une excellente adhérence aux prompts pour les actions, expressions et mouvements de caméra en utilisant Kling.",
|
|
1925
|
+
"mediaType": "image",
|
|
1926
|
+
"mediaSubtype": "webp",
|
|
1927
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1928
|
+
"models": ["Kling"],
|
|
1929
|
+
"date": "2025-03-01",
|
|
1930
|
+
"tutorialUrl": "",
|
|
1931
|
+
"OpenSource": false,
|
|
1932
|
+
"size": 0,
|
|
1933
|
+
"vram": 0
|
|
1934
|
+
},
|
|
1935
|
+
{
|
|
1936
|
+
"name": "api_kling_effects",
|
|
1937
|
+
"title": "Kling: Effets Vidéo",
|
|
1938
|
+
"description": "Générer des vidéos dynamiques en appliquant des effets visuels aux images en utilisant Kling.",
|
|
1939
|
+
"mediaType": "image",
|
|
1940
|
+
"mediaSubtype": "webp",
|
|
1941
|
+
"tags": ["Vidéo", "API"],
|
|
1942
|
+
"models": ["Kling"],
|
|
1943
|
+
"date": "2025-03-01",
|
|
1944
|
+
"tutorialUrl": "",
|
|
1945
|
+
"OpenSource": false,
|
|
1946
|
+
"size": 0,
|
|
1947
|
+
"vram": 0
|
|
1948
|
+
},
|
|
1949
|
+
{
|
|
1950
|
+
"name": "api_kling_flf",
|
|
1951
|
+
"title": "Kling: FLF2V",
|
|
1952
|
+
"description": "Générer des vidéos en contrôlant les première et dernière images.",
|
|
1953
|
+
"mediaType": "image",
|
|
1954
|
+
"mediaSubtype": "webp",
|
|
1955
|
+
"tags": ["Vidéo", "API", "FLF2V"],
|
|
1956
|
+
"models": ["Kling"],
|
|
1957
|
+
"date": "2025-03-01",
|
|
1958
|
+
"tutorialUrl": "",
|
|
1959
|
+
"OpenSource": false,
|
|
1960
|
+
"size": 0,
|
|
1961
|
+
"vram": 0
|
|
1962
|
+
},
|
|
1963
|
+
{
|
|
1964
|
+
"name": "api_vidu_text_to_video",
|
|
1965
|
+
"title": "Vidu: Texte vers Vidéo",
|
|
1966
|
+
"description": "Générer des vidéos 1080p de haute qualité à partir de prompts textuels avec un contrôle ajustable de l'amplitude des mouvements et de la durée en utilisant le modèle AI avancé de Vidu.",
|
|
1967
|
+
"mediaType": "image",
|
|
1968
|
+
"mediaSubtype": "webp",
|
|
1969
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
1970
|
+
"models": ["Vidu"],
|
|
1971
|
+
"date": "2025-08-23",
|
|
1972
|
+
"tutorialUrl": "",
|
|
1973
|
+
"OpenSource": false,
|
|
1974
|
+
"size": 0,
|
|
1975
|
+
"vram": 0
|
|
1976
|
+
},
|
|
1977
|
+
{
|
|
1978
|
+
"name": "api_vidu_image_to_video",
|
|
1979
|
+
"title": "Vidu: Image vers Vidéo",
|
|
1980
|
+
"description": "Transformer des images statiques en vidéos 1080p dynamiques avec un contrôle précis du mouvement et une amplitude de mouvement personnalisable en utilisant Vidu.",
|
|
1981
|
+
"mediaType": "image",
|
|
1982
|
+
"mediaSubtype": "webp",
|
|
1983
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
1984
|
+
"models": ["Vidu"],
|
|
1985
|
+
"date": "2025-08-23",
|
|
1986
|
+
"tutorialUrl": "",
|
|
1987
|
+
"OpenSource": false,
|
|
1988
|
+
"size": 0,
|
|
1989
|
+
"vram": 0
|
|
1990
|
+
},
|
|
1991
|
+
{
|
|
1992
|
+
"name": "api_vidu_reference_to_video",
|
|
1993
|
+
"title": "Vidu: Référence vers Vidéo",
|
|
1994
|
+
"description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
|
|
1995
|
+
"mediaType": "image",
|
|
1996
|
+
"mediaSubtype": "webp",
|
|
1997
|
+
"tags": ["Vidéo", "Image vers vidéo", "API"],
|
|
1998
|
+
"models": ["Vidu"],
|
|
1999
|
+
"date": "2025-08-23",
|
|
2000
|
+
"tutorialUrl": "",
|
|
2001
|
+
"OpenSource": false,
|
|
2002
|
+
"size": 0,
|
|
2003
|
+
"vram": 0
|
|
2004
|
+
},
|
|
2005
|
+
{
|
|
2006
|
+
"name": "api_vidu_start_end_to_video",
|
|
2007
|
+
"title": "Vidu: Début-Fin vers Vidéo",
|
|
2008
|
+
"description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
|
|
2009
|
+
"mediaType": "image",
|
|
2010
|
+
"mediaSubtype": "webp",
|
|
2011
|
+
"tags": ["Vidéo", "API", "FLF2V"],
|
|
2012
|
+
"models": ["Vidu"],
|
|
2013
|
+
"date": "2025-08-23",
|
|
2014
|
+
"tutorialUrl": "",
|
|
2015
|
+
"OpenSource": false,
|
|
2016
|
+
"size": 0,
|
|
2017
|
+
"vram": 0
|
|
2018
|
+
},
|
|
2019
|
+
{
|
|
2020
|
+
"name": "api_bytedance_text_to_video",
|
|
2021
|
+
"title": "ByteDance: Texte vers Vidéo",
|
|
2022
|
+
"description": "Générez des vidéos de haute qualité directement à partir de prompts textuels avec le modèle Seedance de ByteDance. Prend en charge plusieurs résolutions et ratios d'aspect avec un mouvement naturel et une qualité cinématographique.",
|
|
2023
|
+
"mediaType": "image",
|
|
2024
|
+
"mediaSubtype": "webp",
|
|
2025
|
+
"tags": ["Vidéo", "API", "Texte vers vidéo"],
|
|
2026
|
+
"models": ["ByteDance"],
|
|
2027
|
+
"date": "2025-10-6",
|
|
2028
|
+
"tutorialUrl": "",
|
|
2029
|
+
"OpenSource": false,
|
|
2030
|
+
"size": 0,
|
|
2031
|
+
"vram": 0
|
|
2032
|
+
},
|
|
2033
|
+
{
|
|
2034
|
+
"name": "api_bytedance_image_to_video",
|
|
2035
|
+
"title": "ByteDance: Image vers Vidéo",
|
|
2036
|
+
"description": "Transformez des images statiques en vidéos dynamiques avec le modèle Seedance de ByteDance. Analyse la structure de l'image et génère un mouvement naturel avec un style visuel cohérent et des séquences vidéo cohérentes.",
|
|
2037
|
+
"mediaType": "image",
|
|
2038
|
+
"mediaSubtype": "webp",
|
|
2039
|
+
"tags": ["Vidéo", "API", "Image vers vidéo"],
|
|
2040
|
+
"models": ["ByteDance"],
|
|
2041
|
+
"date": "2025-10-6",
|
|
2042
|
+
"tutorialUrl": "",
|
|
2043
|
+
"OpenSource": false,
|
|
2044
|
+
"size": 0,
|
|
2045
|
+
"vram": 0
|
|
2046
|
+
},
|
|
2047
|
+
{
|
|
2048
|
+
"name": "api_bytedance_flf2v",
|
|
2049
|
+
"title": "ByteDance: Début-Fin vers Vidéo",
|
|
2050
|
+
"description": "Générez des transitions vidéo cinématographiques entre les images de début et de fin avec un mouvement fluide, une cohérence de scène et une finition professionnelle avec le modèle Seedance de ByteDance.",
|
|
2051
|
+
"mediaType": "image",
|
|
2052
|
+
"mediaSubtype": "webp",
|
|
2053
|
+
"tags": ["Vidéo", "API", "FLF2V"],
|
|
2054
|
+
"models": ["ByteDance"],
|
|
2055
|
+
"date": "2025-10-6",
|
|
2056
|
+
"tutorialUrl": "",
|
|
2057
|
+
"OpenSource": false,
|
|
2058
|
+
"size": 0,
|
|
2059
|
+
"vram": 0
|
|
2060
|
+
},
|
|
2061
|
+
{
|
|
2062
|
+
"name": "api_topaz_video_enhance",
|
|
2063
|
+
"title": "Topaz Amélioration vidéo",
|
|
2064
|
+
"description": "Améliorez les vidéos avec Topaz AI. Prend en charge l’upscaling de résolution avec le modèle Starlight (Astra) Fast et l’interpolation d’images avec le modèle apo-8.",
|
|
2065
|
+
"mediaType": "image",
|
|
2066
|
+
"mediaSubtype": "webp",
|
|
2067
|
+
"thumbnailVariant": "compareSlider",
|
|
2068
|
+
"tags": ["Vidéo", "API", "Amélioration"],
|
|
2069
|
+
"models": ["Topaz"],
|
|
2070
|
+
"date": "2025-11-25",
|
|
2071
|
+
"OpenSource": false,
|
|
2072
|
+
"size": 0,
|
|
2073
|
+
"vram": 0
|
|
2074
|
+
},
|
|
2075
|
+
{
|
|
2076
|
+
"name": "api_luma_i2v",
|
|
2077
|
+
"title": "Luma: Image vers Vidéo",
|
|
2078
|
+
"description": "Take static images and instantly create magical high quality animations.",
|
|
2079
|
+
"mediaType": "image",
|
|
2080
|
+
"mediaSubtype": "webp",
|
|
2081
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2082
|
+
"models": ["Luma"],
|
|
2083
|
+
"date": "2025-03-01",
|
|
2084
|
+
"tutorialUrl": "",
|
|
2085
|
+
"OpenSource": false,
|
|
2086
|
+
"size": 0,
|
|
2087
|
+
"vram": 0
|
|
2088
|
+
},
|
|
2089
|
+
{
|
|
2090
|
+
"name": "api_luma_t2v",
|
|
2091
|
+
"title": "Luma: Texte vers Vidéo",
|
|
2092
|
+
"description": "High-quality videos can be generated using simple prompts.",
|
|
2093
|
+
"mediaType": "image",
|
|
2094
|
+
"mediaSubtype": "webp",
|
|
2095
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
2096
|
+
"models": ["Luma"],
|
|
2097
|
+
"date": "2025-03-01",
|
|
2098
|
+
"tutorialUrl": "",
|
|
2099
|
+
"OpenSource": false,
|
|
2100
|
+
"size": 0,
|
|
2101
|
+
"vram": 0
|
|
2102
|
+
},
|
|
2103
|
+
{
|
|
2104
|
+
"name": "api_moonvalley_text_to_video",
|
|
2105
|
+
"title": "Moonvalley: Texte vers Vidéo",
|
|
2106
|
+
"description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
|
|
2107
|
+
"mediaType": "image",
|
|
2108
|
+
"mediaSubtype": "webp",
|
|
2109
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
2110
|
+
"models": ["Moonvalley"],
|
|
2111
|
+
"date": "2025-03-01",
|
|
2112
|
+
"tutorialUrl": "",
|
|
2113
|
+
"OpenSource": false,
|
|
2114
|
+
"size": 0,
|
|
2115
|
+
"vram": 0
|
|
2116
|
+
},
|
|
2117
|
+
{
|
|
2118
|
+
"name": "api_moonvalley_image_to_video",
|
|
2119
|
+
"title": "Moonvalley: Image vers Vidéo",
|
|
2120
|
+
"description": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
|
|
2121
|
+
"mediaType": "image",
|
|
2122
|
+
"mediaSubtype": "webp",
|
|
2123
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2124
|
+
"models": ["Moonvalley"],
|
|
2125
|
+
"date": "2025-03-01",
|
|
2126
|
+
"tutorialUrl": "",
|
|
2127
|
+
"OpenSource": false,
|
|
2128
|
+
"size": 0,
|
|
2129
|
+
"vram": 0
|
|
2130
|
+
},
|
|
2131
|
+
{
|
|
2132
|
+
"name": "api_moonvalley_video_to_video_motion_transfer",
|
|
2133
|
+
"title": "Moonvalley: Transfert de Mouvement",
|
|
2134
|
+
"description": "Apply motion from one video to another.",
|
|
2135
|
+
"mediaType": "image",
|
|
2136
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2137
|
+
"mediaSubtype": "webp",
|
|
2138
|
+
"tags": ["Vidéo vers vidéo", "Vidéo", "API"],
|
|
2139
|
+
"models": ["Moonvalley"],
|
|
2140
|
+
"date": "2025-03-01",
|
|
2141
|
+
"tutorialUrl": "",
|
|
2142
|
+
"OpenSource": false,
|
|
2143
|
+
"size": 0,
|
|
2144
|
+
"vram": 0
|
|
2145
|
+
},
|
|
2146
|
+
{
|
|
2147
|
+
"name": "api_moonvalley_video_to_video_pose_control",
|
|
2148
|
+
"title": "Moonvalley: Contrôle de Pose",
|
|
2149
|
+
"description": "Apply human pose and movement from one video to another.",
|
|
2150
|
+
"mediaType": "image",
|
|
2151
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2152
|
+
"mediaSubtype": "webp",
|
|
2153
|
+
"tags": ["Vidéo vers vidéo", "Vidéo", "API"],
|
|
2154
|
+
"models": ["Moonvalley"],
|
|
2155
|
+
"date": "2025-03-01",
|
|
2156
|
+
"tutorialUrl": "",
|
|
2157
|
+
"OpenSource": false,
|
|
2158
|
+
"size": 0,
|
|
2159
|
+
"vram": 0
|
|
2160
|
+
},
|
|
2161
|
+
{
|
|
2162
|
+
"name": "api_hailuo_minimax_video",
|
|
2163
|
+
"title": "MiniMax: Vidéo",
|
|
2164
|
+
"description": "Générez des vidéos de haute qualité à partir de prompts textuels avec contrôle optionnel de la première image en utilisant le modèle MiniMax Hailuo-02. Prend en charge plusieurs résolutions (768P/1080P) et durées (6/10s) avec optimisation intelligente des prompts.",
|
|
2165
|
+
"mediaType": "image",
|
|
2166
|
+
"mediaSubtype": "webp",
|
|
2167
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
2168
|
+
"models": ["MiniMax"],
|
|
2169
|
+
"date": "2025-03-01",
|
|
2170
|
+
"tutorialUrl": "",
|
|
2171
|
+
"OpenSource": false,
|
|
2172
|
+
"size": 0,
|
|
2173
|
+
"vram": 0
|
|
2174
|
+
},
|
|
2175
|
+
{
|
|
2176
|
+
"name": "api_hailuo_minimax_t2v",
|
|
2177
|
+
"title": "MiniMax: Texte vers Vidéo",
|
|
2178
|
+
"description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
|
|
2179
|
+
"mediaType": "image",
|
|
2180
|
+
"mediaSubtype": "webp",
|
|
2181
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
2182
|
+
"models": ["MiniMax"],
|
|
2183
|
+
"date": "2025-03-01",
|
|
2184
|
+
"tutorialUrl": "",
|
|
2185
|
+
"OpenSource": false,
|
|
2186
|
+
"size": 0,
|
|
2187
|
+
"vram": 0
|
|
2188
|
+
},
|
|
2189
|
+
{
|
|
2190
|
+
"name": "api_hailuo_minimax_i2v",
|
|
2191
|
+
"title": "MiniMax: Image vers Vidéo",
|
|
2192
|
+
"description": "Generate refined videos from images and text with CGI integration using MiniMax.",
|
|
2193
|
+
"mediaType": "image",
|
|
2194
|
+
"mediaSubtype": "webp",
|
|
2195
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2196
|
+
"models": ["MiniMax"],
|
|
2197
|
+
"date": "2025-03-01",
|
|
2198
|
+
"tutorialUrl": "",
|
|
2199
|
+
"OpenSource": false,
|
|
2200
|
+
"size": 0,
|
|
2201
|
+
"vram": 0
|
|
2202
|
+
},
|
|
2203
|
+
{
|
|
2204
|
+
"name": "api_pixverse_i2v",
|
|
2205
|
+
"title": "PixVerse: Image vers Vidéo",
|
|
2206
|
+
"description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
|
2207
|
+
"mediaType": "image",
|
|
2208
|
+
"mediaSubtype": "webp",
|
|
2209
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2210
|
+
"models": ["PixVerse"],
|
|
2211
|
+
"date": "2025-03-01",
|
|
2212
|
+
"tutorialUrl": "",
|
|
2213
|
+
"OpenSource": false,
|
|
2214
|
+
"size": 0,
|
|
2215
|
+
"vram": 0
|
|
2216
|
+
},
|
|
2217
|
+
{
|
|
2218
|
+
"name": "api_pixverse_template_i2v",
|
|
2219
|
+
"title": "PixVerse Templates: Image vers Vidéo",
|
|
2220
|
+
"description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
|
2221
|
+
"mediaType": "image",
|
|
2222
|
+
"mediaSubtype": "webp",
|
|
2223
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2224
|
+
"models": ["PixVerse"],
|
|
2225
|
+
"date": "2025-03-01",
|
|
2226
|
+
"tutorialUrl": "",
|
|
2227
|
+
"OpenSource": false,
|
|
2228
|
+
"size": 0,
|
|
2229
|
+
"vram": 0
|
|
2230
|
+
},
|
|
2231
|
+
{
|
|
2232
|
+
"name": "api_pixverse_t2v",
|
|
2233
|
+
"title": "PixVerse: Texte vers Vidéo",
|
|
2234
|
+
"description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
|
|
2235
|
+
"mediaType": "image",
|
|
2236
|
+
"mediaSubtype": "webp",
|
|
2237
|
+
"tags": ["Texte vers vidéo", "Vidéo", "API"],
|
|
2238
|
+
"models": ["PixVerse"],
|
|
2239
|
+
"date": "2025-03-01",
|
|
2240
|
+
"tutorialUrl": "",
|
|
2241
|
+
"OpenSource": false,
|
|
2242
|
+
"size": 0,
|
|
2243
|
+
"vram": 0
|
|
2244
|
+
},
|
|
2245
|
+
{
|
|
2246
|
+
"name": "api_runway_gen3a_turbo_image_to_video",
|
|
2247
|
+
"title": "Runway: Gen3a Turbo Image vers Vidéo",
|
|
2248
|
+
"description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
|
|
2249
|
+
"mediaType": "image",
|
|
2250
|
+
"mediaSubtype": "webp",
|
|
2251
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2252
|
+
"models": ["Runway"],
|
|
2253
|
+
"date": "2025-03-01",
|
|
2254
|
+
"tutorialUrl": "",
|
|
2255
|
+
"OpenSource": false,
|
|
2256
|
+
"size": 0,
|
|
2257
|
+
"vram": 0
|
|
2258
|
+
},
|
|
2259
|
+
{
|
|
2260
|
+
"name": "api_runway_gen4_turo_image_to_video",
|
|
2261
|
+
"title": "Runway: Gen4 Turbo Image vers Vidéo",
|
|
2262
|
+
"description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
|
|
2263
|
+
"mediaType": "image",
|
|
2264
|
+
"mediaSubtype": "webp",
|
|
2265
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2266
|
+
"models": ["Runway"],
|
|
2267
|
+
"date": "2025-03-01",
|
|
2268
|
+
"tutorialUrl": "",
|
|
2269
|
+
"OpenSource": false,
|
|
2270
|
+
"size": 0,
|
|
2271
|
+
"vram": 0
|
|
2272
|
+
},
|
|
2273
|
+
{
|
|
2274
|
+
"name": "api_runway_first_last_frame",
|
|
2275
|
+
"title": "Runway: Première-Dernière Image vers Vidéo",
|
|
2276
|
+
"description": "Generate smooth video transitions between two keyframes with Runway's precision.",
|
|
2277
|
+
"mediaType": "image",
|
|
2278
|
+
"mediaSubtype": "webp",
|
|
2279
|
+
"tags": ["Vidéo", "API", "FLF2V"],
|
|
2280
|
+
"models": ["Runway"],
|
|
2281
|
+
"date": "2025-03-01",
|
|
2282
|
+
"tutorialUrl": "",
|
|
2283
|
+
"OpenSource": false,
|
|
2284
|
+
"size": 0,
|
|
2285
|
+
"vram": 0
|
|
2286
|
+
},
|
|
2287
|
+
{
|
|
2288
|
+
"name": "api_pika_i2v",
|
|
2289
|
+
"title": "Pika: Image vers Vidéo",
|
|
2290
|
+
"description": "Generate smooth animated videos from single static images using Pika AI.",
|
|
2291
|
+
"mediaType": "image",
|
|
2292
|
+
"mediaSubtype": "webp",
|
|
2293
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2294
|
+
"models": ["Pika"],
|
|
2295
|
+
"date": "2025-03-01",
|
|
2296
|
+
"tutorialUrl": "",
|
|
2297
|
+
"OpenSource": false,
|
|
2298
|
+
"size": 0,
|
|
2299
|
+
"vram": 0
|
|
2300
|
+
},
|
|
2301
|
+
{
|
|
2302
|
+
"name": "api_pika_scene",
|
|
2303
|
+
"title": "Pika Scenes: Images vers Vidéo",
|
|
2304
|
+
"description": "Generate videos that incorporate multiple input images using Pika Scenes.",
|
|
2305
|
+
"mediaType": "image",
|
|
2306
|
+
"mediaSubtype": "webp",
|
|
2307
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2308
|
+
"models": ["Pika"],
|
|
2309
|
+
"date": "2025-03-01",
|
|
2310
|
+
"tutorialUrl": "",
|
|
2311
|
+
"OpenSource": false,
|
|
2312
|
+
"size": 0,
|
|
2313
|
+
"vram": 0
|
|
2314
|
+
},
|
|
2315
|
+
{
|
|
2316
|
+
"name": "api_veo2_i2v",
|
|
2317
|
+
"title": "Veo2: Image vers Vidéo",
|
|
2318
|
+
"description": "Generate videos from images using Google Veo2 API.",
|
|
2319
|
+
"mediaType": "image",
|
|
2320
|
+
"mediaSubtype": "webp",
|
|
2321
|
+
"tags": ["Image vers vidéo", "Vidéo", "API"],
|
|
2322
|
+
"models": ["Veo", "Google"],
|
|
2323
|
+
"date": "2025-03-01",
|
|
2324
|
+
"tutorialUrl": "",
|
|
2325
|
+
"OpenSource": false,
|
|
2326
|
+
"size": 0,
|
|
2327
|
+
"vram": 0
|
|
2328
|
+
},
|
|
2329
|
+
{
|
|
2330
|
+
"name": "api_veo3",
|
|
2331
|
+
"title": "Veo3: Image vers Vidéo",
|
|
2332
|
+
"description": "Generate high-quality 8-second videos from text prompts or images using Google's advanced Veo 3 API. Features audio generation, prompt enhancement, and dual model options for speed or quality.",
|
|
2333
|
+
"mediaType": "image",
|
|
2334
|
+
"mediaSubtype": "webp",
|
|
2335
|
+
"tags": ["Image vers vidéo", "Texte vers vidéo", "API"],
|
|
2336
|
+
"models": ["Veo", "Google"],
|
|
2337
|
+
"date": "2025-03-01",
|
|
2338
|
+
"tutorialUrl": "",
|
|
2339
|
+
"OpenSource": false,
|
|
2340
|
+
"size": 0,
|
|
2341
|
+
"vram": 0
|
|
2342
|
+
}
|
|
2343
|
+
]
|
|
2344
|
+
},
|
|
2345
|
+
{
|
|
2346
|
+
"moduleName": "default",
|
|
2347
|
+
"type": "image",
|
|
2348
|
+
"category": "CLOSED SOURCE MODELS",
|
|
2349
|
+
"icon": "icon-[lucide--box]",
|
|
2350
|
+
"title": "3D API",
|
|
2351
|
+
"templates": [
|
|
2352
|
+
{
|
|
2353
|
+
"name": "api_rodin_gen2",
|
|
2354
|
+
"title": "Rodin: Gen-2 Image vers Modèle",
|
|
2355
|
+
"description": "Générez des modèles 3D détaillés avec une qualité de maillage 4X à partir de photos avec Rodin Gen2",
|
|
2356
|
+
"mediaType": "image",
|
|
2357
|
+
"mediaSubtype": "webp",
|
|
2358
|
+
"tags": ["Image vers 3D", "3D", "API"],
|
|
2359
|
+
"models": ["Rodin"],
|
|
2360
|
+
"date": "2025-09-27",
|
|
2361
|
+
"tutorialUrl": "",
|
|
2362
|
+
"OpenSource": false,
|
|
2363
|
+
"size": 0,
|
|
2364
|
+
"vram": 0
|
|
2365
|
+
},
|
|
2366
|
+
{
|
|
2367
|
+
"name": "api_rodin_image_to_model",
|
|
2368
|
+
"title": "Rodin: Image vers Modèle",
|
|
2369
|
+
"description": "Generate detailed 3D models from single photos using Rodin AI.",
|
|
2370
|
+
"mediaType": "image",
|
|
2371
|
+
"thumbnailVariant": "compareSlider",
|
|
2372
|
+
"mediaSubtype": "webp",
|
|
2373
|
+
"tags": ["Image vers 3D", "3D", "API"],
|
|
2374
|
+
"models": ["Rodin"],
|
|
2375
|
+
"date": "2025-03-01",
|
|
2376
|
+
"tutorialUrl": "",
|
|
2377
|
+
"OpenSource": false,
|
|
2378
|
+
"size": 0,
|
|
2379
|
+
"vram": 0
|
|
2380
|
+
},
|
|
2381
|
+
{
|
|
2382
|
+
"name": "api_rodin_multiview_to_model",
|
|
2383
|
+
"title": "Rodin: Multivue vers Modèle",
|
|
2384
|
+
"description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
|
|
2385
|
+
"mediaType": "image",
|
|
2386
|
+
"thumbnailVariant": "compareSlider",
|
|
2387
|
+
"mediaSubtype": "webp",
|
|
2388
|
+
"tags": ["Image vers 3D", "3D", "API"],
|
|
2389
|
+
"models": ["Rodin"],
|
|
2390
|
+
"date": "2025-03-01",
|
|
2391
|
+
"tutorialUrl": "",
|
|
2392
|
+
"OpenSource": false,
|
|
2393
|
+
"size": 0,
|
|
2394
|
+
"vram": 0
|
|
2395
|
+
},
|
|
2396
|
+
{
|
|
2397
|
+
"name": "api_tripo_text_to_model",
|
|
2398
|
+
"title": "Tripo: Texte vers Modèle",
|
|
2399
|
+
"description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
|
|
2400
|
+
"mediaType": "image",
|
|
2401
|
+
"mediaSubtype": "webp",
|
|
2402
|
+
"tags": ["Texte vers modèle", "3D", "API"],
|
|
2403
|
+
"models": ["Tripo"],
|
|
2404
|
+
"date": "2025-03-01",
|
|
2405
|
+
"tutorialUrl": "",
|
|
2406
|
+
"OpenSource": false,
|
|
2407
|
+
"size": 0,
|
|
2408
|
+
"vram": 0
|
|
2409
|
+
},
|
|
2410
|
+
{
|
|
2411
|
+
"name": "api_tripo_image_to_model",
|
|
2412
|
+
"title": "Tripo: Image vers Modèle",
|
|
2413
|
+
"description": "Generate professional 3D assets from 2D images using Tripo engine.",
|
|
2414
|
+
"mediaType": "image",
|
|
2415
|
+
"thumbnailVariant": "compareSlider",
|
|
2416
|
+
"mediaSubtype": "webp",
|
|
2417
|
+
"tags": ["Image vers 3D", "3D", "API"],
|
|
2418
|
+
"models": ["Tripo"],
|
|
2419
|
+
"date": "2025-03-01",
|
|
2420
|
+
"tutorialUrl": "",
|
|
2421
|
+
"OpenSource": false,
|
|
2422
|
+
"size": 0,
|
|
2423
|
+
"vram": 0
|
|
2424
|
+
},
|
|
2425
|
+
{
|
|
2426
|
+
"name": "api_tripo_multiview_to_model",
|
|
2427
|
+
"title": "Tripo: Multivue vers Modèle",
|
|
2428
|
+
"description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
|
|
2429
|
+
"mediaType": "image",
|
|
2430
|
+
"thumbnailVariant": "compareSlider",
|
|
2431
|
+
"mediaSubtype": "webp",
|
|
2432
|
+
"tags": ["Image vers 3D", "3D", "API"],
|
|
2433
|
+
"models": ["Tripo"],
|
|
2434
|
+
"date": "2025-03-01",
|
|
2435
|
+
"tutorialUrl": "",
|
|
2436
|
+
"OpenSource": false,
|
|
2437
|
+
"size": 0,
|
|
2438
|
+
"vram": 0
|
|
2439
|
+
}
|
|
2440
|
+
]
|
|
2441
|
+
},
|
|
2442
|
+
{
|
|
2443
|
+
"moduleName": "default",
|
|
2444
|
+
"type": "audio",
|
|
2445
|
+
"category": "CLOSED SOURCE MODELS",
|
|
2446
|
+
"icon": "icon-[lucide--volume-2]",
|
|
2447
|
+
"title": "Audio API",
|
|
2448
|
+
"templates": [
|
|
2449
|
+
{
|
|
2450
|
+
"name": "api_stability_ai_text_to_audio",
|
|
2451
|
+
"title": "Stability AI : Texte vers Audio",
|
|
2452
|
+
"description": "Générez de la musique à partir de texte avec Stable Audio 2.5. Créez des pistes de plusieurs minutes en quelques secondes.",
|
|
2453
|
+
"mediaType": "audio",
|
|
2454
|
+
"mediaSubtype": "mp3",
|
|
2455
|
+
"tags": ["Texte vers audio", "Audio", "API"],
|
|
2456
|
+
"date": "2025-09-09",
|
|
2457
|
+
"models": ["Stability", "Stable Audio"],
|
|
2458
|
+
"OpenSource": false,
|
|
2459
|
+
"size": 0,
|
|
2460
|
+
"vram": 0
|
|
2461
|
+
},
|
|
2462
|
+
{
|
|
2463
|
+
"name": "api_stability_ai_audio_to_audio",
|
|
2464
|
+
"title": "Stability AI : Audio vers Audio",
|
|
2465
|
+
"description": "Transformez de l'audio en de nouvelles compositions avec Stable Audio 2.5. Téléversez un audio et l'IA crée des pistes complètes.",
|
|
2466
|
+
"mediaType": "audio",
|
|
2467
|
+
"mediaSubtype": "mp3",
|
|
2468
|
+
"tags": ["Audio vers audio", "Audio", "API"],
|
|
2469
|
+
"date": "2025-09-09",
|
|
2470
|
+
"models": ["Stability", "Stable Audio"],
|
|
2471
|
+
"OpenSource": false,
|
|
2472
|
+
"size": 0,
|
|
2473
|
+
"vram": 0
|
|
2474
|
+
},
|
|
2475
|
+
{
|
|
2476
|
+
"name": "api_stability_ai_audio_inpaint",
|
|
2477
|
+
"title": "Stability AI : Inpainting Audio",
|
|
2478
|
+
"description": "Complétez ou prolongez des pistes audio avec Stable Audio 2.5. Téléversez un audio et l'IA génère le reste.",
|
|
2479
|
+
"mediaType": "audio",
|
|
2480
|
+
"mediaSubtype": "mp3",
|
|
2481
|
+
"tags": ["Audio vers audio", "Audio", "API"],
|
|
2482
|
+
"date": "2025-09-09",
|
|
2483
|
+
"models": ["Stability", "Stable Audio"],
|
|
2484
|
+
"OpenSource": false,
|
|
2485
|
+
"size": 0,
|
|
2486
|
+
"vram": 0
|
|
2487
|
+
}
|
|
2488
|
+
]
|
|
2489
|
+
},
|
|
2490
|
+
{
|
|
2491
|
+
"moduleName": "default",
|
|
2492
|
+
"type": "image",
|
|
2493
|
+
"category": "CLOSED SOURCE MODELS",
|
|
2494
|
+
"icon": "icon-[lucide--message-square-text]",
|
|
2495
|
+
"title": "LLM API",
|
|
2496
|
+
"templates": [
|
|
2497
|
+
{
|
|
2498
|
+
"name": "api_openai_chat",
|
|
2499
|
+
"title": "OpenAI: Chat",
|
|
2500
|
+
"description": "Interagissez avec les modèles de langage avancés d'OpenAI pour des conversations intelligentes.",
|
|
2501
|
+
"mediaType": "image",
|
|
2502
|
+
"mediaSubtype": "webp",
|
|
2503
|
+
"tags": ["LLM", "API"],
|
|
2504
|
+
"models": ["OpenAI"],
|
|
2505
|
+
"date": "2025-03-01",
|
|
2506
|
+
"tutorialUrl": "",
|
|
2507
|
+
"OpenSource": false,
|
|
2508
|
+
"size": 0,
|
|
2509
|
+
"vram": 0
|
|
2510
|
+
},
|
|
2511
|
+
{
|
|
2512
|
+
"name": "api_google_gemini",
|
|
2513
|
+
"title": "Google Gemini: Chat",
|
|
2514
|
+
"description": "Découvrez l'IA multimodale de Google avec les capacités de raisonnement de Gemini.",
|
|
2515
|
+
"mediaType": "image",
|
|
2516
|
+
"mediaSubtype": "webp",
|
|
2517
|
+
"tags": ["LLM", "API"],
|
|
2518
|
+
"models": ["Google Gemini", "Google"],
|
|
2519
|
+
"date": "2025-03-01",
|
|
2520
|
+
"tutorialUrl": "",
|
|
2521
|
+
"OpenSource": false,
|
|
2522
|
+
"size": 0,
|
|
2523
|
+
"vram": 0
|
|
2524
|
+
}
|
|
2525
|
+
]
|
|
2526
|
+
}
|
|
2527
|
+
]
|