comfyui-workflow-templates 0.1.64__py3-none-any.whl → 0.1.66__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of comfyui-workflow-templates might be problematic. Click here for more details.
- comfyui_workflow_templates/templates/image_chroma_text_to_image.json +229 -277
- comfyui_workflow_templates/templates/image_qwen_image.json +16 -16
- comfyui_workflow_templates/templates/image_qwen_image_controlnet_patch-1.webp +0 -0
- comfyui_workflow_templates/templates/image_qwen_image_controlnet_patch-2.webp +0 -0
- comfyui_workflow_templates/templates/image_qwen_image_controlnet_patch.json +1053 -0
- comfyui_workflow_templates/templates/image_qwen_image_instantx_controlnet-1.webp +0 -0
- comfyui_workflow_templates/templates/image_qwen_image_instantx_controlnet-2.webp +0 -0
- comfyui_workflow_templates/templates/image_qwen_image_instantx_controlnet.json +2048 -0
- comfyui_workflow_templates/templates/index.es.json +696 -1381
- comfyui_workflow_templates/templates/index.fr.json +676 -1319
- comfyui_workflow_templates/templates/index.ja.json +684 -1337
- comfyui_workflow_templates/templates/index.json +25 -35
- comfyui_workflow_templates/templates/index.ko.json +969 -1564
- comfyui_workflow_templates/templates/index.ru.json +623 -1265
- comfyui_workflow_templates/templates/index.zh-TW.json +694 -1347
- comfyui_workflow_templates/templates/index.zh.json +700 -1353
- {comfyui_workflow_templates-0.1.64.dist-info → comfyui_workflow_templates-0.1.66.dist-info}/METADATA +1 -1
- {comfyui_workflow_templates-0.1.64.dist-info → comfyui_workflow_templates-0.1.66.dist-info}/RECORD +24 -18
- /comfyui_workflow_templates/templates/{image_qwen_image_union_control-1.webp → image_qwen_image_union_control_lora-1.webp} +0 -0
- /comfyui_workflow_templates/templates/{image_qwen_image_union_control-3.webp → image_qwen_image_union_control_lora-3.webp} +0 -0
- /comfyui_workflow_templates/templates/{image_qwen_image_union_control.json → image_qwen_image_union_control_lora.json} +0 -0
- {comfyui_workflow_templates-0.1.64.dist-info → comfyui_workflow_templates-0.1.66.dist-info}/WHEEL +0 -0
- {comfyui_workflow_templates-0.1.64.dist-info → comfyui_workflow_templates-0.1.66.dist-info}/licenses/LICENSE +0 -0
- {comfyui_workflow_templates-0.1.64.dist-info → comfyui_workflow_templates-0.1.66.dist-info}/top_level.txt +0 -0
|
@@ -2,39 +2,29 @@
|
|
|
2
2
|
{
|
|
3
3
|
"moduleName": "default",
|
|
4
4
|
"category": "USE CASES",
|
|
5
|
-
"title": "
|
|
5
|
+
"title": "Basics",
|
|
6
6
|
"type": "image",
|
|
7
7
|
"templates": [
|
|
8
8
|
{
|
|
9
9
|
"name": "default",
|
|
10
|
-
"title": "Génération d'
|
|
10
|
+
"title": "Génération d'images",
|
|
11
11
|
"mediaType": "image",
|
|
12
12
|
"mediaSubtype": "webp",
|
|
13
|
-
"description": "
|
|
13
|
+
"description": "Générer des images à partir de prompts textuels.",
|
|
14
14
|
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
|
|
15
|
-
"tags": [
|
|
16
|
-
|
|
17
|
-
"Image"
|
|
18
|
-
],
|
|
19
|
-
"models": [
|
|
20
|
-
"SD1.5"
|
|
21
|
-
],
|
|
15
|
+
"tags": ["Texte vers Image", "Image"],
|
|
16
|
+
"models": ["SD1.5"],
|
|
22
17
|
"date": "2025-03-01"
|
|
23
18
|
},
|
|
24
19
|
{
|
|
25
20
|
"name": "image2image",
|
|
26
|
-
"title": "Image
|
|
21
|
+
"title": "Image vers Image",
|
|
27
22
|
"mediaType": "image",
|
|
28
23
|
"mediaSubtype": "webp",
|
|
29
|
-
"description": "
|
|
24
|
+
"description": "Transformer des images existantes en utilisant des prompts textuels.",
|
|
30
25
|
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
|
|
31
|
-
"tags": [
|
|
32
|
-
|
|
33
|
-
"Image"
|
|
34
|
-
],
|
|
35
|
-
"models": [
|
|
36
|
-
"SD1.5"
|
|
37
|
-
],
|
|
26
|
+
"tags": ["Image vers Image", "Image"],
|
|
27
|
+
"models": ["SD1.5"],
|
|
38
28
|
"date": "2025-03-01"
|
|
39
29
|
},
|
|
40
30
|
{
|
|
@@ -42,15 +32,10 @@
|
|
|
42
32
|
"title": "LoRA",
|
|
43
33
|
"mediaType": "image",
|
|
44
34
|
"mediaSubtype": "webp",
|
|
45
|
-
"description": "
|
|
35
|
+
"description": "Générer des images avec des modèles LoRA pour des styles ou sujets spécialisés.",
|
|
46
36
|
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
|
|
47
|
-
"tags": [
|
|
48
|
-
|
|
49
|
-
"Image"
|
|
50
|
-
],
|
|
51
|
-
"models": [
|
|
52
|
-
"SD1.5"
|
|
53
|
-
],
|
|
37
|
+
"tags": ["Texte vers Image", "Image"],
|
|
38
|
+
"models": ["SD1.5"],
|
|
54
39
|
"date": "2025-03-01"
|
|
55
40
|
},
|
|
56
41
|
{
|
|
@@ -58,33 +43,22 @@
|
|
|
58
43
|
"title": "LoRA Multiple",
|
|
59
44
|
"mediaType": "image",
|
|
60
45
|
"mediaSubtype": "webp",
|
|
61
|
-
"description": "
|
|
46
|
+
"description": "Générer des images en combinant plusieurs modèles LoRA.",
|
|
62
47
|
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
|
|
63
|
-
"tags": [
|
|
64
|
-
|
|
65
|
-
"Image",
|
|
66
|
-
"LoRA"
|
|
67
|
-
],
|
|
68
|
-
"models": [
|
|
69
|
-
"SD1.5"
|
|
70
|
-
],
|
|
48
|
+
"tags": ["Texte vers Image", "Image", "LoRA"],
|
|
49
|
+
"models": ["SD1.5"],
|
|
71
50
|
"date": "2025-03-01"
|
|
72
51
|
},
|
|
73
52
|
{
|
|
74
53
|
"name": "inpaint_example",
|
|
75
|
-
"title": "
|
|
54
|
+
"title": "Inpainting",
|
|
76
55
|
"mediaType": "image",
|
|
77
56
|
"mediaSubtype": "webp",
|
|
78
|
-
"description": "
|
|
57
|
+
"description": "Éditer des parties spécifiques d'images de manière transparente.",
|
|
79
58
|
"thumbnailVariant": "compareSlider",
|
|
80
59
|
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
|
|
81
|
-
"tags": [
|
|
82
|
-
|
|
83
|
-
"Image"
|
|
84
|
-
],
|
|
85
|
-
"models": [
|
|
86
|
-
"SD1.5"
|
|
87
|
-
],
|
|
60
|
+
"tags": ["Inpainting", "Image"],
|
|
61
|
+
"models": ["SD1.5"],
|
|
88
62
|
"date": "2025-03-01"
|
|
89
63
|
},
|
|
90
64
|
{
|
|
@@ -92,48 +66,33 @@
|
|
|
92
66
|
"title": "Outpainting",
|
|
93
67
|
"mediaType": "image",
|
|
94
68
|
"mediaSubtype": "webp",
|
|
95
|
-
"description": "
|
|
69
|
+
"description": "Étendre les images au-delà de leurs limites d'origine.",
|
|
96
70
|
"thumbnailVariant": "compareSlider",
|
|
97
71
|
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
|
|
98
|
-
"tags": [
|
|
99
|
-
|
|
100
|
-
"Image"
|
|
101
|
-
],
|
|
102
|
-
"models": [
|
|
103
|
-
"SD1.5"
|
|
104
|
-
],
|
|
72
|
+
"tags": ["Outpainting", "Image"],
|
|
73
|
+
"models": ["SD1.5"],
|
|
105
74
|
"date": "2025-03-01"
|
|
106
75
|
},
|
|
107
76
|
{
|
|
108
77
|
"name": "embedding_example",
|
|
109
|
-
"title": "
|
|
78
|
+
"title": "Embedding",
|
|
110
79
|
"mediaType": "image",
|
|
111
80
|
"mediaSubtype": "webp",
|
|
112
|
-
"description": "
|
|
81
|
+
"description": "Générer des images en utilisant l'inversion textuelle pour des styles cohérents.",
|
|
113
82
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
|
|
114
|
-
"tags": [
|
|
115
|
-
|
|
116
|
-
"Image"
|
|
117
|
-
],
|
|
118
|
-
"models": [
|
|
119
|
-
"SD1.5"
|
|
120
|
-
],
|
|
83
|
+
"tags": ["Embedding", "Image"],
|
|
84
|
+
"models": ["SD1.5"],
|
|
121
85
|
"date": "2025-03-01"
|
|
122
86
|
},
|
|
123
87
|
{
|
|
124
88
|
"name": "gligen_textbox_example",
|
|
125
|
-
"title": "
|
|
89
|
+
"title": "Gligen Textbox",
|
|
126
90
|
"mediaType": "image",
|
|
127
91
|
"mediaSubtype": "webp",
|
|
128
|
-
"description": "
|
|
92
|
+
"description": "Générer des images avec un placement précis d'objets en utilisant des boîtes de texte.",
|
|
129
93
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
|
|
130
|
-
"tags": [
|
|
131
|
-
|
|
132
|
-
"Image"
|
|
133
|
-
],
|
|
134
|
-
"models": [
|
|
135
|
-
"SD1.5"
|
|
136
|
-
],
|
|
94
|
+
"tags": ["Gligen", "Image"],
|
|
95
|
+
"models": ["SD1.5"],
|
|
137
96
|
"date": "2025-03-01"
|
|
138
97
|
}
|
|
139
98
|
]
|
|
@@ -145,21 +104,14 @@
|
|
|
145
104
|
"type": "image",
|
|
146
105
|
"templates": [
|
|
147
106
|
{
|
|
148
|
-
"name": "
|
|
149
|
-
"title": "
|
|
107
|
+
"name": "image_chroma_text_to_image",
|
|
108
|
+
"title": "Chroma texte vers image",
|
|
150
109
|
"mediaType": "image",
|
|
151
110
|
"mediaSubtype": "webp",
|
|
152
|
-
"description": "
|
|
153
|
-
"
|
|
154
|
-
"
|
|
155
|
-
|
|
156
|
-
"Image",
|
|
157
|
-
"Photorealism"
|
|
158
|
-
],
|
|
159
|
-
"models": [
|
|
160
|
-
"Flux.1 Krea Dev"
|
|
161
|
-
],
|
|
162
|
-
"date": "2025-07-31"
|
|
111
|
+
"description": "Chroma est modifié à partir de Flux et présente quelques changements dans l'architecture.",
|
|
112
|
+
"tags": ["Texte vers Image", "Image"],
|
|
113
|
+
"models": ["Chroma", "Flux"],
|
|
114
|
+
"date": "2025-06-04"
|
|
163
115
|
},
|
|
164
116
|
{
|
|
165
117
|
"name": "flux_kontext_dev_basic",
|
|
@@ -167,32 +119,10 @@
|
|
|
167
119
|
"mediaType": "image",
|
|
168
120
|
"mediaSubtype": "webp",
|
|
169
121
|
"thumbnailVariant": "hoverDissolve",
|
|
170
|
-
"description": "
|
|
171
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
|
|
172
|
-
"tags": [
|
|
173
|
-
"Image Edit",
|
|
174
|
-
"Image to Image"
|
|
175
|
-
],
|
|
176
|
-
"models": [
|
|
177
|
-
"Flux"
|
|
178
|
-
],
|
|
179
|
-
"date": "2025-06-26"
|
|
180
|
-
},
|
|
181
|
-
{
|
|
182
|
-
"name": "flux_kontext_dev_grouped",
|
|
183
|
-
"title": "Flux Kontext Dev (Groupé)",
|
|
184
|
-
"mediaType": "image",
|
|
185
|
-
"mediaSubtype": "webp",
|
|
186
|
-
"thumbnailVariant": "hoverDissolve",
|
|
187
|
-
"description": "Version simplifiée de Flux Kontext avec des nœuds groupés pour un espace de travail plus propre.",
|
|
122
|
+
"description": "Éditer une image en utilisant Flux Kontext avec une visibilité complète des nœuds, parfait pour apprendre le flux de travail.",
|
|
188
123
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
|
|
189
|
-
"tags": [
|
|
190
|
-
|
|
191
|
-
"Image to Image"
|
|
192
|
-
],
|
|
193
|
-
"models": [
|
|
194
|
-
"Flux"
|
|
195
|
-
],
|
|
124
|
+
"tags": ["Édition d'Image", "Image vers Image"],
|
|
125
|
+
"models": ["Flux"],
|
|
196
126
|
"date": "2025-06-26"
|
|
197
127
|
},
|
|
198
128
|
{
|
|
@@ -200,15 +130,10 @@
|
|
|
200
130
|
"title": "Flux Dev fp8",
|
|
201
131
|
"mediaType": "image",
|
|
202
132
|
"mediaSubtype": "webp",
|
|
203
|
-
"description": "
|
|
133
|
+
"description": "Générer des images en utilisant la version quantifiée Flux Dev fp8. Convient aux appareils avec une VRAM limitée, ne nécessite qu'un seul fichier de modèle, mais la qualité de l'image est légèrement inférieure à la version complète.",
|
|
204
134
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
205
|
-
"tags": [
|
|
206
|
-
|
|
207
|
-
"Image"
|
|
208
|
-
],
|
|
209
|
-
"models": [
|
|
210
|
-
"Flux"
|
|
211
|
-
],
|
|
135
|
+
"tags": ["Texte vers Image", "Image"],
|
|
136
|
+
"models": ["Flux"],
|
|
212
137
|
"date": "2025-03-01"
|
|
213
138
|
},
|
|
214
139
|
{
|
|
@@ -216,31 +141,32 @@
|
|
|
216
141
|
"title": "Flux Schnell fp8",
|
|
217
142
|
"mediaType": "image",
|
|
218
143
|
"mediaSubtype": "webp",
|
|
219
|
-
"description": "
|
|
144
|
+
"description": "Générer rapidement des images avec la version quantifiée Flux Schnell fp8. Idéal pour le matériel d'entrée de gamme, ne nécessite que 4 étapes pour générer des images.",
|
|
220
145
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
221
|
-
"tags": [
|
|
222
|
-
|
|
223
|
-
"Image"
|
|
224
|
-
],
|
|
225
|
-
"models": [
|
|
226
|
-
"Flux"
|
|
227
|
-
],
|
|
146
|
+
"tags": ["Texte vers Image", "Image"],
|
|
147
|
+
"models": ["Flux"],
|
|
228
148
|
"date": "2025-03-01"
|
|
229
149
|
},
|
|
150
|
+
{
|
|
151
|
+
"name": "flux1_krea_dev",
|
|
152
|
+
"title": "Flux.1 Krea Dev",
|
|
153
|
+
"mediaType": "image",
|
|
154
|
+
"mediaSubtype": "webp",
|
|
155
|
+
"description": "Un modèle FLUX affiné poussant le photoréalisme à son maximum",
|
|
156
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
|
|
157
|
+
"tags": ["Texte vers Image", "Image", "Photoréalisme"],
|
|
158
|
+
"models": ["Flux.1 Krea Dev"],
|
|
159
|
+
"date": "2025-07-31"
|
|
160
|
+
},
|
|
230
161
|
{
|
|
231
162
|
"name": "flux_dev_full_text_to_image",
|
|
232
163
|
"title": "Flux Dev texte vers image complet",
|
|
233
164
|
"mediaType": "image",
|
|
234
165
|
"mediaSubtype": "webp",
|
|
235
|
-
"description": "
|
|
166
|
+
"description": "Générer des images de haute qualité avec la version complète de Flux Dev. Nécessite plus de VRAM et plusieurs fichiers de modèles, mais offre la meilleure capacité de suivi des prompts et la qualité d'image.",
|
|
236
167
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
237
|
-
"tags": [
|
|
238
|
-
|
|
239
|
-
"Image"
|
|
240
|
-
],
|
|
241
|
-
"models": [
|
|
242
|
-
"Flux"
|
|
243
|
-
],
|
|
168
|
+
"tags": ["Texte vers Image", "Image"],
|
|
169
|
+
"models": ["Flux"],
|
|
244
170
|
"date": "2025-03-01"
|
|
245
171
|
},
|
|
246
172
|
{
|
|
@@ -248,69 +174,46 @@
|
|
|
248
174
|
"title": "Flux Schnell texte vers image complet",
|
|
249
175
|
"mediaType": "image",
|
|
250
176
|
"mediaSubtype": "webp",
|
|
251
|
-
"description": "
|
|
177
|
+
"description": "Générer des images rapidement avec la version complète de Flux Schnell. Utilise la licence Apache2.0, ne nécessite que 4 étapes pour générer des images tout en maintenant une bonne qualité d'image.",
|
|
252
178
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
253
|
-
"tags": [
|
|
254
|
-
|
|
255
|
-
"Image"
|
|
256
|
-
],
|
|
257
|
-
"models": [
|
|
258
|
-
"Flux"
|
|
259
|
-
],
|
|
179
|
+
"tags": ["Texte vers Image", "Image"],
|
|
180
|
+
"models": ["Flux"],
|
|
260
181
|
"date": "2025-03-01"
|
|
261
182
|
},
|
|
262
183
|
{
|
|
263
184
|
"name": "flux_fill_inpaint_example",
|
|
264
|
-
"title": "Flux
|
|
185
|
+
"title": "Flux Inpainting",
|
|
265
186
|
"mediaType": "image",
|
|
266
187
|
"mediaSubtype": "webp",
|
|
267
|
-
"description": "
|
|
188
|
+
"description": "Combler les parties manquantes des images en utilisant l'inpainting Flux.",
|
|
268
189
|
"thumbnailVariant": "compareSlider",
|
|
269
190
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
270
|
-
"tags": [
|
|
271
|
-
|
|
272
|
-
"Inpaint",
|
|
273
|
-
"Image"
|
|
274
|
-
],
|
|
275
|
-
"models": [
|
|
276
|
-
"Flux"
|
|
277
|
-
],
|
|
191
|
+
"tags": ["Image vers Image", "Inpainting", "Image"],
|
|
192
|
+
"models": ["Flux"],
|
|
278
193
|
"date": "2025-03-01"
|
|
279
194
|
},
|
|
280
195
|
{
|
|
281
196
|
"name": "flux_fill_outpaint_example",
|
|
282
|
-
"title": "Flux
|
|
197
|
+
"title": "Flux Outpainting",
|
|
283
198
|
"mediaType": "image",
|
|
284
199
|
"mediaSubtype": "webp",
|
|
285
|
-
"description": "
|
|
200
|
+
"description": "Étendre les images au-delà des limites en utilisant l'outpainting Flux.",
|
|
286
201
|
"thumbnailVariant": "compareSlider",
|
|
287
202
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
288
|
-
"tags": [
|
|
289
|
-
|
|
290
|
-
"Image",
|
|
291
|
-
"Image to Image"
|
|
292
|
-
],
|
|
293
|
-
"models": [
|
|
294
|
-
"Flux"
|
|
295
|
-
],
|
|
203
|
+
"tags": ["Outpainting", "Image", "Image vers Image"],
|
|
204
|
+
"models": ["Flux"],
|
|
296
205
|
"date": "2025-03-01"
|
|
297
206
|
},
|
|
298
207
|
{
|
|
299
208
|
"name": "flux_canny_model_example",
|
|
300
|
-
"title": "Flux Canny
|
|
209
|
+
"title": "Modèle Flux Canny",
|
|
301
210
|
"mediaType": "image",
|
|
302
211
|
"mediaSubtype": "webp",
|
|
303
|
-
"description": "
|
|
212
|
+
"description": "Générer des images guidées par la détection de contours en utilisant Flux Canny.",
|
|
304
213
|
"thumbnailVariant": "hoverDissolve",
|
|
305
214
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
306
|
-
"tags": [
|
|
307
|
-
|
|
308
|
-
"ControlNet",
|
|
309
|
-
"Image"
|
|
310
|
-
],
|
|
311
|
-
"models": [
|
|
312
|
-
"Flux"
|
|
313
|
-
],
|
|
215
|
+
"tags": ["Image vers Image", "ControlNet", "Image"],
|
|
216
|
+
"models": ["Flux"],
|
|
314
217
|
"date": "2025-03-01"
|
|
315
218
|
},
|
|
316
219
|
{
|
|
@@ -318,36 +221,22 @@
|
|
|
318
221
|
"title": "Flux Depth LoRA",
|
|
319
222
|
"mediaType": "image",
|
|
320
223
|
"mediaSubtype": "webp",
|
|
321
|
-
"description": "
|
|
224
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant Flux LoRA.",
|
|
322
225
|
"thumbnailVariant": "hoverDissolve",
|
|
323
226
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
324
|
-
"tags": [
|
|
325
|
-
|
|
326
|
-
"ControlNet",
|
|
327
|
-
"Image",
|
|
328
|
-
"LoRA"
|
|
329
|
-
],
|
|
330
|
-
"models": [
|
|
331
|
-
"Flux"
|
|
332
|
-
],
|
|
227
|
+
"tags": ["Image vers Image", "ControlNet", "Image", "LoRA"],
|
|
228
|
+
"models": ["Flux"],
|
|
333
229
|
"date": "2025-03-01"
|
|
334
230
|
},
|
|
335
231
|
{
|
|
336
232
|
"name": "flux_redux_model_example",
|
|
337
|
-
"title": "Flux Redux
|
|
233
|
+
"title": "Modèle Flux Redux",
|
|
338
234
|
"mediaType": "image",
|
|
339
235
|
"mediaSubtype": "webp",
|
|
340
|
-
"description": "
|
|
236
|
+
"description": "Générer des images en transférant le style à partir d'images de référence en utilisant Flux Redux.",
|
|
341
237
|
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
342
|
-
"tags": [
|
|
343
|
-
|
|
344
|
-
"ControlNet",
|
|
345
|
-
"Image",
|
|
346
|
-
"LoRA"
|
|
347
|
-
],
|
|
348
|
-
"models": [
|
|
349
|
-
"Flux"
|
|
350
|
-
],
|
|
238
|
+
"tags": ["Image vers Image", "ControlNet", "Image", "LoRA"],
|
|
239
|
+
"models": ["Flux"],
|
|
351
240
|
"date": "2025-03-01"
|
|
352
241
|
}
|
|
353
242
|
]
|
|
@@ -360,165 +249,125 @@
|
|
|
360
249
|
"templates": [
|
|
361
250
|
{
|
|
362
251
|
"name": "image_qwen_image",
|
|
363
|
-
"title": "Qwen-Image
|
|
252
|
+
"title": "Qwen-Image Texte vers Image",
|
|
364
253
|
"mediaType": "image",
|
|
365
254
|
"mediaSubtype": "webp",
|
|
366
|
-
"description": "
|
|
255
|
+
"description": "Générer des images avec des capacités exceptionnelles de rendu et d'édition de texte multilingue en utilisant le modèle MMDiT 20B de Qwen-Image.",
|
|
367
256
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
368
|
-
"tags": [
|
|
369
|
-
|
|
370
|
-
"Image"
|
|
371
|
-
],
|
|
372
|
-
"models": [
|
|
373
|
-
"Qwen-Image"
|
|
374
|
-
],
|
|
257
|
+
"tags": ["Texte vers Image", "Image"],
|
|
258
|
+
"models": ["Qwen-Image"],
|
|
375
259
|
"date": "2025-08-05"
|
|
376
260
|
},
|
|
377
261
|
{
|
|
378
|
-
"name": "
|
|
379
|
-
"title": "
|
|
262
|
+
"name": "image_qwen_image_union_control_lora",
|
|
263
|
+
"title": "Qwen-Image Union Control",
|
|
380
264
|
"mediaType": "image",
|
|
381
265
|
"mediaSubtype": "webp",
|
|
382
|
-
"description": "
|
|
383
|
-
"
|
|
384
|
-
"
|
|
385
|
-
|
|
386
|
-
"Image"
|
|
387
|
-
],
|
|
388
|
-
"models": [
|
|
389
|
-
"OmniGen"
|
|
390
|
-
],
|
|
391
|
-
"date": "2025-06-30"
|
|
266
|
+
"description": "Générer des images avec un contrôle structurel précis en utilisant le LoRA ControlNet unifié de Qwen-Image. Prend en charge plusieurs types de contrôle incluant canny, depth, lineart, softedge, normal et openpose pour diverses applications créatives.",
|
|
267
|
+
"tags": ["Texte vers Image", "Image", "ControlNet"],
|
|
268
|
+
"models": ["Qwen-Image"],
|
|
269
|
+
"date": "2025-08-23"
|
|
392
270
|
},
|
|
393
271
|
{
|
|
394
|
-
"name": "
|
|
395
|
-
"title": "
|
|
272
|
+
"name": "image_qwen_image_controlnet_patch",
|
|
273
|
+
"title": "Qwen-Image ControlNet Basique",
|
|
396
274
|
"mediaType": "image",
|
|
397
275
|
"mediaSubtype": "webp",
|
|
398
|
-
"
|
|
399
|
-
"
|
|
400
|
-
"
|
|
401
|
-
"
|
|
402
|
-
"Image Edit",
|
|
403
|
-
"Image"
|
|
404
|
-
],
|
|
405
|
-
"models": [
|
|
406
|
-
"OmniGen"
|
|
407
|
-
],
|
|
408
|
-
"date": "2025-06-30"
|
|
276
|
+
"description": "Contrôler la génération d'images en utilisant les modèles ControlNet de Qwen-Image. Prend en charge les contrôles canny, depth et inpainting via le patching de modèles.",
|
|
277
|
+
"tags": ["Texte vers Image", "Image", "ControlNet"],
|
|
278
|
+
"models": ["Qwen-Image"],
|
|
279
|
+
"date": "2025-08-24"
|
|
409
280
|
},
|
|
410
281
|
{
|
|
411
|
-
"name": "
|
|
412
|
-
"title": "
|
|
282
|
+
"name": "image_qwen_image_edit",
|
|
283
|
+
"title": "Édition d'Image Qwen",
|
|
413
284
|
"mediaType": "image",
|
|
414
285
|
"mediaSubtype": "webp",
|
|
415
|
-
"
|
|
416
|
-
"
|
|
417
|
-
"
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
"models": [
|
|
422
|
-
"Cosmos"
|
|
423
|
-
],
|
|
424
|
-
"date": "2025-06-16"
|
|
286
|
+
"thumbnailVariant": "compareSlider",
|
|
287
|
+
"description": "Éditer des images avec une édition de texte bilingue précise et des capacités d'édition sémantique/apparence duales en utilisant le modèle MMDiT 20B de Qwen-Image-Edit.",
|
|
288
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
289
|
+
"tags": ["Image vers Image", "Édition d'Image"],
|
|
290
|
+
"models": ["Qwen-Image"],
|
|
291
|
+
"date": "2025-08-18"
|
|
425
292
|
},
|
|
426
293
|
{
|
|
427
|
-
"name": "
|
|
428
|
-
"title": "
|
|
294
|
+
"name": "image_omnigen2_t2i",
|
|
295
|
+
"title": "OmniGen2 Texte vers Image",
|
|
429
296
|
"mediaType": "image",
|
|
430
297
|
"mediaSubtype": "webp",
|
|
431
|
-
"description": "
|
|
432
|
-
"
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
"
|
|
298
|
+
"description": "Générer des images de haute qualité à partir de prompts textuels en utilisant le modèle multimodal unifié 7B d'OmniGen2 avec une architecture à double chemin.",
|
|
299
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
300
|
+
"tags": ["Texte vers Image", "Image"],
|
|
301
|
+
"models": ["OmniGen"],
|
|
302
|
+
"date": "2025-06-30"
|
|
303
|
+
},
|
|
304
|
+
{
|
|
305
|
+
"name": "image_omnigen2_image_edit",
|
|
306
|
+
"title": "Édition d'Image OmniGen2",
|
|
307
|
+
"mediaType": "image",
|
|
308
|
+
"mediaSubtype": "webp",
|
|
309
|
+
"thumbnailVariant": "hoverDissolve",
|
|
310
|
+
"description": "Éditer des images avec des instructions en langage naturel en utilisant les capacités avancées d'édition d'images d'OmniGen2 et le support de rendu de texte.",
|
|
311
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
312
|
+
"tags": ["Édition d'Image", "Image"],
|
|
313
|
+
"models": ["OmniGen"],
|
|
314
|
+
"date": "2025-06-30"
|
|
441
315
|
},
|
|
442
316
|
{
|
|
443
317
|
"name": "hidream_i1_dev",
|
|
444
318
|
"title": "HiDream I1 Dev",
|
|
445
319
|
"mediaType": "image",
|
|
446
320
|
"mediaSubtype": "webp",
|
|
447
|
-
"description": "HiDream I1 Dev - Version équilibrée avec 28 étapes d'inférence, adaptée au matériel
|
|
321
|
+
"description": "Générer des images avec HiDream I1 Dev - Version équilibrée avec 28 étapes d'inférence, adaptée au matériel de gamme moyenne.",
|
|
448
322
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
449
|
-
"tags": [
|
|
450
|
-
|
|
451
|
-
"Image"
|
|
452
|
-
],
|
|
453
|
-
"models": [
|
|
454
|
-
"HiDream"
|
|
455
|
-
],
|
|
323
|
+
"tags": ["Texte vers Image", "Image"],
|
|
324
|
+
"models": ["HiDream"],
|
|
456
325
|
"date": "2025-04-17"
|
|
457
326
|
},
|
|
458
327
|
{
|
|
459
328
|
"name": "hidream_i1_fast",
|
|
460
|
-
"title": "HiDream I1
|
|
329
|
+
"title": "HiDream I1 Fast",
|
|
461
330
|
"mediaType": "image",
|
|
462
331
|
"mediaSubtype": "webp",
|
|
463
|
-
"description": "HiDream I1 Fast - Version légère avec 16 étapes, idéale pour des aperçus rapides sur du matériel
|
|
332
|
+
"description": "Générer des images rapidement avec HiDream I1 Fast - Version légère avec 16 étapes d'inférence, idéale pour des aperçus rapides sur du matériel d'entrée de gamme.",
|
|
464
333
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
465
|
-
"tags": [
|
|
466
|
-
|
|
467
|
-
"Image"
|
|
468
|
-
],
|
|
469
|
-
"models": [
|
|
470
|
-
"HiDream"
|
|
471
|
-
],
|
|
334
|
+
"tags": ["Texte vers Image", "Image"],
|
|
335
|
+
"models": ["HiDream"],
|
|
472
336
|
"date": "2025-04-17"
|
|
473
337
|
},
|
|
474
338
|
{
|
|
475
339
|
"name": "hidream_i1_full",
|
|
476
|
-
"title": "HiDream I1
|
|
340
|
+
"title": "HiDream I1 Full",
|
|
477
341
|
"mediaType": "image",
|
|
478
342
|
"mediaSubtype": "webp",
|
|
479
|
-
"description": "HiDream I1 Full - Version complète avec 50 étapes pour une
|
|
343
|
+
"description": "Générer des images avec HiDream I1 Full - Version complète avec 50 étapes d'inférence pour une sortie de la plus haute qualité.",
|
|
480
344
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
481
|
-
"tags": [
|
|
482
|
-
|
|
483
|
-
"Image"
|
|
484
|
-
],
|
|
485
|
-
"models": [
|
|
486
|
-
"HiDream"
|
|
487
|
-
],
|
|
345
|
+
"tags": ["Texte vers Image", "Image"],
|
|
346
|
+
"models": ["HiDream"],
|
|
488
347
|
"date": "2025-04-17"
|
|
489
348
|
},
|
|
490
349
|
{
|
|
491
350
|
"name": "hidream_e1_1",
|
|
492
|
-
"title": "HiDream E1.1
|
|
351
|
+
"title": "Édition d'Image HiDream E1.1",
|
|
493
352
|
"mediaType": "image",
|
|
494
353
|
"mediaSubtype": "webp",
|
|
495
354
|
"thumbnailVariant": "compareSlider",
|
|
496
|
-
"description": "
|
|
355
|
+
"description": "Éditer des images avec HiDream E1.1 – il est meilleur en qualité d'image et en précision d'édition que HiDream-E1-Full.",
|
|
497
356
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
498
|
-
"tags": [
|
|
499
|
-
|
|
500
|
-
"Image"
|
|
501
|
-
],
|
|
502
|
-
"models": [
|
|
503
|
-
"HiDream"
|
|
504
|
-
],
|
|
357
|
+
"tags": ["Édition d'Image", "Image"],
|
|
358
|
+
"models": ["HiDream"],
|
|
505
359
|
"date": "2025-07-21"
|
|
506
360
|
},
|
|
507
361
|
{
|
|
508
362
|
"name": "hidream_e1_full",
|
|
509
|
-
"title": "HiDream E1
|
|
363
|
+
"title": "Édition d'Image HiDream E1",
|
|
510
364
|
"mediaType": "image",
|
|
511
365
|
"mediaSubtype": "webp",
|
|
512
366
|
"thumbnailVariant": "compareSlider",
|
|
513
|
-
"description": "HiDream E1 - Modèle professionnel d'édition d'
|
|
367
|
+
"description": "Éditer des images avec HiDream E1 - Modèle professionnel d'édition d'images en langage naturel.",
|
|
514
368
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
515
|
-
"tags": [
|
|
516
|
-
|
|
517
|
-
"Image"
|
|
518
|
-
],
|
|
519
|
-
"models": [
|
|
520
|
-
"HiDream"
|
|
521
|
-
],
|
|
369
|
+
"tags": ["Édition d'Image", "Image"],
|
|
370
|
+
"models": ["HiDream"],
|
|
522
371
|
"date": "2025-05-01"
|
|
523
372
|
},
|
|
524
373
|
{
|
|
@@ -526,15 +375,10 @@
|
|
|
526
375
|
"title": "SD3.5 Simple",
|
|
527
376
|
"mediaType": "image",
|
|
528
377
|
"mediaSubtype": "webp",
|
|
529
|
-
"description": "
|
|
378
|
+
"description": "Générer des images en utilisant SD 3.5.",
|
|
530
379
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
|
|
531
|
-
"tags": [
|
|
532
|
-
|
|
533
|
-
"Image"
|
|
534
|
-
],
|
|
535
|
-
"models": [
|
|
536
|
-
"SD3.5"
|
|
537
|
-
],
|
|
380
|
+
"tags": ["Texte vers Image", "Image"],
|
|
381
|
+
"models": ["SD3.5"],
|
|
538
382
|
"date": "2025-03-01"
|
|
539
383
|
},
|
|
540
384
|
{
|
|
@@ -542,17 +386,11 @@
|
|
|
542
386
|
"title": "SD3.5 Large Canny ControlNet",
|
|
543
387
|
"mediaType": "image",
|
|
544
388
|
"mediaSubtype": "webp",
|
|
545
|
-
"description": "
|
|
389
|
+
"description": "Générer des images guidées par la détection de contours en utilisant SD 3.5 Canny ControlNet.",
|
|
546
390
|
"thumbnailVariant": "hoverDissolve",
|
|
547
391
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
548
|
-
"tags": [
|
|
549
|
-
|
|
550
|
-
"Image",
|
|
551
|
-
"ControlNet"
|
|
552
|
-
],
|
|
553
|
-
"models": [
|
|
554
|
-
"SD3.5"
|
|
555
|
-
],
|
|
392
|
+
"tags": ["Image vers Image", "Image", "ControlNet"],
|
|
393
|
+
"models": ["SD3.5"],
|
|
556
394
|
"date": "2025-03-01"
|
|
557
395
|
},
|
|
558
396
|
{
|
|
@@ -560,17 +398,11 @@
|
|
|
560
398
|
"title": "SD3.5 Large Depth",
|
|
561
399
|
"mediaType": "image",
|
|
562
400
|
"mediaSubtype": "webp",
|
|
563
|
-
"description": "
|
|
401
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant SD 3.5.",
|
|
564
402
|
"thumbnailVariant": "hoverDissolve",
|
|
565
403
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
566
|
-
"tags": [
|
|
567
|
-
|
|
568
|
-
"Image",
|
|
569
|
-
"ControlNet"
|
|
570
|
-
],
|
|
571
|
-
"models": [
|
|
572
|
-
"SD3.5"
|
|
573
|
-
],
|
|
404
|
+
"tags": ["Image vers Image", "Image", "ControlNet"],
|
|
405
|
+
"models": ["SD3.5"],
|
|
574
406
|
"date": "2025-03-01"
|
|
575
407
|
},
|
|
576
408
|
{
|
|
@@ -578,16 +410,11 @@
|
|
|
578
410
|
"title": "SD3.5 Large Blur",
|
|
579
411
|
"mediaType": "image",
|
|
580
412
|
"mediaSubtype": "webp",
|
|
581
|
-
"description": "
|
|
413
|
+
"description": "Générer des images guidées par des images de référence floues en utilisant SD 3.5.",
|
|
582
414
|
"thumbnailVariant": "hoverDissolve",
|
|
583
415
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
584
|
-
"tags": [
|
|
585
|
-
|
|
586
|
-
"Image"
|
|
587
|
-
],
|
|
588
|
-
"models": [
|
|
589
|
-
"SD3.5"
|
|
590
|
-
],
|
|
416
|
+
"tags": ["Image vers Image", "Image"],
|
|
417
|
+
"models": ["SD3.5"],
|
|
591
418
|
"date": "2025-03-01"
|
|
592
419
|
},
|
|
593
420
|
{
|
|
@@ -595,15 +422,10 @@
|
|
|
595
422
|
"title": "SDXL Simple",
|
|
596
423
|
"mediaType": "image",
|
|
597
424
|
"mediaSubtype": "webp",
|
|
598
|
-
"description": "
|
|
425
|
+
"description": "Générer des images de haute qualité en utilisant SDXL.",
|
|
599
426
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
600
|
-
"tags": [
|
|
601
|
-
|
|
602
|
-
"Image"
|
|
603
|
-
],
|
|
604
|
-
"models": [
|
|
605
|
-
"SDXL"
|
|
606
|
-
],
|
|
427
|
+
"tags": ["Texte vers Image", "Image"],
|
|
428
|
+
"models": ["SDXL"],
|
|
607
429
|
"date": "2025-03-01"
|
|
608
430
|
},
|
|
609
431
|
{
|
|
@@ -611,47 +433,32 @@
|
|
|
611
433
|
"title": "SDXL Refiner Prompt",
|
|
612
434
|
"mediaType": "image",
|
|
613
435
|
"mediaSubtype": "webp",
|
|
614
|
-
"description": "
|
|
436
|
+
"description": "Améliorer les images SDXL en utilisant des modèles de raffinement.",
|
|
615
437
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
616
|
-
"tags": [
|
|
617
|
-
|
|
618
|
-
"Image"
|
|
619
|
-
],
|
|
620
|
-
"models": [
|
|
621
|
-
"SDXL"
|
|
622
|
-
],
|
|
438
|
+
"tags": ["Texte vers Image", "Image"],
|
|
439
|
+
"models": ["SDXL"],
|
|
623
440
|
"date": "2025-03-01"
|
|
624
441
|
},
|
|
625
442
|
{
|
|
626
443
|
"name": "sdxl_revision_text_prompts",
|
|
627
|
-
"title": "
|
|
444
|
+
"title": "SDXL Revision Text Prompts",
|
|
628
445
|
"mediaType": "image",
|
|
629
446
|
"mediaSubtype": "webp",
|
|
630
|
-
"description": "
|
|
447
|
+
"description": "Générer des images en transférant des concepts à partir d'images de référence en utilisant SDXL Revision.",
|
|
631
448
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
|
|
632
|
-
"tags": [
|
|
633
|
-
|
|
634
|
-
"Image"
|
|
635
|
-
],
|
|
636
|
-
"models": [
|
|
637
|
-
"SDXL"
|
|
638
|
-
],
|
|
449
|
+
"tags": ["Texte vers Image", "Image"],
|
|
450
|
+
"models": ["SDXL"],
|
|
639
451
|
"date": "2025-03-01"
|
|
640
452
|
},
|
|
641
453
|
{
|
|
642
454
|
"name": "sdxl_revision_zero_positive",
|
|
643
|
-
"title": "
|
|
455
|
+
"title": "SDXL Revision Zero Positive",
|
|
644
456
|
"mediaType": "image",
|
|
645
457
|
"mediaSubtype": "webp",
|
|
646
|
-
"description": "
|
|
458
|
+
"description": "Générer des images en utilisant à la fois des prompts textuels et des images de référence avec SDXL Revision.",
|
|
647
459
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
|
|
648
|
-
"tags": [
|
|
649
|
-
|
|
650
|
-
"Image"
|
|
651
|
-
],
|
|
652
|
-
"models": [
|
|
653
|
-
"SDXL"
|
|
654
|
-
],
|
|
460
|
+
"tags": ["Texte vers Image", "Image"],
|
|
461
|
+
"models": ["SDXL"],
|
|
655
462
|
"date": "2025-03-01"
|
|
656
463
|
},
|
|
657
464
|
{
|
|
@@ -659,15 +466,10 @@
|
|
|
659
466
|
"title": "SDXL Turbo",
|
|
660
467
|
"mediaType": "image",
|
|
661
468
|
"mediaSubtype": "webp",
|
|
662
|
-
"description": "
|
|
469
|
+
"description": "Générer des images en une seule étape en utilisant SDXL Turbo.",
|
|
663
470
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
|
|
664
|
-
"tags": [
|
|
665
|
-
|
|
666
|
-
"Image"
|
|
667
|
-
],
|
|
668
|
-
"models": [
|
|
669
|
-
"SDXL Turbo"
|
|
670
|
-
],
|
|
471
|
+
"tags": ["Texte vers Image", "Image"],
|
|
472
|
+
"models": ["SDXL Turbo"],
|
|
671
473
|
"date": "2025-03-01"
|
|
672
474
|
},
|
|
673
475
|
{
|
|
@@ -676,14 +478,9 @@
|
|
|
676
478
|
"mediaType": "image",
|
|
677
479
|
"mediaSubtype": "webp",
|
|
678
480
|
"thumbnailVariant": "compareSlider",
|
|
679
|
-
"description": "
|
|
680
|
-
"tags": [
|
|
681
|
-
|
|
682
|
-
"Image"
|
|
683
|
-
],
|
|
684
|
-
"models": [
|
|
685
|
-
"SD1.5"
|
|
686
|
-
],
|
|
481
|
+
"description": "Exécuter Lotus Depth dans ComfyUI pour une estimation de profondeur monoculaire efficace zero-shot avec une haute rétention de détails.",
|
|
482
|
+
"tags": ["Profondeur", "Image"],
|
|
483
|
+
"models": ["SD1.5"],
|
|
687
484
|
"date": "2025-05-21"
|
|
688
485
|
}
|
|
689
486
|
]
|
|
@@ -701,13 +498,8 @@
|
|
|
701
498
|
"mediaType": "image",
|
|
702
499
|
"mediaSubtype": "webp",
|
|
703
500
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
704
|
-
"tags": [
|
|
705
|
-
|
|
706
|
-
"Video"
|
|
707
|
-
],
|
|
708
|
-
"models": [
|
|
709
|
-
"Wan"
|
|
710
|
-
],
|
|
501
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
502
|
+
"models": ["Wan"],
|
|
711
503
|
"date": "2025-07-29"
|
|
712
504
|
},
|
|
713
505
|
{
|
|
@@ -718,13 +510,8 @@
|
|
|
718
510
|
"mediaSubtype": "webp",
|
|
719
511
|
"thumbnailVariant": "hoverDissolve",
|
|
720
512
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
721
|
-
"tags": [
|
|
722
|
-
|
|
723
|
-
"Video"
|
|
724
|
-
],
|
|
725
|
-
"models": [
|
|
726
|
-
"Wan2.2"
|
|
727
|
-
],
|
|
513
|
+
"tags": ["Image vers Vidéo", "Vidéo"],
|
|
514
|
+
"models": ["Wan2.2"],
|
|
728
515
|
"date": "2025-07-29"
|
|
729
516
|
},
|
|
730
517
|
{
|
|
@@ -735,15 +522,43 @@
|
|
|
735
522
|
"mediaSubtype": "webp",
|
|
736
523
|
"thumbnailVariant": "hoverDissolve",
|
|
737
524
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
738
|
-
"tags": [
|
|
739
|
-
|
|
740
|
-
"Video"
|
|
741
|
-
],
|
|
742
|
-
"models": [
|
|
743
|
-
"Wan2.2"
|
|
744
|
-
],
|
|
525
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
526
|
+
"models": ["Wan2.2"],
|
|
745
527
|
"date": "2025-08-02"
|
|
746
528
|
},
|
|
529
|
+
{
|
|
530
|
+
"name": "video_wan2_2_14B_fun_inpaint",
|
|
531
|
+
"title": "Wan 2.2 14B Fun Inp",
|
|
532
|
+
"description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
|
|
533
|
+
"mediaType": "image",
|
|
534
|
+
"mediaSubtype": "webp",
|
|
535
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
|
|
536
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
537
|
+
"models": ["Wan2.2"],
|
|
538
|
+
"date": "2025-08-12"
|
|
539
|
+
},
|
|
540
|
+
{
|
|
541
|
+
"name": "video_wan2_2_14B_fun_control",
|
|
542
|
+
"title": "Wan 2.2 14B Fun Control",
|
|
543
|
+
"description": "Generate videos guided by pose, depth, and edge controls using Wan 2.2 Fun Control.",
|
|
544
|
+
"mediaType": "image",
|
|
545
|
+
"mediaSubtype": "webp",
|
|
546
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
|
|
547
|
+
"tags": ["Vidéo vers Vidéo", "Vidéo"],
|
|
548
|
+
"models": ["Wan2.2"],
|
|
549
|
+
"date": "2025-08-12"
|
|
550
|
+
},
|
|
551
|
+
{
|
|
552
|
+
"name": "video_wan2_2_14B_fun_camera",
|
|
553
|
+
"title": "Wan 2.2 14B Fun Camera Control",
|
|
554
|
+
"description": "Generate videos with camera motion controls including pan, zoom, and rotation using Wan 2.2 Fun Camera Control.",
|
|
555
|
+
"mediaType": "image",
|
|
556
|
+
"mediaSubtype": "webp",
|
|
557
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
|
|
558
|
+
"tags": ["Vidéo vers Vidéo", "Vidéo"],
|
|
559
|
+
"models": ["Wan2.2"],
|
|
560
|
+
"date": "2025-08-17"
|
|
561
|
+
},
|
|
747
562
|
{
|
|
748
563
|
"name": "video_wan2_2_5B_ti2v",
|
|
749
564
|
"title": "Wan 2.2 5B Video Generation",
|
|
@@ -751,112 +566,77 @@
|
|
|
751
566
|
"mediaType": "image",
|
|
752
567
|
"mediaSubtype": "webp",
|
|
753
568
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
754
|
-
"tags": [
|
|
755
|
-
|
|
756
|
-
"Video"
|
|
757
|
-
],
|
|
758
|
-
"models": [
|
|
759
|
-
"Wan2.2"
|
|
760
|
-
],
|
|
569
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
570
|
+
"models": ["Wan2.2"],
|
|
761
571
|
"date": "2025-07-29"
|
|
762
572
|
},
|
|
763
573
|
{
|
|
764
574
|
"name": "video_wan_vace_14B_t2v",
|
|
765
|
-
"title": "Wan VACE
|
|
766
|
-
"description": "
|
|
575
|
+
"title": "Wan VACE Text to Video",
|
|
576
|
+
"description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
|
|
767
577
|
"mediaType": "image",
|
|
768
578
|
"mediaSubtype": "webp",
|
|
769
579
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
770
|
-
"tags": [
|
|
771
|
-
|
|
772
|
-
"Video"
|
|
773
|
-
],
|
|
774
|
-
"models": [
|
|
775
|
-
"Wan2.1"
|
|
776
|
-
],
|
|
580
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
581
|
+
"models": ["Wan2.1"],
|
|
777
582
|
"date": "2025-05-21"
|
|
778
583
|
},
|
|
779
584
|
{
|
|
780
585
|
"name": "video_wan_vace_14B_ref2v",
|
|
781
|
-
"title": "Wan VACE
|
|
782
|
-
"description": "
|
|
586
|
+
"title": "Wan VACE Reference to Video",
|
|
587
|
+
"description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
|
|
783
588
|
"mediaType": "image",
|
|
784
589
|
"mediaSubtype": "webp",
|
|
785
590
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
786
|
-
"tags": [
|
|
787
|
-
|
|
788
|
-
"Video"
|
|
789
|
-
],
|
|
790
|
-
"models": [
|
|
791
|
-
"Wan2.1"
|
|
792
|
-
],
|
|
591
|
+
"tags": ["Référence vers Vidéo", "Vidéo"],
|
|
592
|
+
"models": ["Wan2.1"],
|
|
793
593
|
"date": "2025-05-21"
|
|
794
594
|
},
|
|
795
595
|
{
|
|
796
596
|
"name": "video_wan_vace_14B_v2v",
|
|
797
|
-
"title": "Wan VACE
|
|
798
|
-
"description": "
|
|
597
|
+
"title": "Wan VACE Control Video",
|
|
598
|
+
"description": "Generate videos by controlling input videos and reference images using Wan VACE.",
|
|
799
599
|
"mediaType": "image",
|
|
800
600
|
"mediaSubtype": "webp",
|
|
801
601
|
"thumbnailVariant": "compareSlider",
|
|
802
602
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
803
|
-
"tags": [
|
|
804
|
-
|
|
805
|
-
"Video"
|
|
806
|
-
],
|
|
807
|
-
"models": [
|
|
808
|
-
"Wan2.1"
|
|
809
|
-
],
|
|
603
|
+
"tags": ["Vidéo vers Vidéo", "Vidéo"],
|
|
604
|
+
"models": ["Wan2.1"],
|
|
810
605
|
"date": "2025-05-21"
|
|
811
606
|
},
|
|
812
607
|
{
|
|
813
608
|
"name": "video_wan_vace_outpainting",
|
|
814
609
|
"title": "Wan VACE Outpainting",
|
|
815
|
-
"description": "
|
|
610
|
+
"description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
|
|
816
611
|
"mediaType": "image",
|
|
817
612
|
"mediaSubtype": "webp",
|
|
818
613
|
"thumbnailVariant": "compareSlider",
|
|
819
614
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
820
|
-
"tags": [
|
|
821
|
-
|
|
822
|
-
"Video"
|
|
823
|
-
],
|
|
824
|
-
"models": [
|
|
825
|
-
"Wan2.1"
|
|
826
|
-
],
|
|
615
|
+
"tags": ["Outpainting", "Vidéo"],
|
|
616
|
+
"models": ["Wan2.1"],
|
|
827
617
|
"date": "2025-05-21"
|
|
828
618
|
},
|
|
829
619
|
{
|
|
830
620
|
"name": "video_wan_vace_flf2v",
|
|
831
|
-
"title": "Wan VACE
|
|
832
|
-
"description": "
|
|
621
|
+
"title": "Wan VACE First-Last Frame",
|
|
622
|
+
"description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
|
|
833
623
|
"mediaType": "image",
|
|
834
624
|
"mediaSubtype": "webp",
|
|
835
625
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
836
|
-
"tags": [
|
|
837
|
-
|
|
838
|
-
"Video"
|
|
839
|
-
],
|
|
840
|
-
"models": [
|
|
841
|
-
"Wan2.1"
|
|
842
|
-
],
|
|
626
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
627
|
+
"models": ["Wan2.1"],
|
|
843
628
|
"date": "2025-05-21"
|
|
844
629
|
},
|
|
845
630
|
{
|
|
846
631
|
"name": "video_wan_vace_inpainting",
|
|
847
632
|
"title": "Wan VACE Inpainting",
|
|
848
|
-
"description": "
|
|
633
|
+
"description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
|
|
849
634
|
"mediaType": "image",
|
|
850
635
|
"mediaSubtype": "webp",
|
|
851
636
|
"thumbnailVariant": "compareSlider",
|
|
852
637
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
853
|
-
"tags": [
|
|
854
|
-
|
|
855
|
-
"Video"
|
|
856
|
-
],
|
|
857
|
-
"models": [
|
|
858
|
-
"Wan2.1"
|
|
859
|
-
],
|
|
638
|
+
"tags": ["Inpainting", "Vidéo"],
|
|
639
|
+
"models": ["Wan2.1"],
|
|
860
640
|
"date": "2025-05-21"
|
|
861
641
|
},
|
|
862
642
|
{
|
|
@@ -867,12 +647,8 @@
|
|
|
867
647
|
"mediaSubtype": "webp",
|
|
868
648
|
"thumbnailVariant": "hoverDissolve",
|
|
869
649
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
|
|
870
|
-
"tags": [
|
|
871
|
-
|
|
872
|
-
],
|
|
873
|
-
"models": [
|
|
874
|
-
"Wan2.1"
|
|
875
|
-
],
|
|
650
|
+
"tags": ["Vidéo"],
|
|
651
|
+
"models": ["Wan2.1"],
|
|
876
652
|
"date": "2025-05-21"
|
|
877
653
|
},
|
|
878
654
|
{
|
|
@@ -882,12 +658,8 @@
|
|
|
882
658
|
"mediaType": "image",
|
|
883
659
|
"mediaSubtype": "webp",
|
|
884
660
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
885
|
-
"tags": [
|
|
886
|
-
|
|
887
|
-
],
|
|
888
|
-
"models": [
|
|
889
|
-
"Wan2.1"
|
|
890
|
-
],
|
|
661
|
+
"tags": ["Vidéo"],
|
|
662
|
+
"models": ["Wan2.1"],
|
|
891
663
|
"date": "2025-04-15"
|
|
892
664
|
},
|
|
893
665
|
{
|
|
@@ -897,44 +669,30 @@
|
|
|
897
669
|
"mediaType": "image",
|
|
898
670
|
"mediaSubtype": "webp",
|
|
899
671
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
900
|
-
"tags": [
|
|
901
|
-
|
|
902
|
-
],
|
|
903
|
-
"models": [
|
|
904
|
-
"Wan2.1"
|
|
905
|
-
],
|
|
672
|
+
"tags": ["Vidéo"],
|
|
673
|
+
"models": ["Wan2.1"],
|
|
906
674
|
"date": "2025-04-15"
|
|
907
675
|
},
|
|
908
676
|
{
|
|
909
677
|
"name": "text_to_video_wan",
|
|
910
|
-
"title": "Wan 2.1
|
|
911
|
-
"description": "
|
|
678
|
+
"title": "Wan 2.1 Text to Video",
|
|
679
|
+
"description": "Generate videos from text prompts using Wan 2.1.",
|
|
912
680
|
"mediaType": "image",
|
|
913
681
|
"mediaSubtype": "webp",
|
|
914
682
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
915
|
-
"tags": [
|
|
916
|
-
|
|
917
|
-
"Video"
|
|
918
|
-
],
|
|
919
|
-
"models": [
|
|
920
|
-
"Wan2.1"
|
|
921
|
-
],
|
|
683
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
684
|
+
"models": ["Wan2.1"],
|
|
922
685
|
"date": "2025-03-01"
|
|
923
686
|
},
|
|
924
687
|
{
|
|
925
688
|
"name": "image_to_video_wan",
|
|
926
|
-
"title": "Wan 2.1 Image
|
|
927
|
-
"description": "
|
|
689
|
+
"title": "Wan 2.1 Image to Video",
|
|
690
|
+
"description": "Generate videos from images using Wan 2.1.",
|
|
928
691
|
"mediaType": "image",
|
|
929
692
|
"mediaSubtype": "webp",
|
|
930
693
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
931
|
-
"tags": [
|
|
932
|
-
|
|
933
|
-
"Video"
|
|
934
|
-
],
|
|
935
|
-
"models": [
|
|
936
|
-
"Wan2.1"
|
|
937
|
-
],
|
|
694
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
695
|
+
"models": ["Wan2.1"],
|
|
938
696
|
"date": "2025-03-01"
|
|
939
697
|
},
|
|
940
698
|
{
|
|
@@ -944,13 +702,8 @@
|
|
|
944
702
|
"mediaType": "image",
|
|
945
703
|
"mediaSubtype": "webp",
|
|
946
704
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
|
|
947
|
-
"tags": [
|
|
948
|
-
|
|
949
|
-
"Video"
|
|
950
|
-
],
|
|
951
|
-
"models": [
|
|
952
|
-
"Wan2.1"
|
|
953
|
-
],
|
|
705
|
+
"tags": ["Inpainting", "Vidéo"],
|
|
706
|
+
"models": ["Wan2.1"],
|
|
954
707
|
"date": "2025-04-15"
|
|
955
708
|
},
|
|
956
709
|
{
|
|
@@ -961,13 +714,8 @@
|
|
|
961
714
|
"mediaSubtype": "webp",
|
|
962
715
|
"thumbnailVariant": "hoverDissolve",
|
|
963
716
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
964
|
-
"tags": [
|
|
965
|
-
|
|
966
|
-
"Video"
|
|
967
|
-
],
|
|
968
|
-
"models": [
|
|
969
|
-
"Wan2.1"
|
|
970
|
-
],
|
|
717
|
+
"tags": ["Vidéo vers Vidéo", "Vidéo"],
|
|
718
|
+
"models": ["Wan2.1"],
|
|
971
719
|
"date": "2025-04-15"
|
|
972
720
|
},
|
|
973
721
|
{
|
|
@@ -977,125 +725,74 @@
|
|
|
977
725
|
"mediaType": "image",
|
|
978
726
|
"mediaSubtype": "webp",
|
|
979
727
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
980
|
-
"tags": [
|
|
981
|
-
|
|
982
|
-
"Video"
|
|
983
|
-
],
|
|
984
|
-
"models": [
|
|
985
|
-
"Wan2.1"
|
|
986
|
-
],
|
|
728
|
+
"tags": ["FLF2V", "Vidéo"],
|
|
729
|
+
"models": ["Wan2.1"],
|
|
987
730
|
"date": "2025-04-15"
|
|
988
731
|
},
|
|
989
|
-
{
|
|
990
|
-
"name": "video_cosmos_predict2_2B_video2world_480p_16fps",
|
|
991
|
-
"title": "Cosmos Predict2 2B Video2World 480p 16fps",
|
|
992
|
-
"description": "Générez des vidéos avec Cosmos-Predict2 2B Video2World, pour des simulations physiques précises, haute fidélité et cohérentes.",
|
|
993
|
-
"mediaType": "image",
|
|
994
|
-
"mediaSubtype": "webp",
|
|
995
|
-
"tutorialUrl": "http://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world",
|
|
996
|
-
"tags": [
|
|
997
|
-
"Video2World",
|
|
998
|
-
"Video"
|
|
999
|
-
],
|
|
1000
|
-
"models": [
|
|
1001
|
-
"Cosmos"
|
|
1002
|
-
],
|
|
1003
|
-
"date": "2025-06-16"
|
|
1004
|
-
},
|
|
1005
732
|
{
|
|
1006
733
|
"name": "ltxv_text_to_video",
|
|
1007
|
-
"title": "LTXV
|
|
734
|
+
"title": "LTXV Text to Video",
|
|
1008
735
|
"mediaType": "image",
|
|
1009
736
|
"mediaSubtype": "webp",
|
|
1010
|
-
"description": "
|
|
737
|
+
"description": "Generate videos from text prompts.",
|
|
1011
738
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1012
|
-
"tags": [
|
|
1013
|
-
|
|
1014
|
-
"Video"
|
|
1015
|
-
],
|
|
1016
|
-
"models": [
|
|
1017
|
-
"LTXV"
|
|
1018
|
-
],
|
|
739
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
740
|
+
"models": ["LTXV"],
|
|
1019
741
|
"date": "2025-03-01"
|
|
1020
742
|
},
|
|
1021
743
|
{
|
|
1022
744
|
"name": "ltxv_image_to_video",
|
|
1023
|
-
"title": "LTXV Image
|
|
745
|
+
"title": "LTXV Image to Video",
|
|
1024
746
|
"mediaType": "image",
|
|
1025
747
|
"mediaSubtype": "webp",
|
|
1026
|
-
"description": "
|
|
748
|
+
"description": "Generate videos from still images.",
|
|
1027
749
|
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1028
|
-
"tags": [
|
|
1029
|
-
|
|
1030
|
-
"Video"
|
|
1031
|
-
],
|
|
1032
|
-
"models": [
|
|
1033
|
-
"LTXV"
|
|
1034
|
-
],
|
|
750
|
+
"tags": ["Image vers Vidéo", "Vidéo"],
|
|
751
|
+
"models": ["LTXV"],
|
|
1035
752
|
"date": "2025-03-01"
|
|
1036
753
|
},
|
|
1037
754
|
{
|
|
1038
755
|
"name": "mochi_text_to_video_example",
|
|
1039
|
-
"title": "Mochi
|
|
756
|
+
"title": "Mochi Text to Video",
|
|
1040
757
|
"mediaType": "image",
|
|
1041
758
|
"mediaSubtype": "webp",
|
|
1042
|
-
"description": "
|
|
759
|
+
"description": "Generate videos from text prompts using Mochi model.",
|
|
1043
760
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
|
|
1044
|
-
"tags": [
|
|
1045
|
-
|
|
1046
|
-
"Video"
|
|
1047
|
-
],
|
|
1048
|
-
"models": [
|
|
1049
|
-
"Mochi"
|
|
1050
|
-
],
|
|
761
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
762
|
+
"models": ["Mochi"],
|
|
1051
763
|
"date": "2025-03-01"
|
|
1052
764
|
},
|
|
1053
765
|
{
|
|
1054
766
|
"name": "hunyuan_video_text_to_video",
|
|
1055
|
-
"title": "Hunyuan
|
|
767
|
+
"title": "Hunyuan Video Text to Video",
|
|
1056
768
|
"mediaType": "image",
|
|
1057
769
|
"mediaSubtype": "webp",
|
|
1058
|
-
"description": "
|
|
770
|
+
"description": "Generate videos from text prompts using Hunyuan model.",
|
|
1059
771
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
|
|
1060
|
-
"tags": [
|
|
1061
|
-
|
|
1062
|
-
"Video"
|
|
1063
|
-
],
|
|
1064
|
-
"models": [
|
|
1065
|
-
"Hunyuan Video"
|
|
1066
|
-
],
|
|
772
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
773
|
+
"models": ["Hunyuan Video"],
|
|
1067
774
|
"date": "2025-03-01"
|
|
1068
775
|
},
|
|
1069
776
|
{
|
|
1070
777
|
"name": "image_to_video",
|
|
1071
|
-
"title": "SVD Image
|
|
778
|
+
"title": "SVD Image to Video",
|
|
1072
779
|
"mediaType": "image",
|
|
1073
780
|
"mediaSubtype": "webp",
|
|
1074
|
-
"description": "
|
|
781
|
+
"description": "Generate videos from still images.",
|
|
1075
782
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
1076
|
-
"tags": [
|
|
1077
|
-
|
|
1078
|
-
"Video"
|
|
1079
|
-
],
|
|
1080
|
-
"models": [
|
|
1081
|
-
"SVD"
|
|
1082
|
-
],
|
|
783
|
+
"tags": ["Image vers Vidéo", "Vidéo"],
|
|
784
|
+
"models": ["SVD"],
|
|
1083
785
|
"date": "2025-03-01"
|
|
1084
786
|
},
|
|
1085
787
|
{
|
|
1086
788
|
"name": "txt_to_image_to_video",
|
|
1087
|
-
"title": "SVD
|
|
789
|
+
"title": "SVD Text to Image to Video",
|
|
1088
790
|
"mediaType": "image",
|
|
1089
791
|
"mediaSubtype": "webp",
|
|
1090
|
-
"description": "
|
|
792
|
+
"description": "Generate videos by first creating images from text prompts.",
|
|
1091
793
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
1092
|
-
"tags": [
|
|
1093
|
-
|
|
1094
|
-
"Video"
|
|
1095
|
-
],
|
|
1096
|
-
"models": [
|
|
1097
|
-
"SVD"
|
|
1098
|
-
],
|
|
794
|
+
"tags": ["Texte vers Vidéo", "Vidéo"],
|
|
795
|
+
"models": ["SVD"],
|
|
1099
796
|
"date": "2025-03-01"
|
|
1100
797
|
}
|
|
1101
798
|
]
|
|
@@ -1111,64 +808,42 @@
|
|
|
1111
808
|
"title": "Stable Audio",
|
|
1112
809
|
"mediaType": "audio",
|
|
1113
810
|
"mediaSubtype": "mp3",
|
|
1114
|
-
"description": "
|
|
1115
|
-
"tags": [
|
|
1116
|
-
|
|
1117
|
-
"Audio"
|
|
1118
|
-
],
|
|
1119
|
-
"models": [
|
|
1120
|
-
"Stable Audio"
|
|
1121
|
-
],
|
|
811
|
+
"description": "Generate audio from text prompts using Stable Audio.",
|
|
812
|
+
"tags": ["Texte vers Audio", "Audio"],
|
|
813
|
+
"models": ["Stable Audio"],
|
|
1122
814
|
"date": "2025-03-01",
|
|
1123
815
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/"
|
|
1124
816
|
},
|
|
1125
817
|
{
|
|
1126
818
|
"name": "audio_ace_step_1_t2a_instrumentals",
|
|
1127
|
-
"title": "ACE-Step v1
|
|
819
|
+
"title": "ACE-Step v1 Text to Instrumentals Music",
|
|
1128
820
|
"mediaType": "audio",
|
|
1129
821
|
"mediaSubtype": "mp3",
|
|
1130
|
-
"description": "
|
|
1131
|
-
"tags": [
|
|
1132
|
-
|
|
1133
|
-
"Audio",
|
|
1134
|
-
"Instrumentals"
|
|
1135
|
-
],
|
|
1136
|
-
"models": [
|
|
1137
|
-
"ACE-Step v1"
|
|
1138
|
-
],
|
|
822
|
+
"description": "Generate instrumental music from text prompts using ACE-Step v1.",
|
|
823
|
+
"tags": ["Texte vers Audio", "Audio", "Instrumentals"],
|
|
824
|
+
"models": ["ACE-Step v1"],
|
|
1139
825
|
"date": "2025-03-01",
|
|
1140
826
|
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
|
|
1141
827
|
},
|
|
1142
828
|
{
|
|
1143
829
|
"name": "audio_ace_step_1_t2a_song",
|
|
1144
|
-
"title": "ACE Step v1
|
|
830
|
+
"title": "ACE Step v1 Text to Song",
|
|
1145
831
|
"mediaType": "audio",
|
|
1146
832
|
"mediaSubtype": "mp3",
|
|
1147
|
-
"description": "
|
|
1148
|
-
"tags": [
|
|
1149
|
-
|
|
1150
|
-
"Audio",
|
|
1151
|
-
"Song"
|
|
1152
|
-
],
|
|
1153
|
-
"models": [
|
|
1154
|
-
"ACE-Step v1"
|
|
1155
|
-
],
|
|
833
|
+
"description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
|
|
834
|
+
"tags": ["Texte vers Audio", "Audio", "Song"],
|
|
835
|
+
"models": ["ACE-Step v1"],
|
|
1156
836
|
"date": "2025-03-01",
|
|
1157
837
|
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
|
|
1158
838
|
},
|
|
1159
839
|
{
|
|
1160
840
|
"name": "audio_ace_step_1_m2m_editing",
|
|
1161
|
-
"title": "ACE Step v1
|
|
841
|
+
"title": "ACE Step v1 M2M Editing",
|
|
1162
842
|
"mediaType": "audio",
|
|
1163
843
|
"mediaSubtype": "mp3",
|
|
1164
|
-
"description": "
|
|
1165
|
-
"tags": [
|
|
1166
|
-
|
|
1167
|
-
"Audio"
|
|
1168
|
-
],
|
|
1169
|
-
"models": [
|
|
1170
|
-
"ACE-Step v1"
|
|
1171
|
-
],
|
|
844
|
+
"description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
|
|
845
|
+
"tags": ["Édition Audio", "Audio"],
|
|
846
|
+
"models": ["ACE-Step v1"],
|
|
1172
847
|
"date": "2025-03-01",
|
|
1173
848
|
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
|
|
1174
849
|
}
|
|
@@ -1177,227 +852,148 @@
|
|
|
1177
852
|
{
|
|
1178
853
|
"moduleName": "default",
|
|
1179
854
|
"category": "TOOLS & BUILDING",
|
|
1180
|
-
"title": "API
|
|
855
|
+
"title": "Image API",
|
|
1181
856
|
"type": "image",
|
|
1182
857
|
"templates": [
|
|
1183
858
|
{
|
|
1184
859
|
"name": "api_bfl_flux_1_kontext_multiple_images_input",
|
|
1185
|
-
"title": "BFL Flux.1 Kontext
|
|
1186
|
-
"description": "
|
|
860
|
+
"title": "BFL Flux.1 Kontext Multiple Image Input",
|
|
861
|
+
"description": "Input multiple images and edit them with Flux.1 Kontext.",
|
|
1187
862
|
"mediaType": "image",
|
|
1188
863
|
"mediaSubtype": "webp",
|
|
1189
864
|
"thumbnailVariant": "compareSlider",
|
|
1190
865
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
1191
|
-
"tags": [
|
|
1192
|
-
|
|
1193
|
-
"Image"
|
|
1194
|
-
],
|
|
1195
|
-
"models": [
|
|
1196
|
-
"Flux"
|
|
1197
|
-
],
|
|
866
|
+
"tags": ["Édition d'Image", "Image"],
|
|
867
|
+
"models": ["Flux"],
|
|
1198
868
|
"date": "2025-05-29"
|
|
1199
869
|
},
|
|
1200
870
|
{
|
|
1201
871
|
"name": "api_bfl_flux_1_kontext_pro_image",
|
|
1202
872
|
"title": "BFL Flux.1 Kontext Pro",
|
|
1203
|
-
"description": "
|
|
873
|
+
"description": "Edit images with Flux.1 Kontext pro image.",
|
|
1204
874
|
"mediaType": "image",
|
|
1205
875
|
"mediaSubtype": "webp",
|
|
1206
876
|
"thumbnailVariant": "compareSlider",
|
|
1207
877
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
1208
|
-
"tags": [
|
|
1209
|
-
|
|
1210
|
-
"Image"
|
|
1211
|
-
],
|
|
1212
|
-
"models": [
|
|
1213
|
-
"Flux"
|
|
1214
|
-
],
|
|
878
|
+
"tags": ["Édition d'Image", "Image"],
|
|
879
|
+
"models": ["Flux"],
|
|
1215
880
|
"date": "2025-05-29"
|
|
1216
881
|
},
|
|
1217
882
|
{
|
|
1218
883
|
"name": "api_bfl_flux_1_kontext_max_image",
|
|
1219
884
|
"title": "BFL Flux.1 Kontext Max",
|
|
1220
|
-
"description": "
|
|
885
|
+
"description": "Edit images with Flux.1 Kontext max image.",
|
|
1221
886
|
"mediaType": "image",
|
|
1222
887
|
"mediaSubtype": "webp",
|
|
1223
888
|
"thumbnailVariant": "compareSlider",
|
|
1224
889
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
|
|
1225
|
-
"tags": [
|
|
1226
|
-
|
|
1227
|
-
"Image"
|
|
1228
|
-
],
|
|
1229
|
-
"models": [
|
|
1230
|
-
"Flux"
|
|
1231
|
-
],
|
|
890
|
+
"tags": ["Édition d'Image", "Image"],
|
|
891
|
+
"models": ["Flux"],
|
|
1232
892
|
"date": "2025-05-29"
|
|
1233
893
|
},
|
|
1234
894
|
{
|
|
1235
895
|
"name": "api_bfl_flux_pro_t2i",
|
|
1236
|
-
"title": "BFL Flux[Pro]
|
|
1237
|
-
"description": "
|
|
896
|
+
"title": "BFL Flux[Pro]: Text to Image",
|
|
897
|
+
"description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
|
|
1238
898
|
"mediaType": "image",
|
|
1239
899
|
"mediaSubtype": "webp",
|
|
1240
900
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
|
|
1241
|
-
"tags": [
|
|
1242
|
-
|
|
1243
|
-
"Image"
|
|
1244
|
-
],
|
|
1245
|
-
"models": [
|
|
1246
|
-
"Flux"
|
|
1247
|
-
],
|
|
901
|
+
"tags": ["Édition d'Image", "Image"],
|
|
902
|
+
"models": ["Flux"],
|
|
1248
903
|
"date": "2025-05-01"
|
|
1249
904
|
},
|
|
1250
905
|
{
|
|
1251
906
|
"name": "api_luma_photon_i2i",
|
|
1252
|
-
"title": "Luma Photon
|
|
1253
|
-
"description": "
|
|
907
|
+
"title": "Luma Photon: Image to Image",
|
|
908
|
+
"description": "Guider la génération d'images en utilisant une combinaison d'images et de prompt.",
|
|
1254
909
|
"mediaType": "image",
|
|
1255
910
|
"mediaSubtype": "webp",
|
|
1256
911
|
"thumbnailVariant": "compareSlider",
|
|
1257
|
-
"tags": [
|
|
1258
|
-
|
|
1259
|
-
"Image",
|
|
1260
|
-
"API"
|
|
1261
|
-
],
|
|
1262
|
-
"models": [
|
|
1263
|
-
"Luma Photon"
|
|
1264
|
-
],
|
|
912
|
+
"tags": ["Image vers Image", "Image", "API"],
|
|
913
|
+
"models": ["Luma Photon"],
|
|
1265
914
|
"date": "2025-03-01"
|
|
1266
915
|
},
|
|
1267
916
|
{
|
|
1268
917
|
"name": "api_luma_photon_style_ref",
|
|
1269
|
-
"title": "Luma Photon
|
|
1270
|
-
"description": "
|
|
918
|
+
"title": "Luma Photon: Style Reference",
|
|
919
|
+
"description": "Générer des images en mélangeant des références de style avec un contrôle précis en utilisant Luma Photon.",
|
|
1271
920
|
"mediaType": "image",
|
|
1272
921
|
"mediaSubtype": "webp",
|
|
1273
922
|
"thumbnailVariant": "compareSlider",
|
|
1274
|
-
"tags": [
|
|
1275
|
-
|
|
1276
|
-
"Image",
|
|
1277
|
-
"API",
|
|
1278
|
-
"Style Transfer"
|
|
1279
|
-
],
|
|
1280
|
-
"models": [
|
|
1281
|
-
"Luma Photon"
|
|
1282
|
-
],
|
|
923
|
+
"tags": ["Texte vers Image", "Image", "API", "Transfert de Style"],
|
|
924
|
+
"models": ["Luma Photon"],
|
|
1283
925
|
"date": "2025-03-01"
|
|
1284
926
|
},
|
|
1285
927
|
{
|
|
1286
928
|
"name": "api_recraft_image_gen_with_color_control",
|
|
1287
|
-
"title": "Recraft
|
|
1288
|
-
"description": "
|
|
1289
|
-
"mediaType": "image",
|
|
1290
|
-
"mediaSubtype": "webp",
|
|
1291
|
-
"tags": [
|
|
1292
|
-
|
|
1293
|
-
"Image",
|
|
1294
|
-
"API",
|
|
1295
|
-
"Color Control"
|
|
1296
|
-
],
|
|
1297
|
-
"models": [
|
|
1298
|
-
"Recraft"
|
|
1299
|
-
],
|
|
929
|
+
"title": "Recraft: Color Control Image Generation",
|
|
930
|
+
"description": "Générer des images avec des palettes de couleurs personnalisées et des visuels spécifiques à la marque en utilisant Recraft.",
|
|
931
|
+
"mediaType": "image",
|
|
932
|
+
"mediaSubtype": "webp",
|
|
933
|
+
"tags": ["Texte vers Image", "Image", "API", "Contrôle de Couleur"],
|
|
934
|
+
"models": ["Recraft"],
|
|
1300
935
|
"date": "2025-03-01"
|
|
1301
936
|
},
|
|
1302
937
|
{
|
|
1303
938
|
"name": "api_recraft_image_gen_with_style_control",
|
|
1304
|
-
"title": "Recraft
|
|
1305
|
-
"description": "
|
|
1306
|
-
"mediaType": "image",
|
|
1307
|
-
"mediaSubtype": "webp",
|
|
1308
|
-
"tags": [
|
|
1309
|
-
|
|
1310
|
-
"Image",
|
|
1311
|
-
"API",
|
|
1312
|
-
"Style Control"
|
|
1313
|
-
],
|
|
1314
|
-
"models": [
|
|
1315
|
-
"Recraft"
|
|
1316
|
-
],
|
|
939
|
+
"title": "Recraft: Style Control Image Generation",
|
|
940
|
+
"description": "Contrôler le style avec des exemples visuels, aligner le positionnement et affiner les objets. Stocker et partager des styles pour une cohérence de marque parfaite.",
|
|
941
|
+
"mediaType": "image",
|
|
942
|
+
"mediaSubtype": "webp",
|
|
943
|
+
"tags": ["Texte vers Image", "Image", "API", "Contrôle de Style"],
|
|
944
|
+
"models": ["Recraft"],
|
|
1317
945
|
"date": "2025-03-01"
|
|
1318
946
|
},
|
|
1319
947
|
{
|
|
1320
948
|
"name": "api_recraft_vector_gen",
|
|
1321
|
-
"title": "Recraft
|
|
1322
|
-
"description": "
|
|
1323
|
-
"mediaType": "image",
|
|
1324
|
-
"mediaSubtype": "webp",
|
|
1325
|
-
"tags": [
|
|
1326
|
-
|
|
1327
|
-
"Image",
|
|
1328
|
-
"API",
|
|
1329
|
-
"Vector"
|
|
1330
|
-
],
|
|
1331
|
-
"models": [
|
|
1332
|
-
"Recraft"
|
|
1333
|
-
],
|
|
949
|
+
"title": "Recraft: Vector Generation",
|
|
950
|
+
"description": "Générer des images vectorielles de haute qualité à partir de prompts textuels en utilisant le générateur AI vectoriel de Recraft.",
|
|
951
|
+
"mediaType": "image",
|
|
952
|
+
"mediaSubtype": "webp",
|
|
953
|
+
"tags": ["Texte vers Image", "Image", "API", "Vector"],
|
|
954
|
+
"models": ["Recraft"],
|
|
1334
955
|
"date": "2025-03-01"
|
|
1335
956
|
},
|
|
1336
957
|
{
|
|
1337
958
|
"name": "api_runway_text_to_image",
|
|
1338
|
-
"title": "Runway
|
|
1339
|
-
"description": "
|
|
1340
|
-
"mediaType": "image",
|
|
1341
|
-
"mediaSubtype": "webp",
|
|
1342
|
-
"tags": [
|
|
1343
|
-
|
|
1344
|
-
"Image",
|
|
1345
|
-
"API"
|
|
1346
|
-
],
|
|
1347
|
-
"models": [
|
|
1348
|
-
"Runway"
|
|
1349
|
-
],
|
|
959
|
+
"title": "Runway: Text to Image",
|
|
960
|
+
"description": "Generate high-quality images from text prompts using Runway's AI model.",
|
|
961
|
+
"mediaType": "image",
|
|
962
|
+
"mediaSubtype": "webp",
|
|
963
|
+
"tags": ["Texte vers Image", "Image", "API"],
|
|
964
|
+
"models": ["Runway"],
|
|
1350
965
|
"date": "2025-03-01"
|
|
1351
966
|
},
|
|
1352
967
|
{
|
|
1353
968
|
"name": "api_runway_reference_to_image",
|
|
1354
|
-
"title": "Runway
|
|
1355
|
-
"description": "
|
|
969
|
+
"title": "Runway: Reference to Image",
|
|
970
|
+
"description": "Generate new images based on reference styles and compositions with Runway's AI.",
|
|
1356
971
|
"mediaType": "image",
|
|
1357
972
|
"thumbnailVariant": "compareSlider",
|
|
1358
973
|
"mediaSubtype": "webp",
|
|
1359
|
-
"tags": [
|
|
1360
|
-
|
|
1361
|
-
"Image",
|
|
1362
|
-
"API",
|
|
1363
|
-
"Style Transfer"
|
|
1364
|
-
],
|
|
1365
|
-
"models": [
|
|
1366
|
-
"Runway"
|
|
1367
|
-
],
|
|
974
|
+
"tags": ["Image vers Image", "Image", "API", "Transfert de Style"],
|
|
975
|
+
"models": ["Runway"],
|
|
1368
976
|
"date": "2025-03-01"
|
|
1369
977
|
},
|
|
1370
978
|
{
|
|
1371
979
|
"name": "api_stability_ai_stable_image_ultra_t2i",
|
|
1372
|
-
"title": "Stability AI
|
|
1373
|
-
"description": "
|
|
1374
|
-
"mediaType": "image",
|
|
1375
|
-
"mediaSubtype": "webp",
|
|
1376
|
-
"tags": [
|
|
1377
|
-
|
|
1378
|
-
"Image",
|
|
1379
|
-
"API"
|
|
1380
|
-
],
|
|
1381
|
-
"models": [
|
|
1382
|
-
"Stable Image Ultra"
|
|
1383
|
-
],
|
|
980
|
+
"title": "Stability AI: Stable Image Ultra Text to Image",
|
|
981
|
+
"description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
|
982
|
+
"mediaType": "image",
|
|
983
|
+
"mediaSubtype": "webp",
|
|
984
|
+
"tags": ["Texte vers Image", "Image", "API"],
|
|
985
|
+
"models": ["Stable Image Ultra"],
|
|
1384
986
|
"date": "2025-03-01"
|
|
1385
987
|
},
|
|
1386
988
|
{
|
|
1387
989
|
"name": "api_stability_ai_i2i",
|
|
1388
|
-
"title": "Stability AI
|
|
1389
|
-
"description": "
|
|
990
|
+
"title": "Stability AI: Image to Image",
|
|
991
|
+
"description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
|
|
1390
992
|
"mediaType": "image",
|
|
1391
993
|
"thumbnailVariant": "compareSlider",
|
|
1392
994
|
"mediaSubtype": "webp",
|
|
1393
|
-
"tags": [
|
|
1394
|
-
|
|
1395
|
-
"Image",
|
|
1396
|
-
"API"
|
|
1397
|
-
],
|
|
1398
|
-
"models": [
|
|
1399
|
-
"Stability AI"
|
|
1400
|
-
],
|
|
995
|
+
"tags": ["Image vers Image", "Image", "API"],
|
|
996
|
+
"models": ["Stability AI"],
|
|
1401
997
|
"date": "2025-03-01"
|
|
1402
998
|
},
|
|
1403
999
|
{
|
|
@@ -1406,14 +1002,8 @@
|
|
|
1406
1002
|
"description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
|
|
1407
1003
|
"mediaType": "image",
|
|
1408
1004
|
"mediaSubtype": "webp",
|
|
1409
|
-
"tags": [
|
|
1410
|
-
|
|
1411
|
-
"Image",
|
|
1412
|
-
"API"
|
|
1413
|
-
],
|
|
1414
|
-
"models": [
|
|
1415
|
-
"SD3.5"
|
|
1416
|
-
],
|
|
1005
|
+
"tags": ["Texte vers Image", "Image", "API"],
|
|
1006
|
+
"models": ["SD3.5"],
|
|
1417
1007
|
"date": "2025-03-01"
|
|
1418
1008
|
},
|
|
1419
1009
|
{
|
|
@@ -1423,154 +1013,98 @@
|
|
|
1423
1013
|
"mediaType": "image",
|
|
1424
1014
|
"thumbnailVariant": "compareSlider",
|
|
1425
1015
|
"mediaSubtype": "webp",
|
|
1426
|
-
"tags": [
|
|
1427
|
-
|
|
1428
|
-
"Image",
|
|
1429
|
-
"API"
|
|
1430
|
-
],
|
|
1431
|
-
"models": [
|
|
1432
|
-
"SD3.5"
|
|
1433
|
-
],
|
|
1016
|
+
"tags": ["Image vers Image", "Image", "API"],
|
|
1017
|
+
"models": ["SD3.5"],
|
|
1434
1018
|
"date": "2025-03-01"
|
|
1435
1019
|
},
|
|
1436
1020
|
{
|
|
1437
1021
|
"name": "api_ideogram_v3_t2i",
|
|
1438
|
-
"title": "Ideogram V3
|
|
1439
|
-
"description": "
|
|
1440
|
-
"mediaType": "image",
|
|
1441
|
-
"mediaSubtype": "webp",
|
|
1442
|
-
"tags": [
|
|
1443
|
-
|
|
1444
|
-
"Image",
|
|
1445
|
-
"API",
|
|
1446
|
-
"Text Rendering"
|
|
1447
|
-
],
|
|
1448
|
-
"models": [
|
|
1449
|
-
"Ideogram V3"
|
|
1450
|
-
],
|
|
1022
|
+
"title": "Ideogram V3: Text to Image",
|
|
1023
|
+
"description": "Générer des images de qualité professionnelle avec un excellent alignement des prompts, du photoréalisme et un rendu de texte en utilisant Ideogram V3.",
|
|
1024
|
+
"mediaType": "image",
|
|
1025
|
+
"mediaSubtype": "webp",
|
|
1026
|
+
"tags": ["Texte vers Image", "Image", "API", "Text Rendering"],
|
|
1027
|
+
"models": ["Ideogram V3"],
|
|
1451
1028
|
"date": "2025-03-01"
|
|
1452
1029
|
},
|
|
1453
1030
|
{
|
|
1454
1031
|
"name": "api_openai_image_1_t2i",
|
|
1455
|
-
"title": "OpenAI
|
|
1456
|
-
"description": "
|
|
1457
|
-
"mediaType": "image",
|
|
1458
|
-
"mediaSubtype": "webp",
|
|
1459
|
-
"tags": [
|
|
1460
|
-
|
|
1461
|
-
"Image",
|
|
1462
|
-
"API"
|
|
1463
|
-
],
|
|
1464
|
-
"models": [
|
|
1465
|
-
"GPT-Image-1"
|
|
1466
|
-
],
|
|
1032
|
+
"title": "OpenAI: GPT-Image-1 Text to Image",
|
|
1033
|
+
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI GPT Image 1.",
|
|
1034
|
+
"mediaType": "image",
|
|
1035
|
+
"mediaSubtype": "webp",
|
|
1036
|
+
"tags": ["Texte vers Image", "Image", "API"],
|
|
1037
|
+
"models": ["GPT-Image-1"],
|
|
1467
1038
|
"date": "2025-03-01",
|
|
1468
1039
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
|
|
1469
1040
|
},
|
|
1470
1041
|
{
|
|
1471
1042
|
"name": "api_openai_image_1_i2i",
|
|
1472
|
-
"title": "OpenAI
|
|
1473
|
-
"description": "
|
|
1043
|
+
"title": "OpenAI: GPT-Image-1 Image to Image",
|
|
1044
|
+
"description": "Générer des images à partir d'images d'entrée en utilisant l'API OpenAI GPT Image 1.",
|
|
1474
1045
|
"mediaType": "image",
|
|
1475
1046
|
"mediaSubtype": "webp",
|
|
1476
1047
|
"thumbnailVariant": "compareSlider",
|
|
1477
|
-
"tags": [
|
|
1478
|
-
|
|
1479
|
-
"Image",
|
|
1480
|
-
"API"
|
|
1481
|
-
],
|
|
1482
|
-
"models": [
|
|
1483
|
-
"GPT-Image-1"
|
|
1484
|
-
],
|
|
1048
|
+
"tags": ["Image vers Image", "Image", "API"],
|
|
1049
|
+
"models": ["GPT-Image-1"],
|
|
1485
1050
|
"date": "2025-03-01",
|
|
1486
1051
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
|
|
1487
1052
|
},
|
|
1488
1053
|
{
|
|
1489
1054
|
"name": "api_openai_image_1_inpaint",
|
|
1490
|
-
"title": "OpenAI
|
|
1491
|
-
"description": "
|
|
1055
|
+
"title": "OpenAI: GPT-Image-1 Inpaint",
|
|
1056
|
+
"description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
|
|
1492
1057
|
"mediaType": "image",
|
|
1493
1058
|
"mediaSubtype": "webp",
|
|
1494
1059
|
"thumbnailVariant": "compareSlider",
|
|
1495
|
-
"tags": [
|
|
1496
|
-
|
|
1497
|
-
"Image",
|
|
1498
|
-
"API"
|
|
1499
|
-
],
|
|
1500
|
-
"models": [
|
|
1501
|
-
"GPT-Image-1"
|
|
1502
|
-
],
|
|
1060
|
+
"tags": ["Inpainting", "Image", "API"],
|
|
1061
|
+
"models": ["GPT-Image-1"],
|
|
1503
1062
|
"date": "2025-03-01",
|
|
1504
1063
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
|
|
1505
1064
|
},
|
|
1506
1065
|
{
|
|
1507
1066
|
"name": "api_openai_image_1_multi_inputs",
|
|
1508
|
-
"title": "OpenAI
|
|
1509
|
-
"description": "
|
|
1067
|
+
"title": "OpenAI: GPT-Image-1 Multi Inputs",
|
|
1068
|
+
"description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
|
|
1510
1069
|
"mediaType": "image",
|
|
1511
1070
|
"mediaSubtype": "webp",
|
|
1512
1071
|
"thumbnailVariant": "compareSlider",
|
|
1513
|
-
"tags": [
|
|
1514
|
-
|
|
1515
|
-
"Image",
|
|
1516
|
-
"API",
|
|
1517
|
-
"Multi Input"
|
|
1518
|
-
],
|
|
1519
|
-
"models": [
|
|
1520
|
-
"GPT-Image-1"
|
|
1521
|
-
],
|
|
1072
|
+
"tags": ["Texte vers Image", "Image", "API", "Multi Input"],
|
|
1073
|
+
"models": ["GPT-Image-1"],
|
|
1522
1074
|
"date": "2025-03-01",
|
|
1523
1075
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
|
|
1524
1076
|
},
|
|
1525
1077
|
{
|
|
1526
1078
|
"name": "api_openai_dall_e_2_t2i",
|
|
1527
|
-
"title": "OpenAI
|
|
1528
|
-
"description": "
|
|
1529
|
-
"mediaType": "image",
|
|
1530
|
-
"mediaSubtype": "webp",
|
|
1531
|
-
"tags": [
|
|
1532
|
-
|
|
1533
|
-
"Image",
|
|
1534
|
-
"API"
|
|
1535
|
-
],
|
|
1536
|
-
"models": [
|
|
1537
|
-
"Dall-E 2"
|
|
1538
|
-
],
|
|
1079
|
+
"title": "OpenAI: Dall-E 2 Text to Image",
|
|
1080
|
+
"description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
|
|
1081
|
+
"mediaType": "image",
|
|
1082
|
+
"mediaSubtype": "webp",
|
|
1083
|
+
"tags": ["Texte vers Image", "Image", "API"],
|
|
1084
|
+
"models": ["Dall-E 2"],
|
|
1539
1085
|
"date": "2025-03-01",
|
|
1540
1086
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
|
|
1541
1087
|
},
|
|
1542
1088
|
{
|
|
1543
1089
|
"name": "api_openai_dall_e_2_inpaint",
|
|
1544
|
-
"title": "OpenAI
|
|
1545
|
-
"description": "
|
|
1090
|
+
"title": "OpenAI: Dall-E 2 Inpaint",
|
|
1091
|
+
"description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
|
|
1546
1092
|
"mediaType": "image",
|
|
1547
1093
|
"mediaSubtype": "webp",
|
|
1548
1094
|
"thumbnailVariant": "compareSlider",
|
|
1549
|
-
"tags": [
|
|
1550
|
-
|
|
1551
|
-
"Image",
|
|
1552
|
-
"API"
|
|
1553
|
-
],
|
|
1554
|
-
"models": [
|
|
1555
|
-
"Dall-E 2"
|
|
1556
|
-
],
|
|
1095
|
+
"tags": ["Inpainting", "Image", "API"],
|
|
1096
|
+
"models": ["Dall-E 2"],
|
|
1557
1097
|
"date": "2025-03-01",
|
|
1558
1098
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
|
|
1559
1099
|
},
|
|
1560
1100
|
{
|
|
1561
1101
|
"name": "api_openai_dall_e_3_t2i",
|
|
1562
|
-
"title": "OpenAI
|
|
1563
|
-
"description": "
|
|
1564
|
-
"mediaType": "image",
|
|
1565
|
-
"mediaSubtype": "webp",
|
|
1566
|
-
"tags": [
|
|
1567
|
-
|
|
1568
|
-
"Image",
|
|
1569
|
-
"API"
|
|
1570
|
-
],
|
|
1571
|
-
"models": [
|
|
1572
|
-
"Dall-E 3"
|
|
1573
|
-
],
|
|
1102
|
+
"title": "OpenAI: Dall-E 3 Text to Image",
|
|
1103
|
+
"description": "Générer des images à partir de prompts textuels en utilisant l'API OpenAI Dall-E 3.",
|
|
1104
|
+
"mediaType": "image",
|
|
1105
|
+
"mediaSubtype": "webp",
|
|
1106
|
+
"tags": ["Texte vers Image", "Image", "API"],
|
|
1107
|
+
"models": ["Dall-E 3"],
|
|
1574
1108
|
"date": "2025-03-01",
|
|
1575
1109
|
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3"
|
|
1576
1110
|
}
|
|
@@ -1579,126 +1113,127 @@
|
|
|
1579
1113
|
{
|
|
1580
1114
|
"moduleName": "default",
|
|
1581
1115
|
"category": "TOOLS & BUILDING",
|
|
1582
|
-
"title": "API
|
|
1116
|
+
"title": "Video API",
|
|
1583
1117
|
"type": "video",
|
|
1584
1118
|
"templates": [
|
|
1585
1119
|
{
|
|
1586
1120
|
"name": "api_kling_i2v",
|
|
1587
|
-
"title": "Kling
|
|
1588
|
-
"description": "
|
|
1589
|
-
"mediaType": "image",
|
|
1590
|
-
"mediaSubtype": "webp",
|
|
1591
|
-
"tags": [
|
|
1592
|
-
|
|
1593
|
-
"Video",
|
|
1594
|
-
"API"
|
|
1595
|
-
],
|
|
1596
|
-
"models": [
|
|
1597
|
-
"Kling"
|
|
1598
|
-
],
|
|
1121
|
+
"title": "Kling: Image to Video",
|
|
1122
|
+
"description": "Générer des vidéos avec une excellente adhérence aux prompts pour les actions, expressions et mouvements de caméra en utilisant Kling.",
|
|
1123
|
+
"mediaType": "image",
|
|
1124
|
+
"mediaSubtype": "webp",
|
|
1125
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1126
|
+
"models": ["Kling"],
|
|
1599
1127
|
"date": "2025-03-01",
|
|
1600
1128
|
"tutorialUrl": ""
|
|
1601
1129
|
},
|
|
1602
1130
|
{
|
|
1603
1131
|
"name": "api_kling_effects",
|
|
1604
|
-
"title": "Kling
|
|
1605
|
-
"description": "
|
|
1606
|
-
"mediaType": "image",
|
|
1607
|
-
"mediaSubtype": "webp",
|
|
1608
|
-
"tags": [
|
|
1609
|
-
|
|
1610
|
-
"Video",
|
|
1611
|
-
"API"
|
|
1612
|
-
],
|
|
1613
|
-
"models": [
|
|
1614
|
-
"Kling"
|
|
1615
|
-
],
|
|
1132
|
+
"title": "Kling: Video Effects",
|
|
1133
|
+
"description": "Générer des vidéos dynamiques en appliquant des effets visuels aux images en utilisant Kling.",
|
|
1134
|
+
"mediaType": "image",
|
|
1135
|
+
"mediaSubtype": "webp",
|
|
1136
|
+
"tags": ["Effets Vidéo", "Vidéo", "API"],
|
|
1137
|
+
"models": ["Kling"],
|
|
1616
1138
|
"date": "2025-03-01",
|
|
1617
1139
|
"tutorialUrl": ""
|
|
1618
1140
|
},
|
|
1619
1141
|
{
|
|
1620
1142
|
"name": "api_kling_flf",
|
|
1621
|
-
"title": "Kling
|
|
1622
|
-
"description": "
|
|
1623
|
-
"mediaType": "image",
|
|
1624
|
-
"mediaSubtype": "webp",
|
|
1625
|
-
"tags": [
|
|
1626
|
-
|
|
1627
|
-
"Video",
|
|
1628
|
-
"API",
|
|
1629
|
-
"Frame Control"
|
|
1630
|
-
],
|
|
1631
|
-
"models": [
|
|
1632
|
-
"Kling"
|
|
1633
|
-
],
|
|
1143
|
+
"title": "Kling: FLF2V",
|
|
1144
|
+
"description": "Générer des vidéos en contrôlant les première et dernière images.",
|
|
1145
|
+
"mediaType": "image",
|
|
1146
|
+
"mediaSubtype": "webp",
|
|
1147
|
+
"tags": ["Génération de Vidéo", "Vidéo", "API", "Contrôle de Cadre"],
|
|
1148
|
+
"models": ["Kling"],
|
|
1634
1149
|
"date": "2025-03-01",
|
|
1635
1150
|
"tutorialUrl": ""
|
|
1636
1151
|
},
|
|
1152
|
+
{
|
|
1153
|
+
"name": "api_vidu_text_to_video",
|
|
1154
|
+
"title": "Vidu: Text to Video",
|
|
1155
|
+
"description": "Generate high-quality 1080p videos from text prompts with adjustable movement amplitude and duration control using Vidu's advanced AI model.",
|
|
1156
|
+
"mediaType": "image",
|
|
1157
|
+
"mediaSubtype": "webp",
|
|
1158
|
+
"tags": ["Texte vers Vidéo", "Vidéo", "API"],
|
|
1159
|
+
"models": ["Vidu"],
|
|
1160
|
+
"date": "2025-08-23",
|
|
1161
|
+
"tutorialUrl": ""
|
|
1162
|
+
},
|
|
1163
|
+
{
|
|
1164
|
+
"name": "api_vidu_image_to_video",
|
|
1165
|
+
"title": "Vidu: Image to Video",
|
|
1166
|
+
"description": "Transform static images into dynamic 1080p videos with precise motion control and customizable movement amplitude using Vidu.",
|
|
1167
|
+
"mediaType": "image",
|
|
1168
|
+
"mediaSubtype": "webp",
|
|
1169
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1170
|
+
"models": ["Vidu"],
|
|
1171
|
+
"date": "2025-08-23",
|
|
1172
|
+
"tutorialUrl": ""
|
|
1173
|
+
},
|
|
1174
|
+
{
|
|
1175
|
+
"name": "api_vidu_reference_to_video",
|
|
1176
|
+
"title": "Vidu: Reference to Video",
|
|
1177
|
+
"description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
|
|
1178
|
+
"mediaType": "image",
|
|
1179
|
+
"mediaSubtype": "webp",
|
|
1180
|
+
"tags": ["Référence vers Vidéo", "Vidéo", "API"],
|
|
1181
|
+
"models": ["Vidu"],
|
|
1182
|
+
"date": "2025-08-23",
|
|
1183
|
+
"tutorialUrl": ""
|
|
1184
|
+
},
|
|
1185
|
+
{
|
|
1186
|
+
"name": "api_vidu_start_end_to_video",
|
|
1187
|
+
"title": "Vidu: Start End to Video",
|
|
1188
|
+
"description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
|
|
1189
|
+
"mediaType": "image",
|
|
1190
|
+
"mediaSubtype": "webp",
|
|
1191
|
+
"tags": ["FLF2V", "Vidéo", "API"],
|
|
1192
|
+
"models": ["Vidu"],
|
|
1193
|
+
"date": "2025-08-23",
|
|
1194
|
+
"tutorialUrl": ""
|
|
1195
|
+
},
|
|
1637
1196
|
{
|
|
1638
1197
|
"name": "api_luma_i2v",
|
|
1639
|
-
"title": "Luma
|
|
1640
|
-
"description": "
|
|
1641
|
-
"mediaType": "image",
|
|
1642
|
-
"mediaSubtype": "webp",
|
|
1643
|
-
"tags": [
|
|
1644
|
-
|
|
1645
|
-
"Video",
|
|
1646
|
-
"API"
|
|
1647
|
-
],
|
|
1648
|
-
"models": [
|
|
1649
|
-
"Luma"
|
|
1650
|
-
],
|
|
1198
|
+
"title": "Luma: Image to Video",
|
|
1199
|
+
"description": "Take static images and instantly create magical high quality animations.",
|
|
1200
|
+
"mediaType": "image",
|
|
1201
|
+
"mediaSubtype": "webp",
|
|
1202
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1203
|
+
"models": ["Luma"],
|
|
1651
1204
|
"date": "2025-03-01",
|
|
1652
1205
|
"tutorialUrl": ""
|
|
1653
1206
|
},
|
|
1654
1207
|
{
|
|
1655
1208
|
"name": "api_luma_t2v",
|
|
1656
|
-
"title": "Luma
|
|
1657
|
-
"description": "
|
|
1658
|
-
"mediaType": "image",
|
|
1659
|
-
"mediaSubtype": "webp",
|
|
1660
|
-
"tags": [
|
|
1661
|
-
|
|
1662
|
-
"Video",
|
|
1663
|
-
"API"
|
|
1664
|
-
],
|
|
1665
|
-
"models": [
|
|
1666
|
-
"Luma"
|
|
1667
|
-
],
|
|
1209
|
+
"title": "Luma: Text to Video",
|
|
1210
|
+
"description": "High-quality videos can be generated using simple prompts.",
|
|
1211
|
+
"mediaType": "image",
|
|
1212
|
+
"mediaSubtype": "webp",
|
|
1213
|
+
"tags": ["Texte vers Vidéo", "Vidéo", "API"],
|
|
1214
|
+
"models": ["Luma"],
|
|
1668
1215
|
"date": "2025-03-01",
|
|
1669
1216
|
"tutorialUrl": ""
|
|
1670
1217
|
},
|
|
1671
1218
|
{
|
|
1672
1219
|
"name": "api_moonvalley_text_to_video",
|
|
1673
|
-
"title": "Moonvalley
|
|
1674
|
-
"description": "
|
|
1675
|
-
"mediaType": "image",
|
|
1676
|
-
"mediaSubtype": "webp",
|
|
1677
|
-
"tags": [
|
|
1678
|
-
|
|
1679
|
-
"Video",
|
|
1680
|
-
"API"
|
|
1681
|
-
],
|
|
1682
|
-
"models": [
|
|
1683
|
-
"Moonvalley"
|
|
1684
|
-
],
|
|
1220
|
+
"title": "Moonvalley: Text to Video",
|
|
1221
|
+
"description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
|
|
1222
|
+
"mediaType": "image",
|
|
1223
|
+
"mediaSubtype": "webp",
|
|
1224
|
+
"tags": ["Texte vers Vidéo", "Vidéo", "API"],
|
|
1225
|
+
"models": ["Moonvalley"],
|
|
1685
1226
|
"date": "2025-03-01",
|
|
1686
1227
|
"tutorialUrl": ""
|
|
1687
1228
|
},
|
|
1688
1229
|
{
|
|
1689
1230
|
"name": "api_moonvalley_image_to_video",
|
|
1690
|
-
"title": "Moonvalley
|
|
1691
|
-
"description": "
|
|
1692
|
-
"mediaType": "image",
|
|
1693
|
-
"mediaSubtype": "webp",
|
|
1694
|
-
"tags": [
|
|
1695
|
-
|
|
1696
|
-
"Video",
|
|
1697
|
-
"API"
|
|
1698
|
-
],
|
|
1699
|
-
"models": [
|
|
1700
|
-
"Moonvalley"
|
|
1701
|
-
],
|
|
1231
|
+
"title": "Moonvalley: Image to Video",
|
|
1232
|
+
"description": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
|
|
1233
|
+
"mediaType": "image",
|
|
1234
|
+
"mediaSubtype": "webp",
|
|
1235
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1236
|
+
"models": ["Moonvalley"],
|
|
1702
1237
|
"date": "2025-03-01",
|
|
1703
1238
|
"tutorialUrl": ""
|
|
1704
1239
|
},
|
|
@@ -1709,15 +1244,8 @@
|
|
|
1709
1244
|
"mediaType": "image",
|
|
1710
1245
|
"thumbnailVariant": "hoverDissolve",
|
|
1711
1246
|
"mediaSubtype": "webp",
|
|
1712
|
-
"tags": [
|
|
1713
|
-
|
|
1714
|
-
"Video",
|
|
1715
|
-
"API",
|
|
1716
|
-
"Motion Transfer"
|
|
1717
|
-
],
|
|
1718
|
-
"models": [
|
|
1719
|
-
"Moonvalley"
|
|
1720
|
-
],
|
|
1247
|
+
"tags": ["Vidéo vers Vidéo", "Vidéo", "API", "Transfert de Mouvement"],
|
|
1248
|
+
"models": ["Moonvalley"],
|
|
1721
1249
|
"date": "2025-03-01",
|
|
1722
1250
|
"tutorialUrl": ""
|
|
1723
1251
|
},
|
|
@@ -1728,205 +1256,151 @@
|
|
|
1728
1256
|
"mediaType": "image",
|
|
1729
1257
|
"thumbnailVariant": "hoverDissolve",
|
|
1730
1258
|
"mediaSubtype": "webp",
|
|
1731
|
-
"tags": [
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
"
|
|
1738
|
-
|
|
1739
|
-
|
|
1259
|
+
"tags": ["Vidéo vers Vidéo", "Vidéo", "API", "Contrôle de Pose"],
|
|
1260
|
+
"models": ["Moonvalley"],
|
|
1261
|
+
"date": "2025-03-01",
|
|
1262
|
+
"tutorialUrl": ""
|
|
1263
|
+
},
|
|
1264
|
+
{
|
|
1265
|
+
"name": "api_hailuo_minimax_video",
|
|
1266
|
+
"title": "MiniMax: Video",
|
|
1267
|
+
"description": "Generate high-quality videos from text prompts with optional first-frame control using MiniMax Hailuo-02 model. Supports multiple resolutions (768P/1080P) and durations (6/10s) with intelligent prompt optimization.",
|
|
1268
|
+
"mediaType": "image",
|
|
1269
|
+
"mediaSubtype": "webp",
|
|
1270
|
+
"tags": ["Texte vers Vidéo", "Vidéo", "API"],
|
|
1271
|
+
"models": ["MiniMax"],
|
|
1740
1272
|
"date": "2025-03-01",
|
|
1741
1273
|
"tutorialUrl": ""
|
|
1742
1274
|
},
|
|
1743
1275
|
{
|
|
1744
1276
|
"name": "api_hailuo_minimax_t2v",
|
|
1745
|
-
"title": "MiniMax
|
|
1746
|
-
"description": "
|
|
1747
|
-
"mediaType": "image",
|
|
1748
|
-
"mediaSubtype": "webp",
|
|
1749
|
-
"tags": [
|
|
1750
|
-
|
|
1751
|
-
"Video",
|
|
1752
|
-
"API"
|
|
1753
|
-
],
|
|
1754
|
-
"models": [
|
|
1755
|
-
"MiniMax"
|
|
1756
|
-
],
|
|
1277
|
+
"title": "MiniMax: Text to Video",
|
|
1278
|
+
"description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
|
|
1279
|
+
"mediaType": "image",
|
|
1280
|
+
"mediaSubtype": "webp",
|
|
1281
|
+
"tags": ["Texte vers Vidéo", "Vidéo", "API"],
|
|
1282
|
+
"models": ["MiniMax"],
|
|
1757
1283
|
"date": "2025-03-01",
|
|
1758
1284
|
"tutorialUrl": ""
|
|
1759
1285
|
},
|
|
1760
1286
|
{
|
|
1761
1287
|
"name": "api_hailuo_minimax_i2v",
|
|
1762
|
-
"title": "MiniMax
|
|
1763
|
-
"description": "
|
|
1764
|
-
"mediaType": "image",
|
|
1765
|
-
"mediaSubtype": "webp",
|
|
1766
|
-
"tags": [
|
|
1767
|
-
|
|
1768
|
-
"Video",
|
|
1769
|
-
"API"
|
|
1770
|
-
],
|
|
1771
|
-
"models": [
|
|
1772
|
-
"MiniMax"
|
|
1773
|
-
],
|
|
1288
|
+
"title": "MiniMax: Image to Video",
|
|
1289
|
+
"description": "Generate refined videos from images and text with CGI integration using MiniMax.",
|
|
1290
|
+
"mediaType": "image",
|
|
1291
|
+
"mediaSubtype": "webp",
|
|
1292
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1293
|
+
"models": ["MiniMax"],
|
|
1774
1294
|
"date": "2025-03-01",
|
|
1775
1295
|
"tutorialUrl": ""
|
|
1776
1296
|
},
|
|
1777
1297
|
{
|
|
1778
1298
|
"name": "api_pixverse_i2v",
|
|
1779
|
-
"title": "PixVerse
|
|
1780
|
-
"description": "
|
|
1781
|
-
"mediaType": "image",
|
|
1782
|
-
"mediaSubtype": "webp",
|
|
1783
|
-
"tags": [
|
|
1784
|
-
|
|
1785
|
-
"Video",
|
|
1786
|
-
"API"
|
|
1787
|
-
],
|
|
1788
|
-
"models": [
|
|
1789
|
-
"PixVerse"
|
|
1790
|
-
],
|
|
1299
|
+
"title": "PixVerse: Image to Video",
|
|
1300
|
+
"description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
|
1301
|
+
"mediaType": "image",
|
|
1302
|
+
"mediaSubtype": "webp",
|
|
1303
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1304
|
+
"models": ["PixVerse"],
|
|
1791
1305
|
"date": "2025-03-01",
|
|
1792
1306
|
"tutorialUrl": ""
|
|
1793
1307
|
},
|
|
1794
1308
|
{
|
|
1795
1309
|
"name": "api_pixverse_template_i2v",
|
|
1796
|
-
"title": "PixVerse Templates
|
|
1797
|
-
"description": "
|
|
1798
|
-
"mediaType": "image",
|
|
1799
|
-
"mediaSubtype": "webp",
|
|
1800
|
-
"tags": [
|
|
1801
|
-
|
|
1802
|
-
"Video",
|
|
1803
|
-
"API",
|
|
1804
|
-
"Templates"
|
|
1805
|
-
],
|
|
1806
|
-
"models": [
|
|
1807
|
-
"PixVerse"
|
|
1808
|
-
],
|
|
1310
|
+
"title": "PixVerse Templates: Image to Video",
|
|
1311
|
+
"description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
|
|
1312
|
+
"mediaType": "image",
|
|
1313
|
+
"mediaSubtype": "webp",
|
|
1314
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API", "Modèles"],
|
|
1315
|
+
"models": ["PixVerse"],
|
|
1809
1316
|
"date": "2025-03-01",
|
|
1810
1317
|
"tutorialUrl": ""
|
|
1811
1318
|
},
|
|
1812
1319
|
{
|
|
1813
1320
|
"name": "api_pixverse_t2v",
|
|
1814
|
-
"title": "PixVerse
|
|
1815
|
-
"description": "
|
|
1816
|
-
"mediaType": "image",
|
|
1817
|
-
"mediaSubtype": "webp",
|
|
1818
|
-
"tags": [
|
|
1819
|
-
|
|
1820
|
-
"Video",
|
|
1821
|
-
"API"
|
|
1822
|
-
],
|
|
1823
|
-
"models": [
|
|
1824
|
-
"PixVerse"
|
|
1825
|
-
],
|
|
1321
|
+
"title": "PixVerse: Text to Video",
|
|
1322
|
+
"description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
|
|
1323
|
+
"mediaType": "image",
|
|
1324
|
+
"mediaSubtype": "webp",
|
|
1325
|
+
"tags": ["Texte vers Vidéo", "Vidéo", "API"],
|
|
1326
|
+
"models": ["PixVerse"],
|
|
1826
1327
|
"date": "2025-03-01",
|
|
1827
1328
|
"tutorialUrl": ""
|
|
1828
1329
|
},
|
|
1829
1330
|
{
|
|
1830
1331
|
"name": "api_runway_gen3a_turbo_image_to_video",
|
|
1831
|
-
"title": "Runway
|
|
1832
|
-
"description": "
|
|
1833
|
-
"mediaType": "image",
|
|
1834
|
-
"mediaSubtype": "webp",
|
|
1835
|
-
"tags": [
|
|
1836
|
-
|
|
1837
|
-
"Video",
|
|
1838
|
-
"API"
|
|
1839
|
-
],
|
|
1840
|
-
"models": [
|
|
1841
|
-
"Runway Gen3a Turbo"
|
|
1842
|
-
],
|
|
1332
|
+
"title": "Runway: Gen3a Turbo Image to Video",
|
|
1333
|
+
"description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
|
|
1334
|
+
"mediaType": "image",
|
|
1335
|
+
"mediaSubtype": "webp",
|
|
1336
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1337
|
+
"models": ["Runway Gen3a Turbo"],
|
|
1843
1338
|
"date": "2025-03-01",
|
|
1844
1339
|
"tutorialUrl": ""
|
|
1845
1340
|
},
|
|
1846
1341
|
{
|
|
1847
1342
|
"name": "api_runway_gen4_turo_image_to_video",
|
|
1848
|
-
"title": "Runway
|
|
1849
|
-
"description": "
|
|
1850
|
-
"mediaType": "image",
|
|
1851
|
-
"mediaSubtype": "webp",
|
|
1852
|
-
"tags": [
|
|
1853
|
-
|
|
1854
|
-
"Video",
|
|
1855
|
-
"API"
|
|
1856
|
-
],
|
|
1857
|
-
"models": [
|
|
1858
|
-
"Runway Gen4 Turbo"
|
|
1859
|
-
],
|
|
1343
|
+
"title": "Runway: Gen4 Turbo Image to Video",
|
|
1344
|
+
"description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
|
|
1345
|
+
"mediaType": "image",
|
|
1346
|
+
"mediaSubtype": "webp",
|
|
1347
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1348
|
+
"models": ["Runway Gen4 Turbo"],
|
|
1860
1349
|
"date": "2025-03-01",
|
|
1861
1350
|
"tutorialUrl": ""
|
|
1862
1351
|
},
|
|
1863
1352
|
{
|
|
1864
1353
|
"name": "api_runway_first_last_frame",
|
|
1865
|
-
"title": "Runway
|
|
1866
|
-
"description": "
|
|
1867
|
-
"mediaType": "image",
|
|
1868
|
-
"mediaSubtype": "webp",
|
|
1869
|
-
"tags": [
|
|
1870
|
-
|
|
1871
|
-
"Video",
|
|
1872
|
-
"API",
|
|
1873
|
-
"Frame Control"
|
|
1874
|
-
],
|
|
1875
|
-
"models": [
|
|
1876
|
-
"Runway"
|
|
1877
|
-
],
|
|
1354
|
+
"title": "Runway: First Last Frame to Video",
|
|
1355
|
+
"description": "Generate smooth video transitions between two keyframes with Runway's precision.",
|
|
1356
|
+
"mediaType": "image",
|
|
1357
|
+
"mediaSubtype": "webp",
|
|
1358
|
+
"tags": ["Génération de Vidéo", "Vidéo", "API", "Contrôle de Cadre"],
|
|
1359
|
+
"models": ["Runway"],
|
|
1878
1360
|
"date": "2025-03-01",
|
|
1879
1361
|
"tutorialUrl": ""
|
|
1880
1362
|
},
|
|
1881
1363
|
{
|
|
1882
1364
|
"name": "api_pika_i2v",
|
|
1883
|
-
"title": "Pika
|
|
1884
|
-
"description": "
|
|
1885
|
-
"mediaType": "image",
|
|
1886
|
-
"mediaSubtype": "webp",
|
|
1887
|
-
"tags": [
|
|
1888
|
-
|
|
1889
|
-
"Video",
|
|
1890
|
-
"API"
|
|
1891
|
-
],
|
|
1892
|
-
"models": [
|
|
1893
|
-
"Pika"
|
|
1894
|
-
],
|
|
1365
|
+
"title": "Pika: Image to Video",
|
|
1366
|
+
"description": "Generate smooth animated videos from single static images using Pika AI.",
|
|
1367
|
+
"mediaType": "image",
|
|
1368
|
+
"mediaSubtype": "webp",
|
|
1369
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1370
|
+
"models": ["Pika"],
|
|
1895
1371
|
"date": "2025-03-01",
|
|
1896
1372
|
"tutorialUrl": ""
|
|
1897
1373
|
},
|
|
1898
1374
|
{
|
|
1899
1375
|
"name": "api_pika_scene",
|
|
1900
|
-
"title": "Pika
|
|
1901
|
-
"description": "
|
|
1902
|
-
"mediaType": "image",
|
|
1903
|
-
"mediaSubtype": "webp",
|
|
1904
|
-
"tags": [
|
|
1905
|
-
|
|
1906
|
-
"Video",
|
|
1907
|
-
"API",
|
|
1908
|
-
"Multi Image"
|
|
1909
|
-
],
|
|
1910
|
-
"models": [
|
|
1911
|
-
"Pika Scenes"
|
|
1912
|
-
],
|
|
1376
|
+
"title": "Pika Scenes: Images to Video",
|
|
1377
|
+
"description": "Generate videos that incorporate multiple input images using Pika Scenes.",
|
|
1378
|
+
"mediaType": "image",
|
|
1379
|
+
"mediaSubtype": "webp",
|
|
1380
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API", "Multi-Image"],
|
|
1381
|
+
"models": ["Pika Scenes"],
|
|
1913
1382
|
"date": "2025-03-01",
|
|
1914
1383
|
"tutorialUrl": ""
|
|
1915
1384
|
},
|
|
1916
1385
|
{
|
|
1917
1386
|
"name": "api_veo2_i2v",
|
|
1918
|
-
"title": "Veo2
|
|
1919
|
-
"description": "
|
|
1920
|
-
"mediaType": "image",
|
|
1921
|
-
"mediaSubtype": "webp",
|
|
1922
|
-
"tags": [
|
|
1923
|
-
|
|
1924
|
-
|
|
1925
|
-
|
|
1926
|
-
|
|
1927
|
-
|
|
1928
|
-
|
|
1929
|
-
|
|
1387
|
+
"title": "Veo2: Image to Video",
|
|
1388
|
+
"description": "Generate videos from images using Google Veo2 API.",
|
|
1389
|
+
"mediaType": "image",
|
|
1390
|
+
"mediaSubtype": "webp",
|
|
1391
|
+
"tags": ["Image vers Vidéo", "Vidéo", "API"],
|
|
1392
|
+
"models": ["Veo2"],
|
|
1393
|
+
"date": "2025-03-01",
|
|
1394
|
+
"tutorialUrl": ""
|
|
1395
|
+
},
|
|
1396
|
+
{
|
|
1397
|
+
"name": "api_veo3",
|
|
1398
|
+
"title": "Veo3: Image to Video",
|
|
1399
|
+
"description": "Generate high-quality 8-second videos from text prompts or images using Google's advanced Veo 3 API. Features audio generation, prompt enhancement, and dual model options for speed or quality.",
|
|
1400
|
+
"mediaType": "image",
|
|
1401
|
+
"mediaSubtype": "webp",
|
|
1402
|
+
"tags": ["Image vers Vidéo", "Texte vers Vidéo", "API"],
|
|
1403
|
+
"models": ["Veo3"],
|
|
1930
1404
|
"date": "2025-03-01",
|
|
1931
1405
|
"tutorialUrl": ""
|
|
1932
1406
|
}
|
|
@@ -1940,90 +1414,60 @@
|
|
|
1940
1414
|
"templates": [
|
|
1941
1415
|
{
|
|
1942
1416
|
"name": "api_rodin_image_to_model",
|
|
1943
|
-
"title": "Rodin
|
|
1944
|
-
"description": "
|
|
1417
|
+
"title": "Rodin: Image to Model",
|
|
1418
|
+
"description": "Generate detailed 3D models from single photos using Rodin AI.",
|
|
1945
1419
|
"mediaType": "image",
|
|
1946
1420
|
"thumbnailVariant": "compareSlider",
|
|
1947
1421
|
"mediaSubtype": "webp",
|
|
1948
|
-
"tags": [
|
|
1949
|
-
|
|
1950
|
-
"3D",
|
|
1951
|
-
"API"
|
|
1952
|
-
],
|
|
1953
|
-
"models": [
|
|
1954
|
-
"Rodin"
|
|
1955
|
-
],
|
|
1422
|
+
"tags": ["Image vers Modèle", "3D", "API"],
|
|
1423
|
+
"models": ["Rodin"],
|
|
1956
1424
|
"date": "2025-03-01",
|
|
1957
1425
|
"tutorialUrl": ""
|
|
1958
1426
|
},
|
|
1959
1427
|
{
|
|
1960
1428
|
"name": "api_rodin_multiview_to_model",
|
|
1961
|
-
"title": "Rodin
|
|
1962
|
-
"description": "
|
|
1429
|
+
"title": "Rodin: Multiview to Model",
|
|
1430
|
+
"description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
|
|
1963
1431
|
"mediaType": "image",
|
|
1964
1432
|
"thumbnailVariant": "compareSlider",
|
|
1965
1433
|
"mediaSubtype": "webp",
|
|
1966
|
-
"tags": [
|
|
1967
|
-
|
|
1968
|
-
"3D",
|
|
1969
|
-
"API"
|
|
1970
|
-
],
|
|
1971
|
-
"models": [
|
|
1972
|
-
"Rodin"
|
|
1973
|
-
],
|
|
1434
|
+
"tags": ["Multivue vers Modèle", "3D", "API"],
|
|
1435
|
+
"models": ["Rodin"],
|
|
1974
1436
|
"date": "2025-03-01",
|
|
1975
1437
|
"tutorialUrl": ""
|
|
1976
1438
|
},
|
|
1977
1439
|
{
|
|
1978
1440
|
"name": "api_tripo_text_to_model",
|
|
1979
|
-
"title": "Tripo
|
|
1980
|
-
"description": "
|
|
1981
|
-
"mediaType": "image",
|
|
1982
|
-
"mediaSubtype": "webp",
|
|
1983
|
-
"tags": [
|
|
1984
|
-
|
|
1985
|
-
"3D",
|
|
1986
|
-
"API"
|
|
1987
|
-
],
|
|
1988
|
-
"models": [
|
|
1989
|
-
"Tripo"
|
|
1990
|
-
],
|
|
1441
|
+
"title": "Tripo: Text to Model",
|
|
1442
|
+
"description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
|
|
1443
|
+
"mediaType": "image",
|
|
1444
|
+
"mediaSubtype": "webp",
|
|
1445
|
+
"tags": ["Texte vers Modèle", "3D", "API"],
|
|
1446
|
+
"models": ["Tripo"],
|
|
1991
1447
|
"date": "2025-03-01",
|
|
1992
1448
|
"tutorialUrl": ""
|
|
1993
1449
|
},
|
|
1994
1450
|
{
|
|
1995
1451
|
"name": "api_tripo_image_to_model",
|
|
1996
|
-
"title": "Tripo
|
|
1997
|
-
"description": "
|
|
1452
|
+
"title": "Tripo: Image to Model",
|
|
1453
|
+
"description": "Generate professional 3D assets from 2D images using Tripo engine.",
|
|
1998
1454
|
"mediaType": "image",
|
|
1999
1455
|
"thumbnailVariant": "compareSlider",
|
|
2000
1456
|
"mediaSubtype": "webp",
|
|
2001
|
-
"tags": [
|
|
2002
|
-
|
|
2003
|
-
"3D",
|
|
2004
|
-
"API"
|
|
2005
|
-
],
|
|
2006
|
-
"models": [
|
|
2007
|
-
"Tripo"
|
|
2008
|
-
],
|
|
1457
|
+
"tags": ["Image vers Modèle", "3D", "API"],
|
|
1458
|
+
"models": ["Tripo"],
|
|
2009
1459
|
"date": "2025-03-01",
|
|
2010
1460
|
"tutorialUrl": ""
|
|
2011
1461
|
},
|
|
2012
1462
|
{
|
|
2013
1463
|
"name": "api_tripo_multiview_to_model",
|
|
2014
|
-
"title": "Tripo
|
|
2015
|
-
"description": "
|
|
1464
|
+
"title": "Tripo: Multiview to Model",
|
|
1465
|
+
"description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
|
|
2016
1466
|
"mediaType": "image",
|
|
2017
1467
|
"thumbnailVariant": "compareSlider",
|
|
2018
1468
|
"mediaSubtype": "webp",
|
|
2019
|
-
"tags": [
|
|
2020
|
-
|
|
2021
|
-
"3D",
|
|
2022
|
-
"API"
|
|
2023
|
-
],
|
|
2024
|
-
"models": [
|
|
2025
|
-
"Tripo"
|
|
2026
|
-
],
|
|
1469
|
+
"tags": ["Multivue vers Modèle", "3D", "API"],
|
|
1470
|
+
"models": ["Tripo"],
|
|
2027
1471
|
"date": "2025-03-01",
|
|
2028
1472
|
"tutorialUrl": ""
|
|
2029
1473
|
}
|
|
@@ -2032,40 +1476,28 @@
|
|
|
2032
1476
|
{
|
|
2033
1477
|
"moduleName": "default",
|
|
2034
1478
|
"category": "TOOLS & BUILDING",
|
|
2035
|
-
"title": "API
|
|
1479
|
+
"title": "LLM API",
|
|
2036
1480
|
"type": "image",
|
|
2037
1481
|
"templates": [
|
|
2038
1482
|
{
|
|
2039
1483
|
"name": "api_openai_chat",
|
|
2040
|
-
"title": "OpenAI
|
|
2041
|
-
"description": "
|
|
2042
|
-
"mediaType": "image",
|
|
2043
|
-
"mediaSubtype": "webp",
|
|
2044
|
-
"tags": [
|
|
2045
|
-
|
|
2046
|
-
"LLM",
|
|
2047
|
-
"API"
|
|
2048
|
-
],
|
|
2049
|
-
"models": [
|
|
2050
|
-
"OpenAI"
|
|
2051
|
-
],
|
|
1484
|
+
"title": "OpenAI: Chat",
|
|
1485
|
+
"description": "Engage with OpenAI's advanced language models for intelligent conversations.",
|
|
1486
|
+
"mediaType": "image",
|
|
1487
|
+
"mediaSubtype": "webp",
|
|
1488
|
+
"tags": ["Chat", "LLM", "API"],
|
|
1489
|
+
"models": ["OpenAI"],
|
|
2052
1490
|
"date": "2025-03-01",
|
|
2053
1491
|
"tutorialUrl": ""
|
|
2054
1492
|
},
|
|
2055
1493
|
{
|
|
2056
1494
|
"name": "api_google_gemini",
|
|
2057
|
-
"title": "Google Gemini
|
|
2058
|
-
"description": "
|
|
2059
|
-
"mediaType": "image",
|
|
2060
|
-
"mediaSubtype": "webp",
|
|
2061
|
-
"tags": [
|
|
2062
|
-
|
|
2063
|
-
"LLM",
|
|
2064
|
-
"API"
|
|
2065
|
-
],
|
|
2066
|
-
"models": [
|
|
2067
|
-
"Google Gemini"
|
|
2068
|
-
],
|
|
1495
|
+
"title": "Google Gemini: Chat",
|
|
1496
|
+
"description": "Experience Google's multimodal AI with Gemini's reasoning capabilities.",
|
|
1497
|
+
"mediaType": "image",
|
|
1498
|
+
"mediaSubtype": "webp",
|
|
1499
|
+
"tags": ["Chat", "LLM", "API"],
|
|
1500
|
+
"models": ["Google Gemini"],
|
|
2069
1501
|
"date": "2025-03-01",
|
|
2070
1502
|
"tutorialUrl": ""
|
|
2071
1503
|
}
|
|
@@ -2074,23 +1506,18 @@
|
|
|
2074
1506
|
{
|
|
2075
1507
|
"moduleName": "default",
|
|
2076
1508
|
"category": "TOOLS & BUILDING",
|
|
2077
|
-
"title": "
|
|
1509
|
+
"title": "Upscaling",
|
|
2078
1510
|
"type": "image",
|
|
2079
1511
|
"templates": [
|
|
2080
1512
|
{
|
|
2081
1513
|
"name": "hiresfix_latent_workflow",
|
|
2082
|
-
"title": "
|
|
1514
|
+
"title": "Agrandissement",
|
|
2083
1515
|
"mediaType": "image",
|
|
2084
1516
|
"mediaSubtype": "webp",
|
|
2085
|
-
"description": "
|
|
1517
|
+
"description": "Agrandir les images en améliorant la qualité dans l'espace latent.",
|
|
2086
1518
|
"thumbnailVariant": "compareSlider",
|
|
2087
|
-
"tags": [
|
|
2088
|
-
|
|
2089
|
-
"Image"
|
|
2090
|
-
],
|
|
2091
|
-
"models": [
|
|
2092
|
-
"SD1.5"
|
|
2093
|
-
],
|
|
1519
|
+
"tags": ["Agrandissement", "Image"],
|
|
1520
|
+
"models": ["SD1.5"],
|
|
2094
1521
|
"date": "2025-03-01",
|
|
2095
1522
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/"
|
|
2096
1523
|
},
|
|
@@ -2099,49 +1526,34 @@
|
|
|
2099
1526
|
"title": "ESRGAN",
|
|
2100
1527
|
"mediaType": "image",
|
|
2101
1528
|
"mediaSubtype": "webp",
|
|
2102
|
-
"description": "
|
|
1529
|
+
"description": "Agrandir les images en utilisant les modèles ESRGAN pour améliorer la qualité.",
|
|
2103
1530
|
"thumbnailVariant": "compareSlider",
|
|
2104
|
-
"tags": [
|
|
2105
|
-
|
|
2106
|
-
"Image"
|
|
2107
|
-
],
|
|
2108
|
-
"models": [
|
|
2109
|
-
"SD1.5"
|
|
2110
|
-
],
|
|
1531
|
+
"tags": ["Agrandissement", "Image"],
|
|
1532
|
+
"models": ["SD1.5"],
|
|
2111
1533
|
"date": "2025-03-01",
|
|
2112
1534
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/"
|
|
2113
1535
|
},
|
|
2114
1536
|
{
|
|
2115
1537
|
"name": "hiresfix_esrgan_workflow",
|
|
2116
|
-
"title": "
|
|
1538
|
+
"title": "HiresFix ESRGAN Workflow",
|
|
2117
1539
|
"mediaType": "image",
|
|
2118
1540
|
"mediaSubtype": "webp",
|
|
2119
|
-
"description": "
|
|
1541
|
+
"description": "Agrandir les images en utilisant les modèles ESRGAN pendant les étapes de génération intermédiaires.",
|
|
2120
1542
|
"thumbnailVariant": "compareSlider",
|
|
2121
|
-
"tags": [
|
|
2122
|
-
|
|
2123
|
-
"Image"
|
|
2124
|
-
],
|
|
2125
|
-
"models": [
|
|
2126
|
-
"SD1.5"
|
|
2127
|
-
],
|
|
1543
|
+
"tags": ["Agrandissement", "Image"],
|
|
1544
|
+
"models": ["SD1.5"],
|
|
2128
1545
|
"date": "2025-03-01",
|
|
2129
1546
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling"
|
|
2130
1547
|
},
|
|
2131
1548
|
{
|
|
2132
1549
|
"name": "latent_upscale_different_prompt_model",
|
|
2133
|
-
"title": "
|
|
1550
|
+
"title": "Latent Upscale Different Prompt Model",
|
|
2134
1551
|
"mediaType": "image",
|
|
2135
1552
|
"mediaSubtype": "webp",
|
|
2136
|
-
"description": "
|
|
1553
|
+
"description": "Agrandir les images tout en changeant les prompts à travers les passes de génération.",
|
|
2137
1554
|
"thumbnailVariant": "zoomHover",
|
|
2138
|
-
"tags": [
|
|
2139
|
-
|
|
2140
|
-
"Image"
|
|
2141
|
-
],
|
|
2142
|
-
"models": [
|
|
2143
|
-
"SD1.5"
|
|
2144
|
-
],
|
|
1555
|
+
"tags": ["Agrandissement", "Image"],
|
|
1556
|
+
"models": ["SD1.5"],
|
|
2145
1557
|
"date": "2025-03-01",
|
|
2146
1558
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples"
|
|
2147
1559
|
}
|
|
@@ -2155,86 +1567,61 @@
|
|
|
2155
1567
|
"templates": [
|
|
2156
1568
|
{
|
|
2157
1569
|
"name": "controlnet_example",
|
|
2158
|
-
"title": "ControlNet",
|
|
1570
|
+
"title": "Scribble ControlNet",
|
|
2159
1571
|
"mediaType": "image",
|
|
2160
1572
|
"mediaSubtype": "webp",
|
|
2161
|
-
"description": "
|
|
1573
|
+
"description": "Générer des images guidées par des images de référence griffonnées en utilisant ControlNet.",
|
|
2162
1574
|
"thumbnailVariant": "hoverDissolve",
|
|
2163
|
-
"tags": [
|
|
2164
|
-
|
|
2165
|
-
"Image"
|
|
2166
|
-
],
|
|
2167
|
-
"models": [
|
|
2168
|
-
"SD1.5"
|
|
2169
|
-
],
|
|
1575
|
+
"tags": ["ControlNet", "Image"],
|
|
1576
|
+
"models": ["SD1.5"],
|
|
2170
1577
|
"date": "2025-03-01",
|
|
2171
1578
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/"
|
|
2172
1579
|
},
|
|
2173
1580
|
{
|
|
2174
1581
|
"name": "2_pass_pose_worship",
|
|
2175
|
-
"title": "
|
|
1582
|
+
"title": "Pose ControlNet 2 Pass",
|
|
2176
1583
|
"mediaType": "image",
|
|
2177
1584
|
"mediaSubtype": "webp",
|
|
2178
|
-
"description": "
|
|
1585
|
+
"description": "Générer des images guidées par des références de pose en utilisant ControlNet.",
|
|
2179
1586
|
"thumbnailVariant": "hoverDissolve",
|
|
2180
|
-
"tags": [
|
|
2181
|
-
|
|
2182
|
-
"Image"
|
|
2183
|
-
],
|
|
2184
|
-
"models": [
|
|
2185
|
-
"SD1.5"
|
|
2186
|
-
],
|
|
1587
|
+
"tags": ["ControlNet", "Image"],
|
|
1588
|
+
"models": ["SD1.5"],
|
|
2187
1589
|
"date": "2025-03-01",
|
|
2188
1590
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet"
|
|
2189
1591
|
},
|
|
2190
1592
|
{
|
|
2191
1593
|
"name": "depth_controlnet",
|
|
2192
|
-
"title": "
|
|
1594
|
+
"title": "Depth ControlNet",
|
|
2193
1595
|
"mediaType": "image",
|
|
2194
1596
|
"mediaSubtype": "webp",
|
|
2195
|
-
"description": "
|
|
1597
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant ControlNet.",
|
|
2196
1598
|
"thumbnailVariant": "hoverDissolve",
|
|
2197
|
-
"tags": [
|
|
2198
|
-
|
|
2199
|
-
"Image"
|
|
2200
|
-
],
|
|
2201
|
-
"models": [
|
|
2202
|
-
"SD1.5"
|
|
2203
|
-
],
|
|
1599
|
+
"tags": ["ControlNet", "Image"],
|
|
1600
|
+
"models": ["SD1.5"],
|
|
2204
1601
|
"date": "2025-03-01",
|
|
2205
1602
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
|
|
2206
1603
|
},
|
|
2207
1604
|
{
|
|
2208
1605
|
"name": "depth_t2i_adapter",
|
|
2209
|
-
"title": "
|
|
1606
|
+
"title": "Depth T2I Adapter",
|
|
2210
1607
|
"mediaType": "image",
|
|
2211
1608
|
"mediaSubtype": "webp",
|
|
2212
|
-
"description": "
|
|
1609
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant l'adaptateur T2I.",
|
|
2213
1610
|
"thumbnailVariant": "hoverDissolve",
|
|
2214
|
-
"tags": [
|
|
2215
|
-
|
|
2216
|
-
"Image"
|
|
2217
|
-
],
|
|
2218
|
-
"models": [
|
|
2219
|
-
"SD1.5"
|
|
2220
|
-
],
|
|
1611
|
+
"tags": ["Adaptateur T2I", "Image"],
|
|
1612
|
+
"models": ["SD1.5"],
|
|
2221
1613
|
"date": "2025-03-01",
|
|
2222
1614
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
|
|
2223
1615
|
},
|
|
2224
1616
|
{
|
|
2225
1617
|
"name": "mixing_controlnets",
|
|
2226
|
-
"title": "
|
|
1618
|
+
"title": "Mixing ControlNets",
|
|
2227
1619
|
"mediaType": "image",
|
|
2228
1620
|
"mediaSubtype": "webp",
|
|
2229
|
-
"description": "
|
|
1621
|
+
"description": "Générer des images en combinant plusieurs modèles ControlNet.",
|
|
2230
1622
|
"thumbnailVariant": "hoverDissolve",
|
|
2231
|
-
"tags": [
|
|
2232
|
-
|
|
2233
|
-
"Image"
|
|
2234
|
-
],
|
|
2235
|
-
"models": [
|
|
2236
|
-
"SD1.5"
|
|
2237
|
-
],
|
|
1623
|
+
"tags": ["ControlNet", "Image"],
|
|
1624
|
+
"models": ["SD1.5"],
|
|
2238
1625
|
"date": "2025-03-01",
|
|
2239
1626
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets"
|
|
2240
1627
|
}
|
|
@@ -2243,7 +1630,7 @@
|
|
|
2243
1630
|
{
|
|
2244
1631
|
"moduleName": "default",
|
|
2245
1632
|
"category": "TOOLS & BUILDING",
|
|
2246
|
-
"title": "Composition de
|
|
1633
|
+
"title": "Composition de Zone",
|
|
2247
1634
|
"type": "image",
|
|
2248
1635
|
"templates": [
|
|
2249
1636
|
{
|
|
@@ -2251,30 +1638,20 @@
|
|
|
2251
1638
|
"title": "Composition de Zone",
|
|
2252
1639
|
"mediaType": "image",
|
|
2253
1640
|
"mediaSubtype": "webp",
|
|
2254
|
-
"description": "
|
|
2255
|
-
"tags": [
|
|
2256
|
-
|
|
2257
|
-
"Image"
|
|
2258
|
-
],
|
|
2259
|
-
"models": [
|
|
2260
|
-
"SD1.5"
|
|
2261
|
-
],
|
|
1641
|
+
"description": "Générer des images en contrôlant la composition avec des zones définies.",
|
|
1642
|
+
"tags": ["Composition de Zone", "Image"],
|
|
1643
|
+
"models": ["SD1.5"],
|
|
2262
1644
|
"date": "2025-03-01",
|
|
2263
1645
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
|
|
2264
1646
|
},
|
|
2265
1647
|
{
|
|
2266
1648
|
"name": "area_composition_square_area_for_subject",
|
|
2267
|
-
"title": "Composition
|
|
1649
|
+
"title": "Area Composition Square Area for Subject",
|
|
2268
1650
|
"mediaType": "image",
|
|
2269
1651
|
"mediaSubtype": "webp",
|
|
2270
|
-
"description": "
|
|
2271
|
-
"tags": [
|
|
2272
|
-
|
|
2273
|
-
"Image"
|
|
2274
|
-
],
|
|
2275
|
-
"models": [
|
|
2276
|
-
"SD1.5"
|
|
2277
|
-
],
|
|
1652
|
+
"description": "Générer des images avec un placement cohérent du sujet en utilisant la composition de zone.",
|
|
1653
|
+
"tags": ["Composition de Zone", "Image"],
|
|
1654
|
+
"models": ["SD1.5"],
|
|
2278
1655
|
"date": "2025-03-01",
|
|
2279
1656
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition"
|
|
2280
1657
|
}
|
|
@@ -2288,50 +1665,35 @@
|
|
|
2288
1665
|
"templates": [
|
|
2289
1666
|
{
|
|
2290
1667
|
"name": "3d_hunyuan3d_image_to_model",
|
|
2291
|
-
"title": "Hunyuan3D",
|
|
1668
|
+
"title": "Hunyuan3D 2.0",
|
|
2292
1669
|
"mediaType": "image",
|
|
2293
1670
|
"mediaSubtype": "webp",
|
|
2294
|
-
"description": "
|
|
2295
|
-
"tags": [
|
|
2296
|
-
|
|
2297
|
-
"3D"
|
|
2298
|
-
],
|
|
2299
|
-
"models": [
|
|
2300
|
-
"Hunyuan3D 2.0"
|
|
2301
|
-
],
|
|
1671
|
+
"description": "Générer des modèles 3D à partir d'images simples en utilisant Hunyuan3D 2.0.",
|
|
1672
|
+
"tags": ["Image vers Modèle", "3D"],
|
|
1673
|
+
"models": ["Hunyuan3D 2.0"],
|
|
2302
1674
|
"date": "2025-03-01",
|
|
2303
1675
|
"tutorialUrl": ""
|
|
2304
1676
|
},
|
|
2305
1677
|
{
|
|
2306
1678
|
"name": "3d_hunyuan3d_multiview_to_model",
|
|
2307
|
-
"title": "Hunyuan3D
|
|
1679
|
+
"title": "Hunyuan3D 2.0 MV",
|
|
2308
1680
|
"mediaType": "image",
|
|
2309
1681
|
"mediaSubtype": "webp",
|
|
2310
|
-
"description": "
|
|
2311
|
-
"tags": [
|
|
2312
|
-
|
|
2313
|
-
"3D"
|
|
2314
|
-
],
|
|
2315
|
-
"models": [
|
|
2316
|
-
"Hunyuan3D 2.0 MV"
|
|
2317
|
-
],
|
|
1682
|
+
"description": "Générer des modèles 3D à partir de vues multiples en utilisant Hunyuan3D 2.0 MV.",
|
|
1683
|
+
"tags": ["Multivue vers Modèle", "3D"],
|
|
1684
|
+
"models": ["Hunyuan3D 2.0 MV"],
|
|
2318
1685
|
"date": "2025-03-01",
|
|
2319
1686
|
"tutorialUrl": "",
|
|
2320
1687
|
"thumbnailVariant": "hoverDissolve"
|
|
2321
1688
|
},
|
|
2322
1689
|
{
|
|
2323
1690
|
"name": "3d_hunyuan3d_multiview_to_model_turbo",
|
|
2324
|
-
"title": "Hunyuan3D Turbo",
|
|
1691
|
+
"title": "Hunyuan3D 2.0 MV Turbo",
|
|
2325
1692
|
"mediaType": "image",
|
|
2326
1693
|
"mediaSubtype": "webp",
|
|
2327
|
-
"description": "
|
|
2328
|
-
"tags": [
|
|
2329
|
-
|
|
2330
|
-
"3D"
|
|
2331
|
-
],
|
|
2332
|
-
"models": [
|
|
2333
|
-
"Hunyuan3D 2.0 MV Turbo"
|
|
2334
|
-
],
|
|
1694
|
+
"description": "Générer des modèles 3D à partir de vues multiples en utilisant Hunyuan3D 2.0 MV Turbo.",
|
|
1695
|
+
"tags": ["Multivue vers Modèle", "3D"],
|
|
1696
|
+
"models": ["Hunyuan3D 2.0 MV Turbo"],
|
|
2335
1697
|
"date": "2025-03-01",
|
|
2336
1698
|
"tutorialUrl": "",
|
|
2337
1699
|
"thumbnailVariant": "hoverDissolve"
|
|
@@ -2341,17 +1703,12 @@
|
|
|
2341
1703
|
"title": "Stable Zero123",
|
|
2342
1704
|
"mediaType": "image",
|
|
2343
1705
|
"mediaSubtype": "webp",
|
|
2344
|
-
"description": "
|
|
2345
|
-
"tags": [
|
|
2346
|
-
|
|
2347
|
-
"3D"
|
|
2348
|
-
],
|
|
2349
|
-
"models": [
|
|
2350
|
-
"Stable Zero123"
|
|
2351
|
-
],
|
|
1706
|
+
"description": "Générer des vues 3D à partir d'images simples en utilisant Stable Zero123.",
|
|
1707
|
+
"tags": ["Image vers 3D", "3D"],
|
|
1708
|
+
"models": ["Stable Zero123"],
|
|
2352
1709
|
"date": "2025-03-01",
|
|
2353
1710
|
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/"
|
|
2354
1711
|
}
|
|
2355
1712
|
]
|
|
2356
1713
|
}
|
|
2357
|
-
]
|
|
1714
|
+
]
|