comfyui-workflow-templates-media-other 0.3.10__py3-none-any.whl → 0.3.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo.json +655 -0
- comfyui_workflow_templates_media_other/templates/index.ar.json +672 -672
- comfyui_workflow_templates_media_other/templates/index.es.json +673 -673
- comfyui_workflow_templates_media_other/templates/index.fr.json +673 -673
- comfyui_workflow_templates_media_other/templates/index.ja.json +673 -673
- comfyui_workflow_templates_media_other/templates/index.json +672 -672
- comfyui_workflow_templates_media_other/templates/index.ko.json +673 -673
- comfyui_workflow_templates_media_other/templates/index.ru.json +673 -673
- comfyui_workflow_templates_media_other/templates/index.tr.json +672 -672
- comfyui_workflow_templates_media_other/templates/index.zh-TW.json +673 -673
- comfyui_workflow_templates_media_other/templates/index.zh.json +673 -673
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.14.dist-info}/METADATA +1 -1
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.14.dist-info}/RECORD +16 -14
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.14.dist-info}/WHEEL +0 -0
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.14.dist-info}/top_level.txt +0 -0
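The diff below covers the localized template index files listed above. Each index is a JSON array of modules, and each module carries a `templates` list whose entries expose fields such as `name`, `title`, `mediaType`, `tags`, `models`, `date`, `size`, and `vram`, as visible in the hunks that follow. A minimal sketch of reading that index is shown here, assuming the wheel exposes its data files under the `templates/` directory from the file summary above; the snippet is illustrative only and not part of the package's public API:

```python
# Illustrative sketch (not part of the package API): load the bundled
# template index and list the workflow templates it declares.
# Assumes the data files ship under the "templates/" directory shown above.
import json
from importlib import resources

templates_dir = resources.files("comfyui_workflow_templates_media_other") / "templates"
index = json.loads((templates_dir / "index.json").read_text(encoding="utf-8"))

for module in index:  # top-level JSON array of modules
    for template in module.get("templates", []):
        print(module.get("title"), "->", template.get("name"), "-", template.get("title"))
```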
@@ -2,866 +2,539 @@
  {
  "moduleName": "default",
  "type": "image",
- "
- "
+ "category": "GENERATION TYPE",
+ "icon": "icon-[lucide--image]",
+ "title": "Image",
  "templates": [
  {
- "name": "
- "title": "
+ "name": "image_flux2",
+ "title": "Flux.2 Dev",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "tags": ["Texte vers image", "Image"],
- "models": ["
- "date": "2025-
- "size":
+ "thumbnailVariant": "compareSlider",
+ "description": "Générez des images photoréalistes avec cohérence multi-référence et rendu de texte professionnel.",
+ "tags": ["Texte vers image", "Image", "Édition d'image"],
+ "models": ["Flux.2 Dev", "BFL"],
+ "date": "2025-11-26",
+ "size": 71382356459,
+ "vram": 0
  },
  {
- "name": "
- "title": "
+ "name": "image_flux2_fp8",
+ "title": "Maquette de produit (Flux.2 Dev FP8)",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "
- "
- "
- "
+ "description": "Créez des maquettes de produits en appliquant des motifs de conception sur des emballages, des mugs et d'autres produits à l'aide de la cohérence multi-références.",
+ "tags": ["Texte vers image", "Image", "Édition d'image", "Maquette", "Design produit"],
+ "models": ["Flux.2 Dev", "BFL"],
+ "date": "2025-11-26",
+ "size": 53837415055,
+ "vram": 0
  },
  {
- "name": "
- "title": "Image
- "description": "Générez des vidéos à partir d’une image avec Wan2.2 14B",
+ "name": "image_z_image_turbo",
+ "title": "Z-Image-Turbo texte vers image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "tags": ["
- "models": ["
- "date": "2025-
- "size":
+ "description": "Un modèle fondamental efficace de génération d’images utilisant un transformateur de diffusion à flux unique, compatible anglais et chinois.",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["Z-Image-Turbo"],
+ "date": "2025-11-27",
+ "size": 35326050304
  },
  {
- "name": "
- "title": "Image vers
+ "name": "image_qwen_image",
+ "title": "Qwen-Image Texte vers Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "
- "
- "
- "size":
+ "description": "Générer des images avec des capacités exceptionnelles de rendu et d'édition de texte multilingue en utilisant le modèle MMDiT 20B de Qwen-Image.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["Qwen-Image"],
+ "date": "2025-08-05",
+ "size": 31772020572
  },
  {
- "name": "
- "title": "
+ "name": "image_qwen_image_instantx_controlnet",
+ "title": "Qwen-Image InstantX ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "tags": ["
- "
- "
- "
- "size":
+ "description": "Générer des images avec Qwen-Image InstantX ControlNet, prenant en charge canny, contours doux, profondeur, pose",
+ "tags": ["Image vers image", "Image", "ControlNet"],
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+ "models": ["Qwen-Image"],
+ "date": "2025-08-23",
+ "size": 35304631173
  },
  {
- "name": "
- "title": "
+ "name": "image_qwen_image_instantx_inpainting_controlnet",
+ "title": "Qwen-Image InstantX ControlNet Inpainting",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "tags": ["
- "
- "
- "
- "
+ "thumbnailVariant": "compareSlider",
+ "description": "Inpainting professionnel et édition d'images avec Qwen-Image InstantX ControlNet. Prend en charge le remplacement d'objets, la modification de texte, les changements d'arrière-plan et l'outpainting.",
+ "tags": ["Image vers image", "Image", "ControlNet", "Inpainting"],
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+ "models": ["Qwen-Image"],
+ "date": "2025-09-12",
+ "size": 36013300777
  },
  {
- "name": "
- "title": "Image
+ "name": "image_qwen_image_union_control_lora",
+ "title": "Qwen-Image Contrôle Unifié",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "
- "
- "
- "size":
- "vram": 3092376453,
- "thumbnailVariant": "hoverDissolve"
+ "description": "Générer des images avec un contrôle structurel précis en utilisant le LoRA ControlNet unifié de Qwen-Image. Prend en charge plusieurs types de contrôle incluant canny, depth, lineart, softedge, normal et openpose pour diverses applications créatives.",
+ "tags": ["Texte vers image", "Image", "ControlNet"],
+ "models": ["Qwen-Image"],
+ "date": "2025-08-23",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+ "size": 32716913377
  },
  {
- "name": "
- "title": "
+ "name": "image_qwen_image_controlnet_patch",
+ "title": "Qwen-Image ControlNet Basique",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "
- "
- "
- "size":
- "
+ "description": "Contrôler la génération d'images en utilisant les modèles ControlNet de Qwen-Image. Prend en charge les contrôles canny, depth et inpainting via le patching de modèles.",
+ "tags": ["Texte vers image", "Image", "ControlNet"],
+ "models": ["Qwen-Image"],
+ "date": "2025-08-24",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+ "size": 34037615821,
+ "thumbnailVariant": "compareSlider"
  },
  {
- "name": "
- "title": "
+ "name": "image_qwen_image_edit_2509",
+ "title": "Qwen Édition d'Image 2509",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "thumbnailVariant": "compareSlider",
+ "description": "Édition d'images avancée avec support multi-images, cohérence améliorée et intégration ControlNet.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+ "tags": ["Image vers image", "Édition d'image", "ControlNet"],
+ "models": ["Qwen-Image"],
+ "date": "2025-09-25",
+ "size": 31772020572
  },
  {
- "name": "
- "title": "
+ "name": "image_qwen_image_edit",
+ "title": "Édition d'Image Qwen",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Éditer des parties spécifiques d'images de manière transparente.",
  "thumbnailVariant": "compareSlider",
- "
- "
- "
- "
- "
- "
+ "description": "Éditer des images avec une édition de texte bilingue précise et des capacités d'édition sémantique/apparence duales en utilisant le modèle MMDiT 20B de Qwen-Image-Edit.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+ "tags": ["Image vers image", "Édition d'image"],
+ "models": ["Qwen-Image"],
+ "date": "2025-08-18",
+ "size": 31772020572
  },
  {
- "name": "
- "title": "
+ "name": "image_chrono_edit_14B",
+ "title": "ChronoEdit 14B",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Étendre les images au-delà de leurs limites d'origine.",
  "thumbnailVariant": "compareSlider",
- "
- "tags": ["
- "models": ["
- "date": "2025-03
- "size":
- "vram": 4101693768
+ "description": "Édition d'images propulsée par la compréhension dynamique des modèles vidéo, créant des résultats physiquement plausibles tout en préservant la cohérence du personnage et du style.",
+ "tags": ["Édition d'image", "Image vers image"],
+ "models": ["Wan2.1", "ChronoEdit", "Nvidia"],
+ "date": "2025-11-03",
+ "size": 40459304
  },
  {
- "name": "
- "title": "
+ "name": "flux_kontext_dev_basic",
+ "title": "Flux Kontext Dev (Basique)",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
+ "thumbnailVariant": "hoverDissolve",
+ "description": "Éditer une image en utilisant Flux Kontext avec une visibilité complète des nœuds, parfait pour apprendre le flux de travail.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
+ "tags": ["Édition d'image", "Image vers image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-06-26",
+ "size": 17641578168,
+ "vram": 19327352832
+ },
+ {
+ "name": "image_chroma1_radiance_text_to_image",
+ "title": "Chroma1 Radiance Texte vers Image",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Chroma1-Radiance travaille directement avec les pixels d'image au lieu des latents compressés, offrant des images de meilleure qualité avec moins d'artefacts et de distorsion.",
  "tags": ["Texte vers image", "Image"],
- "models": ["
- "date": "2025-
- "size":
- "vram":
+ "models": ["Chroma"],
+ "date": "2025-09-18",
+ "size": 23622320128,
+ "vram": 23622320128
  },
  {
- "name": "
- "title": "
+ "name": "image_netayume_lumina_t2i",
+ "title": "NetaYume Lumina Texte vers Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "
- "
- "
- "size": 2974264852,
- "vram": 4080218931
+ "description": "Génération d'images de style anime de haute qualité avec compréhension améliorée des personnages et textures détaillées. Affinée à partir de Neta Lumina sur l'ensemble de données Danbooru.",
+ "tags": ["Texte vers image", "Image", "Anime"],
+ "models": ["OmniGen"],
+ "date": "2025-10-10",
+ "size": 10619306639
  },
  {
- "name": "
- "title": "
+ "name": "image_chroma_text_to_image",
+ "title": "Chroma texte vers image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
+ "description": "Chroma est modifié à partir de Flux et présente quelques changements dans l'architecture.",
  "tags": ["Texte vers image", "Image"],
- "models": ["
- "date": "2025-
- "
- "
- "vram": 6184752906
+ "models": ["Chroma", "Flux"],
+ "date": "2025-06-04",
+ "size": 23289460163,
+ "vram": 15569256448
  },
  {
- "name": "
- "title": "
+ "name": "image_flux.1_fill_dev_OneReward",
+ "title": "Flux.1 Dev OneReward",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "size":
- "vram":
+ "thumbnailVariant": "compareSlider",
+ "description": "Supports various tasks such as image inpainting, outpainting, and object removal",
+ "tags": ["Inpainting", "Outpainting"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-09-21",
+ "size": 29001766666,
+ "vram": 21474836480
  },
  {
- "name": "
- "title": "
+ "name": "flux_dev_checkpoint_example",
+ "title": "Flux Dev fp8",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "tags": ["
- "models": ["
+ "description": "Générer des images en utilisant la version quantifiée Flux Dev fp8. Convient aux appareils avec une VRAM limitée, ne nécessite qu'un seul fichier de modèle, mais la qualité de l'image est légèrement inférieure à la version complète.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
- "
- "
- "vram": 3929895076
+ "size": 17244293693,
+ "vram": 18253611008
  },
  {
- "name": "
- "title": "
+ "name": "flux1_dev_uso_reference_image_gen",
+ "title": "Génération d'images de référence Flux.1 Dev USO",
+ "description": "Utilisez des images de référence pour contrôler à la fois le style et le sujet : conservez le visage de votre personnage tout en changeant de style artistique, ou appliquez des styles artistiques à de nouvelles scènes",
+ "thumbnailVariant": "hoverDissolve",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "size": 2201170739,
- "vram": 6442450944
+ "tags": ["Image vers image", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-09-02",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
+ "size": 18597208392,
+ "vram": 19864223744
  },
  {
- "name": "
- "title": "
+ "name": "flux_schnell",
+ "title": "Flux Schnell fp8",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "tags": ["
- "models": ["
+ "description": "Générer rapidement des images avec la version quantifiée Flux Schnell fp8. Idéal pour le matériel d'entrée de gamme, ne nécessite que 4 étapes pour générer des images.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
- "
- "
- "vram": 6442450944
+ "size": 17233556275,
+ "vram": 18253611008
  },
  {
- "name": "
- "title": "
+ "name": "flux1_krea_dev",
+ "title": "Flux.1 Krea Dev",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "tags": ["
- "models": ["
+ "description": "Un modèle FLUX affiné poussant le photoréalisme à son maximum",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-07-31",
+ "size": 22269405430,
+ "vram": 23085449216
+ },
+ {
+ "name": "flux_dev_full_text_to_image",
+ "title": "Flux Dev texte vers image complet",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générer des images de haute qualité avec la version complète de Flux Dev. Nécessite plus de VRAM et plusieurs fichiers de modèles, mais offre la meilleure capacité de suivi des prompts et la qualité d'image.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
- "
- "
- "vram": 5153960755
+ "size": 34177202258,
+ "vram": 23622320128
  },
  {
- "name": "
- "title": "
+ "name": "flux_schnell_full_text_to_image",
+ "title": "Flux Schnell texte vers image complet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer des images
- "
- "tags": ["
- "models": ["
+ "description": "Générer des images rapidement avec la version complète de Flux Schnell. Utilise la licence Apache2.0, ne nécessite que 4 étapes pour générer des images tout en maintenant une bonne qualité d'image.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
- "
- "size": 3189013217,
- "vram": 6442450944
+ "size": 34155727421
  },
  {
- "name": "
- "title": "
+ "name": "flux_fill_inpaint_example",
+ "title": "Flux Inpainting",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "thumbnailVariant": "
- "
- "
+ "description": "Combler les parties manquantes des images en utilisant l'inpainting Flux.",
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
+ "tags": ["Image vers image", "Inpainting", "Image"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
- "
- "size": 4660039516,
- "vram": 6442450944
+ "size": 10372346020
  },
  {
- "name": "
- "title": "
+ "name": "flux_fill_outpaint_example",
+ "title": "Flux Outpainting",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "thumbnailVariant": "
- "
- "
+ "description": "Étendre les images au-delà des limites en utilisant l'outpainting Flux.",
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
+ "tags": ["Outpainting", "Image", "Image vers image"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
- "
- "size": 2888365507,
- "vram": 6442450944
+ "size": 10372346020
  },
  {
- "name": "
- "title": "
+ "name": "flux_canny_model_example",
+ "title": "Modèle Flux Canny",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer des images guidées par
+ "description": "Générer des images guidées par la détection de contours en utilisant Flux Canny.",
  "thumbnailVariant": "hoverDissolve",
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+ "tags": ["Image vers image", "ControlNet", "Image"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
- "
- "size": 2523293286,
- "vram": 6442450944
+ "size": 34177202258
  },
  {
- "name": "
- "title": "
+ "name": "flux_depth_lora_example",
+ "title": "Flux Depth LoRA",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer des images en
+ "description": "Générer des images guidées par les informations de profondeur en utilisant Flux LoRA.",
  "thumbnailVariant": "hoverDissolve",
- "
- "
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+ "tags": ["Image vers image", "ControlNet", "Image"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
- "
-
- "vram": 6442450944
- }
- ]
- },
- {
- "moduleName": "default",
- "type": "image",
- "category": "GENERATION TYPE",
- "icon": "icon-[lucide--image]",
- "title": "Image",
- "templates": [
+ "size": 35412005356
+ },
  {
- "name": "
- "title": "Flux
+ "name": "flux_redux_model_example",
+ "title": "Modèle Flux Redux",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "tags": ["
- "models": ["Flux
- "date": "2025-
- "size":
- "vram": 0
+ "description": "Générer des images en transférant le style à partir d'images de référence en utilisant Flux Redux.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+ "tags": ["Image vers image", "ControlNet", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 35154307318
  },
  {
- "name": "
- "title": "
+ "name": "image_omnigen2_t2i",
+ "title": "OmniGen2 Texte vers Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "
- "
- "
- "
- "
+ "description": "Générer des images de haute qualité à partir de prompts textuels en utilisant le modèle multimodal unifié 7B d'OmniGen2 avec une architecture à double chemin.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["OmniGen"],
+ "date": "2025-06-30",
+ "size": 15784004813
  },
  {
- "name": "
- "title": "
+ "name": "image_omnigen2_image_edit",
+ "title": "Édition d'Image OmniGen2",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
+ "thumbnailVariant": "hoverDissolve",
+ "description": "Éditer des images avec des instructions en langage naturel en utilisant les capacités avancées d'édition d'images d'OmniGen2 et le support de rendu de texte.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
+ "tags": ["Édition d'image", "Image"],
+ "models": ["OmniGen"],
+ "date": "2025-06-30",
+ "size": 15784004813
  },
  {
- "name": "
- "title": "
+ "name": "hidream_i1_dev",
+ "title": "HiDream I1 Dev",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer des images avec
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/
+ "description": "Générer des images avec HiDream I1 Dev - Version équilibrée avec 28 étapes d'inférence, adaptée au matériel de gamme moyenne.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
  "tags": ["Texte vers image", "Image"],
- "models": ["
- "date": "2025-
- "size":
+ "models": ["HiDream"],
+ "date": "2025-04-17",
+ "size": 33318208799
  },
  {
- "name": "
- "title": "
+ "name": "hidream_i1_fast",
+ "title": "HiDream I1 Fast",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer des images avec
- "
- "
- "models": ["
- "date": "2025-
- "size":
- },
- {
- "name": "image_qwen_image_instantx_inpainting_controlnet",
- "title": "Qwen-Image InstantX ControlNet Inpainting",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "description": "Inpainting professionnel et édition d'images avec Qwen-Image InstantX ControlNet. Prend en charge le remplacement d'objets, la modification de texte, les changements d'arrière-plan et l'outpainting.",
- "tags": ["Image vers image", "Image", "ControlNet", "Inpainting"],
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
- "models": ["Qwen-Image"],
- "date": "2025-09-12",
- "size": 36013300777
- },
- {
- "name": "image_qwen_image_union_control_lora",
- "title": "Qwen-Image Contrôle Unifié",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images avec un contrôle structurel précis en utilisant le LoRA ControlNet unifié de Qwen-Image. Prend en charge plusieurs types de contrôle incluant canny, depth, lineart, softedge, normal et openpose pour diverses applications créatives.",
- "tags": ["Texte vers image", "Image", "ControlNet"],
- "models": ["Qwen-Image"],
- "date": "2025-08-23",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
- "size": 32716913377
- },
- {
- "name": "image_qwen_image_controlnet_patch",
- "title": "Qwen-Image ControlNet Basique",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Contrôler la génération d'images en utilisant les modèles ControlNet de Qwen-Image. Prend en charge les contrôles canny, depth et inpainting via le patching de modèles.",
- "tags": ["Texte vers image", "Image", "ControlNet"],
- "models": ["Qwen-Image"],
- "date": "2025-08-24",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
- "size": 34037615821,
- "thumbnailVariant": "compareSlider"
+ "description": "Générer des images rapidement avec HiDream I1 Fast - Version légère avec 16 étapes d'inférence, idéale pour des aperçus rapides sur du matériel d'entrée de gamme.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["HiDream"],
+ "date": "2025-04-17",
+ "size": 24234352968
  },
  {
- "name": "
- "title": "
+ "name": "hidream_i1_full",
+ "title": "HiDream I1 Full",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "size": 31772020572
+ "description": "Générer des images avec HiDream I1 Full - Version complète avec 50 étapes d'inférence pour une sortie de la plus haute qualité.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["HiDream"],
+ "date": "2025-04-17",
+ "size": 24234352968
  },
  {
- "name": "
- "title": "Édition d'Image
+ "name": "hidream_e1_1",
+ "title": "Édition d'Image HiDream E1.1",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "description": "Éditer des images avec
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/
- "tags": ["
- "models": ["
- "date": "2025-
- "size":
+ "description": "Éditer des images avec HiDream E1.1 – il est meilleur en qualité d'image et en précision d'édition que HiDream-E1-Full.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
+ "tags": ["Édition d'image", "Image"],
+ "models": ["HiDream"],
+ "date": "2025-07-21",
+ "size": 50422916055
  },
  {
- "name": "
- "title": "
+ "name": "hidream_e1_full",
+ "title": "Édition d'Image HiDream E1",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "description": "
- "
- "
- "
- "
-
- {
- "name": "flux_kontext_dev_basic",
- "title": "Flux Kontext Dev (Basique)",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "thumbnailVariant": "hoverDissolve",
- "description": "Éditer une image en utilisant Flux Kontext avec une visibilité complète des nœuds, parfait pour apprendre le flux de travail.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
- "tags": ["Édition d'image", "Image vers image"],
- "models": ["Flux", "BFL"],
- "date": "2025-06-26",
- "size": 17641578168,
- "vram": 19327352832
- },
- {
- "name": "image_chroma1_radiance_text_to_image",
- "title": "Chroma1 Radiance Texte vers Image",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Chroma1-Radiance travaille directement avec les pixels d'image au lieu des latents compressés, offrant des images de meilleure qualité avec moins d'artefacts et de distorsion.",
- "tags": ["Texte vers image", "Image"],
- "models": ["Chroma"],
- "date": "2025-09-18",
- "size": 23622320128,
- "vram": 23622320128
- },
- {
- "name": "image_netayume_lumina_t2i",
- "title": "NetaYume Lumina Texte vers Image",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Génération d'images de style anime de haute qualité avec compréhension améliorée des personnages et textures détaillées. Affinée à partir de Neta Lumina sur l'ensemble de données Danbooru.",
- "tags": ["Texte vers image", "Image", "Anime"],
- "models": ["OmniGen"],
- "date": "2025-10-10",
- "size": 10619306639
+ "description": "Éditer des images avec HiDream E1 - Modèle professionnel d'édition d'images en langage naturel.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
+ "tags": ["Édition d'image", "Image"],
+ "models": ["HiDream"],
+ "date": "2025-05-01",
+ "size": 34209414513
  },
  {
- "name": "
- "title": "
+ "name": "sd3.5_simple_example",
+ "title": "SD3.5 Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
+ "description": "Générer des images en utilisant SD 3.5.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
  "tags": ["Texte vers image", "Image"],
- "models": ["
- "date": "2025-
- "size":
- "vram": 15569256448
+ "models": ["SD3.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 14935748772
  },
  {
- "name": "
- "title": "
+ "name": "sd3.5_large_canny_controlnet_example",
+ "title": "SD3.5 Large Canny ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "
- "
- "
- "
- "
- "
- "
+ "description": "Générer des images guidées par la détection de contours en utilisant SD 3.5 Canny ControlNet.",
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+ "tags": ["Image vers image", "Image", "ControlNet"],
+ "models": ["SD3.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 23590107873
  },
  {
- "name": "
- "title": "
+ "name": "sd3.5_large_depth",
+ "title": "SD3.5 Large Profondeur",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer des images
- "
- "
- "
+ "description": "Générer des images guidées par les informations de profondeur en utilisant SD 3.5.",
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+ "tags": ["Image vers image", "Image", "ControlNet"],
+ "models": ["SD3.5", "Stability"],
  "date": "2025-03-01",
- "size":
- "vram": 18253611008
+ "size": 23590107873
  },
  {
- "name": "
- "title": "
- "description": "Utilisez des images de référence pour contrôler à la fois le style et le sujet : conservez le visage de votre personnage tout en changeant de style artistique, ou appliquez des styles artistiques à de nouvelles scènes",
- "thumbnailVariant": "hoverDissolve",
+ "name": "sd3.5_large_blur",
+ "title": "SD3.5 Large Flou",
  "mediaType": "image",
  "mediaSubtype": "webp",
+ "description": "Générer des images guidées par des images de référence floues en utilisant SD 3.5.",
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
  "tags": ["Image vers image", "Image"],
- "models": ["
- "date": "2025-
- "
- "size": 18597208392,
- "vram": 19864223744
+ "models": ["SD3.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 23590107873
  },
  {
- "name": "
- "title": "
+ "name": "sdxl_simple_example",
+ "title": "SDXL Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer
- "tutorialUrl": "https://
+ "description": "Générer des images de haute qualité en utilisant SDXL.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
  "tags": ["Texte vers image", "Image"],
- "models": ["
+ "models": ["SDXL", "Stability"],
  "date": "2025-03-01",
- "size":
- "vram": 18253611008
+ "size": 13013750907
  },
  {
- "name": "
- "title": "
+ "name": "sdxl_refiner_prompt_example",
+ "title": "SDXL Affineur de Prompt",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "
- "tutorialUrl": "https://
+ "description": "Améliorer les images SDXL en utilisant des modèles de raffinement.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
  "tags": ["Texte vers image", "Image"],
- "models": ["
- "date": "2025-
- "size":
- "vram": 23085449216
+ "models": ["SDXL", "Stability"],
+ "date": "2025-03-01",
+ "size": 13013750907
  },
  {
- "name": "
- "title": "
+ "name": "sdxl_revision_text_prompts",
+ "title": "SDXL Révision Prompts Texte",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer des images
- "tutorialUrl": "https://
+ "description": "Générer des images en transférant des concepts à partir d'images de référence en utilisant SDXL Revision.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
  "tags": ["Texte vers image", "Image"],
- "models": ["
+ "models": ["SDXL", "Stability"],
  "date": "2025-03-01",
- "size":
- "vram": 23622320128
+ "size": 10630044058
  },
  {
- "name": "
- "title": "
+ "name": "sdxlturbo_example",
+ "title": "SDXL Turbo",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générer des images
- "tutorialUrl": "https://
+ "description": "Générer des images en une seule étape en utilisant SDXL Turbo.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
  "tags": ["Texte vers image", "Image"],
- "models": ["
+ "models": ["SDXL", "Stability"],
  "date": "2025-03-01",
- "size":
+ "size": 6936372183
  },
  {
- "name": "
- "title": "
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Combler les parties manquantes des images en utilisant l'inpainting Flux.",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
- "tags": ["Image vers image", "Inpainting", "Image"],
- "models": ["Flux", "BFL"],
- "date": "2025-03-01",
- "size": 10372346020
- },
- {
- "name": "flux_fill_outpaint_example",
- "title": "Flux Outpainting",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Étendre les images au-delà des limites en utilisant l'outpainting Flux.",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
- "tags": ["Outpainting", "Image", "Image vers image"],
- "models": ["Flux", "BFL"],
- "date": "2025-03-01",
- "size": 10372346020
- },
- {
- "name": "flux_canny_model_example",
- "title": "Modèle Flux Canny",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images guidées par la détection de contours en utilisant Flux Canny.",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
- "tags": ["Image vers image", "ControlNet", "Image"],
- "models": ["Flux", "BFL"],
- "date": "2025-03-01",
- "size": 34177202258
- },
- {
- "name": "flux_depth_lora_example",
- "title": "Flux Depth LoRA",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images guidées par les informations de profondeur en utilisant Flux LoRA.",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
- "tags": ["Image vers image", "ControlNet", "Image"],
- "models": ["Flux", "BFL"],
- "date": "2025-03-01",
- "size": 35412005356
- },
- {
- "name": "flux_redux_model_example",
- "title": "Modèle Flux Redux",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images en transférant le style à partir d'images de référence en utilisant Flux Redux.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
- "tags": ["Image vers image", "ControlNet", "Image"],
- "models": ["Flux", "BFL"],
- "date": "2025-03-01",
- "size": 35154307318
- },
- {
- "name": "image_omnigen2_t2i",
- "title": "OmniGen2 Texte vers Image",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images de haute qualité à partir de prompts textuels en utilisant le modèle multimodal unifié 7B d'OmniGen2 avec une architecture à double chemin.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
- "tags": ["Texte vers image", "Image"],
- "models": ["OmniGen"],
- "date": "2025-06-30",
- "size": 15784004813
- },
- {
- "name": "image_omnigen2_image_edit",
- "title": "Édition d'Image OmniGen2",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "thumbnailVariant": "hoverDissolve",
- "description": "Éditer des images avec des instructions en langage naturel en utilisant les capacités avancées d'édition d'images d'OmniGen2 et le support de rendu de texte.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
- "tags": ["Édition d'image", "Image"],
- "models": ["OmniGen"],
- "date": "2025-06-30",
- "size": 15784004813
- },
- {
- "name": "hidream_i1_dev",
- "title": "HiDream I1 Dev",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images avec HiDream I1 Dev - Version équilibrée avec 28 étapes d'inférence, adaptée au matériel de gamme moyenne.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
- "tags": ["Texte vers image", "Image"],
- "models": ["HiDream"],
- "date": "2025-04-17",
- "size": 33318208799
- },
- {
- "name": "hidream_i1_fast",
- "title": "HiDream I1 Fast",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images rapidement avec HiDream I1 Fast - Version légère avec 16 étapes d'inférence, idéale pour des aperçus rapides sur du matériel d'entrée de gamme.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
- "tags": ["Texte vers image", "Image"],
- "models": ["HiDream"],
- "date": "2025-04-17",
- "size": 24234352968
- },
- {
- "name": "hidream_i1_full",
- "title": "HiDream I1 Full",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images avec HiDream I1 Full - Version complète avec 50 étapes d'inférence pour une sortie de la plus haute qualité.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
- "tags": ["Texte vers image", "Image"],
- "models": ["HiDream"],
- "date": "2025-04-17",
- "size": 24234352968
- },
- {
- "name": "hidream_e1_1",
- "title": "Édition d'Image HiDream E1.1",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "description": "Éditer des images avec HiDream E1.1 – il est meilleur en qualité d'image et en précision d'édition que HiDream-E1-Full.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
- "tags": ["Édition d'image", "Image"],
- "models": ["HiDream"],
- "date": "2025-07-21",
- "size": 50422916055
- },
- {
- "name": "hidream_e1_full",
- "title": "Édition d'Image HiDream E1",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "description": "Éditer des images avec HiDream E1 - Modèle professionnel d'édition d'images en langage naturel.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
- "tags": ["Édition d'image", "Image"],
- "models": ["HiDream"],
- "date": "2025-05-01",
- "size": 34209414513
- },
- {
- "name": "sd3.5_simple_example",
- "title": "SD3.5 Simple",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images en utilisant SD 3.5.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
- "tags": ["Texte vers image", "Image"],
- "models": ["SD3.5", "Stability"],
- "date": "2025-03-01",
- "size": 14935748772
- },
- {
- "name": "sd3.5_large_canny_controlnet_example",
- "title": "SD3.5 Large Canny ControlNet",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images guidées par la détection de contours en utilisant SD 3.5 Canny ControlNet.",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
- "tags": ["Image vers image", "Image", "ControlNet"],
- "models": ["SD3.5", "Stability"],
- "date": "2025-03-01",
- "size": 23590107873
- },
- {
- "name": "sd3.5_large_depth",
- "title": "SD3.5 Large Profondeur",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images guidées par les informations de profondeur en utilisant SD 3.5.",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
- "tags": ["Image vers image", "Image", "ControlNet"],
- "models": ["SD3.5", "Stability"],
- "date": "2025-03-01",
- "size": 23590107873
- },
- {
- "name": "sd3.5_large_blur",
- "title": "SD3.5 Large Flou",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images guidées par des images de référence floues en utilisant SD 3.5.",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
- "tags": ["Image vers image", "Image"],
- "models": ["SD3.5", "Stability"],
- "date": "2025-03-01",
- "size": 23590107873
- },
- {
- "name": "sdxl_simple_example",
- "title": "SDXL Simple",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images de haute qualité en utilisant SDXL.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
- "tags": ["Texte vers image", "Image"],
- "models": ["SDXL", "Stability"],
- "date": "2025-03-01",
- "size": 13013750907
- },
- {
- "name": "sdxl_refiner_prompt_example",
- "title": "SDXL Affineur de Prompt",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Améliorer les images SDXL en utilisant des modèles de raffinement.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
- "tags": ["Texte vers image", "Image"],
- "models": ["SDXL", "Stability"],
- "date": "2025-03-01",
- "size": 13013750907
- },
- {
- "name": "sdxl_revision_text_prompts",
- "title": "SDXL Révision Prompts Texte",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images en transférant des concepts à partir d'images de référence en utilisant SDXL Revision.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
- "tags": ["Texte vers image", "Image"],
- "models": ["SDXL", "Stability"],
- "date": "2025-03-01",
- "size": 10630044058
- },
- {
- "name": "sdxlturbo_example",
- "title": "SDXL Turbo",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Générer des images en une seule étape en utilisant SDXL Turbo.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
- "tags": ["Texte vers image", "Image"],
- "models": ["SDXL", "Stability"],
- "date": "2025-03-01",
- "size": 6936372183
- },
- {
- "name": "image_lotus_depth_v1_1",
- "title": "Lotus Profondeur",
+ "name": "image_lotus_depth_v1_1",
+ "title": "Lotus Profondeur",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -1372,7 +1045,7 @@
  "title": "Hunyuan3D 2.1",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Générez des modèles 3D à partir d'images uniques avec Hunyuan3D 2.
+ "description": "Générez des modèles 3D à partir d'images uniques avec Hunyuan3D 2.0.",
  "tags": ["Image vers 3D", "3D"],
  "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
@@ -2523,5 +2196,332 @@
  "vram": 0
  }
  ]
+ },
+ {
+ "moduleName": "default",
+ "type": "image",
+ "isEssential": true,
+ "title": "Getting Started",
+ "templates": [
+ {
+ "name": "01_qwen_t2i_subgraphed",
+ "title": "Texte en image (Nouveau)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générez des images à partir d'invites textuelles avec le modèle Qwen-Image",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["Qwen-Image"],
+ "date": "2025-10-17",
+ "size": 31772020572
+ },
+ {
+ "name": "02_qwen_Image_edit_subgraphed",
+ "title": "Édition d'image (Nouveau)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Éditez vos images avec Qwen-Image-Edit",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+ "tags": ["Image vers image", "Édition d'image", "ControlNet"],
+ "models": ["Qwen-Image"],
+ "date": "2025-10-17",
+ "size": 31772020572
+ },
+ {
+ "name": "03_video_wan2_2_14B_i2v_subgraphed",
+ "title": "Image en Vidéo (Nouveau)",
+ "description": "Générez des vidéos à partir d’une image avec Wan2.2 14B",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+ "tags": ["Image vers vidéo", "Vidéo"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-10-17",
+ "size": 38031935406
+ },
+ {
+ "name": "04_hunyuan_3d_2.1_subgraphed",
+ "title": "Image vers 3D (Nouveau)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générez des modèles 3D à partir d'une seule image avec Hunyuan3D 2.0.",
+ "tags": ["Image vers 3D", "3D"],
+ "models": ["Hunyuan3D"],
+ "date": "2025-10-17",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
+ "size": 4928474972
+ },
+ {
+ "name": "05_audio_ace_step_1_t2a_song_subgraphed",
+ "title": "Texte en audio (Nouveau)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générez de l'audio à partir d'invites textuelles avec ACE-Step v1",
+ "tags": ["Texte vers audio", "Audio"],
+ "models": ["ACE-Step"],
+ "date": "2025-10-17",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+ "size": 7698728878
+ },
+ {
+ "name": "default",
+ "title": "Génération d'images",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générer des images à partir de prompts textuels.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 2136746230,
+ "vram": 3092376453
+ },
+ {
+ "name": "image2image",
+ "title": "Image vers Image",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Transformer des images existantes en utilisant des prompts textuels.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
+ "tags": ["Image vers image", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 2136746230,
+ "vram": 3092376453,
+ "thumbnailVariant": "hoverDissolve"
+ },
+ {
+ "name": "lora",
+ "title": "LoRA",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générer des images avec des modèles LoRA pour des styles ou sujets spécialisés.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 2437393940,
+ "vram": 3092376453
+ },
+ {
+ "name": "lora_multiple",
+ "title": "LoRA Multiple",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générer des images en combinant plusieurs modèles LoRA.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 2437393940,
+ "vram": 3350074491
+ },
+ {
+ "name": "inpaint_example",
+ "title": "Inpainting",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Éditer des parties spécifiques d'images de manière transparente.",
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
+ "tags": ["Inpainting", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 5218385265,
+ "vram": 4101693768
+ },
+ {
+ "name": "inpaint_model_outpainting",
+ "title": "Outpainting",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Étendre les images au-delà de leurs limites d'origine.",
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
+ "tags": ["Outpainting", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 5218385265,
+ "vram": 4101693768
+ },
+ {
+ "name": "embedding_example",
+ "title": "Intégration",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générer des images en utilisant l'inversion textuelle pour des styles cohérents.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
+ "tags": ["Texte vers image", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 5218385265,
+ "vram": 4123168604
+ },
+ {
+ "name": "gligen_textbox_example",
+ "title": "Boîte de Texte Gligen",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Générer des images avec un placement précis d'objets en utilisant des boîtes de texte.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
|
|
2367
|
+
"tags": ["Image"],
|
|
2368
|
+
"models": ["SD1.5", "Stability"],
|
|
2369
|
+
"date": "2025-03-01",
|
|
2370
|
+
"size": 2974264852,
|
|
2371
|
+
"vram": 4080218931
|
|
2372
|
+
},
|
|
2373
|
+
{
|
|
2374
|
+
"name": "area_composition",
|
|
2375
|
+
"title": "Composition de Zone",
|
|
2376
|
+
"mediaType": "image",
|
|
2377
|
+
"mediaSubtype": "webp",
|
|
2378
|
+
"description": "Générer des images en contrôlant la composition avec des zones définies.",
|
|
2379
|
+
"tags": ["Texte vers image", "Image"],
|
|
2380
|
+
"models": ["SD1.5", "Stability"],
|
|
2381
|
+
"date": "2025-03-01",
|
|
2382
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/",
|
|
2383
|
+
"size": 2469606195,
|
|
2384
|
+
"vram": 6184752906
|
|
2385
|
+
},
|
|
2386
|
+
{
|
|
2387
|
+
"name": "area_composition_square_area_for_subject",
|
|
2388
|
+
"title": "Composition Zone Carrée pour Sujet",
|
|
2389
|
+
"mediaType": "image",
|
|
2390
|
+
"mediaSubtype": "webp",
|
|
2391
|
+
"description": "Générer des images avec un placement cohérent du sujet en utilisant la composition de zone.",
|
|
2392
|
+
"tags": ["Texte vers image", "Image"],
|
|
2393
|
+
"models": ["SD1.5", "Stability"],
|
|
2394
|
+
"date": "2025-03-01",
|
|
2395
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition",
|
|
2396
|
+
"size": 2469606195,
|
|
2397
|
+
"vram": 5927054868
|
|
2398
|
+
},
|
|
2399
|
+
{
|
|
2400
|
+
"name": "hiresfix_latent_workflow",
|
|
2401
|
+
"title": "Agrandissement",
|
|
2402
|
+
"mediaType": "image",
|
|
2403
|
+
"mediaSubtype": "webp",
|
|
2404
|
+
"description": "Agrandir les images en améliorant la qualité dans l'espace latent.",
|
|
2405
|
+
"thumbnailVariant": "compareSlider",
|
|
2406
|
+
"tags": ["Amélioration", "Image"],
|
|
2407
|
+
"models": ["SD1.5", "Stability"],
|
|
2408
|
+
"date": "2025-03-01",
|
|
2409
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/",
|
|
2410
|
+
"size": 2136746230,
|
|
2411
|
+
"vram": 3929895076
|
|
2412
|
+
},
|
|
2413
|
+
{
|
|
2414
|
+
"name": "esrgan_example",
|
|
2415
|
+
"title": "ESRGAN",
|
|
2416
|
+
"mediaType": "image",
|
|
2417
|
+
"mediaSubtype": "webp",
|
|
2418
|
+
"description": "Agrandir les images en utilisant les modèles ESRGAN pour améliorer la qualité.",
|
|
2419
|
+
"thumbnailVariant": "compareSlider",
|
|
2420
|
+
"tags": ["Amélioration", "Image"],
|
|
2421
|
+
"models": ["SD1.5", "Stability"],
|
|
2422
|
+
"date": "2025-03-01",
|
|
2423
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/",
|
|
2424
|
+
"size": 2201170739,
|
|
2425
|
+
"vram": 6442450944
|
|
2426
|
+
},
|
|
2427
|
+
{
|
|
2428
|
+
"name": "hiresfix_esrgan_workflow",
|
|
2429
|
+
"title": "Workflow HiresFix ESRGAN",
|
|
2430
|
+
"mediaType": "image",
|
|
2431
|
+
"mediaSubtype": "webp",
|
|
2432
|
+
"description": "Agrandir les images en utilisant les modèles ESRGAN pendant les étapes de génération intermédiaires.",
|
|
2433
|
+
"thumbnailVariant": "compareSlider",
|
|
2434
|
+
"tags": ["Amélioration", "Image"],
|
|
2435
|
+
"models": ["SD1.5", "Stability"],
|
|
2436
|
+
"date": "2025-03-01",
|
|
2437
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling",
|
|
2438
|
+
"size": 2201170739,
|
|
2439
|
+
"vram": 6442450944
|
|
2440
|
+
},
|
|
2441
|
+
{
|
|
2442
|
+
"name": "latent_upscale_different_prompt_model",
|
|
2443
|
+
"title": "Modèle Upscale Latent avec Prompt Différent",
|
|
2444
|
+
"mediaType": "image",
|
|
2445
|
+
"mediaSubtype": "webp",
|
|
2446
|
+
"description": "Agrandir les images tout en changeant les prompts à travers les passes de génération.",
|
|
2447
|
+
"thumbnailVariant": "zoomHover",
|
|
2448
|
+
"tags": ["Amélioration", "Image"],
|
|
2449
|
+
"models": ["SD1.5", "Stability"],
|
|
2450
|
+
"date": "2025-03-01",
|
|
2451
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples",
|
|
2452
|
+
"size": 4262755041,
|
|
2453
|
+
"vram": 5153960755
|
|
2454
|
+
},
|
|
2455
|
+
{
|
|
2456
|
+
"name": "controlnet_example",
|
|
2457
|
+
"title": "Scribble ControlNet",
|
|
2458
|
+
"mediaType": "image",
|
|
2459
|
+
"mediaSubtype": "webp",
|
|
2460
|
+
"description": "Générer des images guidées par des images de référence griffonnées en utilisant ControlNet.",
|
|
2461
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2462
|
+
"tags": ["ControlNet", "Image"],
|
|
2463
|
+
"models": ["SD1.5", "Stability"],
|
|
2464
|
+
"date": "2025-03-01",
|
|
2465
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/",
|
|
2466
|
+
"size": 3189013217,
|
|
2467
|
+
"vram": 6442450944
|
|
2468
|
+
},
|
|
2469
|
+
{
|
|
2470
|
+
"name": "2_pass_pose_worship",
|
|
2471
|
+
"title": "Pose ControlNet 2 Passes",
|
|
2472
|
+
"mediaType": "image",
|
|
2473
|
+
"mediaSubtype": "webp",
|
|
2474
|
+
"description": "Générer des images guidées par des références de pose en utilisant ControlNet.",
|
|
2475
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2476
|
+
"tags": ["ControlNet", "Image"],
|
|
2477
|
+
"models": ["SD1.5", "Stability"],
|
|
2478
|
+
"date": "2025-03-01",
|
|
2479
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet",
|
|
2480
|
+
"size": 4660039516,
|
|
2481
|
+
"vram": 6442450944
|
|
2482
|
+
},
|
|
2483
|
+
{
|
|
2484
|
+
"name": "depth_controlnet",
|
|
2485
|
+
"title": "Profondeur ControlNet",
|
|
2486
|
+
"mediaType": "image",
|
|
2487
|
+
"mediaSubtype": "webp",
|
|
2488
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant ControlNet.",
|
|
2489
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2490
|
+
"tags": ["ControlNet", "Image", "Texte vers image"],
|
|
2491
|
+
"models": ["SD1.5", "Stability"],
|
|
2492
|
+
"date": "2025-03-01",
|
|
2493
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
|
|
2494
|
+
"size": 2888365507,
|
|
2495
|
+
"vram": 6442450944
|
|
2496
|
+
},
|
|
2497
|
+
{
|
|
2498
|
+
"name": "depth_t2i_adapter",
|
|
2499
|
+
"title": "Adaptateur T2I Profondeur",
|
|
2500
|
+
"mediaType": "image",
|
|
2501
|
+
"mediaSubtype": "webp",
|
|
2502
|
+
"description": "Générer des images guidées par les informations de profondeur en utilisant l'adaptateur T2I.",
|
|
2503
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2504
|
+
"tags": ["ControlNet", "Image", "Texte vers image"],
|
|
2505
|
+
"models": ["SD1.5", "Stability"],
|
|
2506
|
+
"date": "2025-03-01",
|
|
2507
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
|
|
2508
|
+
"size": 2523293286,
|
|
2509
|
+
"vram": 6442450944
|
|
2510
|
+
},
|
|
2511
|
+
{
|
|
2512
|
+
"name": "mixing_controlnets",
|
|
2513
|
+
"title": "Mélange ControlNets",
|
|
2514
|
+
"mediaType": "image",
|
|
2515
|
+
"mediaSubtype": "webp",
|
|
2516
|
+
"description": "Générer des images en combinant plusieurs modèles ControlNet.",
|
|
2517
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2518
|
+
"tags": ["ControlNet", "Image", "Texte vers image"],
|
|
2519
|
+
"models": ["SD1.5", "Stability"],
|
|
2520
|
+
"date": "2025-03-01",
|
|
2521
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets",
|
|
2522
|
+
"size": 3328599654,
|
|
2523
|
+
"vram": 6442450944
|
|
2524
|
+
}
|
|
2525
|
+
]
|
|
2526
2526
|
}
|
|
2527
2527
|
]
|
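The template entries added above all share one flat schema: name, title, mediaType, mediaSubtype, description, tags, models, date, size, plus optional tutorialUrl, thumbnailVariant, and vram fields. The size and vram values appear to be byte counts; for example, 6442450944, repeated across the ControlNet entries, is exactly 6 GiB (6 × 1024³). Below is a minimal sketch of how a consumer could read this localized index and filter templates by tag and VRAM budget; the helper names and the direct file path are illustrative assumptions for this sketch, not part of any published API of this package.

# Minimal sketch: read index.fr.json from the package data and filter templates.
# Assumptions: the file is available at the path shown in the diff header, and
# "size"/"vram" are byte counts (consistent with the values in the entries above).
from __future__ import annotations

import json
from pathlib import Path

INDEX_PATH = Path("comfyui_workflow_templates_media_other/templates/index.fr.json")


def load_modules(path):
    """Return the list of module sections, each holding a "templates" array."""
    with path.open(encoding="utf-8") as f:
        return json.load(f)


def filter_templates(modules, tag, max_vram=None):
    """Collect templates carrying `tag`, optionally under a VRAM budget in bytes."""
    hits = []
    for module in modules:
        for tpl in module.get("templates", []):
            if tag not in tpl.get("tags", []):
                continue
            if max_vram is not None and tpl.get("vram", 0) > max_vram:
                continue
            hits.append(tpl)
    return hits


if __name__ == "__main__":
    modules = load_modules(INDEX_PATH)
    # Example: all ControlNet templates that fit in an 8 GiB VRAM budget.
    for tpl in filter_templates(modules, "ControlNet", max_vram=8 * 1024**3):
        print(tpl["name"], tpl.get("size"), tpl.get("tutorialUrl", ""))

Swapping index.fr.json for index.json or any other index.<locale>.json listed in the file summary should work the same way, since the locale files share the same structure and only localize the display strings.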