comfyui_workflow_templates_media_other-0.3.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfyui_workflow_templates_media_other/__init__.py +6 -0
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed.json +849 -0
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +1039 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship.json +1256 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1.json +618 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model.json +1101 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo.json +1149 -0
- comfyui_workflow_templates_media_other/templates/ByteDance-Seedance_00003_.json +210 -0
- comfyui_workflow_templates_media_other/templates/area_composition-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition.json +1626 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject.json +1114 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing.json +688 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals.json +650 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song.json +630 -0
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example-1.mp3 +0 -0
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example.json +495 -0
- comfyui_workflow_templates_media_other/templates/default-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/default.json +547 -0
- comfyui_workflow_templates_media_other/templates/embedding_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/embedding_example.json +267 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example.json +635 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example.json +686 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1.json +1133 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full.json +1021 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev.json +700 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast.json +700 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_full-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_i1_full.json +700 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow.json +1029 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow.json +772 -0
- comfyui_workflow_templates_media_other/templates/index.ar.json +2521 -0
- comfyui_workflow_templates_media_other/templates/index.es.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.fr.json +2527 -0
- comfyui_workflow_templates_media_other/templates/index.ja.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.json +2527 -0
- comfyui_workflow_templates_media_other/templates/index.ko.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.ru.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.schema.json +123 -0
- comfyui_workflow_templates_media_other/templates/index.tr.json +2521 -0
- comfyui_workflow_templates_media_other/templates/index.zh-TW.json +2526 -0
- comfyui_workflow_templates_media_other/templates/index.zh.json +2526 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model.json +929 -0
- comfyui_workflow_templates_media_other/templates/lora-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora.json +615 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple.json +656 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur.json +764 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth.json +1857 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example.json +278 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16.json +1038 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control.json +1284 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp.json +1142 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/METADATA +9 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/RECORD +92 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/WHEEL +5 -0
- comfyui_workflow_templates_media_other-0.3.10.dist-info/top_level.txt +1 -0
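
The wheel ships the workflow JSON files and thumbnails as plain package data under templates/ (see the RECORD entry above). A minimal sketch of reading the bundled index with only the standard library follows; the diff does not show __init__.py's six lines, so this direct file access is an assumption, not the package's documented API:

    # Minimal sketch: read the bundled template index from the installed wheel.
    # Assumption: the JSON files are regular package data under
    # comfyui_workflow_templates_media_other/templates/ (as RECORD suggests);
    # this is not necessarily the package's intended public interface.
    import json
    from importlib import resources

    templates = resources.files("comfyui_workflow_templates_media_other") / "templates"
    index = json.loads((templates / "index.es.json").read_text(encoding="utf-8"))

    for module in index:
        # Each top-level object is a category module with a list of templates.
        print(module["title"], "-", len(module.get("templates", [])), "templates")

The same pattern works for any of the locale files (index.json, index.zh.json, etc.); the diff body below shows the Spanish locale, index.es.json.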
comfyui_workflow_templates_media_other/templates/index.es.json
@@ -0,0 +1,2526 @@
+[
+  {
+    "moduleName": "default",
+    "type": "image",
+    "isEssential": true,
+    "title": "Getting Started",
+    "templates": [
+      {
+        "name": "01_qwen_t2i_subgraphed",
+        "title": "Texto a imagen (Nuevo)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Genera imágenes a partir de indicaciones de texto usando el modelo Qwen-Image",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Qwen-Image"],
+        "date": "2025-10-17",
+        "size": 31772020572
+      },
+      {
+        "name": "02_qwen_Image_edit_subgraphed",
+        "title": "Edición de imágenes (Nuevo)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Edita tus imágenes con Qwen-Image-Edit",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+        "tags": ["Imagen a imagen", "Edición de imagen", "ControlNet"],
+        "models": ["Qwen-Image"],
+        "date": "2025-10-17",
+        "size": 31772020572
+      },
+      {
+        "name": "03_video_wan2_2_14B_i2v_subgraphed",
+        "title": "Imagen a Video (Nuevo)",
+        "description": "Genera videos a partir de una imagen usando Wan2.2 14B",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["Imagen a video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-10-17",
+        "size": 38031935406
+      },
+      {
+        "name": "04_hunyuan_3d_2.1_subgraphed",
+        "title": "Imagen a 3D (Nuevo)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Genera modelos 3D a partir de imágenes únicas usando Hunyuan3D 2.0.",
+        "tags": ["Imagen a 3D", "3D"],
+        "models": ["Hunyuan3D"],
+        "date": "2025-10-17",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
+        "size": 4928474972
+      },
+      {
+        "name": "05_audio_ace_step_1_t2a_song_subgraphed",
+        "title": "Texto a audio (Nuevo)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Genera audio a partir de indicaciones de texto usando ACE-Step v1",
+        "tags": ["Texto a audio", "Audio"],
+        "models": ["ACE-Step"],
+        "date": "2025-10-17",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+        "size": 7698728878
+      },
+      {
+        "name": "default",
+        "title": "Generación de imágenes",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes a partir de indicaciones de texto.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2136746230,
+        "vram": 3092376453
+      },
+      {
+        "name": "image2image",
+        "title": "Imagen a imagen",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Transformar imágenes existentes usando indicaciones de texto.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
+        "tags": ["Imagen a imagen", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2136746230,
+        "vram": 3092376453,
+        "thumbnailVariant": "hoverDissolve"
+      },
+      {
+        "name": "lora",
+        "title": "LoRA",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes con modelos LoRA para estilos o temas especializados.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2437393940,
+        "vram": 3092376453
+      },
+      {
+        "name": "lora_multiple",
+        "title": "LoRA múltiple",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes combinando múltiples modelos LoRA.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2437393940,
+        "vram": 3350074491
+      },
+      {
+        "name": "inpaint_example",
+        "title": "Inpainting",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Editar partes específicas de imágenes sin problemas.",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
+        "tags": ["Inpaint", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 5218385265,
+        "vram": 4101693768
+      },
+      {
+        "name": "inpaint_model_outpainting",
+        "title": "Outpainting",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Extender imágenes más allá de sus límites originales.",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
+        "tags": ["Outpaint", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 5218385265,
+        "vram": 4101693768
+      },
+      {
+        "name": "embedding_example",
+        "title": "Incrustación",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes usando inversión textual para estilos consistentes.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 5218385265,
+        "vram": 4123168604
+      },
+      {
+        "name": "gligen_textbox_example",
+        "title": "Cuadro de texto Gligen",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes con colocación precisa de objetos usando cuadros de texto.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
+        "tags": ["Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 2974264852,
+        "vram": 4080218931
+      },
+      {
+        "name": "area_composition",
+        "title": "Composición de área",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes controlando la composición con áreas definidas.",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/",
+        "size": 2469606195,
+        "vram": 6184752906
+      },
+      {
+        "name": "area_composition_square_area_for_subject",
+        "title": "Composición de área área cuadrada para sujeto",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes con colocación consistente de sujeto usando composición de área.",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition",
+        "size": 2469606195,
+        "vram": 5927054868
+      },
+      {
+        "name": "hiresfix_latent_workflow",
+        "title": "Mejorar",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Mejorar imágenes aumentando la calidad en el espacio latente.",
+        "thumbnailVariant": "compareSlider",
+        "tags": ["Mejorar", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/",
+        "size": 2136746230,
+        "vram": 3929895076
+      },
+      {
+        "name": "esrgan_example",
+        "title": "ESRGAN",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Mejorar imágenes usando modelos ESRGAN para aumentar la calidad.",
+        "thumbnailVariant": "compareSlider",
+        "tags": ["Mejorar", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/",
+        "size": 2201170739,
+        "vram": 6442450944
+      },
+      {
+        "name": "hiresfix_esrgan_workflow",
+        "title": "Flujo de trabajo ESRGAN HiresFix",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Mejorar imágenes usando modelos ESRGAN durante pasos intermedios de generación.",
+        "thumbnailVariant": "compareSlider",
+        "tags": ["Mejorar", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling",
+        "size": 2201170739,
+        "vram": 6442450944
+      },
+      {
+        "name": "latent_upscale_different_prompt_model",
+        "title": "Mejora latente con modelo de indicación diferente",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Mejorar imágenes mientras se cambian las indicaciones a través de pasos de generación.",
+        "thumbnailVariant": "zoomHover",
+        "tags": ["Mejorar", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples",
+        "size": 4262755041,
+        "vram": 5153960755
+      },
+      {
+        "name": "controlnet_example",
+        "title": "ControlNet garabato",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por imágenes de referencia de garabatos usando ControlNet.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/",
+        "size": 3189013217,
+        "vram": 6442450944
+      },
+      {
+        "name": "2_pass_pose_worship",
+        "title": "ControlNet pose 2 pasos",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por referencias de pose usando ControlNet.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet",
+        "size": 4660039516,
+        "vram": 6442450944
+      },
+      {
+        "name": "depth_controlnet",
+        "title": "ControlNet profundidad",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por información de profundidad usando ControlNet.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Imagen", "Texto a imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
+        "size": 2888365507,
+        "vram": 6442450944
+      },
+      {
+        "name": "depth_t2i_adapter",
+        "title": "Adaptador T2I profundidad",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por información de profundidad usando adaptador T2I.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Imagen", "Texto a imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
+        "size": 2523293286,
+        "vram": 6442450944
+      },
+      {
+        "name": "mixing_controlnets",
+        "title": "Mezcla de ControlNets",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes combinando múltiples modelos ControlNet.",
+        "thumbnailVariant": "hoverDissolve",
+        "tags": ["ControlNet", "Imagen", "Texto a imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-03-01",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets",
+        "size": 3328599654,
+        "vram": 6442450944
+      }
+    ]
+  },
+  {
+    "moduleName": "default",
+    "type": "image",
+    "category": "GENERATION TYPE",
+    "icon": "icon-[lucide--image]",
+    "title": "Image",
+    "templates": [
+      {
+        "name": "image_flux2",
+        "title": "Flux.2 Dev",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Genera imágenes fotorrealistas con coherencia multi-referencia y renderizado profesional de texto.",
+        "tags": ["Texto a imagen", "Imagen", "Edición de imagen"],
+        "models": ["Flux.2 Dev", "BFL"],
+        "date": "2025-11-26",
+        "size": 71382356459,
+        "vram": 0
+      },
+      {
+        "name": "image_flux2_fp8",
+        "title": "Maqueta de producto (Flux.2 Dev FP8)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Crea maquetas de productos aplicando patrones de diseño a envases, tazas y otros productos usando consistencia multi-referencia.",
+        "tags": ["Texto a imagen", "Imagen", "Edición de imagen", "Maqueta", "Diseño de producto"],
+        "models": ["Flux.2 Dev", "BFL"],
+        "date": "2025-11-26",
+        "size": 53837415055,
+        "vram": 0
+      },
+      {
+        "name": "image_z_image_turbo",
+        "title": "Z-Image-Turbo texto a imagen",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Modelo fundacional eficiente de generación de imágenes con transformador de difusión de flujo único, compatible con inglés y chino.",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Z-Image-Turbo"],
+        "date": "2025-11-27",
+        "size": 35326050304
+      },
+      {
+        "name": "image_qwen_image",
+        "title": "Texto a imagen Qwen-Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes con capacidad excepcional de renderizado y edición de texto multilingüe usando el modelo MMDiT de 20B de Qwen-Image.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Qwen-Image"],
+        "date": "2025-08-05",
+        "size": 31772020572
+      },
+      {
+        "name": "image_qwen_image_instantx_controlnet",
+        "title": "Qwen-Image InstantX ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Genera imágenes con Qwen-Image InstantX ControlNet, compatible con canny, bordes suaves, profundidad y pose",
+        "tags": ["Imagen a imagen", "Imagen", "ControlNet"],
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "models": ["Qwen-Image"],
+        "date": "2025-08-23",
+        "size": 35304631173
+      },
+      {
+        "name": "image_qwen_image_instantx_inpainting_controlnet",
+        "title": "Qwen-Image InstantX ControlNet de Inpainting",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Inpainting profesional y edición de imágenes con Qwen-Image InstantX ControlNet. Compatible con reemplazo de objetos, modificación de texto, cambios de fondo y outpainting.",
+        "tags": ["Imagen a imagen", "Imagen", "ControlNet", "Inpaint"],
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "models": ["Qwen-Image"],
+        "date": "2025-09-12",
+        "size": 36013300777
+      },
+      {
+        "name": "image_qwen_image_union_control_lora",
+        "title": "Control unificado Qwen-Image",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes con control estructural preciso usando el ControlNet LoRA unificado de Qwen-Image. Soporta múltiples tipos de control incluyendo canny, profundidad, lineart, softedge, normal y openpose para aplicaciones creativas diversas.",
+        "tags": ["Texto a imagen", "Imagen", "ControlNet"],
+        "models": ["Qwen-Image"],
+        "date": "2025-08-23",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "size": 32716913377
+      },
+      {
+        "name": "image_qwen_image_controlnet_patch",
+        "title": "Parche de modelo Qwen-Image ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Controla la generación de imágenes usando modelos Qwen-Image ControlNet. Compatible con controles canny, profundidad e inpainting mediante parcheo de modelo.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
+        "tags": ["Texto a imagen", "Imagen", "ControlNet"],
+        "models": ["Qwen-Image"],
+        "date": "2025-08-24",
+        "size": 34037615821
+      },
+      {
+        "name": "image_qwen_image_edit_2509",
+        "title": "Qwen Edición de Imagen 2509",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Edición avanzada de imágenes con soporte multi-imagen, consistencia mejorada e integración de ControlNet.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+        "tags": ["Imagen a imagen", "Edición de imagen", "ControlNet"],
+        "models": ["Qwen-Image"],
+        "date": "2025-09-25",
+        "size": 31772020572
+      },
+      {
+        "name": "image_qwen_image_edit",
+        "title": "Edición de imagen Qwen",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Editar imágenes con edición precisa de texto bilingüe y capacidades de edición dual semántica/apariencia usando el modelo MMDiT de 20B de Qwen-Image-Edit.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+        "tags": ["Imagen a imagen", "Edición de imagen"],
+        "models": ["Qwen-Image"],
+        "date": "2025-08-18",
+        "size": 31772020572
+      },
+      {
+        "name": "image_chrono_edit_14B",
+        "title": "ChronoEdit 14B",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Edición de imágenes impulsada por la comprensión dinámica de modelos de video, creando resultados físicamente plausibles mientras preserva la consistencia del personaje y el estilo.",
+        "tags": ["Edición de imagen", "Imagen a imagen"],
+        "models": ["Wan2.1", "ChronoEdit", "Nvidia"],
+        "date": "2025-11-03",
+        "size": 40459304
+      },
+      {
+        "name": "flux_kontext_dev_basic",
+        "title": "Flux Kontext Dev(Básico)",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "description": "Editar imagen usando Flux Kontext con visibilidad completa de nodos, perfecto para aprender el flujo de trabajo.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
+        "tags": ["Edición de imagen", "Imagen a imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-06-26",
+        "size": 17641578168,
+        "vram": 19327352832
+      },
+      {
+        "name": "image_chroma1_radiance_text_to_image",
+        "title": "Chroma1 Radiance Texto a Imagen",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Chroma1-Radiance trabaja directamente con píxeles de imagen en lugar de latentes comprimidos, ofreciendo imágenes de mayor calidad con menos artefactos y distorsión.",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Chroma"],
+        "date": "2025-09-18",
+        "size": 23622320128,
+        "vram": 23622320128
+      },
+      {
+        "name": "image_netayume_lumina_t2i",
+        "title": "NetaYume Lumina Texto a Imagen",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generación de imágenes de estilo anime de alta calidad con comprensión mejorada de personajes y texturas detalladas. Ajustado finamente desde Neta Lumina en el conjunto de datos Danbooru.",
+        "tags": ["Texto a imagen", "Imagen", "Anime"],
+        "models": ["OmniGen"],
+        "date": "2025-10-10",
+        "size": 10619306639
+      },
+      {
+        "name": "image_chroma_text_to_image",
+        "title": "Texto a imagen Chroma",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Chroma está modificado de flux y tiene algunos cambios en la arquitectura.",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Chroma", "Flux"],
+        "date": "2025-06-04",
+        "size": 23289460163,
+        "vram": 15569256448
+      },
+      {
+        "name": "image_flux.1_fill_dev_OneReward",
+        "title": "Flux.1 Dev OneReward",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Supports various tasks such as image inpainting, outpainting, and object removal",
+        "tags": ["Inpaint", "Outpaint"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-09-21",
+        "size": 29001766666,
+        "vram": 21474836480
+      },
+      {
+        "name": "flux_dev_checkpoint_example",
+        "title": "Flux Dev fp8",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes usando la versión cuantizada Flux Dev fp8. Adecuado para dispositivos con VRAM limitada, requiere solo un archivo de modelo, pero la calidad de imagen es ligeramente inferior a la versión completa.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 17244293693,
+        "vram": 18253611008
+      },
+      {
+        "name": "flux1_dev_uso_reference_image_gen",
+        "title": "Generación de Imágenes de Referencia Flux.1 Dev USO",
+        "description": "Usa imágenes de referencia para controlar tanto el estilo como el sujeto: mantén el rostro de tu personaje mientras cambias el estilo artístico, o aplica estilos artísticos a nuevas escenas",
+        "thumbnailVariant": "hoverDissolve",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Imagen a imagen", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-09-02",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
+        "size": 18597208392,
+        "vram": 19864223744
+      },
+      {
+        "name": "flux_schnell",
+        "title": "Flux Schnell fp8",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar rápidamente imágenes con la versión cuantizada Flux Schnell fp8. Ideal para hardware de gama baja, requiere solo 4 pasos para generar imágenes.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 17233556275,
+        "vram": 18253611008
+      },
+      {
+        "name": "flux1_krea_dev",
+        "title": "Flux.1 Krea Dev",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Un modelo FLUX afinado que lleva el fotorrealismo al máximo",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-07-31",
+        "size": 22269405430,
+        "vram": 23085449216
+      },
+      {
+        "name": "flux_dev_full_text_to_image",
+        "title": "Texto a imagen completo Flux Dev",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes de alta calidad con la versión completa de Flux Dev. Requiere mayor VRAM y múltiples archivos de modelo, pero proporciona la mejor capacidad de seguimiento de indicaciones y calidad de imagen.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 34177202258,
+        "vram": 23622320128
+      },
+      {
+        "name": "flux_schnell_full_text_to_image",
+        "title": "Texto a imagen completo Flux Schnell",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar rápidamente imágenes con la versión completa de Flux Schnell. Usa licencia Apache2.0, requiere solo 4 pasos para generar imágenes manteniendo buena calidad de imagen.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 34155727421
+      },
+      {
+        "name": "flux_fill_inpaint_example",
+        "title": "Inpaint Flux",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Rellenar partes faltantes de imágenes usando inpainting de Flux.",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
+        "tags": ["Imagen a imagen", "Inpaint", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 10372346020
+      },
+      {
+        "name": "flux_fill_outpaint_example",
+        "title": "Outpaint Flux",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Extender imágenes más allá de los límites usando outpainting de Flux.",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
+        "tags": ["Outpaint", "Imagen", "Imagen a imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 10372346020
+      },
+      {
+        "name": "flux_canny_model_example",
+        "title": "Modelo Canny Flux",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por detección de bordes usando Flux Canny.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+        "tags": ["Imagen a imagen", "ControlNet", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 34177202258
+      },
+      {
+        "name": "flux_depth_lora_example",
+        "title": "LoRA de profundidad Flux",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por información de profundidad usando Flux LoRA.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+        "tags": ["Imagen a imagen", "ControlNet", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 35412005356
+      },
+      {
+        "name": "flux_redux_model_example",
+        "title": "Modelo Redux Flux",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes transfiriendo estilo de imágenes de referencia usando Flux Redux.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+        "tags": ["Imagen a imagen", "ControlNet", "Imagen"],
+        "models": ["Flux", "BFL"],
+        "date": "2025-03-01",
+        "size": 35154307318
+      },
+      {
+        "name": "image_omnigen2_t2i",
+        "title": "Texto a imagen OmniGen2",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes de alta calidad a partir de indicaciones de texto usando el modelo multimodal unificado de 7B de OmniGen2 con arquitectura de doble ruta.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["OmniGen"],
+        "date": "2025-06-30",
+        "size": 15784004813
+      },
+      {
+        "name": "image_omnigen2_image_edit",
+        "title": "Edición de imagen OmniGen2",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "description": "Editar imágenes con instrucciones de lenguaje natural usando las capacidades avanzadas de edición de imágenes y soporte de renderizado de texto de OmniGen2.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
+        "tags": ["Edición de imagen", "Imagen"],
+        "models": ["OmniGen"],
+        "date": "2025-06-30",
+        "size": 15784004813
+      },
+      {
+        "name": "hidream_i1_dev",
+        "title": "HiDream I1 Dev",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes con HiDream I1 Dev - Versión equilibrada con 28 pasos de inferencia, adecuada para hardware de gama media.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["HiDream"],
+        "date": "2025-04-17",
+        "size": 33318208799
+      },
+      {
+        "name": "hidream_i1_fast",
+        "title": "HiDream I1 Fast",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar rápidamente imágenes con HiDream I1 Fast - Versión ligera con 16 pasos de inferencia, ideal para vistas previas rápidas en hardware de gama baja.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["HiDream"],
+        "date": "2025-04-17",
+        "size": 24234352968
+      },
+      {
+        "name": "hidream_i1_full",
+        "title": "HiDream I1 Full",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes con HiDream I1 Full - Versión completa con 50 pasos de inferencia para la mejor calidad de salida.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["HiDream"],
+        "date": "2025-04-17",
+        "size": 24234352968
+      },
+      {
+        "name": "hidream_e1_1",
+        "title": "Edición de imagen HiDream E1.1",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Editar imágenes con HiDream E1.1 – Es mejor en calidad de imagen y precisión de edición que HiDream-E1-Full.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
+        "tags": ["Edición de imagen", "Imagen"],
+        "models": ["HiDream"],
+        "date": "2025-07-21",
+        "size": 50422916055
+      },
+      {
+        "name": "hidream_e1_full",
+        "title": "Edición de imagen HiDream E1",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Editar imágenes con HiDream E1 - Modelo profesional de edición de imagen con lenguaje natural.",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
+        "tags": ["Edición de imagen", "Imagen"],
+        "models": ["HiDream"],
+        "date": "2025-05-01",
+        "size": 34209414513
+      },
+      {
+        "name": "sd3.5_simple_example",
+        "title": "SD3.5 Simple",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes usando SD 3.5.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SD3.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 14935748772
+      },
+      {
+        "name": "sd3.5_large_canny_controlnet_example",
+        "title": "ControlNet Canny grande SD3.5",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por detección de bordes usando ControlNet Canny SD 3.5.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+        "tags": ["Imagen a imagen", "Imagen", "ControlNet"],
+        "models": ["SD3.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 23590107873
+      },
+      {
+        "name": "sd3.5_large_depth",
+        "title": "Profundidad grande SD3.5",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por información de profundidad usando SD 3.5.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+        "tags": ["Imagen a imagen", "Imagen", "ControlNet"],
+        "models": ["SD3.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 23590107873
+      },
+      {
+        "name": "sd3.5_large_blur",
+        "title": "Desenfoque grande SD3.5",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes guiadas por imágenes de referencia desenfocadas usando SD 3.5.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+        "tags": ["Imagen a imagen", "Imagen"],
+        "models": ["SD3.5", "Stability"],
+        "date": "2025-03-01",
+        "size": 23590107873
+      },
+      {
+        "name": "sdxl_simple_example",
+        "title": "SDXL Simple",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes de alta calidad usando SDXL.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SDXL", "Stability"],
+        "date": "2025-03-01",
+        "size": 13013750907
+      },
+      {
+        "name": "sdxl_refiner_prompt_example",
+        "title": "Refinador de indicaciones SDXL",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Mejorar imágenes SDXL usando modelos refinadores.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SDXL", "Stability"],
+        "date": "2025-03-01",
+        "size": 13013750907
+      },
+      {
+        "name": "sdxl_revision_text_prompts",
+        "title": "Indicaciones de texto de revisión SDXL",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes transfiriendo conceptos de imágenes de referencia usando Revisión SDXL.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SDXL", "Stability"],
+        "date": "2025-03-01",
+        "size": 10630044058
+      },
+      {
+        "name": "sdxlturbo_example",
+        "title": "SDXL Turbo",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generar imágenes en un solo paso usando SDXL Turbo.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
+        "tags": ["Texto a imagen", "Imagen"],
+        "models": ["SDXL", "Stability"],
+        "date": "2025-03-01",
+        "size": 6936372183
+      },
+      {
+        "name": "image_lotus_depth_v1_1",
+        "title": "Profundidad Lotus",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "description": "Ejecutar Profundidad Lotus en ComfyUI para estimación monocromática eficiente sin entrenamiento previo con alta retención de detalles.",
+        "tags": ["Imagen", "Texto a imagen"],
+        "models": ["SD1.5", "Stability"],
+        "date": "2025-05-21",
+        "size": 2072321720
+      }
+    ]
+  },
+  {
+    "moduleName": "default",
+    "type": "video",
+    "category": "GENERATION TYPE",
+    "icon": "icon-[lucide--film]",
+    "title": "Video",
+    "templates": [
+      {
+        "name": "video_wan2_2_14B_t2v",
+        "title": "Texto a video Wan 2.2 14B",
+        "description": "Generar videos de alta calidad a partir de indicaciones de texto con control estético cinematográfico y generación de movimiento dinámico usando Wan 2.2.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["Texto a video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_i2v",
+        "title": "Imagen a video Wan 2.2 14B",
+        "description": "Transformar imágenes estáticas en videos dinámicos con control de movimiento preciso y preservación de estilo usando Wan 2.2.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["Imagen a video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_flf2v",
+        "title": "Primer-Último fotograma a video Wan 2.2 14B",
+        "description": "Generar transiciones de video suaves definiendo fotogramas de inicio y fin.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["FLF2V", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-02",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_animate",
+        "title": "Wan2.2 Animate animación y reemplazo de personajes",
+        "description": "Marco unificado de animación y reemplazo de personajes con replicación precisa de movimiento y expresión.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
+        "tags": ["Video", "Imagen a video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-09-22",
+        "size": 27417997476
+      },
+      {
+        "name": "video_hunyuan_video_1.5_720p_t2v",
+        "title": "Hunyuan Video 1.5 Texto a Video",
+        "description": "Genera vídeos 720p de alta calidad a partir de indicaciones de texto, con control cinematográfico de cámara, expresiones emocionales y simulación física. Soporta varios estilos, incluyendo realista, anime y renderizado de texto 3D.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Texto a video", "Video"],
+        "models": ["Hunyuan Video"],
+        "date": "2025-11-21",
+        "size": 45384919416
+      },
+      {
+        "name": "video_hunyuan_video_1.5_720p_i2v",
+        "title": "Hunyuan Video 1.5 Imagen a Video",
+        "description": "Anima imágenes fijas y conviértelas en videos dinámicos con movimiento preciso y control de cámara. Mantiene la coherencia visual mientras da vida a fotos e ilustraciones con movimientos suaves y naturales.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Imagen a video", "Video"],
+        "models": ["Hunyuan Video"],
+        "date": "2025-11-21",
+        "size": 45384919416
+      },
+      {
+        "name": "video_wan2_2_14B_s2v",
+        "title": "Wan2.2-S2V Generación de Video Impulsada por Audio",
+        "description": "Transforma imágenes estáticas y audio en videos dinámicos con sincronización perfecta y generación de nivel por minuto.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
+        "tags": ["Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-02",
+        "size": 25254407700
+      },
+      {
+        "name": "video_humo",
+        "title": "HuMo Generación de Video",
+        "description": "Genera videos basados en audio, imagen y texto, manteniendo la sincronización labial del personaje.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Video"],
+        "models": ["HuMo"],
+        "date": "2025-09-21",
+        "size": 27895812588
+      },
+      {
+        "name": "video_wan2_2_14B_fun_inpaint",
+        "title": "Wan 2.2 14B Fun Inpainting",
+        "description": "Genera videos a partir de fotogramas de inicio y fin usando Wan 2.2 Fun Inp.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
+        "tags": ["FLF2V", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-12",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_fun_control",
+        "title": "Control Fun Wan 2.2 14B",
+        "description": "Generar videos guiados por controles de pose, profundidad y borde usando Wan 2.2 Fun Control.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
+        "tags": ["Video a video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-12",
+        "size": 38031935406
+      },
+      {
+        "name": "video_wan2_2_14B_fun_camera",
+        "title": "Control de cámara Fun Wan 2.2 14B",
+        "description": "Generar videos con controles de movimiento de cámara incluyendo panorámica, zoom y rotación usando Wan 2.2 Fun Camera Control.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
+        "tags": ["Video a video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-08-17",
+        "size": 40050570035
+      },
+      {
+        "name": "video_wan2_2_5B_ti2v",
+        "title": "Generación de video Wan 2.2 5B",
+        "description": "Generar videos a partir de texto o imágenes usando el modelo híbrido Wan 2.2 5B",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+        "tags": ["Texto a video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 18146236826
+      },
+      {
+        "name": "video_wan2_2_5B_fun_inpaint",
+        "title": "Wan 2.2 5B Fun Inpainting",
+        "description": "Inpainting de video eficiente desde fotogramas de inicio y fin. El modelo 5B ofrece iteraciones rápidas para probar flujos de trabajo.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Texto a video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 18146236826
+      },
+      {
+        "name": "video_wan2_2_5B_fun_control",
+        "title": "Wan 2.2 5B Fun Control",
+        "description": "Control de video multicondición con guía de pose, profundidad y bordes. Tamaño compacto de 5B para desarrollo experimental.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Texto a video", "Video"],
+        "models": ["Wan2.2", "Wan"],
+        "date": "2025-07-29",
+        "size": 18146236826
+      },
+      {
+        "name": "video_wan_vace_14B_t2v",
+        "title": "Texto a video Wan VACE",
+        "description": "Transformar descripciones de texto en videos de alta calidad. Soporta tanto 480p como 720p con el modelo VACE-14B.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Texto a video", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_14B_ref2v",
+        "title": "Referencia a video Wan VACE",
+        "description": "Crear videos que coincidan con el estilo y contenido de una imagen de referencia. Perfecto para generación de video consistente en estilo.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Video", "Imagen a video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_14B_v2v",
+        "title": "Control de video Wan VACE",
+        "description": "Generar videos controlando videos de entrada e imágenes de referencia usando Wan VACE.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Video a video", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_outpainting",
+        "title": "Outpainting Wan VACE",
+        "description": "Generar videos extendidos expandiendo el tamaño de video usando outpainting de Wan VACE.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "thumbnailVariant": "compareSlider",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["Outpaint", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_flf2v",
+        "title": "Primer-Último fotograma Wan VACE",
+        "description": "Generar transiciones de video suaves definiendo fotogramas de inicio y fin. Soporta secuencias de fotogramas clave personalizadas.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+        "tags": ["FLF2V", "Video"],
+        "models": ["Wan2.1", "Wan"],
+        "date": "2025-05-21",
+        "size": 57756572713
+      },
+      {
+        "name": "video_wan_vace_inpainting",
+        "title": "Inpainting Wan VACE",
+        "description": "Editar regiones específicas en videos mientras se preserva el contenido circundante. Excelente para eliminación o reemplazo de objetos.",
+        "mediaType": "image",
|
1115
|
+
"mediaSubtype": "webp",
|
|
1116
|
+
"thumbnailVariant": "compareSlider",
|
|
1117
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
1118
|
+
"tags": ["Inpaint", "Video"],
|
|
1119
|
+
"models": ["Wan2.1", "Wan"],
|
|
1120
|
+
"date": "2025-05-21",
|
|
1121
|
+
"size": 57756572713
|
|
1122
|
+
},
|
|
1123
|
+
{
|
|
1124
|
+
"name": "video_wan2.1_alpha_t2v_14B",
|
|
1125
|
+
"title": "Wan2.1 Alpha Texto a Video",
|
|
1126
|
+
"description": "Genera videos desde texto con soporte de canal alfa para fondos transparentes y objetos semitransparentes.",
|
|
1127
|
+
"mediaType": "image",
|
|
1128
|
+
"mediaSubtype": "webp",
|
|
1129
|
+
"tags": ["Texto a video", "Video"],
|
|
1130
|
+
"models": ["Wan2.1", "Wan"],
|
|
1131
|
+
"date": "2025-10-06",
|
|
1132
|
+
"size": 22494891213
|
|
1133
|
+
},
|
|
1134
|
+
{
|
|
1135
|
+
"name": "video_wan_ati",
|
|
1136
|
+
"title": "Wan ATI",
|
|
1137
|
+
"description": "Generación de video controlada por trayectoria.",
|
|
1138
|
+
"mediaType": "image",
|
|
1139
|
+
"mediaSubtype": "webp",
|
|
1140
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1141
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
|
|
1142
|
+
"tags": ["Video"],
|
|
1143
|
+
"models": ["Wan2.1", "Wan"],
|
|
1144
|
+
"date": "2025-05-21",
|
|
1145
|
+
"size": 25393994138
|
|
1146
|
+
},
|
|
1147
|
+
{
|
|
1148
|
+
"name": "video_wan2.1_fun_camera_v1.1_1.3B",
|
|
1149
|
+
"title": "Cámara Fun 1.3B Wan 2.1",
|
|
1150
|
+
"description": "Generar videos dinámicos con movimientos cinematográficos de cámara usando el modelo Wan 2.1 Fun Camera 1.3B.",
|
|
1151
|
+
"mediaType": "image",
|
|
1152
|
+
"mediaSubtype": "webp",
|
|
1153
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1154
|
+
"tags": ["Video"],
|
|
1155
|
+
"models": ["Wan2.1", "Wan"],
|
|
1156
|
+
"date": "2025-04-15",
|
|
1157
|
+
"size": 11489037517
|
|
1158
|
+
},
|
|
1159
|
+
{
|
|
1160
|
+
"name": "video_wan2.1_fun_camera_v1.1_14B",
|
|
1161
|
+
"title": "Cámara Fun 14B Wan 2.1",
|
|
1162
|
+
"description": "Generar videos de alta calidad con control avanzado de cámara usando el modelo completo de 14B",
|
|
1163
|
+
"mediaType": "image",
|
|
1164
|
+
"mediaSubtype": "webp",
|
|
1165
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1166
|
+
"tags": ["Video"],
|
|
1167
|
+
"models": ["Wan2.1", "Wan"],
|
|
1168
|
+
"date": "2025-04-15",
|
|
1169
|
+
"size": 42047729828
|
|
1170
|
+
},
|
|
1171
|
+
{
|
|
1172
|
+
"name": "text_to_video_wan",
|
|
1173
|
+
"title": "Texto a video Wan 2.1",
|
|
1174
|
+
"description": "Generar videos a partir de indicaciones de texto usando Wan 2.1.",
|
|
1175
|
+
"mediaType": "image",
|
|
1176
|
+
"mediaSubtype": "webp",
|
|
1177
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
1178
|
+
"tags": ["Texto a video", "Video"],
|
|
1179
|
+
"models": ["Wan2.1", "Wan"],
|
|
1180
|
+
"date": "2025-03-01",
|
|
1181
|
+
"size": 9824737690
|
|
1182
|
+
},
|
|
1183
|
+
{
|
|
1184
|
+
"name": "image_to_video_wan",
|
|
1185
|
+
"title": "Imagen a video Wan 2.1",
|
|
1186
|
+
"description": "Generar videos a partir de imágenes usando Wan 2.1.",
|
|
1187
|
+
"mediaType": "image",
|
|
1188
|
+
"mediaSubtype": "webp",
|
|
1189
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
1190
|
+
"tags": ["Texto a video", "Video"],
|
|
1191
|
+
"models": ["Wan2.1", "Wan"],
|
|
1192
|
+
"date": "2025-03-01",
|
|
1193
|
+
"size": 41049149932
|
|
1194
|
+
},
|
|
1195
|
+
{
|
|
1196
|
+
"name": "wan2.1_fun_inp",
|
|
1197
|
+
"title": "Inpainting Wan 2.1",
|
|
1198
|
+
"description": "Generar videos desde fotogramas de inicio y fin usando inpainting de Wan 2.1.",
|
|
1199
|
+
"mediaType": "image",
|
|
1200
|
+
"mediaSubtype": "webp",
|
|
1201
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
|
|
1202
|
+
"tags": ["Inpaint", "Video"],
|
|
1203
|
+
"models": ["Wan2.1", "Wan"],
|
|
1204
|
+
"date": "2025-04-15",
|
|
1205
|
+
"size": 11381663334
|
|
1206
|
+
},
|
|
1207
|
+
{
|
|
1208
|
+
"name": "wan2.1_fun_control",
|
|
1209
|
+
"title": "ControlNet Wan 2.1",
|
|
1210
|
+
"description": "Generar videos guiados por controles de pose, profundidad y borde usando ControlNet de Wan 2.1.",
|
|
1211
|
+
"mediaType": "image",
|
|
1212
|
+
"mediaSubtype": "webp",
|
|
1213
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1214
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
1215
|
+
"tags": ["Video a video", "Video"],
|
|
1216
|
+
"models": ["Wan2.1", "Wan"],
|
|
1217
|
+
"date": "2025-04-15",
|
|
1218
|
+
"size": 11381663334
|
|
1219
|
+
},
|
|
1220
|
+
{
|
|
1221
|
+
"name": "wan2.1_flf2v_720_f16",
|
|
1222
|
+
"title": "FLF2V 720p F16 Wan 2.1",
|
|
1223
|
+
"description": "Generar videos controlando primer y último fotogramas usando FLF2V de Wan 2.1.",
|
|
1224
|
+
"mediaType": "image",
|
|
1225
|
+
"mediaSubtype": "webp",
|
|
1226
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
1227
|
+
"tags": ["FLF2V", "Video"],
|
|
1228
|
+
"models": ["Wan2.1", "Wan"],
|
|
1229
|
+
"date": "2025-04-15",
|
|
1230
|
+
"size": 41049149932
|
|
1231
|
+
},
|
|
1232
|
+
{
|
|
1233
|
+
"name": "ltxv_text_to_video",
|
|
1234
|
+
"title": "Texto a video LTXV",
|
|
1235
|
+
"mediaType": "image",
|
|
1236
|
+
"mediaSubtype": "webp",
|
|
1237
|
+
"description": "Generar videos a partir de indicaciones de texto.",
|
|
1238
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1239
|
+
"tags": ["Texto a video", "Video"],
|
|
1240
|
+
"models": ["LTXV"],
|
|
1241
|
+
"date": "2025-03-01",
|
|
1242
|
+
"size": 19155554140
|
|
1243
|
+
},
|
|
1244
|
+
{
|
|
1245
|
+
"name": "ltxv_image_to_video",
|
|
1246
|
+
"title": "Imagen a video LTXV",
|
|
1247
|
+
"mediaType": "image",
|
|
1248
|
+
"mediaSubtype": "webp",
|
|
1249
|
+
"description": "Generar videos a partir de imágenes fijas.",
|
|
1250
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1251
|
+
"tags": ["Imagen a video", "Video"],
|
|
1252
|
+
"models": ["LTXV"],
|
|
1253
|
+
"date": "2025-03-01",
|
|
1254
|
+
"size": 19155554140
|
|
1255
|
+
},
|
|
1256
|
+
{
|
|
1257
|
+
"name": "mochi_text_to_video_example",
|
|
1258
|
+
"title": "Texto a video Mochi",
|
|
1259
|
+
"mediaType": "image",
|
|
1260
|
+
"mediaSubtype": "webp",
|
|
1261
|
+
"description": "Generar videos a partir de indicaciones de texto usando el modelo Mochi.",
|
|
1262
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
|
|
1263
|
+
"tags": ["Texto a video", "Video"],
|
|
1264
|
+
"models": ["Mochi"],
|
|
1265
|
+
"date": "2025-03-01",
|
|
1266
|
+
"size": 30762703258
|
|
1267
|
+
},
|
|
1268
|
+
{
|
|
1269
|
+
"name": "hunyuan_video_text_to_video",
|
|
1270
|
+
"title": "Texto a video Hunyuan Video",
|
|
1271
|
+
"mediaType": "image",
|
|
1272
|
+
"mediaSubtype": "webp",
|
|
1273
|
+
"description": "Generar videos a partir de indicaciones de texto usando el modelo Hunyuan.",
|
|
1274
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
|
|
1275
|
+
"tags": ["Texto a video", "Video"],
|
|
1276
|
+
"models": ["Hunyuan Video", "Tencent"],
|
|
1277
|
+
"date": "2025-03-01",
|
|
1278
|
+
"size": 35476429865
|
|
1279
|
+
},
|
|
1280
|
+
{
|
|
1281
|
+
"name": "image_to_video",
|
|
1282
|
+
"title": "Imagen a video SVD",
|
|
1283
|
+
"mediaType": "image",
|
|
1284
|
+
"mediaSubtype": "webp",
|
|
1285
|
+
"description": "Generar videos a partir de imágenes fijas.",
|
|
1286
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
1287
|
+
"tags": ["Imagen a video", "Video"],
|
|
1288
|
+
"models": ["SVD", "Stability"],
|
|
1289
|
+
"date": "2025-03-01",
|
|
1290
|
+
"size": 9556302234
|
|
1291
|
+
},
|
|
1292
|
+
{
|
|
1293
|
+
"name": "txt_to_image_to_video",
|
|
1294
|
+
"title": "Texto a imagen a video SVD",
|
|
1295
|
+
"mediaType": "image",
|
|
1296
|
+
"mediaSubtype": "webp",
|
|
1297
|
+
"description": "Generar videos creando primero imágenes a partir de indicaciones de texto.",
|
|
1298
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
1299
|
+
"tags": ["Texto a video", "Video"],
|
|
1300
|
+
"models": ["SVD", "Stability"],
|
|
1301
|
+
"date": "2025-03-01",
|
|
1302
|
+
"size": 16492674417
|
|
1303
|
+
}
|
|
1304
|
+
]
|
|
1305
|
+
},
|
|
1306 +   {
1307 +     "moduleName": "default",
1308 +     "type": "audio",
1309 +     "category": "GENERATION TYPE",
1310 +     "icon": "icon-[lucide--volume-2]",
1311 +     "title": "Audio",
1312 +     "templates": [
1313 +       {
1314 +         "name": "audio_stable_audio_example",
1315 +         "title": "Stable Audio",
1316 +         "mediaType": "audio",
1317 +         "mediaSubtype": "mp3",
1318 +         "description": "Generate audio from text prompts using Stable Audio.",
1319 +         "tags": ["Text to audio", "Audio"],
1320 +         "models": ["Stable Audio", "Stability"],
1321 +         "date": "2025-03-01",
1322 +         "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
1323 +         "size": 5744518758
1324 +       },
1325 +       {
1326 +         "name": "audio_ace_step_1_t2a_instrumentals",
1327 +         "title": "ACE-Step v1 Text-to-Audio Instrumentals",
1328 +         "mediaType": "audio",
1329 +         "mediaSubtype": "mp3",
1330 +         "description": "Generate instrumental music from text prompts using ACE-Step v1.",
1331 +         "tags": ["Text to audio", "Audio"],
1332 +         "models": ["ACE-Step"],
1333 +         "date": "2025-03-01",
1334 +         "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
1335 +         "size": 7698728878
1336 +       },
1337 +       {
1338 +         "name": "audio_ace_step_1_t2a_song",
1339 +         "title": "ACE-Step v1 Text-to-Audio Song",
1340 +         "mediaType": "audio",
1341 +         "mediaSubtype": "mp3",
1342 +         "description": "Generate songs with vocals from text prompts using ACE-Step v1, with support for multilingual and style customization.",
1343 +         "tags": ["Text to audio", "Audio"],
1344 +         "models": ["ACE-Step"],
1345 +         "date": "2025-03-01",
1346 +         "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
1347 +         "size": 7698728878
1348 +       },
1349 +       {
1350 +         "name": "audio_ace_step_1_m2m_editing",
1351 +         "title": "ACE-Step v1 M2M Editing",
1352 +         "mediaType": "audio",
1353 +         "mediaSubtype": "mp3",
1354 +         "description": "Edit existing songs to change their style and lyrics using ACE-Step v1 M2M.",
1355 +         "tags": ["Audio editing", "Audio"],
1356 +         "models": ["ACE-Step"],
1357 +         "date": "2025-03-01",
1358 +         "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
1359 +         "size": 7698728878
1360 +       }
1361 +     ]
1362 +   },
1363 +   {
1364 +     "moduleName": "default",
1365 +     "type": "3d",
1366 +     "category": "GENERATION TYPE",
1367 +     "icon": "icon-[lucide--box]",
1368 +     "title": "3D Model",
1369 +     "templates": [
1370 +       {
1371 +         "name": "3d_hunyuan3d-v2.1",
1372 +         "title": "Hunyuan3D 2.1",
1373 +         "mediaType": "image",
1374 +         "mediaSubtype": "webp",
1375 +         "description": "Generates 3D models from single images using Hunyuan3D 2.1.",
1376 +         "tags": ["Image to 3D", "3D"],
1377 +         "models": ["Hunyuan3D", "Tencent"],
1378 +         "date": "2025-03-01",
1379 +         "tutorialUrl": "",
1380 +         "size": 4928474972
1381 +       },
1382 +       {
1383 +         "name": "3d_hunyuan3d_image_to_model",
1384 +         "title": "Hunyuan3D 2.0",
1385 +         "mediaType": "image",
1386 +         "mediaSubtype": "webp",
1387 +         "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
1388 +         "tags": ["Image to 3D", "3D"],
1389 +         "models": ["Hunyuan3D", "Tencent"],
1390 +         "date": "2025-03-01",
1391 +         "tutorialUrl": "",
1392 +         "size": 4928474972
1393 +       },
1394 +       {
1395 +         "name": "3d_hunyuan3d_multiview_to_model",
1396 +         "title": "Hunyuan3D 2.0 Multiview",
1397 +         "mediaType": "image",
1398 +         "mediaSubtype": "webp",
1399 +         "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
1400 +         "tags": ["3D", "Image to 3D"],
1401 +         "models": ["Hunyuan3D", "Tencent"],
1402 +         "date": "2025-03-01",
1403 +         "tutorialUrl": "",
1404 +         "thumbnailVariant": "hoverDissolve",
1405 +         "size": 4928474972
1406 +       },
1407 +       {
1408 +         "name": "3d_hunyuan3d_multiview_to_model_turbo",
1409 +         "title": "Hunyuan3D 2.0 Multiview Turbo",
1410 +         "mediaType": "image",
1411 +         "mediaSubtype": "webp",
1412 +         "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
1413 +         "tags": ["Image to 3D", "3D"],
1414 +         "models": ["Hunyuan3D", "Tencent"],
1415 +         "date": "2025-03-01",
1416 +         "tutorialUrl": "",
1417 +         "thumbnailVariant": "hoverDissolve",
1418 +         "size": 4928474972
1419 +       }
1420 +     ]
1421 +   },
1422 +   {
1423 +     "moduleName": "default",
1424 +     "type": "image",
1425 +     "category": "CLOSED SOURCE MODELS",
1426 +     "icon": "icon-[lucide--hand-coins]",
1427 +     "title": "Image API",
1428 +     "templates": [
1429 +       {
1430 +         "name": "api_nano_banana_pro",
1431 +         "title": "Nano Banana Pro",
1432 +         "description": "Nano-banana Pro (Gemini 3.0 Pro Image) - studio-quality 4K image generation and editing with improved text rendering and character consistency.",
1433 +         "mediaType": "image",
1434 +         "mediaSubtype": "webp",
1435 +         "thumbnailVariant": "hoverDissolve",
1436 +         "tags": ["Image editing", "Image", "API"],
1437 +         "models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
1438 +         "date": "2025-11-21",
1439 +         "OpenSource": false,
1440 +         "size": 0,
1441 +         "vram": 0
1442 +       },
1443 +       {
1444 +         "name": "api_from_photo_2_miniature",
1445 +         "title": "Photo to Model Style",
1446 +         "description": "Transforms real photos of buildings into architectural blueprints and then into detailed miniature physical models. A complete architectural visualization chain from photo to scale model.",
1447 +         "mediaType": "image",
1448 +         "mediaSubtype": "webp",
1449 +         "tags": ["Image editing", "Image", "3D"],
1450 +         "models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
1451 +         "date": "2025-11-21",
1452 +         "OpenSource": false,
1453 +         "size": 0,
1454 +         "vram": 0
1455 +       },
1456 +       {
1457 +         "name": "api_bytedance_seedream4",
1458 +         "title": "ByteDance Seedream 4.0",
1459 +         "description": "Multimodal AI model for text-to-image and image editing. Generates 2K images in under 2 seconds with natural-language control.",
1460 +         "mediaType": "image",
1461 +         "mediaSubtype": "webp",
1462 +         "tags": ["Image editing", "Image", "API", "Text to image"],
1463 +         "models": ["Seedream 4.0", "ByteDance"],
1464 +         "date": "2025-09-11",
1465 +         "OpenSource": false,
1466 +         "size": 0,
1467 +         "vram": 0
1468 +       },
1469 +       {
1470 +         "name": "api_google_gemini_image",
1471 +         "title": "Google Gemini Image",
1472 +         "description": "Nano-banana (Gemini-2.5-Flash Image) - image editing with consistency.",
1473 +         "mediaType": "image",
1474 +         "mediaSubtype": "webp",
1475 +         "tags": ["Image editing", "Image", "API", "Text to image"],
1476 +         "models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
1477 +         "date": "2025-08-27",
1478 +         "OpenSource": false,
1479 +         "size": 0,
1480 +         "vram": 0
1481 +       },
1482 +       {
1483 +         "name": "api_flux2",
1484 +         "title": "Flux.2 Pro",
1485 +         "description": "Generates photorealistic images up to 4MP with multi-reference coherence and professional text rendering.",
1486 +         "mediaType": "image",
1487 +         "mediaSubtype": "webp",
1488 +         "tags": ["Image editing", "Image", "API", "Text to image"],
1489 +         "models": ["Flux.2", "BFL"],
1490 +         "date": "2025-11-26",
1491 +         "OpenSource": false,
1492 +         "size": 0,
1493 +         "vram": 0
1494 +       },
1495 +       {
1496 +         "name": "api_topaz_image_enhance",
1497 +         "title": "Topaz Image Enhance",
1498 +         "description": "Professional image enhancement using Topaz's Reimagine model with face enhancement and detail restoration.",
1499 +         "mediaType": "image",
1500 +         "mediaSubtype": "webp",
1501 +         "thumbnailVariant": "compareSlider",
1502 +         "tags": ["Image", "API", "Enhance"],
1503 +         "models": ["Topaz", "Reimagine"],
1504 +         "date": "2025-11-25",
1505 +         "OpenSource": false,
1506 +         "size": 0,
1507 +         "vram": 0
1508 +       },
1509 +       {
1510 +         "name": "api_bfl_flux_1_kontext_multiple_images_input",
1511 +         "title": "BFL Flux.1 Kontext Multiple Image Input",
1512 +         "description": "Input multiple images and edit them with Flux.1 Kontext.",
1513 +         "mediaType": "image",
1514 +         "mediaSubtype": "webp",
1515 +         "thumbnailVariant": "compareSlider",
1516 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1517 +         "tags": ["Image editing", "Image"],
1518 +         "models": ["Flux", "Kontext", "BFL"],
1519 +         "date": "2025-05-29",
1520 +         "OpenSource": false,
1521 +         "size": 0,
1522 +         "vram": 0
1523 +       },
1524 +       {
1525 +         "name": "api_bfl_flux_1_kontext_pro_image",
1526 +         "title": "BFL Flux.1 Kontext Pro",
1527 +         "description": "Edit images with Flux.1 Kontext Pro.",
1528 +         "mediaType": "image",
1529 +         "mediaSubtype": "webp",
1530 +         "thumbnailVariant": "compareSlider",
1531 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1532 +         "tags": ["Image editing", "Image"],
1533 +         "models": ["Flux", "Kontext", "BFL"],
1534 +         "date": "2025-05-29",
1535 +         "OpenSource": false,
1536 +         "size": 0,
1537 +         "vram": 0
1538 +       },
1539 +       {
1540 +         "name": "api_bfl_flux_1_kontext_max_image",
1541 +         "title": "BFL Flux.1 Kontext Max",
1542 +         "description": "Edit images with Flux.1 Kontext Max.",
1543 +         "mediaType": "image",
1544 +         "mediaSubtype": "webp",
1545 +         "thumbnailVariant": "compareSlider",
1546 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1547 +         "tags": ["Image editing", "Image"],
1548 +         "models": ["Flux", "Kontext", "BFL"],
1549 +         "date": "2025-05-29",
1550 +         "OpenSource": false,
1551 +         "size": 0,
1552 +         "vram": 0
1553 +       },
1554 +       {
1555 +         "name": "api_wan_text_to_image",
1556 +         "title": "Wan2.5: Text to Image",
1557 +         "description": "Generates images with excellent prompt following and visual quality using Wan2.5.",
1558 +         "mediaType": "image",
1559 +         "mediaSubtype": "webp",
1560 +         "tags": ["Text to image", "Image", "API"],
1561 +         "models": ["Wan2.5", "Wan"],
1562 +         "date": "2025-09-25",
1563 +         "OpenSource": false,
1564 +         "size": 0,
1565 +         "vram": 0
1566 +       },
1567 +       {
1568 +         "name": "api_bfl_flux_pro_t2i",
1569 +         "title": "BFL Flux[Pro]: Text to Image",
1570 +         "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
1571 +         "mediaType": "image",
1572 +         "mediaSubtype": "webp",
1573 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
1574 +         "tags": ["Image editing", "Image"],
1575 +         "models": ["Flux", "BFL"],
1576 +         "date": "2025-05-01",
1577 +         "OpenSource": false,
1578 +         "size": 0,
1579 +         "vram": 0
1580 +       },
1581 +       {
1582 +         "name": "api_luma_photon_i2i",
1583 +         "title": "Luma Photon: Image to Image",
1584 +         "description": "Guide image generation using a combination of images and prompts.",
1585 +         "mediaType": "image",
1586 +         "mediaSubtype": "webp",
1587 +         "thumbnailVariant": "compareSlider",
1588 +         "tags": ["Image to image", "Image", "API"],
1589 +         "models": ["Luma"],
1590 +         "date": "2025-03-01",
1591 +         "OpenSource": false,
1592 +         "size": 0,
1593 +         "vram": 0
1594 +       },
1595 +       {
1596 +         "name": "api_luma_photon_style_ref",
1597 +         "title": "Luma Photon: Style Reference",
1598 +         "description": "Generate images by blending style references with precise control using Luma Photon.",
1599 +         "mediaType": "image",
1600 +         "mediaSubtype": "webp",
1601 +         "thumbnailVariant": "compareSlider",
1602 +         "tags": ["Text to image", "Image", "API"],
1603 +         "models": ["Luma"],
1604 +         "date": "2025-03-01",
1605 +         "OpenSource": false,
1606 +         "size": 0,
1607 +         "vram": 0
1608 +       },
1609 +       {
1610 +         "name": "api_recraft_image_gen_with_color_control",
1611 +         "title": "Recraft: Image Generation with Color Control",
1612 +         "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
1613 +         "mediaType": "image",
1614 +         "mediaSubtype": "webp",
1615 +         "tags": ["Text to image", "Image", "API"],
1616 +         "models": ["Recraft"],
1617 +         "date": "2025-03-01",
1618 +         "OpenSource": false,
1619 +         "size": 0,
1620 +         "vram": 0
1621 +       },
1622 +       {
1623 +         "name": "api_recraft_image_gen_with_style_control",
1624 +         "title": "Recraft: Image Generation with Style Control",
1625 +         "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
1626 +         "mediaType": "image",
1627 +         "mediaSubtype": "webp",
1628 +         "tags": ["Text to image", "Image", "API"],
1629 +         "models": ["Recraft"],
1630 +         "date": "2025-03-01",
1631 +         "OpenSource": false,
1632 +         "size": 0,
1633 +         "vram": 0
1634 +       },
1635 +       {
1636 +         "name": "api_recraft_vector_gen",
1637 +         "title": "Recraft: Vector Generation",
1638 +         "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
1639 +         "mediaType": "image",
1640 +         "mediaSubtype": "webp",
1641 +         "tags": ["Text to image", "Image", "API", "Vector"],
1642 +         "models": ["Recraft"],
1643 +         "date": "2025-03-01",
1644 +         "OpenSource": false,
1645 +         "size": 0,
1646 +         "vram": 0
1647 +       },
1648 +       {
1649 +         "name": "api_runway_text_to_image",
1650 +         "title": "Runway: Text to Image",
1651 +         "description": "Generate high-quality images from text prompts using Runway's AI model.",
1652 +         "mediaType": "image",
1653 +         "mediaSubtype": "webp",
1654 +         "tags": ["Text to image", "Image", "API"],
1655 +         "models": ["Runway"],
1656 +         "date": "2025-03-01",
1657 +         "OpenSource": false,
1658 +         "size": 0,
1659 +         "vram": 0
1660 +       },
1661 +       {
1662 +         "name": "api_runway_reference_to_image",
1663 +         "title": "Runway: Reference to Image",
1664 +         "description": "Generate new images based on reference styles and compositions with Runway's AI.",
1665 +         "mediaType": "image",
1666 +         "thumbnailVariant": "compareSlider",
1667 +         "mediaSubtype": "webp",
1668 +         "tags": ["Image to image", "Image", "API"],
1669 +         "models": ["Runway"],
1670 +         "date": "2025-03-01",
1671 +         "OpenSource": false,
1672 +         "size": 0,
1673 +         "vram": 0
1674 +       },
1675 +       {
1676 +         "name": "api_stability_ai_stable_image_ultra_t2i",
1677 +         "title": "Stability AI: Stable Image Ultra Text to Image",
1678 +         "description": "Generate high-quality images with excellent prompt adherence. Perfect for professional use cases at 1-megapixel resolution.",
1679 +         "mediaType": "image",
1680 +         "mediaSubtype": "webp",
1681 +         "tags": ["Text to image", "Image", "API"],
1682 +         "models": ["Stability"],
1683 +         "date": "2025-03-01",
1684 +         "OpenSource": false,
1685 +         "size": 0,
1686 +         "vram": 0
1687 +       },
1688 +       {
1689 +         "name": "api_stability_ai_i2i",
1690 +         "title": "Stability AI: Image to Image",
1691 +         "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
1692 +         "mediaType": "image",
1693 +         "thumbnailVariant": "compareSlider",
1694 +         "mediaSubtype": "webp",
1695 +         "tags": ["Image to image", "Image", "API"],
1696 +         "models": ["Stability"],
1697 +         "date": "2025-03-01",
1698 +         "OpenSource": false,
1699 +         "size": 0,
1700 +         "vram": 0
1701 +       },
1702 +       {
1703 +         "name": "api_stability_ai_sd3.5_t2i",
1704 +         "title": "Stability AI: SD3.5 Text to Image",
1705 +         "description": "Generate high-quality images with excellent prompt adherence. Perfect for professional use cases at 1-megapixel resolution.",
1706 +         "mediaType": "image",
1707 +         "mediaSubtype": "webp",
1708 +         "tags": ["Text to image", "Image", "API"],
1709 +         "models": ["Stability"],
1710 +         "date": "2025-03-01",
1711 +         "OpenSource": false,
1712 +         "size": 0,
1713 +         "vram": 0
1714 +       },
1715 +       {
1716 +         "name": "api_stability_ai_sd3.5_i2i",
1717 +         "title": "Stability AI: SD3.5 Image to Image",
1718 +         "description": "Generate high-quality images with excellent prompt adherence. Perfect for professional use cases at 1-megapixel resolution.",
1719 +         "mediaType": "image",
1720 +         "thumbnailVariant": "compareSlider",
1721 +         "mediaSubtype": "webp",
1722 +         "tags": ["Image to image", "Image", "API"],
1723 +         "models": ["Stability"],
1724 +         "date": "2025-03-01",
1725 +         "OpenSource": false,
1726 +         "size": 0,
1727 +         "vram": 0
1728 +       },
1729 +       {
1730 +         "name": "api_ideogram_v3_t2i",
1731 +         "title": "Ideogram V3: Text to Image",
1732 +         "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
1733 +         "mediaType": "image",
1734 +         "mediaSubtype": "webp",
1735 +         "tags": ["Text to image", "Image", "API"],
1736 +         "models": ["Ideogram"],
1737 +         "date": "2025-03-01",
1738 +         "OpenSource": false,
1739 +         "size": 0,
1740 +         "vram": 0
1741 +       },
1742 +       {
1743 +         "name": "api_openai_image_1_t2i",
1744 +         "title": "OpenAI: GPT-Image-1 Text to Image",
1745 +         "description": "Generate images from text prompts using the OpenAI GPT Image 1 API.",
1746 +         "mediaType": "image",
1747 +         "mediaSubtype": "webp",
1748 +         "tags": ["Text to image", "Image", "API"],
1749 +         "models": ["GPT-Image-1", "OpenAI"],
1750 +         "date": "2025-03-01",
1751 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
1752 +         "OpenSource": false,
1753 +         "size": 0,
1754 +         "vram": 0
1755 +       },
1756 +       {
1757 +         "name": "api_openai_image_1_i2i",
1758 +         "title": "OpenAI: GPT-Image-1 Image to Image",
1759 +         "description": "Generate images from input images using the OpenAI GPT Image 1 API.",
1760 +         "mediaType": "image",
1761 +         "mediaSubtype": "webp",
1762 +         "thumbnailVariant": "compareSlider",
1763 +         "tags": ["Image to image", "Image", "API"],
1764 +         "models": ["GPT-Image-1", "OpenAI"],
1765 +         "date": "2025-03-01",
1766 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
1767 +         "OpenSource": false,
1768 +         "size": 0,
1769 +         "vram": 0
1770 +       },
1771 +       {
1772 +         "name": "api_openai_image_1_inpaint",
1773 +         "title": "OpenAI: GPT-Image-1 Inpaint",
1774 +         "description": "Edit images using inpainting with the OpenAI GPT Image 1 API.",
1775 +         "mediaType": "image",
1776 +         "mediaSubtype": "webp",
1777 +         "thumbnailVariant": "compareSlider",
1778 +         "tags": ["Inpaint", "Image", "API"],
1779 +         "models": ["GPT-Image-1", "OpenAI"],
1780 +         "date": "2025-03-01",
1781 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
1782 +         "OpenSource": false,
1783 +         "size": 0,
1784 +         "vram": 0
1785 +       },
1786 +       {
1787 +         "name": "api_openai_image_1_multi_inputs",
1788 +         "title": "OpenAI: GPT-Image-1 Multiple Inputs",
1789 +         "description": "Generate images from multiple inputs using the OpenAI GPT Image 1 API.",
1790 +         "mediaType": "image",
1791 +         "mediaSubtype": "webp",
1792 +         "thumbnailVariant": "compareSlider",
1793 +         "tags": ["Text to image", "Image", "API"],
1794 +         "models": ["GPT-Image-1", "OpenAI"],
1795 +         "date": "2025-03-01",
1796 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
1797 +         "OpenSource": false,
1798 +         "size": 0,
1799 +         "vram": 0
1800 +       },
1801 +       {
1802 +         "name": "api_openai_dall_e_2_t2i",
1803 +         "title": "OpenAI: Dall-E 2 Text to Image",
1804 +         "description": "Generate images from text prompts using the OpenAI Dall-E 2 API.",
1805 +         "mediaType": "image",
1806 +         "mediaSubtype": "webp",
1807 +         "tags": ["Text to image", "Image", "API"],
1808 +         "models": ["Dall-E", "OpenAI"],
1809 +         "date": "2025-03-01",
1810 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
1811 +         "OpenSource": false,
1812 +         "size": 0,
1813 +         "vram": 0
1814 +       },
1815 +       {
1816 +         "name": "api_openai_dall_e_2_inpaint",
1817 +         "title": "OpenAI: Dall-E 2 Inpaint",
1818 +         "description": "Edit images using inpainting with the OpenAI Dall-E 2 API.",
1819 +         "mediaType": "image",
1820 +         "mediaSubtype": "webp",
1821 +         "thumbnailVariant": "compareSlider",
1822 +         "tags": ["Inpaint", "Image", "API"],
1823 +         "models": ["Dall-E", "OpenAI"],
1824 +         "date": "2025-03-01",
1825 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
1826 +         "OpenSource": false,
1827 +         "size": 0,
1828 +         "vram": 0
1829 +       },
1830 +       {
1831 +         "name": "api_openai_dall_e_3_t2i",
1832 +         "title": "OpenAI: Dall-E 3 Text to Image",
1833 +         "description": "Generate images from text prompts using the OpenAI Dall-E 3 API.",
1834 +         "mediaType": "image",
1835 +         "mediaSubtype": "webp",
1836 +         "tags": ["Text to image", "Image", "API"],
1837 +         "models": ["Dall-E", "OpenAI"],
1838 +         "date": "2025-03-01",
1839 +         "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3",
1840 +         "OpenSource": false,
1841 +         "size": 0,
1842 +         "vram": 0
1843 +       }
1844 +     ]
1845 +   },
1846 +   {
1847 +     "moduleName": "default",
1848 +     "type": "video",
1849 +     "category": "CLOSED SOURCE MODELS",
1850 +     "icon": "icon-[lucide--film]",
1851 +     "title": "Video API",
1852 +     "templates": [
1853 +       {
1854 +         "name": "api_openai_sora_video",
1855 +         "title": "Sora 2: Text and Image to Video",
1856 +         "description": "OpenAI Sora-2 and Sora-2 Pro video generation with synchronized audio.",
1857 +         "mediaType": "image",
1858 +         "mediaSubtype": "webp",
1859 +         "tags": ["Image to video", "Text to video", "API"],
1860 +         "models": ["OpenAI"],
1861 +         "date": "2025-10-08",
1862 +         "OpenSource": false,
1863 +         "size": 0,
1864 +         "vram": 0
1865 +       },
1866 +       {
1867 +         "name": "api_ltxv_text_to_video",
1868 +         "title": "LTX-2: Text to Video",
1869 +         "description": "Generates high-quality videos from text prompts using Lightricks LTX-2 with synchronized audio. Supports up to 4K resolution at 50fps, with Fast, Pro, and Ultra modes for diverse production needs.",
1870 +         "mediaType": "image",
1871 +         "mediaSubtype": "webp",
1872 +         "tags": ["Text to video", "Video", "API"],
1873 +         "models": ["LTX-2", "Lightricks"],
1874 +         "date": "2025-10-28",
1875 +         "OpenSource": false,
1876 +         "size": 0,
1877 +         "vram": 0
1878 +       },
1879 +       {
1880 +         "name": "api_ltxv_image_to_video",
1881 +         "title": "LTX-2: Image to Video",
1882 +         "description": "Transforms static images into dynamic videos with LTX-2 Pro. Generates cinematic sequences with natural motion, synchronized audio, and support for up to 4K at 50fps.",
1883 +         "mediaType": "image",
1884 +         "mediaSubtype": "webp",
1885 +         "tags": ["Image to video", "Video", "API"],
1886 +         "models": ["LTX-2", "Lightricks"],
1887 +         "date": "2025-10-28",
1888 +         "OpenSource": false,
1889 +         "size": 0,
1890 +         "vram": 0
1891 +       },
1892 +       {
1893 +         "name": "api_wan_text_to_video",
1894 +         "title": "Wan2.5: Text to Video",
1895 +         "description": "Generates videos with synchronized audio, improved motion, and superior quality.",
1896 +         "mediaType": "image",
1897 +         "mediaSubtype": "webp",
1898 +         "tags": ["Image to video", "Video", "API"],
1899 +         "models": ["Wan2.5", "Wan"],
1900 +         "date": "2025-09-27",
1901 +         "tutorialUrl": "",
1902 +         "OpenSource": false,
1903 +         "size": 0,
1904 +         "vram": 0
1905 +       },
1906 +       {
1907 +         "name": "api_wan_image_to_video",
1908 +         "title": "Wan2.5: Image to Video",
1909 +         "description": "Transforms images into videos with synchronized audio, improved motion, and superior quality.",
1910 +         "mediaType": "image",
1911 +         "mediaSubtype": "webp",
1912 +         "tags": ["Image to video", "Video", "API"],
1913 +         "models": ["Wan2.5", "Wan"],
1914 +         "date": "2025-09-27",
1915 +         "tutorialUrl": "",
1916 +         "OpenSource": false,
1917 +         "size": 0,
1918 +         "vram": 0
1919 +       },
1920 +       {
1921 +         "name": "api_kling_i2v",
1922 +         "title": "Kling: Image to Video",
1923 +         "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
1924 +         "mediaType": "image",
1925 +         "mediaSubtype": "webp",
1926 +         "tags": ["Image to video", "Video", "API"],
1927 +         "models": ["Kling"],
1928 +         "date": "2025-03-01",
1929 +         "tutorialUrl": "",
1930 +         "OpenSource": false,
1931 +         "size": 0,
1932 +         "vram": 0
1933 +       },
1934 +       {
1935 +         "name": "api_kling_effects",
1936 +         "title": "Kling: Video Effects",
1937 +         "description": "Generate dynamic videos by applying visual effects to images using Kling.",
1938 +         "mediaType": "image",
1939 +         "mediaSubtype": "webp",
1940 +         "tags": ["Video", "API"],
1941 +         "models": ["Kling"],
1942 +         "date": "2025-03-01",
1943 +         "tutorialUrl": "",
1944 +         "OpenSource": false,
1945 +         "size": 0,
1946 +         "vram": 0
1947 +       },
1948 +       {
1949 +         "name": "api_kling_flf",
1950 +         "title": "Kling: FLF2V",
1951 +         "description": "Generate videos by controlling the first and last frames.",
1952 +         "mediaType": "image",
1953 +         "mediaSubtype": "webp",
1954 +         "tags": ["Video", "API", "FLF2V"],
1955 +         "models": ["Kling"],
1956 +         "date": "2025-03-01",
1957 +         "tutorialUrl": "",
1958 +         "OpenSource": false,
1959 +         "size": 0,
1960 +         "vram": 0
1961 +       },
1962 +       {
1963 +         "name": "api_vidu_text_to_video",
1964 +         "title": "Vidu: Text to Video",
1965 +         "description": "Generate high-quality 1080p videos from text prompts with motion-amplitude control and adjustable duration using Vidu's advanced AI model.",
1966 +         "mediaType": "image",
1967 +         "mediaSubtype": "webp",
1968 +         "tags": ["Text to video", "Video", "API"],
1969 +         "models": ["Vidu"],
1970 +         "date": "2025-08-23",
1971 +         "tutorialUrl": "",
1972 +         "OpenSource": false,
1973 +         "size": 0,
1974 +         "vram": 0
1975 +       },
1976 +       {
1977 +         "name": "api_vidu_image_to_video",
1978 +         "title": "Vidu: Image to Video",
1979 +         "description": "Transform static images into dynamic 1080p videos with precise motion control and customizable motion amplitude using Vidu.",
1980 +         "mediaType": "image",
1981 +         "mediaSubtype": "webp",
1982 +         "tags": ["Image to video", "Video", "API"],
1983 +         "models": ["Vidu"],
1984 +         "date": "2025-08-23",
1985 +         "tutorialUrl": "",
1986 +         "OpenSource": false,
1987 +         "size": 0,
1988 +         "vram": 0
1989 +       },
1990 +       {
1991 +         "name": "api_vidu_reference_to_video",
1992 +         "title": "Vidu: Reference to Video",
1993 +         "description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
1994 +         "mediaType": "image",
1995 +         "mediaSubtype": "webp",
1996 +         "tags": ["Video", "Image to video", "API"],
1997 +         "models": ["Vidu"],
1998 +         "date": "2025-08-23",
1999 +         "tutorialUrl": "",
2000 +         "OpenSource": false,
2001 +         "size": 0,
2002 +         "vram": 0
2003 +       },
2004 +       {
2005 +         "name": "api_vidu_start_end_to_video",
2006 +         "title": "Vidu: Start-End to Video",
2007 +         "description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
2008 +         "mediaType": "image",
2009 +         "mediaSubtype": "webp",
2010 +         "tags": ["Video", "API", "FLF2V"],
2011 +         "models": ["Vidu"],
2012 +         "date": "2025-08-23",
2013 +         "tutorialUrl": "",
2014 +         "OpenSource": false,
2015 +         "size": 0,
2016 +         "vram": 0
2017 +       },
2018 +       {
2019 +         "name": "api_bytedance_text_to_video",
2020 +         "title": "ByteDance: Text to Video",
2021 +         "description": "Generates high-quality videos directly from text prompts using ByteDance's Seedance model. Supports multiple resolutions and aspect ratios with natural motion and cinematic quality.",
2022 +         "mediaType": "image",
2023 +         "mediaSubtype": "webp",
2024 +         "tags": ["Video", "API", "Text to video"],
2025 +         "models": ["ByteDance"],
2026 +         "date": "2025-10-06",
2027 +         "tutorialUrl": "",
2028 +         "OpenSource": false,
2029 +         "size": 0,
2030 +         "vram": 0
2031 +       },
2032 +       {
2033 +         "name": "api_bytedance_image_to_video",
2034 +         "title": "ByteDance: Image to Video",
2035 +         "description": "Transforms static images into dynamic videos using ByteDance's Seedance model. Analyzes the image structure and generates natural motion with consistent visual style and coherent video sequences.",
2036 +         "mediaType": "image",
2037 +         "mediaSubtype": "webp",
2038 +         "tags": ["Video", "API", "Image to video"],
2039 +         "models": ["ByteDance"],
2040 +         "date": "2025-10-06",
2041 +         "tutorialUrl": "",
2042 +         "OpenSource": false,
2043 +         "size": 0,
2044 +         "vram": 0
2045 +       },
2046 +       {
2047 +         "name": "api_bytedance_flf2v",
2048 +         "title": "ByteDance: Start-End to Video",
2049 +         "description": "Generates cinematic video transitions between start and end frames with fluid motion, scene consistency, and a professional finish using ByteDance's Seedance model.",
2050 +         "mediaType": "image",
2051 +         "mediaSubtype": "webp",
2052 +         "tags": ["Video", "API", "FLF2V"],
2053 +         "models": ["ByteDance"],
2054 +         "date": "2025-10-06",
2055 +         "tutorialUrl": "",
2056 +         "OpenSource": false,
2057 +         "size": 0,
2058 +         "vram": 0
2059 +       },
2060 +       {
2061 +         "name": "api_topaz_video_enhance",
2062 +         "title": "Topaz Video Enhance",
2063 +         "description": "Enhance videos with Topaz AI. Upscale resolution using the Starlight (Astra) Fast model and interpolate frames with the apo-8 model.",
2064 +         "mediaType": "image",
2065 +         "mediaSubtype": "webp",
2066 +         "thumbnailVariant": "compareSlider",
2067 +         "tags": ["Video", "API", "Enhance"],
2068 +         "models": ["Topaz"],
2069 +         "date": "2025-11-25",
2070 +         "OpenSource": false,
2071 +         "size": 0,
2072 +         "vram": 0
2073 +       },
2074 +       {
2075 +         "name": "api_luma_i2v",
2076 +         "title": "Luma: Image to Video",
2077 +         "description": "Take static images and instantly create magical, high-quality animations.",
2078 +         "mediaType": "image",
2079 +         "mediaSubtype": "webp",
2080 +         "tags": ["Image to video", "Video", "API"],
2081 +         "models": ["Luma"],
2082 +         "date": "2025-03-01",
2083 +         "tutorialUrl": "",
2084 +         "OpenSource": false,
2085 +         "size": 0,
2086 +         "vram": 0
2087 +       },
2088 +       {
2089 +         "name": "api_luma_t2v",
2090 +         "title": "Luma: Text to Video",
2091 +         "description": "Generate high-quality videos using simple prompts.",
2092 +         "mediaType": "image",
2093 +         "mediaSubtype": "webp",
2094 +         "tags": ["Text to video", "Video", "API"],
2095 +         "models": ["Luma"],
2096 +         "date": "2025-03-01",
2097 +         "tutorialUrl": "",
2098 +         "OpenSource": false,
2099 +         "size": 0,
2100 +         "vram": 0
2101 +       },
2102 +       {
2103 +         "name": "api_moonvalley_text_to_video",
2104 +         "title": "Moonvalley: Text to Video",
2105 +         "description": "Generate cinematic 1080p videos from text prompts using a model trained exclusively on licensed data.",
2106 +         "mediaType": "image",
2107 +         "mediaSubtype": "webp",
2108 +         "tags": ["Text to video", "Video", "API"],
2109 +         "models": ["Moonvalley"],
2110 +         "date": "2025-03-01",
2111 +         "tutorialUrl": "",
2112 +         "OpenSource": false,
2113 +         "size": 0,
2114 +         "vram": 0
2115 +       },
2116 +       {
2117 +         "name": "api_moonvalley_image_to_video",
2118 +         "title": "Moonvalley: Image to Video",
2119 +         "description": "Generate cinematic 1080p videos from an image using a model trained exclusively on licensed data.",
2120 +         "mediaType": "image",
2121 +         "mediaSubtype": "webp",
2122 +         "tags": ["Image to video", "Video", "API"],
2123 +         "models": ["Moonvalley"],
2124 +         "date": "2025-03-01",
2125 +         "tutorialUrl": "",
2126 +         "OpenSource": false,
2127 +         "size": 0,
2128 +         "vram": 0
2129 +       },
2130 +       {
2131 +         "name": "api_moonvalley_video_to_video_motion_transfer",
2132 +         "title": "Moonvalley: Motion Transfer",
2133 +         "description": "Apply motion from one video to another.",
2134 +         "mediaType": "image",
2135 +         "thumbnailVariant": "hoverDissolve",
2136 +         "mediaSubtype": "webp",
2137 +         "tags": ["Video to video", "Video", "API"],
2138 +         "models": ["Moonvalley"],
2139 +         "date": "2025-03-01",
2140 +         "tutorialUrl": "",
2141 +         "OpenSource": false,
2142 +         "size": 0,
2143 +         "vram": 0
2144 +       },
2145 +       {
2146 +         "name": "api_moonvalley_video_to_video_pose_control",
2147 +         "title": "Moonvalley: Pose Control",
2148 +         "description": "Apply human pose and motion from one video to another.",
2149 +         "mediaType": "image",
2150 +         "thumbnailVariant": "hoverDissolve",
2151 +         "mediaSubtype": "webp",
2152 +         "tags": ["Video to video", "Video", "API"],
2153 +         "models": ["Moonvalley"],
2154 +         "date": "2025-03-01",
2155 +         "tutorialUrl": "",
2156 +         "OpenSource": false,
2157 +         "size": 0,
2158 +         "vram": 0
2159 +       },
2160 +       {
2161 +         "name": "api_hailuo_minimax_video",
2162 +         "title": "MiniMax: Video",
2163 +         "description": "Generate high-quality videos from text prompts with optional first-frame control using the MiniMax Hailuo-02 model. Supports multiple resolutions (768P/1080P) and durations (6/10s) with intelligent prompt optimization.",
2164 +         "mediaType": "image",
2165 +         "mediaSubtype": "webp",
2166 +         "tags": ["Text to video", "Video", "API"],
2167 +         "models": ["MiniMax"],
2168 +         "date": "2025-03-01",
2169 +         "tutorialUrl": "",
2170 +         "OpenSource": false,
2171 +         "size": 0,
2172 +         "vram": 0
2173 +       },
2174 +       {
2175 +         "name": "api_hailuo_minimax_t2v",
2176 +         "title": "MiniMax: Text to Video",
2177 +         "description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements that bring your descriptions to life.",
2178 +         "mediaType": "image",
2179 +         "mediaSubtype": "webp",
2180 +         "tags": ["Text to video", "Video", "API"],
2181 +         "models": ["MiniMax"],
2182 +         "date": "2025-03-01",
2183 +         "tutorialUrl": "",
2184 +         "OpenSource": false,
2185 +         "size": 0,
2186 +         "vram": 0
2187 +       },
2188 +       {
2189 +         "name": "api_hailuo_minimax_i2v",
2190 +         "title": "MiniMax: Image to Video",
2191 +         "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
2192 +         "mediaType": "image",
2193 +         "mediaSubtype": "webp",
2194 +         "tags": ["Image to video", "Video", "API"],
2195 +         "models": ["MiniMax"],
2196 +         "date": "2025-03-01",
2197 +         "tutorialUrl": "",
2198 +         "OpenSource": false,
2199 +         "size": 0,
2200 +         "vram": 0
2201 +       },
2202 +       {
2203 +         "name": "api_pixverse_i2v",
2204 +         "title": "PixVerse: Image to Video",
2205 +         "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
2206 +         "mediaType": "image",
2207 +         "mediaSubtype": "webp",
2208 +         "tags": ["Image to video", "Video", "API"],
2209 +         "models": ["PixVerse"],
2210 +         "date": "2025-03-01",
2211 +         "tutorialUrl": "",
2212 +         "OpenSource": false,
2213 +         "size": 0,
2214 +         "vram": 0
2215 +       },
2216 +       {
2217 +         "name": "api_pixverse_template_i2v",
2218 +         "title": "PixVerse Templates: Image to Video",
2219 +         "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
2220 +         "mediaType": "image",
2221 +         "mediaSubtype": "webp",
2222 +         "tags": ["Image to video", "Video", "API"],
2223 +         "models": ["PixVerse"],
2224 +         "date": "2025-03-01",
2225 +         "tutorialUrl": "",
2226 +         "OpenSource": false,
2227 +         "size": 0,
2228 +         "vram": 0
2229 +       },
2230 +       {
2231 +         "name": "api_pixverse_t2v",
2232 +         "title": "PixVerse: Text to Video",
2233 +         "description": "Generate videos with precise prompt interpretation and impressive video dynamics.",
2234 +         "mediaType": "image",
2235 +         "mediaSubtype": "webp",
2236 +         "tags": ["Text to video", "Video", "API"],
2237 +         "models": ["PixVerse"],
2238 +         "date": "2025-03-01",
2239 +         "tutorialUrl": "",
2240 +         "OpenSource": false,
2241 +         "size": 0,
2242 +         "vram": 0
2243 +       },
2244 +       {
2245 +         "name": "api_runway_gen3a_turbo_image_to_video",
2246 +         "title": "Runway: Gen3a Turbo Image to Video",
2247 +         "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
2248 +         "mediaType": "image",
2249 +         "mediaSubtype": "webp",
2250 +         "tags": ["Image to video", "Video", "API"],
2251 +         "models": ["Runway"],
2252 +         "date": "2025-03-01",
2253 +         "tutorialUrl": "",
2254 +         "OpenSource": false,
2255 +         "size": 0,
2256 +         "vram": 0
2257 +       },
2258 +       {
2259 +         "name": "api_runway_gen4_turo_image_to_video",
2260 +         "title": "Runway: Gen4 Turbo Image to Video",
2261 +         "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
2262 +         "mediaType": "image",
2263 +         "mediaSubtype": "webp",
2264 +         "tags": ["Image to video", "Video", "API"],
2265 +         "models": ["Runway"],
2266 +         "date": "2025-03-01",
2267 +         "tutorialUrl": "",
2268 +         "OpenSource": false,
2269 +         "size": 0,
2270 +         "vram": 0
2271 +       },
2272 +       {
2273 +         "name": "api_runway_first_last_frame",
2274 +         "title": "Runway: First-Last Frame to Video",
2275 +         "description": "Generate smooth video transitions between two keyframes with Runway precision.",
2276 +         "mediaType": "image",
2277 +         "mediaSubtype": "webp",
2278 +         "tags": ["Video", "API", "FLF2V"],
2279 +         "models": ["Runway"],
2280 +         "date": "2025-03-01",
2281 +         "tutorialUrl": "",
2282 +         "OpenSource": false,
2283 +         "size": 0,
2284 +         "vram": 0
2285 +       },
2286 +       {
2287 +         "name": "api_pika_i2v",
2288 +         "title": "Pika: Image to Video",
2289 +         "description": "Generate smooth animated videos from single static images using Pika AI.",
2290 +         "mediaType": "image",
2291 +         "mediaSubtype": "webp",
2292 +         "tags": ["Image to video", "Video", "API"],
2293 +         "models": ["Pika"],
2294 +         "date": "2025-03-01",
2295 +         "tutorialUrl": "",
2296 +         "OpenSource": false,
2297 +         "size": 0,
2298 +         "vram": 0
2299 +       },
2300 +       {
2301 +         "name": "api_pika_scene",
2302 +         "title": "Pika Scenes: Images to Video",
2303 +         "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
2304 +         "mediaType": "image",
2305 +         "mediaSubtype": "webp",
2306 +         "tags": ["Image to video", "Video", "API"],
2307 +         "models": ["Pika"],
2308 +         "date": "2025-03-01",
2309 +         "tutorialUrl": "",
2310 +         "OpenSource": false,
2311 +         "size": 0,
2312 +         "vram": 0
2313 +       },
2314 +       {
2315 +         "name": "api_veo2_i2v",
2316 +         "title": "Veo2: Image to Video",
2317 +         "description": "Generate videos from images using the Google Veo2 API.",
2318 +         "mediaType": "image",
2319 +         "mediaSubtype": "webp",
2320 +         "tags": ["Image to video", "Video", "API"],
2321 +         "models": ["Veo", "Google"],
2322 +         "date": "2025-03-01",
2323 +         "tutorialUrl": "",
2324 +         "OpenSource": false,
2325 +         "size": 0,
2326 +         "vram": 0
2327 +       },
2328 +       {
2329 +         "name": "api_veo3",
2330 +         "title": "Veo3: Image to Video",
2331 +         "description": "Generate high-quality 8-second videos from text prompts or images using Google's advanced Veo 3 API. Features audio generation, prompt enhancement, and dual model options for speed or quality.",
2332 +         "mediaType": "image",
2333 +         "mediaSubtype": "webp",
2334 +         "tags": ["Image to video", "Text to video", "API"],
2335 +         "models": ["Veo", "Google"],
2336 +         "date": "2025-03-01",
2337 +         "tutorialUrl": "",
2338 +         "OpenSource": false,
2339 +         "size": 0,
2340 +         "vram": 0
2341 +       }
2342 +     ]
2343 +   },
2344
|
+
+  {
+    "moduleName": "default",
+    "type": "image",
+    "category": "CLOSED SOURCE MODELS",
+    "icon": "icon-[lucide--box]",
+    "title": "3D API",
+    "templates": [
+      {
+        "name": "api_rodin_gen2",
+        "title": "Rodin: Gen-2 Image to Model",
+        "description": "Generate detailed 3D models with 4X mesh quality from photos using Rodin Gen2.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Image to 3D", "3D", "API"],
+        "models": ["Rodin"],
+        "date": "2025-09-27",
+        "tutorialUrl": "",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
+      {
+        "name": "api_rodin_image_to_model",
+        "title": "Rodin: Image to Model",
+        "description": "Generate detailed 3D models from single photos using Rodin AI.",
+        "mediaType": "image",
+        "thumbnailVariant": "compareSlider",
+        "mediaSubtype": "webp",
+        "tags": ["Image to 3D", "3D", "API"],
+        "models": ["Rodin"],
+        "date": "2025-03-01",
+        "tutorialUrl": "",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
+      {
+        "name": "api_rodin_multiview_to_model",
+        "title": "Rodin: Multi-view to Model",
+        "description": "Sculpt complete 3D models using Rodin's multi-angle reconstruction.",
+        "mediaType": "image",
+        "thumbnailVariant": "compareSlider",
+        "mediaSubtype": "webp",
+        "tags": ["Image to 3D", "3D", "API"],
+        "models": ["Rodin"],
+        "date": "2025-03-01",
+        "tutorialUrl": "",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
+      {
+        "name": "api_tripo_text_to_model",
+        "title": "Tripo: Text to Model",
+        "description": "Create 3D objects from descriptions with Tripo's text-driven modeling.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["Text to model", "3D", "API"],
+        "models": ["Tripo"],
+        "date": "2025-03-01",
+        "tutorialUrl": "",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
+      {
+        "name": "api_tripo_image_to_model",
+        "title": "Tripo: Image to Model",
+        "description": "Generate professional 3D assets from 2D images using the Tripo engine.",
+        "mediaType": "image",
+        "thumbnailVariant": "compareSlider",
+        "mediaSubtype": "webp",
+        "tags": ["Image to 3D", "3D", "API"],
+        "models": ["Tripo"],
+        "date": "2025-03-01",
+        "tutorialUrl": "",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
+      {
+        "name": "api_tripo_multiview_to_model",
+        "title": "Tripo: Multi-view to Model",
+        "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
+        "mediaType": "image",
+        "thumbnailVariant": "compareSlider",
+        "mediaSubtype": "webp",
+        "tags": ["Image to 3D", "3D", "API"],
+        "models": ["Tripo"],
+        "date": "2025-03-01",
+        "tutorialUrl": "",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      }
+    ]
+  },
+  {
+    "moduleName": "default",
+    "type": "audio",
+    "category": "CLOSED SOURCE MODELS",
+    "icon": "icon-[lucide--volume-2]",
+    "title": "Audio API",
+    "templates": [
+      {
+        "name": "api_stability_ai_text_to_audio",
+        "title": "Stability AI: Text to Audio",
+        "description": "Generate music from text using Stable Audio 2.5. Create multi-minute tracks in seconds.",
+        "mediaType": "audio",
+        "mediaSubtype": "mp3",
+        "tags": ["Text to audio", "Audio", "API"],
+        "date": "2025-09-09",
+        "models": ["Stability", "Stable Audio"],
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
+      {
+        "name": "api_stability_ai_audio_to_audio",
+        "title": "Stability AI: Audio to Audio",
+        "description": "Transform audio into new compositions using Stable Audio 2.5. Upload audio and the AI creates complete tracks.",
+        "mediaType": "audio",
+        "mediaSubtype": "mp3",
+        "tags": ["Audio to audio", "Audio", "API"],
+        "date": "2025-09-09",
+        "models": ["Stability", "Stable Audio"],
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
+      {
+        "name": "api_stability_ai_audio_inpaint",
+        "title": "Stability AI: Audio Inpainting",
+        "description": "Complete or extend audio tracks using Stable Audio 2.5. Upload audio and the AI generates the rest.",
+        "mediaType": "audio",
+        "mediaSubtype": "mp3",
+        "tags": ["Audio to audio", "Audio", "API"],
+        "date": "2025-09-09",
+        "models": ["Stability", "Stable Audio"],
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      }
+    ]
+  },
+  {
+    "moduleName": "default",
+    "type": "image",
+    "category": "CLOSED SOURCE MODELS",
+    "icon": "icon-[lucide--message-square-text]",
+    "title": "LLM API",
+    "templates": [
+      {
+        "name": "api_openai_chat",
+        "title": "OpenAI: Chat",
+        "description": "Interact with OpenAI's advanced language models for intelligent conversations.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["LLM", "API"],
+        "models": ["OpenAI"],
+        "date": "2025-03-01",
+        "tutorialUrl": "",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      },
+      {
+        "name": "api_google_gemini",
+        "title": "Google Gemini: Chat",
+        "description": "Experience Google's multimodal AI with Gemini's reasoning capabilities.",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "tags": ["LLM", "API"],
+        "models": ["Google Gemini", "Google"],
+        "date": "2025-03-01",
+        "tutorialUrl": "",
+        "OpenSource": false,
+        "size": 0,
+        "vram": 0
+      }
+    ]
+  }
+]
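The entries above are plain data, so for reference, here is a minimal sketch of how a consumer might read this template index once the wheel is installed. It assumes the index ships inside the package as templates/index.json and that its top-level value is the array of module objects reconstructed above; neither the exact filename nor any loader API is confirmed by this diff, so treat both as hypothetical.

```python
import json
from importlib import resources

PKG = "comfyui_workflow_templates_media_other"

def load_modules():
    # Assumed path: the index filename is not shown in this diff.
    index = resources.files(PKG) / "templates" / "index.json"
    return json.loads(index.read_text(encoding="utf-8"))

def templates_by_tag(modules, tag):
    """Yield (module title, template dict) pairs whose tags include `tag`."""
    for module in modules:
        for template in module.get("templates", []):
            if tag in template.get("tags", []):
                yield module.get("title", ""), template

if __name__ == "__main__":
    # Example: list every closed-source API template with its release date.
    for module_title, tpl in templates_by_tag(load_modules(), "API"):
        print(f"{module_title}: {tpl['name']} ({tpl.get('date', 'n/a')})")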