comfyui-workflow-templates-media-other 0.3.10__py3-none-any.whl → 0.3.61__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed.json +6 -6
- comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +81 -60
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1.json +2 -2
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model.json +3 -3
- comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals.json +4 -4
- comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song.json +3 -3
- comfyui_workflow_templates_media_other/templates/audio_stable_audio_example.json +2 -2
- comfyui_workflow_templates_media_other/templates/gsc_starter_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_1.json +839 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_2-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_2.json +7037 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_3-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gsc_starter_3.json +2550 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_full.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_dev.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_fast.json +3 -3
- comfyui_workflow_templates_media_other/templates/hidream_i1_full.json +3 -3
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo.json +756 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo_fun_union_controlnet-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/image_z_image_turbo_fun_union_controlnet-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/index.ar.json +2187 -1591
- comfyui_workflow_templates_media_other/templates/index.es.json +2189 -1598
- comfyui_workflow_templates_media_other/templates/index.fr.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.ja.json +2179 -1588
- comfyui_workflow_templates_media_other/templates/index.json +2182 -1592
- comfyui_workflow_templates_media_other/templates/index.ko.json +2179 -1588
- comfyui_workflow_templates_media_other/templates/index.pt-BR.json +3117 -0
- comfyui_workflow_templates_media_other/templates/index.ru.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.schema.json +36 -3
- comfyui_workflow_templates_media_other/templates/index.tr.json +2185 -1589
- comfyui_workflow_templates_media_other/templates/index.zh-TW.json +2188 -1597
- comfyui_workflow_templates_media_other/templates/index.zh.json +2180 -1589
- comfyui_workflow_templates_media_other/templates/sd3.5_large_blur.json +3 -3
- comfyui_workflow_templates_media_other/templates/sd3.5_large_depth.json +4 -4
- comfyui_workflow_templates_media_other/templates/sd3.5_simple_example.json +181 -40
- comfyui_workflow_templates_media_other/templates/templates-color_illustration-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-color_illustration-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-color_illustration.json +176 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/templates-image_to_real.json +1195 -0
- comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16.json +2 -2
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_control.json +2 -2
- comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp.json +2 -2
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/METADATA +1 -1
- comfyui_workflow_templates_media_other-0.3.61.dist-info/RECORD +77 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/2_pass_pose_worship.json +0 -1256
- comfyui_workflow_templates_media_other/templates/ByteDance-Seedance_00003_.json +0 -210
- comfyui_workflow_templates_media_other/templates/area_composition-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition.json +0 -1626
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject.json +0 -1114
- comfyui_workflow_templates_media_other/templates/default-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/default.json +0 -547
- comfyui_workflow_templates_media_other/templates/embedding_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/embedding_example.json +0 -267
- comfyui_workflow_templates_media_other/templates/esrgan_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/esrgan_example.json +0 -635
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/gligen_textbox_example.json +0 -686
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hidream_e1_1.json +0 -1133
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow.json +0 -1029
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-2.webp +0 -0
- comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow.json +0 -772
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model.json +0 -929
- comfyui_workflow_templates_media_other/templates/lora-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora.json +0 -615
- comfyui_workflow_templates_media_other/templates/lora_multiple-1.webp +0 -0
- comfyui_workflow_templates_media_other/templates/lora_multiple.json +0 -656
- comfyui_workflow_templates_media_other-0.3.10.dist-info/RECORD +0 -92
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/WHEEL +0 -0
- {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/top_level.txt +0 -0
|
@@ -2,337 +2,470 @@
|
|
|
2
2
|
{
|
|
3
3
|
"moduleName": "default",
|
|
4
4
|
"type": "image",
|
|
5
|
-
"
|
|
6
|
-
"
|
|
5
|
+
"category": "Tipo de generación",
|
|
6
|
+
"icon": "icon-[lucide--star]",
|
|
7
|
+
"title": "Casos de uso",
|
|
7
8
|
"templates": [
|
|
8
9
|
{
|
|
9
|
-
"name": "
|
|
10
|
-
"title": "
|
|
10
|
+
"name": "templates-color_illustration",
|
|
11
|
+
"title": "Colorear ilustración lineal",
|
|
11
12
|
"mediaType": "image",
|
|
12
13
|
"mediaSubtype": "webp",
|
|
13
|
-
"
|
|
14
|
-
"
|
|
15
|
-
"tags": ["
|
|
16
|
-
"models": ["
|
|
17
|
-
"
|
|
18
|
-
"
|
|
14
|
+
"thumbnailVariant": "compareSlider",
|
|
15
|
+
"description": "Introduce un dibujo lineal en blanco y negro para colorearlo.",
|
|
16
|
+
"tags": ["API"],
|
|
17
|
+
"models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
18
|
+
"openSource": false,
|
|
19
|
+
"date": "2025-12-20",
|
|
20
|
+
"size": 0,
|
|
21
|
+
"vram": 0,
|
|
22
|
+
"searchRank": 8
|
|
19
23
|
},
|
|
20
24
|
{
|
|
21
|
-
"name": "
|
|
22
|
-
"title": "
|
|
25
|
+
"name": "templates-image_to_real",
|
|
26
|
+
"title": "Ilustración a realismo",
|
|
23
27
|
"mediaType": "image",
|
|
24
28
|
"mediaSubtype": "webp",
|
|
25
|
-
"
|
|
26
|
-
"
|
|
27
|
-
"tags": ["
|
|
28
|
-
"models": ["Qwen-Image"],
|
|
29
|
-
"date": "2025-
|
|
30
|
-
"size":
|
|
29
|
+
"thumbnailVariant": "compareSlider",
|
|
30
|
+
"description": "Introduce una ilustración y genera una versión hiperrealista con Qwen Image Edit 2509.",
|
|
31
|
+
"tags": ["Transferencia de estilo"],
|
|
32
|
+
"models": ["Qwen-Image-Edit"],
|
|
33
|
+
"date": "2025-12-20",
|
|
34
|
+
"size": 0,
|
|
35
|
+
"vram": 0
|
|
31
36
|
},
|
|
32
37
|
{
|
|
33
|
-
"name": "
|
|
34
|
-
"title": "
|
|
35
|
-
"description": "Genera videos a partir de una imagen usando Wan2.2 14B",
|
|
38
|
+
"name": "templates-8x8_grid-pfp",
|
|
39
|
+
"title": "Variaciones estilizadas de foto de perfil",
|
|
36
40
|
"mediaType": "image",
|
|
37
41
|
"mediaSubtype": "webp",
|
|
38
|
-
"
|
|
39
|
-
"tags": ["
|
|
40
|
-
"models": ["
|
|
41
|
-
"
|
|
42
|
-
"
|
|
42
|
+
"description": "Sube tu foto de perfil, elige un tema y genera 64 variaciones.",
|
|
43
|
+
"tags": ["API"],
|
|
44
|
+
"models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
45
|
+
"openSource": false,
|
|
46
|
+
"date": "2025-12-18",
|
|
47
|
+
"size": 0,
|
|
48
|
+
"vram": 0,
|
|
49
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
50
|
+
"usage": 51,
|
|
51
|
+
"searchRank": 8
|
|
43
52
|
},
|
|
44
53
|
{
|
|
45
|
-
"name": "
|
|
46
|
-
"title": "
|
|
54
|
+
"name": "templates-subject_product_swap",
|
|
55
|
+
"title": "Cambiar producto en mano (estilo UGC)",
|
|
47
56
|
"mediaType": "image",
|
|
48
57
|
"mediaSubtype": "webp",
|
|
49
|
-
"description": "
|
|
50
|
-
"tags": ["
|
|
51
|
-
"models": ["
|
|
52
|
-
"
|
|
53
|
-
"
|
|
54
|
-
"size":
|
|
58
|
+
"description": "Sube una foto de una persona sosteniendo un producto y la de tu marca. Genera una imagen con los productos intercambiados.",
|
|
59
|
+
"tags": ["Producto", "Replacement", "API"],
|
|
60
|
+
"models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
61
|
+
"openSource": false,
|
|
62
|
+
"date": "2025-12-18",
|
|
63
|
+
"size": 0,
|
|
64
|
+
"vram": 0,
|
|
65
|
+
"usage": 63,
|
|
66
|
+
"searchRank": 8
|
|
55
67
|
},
|
|
56
68
|
{
|
|
57
|
-
"name": "
|
|
58
|
-
"title": "
|
|
69
|
+
"name": "templates-subject_holding_product",
|
|
70
|
+
"title": "Modelo sosteniendo el producto",
|
|
59
71
|
"mediaType": "image",
|
|
60
72
|
"mediaSubtype": "webp",
|
|
61
|
-
"description": "
|
|
62
|
-
"tags": ["
|
|
63
|
-
"models": ["
|
|
64
|
-
"
|
|
65
|
-
"
|
|
66
|
-
"size":
|
|
73
|
+
"description": "Sube una foto de tu personaje y tu producto. Genera una imagen del personaje sosteniendo el producto.",
|
|
74
|
+
"tags": ["Producto", "Portrait", "API"],
|
|
75
|
+
"models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
76
|
+
"openSource": false,
|
|
77
|
+
"date": "2025-12-18",
|
|
78
|
+
"size": 0,
|
|
79
|
+
"vram": 0,
|
|
80
|
+
"usage": 43,
|
|
81
|
+
"searchRank": 8
|
|
67
82
|
},
|
|
68
83
|
{
|
|
69
|
-
"name": "
|
|
70
|
-
"title": "
|
|
84
|
+
"name": "templates-car_product",
|
|
85
|
+
"title": "1 imagen a video de coche",
|
|
71
86
|
"mediaType": "image",
|
|
72
87
|
"mediaSubtype": "webp",
|
|
73
|
-
"description": "
|
|
74
|
-
"
|
|
75
|
-
"
|
|
76
|
-
"
|
|
77
|
-
"date": "2025-
|
|
78
|
-
"size":
|
|
79
|
-
"vram":
|
|
88
|
+
"description": "Sube una foto de tu vehículo y genera un video profesional con varios ángulos.",
|
|
89
|
+
"tags": ["Producto", "Imagen a video", "API", "FLF2V"],
|
|
90
|
+
"models": ["Seedream", "Kling"],
|
|
91
|
+
"openSource": false,
|
|
92
|
+
"date": "2025-12-18",
|
|
93
|
+
"size": 0,
|
|
94
|
+
"vram": 0,
|
|
95
|
+
"requiresCustomNodes": ["comfyui-videohelpersuite"],
|
|
96
|
+
"usage": 70,
|
|
97
|
+
"searchRank": 8
|
|
80
98
|
},
|
|
81
99
|
{
|
|
82
|
-
"name": "
|
|
83
|
-
"title": "Imagen a
|
|
100
|
+
"name": "templates-photo_to_product_vid",
|
|
101
|
+
"title": "Imagen de producto a video: Zapato",
|
|
84
102
|
"mediaType": "image",
|
|
85
103
|
"mediaSubtype": "webp",
|
|
86
|
-
"description": "
|
|
87
|
-
"
|
|
88
|
-
"
|
|
89
|
-
"
|
|
90
|
-
"date": "2025-
|
|
91
|
-
"size":
|
|
92
|
-
"vram":
|
|
93
|
-
"
|
|
104
|
+
"description": "Toma una foto con tu móvil, súbela y genera un video profesional del producto.",
|
|
105
|
+
"tags": ["Producto", "Imagen a video", "API"],
|
|
106
|
+
"models": ["Seedream", "Hailuo"],
|
|
107
|
+
"openSource": false,
|
|
108
|
+
"date": "2025-12-18",
|
|
109
|
+
"size": 0,
|
|
110
|
+
"vram": 0,
|
|
111
|
+
"requiresCustomNodes": ["comfyui-videohelpersuite"],
|
|
112
|
+
"usage": 124,
|
|
113
|
+
"searchRank": 8
|
|
94
114
|
},
|
|
95
115
|
{
|
|
96
|
-
"name": "
|
|
97
|
-
"title": "
|
|
116
|
+
"name": "templates-stitched_vid_contact_sheet",
|
|
117
|
+
"title": "De personaje y accesorios a video de moda",
|
|
98
118
|
"mediaType": "image",
|
|
99
119
|
"mediaSubtype": "webp",
|
|
100
|
-
"description": "
|
|
101
|
-
"
|
|
102
|
-
"
|
|
103
|
-
"
|
|
104
|
-
"date": "2025-
|
|
105
|
-
"size":
|
|
106
|
-
"vram":
|
|
120
|
+
"description": "Sube personaje y ropa, genera foto y video en cuadrícula 8x.",
|
|
121
|
+
"tags": ["Fashion", "Imagen a video", "FLF2V", "API"],
|
|
122
|
+
"models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Kling", "Kling O1", "OpenAI"],
|
|
123
|
+
"openSource": false,
|
|
124
|
+
"date": "2025-12-18",
|
|
125
|
+
"size": 0,
|
|
126
|
+
"vram": 0,
|
|
127
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
128
|
+
"usage": 78,
|
|
129
|
+
"searchRank": 8
|
|
107
130
|
},
|
|
108
131
|
{
|
|
109
|
-
"name": "
|
|
110
|
-
"title": "
|
|
132
|
+
"name": "templates-assemble_dieline",
|
|
133
|
+
"title": "Generar empaque de marca desde dieline",
|
|
111
134
|
"mediaType": "image",
|
|
112
135
|
"mediaSubtype": "webp",
|
|
113
|
-
"
|
|
114
|
-
"
|
|
115
|
-
"tags": ["
|
|
116
|
-
"models": ["
|
|
117
|
-
"
|
|
118
|
-
"
|
|
119
|
-
"
|
|
136
|
+
"thumbnailVariant": "hoverDissolve",
|
|
137
|
+
"description": "Sube el dieline de tu producto y ensámblalo en un paquete 3D.",
|
|
138
|
+
"tags": ["Producto", "Edición imagen"],
|
|
139
|
+
"models": ["Google Gemini Image", "Nano Banana Pro", "Google"],
|
|
140
|
+
"openSource": false,
|
|
141
|
+
"date": "2025-12-15",
|
|
142
|
+
"size": 0,
|
|
143
|
+
"vram": 0,
|
|
144
|
+
"usage": 12,
|
|
145
|
+
"searchRank": 8
|
|
120
146
|
},
|
|
121
147
|
{
|
|
122
|
-
"name": "
|
|
123
|
-
"title": "
|
|
148
|
+
"name": "templates-fashion_shoot_vton",
|
|
149
|
+
"title": "Personaje + ropa (OOTD) flat lay a sesión de estudio",
|
|
124
150
|
"mediaType": "image",
|
|
125
151
|
"mediaSubtype": "webp",
|
|
126
|
-
"description": "
|
|
127
|
-
"
|
|
128
|
-
"
|
|
129
|
-
"
|
|
130
|
-
"
|
|
131
|
-
"
|
|
132
|
-
"
|
|
133
|
-
"
|
|
152
|
+
"description": "Sube una imagen de tu personaje y otra de la ropa. Se generan 4 fotos editoriales; elige una para mejorarla y añadir detalles.",
|
|
153
|
+
"tags": ["Fashion", "Edición imagen"],
|
|
154
|
+
"models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
155
|
+
"openSource": false,
|
|
156
|
+
"date": "2025-12-15",
|
|
157
|
+
"size": 0,
|
|
158
|
+
"vram": 0,
|
|
159
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
160
|
+
"usage": 104,
|
|
161
|
+
"searchRank": 8
|
|
134
162
|
},
|
|
135
163
|
{
|
|
136
|
-
"name": "
|
|
137
|
-
"title": "
|
|
164
|
+
"name": "templates-fashion_shoot_prompt_doodle",
|
|
165
|
+
"title": "Selfie y texto a sesión de fotos con doodles",
|
|
138
166
|
"mediaType": "image",
|
|
139
167
|
"mediaSubtype": "webp",
|
|
140
|
-
"description": "
|
|
141
|
-
"
|
|
142
|
-
"
|
|
143
|
-
"
|
|
144
|
-
"
|
|
145
|
-
"
|
|
146
|
-
"
|
|
147
|
-
"
|
|
168
|
+
"description": "Sube un selfie y describe tu outfit. Genera 4 fotos de moda con doodles. Elige una para mejorar detalles.",
|
|
169
|
+
"tags": ["Fashion", "Edición imagen"],
|
|
170
|
+
"models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
|
|
171
|
+
"openSource": false,
|
|
172
|
+
"date": "2025-12-15",
|
|
173
|
+
"size": 0,
|
|
174
|
+
"vram": 0,
|
|
175
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
176
|
+
"usage": 20,
|
|
177
|
+
"searchRank": 8
|
|
148
178
|
},
|
|
149
179
|
{
|
|
150
|
-
"name": "
|
|
151
|
-
"title": "
|
|
180
|
+
"name": "templates-poster_product_integration",
|
|
181
|
+
"title": "Genera póster/anuncio con tu producto",
|
|
152
182
|
"mediaType": "image",
|
|
153
183
|
"mediaSubtype": "webp",
|
|
154
|
-
"
|
|
155
|
-
"
|
|
156
|
-
"tags": ["
|
|
157
|
-
"models": ["
|
|
158
|
-
"
|
|
159
|
-
"
|
|
160
|
-
"
|
|
184
|
+
"thumbnailVariant": "compareSlider",
|
|
185
|
+
"description": "Sube tu producto y un texto breve. Genera un póster y ajusta el diseño antes de insertar el producto.",
|
|
186
|
+
"tags": ["Producto", "Edición imagen"],
|
|
187
|
+
"models": ["ByteDance", "Seedream", "Google Gemini"],
|
|
188
|
+
"openSource": false,
|
|
189
|
+
"date": "2025-12-15",
|
|
190
|
+
"size": 0,
|
|
191
|
+
"vram": 0,
|
|
192
|
+
"requiresCustomNodes": ["comfyui_essentials"],
|
|
193
|
+
"usage": 37,
|
|
194
|
+
"searchRank": 8
|
|
161
195
|
},
|
|
162
196
|
{
|
|
163
|
-
"name": "
|
|
164
|
-
"title": "
|
|
197
|
+
"name": "templates-3D_logo_texture_animation",
|
|
198
|
+
"title": "Animaciones de logotipo 3D dinámicas",
|
|
165
199
|
"mediaType": "image",
|
|
166
200
|
"mediaSubtype": "webp",
|
|
167
|
-
"description": "
|
|
168
|
-
"
|
|
169
|
-
"
|
|
170
|
-
"
|
|
171
|
-
"date": "2025-
|
|
172
|
-
"size":
|
|
173
|
-
"vram":
|
|
201
|
+
"description": "Sube el logo en vector y pide una textura. Se generan los primeros y últimos fotogramas 3D y la animación completa.",
|
|
202
|
+
"tags": ["Diseño de marca", "FLF2V"],
|
|
203
|
+
"models": ["ByteDance", "Seedream", "Google Gemini", "Nano Banana Pro"],
|
|
204
|
+
"openSource": false,
|
|
205
|
+
"date": "2025-12-15",
|
|
206
|
+
"size": 0,
|
|
207
|
+
"vram": 0,
|
|
208
|
+
"usage": 42,
|
|
209
|
+
"searchRank": 8
|
|
174
210
|
},
|
|
175
211
|
{
|
|
176
|
-
"name": "
|
|
177
|
-
"title": "
|
|
212
|
+
"name": "templates-product_scene_relight",
|
|
213
|
+
"title": "Fusión de producto y escena con reiluminación",
|
|
178
214
|
"mediaType": "image",
|
|
179
215
|
"mediaSubtype": "webp",
|
|
180
|
-
"
|
|
181
|
-
"
|
|
182
|
-
"
|
|
183
|
-
"
|
|
184
|
-
"
|
|
185
|
-
"
|
|
186
|
-
"
|
|
216
|
+
"thumbnailVariant": "compareSlider",
|
|
217
|
+
"description": "Sube imágenes de tu producto y fondo, combínalas y ajusta la luz con Seedream 4.5.",
|
|
218
|
+
"tags": ["Producto", "Edición imagen", "Reiluminar"],
|
|
219
|
+
"models": ["ByteDance", "Seedream"],
|
|
220
|
+
"openSource": false,
|
|
221
|
+
"date": "2025-12-15",
|
|
222
|
+
"size": 0,
|
|
223
|
+
"vram": 0,
|
|
224
|
+
"usage": 11,
|
|
225
|
+
"searchRank": 8
|
|
187
226
|
},
|
|
188
227
|
{
|
|
189
|
-
"name": "
|
|
190
|
-
"title": "
|
|
228
|
+
"name": "templates-textured_logo_elements",
|
|
229
|
+
"title": "Añadir textura y elementos a logo",
|
|
191
230
|
"mediaType": "image",
|
|
192
231
|
"mediaSubtype": "webp",
|
|
193
|
-
"description": "
|
|
194
|
-
"tags": ["
|
|
195
|
-
"models": ["
|
|
196
|
-
"date": "2025-
|
|
197
|
-
"
|
|
198
|
-
"size":
|
|
199
|
-
"vram":
|
|
232
|
+
"description": "Sube tu logo, textura y elementos. Genera un video del logo texturizado como recurso corporativo.",
|
|
233
|
+
"tags": ["Diseño de marca", "Imagen a video"],
|
|
234
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
|
|
235
|
+
"date": "2025-12-11",
|
|
236
|
+
"openSource": false,
|
|
237
|
+
"size": 0,
|
|
238
|
+
"vram": 0,
|
|
239
|
+
"usage": 255,
|
|
240
|
+
"searchRank": 8
|
|
200
241
|
},
|
|
201
242
|
{
|
|
202
|
-
"name": "
|
|
203
|
-
"title": "
|
|
243
|
+
"name": "templates-qwen_image_edit-crop_and_stitch-fusion",
|
|
244
|
+
"title": "Reiluminar producto compuesto",
|
|
204
245
|
"mediaType": "image",
|
|
205
246
|
"mediaSubtype": "webp",
|
|
206
|
-
"description": "
|
|
247
|
+
"description": "Sube una imagen compuesta de tu producto, dibuja una máscara en el editor de máscaras y vuelve a iluminar tu producto en la escena.",
|
|
248
|
+
"tags": ["Edición imagen", "Reiluminar"],
|
|
249
|
+
"models": ["Qwen-Image-Edit"],
|
|
250
|
+
"date": "2025-12-11",
|
|
251
|
+
"size": 0,
|
|
207
252
|
"thumbnailVariant": "compareSlider",
|
|
208
|
-
"
|
|
209
|
-
"
|
|
210
|
-
"
|
|
211
|
-
"
|
|
212
|
-
"size": 2136746230,
|
|
213
|
-
"vram": 3929895076
|
|
253
|
+
"vram": 0,
|
|
254
|
+
"requiresCustomNodes": ["comfyui-inpaint-cropandstitch"],
|
|
255
|
+
"usage": 361,
|
|
256
|
+
"searchRank": 8
|
|
214
257
|
},
|
|
215
258
|
{
|
|
216
|
-
"name": "
|
|
217
|
-
"title": "
|
|
259
|
+
"name": "templates-textured_logotype-v2.1",
|
|
260
|
+
"title": "Aplicar textura al logo",
|
|
218
261
|
"mediaType": "image",
|
|
219
262
|
"mediaSubtype": "webp",
|
|
220
|
-
"description": "
|
|
221
|
-
"
|
|
222
|
-
"
|
|
223
|
-
"
|
|
224
|
-
"
|
|
225
|
-
"
|
|
226
|
-
"
|
|
227
|
-
"
|
|
263
|
+
"description": "Sube tu logo y aplica textura y elementos para crear un recurso de marca",
|
|
264
|
+
"tags": ["Diseño de marca", "Imagen a video", "FLF2V"],
|
|
265
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
|
|
266
|
+
"date": "2025-12-03",
|
|
267
|
+
"openSource": false,
|
|
268
|
+
"size": 0,
|
|
269
|
+
"vram": 0,
|
|
270
|
+
"usage": 299,
|
|
271
|
+
"searchRank": 8
|
|
228
272
|
},
|
|
229
273
|
{
|
|
230
|
-
"name": "
|
|
231
|
-
"title": "
|
|
274
|
+
"name": "templates-product_ad-v2.0",
|
|
275
|
+
"title": "Intercambio de producto en anuncio",
|
|
232
276
|
"mediaType": "image",
|
|
233
277
|
"mediaSubtype": "webp",
|
|
234
|
-
"description": "
|
|
235
|
-
"
|
|
236
|
-
"
|
|
237
|
-
"
|
|
238
|
-
"
|
|
239
|
-
"
|
|
240
|
-
"
|
|
241
|
-
"
|
|
278
|
+
"description": "Crea anuncios estáticos para tu producto al estilo de un anuncio de referencia",
|
|
279
|
+
"tags": ["Referencia de estilo"],
|
|
280
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
|
|
281
|
+
"date": "2025-12-03",
|
|
282
|
+
"openSource": false,
|
|
283
|
+
"size": 0,
|
|
284
|
+
"vram": 0,
|
|
285
|
+
"usage": 222,
|
|
286
|
+
"searchRank": 8
|
|
242
287
|
},
|
|
243
288
|
{
|
|
244
|
-
"name": "
|
|
245
|
-
"title": "
|
|
289
|
+
"name": "templates-6-key-frames",
|
|
290
|
+
"title": "Video continuo por fotogramas clave",
|
|
246
291
|
"mediaType": "image",
|
|
247
292
|
"mediaSubtype": "webp",
|
|
248
|
-
"description": "
|
|
249
|
-
"
|
|
250
|
-
"
|
|
251
|
-
"
|
|
252
|
-
"
|
|
253
|
-
"
|
|
254
|
-
"
|
|
255
|
-
"
|
|
293
|
+
"description": "Video suave uniendo 6 fotogramas clave seguidos",
|
|
294
|
+
"tags": ["Imagen a video", "FLF2V"],
|
|
295
|
+
"models": ["Wan2.2"],
|
|
296
|
+
"date": "2025-12-03",
|
|
297
|
+
"size": 0,
|
|
298
|
+
"vram": 0,
|
|
299
|
+
"usage": 1972,
|
|
300
|
+
"searchRank": 8
|
|
256
301
|
},
|
|
257
302
|
{
|
|
258
|
-
"name": "
|
|
259
|
-
"title": "
|
|
303
|
+
"name": "templates-9grid_social_media-v2.0",
|
|
304
|
+
"title": "Generador de anuncio 3x3",
|
|
260
305
|
"mediaType": "image",
|
|
261
306
|
"mediaSubtype": "webp",
|
|
262
|
-
"description": "
|
|
263
|
-
"
|
|
264
|
-
"
|
|
265
|
-
"
|
|
266
|
-
"
|
|
267
|
-
"
|
|
268
|
-
"
|
|
269
|
-
"
|
|
307
|
+
"description": "Sube tu producto e introduce un mensaje corto para cada posición en la cuadrícula 3x3. Se generarán 9 imágenes únicas a partir de tus mensajes. A continuación, selecciona las que más te gusten y amplíalas a 4K usando tu producto original como referencia.",
|
|
308
|
+
"tags": ["Edición imagen", "Imagen"],
|
|
309
|
+
"models": ["Nano Banana Pro", "Google"],
|
|
310
|
+
"date": "2025-12-06",
|
|
311
|
+
"size": 0,
|
|
312
|
+
"vram": 0,
|
|
313
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
314
|
+
"usage": 466,
|
|
315
|
+
"searchRank": 8
|
|
270
316
|
},
|
|
271
317
|
{
|
|
272
|
-
"name": "
|
|
273
|
-
"title": "
|
|
318
|
+
"name": "templates-poster_to_2x2_mockups-v2.0",
|
|
319
|
+
"title": "Mockups de escenas de póster",
|
|
274
320
|
"mediaType": "image",
|
|
275
321
|
"mediaSubtype": "webp",
|
|
276
|
-
"description": "
|
|
277
|
-
"
|
|
278
|
-
"
|
|
279
|
-
"
|
|
280
|
-
"
|
|
281
|
-
"
|
|
282
|
-
"
|
|
283
|
-
"vram":
|
|
322
|
+
"description": "Sube un diseño de póster o anuncio y, añadiendo una breve información sobre tu marca, genera 4 bocetos en diferentes escenarios.",
|
|
323
|
+
"tags": ["Edición imagen", "Maqueta"],
|
|
324
|
+
"models": ["Nano Banana Pro", "Google"],
|
|
325
|
+
"date": "2025-12-06",
|
|
326
|
+
"openSource": false,
|
|
327
|
+
"size": 0,
|
|
328
|
+
"requiresCustomNodes": ["comfyui_essentials"],
|
|
329
|
+
"vram": 0,
|
|
330
|
+
"usage": 61,
|
|
331
|
+
"searchRank": 8
|
|
284
332
|
},
|
|
285
333
|
{
|
|
286
|
-
"name": "
|
|
287
|
-
"title": "
|
|
334
|
+
"name": "template-multistyle-magazine-cover-nanobananapro",
|
|
335
|
+
"title": "Portada de revista y diseño de embalaje",
|
|
288
336
|
"mediaType": "image",
|
|
289
337
|
"mediaSubtype": "webp",
|
|
290
|
-
"description": "
|
|
291
|
-
"
|
|
292
|
-
"
|
|
293
|
-
"
|
|
294
|
-
"
|
|
295
|
-
"
|
|
296
|
-
"
|
|
297
|
-
"
|
|
338
|
+
"description": "Diseña la disposición del texto para la portada de tu revista y explora opciones de empaquetado.",
|
|
339
|
+
"tags": ["Edición imagen", "Maqueta", "Maquetación"],
|
|
340
|
+
"models": ["Nano Banana Pro", "Google"],
|
|
341
|
+
"date": "2025-12-06",
|
|
342
|
+
"openSource": false,
|
|
343
|
+
"size": 0,
|
|
344
|
+
"vram": 0,
|
|
345
|
+
"usage": 87,
|
|
346
|
+
"searchRank": 8
|
|
298
347
|
},
|
|
299
348
|
{
|
|
300
|
-
"name": "
|
|
301
|
-
"title": "
|
|
349
|
+
"name": "templates-1_click_multiple_scene_angles-v1.0",
|
|
350
|
+
"title": "Escena multiángulo 1 clic",
|
|
302
351
|
"mediaType": "image",
|
|
303
352
|
"mediaSubtype": "webp",
|
|
304
|
-
"description": "
|
|
305
|
-
"
|
|
306
|
-
"
|
|
307
|
-
"
|
|
308
|
-
"
|
|
309
|
-
"
|
|
310
|
-
"
|
|
311
|
-
"
|
|
353
|
+
"description": "Sube tu escena y genera varias vistas con un clic.",
|
|
354
|
+
"tags": ["Image Eidt"],
|
|
355
|
+
"models": ["Qwen-Image-Edit"],
|
|
356
|
+
"date": "2025-12-08",
|
|
357
|
+
"size": 31198642438,
|
|
358
|
+
"vram": 31198642438,
|
|
359
|
+
"usage": 1508,
|
|
360
|
+
"searchRank": 8
|
|
312
361
|
},
|
|
313
362
|
{
|
|
314
|
-
"name": "
|
|
315
|
-
"title": "
|
|
363
|
+
"name": "templates-1_click_multiple_character_angles-v1.0",
|
|
364
|
+
"title": "Múltiples ángulos personaje",
|
|
316
365
|
"mediaType": "image",
|
|
317
366
|
"mediaSubtype": "webp",
|
|
318
|
-
"description": "
|
|
319
|
-
"
|
|
320
|
-
"
|
|
321
|
-
"
|
|
322
|
-
"
|
|
323
|
-
"
|
|
324
|
-
"
|
|
325
|
-
"
|
|
367
|
+
"description": "Sube tu personaje y obtén varias vistas",
|
|
368
|
+
"tags": ["Image Eidt"],
|
|
369
|
+
"models": ["Qwen-Image-Edit"],
|
|
370
|
+
"date": "2025-12-08",
|
|
371
|
+
"size": 31198642438,
|
|
372
|
+
"vram": 31198642438,
|
|
373
|
+
"usage": 3637,
|
|
374
|
+
"searchRank": 8
|
|
375
|
+
},
|
|
376
|
+
{
|
|
377
|
+
"name": "template-Animation_Trajectory_Control_Wan_ATI",
|
|
378
|
+
"title": "Control de trayectoria de animación",
|
|
379
|
+
"mediaType": "image",
|
|
380
|
+
"mediaSubtype": "webp",
|
|
381
|
+
"description": "Dibuja una trayectoria de movimiento para animar la imagen a lo largo de ella.",
|
|
382
|
+
"tags": ["Imagen a video"],
|
|
383
|
+
"models": ["Wan2.1"],
|
|
384
|
+
"date": "2025-12-11",
|
|
385
|
+
"size": 31604570534,
|
|
386
|
+
"requiresCustomNodes": ["ComfyUI-WanVideoWrapper", "comfyui_fill-nodes"],
|
|
387
|
+
"vram": 31604570534,
|
|
388
|
+
"usage": 449,
|
|
389
|
+
"searchRank": 8
|
|
326
390
|
}
|
|
327
391
|
]
|
|
328
392
|
},
|
|
329
393
|
{
|
|
330
394
|
"moduleName": "default",
|
|
331
395
|
"type": "image",
|
|
332
|
-
"category": "
|
|
396
|
+
"category": "Tipo de generación",
|
|
333
397
|
"icon": "icon-[lucide--image]",
|
|
334
|
-
"title": "
|
|
398
|
+
"title": "Imagen",
|
|
335
399
|
"templates": [
|
|
400
|
+
{
|
|
401
|
+
"name": "image_z_image_turbo",
|
|
402
|
+
"title": "Z-Image-Turbo texto a imagen",
|
|
403
|
+
"mediaType": "image",
|
|
404
|
+
"mediaSubtype": "webp",
|
|
405
|
+
"description": "Modelo fundacional eficiente de generación de imágenes con transformador de difusión de flujo único, compatible con inglés y chino.",
|
|
406
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
407
|
+
"models": ["Z-Image-Turbo"],
|
|
408
|
+
"date": "2025-11-27",
|
|
409
|
+
"size": 20862803640,
|
|
410
|
+
"vram": 20862803640,
|
|
411
|
+
"usage": 27801
|
|
412
|
+
},
|
|
413
|
+
{
|
|
414
|
+
"name": "image_z_image_turbo_fun_union_controlnet",
|
|
415
|
+
"title": "Z-Image-Turbo control a imagen",
|
|
416
|
+
"mediaType": "image",
|
|
417
|
+
"mediaSubtype": "webp",
|
|
418
|
+
"description": "ControlNet para Z-Image-Turbo compatible con distintos tipos de control como Canny, HED, profundidad, pose y MLSD.",
|
|
419
|
+
"tags": ["Imagen", "ControlNet"],
|
|
420
|
+
"models": ["Z-Image-Turbo"],
|
|
421
|
+
"date": "2025-12-02",
|
|
422
|
+
"size": 23794118820,
|
|
423
|
+
"thumbnailVariant": "compareSlider",
|
|
424
|
+
"vram": 23794118820,
|
|
425
|
+
"usage": 3859
|
|
426
|
+
},
|
|
427
|
+
{
|
|
428
|
+
"name": "image_qwen_image_edit_2511",
|
|
429
|
+
"title": "Qwen Image Edit 2511 - Reemplazo de Materiales",
|
|
430
|
+
"mediaType": "image",
|
|
431
|
+
"mediaSubtype": "webp",
|
|
432
|
+
"thumbnailVariant": "compareSlider",
|
|
433
|
+
"description": "Combina imágenes de referencia para reemplazar materiales en objetos (por ejemplo, muebles) con Qwen-Image-Edit-2511.",
|
|
434
|
+
"tags": ["Edición imagen"],
|
|
435
|
+
"models": ["Qwen-Image-Edit"],
|
|
436
|
+
"date": "2025-12-23",
|
|
437
|
+
"size": 51367808860,
|
|
438
|
+
"vram": 51367808860
|
|
439
|
+
},
|
|
440
|
+
{
|
|
441
|
+
"name": "image_qwen_image_edit_2509",
|
|
442
|
+
"title": "Qwen Image Edit 2509",
|
|
443
|
+
"mediaType": "image",
|
|
444
|
+
"mediaSubtype": "webp",
|
|
445
|
+
"thumbnailVariant": "compareSlider",
|
|
446
|
+
"description": "Edición avanzada de imágenes con soporte multi-imagen, consistencia mejorada e integración de ControlNet.",
|
|
447
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
448
|
+
"tags": ["Imagen a imagen", "Edición imagen", "ControlNet"],
|
|
449
|
+
"models": ["Qwen-Image"],
|
|
450
|
+
"date": "2025-09-25",
|
|
451
|
+
"size": 31772020572,
|
|
452
|
+
"vram": 31772020572,
|
|
453
|
+
"usage": 9323
|
|
454
|
+
},
|
|
455
|
+
{
|
|
456
|
+
"name": "image_qwen_image_edit_2509_relight",
|
|
457
|
+
"title": "Reluz de foto",
|
|
458
|
+
"mediaType": "image",
|
|
459
|
+
"mediaSubtype": "webp",
|
|
460
|
+
"thumbnailVariant": "compareSlider",
|
|
461
|
+
"description": "Reluz fotos usando Qwen-Image-Edit y LoRA.",
|
|
462
|
+
"tags": ["Edición imagen", "Reiluminar"],
|
|
463
|
+
"models": ["Qwen-Image-Edit"],
|
|
464
|
+
"date": "2025-12-15",
|
|
465
|
+
"size": 31772020572,
|
|
466
|
+
"vram": 31772020572,
|
|
467
|
+
"usage": 192
|
|
468
|
+
},
|
|
336
469
|
{
|
|
337
470
|
"name": "image_flux2",
|
|
338
471
|
"title": "Flux.2 Dev",
|
|
@@ -340,11 +473,25 @@
|
|
|
340
473
|
"mediaSubtype": "webp",
|
|
341
474
|
"thumbnailVariant": "compareSlider",
|
|
342
475
|
"description": "Genera imágenes fotorrealistas con coherencia multi-referencia y renderizado profesional de texto.",
|
|
343
|
-
"tags": ["Texto a imagen", "Imagen", "Edición
|
|
344
|
-
"models": ["Flux.2 Dev", "BFL"],
|
|
476
|
+
"tags": ["Texto a imagen", "Imagen", "Edición imagen"],
|
|
477
|
+
"models": ["Flux.2 Dev", "BFL", "Flux"],
|
|
478
|
+
"date": "2025-11-26",
|
|
479
|
+
"size": 71781788416,
|
|
480
|
+
"vram": 71781788416,
|
|
481
|
+
"usage": 9538
|
|
482
|
+
},
|
|
483
|
+
{
|
|
484
|
+
"name": "image_flux2_text_to_image",
|
|
485
|
+
"title": "Flux.2 Dev texto a imagen",
|
|
486
|
+
"mediaType": "image",
|
|
487
|
+
"mediaSubtype": "webp",
|
|
488
|
+
"description": "Texto a imagen con iluminación mejorada, materiales y detalles realistas.",
|
|
489
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
490
|
+
"models": ["Flux.2 Dev", "BFL", "Flux"],
|
|
345
491
|
"date": "2025-11-26",
|
|
346
492
|
"size": 71382356459,
|
|
347
|
-
"vram":
|
|
493
|
+
"vram": 71382356459,
|
|
494
|
+
"usage": 4002
|
|
348
495
|
},
|
|
349
496
|
{
|
|
350
497
|
"name": "image_flux2_fp8",
|
|
@@ -352,22 +499,24 @@
|
|
|
352
499
|
"mediaType": "image",
|
|
353
500
|
"mediaSubtype": "webp",
|
|
354
501
|
"description": "Crea maquetas de productos aplicando patrones de diseño a envases, tazas y otros productos usando consistencia multi-referencia.",
|
|
355
|
-
"tags": ["Texto a imagen", "Imagen", "Edición
|
|
356
|
-
"models": ["Flux.2 Dev", "BFL"],
|
|
502
|
+
"tags": ["Texto a imagen", "Imagen", "Edición imagen", "Maqueta", "Producto"],
|
|
503
|
+
"models": ["Flux.2 Dev", "BFL", "Flux"],
|
|
357
504
|
"date": "2025-11-26",
|
|
358
505
|
"size": 53837415055,
|
|
359
|
-
"vram":
|
|
506
|
+
"vram": 53837415055,
|
|
507
|
+
"usage": 436
|
|
360
508
|
},
|
|
361
509
|
{
|
|
362
|
-
"name": "
|
|
363
|
-
"title": "
|
|
510
|
+
"name": "image_qwen_image_layered",
|
|
511
|
+
"title": "Qwen-Image-Layered: Descomposición en capas",
|
|
364
512
|
"mediaType": "image",
|
|
365
513
|
"mediaSubtype": "webp",
|
|
366
|
-
"description": "
|
|
367
|
-
"tags": ["
|
|
368
|
-
"models": ["
|
|
369
|
-
"date": "2025-
|
|
370
|
-
"size":
|
|
514
|
+
"description": "Descompón en RGBA editable",
|
|
515
|
+
"tags": ["Layer Decompose"],
|
|
516
|
+
"models": ["Qwen-Image-Layered"],
|
|
517
|
+
"date": "2025-12-22",
|
|
518
|
+
"size": 50446538375,
|
|
519
|
+
"vram": 50446538375
|
|
371
520
|
},
|
|
372
521
|
{
|
|
373
522
|
"name": "image_qwen_image",
|
|
@@ -379,7 +528,9 @@
|
|
|
379
528
|
"tags": ["Texto a imagen", "Imagen"],
|
|
380
529
|
"models": ["Qwen-Image"],
|
|
381
530
|
"date": "2025-08-05",
|
|
382
|
-
"size": 31772020572
|
|
531
|
+
"size": 31772020572,
|
|
532
|
+
"vram": 31772020572,
|
|
533
|
+
"usage": 1143
|
|
383
534
|
},
|
|
384
535
|
{
|
|
385
536
|
"name": "image_qwen_image_instantx_controlnet",
|
|
@@ -391,7 +542,9 @@
|
|
|
391
542
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
392
543
|
"models": ["Qwen-Image"],
|
|
393
544
|
"date": "2025-08-23",
|
|
394
|
-
"size": 35304631173
|
|
545
|
+
"size": 35304631173,
|
|
546
|
+
"vram": 35304631173,
|
|
547
|
+
"usage": 472
|
|
395
548
|
},
|
|
396
549
|
{
|
|
397
550
|
"name": "image_qwen_image_instantx_inpainting_controlnet",
|
|
@@ -404,7 +557,9 @@
|
|
|
404
557
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
405
558
|
"models": ["Qwen-Image"],
|
|
406
559
|
"date": "2025-09-12",
|
|
407
|
-
"size": 36013300777
|
|
560
|
+
"size": 36013300777,
|
|
561
|
+
"vram": 36013300777,
|
|
562
|
+
"usage": 515
|
|
408
563
|
},
|
|
409
564
|
{
|
|
410
565
|
"name": "image_qwen_image_union_control_lora",
|
|
@@ -416,7 +571,10 @@
|
|
|
416
571
|
"models": ["Qwen-Image"],
|
|
417
572
|
"date": "2025-08-23",
|
|
418
573
|
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
|
|
419
|
-
"size": 32716913377
|
|
574
|
+
"size": 32716913377,
|
|
575
|
+
"thumbnailVariant": "compareSlider",
|
|
576
|
+
"vram": 32716913377,
|
|
577
|
+
"usage": 340
|
|
420
578
|
},
|
|
421
579
|
{
|
|
422
580
|
"name": "image_qwen_image_controlnet_patch",
|
|
@@ -429,1452 +587,1435 @@
|
|
|
429
587
|
"tags": ["Texto a imagen", "Imagen", "ControlNet"],
|
|
430
588
|
"models": ["Qwen-Image"],
|
|
431
589
|
"date": "2025-08-24",
|
|
432
|
-
"size": 34037615821
|
|
590
|
+
"size": 34037615821,
|
|
591
|
+
"vram": 34037615821,
|
|
592
|
+
"usage": 218
|
|
433
593
|
},
|
|
434
594
|
{
|
|
435
|
-
"name": "
|
|
436
|
-
"title": "
|
|
595
|
+
"name": "api_nano_banana_pro",
|
|
596
|
+
"title": "Nano Banana Pro",
|
|
597
|
+
"description": "Nano-banana Pro (Gemini 3.0 Pro Image) - Generación y edición de imágenes 4K de calidad de estudio con renderizado de texto mejorado y consistencia de personajes.",
|
|
437
598
|
"mediaType": "image",
|
|
438
599
|
"mediaSubtype": "webp",
|
|
439
|
-
"thumbnailVariant": "
|
|
440
|
-
"
|
|
441
|
-
"
|
|
442
|
-
"
|
|
443
|
-
"
|
|
444
|
-
"
|
|
445
|
-
"
|
|
600
|
+
"thumbnailVariant": "hoverDissolve",
|
|
601
|
+
"tags": ["Edición imagen", "Imagen", "API"],
|
|
602
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google"],
|
|
603
|
+
"date": "2025-11-21",
|
|
604
|
+
"openSource": false,
|
|
605
|
+
"size": 0,
|
|
606
|
+
"vram": 0,
|
|
607
|
+
"usage": 6749
|
|
446
608
|
},
|
|
447
609
|
{
|
|
448
|
-
"name": "
|
|
449
|
-
"title": "
|
|
610
|
+
"name": "api_from_photo_2_miniature",
|
|
611
|
+
"title": "Estilo de foto a modelo",
|
|
612
|
+
"description": "Transforma fotos reales de edificios en planos arquitectónicos y luego en modelos físicos en miniatura detallados. Una cadena completa de visualización arquitectónica desde la foto hasta la maqueta.",
|
|
450
613
|
"mediaType": "image",
|
|
451
614
|
"mediaSubtype": "webp",
|
|
452
|
-
"
|
|
453
|
-
"
|
|
454
|
-
"
|
|
455
|
-
"
|
|
456
|
-
"
|
|
457
|
-
"
|
|
458
|
-
"
|
|
615
|
+
"tags": ["Edición imagen", "Imagen", "3D"],
|
|
616
|
+
"models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google"],
|
|
617
|
+
"date": "2025-11-21",
|
|
618
|
+
"openSource": false,
|
|
619
|
+
"size": 0,
|
|
620
|
+
"vram": 0,
|
|
621
|
+
"usage": 288
|
|
459
622
|
},
|
|
460
623
|
{
|
|
461
|
-
"name": "
|
|
462
|
-
"title": "
|
|
624
|
+
"name": "api_openai_fashion_billboard_generator",
|
|
625
|
+
"title": "Generador de cartel publicitario de moda",
|
|
626
|
+
"description": "Convierte fotos de ropa en anuncios profesionales en centros comerciales con modelos realistas.",
|
|
463
627
|
"mediaType": "image",
|
|
464
628
|
"mediaSubtype": "webp",
|
|
465
|
-
"
|
|
466
|
-
"
|
|
467
|
-
"
|
|
468
|
-
"
|
|
469
|
-
"
|
|
470
|
-
"
|
|
629
|
+
"tags": ["Edición imagen", "Imagen", "API", "Fashion", "Maqueta"],
|
|
630
|
+
"models": ["GPT-Image-1.5", "OpenAI"],
|
|
631
|
+
"date": "2025-12-18",
|
|
632
|
+
"openSource": false,
|
|
633
|
+
"size": 0,
|
|
634
|
+
"vram": 0,
|
|
635
|
+
"usage": 50
|
|
471
636
|
},
|
|
472
637
|
{
|
|
473
|
-
"name": "
|
|
474
|
-
"title": "
|
|
638
|
+
"name": "api_bytedance_seedream4",
|
|
639
|
+
"title": "ByteDance Seedream 4.0",
|
|
640
|
+
"description": "Modelo de IA multimodal para texto a imagen y edición de imágenes. Genera imágenes 2K en menos de 2 segundos con control en lenguaje natural.",
|
|
475
641
|
"mediaType": "image",
|
|
476
642
|
"mediaSubtype": "webp",
|
|
477
|
-
"
|
|
478
|
-
"
|
|
479
|
-
"
|
|
480
|
-
"
|
|
481
|
-
"
|
|
482
|
-
"
|
|
483
|
-
"
|
|
484
|
-
"vram": 19327352832
|
|
643
|
+
"tags": ["Edición imagen", "Imagen", "API", "Texto a imagen"],
|
|
644
|
+
"models": ["Seedream 4.0", "ByteDance"],
|
|
645
|
+
"date": "2025-09-11",
|
|
646
|
+
"openSource": false,
|
|
647
|
+
"size": 0,
|
|
648
|
+
"vram": 0,
|
|
649
|
+
"usage": 2117
|
|
485
650
|
},
|
|
486
651
|
{
|
|
487
|
-
"name": "
|
|
488
|
-
"title": "
|
|
652
|
+
"name": "api_bfl_flux2_max_sofa_swap",
|
|
653
|
+
"title": "BFL FLUX.2 [max]: Cambio de objeto",
|
|
654
|
+
"description": "Reemplaza objetos en imágenes con FLUX.2 [max] de alta calidad. Perfecto para fotografía de productos y cambiar muebles manteniendo la coherencia.",
|
|
489
655
|
"mediaType": "image",
|
|
490
656
|
"mediaSubtype": "webp",
|
|
491
|
-
"
|
|
492
|
-
"
|
|
493
|
-
"
|
|
494
|
-
"
|
|
495
|
-
"
|
|
496
|
-
"
|
|
657
|
+
"tags": ["Edición imagen", "Imagen", "API"],
|
|
658
|
+
"models": ["Flux2", "Flux", "BFL"],
|
|
659
|
+
"date": "2025-12-22",
|
|
660
|
+
"searchRank": 7,
|
|
661
|
+
"openSource": false,
|
|
662
|
+
"size": 0,
|
|
663
|
+
"vram": 0,
|
|
664
|
+
"thumbnailVariant": "compareSlider"
|
|
497
665
|
},
|
|
498
666
|
{
|
|
499
|
-
"name": "
|
|
500
|
-
"title": "
|
|
667
|
+
"name": "api_google_gemini_image",
|
|
668
|
+
"title": "Google Gemini Imagen",
|
|
669
|
+
"description": "Nano-banana (Gemini-2.5-Flash Image) - edición de imágenes con consistencia.",
|
|
501
670
|
"mediaType": "image",
|
|
502
671
|
"mediaSubtype": "webp",
|
|
503
|
-
"
|
|
504
|
-
"
|
|
505
|
-
"
|
|
506
|
-
"
|
|
507
|
-
"size":
|
|
672
|
+
"tags": ["Edición imagen", "Imagen", "API", "Texto a imagen"],
|
|
673
|
+
"models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
|
|
674
|
+
"date": "2025-08-27",
|
|
675
|
+
"openSource": false,
|
|
676
|
+
"size": 0,
|
|
677
|
+
"vram": 0,
|
|
678
|
+
"usage": 1657
|
|
508
679
|
},
|
|
509
680
|
{
|
|
510
|
-
"name": "
|
|
511
|
-
"title": "
|
|
681
|
+
"name": "api_flux2",
|
|
682
|
+
"title": "Flux.2 Pro",
|
|
683
|
+
"description": "Genera imágenes fotorrealistas con coherencia multirreferencia y renderizado profesional de texto.",
|
|
512
684
|
"mediaType": "image",
|
|
513
685
|
"mediaSubtype": "webp",
|
|
514
|
-
"
|
|
515
|
-
"
|
|
516
|
-
"
|
|
517
|
-
"
|
|
518
|
-
"size":
|
|
519
|
-
"vram":
|
|
686
|
+
"tags": ["Edición imagen", "Imagen", "API", "Texto a imagen"],
|
|
687
|
+
"models": ["Flux.2", "BFL", "Flux"],
|
|
688
|
+
"date": "2025-11-26",
|
|
689
|
+
"openSource": false,
|
|
690
|
+
"size": 0,
|
|
691
|
+
"vram": 0,
|
|
692
|
+
"usage": 852
|
|
520
693
|
},
|
|
521
694
|
{
|
|
522
|
-
"name": "
|
|
523
|
-
"title": "
|
|
695
|
+
"name": "api_topaz_image_enhance",
|
|
696
|
+
"title": "Mejora de imagen Topaz",
|
|
697
|
+
"description": "Mejora profesional de imágenes usando el modelo Reimagine de Topaz con mejora facial y restauración de detalles.",
|
|
524
698
|
"mediaType": "image",
|
|
525
699
|
"mediaSubtype": "webp",
|
|
526
700
|
"thumbnailVariant": "compareSlider",
|
|
527
|
-
"
|
|
528
|
-
"
|
|
529
|
-
"
|
|
530
|
-
"
|
|
531
|
-
"size":
|
|
532
|
-
"vram":
|
|
701
|
+
"tags": ["Imagen", "API", "Mejorar"],
|
|
702
|
+
"models": ["Topaz", "Reimagine"],
|
|
703
|
+
"date": "2025-11-25",
|
|
704
|
+
"openSource": false,
|
|
705
|
+
"size": 0,
|
|
706
|
+
"vram": 0,
|
|
707
|
+
"usage": 576
|
|
533
708
|
},
|
|
534
709
|
{
|
|
535
|
-
"name": "
|
|
536
|
-
"title": "Flux
|
|
710
|
+
"name": "api_bfl_flux_1_kontext_multiple_images_input",
|
|
711
|
+
"title": "Entrada de múltiples imágenes BFL Flux.1 Kontext",
|
|
712
|
+
"description": "Ingresar múltiples imágenes y editarlas con Flux.1 Kontext.",
|
|
537
713
|
"mediaType": "image",
|
|
538
714
|
"mediaSubtype": "webp",
|
|
539
|
-
"
|
|
540
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
541
|
-
"tags": ["
|
|
542
|
-
"models": ["Flux", "BFL"],
|
|
543
|
-
"date": "2025-
|
|
544
|
-
"
|
|
545
|
-
"
|
|
715
|
+
"thumbnailVariant": "compareSlider",
|
|
716
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
|
|
717
|
+
"tags": ["Edición imagen", "Imagen"],
|
|
718
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
719
|
+
"date": "2025-05-29",
|
|
720
|
+
"openSource": false,
|
|
721
|
+
"size": 0,
|
|
722
|
+
"vram": 0,
|
|
723
|
+
"usage": 139
|
|
546
724
|
},
|
|
547
725
|
{
|
|
548
|
-
"name": "
|
|
549
|
-
"title": "
|
|
550
|
-
"description": "
|
|
551
|
-
"thumbnailVariant": "hoverDissolve",
|
|
726
|
+
"name": "api_bfl_flux_1_kontext_pro_image",
|
|
727
|
+
"title": "BFL Flux.1 Kontext Pro",
|
|
728
|
+
"description": "Editar imágenes con imagen pro de Flux.1 Kontext.",
|
|
552
729
|
"mediaType": "image",
|
|
553
730
|
"mediaSubtype": "webp",
|
|
554
|
-
"
|
|
555
|
-
"
|
|
556
|
-
"
|
|
557
|
-
"
|
|
558
|
-
"
|
|
559
|
-
"
|
|
731
|
+
"thumbnailVariant": "compareSlider",
|
|
732
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
|
|
733
|
+
"tags": ["Edición imagen", "Imagen"],
|
|
734
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
735
|
+
"date": "2025-05-29",
|
|
736
|
+
"openSource": false,
|
|
737
|
+
"size": 0,
|
|
738
|
+
"vram": 0,
|
|
739
|
+
"usage": 403
|
|
560
740
|
},
|
|
561
741
|
{
|
|
562
|
-
"name": "
|
|
563
|
-
"title": "Flux
|
|
742
|
+
"name": "api_bfl_flux_1_kontext_max_image",
|
|
743
|
+
"title": "BFL Flux.1 Kontext Max",
|
|
744
|
+
"description": "Editar imágenes con imagen max de Flux.1 Kontext.",
|
|
564
745
|
"mediaType": "image",
|
|
565
746
|
"mediaSubtype": "webp",
|
|
566
|
-
"
|
|
567
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
568
|
-
"tags": ["
|
|
569
|
-
"models": ["Flux", "BFL"],
|
|
570
|
-
"date": "2025-
|
|
571
|
-
"
|
|
572
|
-
"
|
|
747
|
+
"thumbnailVariant": "compareSlider",
|
|
748
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
|
|
749
|
+
"tags": ["Edición imagen", "Imagen"],
|
|
750
|
+
"models": ["Flux", "Kontext", "BFL"],
|
|
751
|
+
"date": "2025-05-29",
|
|
752
|
+
"openSource": false,
|
|
753
|
+
"size": 0,
|
|
754
|
+
"vram": 0,
|
|
755
|
+
"usage": 74
|
|
573
756
|
},
|
|
574
757
|
{
|
|
575
|
-
"name": "
|
|
576
|
-
"title": "
|
|
758
|
+
"name": "api_wan_text_to_image",
|
|
759
|
+
"title": "Wan2.5: Texto a Imagen",
|
|
760
|
+
"description": "Genera imágenes con excelente seguimiento de prompts y calidad visual usando FLUX.1 Pro.",
|
|
577
761
|
"mediaType": "image",
|
|
578
762
|
"mediaSubtype": "webp",
|
|
579
|
-
"
|
|
580
|
-
"
|
|
581
|
-
"
|
|
582
|
-
"
|
|
583
|
-
"
|
|
584
|
-
"
|
|
585
|
-
"
|
|
763
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
764
|
+
"models": ["Wan2.5", "Wan"],
|
|
765
|
+
"date": "2025-09-25",
|
|
766
|
+
"openSource": false,
|
|
767
|
+
"size": 0,
|
|
768
|
+
"vram": 0,
|
|
769
|
+
"usage": 244
|
|
586
770
|
},
|
|
587
771
|
{
|
|
588
|
-
"name": "
|
|
589
|
-
"title": "Texto a imagen
|
|
772
|
+
"name": "api_bfl_flux_pro_t2i",
|
|
773
|
+
"title": "BFL Flux[Pro]: Texto a imagen",
|
|
774
|
+
"description": "Generar imágenes con excelente seguimiento de indicaciones y calidad visual usando FLUX.1 Pro.",
|
|
590
775
|
"mediaType": "image",
|
|
591
776
|
"mediaSubtype": "webp",
|
|
592
|
-
"
|
|
593
|
-
"
|
|
594
|
-
"tags": ["Texto a imagen", "Imagen"],
|
|
777
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
|
|
778
|
+
"tags": ["Edición imagen", "Imagen"],
|
|
595
779
|
"models": ["Flux", "BFL"],
|
|
596
|
-
"date": "2025-
|
|
597
|
-
"
|
|
598
|
-
"
|
|
780
|
+
"date": "2025-05-01",
|
|
781
|
+
"openSource": false,
|
|
782
|
+
"size": 0,
|
|
783
|
+
"vram": 0,
|
|
784
|
+
"usage": 117
|
|
599
785
|
},
|
|
600
786
|
{
|
|
601
|
-
"name": "
|
|
602
|
-
"title": "Texto a imagen
|
|
787
|
+
"name": "api_runway_text_to_image",
|
|
788
|
+
"title": "Runway: Texto a imagen",
|
|
789
|
+
"description": "Generar imágenes de alta calidad a partir de indicaciones de texto usando el modelo AI de Runway.",
|
|
603
790
|
"mediaType": "image",
|
|
604
791
|
"mediaSubtype": "webp",
|
|
605
|
-
"
|
|
606
|
-
"
|
|
607
|
-
"tags": ["Texto a imagen", "Imagen"],
|
|
608
|
-
"models": ["Flux", "BFL"],
|
|
792
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
793
|
+
"models": ["Runway"],
|
|
609
794
|
"date": "2025-03-01",
|
|
610
|
-
"
|
|
795
|
+
"openSource": false,
|
|
796
|
+
"size": 0,
|
|
797
|
+
"vram": 0,
|
|
798
|
+
"usage": 37
|
|
611
799
|
},
|
|
612
800
|
{
|
|
613
|
-
"name": "
|
|
614
|
-
"title": "
|
|
801
|
+
"name": "api_runway_reference_to_image",
|
|
802
|
+
"title": "Runway: Referencia a imagen",
|
|
803
|
+
"description": "Generar nuevas imágenes basadas en estilos y composiciones de referencia con la IA de Runway.",
|
|
615
804
|
"mediaType": "image",
|
|
616
|
-
"mediaSubtype": "webp",
|
|
617
|
-
"description": "Rellenar partes faltantes de imágenes usando inpainting de Flux.",
|
|
618
805
|
"thumbnailVariant": "compareSlider",
|
|
619
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
620
|
-
"tags": ["Imagen a imagen", "Inpaint", "Imagen"],
|
|
621
|
-
"models": ["Flux", "BFL"],
|
|
622
|
-
"date": "2025-03-01",
|
|
623
|
-
"size": 10372346020
|
|
624
|
-
},
|
|
625
|
-
{
|
|
626
|
-
"name": "flux_fill_outpaint_example",
|
|
627
|
-
"title": "Outpaint Flux",
|
|
628
|
-
"mediaType": "image",
|
|
629
806
|
"mediaSubtype": "webp",
|
|
630
|
-
"
|
|
631
|
-
"
|
|
632
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
633
|
-
"tags": ["Outpaint", "Imagen", "Imagen a imagen"],
|
|
634
|
-
"models": ["Flux", "BFL"],
|
|
807
|
+
"tags": ["Imagen a imagen", "Imagen", "API"],
|
|
808
|
+
"models": ["Runway"],
|
|
635
809
|
"date": "2025-03-01",
|
|
636
|
-
"
|
|
810
|
+
"openSource": false,
|
|
811
|
+
"size": 0,
|
|
812
|
+
"vram": 0,
|
|
813
|
+
"usage": 115
|
|
637
814
|
},
|
|
638
815
|
{
|
|
639
|
-
"name": "
|
|
640
|
-
"title": "
|
|
816
|
+
"name": "api_stability_ai_stable_image_ultra_t2i",
|
|
817
|
+
"title": "Stability AI: Texto a imagen Stable Image Ultra",
|
|
818
|
+
"description": "Generar imágenes de alta calidad con excelente adherencia a las indicaciones. Perfecto para casos de uso profesionales en resolución de 1 megapíxel.",
|
|
641
819
|
"mediaType": "image",
|
|
642
820
|
"mediaSubtype": "webp",
|
|
643
|
-
"
|
|
644
|
-
"
|
|
645
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
646
|
-
"tags": ["Imagen a imagen", "ControlNet", "Imagen"],
|
|
647
|
-
"models": ["Flux", "BFL"],
|
|
821
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
822
|
+
"models": ["Stability"],
|
|
648
823
|
"date": "2025-03-01",
|
|
649
|
-
"
|
|
824
|
+
"openSource": false,
|
|
825
|
+
"size": 0,
|
|
826
|
+
"vram": 0,
|
|
827
|
+
"usage": 27
|
|
650
828
|
},
|
|
651
829
|
{
|
|
652
|
-
"name": "
|
|
653
|
-
"title": "
|
|
830
|
+
"name": "api_stability_ai_i2i",
|
|
831
|
+
"title": "Stability AI: Imagen a imagen",
|
|
832
|
+
"description": "Transformar imágenes con generación de alta calidad usando Stability AI, perfecto para edición profesional y transferencia de estilo.",
|
|
654
833
|
"mediaType": "image",
|
|
834
|
+
"thumbnailVariant": "compareSlider",
|
|
655
835
|
"mediaSubtype": "webp",
|
|
656
|
-
"
|
|
657
|
-
"
|
|
658
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
659
|
-
"tags": ["Imagen a imagen", "ControlNet", "Imagen"],
|
|
660
|
-
"models": ["Flux", "BFL"],
|
|
836
|
+
"tags": ["Imagen a imagen", "Imagen", "API"],
|
|
837
|
+
"models": ["Stability"],
|
|
661
838
|
"date": "2025-03-01",
|
|
662
|
-
"
|
|
839
|
+
"openSource": false,
|
|
840
|
+
"size": 0,
|
|
841
|
+
"vram": 0,
|
|
842
|
+
"usage": 65
|
|
663
843
|
},
|
|
664
844
|
{
|
|
665
|
-
"name": "
|
|
666
|
-
"title": "
|
|
845
|
+
"name": "api_stability_ai_sd3.5_t2i",
|
|
846
|
+
"title": "Stability AI: Texto a imagen SD3.5",
|
|
847
|
+
"description": "Generar imágenes de alta calidad con excelente adherencia a las indicaciones. Perfecto para casos de uso profesionales en resolución de 1 megapíxel.",
|
|
667
848
|
"mediaType": "image",
|
|
668
849
|
"mediaSubtype": "webp",
|
|
669
|
-
"
|
|
670
|
-
"
|
|
671
|
-
"tags": ["Imagen a imagen", "ControlNet", "Imagen"],
|
|
672
|
-
"models": ["Flux", "BFL"],
|
|
850
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
851
|
+
"models": ["Stability"],
|
|
673
852
|
"date": "2025-03-01",
|
|
674
|
-
"
|
|
853
|
+
"openSource": false,
|
|
854
|
+
"size": 0,
|
|
855
|
+
"vram": 0,
|
|
856
|
+
"usage": 18
|
|
675
857
|
},
|
|
676
858
|
{
|
|
677
|
-
"name": "
|
|
678
|
-
"title": "
|
|
859
|
+
"name": "api_stability_ai_sd3.5_i2i",
|
|
860
|
+
"title": "Stability AI: Imagen a imagen SD3.5",
|
|
861
|
+
"description": "Generar imágenes de alta calidad con excelente adherencia a las indicaciones. Perfecto para casos de uso profesionales en resolución de 1 megapíxel.",
|
|
679
862
|
"mediaType": "image",
|
|
863
|
+
"thumbnailVariant": "compareSlider",
|
|
680
864
|
"mediaSubtype": "webp",
|
|
681
|
-
"
|
|
682
|
-
"
|
|
683
|
-
"
|
|
684
|
-
"
|
|
685
|
-
"
|
|
686
|
-
"
|
|
865
|
+
"tags": ["Imagen a imagen", "Imagen", "API"],
|
|
866
|
+
"models": ["Stability"],
|
|
867
|
+
"date": "2025-03-01",
|
|
868
|
+
"openSource": false,
|
|
869
|
+
"size": 0,
|
|
870
|
+
"vram": 0,
|
|
871
|
+
"usage": 88
|
|
687
872
|
},
|
|
688
873
|
{
|
|
689
|
-
"name": "
|
|
690
|
-
"title": "Edición de imagen
|
|
874
|
+
"name": "image_qwen_image_edit",
|
|
875
|
+
"title": "Edición de imagen Qwen",
|
|
691
876
|
"mediaType": "image",
|
|
692
877
|
"mediaSubtype": "webp",
|
|
693
|
-
"thumbnailVariant": "
|
|
694
|
-
"description": "Editar imágenes con
|
|
695
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/image/
|
|
696
|
-
"tags": ["
|
|
697
|
-
"models": ["
|
|
698
|
-
"date": "2025-
|
|
699
|
-
"size":
|
|
878
|
+
"thumbnailVariant": "compareSlider",
|
|
879
|
+
"description": "Editar imágenes con edición precisa de texto bilingüe y capacidades de edición dual semántica/apariencia usando el modelo MMDiT de 20B de Qwen-Image-Edit.",
|
|
880
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
881
|
+
"tags": ["Imagen a imagen", "Edición imagen"],
|
|
882
|
+
"models": ["Qwen-Image-Edit"],
|
|
883
|
+
"date": "2025-08-18",
|
|
884
|
+
"size": 31772020572,
|
|
885
|
+
"vram": 31772020572,
|
|
886
|
+
"usage": 1556
|
|
700
887
|
},
|
|
701
888
|
{
|
|
702
|
-
"name": "
|
|
703
|
-
"title": "
|
|
889
|
+
"name": "image_ovis_text_to_image",
|
|
890
|
+
"title": "Ovis-Image texto a imagen",
|
|
704
891
|
"mediaType": "image",
|
|
705
892
|
"mediaSubtype": "webp",
|
|
706
|
-
"description": "
|
|
707
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
893
|
+
"description": "Ovis-Image es un modelo de texto a imagen de 7B parámetros optimizado específicamente para renderizado de texto de alta calidad en imágenes generadas. Diseñado para operar eficientemente bajo restricciones computacionales, sobresale en la generación precisa de imágenes que contienen contenido textual.",
|
|
708
894
|
"tags": ["Texto a imagen", "Imagen"],
|
|
709
|
-
"models": ["
|
|
710
|
-
"date": "2025-
|
|
711
|
-
"size":
|
|
895
|
+
"models": ["Ovis-Image"],
|
|
896
|
+
"date": "2025-12-02",
|
|
897
|
+
"size": 20228222222,
|
|
898
|
+
"vram": 20228222222,
|
|
899
|
+
"usage": 1456
|
|
712
900
|
},
|
|
713
901
|
{
|
|
714
|
-
"name": "
|
|
715
|
-
"title": "
|
|
902
|
+
"name": "image_chrono_edit_14B",
|
|
903
|
+
"title": "ChronoEdit 14B",
|
|
716
904
|
"mediaType": "image",
|
|
717
905
|
"mediaSubtype": "webp",
|
|
718
|
-
"
|
|
719
|
-
"
|
|
720
|
-
"tags": ["
|
|
721
|
-
"models": ["
|
|
722
|
-
"date": "2025-
|
|
723
|
-
"size":
|
|
906
|
+
"thumbnailVariant": "compareSlider",
|
|
907
|
+
"description": "Edición de imágenes impulsada por la comprensión dinámica de modelos de video, creando resultados físicamente plausibles mientras preserva la consistencia del personaje y el estilo.",
|
|
908
|
+
"tags": ["Edición imagen", "Imagen a imagen"],
|
|
909
|
+
"models": ["Wan2.1", "ChronoEdit", "Nvidia"],
|
|
910
|
+
"date": "2025-11-03",
|
|
911
|
+
"size": 41435696988,
|
|
912
|
+
"vram": 41435696988,
|
|
913
|
+
"usage": 611
|
|
724
914
|
},
|
|
725
915
|
{
|
|
726
|
-
"name": "
|
|
727
|
-
"title": "
|
|
916
|
+
"name": "flux_kontext_dev_basic",
|
|
917
|
+
"title": "Flux Kontext Dev(Básico)",
|
|
728
918
|
"mediaType": "image",
|
|
729
919
|
"mediaSubtype": "webp",
|
|
730
|
-
"
|
|
731
|
-
"
|
|
732
|
-
"
|
|
733
|
-
"
|
|
734
|
-
"
|
|
735
|
-
"
|
|
920
|
+
"thumbnailVariant": "hoverDissolve",
|
|
921
|
+
"description": "Editar imagen usando Flux Kontext con visibilidad completa de nodos, perfecto para aprender el flujo de trabajo.",
|
|
922
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
|
|
923
|
+
"tags": ["Edición imagen", "Imagen a imagen"],
|
|
924
|
+
"models": ["Flux", "BFL"],
|
|
925
|
+
"date": "2025-06-26",
|
|
926
|
+
"size": 17641578168,
|
|
927
|
+
"vram": 19327352832,
|
|
928
|
+
"usage": 866
|
|
736
929
|
},
|
|
737
930
|
{
|
|
738
|
-
"name": "
|
|
739
|
-
"title": "
|
|
931
|
+
"name": "api_luma_photon_i2i",
|
|
932
|
+
"title": "Luma Photon: Imagen a imagen",
|
|
933
|
+
"description": "Guiar la generación de imágenes usando una combinación de imágenes e indicaciones.",
|
|
740
934
|
"mediaType": "image",
|
|
741
935
|
"mediaSubtype": "webp",
|
|
742
936
|
"thumbnailVariant": "compareSlider",
|
|
743
|
-
"
|
|
744
|
-
"
|
|
745
|
-
"
|
|
746
|
-
"
|
|
747
|
-
"
|
|
748
|
-
"
|
|
937
|
+
"tags": ["Imagen a imagen", "Imagen", "API"],
|
|
938
|
+
"models": ["Luma"],
|
|
939
|
+
"date": "2025-03-01",
|
|
940
|
+
"openSource": false,
|
|
941
|
+
"size": 0,
|
|
942
|
+
"vram": 0,
|
|
943
|
+
"usage": 101
|
|
749
944
|
},
|
|
750
945
|
{
|
|
751
|
-
"name": "
|
|
752
|
-
"title": "
|
|
946
|
+
"name": "api_luma_photon_style_ref",
|
|
947
|
+
"title": "Luma Photon: Referencia de estilo",
|
|
948
|
+
"description": "Generar imágenes mezclando referencias de estilo con control preciso usando Luma Photon.",
|
|
753
949
|
"mediaType": "image",
|
|
754
950
|
"mediaSubtype": "webp",
|
|
755
951
|
"thumbnailVariant": "compareSlider",
|
|
756
|
-
"
|
|
757
|
-
"
|
|
758
|
-
"
|
|
759
|
-
"
|
|
760
|
-
"
|
|
761
|
-
"
|
|
952
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
953
|
+
"models": ["Luma"],
|
|
954
|
+
"date": "2025-03-01",
|
|
955
|
+
"openSource": false,
|
|
956
|
+
"size": 0,
|
|
957
|
+
"vram": 0,
|
|
958
|
+
"usage": 79
|
|
762
959
|
},
|
|
763
960
|
{
|
|
764
|
-
"name": "
|
|
765
|
-
"title": "
|
|
961
|
+
"name": "api_recraft_image_gen_with_color_control",
|
|
962
|
+
"title": "Recraft: Generación de imágenes con control de color",
|
|
963
|
+
"description": "Generar imágenes con paletas de colores personalizadas y visuales específicos de marca usando Recraft.",
|
|
766
964
|
"mediaType": "image",
|
|
767
965
|
"mediaSubtype": "webp",
|
|
768
|
-
"
|
|
769
|
-
"
|
|
770
|
-
"tags": ["Texto a imagen", "Imagen"],
|
|
771
|
-
"models": ["SD3.5", "Stability"],
|
|
966
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
967
|
+
"models": ["Recraft"],
|
|
772
968
|
"date": "2025-03-01",
|
|
773
|
-
"
|
|
969
|
+
"openSource": false,
|
|
970
|
+
"size": 0,
|
|
971
|
+
"vram": 0,
|
|
972
|
+
"usage": 3
|
|
774
973
|
},
|
|
775
974
|
{
|
|
776
|
-
"name": "
|
|
777
|
-
"title": "
|
|
975
|
+
"name": "api_recraft_image_gen_with_style_control",
|
|
976
|
+
"title": "Recraft: Generación de imágenes con control de estilo",
|
|
977
|
+
"description": "Controlar estilo con ejemplos visuales, alinear posicionamiento y ajustar objetos finamente. Almacenar y compartir estilos para consistencia perfecta de marca.",
|
|
778
978
|
"mediaType": "image",
|
|
779
979
|
"mediaSubtype": "webp",
|
|
780
|
-
"
|
|
781
|
-
"
|
|
782
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
783
|
-
"tags": ["Imagen a imagen", "Imagen", "ControlNet"],
|
|
784
|
-
"models": ["SD3.5", "Stability"],
|
|
980
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
981
|
+
"models": ["Recraft"],
|
|
785
982
|
"date": "2025-03-01",
|
|
786
|
-
"
|
|
983
|
+
"openSource": false,
|
|
984
|
+
"size": 0,
|
|
985
|
+
"vram": 0,
|
|
986
|
+
"usage": 6
|
|
787
987
|
},
|
|
788
988
|
{
|
|
789
|
-
"name": "
|
|
790
|
-
"title": "
|
|
989
|
+
"name": "api_recraft_vector_gen",
|
|
990
|
+
"title": "Recraft: Generación vectorial",
|
|
991
|
+
"description": "Generar imágenes vectoriales de alta calidad a partir de indicaciones de texto usando el generador vectorial AI de Recraft.",
|
|
791
992
|
"mediaType": "image",
|
|
792
993
|
"mediaSubtype": "webp",
|
|
793
|
-
"
|
|
794
|
-
"
|
|
795
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
796
|
-
"tags": ["Imagen a imagen", "Imagen", "ControlNet"],
|
|
797
|
-
"models": ["SD3.5", "Stability"],
|
|
994
|
+
"tags": ["Texto a imagen", "Imagen", "API", "Vectorial"],
|
|
995
|
+
"models": ["Recraft"],
|
|
798
996
|
"date": "2025-03-01",
|
|
799
|
-
"
|
|
997
|
+
"openSource": false,
|
|
998
|
+
"size": 0,
|
|
999
|
+
"vram": 0,
|
|
1000
|
+
"usage": 16
|
|
800
1001
|
},
|
|
801
1002
|
{
|
|
802
|
-
"name": "
|
|
803
|
-
"title": "
|
|
1003
|
+
"name": "api_ideogram_v3_t2i",
|
|
1004
|
+
"title": "Ideogram V3: Texto a imagen",
|
|
1005
|
+
"description": "Generar imágenes de calidad profesional con excelente alineación de indicaciones, fotorrealismo y renderizado de texto usando Ideogram V3.",
|
|
804
1006
|
"mediaType": "image",
|
|
805
1007
|
"mediaSubtype": "webp",
|
|
806
|
-
"
|
|
807
|
-
"
|
|
808
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
809
|
-
"tags": ["Imagen a imagen", "Imagen"],
|
|
810
|
-
"models": ["SD3.5", "Stability"],
|
|
1008
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
1009
|
+
"models": ["Ideogram"],
|
|
811
1010
|
"date": "2025-03-01",
|
|
812
|
-
"
|
|
1011
|
+
"openSource": false,
|
|
1012
|
+
"size": 0,
|
|
1013
|
+
"vram": 0,
|
|
1014
|
+
"usage": 8
|
|
813
1015
|
},
|
|
814
1016
|
{
|
|
815
|
-
"name": "
|
|
816
|
-
"title": "
|
|
1017
|
+
"name": "api_openai_image_1_t2i",
|
|
1018
|
+
"title": "OpenAI: Texto a imagen GPT-Image-1",
|
|
1019
|
+
"description": "Generar imágenes a partir de indicaciones de texto usando la API de OpenAI GPT Image 1.",
|
|
817
1020
|
"mediaType": "image",
|
|
818
1021
|
"mediaSubtype": "webp",
|
|
819
|
-
"
|
|
820
|
-
"
|
|
821
|
-
"tags": ["Texto a imagen", "Imagen"],
|
|
822
|
-
"models": ["SDXL", "Stability"],
|
|
1022
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
1023
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
823
1024
|
"date": "2025-03-01",
|
|
824
|
-
"
|
|
1025
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1026
|
+
"openSource": false,
|
|
1027
|
+
"size": 0,
|
|
1028
|
+
"vram": 0,
|
|
1029
|
+
"usage": 9
|
|
825
1030
|
},
|
|
826
1031
|
{
|
|
827
|
-
"name": "
|
|
828
|
-
"title": "
|
|
1032
|
+
"name": "api_openai_image_1_i2i",
|
|
1033
|
+
"title": "OpenAI: Imagen a imagen GPT-Image-1",
|
|
1034
|
+
"description": "Generar imágenes a partir de imágenes de entrada usando la API de OpenAI GPT Image 1.",
|
|
829
1035
|
"mediaType": "image",
|
|
830
1036
|
"mediaSubtype": "webp",
|
|
831
|
-
"
|
|
832
|
-
"
|
|
833
|
-
"
|
|
834
|
-
"models": ["SDXL", "Stability"],
|
|
1037
|
+
"thumbnailVariant": "compareSlider",
|
|
1038
|
+
"tags": ["Imagen a imagen", "Imagen", "API"],
|
|
1039
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
835
1040
|
"date": "2025-03-01",
|
|
836
|
-
"
|
|
1041
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1042
|
+
"openSource": false,
|
|
1043
|
+
"size": 0,
|
|
1044
|
+
"vram": 0,
|
|
1045
|
+
"usage": 76
|
|
837
1046
|
},
|
|
838
1047
|
{
|
|
839
|
-
"name": "
|
|
840
|
-
"title": "
|
|
1048
|
+
"name": "api_openai_image_1_inpaint",
|
|
1049
|
+
"title": "OpenAI: Inpaint GPT-Image-1",
|
|
1050
|
+
"description": "Editar imágenes usando inpainting con la API de OpenAI GPT Image 1.",
|
|
841
1051
|
"mediaType": "image",
|
|
842
1052
|
"mediaSubtype": "webp",
|
|
843
|
-
"
|
|
844
|
-
"
|
|
845
|
-
"
|
|
846
|
-
"models": ["SDXL", "Stability"],
|
|
1053
|
+
"thumbnailVariant": "compareSlider",
|
|
1054
|
+
"tags": ["Inpaint", "Imagen", "API"],
|
|
1055
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
847
1056
|
"date": "2025-03-01",
|
|
848
|
-
"
|
|
1057
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1058
|
+
"openSource": false,
|
|
1059
|
+
"size": 0,
|
|
1060
|
+
"vram": 0,
|
|
1061
|
+
"usage": 21
|
|
849
1062
|
},
|
|
850
1063
|
{
|
|
851
|
-
"name": "
|
|
852
|
-
"title": "
|
|
1064
|
+
"name": "api_openai_image_1_multi_inputs",
|
|
1065
|
+
"title": "OpenAI: Múltiples entradas GPT-Image-1",
|
|
1066
|
+
"description": "Generar imágenes a partir de múltiples entradas usando la API de OpenAI GPT Image 1.",
|
|
853
1067
|
"mediaType": "image",
|
|
854
1068
|
"mediaSubtype": "webp",
|
|
855
|
-
"
|
|
856
|
-
"
|
|
857
|
-
"
|
|
858
|
-
"models": ["SDXL", "Stability"],
|
|
1069
|
+
"thumbnailVariant": "compareSlider",
|
|
1070
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
1071
|
+
"models": ["GPT-Image-1", "OpenAI"],
|
|
859
1072
|
"date": "2025-03-01",
|
|
860
|
-
"
|
|
1073
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
|
|
1074
|
+
"openSource": false,
|
|
1075
|
+
"size": 0,
|
|
1076
|
+
"vram": 0,
|
|
1077
|
+
"usage": 5
|
|
861
1078
|
},
|
|
862
1079
|
{
|
|
863
|
-
"name": "
|
|
864
|
-
"title": "
|
|
1080
|
+
"name": "api_openai_dall_e_2_t2i",
|
|
1081
|
+
"title": "OpenAI: Texto a imagen Dall-E 2",
|
|
1082
|
+
"description": "Generar imágenes a partir de indicaciones de texto usando la API de OpenAI Dall-E 2.",
|
|
865
1083
|
"mediaType": "image",
|
|
866
1084
|
"mediaSubtype": "webp",
|
|
867
|
-
"
|
|
868
|
-
"
|
|
869
|
-
"
|
|
870
|
-
"
|
|
871
|
-
"
|
|
872
|
-
"size":
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
{
|
|
877
|
-
"moduleName": "default",
|
|
878
|
-
"type": "video",
|
|
879
|
-
"category": "GENERATION TYPE",
|
|
880
|
-
"icon": "icon-[lucide--film]",
|
|
881
|
-
"title": "Video",
|
|
882
|
-
"templates": [
|
|
1085
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
1086
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1087
|
+
"date": "2025-03-01",
|
|
1088
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-2",
|
|
1089
|
+
"openSource": false,
|
|
1090
|
+
"size": 0,
|
|
1091
|
+
"vram": 0,
|
|
1092
|
+
"usage": 4
|
|
1093
|
+
},
|
|
883
1094
|
{
|
|
884
|
-
"name": "
|
|
885
|
-
"title": "
|
|
886
|
-
"description": "
|
|
1095
|
+
"name": "api_openai_dall_e_2_inpaint",
|
|
1096
|
+
"title": "OpenAI: Inpaint Dall-E 2",
|
|
1097
|
+
"description": "Editar imágenes usando inpainting con la API de OpenAI Dall-E 2.",
|
|
887
1098
|
"mediaType": "image",
|
|
888
1099
|
"mediaSubtype": "webp",
|
|
889
|
-
"
|
|
890
|
-
"tags": ["
|
|
891
|
-
"models": ["
|
|
892
|
-
"date": "2025-
|
|
893
|
-
"
|
|
1100
|
+
"thumbnailVariant": "compareSlider",
|
|
1101
|
+
"tags": ["Inpaint", "Imagen", "API"],
|
|
1102
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1103
|
+
"date": "2025-03-01",
|
|
1104
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-2",
|
|
1105
|
+
"openSource": false,
|
|
1106
|
+
"size": 0,
|
|
1107
|
+
"vram": 0,
|
|
1108
|
+
"usage": 12
|
|
894
1109
|
},
|
|
895
1110
|
{
|
|
896
|
-
"name": "
|
|
897
|
-
"title": "
|
|
898
|
-
"description": "
|
|
1111
|
+
"name": "api_openai_dall_e_3_t2i",
|
|
1112
|
+
"title": "OpenAI: Texto a imagen Dall-E 3",
|
|
1113
|
+
"description": "Generar imágenes a partir de indicaciones de texto usando la API de OpenAI Dall-E 3.",
|
|
899
1114
|
"mediaType": "image",
|
|
900
1115
|
"mediaSubtype": "webp",
|
|
901
|
-
"
|
|
902
|
-
"
|
|
903
|
-
"
|
|
904
|
-
"
|
|
905
|
-
"
|
|
906
|
-
"size":
|
|
1116
|
+
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
1117
|
+
"models": ["Dall-E", "OpenAI"],
|
|
1118
|
+
"date": "2025-03-01",
|
|
1119
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-3",
|
|
1120
|
+
"openSource": false,
|
|
1121
|
+
"size": 0,
|
|
1122
|
+
"vram": 0,
|
|
1123
|
+
"usage": 33
|
|
907
1124
|
},
|
|
908
1125
|
{
|
|
909
|
-
"name": "
|
|
910
|
-
"title": "
|
|
911
|
-
"description": "Generar transiciones de video suaves definiendo fotogramas de inicio y fin.",
|
|
1126
|
+
"name": "image_chroma1_radiance_text_to_image",
|
|
1127
|
+
"title": "Chroma1 Radiance Texto a Imagen",
|
|
912
1128
|
"mediaType": "image",
|
|
913
1129
|
"mediaSubtype": "webp",
|
|
914
|
-
"
|
|
915
|
-
"
|
|
916
|
-
"
|
|
917
|
-
"
|
|
918
|
-
"
|
|
919
|
-
"
|
|
1130
|
+
"description": "Chroma1-Radiance trabaja directamente con píxeles de imagen en lugar de latentes comprimidos, ofreciendo imágenes de mayor calidad con menos artefactos y distorsión.",
|
|
1131
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1132
|
+
"models": ["Chroma"],
|
|
1133
|
+
"date": "2025-09-18",
|
|
1134
|
+
"size": 23622320128,
|
|
1135
|
+
"vram": 23622320128,
|
|
1136
|
+
"usage": 1149
|
|
920
1137
|
},
|
|
921
1138
|
{
|
|
922
|
-
"name": "
|
|
923
|
-
"title": "
|
|
924
|
-
"description": "Marco unificado de animación y reemplazo de personajes con replicación precisa de movimiento y expresión。",
|
|
1139
|
+
"name": "image_chroma_text_to_image",
|
|
1140
|
+
"title": "Texto a imagen Chroma",
|
|
925
1141
|
"mediaType": "image",
|
|
926
1142
|
"mediaSubtype": "webp",
|
|
927
|
-
"
|
|
928
|
-
"tags": ["
|
|
929
|
-
"models": ["
|
|
930
|
-
"date": "2025-
|
|
931
|
-
"size":
|
|
1143
|
+
"description": "Chroma está modificado de flux y tiene algunos cambios en la arquitectura.",
|
|
1144
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1145
|
+
"models": ["Chroma", "Flux"],
|
|
1146
|
+
"date": "2025-06-04",
|
|
1147
|
+
"size": 23289460163,
|
|
1148
|
+
"vram": 15569256448,
|
|
1149
|
+
"usage": 1423
|
|
932
1150
|
},
|
|
933
1151
|
{
|
|
934
|
-
"name": "
|
|
935
|
-
"title": "
|
|
936
|
-
"description": "Genera vídeos 720p de alta calidad a partir de indicaciones de texto, con control cinematográfico de cámara, expresiones emocionales y simulación física. Soporta varios estilos, incluyendo realista, anime y renderizado de texto 3D.",
|
|
1152
|
+
"name": "image_newbieimage_exp0_1-t2i",
|
|
1153
|
+
"title": "NewBie Exp0.1: Generación de anime",
|
|
937
1154
|
"mediaType": "image",
|
|
938
1155
|
"mediaSubtype": "webp",
|
|
939
|
-
"
|
|
940
|
-
"
|
|
941
|
-
"
|
|
942
|
-
"
|
|
1156
|
+
"description": "Genera imágenes de anime detalladas con NewBie Exp0.1 y promts XML para mejores escenas con varios personajes.",
|
|
1157
|
+
"tags": ["Texto a imagen", "Imagen", "Anime"],
|
|
1158
|
+
"models": ["NewBie"],
|
|
1159
|
+
"date": "2025-12-19",
|
|
1160
|
+
"size": 16181289287,
|
|
1161
|
+
"vram": 16181289287
|
|
943
1162
|
},
|
|
944
1163
|
{
|
|
945
|
-
"name": "
|
|
946
|
-
"title": "
|
|
947
|
-
"description": "Anima imágenes fijas y conviértelas en videos dinámicos con movimiento preciso y control de cámara. Mantiene la coherencia visual mientras da vida a fotos e ilustraciones con movimientos suaves y naturales.",
|
|
1164
|
+
"name": "image_netayume_lumina_t2i",
|
|
1165
|
+
"title": "NetaYume Lumina Texto a Imagen",
|
|
948
1166
|
"mediaType": "image",
|
|
949
1167
|
"mediaSubtype": "webp",
|
|
950
|
-
"
|
|
951
|
-
"
|
|
952
|
-
"
|
|
953
|
-
"
|
|
1168
|
+
"description": "Generación de imágenes de estilo anime de alta calidad con comprensión mejorada de personajes y texturas detalladas. Ajustado finamente desde Neta Lumina en el conjunto de datos Danbooru.",
|
|
1169
|
+
"tags": ["Texto a imagen", "Imagen", "Anime"],
|
|
1170
|
+
"models": ["OmniGen"],
|
|
1171
|
+
"date": "2025-10-10",
|
|
1172
|
+
"size": 10619306639,
|
|
1173
|
+
"vram": 10619306639,
|
|
1174
|
+
"usage": 1536
|
|
954
1175
|
},
|
|
955
1176
|
{
|
|
956
|
-
"name": "
|
|
957
|
-
"title": "
|
|
958
|
-
"description": "Transforma imágenes estáticas y audio en videos dinámicos con sincronización perfecta y generación de nivel por minuto.",
|
|
1177
|
+
"name": "image_flux.1_fill_dev_OneReward",
|
|
1178
|
+
"title": "Flux.1 Dev OneReward",
|
|
959
1179
|
"mediaType": "image",
|
|
960
1180
|
"mediaSubtype": "webp",
|
|
961
|
-
"
|
|
962
|
-
"
|
|
963
|
-
"
|
|
964
|
-
"
|
|
965
|
-
"
|
|
1181
|
+
"thumbnailVariant": "compareSlider",
|
|
1182
|
+
"description": "Supports various tasks such as image inpainting, outpainting, and object removal",
|
|
1183
|
+
"tags": ["Inpaint", "Outpaint"],
|
|
1184
|
+
"models": ["Flux", "BFL"],
|
|
1185
|
+
"date": "2025-09-21",
|
|
1186
|
+
"size": 29001766666,
|
|
1187
|
+
"vram": 21474836480,
|
|
1188
|
+
"usage": 368
|
|
966
1189
|
},
|
|
967
1190
|
{
|
|
968
|
-
"name": "
|
|
969
|
-
"title": "
|
|
970
|
-
"description": "Genera videos basados en audio, imagen y texto, manteniendo la sincronización labial del personaje.",
|
|
1191
|
+
"name": "flux_dev_checkpoint_example",
|
|
1192
|
+
"title": "Flux Dev fp8",
|
|
971
1193
|
"mediaType": "image",
|
|
972
1194
|
"mediaSubtype": "webp",
|
|
973
|
-
"
|
|
974
|
-
"
|
|
975
|
-
"
|
|
976
|
-
"
|
|
1195
|
+
"description": "Generar imágenes usando la versión cuantizada Flux Dev fp8. Adecuado para dispositivos con VRAM limitada, requiere solo un archivo de modelo, pero la calidad de imagen es ligeramente inferior a la versión completa.",
|
|
1196
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1197
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1198
|
+
"models": ["Flux", "BFL"],
|
|
1199
|
+
"date": "2025-03-01",
|
|
1200
|
+
"size": 17244293693,
|
|
1201
|
+
"vram": 18253611008,
|
|
1202
|
+
"usage": 310
|
|
977
1203
|
},
|
|
978
1204
|
{
|
|
979
|
-
"name": "
|
|
980
|
-
"title": "
|
|
981
|
-
"description": "
|
|
1205
|
+
"name": "flux1_dev_uso_reference_image_gen",
|
|
1206
|
+
"title": "Generación de Imágenes de Referencia Flux.1 Dev USO",
|
|
1207
|
+
"description": "Usa imágenes de referencia para controlar tanto el estilo como el sujeto: mantén el rostro de tu personaje mientras cambias el estilo artístico, o aplica estilos artísticos a nuevas escenas",
|
|
1208
|
+
"thumbnailVariant": "hoverDissolve",
|
|
982
1209
|
"mediaType": "image",
|
|
983
1210
|
"mediaSubtype": "webp",
|
|
984
|
-
"
|
|
985
|
-
"
|
|
986
|
-
"
|
|
987
|
-
"
|
|
988
|
-
"size":
|
|
1211
|
+
"tags": ["Imagen a imagen", "Imagen"],
|
|
1212
|
+
"models": ["Flux", "BFL"],
|
|
1213
|
+
"date": "2025-09-02",
|
|
1214
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
|
|
1215
|
+
"size": 18597208392,
|
|
1216
|
+
"vram": 19864223744,
|
|
1217
|
+
"usage": 1624
|
|
989
1218
|
},
|
|
990
1219
|
{
|
|
991
|
-
"name": "
|
|
992
|
-
"title": "
|
|
993
|
-
"description": "Generar videos guiados por controles de pose, profundidad y borde usando Wan 2.2 Fun Control.",
|
|
1220
|
+
"name": "flux_schnell",
|
|
1221
|
+
"title": "Flux Schnell fp8",
|
|
994
1222
|
"mediaType": "image",
|
|
995
1223
|
"mediaSubtype": "webp",
|
|
996
|
-
"
|
|
997
|
-
"
|
|
998
|
-
"
|
|
999
|
-
"
|
|
1000
|
-
"
|
|
1224
|
+
"description": "Generar rápidamente imágenes con la versión cuantizada Flux Schnell fp8. Ideal para hardware de gama baja, requiere solo 4 pasos para generar imágenes.",
|
|
1225
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1226
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1227
|
+
"models": ["Flux", "BFL"],
|
|
1228
|
+
"date": "2025-03-01",
|
|
1229
|
+
"size": 17233556275,
|
|
1230
|
+
"vram": 18253611008,
|
|
1231
|
+
"usage": 99
|
|
1001
1232
|
},
|
|
1002
1233
|
{
|
|
1003
|
-
"name": "
|
|
1004
|
-
"title": "
|
|
1005
|
-
"description": "Generar videos con controles de movimiento de cámara incluyendo panorámica, zoom y rotación usando Wan 2.2 Fun Camera Control.",
|
|
1234
|
+
"name": "flux1_krea_dev",
|
|
1235
|
+
"title": "Flux.1 Krea Dev",
|
|
1006
1236
|
"mediaType": "image",
|
|
1007
1237
|
"mediaSubtype": "webp",
|
|
1008
|
-
"
|
|
1009
|
-
"
|
|
1010
|
-
"
|
|
1011
|
-
"
|
|
1012
|
-
"
|
|
1238
|
+
"description": "Un modelo FLUX afinado que lleva el fotorrealismo al máximo",
|
|
1239
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
|
|
1240
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1241
|
+
"models": ["Flux", "BFL"],
|
|
1242
|
+
"date": "2025-07-31",
|
|
1243
|
+
"size": 22269405430,
|
|
1244
|
+
"vram": 23085449216,
|
|
1245
|
+
"usage": 1160
|
|
1013
1246
|
},
|
|
1014
1247
|
{
|
|
1015
|
-
"name": "
|
|
1016
|
-
"title": "
|
|
1017
|
-
"description": "Generar videos a partir de texto o imágenes usando el modelo híbrido Wan 2.2 5B",
|
|
1248
|
+
"name": "flux_dev_full_text_to_image",
|
|
1249
|
+
"title": "Texto a imagen completo Flux Dev",
|
|
1018
1250
|
"mediaType": "image",
|
|
1019
1251
|
"mediaSubtype": "webp",
|
|
1020
|
-
"
|
|
1021
|
-
"
|
|
1022
|
-
"
|
|
1023
|
-
"
|
|
1024
|
-
"
|
|
1252
|
+
"description": "Generar imágenes de alta calidad con la versión completa de Flux Dev. Requiere mayor VRAM y múltiples archivos de modelo, pero proporciona la mejor capacidad de seguimiento de indicaciones y calidad de imagen.",
|
|
1253
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1254
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1255
|
+
"models": ["Flux", "BFL"],
|
|
1256
|
+
"date": "2025-03-01",
|
|
1257
|
+
"size": 34177202258,
|
|
1258
|
+
"vram": 23622320128,
|
|
1259
|
+
"usage": 309
|
|
1025
1260
|
},
|
|
1026
1261
|
{
|
|
1027
|
-
"name": "
|
|
1028
|
-
"title": "
|
|
1029
|
-
"description": "Inpainting de video eficiente desde fotogramas de inicio y fin. El modelo 5B ofrece iteraciones rápidas para probar flujos de trabajo.",
|
|
1262
|
+
"name": "flux_schnell_full_text_to_image",
|
|
1263
|
+
"title": "Texto a imagen completo Flux Schnell",
|
|
1030
1264
|
"mediaType": "image",
|
|
1031
1265
|
"mediaSubtype": "webp",
|
|
1032
|
-
"
|
|
1033
|
-
"
|
|
1034
|
-
"
|
|
1035
|
-
"
|
|
1266
|
+
"description": "Generar rápidamente imágenes con la versión completa de Flux Schnell. Usa licencia Apache2.0, requiere solo 4 pasos para generar imágenes manteniendo buena calidad de imagen.",
|
|
1267
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
|
|
1268
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1269
|
+
"models": ["Flux", "BFL"],
|
|
1270
|
+
"date": "2025-03-01",
|
|
1271
|
+
"size": 34155727421,
|
|
1272
|
+
"vram": 34155727421,
|
|
1273
|
+
"usage": 28
|
|
1036
1274
|
},
|
|
1037
1275
|
{
|
|
1038
|
-
"name": "
|
|
1039
|
-
"title": "
|
|
1040
|
-
"description": "Control de video multicondición con guía de pose, profundidad y bordes. Tamaño compacto de 5B para desarrollo experimental.",
|
|
1276
|
+
"name": "flux_fill_inpaint_example",
|
|
1277
|
+
"title": "Inpaint Flux",
|
|
1041
1278
|
"mediaType": "image",
|
|
1042
1279
|
"mediaSubtype": "webp",
|
|
1043
|
-
"
|
|
1044
|
-
"
|
|
1045
|
-
"
|
|
1046
|
-
"
|
|
1280
|
+
"description": "Rellenar partes faltantes de imágenes usando inpainting de Flux.",
|
|
1281
|
+
"thumbnailVariant": "compareSlider",
|
|
1282
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
1283
|
+
"tags": ["Imagen a imagen", "Inpaint", "Imagen"],
|
|
1284
|
+
"models": ["Flux", "BFL"],
|
|
1285
|
+
"date": "2025-03-01",
|
|
1286
|
+
"size": 10372346020,
|
|
1287
|
+
"vram": 10372346020,
|
|
1288
|
+
"usage": 437
|
|
1047
1289
|
},
|
|
1048
1290
|
{
|
|
1049
|
-
"name": "
|
|
1050
|
-
"title": "
|
|
1051
|
-
"description": "Transformar descripciones de texto en videos de alta calidad. Soporta tanto 480p como 720p con el modelo VACE-14B.",
|
|
1291
|
+
"name": "flux_fill_outpaint_example",
|
|
1292
|
+
"title": "Outpaint Flux",
|
|
1052
1293
|
"mediaType": "image",
|
|
1053
1294
|
"mediaSubtype": "webp",
|
|
1054
|
-
"
|
|
1055
|
-
"
|
|
1056
|
-
"
|
|
1057
|
-
"
|
|
1058
|
-
"
|
|
1295
|
+
"description": "Extender imágenes más allá de los límites usando outpainting de Flux.",
|
|
1296
|
+
"thumbnailVariant": "compareSlider",
|
|
1297
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
|
|
1298
|
+
"tags": ["Outpaint", "Imagen", "Imagen a imagen"],
|
|
1299
|
+
"models": ["Flux", "BFL"],
|
|
1300
|
+
"date": "2025-03-01",
|
|
1301
|
+
"size": 10372346020,
|
|
1302
|
+
"vram": 10372346020,
|
|
1303
|
+
"usage": 443
|
|
1059
1304
|
},
|
|
1060
1305
|
{
|
|
1061
|
-
"name": "
|
|
1062
|
-
"title": "
|
|
1063
|
-
"description": "Crear videos que coincidan con el estilo y contenido de una imagen de referencia. Perfecto para generación de video consistente en estilo.",
|
|
1306
|
+
"name": "flux_canny_model_example",
|
|
1307
|
+
"title": "Modelo Canny Flux",
|
|
1064
1308
|
"mediaType": "image",
|
|
1065
1309
|
"mediaSubtype": "webp",
|
|
1066
|
-
"
|
|
1067
|
-
"
|
|
1068
|
-
"
|
|
1069
|
-
"
|
|
1070
|
-
"
|
|
1310
|
+
"description": "Generar imágenes guiadas por detección de bordes usando Flux Canny.",
|
|
1311
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1312
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1313
|
+
"tags": ["Imagen a imagen", "ControlNet", "Imagen"],
|
|
1314
|
+
"models": ["Flux", "BFL"],
|
|
1315
|
+
"date": "2025-03-01",
|
|
1316
|
+
"size": 34177202258,
|
|
1317
|
+
"vram": 34177202258,
|
|
1318
|
+
"usage": 109
|
|
1071
1319
|
},
|
|
1072
1320
|
{
|
|
1073
|
-
"name": "
|
|
1074
|
-
"title": "
|
|
1075
|
-
"description": "Generar videos controlando videos de entrada e imágenes de referencia usando Wan VACE.",
|
|
1321
|
+
"name": "flux_depth_lora_example",
|
|
1322
|
+
"title": "LoRA de profundidad Flux",
|
|
1076
1323
|
"mediaType": "image",
|
|
1077
1324
|
"mediaSubtype": "webp",
|
|
1078
|
-
"
|
|
1079
|
-
"
|
|
1080
|
-
"
|
|
1081
|
-
"
|
|
1082
|
-
"
|
|
1083
|
-
"
|
|
1325
|
+
"description": "Generar imágenes guiadas por información de profundidad usando Flux LoRA.",
|
|
1326
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1327
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1328
|
+
"tags": ["Imagen a imagen", "ControlNet", "Imagen"],
|
|
1329
|
+
"models": ["Flux", "BFL"],
|
|
1330
|
+
"date": "2025-03-01",
|
|
1331
|
+
"size": 35412005356,
|
|
1332
|
+
"vram": 35412005356,
|
|
1333
|
+
"usage": 223
|
|
1084
1334
|
},
|
|
1085
1335
|
{
|
|
1086
|
-
"name": "
|
|
1087
|
-
"title": "
|
|
1088
|
-
"description": "Generar videos extendidos expandiendo el tamaño de video usando outpainting de Wan VACE.",
|
|
1336
|
+
"name": "flux_redux_model_example",
|
|
1337
|
+
"title": "Modelo Redux Flux",
|
|
1089
1338
|
"mediaType": "image",
|
|
1090
1339
|
"mediaSubtype": "webp",
|
|
1091
|
-
"
|
|
1092
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
1093
|
-
"tags": ["
|
|
1094
|
-
"models": ["
|
|
1095
|
-
"date": "2025-
|
|
1096
|
-
"size":
|
|
1340
|
+
"description": "Generar imágenes transfiriendo estilo de imágenes de referencia usando Flux Redux.",
|
|
1341
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
|
|
1342
|
+
"tags": ["Imagen a imagen", "ControlNet", "Imagen"],
|
|
1343
|
+
"models": ["Flux", "BFL"],
|
|
1344
|
+
"date": "2025-03-01",
|
|
1345
|
+
"size": 35154307318,
|
|
1346
|
+
"vram": 35154307318,
|
|
1347
|
+
"usage": 226
|
|
1097
1348
|
},
|
|
1098
1349
|
{
|
|
1099
|
-
"name": "
|
|
1100
|
-
"title": "
|
|
1101
|
-
"description": "Generar transiciones de video suaves definiendo fotogramas de inicio y fin. Soporta secuencias de fotogramas clave personalizadas.",
|
|
1350
|
+
"name": "image_omnigen2_t2i",
|
|
1351
|
+
"title": "Texto a imagen OmniGen2",
|
|
1102
1352
|
"mediaType": "image",
|
|
1103
1353
|
"mediaSubtype": "webp",
|
|
1104
|
-
"
|
|
1105
|
-
"
|
|
1106
|
-
"
|
|
1107
|
-
"
|
|
1108
|
-
"
|
|
1354
|
+
"description": "Generar imágenes de alta calidad a partir de indicaciones de texto usando el modelo multimodal unificado de 7B de OmniGen2 con arquitectura de doble ruta.",
|
|
1355
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
1356
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1357
|
+
"models": ["OmniGen"],
|
|
1358
|
+
"date": "2025-06-30",
|
|
1359
|
+
"size": 15784004813,
|
|
1360
|
+
"vram": 15784004813,
|
|
1361
|
+
"usage": 165
|
|
1109
1362
|
},
|
|
1110
1363
|
{
|
|
1111
|
-
"name": "
|
|
1112
|
-
"title": "
|
|
1113
|
-
"description": "Editar regiones específicas en videos mientras se preserva el contenido circundante. Excelente para eliminación o reemplazo de objetos.",
|
|
1364
|
+
"name": "image_omnigen2_image_edit",
|
|
1365
|
+
"title": "Edición de imagen OmniGen2",
|
|
1114
1366
|
"mediaType": "image",
|
|
1115
1367
|
"mediaSubtype": "webp",
|
|
1116
|
-
"thumbnailVariant": "
|
|
1117
|
-
"
|
|
1118
|
-
"
|
|
1119
|
-
"
|
|
1120
|
-
"
|
|
1121
|
-
"
|
|
1368
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1369
|
+
"description": "Editar imágenes con instrucciones de lenguaje natural usando las capacidades avanzadas de edición de imágenes y soporte de renderizado de texto de OmniGen2.",
|
|
1370
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
|
|
1371
|
+
"tags": ["Edición imagen", "Imagen"],
|
|
1372
|
+
"models": ["OmniGen"],
|
|
1373
|
+
"date": "2025-06-30",
|
|
1374
|
+
"size": 15784004813,
|
|
1375
|
+
"vram": 15784004813,
|
|
1376
|
+
"usage": 145
|
|
1122
1377
|
},
|
|
1123
1378
|
{
|
|
1124
|
-
"name": "
|
|
1125
|
-
"title": "
|
|
1126
|
-
"description": "Genera videos desde texto con soporte de canal alfa para fondos transparentes y objetos semitransparentes.",
|
|
1379
|
+
"name": "hidream_i1_dev",
|
|
1380
|
+
"title": "HiDream I1 Dev",
|
|
1127
1381
|
"mediaType": "image",
|
|
1128
1382
|
"mediaSubtype": "webp",
|
|
1129
|
-
"
|
|
1130
|
-
"
|
|
1131
|
-
"
|
|
1132
|
-
"
|
|
1383
|
+
"description": "Generar imágenes con HiDream I1 Dev - Versión equilibrada con 28 pasos de inferencia, adecuada para hardware de gama media.",
|
|
1384
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1385
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1386
|
+
"models": ["HiDream"],
|
|
1387
|
+
"date": "2025-04-17",
|
|
1388
|
+
"size": 33318208799,
|
|
1389
|
+
"vram": 33318208799,
|
|
1390
|
+
"usage": 92
|
|
1133
1391
|
},
|
|
1134
1392
|
{
|
|
1135
|
-
"name": "
|
|
1136
|
-
"title": "
|
|
1137
|
-
"description": "Generación de video controlada por trayectoria.",
|
|
1393
|
+
"name": "hidream_i1_fast",
|
|
1394
|
+
"title": "HiDream I1 Fast",
|
|
1138
1395
|
"mediaType": "image",
|
|
1139
1396
|
"mediaSubtype": "webp",
|
|
1140
|
-
"
|
|
1141
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/
|
|
1142
|
-
"tags": ["
|
|
1143
|
-
"models": ["
|
|
1144
|
-
"date": "2025-
|
|
1145
|
-
"size":
|
|
1397
|
+
"description": "Generar rápidamente imágenes con HiDream I1 Fast - Versión ligera con 16 pasos de inferencia, ideal para vistas previas rápidas en hardware de gama baja.",
|
|
1398
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1399
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1400
|
+
"models": ["HiDream"],
|
|
1401
|
+
"date": "2025-04-17",
|
|
1402
|
+
"size": 24234352968,
|
|
1403
|
+
"vram": 24234352968,
|
|
1404
|
+
"usage": 41
|
|
1146
1405
|
},
|
|
1147
1406
|
{
|
|
1148
|
-
"name": "
|
|
1149
|
-
"title": "
|
|
1150
|
-
"description": "Generar videos dinámicos con movimientos cinematográficos de cámara usando el modelo Wan 2.1 Fun Camera 1.3B.",
|
|
1407
|
+
"name": "hidream_i1_full",
|
|
1408
|
+
"title": "HiDream I1 Full",
|
|
1151
1409
|
"mediaType": "image",
|
|
1152
1410
|
"mediaSubtype": "webp",
|
|
1153
|
-
"
|
|
1154
|
-
"
|
|
1155
|
-
"
|
|
1156
|
-
"
|
|
1157
|
-
"
|
|
1411
|
+
"description": "Generar imágenes con HiDream I1 Full - Versión completa con 50 pasos de inferencia para la mejor calidad de salida.",
|
|
1412
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
|
|
1413
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1414
|
+
"models": ["HiDream"],
|
|
1415
|
+
"date": "2025-04-17",
|
|
1416
|
+
"size": 24234352968,
|
|
1417
|
+
"vram": 24234352968,
|
|
1418
|
+
"usage": 218
|
|
1158
1419
|
},
|
|
1159
1420
|
{
|
|
1160
|
-
"name": "
|
|
1161
|
-
"title": "
|
|
1162
|
-
"description": "Generar videos de alta calidad con control avanzado de cámara usando el modelo completo de 14B",
|
|
1421
|
+
"name": "hidream_e1_full",
|
|
1422
|
+
"title": "Edición de imagen HiDream E1",
|
|
1163
1423
|
"mediaType": "image",
|
|
1164
1424
|
"mediaSubtype": "webp",
|
|
1165
|
-
"
|
|
1166
|
-
"
|
|
1167
|
-
"
|
|
1168
|
-
"
|
|
1169
|
-
"
|
|
1425
|
+
"thumbnailVariant": "compareSlider",
|
|
1426
|
+
"description": "Editar imágenes con HiDream E1 - Modelo profesional de edición de imagen con lenguaje natural.",
|
|
1427
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
|
|
1428
|
+
"tags": ["Edición imagen", "Imagen"],
|
|
1429
|
+
"models": ["HiDream"],
|
|
1430
|
+
"date": "2025-05-01",
|
|
1431
|
+
"size": 34209414513,
|
|
1432
|
+
"vram": 34209414513,
|
|
1433
|
+
"usage": 69
|
|
1170
1434
|
},
|
|
1171
1435
|
{
|
|
1172
|
-
"name": "
|
|
1173
|
-
"title": "
|
|
1174
|
-
"description": "Generar videos a partir de indicaciones de texto usando Wan 2.1.",
|
|
1436
|
+
"name": "sd3.5_simple_example",
|
|
1437
|
+
"title": "SD3.5 Simple",
|
|
1175
1438
|
"mediaType": "image",
|
|
1176
1439
|
"mediaSubtype": "webp",
|
|
1177
|
-
"
|
|
1178
|
-
"
|
|
1179
|
-
"
|
|
1440
|
+
"description": "Generar imágenes usando SD 3.5.",
|
|
1441
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
|
|
1442
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1443
|
+
"models": ["SD3.5", "Stability"],
|
|
1180
1444
|
"date": "2025-03-01",
|
|
1181
|
-
"size":
|
|
1445
|
+
"size": 14935748772,
|
|
1446
|
+
"vram": 14935748772,
|
|
1447
|
+
"usage": 490
|
|
1182
1448
|
},
|
|
1183
1449
|
{
|
|
1184
|
-
"name": "
|
|
1185
|
-
"title": "
|
|
1186
|
-
"description": "Generar videos a partir de imágenes usando Wan 2.1.",
|
|
1450
|
+
"name": "sd3.5_large_canny_controlnet_example",
|
|
1451
|
+
"title": "ControlNet Canny grande SD3.5",
|
|
1187
1452
|
"mediaType": "image",
|
|
1188
1453
|
"mediaSubtype": "webp",
|
|
1189
|
-
"
|
|
1190
|
-
"
|
|
1191
|
-
"
|
|
1454
|
+
"description": "Generar imágenes guiadas por detección de bordes usando ControlNet Canny SD 3.5.",
|
|
1455
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1456
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1457
|
+
"tags": ["Imagen a imagen", "Imagen", "ControlNet"],
|
|
1458
|
+
"models": ["SD3.5", "Stability"],
|
|
1192
1459
|
"date": "2025-03-01",
|
|
1193
|
-
"size":
|
|
1460
|
+
"size": 23590107873,
|
|
1461
|
+
"vram": 23590107873,
|
|
1462
|
+
"usage": 113
|
|
1194
1463
|
},
|
|
1195
1464
|
{
|
|
1196
|
-
"name": "
|
|
1197
|
-
"title": "
|
|
1198
|
-
"description": "Generar videos desde fotogramas de inicio y fin usando inpainting de Wan 2.1.",
|
|
1465
|
+
"name": "sd3.5_large_depth",
|
|
1466
|
+
"title": "Profundidad grande SD3.5",
|
|
1199
1467
|
"mediaType": "image",
|
|
1200
1468
|
"mediaSubtype": "webp",
|
|
1201
|
-
"
|
|
1202
|
-
"
|
|
1203
|
-
"
|
|
1204
|
-
"
|
|
1205
|
-
"
|
|
1469
|
+
"description": "Generar imágenes guiadas por información de profundidad usando SD 3.5.",
|
|
1470
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1471
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1472
|
+
"tags": ["Imagen a imagen", "Imagen", "ControlNet"],
|
|
1473
|
+
"models": ["SD3.5", "Stability"],
|
|
1474
|
+
"date": "2025-03-01",
|
|
1475
|
+
"size": 23590107873,
|
|
1476
|
+
"vram": 23590107873,
|
|
1477
|
+
"usage": 95
|
|
1206
1478
|
},
|
|
1207
1479
|
{
|
|
1208
|
-
"name": "
|
|
1209
|
-
"title": "
|
|
1210
|
-
"description": "Generar videos guiados por controles de pose, profundidad y borde usando ControlNet de Wan 2.1.",
|
|
1480
|
+
"name": "sd3.5_large_blur",
|
|
1481
|
+
"title": "Desenfoque grande SD3.5",
|
|
1211
1482
|
"mediaType": "image",
|
|
1212
1483
|
"mediaSubtype": "webp",
|
|
1484
|
+
"description": "Generar imágenes guiadas por imágenes de referencia desenfocadas usando SD 3.5.",
|
|
1213
1485
|
"thumbnailVariant": "hoverDissolve",
|
|
1214
|
-
"tutorialUrl": "https://
|
|
1215
|
-
"tags": ["
|
|
1216
|
-
"models": ["
|
|
1217
|
-
"date": "2025-
|
|
1218
|
-
"size":
|
|
1486
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
|
|
1487
|
+
"tags": ["Imagen a imagen", "Imagen"],
|
|
1488
|
+
"models": ["SD3.5", "Stability"],
|
|
1489
|
+
"date": "2025-03-01",
|
|
1490
|
+
"size": 23590107873,
|
|
1491
|
+
"vram": 23590107873,
|
|
1492
|
+
"usage": 38
|
|
1219
1493
|
},
|
|
1220
1494
|
{
|
|
1221
|
-
"name": "
|
|
1222
|
-
"title": "
|
|
1223
|
-
"description": "Generar videos controlando primer y último fotogramas usando FLF2V de Wan 2.1.",
|
|
1224
|
-
"mediaType": "image",
|
|
1225
|
-
"mediaSubtype": "webp",
|
|
1226
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
1227
|
-
"tags": ["FLF2V", "Video"],
|
|
1228
|
-
"models": ["Wan2.1", "Wan"],
|
|
1229
|
-
"date": "2025-04-15",
|
|
1230
|
-
"size": 41049149932
|
|
1231
|
-
},
|
|
1232
|
-
{
|
|
1233
|
-
"name": "ltxv_text_to_video",
|
|
1234
|
-
"title": "Texto a video LTXV",
|
|
1235
|
-
"mediaType": "image",
|
|
1236
|
-
"mediaSubtype": "webp",
|
|
1237
|
-
"description": "Generar videos a partir de indicaciones de texto.",
|
|
1238
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
1239
|
-
"tags": ["Texto a video", "Video"],
|
|
1240
|
-
"models": ["LTXV"],
|
|
1241
|
-
"date": "2025-03-01",
|
|
1242
|
-
"size": 19155554140
|
|
1243
|
-
},
|
|
1244
|
-
{
|
|
1245
|
-
"name": "ltxv_image_to_video",
|
|
1246
|
-
"title": "Imagen a video LTXV",
|
|
1495
|
+
"name": "sdxl_simple_example",
|
|
1496
|
+
"title": "SDXL Simple",
|
|
1247
1497
|
"mediaType": "image",
|
|
1248
1498
|
"mediaSubtype": "webp",
|
|
1249
|
-
"description": "Generar
|
|
1250
|
-
"tutorialUrl": "https://
|
|
1251
|
-
"tags": ["
|
|
1252
|
-
"models": ["
|
|
1499
|
+
"description": "Generar imágenes de alta calidad usando SDXL.",
|
|
1500
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
1501
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1502
|
+
"models": ["SDXL", "Stability"],
|
|
1253
1503
|
"date": "2025-03-01",
|
|
1254
|
-
"size":
|
|
1504
|
+
"size": 13013750907,
|
|
1505
|
+
"vram": 13013750907,
|
|
1506
|
+
"usage": 278
|
|
1255
1507
|
},
|
|
1256
1508
|
{
|
|
1257
|
-
"name": "
|
|
1258
|
-
"title": "
|
|
1509
|
+
"name": "sdxl_refiner_prompt_example",
|
|
1510
|
+
"title": "Refinador de indicaciones SDXL",
|
|
1259
1511
|
"mediaType": "image",
|
|
1260
1512
|
"mediaSubtype": "webp",
|
|
1261
|
-
"description": "
|
|
1262
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1263
|
-
"tags": ["Texto a
|
|
1264
|
-
"models": ["
|
|
1513
|
+
"description": "Mejorar imágenes SDXL usando modelos refinadores.",
|
|
1514
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
|
|
1515
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1516
|
+
"models": ["SDXL", "Stability"],
|
|
1265
1517
|
"date": "2025-03-01",
|
|
1266
|
-
"size":
|
|
1518
|
+
"size": 13013750907,
|
|
1519
|
+
"vram": 13013750907,
|
|
1520
|
+
"usage": 59
|
|
1267
1521
|
},
|
|
1268
1522
|
{
|
|
1269
|
-
"name": "
|
|
1270
|
-
"title": "
|
|
1523
|
+
"name": "sdxl_revision_text_prompts",
|
|
1524
|
+
"title": "Indicaciones de texto de revisión SDXL",
|
|
1271
1525
|
"mediaType": "image",
|
|
1272
1526
|
"mediaSubtype": "webp",
|
|
1273
|
-
"description": "Generar
|
|
1274
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1275
|
-
"tags": ["Texto a
|
|
1276
|
-
"models": ["
|
|
1527
|
+
"description": "Generar imágenes transfiriendo conceptos de imágenes de referencia usando Revisión SDXL.",
|
|
1528
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
|
|
1529
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1530
|
+
"models": ["SDXL", "Stability"],
|
|
1277
1531
|
"date": "2025-03-01",
|
|
1278
|
-
"size":
|
|
1532
|
+
"size": 10630044058,
|
|
1533
|
+
"vram": 10630044058,
|
|
1534
|
+
"usage": 67
|
|
1279
1535
|
},
|
|
1280
1536
|
{
|
|
1281
|
-
"name": "
|
|
1282
|
-
"title": "
|
|
1537
|
+
"name": "sdxlturbo_example",
|
|
1538
|
+
"title": "SDXL Turbo",
|
|
1283
1539
|
"mediaType": "image",
|
|
1284
1540
|
"mediaSubtype": "webp",
|
|
1285
|
-
"description": "Generar
|
|
1286
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/
|
|
1287
|
-
"tags": ["
|
|
1288
|
-
"models": ["
|
|
1541
|
+
"description": "Generar imágenes en un solo paso usando SDXL Turbo.",
|
|
1542
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
|
|
1543
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
1544
|
+
"models": ["SDXL", "Stability"],
|
|
1289
1545
|
"date": "2025-03-01",
|
|
1290
|
-
"size":
|
|
1546
|
+
"size": 6936372183,
|
|
1547
|
+
"vram": 6936372183,
|
|
1548
|
+
"usage": 452
|
|
1291
1549
|
},
|
|
1292
1550
|
{
|
|
1293
|
-
"name": "
|
|
1294
|
-
"title": "
|
|
1551
|
+
"name": "image_lotus_depth_v1_1",
|
|
1552
|
+
"title": "Profundidad Lotus",
|
|
1295
1553
|
"mediaType": "image",
|
|
1296
1554
|
"mediaSubtype": "webp",
|
|
1297
|
-
"
|
|
1298
|
-
"
|
|
1299
|
-
"tags": ["Texto a
|
|
1300
|
-
"models": ["
|
|
1301
|
-
"date": "2025-
|
|
1302
|
-
"size":
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
},
|
|
1306
|
-
{
|
|
1307
|
-
"moduleName": "default",
|
|
1308
|
-
"type": "audio",
|
|
1309
|
-
"category": "GENERATION TYPE",
|
|
1310
|
-
"icon": "icon-[lucide--volume-2]",
|
|
1311
|
-
"title": "Audio",
|
|
1312
|
-
"templates": [
|
|
1313
|
-
{
|
|
1314
|
-
"name": "audio_stable_audio_example",
|
|
1315
|
-
"title": "Audio Estable",
|
|
1316
|
-
"mediaType": "audio",
|
|
1317
|
-
"mediaSubtype": "mp3",
|
|
1318
|
-
"description": "Generar audio a partir de indicaciones de texto usando Stable Audio.",
|
|
1319
|
-
"tags": ["Texto a audio", "Audio"],
|
|
1320
|
-
"models": ["Stable Audio", "Stability"],
|
|
1321
|
-
"date": "2025-03-01",
|
|
1322
|
-
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
|
|
1323
|
-
"size": 5744518758
|
|
1324
|
-
},
|
|
1325
|
-
{
|
|
1326
|
-
"name": "audio_ace_step_1_t2a_instrumentals",
|
|
1327
|
-
"title": "Música instrumental ACE-Step v1 texto a audio",
|
|
1328
|
-
"mediaType": "audio",
|
|
1329
|
-
"mediaSubtype": "mp3",
|
|
1330
|
-
"description": "Generar música instrumental a partir de indicaciones de texto usando ACE-Step v1.",
|
|
1331
|
-
"tags": ["Texto a audio", "Audio"],
|
|
1332
|
-
"models": ["ACE-Step"],
|
|
1333
|
-
"date": "2025-03-01",
|
|
1334
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1335
|
-
"size": 7698728878
|
|
1336
|
-
},
|
|
1337
|
-
{
|
|
1338
|
-
"name": "audio_ace_step_1_t2a_song",
|
|
1339
|
-
"title": "Canción ACE Step v1 texto a audio",
|
|
1340
|
-
"mediaType": "audio",
|
|
1341
|
-
"mediaSubtype": "mp3",
|
|
1342
|
-
"description": "Generar canciones con voces a partir de indicaciones de texto usando ACE-Step v1, soportando personalización multilingüe y de estilo.",
|
|
1343
|
-
"tags": ["Texto a audio", "Audio"],
|
|
1344
|
-
"models": ["ACE-Step"],
|
|
1345
|
-
"date": "2025-03-01",
|
|
1346
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1347
|
-
"size": 7698728878
|
|
1348
|
-
},
|
|
1349
|
-
{
|
|
1350
|
-
"name": "audio_ace_step_1_m2m_editing",
|
|
1351
|
-
"title": "Edición M2M ACE Step v1",
|
|
1352
|
-
"mediaType": "audio",
|
|
1353
|
-
"mediaSubtype": "mp3",
|
|
1354
|
-
"description": "Editar canciones existentes para cambiar estilo y letras usando ACE-Step v1 M2M.",
|
|
1355
|
-
"tags": ["Edición de audio", "Audio"],
|
|
1356
|
-
"models": ["ACE-Step"],
|
|
1357
|
-
"date": "2025-03-01",
|
|
1358
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
1359
|
-
"size": 7698728878
|
|
1555
|
+
"thumbnailVariant": "compareSlider",
|
|
1556
|
+
"description": "Ejecutar Profundidad Lotus en ComfyUI para estimación monocromática eficiente sin entrenamiento previo con alta retención de detalles.",
|
|
1557
|
+
"tags": ["Imagen", "Texto a imagen"],
|
|
1558
|
+
"models": ["SD1.5", "Stability"],
|
|
1559
|
+
"date": "2025-05-21",
|
|
1560
|
+
"size": 2072321720,
|
|
1561
|
+
"vram": 2072321720,
|
|
1562
|
+
"usage": 79
|
|
1360
1563
|
}
|
|
1361
1564
|
]
|
|
1362
1565
|
},
|
|
1363
1566
|
{
|
|
1364
1567
|
"moduleName": "default",
|
|
1365
|
-
"type": "
|
|
1366
|
-
"category": "
|
|
1367
|
-
"icon": "icon-[lucide--
|
|
1368
|
-
"title": "
|
|
1568
|
+
"type": "video",
|
|
1569
|
+
"category": "Tipo de generación",
|
|
1570
|
+
"icon": "icon-[lucide--film]",
|
|
1571
|
+
"title": "Vídeo",
|
|
1369
1572
|
"templates": [
|
|
1370
1573
|
{
|
|
1371
|
-
"name": "
|
|
1372
|
-
"title": "
|
|
1574
|
+
"name": "video_wan2_2_14B_t2v",
|
|
1575
|
+
"title": "Texto a video Wan 2.2 14B",
|
|
1576
|
+
"description": "Generar videos de alta calidad a partir de indicaciones de texto con control estético cinematográfico y generación de movimiento dinámico usando Wan 2.2.",
|
|
1373
1577
|
"mediaType": "image",
|
|
1374
1578
|
"mediaSubtype": "webp",
|
|
1375
|
-
"
|
|
1376
|
-
"tags": ["
|
|
1377
|
-
"models": ["
|
|
1378
|
-
"date": "2025-
|
|
1379
|
-
"
|
|
1380
|
-
"
|
|
1579
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1580
|
+
"tags": ["Texto a video", "Video"],
|
|
1581
|
+
"models": ["Wan2.2", "Wan"],
|
|
1582
|
+
"date": "2025-07-29",
|
|
1583
|
+
"size": 38031935406,
|
|
1584
|
+
"vram": 38031935406,
|
|
1585
|
+
"usage": 2369
|
|
1381
1586
|
},
|
|
1382
1587
|
{
|
|
1383
|
-
"name": "
|
|
1384
|
-
"title": "
|
|
1588
|
+
"name": "video_wan2_2_14B_i2v",
|
|
1589
|
+
"title": "Imagen a video Wan 2.2 14B",
|
|
1590
|
+
"description": "Transformar imágenes estáticas en videos dinámicos con control de movimiento preciso y preservación de estilo usando Wan 2.2.",
|
|
1385
1591
|
"mediaType": "image",
|
|
1386
1592
|
"mediaSubtype": "webp",
|
|
1387
|
-
"
|
|
1388
|
-
"
|
|
1389
|
-
"
|
|
1390
|
-
"
|
|
1391
|
-
"
|
|
1392
|
-
"size":
|
|
1593
|
+
"thumbnailVariant": "hoverDissolve",
|
|
1594
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1595
|
+
"tags": ["Imagen a video", "Video"],
|
|
1596
|
+
"models": ["Wan2.2", "Wan"],
|
|
1597
|
+
"date": "2025-07-29",
|
|
1598
|
+
"size": 38031935406,
|
|
1599
|
+
"vram": 38031935406,
|
|
1600
|
+
"usage": 10317
|
|
1393
1601
|
},
|
|
1394
1602
|
{
|
|
1395
|
-
"name": "
|
|
1396
|
-
"title": "
|
|
1603
|
+
"name": "video_wan2_2_14B_flf2v",
|
|
1604
|
+
"title": "Primer-Último fotograma a video Wan 2.2 14B",
|
|
1605
|
+
"description": "Generar transiciones de video suaves definiendo fotogramas de inicio y fin.",
|
|
1397
1606
|
"mediaType": "image",
|
|
1398
1607
|
"mediaSubtype": "webp",
|
|
1399
|
-
"description": "Generar modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV.",
|
|
1400
|
-
"tags": ["3D", "Imagen a 3D"],
|
|
1401
|
-
"models": ["Hunyuan3D", "Tencent"],
|
|
1402
|
-
"date": "2025-03-01",
|
|
1403
|
-
"tutorialUrl": "",
|
|
1404
1608
|
"thumbnailVariant": "hoverDissolve",
|
|
1405
|
-
"
|
|
1609
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
1610
|
+
"tags": ["FLF2V", "Video"],
|
|
1611
|
+
"models": ["Wan2.2", "Wan"],
|
|
1612
|
+
"date": "2025-08-02",
|
|
1613
|
+
"size": 38031935406,
|
|
1614
|
+
"vram": 38031935406,
|
|
1615
|
+
"usage": 1585
|
|
1406
1616
|
},
|
|
1407
1617
|
{
|
|
1408
|
-
"name": "
|
|
1409
|
-
"title": "
|
|
1618
|
+
"name": "video_wan2_2_14B_animate",
|
|
1619
|
+
"title": "Wan2.2 Animate animación y reemplazo de personajes",
|
|
1620
|
+
"description": "Marco unificado de animación y reemplazo de personajes con replicación precisa de movimiento y expresión。",
|
|
1410
1621
|
"mediaType": "image",
|
|
1411
1622
|
"mediaSubtype": "webp",
|
|
1412
|
-
"
|
|
1413
|
-
"tags": ["Imagen a
|
|
1414
|
-
"models": ["
|
|
1415
|
-
"date": "2025-
|
|
1416
|
-
"
|
|
1417
|
-
"
|
|
1418
|
-
"
|
|
1419
|
-
}
|
|
1420
|
-
]
|
|
1421
|
-
},
|
|
1422
|
-
{
|
|
1423
|
-
"moduleName": "default",
|
|
1424
|
-
"type": "image",
|
|
1425
|
-
"category": "CLOSED SOURCE MODELS",
|
|
1426
|
-
"icon": "icon-[lucide--hand-coins]",
|
|
1427
|
-
"title": "Image API",
|
|
1428
|
-
"templates": [
|
|
1623
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
|
|
1624
|
+
"tags": ["Video", "Imagen a video"],
|
|
1625
|
+
"models": ["Wan2.2", "Wan"],
|
|
1626
|
+
"date": "2025-09-22",
|
|
1627
|
+
"size": 27417997476,
|
|
1628
|
+
"vram": 27417997476,
|
|
1629
|
+
"usage": 2141
|
|
1630
|
+
},
|
|
1429
1631
|
{
|
|
1430
|
-
"name": "
|
|
1431
|
-
"title": "
|
|
1432
|
-
"description": "
|
|
1632
|
+
"name": "video_hunyuan_video_1.5_720p_t2v",
|
|
1633
|
+
"title": "Hunyuan Video 1.5 Texto a Video",
|
|
1634
|
+
"description": "Genera vídeos 720p de alta calidad a partir de indicaciones de texto, con control cinematográfico de cámara, expresiones emocionales y simulación física. Soporta varios estilos, incluyendo realista, anime y renderizado de texto 3D.",
|
|
1433
1635
|
"mediaType": "image",
|
|
1434
1636
|
"mediaSubtype": "webp",
|
|
1435
|
-
"
|
|
1436
|
-
"
|
|
1437
|
-
"models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
|
|
1637
|
+
"tags": ["Texto a video", "Video"],
|
|
1638
|
+
"models": ["Hunyuan Video"],
|
|
1438
1639
|
"date": "2025-11-21",
|
|
1439
|
-
"
|
|
1440
|
-
"
|
|
1441
|
-
"
|
|
1640
|
+
"size": 45384919416,
|
|
1641
|
+
"vram": 45384919416,
|
|
1642
|
+
"usage": 451
|
|
1442
1643
|
},
|
|
1443
1644
|
{
|
|
1444
|
-
"name": "
|
|
1445
|
-
"title": "
|
|
1446
|
-
"description": "
|
|
1645
|
+
"name": "video_hunyuan_video_1.5_720p_i2v",
|
|
1646
|
+
"title": "Hunyuan Video 1.5 Imagen a Video",
|
|
1647
|
+
"description": "Anima imágenes fijas y conviértelas en videos dinámicos con movimiento preciso y control de cámara. Mantiene la coherencia visual mientras da vida a fotos e ilustraciones con movimientos suaves y naturales.",
|
|
1447
1648
|
"mediaType": "image",
|
|
1448
1649
|
"mediaSubtype": "webp",
|
|
1449
|
-
"tags": ["
|
|
1450
|
-
"models": ["
|
|
1650
|
+
"tags": ["Imagen a video", "Video"],
|
|
1651
|
+
"models": ["Hunyuan Video"],
|
|
1451
1652
|
"date": "2025-11-21",
|
|
1452
|
-
"
|
|
1453
|
-
"
|
|
1454
|
-
"
|
|
1653
|
+
"size": 45384919416,
|
|
1654
|
+
"vram": 45384919416,
|
|
1655
|
+
"usage": 2150
|
|
1455
1656
|
},
|
|
1456
1657
|
{
|
|
1457
|
-
"name": "
|
|
1458
|
-
"title": "
|
|
1459
|
-
"description": "
|
|
1658
|
+
"name": "video_kandinsky5_i2v",
|
|
1659
|
+
"title": "Kandinsky 5.0 Video Lite imagen a video",
|
|
1660
|
+
"description": "Un modelo 2B ligero que genera videos de alta calidad a partir de indicaciones en inglés y ruso.",
|
|
1460
1661
|
"mediaType": "image",
|
|
1461
1662
|
"mediaSubtype": "webp",
|
|
1462
|
-
"tags": ["
|
|
1463
|
-
"models": ["
|
|
1464
|
-
"date": "2025-09
|
|
1465
|
-
"
|
|
1466
|
-
"
|
|
1467
|
-
"
|
|
1663
|
+
"tags": ["Imagen a video", "Video"],
|
|
1664
|
+
"models": ["Kandinsky"],
|
|
1665
|
+
"date": "2025-12-09",
|
|
1666
|
+
"size": 14710262988,
|
|
1667
|
+
"vram": 14710262988,
|
|
1668
|
+
"usage": 1243
|
|
1468
1669
|
},
|
|
1469
1670
|
{
|
|
1470
|
-
"name": "
|
|
1471
|
-
"title": "
|
|
1472
|
-
"description": "
|
|
1671
|
+
"name": "video_kandinsky5_t2v",
|
|
1672
|
+
"title": "Kandinsky 5.0 Video Lite texto a video",
|
|
1673
|
+
"description": "Un modelo 2B ligero que genera videos de alta calidad a partir de indicaciones en inglés y ruso.",
|
|
1473
1674
|
"mediaType": "image",
|
|
1474
1675
|
"mediaSubtype": "webp",
|
|
1475
|
-
"tags": ["
|
|
1476
|
-
"models": ["
|
|
1477
|
-
"date": "2025-
|
|
1478
|
-
"
|
|
1479
|
-
"
|
|
1480
|
-
"
|
|
1676
|
+
"tags": ["Texto a video", "Video"],
|
|
1677
|
+
"models": ["Kandinsky"],
|
|
1678
|
+
"date": "2025-12-09",
|
|
1679
|
+
"size": 14710262988,
|
|
1680
|
+
"vram": 14710262988,
|
|
1681
|
+
"usage": 556
|
|
1481
1682
|
},
|
|
1482
1683
|
{
|
|
1483
|
-
"name": "
|
|
1484
|
-
"title": "
|
|
1485
|
-
"description": "
|
|
1684
|
+
"name": "api_kling2_6_i2v",
|
|
1685
|
+
"title": "Kling2.6: Convertir imágenes en video con audio",
|
|
1686
|
+
"description": "Convierte imágenes estáticas en videos dinámicos con diálogos, canto, efectos y audio ambiental sincronizados",
|
|
1486
1687
|
"mediaType": "image",
|
|
1487
1688
|
"mediaSubtype": "webp",
|
|
1488
|
-
"tags": ["
|
|
1489
|
-
"models": ["
|
|
1490
|
-
"date": "2025-
|
|
1491
|
-
"
|
|
1689
|
+
"tags": ["Imagen a video", "Video", "API", "Audio"],
|
|
1690
|
+
"models": ["Kling"],
|
|
1691
|
+
"date": "2025-12-22",
|
|
1692
|
+
"openSource": false,
|
|
1492
1693
|
"size": 0,
|
|
1493
1694
|
"vram": 0
|
|
1494
1695
|
},
|
|
1495
1696
|
{
|
|
1496
|
-
"name": "
|
|
1497
|
-
"title": "
|
|
1498
|
-
"description": "
|
|
1697
|
+
"name": "api_kling2_6_t2v",
|
|
1698
|
+
"title": "Kling2.6: Generar videos narrativos con audio desde texto",
|
|
1699
|
+
"description": "Convierte tus historias en videos con diálogo, música, efectos y audio ambiental",
|
|
1499
1700
|
"mediaType": "image",
|
|
1500
1701
|
"mediaSubtype": "webp",
|
|
1501
|
-
"
|
|
1502
|
-
"
|
|
1503
|
-
"
|
|
1504
|
-
"
|
|
1505
|
-
"OpenSource": false,
|
|
1702
|
+
"tags": ["Texto a video", "Video", "API", "Audio"],
|
|
1703
|
+
"models": ["Kling"],
|
|
1704
|
+
"date": "2025-12-22",
|
|
1705
|
+
"openSource": false,
|
|
1506
1706
|
"size": 0,
|
|
1507
1707
|
"vram": 0
|
|
1508
1708
|
},
|
|
1509
1709
|
{
|
|
1510
|
-
"name": "
|
|
1511
|
-
"title": "
|
|
1512
|
-
"description": "
|
|
1710
|
+
"name": "api_openai_sora_video",
|
|
1711
|
+
"title": "Sora 2: Texto e Imagen a Video",
|
|
1712
|
+
"description": "Generación de video Sora-2 y Sora-2 Pro de OpenAI con audio sincronizado.",
|
|
1513
1713
|
"mediaType": "image",
|
|
1514
1714
|
"mediaSubtype": "webp",
|
|
1515
|
-
"
|
|
1516
|
-
"
|
|
1517
|
-
"
|
|
1518
|
-
"
|
|
1519
|
-
"date": "2025-05-29",
|
|
1520
|
-
"OpenSource": false,
|
|
1715
|
+
"tags": ["Imagen a video", "Texto a video", "API"],
|
|
1716
|
+
"models": ["OpenAI"],
|
|
1717
|
+
"date": "2025-10-08",
|
|
1718
|
+
"openSource": false,
|
|
1521
1719
|
"size": 0,
|
|
1522
|
-
"vram": 0
|
|
1720
|
+
"vram": 0,
|
|
1721
|
+
"usage": 765
|
|
1523
1722
|
},
|
|
1524
1723
|
{
|
|
1525
|
-
"name": "
|
|
1526
|
-
"title": "
|
|
1527
|
-
"description": "
|
|
1724
|
+
"name": "api_veo3",
|
|
1725
|
+
"title": "Veo3: Imagen a video",
|
|
1726
|
+
"description": "Generar videos de 8 segundos de alta calidad a partir de indicaciones de texto o imágenes usando la API avanzada Veo 3 de Google. Cuenta con generación de audio, mejora de indicaciones y opciones duales de modelo para velocidad o calidad.",
|
|
1528
1727
|
"mediaType": "image",
|
|
1529
1728
|
"mediaSubtype": "webp",
|
|
1530
|
-
"
|
|
1531
|
-
"
|
|
1532
|
-
"
|
|
1533
|
-
"
|
|
1534
|
-
"
|
|
1535
|
-
"OpenSource": false,
|
|
1729
|
+
"tags": ["Imagen a video", "Texto a video", "API"],
|
|
1730
|
+
"models": ["Veo", "Google"],
|
|
1731
|
+
"date": "2025-03-01",
|
|
1732
|
+
"tutorialUrl": "",
|
|
1733
|
+
"openSource": false,
|
|
1536
1734
|
"size": 0,
|
|
1537
|
-
"vram": 0
|
|
1735
|
+
"vram": 0,
|
|
1736
|
+
"usage": 491
|
|
1538
1737
|
},
|
|
1539
1738
|
{
|
|
1540
|
-
"name": "
|
|
1541
|
-
"title": "
|
|
1542
|
-
"description": "
|
|
1739
|
+
"name": "api_topaz_video_enhance",
|
|
1740
|
+
"title": "Mejora de video Topaz",
|
|
1741
|
+
"description": "Mejora vídeos con Topaz AI. Permite aumentar la resolución usando el modelo Starlight (Astra) Fast e interpolar fotogramas con el modelo apo-8.",
|
|
1543
1742
|
"mediaType": "image",
|
|
1544
1743
|
"mediaSubtype": "webp",
|
|
1545
1744
|
"thumbnailVariant": "compareSlider",
|
|
1546
|
-
"
|
|
1547
|
-
"
|
|
1548
|
-
"
|
|
1549
|
-
"
|
|
1550
|
-
"OpenSource": false,
|
|
1745
|
+
"tags": ["Video", "API", "Mejorar"],
|
|
1746
|
+
"models": ["Topaz"],
|
|
1747
|
+
"date": "2025-11-25",
|
|
1748
|
+
"openSource": false,
|
|
1551
1749
|
"size": 0,
|
|
1552
|
-
"vram": 0
|
|
1750
|
+
"vram": 0,
|
|
1751
|
+
"usage": 471
|
|
1553
1752
|
},
|
|
1554
1753
|
{
|
|
1555
|
-
"name": "
|
|
1556
|
-
"title": "
|
|
1557
|
-
"description": "
|
|
1754
|
+
"name": "api_veo2_i2v",
|
|
1755
|
+
"title": "Veo2: Imagen a video",
|
|
1756
|
+
"description": "Generar videos a partir de imágenes usando la API de Google Veo2.",
|
|
1558
1757
|
"mediaType": "image",
|
|
1559
1758
|
"mediaSubtype": "webp",
|
|
1560
|
-
"tags": ["
|
|
1561
|
-
"models": ["
|
|
1562
|
-
"date": "2025-
|
|
1563
|
-
"
|
|
1759
|
+
"tags": ["Imagen a video", "Video", "API"],
|
|
1760
|
+
"models": ["Veo", "Google"],
|
|
1761
|
+
"date": "2025-03-01",
|
|
1762
|
+
"tutorialUrl": "",
|
|
1763
|
+
"openSource": false,
|
|
1564
1764
|
"size": 0,
|
|
1565
|
-
"vram": 0
|
|
1765
|
+
"vram": 0,
|
|
1766
|
+
"usage": 61
|
|
1566
1767
|
},
|
|
1567
1768
|
{
|
|
1568
|
-
"name": "
|
|
1569
|
-
"title": "
|
|
1570
|
-
"description": "
|
|
1769
|
+
"name": "api_wan2_6_t2v",
|
|
1770
|
+
"title": "Wan2.5: Texto a Video",
|
|
1771
|
+
"description": "Genera videos con audio sincronizado, movimiento mejorado y calidad superior.",
|
|
1571
1772
|
"mediaType": "image",
|
|
1572
1773
|
"mediaSubtype": "webp",
|
|
1573
|
-
"
|
|
1574
|
-
"
|
|
1575
|
-
"
|
|
1576
|
-
"
|
|
1577
|
-
"
|
|
1774
|
+
"tags": ["Texto a video", "Video", "API"],
|
|
1775
|
+
"models": ["Wan2.6", "Wan"],
|
|
1776
|
+
"date": "2025-12-20",
|
|
1777
|
+
"tutorialUrl": "",
|
|
1778
|
+
"openSource": false,
|
|
1578
1779
|
"size": 0,
|
|
1579
1780
|
"vram": 0
|
|
1580
1781
|
},
|
|
1581
1782
|
{
|
|
1582
|
-
"name": "
|
|
1583
|
-
"title": "
|
|
1584
|
-
"description": "
|
|
1783
|
+
"name": "api_wan2_6_i2v",
|
|
1784
|
+
"title": "Wan2.6: Image to Video",
|
|
1785
|
+
"description": "Transform images into high-quality videos with enhanced image quality, smoother motion, 1080P resolution support, and natural movement generation for professional results.",
|
|
1585
1786
|
"mediaType": "image",
|
|
1586
1787
|
"mediaSubtype": "webp",
|
|
1587
|
-
"
|
|
1588
|
-
"
|
|
1589
|
-
"
|
|
1590
|
-
"
|
|
1591
|
-
"
|
|
1788
|
+
"tags": ["Imagen a video", "Video", "API"],
|
|
1789
|
+
"models": ["Wan2.6", "Wan"],
|
|
1790
|
+
"date": "2025-12-20",
|
|
1791
|
+
"tutorialUrl": "",
|
|
1792
|
+
"openSource": false,
|
|
1592
1793
|
"size": 0,
|
|
1593
1794
|
"vram": 0
|
|
1594
1795
|
},
|
|
1595
1796
|
{
|
|
1596
|
-
"name": "
|
|
1597
|
-
"title": "
|
|
1598
|
-
"description": "
|
|
1797
|
+
"name": "api_wan_text_to_video",
|
|
1798
|
+
"title": "Wan2.5: Text to Video",
|
|
1799
|
+
"description": "Generate videos with synchronized audio, enhanced motion, and superior quality.",
|
|
1599
1800
|
"mediaType": "image",
|
|
1600
1801
|
"mediaSubtype": "webp",
|
|
1601
|
-
"
|
|
1602
|
-
"
|
|
1603
|
-
"
|
|
1604
|
-
"
|
|
1605
|
-
"
|
|
1802
|
+
"tags": ["Imagen a video", "Video", "API"],
|
|
1803
|
+
"models": ["Wan2.5", "Wan"],
|
|
1804
|
+
"date": "2025-09-27",
|
|
1805
|
+
"tutorialUrl": "",
|
|
1806
|
+
"openSource": false,
|
|
1606
1807
|
"size": 0,
|
|
1607
|
-
"vram": 0
|
|
1808
|
+
"vram": 0,
|
|
1809
|
+
"usage": 167
|
|
1608
1810
|
},
|
|
1609
1811
|
{
|
|
1610
|
-
"name": "
|
|
1611
|
-
"title": "
|
|
1612
|
-
"description": "
|
|
1812
|
+
"name": "api_wan_image_to_video",
|
|
1813
|
+
"title": "Wan2.5: Imagen a Video",
|
|
1814
|
+
"description": "Transforma imágenes en videos con audio sincronizado, movimiento mejorado y calidad superior.",
|
|
1613
1815
|
"mediaType": "image",
|
|
1614
1816
|
"mediaSubtype": "webp",
|
|
1615
|
-
"tags": ["
|
|
1616
|
-
"models": ["
|
|
1617
|
-
"date": "2025-
|
|
1618
|
-
"
|
|
1817
|
+
"tags": ["Imagen a video", "Video", "API"],
|
|
1818
|
+
"models": ["Wan2.5", "Wan"],
|
|
1819
|
+
"date": "2025-09-27",
|
|
1820
|
+
"tutorialUrl": "",
|
|
1821
|
+
"openSource": false,
|
|
1619
1822
|
"size": 0,
|
|
1620
|
-
"vram": 0
|
|
1823
|
+
"vram": 0,
|
|
1824
|
+
"usage": 1463
|
|
1621
1825
|
},
|
|
1622
1826
|
{
|
|
1623
|
-
"name": "
|
|
1624
|
-
"title": "
|
|
1625
|
-
"description": "
|
|
1827
|
+
"name": "api_kling_i2v",
|
|
1828
|
+
"title": "Kling: Imagen a video",
|
|
1829
|
+
"description": "Generar videos con excelente adherencia a las indicaciones para acciones, expresiones y movimientos de cámara usando Kling.",
|
|
1626
1830
|
"mediaType": "image",
|
|
1627
1831
|
"mediaSubtype": "webp",
|
|
1628
|
-
"tags": ["
|
|
1629
|
-
"models": ["
|
|
1832
|
+
"tags": ["Imagen a video", "Video", "API"],
|
|
1833
|
+
"models": ["Kling"],
|
|
1630
1834
|
"date": "2025-03-01",
|
|
1631
|
-
"
|
|
1835
|
+
"tutorialUrl": "",
|
|
1836
|
+
"openSource": false,
|
|
1632
1837
|
"size": 0,
|
|
1633
|
-
"vram": 0
|
|
1838
|
+
"vram": 0,
|
|
1839
|
+
"usage": 418
|
|
1634
1840
|
},
|
|
1635
1841
|
{
|
|
1636
|
-
"name": "
|
|
1637
|
-
"title": "
|
|
1638
|
-
"description": "
|
|
1842
|
+
"name": "api_kling_omni_edit_video",
|
|
1843
|
+
"title": "Kling: Omni Edit Video",
|
|
1844
|
+
"description": "Edita videos con comandos de lenguaje natural, con modo de referencia de video para generar rápidamente transferencias de estilo, adiciones de elementos y modificaciones de fondo de alta calidad.",
|
|
1639
1845
|
"mediaType": "image",
|
|
1846
|
+
"thumbnailVariant": "compareSlider",
|
|
1640
1847
|
"mediaSubtype": "webp",
|
|
1641
|
-
"tags": ["
|
|
1642
|
-
"models": ["
|
|
1643
|
-
"date": "2025-
|
|
1644
|
-
"
|
|
1848
|
+
"tags": ["Video", "API", "Edición de video", "Texto a video", "Imagen a video"],
|
|
1849
|
+
"models": ["Kling"],
|
|
1850
|
+
"date": "2025-12-02",
|
|
1851
|
+
"tutorialUrl": "",
|
|
1852
|
+
"openSource": false,
|
|
1645
1853
|
"size": 0,
|
|
1646
|
-
"vram": 0
|
|
1854
|
+
"vram": 0,
|
|
1855
|
+
"usage": 1007
|
|
1647
1856
|
},
|
|
1648
1857
|
{
|
|
1649
|
-
"name": "
|
|
1650
|
-
"title": "
|
|
1651
|
-
"description": "Generar
|
|
1858
|
+
"name": "api_kling_effects",
|
|
1859
|
+
"title": "Kling: Efectos de video",
|
|
1860
|
+
"description": "Generar videos dinámicos aplicando efectos visuales a imágenes usando Kling.",
|
|
1652
1861
|
"mediaType": "image",
|
|
1653
1862
|
"mediaSubtype": "webp",
|
|
1654
|
-
"tags": ["
|
|
1655
|
-
"models": ["
|
|
1863
|
+
"tags": ["Video", "API"],
|
|
1864
|
+
"models": ["Kling"],
|
|
1656
1865
|
"date": "2025-03-01",
|
|
1657
|
-
"
|
|
1866
|
+
"tutorialUrl": "",
|
|
1867
|
+
"openSource": false,
|
|
1658
1868
|
"size": 0,
|
|
1659
|
-
"vram": 0
|
|
1869
|
+
"vram": 0,
|
|
1870
|
+
"usage": 5
|
|
1660
1871
|
},
|
|
1661
1872
|
{
|
|
1662
|
-
"name": "
|
|
1663
|
-
"title": "
|
|
1664
|
-
"description": "Generar
|
|
1873
|
+
"name": "api_kling_flf",
|
|
1874
|
+
"title": "Kling: FLF2V",
|
|
1875
|
+
"description": "Generar videos controlando el primer y último fotograma.",
|
|
1665
1876
|
"mediaType": "image",
|
|
1666
|
-
"thumbnailVariant": "compareSlider",
|
|
1667
1877
|
"mediaSubtype": "webp",
|
|
1668
|
-
"tags": ["
|
|
1669
|
-
"models": ["
|
|
1878
|
+
"tags": ["Video", "API", "FLF2V"],
|
|
1879
|
+
"models": ["Kling"],
|
|
1670
1880
|
"date": "2025-03-01",
|
|
1671
|
-
"
|
|
1881
|
+
"tutorialUrl": "",
|
|
1882
|
+
"openSource": false,
|
|
1672
1883
|
"size": 0,
|
|
1673
|
-
"vram": 0
|
|
1884
|
+
"vram": 0,
|
|
1885
|
+
"usage": 167
|
|
1674
1886
|
},
|
|
1675
1887
|
{
|
|
1676
|
-
"name": "
|
|
1677
|
-
"title": "
|
|
1678
|
-
"description": "Generar
|
|
1888
|
+
"name": "api_vidu_text_to_video",
|
|
1889
|
+
"title": "Vidu: Texto a video",
|
|
1890
|
+
"description": "Generar videos 1080p de alta calidad a partir de indicaciones de texto con control de amplitud de movimiento y duración ajustable usando el modelo AI avanzado de Vidu.",
|
|
1679
1891
|
"mediaType": "image",
|
|
1680
1892
|
"mediaSubtype": "webp",
|
|
1681
|
-
"tags": ["Texto a
|
|
1682
|
-
"models": ["
|
|
1683
|
-
"date": "2025-
|
|
1684
|
-
"
|
|
1893
|
+
"tags": ["Texto a video", "Video", "API"],
|
|
1894
|
+
"models": ["Vidu"],
|
|
1895
|
+
"date": "2025-08-23",
|
|
1896
|
+
"tutorialUrl": "",
|
|
1897
|
+
"openSource": false,
|
|
1685
1898
|
"size": 0,
|
|
1686
|
-
"vram": 0
|
|
1899
|
+
"vram": 0,
|
|
1900
|
+
"usage": 8
|
|
1687
1901
|
},
|
|
1688
1902
|
{
|
|
1689
|
-
"name": "
|
|
1690
|
-
"title": "
|
|
1691
|
-
"description": "Transformar imágenes
|
|
1903
|
+
"name": "api_vidu_image_to_video",
|
|
1904
|
+
"title": "Vidu: Imagen a video",
|
|
1905
|
+
"description": "Transformar imágenes estáticas en videos 1080p dinámicos con control de movimiento preciso y amplitud de movimiento personalizable usando Vidu.",
|
|
1692
1906
|
"mediaType": "image",
|
|
1693
|
-
"thumbnailVariant": "compareSlider",
|
|
1694
1907
|
"mediaSubtype": "webp",
|
|
1695
|
-
"tags": ["Imagen a
|
|
1696
|
-
"models": ["
|
|
1697
|
-
"date": "2025-
|
|
1698
|
-
"
|
|
1908
|
+
"tags": ["Imagen a video", "Video", "API"],
|
|
1909
|
+
"models": ["Vidu"],
|
|
1910
|
+
"date": "2025-08-23",
|
|
1911
|
+
"tutorialUrl": "",
|
|
1912
|
+
"openSource": false,
|
|
1699
1913
|
"size": 0,
|
|
1700
|
-
"vram": 0
|
|
1914
|
+
"vram": 0,
|
|
1915
|
+
"usage": 62
|
|
1701
1916
|
},
|
|
1702
1917
|
{
|
|
1703
|
-
"name": "
|
|
1704
|
-
"title": "
|
|
1705
|
-
"description": "Generar
|
|
1918
|
+
"name": "api_vidu_reference_to_video",
|
|
1919
|
+
"title": "Vidu: Referencia a video",
|
|
1920
|
+
"description": "Generar videos con sujetos consistentes usando múltiples imágenes de referencia (hasta 7) para continuidad de personaje y estilo a lo largo de la secuencia de video.",
|
|
1706
1921
|
"mediaType": "image",
|
|
1707
1922
|
"mediaSubtype": "webp",
|
|
1708
|
-
"tags": ["
|
|
1709
|
-
"models": ["
|
|
1710
|
-
"date": "2025-
|
|
1711
|
-
"
|
|
1923
|
+
"tags": ["Video", "Imagen a video", "API"],
|
|
1924
|
+
"models": ["Vidu"],
|
|
1925
|
+
"date": "2025-08-23",
|
|
1926
|
+
"tutorialUrl": "",
|
|
1927
|
+
"openSource": false,
|
|
1712
1928
|
"size": 0,
|
|
1713
|
-
"vram": 0
|
|
1929
|
+
"vram": 0,
|
|
1930
|
+
"usage": 69
|
|
1714
1931
|
},
|
|
1715
1932
|
{
|
|
1716
|
-
"name": "
|
|
1717
|
-
"title": "
|
|
1718
|
-
"description": "
|
|
1933
|
+
"name": "api_vidu_start_end_to_video",
|
|
1934
|
+
"title": "Vidu: Inicio fin a video",
|
|
1935
|
+
"description": "Crear transiciones de video suaves entre fotogramas de inicio y fin definidos con interpolación natural de movimiento y calidad visual consistente.",
|
|
1719
1936
|
"mediaType": "image",
|
|
1720
|
-
"thumbnailVariant": "compareSlider",
|
|
1721
1937
|
"mediaSubtype": "webp",
|
|
1722
|
-
"tags": ["
|
|
1723
|
-
"models": ["
|
|
1724
|
-
"date": "2025-
|
|
1725
|
-
"
|
|
1938
|
+
"tags": ["Video", "API", "FLF2V"],
|
|
1939
|
+
"models": ["Vidu"],
|
|
1940
|
+
"date": "2025-08-23",
|
|
1941
|
+
"tutorialUrl": "",
|
|
1942
|
+
"openSource": false,
|
|
1726
1943
|
"size": 0,
|
|
1727
|
-
"vram": 0
|
|
1944
|
+
"vram": 0,
|
|
1945
|
+
"usage": 85
|
|
1728
1946
|
},
|
|
1729
1947
|
{
|
|
1730
|
-
"name": "
|
|
1731
|
-
"title": "
|
|
1732
|
-
"description": "
|
|
1948
|
+
"name": "api_bytedance_text_to_video",
|
|
1949
|
+
"title": "ByteDance: Texto a Video",
|
|
1950
|
+
"description": "Genera videos de alta calidad directamente desde prompts de texto usando el modelo Seedance de ByteDance. Compatible con múltiples resoluciones y relaciones de aspecto con movimiento natural y calidad cinematográfica.",
|
|
1733
1951
|
"mediaType": "image",
|
|
1734
1952
|
"mediaSubtype": "webp",
|
|
1735
|
-
"tags": ["
|
|
1736
|
-
"models": ["
|
|
1737
|
-
"date": "2025-
|
|
1738
|
-
"
|
|
1953
|
+
"tags": ["Video", "API", "Texto a video"],
|
|
1954
|
+
"models": ["ByteDance"],
|
|
1955
|
+
"date": "2025-10-6",
|
|
1956
|
+
"tutorialUrl": "",
|
|
1957
|
+
"openSource": false,
|
|
1739
1958
|
"size": 0,
|
|
1740
|
-
"vram": 0
|
|
1959
|
+
"vram": 0,
|
|
1960
|
+
"usage": 75
|
|
1741
1961
|
},
|
|
1742
1962
|
{
|
|
1743
|
-
"name": "
|
|
1744
|
-
"title": "
|
|
1745
|
-
"description": "
|
|
1963
|
+
"name": "api_bytedance_image_to_video",
|
|
1964
|
+
"title": "ByteDance: Imagen a Video",
|
|
1965
|
+
"description": "Transforma imágenes estáticas en videos dinámicos usando el modelo Seedance de ByteDance. Analiza la estructura de la imagen y genera movimiento natural con estilo visual consistente y secuencias de video coherentes.",
|
|
1746
1966
|
"mediaType": "image",
|
|
1747
1967
|
"mediaSubtype": "webp",
|
|
1748
|
-
"tags": ["
|
|
1749
|
-
"models": ["
|
|
1750
|
-
"date": "2025-
|
|
1751
|
-
"tutorialUrl": "
|
|
1752
|
-
"
|
|
1968
|
+
"tags": ["Video", "API", "Imagen a video"],
|
|
1969
|
+
"models": ["ByteDance"],
|
|
1970
|
+
"date": "2025-10-6",
|
|
1971
|
+
"tutorialUrl": "",
|
|
1972
|
+
"openSource": false,
|
|
1753
1973
|
"size": 0,
|
|
1754
|
-
"vram": 0
|
|
1974
|
+
"vram": 0,
|
|
1975
|
+
"usage": 2275
|
|
1755
1976
|
},
|
|
1756
1977
|
{
|
|
1757
|
-
"name": "
|
|
1758
|
-
"title": "
|
|
1759
|
-
"description": "
|
|
1978
|
+
"name": "api_bytedance_flf2v",
|
|
1979
|
+
"title": "ByteDance: Inicio-Fin a Video",
|
|
1980
|
+
"description": "Genera transiciones de video cinematográficas entre fotogramas de inicio y fin con movimiento fluido, consistencia de escena y acabado profesional usando el modelo Seedance de ByteDance.",
|
|
1760
1981
|
"mediaType": "image",
|
|
1761
1982
|
"mediaSubtype": "webp",
|
|
1762
|
-
"
|
|
1763
|
-
"
|
|
1764
|
-
"
|
|
1765
|
-
"
|
|
1766
|
-
"
|
|
1767
|
-
"OpenSource": false,
|
|
1983
|
+
"tags": ["Video", "API", "FLF2V"],
|
|
1984
|
+
"models": ["ByteDance"],
|
|
1985
|
+
"date": "2025-10-6",
|
|
1986
|
+
"tutorialUrl": "",
|
|
1987
|
+
"openSource": false,
|
|
1768
1988
|
"size": 0,
|
|
1769
|
-
"vram": 0
|
|
1989
|
+
"vram": 0,
|
|
1990
|
+
"usage": 791
|
|
1770
1991
|
},
|
|
1771
1992
|
{
|
|
1772
|
-
"name": "
|
|
1773
|
-
"title": "
|
|
1774
|
-
"description": "
|
|
1993
|
+
"name": "video_wan2_2_14B_s2v",
|
|
1994
|
+
"title": "Wan2.2-S2V Generación de Video Impulsada por Audio",
|
|
1995
|
+
"description": "Transforma imágenes estáticas y audio en videos dinámicos con sincronización perfecta y generación de nivel por minuto.",
|
|
1775
1996
|
"mediaType": "image",
|
|
1776
1997
|
"mediaSubtype": "webp",
|
|
1777
|
-
"
|
|
1778
|
-
"tags": ["
|
|
1779
|
-
"models": ["
|
|
1780
|
-
"date": "2025-
|
|
1781
|
-
"
|
|
1782
|
-
"
|
|
1783
|
-
"
|
|
1784
|
-
"vram": 0
|
|
1998
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
|
|
1999
|
+
"tags": ["Video"],
|
|
2000
|
+
"models": ["Wan2.2", "Wan"],
|
|
2001
|
+
"date": "2025-08-02",
|
|
2002
|
+
"size": 25254407700,
|
|
2003
|
+
"vram": 25254407700,
|
|
2004
|
+
"usage": 648
|
|
1785
2005
|
},
|
|
1786
2006
|
{
|
|
1787
|
-
"name": "
|
|
1788
|
-
"title": "
|
|
1789
|
-
"description": "
|
|
1790
|
-
"mediaType": "image",
|
|
1791
|
-
"mediaSubtype": "webp",
|
|
1792
|
-
"thumbnailVariant": "compareSlider",
|
|
1793
|
-
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
1794
|
-
"models": ["GPT-Image-1", "OpenAI"],
|
|
1795
|
-
"date": "2025-03-01",
|
|
1796
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
|
|
1797
|
-
"OpenSource": false,
|
|
1798
|
-
"size": 0,
|
|
1799
|
-
"vram": 0
|
|
1800
|
-
},
|
|
1801
|
-
{
|
|
1802
|
-
"name": "api_openai_dall_e_2_t2i",
|
|
1803
|
-
"title": "OpenAI: Texto a imagen Dall-E 2",
|
|
1804
|
-
"description": "Generar imágenes a partir de indicaciones de texto usando la API de OpenAI Dall-E 2.",
|
|
1805
|
-
"mediaType": "image",
|
|
1806
|
-
"mediaSubtype": "webp",
|
|
1807
|
-
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
1808
|
-
"models": ["Dall-E", "OpenAI"],
|
|
1809
|
-
"date": "2025-03-01",
|
|
1810
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
|
|
1811
|
-
"OpenSource": false,
|
|
1812
|
-
"size": 0,
|
|
1813
|
-
"vram": 0
|
|
1814
|
-
},
|
|
1815
|
-
{
|
|
1816
|
-
"name": "api_openai_dall_e_2_inpaint",
|
|
1817
|
-
"title": "OpenAI: Inpaint Dall-E 2",
|
|
1818
|
-
"description": "Editar imágenes usando inpainting con la API de OpenAI Dall-E 2.",
|
|
1819
|
-
"mediaType": "image",
|
|
1820
|
-
"mediaSubtype": "webp",
|
|
1821
|
-
"thumbnailVariant": "compareSlider",
|
|
1822
|
-
"tags": ["Inpaint", "Imagen", "API"],
|
|
1823
|
-
"models": ["Dall-E", "OpenAI"],
|
|
1824
|
-
"date": "2025-03-01",
|
|
1825
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
|
|
1826
|
-
"OpenSource": false,
|
|
1827
|
-
"size": 0,
|
|
1828
|
-
"vram": 0
|
|
1829
|
-
},
|
|
1830
|
-
{
|
|
1831
|
-
"name": "api_openai_dall_e_3_t2i",
|
|
1832
|
-
"title": "OpenAI: Texto a imagen Dall-E 3",
|
|
1833
|
-
"description": "Generar imágenes a partir de indicaciones de texto usando la API de OpenAI Dall-E 3.",
|
|
1834
|
-
"mediaType": "image",
|
|
1835
|
-
"mediaSubtype": "webp",
|
|
1836
|
-
"tags": ["Texto a imagen", "Imagen", "API"],
|
|
1837
|
-
"models": ["Dall-E", "OpenAI"],
|
|
1838
|
-
"date": "2025-03-01",
|
|
1839
|
-
"tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3",
|
|
1840
|
-
"OpenSource": false,
|
|
1841
|
-
"size": 0,
|
|
1842
|
-
"vram": 0
|
|
1843
|
-
}
|
|
1844
|
-
]
|
|
1845
|
-
},
|
|
1846
|
-
{
|
|
1847
|
-
"moduleName": "default",
|
|
1848
|
-
"type": "video",
|
|
1849
|
-
"category": "CLOSED SOURCE MODELS",
|
|
1850
|
-
"icon": "icon-[lucide--film]",
|
|
1851
|
-
"title": "Video API",
|
|
1852
|
-
"templates": [
|
|
1853
|
-
{
|
|
1854
|
-
"name": "api_openai_sora_video",
|
|
1855
|
-
"title": "Sora 2: Texto e Imagen a Video",
|
|
1856
|
-
"description": "Generación de video Sora-2 y Sora-2 Pro de OpenAI con audio sincronizado.",
|
|
1857
|
-
"mediaType": "image",
|
|
1858
|
-
"mediaSubtype": "webp",
|
|
1859
|
-
"tags": ["Imagen a video", "Texto a video", "API"],
|
|
1860
|
-
"models": ["OpenAI"],
|
|
1861
|
-
"date": "2025-10-08",
|
|
1862
|
-
"OpenSource": false,
|
|
1863
|
-
"size": 0,
|
|
1864
|
-
"vram": 0
|
|
1865
|
-
},
|
|
1866
|
-
{
|
|
1867
|
-
"name": "api_ltxv_text_to_video",
|
|
1868
|
-
"title": "LTX-2: Texto a vídeo",
|
|
1869
|
-
"description": "Genera vídeos de alta calidad a partir de indicaciones de texto usando Lightricks LTX-2 con audio sincronizado. Soporta hasta 4K de resolución a 50fps con modos Rápido, Pro y Ultra para diversas necesidades de producción.",
|
|
2007
|
+
"name": "api_ltxv_text_to_video",
|
|
2008
|
+
"title": "LTX-2: Texto a vídeo",
|
|
2009
|
+
"description": "Genera vídeos de alta calidad a partir de indicaciones de texto usando Lightricks LTX-2 con audio sincronizado. Soporta hasta 4K de resolución a 50fps con modos Rápido, Pro y Ultra para diversas necesidades de producción.",
|
|
1870
2010
|
"mediaType": "image",
|
|
1871
2011
|
"mediaSubtype": "webp",
|
|
1872
2012
|
"tags": ["Texto a video", "Video", "API"],
|
|
1873
2013
|
"models": ["LTX-2", "Lightricks"],
|
|
1874
2014
|
"date": "2025-10-28",
|
|
1875
|
-
"
|
|
2015
|
+
"openSource": false,
|
|
1876
2016
|
"size": 0,
|
|
1877
|
-
"vram": 0
|
|
2017
|
+
"vram": 0,
|
|
2018
|
+
"usage": 73
|
|
1878
2019
|
},
|
|
1879
2020
|
{
|
|
1880
2021
|
"name": "api_ltxv_image_to_video",
|
|
@@ -1885,191 +2026,55 @@
|
|
|
1885
2026
|
"tags": ["Imagen a video", "Video", "API"],
|
|
1886
2027
|
"models": ["LTX-2", "Lightricks"],
|
|
1887
2028
|
"date": "2025-10-28",
|
|
1888
|
-
"
|
|
1889
|
-
"size": 0,
|
|
1890
|
-
"vram": 0
|
|
1891
|
-
},
|
|
1892
|
-
{
|
|
1893
|
-
"name": "api_wan_text_to_video",
|
|
1894
|
-
"title": "Wan2.5: Texto a Video",
|
|
1895
|
-
"description": "Genera videos con audio sincronizado, movimiento mejorado y calidad superior.",
|
|
1896
|
-
"mediaType": "image",
|
|
1897
|
-
"mediaSubtype": "webp",
|
|
1898
|
-
"tags": ["Imagen a video", "Video", "API"],
|
|
1899
|
-
"models": ["Wan2.5", "Wan"],
|
|
1900
|
-
"date": "2025-09-27",
|
|
1901
|
-
"tutorialUrl": "",
|
|
1902
|
-
"OpenSource": false,
|
|
1903
|
-
"size": 0,
|
|
1904
|
-
"vram": 0
|
|
1905
|
-
},
|
|
1906
|
-
{
|
|
1907
|
-
"name": "api_wan_image_to_video",
|
|
1908
|
-
"title": "Wan2.5: Imagen a Video",
|
|
1909
|
-
"description": "Transforma imágenes en videos con audio sincronizado, movimiento mejorado y calidad superior.",
|
|
1910
|
-
"mediaType": "image",
|
|
1911
|
-
"mediaSubtype": "webp",
|
|
1912
|
-
"tags": ["Imagen a video", "Video", "API"],
|
|
1913
|
-
"models": ["Wan2.5", "Wan"],
|
|
1914
|
-
"date": "2025-09-27",
|
|
1915
|
-
"tutorialUrl": "",
|
|
1916
|
-
"OpenSource": false,
|
|
2029
|
+
"openSource": false,
|
|
1917
2030
|
"size": 0,
|
|
1918
|
-
"vram": 0
|
|
1919
|
-
|
|
1920
|
-
{
|
|
1921
|
-
"name": "api_kling_i2v",
|
|
1922
|
-
"title": "Kling: Imagen a video",
|
|
1923
|
-
"description": "Generar videos con excelente adherencia a las indicaciones para acciones, expresiones y movimientos de cámara usando Kling.",
|
|
1924
|
-
"mediaType": "image",
|
|
1925
|
-
"mediaSubtype": "webp",
|
|
1926
|
-
"tags": ["Imagen a video", "Video", "API"],
|
|
1927
|
-
"models": ["Kling"],
|
|
1928
|
-
"date": "2025-03-01",
|
|
1929
|
-
"tutorialUrl": "",
|
|
1930
|
-
"OpenSource": false,
|
|
1931
|
-
"size": 0,
|
|
1932
|
-
"vram": 0
|
|
1933
|
-
},
|
|
1934
|
-
{
|
|
1935
|
-
"name": "api_kling_effects",
|
|
1936
|
-
"title": "Kling: Efectos de video",
|
|
1937
|
-
"description": "Generar videos dinámicos aplicando efectos visuales a imágenes usando Kling.",
|
|
1938
|
-
"mediaType": "image",
|
|
1939
|
-
"mediaSubtype": "webp",
|
|
1940
|
-
"tags": ["Video", "API"],
|
|
1941
|
-
"models": ["Kling"],
|
|
1942
|
-
"date": "2025-03-01",
|
|
1943
|
-
"tutorialUrl": "",
|
|
1944
|
-
"OpenSource": false,
|
|
1945
|
-
"size": 0,
|
|
1946
|
-
"vram": 0
|
|
2031
|
+
"vram": 0,
|
|
2032
|
+
"usage": 448
|
|
1947
2033
|
},
|
|
1948
2034
|
{
|
|
1949
|
-
"name": "
|
|
1950
|
-
"title": "
|
|
1951
|
-
"description": "Generar videos
|
|
2035
|
+
"name": "api_hailuo_minimax_video",
|
|
2036
|
+
"title": "MiniMax: Video",
|
|
2037
|
+
"description": "Generar videos de alta calidad a partir de indicaciones de texto con control opcional del primer fotograma usando el modelo MiniMax Hailuo-02. Soporta múltiples resoluciones (768P/1080P) y duraciones (6/10s) con optimización inteligente de indicaciones.",
|
|
1952
2038
|
"mediaType": "image",
|
|
1953
2039
|
"mediaSubtype": "webp",
|
|
1954
|
-
"tags": ["
|
|
1955
|
-
"models": ["
|
|
2040
|
+
"tags": ["Texto a video", "Video", "API"],
|
|
2041
|
+
"models": ["MiniMax"],
|
|
1956
2042
|
"date": "2025-03-01",
|
|
1957
2043
|
"tutorialUrl": "",
|
|
1958
|
-
"
|
|
2044
|
+
"openSource": false,
|
|
1959
2045
|
"size": 0,
|
|
1960
|
-
"vram": 0
|
|
2046
|
+
"vram": 0,
|
|
2047
|
+
"usage": 9
|
|
1961
2048
|
},
|
|
1962
2049
|
{
|
|
1963
|
-
"name": "
|
|
1964
|
-
"title": "
|
|
1965
|
-
"description": "Generar videos
|
|
2050
|
+
"name": "api_hailuo_minimax_t2v",
|
|
2051
|
+
"title": "MiniMax: Texto a video",
|
|
2052
|
+
"description": "Generar videos de alta calidad directamente a partir de indicaciones de texto. Explorar las capacidades avanzadas de IA de MiniMax para crear narrativas visuales diversas con efectos CGI profesionales y elementos estilísticos que den vida a sus descripciones.",
|
|
1966
2053
|
"mediaType": "image",
|
|
1967
2054
|
"mediaSubtype": "webp",
|
|
1968
2055
|
"tags": ["Texto a video", "Video", "API"],
|
|
1969
|
-
"models": ["
|
|
1970
|
-
"date": "2025-
|
|
2056
|
+
"models": ["MiniMax"],
|
|
2057
|
+
"date": "2025-03-01",
|
|
1971
2058
|
"tutorialUrl": "",
|
|
1972
|
-
"
|
|
2059
|
+
"openSource": false,
|
|
1973
2060
|
"size": 0,
|
|
1974
|
-
"vram": 0
|
|
2061
|
+
"vram": 0,
|
|
2062
|
+
"usage": 1
|
|
1975
2063
|
},
|
|
1976
2064
|
{
|
|
1977
|
-
"name": "
|
|
1978
|
-
"title": "
|
|
1979
|
-
"description": "
|
|
2065
|
+
"name": "api_hailuo_minimax_i2v",
|
|
2066
|
+
"title": "MiniMax: Imagen a video",
|
|
2067
|
+
"description": "Generar videos refinados a partir de imágenes y texto con integración CGI usando MiniMax.",
|
|
1980
2068
|
"mediaType": "image",
|
|
1981
2069
|
"mediaSubtype": "webp",
|
|
1982
2070
|
"tags": ["Imagen a video", "Video", "API"],
|
|
1983
|
-
"models": ["
|
|
1984
|
-
"date": "2025-
|
|
1985
|
-
"tutorialUrl": "",
|
|
1986
|
-
"OpenSource": false,
|
|
1987
|
-
"size": 0,
|
|
1988
|
-
"vram": 0
|
|
1989
|
-
},
|
|
1990
|
-
{
|
|
1991
|
-
"name": "api_vidu_reference_to_video",
|
|
1992
|
-
"title": "Vidu: Referencia a video",
|
|
1993
|
-
"description": "Generar videos con sujetos consistentes usando múltiples imágenes de referencia (hasta 7) para continuidad de personaje y estilo a lo largo de la secuencia de video.",
|
|
1994
|
-
"mediaType": "image",
|
|
1995
|
-
"mediaSubtype": "webp",
|
|
1996
|
-
"tags": ["Video", "Imagen a video", "API"],
|
|
1997
|
-
"models": ["Vidu"],
|
|
1998
|
-
"date": "2025-08-23",
|
|
1999
|
-
"tutorialUrl": "",
|
|
2000
|
-
"OpenSource": false,
|
|
2001
|
-
"size": 0,
|
|
2002
|
-
"vram": 0
|
|
2003
|
-
},
|
|
2004
|
-
{
|
|
2005
|
-
"name": "api_vidu_start_end_to_video",
|
|
2006
|
-
"title": "Vidu: Inicio fin a video",
|
|
2007
|
-
"description": "Crear transiciones de video suaves entre fotogramas de inicio y fin definidos con interpolación natural de movimiento y calidad visual consistente.",
|
|
2008
|
-
"mediaType": "image",
|
|
2009
|
-
"mediaSubtype": "webp",
|
|
2010
|
-
"tags": ["Video", "API", "FLF2V"],
|
|
2011
|
-
"models": ["Vidu"],
|
|
2012
|
-
"date": "2025-08-23",
|
|
2013
|
-
"tutorialUrl": "",
|
|
2014
|
-
"OpenSource": false,
|
|
2015
|
-
"size": 0,
|
|
2016
|
-
"vram": 0
|
|
2017
|
-
},
|
|
2018
|
-
{
|
|
2019
|
-
"name": "api_bytedance_text_to_video",
|
|
2020
|
-
"title": "ByteDance: Texto a Video",
|
|
2021
|
-
"description": "Genera videos de alta calidad directamente desde prompts de texto usando el modelo Seedance de ByteDance. Compatible con múltiples resoluciones y relaciones de aspecto con movimiento natural y calidad cinematográfica.",
|
|
2022
|
-
"mediaType": "image",
|
|
2023
|
-
"mediaSubtype": "webp",
|
|
2024
|
-
"tags": ["Video", "API", "Texto a video"],
|
|
2025
|
-
"models": ["ByteDance"],
|
|
2026
|
-
"date": "2025-10-6",
|
|
2027
|
-
"tutorialUrl": "",
|
|
2028
|
-
"OpenSource": false,
|
|
2029
|
-
"size": 0,
|
|
2030
|
-
"vram": 0
|
|
2031
|
-
},
|
|
2032
|
-
{
|
|
2033
|
-
"name": "api_bytedance_image_to_video",
|
|
2034
|
-
"title": "ByteDance: Imagen a Video",
|
|
2035
|
-
"description": "Transforma imágenes estáticas en videos dinámicos usando el modelo Seedance de ByteDance. Analiza la estructura de la imagen y genera movimiento natural con estilo visual consistente y secuencias de video coherentes.",
|
|
2036
|
-
"mediaType": "image",
|
|
2037
|
-
"mediaSubtype": "webp",
|
|
2038
|
-
"tags": ["Video", "API", "Imagen a video"],
|
|
2039
|
-
"models": ["ByteDance"],
|
|
2040
|
-
"date": "2025-10-6",
|
|
2041
|
-
"tutorialUrl": "",
|
|
2042
|
-
"OpenSource": false,
|
|
2043
|
-
"size": 0,
|
|
2044
|
-
"vram": 0
|
|
2045
|
-
},
|
|
2046
|
-
{
|
|
2047
|
-
"name": "api_bytedance_flf2v",
|
|
2048
|
-
"title": "ByteDance: Inicio-Fin a Video",
|
|
2049
|
-
"description": "Genera transiciones de video cinematográficas entre fotogramas de inicio y fin con movimiento fluido, consistencia de escena y acabado profesional usando el modelo Seedance de ByteDance.",
|
|
2050
|
-
"mediaType": "image",
|
|
2051
|
-
"mediaSubtype": "webp",
|
|
2052
|
-
"tags": ["Video", "API", "FLF2V"],
|
|
2053
|
-
"models": ["ByteDance"],
|
|
2054
|
-
"date": "2025-10-6",
|
|
2071
|
+
"models": ["MiniMax"],
|
|
2072
|
+
"date": "2025-03-01",
|
|
2055
2073
|
"tutorialUrl": "",
|
|
2056
|
-
"
|
|
2057
|
-
"size": 0,
|
|
2058
|
-
"vram": 0
|
|
2059
|
-
},
|
|
2060
|
-
{
|
|
2061
|
-
"name": "api_topaz_video_enhance",
|
|
2062
|
-
"title": "Mejora de video Topaz",
|
|
2063
|
-
"description": "Mejora vídeos con Topaz AI. Permite aumentar la resolución usando el modelo Starlight (Astra) Fast e interpolar fotogramas con el modelo apo-8.",
|
|
2064
|
-
"mediaType": "image",
|
|
2065
|
-
"mediaSubtype": "webp",
|
|
2066
|
-
"thumbnailVariant": "compareSlider",
|
|
2067
|
-
"tags": ["Video", "API", "Mejorar"],
|
|
2068
|
-
"models": ["Topaz"],
|
|
2069
|
-
"date": "2025-11-25",
|
|
2070
|
-
"OpenSource": false,
|
|
2074
|
+
"openSource": false,
|
|
2071
2075
|
"size": 0,
|
|
2072
|
-
"vram": 0
|
|
2076
|
+
"vram": 0,
|
|
2077
|
+
"usage": 39
|
|
2073
2078
|
},
|
|
2074
2079
|
{
|
|
2075
2080
|
"name": "api_luma_i2v",
|
|
@@ -2081,9 +2086,10 @@
|
|
|
2081
2086
|
"models": ["Luma"],
|
|
2082
2087
|
"date": "2025-03-01",
|
|
2083
2088
|
"tutorialUrl": "",
|
|
2084
|
-
"
|
|
2089
|
+
"openSource": false,
|
|
2085
2090
|
"size": 0,
|
|
2086
|
-
"vram": 0
|
|
2091
|
+
"vram": 0,
|
|
2092
|
+
"usage": 56
|
|
2087
2093
|
},
|
|
2088
2094
|
{
|
|
2089
2095
|
"name": "api_luma_t2v",
|
|
@@ -2095,9 +2101,10 @@
|
|
|
2095
2101
|
"models": ["Luma"],
|
|
2096
2102
|
"date": "2025-03-01",
|
|
2097
2103
|
"tutorialUrl": "",
|
|
2098
|
-
"
|
|
2104
|
+
"openSource": false,
|
|
2099
2105
|
"size": 0,
|
|
2100
|
-
"vram": 0
|
|
2106
|
+
"vram": 0,
|
|
2107
|
+
"usage": 3
|
|
2101
2108
|
},
|
|
2102
2109
|
{
|
|
2103
2110
|
"name": "api_moonvalley_text_to_video",
|
|
@@ -2109,9 +2116,10 @@
|
|
|
2109
2116
|
"models": ["Moonvalley"],
|
|
2110
2117
|
"date": "2025-03-01",
|
|
2111
2118
|
"tutorialUrl": "",
|
|
2112
|
-
"
|
|
2119
|
+
"openSource": false,
|
|
2113
2120
|
"size": 0,
|
|
2114
|
-
"vram": 0
|
|
2121
|
+
"vram": 0,
|
|
2122
|
+
"usage": 4
|
|
2115
2123
|
},
|
|
2116
2124
|
{
|
|
2117
2125
|
"name": "api_moonvalley_image_to_video",
|
|
@@ -2123,9 +2131,10 @@
|
|
|
2123
2131
|
"models": ["Moonvalley"],
|
|
2124
2132
|
"date": "2025-03-01",
|
|
2125
2133
|
"tutorialUrl": "",
|
|
2126
|
-
"
|
|
2134
|
+
"openSource": false,
|
|
2127
2135
|
"size": 0,
|
|
2128
|
-
"vram": 0
|
|
2136
|
+
"vram": 0,
|
|
2137
|
+
"usage": 29
|
|
2129
2138
|
},
|
|
2130
2139
|
{
|
|
2131
2140
|
"name": "api_moonvalley_video_to_video_motion_transfer",
|
|
@@ -2138,9 +2147,10 @@
|
|
|
2138
2147
|
"models": ["Moonvalley"],
|
|
2139
2148
|
"date": "2025-03-01",
|
|
2140
2149
|
"tutorialUrl": "",
|
|
2141
|
-
"
|
|
2150
|
+
"openSource": false,
|
|
2142
2151
|
"size": 0,
|
|
2143
|
-
"vram": 0
|
|
2152
|
+
"vram": 0,
|
|
2153
|
+
"usage": 22
|
|
2144
2154
|
},
|
|
2145
2155
|
{
|
|
2146
2156
|
"name": "api_moonvalley_video_to_video_pose_control",
|
|
@@ -2153,65 +2163,25 @@
|
|
|
2153
2163
|
"models": ["Moonvalley"],
|
|
2154
2164
|
"date": "2025-03-01",
|
|
2155
2165
|
"tutorialUrl": "",
|
|
2156
|
-
"
|
|
2166
|
+
"openSource": false,
|
|
2157
2167
|
"size": 0,
|
|
2158
|
-
"vram": 0
|
|
2168
|
+
"vram": 0,
|
|
2169
|
+
"usage": 11
|
|
2159
2170
|
},
|
|
2160
2171
|
{
|
|
2161
|
-
"name": "
|
|
2162
|
-
"title": "
|
|
2163
|
-
"description": "Generar videos
|
|
2172
|
+
"name": "api_pixverse_i2v",
|
|
2173
|
+
"title": "PixVerse: Imagen a video",
|
|
2174
|
+
"description": "Generar videos dinámicos a partir de imágenes estáticas con movimiento y efectos usando PixVerse.",
|
|
2164
2175
|
"mediaType": "image",
|
|
2165
2176
|
"mediaSubtype": "webp",
|
|
2166
|
-
"tags": ["
|
|
2167
|
-
"models": ["
|
|
2177
|
+
"tags": ["Imagen a video", "Video", "API"],
|
|
2178
|
+
"models": ["PixVerse"],
|
|
2168
2179
|
"date": "2025-03-01",
|
|
2169
2180
|
"tutorialUrl": "",
|
|
2170
|
-
"
|
|
2181
|
+
"openSource": false,
|
|
2171
2182
|
"size": 0,
|
|
2172
|
-
"vram": 0
|
|
2173
|
-
|
|
2174
|
-
{
|
|
2175
|
-
"name": "api_hailuo_minimax_t2v",
|
|
2176
|
-
"title": "MiniMax: Texto a video",
|
|
2177
|
-
"description": "Generar videos de alta calidad directamente a partir de indicaciones de texto. Explorar las capacidades avanzadas de IA de MiniMax para crear narrativas visuales diversas con efectos CGI profesionales y elementos estilísticos que den vida a sus descripciones.",
|
|
2178
|
-
"mediaType": "image",
|
|
2179
|
-
"mediaSubtype": "webp",
|
|
2180
|
-
"tags": ["Texto a video", "Video", "API"],
|
|
2181
|
-
"models": ["MiniMax"],
|
|
2182
|
-
"date": "2025-03-01",
|
|
2183
|
-
"tutorialUrl": "",
|
|
2184
|
-
"OpenSource": false,
|
|
2185
|
-
"size": 0,
|
|
2186
|
-
"vram": 0
|
|
2187
|
-
},
|
|
2188
|
-
{
|
|
2189
|
-
"name": "api_hailuo_minimax_i2v",
|
|
2190
|
-
"title": "MiniMax: Imagen a video",
|
|
2191
|
-
"description": "Generar videos refinados a partir de imágenes y texto con integración CGI usando MiniMax.",
|
|
2192
|
-
"mediaType": "image",
|
|
2193
|
-
"mediaSubtype": "webp",
|
|
2194
|
-
"tags": ["Imagen a video", "Video", "API"],
|
|
2195
|
-
"models": ["MiniMax"],
|
|
2196
|
-
"date": "2025-03-01",
|
|
2197
|
-
"tutorialUrl": "",
|
|
2198
|
-
"OpenSource": false,
|
|
2199
|
-
"size": 0,
|
|
2200
|
-
"vram": 0
|
|
2201
|
-
},
|
|
2202
|
-
{
|
|
2203
|
-
"name": "api_pixverse_i2v",
|
|
2204
|
-
"title": "PixVerse: Imagen a video",
|
|
2205
|
-
"description": "Generar videos dinámicos a partir de imágenes estáticas con movimiento y efectos usando PixVerse.",
|
|
2206
|
-
"mediaType": "image",
|
|
2207
|
-
"mediaSubtype": "webp",
|
|
2208
|
-
"tags": ["Imagen a video", "Video", "API"],
|
|
2209
|
-
"models": ["PixVerse"],
|
|
2210
|
-
"date": "2025-03-01",
|
|
2211
|
-
"tutorialUrl": "",
|
|
2212
|
-
"OpenSource": false,
|
|
2213
|
-
"size": 0,
|
|
2214
|
-
"vram": 0
|
|
2183
|
+
"vram": 0,
|
|
2184
|
+
"usage": 25
|
|
2215
2185
|
},
|
|
2216
2186
|
{
|
|
2217
2187
|
"name": "api_pixverse_template_i2v",
|
|
@@ -2223,9 +2193,10 @@
|
|
|
2223
2193
|
"models": ["PixVerse"],
|
|
2224
2194
|
"date": "2025-03-01",
|
|
2225
2195
|
"tutorialUrl": "",
|
|
2226
|
-
"
|
|
2196
|
+
"openSource": false,
|
|
2227
2197
|
"size": 0,
|
|
2228
|
-
"vram": 0
|
|
2198
|
+
"vram": 0,
|
|
2199
|
+
"usage": 16
|
|
2229
2200
|
},
|
|
2230
2201
|
{
|
|
2231
2202
|
"name": "api_pixverse_t2v",
|
|
@@ -2237,9 +2208,10 @@
|
|
|
2237
2208
|
"models": ["PixVerse"],
|
|
2238
2209
|
"date": "2025-03-01",
|
|
2239
2210
|
"tutorialUrl": "",
|
|
2240
|
-
"
|
|
2211
|
+
"openSource": false,
|
|
2241
2212
|
"size": 0,
|
|
2242
|
-
"vram": 0
|
|
2213
|
+
"vram": 0,
|
|
2214
|
+
"usage": 3
|
|
2243
2215
|
},
|
|
2244
2216
|
{
|
|
2245
2217
|
"name": "api_runway_gen3a_turbo_image_to_video",
|
|
@@ -2251,9 +2223,10 @@
|
|
|
2251
2223
|
"models": ["Runway"],
|
|
2252
2224
|
"date": "2025-03-01",
|
|
2253
2225
|
"tutorialUrl": "",
|
|
2254
|
-
"
|
|
2226
|
+
"openSource": false,
|
|
2255
2227
|
"size": 0,
|
|
2256
|
-
"vram": 0
|
|
2228
|
+
"vram": 0,
|
|
2229
|
+
"usage": 38
|
|
2257
2230
|
},
|
|
2258
2231
|
{
|
|
2259
2232
|
"name": "api_runway_gen4_turo_image_to_video",
|
|
@@ -2264,90 +2237,605 @@
|
|
|
2264
2237
|
"tags": ["Imagen a video", "Video", "API"],
|
|
2265
2238
|
"models": ["Runway"],
|
|
2266
2239
|
"date": "2025-03-01",
|
|
2267
|
-
"tutorialUrl": "",
|
|
2268
|
-
"
|
|
2269
|
-
"size": 0,
|
|
2270
|
-
"vram": 0
|
|
2240
|
+
"tutorialUrl": "",
|
|
2241
|
+
"openSource": false,
|
|
2242
|
+
"size": 0,
|
|
2243
|
+
"vram": 0,
|
|
2244
|
+
"usage": 97
|
|
2245
|
+
},
|
|
2246
|
+
{
|
|
2247
|
+
"name": "api_runway_first_last_frame",
|
|
2248
|
+
"title": "Runway: Primer último fotograma a video",
|
|
2249
|
+
"description": "Generar transiciones de video suaves entre dos fotogramas clave con precisión de Runway.",
|
|
2250
|
+
"mediaType": "image",
|
|
2251
|
+
"mediaSubtype": "webp",
|
|
2252
|
+
"tags": ["Video", "API", "FLF2V"],
|
|
2253
|
+
"models": ["Runway"],
|
|
2254
|
+
"date": "2025-03-01",
|
|
2255
|
+
"tutorialUrl": "",
|
|
2256
|
+
"openSource": false,
|
|
2257
|
+
"size": 0,
|
|
2258
|
+
"vram": 0,
|
|
2259
|
+
"usage": 97
|
|
2260
|
+
},
|
|
2261
|
+
{
|
|
2262
|
+
"name": "video_wan2_2_14B_fun_inpaint",
|
|
2263
|
+
"title": "Wan 2.2 14B Fun Inpainting",
|
|
2264
|
+
"description": "Genera videos a partir de fotogramas de inicio y fin usando Wan 2.2 Fun Inp.",
|
|
2265
|
+
"mediaType": "image",
|
|
2266
|
+
"mediaSubtype": "webp",
|
|
2267
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
|
|
2268
|
+
"tags": ["FLF2V", "Video"],
|
|
2269
|
+
"models": ["Wan2.2", "Wan"],
|
|
2270
|
+
"date": "2025-08-12",
|
|
2271
|
+
"size": 38031935406,
|
|
2272
|
+
"vram": 38031935406,
|
|
2273
|
+
"usage": 547
|
|
2274
|
+
},
|
|
2275
|
+
{
|
|
2276
|
+
"name": "video_wan2_2_14B_fun_control",
|
|
2277
|
+
"title": "Control Fun Wan 2.2 14B",
|
|
2278
|
+
"description": "Generar videos guiados por controles de pose, profundidad y borde usando Wan 2.2 Fun Control.",
|
|
2279
|
+
"mediaType": "image",
|
|
2280
|
+
"mediaSubtype": "webp",
|
|
2281
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
|
|
2282
|
+
"tags": ["Video a video", "Video"],
|
|
2283
|
+
"models": ["Wan2.2", "Wan"],
|
|
2284
|
+
"date": "2025-08-12",
|
|
2285
|
+
"size": 38031935406,
|
|
2286
|
+
"vram": 38031935406,
|
|
2287
|
+
"usage": 305
|
|
2288
|
+
},
|
|
2289
|
+
{
|
|
2290
|
+
"name": "video_wan2_2_14B_fun_camera",
|
|
2291
|
+
"title": "Control de cámara Fun Wan 2.2 14B",
|
|
2292
|
+
"description": "Generar videos con controles de movimiento de cámara incluyendo panorámica, zoom y rotación usando Wan 2.2 Fun Camera Control.",
|
|
2293
|
+
"mediaType": "image",
|
|
2294
|
+
"mediaSubtype": "webp",
|
|
2295
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
|
|
2296
|
+
"tags": ["Video a video", "Video"],
|
|
2297
|
+
"models": ["Wan2.2", "Wan"],
|
|
2298
|
+
"date": "2025-08-17",
|
|
2299
|
+
"size": 40050570035,
|
|
2300
|
+
"vram": 40050570035,
|
|
2301
|
+
"usage": 228
|
|
2302
|
+
},
|
|
2303
|
+
{
|
|
2304
|
+
"name": "video_wan2_2_5B_ti2v",
|
|
2305
|
+
"title": "Generación de video Wan 2.2 5B",
|
|
2306
|
+
"description": "Generar videos a partir de texto o imágenes usando el modelo híbrido Wan 2.2 5B",
|
|
2307
|
+
"mediaType": "image",
|
|
2308
|
+
"mediaSubtype": "webp",
|
|
2309
|
+
"tags": ["Texto a video", "Video"],
|
|
2310
|
+
"models": ["Wan2.2", "Wan"],
|
|
2311
|
+
"date": "2025-07-29",
|
|
2312
|
+
"size": 18146236826,
|
|
2313
|
+
"vram": 18146236826,
|
|
2314
|
+
"usage": 392
|
|
2315
|
+
},
|
|
2316
|
+
{
|
|
2317
|
+
"name": "video_humo",
|
|
2318
|
+
"title": "HuMo Generación de Video",
|
|
2319
|
+
"description": "Genera videos basados en audio, imagen y texto, manteniendo la sincronización labial del personaje.",
|
|
2320
|
+
"mediaType": "image",
|
|
2321
|
+
"mediaSubtype": "webp",
|
|
2322
|
+
"tags": ["Video"],
|
|
2323
|
+
"models": ["HuMo"],
|
|
2324
|
+
"date": "2025-09-21",
|
|
2325
|
+
"size": 27895812588,
|
|
2326
|
+
"vram": 27895812588,
|
|
2327
|
+
"usage": 424
|
|
2328
|
+
},
|
|
2329
|
+
{
|
|
2330
|
+
"name": "video_wan2_2_5B_fun_inpaint",
|
|
2331
|
+
"title": "Wan 2.2 5B Fun Inpainting",
|
|
2332
|
+
"description": "Inpainting de video eficiente desde fotogramas de inicio y fin. El modelo 5B ofrece iteraciones rápidas para probar flujos de trabajo.",
|
|
2333
|
+
"mediaType": "image",
|
|
2334
|
+
"mediaSubtype": "webp",
|
|
2335
|
+
"tags": ["Texto a video", "Video"],
|
|
2336
|
+
"models": ["Wan2.2", "Wan"],
|
|
2337
|
+
"date": "2025-07-29",
|
|
2338
|
+
"size": 18146236826,
|
|
2339
|
+
"vram": 18146236826,
|
|
2340
|
+
"usage": 53
|
|
2341
|
+
},
|
|
2342
|
+
{
|
|
2343
|
+
"name": "video_wan2_2_5B_fun_control",
|
|
2344
|
+
"title": "Wan 2.2 5B Fun Control",
|
|
2345
|
+
"description": "Control de video multicondición con guía de pose, profundidad y bordes. Tamaño compacto de 5B para desarrollo experimental.",
|
|
2346
|
+
"mediaType": "image",
|
|
2347
|
+
"mediaSubtype": "webp",
|
|
2348
|
+
"tags": ["Texto a video", "Video"],
|
|
2349
|
+
"models": ["Wan2.2", "Wan"],
|
|
2350
|
+
"date": "2025-07-29",
|
|
2351
|
+
"size": 18146236826,
|
|
2352
|
+
"vram": 18146236826,
|
|
2353
|
+
"usage": 110
|
|
2354
|
+
},
|
|
2355
|
+
{
|
|
2356
|
+
"name": "video_wan_vace_14B_t2v",
|
|
2357
|
+
"title": "Texto a video Wan VACE",
|
|
2358
|
+
"description": "Transformar descripciones de texto en videos de alta calidad. Soporta tanto 480p como 720p con el modelo VACE-14B.",
|
|
2359
|
+
"mediaType": "image",
|
|
2360
|
+
"mediaSubtype": "webp",
|
|
2361
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2362
|
+
"tags": ["Texto a video", "Video"],
|
|
2363
|
+
"models": ["Wan2.1", "Wan"],
|
|
2364
|
+
"date": "2025-05-21",
|
|
2365
|
+
"size": 57756572713,
|
|
2366
|
+
"vram": 57756572713,
|
|
2367
|
+
"usage": 162
|
|
2368
|
+
},
|
|
2369
|
+
{
|
|
2370
|
+
"name": "video_wan_vace_14B_ref2v",
|
|
2371
|
+
"title": "Referencia a video Wan VACE",
|
|
2372
|
+
"description": "Crear videos que coincidan con el estilo y contenido de una imagen de referencia. Perfecto para generación de video consistente en estilo.",
|
|
2373
|
+
"mediaType": "image",
|
|
2374
|
+
"mediaSubtype": "webp",
|
|
2375
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2376
|
+
"tags": ["Video", "Imagen a video"],
|
|
2377
|
+
"models": ["Wan2.1", "Wan"],
|
|
2378
|
+
"date": "2025-05-21",
|
|
2379
|
+
"size": 57756572713,
|
|
2380
|
+
"vram": 57756572713,
|
|
2381
|
+
"usage": 171
|
|
2382
|
+
},
|
|
2383
|
+
{
|
|
2384
|
+
"name": "video_wan_vace_14B_v2v",
|
|
2385
|
+
"title": "Control de video Wan VACE",
|
|
2386
|
+
"description": "Generar videos controlando videos de entrada e imágenes de referencia usando Wan VACE.",
|
|
2387
|
+
"mediaType": "image",
|
|
2388
|
+
"mediaSubtype": "webp",
|
|
2389
|
+
"thumbnailVariant": "compareSlider",
|
|
2390
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2391
|
+
"tags": ["Video a video", "Video"],
|
|
2392
|
+
"models": ["Wan2.1", "Wan"],
|
|
2393
|
+
"date": "2025-05-21",
|
|
2394
|
+
"size": 57756572713,
|
|
2395
|
+
"vram": 57756572713,
|
|
2396
|
+
"usage": 306
|
|
2397
|
+
},
|
|
2398
|
+
{
|
|
2399
|
+
"name": "video_wan_vace_outpainting",
|
|
2400
|
+
"title": "Outpainting Wan VACE",
|
|
2401
|
+
"description": "Generar videos extendidos expandiendo el tamaño de video usando outpainting de Wan VACE.",
|
|
2402
|
+
"mediaType": "image",
|
|
2403
|
+
"mediaSubtype": "webp",
|
|
2404
|
+
"thumbnailVariant": "compareSlider",
|
|
2405
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2406
|
+
"tags": ["Outpaint", "Video"],
|
|
2407
|
+
"models": ["Wan2.1", "Wan"],
|
|
2408
|
+
"date": "2025-05-21",
|
|
2409
|
+
"size": 57756572713,
|
|
2410
|
+
"vram": 57756572713,
|
|
2411
|
+
"usage": 117
|
|
2412
|
+
},
|
|
2413
|
+
{
|
|
2414
|
+
"name": "video_wan_vace_flf2v",
|
|
2415
|
+
"title": "Primer-Último fotograma Wan VACE",
|
|
2416
|
+
"description": "Generar transiciones de video suaves definiendo fotogramas de inicio y fin. Soporta secuencias de fotogramas clave personalizadas.",
|
|
2417
|
+
"mediaType": "image",
|
|
2418
|
+
"mediaSubtype": "webp",
|
|
2419
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2420
|
+
"tags": ["FLF2V", "Video"],
|
|
2421
|
+
"models": ["Wan2.1", "Wan"],
|
|
2422
|
+
"date": "2025-05-21",
|
|
2423
|
+
"size": 57756572713,
|
|
2424
|
+
"vram": 57756572713,
|
|
2425
|
+
"usage": 136
|
|
2426
|
+
},
|
|
2427
|
+
{
|
|
2428
|
+
"name": "video_wan_vace_inpainting",
|
|
2429
|
+
"title": "Inpainting Wan VACE",
|
|
2430
|
+
"description": "Editar regiones específicas en videos mientras se preserva el contenido circundante. Excelente para eliminación o reemplazo de objetos.",
|
|
2431
|
+
"mediaType": "image",
|
|
2432
|
+
"mediaSubtype": "webp",
|
|
2433
|
+
"thumbnailVariant": "compareSlider",
|
|
2434
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
|
|
2435
|
+
"tags": ["Inpaint", "Video"],
|
|
2436
|
+
"models": ["Wan2.1", "Wan"],
|
|
2437
|
+
"date": "2025-05-21",
|
|
2438
|
+
"size": 57756572713,
|
|
2439
|
+
"vram": 57756572713,
|
|
2440
|
+
"usage": 261
|
|
2441
|
+
},
|
|
2442
|
+
{
|
|
2443
|
+
"name": "video_wan2.1_alpha_t2v_14B",
|
|
2444
|
+
"title": "Wan2.1 Alpha Texto a Video",
|
|
2445
|
+
"description": "Genera videos desde texto con soporte de canal alfa para fondos transparentes y objetos semitransparentes.",
|
|
2446
|
+
"mediaType": "image",
|
|
2447
|
+
"mediaSubtype": "webp",
|
|
2448
|
+
"tags": ["Texto a video", "Video"],
|
|
2449
|
+
"models": ["Wan2.1", "Wan-Move", "Motion Control", "Wan"],
|
|
2450
|
+
"date": "2025-10-06",
|
|
2451
|
+
"size": 22494891213,
|
|
2452
|
+
"vram": 22494891213,
|
|
2453
|
+
"usage": 162
|
|
2454
|
+
},
|
|
2455
|
+
{
|
|
2456
|
+
"name": "video_wanmove_480p",
|
|
2457
|
+
"title": "Wan-Move Imagen a vídeo con control de movimiento",
|
|
2458
|
+
"description": "Genera videos a partir de una sola imagen usando Wan-Move, con un control de movimiento preciso a nivel de punto mediante guía de trayectorias.",
|
|
2459
|
+
"mediaType": "image",
|
|
2460
|
+
"mediaSubtype": "webp",
|
|
2461
|
+
"tags": ["Imagen a video", "Control dinámico", "Video"],
|
|
2462
|
+
"models": ["Wan2.1", "Wan"],
|
|
2463
|
+
"date": "2025-12-15",
|
|
2464
|
+
"size": 25420837683,
|
|
2465
|
+
"vram": 25420837683,
|
|
2466
|
+
"usage": 176
|
|
2467
|
+
},
|
|
2468
|
+
{
|
|
2469
|
+
"name": "video_wanmove_480p_hallucination",
|
|
2470
|
+
"title": "WanMove: Ilusión de ensueño",
|
|
2471
|
+
"description": "Genera imágenes dinámicas desde trayectorias y efectos de video oníricos con WanMove",
|
|
2472
|
+
"mediaType": "image",
|
|
2473
|
+
"mediaSubtype": "webp",
|
|
2474
|
+
"tags": ["Imagen a video", "Control dinámico", "Video"],
|
|
2475
|
+
"models": ["Wan2.1", "Wan"],
|
|
2476
|
+
"date": "2025-12-15",
|
|
2477
|
+
"size": 25420837683,
|
|
2478
|
+
"vram": 25420837683,
|
|
2479
|
+
"usage": 176,
|
|
2480
|
+
"requiresCustomNodes": ["comfyui_fill-nodes"]
|
|
2481
|
+
},
|
|
2482
|
+
{
|
|
2483
|
+
"name": "video_wan_ati",
|
|
2484
|
+
"title": "Wan ATI",
|
|
2485
|
+
"description": "Generación de video controlada por trayectoria.",
|
|
2486
|
+
"mediaType": "image",
|
|
2487
|
+
"mediaSubtype": "webp",
|
|
2488
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2489
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
|
|
2490
|
+
"tags": ["Video"],
|
|
2491
|
+
"models": ["Wan2.1", "Wan"],
|
|
2492
|
+
"date": "2025-05-21",
|
|
2493
|
+
"size": 25393994138,
|
|
2494
|
+
"vram": 25393994138,
|
|
2495
|
+
"usage": 81
|
|
2496
|
+
},
|
|
2497
|
+
{
|
|
2498
|
+
"name": "video_wan2.1_fun_camera_v1.1_1.3B",
|
|
2499
|
+
"title": "Cámara Fun 1.3B Wan 2.1",
|
|
2500
|
+
"description": "Generar videos dinámicos con movimientos cinematográficos de cámara usando el modelo Wan 2.1 Fun Camera 1.3B.",
|
|
2501
|
+
"mediaType": "image",
|
|
2502
|
+
"mediaSubtype": "webp",
|
|
2503
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
2504
|
+
"tags": ["Video"],
|
|
2505
|
+
"models": ["Wan2.1", "Wan"],
|
|
2506
|
+
"date": "2025-04-15",
|
|
2507
|
+
"size": 11489037517,
|
|
2508
|
+
"vram": 11489037517,
|
|
2509
|
+
"usage": 22
|
|
2510
|
+
},
|
|
2511
|
+
{
|
|
2512
|
+
"name": "video_wan2.1_fun_camera_v1.1_14B",
|
|
2513
|
+
"title": "Cámara Fun 14B Wan 2.1",
|
|
2514
|
+
"description": "Generar videos de alta calidad con control avanzado de cámara usando el modelo completo de 14B",
|
|
2515
|
+
"mediaType": "image",
|
|
2516
|
+
"mediaSubtype": "webp",
|
|
2517
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
2518
|
+
"tags": ["Video"],
|
|
2519
|
+
"models": ["Wan2.1", "Wan"],
|
|
2520
|
+
"date": "2025-04-15",
|
|
2521
|
+
"size": 42047729828,
|
|
2522
|
+
"vram": 42047729828,
|
|
2523
|
+
"usage": 48
|
|
2524
|
+
},
|
|
2525
|
+
{
|
|
2526
|
+
"name": "text_to_video_wan",
|
|
2527
|
+
"title": "Texto a video Wan 2.1",
|
|
2528
|
+
"description": "Generar videos a partir de indicaciones de texto usando Wan 2.1.",
|
|
2529
|
+
"mediaType": "image",
|
|
2530
|
+
"mediaSubtype": "webp",
|
|
2531
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
2532
|
+
"tags": ["Texto a video", "Video"],
|
|
2533
|
+
"models": ["Wan2.1", "Wan"],
|
|
2534
|
+
"date": "2025-03-01",
|
|
2535
|
+
"size": 9824737690,
|
|
2536
|
+
"vram": 9824737690,
|
|
2537
|
+
"usage": 119
|
|
2538
|
+
},
|
|
2539
|
+
{
|
|
2540
|
+
"name": "image_to_video_wan",
|
|
2541
|
+
"title": "Imagen a video Wan 2.1",
|
|
2542
|
+
"description": "Generar videos a partir de imágenes usando Wan 2.1.",
|
|
2543
|
+
"mediaType": "image",
|
|
2544
|
+
"mediaSubtype": "webp",
|
|
2545
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
|
|
2546
|
+
"tags": ["Texto a video", "Video"],
|
|
2547
|
+
"models": ["Wan2.1", "Wan"],
|
|
2548
|
+
"date": "2025-03-01",
|
|
2549
|
+
"size": 41049149932,
|
|
2550
|
+
"vram": 41049149932,
|
|
2551
|
+
"usage": 143
|
|
2552
|
+
},
|
|
2553
|
+
{
|
|
2554
|
+
"name": "wan2.1_fun_inp",
|
|
2555
|
+
"title": "Inpainting Wan 2.1",
|
|
2556
|
+
"description": "Generar videos desde fotogramas de inicio y fin usando inpainting de Wan 2.1.",
|
|
2557
|
+
"mediaType": "image",
|
|
2558
|
+
"mediaSubtype": "webp",
|
|
2559
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
|
|
2560
|
+
"tags": ["Inpaint", "Video"],
|
|
2561
|
+
"models": ["Wan2.1", "Wan"],
|
|
2562
|
+
"date": "2025-04-15",
|
|
2563
|
+
"size": 11381663334,
|
|
2564
|
+
"vram": 11381663334,
|
|
2565
|
+
"usage": 13
|
|
2566
|
+
},
|
|
2567
|
+
{
|
|
2568
|
+
"name": "wan2.1_fun_control",
|
|
2569
|
+
"title": "ControlNet Wan 2.1",
|
|
2570
|
+
"description": "Generar videos guiados por controles de pose, profundidad y borde usando ControlNet de Wan 2.1.",
|
|
2571
|
+
"mediaType": "image",
|
|
2572
|
+
"mediaSubtype": "webp",
|
|
2573
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2574
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
|
|
2575
|
+
"tags": ["Video a video", "Video"],
|
|
2576
|
+
"models": ["Wan2.1", "Wan"],
|
|
2577
|
+
"date": "2025-04-15",
|
|
2578
|
+
"size": 11381663334,
|
|
2579
|
+
"vram": 11381663334,
|
|
2580
|
+
"usage": 115
|
|
2581
|
+
},
|
|
2582
|
+
{
|
|
2583
|
+
"name": "wan2.1_flf2v_720_f16",
|
|
2584
|
+
"title": "FLF2V 720p F16 Wan 2.1",
|
|
2585
|
+
"description": "Generar videos controlando primer y último fotogramas usando FLF2V de Wan 2.1.",
|
|
2586
|
+
"mediaType": "image",
|
|
2587
|
+
"mediaSubtype": "webp",
|
|
2588
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
|
|
2589
|
+
"tags": ["FLF2V", "Video"],
|
|
2590
|
+
"models": ["Wan2.1", "Wan"],
|
|
2591
|
+
"date": "2025-04-15",
|
|
2592
|
+
"size": 41049149932,
|
|
2593
|
+
"vram": 41049149932,
|
|
2594
|
+
"usage": 43
|
|
2595
|
+
},
|
|
2596
|
+
{
|
|
2597
|
+
"name": "ltxv_text_to_video",
|
|
2598
|
+
"title": "Texto a video LTXV",
|
|
2599
|
+
"mediaType": "image",
|
|
2600
|
+
"mediaSubtype": "webp",
|
|
2601
|
+
"description": "Generar videos a partir de indicaciones de texto.",
|
|
2602
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
2603
|
+
"tags": ["Texto a video", "Video"],
|
|
2604
|
+
"models": ["LTXV"],
|
|
2605
|
+
"date": "2025-03-01",
|
|
2606
|
+
"size": 19155554140,
|
|
2607
|
+
"vram": 19155554140,
|
|
2608
|
+
"usage": 68
|
|
2609
|
+
},
|
|
2610
|
+
{
|
|
2611
|
+
"name": "ltxv_image_to_video",
|
|
2612
|
+
"title": "Imagen a video LTXV",
|
|
2613
|
+
"mediaType": "image",
|
|
2614
|
+
"mediaSubtype": "webp",
|
|
2615
|
+
"description": "Generar videos a partir de imágenes fijas.",
|
|
2616
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
|
|
2617
|
+
"tags": ["Imagen a video", "Video"],
|
|
2618
|
+
"models": ["LTXV"],
|
|
2619
|
+
"date": "2025-03-01",
|
|
2620
|
+
"size": 19155554140,
|
|
2621
|
+
"vram": 19155554140,
|
|
2622
|
+
"usage": 108
|
|
2623
|
+
},
|
|
2624
|
+
{
|
|
2625
|
+
"name": "hunyuan_video_text_to_video",
|
|
2626
|
+
"title": "Texto a video Hunyuan Video",
|
|
2627
|
+
"mediaType": "image",
|
|
2628
|
+
"mediaSubtype": "webp",
|
|
2629
|
+
"description": "Generar videos a partir de indicaciones de texto usando el modelo Hunyuan.",
|
|
2630
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
|
|
2631
|
+
"tags": ["Texto a video", "Video"],
|
|
2632
|
+
"models": ["Hunyuan Video", "Tencent"],
|
|
2633
|
+
"date": "2025-03-01",
|
|
2634
|
+
"size": 35476429865,
|
|
2635
|
+
"vram": 35476429865,
|
|
2636
|
+
"usage": 52
|
|
2637
|
+
},
|
|
2638
|
+
{
|
|
2639
|
+
"name": "txt_to_image_to_video",
|
|
2640
|
+
"title": "Texto a imagen a video SVD",
|
|
2641
|
+
"mediaType": "image",
|
|
2642
|
+
"mediaSubtype": "webp",
|
|
2643
|
+
"description": "Generar videos creando primero imágenes a partir de indicaciones de texto.",
|
|
2644
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
|
|
2645
|
+
"tags": ["Texto a video", "Video"],
|
|
2646
|
+
"models": ["SVD", "Stability"],
|
|
2647
|
+
"date": "2025-03-01",
|
|
2648
|
+
"size": 16492674417,
|
|
2649
|
+
"vram": 16492674417,
|
|
2650
|
+
"usage": 14
|
|
2651
|
+
}
|
|
2652
|
+
]
|
|
2653
|
+
},
|
|
2654
|
+
{
|
|
2655
|
+
"moduleName": "default",
|
|
2656
|
+
"type": "audio",
|
|
2657
|
+
"category": "Tipo de generación",
|
|
2658
|
+
"icon": "icon-[lucide--volume-2]",
|
|
2659
|
+
"title": "Audio",
|
|
2660
|
+
"templates": [
|
|
2661
|
+
{
|
|
2662
|
+
"name": "api_stability_ai_text_to_audio",
|
|
2663
|
+
"title": "Stability AI: Texto a audio",
|
|
2664
|
+
"description": "Genera música a partir de texto usando Stable Audio 2.5. Crea pistas de varios minutos en segundos.",
|
|
2665
|
+
"mediaType": "audio",
|
|
2666
|
+
"mediaSubtype": "mp3",
|
|
2667
|
+
"tags": ["Texto a audio", "Audio", "API"],
|
|
2668
|
+
"date": "2025-09-09",
|
|
2669
|
+
"models": ["Stability", "Stable Audio"],
|
|
2670
|
+
"openSource": false,
|
|
2671
|
+
"size": 0,
|
|
2672
|
+
"vram": 0,
|
|
2673
|
+
"usage": 119
|
|
2674
|
+
},
|
|
2675
|
+
{
|
|
2676
|
+
"name": "api_stability_ai_audio_to_audio",
|
|
2677
|
+
"title": "Stability AI: Audio a audio",
|
|
2678
|
+
"description": "Transforma audio en nuevas composiciones usando Stable Audio 2.5. Sube audio y la IA crea pistas completas.",
|
|
2679
|
+
"mediaType": "audio",
|
|
2680
|
+
"mediaSubtype": "mp3",
|
|
2681
|
+
"tags": ["Audio a audio", "Audio", "API"],
|
|
2682
|
+
"date": "2025-09-09",
|
|
2683
|
+
"models": ["Stability", "Stable Audio"],
|
|
2684
|
+
"openSource": false,
|
|
2685
|
+
"size": 0,
|
|
2686
|
+
"vram": 0,
|
|
2687
|
+
"usage": 67
|
|
2688
|
+
},
|
|
2689
|
+
{
|
|
2690
|
+
"name": "api_stability_ai_audio_inpaint",
|
|
2691
|
+
"title": "Stability AI: Relleno de audio",
|
|
2692
|
+
"description": "Completa o extiende pistas de audio usando Stable Audio 2.5. Sube audio y la IA genera el resto.",
|
|
2693
|
+
"mediaType": "audio",
|
|
2694
|
+
"mediaSubtype": "mp3",
|
|
2695
|
+
"tags": ["Audio a audio", "Audio", "API"],
|
|
2696
|
+
"date": "2025-09-09",
|
|
2697
|
+
"models": ["Stability", "Stable Audio"],
|
|
2698
|
+
"openSource": false,
|
|
2699
|
+
"size": 0,
|
|
2700
|
+
"vram": 0,
|
|
2701
|
+
"usage": 17
|
|
2702
|
+
},
|
|
2703
|
+
{
|
|
2704
|
+
"name": "audio_stable_audio_example",
|
|
2705
|
+
"title": "Stable Audio",
|
|
2706
|
+
"mediaType": "audio",
|
|
2707
|
+
"mediaSubtype": "mp3",
|
|
2708
|
+
"description": "Generar audio a partir de indicaciones de texto usando Stable Audio.",
|
|
2709
|
+
"tags": ["Texto a audio", "Audio"],
|
|
2710
|
+
"models": ["Stable Audio", "Stability"],
|
|
2711
|
+
"date": "2025-03-01",
|
|
2712
|
+
"tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
|
|
2713
|
+
"size": 5744518758,
|
|
2714
|
+
"vram": 5744518758,
|
|
2715
|
+
"usage": 270
|
|
2716
|
+
},
|
|
2717
|
+
{
|
|
2718
|
+
"name": "audio_ace_step_1_t2a_instrumentals",
|
|
2719
|
+
"title": "Música instrumental ACE-Step v1 texto a audio",
|
|
2720
|
+
"mediaType": "audio",
|
|
2721
|
+
"mediaSubtype": "mp3",
|
|
2722
|
+
"description": "Generar música instrumental a partir de indicaciones de texto usando ACE-Step v1.",
|
|
2723
|
+
"tags": ["Texto a audio", "Audio"],
|
|
2724
|
+
"models": ["ACE-Step"],
|
|
2725
|
+
"date": "2025-03-01",
|
|
2726
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
2727
|
+
"size": 7698728878,
|
|
2728
|
+
"vram": 7698728878,
|
|
2729
|
+
"usage": 139
|
|
2730
|
+
},
|
|
2731
|
+
{
|
|
2732
|
+
"name": "audio_ace_step_1_t2a_song",
|
|
2733
|
+
"title": "Canción ACE Step v1 texto a audio",
|
|
2734
|
+
"mediaType": "audio",
|
|
2735
|
+
"mediaSubtype": "mp3",
|
|
2736
|
+
"description": "Generar canciones con voces a partir de indicaciones de texto usando ACE-Step v1, soportando personalización multilingüe y de estilo.",
|
|
2737
|
+
"tags": ["Texto a audio", "Audio"],
|
|
2738
|
+
"models": ["ACE-Step"],
|
|
2739
|
+
"date": "2025-03-01",
|
|
2740
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
2741
|
+
"size": 7698728878,
|
|
2742
|
+
"vram": 7698728878,
|
|
2743
|
+
"usage": 123
|
|
2271
2744
|
},
|
|
2272
2745
|
{
|
|
2273
|
-
"name": "
|
|
2274
|
-
"title": "
|
|
2275
|
-
"
|
|
2746
|
+
"name": "audio_ace_step_1_m2m_editing",
|
|
2747
|
+
"title": "Edición M2M ACE Step v1",
|
|
2748
|
+
"mediaType": "audio",
|
|
2749
|
+
"mediaSubtype": "mp3",
|
|
2750
|
+
"description": "Editar canciones existentes para cambiar estilo y letras usando ACE-Step v1 M2M.",
|
|
2751
|
+
"tags": ["Edición de audio", "Audio"],
|
|
2752
|
+
"models": ["ACE-Step"],
|
|
2753
|
+
"date": "2025-03-01",
|
|
2754
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
2755
|
+
"size": 7698728878,
|
|
2756
|
+
"vram": 7698728878,
|
|
2757
|
+
"usage": 138
|
|
2758
|
+
}
|
|
2759
|
+
]
|
|
2760
|
+
},
|
|
2761
|
+
{
|
|
2762
|
+
"moduleName": "default",
|
|
2763
|
+
"type": "3d",
|
|
2764
|
+
"category": "Tipo de generación",
|
|
2765
|
+
"icon": "icon-[lucide--box]",
|
|
2766
|
+
"title": "Modelo 3D",
|
|
2767
|
+
"templates": [
|
|
2768
|
+
{
|
|
2769
|
+
"name": "api_tripo3_0_image_to_model",
|
|
2770
|
+
"title": "Tripo3.0: Imagen a Modelo",
|
|
2771
|
+
"description": "Convierte imágenes o bocetos en modelos 3D con la geometría precisa y texturas PBR listas para producción de Tripo 3.0.",
|
|
2276
2772
|
"mediaType": "image",
|
|
2277
2773
|
"mediaSubtype": "webp",
|
|
2278
|
-
"tags": ["
|
|
2279
|
-
"models": ["
|
|
2280
|
-
"date": "2025-
|
|
2281
|
-
"
|
|
2282
|
-
"OpenSource": false,
|
|
2774
|
+
"tags": ["Imagen a modelo", "3D", "API"],
|
|
2775
|
+
"models": ["Tripo"],
|
|
2776
|
+
"date": "2025-12-23",
|
|
2777
|
+
"openSource": false,
|
|
2283
2778
|
"size": 0,
|
|
2284
2779
|
"vram": 0
|
|
2285
2780
|
},
|
|
2286
2781
|
{
|
|
2287
|
-
"name": "
|
|
2288
|
-
"title": "
|
|
2289
|
-
"description": "
|
|
2782
|
+
"name": "api_tripo3_0_text_to_model",
|
|
2783
|
+
"title": "Tripo3.0: Texto a Modelo",
|
|
2784
|
+
"description": "Genera modelos 3D precisos desde texto, con geometría de alta resolución y materiales PBR realistas.",
|
|
2290
2785
|
"mediaType": "image",
|
|
2291
2786
|
"mediaSubtype": "webp",
|
|
2292
|
-
"tags": ["
|
|
2293
|
-
"models": ["
|
|
2294
|
-
"date": "2025-
|
|
2295
|
-
"
|
|
2296
|
-
"OpenSource": false,
|
|
2787
|
+
"tags": ["Texto a modelo", "3D", "API"],
|
|
2788
|
+
"models": ["Tripo"],
|
|
2789
|
+
"date": "2025-12-23",
|
|
2790
|
+
"openSource": false,
|
|
2297
2791
|
"size": 0,
|
|
2298
2792
|
"vram": 0
|
|
2299
2793
|
},
|
|
2300
2794
|
{
|
|
2301
|
-
"name": "
|
|
2302
|
-
"title": "
|
|
2303
|
-
"description": "
|
|
2795
|
+
"name": "api_tripo_text_to_model",
|
|
2796
|
+
"title": "Tripo: Texto a modelo",
|
|
2797
|
+
"description": "Crear objetos 3D a partir de descripciones con modelado dirigido por texto de Tripo.",
|
|
2304
2798
|
"mediaType": "image",
|
|
2305
2799
|
"mediaSubtype": "webp",
|
|
2306
|
-
"tags": ["
|
|
2307
|
-
"models": ["
|
|
2800
|
+
"tags": ["Texto a modelo", "3D", "API"],
|
|
2801
|
+
"models": ["Tripo"],
|
|
2308
2802
|
"date": "2025-03-01",
|
|
2309
2803
|
"tutorialUrl": "",
|
|
2310
|
-
"
|
|
2804
|
+
"openSource": false,
|
|
2311
2805
|
"size": 0,
|
|
2312
|
-
"vram": 0
|
|
2806
|
+
"vram": 0,
|
|
2807
|
+
"usage": 48
|
|
2313
2808
|
},
|
|
2314
2809
|
{
|
|
2315
|
-
"name": "
|
|
2316
|
-
"title": "
|
|
2317
|
-
"description": "Generar
|
|
2810
|
+
"name": "api_tripo_image_to_model",
|
|
2811
|
+
"title": "Tripo: Imagen a modelo",
|
|
2812
|
+
"description": "Generar activos 3D profesionales a partir de imágenes 2D usando el motor Tripo.",
|
|
2318
2813
|
"mediaType": "image",
|
|
2319
2814
|
"mediaSubtype": "webp",
|
|
2320
|
-
"tags": ["Imagen a
|
|
2321
|
-
"models": ["
|
|
2815
|
+
"tags": ["Imagen a 3D", "3D", "API"],
|
|
2816
|
+
"models": ["Tripo"],
|
|
2322
2817
|
"date": "2025-03-01",
|
|
2323
2818
|
"tutorialUrl": "",
|
|
2324
|
-
"
|
|
2819
|
+
"openSource": false,
|
|
2325
2820
|
"size": 0,
|
|
2326
|
-
"vram": 0
|
|
2821
|
+
"vram": 0,
|
|
2822
|
+
"usage": 50
|
|
2327
2823
|
},
|
|
2328
2824
|
{
|
|
2329
|
-
"name": "
|
|
2330
|
-
"title": "
|
|
2331
|
-
"description": "
|
|
2825
|
+
"name": "api_tripo_multiview_to_model",
|
|
2826
|
+
"title": "Tripo: Vista múltiple a modelo",
|
|
2827
|
+
"description": "Construir modelos 3D desde múltiples ángulos con el escáner avanzado de Tripo.",
|
|
2332
2828
|
"mediaType": "image",
|
|
2333
2829
|
"mediaSubtype": "webp",
|
|
2334
|
-
"tags": ["Imagen a
|
|
2335
|
-
"models": ["
|
|
2830
|
+
"tags": ["Imagen a 3D", "3D", "API"],
|
|
2831
|
+
"models": ["Tripo"],
|
|
2336
2832
|
"date": "2025-03-01",
|
|
2337
2833
|
"tutorialUrl": "",
|
|
2338
|
-
"
|
|
2834
|
+
"openSource": false,
|
|
2339
2835
|
"size": 0,
|
|
2340
|
-
"vram": 0
|
|
2341
|
-
|
|
2342
|
-
|
|
2343
|
-
},
|
|
2344
|
-
{
|
|
2345
|
-
"moduleName": "default",
|
|
2346
|
-
"type": "image",
|
|
2347
|
-
"category": "CLOSED SOURCE MODELS",
|
|
2348
|
-
"icon": "icon-[lucide--box]",
|
|
2349
|
-
"title": "3D API",
|
|
2350
|
-
"templates": [
|
|
2836
|
+
"vram": 0,
|
|
2837
|
+
"usage": 70
|
|
2838
|
+
},
|
|
2351
2839
|
{
|
|
2352
2840
|
"name": "api_rodin_gen2",
|
|
2353
2841
|
"title": "Rodin: Gen-2 Imagen a Modelo",
|
|
@@ -2358,138 +2846,105 @@
|
|
|
2358
2846
|
"models": ["Rodin"],
|
|
2359
2847
|
"date": "2025-09-27",
|
|
2360
2848
|
"tutorialUrl": "",
|
|
2361
|
-
"
|
|
2849
|
+
"openSource": false,
|
|
2362
2850
|
"size": 0,
|
|
2363
|
-
"vram": 0
|
|
2851
|
+
"vram": 0,
|
|
2852
|
+
"usage": 355
|
|
2364
2853
|
},
|
|
2365
2854
|
{
|
|
2366
2855
|
"name": "api_rodin_image_to_model",
|
|
2367
2856
|
"title": "Rodin: Imagen a modelo",
|
|
2368
2857
|
"description": "Generar modelos 3D detallados a partir de fotos individuales usando Rodin AI.",
|
|
2369
2858
|
"mediaType": "image",
|
|
2370
|
-
"thumbnailVariant": "compareSlider",
|
|
2371
2859
|
"mediaSubtype": "webp",
|
|
2372
2860
|
"tags": ["Imagen a 3D", "3D", "API"],
|
|
2373
2861
|
"models": ["Rodin"],
|
|
2374
2862
|
"date": "2025-03-01",
|
|
2375
2863
|
"tutorialUrl": "",
|
|
2376
|
-
"
|
|
2864
|
+
"openSource": false,
|
|
2377
2865
|
"size": 0,
|
|
2378
|
-
"vram": 0
|
|
2866
|
+
"vram": 0,
|
|
2867
|
+
"usage": 25
|
|
2379
2868
|
},
|
|
2380
2869
|
{
|
|
2381
2870
|
"name": "api_rodin_multiview_to_model",
|
|
2382
2871
|
"title": "Rodin: Vista múltiple a modelo",
|
|
2383
2872
|
"description": "Esculpir modelos 3D completos usando reconstrucción multiángulo de Rodin.",
|
|
2384
2873
|
"mediaType": "image",
|
|
2385
|
-
"thumbnailVariant": "compareSlider",
|
|
2386
2874
|
"mediaSubtype": "webp",
|
|
2387
2875
|
"tags": ["Imagen a 3D", "3D", "API"],
|
|
2388
2876
|
"models": ["Rodin"],
|
|
2389
2877
|
"date": "2025-03-01",
|
|
2390
2878
|
"tutorialUrl": "",
|
|
2391
|
-
"
|
|
2879
|
+
"openSource": false,
|
|
2392
2880
|
"size": 0,
|
|
2393
|
-
"vram": 0
|
|
2881
|
+
"vram": 0,
|
|
2882
|
+
"usage": 47
|
|
2394
2883
|
},
|
|
2395
2884
|
{
|
|
2396
|
-
"name": "
|
|
2397
|
-
"title": "
|
|
2398
|
-
"description": "Crear objetos 3D a partir de descripciones con modelado dirigido por texto de Tripo.",
|
|
2885
|
+
"name": "3d_hunyuan3d-v2.1",
|
|
2886
|
+
"title": "Hunyuan3D 2.1",
|
|
2399
2887
|
"mediaType": "image",
|
|
2400
2888
|
"mediaSubtype": "webp",
|
|
2401
|
-
"
|
|
2402
|
-
"
|
|
2889
|
+
"description": "Genera modelos 3D a partir de imágenes individuales usando Hunyuan3D 2.1.",
|
|
2890
|
+
"tags": ["Imagen a 3D", "3D"],
|
|
2891
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
2403
2892
|
"date": "2025-03-01",
|
|
2404
2893
|
"tutorialUrl": "",
|
|
2405
|
-
"
|
|
2406
|
-
"
|
|
2407
|
-
"
|
|
2894
|
+
"size": 4928474972,
|
|
2895
|
+
"vram": 4928474972,
|
|
2896
|
+
"usage": 384
|
|
2408
2897
|
},
|
|
2409
2898
|
{
|
|
2410
|
-
"name": "
|
|
2411
|
-
"title": "
|
|
2412
|
-
"description": "Generar activos 3D profesionales a partir de imágenes 2D usando el motor Tripo.",
|
|
2899
|
+
"name": "3d_hunyuan3d_image_to_model",
|
|
2900
|
+
"title": "Hunyuan3D 2.0",
|
|
2413
2901
|
"mediaType": "image",
|
|
2414
|
-
"thumbnailVariant": "compareSlider",
|
|
2415
2902
|
"mediaSubtype": "webp",
|
|
2416
|
-
"
|
|
2417
|
-
"
|
|
2903
|
+
"description": "Generar modelos 3D a partir de imágenes individuales usando Hunyuan3D 2.0.",
|
|
2904
|
+
"tags": ["Imagen a 3D", "3D"],
|
|
2905
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
2418
2906
|
"date": "2025-03-01",
|
|
2419
2907
|
"tutorialUrl": "",
|
|
2420
|
-
"
|
|
2421
|
-
"
|
|
2422
|
-
"
|
|
2908
|
+
"size": 4928474972,
|
|
2909
|
+
"vram": 4928474972,
|
|
2910
|
+
"usage": 69
|
|
2423
2911
|
},
|
|
2424
2912
|
{
|
|
2425
|
-
"name": "
|
|
2426
|
-
"title": "
|
|
2427
|
-
"description": "Construir modelos 3D desde múltiples ángulos con el escáner avanzado de Tripo.",
|
|
2913
|
+
"name": "3d_hunyuan3d_multiview_to_model",
|
|
2914
|
+
"title": "Hunyuan3D 2.0 Multivista",
|
|
2428
2915
|
"mediaType": "image",
|
|
2429
|
-
"thumbnailVariant": "compareSlider",
|
|
2430
2916
|
"mediaSubtype": "webp",
|
|
2431
|
-
"
|
|
2432
|
-
"
|
|
2917
|
+
"description": "Generar modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV.",
|
|
2918
|
+
"tags": ["3D", "Imagen a 3D"],
|
|
2919
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
2433
2920
|
"date": "2025-03-01",
|
|
2434
2921
|
"tutorialUrl": "",
|
|
2435
|
-
"
|
|
2436
|
-
"size":
|
|
2437
|
-
"vram":
|
|
2438
|
-
|
|
2439
|
-
]
|
|
2440
|
-
},
|
|
2441
|
-
{
|
|
2442
|
-
"moduleName": "default",
|
|
2443
|
-
"type": "audio",
|
|
2444
|
-
"category": "CLOSED SOURCE MODELS",
|
|
2445
|
-
"icon": "icon-[lucide--volume-2]",
|
|
2446
|
-
"title": "Audio API",
|
|
2447
|
-
"templates": [
|
|
2448
|
-
{
|
|
2449
|
-
"name": "api_stability_ai_text_to_audio",
|
|
2450
|
-
"title": "Stability AI: Texto a audio",
|
|
2451
|
-
"description": "Genera música a partir de texto usando Stable Audio 2.5. Crea pistas de varios minutos en segundos.",
|
|
2452
|
-
"mediaType": "audio",
|
|
2453
|
-
"mediaSubtype": "mp3",
|
|
2454
|
-
"tags": ["Texto a audio", "Audio", "API"],
|
|
2455
|
-
"date": "2025-09-09",
|
|
2456
|
-
"models": ["Stability", "Stable Audio"],
|
|
2457
|
-
"OpenSource": false,
|
|
2458
|
-
"size": 0,
|
|
2459
|
-
"vram": 0
|
|
2460
|
-
},
|
|
2461
|
-
{
|
|
2462
|
-
"name": "api_stability_ai_audio_to_audio",
|
|
2463
|
-
"title": "Stability AI: Audio a audio",
|
|
2464
|
-
"description": "Transforma audio en nuevas composiciones usando Stable Audio 2.5. Sube audio y la IA crea pistas completas.",
|
|
2465
|
-
"mediaType": "audio",
|
|
2466
|
-
"mediaSubtype": "mp3",
|
|
2467
|
-
"tags": ["Audio a audio", "Audio", "API"],
|
|
2468
|
-
"date": "2025-09-09",
|
|
2469
|
-
"models": ["Stability", "Stable Audio"],
|
|
2470
|
-
"OpenSource": false,
|
|
2471
|
-
"size": 0,
|
|
2472
|
-
"vram": 0
|
|
2922
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2923
|
+
"size": 4928474972,
|
|
2924
|
+
"vram": 4928474972,
|
|
2925
|
+
"usage": 97
|
|
2473
2926
|
},
|
|
2474
2927
|
{
|
|
2475
|
-
"name": "
|
|
2476
|
-
"title": "
|
|
2477
|
-
"
|
|
2478
|
-
"
|
|
2479
|
-
"
|
|
2480
|
-
"tags": ["
|
|
2481
|
-
"
|
|
2482
|
-
"
|
|
2483
|
-
"
|
|
2484
|
-
"
|
|
2485
|
-
"
|
|
2928
|
+
"name": "3d_hunyuan3d_multiview_to_model_turbo",
|
|
2929
|
+
"title": "Hunyuan3D 2.0 Multivista Turbo",
|
|
2930
|
+
"mediaType": "image",
|
|
2931
|
+
"mediaSubtype": "webp",
|
|
2932
|
+
"description": "Generar modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV Turbo.",
|
|
2933
|
+
"tags": ["Imagen a 3D", "3D"],
|
|
2934
|
+
"models": ["Hunyuan3D", "Tencent"],
|
|
2935
|
+
"date": "2025-03-01",
|
|
2936
|
+
"tutorialUrl": "",
|
|
2937
|
+
"thumbnailVariant": "hoverDissolve",
|
|
2938
|
+
"size": 4928474972,
|
|
2939
|
+
"vram": 4928474972,
|
|
2940
|
+
"usage": 38
|
|
2486
2941
|
}
|
|
2487
2942
|
]
|
|
2488
2943
|
},
|
|
2489
2944
|
{
|
|
2490
2945
|
"moduleName": "default",
|
|
2491
2946
|
"type": "image",
|
|
2492
|
-
"category": "
|
|
2947
|
+
"category": "Tipo de generación",
|
|
2493
2948
|
"icon": "icon-[lucide--message-square-text]",
|
|
2494
2949
|
"title": "LLM API",
|
|
2495
2950
|
"templates": [
|
|
@@ -2503,9 +2958,10 @@
|
|
|
2503
2958
|
"models": ["OpenAI"],
|
|
2504
2959
|
"date": "2025-03-01",
|
|
2505
2960
|
"tutorialUrl": "",
|
|
2506
|
-
"
|
|
2961
|
+
"openSource": false,
|
|
2507
2962
|
"size": 0,
|
|
2508
|
-
"vram": 0
|
|
2963
|
+
"vram": 0,
|
|
2964
|
+
"usage": 35
|
|
2509
2965
|
},
|
|
2510
2966
|
{
|
|
2511
2967
|
"name": "api_google_gemini",
|
|
@@ -2517,9 +2973,144 @@
|
|
|
2517
2973
|
"models": ["Google Gemini", "Google"],
|
|
2518
2974
|
"date": "2025-03-01",
|
|
2519
2975
|
"tutorialUrl": "",
|
|
2520
|
-
"
|
|
2976
|
+
"openSource": false,
|
|
2521
2977
|
"size": 0,
|
|
2522
|
-
"vram": 0
|
|
2978
|
+
"vram": 0,
|
|
2979
|
+
"usage": 130
|
|
2980
|
+
}
|
|
2981
|
+
]
|
|
2982
|
+
},
|
|
2983
|
+
{
|
|
2984
|
+
"moduleName": "default",
|
|
2985
|
+
"type": "image",
|
|
2986
|
+
"isEssential": true,
|
|
2987
|
+
"title": "Primeros pasos",
|
|
2988
|
+
"templates": [
|
|
2989
|
+
{
|
|
2990
|
+
"name": "gsc_starter_1",
|
|
2991
|
+
"title": "Inicio 1: Texto a imagen",
|
|
2992
|
+
"mediaType": "image",
|
|
2993
|
+
"mediaSubtype": "webp",
|
|
2994
|
+
"description": "Aprende a crear imágenes, conectar nodos y descargar resultados con Z-Image Turbo",
|
|
2995
|
+
"models": ["Z-Image-Turbo"],
|
|
2996
|
+
"date": "2025-12-10",
|
|
2997
|
+
"searchRank": 3,
|
|
2998
|
+
"size": 0,
|
|
2999
|
+
"vram": 0,
|
|
3000
|
+
"includeOnDistributions": ["cloud"]
|
|
3001
|
+
},
|
|
3002
|
+
{
|
|
3003
|
+
"name": "gsc_starter_2",
|
|
3004
|
+
"title": "Inicio 2: Imagen a video",
|
|
3005
|
+
"mediaType": "image",
|
|
3006
|
+
"mediaSubtype": "webp",
|
|
3007
|
+
"description": "Aprende a cargar imágenes, crear videos y buscar nodos con Wan 2.2",
|
|
3008
|
+
"models": ["Wan2.2", "Wan"],
|
|
3009
|
+
"date": "2025-12-10",
|
|
3010
|
+
"searchRank": 3,
|
|
3011
|
+
"size": 0,
|
|
3012
|
+
"vram": 0,
|
|
3013
|
+
"requiresCustomNodes": ["comfyui_essentials"],
|
|
3014
|
+
"includeOnDistributions": ["cloud"]
|
|
3015
|
+
},
|
|
3016
|
+
{
|
|
3017
|
+
"name": "gsc_starter_3",
|
|
3018
|
+
"title": "Inicio 3: Fotografía de producto",
|
|
3019
|
+
"mediaType": "image",
|
|
3020
|
+
"mediaSubtype": "webp",
|
|
3021
|
+
"description": "Aprende a crear fotos de producto con imágenes, ingresar a un subgrafo, desactivar el bypass y conocer nodos asociados usando Nano Banana Pro.",
|
|
3022
|
+
"models": ["Nano Banana Pro", "Google"],
|
|
3023
|
+
"date": "2025-12-10",
|
|
3024
|
+
"searchRank": 3,
|
|
3025
|
+
"size": 0,
|
|
3026
|
+
"vram": 0,
|
|
3027
|
+
"requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
|
|
3028
|
+
"includeOnDistributions": ["cloud"]
|
|
3029
|
+
},
|
|
3030
|
+
{
|
|
3031
|
+
"name": "01_get_started_text_to_image",
|
|
3032
|
+
"title": "Texto a imagen (Nuevo)",
|
|
3033
|
+
"mediaType": "image",
|
|
3034
|
+
"mediaSubtype": "webp",
|
|
3035
|
+
"description": "Genera imágenes a partir de indicaciones de texto usando el modelo z-image-turbo",
|
|
3036
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/z-image/z-image-turbo",
|
|
3037
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
3038
|
+
"models": ["Z-Image-Turbo"],
|
|
3039
|
+
"date": "2025-10-17",
|
|
3040
|
+
"size": 20862803640,
|
|
3041
|
+
"vram": 20862803640,
|
|
3042
|
+
"usage": 299
|
|
3043
|
+
},
|
|
3044
|
+
{
|
|
3045
|
+
"name": "02_qwen_Image_edit_subgraphed",
|
|
3046
|
+
"title": "Edición de imágenes (Nuevo)",
|
|
3047
|
+
"mediaType": "image",
|
|
3048
|
+
"mediaSubtype": "webp",
|
|
3049
|
+
"description": "Edita tus imágenes con Qwen-Image-Edit",
|
|
3050
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
|
|
3051
|
+
"tags": ["Imagen a imagen", "Edición imagen", "ControlNet"],
|
|
3052
|
+
"models": ["Qwen-Image-Edit"],
|
|
3053
|
+
"date": "2025-10-17",
|
|
3054
|
+
"size": 31772020572,
|
|
3055
|
+
"vram": 31772020572,
|
|
3056
|
+
"usage": 6436
|
|
3057
|
+
},
|
|
3058
|
+
{
|
|
3059
|
+
"name": "03_video_wan2_2_14B_i2v_subgraphed",
|
|
3060
|
+
"title": "Imagen a Video (Nuevo)",
|
|
3061
|
+
"description": "Genera videos a partir de una imagen usando Wan2.2 14B",
|
|
3062
|
+
"mediaType": "image",
|
|
3063
|
+
"mediaSubtype": "webp",
|
|
3064
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
|
|
3065
|
+
"tags": ["Imagen a video", "Video"],
|
|
3066
|
+
"models": ["Wan2.2", "Wan"],
|
|
3067
|
+
"date": "2025-10-17",
|
|
3068
|
+
"size": 38031935406,
|
|
3069
|
+
"vram": 38031935406,
|
|
3070
|
+
"usage": 4084
|
|
3071
|
+
},
|
|
3072
|
+
{
|
|
3073
|
+
"name": "04_hunyuan_3d_2.1_subgraphed",
|
|
3074
|
+
"title": "Imagen a 3D (Nuevo)",
|
|
3075
|
+
"mediaType": "image",
|
|
3076
|
+
"mediaSubtype": "webp",
|
|
3077
|
+
"description": "Genera modelos 3D a partir de imágenes únicas usando Hunyuan3D 2.1.",
|
|
3078
|
+
"tags": ["Imagen a 3D", "3D"],
|
|
3079
|
+
"models": ["Hunyuan3D"],
|
|
3080
|
+
"date": "2025-10-17",
|
|
3081
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
|
|
3082
|
+
"size": 4928474972,
|
|
3083
|
+
"vram": 4928474972,
|
|
3084
|
+
"usage": 152
|
|
3085
|
+
},
|
|
3086
|
+
{
|
|
3087
|
+
"name": "05_audio_ace_step_1_t2a_song_subgraphed",
|
|
3088
|
+
"title": "Texto a audio (Nuevo)",
|
|
3089
|
+
"mediaType": "image",
|
|
3090
|
+
"mediaSubtype": "webp",
|
|
3091
|
+
"description": "Genera audio a partir de indicaciones de texto usando ACE-Step v1",
|
|
3092
|
+
"tags": ["Texto a audio", "Audio"],
|
|
3093
|
+
"models": ["ACE-Step"],
|
|
3094
|
+
"date": "2025-10-17",
|
|
3095
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
|
|
3096
|
+
"size": 7698728878,
|
|
3097
|
+
"vram": 7698728878,
|
|
3098
|
+
"usage": 101
|
|
3099
|
+
},
|
|
3100
|
+
{
|
|
3101
|
+
"name": "default",
|
|
3102
|
+
"title": "Generación de imágenes",
|
|
3103
|
+
"mediaType": "image",
|
|
3104
|
+
"mediaSubtype": "webp",
|
|
3105
|
+
"description": "Generar imágenes a partir de indicaciones de texto.",
|
|
3106
|
+
"tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
|
|
3107
|
+
"tags": ["Texto a imagen", "Imagen"],
|
|
3108
|
+
"models": ["SD1.5", "Stability"],
|
|
3109
|
+
"date": "2025-03-01",
|
|
3110
|
+
"size": 2136746230,
|
|
3111
|
+
"vram": 3092376453,
|
|
3112
|
+
"status": "active",
|
|
3113
|
+
"usage": 168
|
|
2523
3114
|
}
|
|
2524
3115
|
]
|
|
2525
3116
|
}
|