comfyui-workflow-templates 0.1.95__py3-none-any.whl → 0.1.97__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of comfyui-workflow-templates might be problematic. Click here for more details.

Files changed (151) hide show
  1. comfyui_workflow_templates/templates/01_qwen_t2i_subgraphed-1.webp +0 -0
  2. comfyui_workflow_templates/templates/01_qwen_t2i_subgraphed.json +1288 -0
  3. comfyui_workflow_templates/templates/02_qwen_Image_edit_subgraphed-1.webp +0 -0
  4. comfyui_workflow_templates/templates/02_qwen_Image_edit_subgraphed.json +1754 -0
  5. comfyui_workflow_templates/templates/03_video_wan2_2_14B_i2v_subgraphed-1.webp +0 -0
  6. comfyui_workflow_templates/templates/03_video_wan2_2_14B_i2v_subgraphed.json +1416 -0
  7. comfyui_workflow_templates/templates/04_hunyuan_3d_2.1_subgraphed-1.webp +0 -0
  8. comfyui_workflow_templates/templates/04_hunyuan_3d_2.1_subgraphed.json +850 -0
  9. comfyui_workflow_templates/templates/05_audio_ace_step_1_t2a_song_subgraphed-1.webp +0 -0
  10. comfyui_workflow_templates/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +1014 -0
  11. comfyui_workflow_templates/templates/2_pass_pose_worship.json +551 -139
  12. comfyui_workflow_templates/templates/3d_hunyuan3d-v2.1.json +8 -8
  13. comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model.json +176 -284
  14. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model.json +131 -133
  15. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo.json +84 -86
  16. comfyui_workflow_templates/templates/ByteDance-Seedance_00003_.json +210 -0
  17. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_max_image.json +73 -14
  18. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_multiple_images_input.json +86 -23
  19. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_pro_image.json +145 -86
  20. comfyui_workflow_templates/templates/api_bfl_flux_pro_t2i.json +78 -78
  21. comfyui_workflow_templates/templates/api_bytedance_flf2v.json +86 -86
  22. comfyui_workflow_templates/templates/api_bytedance_image_to_video.json +36 -36
  23. comfyui_workflow_templates/templates/api_bytedance_seedream4.json +227 -222
  24. comfyui_workflow_templates/templates/api_google_gemini_image.json +113 -47
  25. comfyui_workflow_templates/templates/api_hailuo_minimax_i2v.json +52 -42
  26. comfyui_workflow_templates/templates/api_hailuo_minimax_video.json +10 -4
  27. comfyui_workflow_templates/templates/api_ideogram_v3_t2i.json +50 -48
  28. comfyui_workflow_templates/templates/api_kling_effects.json +34 -9
  29. comfyui_workflow_templates/templates/api_kling_flf.json +68 -39
  30. comfyui_workflow_templates/templates/api_kling_i2v.json +35 -9
  31. comfyui_workflow_templates/templates/api_luma_i2v.json +124 -110
  32. comfyui_workflow_templates/templates/api_luma_photon_i2i.json +64 -25
  33. comfyui_workflow_templates/templates/api_luma_photon_style_ref.json +210 -60
  34. comfyui_workflow_templates/templates/api_moonvalley_image_to_video.json +40 -41
  35. comfyui_workflow_templates/templates/api_moonvalley_video_to_video_motion_transfer.json +10 -11
  36. comfyui_workflow_templates/templates/api_moonvalley_video_to_video_pose_control.json +36 -37
  37. comfyui_workflow_templates/templates/api_openai_dall_e_2_inpaint.json +47 -11
  38. comfyui_workflow_templates/templates/api_openai_image_1_i2i.json +37 -8
  39. comfyui_workflow_templates/templates/api_openai_image_1_inpaint.json +29 -29
  40. comfyui_workflow_templates/templates/api_openai_image_1_multi_inputs.json +56 -13
  41. comfyui_workflow_templates/templates/api_openai_image_1_t2i.json +28 -28
  42. comfyui_workflow_templates/templates/api_openai_sora_video.json +203 -162
  43. comfyui_workflow_templates/templates/api_pika_i2v.json +29 -29
  44. comfyui_workflow_templates/templates/api_pika_scene.json +9 -9
  45. comfyui_workflow_templates/templates/api_pixverse_i2v.json +58 -58
  46. comfyui_workflow_templates/templates/api_pixverse_template_i2v.json +4 -4
  47. comfyui_workflow_templates/templates/api_recraft_image_gen_with_color_control.json +168 -73
  48. comfyui_workflow_templates/templates/api_rodin_gen2.json +156 -152
  49. comfyui_workflow_templates/templates/api_rodin_image_to_model.json +287 -247
  50. comfyui_workflow_templates/templates/api_rodin_multiview_to_model.json +7 -7
  51. comfyui_workflow_templates/templates/api_runway_first_last_frame.json +44 -44
  52. comfyui_workflow_templates/templates/api_runway_gen3a_turbo_image_to_video.json +29 -29
  53. comfyui_workflow_templates/templates/api_runway_gen4_turo_image_to_video.json +40 -40
  54. comfyui_workflow_templates/templates/api_runway_reference_to_image.json +84 -45
  55. comfyui_workflow_templates/templates/api_runway_text_to_image.json +40 -40
  56. comfyui_workflow_templates/templates/api_stability_ai_audio_inpaint.json +59 -83
  57. comfyui_workflow_templates/templates/api_stability_ai_audio_to_audio.json +43 -46
  58. comfyui_workflow_templates/templates/api_stability_ai_i2i.json +39 -14
  59. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_i2i.json +54 -8
  60. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_t2i.json +44 -44
  61. comfyui_workflow_templates/templates/api_stability_ai_stable_image_ultra_t2i.json +28 -28
  62. comfyui_workflow_templates/templates/api_tripo_image_to_model.json +4 -4
  63. comfyui_workflow_templates/templates/api_tripo_multiview_to_model.json +89 -89
  64. comfyui_workflow_templates/templates/api_veo2_i2v.json +3 -3
  65. comfyui_workflow_templates/templates/api_veo3.json +90 -84
  66. comfyui_workflow_templates/templates/api_vidu_image_to_video.json +37 -9
  67. comfyui_workflow_templates/templates/api_vidu_reference_to_video.json +86 -19
  68. comfyui_workflow_templates/templates/api_vidu_start_end_to_video.json +14 -6
  69. comfyui_workflow_templates/templates/api_wan_image_to_video.json +73 -73
  70. comfyui_workflow_templates/templates/audio_ace_step_1_m2m_editing.json +152 -329
  71. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_instrumentals.json +207 -398
  72. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_song.json +167 -378
  73. comfyui_workflow_templates/templates/audio_stable_audio_example.json +156 -118
  74. comfyui_workflow_templates/templates/controlnet_example.json +110 -122
  75. comfyui_workflow_templates/templates/default.json +329 -139
  76. comfyui_workflow_templates/templates/depth_controlnet.json +463 -206
  77. comfyui_workflow_templates/templates/depth_t2i_adapter.json +1522 -236
  78. comfyui_workflow_templates/templates/flux1_dev_uso_reference_image_gen.json +383 -354
  79. comfyui_workflow_templates/templates/flux_canny_model_example.json +576 -268
  80. comfyui_workflow_templates/templates/flux_depth_lora_example.json +1234 -213
  81. comfyui_workflow_templates/templates/flux_fill_inpaint_example.json +553 -250
  82. comfyui_workflow_templates/templates/flux_fill_outpaint_example.json +556 -228
  83. comfyui_workflow_templates/templates/flux_kontext_dev_basic.json +104 -109
  84. comfyui_workflow_templates/templates/flux_redux_model_example-1.webp +0 -0
  85. comfyui_workflow_templates/templates/flux_redux_model_example.json +609 -557
  86. comfyui_workflow_templates/templates/hidream_e1_1.json +5 -5
  87. comfyui_workflow_templates/templates/hidream_e1_full.json +5 -5
  88. comfyui_workflow_templates/templates/image2image-1.webp +0 -0
  89. comfyui_workflow_templates/templates/image2image-2.webp +0 -0
  90. comfyui_workflow_templates/templates/image2image.json +203 -201
  91. comfyui_workflow_templates/templates/image_flux.1_fill_dev_OneReward.json +178 -162
  92. comfyui_workflow_templates/templates/image_lotus_depth_v1_1.json +4 -4
  93. comfyui_workflow_templates/templates/image_omnigen2_image_edit.json +347 -341
  94. comfyui_workflow_templates/templates/image_qwen_image_controlnet_patch.json +71 -70
  95. comfyui_workflow_templates/templates/image_qwen_image_edit.json +199 -199
  96. comfyui_workflow_templates/templates/image_qwen_image_edit_2509.json +401 -401
  97. comfyui_workflow_templates/templates/image_qwen_image_instantx_controlnet.json +72 -71
  98. comfyui_workflow_templates/templates/image_qwen_image_instantx_inpainting_controlnet.json +1675 -850
  99. comfyui_workflow_templates/templates/image_qwen_image_union_control_lora.json +190 -189
  100. comfyui_workflow_templates/templates/image_to_video.json +64 -64
  101. comfyui_workflow_templates/templates/image_to_video_wan.json +163 -140
  102. comfyui_workflow_templates/templates/index.ar.json +2357 -0
  103. comfyui_workflow_templates/templates/index.es.json +182 -145
  104. comfyui_workflow_templates/templates/index.fr.json +348 -311
  105. comfyui_workflow_templates/templates/index.ja.json +229 -192
  106. comfyui_workflow_templates/templates/index.json +63 -26
  107. comfyui_workflow_templates/templates/index.ko.json +183 -146
  108. comfyui_workflow_templates/templates/index.ru.json +222 -185
  109. comfyui_workflow_templates/templates/index.tr.json +2357 -0
  110. comfyui_workflow_templates/templates/index.tr_translated.json +2357 -0
  111. comfyui_workflow_templates/templates/index.zh-TW.json +278 -241
  112. comfyui_workflow_templates/templates/index.zh.json +235 -198
  113. comfyui_workflow_templates/templates/inpaint_example.json +22 -19
  114. comfyui_workflow_templates/templates/inpaint_model_outpainting.json +23 -20
  115. comfyui_workflow_templates/templates/ltxv_image_to_video-1.webp +0 -0
  116. comfyui_workflow_templates/templates/ltxv_image_to_video.json +367 -337
  117. comfyui_workflow_templates/templates/mixing_controlnets.json +422 -373
  118. comfyui_workflow_templates/templates/sd3.5_large_blur.json +14 -14
  119. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example.json +615 -258
  120. comfyui_workflow_templates/templates/sd3.5_large_depth.json +1317 -210
  121. comfyui_workflow_templates/templates/sdxl_revision_text_prompts.json +619 -256
  122. comfyui_workflow_templates/templates/video_humo.json +194 -194
  123. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_1.3B.json +30 -30
  124. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_14B.json +97 -152
  125. comfyui_workflow_templates/templates/video_wan2_2_14B_animate.json +5 -5
  126. comfyui_workflow_templates/templates/video_wan2_2_14B_flf2v.json +121 -121
  127. comfyui_workflow_templates/templates/video_wan2_2_14B_fun_camera.json +67 -67
  128. comfyui_workflow_templates/templates/video_wan2_2_14B_fun_control.json +380 -358
  129. comfyui_workflow_templates/templates/video_wan2_2_14B_fun_inpaint.json +111 -111
  130. comfyui_workflow_templates/templates/video_wan2_2_14B_i2v.json +58 -58
  131. comfyui_workflow_templates/templates/video_wan2_2_14B_s2v.json +216 -216
  132. comfyui_workflow_templates/templates/video_wan2_2_5B_fun_control.json +222 -202
  133. comfyui_workflow_templates/templates/video_wan2_2_5B_fun_inpaint.json +27 -27
  134. comfyui_workflow_templates/templates/video_wan_ati.json +5 -5
  135. comfyui_workflow_templates/templates/video_wan_vace_14B_ref2v.json +100 -100
  136. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v.json +87 -105
  137. comfyui_workflow_templates/templates/video_wan_vace_flf2v.json +149 -149
  138. comfyui_workflow_templates/templates/video_wan_vace_inpainting.json +235 -243
  139. comfyui_workflow_templates/templates/video_wan_vace_outpainting.json +67 -124
  140. comfyui_workflow_templates/templates/wan2.1_flf2v_720_f16.json +32 -32
  141. comfyui_workflow_templates/templates/wan2.1_fun_control.json +183 -138
  142. comfyui_workflow_templates/templates/wan2.1_fun_inp.json +62 -62
  143. {comfyui_workflow_templates-0.1.95.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/METADATA +1 -1
  144. {comfyui_workflow_templates-0.1.95.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/RECORD +147 -136
  145. comfyui_workflow_templates/templates/sdxl_revision_zero_positive-1.webp +0 -0
  146. comfyui_workflow_templates/templates/sdxl_revision_zero_positive.json +0 -496
  147. comfyui_workflow_templates/templates/stable_zero123_example-1.webp +0 -0
  148. comfyui_workflow_templates/templates/stable_zero123_example.json +0 -273
  149. {comfyui_workflow_templates-0.1.95.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/WHEEL +0 -0
  150. {comfyui_workflow_templates-0.1.95.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/licenses/LICENSE +0 -0
  151. {comfyui_workflow_templates-0.1.95.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/top_level.txt +0 -0
@@ -3,8 +3,68 @@
3
3
  "moduleName": "default",
4
4
  "type": "image",
5
5
  "isEssential": true,
6
- "title": "Conceptos básicos",
6
+ "title": "Getting Started",
7
7
  "templates": [
8
+ {
9
+ "name": "01_qwen_t2i_subgraphed",
10
+ "title": "Qwen-Image Text to Image",
11
+ "mediaType": "image",
12
+ "mediaSubtype": "webp",
13
+ "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model.",
14
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
15
+ "tags": ["Texto a imagen", "Imagen"],
16
+ "models": ["Qwen-Image"],
17
+ "date": "2025-10-17",
18
+ "size": 29.59
19
+ },
20
+ {
21
+ "name": "02_qwen_Image_edit_subgraphed",
22
+ "title": "Qwen Image Edit 2509",
23
+ "mediaType": "image",
24
+ "mediaSubtype": "webp",
25
+ "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
26
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
27
+ "tags": ["Imagen a imagen", "Edición de imagen", "ControlNet"],
28
+ "models": ["Qwen-Image"],
29
+ "date": "2025-10-17",
30
+ "size": 29.59
31
+ },
32
+ {
33
+ "name": "03_video_wan2_2_14B_i2v_subgraphed",
34
+ "title": "Wan 2.2 14B Image to Video",
35
+ "description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
36
+ "mediaType": "image",
37
+ "mediaSubtype": "webp",
38
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
39
+ "tags": ["Imagen a video", "Video"],
40
+ "models": ["Wan2.2", "Wan"],
41
+ "date": "2025-10-17",
42
+ "size": 35.42
43
+ },
44
+ {
45
+ "name": "04_hunyuan_3d_2.1_subgraphed",
46
+ "title": "Hunyuan3D 2.1: image to 3D",
47
+ "mediaType": "image",
48
+ "mediaSubtype": "webp",
49
+ "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
50
+ "tags": ["Imagen a 3D", "3D"],
51
+ "models": ["Hunyuan3D"],
52
+ "date": "2025-10-17",
53
+ "tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
54
+ "size": 4.59
55
+ },
56
+ {
57
+ "name": "05_audio_ace_step_1_t2a_song_subgraphed",
58
+ "title": "ACE Step v1 Text to Song",
59
+ "mediaType": "image",
60
+ "mediaSubtype": "webp",
61
+ "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
62
+ "tags": ["Texto a audio", "Audio"],
63
+ "models": ["ACE-Step"],
64
+ "date": "2025-10-17",
65
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
66
+ "size": 7.17
67
+ },
8
68
  {
9
69
  "name": "default",
10
70
  "title": "Generación de imágenes",
@@ -29,7 +89,8 @@
29
89
  "models": ["SD1.5", "Stability"],
30
90
  "date": "2025-03-01",
31
91
  "size": 1.99,
32
- "vram": 2.88
92
+ "vram": 2.88,
93
+ "thumbnailVariant": "hoverDissolve"
33
94
  },
34
95
  {
35
96
  "name": "lora",
@@ -51,7 +112,7 @@
51
112
  "mediaSubtype": "webp",
52
113
  "description": "Generar imágenes combinando múltiples modelos LoRA.",
53
114
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
54
- "tags": ["Texto a imagen", "Imagen", "LoRA"],
115
+ "tags": ["Texto a imagen", "Imagen"],
55
116
  "models": ["SD1.5", "Stability"],
56
117
  "date": "2025-03-01",
57
118
  "size": 2.27,
@@ -59,7 +120,7 @@
59
120
  },
60
121
  {
61
122
  "name": "inpaint_example",
62
- "title": "Inpaint",
123
+ "title": "Inpainting",
63
124
  "mediaType": "image",
64
125
  "mediaSubtype": "webp",
65
126
  "description": "Editar partes específicas de imágenes sin problemas.",
@@ -73,7 +134,7 @@
73
134
  },
74
135
  {
75
136
  "name": "inpaint_model_outpainting",
76
- "title": "Outpaint",
137
+ "title": "Outpainting",
77
138
  "mediaType": "image",
78
139
  "mediaSubtype": "webp",
79
140
  "description": "Extender imágenes más allá de sus límites originales.",
@@ -92,7 +153,7 @@
92
153
  "mediaSubtype": "webp",
93
154
  "description": "Generar imágenes usando inversión textual para estilos consistentes.",
94
155
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
95
- "tags": ["Incrustación", "Imagen"],
156
+ "tags": ["Texto a imagen", "Imagen"],
96
157
  "models": ["SD1.5", "Stability"],
97
158
  "date": "2025-03-01",
98
159
  "size": 4.86,
@@ -105,7 +166,7 @@
105
166
  "mediaSubtype": "webp",
106
167
  "description": "Generar imágenes con colocación precisa de objetos usando cuadros de texto.",
107
168
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
108
- "tags": ["Gligen", "Imagen"],
169
+ "tags": ["Imagen"],
109
170
  "models": ["SD1.5", "Stability"],
110
171
  "date": "2025-03-01",
111
172
  "size": 2.77,
@@ -117,7 +178,7 @@
117
178
  "mediaType": "image",
118
179
  "mediaSubtype": "webp",
119
180
  "description": "Generar imágenes controlando la composición con áreas definidas.",
120
- "tags": ["Composición de área", "Imagen"],
181
+ "tags": ["Texto a imagen", "Imagen"],
121
182
  "models": ["SD1.5", "Stability"],
122
183
  "date": "2025-03-01",
123
184
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/",
@@ -130,7 +191,7 @@
130
191
  "mediaType": "image",
131
192
  "mediaSubtype": "webp",
132
193
  "description": "Generar imágenes con colocación consistente de sujeto usando composición de área.",
133
- "tags": ["Composición de área", "Imagen"],
194
+ "tags": ["Texto a imagen", "Imagen"],
134
195
  "models": ["SD1.5", "Stability"],
135
196
  "date": "2025-03-01",
136
197
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition",
@@ -228,7 +289,7 @@
228
289
  "mediaSubtype": "webp",
229
290
  "description": "Generar imágenes guiadas por información de profundidad usando ControlNet.",
230
291
  "thumbnailVariant": "hoverDissolve",
231
- "tags": ["ControlNet", "Imagen"],
292
+ "tags": ["ControlNet", "Imagen", "Texto a imagen"],
232
293
  "models": ["SD1.5", "Stability"],
233
294
  "date": "2025-03-01",
234
295
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
@@ -242,7 +303,7 @@
242
303
  "mediaSubtype": "webp",
243
304
  "description": "Generar imágenes guiadas por información de profundidad usando adaptador T2I.",
244
305
  "thumbnailVariant": "hoverDissolve",
245
- "tags": ["Adaptador T2I", "Imagen"],
306
+ "tags": ["ControlNet", "Imagen", "Texto a imagen"],
246
307
  "models": ["SD1.5", "Stability"],
247
308
  "date": "2025-03-01",
248
309
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
@@ -256,7 +317,7 @@
256
317
  "mediaSubtype": "webp",
257
318
  "description": "Generar imágenes combinando múltiples modelos ControlNet.",
258
319
  "thumbnailVariant": "hoverDissolve",
259
- "tags": ["ControlNet", "Imagen"],
320
+ "tags": ["ControlNet", "Imagen", "Texto a imagen"],
260
321
  "models": ["SD1.5", "Stability"],
261
322
  "date": "2025-03-01",
262
323
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets",
@@ -270,7 +331,7 @@
270
331
  "type": "image",
271
332
  "category": "GENERATION TYPE",
272
333
  "icon": "icon-[lucide--image]",
273
- "title": "Imagen",
334
+ "title": "Image",
274
335
  "templates": [
275
336
  {
276
337
  "name": "image_qwen_image",
@@ -289,8 +350,8 @@
289
350
  "title": "Qwen-Image InstantX ControlNet",
290
351
  "mediaType": "image",
291
352
  "mediaSubtype": "webp",
292
- "description": "Generate images with Qwen-Image InstantX ControlNet, supporting canny, soft edge, depth, pose",
293
- "tags": ["Image to Image", "Image", "ControlNet"],
353
+ "description": "Genera imágenes con Qwen-Image InstantX ControlNet, compatible con canny, bordes suaves, profundidad y pose",
354
+ "tags": ["Imagen a imagen", "Imagen", "ControlNet"],
294
355
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
295
356
  "models": ["Qwen-Image"],
296
357
  "date": "2025-08-23",
@@ -298,12 +359,12 @@
298
359
  },
299
360
  {
300
361
  "name": "image_qwen_image_instantx_inpainting_controlnet",
301
- "title": "Qwen-Image InstantX Inpainting ControlNet",
362
+ "title": "Qwen-Image InstantX ControlNet de Inpainting",
302
363
  "mediaType": "image",
303
364
  "mediaSubtype": "webp",
304
365
  "thumbnailVariant": "compareSlider",
305
- "description": "Professional inpainting and image editing with Qwen-Image InstantX ControlNet. Supports object replacement, text modification, background changes, and outpainting.",
306
- "tags": ["Image to Image", "Image", "ControlNet", "Inpainting"],
366
+ "description": "Inpainting profesional y edición de imágenes con Qwen-Image InstantX ControlNet. Compatible con reemplazo de objetos, modificación de texto, cambios de fondo y outpainting.",
367
+ "tags": ["Imagen a imagen", "Imagen", "ControlNet", "Inpaint"],
307
368
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
308
369
  "models": ["Qwen-Image"],
309
370
  "date": "2025-09-12",
@@ -323,26 +384,26 @@
323
384
  },
324
385
  {
325
386
  "name": "image_qwen_image_controlnet_patch",
326
- "title": "Qwen-Image ControlNet model patch",
387
+ "title": "Parche de modelo Qwen-Image ControlNet",
327
388
  "mediaType": "image",
328
389
  "mediaSubtype": "webp",
329
390
  "thumbnailVariant": "compareSlider",
330
- "description": "Control image generation using Qwen-Image ControlNet models. Supports canny, depth, and inpainting controls through model patching.",
391
+ "description": "Controla la generación de imágenes usando modelos Qwen-Image ControlNet. Compatible con controles canny, profundidad e inpainting mediante parcheo de modelo.",
331
392
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
332
- "tags": ["Text to Image", "Image", "ControlNet"],
393
+ "tags": ["Texto a imagen", "Imagen", "ControlNet"],
333
394
  "models": ["Qwen-Image"],
334
395
  "date": "2025-08-24",
335
396
  "size": 31.7
336
397
  },
337
398
  {
338
399
  "name": "image_qwen_image_edit_2509",
339
- "title": "Qwen Image Edit 2509",
400
+ "title": "Qwen Edición de Imagen 2509",
340
401
  "mediaType": "image",
341
402
  "mediaSubtype": "webp",
342
403
  "thumbnailVariant": "compareSlider",
343
- "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
404
+ "description": "Edición avanzada de imágenes con soporte multi-imagen, consistencia mejorada e integración de ControlNet.",
344
405
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
345
- "tags": ["Image to Image", "Image Edit", "Multi-Image", "ControlNet"],
406
+ "tags": ["Imagen a imagen", "Edición de imagen", "ControlNet"],
346
407
  "models": ["Qwen-Image"],
347
408
  "date": "2025-09-25",
348
409
  "size": 29.59
@@ -376,11 +437,11 @@
376
437
  },
377
438
  {
378
439
  "name": "image_chroma1_radiance_text_to_image",
379
- "title": "Chroma1 Radiance text to image",
440
+ "title": "Chroma1 Radiance Texto a Imagen",
380
441
  "mediaType": "image",
381
442
  "mediaSubtype": "webp",
382
- "description": "Chroma1-Radiance works directly with image pixels instead of compressed latents, delivering higher quality images with reduced artifacts and distortion.",
383
- "tags": ["Text to Image", "Image"],
443
+ "description": "Chroma1-Radiance trabaja directamente con píxeles de imagen en lugar de latentes comprimidos, ofreciendo imágenes de mayor calidad con menos artefactos y distorsión.",
444
+ "tags": ["Texto a imagen", "Imagen"],
384
445
  "models": ["Chroma"],
385
446
  "date": "2025-09-18",
386
447
  "size": 22.0,
@@ -388,11 +449,11 @@
388
449
  },
389
450
  {
390
451
  "name": "image_netayume_lumina_t2i",
391
- "title": "NetaYume Lumina Text to Image",
452
+ "title": "NetaYume Lumina Texto a Imagen",
392
453
  "mediaType": "image",
393
454
  "mediaSubtype": "webp",
394
- "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
395
- "tags": ["Text to Image", "Image", "Anime"],
455
+ "description": "Generación de imágenes de estilo anime de alta calidad con comprensión mejorada de personajes y texturas detalladas. Ajustado finamente desde Neta Lumina en el conjunto de datos Danbooru.",
456
+ "tags": ["Texto a imagen", "Imagen", "Anime"],
396
457
  "models": ["NetaYume Lumina"],
397
458
  "date": "2025-10-10",
398
459
  "size": 9.89
@@ -416,7 +477,7 @@
416
477
  "mediaSubtype": "webp",
417
478
  "thumbnailVariant": "compareSlider",
418
479
  "description": "Supports various tasks such as image inpainting, outpainting, and object removal",
419
- "tags": ["Inpainting", "Outpainting"],
480
+ "tags": ["Inpaint", "Outpaint"],
420
481
  "models": ["Flux"],
421
482
  "date": "2025-09-21",
422
483
  "size": 27.01,
@@ -442,7 +503,7 @@
442
503
  "thumbnailVariant": "hoverDissolve",
443
504
  "mediaType": "image",
444
505
  "mediaSubtype": "webp",
445
- "tags": ["Image to Image", "Image"],
506
+ "tags": ["Imagen a imagen", "Imagen"],
446
507
  "models": ["Flux"],
447
508
  "date": "2025-09-02",
448
509
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
@@ -469,7 +530,7 @@
469
530
  "mediaSubtype": "webp",
470
531
  "description": "Un modelo FLUX afinado que lleva el fotorrealismo al máximo",
471
532
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
472
- "tags": ["Texto a imagen", "Imagen", "Fotorrealismo"],
533
+ "tags": ["Texto a imagen", "Imagen"],
473
534
  "models": ["Flux"],
474
535
  "date": "2025-07-31",
475
536
  "size": 20.74,
@@ -547,7 +608,7 @@
547
608
  "description": "Generar imágenes guiadas por información de profundidad usando Flux LoRA.",
548
609
  "thumbnailVariant": "hoverDissolve",
549
610
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
550
- "tags": ["Imagen a imagen", "ControlNet", "Imagen", "LoRA"],
611
+ "tags": ["Imagen a imagen", "ControlNet", "Imagen"],
551
612
  "models": ["Flux"],
552
613
  "date": "2025-03-01",
553
614
  "size": 32.98
@@ -559,7 +620,7 @@
559
620
  "mediaSubtype": "webp",
560
621
  "description": "Generar imágenes transfiriendo estilo de imágenes de referencia usando Flux Redux.",
561
622
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
562
- "tags": ["Imagen a imagen", "ControlNet", "Imagen", "LoRA"],
623
+ "tags": ["Imagen a imagen", "ControlNet", "Imagen"],
563
624
  "models": ["Flux"],
564
625
  "date": "2025-03-01",
565
626
  "size": 32.74
@@ -738,18 +799,6 @@
738
799
  "date": "2025-03-01",
739
800
  "size": 9.9
740
801
  },
741
- {
742
- "name": "sdxl_revision_zero_positive",
743
- "title": "Revisión cero positivo SDXL",
744
- "mediaType": "image",
745
- "mediaSubtype": "webp",
746
- "description": "Generar imágenes usando tanto indicaciones de texto como imágenes de referencia con Revisión SDXL.",
747
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
748
- "tags": ["Texto a imagen", "Imagen"],
749
- "models": ["SDXL", "Stability"],
750
- "date": "2025-03-01",
751
- "size": 9.9
752
- },
753
802
  {
754
803
  "name": "sdxlturbo_example",
755
804
  "title": "SDXL Turbo",
@@ -769,7 +818,7 @@
769
818
  "mediaSubtype": "webp",
770
819
  "thumbnailVariant": "compareSlider",
771
820
  "description": "Ejecutar Profundidad Lotus en ComfyUI para estimación monocromática eficiente sin entrenamiento previo con alta retención de detalles.",
772
- "tags": ["Profundidad", "Imagen"],
821
+ "tags": ["Imagen", "Texto a imagen"],
773
822
  "models": ["SD1.5"],
774
823
  "date": "2025-05-21",
775
824
  "size": 1.93
@@ -823,20 +872,20 @@
823
872
  },
824
873
  {
825
874
  "name": "video_wan2_2_14B_animate",
826
- "title": "Wan2.2 Animate, character animation and replacement",
827
- "description": "Unified character animation and replacement framework with precise motion and expression replication.",
875
+ "title": "Wan2.2 Animate animación y reemplazo de personajes",
876
+ "description": "Marco unificado de animación y reemplazo de personajes con replicación precisa de movimiento y expresión.",
828
877
  "mediaType": "image",
829
878
  "mediaSubtype": "webp",
830
879
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
831
- "tags": ["Video", "Image to Video"],
880
+ "tags": ["Video", "Imagen a video"],
832
881
  "models": ["Wan2.2", "Wan"],
833
882
  "date": "2025-09-22",
834
883
  "size": 25.535
835
884
  },
836
885
  {
837
886
  "name": "video_wan2_2_14B_s2v",
838
- "title": "Wan2.2-S2V Audio-Driven Video Generation",
839
- "description": "Transform static images and audio into dynamic videos with perfect synchronization and minute-level generation.",
887
+ "title": "Wan2.2-S2V Generación de Video Impulsada por Audio",
888
+ "description": "Transforma imágenes estáticas y audio en videos dinámicos con sincronización perfecta y generación de nivel por minuto.",
840
889
  "mediaType": "image",
841
890
  "mediaSubtype": "webp",
842
891
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
@@ -847,8 +896,8 @@
847
896
  },
848
897
  {
849
898
  "name": "video_humo",
850
- "title": "HuMo Video Generation",
851
- "description": "Generate videos basic on audio, image, and text, keep the character's lip sync.",
899
+ "title": "HuMo Generación de Video",
900
+ "description": "Genera videos basados en audio, imagen y texto, manteniendo la sincronización labial del personaje.",
852
901
  "mediaType": "image",
853
902
  "mediaSubtype": "webp",
854
903
  "tags": ["Video"],
@@ -858,8 +907,8 @@
858
907
  },
859
908
  {
860
909
  "name": "video_wan2_2_14B_fun_inpaint",
861
- "title": "Wan 2.2 14B Fun Inp",
862
- "description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
910
+ "title": "Wan 2.2 14B Fun Inpainting",
911
+ "description": "Genera videos a partir de fotogramas de inicio y fin usando Wan 2.2 Fun Inp.",
863
912
  "mediaType": "image",
864
913
  "mediaSubtype": "webp",
865
914
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
@@ -906,11 +955,11 @@
906
955
  },
907
956
  {
908
957
  "name": "video_wan2_2_5B_fun_inpaint",
909
- "title": "Wan 2.2 5B Fun Inpaint",
910
- "description": "Efficient video inpainting from start and end frames. 5B model delivers quick iterations for testing workflows.",
958
+ "title": "Wan 2.2 5B Fun Inpainting",
959
+ "description": "Inpainting de video eficiente desde fotogramas de inicio y fin. El modelo 5B ofrece iteraciones rápidas para probar flujos de trabajo.",
911
960
  "mediaType": "image",
912
961
  "mediaSubtype": "webp",
913
- "tags": ["Text to Video", "Video"],
962
+ "tags": ["Texto a video", "Video"],
914
963
  "models": ["Wan2.2", "Wan"],
915
964
  "date": "2025-07-29",
916
965
  "size": 16.9
@@ -918,10 +967,10 @@
918
967
  {
919
968
  "name": "video_wan2_2_5B_fun_control",
920
969
  "title": "Wan 2.2 5B Fun Control",
921
- "description": "Multi-condition video control with pose, depth, and edge guidance. Compact 5B size for experimental development.",
970
+ "description": "Control de video multicondición con guía de pose, profundidad y bordes. Tamaño compacto de 5B para desarrollo experimental.",
922
971
  "mediaType": "image",
923
972
  "mediaSubtype": "webp",
924
- "tags": ["Text to Video", "Video"],
973
+ "tags": ["Texto a video", "Video"],
925
974
  "models": ["Wan2.2", "Wan"],
926
975
  "date": "2025-07-29",
927
976
  "size": 16.9
@@ -945,7 +994,7 @@
945
994
  "mediaType": "image",
946
995
  "mediaSubtype": "webp",
947
996
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
948
- "tags": ["Referencia a video", "Video"],
997
+ "tags": ["Video", "Imagen a video"],
949
998
  "models": ["Wan2.1", "Wan"],
950
999
  "date": "2025-05-21",
951
1000
  "size": 53.79
@@ -971,7 +1020,7 @@
971
1020
  "mediaSubtype": "webp",
972
1021
  "thumbnailVariant": "compareSlider",
973
1022
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
974
- "tags": ["Outpainting", "Video"],
1023
+ "tags": ["Outpaint", "Video"],
975
1024
  "models": ["Wan2.1", "Wan"],
976
1025
  "date": "2025-05-21",
977
1026
  "size": 53.79
@@ -996,18 +1045,18 @@
996
1045
  "mediaSubtype": "webp",
997
1046
  "thumbnailVariant": "compareSlider",
998
1047
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
999
- "tags": ["Inpainting", "Video"],
1048
+ "tags": ["Inpaint", "Video"],
1000
1049
  "models": ["Wan2.1", "Wan"],
1001
1050
  "date": "2025-05-21",
1002
1051
  "size": 53.79
1003
1052
  },
1004
1053
  {
1005
1054
  "name": "video_wan2.1_alpha_t2v_14B",
1006
- "title": "Wan2.1 Alpha T2V",
1007
- "description": "Generate text-to-video with alpha channel support for transparent backgrounds and semi-transparent objects.",
1055
+ "title": "Wan2.1 Alpha Texto a Video",
1056
+ "description": "Genera videos desde texto con soporte de canal alfa para fondos transparentes y objetos semitransparentes.",
1008
1057
  "mediaType": "image",
1009
1058
  "mediaSubtype": "webp",
1010
- "tags": ["Text to Video", "Video"],
1059
+ "tags": ["Texto a video", "Video"],
1011
1060
  "models": ["Wan2.1", "Wan"],
1012
1061
  "date": "2025-10-06",
1013
1062
  "size": 20.95
@@ -1193,7 +1242,7 @@
1193
1242
  "templates": [
1194
1243
  {
1195
1244
  "name": "audio_stable_audio_example",
1196
- "title": "Stable Audio",
1245
+ "title": "Stable Audio",
1197
1246
  "mediaType": "audio",
1198
1247
  "mediaSubtype": "mp3",
1199
1248
  "description": "Generar audio a partir de indicaciones de texto usando Stable Audio.",
@@ -1209,7 +1258,7 @@
1209
1258
  "mediaType": "audio",
1210
1259
  "mediaSubtype": "mp3",
1211
1260
  "description": "Generar música instrumental a partir de indicaciones de texto usando ACE-Step v1.",
1212
- "tags": ["Texto a audio", "Audio", "Instrumentales"],
1261
+ "tags": ["Texto a audio", "Audio"],
1213
1262
  "models": ["ACE-Step"],
1214
1263
  "date": "2025-03-01",
1215
1264
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
@@ -1221,7 +1270,7 @@
1221
1270
  "mediaType": "audio",
1222
1271
  "mediaSubtype": "mp3",
1223
1272
  "description": "Generar canciones con voces a partir de indicaciones de texto usando ACE-Step v1, soportando personalización multilingüe y de estilo.",
1224
- "tags": ["Texto a audio", "Audio", "Canción"],
1273
+ "tags": ["Texto a audio", "Audio"],
1225
1274
  "models": ["ACE-Step"],
1226
1275
  "date": "2025-03-01",
1227
1276
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
@@ -1246,15 +1295,15 @@
1246
1295
  "type": "3d",
1247
1296
  "category": "GENERATION TYPE",
1248
1297
  "icon": "icon-[lucide--box]",
1249
- "title": "3D",
1298
+ "title": "3D Model",
1250
1299
  "templates": [
1251
1300
  {
1252
1301
  "name": "3d_hunyuan3d-v2.1",
1253
1302
  "title": "Hunyuan3D 2.1",
1254
1303
  "mediaType": "image",
1255
1304
  "mediaSubtype": "webp",
1256
- "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
1257
- "tags": ["Image to Model", "3D"],
1305
+ "description": "Genera modelos 3D a partir de imágenes individuales usando Hunyuan3D 2.0.",
1306
+ "tags": ["Imagen a 3D", "3D"],
1258
1307
  "models": ["Hunyuan3D"],
1259
1308
  "date": "2025-03-01",
1260
1309
  "tutorialUrl": "",
@@ -1266,7 +1315,7 @@
1266
1315
  "mediaType": "image",
1267
1316
  "mediaSubtype": "webp",
1268
1317
  "description": "Generar modelos 3D a partir de imágenes individuales usando Hunyuan3D 2.0.",
1269
- "tags": ["Imagen a modelo", "3D"],
1318
+ "tags": ["Imagen a 3D", "3D"],
1270
1319
  "models": ["Hunyuan3D"],
1271
1320
  "date": "2025-03-01",
1272
1321
  "tutorialUrl": "",
@@ -1274,11 +1323,11 @@
1274
1323
  },
1275
1324
  {
1276
1325
  "name": "3d_hunyuan3d_multiview_to_model",
1277
- "title": "Hunyuan3D 2.0 MV",
1326
+ "title": "Hunyuan3D 2.0 Multivista",
1278
1327
  "mediaType": "image",
1279
1328
  "mediaSubtype": "webp",
1280
1329
  "description": "Generar modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV.",
1281
- "tags": ["Vista múltiple a modelo", "3D"],
1330
+ "tags": ["3D", "Imagen a 3D"],
1282
1331
  "models": ["Hunyuan3D"],
1283
1332
  "date": "2025-03-01",
1284
1333
  "tutorialUrl": "",
@@ -1287,28 +1336,16 @@
1287
1336
  },
1288
1337
  {
1289
1338
  "name": "3d_hunyuan3d_multiview_to_model_turbo",
1290
- "title": "Hunyuan3D 2.0 MV Turbo",
1339
+ "title": "Hunyuan3D 2.0 Multivista Turbo",
1291
1340
  "mediaType": "image",
1292
1341
  "mediaSubtype": "webp",
1293
1342
  "description": "Generar modelos 3D a partir de múltiples vistas usando Hunyuan3D 2.0 MV Turbo.",
1294
- "tags": ["Vista múltiple a modelo", "3D"],
1343
+ "tags": ["Imagen a 3D", "3D"],
1295
1344
  "models": ["Hunyuan3D"],
1296
1345
  "date": "2025-03-01",
1297
1346
  "tutorialUrl": "",
1298
1347
  "thumbnailVariant": "hoverDissolve",
1299
1348
  "size": 4.59
1300
- },
1301
- {
1302
- "name": "stable_zero123_example",
1303
- "title": "Stable Zero123",
1304
- "mediaType": "image",
1305
- "mediaSubtype": "webp",
1306
- "description": "Generar vistas 3D a partir de imágenes individuales usando Stable Zero123.",
1307
- "tags": ["Imagen a 3D", "3D"],
1308
- "models": ["Stable Zero123"],
1309
- "date": "2025-03-01",
1310
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/",
1311
- "size": 7.99
1312
1349
  }
1313
1350
  ]
1314
1351
  },
@@ -1317,15 +1354,15 @@
1317
1354
  "type": "image",
1318
1355
  "category": "CLOSED SOURCE MODELS",
1319
1356
  "icon": "icon-[lucide--hand-coins]",
1320
- "title": "API de imagen",
1357
+ "title": "Image API",
1321
1358
  "templates": [
1322
1359
  {
1323
1360
  "name": "api_bytedance_seedream4",
1324
1361
  "title": "ByteDance Seedream 4.0",
1325
- "description": "Multi-modal AI model for text-to-image and image editing. Generate 2K images in under 2 seconds with natural language control.",
1362
+ "description": "Modelo de IA multimodal para texto a imagen y edición de imágenes. Genera imágenes 2K en menos de 2 segundos con control en lenguaje natural.",
1326
1363
  "mediaType": "image",
1327
1364
  "mediaSubtype": "webp",
1328
- "tags": ["Image Edit", "Image", "API", "Text-to-Image"],
1365
+ "tags": ["Edición de imagen", "Imagen", "API", "Texto a imagen"],
1329
1366
  "models": ["Seedream 4.0", "ByteDance"],
1330
1367
  "date": "2025-09-11",
1331
1368
  "OpenSource": false,
@@ -1334,11 +1371,11 @@
1334
1371
  },
1335
1372
  {
1336
1373
  "name": "api_google_gemini_image",
1337
- "title": "Google Gemini Image",
1338
- "description": "Nano-banana (Gemini-2.5-Flash Image) - image editing with consistency.",
1374
+ "title": "Google Gemini Image",
1375
+ "description": "Nano-banana (Gemini-2.5-Flash Image) - edición de imágenes con consistencia.",
1339
1376
  "mediaType": "image",
1340
1377
  "mediaSubtype": "webp",
1341
- "tags": ["Image Edit", "Image", "API", "Text-to-Image"],
1378
+ "tags": ["Edición de imagen", "Imagen", "API", "Texto a imagen"],
1342
1379
  "models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
1343
1380
  "date": "2025-08-27",
1344
1381
  "OpenSource": false,
@@ -1392,11 +1429,11 @@
1392
1429
  },
1393
1430
  {
1394
1431
  "name": "api_wan_text_to_image",
1395
- "title": "Wan2.5: Text to Image",
1396
- "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
1432
+ "title": "Wan2.5: Texto a Imagen",
1433
+ "description": "Genera imágenes con excelente seguimiento de prompts y calidad visual usando Wan2.5.",
1397
1434
  "mediaType": "image",
1398
1435
  "mediaSubtype": "webp",
1399
- "tags": ["Text to Image", "Image", "API"],
1436
+ "tags": ["Texto a imagen", "Imagen", "API"],
1400
1437
  "models": ["Wan2.5"],
1401
1438
  "date": "2025-09-25",
1402
1439
  "OpenSource": false,
@@ -1438,7 +1475,7 @@
1438
1475
  "mediaType": "image",
1439
1476
  "mediaSubtype": "webp",
1440
1477
  "thumbnailVariant": "compareSlider",
1441
- "tags": ["Texto a imagen", "Imagen", "API", "Transferencia de estilo"],
1478
+ "tags": ["Texto a imagen", "Imagen", "API"],
1442
1479
  "models": ["Luma"],
1443
1480
  "date": "2025-03-01",
1444
1481
  "OpenSource": false,
@@ -1451,7 +1488,7 @@
1451
1488
  "description": "Generar imágenes con paletas de colores personalizadas y visuales específicos de marca usando Recraft.",
1452
1489
  "mediaType": "image",
1453
1490
  "mediaSubtype": "webp",
1454
- "tags": ["Texto a imagen", "Imagen", "API", "Control de color"],
1491
+ "tags": ["Texto a imagen", "Imagen", "API"],
1455
1492
  "models": ["Recraft"],
1456
1493
  "date": "2025-03-01",
1457
1494
  "OpenSource": false,
@@ -1464,7 +1501,7 @@
1464
1501
  "description": "Controlar estilo con ejemplos visuales, alinear posicionamiento y ajustar objetos finamente. Almacenar y compartir estilos para consistencia perfecta de marca.",
1465
1502
  "mediaType": "image",
1466
1503
  "mediaSubtype": "webp",
1467
- "tags": ["Texto a imagen", "Imagen", "API", "Control de estilo"],
1504
+ "tags": ["Texto a imagen", "Imagen", "API"],
1468
1505
  "models": ["Recraft"],
1469
1506
  "date": "2025-03-01",
1470
1507
  "OpenSource": false,
@@ -1504,7 +1541,7 @@
1504
1541
  "mediaType": "image",
1505
1542
  "thumbnailVariant": "compareSlider",
1506
1543
  "mediaSubtype": "webp",
1507
- "tags": ["Imagen a imagen", "Imagen", "API", "Transferencia de estilo"],
1544
+ "tags": ["Imagen a imagen", "Imagen", "API"],
1508
1545
  "models": ["Runway"],
1509
1546
  "date": "2025-03-01",
1510
1547
  "OpenSource": false,
@@ -1571,7 +1608,7 @@
1571
1608
  "description": "Generar imágenes de calidad profesional con excelente alineación de indicaciones, fotorrealismo y renderizado de texto usando Ideogram V3.",
1572
1609
  "mediaType": "image",
1573
1610
  "mediaSubtype": "webp",
1574
- "tags": ["Texto a imagen", "Imagen", "API", "Renderizado de texto"],
1611
+ "tags": ["Texto a imagen", "Imagen", "API"],
1575
1612
  "models": ["Ideogram"],
1576
1613
  "date": "2025-03-01",
1577
1614
  "OpenSource": false,
@@ -1629,7 +1666,7 @@
1629
1666
  "mediaType": "image",
1630
1667
  "mediaSubtype": "webp",
1631
1668
  "thumbnailVariant": "compareSlider",
1632
- "tags": ["Texto a imagen", "Imagen", "API", "Entrada múltiple"],
1669
+ "tags": ["Texto a imagen", "Imagen", "API"],
1633
1670
  "models": ["GPT-Image-1"],
1634
1671
  "date": "2025-03-01",
1635
1672
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
@@ -1687,15 +1724,15 @@
1687
1724
  "type": "video",
1688
1725
  "category": "CLOSED SOURCE MODELS",
1689
1726
  "icon": "icon-[lucide--film]",
1690
- "title": "API de video",
1727
+ "title": "Video API",
1691
1728
  "templates": [
1692
1729
  {
1693
1730
  "name": "api_openai_sora_video",
1694
- "title": "Sora 2: Text & Image to Video",
1695
- "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
1731
+ "title": "Sora 2: Texto e Imagen a Video",
1732
+ "description": "Generación de video Sora-2 y Sora-2 Pro de OpenAI con audio sincronizado.",
1696
1733
  "mediaType": "image",
1697
1734
  "mediaSubtype": "webp",
1698
- "tags": ["Image to Video", "Text to Video", "API"],
1735
+ "tags": ["Imagen a video", "Texto a video", "API"],
1699
1736
  "models": ["OpenAI"],
1700
1737
  "date": "2025-10-08",
1701
1738
  "OpenSource": false,
@@ -1704,11 +1741,11 @@
1704
1741
  },
1705
1742
  {
1706
1743
  "name": "api_wan_text_to_video",
1707
- "title": "Wan2.5: Text to Video",
1708
- "description": "Generate videos with synchronized audio, enhanced motion, and superior quality.",
1744
+ "title": "Wan2.5: Texto a Video",
1745
+ "description": "Genera videos con audio sincronizado, movimiento mejorado y calidad superior.",
1709
1746
  "mediaType": "image",
1710
1747
  "mediaSubtype": "webp",
1711
- "tags": ["Image to Video", "Video", "API"],
1748
+ "tags": ["Imagen a video", "Video", "API"],
1712
1749
  "models": ["Wan2.5"],
1713
1750
  "date": "2025-09-27",
1714
1751
  "tutorialUrl": "",
@@ -1718,11 +1755,11 @@
1718
1755
  },
1719
1756
  {
1720
1757
  "name": "api_wan_image_to_video",
1721
- "title": "Wan2.5: Image to Video",
1722
- "description": "Transform images into videos with synchronized audio, enhanced motion, and superior quality.",
1758
+ "title": "Wan2.5: Imagen a Video",
1759
+ "description": "Transforma imágenes en videos con audio sincronizado, movimiento mejorado y calidad superior.",
1723
1760
  "mediaType": "image",
1724
1761
  "mediaSubtype": "webp",
1725
- "tags": ["Image to Video", "Video", "API"],
1762
+ "tags": ["Imagen a video", "Video", "API"],
1726
1763
  "models": ["Wan2.5"],
1727
1764
  "date": "2025-09-27",
1728
1765
  "tutorialUrl": "",
@@ -1750,7 +1787,7 @@
1750
1787
  "description": "Generar videos dinámicos aplicando efectos visuales a imágenes usando Kling.",
1751
1788
  "mediaType": "image",
1752
1789
  "mediaSubtype": "webp",
1753
- "tags": ["Efectos de video", "Video", "API"],
1790
+ "tags": ["Video", "API"],
1754
1791
  "models": ["Kling"],
1755
1792
  "date": "2025-03-01",
1756
1793
  "tutorialUrl": "",
@@ -1764,7 +1801,7 @@
1764
1801
  "description": "Generar videos controlando el primer y último fotograma.",
1765
1802
  "mediaType": "image",
1766
1803
  "mediaSubtype": "webp",
1767
- "tags": ["Generación de video", "Video", "API", "Control de fotograma"],
1804
+ "tags": ["Video", "API", "FLF2V"],
1768
1805
  "models": ["Kling"],
1769
1806
  "date": "2025-03-01",
1770
1807
  "tutorialUrl": "",
@@ -1806,7 +1843,7 @@
1806
1843
  "description": "Generar videos con sujetos consistentes usando múltiples imágenes de referencia (hasta 7) para continuidad de personaje y estilo a lo largo de la secuencia de video.",
1807
1844
  "mediaType": "image",
1808
1845
  "mediaSubtype": "webp",
1809
- "tags": ["Referencia a video", "Video", "API"],
1846
+ "tags": ["Video", "Imagen a video", "API"],
1810
1847
  "models": ["Vidu"],
1811
1848
  "date": "2025-08-23",
1812
1849
  "tutorialUrl": "",
@@ -1820,7 +1857,7 @@
1820
1857
  "description": "Crear transiciones de video suaves entre fotogramas de inicio y fin definidos con interpolación natural de movimiento y calidad visual consistente.",
1821
1858
  "mediaType": "image",
1822
1859
  "mediaSubtype": "webp",
1823
- "tags": ["FLF2V", "Video", "API"],
1860
+ "tags": ["Video", "API", "FLF2V"],
1824
1861
  "models": ["Vidu"],
1825
1862
  "date": "2025-08-23",
1826
1863
  "tutorialUrl": "",
@@ -1830,11 +1867,11 @@
1830
1867
  },
1831
1868
  {
1832
1869
  "name": "api_bytedance_text_to_video",
1833
- "title": "ByteDance: Text to Video",
1834
- "description": "Generate high-quality videos directly from text prompts using ByteDance's Seedance model. Supports multiple resolutions and aspect ratios with natural motion and cinematic quality.",
1870
+ "title": "ByteDance: Texto a Video",
1871
+ "description": "Genera videos de alta calidad directamente desde prompts de texto usando el modelo Seedance de ByteDance. Compatible con múltiples resoluciones y relaciones de aspecto con movimiento natural y calidad cinematográfica.",
1835
1872
  "mediaType": "image",
1836
1873
  "mediaSubtype": "webp",
1837
- "tags": ["Video", "API", "Text to Video"],
1874
+ "tags": ["Video", "API", "Texto a video"],
1838
1875
  "models": ["ByteDance"],
1839
1876
  "date": "2025-10-6",
1840
1877
  "tutorialUrl": "",
@@ -1844,11 +1881,11 @@
1844
1881
  },
1845
1882
  {
1846
1883
  "name": "api_bytedance_image_to_video",
1847
- "title": "ByteDance: Image to Video",
1848
- "description": "Transform static images into dynamic videos using ByteDance's Seedance model. Analyzes image structure and generates natural motion with consistent visual style and coherent video sequences.",
1884
+ "title": "ByteDance: Imagen a Video",
1885
+ "description": "Transforma imágenes estáticas en videos dinámicos usando el modelo Seedance de ByteDance. Analiza la estructura de la imagen y genera movimiento natural con estilo visual consistente y secuencias de video coherentes.",
1849
1886
  "mediaType": "image",
1850
1887
  "mediaSubtype": "webp",
1851
- "tags": ["Video", "API", "Image to Video"],
1888
+ "tags": ["Video", "API", "Imagen a video"],
1852
1889
  "models": ["ByteDance"],
1853
1890
  "date": "2025-10-6",
1854
1891
  "tutorialUrl": "",
@@ -1858,8 +1895,8 @@
1858
1895
  },
1859
1896
  {
1860
1897
  "name": "api_bytedance_flf2v",
1861
- "title": "ByteDance: Start End to Video",
1862
- "description": "Generate cinematic video transitions between start and end frames with fluid motion, scene consistency, and professional polish using ByteDance's Seedance model.",
1898
+ "title": "ByteDance: Inicio-Fin a Video",
1899
+ "description": "Genera transiciones de video cinematográficas entre fotogramas de inicio y fin con movimiento fluido, consistencia de escena y acabado profesional usando el modelo Seedance de ByteDance.",
1863
1900
  "mediaType": "image",
1864
1901
  "mediaSubtype": "webp",
1865
1902
  "tags": ["Video", "API", "FLF2V"],
@@ -1933,7 +1970,7 @@
1933
1970
  "mediaType": "image",
1934
1971
  "thumbnailVariant": "hoverDissolve",
1935
1972
  "mediaSubtype": "webp",
1936
- "tags": ["Video a video", "Video", "API", "Transferencia de movimiento"],
1973
+ "tags": ["Video a video", "Video", "API"],
1937
1974
  "models": ["Moonvalley"],
1938
1975
  "date": "2025-03-01",
1939
1976
  "tutorialUrl": "",
@@ -1948,7 +1985,7 @@
1948
1985
  "mediaType": "image",
1949
1986
  "thumbnailVariant": "hoverDissolve",
1950
1987
  "mediaSubtype": "webp",
1951
- "tags": ["Video a video", "Video", "API", "Control de pose"],
1988
+ "tags": ["Video a video", "Video", "API"],
1952
1989
  "models": ["Moonvalley"],
1953
1990
  "date": "2025-03-01",
1954
1991
  "tutorialUrl": "",
@@ -2018,7 +2055,7 @@
2018
2055
  "description": "Generar videos dinámicos a partir de imágenes estáticas con movimiento y efectos usando PixVerse.",
2019
2056
  "mediaType": "image",
2020
2057
  "mediaSubtype": "webp",
2021
- "tags": ["Imagen a video", "Video", "API", "Plantillas"],
2058
+ "tags": ["Imagen a video", "Video", "API"],
2022
2059
  "models": ["PixVerse"],
2023
2060
  "date": "2025-03-01",
2024
2061
  "tutorialUrl": "",
@@ -2074,7 +2111,7 @@
2074
2111
  "description": "Generar transiciones de video suaves entre dos fotogramas clave con precisión de Runway.",
2075
2112
  "mediaType": "image",
2076
2113
  "mediaSubtype": "webp",
2077
- "tags": ["Generación de video", "Video", "API", "Control de fotograma"],
2114
+ "tags": ["Video", "API", "FLF2V"],
2078
2115
  "models": ["Runway"],
2079
2116
  "date": "2025-03-01",
2080
2117
  "tutorialUrl": "",
@@ -2102,7 +2139,7 @@
2102
2139
  "description": "Generar videos que incorporen múltiples imágenes de entrada usando Pika Scenes.",
2103
2140
  "mediaType": "image",
2104
2141
  "mediaSubtype": "webp",
2105
- "tags": ["Imagen a video", "Video", "API", "Imagen múltiple"],
2142
+ "tags": ["Imagen a video", "Video", "API"],
2106
2143
  "models": ["Pika"],
2107
2144
  "date": "2025-03-01",
2108
2145
  "tutorialUrl": "",
@@ -2145,15 +2182,15 @@
2145
2182
  "type": "image",
2146
2183
  "category": "CLOSED SOURCE MODELS",
2147
2184
  "icon": "icon-[lucide--box]",
2148
- "title": "API 3D",
2185
+ "title": "3D API",
2149
2186
  "templates": [
2150
2187
  {
2151
2188
  "name": "api_rodin_gen2",
2152
- "title": "Rodin: Gen-2 Image to Model",
2153
- "description": "Generate detailed 4X mesh quality 3D models from photos using Rodin Gen2",
2189
+ "title": "Rodin: Gen-2 Imagen a Modelo",
2190
+ "description": "Genera modelos 3D detallados con 4X de calidad de malla a partir de fotos usando Rodin Gen2",
2154
2191
  "mediaType": "image",
2155
2192
  "mediaSubtype": "webp",
2156
- "tags": ["Image to Model", "3D", "API"],
2193
+ "tags": ["Imagen a 3D", "3D", "API"],
2157
2194
  "models": ["Rodin"],
2158
2195
  "date": "2025-09-27",
2159
2196
  "tutorialUrl": "",
@@ -2168,7 +2205,7 @@
2168
2205
  "mediaType": "image",
2169
2206
  "thumbnailVariant": "compareSlider",
2170
2207
  "mediaSubtype": "webp",
2171
- "tags": ["Imagen a modelo", "3D", "API"],
2208
+ "tags": ["Imagen a 3D", "3D", "API"],
2172
2209
  "models": ["Rodin"],
2173
2210
  "date": "2025-03-01",
2174
2211
  "tutorialUrl": "",
@@ -2183,7 +2220,7 @@
2183
2220
  "mediaType": "image",
2184
2221
  "thumbnailVariant": "compareSlider",
2185
2222
  "mediaSubtype": "webp",
2186
- "tags": ["Vista múltiple a modelo", "3D", "API"],
2223
+ "tags": ["Imagen a 3D", "3D", "API"],
2187
2224
  "models": ["Rodin"],
2188
2225
  "date": "2025-03-01",
2189
2226
  "tutorialUrl": "",
@@ -2212,7 +2249,7 @@
2212
2249
  "mediaType": "image",
2213
2250
  "thumbnailVariant": "compareSlider",
2214
2251
  "mediaSubtype": "webp",
2215
- "tags": ["Imagen a modelo", "3D", "API"],
2252
+ "tags": ["Imagen a 3D", "3D", "API"],
2216
2253
  "models": ["Tripo"],
2217
2254
  "date": "2025-03-01",
2218
2255
  "tutorialUrl": "",
@@ -2227,7 +2264,7 @@
2227
2264
  "mediaType": "image",
2228
2265
  "thumbnailVariant": "compareSlider",
2229
2266
  "mediaSubtype": "webp",
2230
- "tags": ["Vista múltiple a modelo", "3D", "API"],
2267
+ "tags": ["Imagen a 3D", "3D", "API"],
2231
2268
  "models": ["Tripo"],
2232
2269
  "date": "2025-03-01",
2233
2270
  "tutorialUrl": "",
@@ -2242,7 +2279,7 @@
2242
2279
  "type": "audio",
2243
2280
  "category": "CLOSED SOURCE MODELS",
2244
2281
  "icon": "icon-[lucide--volume-2]",
2245
- "title": "API de Audio",
2282
+ "title": "Audio API",
2246
2283
  "templates": [
2247
2284
  {
2248
2285
  "name": "api_stability_ai_text_to_audio",
@@ -2290,7 +2327,7 @@
2290
2327
  "type": "image",
2291
2328
  "category": "CLOSED SOURCE MODELS",
2292
2329
  "icon": "icon-[lucide--message-square-text]",
2293
- "title": "API LLM",
2330
+ "title": "LLM API",
2294
2331
  "templates": [
2295
2332
  {
2296
2333
  "name": "api_openai_chat",
@@ -2298,7 +2335,7 @@
2298
2335
  "description": "Interactuar con los modelos de lenguaje avanzados de OpenAI para conversaciones inteligentes.",
2299
2336
  "mediaType": "image",
2300
2337
  "mediaSubtype": "webp",
2301
- "tags": ["Chat", "LLM", "API"],
2338
+ "tags": ["LLM", "API"],
2302
2339
  "models": ["OpenAI"],
2303
2340
  "date": "2025-03-01",
2304
2341
  "tutorialUrl": "",
@@ -2312,7 +2349,7 @@
2312
2349
  "description": "Experimentar la IA multimodal de Google con las capacidades de razonamiento de Gemini.",
2313
2350
  "mediaType": "image",
2314
2351
  "mediaSubtype": "webp",
2315
- "tags": ["Chat", "LLM", "API"],
2352
+ "tags": ["LLM", "API"],
2316
2353
  "models": ["Google Gemini", "Google"],
2317
2354
  "date": "2025-03-01",
2318
2355
  "tutorialUrl": "",