comfyui-workflow-templates 0.1.96__py3-none-any.whl → 0.1.97__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of comfyui-workflow-templates might be problematic. Click here for more details.

Files changed (29) hide show
  1. comfyui_workflow_templates/templates/01_qwen_t2i_subgraphed-1.webp +0 -0
  2. comfyui_workflow_templates/templates/01_qwen_t2i_subgraphed.json +1288 -0
  3. comfyui_workflow_templates/templates/02_qwen_Image_edit_subgraphed-1.webp +0 -0
  4. comfyui_workflow_templates/templates/02_qwen_Image_edit_subgraphed.json +1754 -0
  5. comfyui_workflow_templates/templates/03_video_wan2_2_14B_i2v_subgraphed-1.webp +0 -0
  6. comfyui_workflow_templates/templates/03_video_wan2_2_14B_i2v_subgraphed.json +1416 -0
  7. comfyui_workflow_templates/templates/04_hunyuan_3d_2.1_subgraphed-1.webp +0 -0
  8. comfyui_workflow_templates/templates/04_hunyuan_3d_2.1_subgraphed.json +850 -0
  9. comfyui_workflow_templates/templates/05_audio_ace_step_1_t2a_song_subgraphed-1.webp +0 -0
  10. comfyui_workflow_templates/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +1014 -0
  11. comfyui_workflow_templates/templates/api_rodin_gen2.json +123 -125
  12. comfyui_workflow_templates/templates/image_qwen_image_instantx_inpainting_controlnet.json +1536 -715
  13. comfyui_workflow_templates/templates/index.ar.json +2357 -0
  14. comfyui_workflow_templates/templates/index.es.json +180 -120
  15. comfyui_workflow_templates/templates/index.fr.json +346 -286
  16. comfyui_workflow_templates/templates/index.ja.json +227 -167
  17. comfyui_workflow_templates/templates/index.json +62 -2
  18. comfyui_workflow_templates/templates/index.ko.json +181 -121
  19. comfyui_workflow_templates/templates/index.ru.json +220 -160
  20. comfyui_workflow_templates/templates/index.tr.json +2357 -0
  21. comfyui_workflow_templates/templates/index.tr_translated.json +2357 -0
  22. comfyui_workflow_templates/templates/index.zh-TW.json +276 -216
  23. comfyui_workflow_templates/templates/index.zh.json +233 -173
  24. comfyui_workflow_templates/templates/video_wan_vace_inpainting.json +1 -1
  25. {comfyui_workflow_templates-0.1.96.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/METADATA +1 -1
  26. {comfyui_workflow_templates-0.1.96.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/RECORD +29 -16
  27. {comfyui_workflow_templates-0.1.96.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/WHEEL +0 -0
  28. {comfyui_workflow_templates-0.1.96.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/licenses/LICENSE +0 -0
  29. {comfyui_workflow_templates-0.1.96.dist-info → comfyui_workflow_templates-0.1.97.dist-info}/top_level.txt +0 -0
@@ -3,8 +3,68 @@
3
3
  "moduleName": "default",
4
4
  "type": "image",
5
5
  "isEssential": true,
6
- "title": "基礎",
6
+ "title": "Getting Started",
7
7
  "templates": [
8
+ {
9
+ "name": "01_qwen_t2i_subgraphed",
10
+ "title": "Qwen-Image Text to Image",
11
+ "mediaType": "image",
12
+ "mediaSubtype": "webp",
13
+ "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model.",
14
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
15
+ "tags": ["文生圖", "圖像"],
16
+ "models": ["Qwen-Image"],
17
+ "date": "2025-10-17",
18
+ "size": 29.59
19
+ },
20
+ {
21
+ "name": "02_qwen_Image_edit_subgraphed",
22
+ "title": "Qwen Image Edit 2509",
23
+ "mediaType": "image",
24
+ "mediaSubtype": "webp",
25
+ "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
26
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
27
+ "tags": ["圖生圖", "圖像編輯", "ControlNet"],
28
+ "models": ["Qwen-Image"],
29
+ "date": "2025-10-17",
30
+ "size": 29.59
31
+ },
32
+ {
33
+ "name": "03_video_wan2_2_14B_i2v_subgraphed",
34
+ "title": "Wan 2.2 14B Image to Video",
35
+ "description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
36
+ "mediaType": "image",
37
+ "mediaSubtype": "webp",
38
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
39
+ "tags": ["圖生視頻", "視頻"],
40
+ "models": ["Wan2.2", "Wan"],
41
+ "date": "2025-10-17",
42
+ "size": 35.42
43
+ },
44
+ {
45
+ "name": "04_hunyuan_3d_2.1_subgraphed",
46
+ "title": "Hunyuan3D 2.1: image to 3D",
47
+ "mediaType": "image",
48
+ "mediaSubtype": "webp",
49
+ "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
50
+ "tags": ["圖像轉3D", "3D"],
51
+ "models": ["Hunyuan3D"],
52
+ "date": "2025-10-17",
53
+ "tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
54
+ "size": 4.59
55
+ },
56
+ {
57
+ "name": "05_audio_ace_step_1_t2a_song_subgraphed",
58
+ "title": "ACE Step v1 Text to Song",
59
+ "mediaType": "image",
60
+ "mediaSubtype": "webp",
61
+ "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
62
+ "tags": ["文本轉音頻", "音頻"],
63
+ "models": ["ACE-Step"],
64
+ "date": "2025-10-17",
65
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
66
+ "size": 7.17
67
+ },
8
68
  {
9
69
  "name": "default",
10
70
  "title": "影像生成",
@@ -12,7 +72,7 @@
12
72
  "mediaSubtype": "webp",
13
73
  "description": "透過文字提示生成影像。",
14
74
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
15
- "tags": ["文字到影像", "影像"],
75
+ "tags": ["文生圖", "圖像"],
16
76
  "models": ["SD1.5", "Stability"],
17
77
  "date": "2025-03-01",
18
78
  "size": 1.99,
@@ -25,7 +85,7 @@
25
85
  "mediaSubtype": "webp",
26
86
  "description": "使用文字提示轉換現有影像。",
27
87
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
28
- "tags": ["影像到影像", "影像"],
88
+ "tags": ["圖生圖", "圖像"],
29
89
  "models": ["SD1.5", "Stability"],
30
90
  "date": "2025-03-01",
31
91
  "size": 1.99,
@@ -39,7 +99,7 @@
39
99
  "mediaSubtype": "webp",
40
100
  "description": "使用 LoRA 模型為特定風格或主題生成影像。",
41
101
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
42
- "tags": ["文字到影像", "影像"],
102
+ "tags": ["文生圖", "圖像"],
43
103
  "models": ["SD1.5", "Stability"],
44
104
  "date": "2025-03-01",
45
105
  "size": 2.27,
@@ -52,7 +112,7 @@
52
112
  "mediaSubtype": "webp",
53
113
  "description": "結合多個 LoRA 模型生成影像。",
54
114
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
55
- "tags": ["文字到影像", "影像", "LoRA"],
115
+ "tags": ["文生圖", "圖像"],
56
116
  "models": ["SD1.5", "Stability"],
57
117
  "date": "2025-03-01",
58
118
  "size": 2.27,
@@ -66,7 +126,7 @@
66
126
  "description": "無縫編輯影像的特定區域。",
67
127
  "thumbnailVariant": "compareSlider",
68
128
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
69
- "tags": ["修復", "影像"],
129
+ "tags": ["修復", "圖像"],
70
130
  "models": ["SD1.5", "Stability"],
71
131
  "date": "2025-03-01",
72
132
  "size": 4.86,
@@ -80,7 +140,7 @@
80
140
  "description": "將影像擴展至原始邊界之外。",
81
141
  "thumbnailVariant": "compareSlider",
82
142
  "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
83
- "tags": ["外延", "影像"],
143
+ "tags": ["外繪", "圖像"],
84
144
  "models": ["SD1.5", "Stability"],
85
145
  "date": "2025-03-01",
86
146
  "size": 4.86,
@@ -93,7 +153,7 @@
93
153
  "mediaSubtype": "webp",
94
154
  "description": "使用文字反轉生成一致風格的影像。",
95
155
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
96
- "tags": ["嵌入", "影像"],
156
+ "tags": ["文生圖", "圖像"],
97
157
  "models": ["SD1.5", "Stability"],
98
158
  "date": "2025-03-01",
99
159
  "size": 4.86,
@@ -106,7 +166,7 @@
106
166
  "mediaSubtype": "webp",
107
167
  "description": "使用文字框精確放置物件來生成影像。",
108
168
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
109
- "tags": ["Gligen", "影像"],
169
+ "tags": ["圖像"],
110
170
  "models": ["SD1.5", "Stability"],
111
171
  "date": "2025-03-01",
112
172
  "size": 2.77,
@@ -118,7 +178,7 @@
118
178
  "mediaType": "image",
119
179
  "mediaSubtype": "webp",
120
180
  "description": "透過控制定義區域的構圖來生成影像。",
121
- "tags": ["區域構圖", "影像"],
181
+ "tags": ["文生圖", "圖像"],
122
182
  "models": ["SD1.5", "Stability"],
123
183
  "date": "2025-03-01",
124
184
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/",
@@ -131,7 +191,7 @@
131
191
  "mediaType": "image",
132
192
  "mediaSubtype": "webp",
133
193
  "description": "使用區域構圖生成具有一致主體放置的影像。",
134
- "tags": ["區域構圖", "影像"],
194
+ "tags": ["文生圖", "圖像"],
135
195
  "models": ["SD1.5", "Stability"],
136
196
  "date": "2025-03-01",
137
197
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition",
@@ -145,7 +205,7 @@
145
205
  "mediaSubtype": "webp",
146
206
  "description": "透過在潛在空間中增強品質來進行影像超解析度。",
147
207
  "thumbnailVariant": "compareSlider",
148
- "tags": ["超解析度", "影像"],
208
+ "tags": ["超分辨率", "圖像"],
149
209
  "models": ["SD1.5", "Stability"],
150
210
  "date": "2025-03-01",
151
211
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/",
@@ -159,7 +219,7 @@
159
219
  "mediaSubtype": "webp",
160
220
  "description": "使用 ESRGAN 模型進行影像超解析度以提升品質。",
161
221
  "thumbnailVariant": "compareSlider",
162
- "tags": ["超解析度", "影像"],
222
+ "tags": ["超分辨率", "圖像"],
163
223
  "models": ["SD1.5", "Stability"],
164
224
  "date": "2025-03-01",
165
225
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/",
@@ -173,7 +233,7 @@
173
233
  "mediaSubtype": "webp",
174
234
  "description": "在中間生成步驟中使用 ESRGAN 模型進行影像超解析度。",
175
235
  "thumbnailVariant": "compareSlider",
176
- "tags": ["超解析度", "影像"],
236
+ "tags": ["超分辨率", "圖像"],
177
237
  "models": ["SD1.5", "Stability"],
178
238
  "date": "2025-03-01",
179
239
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling",
@@ -187,7 +247,7 @@
187
247
  "mediaSubtype": "webp",
188
248
  "description": "在生成過程中變更提示來進行影像超解析度。",
189
249
  "thumbnailVariant": "zoomHover",
190
- "tags": ["超解析度", "影像"],
250
+ "tags": ["超分辨率", "圖像"],
191
251
  "models": ["SD1.5", "Stability"],
192
252
  "date": "2025-03-01",
193
253
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples",
@@ -201,7 +261,7 @@
201
261
  "mediaSubtype": "webp",
202
262
  "description": "使用 ControlNet 透過塗鴉參考影像引導生成影像。",
203
263
  "thumbnailVariant": "hoverDissolve",
204
- "tags": ["ControlNet", "影像"],
264
+ "tags": ["ControlNet", "圖像"],
205
265
  "models": ["SD1.5", "Stability"],
206
266
  "date": "2025-03-01",
207
267
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/",
@@ -215,7 +275,7 @@
215
275
  "mediaSubtype": "webp",
216
276
  "description": "使用 ControlNet 透過姿勢參考引導生成影像。",
217
277
  "thumbnailVariant": "hoverDissolve",
218
- "tags": ["ControlNet", "影像"],
278
+ "tags": ["ControlNet", "圖像"],
219
279
  "models": ["SD1.5", "Stability"],
220
280
  "date": "2025-03-01",
221
281
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet",
@@ -229,7 +289,7 @@
229
289
  "mediaSubtype": "webp",
230
290
  "description": "使用 ControlNet 透過深度資訊引導生成影像。",
231
291
  "thumbnailVariant": "hoverDissolve",
232
- "tags": ["ControlNet", "影像"],
292
+ "tags": ["ControlNet", "圖像", "文生圖"],
233
293
  "models": ["SD1.5", "Stability"],
234
294
  "date": "2025-03-01",
235
295
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
@@ -243,7 +303,7 @@
243
303
  "mediaSubtype": "webp",
244
304
  "description": "使用 T2I 配接器透過深度資訊引導生成影像。",
245
305
  "thumbnailVariant": "hoverDissolve",
246
- "tags": ["T2I 配接器", "影像"],
306
+ "tags": ["ControlNet", "圖像", "文生圖"],
247
307
  "models": ["SD1.5", "Stability"],
248
308
  "date": "2025-03-01",
249
309
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
@@ -257,7 +317,7 @@
257
317
  "mediaSubtype": "webp",
258
318
  "description": "透過結合多個 ControlNet 模型生成影像。",
259
319
  "thumbnailVariant": "hoverDissolve",
260
- "tags": ["ControlNet", "影像"],
320
+ "tags": ["ControlNet", "圖像", "文生圖"],
261
321
  "models": ["SD1.5", "Stability"],
262
322
  "date": "2025-03-01",
263
323
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets",
@@ -271,7 +331,7 @@
271
331
  "type": "image",
272
332
  "category": "GENERATION TYPE",
273
333
  "icon": "icon-[lucide--image]",
274
- "title": "影像",
334
+ "title": "Image",
275
335
  "templates": [
276
336
  {
277
337
  "name": "image_qwen_image",
@@ -280,7 +340,7 @@
280
340
  "mediaSubtype": "webp",
281
341
  "description": "使用 Qwen-Image 的 20B MMDiT 模型生成具有卓越多語言文字渲染與編輯功能的影像。",
282
342
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
283
- "tags": ["文字到影像", "影像"],
343
+ "tags": ["文生圖", "圖像"],
284
344
  "models": ["Qwen-Image"],
285
345
  "date": "2025-08-05",
286
346
  "size": 29.59
@@ -291,7 +351,7 @@
291
351
  "mediaType": "image",
292
352
  "mediaSubtype": "webp",
293
353
  "description": "使用 Qwen-Image InstantX ControlNet 生成影像,支援 canny、柔邊、深度、姿勢",
294
- "tags": ["影像到影像", "影像", "ControlNet"],
354
+ "tags": ["圖生圖", "圖像", "ControlNet"],
295
355
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
296
356
  "models": ["Qwen-Image"],
297
357
  "date": "2025-08-23",
@@ -299,12 +359,12 @@
299
359
  },
300
360
  {
301
361
  "name": "image_qwen_image_instantx_inpainting_controlnet",
302
- "title": "Qwen-Image InstantX Inpainting ControlNet",
362
+ "title": "Qwen-Image InstantX 修復 ControlNet",
303
363
  "mediaType": "image",
304
364
  "mediaSubtype": "webp",
305
365
  "thumbnailVariant": "compareSlider",
306
- "description": "Professional inpainting and image editing with Qwen-Image InstantX ControlNet. Supports object replacement, text modification, background changes, and outpainting.",
307
- "tags": ["Image to Image", "Image", "ControlNet", "Inpainting"],
366
+ "description": "使用 Qwen-Image InstantX ControlNet 進行專業圖像修復和編輯。支持對象替換、文本修改、背景更改和外擴繪製。",
367
+ "tags": ["圖生圖", "圖像", "ControlNet", "修復"],
308
368
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
309
369
  "models": ["Qwen-Image"],
310
370
  "date": "2025-09-12",
@@ -316,7 +376,7 @@
316
376
  "mediaType": "image",
317
377
  "mediaSubtype": "webp",
318
378
  "description": "使用 Qwen-Image 統一的 ControlNet LoRA 進行精確結構控制生成影像。支援多種控制類型,包括 canny、depth、lineart、softedge、normal 與 openpose,適用於各種創意應用。",
319
- "tags": ["文字到影像", "影像", "ControlNet"],
379
+ "tags": ["文生圖", "圖像", "ControlNet"],
320
380
  "models": ["Qwen-Image"],
321
381
  "date": "2025-08-23",
322
382
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
@@ -330,20 +390,20 @@
330
390
  "thumbnailVariant": "compareSlider",
331
391
  "description": "使用 Qwen-Image ControlNet 模型控制影像生成。透過模型修補支援 canny、深度與修復控制。",
332
392
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
333
- "tags": ["文字到影像", "影像", "ControlNet"],
393
+ "tags": ["文生圖", "圖像", "ControlNet"],
334
394
  "models": ["Qwen-Image"],
335
395
  "date": "2025-08-24",
336
396
  "size": 31.7
337
397
  },
338
398
  {
339
399
  "name": "image_qwen_image_edit_2509",
340
- "title": "Qwen Image Edit 2509",
400
+ "title": "Qwen 圖像編輯 2509",
341
401
  "mediaType": "image",
342
402
  "mediaSubtype": "webp",
343
403
  "thumbnailVariant": "compareSlider",
344
- "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
404
+ "description": "高級圖像編輯,支持多圖像輸入、改進的一致性和 ControlNet 集成。",
345
405
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
346
- "tags": ["Image to Image", "Image Edit", "Multi-Image", "ControlNet"],
406
+ "tags": ["圖生圖", "圖像編輯", "ControlNet"],
347
407
  "models": ["Qwen-Image"],
348
408
  "date": "2025-09-25",
349
409
  "size": 29.59
@@ -356,7 +416,7 @@
356
416
  "thumbnailVariant": "compareSlider",
357
417
  "description": "使用 Qwen-Image-Edit 的 20B MMDiT 模型進行精確雙語文字編輯與雙重語意/外觀編輯功能來編輯影像。",
358
418
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
359
- "tags": ["影像到影像", "影像編輯"],
419
+ "tags": ["圖生圖", "圖像編輯"],
360
420
  "models": ["Qwen-Image"],
361
421
  "date": "2025-08-18",
362
422
  "size": 29.59
@@ -369,7 +429,7 @@
369
429
  "thumbnailVariant": "hoverDissolve",
370
430
  "description": "使用 Flux Kontext 全節點可見性編輯影像,適合學習工作流程。",
371
431
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
372
- "tags": ["影像編輯", "影像到影像"],
432
+ "tags": ["圖像編輯", "圖生圖"],
373
433
  "models": ["Flux"],
374
434
  "date": "2025-06-26",
375
435
  "size": 16.43,
@@ -377,11 +437,11 @@
377
437
  },
378
438
  {
379
439
  "name": "image_chroma1_radiance_text_to_image",
380
- "title": "Chroma1 Radiance text to image",
440
+ "title": "Chroma1 Radiance 文本生成圖像",
381
441
  "mediaType": "image",
382
442
  "mediaSubtype": "webp",
383
- "description": "Chroma1-Radiance works directly with image pixels instead of compressed latents, delivering higher quality images with reduced artifacts and distortion.",
384
- "tags": ["Text to Image", "Image"],
443
+ "description": "Chroma1-Radiance 直接處理圖像像素而非壓縮潛變量,提供更高質量的圖像,減少偽影和失真。",
444
+ "tags": ["文生圖", "圖像"],
385
445
  "models": ["Chroma"],
386
446
  "date": "2025-09-18",
387
447
  "size": 22.0,
@@ -389,11 +449,11 @@
389
449
  },
390
450
  {
391
451
  "name": "image_netayume_lumina_t2i",
392
- "title": "NetaYume Lumina Text to Image",
452
+ "title": "NetaYume Lumina 文本生成圖像",
393
453
  "mediaType": "image",
394
454
  "mediaSubtype": "webp",
395
- "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
396
- "tags": ["Text to Image", "Image", "Anime"],
455
+ "description": "高質量動漫風格圖像生成,具有增強的角色理解和細節紋理。基於 Danbooru 數據集從 Neta Lumina 微調。",
456
+ "tags": ["文生圖", "圖像", "動漫"],
397
457
  "models": ["NetaYume Lumina"],
398
458
  "date": "2025-10-10",
399
459
  "size": 9.89
@@ -404,7 +464,7 @@
404
464
  "mediaType": "image",
405
465
  "mediaSubtype": "webp",
406
466
  "description": "Chroma 從 flux 修改而來,在架構上有部分變更。",
407
- "tags": ["文字到影像", "影像"],
467
+ "tags": ["文生圖", "圖像"],
408
468
  "models": ["Chroma", "Flux"],
409
469
  "date": "2025-06-04",
410
470
  "size": 21.69,
@@ -417,7 +477,7 @@
417
477
  "mediaSubtype": "webp",
418
478
  "thumbnailVariant": "compareSlider",
419
479
  "description": "Supports various tasks such as image inpainting, outpainting, and object removal",
420
- "tags": ["Inpainting", "Outpainting"],
480
+ "tags": ["修復", "外繪"],
421
481
  "models": ["Flux"],
422
482
  "date": "2025-09-21",
423
483
  "size": 27.01,
@@ -430,7 +490,7 @@
430
490
  "mediaSubtype": "webp",
431
491
  "description": "使用 Flux Dev fp8 量化版本生成影像。適合顯存有限的裝置,僅需一個模型檔案,但畫質略低於完整版。",
432
492
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
433
- "tags": ["文字到影像", "影像"],
493
+ "tags": ["文生圖", "圖像"],
434
494
  "models": ["Flux"],
435
495
  "date": "2025-03-01",
436
496
  "size": 16.06,
@@ -443,7 +503,7 @@
443
503
  "thumbnailVariant": "hoverDissolve",
444
504
  "mediaType": "image",
445
505
  "mediaSubtype": "webp",
446
- "tags": ["Image to Image", "Image"],
506
+ "tags": ["圖生圖", "圖像"],
447
507
  "models": ["Flux"],
448
508
  "date": "2025-09-02",
449
509
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
@@ -457,7 +517,7 @@
457
517
  "mediaSubtype": "webp",
458
518
  "description": "使用 Flux Schnell fp8 量化版本快速生成影像。適合入門級硬體,僅需 4 個步驟即可生成影像。",
459
519
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
460
- "tags": ["文字到影像", "影像"],
520
+ "tags": ["文生圖", "圖像"],
461
521
  "models": ["Flux"],
462
522
  "date": "2025-03-01",
463
523
  "size": 16.05,
@@ -470,7 +530,7 @@
470
530
  "mediaSubtype": "webp",
471
531
  "description": "微調的 FLUX 模型,將寫實程度推向極限",
472
532
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
473
- "tags": ["文字到影像", "影像", "寫實"],
533
+ "tags": ["文生圖", "圖像"],
474
534
  "models": ["Flux"],
475
535
  "date": "2025-07-31",
476
536
  "size": 20.74,
@@ -483,7 +543,7 @@
483
543
  "mediaSubtype": "webp",
484
544
  "description": "使用 Flux Dev 完整版生成高品質影像。需要較大顯存與多個模型檔案,但提供最佳提示遵循能力與畫質。",
485
545
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
486
- "tags": ["文字到影像", "影像"],
546
+ "tags": ["文生圖", "圖像"],
487
547
  "models": ["Flux"],
488
548
  "date": "2025-03-01",
489
549
  "size": 31.83,
@@ -496,7 +556,7 @@
496
556
  "mediaSubtype": "webp",
497
557
  "description": "使用 Flux Schnell 完整版快速生成影像。採用 Apache2.0 授權,僅需 4 個步驟即可生成影像並維持良好畫質。",
498
558
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
499
- "tags": ["文字到影像", "影像"],
559
+ "tags": ["文生圖", "圖像"],
500
560
  "models": ["Flux"],
501
561
  "date": "2025-03-01",
502
562
  "size": 31.81
@@ -509,7 +569,7 @@
509
569
  "description": "使用 Flux 修復功能填補影像缺失部分。",
510
570
  "thumbnailVariant": "compareSlider",
511
571
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
512
- "tags": ["影像到影像", "修復", "影像"],
572
+ "tags": ["圖生圖", "修復", "圖像"],
513
573
  "models": ["Flux"],
514
574
  "date": "2025-03-01",
515
575
  "size": 9.66
@@ -522,7 +582,7 @@
522
582
  "description": "使用 Flux 外延功能將影像擴展至邊界之外。",
523
583
  "thumbnailVariant": "compareSlider",
524
584
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
525
- "tags": ["外延", "影像", "影像到影像"],
585
+ "tags": ["外繪", "圖像", "圖生圖"],
526
586
  "models": ["Flux"],
527
587
  "date": "2025-03-01",
528
588
  "size": 9.66
@@ -535,7 +595,7 @@
535
595
  "description": "使用 Flux Canny 邊緣偵測引導生成影像。",
536
596
  "thumbnailVariant": "hoverDissolve",
537
597
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
538
- "tags": ["影像到影像", "ControlNet", "影像"],
598
+ "tags": ["圖生圖", "ControlNet", "圖像"],
539
599
  "models": ["Flux"],
540
600
  "date": "2025-03-01",
541
601
  "size": 31.83
@@ -548,7 +608,7 @@
548
608
  "description": "使用 Flux LoRA 深度資訊引導生成影像。",
549
609
  "thumbnailVariant": "hoverDissolve",
550
610
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
551
- "tags": ["影像到影像", "ControlNet", "影像", "LoRA"],
611
+ "tags": ["圖生圖", "ControlNet", "圖像"],
552
612
  "models": ["Flux"],
553
613
  "date": "2025-03-01",
554
614
  "size": 32.98
@@ -560,7 +620,7 @@
560
620
  "mediaSubtype": "webp",
561
621
  "description": "使用 Flux Redux 從參考影像轉移風格生成影像。",
562
622
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
563
- "tags": ["影像到影像", "ControlNet", "影像", "LoRA"],
623
+ "tags": ["圖生圖", "ControlNet", "圖像"],
564
624
  "models": ["Flux"],
565
625
  "date": "2025-03-01",
566
626
  "size": 32.74
@@ -572,7 +632,7 @@
572
632
  "mediaSubtype": "webp",
573
633
  "description": "使用 OmniGen2 統一的 7B 多模態模型與雙路徑架構從文字提示生成高品質影像。",
574
634
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
575
- "tags": ["文字到影像", "影像"],
635
+ "tags": ["文生圖", "圖像"],
576
636
  "models": ["OmniGen"],
577
637
  "date": "2025-06-30",
578
638
  "size": 14.7
@@ -585,7 +645,7 @@
585
645
  "thumbnailVariant": "hoverDissolve",
586
646
  "description": "使用 OmniGen2 的進階影像編輯功能與文字渲染支援,透過自然語言指令編輯影像。",
587
647
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
588
- "tags": ["影像編輯", "影像"],
648
+ "tags": ["圖像編輯", "圖像"],
589
649
  "models": ["OmniGen"],
590
650
  "date": "2025-06-30",
591
651
  "size": 14.7
@@ -597,7 +657,7 @@
597
657
  "mediaSubtype": "webp",
598
658
  "description": "使用 HiDream I1 開發版生成影像 - 平衡版本,28 個推理步驟,適合中階硬體。",
599
659
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
600
- "tags": ["文字到影像", "影像"],
660
+ "tags": ["文生圖", "圖像"],
601
661
  "models": ["HiDream"],
602
662
  "date": "2025-04-17",
603
663
  "size": 31.03
@@ -609,7 +669,7 @@
609
669
  "mediaSubtype": "webp",
610
670
  "description": "使用 HiDream I1 快速版生成影像 - 輕量版本,16 個推理步驟,適合在入門級硬體上快速預覽。",
611
671
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
612
- "tags": ["文字到影像", "影像"],
672
+ "tags": ["文生圖", "圖像"],
613
673
  "models": ["HiDream"],
614
674
  "date": "2025-04-17",
615
675
  "size": 22.57
@@ -621,7 +681,7 @@
621
681
  "mediaSubtype": "webp",
622
682
  "description": "使用 HiDream I1 完整版生成影像 - 完整版本,50 個推理步驟,輸出畫質最高。",
623
683
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
624
- "tags": ["文字到影像", "影像"],
684
+ "tags": ["文生圖", "圖像"],
625
685
  "models": ["HiDream"],
626
686
  "date": "2025-04-17",
627
687
  "size": 22.57
@@ -634,7 +694,7 @@
634
694
  "thumbnailVariant": "compareSlider",
635
695
  "description": "使用 HiDream E1.1 編輯影像 - 在影像畫質與編輯準確性方面優於 HiDream-E1-Full。",
636
696
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
637
- "tags": ["影像編輯", "影像"],
697
+ "tags": ["圖像編輯", "圖像"],
638
698
  "models": ["HiDream"],
639
699
  "date": "2025-07-21",
640
700
  "size": 46.96
@@ -647,7 +707,7 @@
647
707
  "thumbnailVariant": "compareSlider",
648
708
  "description": "使用 HiDream E1 編輯影像 - 專業的自然語言影像編輯模型。",
649
709
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
650
- "tags": ["影像編輯", "影像"],
710
+ "tags": ["圖像編輯", "圖像"],
651
711
  "models": ["HiDream"],
652
712
  "date": "2025-05-01",
653
713
  "size": 31.86
@@ -659,7 +719,7 @@
659
719
  "mediaSubtype": "webp",
660
720
  "description": "使用 SD 3.5 生成影像。",
661
721
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
662
- "tags": ["文字到影像", "影像"],
722
+ "tags": ["文生圖", "圖像"],
663
723
  "models": ["SD3.5"],
664
724
  "date": "2025-03-01",
665
725
  "size": 13.91
@@ -672,7 +732,7 @@
672
732
  "description": "使用 SD 3.5 Canny ControlNet 邊緣偵測引導生成影像。",
673
733
  "thumbnailVariant": "hoverDissolve",
674
734
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
675
- "tags": ["影像到影像", "影像", "ControlNet"],
735
+ "tags": ["圖生圖", "圖像", "ControlNet"],
676
736
  "models": ["SD3.5"],
677
737
  "date": "2025-03-01",
678
738
  "size": 21.97
@@ -685,7 +745,7 @@
685
745
  "description": "使用 SD 3.5 深度資訊引導生成影像。",
686
746
  "thumbnailVariant": "hoverDissolve",
687
747
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
688
- "tags": ["影像到影像", "影像", "ControlNet"],
748
+ "tags": ["圖生圖", "圖像", "ControlNet"],
689
749
  "models": ["SD3.5"],
690
750
  "date": "2025-03-01",
691
751
  "size": 21.97
@@ -698,7 +758,7 @@
698
758
  "description": "使用 SD 3.5 模糊參考影像引導生成影像。",
699
759
  "thumbnailVariant": "hoverDissolve",
700
760
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
701
- "tags": ["影像到影像", "影像"],
761
+ "tags": ["圖生圖", "圖像"],
702
762
  "models": ["SD3.5"],
703
763
  "date": "2025-03-01",
704
764
  "size": 21.97
@@ -710,7 +770,7 @@
710
770
  "mediaSubtype": "webp",
711
771
  "description": "使用 SDXL 生成高品質影像。",
712
772
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
713
- "tags": ["文字到影像", "影像"],
773
+ "tags": ["文生圖", "圖像"],
714
774
  "models": ["SDXL", "Stability"],
715
775
  "date": "2025-03-01",
716
776
  "size": 12.12
@@ -722,7 +782,7 @@
722
782
  "mediaSubtype": "webp",
723
783
  "description": "使用精煉模型增強 SDXL 影像。",
724
784
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
725
- "tags": ["文字到影像", "影像"],
785
+ "tags": ["文生圖", "圖像"],
726
786
  "models": ["SDXL", "Stability"],
727
787
  "date": "2025-03-01",
728
788
  "size": 12.12
@@ -734,7 +794,7 @@
734
794
  "mediaSubtype": "webp",
735
795
  "description": "使用 SDXL 修訂版從參考影像轉移概念生成影像。",
736
796
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
737
- "tags": ["文字到影像", "影像"],
797
+ "tags": ["文生圖", "圖像"],
738
798
  "models": ["SDXL", "Stability"],
739
799
  "date": "2025-03-01",
740
800
  "size": 9.9
@@ -746,7 +806,7 @@
746
806
  "mediaSubtype": "webp",
747
807
  "description": "使用 SDXL Turbo 單步驟生成影像。",
748
808
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
749
- "tags": ["文字到影像", "影像"],
809
+ "tags": ["文生圖", "圖像"],
750
810
  "models": ["SDXL", "Stability"],
751
811
  "date": "2025-03-01",
752
812
  "size": 6.46
@@ -758,7 +818,7 @@
758
818
  "mediaSubtype": "webp",
759
819
  "thumbnailVariant": "compareSlider",
760
820
  "description": "在 ComfyUI 中執行 Lotus 深度,實現零樣本、高效的單目深度估計並保持高細節保留。",
761
- "tags": ["深度", "影像"],
821
+ "tags": ["圖像", "文生圖"],
762
822
  "models": ["SD1.5"],
763
823
  "date": "2025-05-21",
764
824
  "size": 1.93
@@ -770,7 +830,7 @@
770
830
  "type": "video",
771
831
  "category": "GENERATION TYPE",
772
832
  "icon": "icon-[lucide--film]",
773
- "title": "影片",
833
+ "title": "Video",
774
834
  "templates": [
775
835
  {
776
836
  "name": "video_wan2_2_14B_t2v",
@@ -779,7 +839,7 @@
779
839
  "mediaType": "image",
780
840
  "mediaSubtype": "webp",
781
841
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
782
- "tags": ["文字到影片", "影片"],
842
+ "tags": ["文生視頻", "視頻"],
783
843
  "models": ["Wan2.2", "Wan"],
784
844
  "date": "2025-07-29",
785
845
  "size": 35.42
@@ -792,7 +852,7 @@
792
852
  "mediaSubtype": "webp",
793
853
  "thumbnailVariant": "hoverDissolve",
794
854
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
795
- "tags": ["影像到影片", "影片"],
855
+ "tags": ["圖生視頻", "視頻"],
796
856
  "models": ["Wan2.2", "Wan"],
797
857
  "date": "2025-07-29",
798
858
  "size": 35.42
@@ -805,19 +865,19 @@
805
865
  "mediaSubtype": "webp",
806
866
  "thumbnailVariant": "hoverDissolve",
807
867
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
808
- "tags": ["FLF2V", "影片"],
868
+ "tags": ["首尾幀視頻", "視頻"],
809
869
  "models": ["Wan2.2", "Wan"],
810
870
  "date": "2025-08-02",
811
871
  "size": 35.42
812
872
  },
813
873
  {
814
874
  "name": "video_wan2_2_14B_animate",
815
- "title": "Wan2.2 Animate, character animation and replacement",
816
- "description": "Unified character animation and replacement framework with precise motion and expression replication.",
875
+ "title": "Wan2.2 Animate 角色動畫和替換",
876
+ "description": "統一的角色動畫和替換框架,具有精確的動作和表情複製。",
817
877
  "mediaType": "image",
818
878
  "mediaSubtype": "webp",
819
879
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
820
- "tags": ["Video", "Image to Video"],
880
+ "tags": ["視頻", "圖生視頻"],
821
881
  "models": ["Wan2.2", "Wan"],
822
882
  "date": "2025-09-22",
823
883
  "size": 25.535
@@ -829,30 +889,30 @@
829
889
  "mediaType": "image",
830
890
  "mediaSubtype": "webp",
831
891
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
832
- "tags": ["影片"],
892
+ "tags": ["視頻"],
833
893
  "models": ["Wan2.2", "Wan"],
834
894
  "date": "2025-08-02",
835
895
  "size": 23.52
836
896
  },
837
897
  {
838
898
  "name": "video_humo",
839
- "title": "HuMo Video Generation",
840
- "description": "Generate videos basic on audio, image, and text, keep the character's lip sync.",
899
+ "title": "HuMo 視頻生成",
900
+ "description": "基於音頻、圖像和文本生成視頻,保持角色的唇部同步。",
841
901
  "mediaType": "image",
842
902
  "mediaSubtype": "webp",
843
- "tags": ["Video"],
903
+ "tags": ["視頻"],
844
904
  "models": ["HuMo"],
845
905
  "date": "2025-09-21",
846
906
  "size": 25.98
847
907
  },
848
908
  {
849
909
  "name": "video_wan2_2_14B_fun_inpaint",
850
- "title": "Wan 2.2 14B Fun Inp",
851
- "description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
910
+ "title": "Wan 2.2 14B 趣味修復",
911
+ "description": "使用 Wan 2.2 Fun Inp 從起始幀和結束幀生成視頻。",
852
912
  "mediaType": "image",
853
913
  "mediaSubtype": "webp",
854
914
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
855
- "tags": ["FLF2V", "Video"],
915
+ "tags": ["首尾幀視頻", "視頻"],
856
916
  "models": ["Wan2.2", "Wan"],
857
917
  "date": "2025-08-12",
858
918
  "size": 35.42
@@ -864,7 +924,7 @@
864
924
  "mediaType": "image",
865
925
  "mediaSubtype": "webp",
866
926
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
867
- "tags": ["影片到影片", "影片"],
927
+ "tags": ["視頻轉視頻", "視頻"],
868
928
  "models": ["Wan2.2", "Wan"],
869
929
  "date": "2025-08-12",
870
930
  "size": 35.42
@@ -876,7 +936,7 @@
876
936
  "mediaType": "image",
877
937
  "mediaSubtype": "webp",
878
938
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
879
- "tags": ["影片到影片", "影片"],
939
+ "tags": ["視頻轉視頻", "視頻"],
880
940
  "models": ["Wan2.2", "Wan"],
881
941
  "date": "2025-08-17",
882
942
  "size": 37.3
@@ -888,7 +948,7 @@
888
948
  "mediaType": "image",
889
949
  "mediaSubtype": "webp",
890
950
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
891
- "tags": ["文字到影片", "影片"],
951
+ "tags": ["文生視頻", "視頻"],
892
952
  "models": ["Wan2.2", "Wan"],
893
953
  "date": "2025-07-29",
894
954
  "size": 16.9
@@ -899,7 +959,7 @@
899
959
  "description": "從起始與結束幀高效進行影片修復。5B 模型提供快速迭代以測試工作流程。",
900
960
  "mediaType": "image",
901
961
  "mediaSubtype": "webp",
902
- "tags": ["文字到影片", "影片"],
962
+ "tags": ["文生視頻", "視頻"],
903
963
  "models": ["Wan2.2", "Wan"],
904
964
  "date": "2025-07-29",
905
965
  "size": 16.9
@@ -910,7 +970,7 @@
910
970
  "description": "多條件影片控制,包含姿勢、深度與邊緣引導。緊湊的 5B 大小適合實驗性開發。",
911
971
  "mediaType": "image",
912
972
  "mediaSubtype": "webp",
913
- "tags": ["文字到影片", "影片"],
973
+ "tags": ["文生視頻", "視頻"],
914
974
  "models": ["Wan2.2", "Wan"],
915
975
  "date": "2025-07-29",
916
976
  "size": 16.9
@@ -922,7 +982,7 @@
922
982
  "mediaType": "image",
923
983
  "mediaSubtype": "webp",
924
984
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
925
- "tags": ["文字到影片", "影片"],
985
+ "tags": ["文生視頻", "視頻"],
926
986
  "models": ["Wan2.1", "Wan"],
927
987
  "date": "2025-05-21",
928
988
  "size": 53.79
@@ -934,7 +994,7 @@
934
994
  "mediaType": "image",
935
995
  "mediaSubtype": "webp",
936
996
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
937
- "tags": ["參考到影片", "影片"],
997
+ "tags": ["視頻", "圖生視頻"],
938
998
  "models": ["Wan2.1", "Wan"],
939
999
  "date": "2025-05-21",
940
1000
  "size": 53.79
@@ -947,7 +1007,7 @@
947
1007
  "mediaSubtype": "webp",
948
1008
  "thumbnailVariant": "compareSlider",
949
1009
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
950
- "tags": ["影片到影片", "影片"],
1010
+ "tags": ["視頻轉視頻", "視頻"],
951
1011
  "models": ["Wan2.1", "Wan"],
952
1012
  "date": "2025-05-21",
953
1013
  "size": 53.79
@@ -960,7 +1020,7 @@
960
1020
  "mediaSubtype": "webp",
961
1021
  "thumbnailVariant": "compareSlider",
962
1022
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
963
- "tags": ["外延", "影片"],
1023
+ "tags": ["外繪", "視頻"],
964
1024
  "models": ["Wan2.1", "Wan"],
965
1025
  "date": "2025-05-21",
966
1026
  "size": 53.79
@@ -972,7 +1032,7 @@
972
1032
  "mediaType": "image",
973
1033
  "mediaSubtype": "webp",
974
1034
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
975
- "tags": ["FLF2V", "影片"],
1035
+ "tags": ["首尾幀視頻", "視頻"],
976
1036
  "models": ["Wan2.1", "Wan"],
977
1037
  "date": "2025-05-21",
978
1038
  "size": 53.79
@@ -985,18 +1045,18 @@
985
1045
  "mediaSubtype": "webp",
986
1046
  "thumbnailVariant": "compareSlider",
987
1047
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
988
- "tags": ["修復", "影片"],
1048
+ "tags": ["修復", "視頻"],
989
1049
  "models": ["Wan2.1", "Wan"],
990
1050
  "date": "2025-05-21",
991
1051
  "size": 53.79
992
1052
  },
993
1053
  {
994
1054
  "name": "video_wan2.1_alpha_t2v_14B",
995
- "title": "Wan2.1 Alpha T2V",
996
- "description": "Generate text-to-video with alpha channel support for transparent backgrounds and semi-transparent objects.",
1055
+ "title": "Wan2.1 Alpha 文生視頻",
1056
+ "description": "生成支持透明背景和半透明對象的 Alpha 通道文本轉視頻。",
997
1057
  "mediaType": "image",
998
1058
  "mediaSubtype": "webp",
999
- "tags": ["Text to Video", "Video"],
1059
+ "tags": ["文生視頻", "視頻"],
1000
1060
  "models": ["Wan2.1", "Wan"],
1001
1061
  "date": "2025-10-06",
1002
1062
  "size": 20.95
@@ -1009,7 +1069,7 @@
1009
1069
  "mediaSubtype": "webp",
1010
1070
  "thumbnailVariant": "hoverDissolve",
1011
1071
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
1012
- "tags": ["影片"],
1072
+ "tags": ["視頻"],
1013
1073
  "models": ["Wan2.1", "Wan"],
1014
1074
  "date": "2025-05-21",
1015
1075
  "size": 23.65
@@ -1021,7 +1081,7 @@
1021
1081
  "mediaType": "image",
1022
1082
  "mediaSubtype": "webp",
1023
1083
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
1024
- "tags": ["影片"],
1084
+ "tags": ["視頻"],
1025
1085
  "models": ["Wan2.1", "Wan"],
1026
1086
  "date": "2025-04-15",
1027
1087
  "size": 10.7
@@ -1033,7 +1093,7 @@
1033
1093
  "mediaType": "image",
1034
1094
  "mediaSubtype": "webp",
1035
1095
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
1036
- "tags": ["影片"],
1096
+ "tags": ["視頻"],
1037
1097
  "models": ["Wan2.1", "Wan"],
1038
1098
  "date": "2025-04-15",
1039
1099
  "size": 39.16
@@ -1045,7 +1105,7 @@
1045
1105
  "mediaType": "image",
1046
1106
  "mediaSubtype": "webp",
1047
1107
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
1048
- "tags": ["文字到影片", "影片"],
1108
+ "tags": ["文生視頻", "視頻"],
1049
1109
  "models": ["Wan2.1", "Wan"],
1050
1110
  "date": "2025-03-01",
1051
1111
  "size": 9.15
@@ -1057,7 +1117,7 @@
1057
1117
  "mediaType": "image",
1058
1118
  "mediaSubtype": "webp",
1059
1119
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
1060
- "tags": ["文字到影片", "影片"],
1120
+ "tags": ["文生視頻", "視頻"],
1061
1121
  "models": ["Wan2.1", "Wan"],
1062
1122
  "date": "2025-03-01",
1063
1123
  "size": 38.23
@@ -1069,32 +1129,32 @@
1069
1129
  "mediaType": "image",
1070
1130
  "mediaSubtype": "webp",
1071
1131
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
1072
- "tags": ["修復", "影片"],
1132
+ "tags": ["修復", "視頻"],
1073
1133
  "models": ["Wan2.1", "Wan"],
1074
1134
  "date": "2025-04-15",
1075
1135
  "size": 10.6
1076
1136
  },
1077
1137
  {
1078
1138
  "name": "wan2.1_fun_control",
1079
- "title": "Wan 2.1 ControlNet",
1139
+ "title": "Wan 2.1 控制網絡",
1080
1140
  "description": "使用 Wan 2.1 ControlNet 透過姿勢、深度與邊緣控制引導生成影片。",
1081
1141
  "mediaType": "image",
1082
1142
  "mediaSubtype": "webp",
1083
1143
  "thumbnailVariant": "hoverDissolve",
1084
1144
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
1085
- "tags": ["影片到影片", "影片"],
1145
+ "tags": ["視頻轉視頻", "視頻"],
1086
1146
  "models": ["Wan2.1", "Wan"],
1087
1147
  "date": "2025-04-15",
1088
1148
  "size": 10.6
1089
1149
  },
1090
1150
  {
1091
1151
  "name": "wan2.1_flf2v_720_f16",
1092
- "title": "Wan 2.1 FLF2V 720p F16",
1152
+ "title": "Wan 2.1 首尾幀視頻 720p F16",
1093
1153
  "description": "使用 Wan 2.1 FLF2V 透過控制首尾幀生成影片。",
1094
1154
  "mediaType": "image",
1095
1155
  "mediaSubtype": "webp",
1096
1156
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
1097
- "tags": ["FLF2V", "影片"],
1157
+ "tags": ["首尾幀視頻", "視頻"],
1098
1158
  "models": ["Wan2.1", "Wan"],
1099
1159
  "date": "2025-04-15",
1100
1160
  "size": 38.23
@@ -1106,7 +1166,7 @@
1106
1166
  "mediaSubtype": "webp",
1107
1167
  "description": "從文字提示生成影片。",
1108
1168
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
1109
- "tags": ["文字到影片", "影片"],
1169
+ "tags": ["文生視頻", "視頻"],
1110
1170
  "models": ["LTXV"],
1111
1171
  "date": "2025-03-01",
1112
1172
  "size": 17.84
@@ -1118,7 +1178,7 @@
1118
1178
  "mediaSubtype": "webp",
1119
1179
  "description": "從靜態影像生成影片。",
1120
1180
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
1121
- "tags": ["影像到影片", "影片"],
1181
+ "tags": ["圖生視頻", "視頻"],
1122
1182
  "models": ["LTXV"],
1123
1183
  "date": "2025-03-01",
1124
1184
  "size": 17.84
@@ -1130,7 +1190,7 @@
1130
1190
  "mediaSubtype": "webp",
1131
1191
  "description": "使用 Mochi 模型從文字提示生成影片。",
1132
1192
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
1133
- "tags": ["文字到影片", "影片"],
1193
+ "tags": ["文生視頻", "視頻"],
1134
1194
  "models": ["Mochi"],
1135
1195
  "date": "2025-03-01",
1136
1196
  "size": 28.65
@@ -1142,7 +1202,7 @@
1142
1202
  "mediaSubtype": "webp",
1143
1203
  "description": "使用 Hunyuan 模型從文字提示生成影片。",
1144
1204
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
1145
- "tags": ["文字到影片", "影片"],
1205
+ "tags": ["文生視頻", "視頻"],
1146
1206
  "models": ["Hunyuan Video"],
1147
1207
  "date": "2025-03-01",
1148
1208
  "size": 33.04
@@ -1154,7 +1214,7 @@
1154
1214
  "mediaSubtype": "webp",
1155
1215
  "description": "從靜態影像生成影片。",
1156
1216
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
1157
- "tags": ["影像到影片", "影片"],
1217
+ "tags": ["圖生視頻", "視頻"],
1158
1218
  "models": ["SVD"],
1159
1219
  "date": "2025-03-01",
1160
1220
  "size": 8.9
@@ -1166,7 +1226,7 @@
1166
1226
  "mediaSubtype": "webp",
1167
1227
  "description": "先從文字提示建立影像再生成影片。",
1168
1228
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
1169
- "tags": ["文字到影片", "影片"],
1229
+ "tags": ["文生視頻", "視頻"],
1170
1230
  "models": ["SVD"],
1171
1231
  "date": "2025-03-01",
1172
1232
  "size": 15.36
@@ -1178,7 +1238,7 @@
1178
1238
  "type": "audio",
1179
1239
  "category": "GENERATION TYPE",
1180
1240
  "icon": "icon-[lucide--volume-2]",
1181
- "title": "音訊",
1241
+ "title": "Audio",
1182
1242
  "templates": [
1183
1243
  {
1184
1244
  "name": "audio_stable_audio_example",
@@ -1186,7 +1246,7 @@
1186
1246
  "mediaType": "audio",
1187
1247
  "mediaSubtype": "mp3",
1188
1248
  "description": "使用 Stable Audio 從文字提示生成音訊。",
1189
- "tags": ["文字到音訊", "音訊"],
1249
+ "tags": ["文本轉音頻", "音頻"],
1190
1250
  "models": ["Stable Audio"],
1191
1251
  "date": "2025-03-01",
1192
1252
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
@@ -1198,7 +1258,7 @@
1198
1258
  "mediaType": "audio",
1199
1259
  "mediaSubtype": "mp3",
1200
1260
  "description": "使用 ACE-Step v1 從文字提示生成器樂音樂。",
1201
- "tags": ["文字到音訊", "音訊", "器樂"],
1261
+ "tags": ["文本轉音頻", "音頻"],
1202
1262
  "models": ["ACE-Step"],
1203
1263
  "date": "2025-03-01",
1204
1264
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
@@ -1210,7 +1270,7 @@
1210
1270
  "mediaType": "audio",
1211
1271
  "mediaSubtype": "mp3",
1212
1272
  "description": "使用 ACE-Step v1 從文字提示生成帶有人聲的歌曲,支援多語言與風格自訂。",
1213
- "tags": ["文字到音訊", "音訊", "歌曲"],
1273
+ "tags": ["文本轉音頻", "音頻"],
1214
1274
  "models": ["ACE-Step"],
1215
1275
  "date": "2025-03-01",
1216
1276
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
@@ -1222,7 +1282,7 @@
1222
1282
  "mediaType": "audio",
1223
1283
  "mediaSubtype": "mp3",
1224
1284
  "description": "使用 ACE-Step v1 M2M 編輯既有歌曲以變更風格與歌詞。",
1225
- "tags": ["音訊編輯", "音訊"],
1285
+ "tags": ["音頻編輯", "音頻"],
1226
1286
  "models": ["ACE-Step"],
1227
1287
  "date": "2025-03-01",
1228
1288
  "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
@@ -1235,15 +1295,15 @@
1235
1295
  "type": "3d",
1236
1296
  "category": "GENERATION TYPE",
1237
1297
  "icon": "icon-[lucide--box]",
1238
- "title": "3D",
1298
+ "title": "3D Model",
1239
1299
  "templates": [
1240
1300
  {
1241
1301
  "name": "3d_hunyuan3d-v2.1",
1242
- "title": "Hunyuan3D 2.1",
1302
+ "title": "混元3D 2.1",
1243
1303
  "mediaType": "image",
1244
1304
  "mediaSubtype": "webp",
1245
- "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
1246
- "tags": ["Image to Model", "3D"],
1305
+ "description": "使用 Hunyuan3D 2.1 從單張圖像生成 3D 模型。",
1306
+ "tags": ["圖像轉3D", "3D"],
1247
1307
  "models": ["Hunyuan3D"],
1248
1308
  "date": "2025-03-01",
1249
1309
  "tutorialUrl": "",
@@ -1251,11 +1311,11 @@
1251
1311
  },
1252
1312
  {
1253
1313
  "name": "3d_hunyuan3d_image_to_model",
1254
- "title": "Hunyuan3D 2.0",
1314
+ "title": "混元3D 2.0",
1255
1315
  "mediaType": "image",
1256
1316
  "mediaSubtype": "webp",
1257
1317
  "description": "使用 Hunyuan3D 2.0 從單張影像生成 3D 模型。",
1258
- "tags": ["影像到模型", "3D"],
1318
+ "tags": ["圖像轉3D", "3D"],
1259
1319
  "models": ["Hunyuan3D"],
1260
1320
  "date": "2025-03-01",
1261
1321
  "tutorialUrl": "",
@@ -1263,11 +1323,11 @@
1263
1323
  },
1264
1324
  {
1265
1325
  "name": "3d_hunyuan3d_multiview_to_model",
1266
- "title": "Hunyuan3D 2.0 MV",
1326
+ "title": "混元3D 2.0 多視圖",
1267
1327
  "mediaType": "image",
1268
1328
  "mediaSubtype": "webp",
1269
1329
  "description": "使用 Hunyuan3D 2.0 MV 從多個視角生成 3D 模型。",
1270
- "tags": ["多視角到模型", "3D"],
1330
+ "tags": ["3D", "圖像轉3D"],
1271
1331
  "models": ["Hunyuan3D"],
1272
1332
  "date": "2025-03-01",
1273
1333
  "tutorialUrl": "",
@@ -1276,11 +1336,11 @@
1276
1336
  },
1277
1337
  {
1278
1338
  "name": "3d_hunyuan3d_multiview_to_model_turbo",
1279
- "title": "Hunyuan3D 2.0 MV Turbo",
1339
+ "title": "混元3D 2.0 多視圖 Turbo",
1280
1340
  "mediaType": "image",
1281
1341
  "mediaSubtype": "webp",
1282
1342
  "description": "使用 Hunyuan3D 2.0 MV Turbo 從多個視角生成 3D 模型。",
1283
- "tags": ["多視角到模型", "3D"],
1343
+ "tags": ["圖像轉3D", "3D"],
1284
1344
  "models": ["Hunyuan3D"],
1285
1345
  "date": "2025-03-01",
1286
1346
  "tutorialUrl": "",
@@ -1294,15 +1354,15 @@
1294
1354
  "type": "image",
1295
1355
  "category": "CLOSED SOURCE MODELS",
1296
1356
  "icon": "icon-[lucide--hand-coins]",
1297
- "title": "影像 API",
1357
+ "title": "Image API",
1298
1358
  "templates": [
1299
1359
  {
1300
1360
  "name": "api_bytedance_seedream4",
1301
- "title": "ByteDance Seedream 4.0",
1302
- "description": "Multi-modal AI model for text-to-image and image editing. Generate 2K images in under 2 seconds with natural language control.",
1361
+ "title": "字節跳動 Seedream 4.0",
1362
+ "description": "用於文本生成圖像和圖像編輯的多模態 AI 模型。使用自然語言控制在 2 秒內生成 2K 圖像。",
1303
1363
  "mediaType": "image",
1304
1364
  "mediaSubtype": "webp",
1305
- "tags": ["Image Edit", "Image", "API", "Text-to-Image"],
1365
+ "tags": ["圖像編輯", "圖像", "API", "文生圖"],
1306
1366
  "models": ["Seedream 4.0", "ByteDance"],
1307
1367
  "date": "2025-09-11",
1308
1368
  "OpenSource": false,
@@ -1315,7 +1375,7 @@
1315
1375
  "description": "Nano-banana (Gemini-2.5-Flash 影像) - 具備一致性的影像編輯。",
1316
1376
  "mediaType": "image",
1317
1377
  "mediaSubtype": "webp",
1318
- "tags": ["影像編輯", "影像", "API", "文字到影像"],
1378
+ "tags": ["圖像編輯", "圖像", "API", "文生圖"],
1319
1379
  "models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
1320
1380
  "date": "2025-08-27",
1321
1381
  "OpenSource": false,
@@ -1330,7 +1390,7 @@
1330
1390
  "mediaSubtype": "webp",
1331
1391
  "thumbnailVariant": "compareSlider",
1332
1392
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1333
- "tags": ["影像編輯", "影像"],
1393
+ "tags": ["圖像編輯", "圖像"],
1334
1394
  "models": ["Flux", "Kontext"],
1335
1395
  "date": "2025-05-29",
1336
1396
  "OpenSource": false,
@@ -1345,7 +1405,7 @@
1345
1405
  "mediaSubtype": "webp",
1346
1406
  "thumbnailVariant": "compareSlider",
1347
1407
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1348
- "tags": ["影像編輯", "影像"],
1408
+ "tags": ["圖像編輯", "圖像"],
1349
1409
  "models": ["Flux", "Kontext", "BFL"],
1350
1410
  "date": "2025-05-29",
1351
1411
  "OpenSource": false,
@@ -1360,7 +1420,7 @@
1360
1420
  "mediaSubtype": "webp",
1361
1421
  "thumbnailVariant": "compareSlider",
1362
1422
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
1363
- "tags": ["影像編輯", "影像"],
1423
+ "tags": ["圖像編輯", "圖像"],
1364
1424
  "models": ["Flux", "Kontext", "BFL"],
1365
1425
  "date": "2025-05-29",
1366
1426
  "OpenSource": false,
@@ -1369,11 +1429,11 @@
1369
1429
  },
1370
1430
  {
1371
1431
  "name": "api_wan_text_to_image",
1372
- "title": "Wan2.5: Text to Image",
1373
- "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
1432
+ "title": "Wan2.5: 文生圖",
1433
+ "description": "使用 Wan2.5 生成具有出色提示跟隨和視覺質量的圖像。",
1374
1434
  "mediaType": "image",
1375
1435
  "mediaSubtype": "webp",
1376
- "tags": ["Text to Image", "Image", "API"],
1436
+ "tags": ["文生圖", "圖像", "API"],
1377
1437
  "models": ["Wan2.5"],
1378
1438
  "date": "2025-09-25",
1379
1439
  "OpenSource": false,
@@ -1387,7 +1447,7 @@
1387
1447
  "mediaType": "image",
1388
1448
  "mediaSubtype": "webp",
1389
1449
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
1390
- "tags": ["影像編輯", "影像"],
1450
+ "tags": ["圖像編輯", "圖像"],
1391
1451
  "models": ["Flux", "BFL"],
1392
1452
  "date": "2025-05-01",
1393
1453
  "OpenSource": false,
@@ -1401,7 +1461,7 @@
1401
1461
  "mediaType": "image",
1402
1462
  "mediaSubtype": "webp",
1403
1463
  "thumbnailVariant": "compareSlider",
1404
- "tags": ["影像到影像", "影像", "API"],
1464
+ "tags": ["圖生圖", "圖像", "API"],
1405
1465
  "models": ["Luma"],
1406
1466
  "date": "2025-03-01",
1407
1467
  "OpenSource": false,
@@ -1415,7 +1475,7 @@
1415
1475
  "mediaType": "image",
1416
1476
  "mediaSubtype": "webp",
1417
1477
  "thumbnailVariant": "compareSlider",
1418
- "tags": ["文字到影像", "影像", "API", "風格轉移"],
1478
+ "tags": ["文生圖", "圖像", "API"],
1419
1479
  "models": ["Luma"],
1420
1480
  "date": "2025-03-01",
1421
1481
  "OpenSource": false,
@@ -1428,7 +1488,7 @@
1428
1488
  "description": "使用 Recraft 生成具有自訂調色盤與品牌特定視覺效果的影像。",
1429
1489
  "mediaType": "image",
1430
1490
  "mediaSubtype": "webp",
1431
- "tags": ["文字到影像", "影像", "API", "色彩控制"],
1491
+ "tags": ["文生圖", "圖像", "API"],
1432
1492
  "models": ["Recraft"],
1433
1493
  "date": "2025-03-01",
1434
1494
  "OpenSource": false,
@@ -1441,7 +1501,7 @@
1441
1501
  "description": "透過視覺範例控制風格、調整定位並微調物件。儲存與分享風格以實現完美的品牌一致性。",
1442
1502
  "mediaType": "image",
1443
1503
  "mediaSubtype": "webp",
1444
- "tags": ["文字到影像", "影像", "API", "風格控制"],
1504
+ "tags": ["文生圖", "圖像", "API"],
1445
1505
  "models": ["Recraft"],
1446
1506
  "date": "2025-03-01",
1447
1507
  "OpenSource": false,
@@ -1454,7 +1514,7 @@
1454
1514
  "description": "使用 Recraft 的 AI 向量產生器從文字提示生成高品質向量影像。",
1455
1515
  "mediaType": "image",
1456
1516
  "mediaSubtype": "webp",
1457
- "tags": ["文字到影像", "影像", "API", "向量"],
1517
+ "tags": ["文生圖", "圖像", "API", "矢量"],
1458
1518
  "models": ["Recraft"],
1459
1519
  "date": "2025-03-01",
1460
1520
  "OpenSource": false,
@@ -1467,7 +1527,7 @@
1467
1527
  "description": "使用 Runway 的 AI 模型從文字提示生成高品質影像。",
1468
1528
  "mediaType": "image",
1469
1529
  "mediaSubtype": "webp",
1470
- "tags": ["文字到影像", "影像", "API"],
1530
+ "tags": ["文生圖", "圖像", "API"],
1471
1531
  "models": ["Runway"],
1472
1532
  "date": "2025-03-01",
1473
1533
  "OpenSource": false,
@@ -1481,7 +1541,7 @@
1481
1541
  "mediaType": "image",
1482
1542
  "thumbnailVariant": "compareSlider",
1483
1543
  "mediaSubtype": "webp",
1484
- "tags": ["影像到影像", "影像", "API", "風格轉移"],
1544
+ "tags": ["圖生圖", "圖像", "API"],
1485
1545
  "models": ["Runway"],
1486
1546
  "date": "2025-03-01",
1487
1547
  "OpenSource": false,
@@ -1494,7 +1554,7 @@
1494
1554
  "description": "生成具有優秀提示遵循性的高品質影像。適用於 100 萬畫素解析度的專業用途。",
1495
1555
  "mediaType": "image",
1496
1556
  "mediaSubtype": "webp",
1497
- "tags": ["文字到影像", "影像", "API"],
1557
+ "tags": ["文生圖", "圖像", "API"],
1498
1558
  "models": ["Stability"],
1499
1559
  "date": "2025-03-01",
1500
1560
  "OpenSource": false,
@@ -1508,7 +1568,7 @@
1508
1568
  "mediaType": "image",
1509
1569
  "thumbnailVariant": "compareSlider",
1510
1570
  "mediaSubtype": "webp",
1511
- "tags": ["影像到影像", "影像", "API"],
1571
+ "tags": ["圖生圖", "圖像", "API"],
1512
1572
  "models": ["Stability"],
1513
1573
  "date": "2025-03-01",
1514
1574
  "OpenSource": false,
@@ -1521,7 +1581,7 @@
1521
1581
  "description": "生成具有優秀提示遵循性的高品質影像。適用於 100 萬畫素解析度的專業用途。",
1522
1582
  "mediaType": "image",
1523
1583
  "mediaSubtype": "webp",
1524
- "tags": ["文字到影像", "影像", "API"],
1584
+ "tags": ["文生圖", "圖像", "API"],
1525
1585
  "models": ["Stability"],
1526
1586
  "date": "2025-03-01",
1527
1587
  "OpenSource": false,
@@ -1535,7 +1595,7 @@
1535
1595
  "mediaType": "image",
1536
1596
  "thumbnailVariant": "compareSlider",
1537
1597
  "mediaSubtype": "webp",
1538
- "tags": ["影像到影像", "影像", "API"],
1598
+ "tags": ["圖生圖", "圖像", "API"],
1539
1599
  "models": ["Stability"],
1540
1600
  "date": "2025-03-01",
1541
1601
  "OpenSource": false,
@@ -1548,7 +1608,7 @@
1548
1608
  "description": "使用 Ideogram V3 生成具有優秀提示對齊、寫實與文字渲染的專業品質影像。",
1549
1609
  "mediaType": "image",
1550
1610
  "mediaSubtype": "webp",
1551
- "tags": ["文字到影像", "影像", "API", "文字渲染"],
1611
+ "tags": ["文生圖", "圖像", "API"],
1552
1612
  "models": ["Ideogram"],
1553
1613
  "date": "2025-03-01",
1554
1614
  "OpenSource": false,
@@ -1561,7 +1621,7 @@
1561
1621
  "description": "使用 OpenAI GPT Image 1 API 從文字提示生成影像。",
1562
1622
  "mediaType": "image",
1563
1623
  "mediaSubtype": "webp",
1564
- "tags": ["文字到影像", "影像", "API"],
1624
+ "tags": ["文生圖", "圖像", "API"],
1565
1625
  "models": ["GPT-Image-1", "OpenAI"],
1566
1626
  "date": "2025-03-01",
1567
1627
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
@@ -1576,7 +1636,7 @@
1576
1636
  "mediaType": "image",
1577
1637
  "mediaSubtype": "webp",
1578
1638
  "thumbnailVariant": "compareSlider",
1579
- "tags": ["影像到影像", "影像", "API"],
1639
+ "tags": ["圖生圖", "圖像", "API"],
1580
1640
  "models": ["GPT-Image-1"],
1581
1641
  "date": "2025-03-01",
1582
1642
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
@@ -1591,7 +1651,7 @@
1591
1651
  "mediaType": "image",
1592
1652
  "mediaSubtype": "webp",
1593
1653
  "thumbnailVariant": "compareSlider",
1594
- "tags": ["修復", "影像", "API"],
1654
+ "tags": ["修復", "圖像", "API"],
1595
1655
  "models": ["GPT-Image-1"],
1596
1656
  "date": "2025-03-01",
1597
1657
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
@@ -1606,7 +1666,7 @@
1606
1666
  "mediaType": "image",
1607
1667
  "mediaSubtype": "webp",
1608
1668
  "thumbnailVariant": "compareSlider",
1609
- "tags": ["文字到影像", "影像", "API", "多輸入"],
1669
+ "tags": ["文生圖", "圖像", "API"],
1610
1670
  "models": ["GPT-Image-1"],
1611
1671
  "date": "2025-03-01",
1612
1672
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
@@ -1620,7 +1680,7 @@
1620
1680
  "description": "使用 OpenAI Dall-E 2 API 從文字提示生成影像。",
1621
1681
  "mediaType": "image",
1622
1682
  "mediaSubtype": "webp",
1623
- "tags": ["文字到影像", "影像", "API"],
1683
+ "tags": ["文生圖", "圖像", "API"],
1624
1684
  "models": ["Dall-E", "OpenAI"],
1625
1685
  "date": "2025-03-01",
1626
1686
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
@@ -1635,7 +1695,7 @@
1635
1695
  "mediaType": "image",
1636
1696
  "mediaSubtype": "webp",
1637
1697
  "thumbnailVariant": "compareSlider",
1638
- "tags": ["修復", "影像", "API"],
1698
+ "tags": ["修復", "圖像", "API"],
1639
1699
  "models": ["Dall-E", "OpenAI"],
1640
1700
  "date": "2025-03-01",
1641
1701
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
@@ -1649,7 +1709,7 @@
1649
1709
  "description": "使用 OpenAI Dall-E 3 API 從文字提示生成影像。",
1650
1710
  "mediaType": "image",
1651
1711
  "mediaSubtype": "webp",
1652
- "tags": ["文字到影像", "影像", "API"],
1712
+ "tags": ["文生圖", "圖像", "API"],
1653
1713
  "models": ["Dall-E", "OpenAI"],
1654
1714
  "date": "2025-03-01",
1655
1715
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3",
@@ -1664,15 +1724,15 @@
1664
1724
  "type": "video",
1665
1725
  "category": "CLOSED SOURCE MODELS",
1666
1726
  "icon": "icon-[lucide--film]",
1667
- "title": "影片 API",
1727
+ "title": "Video API",
1668
1728
  "templates": [
1669
1729
  {
1670
1730
  "name": "api_openai_sora_video",
1671
- "title": "Sora 2: Text & Image to Video",
1672
- "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
1731
+ "title": "Sora 2: 文本和圖像生成視頻",
1732
+ "description": "OpenAI Sora-2 和 Sora-2 Pro 視頻生成,帶同步音頻。",
1673
1733
  "mediaType": "image",
1674
1734
  "mediaSubtype": "webp",
1675
- "tags": ["Image to Video", "Text to Video", "API"],
1735
+ "tags": ["圖生視頻", "文生視頻", "API"],
1676
1736
  "models": ["OpenAI"],
1677
1737
  "date": "2025-10-08",
1678
1738
  "OpenSource": false,
@@ -1681,11 +1741,11 @@
1681
1741
  },
1682
1742
  {
1683
1743
  "name": "api_wan_text_to_video",
1684
- "title": "Wan2.5: Text to Video",
1685
- "description": "Generate videos with synchronized audio, enhanced motion, and superior quality.",
1744
+ "title": "Wan2.5: 文生視頻",
1745
+ "description": "生成具有同步音頻、增強動作和卓越質量的視頻。",
1686
1746
  "mediaType": "image",
1687
1747
  "mediaSubtype": "webp",
1688
- "tags": ["Image to Video", "Video", "API"],
1748
+ "tags": ["圖生視頻", "視頻", "API"],
1689
1749
  "models": ["Wan2.5"],
1690
1750
  "date": "2025-09-27",
1691
1751
  "tutorialUrl": "",
@@ -1695,11 +1755,11 @@
1695
1755
  },
1696
1756
  {
1697
1757
  "name": "api_wan_image_to_video",
1698
- "title": "Wan2.5: Image to Video",
1699
- "description": "Transform images into videos with synchronized audio, enhanced motion, and superior quality.",
1758
+ "title": "Wan2.5: 圖生視頻",
1759
+ "description": "將圖像轉換為具有同步音頻、增強動作和卓越質量的視頻。",
1700
1760
  "mediaType": "image",
1701
1761
  "mediaSubtype": "webp",
1702
- "tags": ["Image to Video", "Video", "API"],
1762
+ "tags": ["圖生視頻", "視頻", "API"],
1703
1763
  "models": ["Wan2.5"],
1704
1764
  "date": "2025-09-27",
1705
1765
  "tutorialUrl": "",
@@ -1713,7 +1773,7 @@
1713
1773
  "description": "使用 Kling 生成具有良好提示遵循性的影片,適用於動作、表情與攝影機移動。",
1714
1774
  "mediaType": "image",
1715
1775
  "mediaSubtype": "webp",
1716
- "tags": ["影像到影片", "影片", "API"],
1776
+ "tags": ["圖生視頻", "視頻", "API"],
1717
1777
  "models": ["Kling"],
1718
1778
  "date": "2025-03-01",
1719
1779
  "tutorialUrl": "",
@@ -1727,7 +1787,7 @@
1727
1787
  "description": "使用 Kling 透過對影像套用視覺特效生成動態影片。",
1728
1788
  "mediaType": "image",
1729
1789
  "mediaSubtype": "webp",
1730
- "tags": ["影片特效", "影片", "API"],
1790
+ "tags": ["視頻", "API"],
1731
1791
  "models": ["Kling"],
1732
1792
  "date": "2025-03-01",
1733
1793
  "tutorialUrl": "",
@@ -1741,7 +1801,7 @@
1741
1801
  "description": "透過控制首尾幀生成影片。",
1742
1802
  "mediaType": "image",
1743
1803
  "mediaSubtype": "webp",
1744
- "tags": ["影片生成", "影片", "API", "幀控制"],
1804
+ "tags": ["視頻", "API", "首尾幀視頻"],
1745
1805
  "models": ["Kling"],
1746
1806
  "date": "2025-03-01",
1747
1807
  "tutorialUrl": "",
@@ -1755,7 +1815,7 @@
1755
1815
  "description": "使用 Vidu 的進階 AI 模型,透過可調整的運動幅度與持續時間控制從文字提示生成高品質 1080p 影片。",
1756
1816
  "mediaType": "image",
1757
1817
  "mediaSubtype": "webp",
1758
- "tags": ["文字到影片", "影片", "API"],
1818
+ "tags": ["文生視頻", "視頻", "API"],
1759
1819
  "models": ["Vidu"],
1760
1820
  "date": "2025-08-23",
1761
1821
  "tutorialUrl": "",
@@ -1769,7 +1829,7 @@
1769
1829
  "description": "使用 Vidu 將靜態影像轉換為具有精確運動控制與可自訂運動幅度的動態 1080p 影片。",
1770
1830
  "mediaType": "image",
1771
1831
  "mediaSubtype": "webp",
1772
- "tags": ["影像到影片", "影片", "API"],
1832
+ "tags": ["圖生視頻", "視頻", "API"],
1773
1833
  "models": ["Vidu"],
1774
1834
  "date": "2025-08-23",
1775
1835
  "tutorialUrl": "",
@@ -1783,7 +1843,7 @@
1783
1843
  "description": "使用多個參考影像(最多 7 張)生成具有一致主題的影片,確保角色與風格在整個影片序列中的連續性。",
1784
1844
  "mediaType": "image",
1785
1845
  "mediaSubtype": "webp",
1786
- "tags": ["參考到影片", "影片", "API"],
1846
+ "tags": ["視頻", "圖生視頻", "API"],
1787
1847
  "models": ["Vidu"],
1788
1848
  "date": "2025-08-23",
1789
1849
  "tutorialUrl": "",
@@ -1797,7 +1857,7 @@
1797
1857
  "description": "在定義的起始與結束幀之間建立流暢的影片過渡,具有自然運動插值與一致的視覺品質。",
1798
1858
  "mediaType": "image",
1799
1859
  "mediaSubtype": "webp",
1800
- "tags": ["FLF2V", "影片", "API"],
1860
+ "tags": ["視頻", "API", "首尾幀視頻"],
1801
1861
  "models": ["Vidu"],
1802
1862
  "date": "2025-08-23",
1803
1863
  "tutorialUrl": "",
@@ -1807,11 +1867,11 @@
1807
1867
  },
1808
1868
  {
1809
1869
  "name": "api_bytedance_text_to_video",
1810
- "title": "ByteDance: Text to Video",
1811
- "description": "Generate high-quality videos directly from text prompts using ByteDance's Seedance model. Supports multiple resolutions and aspect ratios with natural motion and cinematic quality.",
1870
+ "title": "ByteDance: 文生視頻",
1871
+ "description": "使用 ByteDance Seedance 模型直接從文本提示生成高質量視頻。支持多種分辨率和寬高比,具有自然運動和電影質量。",
1812
1872
  "mediaType": "image",
1813
1873
  "mediaSubtype": "webp",
1814
- "tags": ["Video", "API", "Text to Video"],
1874
+ "tags": ["視頻", "API", "文生視頻"],
1815
1875
  "models": ["ByteDance"],
1816
1876
  "date": "2025-10-6",
1817
1877
  "tutorialUrl": "",
@@ -1821,11 +1881,11 @@
1821
1881
  },
1822
1882
  {
1823
1883
  "name": "api_bytedance_image_to_video",
1824
- "title": "ByteDance: Image to Video",
1825
- "description": "Transform static images into dynamic videos using ByteDance's Seedance model. Analyzes image structure and generates natural motion with consistent visual style and coherent video sequences.",
1884
+ "title": "ByteDance: 圖生視頻",
1885
+ "description": "使用 ByteDance Seedance 模型將靜態圖像轉換為動態視頻。分析圖像結構並生成具有一致視覺風格和連貫視頻序列的自然運動。",
1826
1886
  "mediaType": "image",
1827
1887
  "mediaSubtype": "webp",
1828
- "tags": ["Video", "API", "Image to Video"],
1888
+ "tags": ["視頻", "API", "圖生視頻"],
1829
1889
  "models": ["ByteDance"],
1830
1890
  "date": "2025-10-6",
1831
1891
  "tutorialUrl": "",
@@ -1835,11 +1895,11 @@
1835
1895
  },
1836
1896
  {
1837
1897
  "name": "api_bytedance_flf2v",
1838
- "title": "ByteDance: Start End to Video",
1839
- "description": "Generate cinematic video transitions between start and end frames with fluid motion, scene consistency, and professional polish using ByteDance's Seedance model.",
1898
+ "title": "ByteDance: 首尾幀視頻",
1899
+ "description": "使用 ByteDance Seedance 模型生成起始幀和結束幀之間的電影級視頻過渡,具有流暢運動、場景一致性和專業品質。",
1840
1900
  "mediaType": "image",
1841
1901
  "mediaSubtype": "webp",
1842
- "tags": ["Video", "API", "FLF2V"],
1902
+ "tags": ["視頻", "API", "首尾幀視頻"],
1843
1903
  "models": ["ByteDance"],
1844
1904
  "date": "2025-10-6",
1845
1905
  "tutorialUrl": "",
@@ -1853,7 +1913,7 @@
1853
1913
  "description": "取得靜態影像並立即建立神奇的高品質動畫。",
1854
1914
  "mediaType": "image",
1855
1915
  "mediaSubtype": "webp",
1856
- "tags": ["影像到影片", "影片", "API"],
1916
+ "tags": ["圖生視頻", "視頻", "API"],
1857
1917
  "models": ["Luma"],
1858
1918
  "date": "2025-03-01",
1859
1919
  "tutorialUrl": "",
@@ -1867,7 +1927,7 @@
1867
1927
  "description": "可以使用簡單的提示生成高品質影片。",
1868
1928
  "mediaType": "image",
1869
1929
  "mediaSubtype": "webp",
1870
- "tags": ["文字到影片", "影片", "API"],
1930
+ "tags": ["文生視頻", "視頻", "API"],
1871
1931
  "models": ["Luma"],
1872
1932
  "date": "2025-03-01",
1873
1933
  "tutorialUrl": "",
@@ -1881,7 +1941,7 @@
1881
1941
  "description": "透過專為授權資料訓練的模型,從文字提示生成電影級 1080p 影片。",
1882
1942
  "mediaType": "image",
1883
1943
  "mediaSubtype": "webp",
1884
- "tags": ["文字到影片", "影片", "API"],
1944
+ "tags": ["文生視頻", "視頻", "API"],
1885
1945
  "models": ["Moonvalley"],
1886
1946
  "date": "2025-03-01",
1887
1947
  "tutorialUrl": "",
@@ -1895,7 +1955,7 @@
1895
1955
  "description": "透過專為授權資料訓練的模型,使用影像生成電影級 1080p 影片。",
1896
1956
  "mediaType": "image",
1897
1957
  "mediaSubtype": "webp",
1898
- "tags": ["影像到影片", "影片", "API"],
1958
+ "tags": ["圖生視頻", "視頻", "API"],
1899
1959
  "models": ["Moonvalley"],
1900
1960
  "date": "2025-03-01",
1901
1961
  "tutorialUrl": "",
@@ -1910,7 +1970,7 @@
1910
1970
  "mediaType": "image",
1911
1971
  "thumbnailVariant": "hoverDissolve",
1912
1972
  "mediaSubtype": "webp",
1913
- "tags": ["影片到影片", "影片", "API", "運動轉移"],
1973
+ "tags": ["視頻轉視頻", "視頻", "API"],
1914
1974
  "models": ["Moonvalley"],
1915
1975
  "date": "2025-03-01",
1916
1976
  "tutorialUrl": "",
@@ -1925,7 +1985,7 @@
1925
1985
  "mediaType": "image",
1926
1986
  "thumbnailVariant": "hoverDissolve",
1927
1987
  "mediaSubtype": "webp",
1928
- "tags": ["影片到影片", "影片", "API", "姿勢控制"],
1988
+ "tags": ["視頻轉視頻", "視頻", "API"],
1929
1989
  "models": ["Moonvalley"],
1930
1990
  "date": "2025-03-01",
1931
1991
  "tutorialUrl": "",
@@ -1939,7 +1999,7 @@
1939
1999
  "description": "使用 MiniMax Hailuo-02 模型,透過可選的首幀控制從文字提示生成高品質影片。支援多種解析度(768P/1080P)與時長(6/10 秒),具有智慧提示最佳化功能。",
1940
2000
  "mediaType": "image",
1941
2001
  "mediaSubtype": "webp",
1942
- "tags": ["文字到影片", "影片", "API"],
2002
+ "tags": ["文生視頻", "視頻", "API"],
1943
2003
  "models": ["MiniMax"],
1944
2004
  "date": "2025-03-01",
1945
2005
  "tutorialUrl": "",
@@ -1953,7 +2013,7 @@
1953
2013
  "description": "直接從文字提示生成高品質影片。探索 MiniMax 的進階 AI 功能,利用專業 CGI 效果與風格元素建立多樣化的視覺敘事,讓您的描述生動起來。",
1954
2014
  "mediaType": "image",
1955
2015
  "mediaSubtype": "webp",
1956
- "tags": ["文字到影片", "影片", "API"],
2016
+ "tags": ["文生視頻", "視頻", "API"],
1957
2017
  "models": ["MiniMax"],
1958
2018
  "date": "2025-03-01",
1959
2019
  "tutorialUrl": "",
@@ -1967,7 +2027,7 @@
1967
2027
  "description": "使用 MiniMax 透過 CGI 整合從影像與文字生成精緻影片。",
1968
2028
  "mediaType": "image",
1969
2029
  "mediaSubtype": "webp",
1970
- "tags": ["影像到影片", "影片", "API"],
2030
+ "tags": ["圖生視頻", "視頻", "API"],
1971
2031
  "models": ["MiniMax"],
1972
2032
  "date": "2025-03-01",
1973
2033
  "tutorialUrl": "",
@@ -1981,7 +2041,7 @@
1981
2041
  "description": "使用 PixVerse 從靜態影像生成具有運動與特效的動態影片。",
1982
2042
  "mediaType": "image",
1983
2043
  "mediaSubtype": "webp",
1984
- "tags": ["影像到影片", "影片", "API"],
2044
+ "tags": ["圖生視頻", "視頻", "API"],
1985
2045
  "models": ["PixVerse"],
1986
2046
  "date": "2025-03-01",
1987
2047
  "tutorialUrl": "",
@@ -1995,7 +2055,7 @@
1995
2055
  "description": "使用 PixVerse 從靜態影像生成具有運動與特效的動態影片。",
1996
2056
  "mediaType": "image",
1997
2057
  "mediaSubtype": "webp",
1998
- "tags": ["影像到影片", "影片", "API", "範本"],
2058
+ "tags": ["圖生視頻", "視頻", "API"],
1999
2059
  "models": ["PixVerse"],
2000
2060
  "date": "2025-03-01",
2001
2061
  "tutorialUrl": "",
@@ -2009,7 +2069,7 @@
2009
2069
  "description": "生成具有準確提示解釋與驚豔影片動態的影片。",
2010
2070
  "mediaType": "image",
2011
2071
  "mediaSubtype": "webp",
2012
- "tags": ["文字到影片", "影片", "API"],
2072
+ "tags": ["文生視頻", "視頻", "API"],
2013
2073
  "models": ["PixVerse"],
2014
2074
  "date": "2025-03-01",
2015
2075
  "tutorialUrl": "",
@@ -2023,7 +2083,7 @@
2023
2083
  "description": "使用 Runway Gen3a Turbo 從靜態影像生成電影級影片。",
2024
2084
  "mediaType": "image",
2025
2085
  "mediaSubtype": "webp",
2026
- "tags": ["影像到影片", "影片", "API"],
2086
+ "tags": ["圖生視頻", "視頻", "API"],
2027
2087
  "models": ["Runway"],
2028
2088
  "date": "2025-03-01",
2029
2089
  "tutorialUrl": "",
@@ -2037,7 +2097,7 @@
2037
2097
  "description": "使用 Runway Gen4 Turbo 從影像生成動態影片。",
2038
2098
  "mediaType": "image",
2039
2099
  "mediaSubtype": "webp",
2040
- "tags": ["影像到影片", "影片", "API"],
2100
+ "tags": ["圖生視頻", "視頻", "API"],
2041
2101
  "models": ["Runway"],
2042
2102
  "date": "2025-03-01",
2043
2103
  "tutorialUrl": "",
@@ -2051,7 +2111,7 @@
2051
2111
  "description": "使用 Runway 的精確度在兩個關鍵幀之間生成流暢的影片過渡。",
2052
2112
  "mediaType": "image",
2053
2113
  "mediaSubtype": "webp",
2054
- "tags": ["影片生成", "影片", "API", "幀控制"],
2114
+ "tags": ["視頻", "API", "首尾幀視頻"],
2055
2115
  "models": ["Runway"],
2056
2116
  "date": "2025-03-01",
2057
2117
  "tutorialUrl": "",
@@ -2065,7 +2125,7 @@
2065
2125
  "description": "使用 Pika AI 從單一靜態影像生成流暢的動畫影片。",
2066
2126
  "mediaType": "image",
2067
2127
  "mediaSubtype": "webp",
2068
- "tags": ["影像到影片", "影片", "API"],
2128
+ "tags": ["圖生視頻", "視頻", "API"],
2069
2129
  "models": ["Pika"],
2070
2130
  "date": "2025-03-01",
2071
2131
  "tutorialUrl": "",
@@ -2079,7 +2139,7 @@
2079
2139
  "description": "使用 Pika 場景生成包含多個輸入影像的影片。",
2080
2140
  "mediaType": "image",
2081
2141
  "mediaSubtype": "webp",
2082
- "tags": ["影像到影片", "影片", "API", "多影像"],
2142
+ "tags": ["圖生視頻", "視頻", "API"],
2083
2143
  "models": ["Pika"],
2084
2144
  "date": "2025-03-01",
2085
2145
  "tutorialUrl": "",
@@ -2093,7 +2153,7 @@
2093
2153
  "description": "使用 Google Veo2 API 從影像生成影片。",
2094
2154
  "mediaType": "image",
2095
2155
  "mediaSubtype": "webp",
2096
- "tags": ["影像到影片", "影片", "API"],
2156
+ "tags": ["圖生視頻", "視頻", "API"],
2097
2157
  "models": ["Veo", "Google"],
2098
2158
  "date": "2025-03-01",
2099
2159
  "tutorialUrl": "",
@@ -2107,7 +2167,7 @@
2107
2167
  "description": "使用 Google 的進階 Veo 3 API 從文字提示或影像生成高品質 8 秒影片。具備音訊生成、提示增強與速度或品質的雙模型選項。",
2108
2168
  "mediaType": "image",
2109
2169
  "mediaSubtype": "webp",
2110
- "tags": ["影像到影片", "文字到影片", "API"],
2170
+ "tags": ["圖生視頻", "文生視頻", "API"],
2111
2171
  "models": ["Veo", "Google"],
2112
2172
  "date": "2025-03-01",
2113
2173
  "tutorialUrl": "",
@@ -2126,11 +2186,11 @@
2126
2186
  "templates": [
2127
2187
  {
2128
2188
  "name": "api_rodin_gen2",
2129
- "title": "Rodin: Gen-2 Image to Model",
2130
- "description": "Generate detailed 4X mesh quality 3D models from photos using Rodin Gen2",
2189
+ "title": "Rodin: Gen-2 圖像轉模型",
2190
+ "description": "使用 Rodin Gen2 從照片生成具有 4 倍網格質量的詳細 3D 模型",
2131
2191
  "mediaType": "image",
2132
2192
  "mediaSubtype": "webp",
2133
- "tags": ["Image to Model", "3D", "API"],
2193
+ "tags": ["圖像轉3D", "3D", "API"],
2134
2194
  "models": ["Rodin"],
2135
2195
  "date": "2025-09-27",
2136
2196
  "tutorialUrl": "",
@@ -2145,7 +2205,7 @@
2145
2205
  "mediaType": "image",
2146
2206
  "thumbnailVariant": "compareSlider",
2147
2207
  "mediaSubtype": "webp",
2148
- "tags": ["影像到模型", "3D", "API"],
2208
+ "tags": ["圖像轉3D", "3D", "API"],
2149
2209
  "models": ["Rodin"],
2150
2210
  "date": "2025-03-01",
2151
2211
  "tutorialUrl": "",
@@ -2160,7 +2220,7 @@
2160
2220
  "mediaType": "image",
2161
2221
  "thumbnailVariant": "compareSlider",
2162
2222
  "mediaSubtype": "webp",
2163
- "tags": ["多視角到模型", "3D", "API"],
2223
+ "tags": ["圖像轉3D", "3D", "API"],
2164
2224
  "models": ["Rodin"],
2165
2225
  "date": "2025-03-01",
2166
2226
  "tutorialUrl": "",
@@ -2174,7 +2234,7 @@
2174
2234
  "description": "使用 Tripo 的文字驅動建模從描述中製作 3D 物件。",
2175
2235
  "mediaType": "image",
2176
2236
  "mediaSubtype": "webp",
2177
- "tags": ["文字到模型", "3D", "API"],
2237
+ "tags": ["文本轉模型", "3D", "API"],
2178
2238
  "models": ["Tripo"],
2179
2239
  "date": "2025-03-01",
2180
2240
  "tutorialUrl": "",
@@ -2189,7 +2249,7 @@
2189
2249
  "mediaType": "image",
2190
2250
  "thumbnailVariant": "compareSlider",
2191
2251
  "mediaSubtype": "webp",
2192
- "tags": ["影像到模型", "3D", "API"],
2252
+ "tags": ["圖像轉3D", "3D", "API"],
2193
2253
  "models": ["Tripo"],
2194
2254
  "date": "2025-03-01",
2195
2255
  "tutorialUrl": "",
@@ -2204,7 +2264,7 @@
2204
2264
  "mediaType": "image",
2205
2265
  "thumbnailVariant": "compareSlider",
2206
2266
  "mediaSubtype": "webp",
2207
- "tags": ["多視角到模型", "3D", "API"],
2267
+ "tags": ["圖像轉3D", "3D", "API"],
2208
2268
  "models": ["Tripo"],
2209
2269
  "date": "2025-03-01",
2210
2270
  "tutorialUrl": "",
@@ -2227,7 +2287,7 @@
2227
2287
  "description": "使用 Stable Audio 2.5 從文字描述生成音樂,幾秒鐘內創作數分鐘的音軌。",
2228
2288
  "mediaType": "audio",
2229
2289
  "mediaSubtype": "mp3",
2230
- "tags": ["文字转音频", "音频", "API"],
2290
+ "tags": ["文本轉音頻", "音頻", "API"],
2231
2291
  "date": "2025-09-09",
2232
2292
  "models": ["Stability", "Stable Audio"],
2233
2293
  "OpenSource": false,
@@ -2240,7 +2300,7 @@
2240
2300
  "description": "使用 Stable Audio 2.5 將現有音訊轉化為全新作品,上傳音訊,AI 自動生成完整音軌。",
2241
2301
  "mediaType": "audio",
2242
2302
  "mediaSubtype": "mp3",
2243
- "tags": ["音频转音频", "音频", "API"],
2303
+ "tags": ["音頻轉音頻", "音頻", "API"],
2244
2304
  "date": "2025-09-09",
2245
2305
  "models": ["Stability", "Stable Audio"],
2246
2306
  "OpenSource": false,
@@ -2253,7 +2313,7 @@
2253
2313
  "description": "使用 Stable Audio 2.5 擴展音訊,上傳音訊,AI 自動補充擴展剩餘部分。",
2254
2314
  "mediaType": "audio",
2255
2315
  "mediaSubtype": "mp3",
2256
- "tags": ["音频转音频", "音频", "API"],
2316
+ "tags": ["音頻轉音頻", "音頻", "API"],
2257
2317
  "date": "2025-09-09",
2258
2318
  "models": ["Stability", "Stable Audio"],
2259
2319
  "OpenSource": false,
@@ -2275,7 +2335,7 @@
2275
2335
  "description": "與 OpenAI 的進階語言模型互動進行智慧對話。",
2276
2336
  "mediaType": "image",
2277
2337
  "mediaSubtype": "webp",
2278
- "tags": ["聊天", "LLM", "API"],
2338
+ "tags": ["LLM", "API"],
2279
2339
  "models": ["OpenAI"],
2280
2340
  "date": "2025-03-01",
2281
2341
  "tutorialUrl": "",
@@ -2289,7 +2349,7 @@
2289
2349
  "description": "體驗 Google 的多模態 AI 與 Gemini 的推理能力。",
2290
2350
  "mediaType": "image",
2291
2351
  "mediaSubtype": "webp",
2292
- "tags": ["聊天", "LLM", "API"],
2352
+ "tags": ["LLM", "API"],
2293
2353
  "models": ["Google Gemini", "Google"],
2294
2354
  "date": "2025-03-01",
2295
2355
  "tutorialUrl": "",