comfyui-workflow-templates 0.1.97__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff shows the publicly released contents of the two package versions as published to their registry, and is provided for informational purposes only.
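For anyone who wants to reproduce or spot-check a diff like the one below, here is a rough sketch using only the Python standard library. It assumes the template index ships inside each wheel at comfyui_workflow_templates/templates/index.json; that path, and the local wheel filenames, are assumptions rather than something this page confirms.

# Sketch (not part of the release): regenerate this diff locally from the two wheels.
# Assumptions: both wheel files sit in the current directory, and the template index
# lives at comfyui_workflow_templates/templates/index.json inside each wheel.
import difflib
import zipfile

INDEX_PATH = "comfyui_workflow_templates/templates/index.json"  # assumed in-wheel path

def read_index(wheel_path: str) -> list[str]:
    # A wheel is a zip archive, so the bundled JSON can be read without installing it.
    with zipfile.ZipFile(wheel_path) as wheel:
        return wheel.read(INDEX_PATH).decode("utf-8").splitlines(keepends=True)

old = read_index("comfyui_workflow_templates-0.1.97-py3-none-any.whl")
new = read_index("comfyui_workflow_templates-0.2.1-py3-none-any.whl")

for line in difflib.unified_diff(old, new, fromfile="0.1.97", tofile="0.2.1"):
    print(line, end="")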

This version of comfyui-workflow-templates has been flagged as potentially problematic.

@@ -7,10 +7,10 @@
  "templates": [
  {
  "name": "01_qwen_t2i_subgraphed",
- "title": "Qwen-Image Text to Image",
+ "title": "文字轉圖片[新]",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model..",
+ "description": "使用 Qwen-Image 模型根據文本提示生成圖像",
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
  "tags": ["文生圖", "圖像"],
  "models": ["Qwen-Image"],
@@ -19,10 +19,10 @@
  },
  {
  "name": "02_qwen_Image_edit_subgraphed",
- "title": "Qwen Image Edit 2509",
+ "title": "影像編輯[新]",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
+ "description": " Qwen-Image-Edit 編輯你的圖片",
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
  "tags": ["圖生圖", "圖像編輯", "ControlNet"],
  "models": ["Qwen-Image"],
@@ -31,8 +31,8 @@
  },
  {
  "name": "03_video_wan2_2_14B_i2v_subgraphed",
- "title": "Wan 2.2 14B Image to Video",
- "description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
+ "title": "圖片轉影片[新]",
+ "description": "使用 Wan2.2 14B 從輸入圖片生成影片",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
@@ -43,10 +43,10 @@
  },
  {
  "name": "04_hunyuan_3d_2.1_subgraphed",
- "title": "Hunyuan3D 2.1: image to 3D",
+ "title": "圖片轉3D【新】",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
+ "description": "使用 Hunyuan3D 2.0 從單張圖片生成 3D 模型。",
  "tags": ["圖像轉3D", "3D"],
  "models": ["Hunyuan3D"],
  "date": "2025-10-17",
@@ -55,10 +55,10 @@
  },
  {
  "name": "05_audio_ace_step_1_t2a_song_subgraphed",
- "title": "ACE Step v1 Text to Song",
+ "title": "文字轉音訊【新】",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
+ "description": "使用ACE-Step v1根據文字提示生成音訊",
  "tags": ["文本轉音頻", "音頻"],
  "models": ["ACE-Step"],
  "date": "2025-10-17",
@@ -430,7 +430,7 @@
  "description": "使用 Flux Kontext 全節點可見性編輯影像,適合學習工作流程。",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
  "tags": ["圖像編輯", "圖生圖"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-06-26",
  "size": 16.43,
  "vram": 18.0
@@ -454,7 +454,7 @@
  "mediaSubtype": "webp",
  "description": "高質量動漫風格圖像生成,具有增強的角色理解和細節紋理。基於 Danbooru 數據集從 Neta Lumina 微調。",
  "tags": ["文生圖", "圖像", "動漫"],
- "models": ["NetaYume Lumina"],
+ "models": ["OmniGen"],
  "date": "2025-10-10",
  "size": 9.89
  },
@@ -478,7 +478,7 @@
  "thumbnailVariant": "compareSlider",
  "description": "Supports various tasks such as image inpainting, outpainting, and object removal",
  "tags": ["修復", "外繪"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-09-21",
  "size": 27.01,
  "vram": 20.0
@@ -491,7 +491,7 @@
  "description": "使用 Flux Dev fp8 量化版本生成影像。適合顯存有限的裝置,僅需一個模型檔案,但畫質略低於完整版。",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
  "tags": ["文生圖", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 16.06,
  "vram": 17.0
@@ -504,7 +504,7 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["圖生圖", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-09-02",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
  "size": 17.32,
@@ -518,7 +518,7 @@
  "description": "使用 Flux Schnell fp8 量化版本快速生成影像。適合入門級硬體,僅需 4 個步驟即可生成影像。",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
  "tags": ["文生圖", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 16.05,
  "vram": 17.0
@@ -531,7 +531,7 @@
  "description": "微調的 FLUX 模型,將寫實程度推向極限",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
  "tags": ["文生圖", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-07-31",
  "size": 20.74,
  "vram": 21.5
@@ -544,7 +544,7 @@
  "description": "使用 Flux Dev 完整版生成高品質影像。需要較大顯存與多個模型檔案,但提供最佳提示遵循能力與畫質。",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
  "tags": ["文生圖", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 31.83,
  "vram": 22.0
@@ -557,7 +557,7 @@
  "description": "使用 Flux Schnell 完整版快速生成影像。採用 Apache2.0 授權,僅需 4 個步驟即可生成影像並維持良好畫質。",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
  "tags": ["文生圖", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 31.81
  },
@@ -570,7 +570,7 @@
  "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
  "tags": ["圖生圖", "修復", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 9.66
  },
@@ -583,7 +583,7 @@
  "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
  "tags": ["外繪", "圖像", "圖生圖"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 9.66
  },
@@ -596,7 +596,7 @@
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
  "tags": ["圖生圖", "ControlNet", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 31.83
  },
@@ -609,7 +609,7 @@
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "ttps://docs.comfy.org/tutorials/flux/flux-1-controlnet",
  "tags": ["圖生圖", "ControlNet", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 32.98
  },
@@ -621,7 +621,7 @@
  "description": "使用 Flux Redux 從參考影像轉移風格生成影像。",
  "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
  "tags": ["圖生圖", "ControlNet", "圖像"],
- "models": ["Flux"],
+ "models": ["Flux", "BFL"],
  "date": "2025-03-01",
  "size": 32.74
  },
@@ -720,7 +720,7 @@
  "description": "使用 SD 3.5 生成影像。",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
  "tags": ["文生圖", "圖像"],
- "models": ["SD3.5"],
+ "models": ["SD3.5", "Stability"],
  "date": "2025-03-01",
  "size": 13.91
  },
@@ -733,7 +733,7 @@
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
  "tags": ["圖生圖", "圖像", "ControlNet"],
- "models": ["SD3.5"],
+ "models": ["SD3.5", "Stability"],
  "date": "2025-03-01",
  "size": 21.97
  },
@@ -746,7 +746,7 @@
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
  "tags": ["圖生圖", "圖像", "ControlNet"],
- "models": ["SD3.5"],
+ "models": ["SD3.5", "Stability"],
  "date": "2025-03-01",
  "size": 21.97
  },
@@ -759,7 +759,7 @@
  "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
  "tags": ["圖生圖", "圖像"],
- "models": ["SD3.5"],
+ "models": ["SD3.5", "Stability"],
  "date": "2025-03-01",
  "size": 21.97
  },
@@ -819,7 +819,7 @@
  "thumbnailVariant": "compareSlider",
  "description": "在 ComfyUI 中執行 Lotus 深度,實現零樣本、高效的單目深度估計並保持高細節保留。",
  "tags": ["圖像", "文生圖"],
- "models": ["SD1.5"],
+ "models": ["SD1.5", "Stability"],
  "date": "2025-05-21",
  "size": 1.93
  }
@@ -907,7 +907,7 @@
  },
  {
  "name": "video_wan2_2_14B_fun_inpaint",
- "title": "Wan 2.2 14B 趣味修復",
+ "title": "Wan 2.2 14B Fun Inpaint",
  "description": "使用 Wan 2.2 Fun Inp 從起始幀和結束幀生成視頻。",
  "mediaType": "image",
  "mediaSubtype": "webp",
@@ -1203,7 +1203,7 @@
  "description": "使用 Hunyuan 模型從文字提示生成影片。",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
  "tags": ["文生視頻", "視頻"],
- "models": ["Hunyuan Video"],
+ "models": ["Hunyuan Video", "Tencent"],
  "date": "2025-03-01",
  "size": 33.04
  },
@@ -1215,7 +1215,7 @@
  "description": "從靜態影像生成影片。",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
  "tags": ["圖生視頻", "視頻"],
- "models": ["SVD"],
+ "models": ["SVD", "Stability"],
  "date": "2025-03-01",
  "size": 8.9
  },
@@ -1227,7 +1227,7 @@
  "description": "先從文字提示建立影像再生成影片。",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
  "tags": ["文生視頻", "視頻"],
- "models": ["SVD"],
+ "models": ["SVD", "Stability"],
  "date": "2025-03-01",
  "size": 15.36
  }
@@ -1242,12 +1242,12 @@
  "templates": [
  {
  "name": "audio_stable_audio_example",
- "title": "Stable Audio",
+ "title": "穩定音頻",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
  "description": "使用 Stable Audio 從文字提示生成音訊。",
  "tags": ["文本轉音頻", "音頻"],
- "models": ["Stable Audio"],
+ "models": ["Stable Audio", "Stability"],
  "date": "2025-03-01",
  "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
  "size": 5.35
@@ -1304,7 +1304,7 @@
  "mediaSubtype": "webp",
  "description": "使用 Hunyuan3D 2.0 從單張圖像生成 3D 模型。",
  "tags": ["圖像轉3D", "3D"],
- "models": ["Hunyuan3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
  "size": 4.59
@@ -1316,7 +1316,7 @@
  "mediaSubtype": "webp",
  "description": "使用 Hunyuan3D 2.0 從單張影像生成 3D 模型。",
  "tags": ["圖像轉3D", "3D"],
- "models": ["Hunyuan3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
  "size": 4.59
@@ -1328,7 +1328,7 @@
  "mediaSubtype": "webp",
  "description": "使用 Hunyuan3D 2.0 MV 從多個視角生成 3D 模型。",
  "tags": ["3D", "圖像轉3D"],
- "models": ["Hunyuan3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
  "thumbnailVariant": "hoverDissolve",
@@ -1341,7 +1341,7 @@
  "mediaSubtype": "webp",
  "description": "使用 Hunyuan3D 2.0 MV Turbo 從多個視角生成 3D 模型。",
  "tags": ["圖像轉3D", "3D"],
- "models": ["Hunyuan3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
  "thumbnailVariant": "hoverDissolve",
@@ -1391,7 +1391,7 @@
  "thumbnailVariant": "compareSlider",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
  "tags": ["圖像編輯", "圖像"],
- "models": ["Flux", "Kontext"],
+ "models": ["Flux", "Kontext", "BFL"],
  "date": "2025-05-29",
  "OpenSource": false,
  "size": 0,
@@ -1434,7 +1434,7 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["文生圖", "圖像", "API"],
- "models": ["Wan2.5"],
+ "models": ["Wan2.5", "Wan"],
  "date": "2025-09-25",
  "OpenSource": false,
  "size": 0,
@@ -1637,7 +1637,7 @@
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
  "tags": ["圖生圖", "圖像", "API"],
- "models": ["GPT-Image-1"],
+ "models": ["GPT-Image-1", "OpenAI"],
  "date": "2025-03-01",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
  "OpenSource": false,
@@ -1652,7 +1652,7 @@
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
  "tags": ["修復", "圖像", "API"],
- "models": ["GPT-Image-1"],
+ "models": ["GPT-Image-1", "OpenAI"],
  "date": "2025-03-01",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
  "OpenSource": false,
@@ -1667,7 +1667,7 @@
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
  "tags": ["文生圖", "圖像", "API"],
- "models": ["GPT-Image-1"],
+ "models": ["GPT-Image-1", "OpenAI"],
  "date": "2025-03-01",
  "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
  "OpenSource": false,
@@ -1746,7 +1746,7 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["圖生視頻", "視頻", "API"],
- "models": ["Wan2.5"],
+ "models": ["Wan2.5", "Wan"],
  "date": "2025-09-27",
  "tutorialUrl": "",
  "OpenSource": false,
@@ -1760,7 +1760,7 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["圖生視頻", "視頻", "API"],
- "models": ["Wan2.5"],
+ "models": ["Wan2.5", "Wan"],
  "date": "2025-09-27",
  "tutorialUrl": "",
  "OpenSource": false,