comfyui-workflow-templates 0.1.83-py3-none-any.whl → 0.1.84-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release.

@@ -212,32 +212,6 @@
         "default"
       ]
     },
-    {
-      "id": 99,
-      "type": "MarkdownNote",
-      "pos": [
-        -840,
-        -140
-      ],
-      "size": [
-        540,
-        550
-      ],
-      "flags": {},
-      "order": 2,
-      "mode": 0,
-      "inputs": [],
-      "outputs": [],
-      "title": "Model links",
-      "properties": {
-        "widget_ue_connectable": {}
-      },
-      "widgets_values": [
-        "[Tutorial](https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit) | [教程](https://docs.comfy.org/zh-CN/tutorials/image/qwen/qwen-image-edit)\n\n## Model links\n\nYou can find all the models on [Comfy-Org/Qwen-Image_ComfyUI](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/tree/main) and [Comfy-Org/Qwen-Image-Edit_ComfyUI](https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI)\n\n**Diffusion model**\n\n- [qwen_image_edit_2509_fp8_e4m3fn.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_edit_2509_fp8_e4m3fn.safetensors)\n\n**LoRA**\n\n- [Qwen-Image-Lightning-4steps-V1.0.safetensors](https://huggingface.co/lightx2v/Qwen-Image-Lightning/resolve/main/Qwen-Image-Lightning-4steps-V1.0.safetensors)\n\n**Text encoder**\n\n- [qwen_2.5_vl_7b_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors)\n\n**VAE**\n\n- [qwen_image_vae.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors)\n\n**Model storage location**\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│   ├── 📂 diffusion_models/\n│   │   └── qwen_image_edit_2509_fp8_e4m3fn.safetensors\n│   ├── 📂 loras/\n│   │   └── Qwen-Image-Lightning-4steps-V1.0.safetensors\n│   ├── 📂 vae/\n│   │   └── qwen_image_vae.safetensors\n│   └── 📂 text_encoders/\n│       └── qwen_2.5_vl_7b_fp8_scaled.safetensors\n```\n"
-      ],
-      "color": "#432",
-      "bgcolor": "#653"
-    },
     {
       "id": 66,
       "type": "ModelSamplingAuraFlow",
@@ -426,7 +400,7 @@
         90
       ],
       "flags": {},
-      "order": 3,
+      "order": 2,
       "mode": 0,
       "inputs": [],
       "outputs": [
@@ -464,30 +438,6 @@
         "default"
       ]
     },
-    {
-      "id": 97,
-      "type": "MarkdownNote",
-      "pos": [
-        740,
-        610
-      ],
-      "size": [
-        300,
-        160
-      ],
-      "flags": {},
-      "order": 4,
-      "mode": 0,
-      "inputs": [],
-      "outputs": [],
-      "title": "KSampler settings",
-      "properties": {},
-      "widgets_values": [
-        "You can test and find the best settings yourself. The following table is for reference.\n\n| Model | Steps | CFG |\n|---------------------|---------------|---------------|\n| Official | 50 | 4.0 |\n| fp8_e4m3fn | 20 | 2.5 |\n| fp8_e4m3fn + 4-step LoRA | 4 | 1.0 |\n"
-      ],
-      "color": "#432",
-      "bgcolor": "#653"
-    },
     {
       "id": 60,
       "type": "SaveImage",
@@ -539,7 +489,7 @@
         314.0000305175781
       ],
       "flags": {},
-      "order": 5,
+      "order": 3,
       "mode": 4,
       "inputs": [],
       "outputs": [
@@ -592,7 +542,7 @@
         314.0000305175781
       ],
       "flags": {},
-      "order": 6,
+      "order": 4,
      "mode": 4,
       "inputs": [],
       "outputs": [
@@ -645,7 +595,7 @@
         106
       ],
       "flags": {},
-      "order": 7,
+      "order": 5,
       "mode": 0,
       "inputs": [],
       "outputs": [
@@ -860,7 +810,7 @@
         314.0000305175781
       ],
       "flags": {},
-      "order": 8,
+      "order": 6,
       "mode": 0,
       "inputs": [],
       "outputs": [
@@ -956,38 +906,89 @@
       ]
     },
     {
-      "id": 96,
+      "id": 113,
       "type": "MarkdownNote",
       "pos": [
-        -240,
+        730,
         1030
       ],
       "size": [
-        290,
-        140
+        330,
+        90
       ],
       "flags": {},
-      "order": 9,
+      "order": 7,
       "mode": 0,
       "inputs": [],
       "outputs": [],
+      "title": "Note: About image size",
       "properties": {},
       "widgets_values": [
-        "This node is here to avoid bad outputs caused by an overly large input image: when one image is provided, the workflow uses that image's size for the output.\n\n**TextEncodeQwenImageEditPlus** scales your input to 1024×1024 pixels, but the size of your first input image is used for the output, so an excessively large input (such as 3000×3000 pixels) can give bad results."
+        "You can use the latent from **EmptySD3LatentImage** instead of **VAE Encode**, so you can customize the image size."
       ],
       "color": "#432",
       "bgcolor": "#653"
     },
     {
-      "id": 113,
+      "id": 97,
       "type": "MarkdownNote",
       "pos": [
-        730,
+        740,
+        610
+      ],
+      "size": [
+        300,
+        160
+      ],
+      "flags": {},
+      "order": 8,
+      "mode": 0,
+      "inputs": [],
+      "outputs": [],
+      "title": "Note: KSampler settings",
+      "properties": {},
+      "widgets_values": [
+        "You can test and find the best settings yourself. The following table is for reference.\n\n| Model | Steps | CFG |\n|---------------------|---------------|---------------|\n| Official | 50 | 4.0 |\n| fp8_e4m3fn | 20 | 2.5 |\n| fp8_e4m3fn + 4-step LoRA | 4 | 1.0 |\n"
+      ],
+      "color": "#432",
+      "bgcolor": "#653"
+    },
+    {
+      "id": 99,
+      "type": "MarkdownNote",
+      "pos": [
+        -840,
+        -140
+      ],
+      "size": [
+        550,
+        550
+      ],
+      "flags": {},
+      "order": 9,
+      "mode": 0,
+      "inputs": [],
+      "outputs": [],
+      "title": "Model links",
+      "properties": {
+        "widget_ue_connectable": {}
+      },
+      "widgets_values": [
+        "[Tutorial](https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit) | [教程](https://docs.comfy.org/zh-CN/tutorials/image/qwen/qwen-image-edit)\n\n## Model links\n\nYou can find all the models on [Comfy-Org/Qwen-Image_ComfyUI](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/tree/main) and [Comfy-Org/Qwen-Image-Edit_ComfyUI](https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI)\n\n**Diffusion model**\n\n- [qwen_image_edit_2509_fp8_e4m3fn.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_edit_2509_fp8_e4m3fn.safetensors)\n\n**LoRA**\n\n- [Qwen-Image-Lightning-4steps-V1.0.safetensors](https://huggingface.co/lightx2v/Qwen-Image-Lightning/resolve/main/Qwen-Image-Lightning-4steps-V1.0.safetensors)\n\n**Text encoder**\n\n- [qwen_2.5_vl_7b_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors)\n\n**VAE**\n\n- [qwen_image_vae.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors)\n\n**Model storage location**\n\n```\n📂 ComfyUI/\n├── 📂 models/\n│   ├── 📂 diffusion_models/\n│   │   └── qwen_image_edit_2509_fp8_e4m3fn.safetensors\n│   ├── 📂 loras/\n│   │   └── Qwen-Image-Lightning-4steps-V1.0.safetensors\n│   ├── 📂 vae/\n│   │   └── qwen_image_vae.safetensors\n│   └── 📂 text_encoders/\n│       └── qwen_2.5_vl_7b_fp8_scaled.safetensors\n```\n"
+      ],
+      "color": "#432",
+      "bgcolor": "#653"
+    },
+    {
+      "id": 96,
+      "type": "MarkdownNote",
+      "pos": [
+        -240,
         1030
       ],
       "size": [
-        330,
-        90
+        290,
+        140
       ],
       "flags": {},
       "order": 10,
@@ -996,7 +997,7 @@
       "outputs": [],
       "properties": {},
       "widgets_values": [
-        "You can use the latent from **EmptySD3LatentImage** instead of **VAE Encode**, so you can customize the image size."
+        "This node is here to avoid bad outputs caused by an overly large input image: when one image is provided, the workflow uses that image's size for the output.\n\n**TextEncodeQwenImageEditPlus** scales your input to 1024×1024 pixels, but the size of your first input image is used for the output, so an excessively large input (such as 3000×3000 pixels) can give bad results."
       ],
       "color": "#432",
       "bgcolor": "#653"
@@ -1245,10 +1246,10 @@
   "config": {},
   "extra": {
     "ds": {
-      "scale": 0.6413348663336487,
+      "scale": 0.5300288151517762,
       "offset": [
-        1036.839241485038,
-        412.30160240147245
+        1236.5826111125436,
+        518.8996996799999
       ]
     },
     "frontendVersion": "1.28.1",