comfyui-workflow-templates 0.1.35__py3-none-any.whl → 0.1.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model-1.webp +0 -0
  2. comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model.json +317 -288
  3. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model-1.webp +0 -0
  4. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model-2.webp +0 -0
  5. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model.json +272 -236
  6. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo-1.webp +0 -0
  7. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo-2.webp +0 -0
  8. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo.json +203 -167
  9. comfyui_workflow_templates/templates/area_composition-1.webp +0 -0
  10. comfyui_workflow_templates/templates/area_composition.json +1158 -519
  11. comfyui_workflow_templates/templates/area_composition_square_area_for_subject-1.webp +0 -0
  12. comfyui_workflow_templates/templates/area_composition_square_area_for_subject.json +856 -363
  13. comfyui_workflow_templates/templates/audio_stable_audio_example.json +201 -48
  14. comfyui_workflow_templates/templates/controlnet_example.json +625 -208
  15. comfyui_workflow_templates/templates/flux_kontext_dev_grouped.json +245 -1085
  16. comfyui_workflow_templates/templates/flux_redux_model_example.json +1112 -617
  17. comfyui_workflow_templates/templates/hiresfix_latent_workflow-1.webp +0 -0
  18. comfyui_workflow_templates/templates/hiresfix_latent_workflow-2.webp +0 -0
  19. comfyui_workflow_templates/templates/hiresfix_latent_workflow.json +556 -215
  20. comfyui_workflow_templates/templates/image2image.json +434 -124
  21. comfyui_workflow_templates/templates/image_lotus_depth_v1_1.json +35 -22
  22. comfyui_workflow_templates/templates/index.json +3 -11
  23. comfyui_workflow_templates/templates/inpaint_example.json +473 -157
  24. comfyui_workflow_templates/templates/inpaint_model_outpainting.json +507 -175
  25. comfyui_workflow_templates/templates/latent_upscale_different_prompt_model-1.webp +0 -0
  26. comfyui_workflow_templates/templates/latent_upscale_different_prompt_model.json +682 -275
  27. comfyui_workflow_templates/templates/video_wan_vace_14B_ref2v.json +187 -187
  28. comfyui_workflow_templates/templates/video_wan_vace_14B_t2v.json +151 -151
  29. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v.json +139 -139
  30. comfyui_workflow_templates/templates/video_wan_vace_flf2v.json +182 -182
  31. comfyui_workflow_templates/templates/video_wan_vace_inpainting.json +141 -167
  32. comfyui_workflow_templates/templates/video_wan_vace_outpainting.json +34 -34
  33. {comfyui_workflow_templates-0.1.35.dist-info → comfyui_workflow_templates-0.1.37.dist-info}/METADATA +1 -1
  34. {comfyui_workflow_templates-0.1.35.dist-info → comfyui_workflow_templates-0.1.37.dist-info}/RECORD +37 -39
  35. comfyui_workflow_templates/templates/area_composition_reversed-1.webp +0 -0
  36. comfyui_workflow_templates/templates/area_composition_reversed.json +0 -967
  37. comfyui_workflow_templates/templates/flux_dev_example.json +0 -771
  38. {comfyui_workflow_templates-0.1.35.dist-info → comfyui_workflow_templates-0.1.37.dist-info}/WHEEL +0 -0
  39. {comfyui_workflow_templates-0.1.35.dist-info → comfyui_workflow_templates-0.1.37.dist-info}/licenses/LICENSE +0 -0
  40. {comfyui_workflow_templates-0.1.35.dist-info → comfyui_workflow_templates-0.1.37.dist-info}/top_level.txt +0 -0
@@ -1,7 +1,7 @@
  {
  "id": "96995b8f-85c5-47af-b3cf-7b6a24675694",
  "revision": 0,
- "last_node_id": 188,
+ "last_node_id": 189,
  "last_link_id": 284,
  "nodes": [
  {
@@ -258,43 +258,6 @@
  },
  "widgets_values": []
  },
- {
- "id": 131,
- "type": "PrimitiveInt",
- "pos": [
- -1230,
- 710
- ],
- "size": [
- 310,
- 90
- ],
- "flags": {},
- "order": 0,
- "mode": 0,
- "inputs": [],
- "outputs": [
- {
- "name": "INT",
- "type": "INT",
- "links": [
- 209
- ]
- }
- ],
- "title": "Length",
- "properties": {
- "cnr_id": "comfy-core",
- "ver": "0.3.38",
- "Node name for S&R": "PrimitiveInt"
- },
- "widgets_values": [
- 81,
- "fixed"
- ],
- "color": "#322",
- "bgcolor": "#533"
- },
  {
  "id": 7,
  "type": "CLIPTextEncode",
@@ -839,7 +802,7 @@
  58
  ],
  "flags": {},
- "order": 1,
+ "order": 0,
  "mode": 0,
  "inputs": [],
  "outputs": [
@@ -883,7 +846,7 @@
  314.00006103515625
  ],
  "flags": {},
- "order": 2,
+ "order": 1,
  "mode": 0,
  "inputs": [],
  "outputs": [
@@ -912,29 +875,6 @@
  "color": "#322",
  "bgcolor": "#533"
  },
- {
- "id": 176,
- "type": "Note",
- "pos": [
- -510,
- 1480
- ],
- "size": [
- 560,
- 140
- ],
- "flags": {},
- "order": 3,
- "mode": 0,
- "inputs": [],
- "outputs": [],
- "properties": {},
- "widgets_values": [
- "In fact, VACE supports the generation from any frame to video. You can use the method of creating image and mask sequences in our examples to create sequences containing any frames and their corresponding masks, thus achieving the image generation from any frame to video.\n\n---\n\n实际上 VACE 支持任意帧到视频的生成,你可以使用我们示例中创建图像和蒙版序列的方法来创建包含任意帧的序列和对应的蒙版序列,从而实现任意帧到视频的图像生成"
- ],
- "color": "#432",
- "bgcolor": "#653"
- },
  {
  "id": 184,
  "type": "CLIPLoader",
@@ -947,7 +887,7 @@
  106
  ],
  "flags": {},
- "order": 4,
+ "order": 2,
  "mode": 4,
  "inputs": [],
  "outputs": [
@@ -990,7 +930,7 @@
  82
  ],
  "flags": {},
- "order": 5,
+ "order": 3,
  "mode": 0,
  "inputs": [],
  "outputs": [
@@ -1028,7 +968,7 @@
  82.62108612060547
  ],
  "flags": {},
- "order": 6,
+ "order": 4,
  "mode": 0,
  "inputs": [],
  "outputs": [
@@ -1054,89 +994,6 @@
  "fixed"
  ]
  },
- {
- "id": 77,
- "type": "MarkdownNote",
- "pos": [
- -1830,
- 10
- ],
- "size": [
- 490,
- 800
- ],
- "flags": {},
- "order": 7,
- "mode": 0,
- "inputs": [],
- "outputs": [],
- "properties": {},
- "widgets_values": [
- "[Tutorial](https://docs.comfy.org/tutorials/video/wan/vace) | [教程](https://docs.comfy.org/zh-CN/tutorials/video/wan/vace)\n\n[Causvid Lora by Kijai](https://www.reddit.com/r/StableDiffusion/comments/1knuafk/causvid_lora_massive_speedup_for_wan21_made_by/)\n\n## 14B Support 480P 720P\n\n**Diffusion Model**\n- [wan2.1_vace_14B_fp16.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_vace_14B_fp16.safetensors)\n\n**LoRA**\n- [Wan21_CausVid_14B_T2V_lora_rank32.safetensors](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_14B_T2V_lora_rank32.safetensors)\n\nFYI: Using an RTX 4090, it takes approximately 40 minutes at 81 frames and 720P resolution.\n\nAfter using Wan21_CausVid_14B_T2V_lora_rank32.safetensors, it only takes about 4 minutes.\n\n## 1.3B Support 480P only\n\n**Diffusion Model**\n- [wan2.1_vace_1.3B_fp16.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_vace_14B_fp16.safetensors)\n\n**LoRA**\n- [Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors)\n\n## Other Models\n\n* You may already have these models if you use Wan workflow before.\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors?download=true)\n\n**Text encoders** Chose one of following model\n- [umt5_xxl_fp16.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp16.safetensors?download=true)\n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors?download=true)\n\n> You can choose between fp16 of fp8; I used fp16 to match what kijai's wrapper is compatible with.\n\nFile save location\n\n```\nComfyUI/\n├── models/\n│ ├── diffusion_models/\n│ │ ├-── wan2.1_vace_14B_fp16.safetensors\n│ │ └─── wan2.1_vace_1.3B_fp16.safetensors \n│ ├── text_encoders/\n│ │ └─── umt5_xxl_fp8_e4m3fn_scaled.safetensors # or fp16\n│ ├── loras/\n│ │ ├── Wan21_CausVid_14B_T2V_lora_rank32.safetensors\n│ │ └── Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors\n│ └── vae/\n│ └── wan_2.1_vae.safetensors\n```\n"
- ],
- "color": "#432",
- "bgcolor": "#653"
- },
- {
- "id": 181,
- "type": "MarkdownNote",
- "pos": [
- -1480,
- 1040
- ],
- "size": [
- 210,
- 110
- ],
- "flags": {},
- "order": 8,
- "mode": 0,
- "inputs": [],
- "outputs": [],
- "properties": {},
- "widgets_values": [
- "| Model | 480P | 720P |\n| ------------------------------------------------------------ | ---- | ---- |\n| [VACE-1.3B](https://huggingface.co/Wan-AI/Wan2.1-VACE-1.3B) | ✅ | ❌ |\n| [VACE-14B](https://huggingface.co/Wan-AI/Wan2.1-VACE-14B) | ✅ | ✅ |"
- ],
- "color": "#432",
- "bgcolor": "#653"
- },
- {
- "id": 171,
- "type": "PrimitiveInt",
- "pos": [
- -1240,
- 900
- ],
- "size": [
- 310,
- 90
- ],
- "flags": {},
- "order": 9,
- "mode": 0,
- "inputs": [],
- "outputs": [
- {
- "name": "INT",
- "type": "INT",
- "links": [
- 257
- ]
- }
- ],
- "title": "Length",
- "properties": {
- "cnr_id": "comfy-core",
- "ver": "0.3.38",
- "Node name for S&R": "PrimitiveInt"
- },
- "widgets_values": [
- 82,
- "increment"
- ],
- "color": "#322",
- "bgcolor": "#533"
- },
  {
  "id": 151,
  "type": "PreviewImage",
@@ -1178,7 +1035,7 @@
  314.00006103515625
  ],
  "flags": {},
- "order": 10,
+ "order": 5,
  "mode": 0,
  "inputs": [],
  "outputs": [
@@ -1368,7 +1225,7 @@
  106
  ],
  "flags": {},
- "order": 11,
+ "order": 6,
  "mode": 0,
  "inputs": [],
  "outputs": [
@@ -1474,7 +1331,7 @@
  82
  ],
  "flags": {},
- "order": 12,
+ "order": 7,
  "mode": 4,
  "inputs": [],
  "outputs": [
@@ -1580,7 +1437,7 @@
  90
  ],
  "flags": {},
- "order": 13,
+ "order": 8,
  "mode": 0,
  "inputs": [],
  "outputs": [
@@ -1612,30 +1469,6 @@
  "color": "#322",
  "bgcolor": "#533"
  },
- {
- "id": 188,
- "type": "MarkdownNote",
- "pos": [
- -870,
- -180
- ],
- "size": [
- 410,
- 140
- ],
- "flags": {},
- "order": 14,
- "mode": 0,
- "inputs": [],
- "outputs": [],
- "title": "About CausVid LoRA",
- "properties": {},
- "widgets_values": [
- "We have added CausVid LoRA by default to achieve acceleration. However, in some cases, the video may shake and become blurry. You might need to test different LoRA intensities to get the best results, which should be between 0.3 and 0.7. If you don't need it, you can use the bypass mode to disable it, and then restore the settings of `KSampler` to the default ones.\n\n\n我们默认添加了 CausVid LoRA 来实现加速,但有些情况下会出现视频抖动和模糊的情况,你可能需要测试不同的 LoRA 强度来获取最好的结果,0.3~0.7 之间。如果你不需要的话,可以使用 bypass 模式禁用它,然后恢复 `KSampler`的设置到默认的设置即可。"
- ],
- "color": "#432",
- "bgcolor": "#653"
- },
  {
  "id": 6,
  "type": "CLIPTextEncode",
@@ -1691,7 +1524,7 @@
  160
  ],
  "flags": {},
- "order": 15,
+ "order": 9,
  "mode": 0,
  "inputs": [],
  "outputs": [],
@@ -1702,6 +1535,173 @@
  ],
  "color": "#432",
  "bgcolor": "#653"
+ },
+ {
+ "id": 77,
+ "type": "MarkdownNote",
+ "pos": [
+ -1830,
+ 10
+ ],
+ "size": [
+ 490,
+ 800
+ ],
+ "flags": {},
+ "order": 10,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "properties": {},
+ "widgets_values": [
+ "[Tutorial](https://docs.comfy.org/tutorials/video/wan/vace) | [教程](https://docs.comfy.org/zh-CN/tutorials/video/wan/vace)\n\n[Causvid Lora by Kijai](https://www.reddit.com/r/StableDiffusion/comments/1knuafk/causvid_lora_massive_speedup_for_wan21_made_by/)\n\n## 14B Support 480P 720P\n\n**Diffusion Model**\n- [wan2.1_vace_14B_fp16.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_vace_14B_fp16.safetensors)\n\n**LoRA**\n- [Wan21_CausVid_14B_T2V_lora_rank32.safetensors](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_14B_T2V_lora_rank32.safetensors)\n\nFYI: Using an RTX 4090, it takes approximately 40 minutes at 81 frames and 720P resolution.\n\nAfter using Wan21_CausVid_14B_T2V_lora_rank32.safetensors, it only takes about 4 minutes.\n\n## 1.3B Support 480P only\n\n**Diffusion Model**\n- [wan2.1_vace_1.3B_fp16.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_vace_1.3B_fp16.safetensors)\n\n**LoRA**\n- [Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors)\n\n## Other Models\n\n* You may already have these models if you use Wan workflow before.\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors?download=true)\n\n**Text encoders** Chose one of following model\n- [umt5_xxl_fp16.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp16.safetensors?download=true)\n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors?download=true)\n\n> You can choose between fp16 of fp8; I used fp16 to match what kijai's wrapper is compatible with.\n\nFile save location\n\n```\nComfyUI/\n├── models/\n│ ├── diffusion_models/\n│ │ ├-── wan2.1_vace_14B_fp16.safetensors\n│ │ └─── wan2.1_vace_1.3B_fp16.safetensors \n│ ├── text_encoders/\n│ │ └─── umt5_xxl_fp8_e4m3fn_scaled.safetensors # or fp16\n│ ├── loras/\n│ │ ├── Wan21_CausVid_14B_T2V_lora_rank32.safetensors\n│ │ └── Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors\n│ └── vae/\n│ └── wan_2.1_vae.safetensors\n```\n"
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 176,
+ "type": "Note",
+ "pos": [
+ -510,
+ 1480
+ ],
+ "size": [
+ 560,
+ 110
+ ],
+ "flags": {},
+ "order": 11,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "properties": {},
+ "widgets_values": [
+ "In fact, VACE supports the generation from any frame to video. You can use the method of creating image and mask sequences in our examples to create sequences containing any frames and their corresponding masks, thus achieving the image generation from any frame to video."
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 188,
+ "type": "MarkdownNote",
+ "pos": [
+ -870,
+ -150
+ ],
+ "size": [
+ 400,
+ 110
+ ],
+ "flags": {},
+ "order": 12,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "title": "About CausVid LoRA",
+ "properties": {},
+ "widgets_values": [
+ "We have added CausVid LoRA by default to achieve acceleration. However, in some cases, the video may shake and become blurry. You might need to test different LoRA intensities to get the best results, which should be between 0.3 and 0.7. If you don't need it, you can use the bypass mode to disable it, and then restore the settings of `KSampler` to the default ones."
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 181,
+ "type": "MarkdownNote",
+ "pos": [
+ -1480,
+ 1040
+ ],
+ "size": [
+ 210,
+ 110
+ ],
+ "flags": {},
+ "order": 13,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "properties": {},
+ "widgets_values": [
+ "| Model | 480P | 720P |\n| ------------------------------------------------------------ | ---- | ---- |\n| [VACE-1.3B](https://huggingface.co/Wan-AI/Wan2.1-VACE-1.3B) | ✅ | ❌ |\n| [VACE-14B](https://huggingface.co/Wan-AI/Wan2.1-VACE-14B) | ✅ | ✅ |"
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 131,
+ "type": "PrimitiveInt",
+ "pos": [
+ -1230,
+ 710
+ ],
+ "size": [
+ 310,
+ 90
+ ],
+ "flags": {},
+ "order": 14,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "INT",
+ "type": "INT",
+ "links": [
+ 209
+ ]
+ }
+ ],
+ "title": "Length",
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.38",
+ "Node name for S&R": "PrimitiveInt"
+ },
+ "widgets_values": [
+ 81,
+ "fixed"
+ ],
+ "color": "#322",
+ "bgcolor": "#533"
+ },
+ {
+ "id": 171,
+ "type": "PrimitiveInt",
+ "pos": [
+ -1220,
+ 900
+ ],
+ "size": [
+ 310,
+ 90
+ ],
+ "flags": {},
+ "order": 15,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "INT",
+ "type": "INT",
+ "links": [
+ 257
+ ]
+ }
+ ],
+ "title": "Length",
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.38",
+ "Node name for S&R": "PrimitiveInt"
+ },
+ "widgets_values": [
+ 79,
+ "fixed"
+ ],
+ "color": "#322",
+ "bgcolor": "#533"
  }
  ],
  "links": [
@@ -2253,13 +2253,13 @@
  "config": {},
  "extra": {
  "ds": {
- "scale": 0.5054470284993043,
+ "scale": 0.8140274938684171,
  "offset": [
- 1793.6487219589956,
- 397.7060029196524
+ 1815.85355976548,
+ 75.49507857319055
  ]
  },
- "frontendVersion": "1.20.7",
+ "frontendVersion": "1.23.4",
  "node_versions": {
  "comfy-core": "0.3.34"
  },