comfyui-workflow-templates 0.1.36__py3-none-any.whl → 0.1.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,8 @@
  {
  "id": "7cb6261d-3b03-4171-bbd1-a4b256b42404",
  "revision": 0,
- "last_node_id": 17,
- "last_link_id": 17,
+ "last_node_id": 19,
+ "last_link_id": 19,
  "nodes": [
  {
  "id": 14,
@@ -48,9 +48,9 @@
  }
  ],
  "properties": {
+ "Node name for S&R": "CheckpointLoaderSimple",
  "cnr_id": "comfy-core",
  "ver": "0.3.44",
- "Node name for S&R": "CheckpointLoaderSimple",
  "models": [
  {
  "name": "v1-5-pruned-emaonly-fp16.safetensors",
@@ -65,49 +65,6 @@
  "color": "#322",
  "bgcolor": "#533"
  },
- {
- "id": 12,
- "type": "VAEEncode",
- "pos": [
- 270,
- 650
- ],
- "size": [
- 210,
- 46
- ],
- "flags": {},
- "order": 6,
- "mode": 0,
- "inputs": [
- {
- "name": "pixels",
- "type": "IMAGE",
- "link": 10
- },
- {
- "name": "vae",
- "type": "VAE",
- "link": 16
- }
- ],
- "outputs": [
- {
- "name": "LATENT",
- "type": "LATENT",
- "slot_index": 0,
- "links": [
- 11
- ]
- }
- ],
- "properties": {
- "cnr_id": "comfy-core",
- "ver": "0.3.44",
- "Node name for S&R": "VAEEncode"
- },
- "widgets_values": []
- },
  {
  "id": 6,
  "type": "CLIPTextEncode",
@@ -120,7 +77,7 @@
  164.31304931640625
  ],
  "flags": {},
- "order": 4,
+ "order": 5,
  "mode": 0,
  "inputs": [
  {
@@ -140,9 +97,9 @@
  }
  ],
  "properties": {
+ "Node name for S&R": "CLIPTextEncode",
  "cnr_id": "comfy-core",
- "ver": "0.3.44",
- "Node name for S&R": "CLIPTextEncode"
+ "ver": "0.3.44"
  },
  "widgets_values": [
  "photograph of victorian woman with wings, sky clouds, meadow grass\n"
@@ -162,7 +119,7 @@
  180.6060791015625
  ],
  "flags": {},
- "order": 5,
+ "order": 6,
  "mode": 0,
  "inputs": [
  {
@@ -182,9 +139,9 @@
  }
  ],
  "properties": {
+ "Node name for S&R": "CLIPTextEncode",
  "cnr_id": "comfy-core",
- "ver": "0.3.44",
- "Node name for S&R": "CLIPTextEncode"
+ "ver": "0.3.44"
  },
  "widgets_values": [
  "watermark, text\n"
@@ -215,48 +172,6 @@
  "color": "#432",
  "bgcolor": "#653"
  },
- {
- "id": 10,
- "type": "LoadImage",
- "pos": [
- -90,
- 410
- ],
- "size": [
- 310,
- 300
- ],
- "flags": {},
- "order": 2,
- "mode": 0,
- "inputs": [],
- "outputs": [
- {
- "name": "IMAGE",
- "type": "IMAGE",
- "slot_index": 0,
- "links": [
- 10
- ]
- },
- {
- "name": "MASK",
- "type": "MASK",
- "links": null
- }
- ],
- "properties": {
- "cnr_id": "comfy-core",
- "ver": "0.3.44",
- "Node name for S&R": "LoadImage"
- },
- "widgets_values": [
- "example.png",
- "image"
- ],
- "color": "#322",
- "bgcolor": "#533"
- },
  {
  "id": 3,
  "type": "KSampler",
@@ -269,7 +184,7 @@
  262
  ],
  "flags": {},
- "order": 7,
+ "order": 9,
  "mode": 0,
  "inputs": [
  {
@@ -304,9 +219,9 @@
  }
  ],
  "properties": {
+ "Node name for S&R": "KSampler",
  "cnr_id": "comfy-core",
- "ver": "0.3.44",
- "Node name for S&R": "KSampler"
+ "ver": "0.3.44"
  },
  "widgets_values": [
  280823642470253,
@@ -318,29 +233,6 @@
  0.8700000000000001
  ]
  },
- {
- "id": 17,
- "type": "MarkdownNote",
- "pos": [
- 740,
- 490
- ],
- "size": [
- 310,
- 110
- ],
- "flags": {},
- "order": 3,
- "mode": 0,
- "inputs": [],
- "outputs": [],
- "properties": {},
- "widgets_values": [
- "When using the image - to - image workflow, you should remember that the **denoise** value should be less than 1. The closer the value is to 0, the more features of the input image the output image will retain."
- ],
- "color": "#432",
- "bgcolor": "#653"
- },
  {
  "id": 8,
  "type": "VAEDecode",
@@ -353,7 +245,7 @@
  46
  ],
  "flags": {},
- "order": 8,
+ "order": 10,
  "mode": 0,
  "inputs": [
  {
@@ -378,9 +270,9 @@
  }
  ],
  "properties": {
+ "Node name for S&R": "VAEDecode",
  "cnr_id": "comfy-core",
- "ver": "0.3.44",
- "Node name for S&R": "VAEDecode"
+ "ver": "0.3.44"
  },
  "widgets_values": []
  },
@@ -396,7 +288,7 @@
  430
  ],
  "flags": {},
- "order": 9,
+ "order": 11,
  "mode": 0,
  "inputs": [
  {
@@ -413,6 +305,176 @@
  "widgets_values": [
  "ComfyUI"
  ]
+ },
+ {
+ "id": 10,
+ "type": "LoadImage",
+ "pos": [
+ -90,
+ 410
+ ],
+ "size": [
+ 310,
+ 314
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "slot_index": 0,
+ "links": [
+ 18
+ ]
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage",
+ "cnr_id": "comfy-core",
+ "ver": "0.3.44"
+ },
+ "widgets_values": [
+ "example.png",
+ "image"
+ ],
+ "color": "#322",
+ "bgcolor": "#533"
+ },
+ {
+ "id": 12,
+ "type": "VAEEncode",
+ "pos": [
+ 560,
+ 650
+ ],
+ "size": [
+ 210,
+ 46
+ ],
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pixels",
+ "type": "IMAGE",
+ "link": 19
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 16
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "slot_index": 0,
+ "links": [
+ 11
+ ]
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "VAEEncode",
+ "cnr_id": "comfy-core",
+ "ver": "0.3.44"
+ },
+ "widgets_values": []
+ },
+ {
+ "id": 17,
+ "type": "MarkdownNote",
+ "pos": [
+ 740,
+ 490
+ ],
+ "size": [
+ 310,
+ 110
+ ],
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "properties": {},
+ "widgets_values": [
+ "When using the image - to - image workflow, you should remember that the **denoise** value should be less than 1. The closer the value is to 0, the more features of the input image the output image will retain."
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 18,
+ "type": "ImageScaleToTotalPixels",
+ "pos": [
+ 280,
+ 650
+ ],
+ "size": [
+ 260,
+ 82
+ ],
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "link": 18
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 19
+ ]
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ImageScaleToTotalPixels"
+ },
+ "widgets_values": [
+ "nearest-exact",
+ 0.25
+ ]
+ },
+ {
+ "id": 19,
+ "type": "MarkdownNote",
+ "pos": [
+ 280,
+ 780
+ ],
+ "size": [
+ 270,
+ 120
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "title": "About Scale Image to Total Pixels",
+ "properties": {},
+ "widgets_values": [
+ "The model this template uses is trained based on a 512*512 image dataset. So if you use an input image that's too large, it might cause some issues. We've added the Scale image to total pixels node to scale images. If you're quite familiar with this model, you can remove it."
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
  }
  ],
  "links": [
@@ -448,14 +510,6 @@
  0,
  "IMAGE"
  ],
- [
- 10,
- 10,
- 0,
- 12,
- 0,
- "IMAGE"
- ],
  [
  11,
  12,
@@ -503,6 +557,22 @@
  8,
  1,
  "VAE"
+ ],
+ [
+ 18,
+ 10,
+ 0,
+ 18,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 19,
+ 18,
+ 0,
+ 12,
+ 0,
+ "IMAGE"
  ]
  ],
  "groups": [
@@ -549,13 +619,13 @@
  "config": {},
  "extra": {
  "ds": {
- "scale": 3.513092809084811,
+ "scale": 0.925107135909973,
  "offset": [
- -714.0186032233146,
- -106.31183471266924
+ 590.5116922244889,
+ -18.70663404599139
  ]
  },
- "frontendVersion": "1.23.4"
+ "frontendVersion": "1.24.1"
  },
  "version": 0.4
  }