comfyui-workflow-templates 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
@@ -6,12 +6,14 @@
  "templates": [
  {
  "name": "default",
+ "title": "Image Generation",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images from text descriptions."
  },
  {
  "name": "image2image",
+ "title": "Image to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Transform existing images using text prompts.",
@@ -19,6 +21,7 @@
  },
  {
  "name": "lora",
+ "title": "Lora",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Apply LoRA models for specialized styles or subjects.",
@@ -26,6 +29,7 @@
  },
  {
  "name": "inpaint_example",
+ "title": "Inpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Edit specific parts of images seamlessly.",
@@ -34,6 +38,7 @@
  },
  {
  "name": "inpain_model_outpainting",
+ "title": "Outpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Extend images beyond their original boundaries.",
@@ -42,6 +47,7 @@
  },
  {
  "name": "embedding_example",
+ "title": "Embedding",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Use textual inversion for consistent styles.",
@@ -49,6 +55,7 @@
  },
  {
  "name": "gligen_textbox_example",
+ "title": "Gligen Textbox",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Specify the location and size of objects.",
@@ -56,6 +63,7 @@
  },
  {
  "name": "lora_multiple",
+ "title": "Lora Multiple",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Combine multiple LoRA models for unique results.",
@@ -70,6 +78,7 @@
  "templates": [
  {
  "name": "flux_dev_checkpoint_example",
+ "title": "Flux Dev",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Create images using Flux development models.",
@@ -77,6 +86,7 @@
  },
  {
  "name": "flux_schnell",
+ "title": "Flux Schnell",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images quickly with Flux Schnell.",
@@ -84,6 +94,7 @@
  },
  {
  "name": "flux_fill_inpaint_example",
+ "title": "Flux Inpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Fill in missing parts of images.",
@@ -92,6 +103,7 @@
  },
  {
  "name": "flux_fill_outpaint_example",
+ "title": "Flux Outpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Extend images using Flux outpainting.",
@@ -100,6 +112,7 @@
  },
  {
  "name": "flux_canny_model_example",
+ "title": "Flux Canny Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images from edge detection.",
@@ -108,6 +121,7 @@
  },
  {
  "name": "flux_depth_lora_example",
+ "title": "Flux Depth Lora",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Create images with depth-aware LoRA.",
@@ -116,6 +130,7 @@
  },
  {
  "name": "flux_redux_model_example",
+ "title": "Flux Redux Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Transfer style from a reference image to guide image generation with Flux.",
@@ -130,24 +145,28 @@
  "templates": [
  {
  "name": "hidream_i1_dev",
+ "title": "HiDream I1 Dev",
  "mediaType": "image",
- "mediaSubtype": "png",
+ "mediaSubtype": "webp",
  "description": "Generate images with HiDream I1 Dev."
  },
  {
  "name": "hidream_i1_fast",
+ "title": "HiDream I1 Fast",
  "mediaType": "image",
- "mediaSubtype": "png",
+ "mediaSubtype": "webp",
  "description": "Generate images quickly with HiDream I1."
  },
  {
  "name": "hidream_i1_full",
+ "title": "HiDream I1 Full",
  "mediaType": "image",
- "mediaSubtype": "png",
+ "mediaSubtype": "webp",
  "description": "Generate images with HiDream I1."
  },
  {
  "name": "sd3.5_simple_example",
+ "title": "SD3.5 Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images with SD 3.5.",
@@ -155,6 +174,7 @@
  },
  {
  "name": "sd3.5_large_canny_controlnet_example",
+ "title": "SD3.5 Large Canny ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Use edge detection to guide image generation with SD 3.5.",
@@ -163,6 +183,7 @@
  },
  {
  "name": "sd3.5_large_depth",
+ "title": "SD3.5 Large Depth",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Create depth-aware images with SD 3.5.",
@@ -171,6 +192,7 @@
  },
  {
  "name": "sd3.5_large_blur",
+ "title": "SD3.5 Large Blur",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images from blurred reference images with SD 3.5.",
@@ -179,6 +201,7 @@
  },
  {
  "name": "sdxl_simple_example",
+ "title": "SDXL Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Create high-quality images with SDXL.",
@@ -186,6 +209,7 @@
  },
  {
  "name": "sdxl_refiner_prompt_example",
+ "title": "SDXL Refiner Prompt",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Enhance SDXL outputs with refiners.",
@@ -193,6 +217,7 @@
  },
  {
  "name": "sdxl_revision_text_prompts",
+ "title": "SDXL Revision Text Prompts",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Transfer concepts from reference images to guide image generation with SDXL.",
@@ -200,6 +225,7 @@
  },
  {
  "name": "sdxl_revision_zero_positive",
+ "title": "SDXL Revision Zero Positive",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Add text prompts alongside reference images to guide image generation with SDXL.",
@@ -207,6 +233,7 @@
  },
  {
  "name": "sdxlturbo_example",
+ "title": "SDXL Turbo",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images in a single step with SDXL Turbo.",
@@ -221,6 +248,7 @@
  "templates": [
  {
  "name": "controlnet_example",
+ "title": "Scribble ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Control image generation with reference images.",
@@ -229,6 +257,7 @@
  },
  {
  "name": "2_pass_pose_worship",
+ "title": "Pose ControlNet 2 Pass",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images from pose references.",
@@ -237,6 +266,7 @@
  },
  {
  "name": "depth_controlnet",
+ "title": "Depth ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Create images with depth-aware generation.",
@@ -245,6 +275,7 @@
  },
  {
  "name": "depth_t2i_adapter",
+ "title": "Depth T2I Adapter",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Quickly generate depth-aware images with a T2I adapter.",
@@ -253,6 +284,7 @@
  },
  {
  "name": "mixing_controlnets",
+ "title": "Mixing ControlNets",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Combine multiple ControlNet models together.",
@@ -268,6 +300,7 @@
  "templates": [
  {
  "name": "hiresfix_latent_workflow",
+ "title": "Upscale",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Enhance image quality in latent space.",
@@ -276,6 +309,7 @@
  },
  {
  "name": "esrgan_example",
+ "title": "ESRGAN",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Use upscale models to enhance image quality.",
@@ -284,6 +318,7 @@
  },
  {
  "name": "hiresfix_esrgan_workflow",
+ "title": "HiresFix ESRGAN Workflow",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Use upscale models during intermediate steps.",
@@ -292,6 +327,7 @@
  },
  {
  "name": "latent_upscale_different_prompt_model",
+ "title": "Latent Upscale Different Prompt Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Upscale and change prompt across passes.",
@@ -307,6 +343,7 @@
  "templates": [
  {
  "name": "text_to_video_wan",
+ "title": "Wan 2.1 Text to Video",
  "description": "Quickly Generate videos from text descriptions.",
  "mediaType": "image",
  "mediaSubtype": "webp",
@@ -314,6 +351,7 @@
  },
  {
  "name": "image_to_video_wan",
+ "title": "Wan 2.1 Image to Video",
  "description": "Quickly Generate videos from images.",
  "mediaType": "image",
  "mediaSubtype": "webp",
@@ -321,6 +359,7 @@
  },
  {
  "name": "wan2.1_fun_inp",
+ "title": "Wan 2.1 Inpainting",
  "description": "Create videos from start and end frames.",
  "mediaType": "image",
  "mediaSubtype": "webp",
@@ -328,13 +367,24 @@
  },
  {
  "name": "wan2.1_fun_control",
+ "title": "Wan 2.1 ControlNet",
  "description": "Guide video generation with pose, depth, edge controls and more.",
  "mediaType": "image",
  "mediaSubtype": "webp",
+ "thumbnailVariant": "hoverDissolve",
  "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control"
  },
+ {
+ "name": "wan2.1_flf2v_720_f16",
+ "title": "Wan 2.1 FLF2V 720p F16",
+ "description": "Generate video through controlling the first and last frames.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf"
+ },
  {
  "name": "ltxv_text_to_video",
+ "title": "LTXV Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate videos from text descriptions.",
@@ -342,6 +392,7 @@
  },
  {
  "name": "ltxv_image_to_video",
+ "title": "LTXV Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Convert still images into videos.",
@@ -349,6 +400,7 @@
  },
  {
  "name": "mochi_text_to_video_example",
+ "title": "Mochi Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Create videos with Mochi model.",
@@ -356,6 +408,7 @@
  },
  {
  "name": "hunyuan_video_text_to_video",
+ "title": "Hunyuan Video Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate videos using Hunyuan model.",
@@ -363,6 +416,7 @@
  },
  {
  "name": "image_to_video",
+ "title": "SVD Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Transform images into animated videos.",
@@ -370,6 +424,7 @@
  },
  {
  "name": "txt_to_image_to_video",
+ "title": "SVD Text to Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate images from text and then convert them into videos.",
@@ -384,6 +439,7 @@
  "templates": [
  {
  "name": "area_composition",
+ "title": "Area Composition",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Control image composition with areas.",
@@ -391,6 +447,7 @@
  },
  {
  "name": "area_composition_reversed",
+ "title": "Area Composition Reversed",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Reverse area composition workflow.",
@@ -398,6 +455,7 @@
  },
  {
  "name": "area_composition_square_area_for_subject",
+ "title": "Area Composition Square Area for Subject",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Create consistent subject placement.",
@@ -412,6 +470,7 @@
  "templates": [
  {
  "name": "hunyuan3d-non-multiview-train",
+ "title": "Hunyuan3D 2.0",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Use Hunyuan3D 2.0 to generate models from a single view.",
@@ -419,6 +478,7 @@
  },
  {
  "name": "hunyuan-3d-multiview-elf",
+ "title": "Hunyuan3D 2.0 MV",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": " Use Hunyuan3D 2mv to generate models from multiple views.",
@@ -427,6 +487,7 @@
  },
  {
  "name": "hunyuan-3d-turbo",
+ "title": "Hunyuan3D 2.0 MV Turbo",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Use Hunyuan3D 2mv turbo to generate models from multiple views.",
@@ -435,6 +496,7 @@
  },
  {
  "name": "stable_zero123_example",
+ "title": "Stable Zero123",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Generate 3D views from single images.",
@@ -449,6 +511,7 @@
  "templates": [
  {
  "name": "stable_audio_example",
+ "title": "Stable Audio",
  "mediaType": "audio",
  "mediaSubtype": "mp3",
  "description": "Generate audio from text descriptions.",