comfyui-workflow-templates 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
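
The substantive change in 0.1.2 is to the bundled template index JSON: every template entry gains a human-readable "title" field, the wan2.1_fun_control entry gains a "thumbnailVariant", and the former SD3.5 and SDXL categories are merged, together with new HiDream I1 entries, into a single "Image" category. The index remains a JSON array of category objects, each holding a "templates" array. As a minimal sketch of how a consumer might read the index from the installed wheel (the resource path comfyui_workflow_templates/templates/index.json is an assumption based on the package name, not something this diff confirms):

    import json
    from importlib import resources

    # Locate the bundled index inside the installed package
    # (the templates/index.json path is assumed, not confirmed by the diff).
    index_file = resources.files("comfyui_workflow_templates") / "templates" / "index.json"
    categories = json.loads(index_file.read_text(encoding="utf-8"))

    for category in categories:
        for template in category["templates"]:
            # "title" is new in 0.1.2; fall back to "name" for 0.1.0 indexes.
            label = template.get("title", template["name"])
            print(f"{category['title']} / {label} ({template['mediaType']})")

Presumably "name" remains the stable key for the workflow file while "title" is display-only, so the fallback above keeps the sketch working against both versions.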
@@ -6,12 +6,14 @@
     "templates": [
       {
         "name": "default",
+        "title": "Image Generation",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Generate images from text descriptions."
       },
       {
         "name": "image2image",
+        "title": "Image to Image",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Transform existing images using text prompts.",
@@ -19,6 +21,7 @@
       },
       {
         "name": "lora",
+        "title": "Lora",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Apply LoRA models for specialized styles or subjects.",
@@ -26,6 +29,7 @@
       },
       {
         "name": "inpaint_example",
+        "title": "Inpaint",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Edit specific parts of images seamlessly.",
@@ -34,6 +38,7 @@
       },
       {
         "name": "inpain_model_outpainting",
+        "title": "Outpaint",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Extend images beyond their original boundaries.",
@@ -42,6 +47,7 @@
       },
       {
         "name": "embedding_example",
+        "title": "Embedding",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Use textual inversion for consistent styles.",
@@ -49,6 +55,7 @@
       },
       {
         "name": "gligen_textbox_example",
+        "title": "Gligen Textbox",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Specify the location and size of objects.",
@@ -56,6 +63,7 @@
       },
       {
         "name": "lora_multiple",
+        "title": "Lora Multiple",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Combine multiple LoRA models for unique results.",
@@ -70,6 +78,7 @@
     "templates": [
       {
         "name": "flux_dev_checkpoint_example",
+        "title": "Flux Dev",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Create images using Flux development models.",
@@ -77,6 +86,7 @@
       },
       {
         "name": "flux_schnell",
+        "title": "Flux Schnell",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Generate images quickly with Flux Schnell.",
@@ -84,6 +94,7 @@
       },
       {
         "name": "flux_fill_inpaint_example",
+        "title": "Flux Inpaint",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Fill in missing parts of images.",
@@ -92,6 +103,7 @@
       },
       {
         "name": "flux_fill_outpaint_example",
+        "title": "Flux Outpaint",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Extend images using Flux outpainting.",
@@ -100,6 +112,7 @@
       },
       {
         "name": "flux_canny_model_example",
+        "title": "Flux Canny Model",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Generate images from edge detection.",
@@ -108,6 +121,7 @@
       },
       {
         "name": "flux_depth_lora_example",
+        "title": "Flux Depth Lora",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Create images with depth-aware LoRA.",
@@ -116,6 +130,7 @@
       },
       {
         "name": "flux_redux_model_example",
+        "title": "Flux Redux Model",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Transfer style from a reference image to guide image generation with Flux.",
@@ -123,6 +138,109 @@
       }
     ]
   },
+  {
+    "moduleName": "default",
+    "title": "Image",
+    "type": "image",
+    "templates": [
+      {
+        "name": "hidream_i1_dev",
+        "title": "HiDream I1 Dev",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with HiDream I1 Dev."
+      },
+      {
+        "name": "hidream_i1_fast",
+        "title": "HiDream I1 Fast",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images quickly with HiDream I1."
+      },
+      {
+        "name": "hidream_i1_full",
+        "title": "HiDream I1 Full",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with HiDream I1."
+      },
+      {
+        "name": "sd3.5_simple_example",
+        "title": "SD3.5 Simple",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images with SD 3.5.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35"
+      },
+      {
+        "name": "sd3.5_large_canny_controlnet_example",
+        "title": "SD3.5 Large Canny ControlNet",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Use edge detection to guide image generation with SD 3.5.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
+      },
+      {
+        "name": "sd3.5_large_depth",
+        "title": "SD3.5 Large Depth",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Create depth-aware images with SD 3.5.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
+      },
+      {
+        "name": "sd3.5_large_blur",
+        "title": "SD3.5 Large Blur",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images from blurred reference images with SD 3.5.",
+        "thumbnailVariant": "hoverDissolve",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
+      },
+      {
+        "name": "sdxl_simple_example",
+        "title": "SDXL Simple",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Create high-quality images with SDXL.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
+      },
+      {
+        "name": "sdxl_refiner_prompt_example",
+        "title": "SDXL Refiner Prompt",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Enhance SDXL outputs with refiners.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
+      },
+      {
+        "name": "sdxl_revision_text_prompts",
+        "title": "SDXL Revision Text Prompts",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Transfer concepts from reference images to guide image generation with SDXL.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
+      },
+      {
+        "name": "sdxl_revision_zero_positive",
+        "title": "SDXL Revision Zero Positive",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Add text prompts alongside reference images to guide image generation with SDXL.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
+      },
+      {
+        "name": "sdxlturbo_example",
+        "title": "SDXL Turbo",
+        "mediaType": "image",
+        "mediaSubtype": "webp",
+        "description": "Generate images in a single step with SDXL Turbo.",
+        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/"
+      }
+    ]
+  },
   {
     "moduleName": "default",
     "title": "ControlNet",
@@ -130,6 +248,7 @@
     "templates": [
       {
         "name": "controlnet_example",
+        "title": "Scribble ControlNet",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Control image generation with reference images.",
@@ -138,6 +257,7 @@
       },
       {
         "name": "2_pass_pose_worship",
+        "title": "Pose ControlNet 2 Pass",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Generate images from pose references.",
@@ -146,6 +266,7 @@
       },
       {
         "name": "depth_controlnet",
+        "title": "Depth ControlNet",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Create images with depth-aware generation.",
@@ -154,6 +275,7 @@
       },
       {
         "name": "depth_t2i_adapter",
+        "title": "Depth T2I Adapter",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Quickly generate depth-aware images with a T2I adapter.",
@@ -162,6 +284,7 @@
       },
       {
         "name": "mixing_controlnets",
+        "title": "Mixing ControlNets",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Combine multiple ControlNet models together.",
@@ -177,6 +300,7 @@
     "templates": [
       {
         "name": "hiresfix_latent_workflow",
+        "title": "Upscale",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Enhance image quality in latent space.",
@@ -185,6 +309,7 @@
       },
       {
         "name": "esrgan_example",
+        "title": "ESRGAN",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Use upscale models to enhance image quality.",
@@ -193,6 +318,7 @@
       },
       {
         "name": "hiresfix_esrgan_workflow",
+        "title": "HiresFix ESRGAN Workflow",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Use upscale models during intermediate steps.",
@@ -201,6 +327,7 @@
       },
       {
         "name": "latent_upscale_different_prompt_model",
+        "title": "Latent Upscale Different Prompt Model",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Upscale and change prompt across passes.",
@@ -216,6 +343,7 @@
     "templates": [
       {
         "name": "text_to_video_wan",
+        "title": "Wan 2.1 Text to Video",
         "description": "Quickly Generate videos from text descriptions.",
         "mediaType": "image",
         "mediaSubtype": "webp",
@@ -223,6 +351,7 @@
       },
       {
         "name": "image_to_video_wan",
+        "title": "Wan 2.1 Image to Video",
         "description": "Quickly Generate videos from images.",
         "mediaType": "image",
         "mediaSubtype": "webp",
@@ -230,6 +359,7 @@
       },
       {
         "name": "wan2.1_fun_inp",
+        "title": "Wan 2.1 Inpainting",
         "description": "Create videos from start and end frames.",
         "mediaType": "image",
         "mediaSubtype": "webp",
@@ -237,13 +367,16 @@
       },
       {
         "name": "wan2.1_fun_control",
+        "title": "Wan 2.1 ControlNet",
         "description": "Guide video generation with pose, depth, edge controls and more.",
         "mediaType": "image",
         "mediaSubtype": "webp",
+        "thumbnailVariant": "hoverDissolve",
         "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control"
       },
       {
         "name": "ltxv_text_to_video",
+        "title": "LTXV Text to Video",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Generate videos from text descriptions.",
@@ -251,6 +384,7 @@
       },
       {
         "name": "ltxv_image_to_video",
+        "title": "LTXV Image to Video",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Convert still images into videos.",
@@ -258,6 +392,7 @@
       },
       {
         "name": "mochi_text_to_video_example",
+        "title": "Mochi Text to Video",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Create videos with Mochi model.",
@@ -265,6 +400,7 @@
       },
       {
         "name": "hunyuan_video_text_to_video",
+        "title": "Hunyuan Video Text to Video",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Generate videos using Hunyuan model.",
@@ -272,6 +408,7 @@
       },
       {
         "name": "image_to_video",
+        "title": "SVD Image to Video",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Transform images into animated videos.",
@@ -279,6 +416,7 @@
       },
       {
         "name": "txt_to_image_to_video",
+        "title": "SVD Text to Image to Video",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Generate images from text and then convert them into videos.",
@@ -286,86 +424,6 @@
       }
     ]
   },
-  {
-    "moduleName": "default",
-    "title": "SD3.5",
-    "type": "image",
-    "templates": [
-      {
-        "name": "sd3.5_simple_example",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Generate images with SD 3.5.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35"
-      },
-      {
-        "name": "sd3.5_large_canny_controlnet_example",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Use edge detection to guide image generation with SD 3.5.",
-        "thumbnailVariant": "hoverDissolve",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
-      },
-      {
-        "name": "sd3.5_large_depth",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Create depth-aware images with SD 3.5.",
-        "thumbnailVariant": "hoverDissolve",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
-      },
-      {
-        "name": "sd3.5_large_blur",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Generate images from blurred reference images with SD 3.5.",
-        "thumbnailVariant": "hoverDissolve",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
-      }
-    ]
-  },
-  {
-    "moduleName": "default",
-    "title": "SDXL",
-    "type": "image",
-    "templates": [
-      {
-        "name": "sdxl_simple_example",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Create high-quality images with SDXL.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
-      },
-      {
-        "name": "sdxl_refiner_prompt_example",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Enhance SDXL outputs with refiners.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
-      },
-      {
-        "name": "sdxl_revision_text_prompts",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Transfer concepts from reference images to guide image generation with SDXL.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
-      },
-      {
-        "name": "sdxl_revision_zero_positive",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Add text prompts alongside reference images to guide image generation with SDXL.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
-      },
-      {
-        "name": "sdxlturbo_example",
-        "mediaType": "image",
-        "mediaSubtype": "webp",
-        "description": "Generate images in a single step with SDXL Turbo.",
-        "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/"
-      }
-    ]
-  },
   {
     "moduleName": "default",
     "title": "Area Composition",
@@ -373,6 +431,7 @@
     "templates": [
       {
         "name": "area_composition",
+        "title": "Area Composition",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Control image composition with areas.",
@@ -380,6 +439,7 @@
       },
       {
         "name": "area_composition_reversed",
+        "title": "Area Composition Reversed",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Reverse area composition workflow.",
@@ -387,6 +447,7 @@
       },
       {
         "name": "area_composition_square_area_for_subject",
+        "title": "Area Composition Square Area for Subject",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Create consistent subject placement.",
@@ -401,6 +462,7 @@
     "templates": [
      {
         "name": "hunyuan3d-non-multiview-train",
+        "title": "Hunyuan3D 2.0",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Use Hunyuan3D 2.0 to generate models from a single view.",
@@ -408,6 +470,7 @@
       },
       {
         "name": "hunyuan-3d-multiview-elf",
+        "title": "Hunyuan3D 2.0 MV",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": " Use Hunyuan3D 2mv to generate models from multiple views.",
@@ -416,6 +479,7 @@
       },
       {
         "name": "hunyuan-3d-turbo",
+        "title": "Hunyuan3D 2.0 MV Turbo",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Use Hunyuan3D 2mv turbo to generate models from multiple views.",
@@ -424,6 +488,7 @@
       },
       {
         "name": "stable_zero123_example",
+        "title": "Stable Zero123",
         "mediaType": "image",
         "mediaSubtype": "webp",
         "description": "Generate 3D views from single images.",
@@ -438,6 +503,7 @@
     "templates": [
       {
         "name": "stable_audio_example",
+        "title": "Stable Audio",
         "mediaType": "audio",
         "mediaSubtype": "mp3",
         "description": "Generate audio from text descriptions.",