comfyui-workflow-templates-media-other 0.3.10__py3-none-any.whl → 0.3.61__py3-none-any.whl

This diff shows the published contents of the two package versions as they appear in a supported public registry. It is provided for informational purposes only and reflects the changes between those published versions.
Files changed (84)
  1. comfyui_workflow_templates_media_other/templates/04_hunyuan_3d_2.1_subgraphed.json +6 -6
  2. comfyui_workflow_templates_media_other/templates/05_audio_ace_step_1_t2a_song_subgraphed.json +81 -60
  3. comfyui_workflow_templates_media_other/templates/3d_hunyuan3d-v2.1.json +2 -2
  4. comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model.json +3 -3
  5. comfyui_workflow_templates_media_other/templates/3d_hunyuan3d_multiview_to_model_turbo.json +3 -3
  6. comfyui_workflow_templates_media_other/templates/audio_ace_step_1_m2m_editing.json +3 -3
  7. comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_instrumentals.json +4 -4
  8. comfyui_workflow_templates_media_other/templates/audio_ace_step_1_t2a_song.json +3 -3
  9. comfyui_workflow_templates_media_other/templates/audio_stable_audio_example.json +2 -2
  10. comfyui_workflow_templates_media_other/templates/gsc_starter_1-1.webp +0 -0
  11. comfyui_workflow_templates_media_other/templates/gsc_starter_1.json +839 -0
  12. comfyui_workflow_templates_media_other/templates/gsc_starter_2-1.webp +0 -0
  13. comfyui_workflow_templates_media_other/templates/gsc_starter_2.json +7037 -0
  14. comfyui_workflow_templates_media_other/templates/gsc_starter_3-1.webp +0 -0
  15. comfyui_workflow_templates_media_other/templates/gsc_starter_3.json +2550 -0
  16. comfyui_workflow_templates_media_other/templates/hidream_e1_full.json +3 -3
  17. comfyui_workflow_templates_media_other/templates/hidream_i1_dev.json +3 -3
  18. comfyui_workflow_templates_media_other/templates/hidream_i1_fast.json +3 -3
  19. comfyui_workflow_templates_media_other/templates/hidream_i1_full.json +3 -3
  20. comfyui_workflow_templates_media_other/templates/image_z_image_turbo-1.webp +0 -0
  21. comfyui_workflow_templates_media_other/templates/image_z_image_turbo.json +756 -0
  22. comfyui_workflow_templates_media_other/templates/image_z_image_turbo_fun_union_controlnet-1.webp +0 -0
  23. comfyui_workflow_templates_media_other/templates/image_z_image_turbo_fun_union_controlnet-2.webp +0 -0
  24. comfyui_workflow_templates_media_other/templates/index.ar.json +2187 -1591
  25. comfyui_workflow_templates_media_other/templates/index.es.json +2189 -1598
  26. comfyui_workflow_templates_media_other/templates/index.fr.json +2188 -1597
  27. comfyui_workflow_templates_media_other/templates/index.ja.json +2179 -1588
  28. comfyui_workflow_templates_media_other/templates/index.json +2182 -1592
  29. comfyui_workflow_templates_media_other/templates/index.ko.json +2179 -1588
  30. comfyui_workflow_templates_media_other/templates/index.pt-BR.json +3117 -0
  31. comfyui_workflow_templates_media_other/templates/index.ru.json +2188 -1597
  32. comfyui_workflow_templates_media_other/templates/index.schema.json +36 -3
  33. comfyui_workflow_templates_media_other/templates/index.tr.json +2185 -1589
  34. comfyui_workflow_templates_media_other/templates/index.zh-TW.json +2188 -1597
  35. comfyui_workflow_templates_media_other/templates/index.zh.json +2180 -1589
  36. comfyui_workflow_templates_media_other/templates/sd3.5_large_blur.json +3 -3
  37. comfyui_workflow_templates_media_other/templates/sd3.5_large_depth.json +4 -4
  38. comfyui_workflow_templates_media_other/templates/sd3.5_simple_example.json +181 -40
  39. comfyui_workflow_templates_media_other/templates/templates-color_illustration-1.webp +0 -0
  40. comfyui_workflow_templates_media_other/templates/templates-color_illustration-2.webp +0 -0
  41. comfyui_workflow_templates_media_other/templates/templates-color_illustration.json +176 -0
  42. comfyui_workflow_templates_media_other/templates/templates-image_to_real-1.webp +0 -0
  43. comfyui_workflow_templates_media_other/templates/templates-image_to_real-2.webp +0 -0
  44. comfyui_workflow_templates_media_other/templates/templates-image_to_real.json +1195 -0
  45. comfyui_workflow_templates_media_other/templates/wan2.1_flf2v_720_f16.json +2 -2
  46. comfyui_workflow_templates_media_other/templates/wan2.1_fun_control.json +2 -2
  47. comfyui_workflow_templates_media_other/templates/wan2.1_fun_inp.json +2 -2
  48. {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/METADATA +1 -1
  49. comfyui_workflow_templates_media_other-0.3.61.dist-info/RECORD +77 -0
  50. comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-1.webp +0 -0
  51. comfyui_workflow_templates_media_other/templates/2_pass_pose_worship-2.webp +0 -0
  52. comfyui_workflow_templates_media_other/templates/2_pass_pose_worship.json +0 -1256
  53. comfyui_workflow_templates_media_other/templates/ByteDance-Seedance_00003_.json +0 -210
  54. comfyui_workflow_templates_media_other/templates/area_composition-1.webp +0 -0
  55. comfyui_workflow_templates_media_other/templates/area_composition.json +0 -1626
  56. comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject-1.webp +0 -0
  57. comfyui_workflow_templates_media_other/templates/area_composition_square_area_for_subject.json +0 -1114
  58. comfyui_workflow_templates_media_other/templates/default-1.webp +0 -0
  59. comfyui_workflow_templates_media_other/templates/default.json +0 -547
  60. comfyui_workflow_templates_media_other/templates/embedding_example-1.webp +0 -0
  61. comfyui_workflow_templates_media_other/templates/embedding_example.json +0 -267
  62. comfyui_workflow_templates_media_other/templates/esrgan_example-1.webp +0 -0
  63. comfyui_workflow_templates_media_other/templates/esrgan_example-2.webp +0 -0
  64. comfyui_workflow_templates_media_other/templates/esrgan_example.json +0 -635
  65. comfyui_workflow_templates_media_other/templates/gligen_textbox_example-1.webp +0 -0
  66. comfyui_workflow_templates_media_other/templates/gligen_textbox_example.json +0 -686
  67. comfyui_workflow_templates_media_other/templates/hidream_e1_1-1.webp +0 -0
  68. comfyui_workflow_templates_media_other/templates/hidream_e1_1-2.webp +0 -0
  69. comfyui_workflow_templates_media_other/templates/hidream_e1_1.json +0 -1133
  70. comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-1.webp +0 -0
  71. comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow-2.webp +0 -0
  72. comfyui_workflow_templates_media_other/templates/hiresfix_esrgan_workflow.json +0 -1029
  73. comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-1.webp +0 -0
  74. comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow-2.webp +0 -0
  75. comfyui_workflow_templates_media_other/templates/hiresfix_latent_workflow.json +0 -772
  76. comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model-1.webp +0 -0
  77. comfyui_workflow_templates_media_other/templates/latent_upscale_different_prompt_model.json +0 -929
  78. comfyui_workflow_templates_media_other/templates/lora-1.webp +0 -0
  79. comfyui_workflow_templates_media_other/templates/lora.json +0 -615
  80. comfyui_workflow_templates_media_other/templates/lora_multiple-1.webp +0 -0
  81. comfyui_workflow_templates_media_other/templates/lora_multiple.json +0 -656
  82. comfyui_workflow_templates_media_other-0.3.10.dist-info/RECORD +0 -92
  83. {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/WHEEL +0 -0
  84. {comfyui_workflow_templates_media_other-0.3.10.dist-info → comfyui_workflow_templates_media_other-0.3.61.dist-info}/top_level.txt +0 -0
@@ -1,328 +1,392 @@
  [
  {
  "moduleName": "default",
- "isEssential": true,
- "title": "Getting Started",
+ "category": "GENERATION TYPE",
+ "icon": "icon-[lucide--star]",
+ "title": "Use cases",
  "type": "image",
  "templates": [
  {
- "name": "01_qwen_t2i_subgraphed",
- "title": "Text to Image (New)",
+ "name": "templates-color_illustration",
+ "title": "Add Color to Line Art Illustration",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from text prompts using the Qwen-Image model.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
- "tags": ["Text to Image", "Image"],
- "models": ["Qwen-Image"],
- "date": "2025-10-17",
- "size": 31772020572
+ "thumbnailVariant": "compareSlider",
+ "description": "Input a black and white illustration and generate a colored output.",
+ "tags": ["API"],
+ "models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+ "openSource": false,
+ "date": "2025-12-20",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0
  },
  {
- "name": "02_qwen_Image_edit_subgraphed",
- "title": "Image Editing (New)",
+ "name": "templates-image_to_real",
+ "title": "Illustration to Realism",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Edit your images with Qwen-Image-Edit, the latest OSS model",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
- "tags": ["Image to Image", "Image Edit", "ControlNet"],
- "models": ["Qwen-Image"],
- "date": "2025-10-17",
- "size": 31772020572
+ "thumbnailVariant": "compareSlider",
+ "description": "Input an illustration and generate a hyper realistic version using Qwen Image Edit 2509.",
+ "tags": ["Style Transfer"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-12-20",
+ "size": 0,
+ "vram": 0
  },
  {
- "name": "03_video_wan2_2_14B_i2v_subgraphed",
- "title": "Image to Video (New)",
- "description": "Generate videos from an input image using Wan2.2 14B",
+ "name": "templates-8x8_grid-pfp",
+ "title": "Profile Picture Stylized Variations",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
- "tags": ["Image to Video", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-10-17",
- "size": 38031935406
+ "description": "Upload your profile picture, enter a theme and generate 64 variations.",
+ "tags": ["API"],
+ "models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+ "openSource": false,
+ "date": "2025-12-18",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+ "usage": 51
  },
  {
- "name": "04_hunyuan_3d_2.1_subgraphed",
- "title": "Image to 3D (New)",
+ "name": "templates-subject_product_swap",
+ "title": "Swap Product in Character’s Hand, UGC Style",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
- "tags": ["Image to 3D", "3D"],
- "models": ["Hunyuan3D"],
- "date": "2025-10-17",
- "tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
- "size": 4928474972
+ "description": "Upload a photo of a character holding a product and your brands product. Generate an image with the products swapped.",
+ "tags": ["Product", "Replacement", "API"],
+ "models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+ "openSource": false,
+ "date": "2025-12-18",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 63
  },
  {
- "name": "05_audio_ace_step_1_t2a_song_subgraphed",
- "title": "Text to Song (New)",
+ "name": "templates-subject_holding_product",
+ "title": "Add Product to Character’s Hand, AI UGC",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate songs from text prompts using ACE-Step v1",
- "tags": ["Text to Audio", "Audio"],
- "models": ["ACE-Step"],
- "date": "2025-10-17",
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
- "size": 7698728878
+ "description": "Upload a photo of your character and your product. Generate an image of that character holding the product.",
+ "tags": ["Product", "Portrait", "API"],
+ "models": ["Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+ "openSource": false,
+ "date": "2025-12-18",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 43
  },
  {
- "name": "default",
- "title": "Image Generation",
+ "name": "templates-car_product",
+ "title": "1 Image to Car Product Shotse",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images from text prompts.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
- "tags": ["Text to Image", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "size": 2136746230,
- "vram": 3092376453
+ "description": "Upload a photo of your vehicle and generate a studio quality video of the vehicle from multiple angles.",
+ "tags": ["Product", "Image to Video", "API", "FLF2V"],
+ "models": ["Seedream", "Kling"],
+ "openSource": false,
+ "date": "2025-12-18",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-videohelpersuite"],
+ "usage": 70
  },
  {
- "name": "image2image",
- "title": "Image to Image",
+ "name": "templates-photo_to_product_vid",
+ "title": "Phone Image to Product Video: Shoe",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "hoverDissolve",
- "description": "Transform existing images using text prompts.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
- "tags": ["Image to Image", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "size": 2136746230,
- "vram": 3092376453
+ "description": "Take a picture with your phone, upload it and generate a studio grade product video.",
+ "tags": ["Product", "Image to Video", "API"],
+ "models": ["Seedream", "Hailuo"],
+ "openSource": false,
+ "date": "2025-12-18",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-videohelpersuite"],
+ "usage": 124
  },
  {
- "name": "lora",
- "title": "LoRA",
+ "name": "templates-stitched_vid_contact_sheet",
+ "title": "Character & Outfit to Fashion Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with LoRA models for specialized styles or subjects.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
- "tags": ["Text to Image", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "size": 2437393940,
- "vram": 3092376453
+ "description": "Upload your character and clothing items or accessories. Generate a fashion photograph base and use as a reference to 8x grid images, together with multi-KeyFrame Video Stitching ",
+ "tags": ["Fashion", "Image to Video", "FLF2V", "API"],
+ "models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Kling", "Kling O1", "OpenAI"],
+ "openSource": false,
+ "date": "2025-12-18",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+ "usage": 78
  },
  {
- "name": "lora_multiple",
- "title": "LoRA Multiple",
+ "name": "templates-assemble_dieline",
+ "title": "Generate Brand Packaging from Dieline",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images by combining multiple LoRA models.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
- "tags": ["Text to Image", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "size": 2437393940,
- "vram": 3350074491
+ "thumbnailVariant": "hoverDissolve",
+ "description": "Upload a dieline of your product and assemble into a 3D package.",
+ "tags": ["Product", "Image Edit"],
+ "models": ["Google Gemini Image", "Nano Banana Pro", "Google"],
+ "openSource": false,
+ "date": "2025-12-15",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 12
  },
  {
- "name": "inpaint_example",
- "title": "Inpaint",
+ "name": "templates-fashion_shoot_vton",
+ "title": "Character + Clothing (OOTD) Flat Lay to Studio Photoshoot",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Edit specific parts of images seamlessly.",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
- "tags": ["Inpainting", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "size": 5218385265,
- "vram": 4101693768
+ "description": "Upload an image of your character and a flatlay image of your clothing items. Generate 4 fashion editorial photographs of your character in the outfit. Select which image to upscale and add back details.",
+ "tags": ["Fashion", "Image Edit"],
+ "models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+ "openSource": false,
+ "date": "2025-12-15",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+ "usage": 104
  },
  {
- "name": "inpaint_model_outpainting",
- "title": "Outpaint",
+ "name": "templates-fashion_shoot_prompt_doodle",
+ "title": "Selfie + Text Prompt to Studio Photoshoot Doodle",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Extend images beyond their original boundaries.",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
- "tags": ["Outpainting", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "size": 5218385265,
- "vram": 4101693768
+ "description": "Upload a selfie and describe your outfit in the prompt. Generate 4 fashion editorial photographs with fun doodle illustrations. Select which image to upscale and add back face details.",
+ "tags": ["Fashion", "Image Edit"],
+ "models": ["Google Gemini Image", "Nano Banana Pro", "Google", "Gemini3 Pro Image Preview"],
+ "openSource": false,
+ "date": "2025-12-15",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+ "usage": 20
  },
  {
- "name": "embedding_example",
- "title": "Embedding",
+ "name": "templates-poster_product_integration",
+ "title": "Generate Poster/Ad Asset with your Product",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images using textual inversion for consistent styles.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
- "tags": ["Text to Image", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "size": 5218385265,
- "vram": 4123168604
+ "thumbnailVariant": "compareSlider",
+ "description": "Upload your product and a simple text prompt for the poster or ad design. Iterate on the look before swapping the product into the generate layout.",
+ "tags": ["Product", "Image Edit"],
+ "models": ["ByteDance", "Seedream", "Google Gemini"],
+ "openSource": false,
+ "date": "2025-12-15",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui_essentials"],
+ "usage": 37
  },
  {
- "name": "gligen_textbox_example",
- "title": "Gligen Textbox",
+ "name": "templates-3D_logo_texture_animation",
+ "title": "Dynamic 3D Logo Animations",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with precise object placement using text boxes.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
- "tags": ["Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "size": 2974264852,
- "vram": 4080218931
+ "description": "Upload a vector image of your logo, and prompt your desired texture. Generate a textured 3D first and last frame with automated prompting for the final animation.",
+ "tags": ["Brand Design", "FLF2V"],
+ "models": ["ByteDance", "Seedream", "Google Gemini", "Nano Banana Pro"],
+ "openSource": false,
+ "date": "2025-12-15",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 42
  },
  {
- "name": "area_composition",
- "title": "Area Composition",
+ "name": "templates-product_scene_relight",
+ "title": "Composite your Product + Scene and Relight",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images by controlling composition with defined areas.",
- "tags": ["Text to Image", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/",
- "size": 2469606195,
- "vram": 6184752906
+ "thumbnailVariant": "compareSlider",
+ "description": "Upload an image of your product and background. Composite them and seamlessly relight and fuse together using Seedream 4.5.",
+ "tags": ["Product", "Image Edit", "Relight"],
+ "models": ["ByteDance", "Seedream"],
+ "openSource": false,
+ "date": "2025-12-15",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 11
  },
  {
- "name": "area_composition_square_area_for_subject",
- "title": "Area Composition Square Area for Subject",
+ "name": "templates-textured_logo_elements",
+ "title": "Apply Texture + Elements to Logo",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with consistent subject placement using area composition.",
- "tags": ["Text to Image", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition",
- "size": 2469606195,
- "vram": 5927054868
+ "description": "Upload your logo, texture and elements. Generate a video of the textured logo for an on brand asset.",
+ "tags": ["Brand Design", "Image to Video"],
+ "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
+ "openSource": false,
+ "date": "2025-12-11",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 255
  },
  {
- "name": "hiresfix_latent_workflow",
- "title": "Upscale",
+ "name": "templates-qwen_image_edit-crop_and_stitch-fusion",
+ "title": "Relight Composited Product",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Upscale images by enhancing quality in latent space.",
  "thumbnailVariant": "compareSlider",
- "tags": ["Upscale", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/",
- "size": 2136746230,
- "vram": 3929895076
+ "description": "Upload a composited image of your product, draw a mask in the mask editor and relight your product into the scene.",
+ "tags": ["Image Edit", "Relight"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-12-11",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-inpaint-cropandstitch"],
+ "usage": 361
  },
  {
- "name": "esrgan_example",
- "title": "ESRGAN",
+ "name": "templates-textured_logotype-v2.1",
+ "title": "Apply Texture to Logo",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Upscale images using ESRGAN models to enhance quality.",
- "thumbnailVariant": "compareSlider",
- "tags": ["Upscale", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/",
- "size": 2201170739,
- "vram": 6442450944
+ "description": "Upload your logotype and apply a texture + elements for on brand asset",
+ "tags": ["Brand Design", "Image to Video", "FLF2V"],
+ "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
+ "date": "2025-12-03",
+ "openSource": false,
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 299
  },
  {
- "name": "hiresfix_esrgan_workflow",
- "title": "HiresFix ESRGAN Workflow",
+ "name": "templates-product_ad-v2.0",
+ "title": "Swap Product Into a Reference AD",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Upscale images using ESRGAN models during intermediate generation steps.",
- "thumbnailVariant": "compareSlider",
- "tags": ["Upscale", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling",
- "size": 2201170739,
- "vram": 6442450944
+ "description": "Create static ads for your product in the style of a reference advertisement",
+ "tags": ["Style Reference"],
+ "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google", "ByteDance", "Seedance"],
+ "date": "2025-12-03",
+ "openSource": false,
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 222
  },
  {
- "name": "latent_upscale_different_prompt_model",
- "title": "Latent Upscale Different Prompt Model",
+ "name": "templates-6-key-frames",
+ "title": "Multi-Keyframe Video Stitching",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Upscale images while changing prompts across generation passes.",
- "thumbnailVariant": "zoomHover",
- "tags": ["Upscale", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples",
- "size": 4262755041,
- "vram": 5153960755
+ "description": "Creates a smooth video using 6 key frames.It auto fills in the motion between frames and stitchs the segments together seamlessly.",
+ "tags": ["Image to Video", "FLF2V"],
+ "models": ["Wan2.2"],
+ "date": "2025-12-03",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "usage": 1972
  },
  {
- "name": "controlnet_example",
- "title": "Scribble ControlNet",
+ "name": "templates-9grid_social_media-v2.0",
+ "title": "3x3 Grid For Product Ads",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images guided by scribble reference images using ControlNet.",
- "thumbnailVariant": "hoverDissolve",
- "tags": ["ControlNet", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/",
- "size": 3189013217,
- "vram": 6442450944
+ "description": "Upload your product and enter a brief prompt for each grid position in a 3x3 grid. Generates 9 distinct images. Select the images you like and upscale to 4k using your product as reference.",
+ "tags": ["Image Edit", "Image"],
+ "models": ["Nano Banana Pro", "Google"],
+ "date": "2025-12-06",
+ "searchRank": 8,
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
+ "usage": 466
  },
  {
- "name": "2_pass_pose_worship",
- "title": "Pose ControlNet 2 Pass",
+ "name": "templates-poster_to_2x2_mockups-v2.0",
+ "title": "Poster Scene Mockups",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images guided by pose references using ControlNet.",
- "thumbnailVariant": "hoverDissolve",
- "tags": ["ControlNet", "Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet",
- "size": 4660039516,
- "vram": 6442450944
+ "description": "Upload a poster/ad design and with a short input about your brand, generate 4 mockups in multiple scenes.",
+ "tags": ["Image Edit", "Mockup"],
+ "models": ["Nano Banana Pro", "Google"],
+ "date": "2025-12-06",
+ "searchRank": 8,
+ "openSource": false,
+ "size": 0,
+ "requiresCustomNodes": ["comfyui_essentials"],
+ "vram": 0,
+ "usage": 61
  },
  {
- "name": "depth_controlnet",
- "title": "Depth ControlNet",
+ "name": "template-multistyle-magazine-cover-nanobananapro",
+ "title": "Magazine Cover & Package Design",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images guided by depth information using ControlNet.",
- "thumbnailVariant": "hoverDissolve",
- "tags": ["ControlNet", "Image", "Text to Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
- "size": 2888365507,
- "vram": 6442450944
+ "description": "Design the text layout for your magazine cover photo, and explore packaging options for it.",
+ "tags": ["Image Edit", "Mockup", "Layout Design"],
+ "models": ["Nano Banana Pro", "Google"],
+ "date": "2025-12-06",
+ "searchRank": 8,
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 87
  },
  {
- "name": "depth_t2i_adapter",
- "title": "Depth T2I Adapter",
+ "name": "templates-1_click_multiple_scene_angles-v1.0",
+ "title": "1 click Multiple Scene Angles",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images guided by depth information using T2I adapter.",
- "thumbnailVariant": "hoverDissolve",
- "tags": ["ControlNet", "Image", "Text to Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets",
- "size": 2523293286,
- "vram": 6442450944
+ "description": "Upload an image of your scene and generate multiple views of your input scene with 1 click.",
+ "tags": ["Image Eidt"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-12-08",
+ "searchRank": 8,
+ "size": 31198642438,
+ "vram": 31198642438,
+ "usage": 1508
  },
  {
- "name": "mixing_controlnets",
- "title": "Mixing ControlNets",
+ "name": "templates-1_click_multiple_character_angles-v1.0",
+ "title": "1 Click Multiple Character Angles",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images by combining multiple ControlNet models.",
- "thumbnailVariant": "hoverDissolve",
- "tags": ["ControlNet", "Image", "Text to Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets",
- "size": 3328599654,
- "vram": 6442450944
+ "description": "Upload an image of your character and get multiple views of that image from different angles",
+ "tags": ["Image Eidt"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-12-08",
+ "searchRank": 8,
+ "size": 31198642438,
+ "vram": 31198642438,
+ "usage": 3637
+ },
+ {
+ "name": "template-Animation_Trajectory_Control_Wan_ATI",
+ "title": "Animation Trajectory Control",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Draw the movement trajectory you want for the input image.",
+ "tags": ["Image to Video"],
+ "models": ["Wan2.1"],
+ "date": "2025-12-11",
+ "searchRank": 8,
+ "size": 31604570534,
+ "requiresCustomNodes": ["ComfyUI-WanVideoWrapper", "comfyui_fill-nodes"],
+ "vram": 31604570534,
+ "usage": 449
  }
  ]
  },
@@ -333,18 +397,101 @@
  "title": "Image",
  "type": "image",
  "templates": [
+ {
+ "name": "image_z_image_turbo",
+ "title": "Z-Image-Turbo Text to Image",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "An Efficient Image Generation Foundation Model with Single-Stream Diffusion Transformer, supports English & Chinese.",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Z-Image-Turbo"],
+ "date": "2025-11-27",
+ "size": 20862803640,
+ "vram": 20862803640,
+ "usage": 27801
+ },
+ {
+ "name": "image_z_image_turbo_fun_union_controlnet",
+ "title": "Z-Image-Turbo Fun Union ControlNet",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "thumbnailVariant": "compareSlider",
+ "description": "Multi-control ControlNet supporting Canny, HED, Depth, Pose, and MLSD for Z-Image-Turbo.",
+ "tags": ["Image", "ControlNet"],
+ "models": ["Z-Image-Turbo"],
+ "date": "2025-12-02",
+ "size": 23794118820,
+ "vram": 23794118820,
+ "usage": 3859
+ },
+ {
+ "name": "image_qwen_image_edit_2511",
+ "title": "Qwen Image Edit 2511 - Material Replacement",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "thumbnailVariant": "compareSlider",
+ "description": "Replace materials in objects (e.g., furniture) by combining reference images with Qwen-Image-Edit-2511.",
+ "tags": ["Image Edit"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-12-23",
+ "size": 51367808860,
+ "vram": 51367808860
+ },
+ {
+ "name": "image_qwen_image_edit_2509",
+ "title": "Qwen Image Edit 2509",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "thumbnailVariant": "compareSlider",
+ "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+ "tags": ["Image to Image", "Image Edit", "ControlNet"],
+ "models": ["Qwen-Image"],
+ "date": "2025-09-25",
+ "size": 31772020572,
+ "vram": 31772020572,
+ "usage": 9323
+ },
+ {
+ "name": "image_qwen_image_edit_2509_relight",
+ "title": "Image Relight",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "thumbnailVariant": "compareSlider",
+ "description": "Relight images using Qwen-Image-Edit with LoRA support.",
+ "tags": ["Image Edit", "Relight"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-12-15",
+ "size": 31772020572,
+ "vram": 31772020572,
+ "usage": 192
+ },
  {
  "name": "image_flux2",
  "title": "Flux.2 Dev",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "description": "Generate up to 4MP photorealistic images with multi-reference consistency and professional text rendering.",
+ "description": "Generate photorealistic images with multi-reference consistency and professional text rendering.",
  "tags": ["Text to Image", "Image", "Image Edit"],
- "models": ["Flux.2 Dev", "BFL"],
+ "models": ["Flux.2 Dev", "BFL", "Flux"],
+ "date": "2025-11-26",
+ "size": 71781788416,
+ "vram": 71781788416,
+ "usage": 9538
+ },
+ {
+ "name": "image_flux2_text_to_image",
+ "title": "Flux.2 Dev Text to Image",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Text-to-image with enhanced lighting, materials, and realistic details.",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Flux.2 Dev", "BFL", "Flux"],
  "date": "2025-11-26",
  "size": 71382356459,
- "vram": 0
+ "vram": 71382356459,
+ "usage": 4002
  },
  {
  "name": "image_flux2_fp8",
@@ -352,40 +499,38 @@
  "mediaType": "image",
  "mediaSubtype": "webp",
  "description": "Create product mockups by applying design patterns to packaging, mugs, and other products using multi-reference consistency.",
- "tags": [
- "Text to Image",
- "Image",
- "Image Edit",
- "Mockup",
- "Product Design"
- ],
- "models": ["Flux.2 Dev", "BFL"],
+ "tags": ["Text to Image", "Image", "Image Edit", "Mockup", "Product"],
+ "models": ["Flux.2 Dev", "BFL", "Flux"],
  "date": "2025-11-26",
  "size": 53837415055,
- "vram": 0
+ "vram": 53837415055,
+ "usage": 436
  },
  {
- "name": "image_z_image_turbo",
- "title": "Z-Image-Turbo Text to Image",
+ "name": "image_qwen_image_layered",
+ "title": "Qwen-Image-Layered Decomposition",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "An Efficient Image Generation Foundation Model with Single-Stream Diffusion Transformer, supports English & Chinese.",
- "tags": ["Text to Image","Image"],
- "models": ["Z-Image-Turbo"],
- "date": "2025-11-27",
- "size": 35326050304
+ "description": "Decompose an image into editable RGBA layers for high-fidelity recolor, replace, resize, and reposition workflows using Qwen-Image-Layered.",
+ "tags": ["Layer Decompose"],
+ "models": ["Qwen-Image-Layered"],
+ "date": "2025-12-22",
+ "size": 50446538375,
+ "vram": 50446538375
  },
  {
  "name": "image_qwen_image",
  "title": "Qwen-Image Text to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model..",
+ "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model.",
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
  "tags": ["Text to Image", "Image"],
  "models": ["Qwen-Image"],
  "date": "2025-08-05",
- "size": 31772020572
+ "size": 31772020572,
+ "vram": 31772020572,
+ "usage": 1143
  },
  {
  "name": "image_qwen_image_instantx_controlnet",
@@ -397,7 +542,9 @@
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
  "models": ["Qwen-Image"],
  "date": "2025-08-23",
- "size": 35304631173
+ "size": 35304631173,
+ "vram": 35304631173,
+ "usage": 472
  },
  {
  "name": "image_qwen_image_instantx_inpainting_controlnet",
@@ -410,23 +557,28 @@
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
  "models": ["Qwen-Image"],
  "date": "2025-09-12",
- "size": 36013300777
+ "size": 36013300777,
+ "vram": 36013300777,
+ "usage": 515
  },
  {
  "name": "image_qwen_image_union_control_lora",
  "title": "Qwen-Image Union Control",
  "mediaType": "image",
  "mediaSubtype": "webp",
+ "thumbnailVariant": "compareSlider",
  "description": "Generate images with precise structural control using Qwen-Image's unified ControlNet LoRA. Supports multiple control types including canny, depth, lineart, softedge, normal, and openpose for diverse creative applications.",
  "tags": ["Text to Image", "Image", "ControlNet"],
  "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
  "models": ["Qwen-Image"],
  "date": "2025-08-23",
- "size": 32716913377
+ "size": 32716913377,
+ "vram": 32716913377,
+ "usage": 340
  },
  {
  "name": "image_qwen_image_controlnet_patch",
- "title": "Qwen-Image ControlNet model patch",
+ "title": "Qwen-Image ControlNet Model Patch",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
@@ -435,1924 +587,2255 @@
435
587
  "tags": ["Text to Image", "Image", "ControlNet"],
436
588
  "models": ["Qwen-Image"],
437
589
  "date": "2025-08-24",
438
- "size": 34037615821
590
+ "size": 34037615821,
591
+ "vram": 34037615821,
592
+ "usage": 218
439
593
  },
440
594
  {
441
- "name": "image_qwen_image_edit_2509",
442
- "title": "Qwen Image Edit 2509",
595
+ "name": "api_nano_banana_pro",
596
+ "title": "Nano Banana Pro",
597
+ "description": "Nano-banana Pro (Gemini 3.0 Pro Image) - Studio-quality 4K image generation and editing with enhanced text rendering and character consistency.",
443
598
  "mediaType": "image",
444
599
  "mediaSubtype": "webp",
445
- "thumbnailVariant": "compareSlider",
446
- "description": "Advanced image editing with multi-image support, improved consistency, and ControlNet integration.",
447
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
448
- "tags": ["Image to Image", "Image Edit", "ControlNet"],
449
- "models": ["Qwen-Image"],
450
- "date": "2025-09-25",
451
- "size": 31772020572
600
+ "thumbnailVariant": "hoverDissolve",
601
+ "tags": ["Image Edit", "Image", "API"],
602
+ "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google"],
603
+ "date": "2025-11-21",
604
+ "openSource": false,
605
+ "size": 0,
606
+ "vram": 0,
607
+ "usage": 6749
452
608
  },
453
609
  {
454
- "name": "image_qwen_image_edit",
455
- "title": "Qwen Image Edit",
610
+ "name": "api_from_photo_2_miniature",
611
+ "title": "Photo to Blueprint to Model",
612
+ "description": "Transform real building photos into architectural blueprints and then into detailed physical scale models. A complete architectural visualization pipeline from photo to miniature.",
456
613
  "mediaType": "image",
457
614
  "mediaSubtype": "webp",
458
- "thumbnailVariant": "compareSlider",
459
- "description": "Edit images with precise bilingual text editing and dual semantic/appearance editing capabilities using Qwen-Image-Edit's 20B MMDiT model.",
460
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
461
- "tags": ["Image to Image", "Image Edit"],
462
- "models": ["Qwen-Image"],
463
- "date": "2025-08-18",
464
- "size": 31772020572
615
+ "tags": ["Image Edit", "Image", "3D"],
616
+ "models": ["Gemini3 Pro Image Preview", "Nano Banana Pro", "Google"],
617
+ "date": "2025-11-21",
618
+ "openSource": false,
619
+ "size": 0,
620
+ "vram": 0,
621
+ "usage": 288
465
622
  },
466
623
  {
467
- "name": "image_chrono_edit_14B",
468
- "title": "ChronoEdit 14B",
624
+ "name": "api_openai_fashion_billboard_generator",
625
+ "title": "Fashion Billboard Generator",
626
+ "description": "**Transform clothing photos into professional mall billboard advertisements featuring realistic fashion models.",
469
627
  "mediaType": "image",
470
628
  "mediaSubtype": "webp",
471
- "thumbnailVariant": "compareSlider",
472
- "description": "Image editing powered by video models' dynamic understanding, creating physically plausible results while preserving character and style consistency.",
473
- "tags": ["Image Edit", "Image to Image"],
474
- "models": ["Wan2.1", "ChronoEdit", "Nvidia"],
475
- "date": "2025-11-03",
476
- "size": 40459304
629
+ "tags": ["Image Edit", "Image", "API", "Fashion", "Mockup"],
630
+ "models": ["GPT-Image-1.5", "OpenAI"],
631
+ "date": "2025-12-18",
632
+ "openSource": false,
633
+ "size": 0,
634
+ "vram": 0,
635
+ "usage": 50
477
636
  },
478
637
  {
479
- "name": "flux_kontext_dev_basic",
480
- "title": "Flux Kontext Dev Image Edit",
638
+ "name": "api_bytedance_seedream4",
639
+ "title": "ByteDance Seedream 4.0",
640
+ "description": "Multi-modal AI model for text-to-image and image editing. Generate 2K images in under 2 seconds with natural language control.",
481
641
  "mediaType": "image",
482
642
  "mediaSubtype": "webp",
483
- "thumbnailVariant": "hoverDissolve",
484
- "description": "Smart image editing that keeps characters consistent, edits specific parts without affecting others, and preserves original styles.",
485
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
486
- "tags": ["Image Edit", "Image to Image"],
487
- "models": ["Flux", "BFL"],
488
- "date": "2025-06-26",
489
- "size": 17641578168,
490
- "vram": 19327352832
643
+ "tags": ["Image Edit", "Image", "API", "Text to Image"],
644
+ "models": ["Seedream 4.0", "ByteDance"],
645
+ "date": "2025-09-11",
646
+ "openSource": false,
647
+ "size": 0,
648
+ "vram": 0,
649
+ "usage": 2117
491
650
  },
492
651
  {
493
- "name": "image_chroma1_radiance_text_to_image",
494
- "title": "Chroma1 Radiance text to image",
652
+ "name": "api_bfl_flux2_max_sofa_swap",
653
+ "title": "FLUX.2 [max]: Object Swap",
654
+ "description": "Replace objects in images with unmatched quality using FLUX.2 [max]. Perfect for product photography, furniture swaps, and maintaining scene consistency with highest editing precision.",
495
655
  "mediaType": "image",
496
656
  "mediaSubtype": "webp",
497
- "description": "Chroma1-Radiance works directly with image pixels instead of compressed latents, delivering higher quality images with reduced artifacts and distortion.",
498
- "tags": ["Text to Image", "Image"],
499
- "models": ["Chroma"],
500
- "date": "2025-09-18",
501
- "size": 23622320128,
502
- "vram": 23622320128
657
+ "thumbnailVariant": "compareSlider",
658
+ "tags": ["Image Edit", "Image", "API"],
659
+ "models": ["Flux2", "Flux", "BFL"],
660
+ "date": "2025-12-22",
661
+ "searchRank": 7,
662
+ "openSource": false,
663
+ "size": 0,
664
+ "vram": 0
503
665
  },
504
666
  {
505
- "name": "image_netayume_lumina_t2i",
506
- "title": "NetaYume Lumina Text to Image",
667
+ "name": "api_google_gemini_image",
668
+ "title": "Nano Banana",
669
+ "description": "Nano-banana (Gemini-2.5-Flash Image) - image editing with consistency.",
507
670
  "mediaType": "image",
508
671
  "mediaSubtype": "webp",
509
- "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
510
- "tags": ["Text to Image", "Image", "Anime"],
511
- "models": ["OmniGen"],
512
- "date": "2025-10-10",
513
- "size": 10619306639
672
+ "tags": ["Image Edit", "Image", "API", "Text to Image"],
673
+ "models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
674
+ "date": "2025-08-27",
675
+ "openSource": false,
676
+ "size": 0,
677
+ "vram": 0,
678
+ "usage": 1657
514
679
  },
515
680
  {
516
- "name": "image_chroma_text_to_image",
517
- "title": "Chroma text to image",
681
+ "name": "api_flux2",
682
+ "title": "Flux.2 Pro",
683
+ "description": "Generate up to 4MP photorealistic images with multi-reference consistency and professional text rendering.",
518
684
  "mediaType": "image",
519
685
  "mediaSubtype": "webp",
520
- "description": "Chroma - enhanced Flux model with improved image quality and better prompt understanding for stunning text-to-image generation.",
521
- "tags": ["Text to Image", "Image"],
522
- "models": ["Chroma", "Flux"],
523
- "date": "2025-06-04",
524
- "size": 23289460163,
525
- "vram": 15569256448
686
+ "tags": ["Image Edit", "Image", "API", "Text to Image"],
687
+ "models": ["Flux.2", "BFL", "Flux"],
688
+ "date": "2025-11-26",
689
+ "openSource": false,
690
+ "size": 0,
691
+ "vram": 0,
692
+ "usage": 852
526
693
  },
527
694
  {
528
- "name": "image_flux.1_fill_dev_OneReward",
529
- "title": "Flux.1 Dev OneReward",
695
+ "name": "api_topaz_image_enhance",
696
+ "title": "Topaz Image Enhance",
697
+ "description": "Professional image enhancement using Topaz's Reimagine model with face enhancement and detail restoration.",
530
698
  "mediaType": "image",
531
699
  "mediaSubtype": "webp",
532
700
  "thumbnailVariant": "compareSlider",
533
- "description": "Supports various tasks such as image inpainting, outpainting, and object removal by bytedance-research team",
534
- "tags": ["Inpainting", "Outpainting"],
535
- "models": ["Flux", "BFL"],
536
- "date": "2025-09-21",
537
- "size": 29001766666,
538
- "vram": 21474836480
701
+ "tags": ["Image", "API", "Upscale"],
702
+ "models": ["Topaz", "Reimagine"],
703
+ "date": "2025-11-25",
704
+ "openSource": false,
705
+ "size": 0,
706
+ "vram": 0,
707
+ "usage": 576
539
708
  },
540
709
  {
541
- "name": "flux_dev_checkpoint_example",
542
- "title": "Flux Dev fp8",
710
+ "name": "api_bfl_flux_1_kontext_multiple_images_input",
711
+ "title": "BFL Flux.1 Kontext Multiple Image Input",
712
+ "description": "Input multiple images and edit them with Flux.1 Kontext.",
543
713
  "mediaType": "image",
544
714
  "mediaSubtype": "webp",
545
- "description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
546
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
547
- "tags": ["Text to Image", "Image"],
548
- "models": ["Flux", "BFL"],
549
- "date": "2025-03-01",
550
- "size": 17244293693,
551
- "vram": 18253611008
715
+ "thumbnailVariant": "compareSlider",
716
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
717
+ "tags": ["Image Edit", "Image"],
718
+ "models": ["Flux", "Kontext", "BFL"],
719
+ "date": "2025-05-29",
720
+ "openSource": false,
721
+ "size": 0,
722
+ "vram": 0,
723
+ "usage": 139
552
724
  },
553
725
  {
554
- "name": "flux1_dev_uso_reference_image_gen",
555
- "title": "Flux.1 Dev USO Reference Image Generation",
556
- "description": "Use reference images to control both style and subject - keep your character's face while changing artistic style, or apply artistic styles to new scenes",
557
- "thumbnailVariant": "hoverDissolve",
726
+ "name": "api_bfl_flux_1_kontext_pro_image",
727
+ "title": "BFL Flux.1 Kontext Pro",
728
+ "description": "Edit images with Flux.1 Kontext pro image.",
558
729
  "mediaType": "image",
559
730
  "mediaSubtype": "webp",
560
- "tags": ["Image to Image", "Image"],
561
- "models": ["Flux", "BFL"],
562
- "date": "2025-09-02",
563
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
564
- "size": 18597208392,
565
- "vram": 19864223744
731
+ "thumbnailVariant": "compareSlider",
732
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
733
+ "tags": ["Image Edit", "Image"],
734
+ "models": ["Flux", "Kontext", "BFL"],
735
+ "date": "2025-05-29",
736
+ "openSource": false,
737
+ "size": 0,
738
+ "vram": 0,
739
+ "usage": 403
566
740
  },
567
741
  {
568
- "name": "flux_schnell",
569
- "title": "Flux Schnell fp8",
742
+ "name": "api_bfl_flux_1_kontext_max_image",
743
+ "title": "BFL Flux.1 Kontext Max",
744
+ "description": "Edit images with Flux.1 Kontext max image.",
570
745
  "mediaType": "image",
571
746
  "mediaSubtype": "webp",
572
- "description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
573
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
574
- "tags": ["Text to Image", "Image"],
575
- "models": ["Flux", "BFL"],
576
- "date": "2025-03-01",
577
- "size": 17233556275,
578
- "vram": 18253611008
747
+ "thumbnailVariant": "compareSlider",
748
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-kontext",
749
+ "tags": ["Image Edit", "Image"],
750
+ "models": ["Flux", "Kontext", "BFL"],
751
+ "date": "2025-05-29",
752
+ "openSource": false,
753
+ "size": 0,
754
+ "vram": 0,
755
+ "usage": 74
579
756
  },
580
757
  {
581
- "name": "flux1_krea_dev",
582
- "title": "Flux.1 Krea Dev",
758
+ "name": "api_wan_text_to_image",
759
+ "title": "Wan2.5: Text to Image",
760
+ "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
583
761
  "mediaType": "image",
584
762
  "mediaSubtype": "webp",
585
- "description": "A fine-tuned FLUX model pushing photorealism to the max",
586
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
587
- "tags": ["Text to Image", "Image"],
588
- "models": ["Flux", "BFL"],
589
- "date": "2025-07-31",
590
- "size": 22269405430,
591
- "vram": 23085449216
763
+ "tags": ["Text to Image", "Image", "API"],
764
+ "models": ["Wan2.5", "Wan"],
765
+ "date": "2025-09-25",
766
+ "openSource": false,
767
+ "size": 0,
768
+ "vram": 0,
769
+ "usage": 244
592
770
  },
593
771
  {
594
- "name": "flux_dev_full_text_to_image",
595
- "title": "Flux Dev full text to image",
772
+ "name": "api_bfl_flux_pro_t2i",
773
+ "title": "BFL Flux[Pro]: Text to Image",
774
+ "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
596
775
  "mediaType": "image",
597
776
  "mediaSubtype": "webp",
598
- "description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
599
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
600
- "tags": ["Text to Image", "Image"],
777
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
778
+ "tags": ["Image Edit", "Image"],
601
779
  "models": ["Flux", "BFL"],
602
- "date": "2025-03-01",
603
- "size": 34177202258,
604
- "vram": 23622320128
780
+ "date": "2025-05-01",
781
+ "openSource": false,
782
+ "size": 0,
783
+ "vram": 0,
784
+ "usage": 117
605
785
  },
606
786
  {
607
- "name": "flux_schnell_full_text_to_image",
608
- "title": "Flux Schnell full text to image",
787
+ "name": "api_runway_text_to_image",
788
+ "title": "Runway: Text to Image",
789
+ "description": "Generate high-quality images from text prompts using Runway's AI model.",
609
790
  "mediaType": "image",
610
791
  "mediaSubtype": "webp",
611
- "description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
612
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
613
- "tags": ["Text to Image", "Image"],
614
- "models": ["Flux", "BFL"],
792
+ "tags": ["Text to Image", "Image", "API"],
793
+ "models": ["Runway"],
615
794
  "date": "2025-03-01",
616
- "size": 34155727421
795
+ "openSource": false,
796
+ "size": 0,
797
+ "vram": 0,
798
+ "usage": 37
617
799
  },
618
800
  {
619
- "name": "flux_fill_inpaint_example",
620
- "title": "Flux Inpaint",
801
+ "name": "api_runway_reference_to_image",
802
+ "title": "Runway: Reference to Image",
803
+ "description": "Generate new images based on reference styles and compositions with Runway's AI.",
621
804
  "mediaType": "image",
622
- "mediaSubtype": "webp",
623
- "description": "Fill missing parts of images using Flux inpainting.",
624
805
  "thumbnailVariant": "compareSlider",
625
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
626
- "tags": ["Image to Image", "Inpainting", "Image"],
627
- "models": ["Flux", "BFL"],
806
+ "mediaSubtype": "webp",
807
+ "tags": ["Image to Image", "Image", "API"],
808
+ "models": ["Runway"],
628
809
  "date": "2025-03-01",
629
- "size": 10372346020
810
+ "openSource": false,
811
+ "size": 0,
812
+ "vram": 0,
813
+ "usage": 115
630
814
  },
631
815
  {
632
- "name": "flux_fill_outpaint_example",
633
- "title": "Flux Outpaint",
816
+ "name": "api_stability_ai_stable_image_ultra_t2i",
817
+ "title": "Stability AI: Stable Image Ultra Text to Image",
818
+ "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
634
819
  "mediaType": "image",
635
820
  "mediaSubtype": "webp",
636
- "description": "Extend images beyond boundaries using Flux outpainting.",
637
- "thumbnailVariant": "compareSlider",
638
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
639
- "tags": ["Outpainting", "Image", "Image to Image"],
640
- "models": ["Flux", "BFL"],
821
+ "tags": ["Text to Image", "Image", "API"],
822
+ "models": ["Stability"],
641
823
  "date": "2025-03-01",
642
- "size": 10372346020
824
+ "openSource": false,
825
+ "size": 0,
826
+ "vram": 0,
827
+ "usage": 27
643
828
  },
644
829
  {
645
- "name": "flux_canny_model_example",
646
- "title": "Flux Canny Model",
830
+ "name": "api_stability_ai_i2i",
831
+ "title": "Stability AI: Image to Image",
832
+ "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
647
833
  "mediaType": "image",
834
+ "thumbnailVariant": "compareSlider",
648
835
  "mediaSubtype": "webp",
649
- "description": "Generate images guided by edge detection using Flux Canny.",
650
- "thumbnailVariant": "hoverDissolve",
651
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
652
- "tags": ["Image to Image", "ControlNet", "Image"],
653
- "models": ["Flux", "BFL"],
836
+ "tags": ["Image to Image", "Image", "API"],
837
+ "models": ["Stability"],
654
838
  "date": "2025-03-01",
655
- "size": 34177202258
839
+ "openSource": false,
840
+ "size": 0,
841
+ "vram": 0,
842
+ "usage": 65
656
843
  },
657
844
  {
658
- "name": "flux_depth_lora_example",
659
- "title": "Flux Depth Lora",
845
+ "name": "api_stability_ai_sd3.5_t2i",
846
+ "title": "Stability AI: SD3.5 Text to Image",
847
+ "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
660
848
  "mediaType": "image",
661
849
  "mediaSubtype": "webp",
662
- "description": "Generate images guided by depth information using Flux LoRA.",
663
- "thumbnailVariant": "hoverDissolve",
664
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
665
- "tags": ["Image to Image", "ControlNet", "Image"],
666
- "models": ["Flux", "BFL"],
850
+ "tags": ["Text to Image", "Image", "API"],
851
+ "models": ["Stability"],
667
852
  "date": "2025-03-01",
668
- "size": 35412005356
853
+ "openSource": false,
854
+ "size": 0,
855
+ "vram": 0,
856
+ "usage": 18
669
857
  },
670
858
  {
671
- "name": "flux_redux_model_example",
672
- "title": "Flux Redux Model",
859
+ "name": "api_stability_ai_sd3.5_i2i",
860
+ "title": "Stability AI: SD3.5 Image to Image",
861
+ "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
673
862
  "mediaType": "image",
863
+ "thumbnailVariant": "compareSlider",
674
864
  "mediaSubtype": "webp",
675
- "description": "Generate images by transferring style from reference images using Flux Redux.",
676
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
677
- "tags": ["Image to Image", "ControlNet", "Image"],
678
- "models": ["Flux", "BFL"],
865
+ "tags": ["Image to Image", "Image", "API"],
866
+ "models": ["Stability"],
679
867
  "date": "2025-03-01",
680
- "size": 35154307318
868
+ "openSource": false,
869
+ "size": 0,
870
+ "vram": 0,
871
+ "usage": 88
681
872
  },
682
873
  {
- "name": "image_omnigen2_t2i",
- "title": "OmniGen2 Text to Image",
+ "name": "image_qwen_image_edit",
+ "title": "Qwen Image Edit",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
- "tags": ["Text to Image", "Image"],
- "models": ["OmniGen"],
- "date": "2025-06-30",
- "size": 15784004813
+ "thumbnailVariant": "compareSlider",
+ "description": "Edit images with precise bilingual text editing and dual semantic/appearance editing capabilities using Qwen-Image-Edit's 20B MMDiT model.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+ "tags": ["Image to Image", "Image Edit"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-08-18",
+ "size": 31772020572,
+ "vram": 31772020572,
+ "usage": 1556
  },
  {
- "name": "image_omnigen2_image_edit",
- "title": "OmniGen2 Image Edit",
+ "name": "image_ovis_text_to_image",
+ "title": "Ovis-Image Text to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "hoverDissolve",
- "description": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
- "tags": ["Image Edit", "Image"],
- "models": ["OmniGen"],
- "date": "2025-06-30",
- "size": 15784004813
+ "description": "Ovis-Image is a 7B text-to-image model specifically optimized for high-quality text rendering in generated images. Designed to operate efficiently under computational constraints, it excels at accurately generating images containing text content.",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Ovis-Image"],
+ "date": "2025-12-02",
+ "size": 20228222222,
+ "vram": 20228222222,
+ "usage": 1456
  },
  {
- "name": "hidream_i1_dev",
- "title": "HiDream I1 Dev",
+ "name": "image_chrono_edit_14B",
+ "title": "ChronoEdit 14B",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
- "tags": ["Text to Image", "Image"],
- "models": ["HiDream"],
- "date": "2025-04-17",
- "size": 33318208799
+ "thumbnailVariant": "compareSlider",
+ "description": "Image editing powered by video models' dynamic understanding, creating physically plausible results while preserving character and style consistency.",
+ "tags": ["Image Edit", "Image to Image"],
+ "models": ["Wan2.1", "ChronoEdit", "Nvidia"],
+ "date": "2025-11-03",
+ "size": 41435696988,
+ "vram": 41435696988,
+ "usage": 611
  },
  {
- "name": "hidream_i1_fast",
- "title": "HiDream I1 Fast",
+ "name": "flux_kontext_dev_basic",
+ "title": "Flux Kontext Dev Image Edit",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
- "tags": ["Text to Image", "Image"],
- "models": ["HiDream"],
- "date": "2025-04-17",
- "size": 24234352968
+ "thumbnailVariant": "hoverDissolve",
+ "description": "Smart image editing that keeps characters consistent, edits specific parts without affecting others, and preserves original styles.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
+ "tags": ["Image Edit", "Image to Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-06-26",
+ "size": 17641578168,
+ "vram": 19327352832,
+ "usage": 866
  },
  {
- "name": "hidream_i1_full",
- "title": "HiDream I1 Full",
+ "name": "api_luma_photon_i2i",
+ "title": "Luma Photon: Image to Image",
+ "description": "Guide image generation using a combination of images and prompt.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
- "tags": ["Text to Image", "Image"],
- "models": ["HiDream"],
- "date": "2025-04-17",
- "size": 24234352968
+ "thumbnailVariant": "compareSlider",
+ "tags": ["Image to Image", "Image", "API"],
+ "models": ["Luma"],
+ "date": "2025-03-01",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 101
  },
  {
- "name": "hidream_e1_1",
- "title": "HiDream E1.1 Image Edit",
+ "name": "api_luma_photon_style_ref",
+ "title": "Luma Photon: Style Reference",
+ "description": "Generate images by blending style references with precise control using Luma Photon.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "description": "Edit images with HiDream E1.1 – it’s better in image quality and editing accuracy than HiDream-E1-Full.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
- "tags": ["Image Edit", "Image"],
- "models": ["HiDream"],
- "date": "2025-07-21",
- "size": 50422916055
+ "tags": ["Text to Image", "Image", "API"],
+ "models": ["Luma"],
+ "date": "2025-03-01",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 79
  },
  {
- "name": "hidream_e1_full",
- "title": "HiDream E1 Image Edit",
+ "name": "api_recraft_image_gen_with_color_control",
+ "title": "Recraft: Color Control Image Generation",
+ "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "description": "Edit images with HiDream E1 - Professional natural language image editing model.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
- "tags": ["Image Edit", "Image"],
- "models": ["HiDream"],
- "date": "2025-05-01",
- "size": 34209414513
+ "tags": ["Text to Image", "Image", "API"],
+ "models": ["Recraft"],
+ "date": "2025-03-01",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 3
  },
  {
- "name": "sd3.5_simple_example",
- "title": "SD3.5 Simple",
+ "name": "api_recraft_image_gen_with_style_control",
+ "title": "Recraft: Style Control Image Generation",
+ "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images using SD 3.5.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
- "tags": ["Text to Image", "Image"],
- "models": ["SD3.5", "Stability"],
+ "tags": ["Text to Image", "Image", "API"],
+ "models": ["Recraft"],
  "date": "2025-03-01",
- "size": 14935748772
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 6
  },
  {
- "name": "sd3.5_large_canny_controlnet_example",
- "title": "SD3.5 Large Canny ControlNet",
+ "name": "api_recraft_vector_gen",
+ "title": "Recraft: Vector Generation",
+ "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
- "tags": ["Image to Image", "Image", "ControlNet"],
- "models": ["SD3.5", "Stability"],
+ "tags": ["Text to Image", "Image", "API", "Vector"],
+ "models": ["Recraft"],
  "date": "2025-03-01",
- "size": 23590107873
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 16
  },
  {
- "name": "sd3.5_large_depth",
- "title": "SD3.5 Large Depth",
+ "name": "api_ideogram_v3_t2i",
+ "title": "Ideogram V3: Text to Image",
+ "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images guided by depth information using SD 3.5.",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
- "tags": ["Image to Image", "Image", "ControlNet"],
- "models": ["SD3.5", "Stability"],
+ "tags": ["Text to Image", "Image", "API"],
+ "models": ["Ideogram"],
  "date": "2025-03-01",
- "size": 23590107873
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 8
  },
  {
- "name": "sd3.5_large_blur",
- "title": "SD3.5 Large Blur",
+ "name": "api_openai_image_1_t2i",
+ "title": "OpenAI: GPT-Image-1 Text to Image",
+ "description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images guided by blurred reference images using SD 3.5.",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
- "tags": ["Image to Image", "Image"],
- "models": ["SD3.5", "Stability"],
+ "tags": ["Text to Image", "Image", "API"],
+ "models": ["GPT-Image-1", "OpenAI"],
  "date": "2025-03-01",
- "size": 23590107873
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 9
  },
  {
- "name": "sdxl_simple_example",
- "title": "SDXL Simple",
+ "name": "api_openai_image_1_i2i",
+ "title": "OpenAI: GPT-Image-1 Image to Image",
+ "description": "Generate images from input images using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate high-quality images using SDXL.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
- "tags": ["Text to Image", "Image"],
- "models": ["SDXL", "Stability"],
+ "thumbnailVariant": "compareSlider",
+ "tags": ["Image to Image", "Image", "API"],
+ "models": ["GPT-Image-1", "OpenAI"],
  "date": "2025-03-01",
- "size": 13013750907
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 76
  },
  {
- "name": "sdxl_refiner_prompt_example",
- "title": "SDXL Refiner Prompt",
+ "name": "api_openai_image_1_inpaint",
+ "title": "OpenAI: GPT-Image-1 Inpaint",
+ "description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Enhance SDXL images using refiner models.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
- "tags": ["Text to Image", "Image"],
- "models": ["SDXL", "Stability"],
+ "thumbnailVariant": "compareSlider",
+ "tags": ["Inpainting", "Image", "API"],
+ "models": ["GPT-Image-1", "OpenAI"],
  "date": "2025-03-01",
- "size": 13013750907
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 21
  },
  {
- "name": "sdxl_revision_text_prompts",
- "title": "SDXL Revision Text Prompts",
+ "name": "api_openai_image_1_multi_inputs",
+ "title": "OpenAI: GPT-Image-1 Multi Inputs",
+ "description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
- "tags": ["Text to Image", "Image"],
- "models": ["SDXL", "Stability"],
+ "thumbnailVariant": "compareSlider",
+ "tags": ["Text to Image", "Image", "API"],
+ "models": ["GPT-Image-1", "OpenAI"],
  "date": "2025-03-01",
- "size": 10630044058
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/gpt-image-1",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 5
  },
  {
- "name": "sdxlturbo_example",
- "title": "SDXL Turbo",
+ "name": "api_openai_dall_e_2_t2i",
+ "title": "OpenAI: Dall-E 2 Text to Image",
+ "description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate images in a single step using SDXL Turbo.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
- "tags": ["Text to Image", "Image"],
- "models": ["SDXL", "Stability"],
+ "tags": ["Text to Image", "Image", "API"],
+ "models": ["Dall-E", "OpenAI"],
  "date": "2025-03-01",
- "size": 6936372183
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-2",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 4
  },
  {
- "name": "image_lotus_depth_v1_1",
- "title": "Lotus Depth",
+ "name": "api_openai_dall_e_2_inpaint",
+ "title": "OpenAI: Dall-E 2 Inpaint",
+ "description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention.",
- "tags": ["Image", "Text to Image"],
- "models": ["SD1.5", "Stability"],
- "date": "2025-05-21",
- "size": 2072321720
- }
- ]
- },
- {
- "moduleName": "default",
- "category": "GENERATION TYPE",
- "title": "Video",
- "icon": "icon-[lucide--film]",
- "type": "video",
- "templates": [
+ "tags": ["Inpainting", "Image", "API"],
+ "models": ["Dall-E", "OpenAI"],
+ "date": "2025-03-01",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-2",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 12
+ },
  {
- "name": "video_wan2_2_14B_t2v",
- "title": "Wan 2.2 14B Text to Video",
- "description": "Generate high-quality videos from text prompts with cinematic aesthetic control and dynamic motion generation using Wan 2.2.",
+ "name": "api_openai_dall_e_3_t2i",
+ "title": "OpenAI: Dall-E 3 Text to Image",
+ "description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
- "tags": ["Text to Video", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-07-29",
- "size": 38031935406
+ "tags": ["Text to Image", "Image", "API"],
+ "models": ["Dall-E", "OpenAI"],
+ "date": "2025-03-01",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/partner-nodes/openai/dall-e-3",
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 33
  },
  {
- "name": "video_wan2_2_14B_i2v",
- "title": "Wan 2.2 14B Image to Video",
- "description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
+ "name": "image_chroma1_radiance_text_to_image",
+ "title": "Chroma1 Radiance Text to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
- "tags": ["Image to Video", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-07-29",
- "size": 38031935406
+ "description": "Chroma1-Radiance works directly with image pixels instead of compressed latents, delivering higher quality images with reduced artifacts and distortion.",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Chroma"],
+ "date": "2025-09-18",
+ "size": 23622320128,
+ "vram": 23622320128,
+ "usage": 1149
  },
  {
- "name": "video_wan2_2_14B_flf2v",
- "title": "Wan 2.2 14B First-Last Frame to Video",
- "description": "Generate smooth video transitions by defining start and end frames.",
+ "name": "image_chroma_text_to_image",
+ "title": "Chroma Text to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
- "tags": ["FLF2V", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-08-02",
- "size": 38031935406
+ "description": "Chroma - enhanced Flux model with improved image quality and better prompt understanding for stunning text-to-image generation.",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Chroma", "Flux"],
+ "date": "2025-06-04",
+ "size": 23289460163,
+ "vram": 15569256448,
+ "usage": 1423
  },
  {
- "name": "video_wan2_2_14B_animate",
- "title": "Wan2.2 Animate, character animation and replacement",
- "description": "Unified character animation and replacement framework with precise motion and expression replication.",
+ "name": "image_newbieimage_exp0_1-t2i",
+ "title": "NewBie Exp0.1: Anime Generation",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
- "tags": ["Video", "Image to Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-09-22",
- "size": 27417997476
+ "description": "Generate detailed anime-style images with NewBie Exp0.1's Next-DiT architecture. Supports XML structured prompts for better multi-character scenes and attribute control.",
+ "tags": ["Text to Image", "Image", "Anime"],
+ "models": ["NewBie"],
+ "date": "2025-12-19",
+ "size": 16181289287,
+ "vram": 16181289287
  },
  {
- "name": "video_hunyuan_video_1.5_720p_t2v",
- "title": "Hunyuan Video 1.5 Text to Video",
- "description": "Generate high-quality 720p videos from text prompts with cinematic camera control, emotional expressions, and physics simulation. Supports multiple styles including realistic, anime, and 3D with text rendering.",
+ "name": "image_netayume_lumina_t2i",
+ "title": "NetaYume Lumina Text to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video"],
- "models": ["Hunyuan Video"],
- "date": "2025-11-21",
- "size": 45384919416
+ "description": "High-quality anime-style image generation with enhanced character understanding and detailed textures. Fine-tuned from Neta Lumina on Danbooru dataset.",
+ "tags": ["Text to Image", "Image", "Anime"],
+ "models": ["OmniGen"],
+ "date": "2025-10-10",
+ "size": 10619306639,
+ "vram": 10619306639,
+ "usage": 1536
  },
  {
- "name": "video_hunyuan_video_1.5_720p_i2v",
- "title": "Hunyuan Video 1.5 Image to Video",
- "description": "Animate still images into dynamic videos with precise motion and camera control. Maintains visual consistency while bringing photos and illustrations to life with smooth, natural movements.",
+ "name": "image_flux.1_fill_dev_OneReward",
+ "title": "Flux.1 Dev OneReward",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video"],
- "models": ["Hunyuan Video"],
- "date": "2025-11-21",
- "size": 45384919416
+ "thumbnailVariant": "compareSlider",
+ "description": "Supports various tasks such as image inpainting, outpainting, and object removal by bytedance-research team",
+ "tags": ["Inpainting", "Outpainting"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-09-21",
+ "size": 29001766666,
+ "vram": 21474836480,
+ "usage": 368
  },
  {
- "name": "video_wan2_2_14B_s2v",
- "title": "Wan2.2-S2V Audio-Driven Video Generation",
- "description": "Transform static images and audio into dynamic videos with perfect synchronization and minute-level generation.",
+ "name": "flux_dev_checkpoint_example",
+ "title": "Flux Dev fp8",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
- "tags": ["Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-08-02",
- "size": 25254407700
+ "description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 17244293693,
+ "vram": 18253611008,
+ "usage": 310
  },
  {
- "name": "video_humo",
- "title": "HuMo Video Generation",
- "description": "Generate videos basic on audio, image, and text, keep the character's lip sync.",
+ "name": "flux1_dev_uso_reference_image_gen",
+ "title": "Flux.1 Dev USO Reference Image Generation",
+ "description": "Use reference images to control both style and subject - keep your character's face while changing artistic style, or apply artistic styles to new scenes",
+ "thumbnailVariant": "hoverDissolve",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video"],
- "models": ["HuMo"],
- "date": "2025-09-21",
- "size": 27895812588
+ "tags": ["Image to Image", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-09-02",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-uso",
+ "size": 18597208392,
+ "vram": 19864223744,
+ "usage": 1624
  },
  {
- "name": "video_wan2_2_14B_fun_inpaint",
- "title": "Wan 2.2 14B Fun Inp",
- "description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
+ "name": "flux_schnell",
+ "title": "Flux Schnell FP8",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
- "tags": ["FLF2V", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-08-12",
- "size": 38031935406
+ "description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 17233556275,
+ "vram": 18253611008,
+ "usage": 99
  },
  {
- "name": "video_wan2_2_14B_fun_control",
- "title": "Wan 2.2 14B Fun Control",
- "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.2 Fun Control.",
+ "name": "flux1_krea_dev",
+ "title": "Flux.1 Krea Dev",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
- "tags": ["Video to Video", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-08-12",
- "size": 38031935406
+ "description": "A fine-tuned FLUX model pushing photorealism to the max",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-07-31",
+ "size": 22269405430,
+ "vram": 23085449216,
+ "usage": 1160
  },
  {
- "name": "video_wan2_2_14B_fun_camera",
- "title": "Wan 2.2 14B Fun Camera Control",
- "description": "Generate videos with camera motion controls including pan, zoom, and rotation using Wan 2.2 Fun Camera Control.",
+ "name": "flux_dev_full_text_to_image",
+ "title": "Flux Dev Full Text to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
- "tags": ["Video to Video", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-08-17",
- "size": 40050570035
+ "description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 34177202258,
+ "vram": 23622320128,
+ "usage": 309
  },
  {
- "name": "video_wan2_2_5B_ti2v",
- "title": "Wan 2.2 5B Video Generation",
- "description": "Fast text-to-video and image-to-video generation with 5B parameters. Optimized for rapid prototyping and creative exploration.",
+ "name": "flux_schnell_full_text_to_image",
+ "title": "Flux Schnell Full Text to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-07-29",
- "size": 18146236826
+ "description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 34155727421,
+ "vram": 34155727421,
+ "usage": 28
  },
  {
- "name": "video_wan2_2_5B_fun_inpaint",
- "title": "Wan 2.2 5B Fun Inpaint",
- "description": "Efficient video inpainting from start and end frames. 5B model delivers quick iterations for testing workflows.",
+ "name": "flux_fill_inpaint_example",
+ "title": "Flux Inpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-07-29",
- "size": 18146236826
+ "description": "Fill missing parts of images using Flux inpainting.",
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
+ "tags": ["Image to Image", "Inpainting", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 10372346020,
+ "vram": 10372346020,
+ "usage": 437
  },
  {
- "name": "video_wan2_2_5B_fun_control",
- "title": "Wan 2.2 5B Fun Control",
- "description": "Multi-condition video control with pose, depth, and edge guidance. Compact 5B size for experimental development.",
+ "name": "flux_fill_outpaint_example",
+ "title": "Flux Outpaint",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video"],
- "models": ["Wan2.2", "Wan"],
- "date": "2025-07-29",
- "size": 18146236826
+ "description": "Extend images beyond boundaries using Flux outpainting.",
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
+ "tags": ["Outpainting", "Image", "Image to Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 10372346020,
+ "vram": 10372346020,
+ "usage": 443
  },
  {
- "name": "video_wan_vace_14B_t2v",
- "title": "Wan2.1 VACE Text to Video",
- "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
+ "name": "flux_canny_model_example",
+ "title": "Flux Canny Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
- "tags": ["Text to Video", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-05-21",
- "size": 57756572713
+ "description": "Generate images guided by edge detection using Flux Canny.",
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+ "tags": ["Image to Image", "ControlNet", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 34177202258,
+ "vram": 34177202258,
+ "usage": 109
  },
  {
- "name": "video_wan_vace_14B_ref2v",
- "title": "Wan2.1 VACE Reference to Video",
- "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
+ "name": "flux_depth_lora_example",
+ "title": "Flux Depth Lora",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
- "tags": ["Video", "Image to Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-05-21",
- "size": 57756572713
+ "description": "Generate images guided by depth information using Flux LoRA.",
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+ "tags": ["Image to Image", "ControlNet", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 35412005356,
+ "vram": 35412005356,
+ "usage": 223
  },
  {
- "name": "video_wan_vace_14B_v2v",
- "title": "Wan2.1 VACE Control Video",
- "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
+ "name": "flux_redux_model_example",
+ "title": "Flux Redux Model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
- "tags": ["Video to Video", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-05-21",
- "size": 57756572713
+ "description": "Generate images by transferring style from reference images using Flux Redux.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
+ "tags": ["Image to Image", "ControlNet", "Image"],
+ "models": ["Flux", "BFL"],
+ "date": "2025-03-01",
+ "size": 35154307318,
+ "vram": 35154307318,
+ "usage": 226
  },
  {
- "name": "video_wan_vace_outpainting",
- "title": "Wan2.1 VACE Outpainting",
- "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
+ "name": "image_omnigen2_t2i",
+ "title": "OmniGen2 Text to Image",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
- "tags": ["Outpainting", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-05-21",
- "size": 57756572713
+ "description": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
+ "tags": ["Text to Image", "Image"],
+ "models": ["OmniGen"],
+ "date": "2025-06-30",
+ "size": 15784004813,
+ "vram": 15784004813,
+ "usage": 165
  },
  {
- "name": "video_wan_vace_flf2v",
- "title": "Wan2.1 VACE First-Last Frame",
- "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
+ "name": "image_omnigen2_image_edit",
+ "title": "OmniGen2 Image Edit",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
- "tags": ["FLF2V", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-05-21",
- "size": 57756572713
+ "thumbnailVariant": "hoverDissolve",
+ "description": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
+ "tags": ["Image Edit", "Image"],
+ "models": ["OmniGen"],
+ "date": "2025-06-30",
+ "size": 15784004813,
+ "vram": 15784004813,
+ "usage": 145
  },
  {
- "name": "video_wan_vace_inpainting",
- "title": "Wan2.1 VACE Inpainting",
- "description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
+ "name": "hidream_i1_dev",
+ "title": "HiDream I1 Dev",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
- "tags": ["Inpainting", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-05-21",
- "size": 57756572713
+ "description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+ "tags": ["Text to Image", "Image"],
+ "models": ["HiDream"],
+ "date": "2025-04-17",
+ "size": 33318208799,
+ "vram": 33318208799,
+ "usage": 92
  },
  {
- "name": "video_wan2.1_alpha_t2v_14B",
- "title": "Wan2.1 Alpha T2V",
- "description": "Generate text-to-video with alpha channel support for transparent backgrounds and semi-transparent objects.",
+ "name": "hidream_i1_fast",
+ "title": "HiDream I1 Fast",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-10-06",
- "size": 22494891213
+ "description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+ "tags": ["Text to Image", "Image"],
+ "models": ["HiDream"],
+ "date": "2025-04-17",
+ "size": 24234352968,
+ "vram": 24234352968,
+ "usage": 41
  },
  {
- "name": "video_wan_ati",
- "title": "Wan2.1 ATI",
- "description": "Trajectory-controlled video generation.",
+ "name": "hidream_i1_full",
+ "title": "HiDream I1 Full",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
- "tags": ["Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-05-21",
- "size": 25393994138
+ "description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
+ "tags": ["Text to Image", "Image"],
+ "models": ["HiDream"],
+ "date": "2025-04-17",
+ "size": 24234352968,
+ "vram": 24234352968,
+ "usage": 218
  },
  {
- "name": "video_wan2.1_fun_camera_v1.1_1.3B",
- "title": "Wan 2.1 Fun Camera 1.3B",
- "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
+ "name": "hidream_e1_full",
+ "title": "HiDream E1 Image Edit",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
- "tags": ["Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-04-15",
- "size": 11489037517
+ "thumbnailVariant": "compareSlider",
+ "description": "Edit images with HiDream E1 - Professional natural language image editing model.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
+ "tags": ["Image Edit", "Image"],
+ "models": ["HiDream"],
+ "date": "2025-05-01",
+ "size": 34209414513,
+ "vram": 34209414513,
+ "usage": 69
  },
  {
- "name": "video_wan2.1_fun_camera_v1.1_14B",
- "title": "Wan 2.1 Fun Camera 14B",
- "description": "Generate high-quality videos with advanced camera control using the full 14B model",
+ "name": "sd3.5_simple_example",
+ "title": "SD3.5 Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
- "tags": ["Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-04-15",
- "size": 42047729828
+ "description": "Generate images using SD 3.5.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
+ "tags": ["Text to Image", "Image"],
+ "models": ["SD3.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 14935748772,
+ "vram": 14935748772,
+ "usage": 490
  },
  {
- "name": "text_to_video_wan",
- "title": "Wan 2.1 Text to Video",
- "description": "Generate videos from text prompts using Wan 2.1.",
+ "name": "sd3.5_large_canny_controlnet_example",
+ "title": "SD3.5 Large Canny ControlNet",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
- "tags": ["Text to Video", "Video"],
- "models": ["Wan2.1", "Wan"],
+ "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+ "tags": ["Image to Image", "Image", "ControlNet"],
+ "models": ["SD3.5", "Stability"],
  "date": "2025-03-01",
- "size": 9824737690
+ "size": 23590107873,
+ "vram": 23590107873,
+ "usage": 113
  },
  {
- "name": "image_to_video_wan",
- "title": "Wan 2.1 Image to Video",
- "description": "Generate videos from images using Wan 2.1.",
+ "name": "sd3.5_large_depth",
+ "title": "SD3.5 Large Depth",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
- "tags": ["Text to Video", "Video"],
- "models": ["Wan2.1", "Wan"],
+ "description": "Generate images guided by depth information using SD 3.5.",
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+ "tags": ["Image to Image", "Image", "ControlNet"],
+ "models": ["SD3.5", "Stability"],
  "date": "2025-03-01",
- "size": 41049149932
+ "size": 23590107873,
+ "vram": 23590107873,
+ "usage": 95
  },
  {
- "name": "wan2.1_fun_inp",
- "title": "Wan 2.1 Inpainting",
- "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
- "tags": ["Inpainting", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-04-15",
- "size": 11381663334
- },
- {
- "name": "wan2.1_fun_control",
- "title": "Wan 2.1 ControlNet",
- "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
+ "name": "sd3.5_large_blur",
+ "title": "SD3.5 Large Blur",
  "mediaType": "image",
  "mediaSubtype": "webp",
+ "description": "Generate images guided by blurred reference images using SD 3.5.",
  "thumbnailVariant": "hoverDissolve",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
- "tags": ["Video to Video", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-04-15",
- "size": 11381663334
- },
- {
- "name": "wan2.1_flf2v_720_f16",
- "title": "Wan 2.1 FLF2V 720p F16",
- "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
- "tags": ["FLF2V", "Video"],
- "models": ["Wan2.1", "Wan"],
- "date": "2025-04-15",
- "size": 41049149932
- },
- {
- "name": "ltxv_text_to_video",
- "title": "LTXV Text to Video",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "description": "Generate videos from text prompts.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
- "tags": ["Text to Video", "Video"],
- "models": ["LTXV"],
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
+ "tags": ["Image to Image", "Image"],
+ "models": ["SD3.5", "Stability"],
  "date": "2025-03-01",
- "size": 19155554140
+ "size": 23590107873,
+ "vram": 23590107873,
+ "usage": 38
  },
  {
- "name": "ltxv_image_to_video",
- "title": "LTXV Image to Video",
+ "name": "sdxl_simple_example",
+ "title": "SDXL Simple",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos from still images.",
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
- "tags": ["Image to Video", "Video"],
- "models": ["LTXV"],
+ "description": "Generate high-quality images using SDXL.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
+ "tags": ["Text to Image", "Image"],
+ "models": ["SDXL", "Stability"],
  "date": "2025-03-01",
- "size": 19155554140
+ "size": 13013750907,
+ "vram": 13013750907,
+ "usage": 278
  },
  {
- "name": "mochi_text_to_video_example",
- "title": "Mochi Text to Video",
+ "name": "sdxl_refiner_prompt_example",
+ "title": "SDXL Refiner Prompt",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos from text prompts using Mochi model.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
- "tags": ["Text to Video", "Video"],
- "models": ["Mochi"],
+ "description": "Enhance SDXL images using refiner models.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
+ "tags": ["Text to Image", "Image"],
+ "models": ["SDXL", "Stability"],
  "date": "2025-03-01",
- "size": 30762703258
+ "size": 13013750907,
+ "vram": 13013750907,
+ "usage": 59
  },
  {
- "name": "hunyuan_video_text_to_video",
- "title": "Hunyuan Video Text to Video",
+ "name": "sdxl_revision_text_prompts",
+ "title": "SDXL Revision Text Prompts",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos from text prompts using Hunyuan model.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
- "tags": ["Text to Video", "Video"],
- "models": ["Hunyuan Video", "Tencent"],
+ "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
+ "tags": ["Text to Image", "Image"],
+ "models": ["SDXL", "Stability"],
  "date": "2025-03-01",
- "size": 35476429865
+ "size": 10630044058,
+ "vram": 10630044058,
+ "usage": 67
  },
  {
- "name": "image_to_video",
- "title": "SVD Image to Video",
+ "name": "sdxlturbo_example",
+ "title": "SDXL Turbo",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos from still images.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
- "tags": ["Image to Video", "Video"],
- "models": ["SVD", "Stability"],
+ "description": "Generate images in a single step using SDXL Turbo.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
+ "tags": ["Text to Image", "Image"],
+ "models": ["SDXL", "Stability"],
  "date": "2025-03-01",
- "size": 9556302234
+ "size": 6936372183,
+ "vram": 6936372183,
+ "usage": 452
  },
  {
- "name": "txt_to_image_to_video",
- "title": "SVD Text to Image to Video",
+ "name": "image_lotus_depth_v1_1",
+ "title": "Lotus Depth",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate videos by first creating images from text prompts.",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
- "tags": ["Text to Video", "Video"],
- "models": ["SVD", "Stability"],
- "date": "2025-03-01",
- "size": 16492674417
+ "thumbnailVariant": "compareSlider",
+ "description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention.",
+ "tags": ["Image", "Text to Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-05-21",
+ "size": 2072321720,
+ "vram": 2072321720,
+ "usage": 79
  }
  ]
  },
  {
  "moduleName": "default",
  "category": "GENERATION TYPE",
- "icon": "icon-[lucide--volume-2]",
- "title": "Audio",
- "type": "audio",
+ "title": "Video",
+ "icon": "icon-[lucide--film]",
+ "type": "video",
  "templates": [
  {
- "name": "audio_stable_audio_example",
- "title": "Stable Audio",
- "mediaType": "audio",
- "mediaSubtype": "mp3",
- "description": "Generate audio from text prompts using Stable Audio.",
- "tags": ["Text to Audio", "Audio"],
- "models": ["Stable Audio", "Stability"],
- "date": "2025-03-01",
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
- "size": 5744518758
+ "name": "video_wan2_2_14B_t2v",
+ "title": "Wan 2.2 14B Text to Video",
+ "description": "Generate high-quality videos from text prompts with cinematic aesthetic control and dynamic motion generation using Wan 2.2.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-07-29",
+ "size": 38031935406,
+ "vram": 38031935406,
+ "usage": 2369
  },
  {
- "name": "audio_ace_step_1_t2a_instrumentals",
- "title": "ACE-Step v1 Text to Instrumentals Music",
- "mediaType": "audio",
- "mediaSubtype": "mp3",
- "description": "Generate instrumental music from text prompts using ACE-Step v1.",
- "tags": ["Text to Audio", "Audio"],
- "models": ["ACE-Step"],
- "date": "2025-03-01",
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
- "size": 7698728878
+ "name": "video_wan2_2_14B_i2v",
+ "title": "Wan 2.2 14B Image to Video",
+ "description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+ "tags": ["Image to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-07-29",
+ "size": 38031935406,
+ "vram": 38031935406,
+ "usage": 10317
  },
  {
1343
- "name": "audio_ace_step_1_t2a_song",
1344
- "title": "ACE Step v1 Text to Song",
1345
- "mediaType": "audio",
1346
- "mediaSubtype": "mp3",
1347
- "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
1348
- "tags": ["Text to Audio", "Audio"],
1349
- "models": ["ACE-Step"],
1350
- "date": "2025-03-01",
1351
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
1352
- "size": 7698728878
1603
+ "name": "video_wan2_2_14B_flf2v",
1604
+ "title": "Wan 2.2 14B First-Last Frame to Video",
1605
+ "description": "Generate smooth video transitions by defining start and end frames.",
1606
+ "mediaType": "image",
1607
+ "mediaSubtype": "webp",
1608
+ "thumbnailVariant": "hoverDissolve",
1609
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
1610
+ "tags": ["FLF2V", "Video"],
1611
+ "models": ["Wan2.2", "Wan"],
1612
+ "date": "2025-08-02",
1613
+ "size": 38031935406,
1614
+ "vram": 38031935406,
1615
+ "usage": 1585
1353
1616
  },
1354
1617
  {
1355
- "name": "audio_ace_step_1_m2m_editing",
1356
- "title": "ACE Step v1 M2M Editing",
1357
- "mediaType": "audio",
1358
- "mediaSubtype": "mp3",
1359
- "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
1360
- "tags": ["Audio Editing", "Audio"],
1361
- "models": ["ACE-Step"],
1362
- "date": "2025-03-01",
1363
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
1364
- "size": 7698728878
1365
- }
1366
- ]
1367
- },
1368
- {
1369
- "moduleName": "default",
1370
- "category": "GENERATION TYPE",
1371
- "icon": "icon-[lucide--box]",
1372
- "title": "3D Model",
1373
- "type": "3d",
1374
- "templates": [
1618
+ "name": "video_wan2_2_14B_animate",
1619
+ "title": "Wan2.2 Animate, Character Animation and Replacement",
1620
+ "description": "Unified character animation and replacement framework with precise motion and expression replication.",
1621
+ "mediaType": "image",
1622
+ "mediaSubtype": "webp",
1623
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-animate",
1624
+ "tags": ["Video", "Image to Video"],
1625
+ "models": ["Wan2.2", "Wan"],
1626
+ "date": "2025-09-22",
1627
+ "size": 27417997476,
1628
+ "vram": 27417997476,
1629
+ "usage": 2141
1630
+ },
1375
1631
  {
- "name": "3d_hunyuan3d-v2.1",
- "title": "Hunyuan3D 2.1",
+ "name": "video_hunyuan_video_1.5_720p_t2v",
+ "title": "Hunyuan Video 1.5 Text to Video",
+ "description": "Generate high-quality 720p videos from text prompts with cinematic camera control, emotional expressions, and physics simulation. Supports multiple styles including realistic, anime, and 3D with text rendering.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
- "tags": ["Image to 3D", "3D"],
- "models": ["Hunyuan3D", "Tencent"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "size": 4928474972
+ "tags": ["Text to Video", "Video"],
+ "models": ["Hunyuan Video"],
+ "date": "2025-11-21",
+ "size": 45384919416,
+ "vram": 45384919416,
+ "usage": 451
  },
  {
- "name": "3d_hunyuan3d_image_to_model",
- "title": "Hunyuan3D 2.0",
+ "name": "video_hunyuan_video_1.5_720p_i2v",
+ "title": "Hunyuan Video 1.5 Image to Video",
+ "description": "Animate still images into dynamic videos with precise motion and camera control. Maintains visual consistency while bringing photos and illustrations to life with smooth, natural movements.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
- "tags": ["Image to 3D", "3D"],
- "models": ["Hunyuan3D", "Tencent"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "size": 4928474972
+ "tags": ["Image to Video", "Video"],
+ "models": ["Hunyuan Video"],
+ "date": "2025-11-21",
+ "size": 45384919416,
+ "vram": 45384919416,
+ "usage": 2150
  },
  {
- "name": "3d_hunyuan3d_multiview_to_model",
- "title": "Hunyuan3D 2.0 MV",
+ "name": "video_kandinsky5_i2v",
+ "title": "Kandinsky 5.0 Video Lite Image to Video",
+ "description": "A lightweight 2B model that generates videos from English and Russian prompts with high visual quality.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
- "tags": ["3D", "Image to 3D"],
- "models": ["Hunyuan3D", "Tencent"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "thumbnailVariant": "hoverDissolve",
- "size": 4928474972
+ "tags": ["Image to Video", "Video"],
+ "models": ["Kandinsky"],
+ "date": "2025-12-09",
+ "size": 14710262988,
+ "vram": 14710262988,
+ "usage": 1243
  },
  {
- "name": "3d_hunyuan3d_multiview_to_model_turbo",
- "title": "Hunyuan3D 2.0 MV Turbo",
+ "name": "video_kandinsky5_t2v",
+ "title": "Kandinsky 5.0 Video Lite Text to Video",
+ "description": "A lightweight 2B model that generates videos from English and Russian prompts with high visual quality.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
- "tags": ["Image to 3D", "3D"],
- "models": ["Hunyuan3D", "Tencent"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "thumbnailVariant": "hoverDissolve",
- "size": 4928474972
- }
- ]
- },
- {
- "moduleName": "default",
- "category": "CLOSED SOURCE MODELS",
- "title": "Image API",
- "icon": "icon-[lucide--hand-coins]",
- "type": "image",
- "templates": [
+ "tags": ["Text to Video", "Video"],
+ "models": ["Kandinsky"],
+ "date": "2025-12-09",
+ "size": 14710262988,
+ "vram": 14710262988,
+ "usage": 556
+ },
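The `size` and `vram` values on the new entries (for example 45384919416 on the Hunyuan Video 1.5 templates above) look like raw byte counts, with `vram` mirroring `size` on local templates. Assuming that reading, a small display helper is sketched below; the field semantics are inferred, not documented in this diff.

```python
def human_readable_bytes(n: int) -> str:
    """Render a raw byte count like 45384919416 as '42.3 GiB' (binary units)."""
    units = ["B", "KiB", "MiB", "GiB", "TiB"]
    value = float(n)
    for unit in units:
        if value < 1024 or unit == units[-1]:
            return f"{value:.1f} {unit}"
        value /= 1024
    return f"{value:.1f} {units[-1]}"  # unreachable; keeps type checkers quiet

# 45384919416 bytes is roughly 42.3 GiB, matching the Hunyuan entries above.
print(human_readable_bytes(45384919416))
```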
  {
- "name": "api_nano_banana_pro",
- "title": "Nano Banana Pro",
- "description": "Nano-banana Pro (Gemini 3.0 Pro Image) - Studio-quality 4K image generation and editing with enhanced text rendering and character consistency.",
+ "name": "api_kling2_6_i2v",
+ "title": "Kling2.6: Animate Images with Audio",
+ "description": "Transform static images into dynamic videos with synchronized dialogue, singing, sound effects, and ambient audio.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "hoverDissolve",
- "tags": ["Image Edit", "Image", "API"],
- "models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
- "date": "2025-11-21",
- "OpenSource": false,
+ "tags": ["Image to Video", "Video", "API", "Audio"],
+ "models": ["Kling"],
+ "date": "2025-12-22",
+ "openSource": false,
  "size": 0,
  "vram": 0
  },
  {
- "name": "api_from_photo_2_miniature",
- "title": "Photo to Blueprint to Model",
- "description": "Transform real building photos into architectural blueprints and then into detailed physical scale models. A complete architectural visualization pipeline from photo to miniature.",
+ "name": "api_kling2_6_t2v",
+ "title": "Kling2.6: Storytelling Videos with Audio",
+ "description": "Bring your stories to life with videos featuring synchronized dialogue, music, sound effects, and ambient audio from text prompts.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image Edit", "Image", "3D"],
- "models": ["Gemini-3-pro-image-preview", "nano-banana", "Google"],
- "date": "2025-11-21",
- "OpenSource": false,
+ "tags": ["Text to Video", "Video", "API", "Audio"],
+ "models": ["Kling"],
+ "date": "2025-12-22",
+ "openSource": false,
  "size": 0,
  "vram": 0
  },
  {
- "name": "api_bytedance_seedream4",
- "title": "ByteDance Seedream 4.0",
- "description": "Multi-modal AI model for text-to-image and image editing. Generate 2K images in under 2 seconds with natural language control.",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "tags": ["Image Edit", "Image", "API", "Text to Image"],
- "models": ["Seedream 4.0", "ByteDance"],
- "date": "2025-09-11",
- "OpenSource": false,
- "size": 0,
- "vram": 0
- },
- {
- "name": "api_google_gemini_image",
- "title": "Nano Banana",
- "description": "Nano-banana (Gemini-2.5-Flash Image) - image editing with consistency.",
+ "name": "api_openai_sora_video",
+ "title": "Sora 2: Text & Image to Video",
+ "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image Edit", "Image", "API", "Text to Image"],
- "models": ["Gemini-2.5-Flash", "nano-banana", "Google"],
- "date": "2025-08-27",
- "OpenSource": false,
+ "tags": ["Image to Video", "Text to Video", "API"],
+ "models": ["OpenAI"],
+ "date": "2025-10-08",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 765
  },
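A recurring change in this region is the key rename from `OpenSource` to camelCase `openSource`, visible wherever a `- "OpenSource": false` line is replaced by `+ "openSource": false`. A consumer that must read both package versions could normalize the key on load. A minimal sketch, assuming the top level is a list of category blocks with a `templates` array, as the removed category headers in this diff suggest:

```python
import json

def load_entries(path: str) -> list[dict]:
    """Load template entries, accepting both the old 'OpenSource' key
    and the new 'openSource' key."""
    with open(path, encoding="utf-8") as f:
        data = json.load(f)
    entries = []
    for category in data:  # assumed layout: list of category blocks
        for entry in category.get("templates", []):
            if "OpenSource" in entry and "openSource" not in entry:
                entry["openSource"] = entry.pop("OpenSource")
            entries.append(entry)
    return entries
```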
  {
- "name": "api_flux2",
- "title": "Flux.2 Pro",
- "description": "Generate up to 4MP photorealistic images with multi-reference consistency and professional text rendering.",
+ "name": "api_veo3",
+ "title": "Veo3: Image to Video",
+ "description": "Generate high-quality 8-second videos from text prompts or images using Google's advanced Veo 3 API. Features audio generation, prompt enhancement, and dual model options for speed or quality.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image Edit", "Image", "API", "Text to Image"],
- "models": ["Flux.2", "BFL"],
- "date": "2025-11-26",
- "OpenSource": false,
+ "tags": ["Image to Video", "Text to Video", "API"],
+ "models": ["Veo", "Google"],
+ "date": "2025-03-01",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 491
  },
  {
- "name": "api_topaz_image_enhance",
- "title": "Topaz Image Enhance",
- "description": "Professional image enhancement using Topaz's Reimagine model with face enhancement and detail restoration.",
+ "name": "api_topaz_video_enhance",
+ "title": "Topaz Video Enhance",
+ "description": "Enhance videos with Topaz AI. Supports resolution upscaling using Starlight (Astra) Fast model and frame interpolation with apo-8 model.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "tags": ["Image", "API", "Upscale"],
- "models": ["Topaz", "Reimagine"],
+ "tags": ["Video", "API", "Upscale"],
+ "models": ["Topaz"],
  "date": "2025-11-25",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 471
  },
  {
- "name": "api_bfl_flux_1_kontext_multiple_images_input",
- "title": "BFL Flux.1 Kontext Multiple Image Input",
- "description": "Input multiple images and edit them with Flux.1 Kontext.",
+ "name": "api_veo2_i2v",
+ "title": "Veo2: Image to Video",
+ "description": "Generate videos from images using Google Veo2 API.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
- "tags": ["Image Edit", "Image"],
- "models": ["Flux", "Kontext", "BFL"],
- "date": "2025-05-29",
- "OpenSource": false,
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Veo", "Google"],
+ "date": "2025-03-01",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 61
  },
  {
- "name": "api_bfl_flux_1_kontext_pro_image",
- "title": "BFL Flux.1 Kontext Pro",
- "description": "Edit images with Flux.1 Kontext pro image.",
+ "name": "api_wan2_6_t2v",
+ "title": "Wan2.6: Text to Video",
+ "description": "Generate high-quality videos from text prompts with enhanced image quality, smoother motion, 1080P resolution support, and improved prompt understanding for natural, professional results.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
- "tags": ["Image Edit", "Image"],
- "models": ["Flux", "Kontext", "BFL"],
- "date": "2025-05-29",
- "OpenSource": false,
+ "tags": ["Text to Video", "Video", "API"],
+ "models": ["Wan2.6", "Wan"],
+ "date": "2025-12-20",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
  "vram": 0
  },
  {
- "name": "api_bfl_flux_1_kontext_max_image",
- "title": "BFL Flux.1 Kontext Max",
- "description": "Edit images with Flux.1 Kontext max image.",
+ "name": "api_wan2_6_i2v",
+ "title": "Wan2.6: Image to Video",
+ "description": "Transform images into high-quality videos with enhanced image quality, smoother motion, 1080P resolution support, and natural movement generation for professional results.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
- "tags": ["Image Edit", "Image"],
- "models": ["Flux", "Kontext", "BFL"],
- "date": "2025-05-29",
- "OpenSource": false,
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Wan2.6", "Wan"],
+ "date": "2025-12-20",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
  "vram": 0
  },
  {
- "name": "api_wan_text_to_image",
- "title": "Wan2.5: Text to Image",
- "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
+ "name": "api_wan_text_to_video",
+ "title": "Wan2.5: Text to Video",
+ "description": "Generate videos with synchronized audio, enhanced motion, and superior quality.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
+ "tags": ["Image to Video", "Video", "API"],
  "models": ["Wan2.5", "Wan"],
- "date": "2025-09-25",
- "OpenSource": false,
+ "date": "2025-09-27",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 167
  },
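The `usage` field added throughout this version (10317 on the Wan 2.2 14B image-to-video entry, 765 on Sora 2) reads like a run-count or popularity metric. If that reading holds, ranking the most-used templates for a tag is a one-liner; a sketch under that assumption:

```python
def most_used(entries: list[dict], tag: str, top: int = 5) -> list[dict]:
    """Return the `top` entries carrying `tag`, ordered by the new
    'usage' counter (entries without the counter sort as 0)."""
    tagged = [e for e in entries if tag in e.get("tags", [])]
    return sorted(tagged, key=lambda e: e.get("usage", 0), reverse=True)[:top]

# e.g. most_used(entries, "Image to Video") would rank
# video_wan2_2_14B_i2v (usage 10317) ahead of api_bytedance_image_to_video (2275).
```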
  {
- "name": "api_bfl_flux_pro_t2i",
- "title": "BFL Flux[Pro]: Text to Image",
- "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
+ "name": "api_wan_image_to_video",
+ "title": "Wan2.5: Image to Video",
+ "description": "Transform images into videos with synchronized audio, enhanced motion, and superior quality.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
- "tags": ["Image Edit", "Image"],
- "models": ["Flux", "BFL"],
- "date": "2025-05-01",
- "OpenSource": false,
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Wan2.5", "Wan"],
+ "date": "2025-09-27",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 1463
  },
  {
- "name": "api_luma_photon_i2i",
- "title": "Luma Photon: Image to Image",
- "description": "Guide image generation using a combination of images and prompt.",
+ "name": "api_kling_i2v",
+ "title": "Kling: Image to Video",
+ "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tags": ["Image to Image", "Image", "API"],
- "models": ["Luma"],
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Kling"],
  "date": "2025-03-01",
- "OpenSource": false,
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 418
  },
  {
- "name": "api_luma_photon_style_ref",
- "title": "Luma Photon: Style Reference",
- "description": "Generate images by blending style references with precise control using Luma Photon.",
+ "name": "api_kling_omni_edit_video",
+ "title": "Kling: O1",
+ "description": "Edit videos with natural language commands, featuring video reference mode for quick generation of high-quality style transfers, element additions, and background modifications.",
  "mediaType": "image",
- "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Luma"],
- "date": "2025-03-01",
- "OpenSource": false,
+ "mediaSubtype": "webp",
+ "tags": ["Video", "API", "Video Editing", "Text to Video", "Image to Video"],
+ "models": ["Kling"],
+ "date": "2025-12-02",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 1007
  },
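Note that many rewritten entries now carry `"tutorialUrl": ""` rather than omitting the key. Code that renders a tutorial link should treat the empty string as absent; a defensive sketch:

```python
def tutorial_url(entry: dict) -> str | None:
    """Return the tutorial link, treating both a missing key and the
    empty-string placeholder ("tutorialUrl": "") as 'no tutorial'."""
    url = entry.get("tutorialUrl", "").strip()
    return url or None
```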
  {
- "name": "api_recraft_image_gen_with_color_control",
- "title": "Recraft: Color Control Image Generation",
- "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
+ "name": "api_kling_effects",
+ "title": "Kling: Video Effects",
+ "description": "Generate dynamic videos by applying visual effects to images using Kling.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Recraft"],
+ "tags": ["Video", "API"],
+ "models": ["Kling"],
  "date": "2025-03-01",
- "OpenSource": false,
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 5
  },
  {
- "name": "api_recraft_image_gen_with_style_control",
- "title": "Recraft: Style Control Image Generation",
- "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
+ "name": "api_kling_flf",
+ "title": "Kling: FLF2V",
+ "description": "Generate videos through controlling the first and last frames.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Recraft"],
+ "tags": ["Video", "API", "FLF2V"],
+ "models": ["Kling"],
  "date": "2025-03-01",
- "OpenSource": false,
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 167
  },
  {
- "name": "api_recraft_vector_gen",
- "title": "Recraft: Vector Generation",
- "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
+ "name": "api_vidu_text_to_video",
+ "title": "Vidu: Text to Video",
+ "description": "Generate high-quality 1080p videos from text prompts with adjustable movement amplitude and duration control using Vidu's advanced AI model.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API", "Vector"],
- "models": ["Recraft"],
- "date": "2025-03-01",
- "OpenSource": false,
+ "tags": ["Text to Video", "Video", "API"],
+ "models": ["Vidu"],
+ "date": "2025-08-23",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 8
  },
  {
- "name": "api_runway_text_to_image",
- "title": "Runway: Text to Image",
- "description": "Generate high-quality images from text prompts using Runway's AI model.",
+ "name": "api_vidu_image_to_video",
+ "title": "Vidu: Image to Video",
+ "description": "Transform static images into dynamic 1080p videos with precise motion control and customizable movement amplitude using Vidu.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Runway"],
- "date": "2025-03-01",
- "OpenSource": false,
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Vidu"],
+ "date": "2025-08-23",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 62
  },
  {
- "name": "api_runway_reference_to_image",
- "title": "Runway: Reference to Image",
- "description": "Generate new images based on reference styles and compositions with Runway's AI.",
+ "name": "api_vidu_reference_to_video",
+ "title": "Vidu: Reference to Video",
+ "description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
  "mediaType": "image",
- "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp",
- "tags": ["Image to Image", "Image", "API"],
- "models": ["Runway"],
- "date": "2025-03-01",
- "OpenSource": false,
+ "tags": ["Video", "Image to Video", "API"],
+ "models": ["Vidu"],
+ "date": "2025-08-23",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 69
  },
  {
- "name": "api_stability_ai_stable_image_ultra_t2i",
- "title": "Stability AI: Stable Image Ultra Text to Image",
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
+ "name": "api_vidu_start_end_to_video",
+ "title": "Vidu: Start End to Video",
+ "description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Stability"],
- "date": "2025-03-01",
- "OpenSource": false,
+ "tags": ["Video", "API", "FLF2V"],
+ "models": ["Vidu"],
+ "date": "2025-08-23",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 85
  },
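Only two `thumbnailVariant` values appear anywhere in this diff: `"hoverDissolve"` and `"compareSlider"` (used, for instance, on the Topaz Video Enhance and Kling O1 entries above). A validator restricted to exactly the values observed here might look like the sketch below; any further variants the frontend supports are outside what this diff shows.

```python
# Only the variants actually observed in this diff; the real set may be larger.
KNOWN_THUMBNAIL_VARIANTS = {"hoverDissolve", "compareSlider"}

def check_thumbnail_variant(entry: dict) -> None:
    """Raise if an entry declares a thumbnailVariant not seen in this file."""
    variant = entry.get("thumbnailVariant")
    if variant is not None and variant not in KNOWN_THUMBNAIL_VARIANTS:
        raise ValueError(
            f"{entry.get('name', '<unnamed>')}: unexpected thumbnailVariant {variant!r}"
        )
```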
  {
- "name": "api_stability_ai_i2i",
- "title": "Stability AI: Image to Image",
- "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
+ "name": "api_bytedance_text_to_video",
+ "title": "ByteDance: Text to Video",
+ "description": "Generate high-quality videos directly from text prompts using ByteDance's Seedance model. Supports multiple resolutions and aspect ratios with natural motion and cinematic quality.",
  "mediaType": "image",
- "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp",
- "tags": ["Image to Image", "Image", "API"],
- "models": ["Stability"],
- "date": "2025-03-01",
- "OpenSource": false,
+ "tags": ["Video", "API", "Text to Video"],
+ "models": ["ByteDance"],
+ "date": "2025-10-6",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 75
  },
  {
- "name": "api_stability_ai_sd3.5_t2i",
- "title": "Stability AI: SD3.5 Text to Image",
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
+ "name": "api_bytedance_image_to_video",
+ "title": "ByteDance: Image to Video",
+ "description": "Transform static images into dynamic videos using ByteDance's Seedance model. Analyzes image structure and generates natural motion with consistent visual style and coherent video sequences.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Stability"],
- "date": "2025-03-01",
- "OpenSource": false,
+ "tags": ["Video", "API", "Image to Video"],
+ "models": ["ByteDance"],
+ "date": "2025-10-6",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 2275
  },
  {
- "name": "api_stability_ai_sd3.5_i2i",
- "title": "Stability AI: SD3.5 Image to Image",
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional GENERATION TYPE at 1 megapixel resolution.",
+ "name": "api_bytedance_flf2v",
+ "title": "ByteDance: Start End to Video",
+ "description": "Generate cinematic video transitions between start and end frames with fluid motion, scene consistency, and professional polish using ByteDance's Seedance model.",
  "mediaType": "image",
- "thumbnailVariant": "compareSlider",
  "mediaSubtype": "webp",
- "tags": ["Image to Image", "Image", "API"],
- "models": ["Stability"],
- "date": "2025-03-01",
- "OpenSource": false,
+ "tags": ["Video", "API", "FLF2V"],
+ "models": ["ByteDance"],
+ "date": "2025-10-6",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 791
  },
  {
- "name": "api_ideogram_v3_t2i",
- "title": "Ideogram V3: Text to Image",
- "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Ideogram"],
- "date": "2025-03-01",
- "OpenSource": false,
- "size": 0,
- "vram": 0
- },
- {
- "name": "api_openai_image_1_t2i",
- "title": "OpenAI: GPT-Image-1 Text to Image",
- "description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
+ "name": "video_wan2_2_14B_s2v",
+ "title": "Wan2.2-S2V Audio-Driven Video Generation",
+ "description": "Transform static images and audio into dynamic videos with perfect synchronization and minute-level generation.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["GPT-Image-1", "OpenAI"],
- "date": "2025-03-01",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-s2v",
+ "tags": ["Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-08-02",
+ "size": 25254407700,
+ "vram": 25254407700,
+ "usage": 648
  },
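A convention worth noting in this region: API-backed entries (tagged "API", `openSource: false`) carry `size: 0` and `vram: 0`, while locally runnable templates such as video_wan2_2_14B_s2v just above carry real byte counts and omit `openSource` entirely. Splitting the index on that convention, assuming it holds for every entry:

```python
def split_local_and_api(entries: list[dict]) -> tuple[list[dict], list[dict]]:
    """Partition entries into locally runnable templates and API-node
    templates, using the size==0 / openSource==false convention seen here."""
    local, api = [], []
    for e in entries:
        if e.get("openSource") is False or e.get("size", 0) == 0:
            api.append(e)
        else:
            local.append(e)
    return local, api
```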
  {
- "name": "api_openai_image_1_i2i",
- "title": "OpenAI: GPT-Image-1 Image to Image",
- "description": "Generate images from input images using OpenAI GPT Image 1 API.",
+ "name": "api_ltxv_text_to_video",
+ "title": "LTX-2: Text to Video",
+ "description": "Generate high-quality videos from text prompts using Lightricks LTX-2 with synchronized audio. Supports up to 4K resolution at 50fps with Fast, Pro, and Ultra modes for various production needs.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tags": ["Image to Image", "Image", "API"],
- "models": ["GPT-Image-1", "OpenAI"],
- "date": "2025-03-01",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
- "OpenSource": false,
+ "tags": ["Text to Video", "Video", "API"],
+ "models": ["LTX-2", "Lightricks"],
+ "date": "2025-10-28",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 73
  },
  {
- "name": "api_openai_image_1_inpaint",
- "title": "OpenAI: GPT-Image-1 Inpaint",
- "description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
+ "name": "api_ltxv_image_to_video",
+ "title": "LTX-2: Image to Video",
+ "description": "Transform static images into dynamic videos with LTX-2 Pro. Generate cinematic video sequences with natural motion, synchronized audio, and support for up to 4K resolution at 50fps.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tags": ["Inpainting", "Image", "API"],
- "models": ["GPT-Image-1", "OpenAI"],
- "date": "2025-03-01",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
- "OpenSource": false,
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["LTX-2", "Lightricks"],
+ "date": "2025-10-28",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 448
  },
  {
- "name": "api_openai_image_1_multi_inputs",
- "title": "OpenAI: GPT-Image-1 Multi Inputs",
- "description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
+ "name": "api_hailuo_minimax_video",
+ "title": "MiniMax: Video",
+ "description": "Generate high-quality videos from text prompts with optional first-frame control using MiniMax Hailuo-02 model. Supports multiple resolutions (768P/1080P) and durations (6/10s) with intelligent prompt optimization.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["GPT-Image-1", "OpenAI"],
+ "tags": ["Text to Video", "Video", "API"],
+ "models": ["MiniMax"],
  "date": "2025-03-01",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1",
- "OpenSource": false,
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 9
  },
  {
- "name": "api_openai_dall_e_2_t2i",
- "title": "OpenAI: Dall-E 2 Text to Image",
- "description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
+ "name": "api_hailuo_minimax_t2v",
+ "title": "MiniMax: Text to Video",
+ "description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Dall-E", "OpenAI"],
+ "tags": ["Text to Video", "Video", "API"],
+ "models": ["MiniMax"],
  "date": "2025-03-01",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
- "OpenSource": false,
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 1
  },
  {
- "name": "api_openai_dall_e_2_inpaint",
- "title": "OpenAI: Dall-E 2 Inpaint",
- "description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
+ "name": "api_hailuo_minimax_i2v",
+ "title": "MiniMax: Image to Video",
+ "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "thumbnailVariant": "compareSlider",
- "tags": ["Inpainting", "Image", "API"],
- "models": ["Dall-E", "OpenAI"],
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["MiniMax"],
  "date": "2025-03-01",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2",
- "OpenSource": false,
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 39
  },
  {
- "name": "api_openai_dall_e_3_t2i",
- "title": "OpenAI: Dall-E 3 Text to Image",
- "description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
+ "name": "api_luma_i2v",
+ "title": "Luma: Image to Video",
+ "description": "Take static images and instantly create magical high quality animations.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Image", "Image", "API"],
- "models": ["Dall-E", "OpenAI"],
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Luma"],
  "date": "2025-03-01",
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3",
- "OpenSource": false,
- "size": 0,
- "vram": 0
- }
- ]
- },
- {
- "moduleName": "default",
- "category": "CLOSED SOURCE MODELS",
- "title": "Video API",
- "icon": "icon-[lucide--film]",
- "type": "video",
- "templates": [
- {
- "name": "api_openai_sora_video",
- "title": "Sora 2: Text & Image to Video",
- "description": "OpenAI's Sora-2 and Sora-2 Pro video generation with synchronized audio.",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "tags": ["Image to Video", "Text to Video", "API"],
- "models": ["OpenAI"],
- "date": "2025-10-08",
- "OpenSource": false,
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 56
  },
  {
- "name": "api_ltxv_text_to_video",
- "title": "LTX-2: Text to Video",
- "description": "Generate high-quality videos from text prompts using Lightricks LTX-2 with synchronized audio. Supports up to 4K resolution at 50fps with Fast, Pro, and Ultra modes for various production needs.",
+ "name": "api_luma_t2v",
+ "title": "Luma: Text to Video",
+ "description": "High-quality videos can be generated using simple prompts.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Text to Video", "Video", "API"],
- "models": ["LTX-2", "Lightricks"],
- "date": "2025-10-28",
- "OpenSource": false,
+ "models": ["Luma"],
+ "date": "2025-03-01",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 3
  },
  {
- "name": "api_ltxv_image_to_video",
- "title": "LTX-2: Image to Video",
- "description": "Transform static images into dynamic videos with LTX-2 Pro. Generate cinematic video sequences with natural motion, synchronized audio, and support for up to 4K resolution at 50fps.",
+ "name": "api_moonvalley_text_to_video",
+ "title": "Moonvalley: Text to Video",
+ "description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["LTX-2", "Lightricks"],
- "date": "2025-10-28",
- "OpenSource": false,
+ "tags": ["Text to Video", "Video", "API"],
+ "models": ["Moonvalley"],
+ "date": "2025-03-01",
+ "tutorialUrl": "",
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 4
  },
  {
- "name": "api_wan_text_to_video",
- "title": "Wan2.5: Text to Video",
- "description": "Generate videos with synchronized audio, enhanced motion, and superior quality.",
+ "name": "api_moonvalley_image_to_video",
+ "title": "Moonvalley: Image to Video",
+ "description": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Image to Video", "Video", "API"],
- "models": ["Wan2.5", "Wan"],
- "date": "2025-09-27",
+ "models": ["Moonvalley"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 29
  },
  {
- "name": "api_wan_image_to_video",
- "title": "Wan2.5: Image to Video",
- "description": "Transform images into videos with synchronized audio, enhanced motion, and superior quality.",
+ "name": "api_moonvalley_video_to_video_motion_transfer",
+ "title": "Moonvalley: Motion Transfer",
+ "description": "Apply motion from one video to another.",
  "mediaType": "image",
+ "thumbnailVariant": "hoverDissolve",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Wan2.5", "Wan"],
- "date": "2025-09-27",
+ "tags": ["Video to Video", "Video", "API"],
+ "models": ["Moonvalley"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 22
  },
  {
- "name": "api_kling_i2v",
- "title": "Kling: Image to Video",
- "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
+ "name": "api_moonvalley_video_to_video_pose_control",
+ "title": "Moonvalley: Pose Control",
+ "description": "Apply human pose and movement from one video to another.",
  "mediaType": "image",
+ "thumbnailVariant": "hoverDissolve",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Kling"],
+ "tags": ["Video to Video", "Video", "API"],
+ "models": ["Moonvalley"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 11
  },
  {
- "name": "api_kling_effects",
- "title": "Kling: Video Effects",
- "description": "Generate dynamic videos by applying visual effects to images using Kling.",
+ "name": "api_pixverse_i2v",
+ "title": "PixVerse: Image to Video",
+ "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "API"],
- "models": ["Kling"],
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["PixVerse"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 25
  },
  {
- "name": "api_kling_flf",
- "title": "Kling: FLF2V",
- "description": "Generate videos through controlling the first and last frames.",
+ "name": "api_pixverse_template_i2v",
+ "title": "PixVerse Templates: Image to Video",
+ "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "API", "FLF2V"],
- "models": ["Kling"],
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["PixVerse"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 16
  },
  {
- "name": "api_vidu_text_to_video",
- "title": "Vidu: Text to Video",
- "description": "Generate high-quality 1080p videos from text prompts with adjustable movement amplitude and duration control using Vidu's advanced AI model.",
+ "name": "api_pixverse_t2v",
+ "title": "PixVerse: Text to Video",
+ "description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Text to Video", "Video", "API"],
- "models": ["Vidu"],
- "date": "2025-08-23",
+ "models": ["PixVerse"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 3
  },
  {
- "name": "api_vidu_image_to_video",
- "title": "Vidu: Image to Video",
- "description": "Transform static images into dynamic 1080p videos with precise motion control and customizable movement amplitude using Vidu.",
+ "name": "api_runway_gen3a_turbo_image_to_video",
+ "title": "Runway: Gen3a Turbo Image to Video",
+ "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Image to Video", "Video", "API"],
- "models": ["Vidu"],
- "date": "2025-08-23",
+ "models": ["Runway"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 38
  },
  {
- "name": "api_vidu_reference_to_video",
- "title": "Vidu: Reference to Video",
- "description": "Generate videos with consistent subjects using multiple reference images (up to 7) for character and style continuity across the video sequence.",
+ "name": "api_runway_gen4_turo_image_to_video",
+ "title": "Runway: Gen4 Turbo Image to Video",
+ "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "Image to Video", "API"],
- "models": ["Vidu"],
- "date": "2025-08-23",
+ "tags": ["Image to Video", "Video", "API"],
+ "models": ["Runway"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 97
  },
  {
- "name": "api_vidu_start_end_to_video",
- "title": "Vidu: Start End to Video",
- "description": "Create smooth video transitions between defined start and end frames with natural motion interpolation and consistent visual quality.",
+ "name": "api_runway_first_last_frame",
+ "title": "Runway: First Last Frame to Video",
+ "description": "Generate smooth video transitions between two keyframes with Runway's precision.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "tags": ["Video", "API", "FLF2V"],
- "models": ["Vidu"],
- "date": "2025-08-23",
+ "models": ["Runway"],
+ "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 97
  },
  {
- "name": "api_bytedance_text_to_video",
- "title": "ByteDance: Text to Video",
- "description": "Generate high-quality videos directly from text prompts using ByteDance's Seedance model. Supports multiple resolutions and aspect ratios with natural motion and cinematic quality.",
- "mediaType": "image",
- "mediaSubtype": "webp",
- "tags": ["Video", "API", "Text to Video"],
- "models": ["ByteDance"],
- "date": "2025-10-6",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "name": "video_wan2_2_14B_fun_inpaint",
+ "title": "Wan 2.2 14B Fun Inp",
+ "description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-inp",
+ "tags": ["FLF2V", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-08-12",
+ "size": 38031935406,
+ "vram": 38031935406,
+ "usage": 547
  },
  {
- "name": "api_bytedance_image_to_video",
- "title": "ByteDance: Image to Video",
- "description": "Transform static images into dynamic videos using ByteDance's Seedance model. Analyzes image structure and generates natural motion with consistent visual style and coherent video sequences.",
+ "name": "video_wan2_2_14B_fun_control",
+ "title": "Wan 2.2 14B Fun Control",
+ "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.2 Fun Control.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "API", "Image to Video"],
- "models": ["ByteDance"],
- "date": "2025-10-6",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-control",
+ "tags": ["Video to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-08-12",
+ "size": 38031935406,
+ "vram": 38031935406,
+ "usage": 305
  },
  {
- "name": "api_bytedance_flf2v",
- "title": "ByteDance: Start End to Video",
- "description": "Generate cinematic video transitions between start and end frames with fluid motion, scene consistency, and professional polish using ByteDance's Seedance model.",
+ "name": "video_wan2_2_14B_fun_camera",
+ "title": "Wan 2.2 14B Fun Camera Control",
+ "description": "Generate videos with camera motion controls including pan, zoom, and rotation using Wan 2.2 Fun Camera Control.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "API", "FLF2V"],
- "models": ["ByteDance"],
- "date": "2025-10-6",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2-2-fun-camera",
+ "tags": ["Video to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-08-17",
+ "size": 40050570035,
+ "vram": 40050570035,
+ "usage": 228
  },
  {
- "name": "api_topaz_video_enhance",
- "title": "Topaz Video Enhance",
- "description": "Enhance videos with Topaz AI. Supports resolution upscaling using Starlight (Astra) Fast model and frame interpolation with apo-8 model.",
+ "name": "video_wan2_2_5B_ti2v",
+ "title": "Wan 2.2 5B Video Generation",
+ "description": "Fast text-to-video and image-to-video generation with 5B parameters. Optimized for rapid prototyping and creative exploration.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-07-29",
+ "size": 18146236826,
+ "vram": 18146236826,
+ "usage": 392
+ },
+ {
+ "name": "video_humo",
+ "title": "HuMo Video Generation",
+ "description": "Generate videos based on audio, image, and text while keeping the character's lip sync.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Video"],
+ "models": ["HuMo"],
+ "date": "2025-09-21",
+ "size": 27895812588,
+ "vram": 27895812588,
+ "usage": 424
+ },
+ {
+ "name": "video_wan2_2_5B_fun_inpaint",
+ "title": "Wan 2.2 5B Fun Inpaint",
+ "description": "Efficient video inpainting from start and end frames. 5B model delivers quick iterations for testing workflows.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-07-29",
+ "size": 18146236826,
+ "vram": 18146236826,
+ "usage": 53
+ },
+ {
+ "name": "video_wan2_2_5B_fun_control",
+ "title": "Wan 2.2 5B Fun Control",
+ "description": "Multi-condition video control with pose, depth, and edge guidance. Compact 5B size for experimental development.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-07-29",
+ "size": 18146236826,
+ "vram": 18146236826,
+ "usage": 110
+ },
+ {
+ "name": "video_wan_vace_14B_t2v",
+ "title": "Wan2.1 VACE Text to Video",
+ "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 162
+ },
+ {
+ "name": "video_wan_vace_14B_ref2v",
+ "title": "Wan2.1 VACE Reference to Video",
+ "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Video", "Image to Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 171
+ },
+ {
+ "name": "video_wan_vace_14B_v2v",
+ "title": "Wan2.1 VACE Control Video",
+ "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
  "mediaType": "image",
  "mediaSubtype": "webp",
  "thumbnailVariant": "compareSlider",
- "tags": ["Video", "API", "Upscale"],
- "models": ["Topaz"],
- "date": "2025-11-25",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Video to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 306
  },
  {
- "name": "api_luma_i2v",
- "title": "Luma: Image to Video",
- "description": "Take static images and instantly create magical high quality animations.",
+ "name": "video_wan_vace_outpainting",
+ "title": "Wan2.1 VACE Outpainting",
+ "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Luma"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Outpainting", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 117
  },
  {
- "name": "api_luma_t2v",
- "title": "Luma: Text to Video",
- "description": "High-quality videos can be generated using simple prompts.",
+ "name": "video_wan_vace_flf2v",
+ "title": "Wan2.1 VACE First-Last Frame",
+ "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video", "API"],
- "models": ["Luma"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["FLF2V", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 136
  },
  {
- "name": "api_moonvalley_text_to_video",
- "title": "Moonvalley: Text to Video",
- "description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
+ "name": "video_wan_vace_inpainting",
+ "title": "Wan2.1 VACE Inpainting",
+ "description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video", "API"],
- "models": ["Moonvalley"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "thumbnailVariant": "compareSlider",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
+ "tags": ["Inpainting", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-05-21",
+ "size": 57756572713,
+ "vram": 57756572713,
+ "usage": 261
  },
  {
- "name": "api_moonvalley_image_to_video",
- "title": "Moonvalley: Image to Video",
- "description": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
+ "name": "video_wan2.1_alpha_t2v_14B",
+ "title": "Wan2.1 Alpha T2V",
+ "description": "Generate text-to-video with alpha channel support for transparent backgrounds and semi-transparent objects.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Moonvalley"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.1", "Wan-Move", "Motion Control", "Wan"],
+ "date": "2025-10-06",
+ "size": 22494891213,
+ "vram": 22494891213,
+ "usage": 162
  },
  {
- "name": "api_moonvalley_video_to_video_motion_transfer",
- "title": "Moonvalley: Motion Transfer",
- "description": "Apply motion from one video to another.",
+ "name": "video_wanmove_480p",
+ "title": "Wan-Move Motion-Control Image to Video",
+ "description": "Generate videos from a single image using Wan-Move, with fine-grained point-level motion control via trajectory guidance.",
  "mediaType": "image",
- "thumbnailVariant": "hoverDissolve",
  "mediaSubtype": "webp",
- "tags": ["Video to Video", "Video", "API"],
- "models": ["Moonvalley"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tags": ["Image to Video", "Motion Control", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-12-15",
+ "size": 25420837683,
+ "vram": 25420837683,
+ "usage": 176
  },
  {
- "name": "api_moonvalley_video_to_video_pose_control",
- "title": "Moonvalley: Pose Control",
- "description": "Apply human pose and movement from one video to another.",
+ "name": "video_wanmove_480p_hallucination",
+ "title": "WanMove: Daydream Illusion",
+ "description": "Use WanMove to generate dynamic motion from trajectories and create daydream-illusion video effects.",
  "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tags": ["Image to Video", "Motion Control", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-12-15",
+ "size": 25420837683,
+ "vram": 25420837683,
+ "usage": 176,
+ "requiresCustomNodes": ["comfyui_fill-nodes"]
+ },
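The WanMove "Daydream Illusion" entry above introduces `requiresCustomNodes`, listing node packs (here `comfyui_fill-nodes`) that must be installed before the workflow can run. A loader can surface that requirement up front; a minimal sketch, with the installed-pack lookup left to the caller:

```python
def missing_custom_nodes(entry: dict, installed: set[str]) -> list[str]:
    """Return the custom node packs an entry declares via the new
    'requiresCustomNodes' field that are not yet installed."""
    required = entry.get("requiresCustomNodes", [])
    return [pack for pack in required if pack not in installed]

# Usage: missing_custom_nodes(entry, installed={"comfyui_fill-nodes"}) -> []
```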
+ {
2483
+ "name": "video_wan_ati",
2484
+ "title": "Wan2.1 ATI",
2485
+ "description": "Trajectory-controlled Video Generation.",
2486
+ "mediaType": "image",
2487
+ "mediaSubtype": "webp",
2155
2488
  "thumbnailVariant": "hoverDissolve",
2489
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
2490
+ "tags": ["Video"],
2491
+ "models": ["Wan2.1", "Wan"],
2492
+ "date": "2025-05-21",
2493
+ "size": 25393994138,
2494
+ "vram": 25393994138,
2495
+ "usage": 81
2496
+ },
2497
+ {
2498
+ "name": "video_wan2.1_fun_camera_v1.1_1.3B",
2499
+ "title": "Wan 2.1 Fun Camera 1.3B",
2500
+ "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
2501
+ "mediaType": "image",
2156
2502
  "mediaSubtype": "webp",
2157
- "tags": ["Video to Video", "Video", "API"],
2158
- "models": ["Moonvalley"],
2159
- "date": "2025-03-01",
2160
- "tutorialUrl": "",
2161
- "OpenSource": false,
2162
- "size": 0,
2163
- "vram": 0
2503
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
2504
+ "tags": ["Video"],
2505
+ "models": ["Wan2.1", "Wan"],
2506
+ "date": "2025-04-15",
2507
+ "size": 11489037517,
2508
+ "vram": 11489037517,
2509
+ "usage": 22
2164
2510
  },
2165
2511
  {
2166
- "name": "api_hailuo_minimax_video",
2167
- "title": "MiniMax: Video",
2168
- "description": "Generate high-quality videos from text prompts with optional first-frame control using MiniMax Hailuo-02 model. Supports multiple resolutions (768P/1080P) and durations (6/10s) with intelligent prompt optimization.",
2512
+ "name": "video_wan2.1_fun_camera_v1.1_14B",
2513
+ "title": "Wan 2.1 Fun Camera 14B",
2514
+ "description": "Generate high-quality videos with advanced camera control using the full 14B model",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video", "API"],
- "models": ["MiniMax"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
+ "tags": ["Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 42047729828,
+ "vram": 42047729828,
+ "usage": 48
  },
  {
- "name": "api_hailuo_minimax_t2v",
- "title": "MiniMax: Text to Video",
- "description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
+ "name": "text_to_video_wan",
+ "title": "Wan 2.1 Text to Video",
+ "description": "Generate videos from text prompts using Wan 2.1.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video", "API"],
- "models": ["MiniMax"],
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
  "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "size": 9824737690,
+ "vram": 9824737690,
+ "usage": 119
  },
  {
- "name": "api_hailuo_minimax_i2v",
- "title": "MiniMax: Image to Video",
- "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
+ "name": "image_to_video_wan",
+ "title": "Wan 2.1 Image to Video",
+ "description": "Generate videos from images using Wan 2.1.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["MiniMax"],
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
  "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "size": 41049149932,
+ "vram": 41049149932,
+ "usage": 143
  },
  {
- "name": "api_pixverse_i2v",
- "title": "PixVerse: Image to Video",
- "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
+ "name": "wan2.1_fun_inp",
+ "title": "Wan 2.1 Inpainting",
+ "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["PixVerse"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
+ "tags": ["Inpainting", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 11381663334,
+ "vram": 11381663334,
+ "usage": 13
  },
  {
- "name": "api_pixverse_template_i2v",
- "title": "PixVerse Templates: Image to Video",
- "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
+ "name": "wan2.1_fun_control",
+ "title": "Wan 2.1 ControlNet",
+ "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["PixVerse"],
+ "thumbnailVariant": "hoverDissolve",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
+ "tags": ["Video to Video", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 11381663334,
+ "vram": 11381663334,
+ "usage": 115
+ },
+ {
+ "name": "wan2.1_flf2v_720_f16",
+ "title": "Wan 2.1 FLF2V 720p F16",
+ "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
+ "tags": ["FLF2V", "Video"],
+ "models": ["Wan2.1", "Wan"],
+ "date": "2025-04-15",
+ "size": 41049149932,
+ "vram": 41049149932,
+ "usage": 43
+ },
+ {
+ "name": "ltxv_text_to_video",
+ "title": "LTXV Text to Video",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate videos from text prompts.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
+ "tags": ["Text to Video", "Video"],
+ "models": ["LTXV"],
  "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "size": 19155554140,
+ "vram": 19155554140,
+ "usage": 68
  },
  {
- "name": "api_pixverse_t2v",
- "title": "PixVerse: Text to Video",
- "description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
+ "name": "ltxv_image_to_video",
+ "title": "LTXV Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Video", "Video", "API"],
- "models": ["PixVerse"],
+ "description": "Generate videos from still images.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
+ "tags": ["Image to Video", "Video"],
+ "models": ["LTXV"],
  "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "size": 19155554140,
+ "vram": 19155554140,
+ "usage": 108
  },
  {
- "name": "api_runway_gen3a_turbo_image_to_video",
- "title": "Runway: Gen3a Turbo Image to Video",
- "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
+ "name": "hunyuan_video_text_to_video",
+ "title": "Hunyuan Video Text to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Runway"],
+ "description": "Generate videos from text prompts using Hunyuan model.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
+ "tags": ["Text to Video", "Video"],
+ "models": ["Hunyuan Video", "Tencent"],
  "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "size": 35476429865,
+ "vram": 35476429865,
+ "usage": 52
  },
  {
- "name": "api_runway_gen4_turo_image_to_video",
- "title": "Runway: Gen4 Turbo Image to Video",
- "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
+ "name": "txt_to_image_to_video",
+ "title": "SVD Text to Image to Video",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Runway"],
+ "description": "Generate videos by first creating images from text prompts.",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
+ "tags": ["Text to Video", "Video"],
+ "models": ["SVD", "Stability"],
+ "date": "2025-03-01",
+ "size": 16492674417,
+ "vram": 16492674417,
+ "usage": 14
+ }
+ ]
+ },
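A note on the schema visible in the hunks above: each entry keys a workflow JSON by `name`, and 0.3.61 adds a `usage` counter while replacing the old zeroed `size`/`vram` placeholders with real byte counts. Below is a minimal sketch of reading the bundled index, assuming only the package path shown in the file list and the list-of-categories layout visible in this diff; the helper names are illustrative, not part of the package.

```python
import json
from importlib import resources

# Minimal sketch: load the bundled template index from the installed wheel.
# The package path comes from the file list above; everything else is assumed.
def load_index():
    pkg = "comfyui_workflow_templates_media_other.templates"
    with resources.files(pkg).joinpath("index.json").open(encoding="utf-8") as f:
        return json.load(f)  # a list of category objects, each with "templates"

def list_category(index, title):
    # "size"/"vram" are raw byte counts in this index; convert for display.
    for category in index:
        if category.get("title") == title:
            for t in category["templates"]:
                gib = t.get("size", 0) / 2**30
                print(f'{t["name"]:<42} {gib:7.2f} GiB  usage={t.get("usage", 0)}')

list_category(load_index(), "Video")
```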
+ {
+ "moduleName": "default",
+ "category": "GENERATION TYPE",
+ "icon": "icon-[lucide--volume-2]",
+ "title": "Audio",
+ "type": "audio",
+ "templates": [
+ {
+ "name": "api_stability_ai_text_to_audio",
+ "title": "Stability AI: Text to Audio",
+ "description": "Generate music from text using Stable Audio 2.5. Create minutes-long tracks in seconds.",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "tags": ["Text to Audio", "Audio", "API"],
+ "date": "2025-09-09",
+ "models": ["Stability", "Stable Audio"],
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 119
+ },
+ {
+ "name": "api_stability_ai_audio_to_audio",
+ "title": "Stability AI: Audio to Audio",
+ "description": "Transform audio into new compositions using Stable Audio 2.5. Upload audio and AI creates complete tracks.",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "tags": ["Audio to Audio", "Audio", "API"],
+ "date": "2025-09-09",
+ "models": ["Stability", "Stable Audio"],
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 67
+ },
+ {
+ "name": "api_stability_ai_audio_inpaint",
+ "title": "Stability AI: Audio Inpainting",
+ "description": "Complete or extend audio tracks using Stable Audio 2.5. Upload audio and AI generates the rest.",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "tags": ["Audio to Audio", "Audio", "API"],
+ "date": "2025-09-09",
+ "models": ["Stability", "Stable Audio"],
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 17
+ },
+ {
+ "name": "audio_stable_audio_example",
+ "title": "Stable Audio",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "description": "Generate audio from text prompts using Stable Audio.",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["Stable Audio", "Stability"],
+ "date": "2025-03-01",
+ "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/",
+ "size": 5744518758,
+ "vram": 5744518758,
+ "usage": 270
+ },
+ {
+ "name": "audio_ace_step_1_t2a_instrumentals",
+ "title": "ACE-Step v1 Text to Instrumentals Music",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "description": "Generate instrumental music from text prompts using ACE-Step v1.",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["ACE-Step"],
+ "date": "2025-03-01",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+ "size": 7698728878,
+ "vram": 7698728878,
+ "usage": 139
+ },
+ {
+ "name": "audio_ace_step_1_t2a_song",
+ "title": "ACE Step v1 Text to Song",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["ACE-Step"],
  "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+ "size": 7698728878,
+ "vram": 7698728878,
+ "usage": 123
  },
  {
- "name": "api_runway_first_last_frame",
- "title": "Runway: First Last Frame to Video",
- "description": "Generate smooth video transitions between two keyframes with Runway's precision.",
+ "name": "audio_ace_step_1_m2m_editing",
+ "title": "ACE Step v1 M2M Editing",
+ "mediaType": "audio",
+ "mediaSubtype": "mp3",
+ "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
+ "tags": ["Audio Editing", "Audio"],
+ "models": ["ACE-Step"],
+ "date": "2025-03-01",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+ "size": 7698728878,
+ "vram": 7698728878,
+ "usage": 138
+ }
+ ]
+ },
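The Audio category mixes API-backed entries (an "API" tag, `openSource: false`, zeroed `size`/`vram`) with locally runnable ones that carry real byte counts. Below is a sketch of one way a consumer might split the two, using only the conventions visible in this diff; the function names and sample data are illustrative.

```python
# Sketch: separate API-backed templates from locally runnable ones, using the
# conventions in this index (API tag, openSource: false, zeroed size/vram).
def is_api_template(t: dict) -> bool:
    return "API" in t.get("tags", []) or t.get("openSource") is False

def runnable_locally(templates, vram_budget_bytes: int):
    # "vram" is in bytes here, mirroring the raw numbers in the diff.
    return [t["name"] for t in templates
            if not is_api_template(t) and t.get("vram", 0) <= vram_budget_bytes]

audio = [
    {"name": "api_stability_ai_text_to_audio",
     "tags": ["Text to Audio", "Audio", "API"], "openSource": False, "vram": 0},
    {"name": "audio_ace_step_1_t2a_song",
     "tags": ["Text to Audio", "Audio"], "vram": 7698728878},
]
print(runnable_locally(audio, 24 * 2**30))  # ['audio_ace_step_1_t2a_song']
```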
+ {
+ "moduleName": "default",
+ "category": "GENERATION TYPE",
+ "icon": "icon-[lucide--box]",
+ "title": "3D Model",
+ "type": "3d",
+ "templates": [
+ {
+ "name": "api_tripo3_0_image_to_model",
+ "title": "Tripo3.0: Image to Model",
+ "description": "Transform images or sketches into 3D models with Tripo 3.0's sharp geometry and production-ready PBR textures.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Video", "API", "FLF2V"],
- "models": ["Runway"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
+ "tags": ["Image to Model", "3D", "API"],
+ "models": ["Tripo"],
+ "date": "2025-12-23",
+ "openSource": false,
  "size": 0,
  "vram": 0
  },
  {
- "name": "api_pika_i2v",
- "title": "Pika: Image to Video",
- "description": "Generate smooth animated videos from single static images using Pika AI.",
+ "name": "api_tripo3_0_text_to_model",
+ "title": "Tripo3.0: Text to Model",
+ "description": "Generate precise 3D models from text with Tripo 3.0's ultra-high resolution geometry and realistic PBR materials.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Pika"],
- "date": "2025-03-01",
- "tutorialUrl": "",
- "OpenSource": false,
+ "tags": ["Text to Model", "3D", "API"],
+ "models": ["Tripo"],
+ "date": "2025-12-23",
+ "openSource": false,
  "size": 0,
  "vram": 0
  },
  {
- "name": "api_pika_scene",
- "title": "Pika Scenes: Images to Video",
- "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
+ "name": "api_tripo_text_to_model",
+ "title": "Tripo: Text to Model",
+ "description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Pika"],
+ "tags": ["Text to Model", "3D", "API"],
+ "models": ["Tripo"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 48
  },
  {
- "name": "api_veo2_i2v",
- "title": "Veo2: Image to Video",
- "description": "Generate videos from images using Google Veo2 API.",
+ "name": "api_tripo_image_to_model",
+ "title": "Tripo: Image to Model",
+ "description": "Generate professional 3D assets from 2D images using Tripo engine.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Video", "API"],
- "models": ["Veo", "Google"],
+ "tags": ["Image to 3D", "3D", "API"],
+ "models": ["Tripo"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 50
  },
  {
- "name": "api_veo3",
- "title": "Veo3: Image to Video",
- "description": "Generate high-quality 8-second videos from text prompts or images using Google's advanced Veo 3 API. Features audio generation, prompt enhancement, and dual model options for speed or quality.",
+ "name": "api_tripo_multiview_to_model",
+ "title": "Tripo: Multiview to Model",
+ "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to Video", "Text to Video", "API"],
- "models": ["Veo", "Google"],
+ "tags": ["Image to 3D", "3D", "API"],
+ "models": ["Tripo"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
- }
- ]
- },
- {
- "moduleName": "default",
- "category": "CLOSED SOURCE MODELS",
- "title": "3D API",
- "icon": "icon-[lucide--box]",
- "type": "image",
- "templates": [
+ "vram": 0,
+ "usage": 70
+ },
  {
  "name": "api_rodin_gen2",
  "title": "Rodin: Gen-2 Image to Model",
@@ -2363,9 +2846,10 @@
  "models": ["Rodin"],
  "date": "2025-09-27",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 355
  },
  {
  "name": "api_rodin_image_to_model",
@@ -2377,9 +2861,10 @@
  "models": ["Rodin"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 25
  },
  {
  "name": "api_rodin_multiview_to_model",
@@ -2391,106 +2876,75 @@
  "models": ["Rodin"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 47
  },
  {
- "name": "api_tripo_text_to_model",
- "title": "Tripo: Text to Model",
- "description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
+ "name": "3d_hunyuan3d-v2.1",
+ "title": "Hunyuan3D 2.1",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Text to Model", "3D", "API"],
- "models": ["Tripo"],
+ "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 384
  },
  {
- "name": "api_tripo_image_to_model",
- "title": "Tripo: Image to Model",
- "description": "Generate professional 3D assets from 2D images using Tripo engine.",
+ "name": "3d_hunyuan3d_image_to_model",
+ "title": "Hunyuan3D 2.0",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to 3D", "3D", "API"],
- "models": ["Tripo"],
+ "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 69
  },
  {
- "name": "api_tripo_multiview_to_model",
- "title": "Tripo: Multiview to Model",
- "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
+ "name": "3d_hunyuan3d_multiview_to_model",
+ "title": "Hunyuan3D 2.0 MV",
  "mediaType": "image",
  "mediaSubtype": "webp",
- "tags": ["Image to 3D", "3D", "API"],
- "models": ["Tripo"],
+ "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
+ "tags": ["3D", "Image to 3D"],
+ "models": ["Hunyuan3D", "Tencent"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
- "size": 0,
- "vram": 0
- }
- ]
- },
- {
- "moduleName": "default",
- "category": "CLOSED SOURCE MODELS",
- "title": "Audio API",
- "type": "audio",
- "icon": "icon-[lucide--volume-2]",
- "templates": [
- {
- "name": "api_stability_ai_text_to_audio",
- "title": "Stability AI: Text to Audio",
- "description": "Generate music from text using Stable Audio 2.5. Create minutes-long tracks in seconds.",
- "mediaType": "audio",
- "mediaSubtype": "mp3",
- "tags": ["Text to Audio", "Audio", "API"],
- "date": "2025-09-09",
- "models": ["Stability", "Stable Audio"],
- "OpenSource": false,
- "size": 0,
- "vram": 0
- },
- {
- "name": "api_stability_ai_audio_to_audio",
- "title": "Stability AI: Audio to Audio",
- "description": "Transform audio into new compositions using Stable Audio 2.5. Upload audio and AI creates complete tracks.",
- "mediaType": "audio",
- "mediaSubtype": "mp3",
- "tags": ["Audio to Audio", "Audio", "API"],
- "date": "2025-09-09",
- "models": ["Stability", "Stable Audio"],
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "thumbnailVariant": "hoverDissolve",
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 97
  },
  {
- "name": "api_stability_ai_audio_inpaint",
- "title": "Stability AI: Audio Inpainting",
- "description": "Complete or extend audio tracks using Stable Audio 2.5. Upload audio and AI generates the rest.",
- "mediaType": "audio",
- "mediaSubtype": "mp3",
- "tags": ["Audio to Audio", "Audio", "API"],
- "date": "2025-09-09",
- "models": ["Stability", "Stable Audio"],
- "OpenSource": false,
- "size": 0,
- "vram": 0
+ "name": "3d_hunyuan3d_multiview_to_model_turbo",
+ "title": "Hunyuan3D 2.0 MV Turbo",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Hunyuan3D", "Tencent"],
+ "date": "2025-03-01",
+ "tutorialUrl": "",
+ "thumbnailVariant": "hoverDissolve",
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 38
  }
  ]
  },
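One rename worth calling out: every remaining `"OpenSource": false` is rewritten to `"openSource": false` in 0.3.61, so code that still reads the old key silently loses the flag. Below is a sketch of a defensive reader that accepts both spellings; the function name is illustrative.

```python
# Sketch: tolerate the OpenSource -> openSource key rename from this release.
def normalize_open_source(template: dict) -> dict:
    t = dict(template)
    if "openSource" not in t and "OpenSource" in t:
        t["openSource"] = t.pop("OpenSource")
    return t

old_entry = {"name": "api_rodin_gen2", "OpenSource": False}
print(normalize_open_source(old_entry))
# {'name': 'api_rodin_gen2', 'openSource': False}
```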
  {
  "moduleName": "default",
- "category": "CLOSED SOURCE MODELS",
- "title": "LLM API",
+ "category": "GENERATION TYPE",
+ "title": "LLM",
  "icon": "icon-[lucide--message-square-text]",
  "type": "image",
  "templates": [
@@ -2504,9 +2958,10 @@
  "models": ["OpenAI"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
  "size": 0,
- "vram": 0
+ "vram": 0,
+ "usage": 35
  },
  {
  "name": "api_google_gemini",
@@ -2518,10 +2973,145 @@
  "models": ["Google Gemini", "Google"],
  "date": "2025-03-01",
  "tutorialUrl": "",
- "OpenSource": false,
+ "openSource": false,
+ "size": 0,
+ "vram": 0,
+ "usage": 130
+ }
+ ]
+ },
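The `usage` counter added across every category makes popularity sorting possible for the first time. A sketch of ranking templates by it, treating entries without the field as zero; the helper and the OpenAI entry's name are illustrative (the diff elides that entry's `name`).

```python
# Sketch: rank templates across categories by the new "usage" counter.
def top_by_usage(categories, n=3):
    templates = [t for c in categories for t in c.get("templates", [])]
    ranked = sorted(templates, key=lambda t: t.get("usage", 0), reverse=True)
    return [(t["name"], t.get("usage", 0)) for t in ranked[:n]]

categories = [
    {"title": "LLM", "templates": [
        {"name": "api_google_gemini", "usage": 130},
        {"name": "api_openai_chat", "usage": 35},  # name is illustrative
    ]},
]
print(top_by_usage(categories))
# [('api_google_gemini', 130), ('api_openai_chat', 35)]
```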
+ {
+ "moduleName": "default",
+ "isEssential": true,
+ "title": "Getting Started",
+ "type": "image",
+ "templates": [
+ {
+ "name": "gsc_starter_1",
+ "title": "Starter 1 – Text to Image",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Learn how to generate an image, connect nodes, run a workflow and download an image using Z-Image Turbo.",
+ "models": ["Z-Image-Turbo"],
+ "date": "2025-12-10",
+ "searchRank": 3,
+ "includeOnDistributions": ["cloud"],
  "size": 0,
  "vram": 0
+ },
+ {
+ "name": "gsc_starter_2",
+ "title": "Starter 2 – Image to Video",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Learn how to load images, generate a video and how to find a node using Wan 2.2.",
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-12-10",
+ "searchRank": 3,
+ "includeOnDistributions": ["cloud"],
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui_essentials"]
+ },
+ {
+ "name": "gsc_starter_3",
+ "title": "Starter 3 – Product Photography",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Learn how to create a product photography with image inputs, enter a subgraph, unbypass a node and get to know partner nodes using Nano Banana Pro.",
+ "models": ["Nano Banana Pro", "Google"],
+ "date": "2025-12-10",
+ "searchRank": 3,
+ "includeOnDistributions": ["cloud"],
+ "size": 0,
+ "vram": 0,
+ "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"]
+ },
+ {
+ "name": "01_get_started_text_to_image",
+ "title": "Text to Image (New)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate images from text prompts using the Z-Image-Turbo model.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/z-image/z-image-turbo",
+ "tags": ["Text to Image", "Image"],
+ "models": ["Z-Image-Turbo"],
+ "date": "2025-10-17",
+ "size": 20862803640,
+ "vram": 20862803640,
+ "usage": 299
+ },
+ {
+ "name": "02_qwen_Image_edit_subgraphed",
+ "title": "Image Editing (New)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Edit your images with Qwen-Image-Edit, the latest OSS model",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit",
+ "tags": ["Image to Image", "Image Edit", "ControlNet"],
+ "models": ["Qwen-Image-Edit"],
+ "date": "2025-10-17",
+ "size": 31772020572,
+ "vram": 31772020572,
+ "usage": 6436
+ },
+ {
+ "name": "03_video_wan2_2_14B_i2v_subgraphed",
+ "title": "Image to Video (New)",
+ "description": "Generate videos from an input image using Wan2.2 14B",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
+ "tags": ["Image to Video", "Video"],
+ "models": ["Wan2.2", "Wan"],
+ "date": "2025-10-17",
+ "size": 38031935406,
+ "vram": 38031935406,
+ "usage": 4084
+ },
+ {
+ "name": "04_hunyuan_3d_2.1_subgraphed",
+ "title": "Image to 3D (New)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate 3D models from single images using Hunyuan3D 2.1.",
+ "tags": ["Image to 3D", "3D"],
+ "models": ["Hunyuan3D"],
+ "date": "2025-10-17",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/3d/hunyuan3D-2",
+ "size": 4928474972,
+ "vram": 4928474972,
+ "usage": 152
+ },
+ {
+ "name": "05_audio_ace_step_1_t2a_song_subgraphed",
+ "title": "Text to Song (New)",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate songs from text prompts using ACE-Step v1",
+ "tags": ["Text to Audio", "Audio"],
+ "models": ["ACE-Step"],
+ "date": "2025-10-17",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1",
+ "size": 7698728878,
+ "vram": 7698728878,
+ "usage": 101
+ },
+ {
+ "name": "default",
+ "title": "Image Generation",
+ "mediaType": "image",
+ "mediaSubtype": "webp",
+ "description": "Generate images from text prompts.",
+ "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
+ "tags": ["Text-to-Image", "Image"],
+ "models": ["SD1.5", "Stability"],
+ "date": "2025-03-01",
+ "size": 2136746230,
+ "vram": 3092376453,
+ "status": "active",
+ "usage": 168
  }
  ]
  }
- ]
+ ]
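The new Getting Started section also introduces fields that older consumers will not recognize: `includeOnDistributions` (the starter workflows ship only on the cloud distribution), `requiresCustomNodes` (node packs the workflow depends on), `searchRank`, and the category-level `isEssential`. Below is a sketch of the gating these fields suggest; the semantics are inferred from the field names and this diff, not from documentation.

```python
# Sketch of the gating suggested by the new fields; semantics are assumed.
def visible_on(template: dict, distribution: str) -> bool:
    allowed = template.get("includeOnDistributions")
    return allowed is None or distribution in allowed  # absent = everywhere

def missing_nodes(template: dict, installed: set) -> list:
    return [p for p in template.get("requiresCustomNodes", []) if p not in installed]

starter = {
    "name": "gsc_starter_3",
    "includeOnDistributions": ["cloud"],
    "requiresCustomNodes": ["comfyui-kjnodes", "comfyui_essentials"],
}
print(visible_on(starter, "cloud"))                 # True
print(visible_on(starter, "desktop"))               # False
print(missing_nodes(starter, {"comfyui-kjnodes"}))  # ['comfyui_essentials']
```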