comfyui-workflow-templates 0.1.32__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (298) hide show
  1. comfyui_workflow_templates/__init__.py +32 -11
  2. comfyui_workflow_templates-0.6.0.dist-info/METADATA +434 -0
  3. comfyui_workflow_templates-0.6.0.dist-info/RECORD +6 -0
  4. comfyui_workflow_templates/templates/2_pass_pose_worship-1.webp +0 -0
  5. comfyui_workflow_templates/templates/2_pass_pose_worship-2.webp +0 -0
  6. comfyui_workflow_templates/templates/2_pass_pose_worship.json +0 -844
  7. comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model-1.webp +0 -0
  8. comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model.json +0 -757
  9. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model-1.webp +0 -0
  10. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model-2.webp +0 -0
  11. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model.json +0 -1067
  12. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo-1.webp +0 -0
  13. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo-2.webp +0 -0
  14. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo.json +0 -1115
  15. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_max_image-1.webp +0 -0
  16. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_max_image-2.webp +0 -0
  17. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_max_image.json +0 -343
  18. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_multiple_images_input-1.webp +0 -0
  19. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_multiple_images_input-2.webp +0 -0
  20. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_multiple_images_input.json +0 -470
  21. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_pro_image-1.webp +0 -0
  22. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_pro_image-2.webp +0 -0
  23. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_pro_image.json +0 -343
  24. comfyui_workflow_templates/templates/api_bfl_flux_pro_t2i-1.webp +0 -0
  25. comfyui_workflow_templates/templates/api_bfl_flux_pro_t2i.json +0 -219
  26. comfyui_workflow_templates/templates/api_google_gemini-1.webp +0 -0
  27. comfyui_workflow_templates/templates/api_google_gemini.json +0 -597
  28. comfyui_workflow_templates/templates/api_hailuo_minimax_i2v-1.webp +0 -0
  29. comfyui_workflow_templates/templates/api_hailuo_minimax_i2v.json +0 -164
  30. comfyui_workflow_templates/templates/api_hailuo_minimax_t2v-1.webp +0 -0
  31. comfyui_workflow_templates/templates/api_hailuo_minimax_t2v.json +0 -138
  32. comfyui_workflow_templates/templates/api_ideogram_v3_t2i-1.webp +0 -0
  33. comfyui_workflow_templates/templates/api_ideogram_v3_t2i.json +0 -212
  34. comfyui_workflow_templates/templates/api_kling_effects-1.webp +0 -0
  35. comfyui_workflow_templates/templates/api_kling_effects.json +0 -188
  36. comfyui_workflow_templates/templates/api_kling_flf-1.webp +0 -0
  37. comfyui_workflow_templates/templates/api_kling_flf.json +0 -246
  38. comfyui_workflow_templates/templates/api_kling_i2v-1.webp +0 -0
  39. comfyui_workflow_templates/templates/api_kling_i2v.json +0 -184
  40. comfyui_workflow_templates/templates/api_luma_i2v-1.webp +0 -0
  41. comfyui_workflow_templates/templates/api_luma_i2v.json +0 -351
  42. comfyui_workflow_templates/templates/api_luma_photon_i2i-1.webp +0 -0
  43. comfyui_workflow_templates/templates/api_luma_photon_i2i-2.webp +0 -0
  44. comfyui_workflow_templates/templates/api_luma_photon_i2i.json +0 -185
  45. comfyui_workflow_templates/templates/api_luma_photon_style_ref-1.webp +0 -0
  46. comfyui_workflow_templates/templates/api_luma_photon_style_ref-2.webp +0 -0
  47. comfyui_workflow_templates/templates/api_luma_photon_style_ref.json +0 -974
  48. comfyui_workflow_templates/templates/api_luma_t2v-1.webp +0 -0
  49. comfyui_workflow_templates/templates/api_luma_t2v.json +0 -246
  50. comfyui_workflow_templates/templates/api_openai_chat-1.webp +0 -0
  51. comfyui_workflow_templates/templates/api_openai_chat.json +0 -585
  52. comfyui_workflow_templates/templates/api_openai_dall_e_2_inpaint-1.webp +0 -0
  53. comfyui_workflow_templates/templates/api_openai_dall_e_2_inpaint-2.webp +0 -0
  54. comfyui_workflow_templates/templates/api_openai_dall_e_2_inpaint.json +0 -215
  55. comfyui_workflow_templates/templates/api_openai_dall_e_2_t2i-1.webp +0 -0
  56. comfyui_workflow_templates/templates/api_openai_dall_e_2_t2i.json +0 -160
  57. comfyui_workflow_templates/templates/api_openai_dall_e_3_t2i-1.webp +0 -0
  58. comfyui_workflow_templates/templates/api_openai_dall_e_3_t2i.json +0 -148
  59. comfyui_workflow_templates/templates/api_openai_image_1_i2i-1.webp +0 -0
  60. comfyui_workflow_templates/templates/api_openai_image_1_i2i-2.webp +0 -0
  61. comfyui_workflow_templates/templates/api_openai_image_1_i2i.json +0 -207
  62. comfyui_workflow_templates/templates/api_openai_image_1_inpaint-1.webp +0 -0
  63. comfyui_workflow_templates/templates/api_openai_image_1_inpaint-2.webp +0 -0
  64. comfyui_workflow_templates/templates/api_openai_image_1_inpaint.json +0 -217
  65. comfyui_workflow_templates/templates/api_openai_image_1_multi_inputs-1.webp +0 -0
  66. comfyui_workflow_templates/templates/api_openai_image_1_multi_inputs-2.webp +0 -0
  67. comfyui_workflow_templates/templates/api_openai_image_1_multi_inputs.json +0 -302
  68. comfyui_workflow_templates/templates/api_openai_image_1_t2i-1.webp +0 -0
  69. comfyui_workflow_templates/templates/api_openai_image_1_t2i.json +0 -217
  70. comfyui_workflow_templates/templates/api_pika_i2v-1.webp +0 -0
  71. comfyui_workflow_templates/templates/api_pika_i2v.json +0 -199
  72. comfyui_workflow_templates/templates/api_pika_scene-1.webp +0 -0
  73. comfyui_workflow_templates/templates/api_pika_scene.json +0 -328
  74. comfyui_workflow_templates/templates/api_pixverse_i2v-1.webp +0 -0
  75. comfyui_workflow_templates/templates/api_pixverse_i2v.json +0 -241
  76. comfyui_workflow_templates/templates/api_pixverse_t2v-1.webp +0 -0
  77. comfyui_workflow_templates/templates/api_pixverse_t2v.json +0 -168
  78. comfyui_workflow_templates/templates/api_pixverse_template_i2v-1.webp +0 -0
  79. comfyui_workflow_templates/templates/api_pixverse_template_i2v.json +0 -217
  80. comfyui_workflow_templates/templates/api_recraft_image_gen_with_color_control-1.webp +0 -0
  81. comfyui_workflow_templates/templates/api_recraft_image_gen_with_color_control.json +0 -585
  82. comfyui_workflow_templates/templates/api_recraft_image_gen_with_style_control-1.webp +0 -0
  83. comfyui_workflow_templates/templates/api_recraft_image_gen_with_style_control.json +0 -572
  84. comfyui_workflow_templates/templates/api_recraft_vector_gen-1.webp +0 -0
  85. comfyui_workflow_templates/templates/api_recraft_vector_gen.json +0 -395
  86. comfyui_workflow_templates/templates/api_rodin_image_to_model-1.webp +0 -0
  87. comfyui_workflow_templates/templates/api_rodin_image_to_model-2.webp +0 -0
  88. comfyui_workflow_templates/templates/api_rodin_image_to_model.json +0 -1018
  89. comfyui_workflow_templates/templates/api_rodin_multiview_to_model-1.webp +0 -0
  90. comfyui_workflow_templates/templates/api_rodin_multiview_to_model-2.webp +0 -0
  91. comfyui_workflow_templates/templates/api_rodin_multiview_to_model.json +0 -768
  92. comfyui_workflow_templates/templates/api_runway_first_last_frame-1.webp +0 -0
  93. comfyui_workflow_templates/templates/api_runway_first_last_frame.json +0 -224
  94. comfyui_workflow_templates/templates/api_runway_gen3a_turbo_image_to_video-1.webp +0 -0
  95. comfyui_workflow_templates/templates/api_runway_gen3a_turbo_image_to_video.json +0 -175
  96. comfyui_workflow_templates/templates/api_runway_gen4_turo_image_to_video-1.webp +0 -0
  97. comfyui_workflow_templates/templates/api_runway_gen4_turo_image_to_video.json +0 -175
  98. comfyui_workflow_templates/templates/api_runway_reference_to_image-1.webp +0 -0
  99. comfyui_workflow_templates/templates/api_runway_reference_to_image-2.webp +0 -0
  100. comfyui_workflow_templates/templates/api_runway_reference_to_image.json +0 -192
  101. comfyui_workflow_templates/templates/api_runway_text_to_image-1.webp +0 -0
  102. comfyui_workflow_templates/templates/api_runway_text_to_image.json +0 -191
  103. comfyui_workflow_templates/templates/api_stability_ai_i2i-1.webp +0 -0
  104. comfyui_workflow_templates/templates/api_stability_ai_i2i-2.webp +0 -0
  105. comfyui_workflow_templates/templates/api_stability_ai_i2i.json +0 -192
  106. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_i2i-1.webp +0 -0
  107. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_i2i-2.webp +0 -0
  108. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_i2i.json +0 -241
  109. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_t2i-1.webp +0 -0
  110. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_t2i.json +0 -231
  111. comfyui_workflow_templates/templates/api_stability_ai_stable_image_ultra_t2i-1.webp +0 -0
  112. comfyui_workflow_templates/templates/api_stability_ai_stable_image_ultra_t2i.json +0 -176
  113. comfyui_workflow_templates/templates/api_tripo_image_to_model-1.webp +0 -0
  114. comfyui_workflow_templates/templates/api_tripo_image_to_model-2.webp +0 -0
  115. comfyui_workflow_templates/templates/api_tripo_image_to_model.json +0 -523
  116. comfyui_workflow_templates/templates/api_tripo_multiview_to_model-1.webp +0 -0
  117. comfyui_workflow_templates/templates/api_tripo_multiview_to_model-2.webp +0 -0
  118. comfyui_workflow_templates/templates/api_tripo_multiview_to_model.json +0 -933
  119. comfyui_workflow_templates/templates/api_tripo_text_to_model-1.webp +0 -0
  120. comfyui_workflow_templates/templates/api_tripo_text_to_model.json +0 -703
  121. comfyui_workflow_templates/templates/api_veo2_i2v-1.webp +0 -0
  122. comfyui_workflow_templates/templates/api_veo2_i2v.json +0 -176
  123. comfyui_workflow_templates/templates/area_composition-1.webp +0 -0
  124. comfyui_workflow_templates/templates/area_composition.json +0 -966
  125. comfyui_workflow_templates/templates/area_composition_reversed-1.webp +0 -0
  126. comfyui_workflow_templates/templates/area_composition_reversed.json +0 -967
  127. comfyui_workflow_templates/templates/area_composition_square_area_for_subject-1.webp +0 -0
  128. comfyui_workflow_templates/templates/area_composition_square_area_for_subject.json +0 -620
  129. comfyui_workflow_templates/templates/audio_ace_step_1_m2m_editing-1.mp3 +0 -0
  130. comfyui_workflow_templates/templates/audio_ace_step_1_m2m_editing.json +0 -865
  131. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_instrumentals-1.mp3 +0 -0
  132. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_instrumentals.json +0 -841
  133. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_song-1.mp3 +0 -0
  134. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_song.json +0 -841
  135. comfyui_workflow_templates/templates/audio_stable_audio_example-1.mp3 +0 -0
  136. comfyui_workflow_templates/templates/audio_stable_audio_example.json +0 -304
  137. comfyui_workflow_templates/templates/controlnet_example-1.webp +0 -0
  138. comfyui_workflow_templates/templates/controlnet_example-2.webp +0 -0
  139. comfyui_workflow_templates/templates/controlnet_example.json +0 -443
  140. comfyui_workflow_templates/templates/default-1.webp +0 -0
  141. comfyui_workflow_templates/templates/default.json +0 -357
  142. comfyui_workflow_templates/templates/depth_controlnet-1.webp +0 -0
  143. comfyui_workflow_templates/templates/depth_controlnet-2.webp +0 -0
  144. comfyui_workflow_templates/templates/depth_controlnet.json +0 -421
  145. comfyui_workflow_templates/templates/depth_t2i_adapter-1.webp +0 -0
  146. comfyui_workflow_templates/templates/depth_t2i_adapter-2.webp +0 -0
  147. comfyui_workflow_templates/templates/depth_t2i_adapter.json +0 -422
  148. comfyui_workflow_templates/templates/embedding_example-1.webp +0 -0
  149. comfyui_workflow_templates/templates/embedding_example.json +0 -267
  150. comfyui_workflow_templates/templates/esrgan_example-1.webp +0 -0
  151. comfyui_workflow_templates/templates/esrgan_example.json +0 -327
  152. comfyui_workflow_templates/templates/flux_canny_model_example-1.webp +0 -0
  153. comfyui_workflow_templates/templates/flux_canny_model_example-2.webp +0 -0
  154. comfyui_workflow_templates/templates/flux_canny_model_example.json +0 -478
  155. comfyui_workflow_templates/templates/flux_depth_lora_example-1.webp +0 -0
  156. comfyui_workflow_templates/templates/flux_depth_lora_example-2.webp +0 -0
  157. comfyui_workflow_templates/templates/flux_depth_lora_example.json +0 -717
  158. comfyui_workflow_templates/templates/flux_dev_checkpoint_example-1.webp +0 -0
  159. comfyui_workflow_templates/templates/flux_dev_checkpoint_example.json +0 -332
  160. comfyui_workflow_templates/templates/flux_dev_example.json +0 -771
  161. comfyui_workflow_templates/templates/flux_dev_full_text_to_image-1.webp +0 -0
  162. comfyui_workflow_templates/templates/flux_dev_full_text_to_image.json +0 -552
  163. comfyui_workflow_templates/templates/flux_fill_inpaint_example-1.webp +0 -0
  164. comfyui_workflow_templates/templates/flux_fill_inpaint_example-2.webp +0 -0
  165. comfyui_workflow_templates/templates/flux_fill_inpaint_example.json +0 -462
  166. comfyui_workflow_templates/templates/flux_fill_outpaint_example-1.webp +0 -0
  167. comfyui_workflow_templates/templates/flux_fill_outpaint_example-2.webp +0 -0
  168. comfyui_workflow_templates/templates/flux_fill_outpaint_example.json +0 -495
  169. comfyui_workflow_templates/templates/flux_kontext_dev_basic-1.webp +0 -0
  170. comfyui_workflow_templates/templates/flux_kontext_dev_basic-2.webp +0 -0
  171. comfyui_workflow_templates/templates/flux_kontext_dev_basic.json +0 -1138
  172. comfyui_workflow_templates/templates/flux_kontext_dev_grouped-1.webp +0 -0
  173. comfyui_workflow_templates/templates/flux_kontext_dev_grouped-2.webp +0 -0
  174. comfyui_workflow_templates/templates/flux_kontext_dev_grouped.json +0 -2296
  175. comfyui_workflow_templates/templates/flux_redux_model_example-1.webp +0 -0
  176. comfyui_workflow_templates/templates/flux_redux_model_example.json +0 -959
  177. comfyui_workflow_templates/templates/flux_schnell-1.webp +0 -0
  178. comfyui_workflow_templates/templates/flux_schnell.json +0 -302
  179. comfyui_workflow_templates/templates/flux_schnell_full_text_to_image-1.webp +0 -0
  180. comfyui_workflow_templates/templates/flux_schnell_full_text_to_image.json +0 -552
  181. comfyui_workflow_templates/templates/gligen_textbox_example-1.webp +0 -0
  182. comfyui_workflow_templates/templates/gligen_textbox_example.json +0 -626
  183. comfyui_workflow_templates/templates/hidream_e1_full-1.webp +0 -0
  184. comfyui_workflow_templates/templates/hidream_e1_full-2.webp +0 -0
  185. comfyui_workflow_templates/templates/hidream_e1_full.json +0 -1749
  186. comfyui_workflow_templates/templates/hidream_i1_dev-1.webp +0 -0
  187. comfyui_workflow_templates/templates/hidream_i1_dev.json +0 -532
  188. comfyui_workflow_templates/templates/hidream_i1_fast-1.webp +0 -0
  189. comfyui_workflow_templates/templates/hidream_i1_fast.json +0 -532
  190. comfyui_workflow_templates/templates/hidream_i1_full-1.webp +0 -0
  191. comfyui_workflow_templates/templates/hidream_i1_full.json +0 -532
  192. comfyui_workflow_templates/templates/hiresfix_esrgan_workflow-1.webp +0 -0
  193. comfyui_workflow_templates/templates/hiresfix_esrgan_workflow.json +0 -602
  194. comfyui_workflow_templates/templates/hiresfix_latent_workflow-1.webp +0 -0
  195. comfyui_workflow_templates/templates/hiresfix_latent_workflow.json +0 -435
  196. comfyui_workflow_templates/templates/hunyuan_video_text_to_video-1.webp +0 -0
  197. comfyui_workflow_templates/templates/hunyuan_video_text_to_video.json +0 -557
  198. comfyui_workflow_templates/templates/image2image-1.webp +0 -0
  199. comfyui_workflow_templates/templates/image2image.json +0 -321
  200. comfyui_workflow_templates/templates/image_chroma_text_to_image-1.webp +0 -0
  201. comfyui_workflow_templates/templates/image_chroma_text_to_image.json +0 -754
  202. comfyui_workflow_templates/templates/image_cosmos_predict2_2B_t2i-1.webp +0 -0
  203. comfyui_workflow_templates/templates/image_cosmos_predict2_2B_t2i.json +0 -544
  204. comfyui_workflow_templates/templates/image_lotus_depth_v1_1-1.webp +0 -0
  205. comfyui_workflow_templates/templates/image_lotus_depth_v1_1-2.webp +0 -0
  206. comfyui_workflow_templates/templates/image_lotus_depth_v1_1.json +0 -789
  207. comfyui_workflow_templates/templates/image_omnigen2_image_edit-1.webp +0 -0
  208. comfyui_workflow_templates/templates/image_omnigen2_image_edit-2.webp +0 -0
  209. comfyui_workflow_templates/templates/image_omnigen2_image_edit.json +0 -1497
  210. comfyui_workflow_templates/templates/image_omnigen2_t2i-1.webp +0 -0
  211. comfyui_workflow_templates/templates/image_omnigen2_t2i.json +0 -774
  212. comfyui_workflow_templates/templates/image_to_video-1.webp +0 -0
  213. comfyui_workflow_templates/templates/image_to_video.json +0 -314
  214. comfyui_workflow_templates/templates/image_to_video_wan-1.webp +0 -0
  215. comfyui_workflow_templates/templates/image_to_video_wan.json +0 -498
  216. comfyui_workflow_templates/templates/index.json +0 -1064
  217. comfyui_workflow_templates/templates/index.schema.json +0 -79
  218. comfyui_workflow_templates/templates/inpaint_example-1.webp +0 -0
  219. comfyui_workflow_templates/templates/inpaint_example-2.webp +0 -0
  220. comfyui_workflow_templates/templates/inpaint_example.json +0 -333
  221. comfyui_workflow_templates/templates/inpaint_model_outpainting-1.webp +0 -0
  222. comfyui_workflow_templates/templates/inpaint_model_outpainting-2.webp +0 -0
  223. comfyui_workflow_templates/templates/inpaint_model_outpainting.json +0 -375
  224. comfyui_workflow_templates/templates/latent_upscale_different_prompt_model-1.webp +0 -0
  225. comfyui_workflow_templates/templates/latent_upscale_different_prompt_model.json +0 -528
  226. comfyui_workflow_templates/templates/lora-1.webp +0 -0
  227. comfyui_workflow_templates/templates/lora.json +0 -610
  228. comfyui_workflow_templates/templates/lora_multiple-1.webp +0 -0
  229. comfyui_workflow_templates/templates/lora_multiple.json +0 -409
  230. comfyui_workflow_templates/templates/ltxv_image_to_video-1.webp +0 -0
  231. comfyui_workflow_templates/templates/ltxv_image_to_video.json +0 -484
  232. comfyui_workflow_templates/templates/ltxv_text_to_video-1.webp +0 -0
  233. comfyui_workflow_templates/templates/ltxv_text_to_video.json +0 -421
  234. comfyui_workflow_templates/templates/mixing_controlnets-1.webp +0 -0
  235. comfyui_workflow_templates/templates/mixing_controlnets-2.webp +0 -0
  236. comfyui_workflow_templates/templates/mixing_controlnets.json +0 -864
  237. comfyui_workflow_templates/templates/mochi_text_to_video_example-1.webp +0 -0
  238. comfyui_workflow_templates/templates/mochi_text_to_video_example.json +0 -312
  239. comfyui_workflow_templates/templates/sd3.5_large_blur-1.webp +0 -0
  240. comfyui_workflow_templates/templates/sd3.5_large_blur-2.webp +0 -0
  241. comfyui_workflow_templates/templates/sd3.5_large_blur.json +0 -685
  242. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example-1.webp +0 -0
  243. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example-2.webp +0 -0
  244. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example.json +0 -472
  245. comfyui_workflow_templates/templates/sd3.5_large_depth-1.webp +0 -0
  246. comfyui_workflow_templates/templates/sd3.5_large_depth-2.webp +0 -0
  247. comfyui_workflow_templates/templates/sd3.5_large_depth.json +0 -675
  248. comfyui_workflow_templates/templates/sd3.5_simple_example-1.webp +0 -0
  249. comfyui_workflow_templates/templates/sd3.5_simple_example.json +0 -278
  250. comfyui_workflow_templates/templates/sdxl_refiner_prompt_example-1.webp +0 -0
  251. comfyui_workflow_templates/templates/sdxl_refiner_prompt_example.json +0 -758
  252. comfyui_workflow_templates/templates/sdxl_revision_text_prompts-1.webp +0 -0
  253. comfyui_workflow_templates/templates/sdxl_revision_text_prompts.json +0 -492
  254. comfyui_workflow_templates/templates/sdxl_revision_zero_positive-1.webp +0 -0
  255. comfyui_workflow_templates/templates/sdxl_revision_zero_positive.json +0 -496
  256. comfyui_workflow_templates/templates/sdxl_simple_example-1.webp +0 -0
  257. comfyui_workflow_templates/templates/sdxl_simple_example.json +0 -1346
  258. comfyui_workflow_templates/templates/sdxlturbo_example-1.webp +0 -0
  259. comfyui_workflow_templates/templates/sdxlturbo_example.json +0 -372
  260. comfyui_workflow_templates/templates/stable_zero123_example-1.webp +0 -0
  261. comfyui_workflow_templates/templates/stable_zero123_example.json +0 -273
  262. comfyui_workflow_templates/templates/text_to_video_wan-1.webp +0 -0
  263. comfyui_workflow_templates/templates/text_to_video_wan.json +0 -361
  264. comfyui_workflow_templates/templates/txt_to_image_to_video-1.webp +0 -0
  265. comfyui_workflow_templates/templates/txt_to_image_to_video.json +0 -537
  266. comfyui_workflow_templates/templates/video_cosmos_predict2_2B_video2world_480p_16fps-1.webp +0 -0
  267. comfyui_workflow_templates/templates/video_cosmos_predict2_2B_video2world_480p_16fps.json +0 -724
  268. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_1.3B-1.webp +0 -0
  269. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_1.3B.json +0 -1030
  270. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_14B-1.webp +0 -0
  271. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_14B.json +0 -1063
  272. comfyui_workflow_templates/templates/video_wan_vace_14B_ref2v-1.webp +0 -0
  273. comfyui_workflow_templates/templates/video_wan_vace_14B_ref2v.json +0 -2165
  274. comfyui_workflow_templates/templates/video_wan_vace_14B_t2v-1.webp +0 -0
  275. comfyui_workflow_templates/templates/video_wan_vace_14B_t2v.json +0 -1242
  276. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v-1.webp +0 -0
  277. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v-2.webp +0 -0
  278. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v.json +0 -1538
  279. comfyui_workflow_templates/templates/video_wan_vace_flf2v-1.webp +0 -0
  280. comfyui_workflow_templates/templates/video_wan_vace_flf2v.json +0 -2272
  281. comfyui_workflow_templates/templates/video_wan_vace_inpainting-1.webp +0 -0
  282. comfyui_workflow_templates/templates/video_wan_vace_inpainting-2.webp +0 -0
  283. comfyui_workflow_templates/templates/video_wan_vace_inpainting.json +0 -2139
  284. comfyui_workflow_templates/templates/video_wan_vace_outpainting-1.webp +0 -0
  285. comfyui_workflow_templates/templates/video_wan_vace_outpainting-2.webp +0 -0
  286. comfyui_workflow_templates/templates/video_wan_vace_outpainting.json +0 -1913
  287. comfyui_workflow_templates/templates/wan2.1_flf2v_720_f16-1.webp +0 -0
  288. comfyui_workflow_templates/templates/wan2.1_flf2v_720_f16.json +0 -661
  289. comfyui_workflow_templates/templates/wan2.1_fun_control-1.webp +0 -0
  290. comfyui_workflow_templates/templates/wan2.1_fun_control-2.webp +0 -0
  291. comfyui_workflow_templates/templates/wan2.1_fun_control.json +0 -1157
  292. comfyui_workflow_templates/templates/wan2.1_fun_inp-1.webp +0 -0
  293. comfyui_workflow_templates/templates/wan2.1_fun_inp.json +0 -1064
  294. comfyui_workflow_templates-0.1.32.dist-info/METADATA +0 -1059
  295. comfyui_workflow_templates-0.1.32.dist-info/RECORD +0 -296
  296. {comfyui_workflow_templates-0.1.32.dist-info → comfyui_workflow_templates-0.6.0.dist-info}/WHEEL +0 -0
  297. {comfyui_workflow_templates-0.1.32.dist-info → comfyui_workflow_templates-0.6.0.dist-info}/licenses/LICENSE +0 -0
  298. {comfyui_workflow_templates-0.1.32.dist-info → comfyui_workflow_templates-0.6.0.dist-info}/top_level.txt +0 -0
@@ -1,1064 +0,0 @@
1
- [
2
- {
3
- "moduleName": "default",
4
- "title": "Basics",
5
- "type": "image",
6
- "templates": [
7
- {
8
- "name": "default",
9
- "title": "Image Generation",
10
- "mediaType": "image",
11
- "mediaSubtype": "webp",
12
- "description": "Generate images from text prompts."
13
- },
14
- {
15
- "name": "image2image",
16
- "title": "Image to Image",
17
- "mediaType": "image",
18
- "mediaSubtype": "webp",
19
- "description": "Transform existing images using text prompts.",
20
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/img2img/"
21
- },
22
- {
23
- "name": "lora",
24
- "title": "Lora",
25
- "mediaType": "image",
26
- "mediaSubtype": "webp",
27
- "description": "Generate images with LoRA models for specialized styles or subjects.",
28
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
29
- },
30
- {
31
- "name": "lora_multiple",
32
- "title": "Lora Multiple",
33
- "mediaType": "image",
34
- "mediaSubtype": "webp",
35
- "description": "Generate images by combining multiple LoRA models.",
36
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/lora/"
37
- },
38
- {
39
- "name": "inpaint_example",
40
- "title": "Inpaint",
41
- "mediaType": "image",
42
- "mediaSubtype": "webp",
43
- "description": "Edit specific parts of images seamlessly.",
44
- "thumbnailVariant": "compareSlider",
45
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/inpaint/"
46
- },
47
- {
48
- "name": "inpaint_model_outpainting",
49
- "title": "Outpaint",
50
- "mediaType": "image",
51
- "mediaSubtype": "webp",
52
- "description": "Extend images beyond their original boundaries.",
53
- "thumbnailVariant": "compareSlider",
54
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/inpaint/#outpainting"
55
- },
56
- {
57
- "name": "embedding_example",
58
- "title": "Embedding",
59
- "mediaType": "image",
60
- "mediaSubtype": "webp",
61
- "description": "Generate images using textual inversion for consistent styles.",
62
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/"
63
- },
64
- {
65
- "name": "gligen_textbox_example",
66
- "title": "Gligen Textbox",
67
- "mediaType": "image",
68
- "mediaSubtype": "webp",
69
- "description": "Generate images with precise object placement using text boxes.",
70
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/"
71
- }
72
- ]
73
- },
74
- {
75
- "moduleName": "default",
76
- "title": "Flux",
77
- "type": "image",
78
- "templates": [
79
- {
80
- "name": "flux_kontext_dev_basic",
81
- "title": "Flux Kontext Dev(Basic)",
82
- "mediaType": "image",
83
- "mediaSubtype": "webp",
84
- "thumbnailVariant": "hoverDissolve",
85
- "description": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow."
86
- },
87
- {
88
- "name": "flux_kontext_dev_grouped",
89
- "title": "Flux Kontext Dev(Grouped)",
90
- "mediaType": "image",
91
- "mediaSubtype": "webp",
92
- "thumbnailVariant": "hoverDissolve",
93
- "description": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace."
94
- },
95
- {
96
- "name": "flux_dev_checkpoint_example",
97
- "title": "Flux Dev fp8",
98
- "mediaType": "image",
99
- "mediaSubtype": "webp",
100
- "description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
101
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-dev-1"
102
- },
103
- {
104
- "name": "flux_schnell",
105
- "title": "Flux Schnell fp8",
106
- "mediaType": "image",
107
- "mediaSubtype": "webp",
108
- "description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
109
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-schnell-1"
110
- },
111
- {
112
- "name": "flux_dev_full_text_to_image",
113
- "title": "Flux Dev full text to image",
114
- "mediaType": "image",
115
- "mediaSubtype": "webp",
116
- "description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
117
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-dev-1"
118
- },
119
- {
120
- "name": "flux_schnell_full_text_to_image",
121
- "title": "Flux Schnell full text to image",
122
- "mediaType": "image",
123
- "mediaSubtype": "webp",
124
- "description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
125
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-schnell-1"
126
- },
127
- {
128
- "name": "flux_fill_inpaint_example",
129
- "title": "Flux Inpaint",
130
- "mediaType": "image",
131
- "mediaSubtype": "webp",
132
- "description": "Fill missing parts of images using Flux inpainting.",
133
- "thumbnailVariant": "compareSlider",
134
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
135
- },
136
- {
137
- "name": "flux_fill_outpaint_example",
138
- "title": "Flux Outpaint",
139
- "mediaType": "image",
140
- "mediaSubtype": "webp",
141
- "description": "Extend images beyond boundaries using Flux outpainting.",
142
- "thumbnailVariant": "compareSlider",
143
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#fill-inpainting-model"
144
- },
145
- {
146
- "name": "flux_canny_model_example",
147
- "title": "Flux Canny Model",
148
- "mediaType": "image",
149
- "mediaSubtype": "webp",
150
- "description": "Generate images guided by edge detection using Flux Canny.",
151
- "thumbnailVariant": "hoverDissolve",
152
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
153
- },
154
- {
155
- "name": "flux_depth_lora_example",
156
- "title": "Flux Depth Lora",
157
- "mediaType": "image",
158
- "mediaSubtype": "webp",
159
- "description": "Generate images guided by depth information using Flux LoRA.",
160
- "thumbnailVariant": "hoverDissolve",
161
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#canny-and-depth"
162
- },
163
- {
164
- "name": "flux_redux_model_example",
165
- "title": "Flux Redux Model",
166
- "mediaType": "image",
167
- "mediaSubtype": "webp",
168
- "description": "Generate images by transferring style from reference images using Flux Redux.",
169
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/flux/#redux"
170
- }
171
- ]
172
- },
173
- {
174
- "moduleName": "default",
175
- "title": "Image",
176
- "type": "image",
177
- "templates": [
178
- {
179
- "name": "image_omnigen2_t2i",
180
- "title": "OmniGen2 Text to Image",
181
- "mediaType": "image",
182
- "mediaSubtype": "webp",
183
- "description": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
184
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2"
185
- },
186
- {
187
- "name": "image_omnigen2_image_edit",
188
- "title": "OmniGen2 Image Edit",
189
- "mediaType": "image",
190
- "mediaSubtype": "webp",
191
- "thumbnailVariant": "hoverDissolve",
192
- "description": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
193
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2"
194
- },
195
- {
196
- "name": "image_cosmos_predict2_2B_t2i",
197
- "title": "Cosmos Predict2 2B T2I",
198
- "mediaType": "image",
199
- "mediaSubtype": "webp",
200
- "description": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
201
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/cosmos/cosmos-predict2-t2i"
202
- },
203
- {
204
- "name": "image_chroma_text_to_image",
205
- "title": "Chroma text to image",
206
- "mediaType": "image",
207
- "mediaSubtype": "webp",
208
- "description": "Chroma is modified from flux and has some changes in the architecture."
209
- },
210
- {
211
- "name": "hidream_i1_dev",
212
- "title": "HiDream I1 Dev",
213
- "mediaType": "image",
214
- "mediaSubtype": "webp",
215
- "description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware."
216
- },
217
- {
218
- "name": "hidream_i1_fast",
219
- "title": "HiDream I1 Fast",
220
- "mediaType": "image",
221
- "mediaSubtype": "webp",
222
- "description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware."
223
- },
224
- {
225
- "name": "hidream_i1_full",
226
- "title": "HiDream I1 Full",
227
- "mediaType": "image",
228
- "mediaSubtype": "webp",
229
- "description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output."
230
- },
231
- {
232
- "name": "hidream_e1_full",
233
- "title": "HiDream E1 Full",
234
- "mediaType": "image",
235
- "mediaSubtype": "webp",
236
- "thumbnailVariant": "compareSlider",
237
- "description": "Edit images with HiDream E1 - Professional natural language image editing model."
238
- },
239
- {
240
- "name": "sd3.5_simple_example",
241
- "title": "SD3.5 Simple",
242
- "mediaType": "image",
243
- "mediaSubtype": "webp",
244
- "description": "Generate images using SD 3.5.",
245
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35"
246
- },
247
- {
248
- "name": "sd3.5_large_canny_controlnet_example",
249
- "title": "SD3.5 Large Canny ControlNet",
250
- "mediaType": "image",
251
- "mediaSubtype": "webp",
252
- "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
253
- "thumbnailVariant": "hoverDissolve",
254
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
255
- },
256
- {
257
- "name": "sd3.5_large_depth",
258
- "title": "SD3.5 Large Depth",
259
- "mediaType": "image",
260
- "mediaSubtype": "webp",
261
- "description": "Generate images guided by depth information using SD 3.5.",
262
- "thumbnailVariant": "hoverDissolve",
263
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
264
- },
265
- {
266
- "name": "sd3.5_large_blur",
267
- "title": "SD3.5 Large Blur",
268
- "mediaType": "image",
269
- "mediaSubtype": "webp",
270
- "description": "Generate images guided by blurred reference images using SD 3.5.",
271
- "thumbnailVariant": "hoverDissolve",
272
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets"
273
- },
274
- {
275
- "name": "sdxl_simple_example",
276
- "title": "SDXL Simple",
277
- "mediaType": "image",
278
- "mediaSubtype": "webp",
279
- "description": "Generate high-quality images using SDXL.",
280
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
281
- },
282
- {
283
- "name": "sdxl_refiner_prompt_example",
284
- "title": "SDXL Refiner Prompt",
285
- "mediaType": "image",
286
- "mediaSubtype": "webp",
287
- "description": "Enhance SDXL images using refiner models.",
288
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/"
289
- },
290
- {
291
- "name": "sdxl_revision_text_prompts",
292
- "title": "SDXL Revision Text Prompts",
293
- "mediaType": "image",
294
- "mediaSubtype": "webp",
295
- "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
296
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
297
- },
298
- {
299
- "name": "sdxl_revision_zero_positive",
300
- "title": "SDXL Revision Zero Positive",
301
- "mediaType": "image",
302
- "mediaSubtype": "webp",
303
- "description": "Generate images using both text prompts and reference images with SDXL Revision.",
304
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision"
305
- },
306
- {
307
- "name": "sdxlturbo_example",
308
- "title": "SDXL Turbo",
309
- "mediaType": "image",
310
- "mediaSubtype": "webp",
311
- "description": "Generate images in a single step using SDXL Turbo.",
312
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/"
313
- },
314
- {
315
- "name": "image_lotus_depth_v1_1",
316
- "title": "Lotus Depth",
317
- "mediaType": "image",
318
- "mediaSubtype": "webp",
319
- "thumbnailVariant": "compareSlider",
320
- "description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention."
321
- }
322
- ]
323
- },
324
- {
325
- "moduleName": "default",
326
- "title": "Video",
327
- "type": "video",
328
- "templates": [
329
- {
330
- "name": "video_cosmos_predict2_2B_video2world_480p_16fps",
331
- "title": "Cosmos Predict2 2B Video2World 480p 16fps",
332
- "description": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
333
- "mediaType": "image",
334
- "mediaSubtype": "webp",
335
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world"
336
- },
337
- {
338
- "name": "video_wan_vace_14B_t2v",
339
- "title": "Wan VACE Text to Video",
340
- "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
341
- "mediaType": "image",
342
- "mediaSubtype": "webp",
343
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
344
- },{
345
- "name": "video_wan_vace_14B_ref2v",
346
- "title": "Wan VACE Reference to Video",
347
- "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
348
- "mediaType": "image",
349
- "mediaSubtype": "webp",
350
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
351
- },{
352
- "name": "video_wan_vace_14B_v2v",
353
- "title": "Wan VACE Control Video",
354
- "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
355
- "mediaType": "image",
356
- "mediaSubtype": "webp",
357
- "thumbnailVariant": "compareSlider",
358
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
359
- },{
360
- "name": "video_wan_vace_outpainting",
361
- "title": "Wan VACE Outpainting",
362
- "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
363
- "mediaType": "image",
364
- "mediaSubtype": "webp",
365
- "thumbnailVariant": "compareSlider",
366
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
367
- },{
368
- "name": "video_wan_vace_flf2v",
369
- "title": "Wan VACE First-Last Frame",
370
- "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
371
- "mediaType": "image",
372
- "mediaSubtype": "webp",
373
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
374
- },{
375
- "name": "video_wan_vace_inpainting",
376
- "title": "Wan VACE Inpainting",
377
- "description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
378
- "mediaType": "image",
379
- "mediaSubtype": "webp",
380
- "thumbnailVariant": "compareSlider",
381
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace"
382
- },{
383
- "name": "video_wan2.1_fun_camera_v1.1_1.3B",
384
- "title": "Wan 2.1 Fun Camera 1.3B",
385
- "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
386
- "mediaType": "image",
387
- "mediaSubtype": "webp"
388
- },{
389
- "name": "video_wan2.1_fun_camera_v1.1_14B",
390
- "title": "Wan 2.1 Fun Camera 14B",
391
- "description": "Generate high-quality videos with advanced camera control using the full 14B model.",
392
- "mediaType": "image",
393
- "mediaSubtype": "webp"
394
- },
395
- {
396
- "name": "text_to_video_wan",
397
- "title": "Wan 2.1 Text to Video",
398
- "description": "Generate videos from text prompts using Wan 2.1.",
399
- "mediaType": "image",
400
- "mediaSubtype": "webp",
401
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#text-to-video"
402
- },
403
- {
404
- "name": "image_to_video_wan",
405
- "title": "Wan 2.1 Image to Video",
406
- "description": "Generate videos from images using Wan 2.1.",
407
- "mediaType": "image",
408
- "mediaSubtype": "webp",
409
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/wan/#image-to-video"
410
- },
411
- {
412
- "name": "wan2.1_fun_inp",
413
- "title": "Wan 2.1 Inpainting",
414
- "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
415
- "mediaType": "image",
416
- "mediaSubtype": "webp",
417
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp"
418
- },
419
- {
420
- "name": "wan2.1_fun_control",
421
- "title": "Wan 2.1 ControlNet",
422
- "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
423
- "mediaType": "image",
424
- "mediaSubtype": "webp",
425
- "thumbnailVariant": "hoverDissolve",
426
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control"
427
- },
428
- {
429
- "name": "wan2.1_flf2v_720_f16",
430
- "title": "Wan 2.1 FLF2V 720p F16",
431
- "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
432
- "mediaType": "image",
433
- "mediaSubtype": "webp",
434
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf"
435
- },
436
- {
437
- "name": "ltxv_text_to_video",
438
- "title": "LTXV Text to Video",
439
- "mediaType": "image",
440
- "mediaSubtype": "webp",
441
- "description": "Generate videos from text prompts.",
442
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#text-to-video"
443
- },
444
- {
445
- "name": "ltxv_image_to_video",
446
- "title": "LTXV Image to Video",
447
- "mediaType": "image",
448
- "mediaSubtype": "webp",
449
- "description": "Generate videos from still images.",
450
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/ltxv/#image-to-video"
451
- },
452
- {
453
- "name": "mochi_text_to_video_example",
454
- "title": "Mochi Text to Video",
455
- "mediaType": "image",
456
- "mediaSubtype": "webp",
457
- "description": "Generate videos from text prompts using Mochi model.",
458
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/"
459
- },
460
- {
461
- "name": "hunyuan_video_text_to_video",
462
- "title": "Hunyuan Video Text to Video",
463
- "mediaType": "image",
464
- "mediaSubtype": "webp",
465
- "description": "Generate videos from text prompts using Hunyuan model.",
466
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/"
467
- },
468
- {
469
- "name": "image_to_video",
470
- "title": "SVD Image to Video",
471
- "mediaType": "image",
472
- "mediaSubtype": "webp",
473
- "description": "Generate videos from still images.",
474
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
475
- },
476
- {
477
- "name": "txt_to_image_to_video",
478
- "title": "SVD Text to Image to Video",
479
- "mediaType": "image",
480
- "mediaSubtype": "webp",
481
- "description": "Generate videos by first creating images from text prompts.",
482
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video"
483
- }
484
- ]
485
- },
486
- {
487
- "moduleName": "default",
488
- "title": "Image API",
489
- "type": "image",
490
- "templates": [
491
- {
492
- "name": "api_bfl_flux_1_kontext_multiple_images_input",
493
- "title": "BFL Flux.1 Kontext Multiple Image Input",
494
- "description": "Input multiple images and edit them with Flux.1 Kontext.",
495
- "mediaType": "image",
496
- "mediaSubtype": "webp",
497
- "thumbnailVariant": "compareSlider"
498
- },
499
- {
500
- "name": "api_bfl_flux_1_kontext_pro_image",
501
- "title": "BFL Flux.1 Kontext Pro",
502
- "description": "Edit images with Flux.1 Kontext pro image.",
503
- "mediaType": "image",
504
- "mediaSubtype": "webp",
505
- "thumbnailVariant": "compareSlider"
506
- },
507
- {
508
- "name": "api_bfl_flux_1_kontext_max_image",
509
- "title": "BFL Flux.1 Kontext Max",
510
- "description": "Edit images with Flux.1 Kontext max image.",
511
- "mediaType": "image",
512
- "mediaSubtype": "webp",
513
- "thumbnailVariant": "compareSlider"
514
- },
515
- {
516
- "name": "api_bfl_flux_pro_t2i",
517
- "title": "BFL Flux[Pro]: Text to Image",
518
- "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
519
- "mediaType": "image",
520
- "mediaSubtype": "webp"
521
- },
522
- {
523
- "name": "api_luma_photon_i2i",
524
- "title": "Luma Photon: Image to Image",
525
- "description": "Guide image generation using a combination of images and prompt.",
526
- "mediaType": "image",
527
- "mediaSubtype": "webp",
528
- "thumbnailVariant": "compareSlider"
529
- },
530
- {
531
- "name": "api_luma_photon_style_ref",
532
- "title": "Luma Photon: Style Reference",
533
- "description": "Generate images by blending style references with precise control using Luma Photon.",
534
- "mediaType": "image",
535
- "mediaSubtype": "webp",
536
- "thumbnailVariant": "compareSlider"
537
- },
538
- {
539
- "name": "api_recraft_image_gen_with_color_control",
540
- "title": "Recraft: Color Control Image Generation",
541
- "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
542
- "mediaType": "image",
543
- "mediaSubtype": "webp"
544
- },
545
- {
546
- "name": "api_recraft_image_gen_with_style_control",
547
- "title": "Recraft: Style Control Image Generation",
548
- "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
549
- "mediaType": "image",
550
- "mediaSubtype": "webp"
551
- },
552
- {
553
- "name": "api_recraft_vector_gen",
554
- "title": "Recraft: Vector Generation",
555
- "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
556
- "mediaType": "image",
557
- "mediaSubtype": "webp"
558
- },{
559
- "name": "api_runway_text_to_image",
560
- "title": "Runway: Text to Image",
561
- "description": "Generate high-quality images from text prompts using Runway's AI model.",
562
- "mediaType": "image",
563
- "mediaSubtype": "webp"
564
- },
565
- {
566
- "name": "api_runway_reference_to_image",
567
- "title": "Runway: Reference to Image",
568
- "description": "Generate new images based on reference styles and compositions with Runway's AI.",
569
- "mediaType": "image",
570
- "thumbnailVariant": "compareSlider",
571
- "mediaSubtype": "webp"
572
- },
573
- {
574
- "name": "api_stability_ai_stable_image_ultra_t2i",
575
- "title": "Stability AI: Stable Image Ultra Text to Image",
576
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
577
- "mediaType": "image",
578
- "mediaSubtype": "webp"
579
- },
580
- {
581
- "name": "api_stability_ai_i2i",
582
- "title": "Stability AI: Image to Image",
583
- "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
584
- "mediaType": "image",
585
- "thumbnailVariant": "compareSlider",
586
- "mediaSubtype": "webp"
587
- },
588
- {
589
- "name": "api_stability_ai_sd3.5_t2i",
590
- "title": "Stability AI: SD3.5 Text to Image",
591
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
592
- "mediaType": "image",
593
- "mediaSubtype": "webp"
594
- },
595
- {
596
- "name": "api_stability_ai_sd3.5_i2i",
597
- "title": "Stability AI: SD3.5 Image to Image",
598
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
599
- "mediaType": "image",
600
- "thumbnailVariant": "compareSlider",
601
- "mediaSubtype": "webp"
602
- },
603
- {
604
- "name": "api_ideogram_v3_t2i",
605
- "title": "Ideogram V3: Text to Image",
606
- "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
607
- "mediaType": "image",
608
- "mediaSubtype": "webp"
609
- },
610
- {
611
- "name": "api_openai_image_1_t2i",
612
- "title": "OpenAI: GPT-Image-1 Text to Image",
613
- "description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
614
- "mediaType": "image",
615
- "mediaSubtype": "webp",
616
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
617
- },
618
- {
619
- "name": "api_openai_image_1_i2i",
620
- "title": "OpenAI: GPT-Image-1 Image to Image",
621
- "description": "Generate images from input images using OpenAI GPT Image 1 API.",
622
- "mediaType": "image",
623
- "mediaSubtype": "webp",
624
- "thumbnailVariant": "compareSlider",
625
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
626
- },
627
- {
628
- "name": "api_openai_image_1_inpaint",
629
- "title": "OpenAI: GPT-Image-1 Inpaint",
630
- "description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
631
- "mediaType": "image",
632
- "mediaSubtype": "webp",
633
- "thumbnailVariant": "compareSlider",
634
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
635
- },
636
- {
637
- "name": "api_openai_image_1_multi_inputs",
638
- "title": "OpenAI: GPT-Image-1 Multi Inputs",
639
- "description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
640
- "mediaType": "image",
641
- "mediaSubtype": "webp",
642
- "thumbnailVariant": "compareSlider",
643
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
644
- },
645
- {
646
- "name": "api_openai_dall_e_2_t2i",
647
- "title": "OpenAI: Dall-E 2 Text to Image",
648
- "description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
649
- "mediaType": "image",
650
- "mediaSubtype": "webp",
651
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
652
- },
653
- {
654
- "name": "api_openai_dall_e_2_inpaint",
655
- "title": "OpenAI: Dall-E 2 Inpaint",
656
- "description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
657
- "mediaType": "image",
658
- "mediaSubtype": "webp",
659
- "thumbnailVariant": "compareSlider",
660
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
661
- },
662
- {
663
- "name": "api_openai_dall_e_3_t2i",
664
- "title": "OpenAI: Dall-E 3 Text to Image",
665
- "description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
666
- "mediaType": "image",
667
- "mediaSubtype": "webp",
668
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3"
669
- }
670
- ]
671
- },
672
- {
673
- "moduleName": "default",
674
- "title": "Video API",
675
- "type": "video",
676
- "templates": [
677
- {
678
- "name": "api_kling_i2v",
679
- "title": "Kling: Image to Video",
680
- "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
681
- "mediaType": "image",
682
- "mediaSubtype": "webp"
683
- },
684
- {
685
- "name": "api_kling_effects",
686
- "title": "Kling: Video Effects",
687
- "description": "Generate dynamic videos by applying visual effects to images using Kling.",
688
- "mediaType": "image",
689
- "mediaSubtype": "webp"
690
- },
691
- {
692
- "name": "api_kling_flf",
693
- "title": "Kling: FLF2V",
694
- "description": "Generate videos through controlling the first and last frames.",
695
- "mediaType": "image",
696
- "mediaSubtype": "webp"
697
- },
698
- {
699
- "name": "api_luma_i2v",
700
- "title": "Luma: Image to Video",
701
- "description": "Take static images and instantly create magical high quality animations.",
702
- "mediaType": "image",
703
- "mediaSubtype": "webp"
704
- },
705
- {
706
- "name": "api_luma_t2v",
707
- "title": "Luma: Text to Video",
708
- "description": "High-quality videos can be generated using simple prompts.",
709
- "mediaType": "image",
710
- "mediaSubtype": "webp"
711
- },
712
- {
713
- "name": "api_hailuo_minimax_t2v",
714
- "title": "MiniMax: Text to Video",
715
- "description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
716
- "mediaType": "image",
717
- "mediaSubtype": "webp"
718
- },
719
- {
720
- "name": "api_hailuo_minimax_i2v",
721
- "title": "MiniMax: Image to Video",
722
- "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
723
- "mediaType": "image",
724
- "mediaSubtype": "webp"
725
- },
726
- {
727
- "name": "api_pixverse_i2v",
728
- "title": "PixVerse: Image to Video",
729
- "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
730
- "mediaType": "image",
731
- "mediaSubtype": "webp"
732
- },
733
- {
734
- "name": "api_pixverse_template_i2v",
735
- "title": "PixVerse Templates: Image to Video",
736
- "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
737
- "mediaType": "image",
738
- "mediaSubtype": "webp"
739
- },
740
- {
741
- "name": "api_pixverse_t2v",
742
- "title": "PixVerse: Text to Video",
743
- "description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
744
- "mediaType": "image",
745
- "mediaSubtype": "webp"
746
- },
747
- {
748
- "name": "api_runway_gen3a_turbo_image_to_video",
749
- "title": "Runway: Gen3a Turbo Image to Video",
750
- "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
751
- "mediaType": "image",
752
- "mediaSubtype": "webp"
753
- },
754
- {
755
- "name": "api_runway_gen4_turo_image_to_video",
756
- "title": "Runway: Gen4 Turbo Image to Video",
757
- "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
758
- "mediaType": "image",
759
- "mediaSubtype": "webp"
760
- },
761
- {
762
- "name": "api_runway_first_last_frame",
763
- "title": "Runway: First Last Frame to Video",
764
- "description": "Generate smooth video transitions between two keyframes with Runway's precision.",
765
- "mediaType": "image",
766
- "mediaSubtype": "webp"
767
- },
768
- {
769
- "name": "api_pika_i2v",
770
- "title": "Pika: Image to Video",
771
- "description": "Generate smooth animated videos from single static images using Pika AI.",
772
- "mediaType": "image",
773
- "mediaSubtype": "webp"
774
- },
775
- {
776
- "name": "api_pika_scene",
777
- "title": "Pika Scenes: Images to Video",
778
- "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
779
- "mediaType": "image",
780
- "mediaSubtype": "webp"
781
- },
782
- {
783
- "name": "api_veo2_i2v",
784
- "title": "Veo2: Image to Video",
785
- "description": "Generate videos from images using Google Veo2 API.",
786
- "mediaType": "image",
787
- "mediaSubtype": "webp"
788
- }
789
- ]
790
- },
791
- {
792
- "moduleName": "default",
793
- "title": "3D API",
794
- "type": "image",
795
- "templates": [
796
- {
797
- "name": "api_rodin_image_to_model",
798
- "title": "Rodin: Image to Model",
799
- "description": "Generate detailed 3D models from single photos using Rodin AI.",
800
- "mediaType": "image",
801
- "thumbnailVariant": "compareSlider",
802
- "mediaSubtype": "webp"
803
- },
804
- {
805
- "name": "api_rodin_multiview_to_model",
806
- "title": "Rodin: Multiview to Model",
807
- "description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
808
- "mediaType": "image",
809
- "thumbnailVariant": "compareSlider",
810
- "mediaSubtype": "webp"
811
- },
812
- {
813
- "name": "api_tripo_text_to_model",
814
- "title": "Tripo: Text to Model",
815
- "description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
816
- "mediaType": "image",
817
- "mediaSubtype": "webp"
818
- },
819
- {
820
- "name": "api_tripo_image_to_model",
821
- "title": "Tripo: Image to Model",
822
- "description": "Generate professional 3D assets from 2D images using Tripo engine.",
823
- "mediaType": "image",
824
- "thumbnailVariant": "compareSlider",
825
- "mediaSubtype": "webp"
826
- },
827
- {
828
- "name": "api_tripo_multiview_to_model",
829
- "title": "Tripo: Multiview to Model",
830
- "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
831
- "mediaType": "image",
832
- "thumbnailVariant": "compareSlider",
833
- "mediaSubtype": "webp"
834
- }
835
- ]
836
- },
837
- {
838
- "moduleName": "default",
839
- "title": "LLM API",
840
- "type": "image",
841
- "templates": [
842
- {
843
- "name": "api_openai_chat",
844
- "title": "OpenAI: Chat",
845
- "description": "Engage with OpenAI's advanced language models for intelligent conversations.",
846
- "mediaType": "image",
847
- "mediaSubtype": "webp"
848
- },
849
- {
850
- "name": "api_google_gemini",
851
- "title": "Google Gemini: Chat",
852
- "description": "Experience Google's multimodal AI with Gemini's reasoning capabilities.",
853
- "mediaType": "image",
854
- "mediaSubtype": "webp"
855
- }
856
- ]
857
- },
858
- {
859
- "moduleName": "default",
860
- "title": "Upscaling",
861
- "type": "image",
862
- "templates": [
863
- {
864
- "name": "hiresfix_latent_workflow",
865
- "title": "Upscale",
866
- "mediaType": "image",
867
- "mediaSubtype": "webp",
868
- "description": "Upscale images by enhancing quality in latent space.",
869
- "thumbnailVariant": "zoomHover",
870
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/"
871
- },
872
- {
873
- "name": "esrgan_example",
874
- "title": "ESRGAN",
875
- "mediaType": "image",
876
- "mediaSubtype": "webp",
877
- "description": "Upscale images using ESRGAN models to enhance quality.",
878
- "thumbnailVariant": "zoomHover",
879
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/"
880
- },
881
- {
882
- "name": "hiresfix_esrgan_workflow",
883
- "title": "HiresFix ESRGAN Workflow",
884
- "mediaType": "image",
885
- "mediaSubtype": "webp",
886
- "description": "Upscale images using ESRGAN models during intermediate generation steps.",
887
- "thumbnailVariant": "zoomHover",
888
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling"
889
- },
890
- {
891
- "name": "latent_upscale_different_prompt_model",
892
- "title": "Latent Upscale Different Prompt Model",
893
- "mediaType": "image",
894
- "mediaSubtype": "webp",
895
- "description": "Upscale images while changing prompts across generation passes.",
896
- "thumbnailVariant": "zoomHover",
897
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples"
898
- }
899
- ]
900
- },
901
- {
902
- "moduleName": "default",
903
- "title": "ControlNet",
904
- "type": "image",
905
- "templates": [
906
- {
907
- "name": "controlnet_example",
908
- "title": "Scribble ControlNet",
909
- "mediaType": "image",
910
- "mediaSubtype": "webp",
911
- "description": "Generate images guided by scribble reference images using ControlNet.",
912
- "thumbnailVariant": "hoverDissolve",
913
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/"
914
- },
915
- {
916
- "name": "2_pass_pose_worship",
917
- "title": "Pose ControlNet 2 Pass",
918
- "mediaType": "image",
919
- "mediaSubtype": "webp",
920
- "description": "Generate images guided by pose references using ControlNet.",
921
- "thumbnailVariant": "hoverDissolve",
922
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet"
923
- },
924
- {
925
- "name": "depth_controlnet",
926
- "title": "Depth ControlNet",
927
- "mediaType": "image",
928
- "mediaSubtype": "webp",
929
- "description": "Generate images guided by depth information using ControlNet.",
930
- "thumbnailVariant": "hoverDissolve",
931
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
932
- },
933
- {
934
- "name": "depth_t2i_adapter",
935
- "title": "Depth T2I Adapter",
936
- "mediaType": "image",
937
- "mediaSubtype": "webp",
938
- "description": "Generate images guided by depth information using T2I adapter.",
939
- "thumbnailVariant": "hoverDissolve",
940
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
941
- },
942
- {
943
- "name": "mixing_controlnets",
944
- "title": "Mixing ControlNets",
945
- "mediaType": "image",
946
- "mediaSubtype": "webp",
947
- "description": "Generate images by combining multiple ControlNet models.",
948
- "thumbnailVariant": "hoverDissolve",
949
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets"
950
- }
951
- ]
952
- },
953
- {
954
- "moduleName": "default",
955
- "title": "Area Composition",
956
- "type": "image",
957
- "templates": [
958
- {
959
- "name": "area_composition",
960
- "title": "Area Composition",
961
- "mediaType": "image",
962
- "mediaSubtype": "webp",
963
- "description": "Generate images by controlling composition with defined areas.",
964
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
965
- },
966
- {
967
- "name": "area_composition_reversed",
968
- "title": "Area Composition Reversed",
969
- "mediaType": "image",
970
- "mediaSubtype": "webp",
971
- "description": "Generate images using reverse area composition workflow.",
972
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
973
- },
974
- {
975
- "name": "area_composition_square_area_for_subject",
976
- "title": "Area Composition Square Area for Subject",
977
- "mediaType": "image",
978
- "mediaSubtype": "webp",
979
- "description": "Generate images with consistent subject placement using area composition.",
980
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition"
981
- }
982
- ]
983
- },
984
- {
985
- "moduleName": "default",
986
- "title": "3D",
987
- "type": "3d",
988
- "templates": [
989
- {
990
- "name": "3d_hunyuan3d_image_to_model",
991
- "title": "Hunyuan3D 2.0",
992
- "mediaType": "image",
993
- "mediaSubtype": "webp",
994
- "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
995
- "tutorialUrl": ""
996
- },
997
- {
998
- "name": "3d_hunyuan3d_multiview_to_model",
999
- "title": "Hunyuan3D 2.0 MV",
1000
- "mediaType": "image",
1001
- "mediaSubtype": "webp",
1002
- "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
1003
- "tutorialUrl": "",
1004
- "thumbnailVariant": "compareSlider"
1005
- },
1006
- {
1007
- "name": "3d_hunyuan3d_multiview_to_model_turbo",
1008
- "title": "Hunyuan3D 2.0 MV Turbo",
1009
- "mediaType": "image",
1010
- "mediaSubtype": "webp",
1011
- "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
1012
- "tutorialUrl": "",
1013
- "thumbnailVariant": "compareSlider"
1014
- },
1015
- {
1016
- "name": "stable_zero123_example",
1017
- "title": "Stable Zero123",
1018
- "mediaType": "image",
1019
- "mediaSubtype": "webp",
1020
- "description": "Generate 3D views from single images using Stable Zero123.",
1021
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/"
1022
- }
1023
- ]
1024
- },
1025
- {
1026
- "moduleName": "default",
1027
- "title": "Audio",
1028
- "type": "audio",
1029
- "templates": [
1030
- {
1031
- "name": "audio_stable_audio_example",
1032
- "title": "Stable Audio",
1033
- "mediaType": "audio",
1034
- "mediaSubtype": "mp3",
1035
- "description": "Generate audio from text prompts using Stable Audio.",
1036
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/"
1037
- },
1038
- {
1039
- "name": "audio_ace_step_1_t2a_instrumentals",
1040
- "title": "ACE-Step v1 Text to Instrumentals Music",
1041
- "mediaType": "audio",
1042
- "mediaSubtype": "mp3",
1043
- "description": "Generate instrumental music from text prompts using ACE-Step v1.",
1044
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
1045
- },
1046
- {
1047
- "name": "audio_ace_step_1_t2a_song",
1048
- "title": "ACE Step v1 Text to Song",
1049
- "mediaType": "audio",
1050
- "mediaSubtype": "mp3",
1051
- "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
1052
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
1053
- },
1054
- {
1055
- "name": "audio_ace_step_1_m2m_editing",
1056
- "title": "ACE Step v1 M2M Editing",
1057
- "mediaType": "audio",
1058
- "mediaSubtype": "mp3",
1059
- "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
1060
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
1061
- }
1062
- ]
1063
- }
1064
- ]