comfyui-workflow-templates 0.1.59__py3-none-any.whl → 0.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (339) hide show
  1. comfyui_workflow_templates/__init__.py +32 -11
  2. comfyui_workflow_templates-0.7.8.dist-info/METADATA +434 -0
  3. comfyui_workflow_templates-0.7.8.dist-info/RECORD +6 -0
  4. comfyui_workflow_templates/templates/2_pass_pose_worship-1.webp +0 -0
  5. comfyui_workflow_templates/templates/2_pass_pose_worship-2.webp +0 -0
  6. comfyui_workflow_templates/templates/2_pass_pose_worship.json +0 -844
  7. comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model-1.webp +0 -0
  8. comfyui_workflow_templates/templates/3d_hunyuan3d_image_to_model.json +0 -786
  9. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model-1.webp +0 -0
  10. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model-2.webp +0 -0
  11. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model.json +0 -1103
  12. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo-1.webp +0 -0
  13. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo-2.webp +0 -0
  14. comfyui_workflow_templates/templates/3d_hunyuan3d_multiview_to_model_turbo.json +0 -1151
  15. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_max_image-1.webp +0 -0
  16. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_max_image-2.webp +0 -0
  17. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_max_image.json +0 -343
  18. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_multiple_images_input-1.webp +0 -0
  19. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_multiple_images_input-2.webp +0 -0
  20. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_multiple_images_input.json +0 -470
  21. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_pro_image-1.webp +0 -0
  22. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_pro_image-2.webp +0 -0
  23. comfyui_workflow_templates/templates/api_bfl_flux_1_kontext_pro_image.json +0 -343
  24. comfyui_workflow_templates/templates/api_bfl_flux_pro_t2i-1.webp +0 -0
  25. comfyui_workflow_templates/templates/api_bfl_flux_pro_t2i.json +0 -219
  26. comfyui_workflow_templates/templates/api_google_gemini-1.webp +0 -0
  27. comfyui_workflow_templates/templates/api_google_gemini.json +0 -597
  28. comfyui_workflow_templates/templates/api_hailuo_minimax_i2v-1.webp +0 -0
  29. comfyui_workflow_templates/templates/api_hailuo_minimax_i2v.json +0 -164
  30. comfyui_workflow_templates/templates/api_hailuo_minimax_t2v-1.webp +0 -0
  31. comfyui_workflow_templates/templates/api_hailuo_minimax_t2v.json +0 -138
  32. comfyui_workflow_templates/templates/api_ideogram_v3_t2i-1.webp +0 -0
  33. comfyui_workflow_templates/templates/api_ideogram_v3_t2i.json +0 -212
  34. comfyui_workflow_templates/templates/api_kling_effects-1.webp +0 -0
  35. comfyui_workflow_templates/templates/api_kling_effects.json +0 -188
  36. comfyui_workflow_templates/templates/api_kling_flf-1.webp +0 -0
  37. comfyui_workflow_templates/templates/api_kling_flf.json +0 -246
  38. comfyui_workflow_templates/templates/api_kling_i2v-1.webp +0 -0
  39. comfyui_workflow_templates/templates/api_kling_i2v.json +0 -184
  40. comfyui_workflow_templates/templates/api_luma_i2v-1.webp +0 -0
  41. comfyui_workflow_templates/templates/api_luma_i2v.json +0 -351
  42. comfyui_workflow_templates/templates/api_luma_photon_i2i-1.webp +0 -0
  43. comfyui_workflow_templates/templates/api_luma_photon_i2i-2.webp +0 -0
  44. comfyui_workflow_templates/templates/api_luma_photon_i2i.json +0 -185
  45. comfyui_workflow_templates/templates/api_luma_photon_style_ref-1.webp +0 -0
  46. comfyui_workflow_templates/templates/api_luma_photon_style_ref-2.webp +0 -0
  47. comfyui_workflow_templates/templates/api_luma_photon_style_ref.json +0 -974
  48. comfyui_workflow_templates/templates/api_luma_t2v-1.webp +0 -0
  49. comfyui_workflow_templates/templates/api_luma_t2v.json +0 -246
  50. comfyui_workflow_templates/templates/api_moonvalley_image_to_video-1.webp +0 -0
  51. comfyui_workflow_templates/templates/api_moonvalley_image_to_video.json +0 -177
  52. comfyui_workflow_templates/templates/api_moonvalley_text_to_video-1.webp +0 -0
  53. comfyui_workflow_templates/templates/api_moonvalley_text_to_video.json +0 -125
  54. comfyui_workflow_templates/templates/api_moonvalley_video_to_video_motion_transfer-1.webp +0 -0
  55. comfyui_workflow_templates/templates/api_moonvalley_video_to_video_motion_transfer-2.webp +0 -0
  56. comfyui_workflow_templates/templates/api_moonvalley_video_to_video_motion_transfer.json +0 -189
  57. comfyui_workflow_templates/templates/api_moonvalley_video_to_video_pose_control-1.webp +0 -0
  58. comfyui_workflow_templates/templates/api_moonvalley_video_to_video_pose_control-2.webp +0 -0
  59. comfyui_workflow_templates/templates/api_moonvalley_video_to_video_pose_control.json +0 -185
  60. comfyui_workflow_templates/templates/api_openai_chat-1.webp +0 -0
  61. comfyui_workflow_templates/templates/api_openai_chat.json +0 -585
  62. comfyui_workflow_templates/templates/api_openai_dall_e_2_inpaint-1.webp +0 -0
  63. comfyui_workflow_templates/templates/api_openai_dall_e_2_inpaint-2.webp +0 -0
  64. comfyui_workflow_templates/templates/api_openai_dall_e_2_inpaint.json +0 -215
  65. comfyui_workflow_templates/templates/api_openai_dall_e_2_t2i-1.webp +0 -0
  66. comfyui_workflow_templates/templates/api_openai_dall_e_2_t2i.json +0 -160
  67. comfyui_workflow_templates/templates/api_openai_dall_e_3_t2i-1.webp +0 -0
  68. comfyui_workflow_templates/templates/api_openai_dall_e_3_t2i.json +0 -148
  69. comfyui_workflow_templates/templates/api_openai_image_1_i2i-1.webp +0 -0
  70. comfyui_workflow_templates/templates/api_openai_image_1_i2i-2.webp +0 -0
  71. comfyui_workflow_templates/templates/api_openai_image_1_i2i.json +0 -207
  72. comfyui_workflow_templates/templates/api_openai_image_1_inpaint-1.webp +0 -0
  73. comfyui_workflow_templates/templates/api_openai_image_1_inpaint-2.webp +0 -0
  74. comfyui_workflow_templates/templates/api_openai_image_1_inpaint.json +0 -217
  75. comfyui_workflow_templates/templates/api_openai_image_1_multi_inputs-1.webp +0 -0
  76. comfyui_workflow_templates/templates/api_openai_image_1_multi_inputs-2.webp +0 -0
  77. comfyui_workflow_templates/templates/api_openai_image_1_multi_inputs.json +0 -302
  78. comfyui_workflow_templates/templates/api_openai_image_1_t2i-1.webp +0 -0
  79. comfyui_workflow_templates/templates/api_openai_image_1_t2i.json +0 -217
  80. comfyui_workflow_templates/templates/api_pika_i2v-1.webp +0 -0
  81. comfyui_workflow_templates/templates/api_pika_i2v.json +0 -199
  82. comfyui_workflow_templates/templates/api_pika_scene-1.webp +0 -0
  83. comfyui_workflow_templates/templates/api_pika_scene.json +0 -328
  84. comfyui_workflow_templates/templates/api_pixverse_i2v-1.webp +0 -0
  85. comfyui_workflow_templates/templates/api_pixverse_i2v.json +0 -241
  86. comfyui_workflow_templates/templates/api_pixverse_t2v-1.webp +0 -0
  87. comfyui_workflow_templates/templates/api_pixverse_t2v.json +0 -168
  88. comfyui_workflow_templates/templates/api_pixverse_template_i2v-1.webp +0 -0
  89. comfyui_workflow_templates/templates/api_pixverse_template_i2v.json +0 -217
  90. comfyui_workflow_templates/templates/api_recraft_image_gen_with_color_control-1.webp +0 -0
  91. comfyui_workflow_templates/templates/api_recraft_image_gen_with_color_control.json +0 -585
  92. comfyui_workflow_templates/templates/api_recraft_image_gen_with_style_control-1.webp +0 -0
  93. comfyui_workflow_templates/templates/api_recraft_image_gen_with_style_control.json +0 -572
  94. comfyui_workflow_templates/templates/api_recraft_vector_gen-1.webp +0 -0
  95. comfyui_workflow_templates/templates/api_recraft_vector_gen.json +0 -395
  96. comfyui_workflow_templates/templates/api_rodin_image_to_model-1.webp +0 -0
  97. comfyui_workflow_templates/templates/api_rodin_image_to_model-2.webp +0 -0
  98. comfyui_workflow_templates/templates/api_rodin_image_to_model.json +0 -1018
  99. comfyui_workflow_templates/templates/api_rodin_multiview_to_model-1.webp +0 -0
  100. comfyui_workflow_templates/templates/api_rodin_multiview_to_model-2.webp +0 -0
  101. comfyui_workflow_templates/templates/api_rodin_multiview_to_model.json +0 -768
  102. comfyui_workflow_templates/templates/api_runway_first_last_frame-1.webp +0 -0
  103. comfyui_workflow_templates/templates/api_runway_first_last_frame.json +0 -224
  104. comfyui_workflow_templates/templates/api_runway_gen3a_turbo_image_to_video-1.webp +0 -0
  105. comfyui_workflow_templates/templates/api_runway_gen3a_turbo_image_to_video.json +0 -175
  106. comfyui_workflow_templates/templates/api_runway_gen4_turo_image_to_video-1.webp +0 -0
  107. comfyui_workflow_templates/templates/api_runway_gen4_turo_image_to_video.json +0 -175
  108. comfyui_workflow_templates/templates/api_runway_reference_to_image-1.webp +0 -0
  109. comfyui_workflow_templates/templates/api_runway_reference_to_image-2.webp +0 -0
  110. comfyui_workflow_templates/templates/api_runway_reference_to_image.json +0 -192
  111. comfyui_workflow_templates/templates/api_runway_text_to_image-1.webp +0 -0
  112. comfyui_workflow_templates/templates/api_runway_text_to_image.json +0 -191
  113. comfyui_workflow_templates/templates/api_stability_ai_i2i-1.webp +0 -0
  114. comfyui_workflow_templates/templates/api_stability_ai_i2i-2.webp +0 -0
  115. comfyui_workflow_templates/templates/api_stability_ai_i2i.json +0 -192
  116. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_i2i-1.webp +0 -0
  117. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_i2i-2.webp +0 -0
  118. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_i2i.json +0 -241
  119. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_t2i-1.webp +0 -0
  120. comfyui_workflow_templates/templates/api_stability_ai_sd3.5_t2i.json +0 -231
  121. comfyui_workflow_templates/templates/api_stability_ai_stable_image_ultra_t2i-1.webp +0 -0
  122. comfyui_workflow_templates/templates/api_stability_ai_stable_image_ultra_t2i.json +0 -176
  123. comfyui_workflow_templates/templates/api_tripo_image_to_model-1.webp +0 -0
  124. comfyui_workflow_templates/templates/api_tripo_image_to_model-2.webp +0 -0
  125. comfyui_workflow_templates/templates/api_tripo_image_to_model.json +0 -523
  126. comfyui_workflow_templates/templates/api_tripo_multiview_to_model-1.webp +0 -0
  127. comfyui_workflow_templates/templates/api_tripo_multiview_to_model-2.webp +0 -0
  128. comfyui_workflow_templates/templates/api_tripo_multiview_to_model.json +0 -933
  129. comfyui_workflow_templates/templates/api_tripo_text_to_model-1.webp +0 -0
  130. comfyui_workflow_templates/templates/api_tripo_text_to_model.json +0 -703
  131. comfyui_workflow_templates/templates/api_veo2_i2v-1.webp +0 -0
  132. comfyui_workflow_templates/templates/api_veo2_i2v.json +0 -176
  133. comfyui_workflow_templates/templates/area_composition-1.webp +0 -0
  134. comfyui_workflow_templates/templates/area_composition.json +0 -1605
  135. comfyui_workflow_templates/templates/area_composition_square_area_for_subject-1.webp +0 -0
  136. comfyui_workflow_templates/templates/area_composition_square_area_for_subject.json +0 -1113
  137. comfyui_workflow_templates/templates/audio_ace_step_1_m2m_editing-1.mp3 +0 -0
  138. comfyui_workflow_templates/templates/audio_ace_step_1_m2m_editing.json +0 -865
  139. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_instrumentals-1.mp3 +0 -0
  140. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_instrumentals.json +0 -841
  141. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_song-1.mp3 +0 -0
  142. comfyui_workflow_templates/templates/audio_ace_step_1_t2a_song.json +0 -841
  143. comfyui_workflow_templates/templates/audio_stable_audio_example-1.mp3 +0 -0
  144. comfyui_workflow_templates/templates/audio_stable_audio_example.json +0 -457
  145. comfyui_workflow_templates/templates/controlnet_example-1.webp +0 -0
  146. comfyui_workflow_templates/templates/controlnet_example-2.webp +0 -0
  147. comfyui_workflow_templates/templates/controlnet_example.json +0 -860
  148. comfyui_workflow_templates/templates/default-1.webp +0 -0
  149. comfyui_workflow_templates/templates/default.json +0 -357
  150. comfyui_workflow_templates/templates/depth_controlnet-1.webp +0 -0
  151. comfyui_workflow_templates/templates/depth_controlnet-2.webp +0 -0
  152. comfyui_workflow_templates/templates/depth_controlnet.json +0 -421
  153. comfyui_workflow_templates/templates/depth_t2i_adapter-1.webp +0 -0
  154. comfyui_workflow_templates/templates/depth_t2i_adapter-2.webp +0 -0
  155. comfyui_workflow_templates/templates/depth_t2i_adapter.json +0 -422
  156. comfyui_workflow_templates/templates/embedding_example-1.webp +0 -0
  157. comfyui_workflow_templates/templates/embedding_example.json +0 -267
  158. comfyui_workflow_templates/templates/esrgan_example-1.webp +0 -0
  159. comfyui_workflow_templates/templates/esrgan_example-2.webp +0 -0
  160. comfyui_workflow_templates/templates/esrgan_example.json +0 -641
  161. comfyui_workflow_templates/templates/flux1_krea_dev-1.webp +0 -0
  162. comfyui_workflow_templates/templates/flux1_krea_dev.json +0 -543
  163. comfyui_workflow_templates/templates/flux_canny_model_example-1.webp +0 -0
  164. comfyui_workflow_templates/templates/flux_canny_model_example-2.webp +0 -0
  165. comfyui_workflow_templates/templates/flux_canny_model_example.json +0 -478
  166. comfyui_workflow_templates/templates/flux_depth_lora_example-1.webp +0 -0
  167. comfyui_workflow_templates/templates/flux_depth_lora_example-2.webp +0 -0
  168. comfyui_workflow_templates/templates/flux_depth_lora_example.json +0 -717
  169. comfyui_workflow_templates/templates/flux_dev_checkpoint_example-1.webp +0 -0
  170. comfyui_workflow_templates/templates/flux_dev_checkpoint_example.json +0 -332
  171. comfyui_workflow_templates/templates/flux_dev_full_text_to_image-1.webp +0 -0
  172. comfyui_workflow_templates/templates/flux_dev_full_text_to_image.json +0 -552
  173. comfyui_workflow_templates/templates/flux_fill_inpaint_example-1.webp +0 -0
  174. comfyui_workflow_templates/templates/flux_fill_inpaint_example-2.webp +0 -0
  175. comfyui_workflow_templates/templates/flux_fill_inpaint_example.json +0 -462
  176. comfyui_workflow_templates/templates/flux_fill_outpaint_example-1.webp +0 -0
  177. comfyui_workflow_templates/templates/flux_fill_outpaint_example-2.webp +0 -0
  178. comfyui_workflow_templates/templates/flux_fill_outpaint_example.json +0 -495
  179. comfyui_workflow_templates/templates/flux_kontext_dev_basic-1.webp +0 -0
  180. comfyui_workflow_templates/templates/flux_kontext_dev_basic-2.webp +0 -0
  181. comfyui_workflow_templates/templates/flux_kontext_dev_basic.json +0 -1138
  182. comfyui_workflow_templates/templates/flux_kontext_dev_grouped-1.webp +0 -0
  183. comfyui_workflow_templates/templates/flux_kontext_dev_grouped-2.webp +0 -0
  184. comfyui_workflow_templates/templates/flux_kontext_dev_grouped.json +0 -1456
  185. comfyui_workflow_templates/templates/flux_redux_model_example-1.webp +0 -0
  186. comfyui_workflow_templates/templates/flux_redux_model_example.json +0 -1454
  187. comfyui_workflow_templates/templates/flux_schnell-1.webp +0 -0
  188. comfyui_workflow_templates/templates/flux_schnell.json +0 -302
  189. comfyui_workflow_templates/templates/flux_schnell_full_text_to_image-1.webp +0 -0
  190. comfyui_workflow_templates/templates/flux_schnell_full_text_to_image.json +0 -552
  191. comfyui_workflow_templates/templates/gligen_textbox_example-1.webp +0 -0
  192. comfyui_workflow_templates/templates/gligen_textbox_example.json +0 -626
  193. comfyui_workflow_templates/templates/hidream_e1_1-1.webp +0 -0
  194. comfyui_workflow_templates/templates/hidream_e1_1-2.webp +0 -0
  195. comfyui_workflow_templates/templates/hidream_e1_1.json +0 -1163
  196. comfyui_workflow_templates/templates/hidream_e1_full-1.webp +0 -0
  197. comfyui_workflow_templates/templates/hidream_e1_full-2.webp +0 -0
  198. comfyui_workflow_templates/templates/hidream_e1_full.json +0 -1027
  199. comfyui_workflow_templates/templates/hidream_i1_dev-1.webp +0 -0
  200. comfyui_workflow_templates/templates/hidream_i1_dev.json +0 -532
  201. comfyui_workflow_templates/templates/hidream_i1_fast-1.webp +0 -0
  202. comfyui_workflow_templates/templates/hidream_i1_fast.json +0 -532
  203. comfyui_workflow_templates/templates/hidream_i1_full-1.webp +0 -0
  204. comfyui_workflow_templates/templates/hidream_i1_full.json +0 -532
  205. comfyui_workflow_templates/templates/hiresfix_esrgan_workflow-1.webp +0 -0
  206. comfyui_workflow_templates/templates/hiresfix_esrgan_workflow-2.webp +0 -0
  207. comfyui_workflow_templates/templates/hiresfix_esrgan_workflow.json +0 -1035
  208. comfyui_workflow_templates/templates/hiresfix_latent_workflow-1.webp +0 -0
  209. comfyui_workflow_templates/templates/hiresfix_latent_workflow-2.webp +0 -0
  210. comfyui_workflow_templates/templates/hiresfix_latent_workflow.json +0 -776
  211. comfyui_workflow_templates/templates/hunyuan_video_text_to_video-1.webp +0 -0
  212. comfyui_workflow_templates/templates/hunyuan_video_text_to_video.json +0 -934
  213. comfyui_workflow_templates/templates/image2image-1.webp +0 -0
  214. comfyui_workflow_templates/templates/image2image.json +0 -631
  215. comfyui_workflow_templates/templates/image_chroma_text_to_image-1.webp +0 -0
  216. comfyui_workflow_templates/templates/image_chroma_text_to_image.json +0 -754
  217. comfyui_workflow_templates/templates/image_cosmos_predict2_2B_t2i-1.webp +0 -0
  218. comfyui_workflow_templates/templates/image_cosmos_predict2_2B_t2i.json +0 -544
  219. comfyui_workflow_templates/templates/image_lotus_depth_v1_1-1.webp +0 -0
  220. comfyui_workflow_templates/templates/image_lotus_depth_v1_1-2.webp +0 -0
  221. comfyui_workflow_templates/templates/image_lotus_depth_v1_1.json +0 -802
  222. comfyui_workflow_templates/templates/image_omnigen2_image_edit-1.webp +0 -0
  223. comfyui_workflow_templates/templates/image_omnigen2_image_edit-2.webp +0 -0
  224. comfyui_workflow_templates/templates/image_omnigen2_image_edit.json +0 -1497
  225. comfyui_workflow_templates/templates/image_omnigen2_t2i-1.webp +0 -0
  226. comfyui_workflow_templates/templates/image_omnigen2_t2i.json +0 -774
  227. comfyui_workflow_templates/templates/image_qwen_image-1.webp +0 -0
  228. comfyui_workflow_templates/templates/image_qwen_image.json +0 -844
  229. comfyui_workflow_templates/templates/image_to_video-1.webp +0 -0
  230. comfyui_workflow_templates/templates/image_to_video.json +0 -543
  231. comfyui_workflow_templates/templates/image_to_video_wan-1.webp +0 -0
  232. comfyui_workflow_templates/templates/image_to_video_wan.json +0 -837
  233. comfyui_workflow_templates/templates/index.es.json +0 -2357
  234. comfyui_workflow_templates/templates/index.fr.json +0 -2357
  235. comfyui_workflow_templates/templates/index.ja.json +0 -2357
  236. comfyui_workflow_templates/templates/index.json +0 -1639
  237. comfyui_workflow_templates/templates/index.ko.json +0 -2357
  238. comfyui_workflow_templates/templates/index.ru.json +0 -2357
  239. comfyui_workflow_templates/templates/index.schema.json +0 -102
  240. comfyui_workflow_templates/templates/index.zh-TW.json +0 -2357
  241. comfyui_workflow_templates/templates/index.zh.json +0 -2357
  242. comfyui_workflow_templates/templates/inpaint_example-1.webp +0 -0
  243. comfyui_workflow_templates/templates/inpaint_example-2.webp +0 -0
  244. comfyui_workflow_templates/templates/inpaint_example.json +0 -649
  245. comfyui_workflow_templates/templates/inpaint_model_outpainting-1.webp +0 -0
  246. comfyui_workflow_templates/templates/inpaint_model_outpainting-2.webp +0 -0
  247. comfyui_workflow_templates/templates/inpaint_model_outpainting.json +0 -707
  248. comfyui_workflow_templates/templates/latent_upscale_different_prompt_model-1.webp +0 -0
  249. comfyui_workflow_templates/templates/latent_upscale_different_prompt_model.json +0 -935
  250. comfyui_workflow_templates/templates/lora-1.webp +0 -0
  251. comfyui_workflow_templates/templates/lora.json +0 -610
  252. comfyui_workflow_templates/templates/lora_multiple-1.webp +0 -0
  253. comfyui_workflow_templates/templates/lora_multiple.json +0 -409
  254. comfyui_workflow_templates/templates/ltxv_image_to_video-1.webp +0 -0
  255. comfyui_workflow_templates/templates/ltxv_image_to_video.json +0 -886
  256. comfyui_workflow_templates/templates/ltxv_text_to_video-1.webp +0 -0
  257. comfyui_workflow_templates/templates/ltxv_text_to_video.json +0 -765
  258. comfyui_workflow_templates/templates/mixing_controlnets-1.webp +0 -0
  259. comfyui_workflow_templates/templates/mixing_controlnets-2.webp +0 -0
  260. comfyui_workflow_templates/templates/mixing_controlnets.json +0 -864
  261. comfyui_workflow_templates/templates/mochi_text_to_video_example-1.webp +0 -0
  262. comfyui_workflow_templates/templates/mochi_text_to_video_example.json +0 -575
  263. comfyui_workflow_templates/templates/sd3.5_large_blur-1.webp +0 -0
  264. comfyui_workflow_templates/templates/sd3.5_large_blur-2.webp +0 -0
  265. comfyui_workflow_templates/templates/sd3.5_large_blur.json +0 -685
  266. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example-1.webp +0 -0
  267. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example-2.webp +0 -0
  268. comfyui_workflow_templates/templates/sd3.5_large_canny_controlnet_example.json +0 -472
  269. comfyui_workflow_templates/templates/sd3.5_large_depth-1.webp +0 -0
  270. comfyui_workflow_templates/templates/sd3.5_large_depth-2.webp +0 -0
  271. comfyui_workflow_templates/templates/sd3.5_large_depth.json +0 -675
  272. comfyui_workflow_templates/templates/sd3.5_simple_example-1.webp +0 -0
  273. comfyui_workflow_templates/templates/sd3.5_simple_example.json +0 -278
  274. comfyui_workflow_templates/templates/sdxl_refiner_prompt_example-1.webp +0 -0
  275. comfyui_workflow_templates/templates/sdxl_refiner_prompt_example.json +0 -758
  276. comfyui_workflow_templates/templates/sdxl_revision_text_prompts-1.webp +0 -0
  277. comfyui_workflow_templates/templates/sdxl_revision_text_prompts.json +0 -492
  278. comfyui_workflow_templates/templates/sdxl_revision_zero_positive-1.webp +0 -0
  279. comfyui_workflow_templates/templates/sdxl_revision_zero_positive.json +0 -496
  280. comfyui_workflow_templates/templates/sdxl_simple_example-1.webp +0 -0
  281. comfyui_workflow_templates/templates/sdxl_simple_example.json +0 -1346
  282. comfyui_workflow_templates/templates/sdxlturbo_example-1.webp +0 -0
  283. comfyui_workflow_templates/templates/sdxlturbo_example.json +0 -372
  284. comfyui_workflow_templates/templates/stable_zero123_example-1.webp +0 -0
  285. comfyui_workflow_templates/templates/stable_zero123_example.json +0 -273
  286. comfyui_workflow_templates/templates/text_to_video_wan-1.webp +0 -0
  287. comfyui_workflow_templates/templates/text_to_video_wan.json +0 -601
  288. comfyui_workflow_templates/templates/txt_to_image_to_video-1.webp +0 -0
  289. comfyui_workflow_templates/templates/txt_to_image_to_video.json +0 -870
  290. comfyui_workflow_templates/templates/video_cosmos_predict2_2B_video2world_480p_16fps-1.webp +0 -0
  291. comfyui_workflow_templates/templates/video_cosmos_predict2_2B_video2world_480p_16fps.json +0 -724
  292. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_1.3B-1.webp +0 -0
  293. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_1.3B.json +0 -1030
  294. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_14B-1.webp +0 -0
  295. comfyui_workflow_templates/templates/video_wan2.1_fun_camera_v1.1_14B.json +0 -1063
  296. comfyui_workflow_templates/templates/video_wan2_2_14B_flf2v-1.webp +0 -0
  297. comfyui_workflow_templates/templates/video_wan2_2_14B_flf2v-2.webp +0 -0
  298. comfyui_workflow_templates/templates/video_wan2_2_14B_flf2v.json +0 -2629
  299. comfyui_workflow_templates/templates/video_wan2_2_14B_fun_control-1.webp +0 -0
  300. comfyui_workflow_templates/templates/video_wan2_2_14B_fun_control.json +0 -2837
  301. comfyui_workflow_templates/templates/video_wan2_2_14B_fun_inpaint-1.webp +0 -0
  302. comfyui_workflow_templates/templates/video_wan2_2_14B_fun_inpaint.json +0 -2549
  303. comfyui_workflow_templates/templates/video_wan2_2_14B_i2v-1.webp +0 -0
  304. comfyui_workflow_templates/templates/video_wan2_2_14B_i2v-2.webp +0 -0
  305. comfyui_workflow_templates/templates/video_wan2_2_14B_i2v.json +0 -2114
  306. comfyui_workflow_templates/templates/video_wan2_2_14B_t2v-1.webp +0 -0
  307. comfyui_workflow_templates/templates/video_wan2_2_14B_t2v.json +0 -1852
  308. comfyui_workflow_templates/templates/video_wan2_2_5B_ti2v-1.webp +0 -0
  309. comfyui_workflow_templates/templates/video_wan2_2_5B_ti2v.json +0 -733
  310. comfyui_workflow_templates/templates/video_wan_ati-1.webp +0 -0
  311. comfyui_workflow_templates/templates/video_wan_ati-2.webp +0 -0
  312. comfyui_workflow_templates/templates/video_wan_ati.json +0 -1070
  313. comfyui_workflow_templates/templates/video_wan_vace_14B_ref2v-1.webp +0 -0
  314. comfyui_workflow_templates/templates/video_wan_vace_14B_ref2v.json +0 -2165
  315. comfyui_workflow_templates/templates/video_wan_vace_14B_t2v-1.webp +0 -0
  316. comfyui_workflow_templates/templates/video_wan_vace_14B_t2v.json +0 -1242
  317. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v-1.webp +0 -0
  318. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v-2.webp +0 -0
  319. comfyui_workflow_templates/templates/video_wan_vace_14B_v2v.json +0 -1538
  320. comfyui_workflow_templates/templates/video_wan_vace_flf2v-1.webp +0 -0
  321. comfyui_workflow_templates/templates/video_wan_vace_flf2v.json +0 -2272
  322. comfyui_workflow_templates/templates/video_wan_vace_inpainting-1.webp +0 -0
  323. comfyui_workflow_templates/templates/video_wan_vace_inpainting-2.webp +0 -0
  324. comfyui_workflow_templates/templates/video_wan_vace_inpainting.json +0 -2113
  325. comfyui_workflow_templates/templates/video_wan_vace_outpainting-1.webp +0 -0
  326. comfyui_workflow_templates/templates/video_wan_vace_outpainting-2.webp +0 -0
  327. comfyui_workflow_templates/templates/video_wan_vace_outpainting.json +0 -1913
  328. comfyui_workflow_templates/templates/wan2.1_flf2v_720_f16-1.webp +0 -0
  329. comfyui_workflow_templates/templates/wan2.1_flf2v_720_f16.json +0 -1046
  330. comfyui_workflow_templates/templates/wan2.1_fun_control-1.webp +0 -0
  331. comfyui_workflow_templates/templates/wan2.1_fun_control-2.webp +0 -0
  332. comfyui_workflow_templates/templates/wan2.1_fun_control.json +0 -1215
  333. comfyui_workflow_templates/templates/wan2.1_fun_inp-1.webp +0 -0
  334. comfyui_workflow_templates/templates/wan2.1_fun_inp.json +0 -1116
  335. comfyui_workflow_templates-0.1.59.dist-info/METADATA +0 -1060
  336. comfyui_workflow_templates-0.1.59.dist-info/RECORD +0 -337
  337. {comfyui_workflow_templates-0.1.59.dist-info → comfyui_workflow_templates-0.7.8.dist-info}/WHEEL +0 -0
  338. {comfyui_workflow_templates-0.1.59.dist-info → comfyui_workflow_templates-0.7.8.dist-info}/licenses/LICENSE +0 -0
  339. {comfyui_workflow_templates-0.1.59.dist-info → comfyui_workflow_templates-0.7.8.dist-info}/top_level.txt +0 -0
@@ -1,1639 +0,0 @@
1
- [
2
- {
3
- "moduleName": "default",
4
- "category": "USE CASES",
5
- "title": "Basics",
6
- "type": "image",
7
- "templates": [
8
- {
9
- "name": "default",
10
- "title": "Image Generation",
11
- "mediaType": "image",
12
- "mediaSubtype": "webp",
13
- "description": "Generate images from text prompts.",
14
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/text-to-image",
15
- "tags": ["Text to Image", "Image"],
16
- "models": ["SD1.5"],
17
- "date": "2025-03-01"
18
- },
19
- {
20
- "name": "image2image",
21
- "title": "Image to Image",
22
- "mediaType": "image",
23
- "mediaSubtype": "webp",
24
- "description": "Transform existing images using text prompts.",
25
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/image-to-image",
26
- "tags": ["Image to Image", "Image"],
27
- "models": ["SD1.5"],
28
- "date": "2025-03-01"
29
- },
30
- {
31
- "name": "lora",
32
- "title": "LoRA",
33
- "mediaType": "image",
34
- "mediaSubtype": "webp",
35
- "description": "Generate images with LoRA models for specialized styles or subjects.",
36
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
37
- "tags": ["Text to Image", "Image"],
38
- "models": ["SD1.5"],
39
- "date": "2025-03-01"
40
- },
41
- {
42
- "name": "lora_multiple",
43
- "title": "LoRA Multiple",
44
- "mediaType": "image",
45
- "mediaSubtype": "webp",
46
- "description": "Generate images by combining multiple LoRA models.",
47
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/lora",
48
- "tags": ["Text to Image", "Image", "LoRA"],
49
- "models": ["SD1.5"],
50
- "date": "2025-03-01"
51
- },
52
- {
53
- "name": "inpaint_example",
54
- "title": "Inpaint",
55
- "mediaType": "image",
56
- "mediaSubtype": "webp",
57
- "description": "Edit specific parts of images seamlessly.",
58
- "thumbnailVariant": "compareSlider",
59
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
60
- "tags": ["Inpaint", "Image"],
61
- "models": ["SD1.5"],
62
- "date": "2025-03-01"
63
- },
64
- {
65
- "name": "inpaint_model_outpainting",
66
- "title": "Outpaint",
67
- "mediaType": "image",
68
- "mediaSubtype": "webp",
69
- "description": "Extend images beyond their original boundaries.",
70
- "thumbnailVariant": "compareSlider",
71
- "tutorialUrl": "https://docs.comfy.org/tutorials/basic/inpaint",
72
- "tags": ["Outpaint", "Image"],
73
- "models": ["SD1.5"],
74
- "date": "2025-03-01"
75
- },
76
- {
77
- "name": "embedding_example",
78
- "title": "Embedding",
79
- "mediaType": "image",
80
- "mediaSubtype": "webp",
81
- "description": "Generate images using textual inversion for consistent styles.",
82
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/",
83
- "tags": ["Embedding", "Image"],
84
- "models": ["SD1.5"],
85
- "date": "2025-03-01"
86
- },
87
- {
88
- "name": "gligen_textbox_example",
89
- "title": "Gligen Textbox",
90
- "mediaType": "image",
91
- "mediaSubtype": "webp",
92
- "description": "Generate images with precise object placement using text boxes.",
93
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/gligen/",
94
- "tags": ["Gligen", "Image"],
95
- "models": ["SD1.5"],
96
- "date": "2025-03-01"
97
- }
98
- ]
99
- },
100
- {
101
- "moduleName": "default",
102
- "category": "USE CASES",
103
- "title": "Flux",
104
- "type": "image",
105
- "templates": [
106
- {
107
- "name": "image_chroma_text_to_image",
108
- "title": "Chroma text to image",
109
- "mediaType": "image",
110
- "mediaSubtype": "webp",
111
- "description": "Chroma is modified from flux and has some changes in the architecture.",
112
- "tags": ["Text to Image", "Image"],
113
- "models": ["Chroma", "Flux"],
114
- "date": "2025-06-04"
115
- },
116
- {
117
- "name": "flux_kontext_dev_basic",
118
- "title": "Flux Kontext Dev(Basic)",
119
- "mediaType": "image",
120
- "mediaSubtype": "webp",
121
- "thumbnailVariant": "hoverDissolve",
122
- "description": "Edit image using Flux Kontext with full node visibility, perfect for learning the workflow.",
123
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
124
- "tags": ["Image Edit", "Image to Image"],
125
- "models": ["Flux"],
126
- "date": "2025-06-26"
127
- },
128
- {
129
- "name": "flux_kontext_dev_grouped",
130
- "title": "Flux Kontext Dev(Grouped)",
131
- "mediaType": "image",
132
- "mediaSubtype": "webp",
133
- "thumbnailVariant": "hoverDissolve",
134
- "description": "Streamlined version of Flux Kontext with grouped nodes for cleaner workspace.",
135
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-kontext-dev",
136
- "tags": ["Image Edit", "Image to Image"],
137
- "models": ["Flux"],
138
- "date": "2025-06-26"
139
- },
140
- {
141
- "name": "flux_dev_checkpoint_example",
142
- "title": "Flux Dev fp8",
143
- "mediaType": "image",
144
- "mediaSubtype": "webp",
145
- "description": "Generate images using Flux Dev fp8 quantized version. Suitable for devices with limited VRAM, requires only one model file, but image quality is slightly lower than the full version.",
146
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
147
- "tags": ["Text to Image", "Image"],
148
- "models": ["Flux"],
149
- "date": "2025-03-01"
150
- },
151
- {
152
- "name": "flux_schnell",
153
- "title": "Flux Schnell fp8",
154
- "mediaType": "image",
155
- "mediaSubtype": "webp",
156
- "description": "Quickly generate images with Flux Schnell fp8 quantized version. Ideal for low-end hardware, requires only 4 steps to generate images.",
157
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
158
- "tags": ["Text to Image", "Image"],
159
- "models": ["Flux"],
160
- "date": "2025-03-01"
161
- },
162
- {
163
- "name": "flux1_krea_dev",
164
- "title": "Flux.1 Krea Dev",
165
- "mediaType": "image",
166
- "mediaSubtype": "webp",
167
- "description": "A fine-tuned FLUX model pushing photorealism to the max.",
168
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux1-krea-dev",
169
- "tags": ["Text to Image", "Image", "Photorealism"],
170
- "models": ["Flux.1 Krea Dev"],
171
- "date": "2025-07-31"
172
- },
173
- {
174
- "name": "flux_dev_full_text_to_image",
175
- "title": "Flux Dev full text to image",
176
- "mediaType": "image",
177
- "mediaSubtype": "webp",
178
- "description": "Generate high-quality images with Flux Dev full version. Requires larger VRAM and multiple model files, but provides the best prompt following capability and image quality.",
179
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
180
- "tags": ["Text to Image", "Image"],
181
- "models": ["Flux"],
182
- "date": "2025-03-01"
183
- },
184
- {
185
- "name": "flux_schnell_full_text_to_image",
186
- "title": "Flux Schnell full text to image",
187
- "mediaType": "image",
188
- "mediaSubtype": "webp",
189
- "description": "Generate images quickly with Flux Schnell full version. Uses Apache2.0 license, requires only 4 steps to generate images while maintaining good image quality.",
190
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-text-to-image",
191
- "tags": ["Text to Image", "Image"],
192
- "models": ["Flux"],
193
- "date": "2025-03-01"
194
- },
195
- {
196
- "name": "flux_fill_inpaint_example",
197
- "title": "Flux Inpaint",
198
- "mediaType": "image",
199
- "mediaSubtype": "webp",
200
- "description": "Fill missing parts of images using Flux inpainting.",
201
- "thumbnailVariant": "compareSlider",
202
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
203
- "tags": ["Image to Image", "Inpaint", "Image"],
204
- "models": ["Flux"],
205
- "date": "2025-03-01"
206
- },
207
- {
208
- "name": "flux_fill_outpaint_example",
209
- "title": "Flux Outpaint",
210
- "mediaType": "image",
211
- "mediaSubtype": "webp",
212
- "description": "Extend images beyond boundaries using Flux outpainting.",
213
- "thumbnailVariant": "compareSlider",
214
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-fill-dev",
215
- "tags": ["Outpaint", "Image", "Image to Image"],
216
- "models": ["Flux"],
217
- "date": "2025-03-01"
218
- },
219
- {
220
- "name": "flux_canny_model_example",
221
- "title": "Flux Canny Model",
222
- "mediaType": "image",
223
- "mediaSubtype": "webp",
224
- "description": "Generate images guided by edge detection using Flux Canny.",
225
- "thumbnailVariant": "hoverDissolve",
226
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
227
- "tags": ["Image to Image", "ControlNet", "Image"],
228
- "models": ["Flux"],
229
- "date": "2025-03-01"
230
- },
231
- {
232
- "name": "flux_depth_lora_example",
233
- "title": "Flux Depth Lora",
234
- "mediaType": "image",
235
- "mediaSubtype": "webp",
236
- "description": "Generate images guided by depth information using Flux LoRA.",
237
- "thumbnailVariant": "hoverDissolve",
238
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
239
- "tags": ["Image to Image", "ControlNet", "Image", "LoRA"],
240
- "models": ["Flux"],
241
- "date": "2025-03-01"
242
- },
243
- {
244
- "name": "flux_redux_model_example",
245
- "title": "Flux Redux Model",
246
- "mediaType": "image",
247
- "mediaSubtype": "webp",
248
- "description": "Generate images by transferring style from reference images using Flux Redux.",
249
- "tutorialUrl": "https://docs.comfy.org/tutorials/flux/flux-1-controlnet",
250
- "tags": ["Image to Image", "ControlNet", "Image", "LoRA"],
251
- "models": ["Flux"],
252
- "date": "2025-03-01"
253
- }
254
- ]
255
- },
256
- {
257
- "moduleName": "default",
258
- "category": "USE CASES",
259
- "title": "Image",
260
- "type": "image",
261
- "templates": [
262
- {
263
- "name": "image_qwen_image",
264
- "title": "Qwen-Image Text to Image",
265
- "mediaType": "image",
266
- "mediaSubtype": "webp",
267
- "description": "Generate images with exceptional multilingual text rendering and editing capabilities using Qwen-Image's 20B MMDiT model.",
268
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/qwen/qwen-image",
269
- "tags": ["Text to Image", "Image"],
270
- "models": ["Qwen-Image"],
271
- "date": "2025-08-05"
272
- },
273
- {
274
- "name": "image_omnigen2_t2i",
275
- "title": "OmniGen2 Text to Image",
276
- "mediaType": "image",
277
- "mediaSubtype": "webp",
278
- "description": "Generate high-quality images from text prompts using OmniGen2's unified 7B multimodal model with dual-path architecture.",
279
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
280
- "tags": ["Text to Image", "Image"],
281
- "models": ["OmniGen"],
282
- "date": "2025-06-30"
283
- },
284
- {
285
- "name": "image_omnigen2_image_edit",
286
- "title": "OmniGen2 Image Edit",
287
- "mediaType": "image",
288
- "mediaSubtype": "webp",
289
- "thumbnailVariant": "hoverDissolve",
290
- "description": "Edit images with natural language instructions using OmniGen2's advanced image editing capabilities and text rendering support.",
291
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/omnigen/omnigen2",
292
- "tags": ["Image Edit", "Image"],
293
- "models": ["OmniGen"],
294
- "date": "2025-06-30"
295
- },
296
- {
297
- "name": "image_cosmos_predict2_2B_t2i",
298
- "title": "Cosmos Predict2 2B T2I",
299
- "mediaType": "image",
300
- "mediaSubtype": "webp",
301
- "description": "Generate images with Cosmos-Predict2 2B T2I, delivering physically accurate, high-fidelity, and detail-rich image generation.",
302
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/cosmos/cosmos-predict2-t2i",
303
- "tags": ["Text to Image", "Image"],
304
- "models": ["Cosmos"],
305
- "date": "2025-06-16"
306
- },
307
- {
308
- "name": "hidream_i1_dev",
309
- "title": "HiDream I1 Dev",
310
- "mediaType": "image",
311
- "mediaSubtype": "webp",
312
- "description": "Generate images with HiDream I1 Dev - Balanced version with 28 inference steps, suitable for medium-range hardware.",
313
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
314
- "tags": ["Text to Image", "Image"],
315
- "models": ["HiDream"],
316
- "date": "2025-04-17"
317
- },
318
- {
319
- "name": "hidream_i1_fast",
320
- "title": "HiDream I1 Fast",
321
- "mediaType": "image",
322
- "mediaSubtype": "webp",
323
- "description": "Generate images quickly with HiDream I1 Fast - Lightweight version with 16 inference steps, ideal for rapid previews on lower-end hardware.",
324
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
325
- "tags": ["Text to Image", "Image"],
326
- "models": ["HiDream"],
327
- "date": "2025-04-17"
328
- },
329
- {
330
- "name": "hidream_i1_full",
331
- "title": "HiDream I1 Full",
332
- "mediaType": "image",
333
- "mediaSubtype": "webp",
334
- "description": "Generate images with HiDream I1 Full - Complete version with 50 inference steps for highest quality output.",
335
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-i1",
336
- "tags": ["Text to Image", "Image"],
337
- "models": ["HiDream"],
338
- "date": "2025-04-17"
339
- },
340
- {
341
- "name": "hidream_e1_1",
342
- "title": "HiDream E1.1 Image Edit",
343
- "mediaType": "image",
344
- "mediaSubtype": "webp",
345
- "thumbnailVariant": "compareSlider",
346
- "description": "Edit images with HiDream E1.1 – it’s better in image quality and editing accuracy than HiDream-E1-Full.",
347
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
348
- "tags": ["Image Edit", "Image"],
349
- "models": ["HiDream"],
350
- "date": "2025-07-21"
351
- },
352
- {
353
- "name": "hidream_e1_full",
354
- "title": "HiDream E1 Image Edit",
355
- "mediaType": "image",
356
- "mediaSubtype": "webp",
357
- "thumbnailVariant": "compareSlider",
358
- "description": "Edit images with HiDream E1 - Professional natural language image editing model.",
359
- "tutorialUrl": "https://docs.comfy.org/tutorials/image/hidream/hidream-e1",
360
- "tags": ["Image Edit", "Image"],
361
- "models": ["HiDream"],
362
- "date": "2025-05-01"
363
- },
364
- {
365
- "name": "sd3.5_simple_example",
366
- "title": "SD3.5 Simple",
367
- "mediaType": "image",
368
- "mediaSubtype": "webp",
369
- "description": "Generate images using SD 3.5.",
370
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35",
371
- "tags": ["Text to Image", "Image"],
372
- "models": ["SD3.5"],
373
- "date": "2025-03-01"
374
- },
375
- {
376
- "name": "sd3.5_large_canny_controlnet_example",
377
- "title": "SD3.5 Large Canny ControlNet",
378
- "mediaType": "image",
379
- "mediaSubtype": "webp",
380
- "description": "Generate images guided by edge detection using SD 3.5 Canny ControlNet.",
381
- "thumbnailVariant": "hoverDissolve",
382
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
383
- "tags": ["Image to Image", "Image", "ControlNet"],
384
- "models": ["SD3.5"],
385
- "date": "2025-03-01"
386
- },
387
- {
388
- "name": "sd3.5_large_depth",
389
- "title": "SD3.5 Large Depth",
390
- "mediaType": "image",
391
- "mediaSubtype": "webp",
392
- "description": "Generate images guided by depth information using SD 3.5.",
393
- "thumbnailVariant": "hoverDissolve",
394
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
395
- "tags": ["Image to Image", "Image", "ControlNet"],
396
- "models": ["SD3.5"],
397
- "date": "2025-03-01"
398
- },
399
- {
400
- "name": "sd3.5_large_blur",
401
- "title": "SD3.5 Large Blur",
402
- "mediaType": "image",
403
- "mediaSubtype": "webp",
404
- "description": "Generate images guided by blurred reference images using SD 3.5.",
405
- "thumbnailVariant": "hoverDissolve",
406
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sd3/#sd35-controlnets",
407
- "tags": ["Image to Image", "Image"],
408
- "models": ["SD3.5"],
409
- "date": "2025-03-01"
410
- },
411
- {
412
- "name": "sdxl_simple_example",
413
- "title": "SDXL Simple",
414
- "mediaType": "image",
415
- "mediaSubtype": "webp",
416
- "description": "Generate high-quality images using SDXL.",
417
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
418
- "tags": ["Text to Image", "Image"],
419
- "models": ["SDXL"],
420
- "date": "2025-03-01"
421
- },
422
- {
423
- "name": "sdxl_refiner_prompt_example",
424
- "title": "SDXL Refiner Prompt",
425
- "mediaType": "image",
426
- "mediaSubtype": "webp",
427
- "description": "Enhance SDXL images using refiner models.",
428
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/",
429
- "tags": ["Text to Image", "Image"],
430
- "models": ["SDXL"],
431
- "date": "2025-03-01"
432
- },
433
- {
434
- "name": "sdxl_revision_text_prompts",
435
- "title": "SDXL Revision Text Prompts",
436
- "mediaType": "image",
437
- "mediaSubtype": "webp",
438
- "description": "Generate images by transferring concepts from reference images using SDXL Revision.",
439
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
440
- "tags": ["Text to Image", "Image"],
441
- "models": ["SDXL"],
442
- "date": "2025-03-01"
443
- },
444
- {
445
- "name": "sdxl_revision_zero_positive",
446
- "title": "SDXL Revision Zero Positive",
447
- "mediaType": "image",
448
- "mediaSubtype": "webp",
449
- "description": "Generate images using both text prompts and reference images with SDXL Revision.",
450
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision",
451
- "tags": ["Text to Image", "Image"],
452
- "models": ["SDXL"],
453
- "date": "2025-03-01"
454
- },
455
- {
456
- "name": "sdxlturbo_example",
457
- "title": "SDXL Turbo",
458
- "mediaType": "image",
459
- "mediaSubtype": "webp",
460
- "description": "Generate images in a single step using SDXL Turbo.",
461
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/",
462
- "tags": ["Text to Image", "Image"],
463
- "models": ["SDXL Turbo"],
464
- "date": "2025-03-01"
465
- },
466
- {
467
- "name": "image_lotus_depth_v1_1",
468
- "title": "Lotus Depth",
469
- "mediaType": "image",
470
- "mediaSubtype": "webp",
471
- "thumbnailVariant": "compareSlider",
472
- "description": "Run Lotus Depth in ComfyUI for zero-shot, efficient monocular depth estimation with high detail retention.",
473
- "tags": ["Depth", "Image"],
474
- "models": ["SD1.5"],
475
- "date": "2025-05-21"
476
- }
477
- ]
478
- },
479
- {
480
- "moduleName": "default",
481
- "category": "USE CASES",
482
- "title": "Video",
483
- "type": "video",
484
- "templates": [
485
- {
486
- "name": "video_wan2_2_14B_t2v",
487
- "title": "Wan 2.2 14B Text to Video",
488
- "description": "Generate high-quality videos from text prompts with cinematic aesthetic control and dynamic motion generation using Wan 2.2.",
489
- "mediaType": "image",
490
- "mediaSubtype": "webp",
491
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
492
- "tags": ["Text to Video", "Video"],
493
- "models": ["Wan"],
494
- "date": "2025-07-29"
495
- },
496
- {
497
- "name": "video_wan2_2_14B_i2v",
498
- "title": "Wan 2.2 14B Image to Video",
499
- "description": "Transform static images into dynamic videos with precise motion control and style preservation using Wan 2.2.",
500
- "mediaType": "image",
501
- "mediaSubtype": "webp",
502
- "thumbnailVariant": "hoverDissolve",
503
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
504
- "tags": ["Image to Video", "Video"],
505
- "models": ["Wan2.2"],
506
- "date": "2025-07-29"
507
- },
508
- {
509
- "name": "video_wan2_2_14B_flf2v",
510
- "title": "Wan 2.2 14B First-Last Frame to Video",
511
- "description": "Generate smooth video transitions by defining start and end frames.",
512
- "mediaType": "image",
513
- "mediaSubtype": "webp",
514
- "thumbnailVariant": "hoverDissolve",
515
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
516
- "tags": ["FLF2V", "Video"],
517
- "models": ["Wan2.2"],
518
- "date": "2025-08-02"
519
- },
520
- {
521
- "name": "video_wan2_2_14B_fun_inpaint",
522
- "title": "Wan 2.2 14B Fun Inp",
523
- "description": "Generate videos from start and end frames using Wan 2.2 Fun Inp.",
524
- "mediaType": "image",
525
- "mediaSubtype": "webp",
526
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
527
- "tags": ["FLF2V", "Video"],
528
- "models": ["Wan2.2"],
529
- "date": "2025-08-12"
530
- },
531
- {
532
- "name": "video_wan2_2_14B_fun_control",
533
- "title": "Wan 2.2 14B Fun Control",
534
- "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.2 Fun Control.",
535
- "mediaType": "image",
536
- "mediaSubtype": "webp",
537
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
538
- "tags": ["Video to Video", "Video"],
539
- "models": ["Wan2.2"],
540
- "date": "2025-08-12"
541
- },
542
- {
543
- "name": "video_wan2_2_5B_ti2v",
544
- "title": "Wan 2.2 5B Video Generation",
545
- "description": "Generate videos from text or images using Wan 2.2 5B hybrid model.",
546
- "mediaType": "image",
547
- "mediaSubtype": "webp",
548
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan2_2",
549
- "tags": ["Text to Video", "Video"],
550
- "models": ["Wan2.2"],
551
- "date": "2025-07-29"
552
- },
553
- {
554
- "name": "video_wan_vace_14B_t2v",
555
- "title": "Wan VACE Text to Video",
556
- "description": "Transform text descriptions into high-quality videos. Supports both 480p and 720p with VACE-14B model.",
557
- "mediaType": "image",
558
- "mediaSubtype": "webp",
559
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
560
- "tags": ["Text to Video", "Video"],
561
- "models": ["Wan2.1"],
562
- "date": "2025-05-21"
563
- },
564
- {
565
- "name": "video_wan_vace_14B_ref2v",
566
- "title": "Wan VACE Reference to Video",
567
- "description": "Create videos that match the style and content of a reference image. Perfect for style-consistent video generation.",
568
- "mediaType": "image",
569
- "mediaSubtype": "webp",
570
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
571
- "tags": ["Reference to Video", "Video"],
572
- "models": ["Wan2.1"],
573
- "date": "2025-05-21"
574
- },
575
- {
576
- "name": "video_wan_vace_14B_v2v",
577
- "title": "Wan VACE Control Video",
578
- "description": "Generate videos by controlling input videos and reference images using Wan VACE.",
579
- "mediaType": "image",
580
- "mediaSubtype": "webp",
581
- "thumbnailVariant": "compareSlider",
582
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
583
- "tags": ["Video to Video", "Video"],
584
- "models": ["Wan2.1"],
585
- "date": "2025-05-21"
586
- },
587
- {
588
- "name": "video_wan_vace_outpainting",
589
- "title": "Wan VACE Outpainting",
590
- "description": "Generate extended videos by expanding video size using Wan VACE outpainting.",
591
- "mediaType": "image",
592
- "mediaSubtype": "webp",
593
- "thumbnailVariant": "compareSlider",
594
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
595
- "tags": ["Outpainting", "Video"],
596
- "models": ["Wan2.1"],
597
- "date": "2025-05-21"
598
- },
599
- {
600
- "name": "video_wan_vace_flf2v",
601
- "title": "Wan VACE First-Last Frame",
602
- "description": "Generate smooth video transitions by defining start and end frames. Supports custom keyframe sequences.",
603
- "mediaType": "image",
604
- "mediaSubtype": "webp",
605
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
606
- "tags": ["FLF2V", "Video"],
607
- "models": ["Wan2.1"],
608
- "date": "2025-05-21"
609
- },
610
- {
611
- "name": "video_wan_vace_inpainting",
612
- "title": "Wan VACE Inpainting",
613
- "description": "Edit specific regions in videos while preserving surrounding content. Great for object removal or replacement.",
614
- "mediaType": "image",
615
- "mediaSubtype": "webp",
616
- "thumbnailVariant": "compareSlider",
617
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/vace",
618
- "tags": ["Inpainting", "Video"],
619
- "models": ["Wan2.1"],
620
- "date": "2025-05-21"
621
- },
622
- {
623
- "name": "video_wan_ati",
624
- "title": "Wan ATI",
625
- "description": "Trajectory-controlled video generation.",
626
- "mediaType": "image",
627
- "mediaSubtype": "webp",
628
- "thumbnailVariant": "hoverDissolve",
629
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-ati",
630
- "tags": ["Video"],
631
- "models": ["Wan2.1"],
632
- "date": "2025-05-21"
633
- },
634
- {
635
- "name": "video_wan2.1_fun_camera_v1.1_1.3B",
636
- "title": "Wan 2.1 Fun Camera 1.3B",
637
- "description": "Generate dynamic videos with cinematic camera movements using Wan 2.1 Fun Camera 1.3B model.",
638
- "mediaType": "image",
639
- "mediaSubtype": "webp",
640
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
641
- "tags": ["Video"],
642
- "models": ["Wan2.1"],
643
- "date": "2025-04-15"
644
- },
645
- {
646
- "name": "video_wan2.1_fun_camera_v1.1_14B",
647
- "title": "Wan 2.1 Fun Camera 14B",
648
- "description": "Generate high-quality videos with advanced camera control using the full 14B model.",
649
- "mediaType": "image",
650
- "mediaSubtype": "webp",
651
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
652
- "tags": ["Video"],
653
- "models": ["Wan2.1"],
654
- "date": "2025-04-15"
655
- },
656
- {
657
- "name": "text_to_video_wan",
658
- "title": "Wan 2.1 Text to Video",
659
- "description": "Generate videos from text prompts using Wan 2.1.",
660
- "mediaType": "image",
661
- "mediaSubtype": "webp",
662
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
663
- "tags": ["Text to Video", "Video"],
664
- "models": ["Wan2.1"],
665
- "date": "2025-03-01"
666
- },
667
- {
668
- "name": "image_to_video_wan",
669
- "title": "Wan 2.1 Image to Video",
670
- "description": "Generate videos from images using Wan 2.1.",
671
- "mediaType": "image",
672
- "mediaSubtype": "webp",
673
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-video",
674
- "tags": ["Image to Video", "Video"],
675
- "models": ["Wan2.1"],
676
- "date": "2025-03-01"
677
- },
678
- {
679
- "name": "wan2.1_fun_inp",
680
- "title": "Wan 2.1 Inpainting",
681
- "description": "Generate videos from start and end frames using Wan 2.1 inpainting.",
682
- "mediaType": "image",
683
- "mediaSubtype": "webp",
684
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-inp",
685
- "tags": ["Inpaint", "Video"],
686
- "models": ["Wan2.1"],
687
- "date": "2025-04-15"
688
- },
689
- {
690
- "name": "wan2.1_fun_control",
691
- "title": "Wan 2.1 ControlNet",
692
- "description": "Generate videos guided by pose, depth, and edge controls using Wan 2.1 ControlNet.",
693
- "mediaType": "image",
694
- "mediaSubtype": "webp",
695
- "thumbnailVariant": "hoverDissolve",
696
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/fun-control",
697
- "tags": ["Video to Video", "Video"],
698
- "models": ["Wan2.1"],
699
- "date": "2025-04-15"
700
- },
701
- {
702
- "name": "wan2.1_flf2v_720_f16",
703
- "title": "Wan 2.1 FLF2V 720p F16",
704
- "description": "Generate videos by controlling first and last frames using Wan 2.1 FLF2V.",
705
- "mediaType": "image",
706
- "mediaSubtype": "webp",
707
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/wan/wan-flf",
708
- "tags": ["FLF2V", "Video"],
709
- "models": ["Wan2.1"],
710
- "date": "2025-04-15"
711
- },
712
- {
713
- "name": "video_cosmos_predict2_2B_video2world_480p_16fps",
714
- "title": "Cosmos Predict2 2B Video2World 480p 16fps",
715
- "description": "Generate videos with Cosmos-Predict2 2B Video2World, generating physically accurate, high-fidelity, and consistent video simulations.",
716
- "mediaType": "image",
717
- "mediaSubtype": "webp",
718
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/cosmos/cosmos-predict2-video2world",
719
- "tags": ["Video2World", "Video"],
720
- "models": ["Cosmos"],
721
- "date": "2025-06-16"
722
- },
723
- {
724
- "name": "ltxv_text_to_video",
725
- "title": "LTXV Text to Video",
726
- "mediaType": "image",
727
- "mediaSubtype": "webp",
728
- "description": "Generate videos from text prompts.",
729
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
730
- "tags": ["Text to Video", "Video"],
731
- "models": ["LTXV"],
732
- "date": "2025-03-01"
733
- },
734
- {
735
- "name": "ltxv_image_to_video",
736
- "title": "LTXV Image to Video",
737
- "mediaType": "image",
738
- "mediaSubtype": "webp",
739
- "description": "Generate videos from still images.",
740
- "tutorialUrl": "https://docs.comfy.org/tutorials/video/ltxv",
741
- "tags": ["Image to Video", "Video"],
742
- "models": ["LTXV"],
743
- "date": "2025-03-01"
744
- },
745
- {
746
- "name": "mochi_text_to_video_example",
747
- "title": "Mochi Text to Video",
748
- "mediaType": "image",
749
- "mediaSubtype": "webp",
750
- "description": "Generate videos from text prompts using Mochi model.",
751
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/mochi/",
752
- "tags": ["Text to Video", "Video"],
753
- "models": ["Mochi"],
754
- "date": "2025-03-01"
755
- },
756
- {
757
- "name": "hunyuan_video_text_to_video",
758
- "title": "Hunyuan Video Text to Video",
759
- "mediaType": "image",
760
- "mediaSubtype": "webp",
761
- "description": "Generate videos from text prompts using Hunyuan model.",
762
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/",
763
- "tags": ["Text to Video", "Video"],
764
- "models": ["Hunyuan Video"],
765
- "date": "2025-03-01"
766
- },
767
- {
768
- "name": "image_to_video",
769
- "title": "SVD Image to Video",
770
- "mediaType": "image",
771
- "mediaSubtype": "webp",
772
- "description": "Generate videos from still images.",
773
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
774
- "tags": ["Image to Video", "Video"],
775
- "models": ["SVD"],
776
- "date": "2025-03-01"
777
- },
778
- {
779
- "name": "txt_to_image_to_video",
780
- "title": "SVD Text to Image to Video",
781
- "mediaType": "image",
782
- "mediaSubtype": "webp",
783
- "description": "Generate videos by first creating images from text prompts.",
784
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video",
785
- "tags": ["Text to Video", "Video"],
786
- "models": ["SVD"],
787
- "date": "2025-03-01"
788
- }
789
- ]
790
- },
791
- {
792
- "moduleName": "default",
793
- "category": "USE CASES",
794
- "title": "Audio",
795
- "type": "audio",
796
- "templates": [
797
- {
798
- "name": "audio_stable_audio_example",
799
- "title": "Stable Audio",
800
- "mediaType": "audio",
801
- "mediaSubtype": "mp3",
802
- "description": "Generate audio from text prompts using Stable Audio.",
803
- "tags": ["Text to Audio", "Audio"],
804
- "models": ["Stable Audio"],
805
- "date": "2025-03-01",
806
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/audio/"
807
- },
808
- {
809
- "name": "audio_ace_step_1_t2a_instrumentals",
810
- "title": "ACE-Step v1 Text to Instrumentals Music",
811
- "mediaType": "audio",
812
- "mediaSubtype": "mp3",
813
- "description": "Generate instrumental music from text prompts using ACE-Step v1.",
814
- "tags": ["Text to Audio", "Audio", "Instrumentals"],
815
- "models": ["ACE-Step v1"],
816
- "date": "2025-03-01",
817
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
818
- },
819
- {
820
- "name": "audio_ace_step_1_t2a_song",
821
- "title": "ACE Step v1 Text to Song",
822
- "mediaType": "audio",
823
- "mediaSubtype": "mp3",
824
- "description": "Generate songs with vocals from text prompts using ACE-Step v1, supporting multilingual and style customization.",
825
- "tags": ["Text to Audio", "Audio", "Song"],
826
- "models": ["ACE-Step v1"],
827
- "date": "2025-03-01",
828
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
829
- },
830
- {
831
- "name": "audio_ace_step_1_m2m_editing",
832
- "title": "ACE Step v1 M2M Editing",
833
- "mediaType": "audio",
834
- "mediaSubtype": "mp3",
835
- "description": "Edit existing songs to change style and lyrics using ACE-Step v1 M2M.",
836
- "tags": ["Audio Editing", "Audio"],
837
- "models": ["ACE-Step v1"],
838
- "date": "2025-03-01",
839
- "tutorialUrl": "https://docs.comfy.org/tutorials/audio/ace-step/ace-step-v1"
840
- }
841
- ]
842
- },
843
- {
844
- "moduleName": "default",
845
- "category": "TOOLS & BUILDING",
846
- "title": "Image API",
847
- "type": "image",
848
- "templates": [
849
- {
850
- "name": "api_bfl_flux_1_kontext_multiple_images_input",
851
- "title": "BFL Flux.1 Kontext Multiple Image Input",
852
- "description": "Input multiple images and edit them with Flux.1 Kontext.",
853
- "mediaType": "image",
854
- "mediaSubtype": "webp",
855
- "thumbnailVariant": "compareSlider",
856
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
857
- "tags": ["Image Edit", "Image"],
858
- "models": ["Flux"],
859
- "date": "2025-05-29"
860
- },
861
- {
862
- "name": "api_bfl_flux_1_kontext_pro_image",
863
- "title": "BFL Flux.1 Kontext Pro",
864
- "description": "Edit images with Flux.1 Kontext pro image.",
865
- "mediaType": "image",
866
- "mediaSubtype": "webp",
867
- "thumbnailVariant": "compareSlider",
868
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
869
- "tags": ["Image Edit", "Image"],
870
- "models": ["Flux"],
871
- "date": "2025-05-29"
872
- },
873
- {
874
- "name": "api_bfl_flux_1_kontext_max_image",
875
- "title": "BFL Flux.1 Kontext Max",
876
- "description": "Edit images with Flux.1 Kontext max image.",
877
- "mediaType": "image",
878
- "mediaSubtype": "webp",
879
- "thumbnailVariant": "compareSlider",
880
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-kontext",
881
- "tags": ["Image Edit", "Image"],
882
- "models": ["Flux"],
883
- "date": "2025-05-29"
884
- },
885
- {
886
- "name": "api_bfl_flux_pro_t2i",
887
- "title": "BFL Flux[Pro]: Text to Image",
888
- "description": "Generate images with excellent prompt following and visual quality using FLUX.1 Pro.",
889
- "mediaType": "image",
890
- "mediaSubtype": "webp",
891
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/black-forest-labs/flux-1-1-pro-ultra-image",
892
- "tags": ["Image Edit", "Image"],
893
- "models": ["Flux"],
894
- "date": "2025-05-01"
895
- },
896
- {
897
- "name": "api_luma_photon_i2i",
898
- "title": "Luma Photon: Image to Image",
899
- "description": "Guide image generation using a combination of images and prompt.",
900
- "mediaType": "image",
901
- "mediaSubtype": "webp",
902
- "thumbnailVariant": "compareSlider",
903
- "tags": ["Image to Image", "Image", "API"],
904
- "models": ["Luma Photon"],
905
- "date": "2025-03-01"
906
- },
907
- {
908
- "name": "api_luma_photon_style_ref",
909
- "title": "Luma Photon: Style Reference",
910
- "description": "Generate images by blending style references with precise control using Luma Photon.",
911
- "mediaType": "image",
912
- "mediaSubtype": "webp",
913
- "thumbnailVariant": "compareSlider",
914
- "tags": ["Text to Image", "Image", "API", "Style Transfer"],
915
- "models": ["Luma Photon"],
916
- "date": "2025-03-01"
917
- },
918
- {
919
- "name": "api_recraft_image_gen_with_color_control",
920
- "title": "Recraft: Color Control Image Generation",
921
- "description": "Generate images with custom color palettes and brand-specific visuals using Recraft.",
922
- "mediaType": "image",
923
- "mediaSubtype": "webp",
924
- "tags": ["Text to Image", "Image", "API", "Color Control"],
925
- "models": ["Recraft"],
926
- "date": "2025-03-01"
927
- },
928
- {
929
- "name": "api_recraft_image_gen_with_style_control",
930
- "title": "Recraft: Style Control Image Generation",
931
- "description": "Control style with visual examples, align positioning, and fine-tune objects. Store and share styles for perfect brand consistency.",
932
- "mediaType": "image",
933
- "mediaSubtype": "webp",
934
- "tags": ["Text to Image", "Image", "API", "Style Control"],
935
- "models": ["Recraft"],
936
- "date": "2025-03-01"
937
- },
938
- {
939
- "name": "api_recraft_vector_gen",
940
- "title": "Recraft: Vector Generation",
941
- "description": "Generate high-quality vector images from text prompts using Recraft's AI vector generator.",
942
- "mediaType": "image",
943
- "mediaSubtype": "webp",
944
- "tags": ["Text to Image", "Image", "API", "Vector"],
945
- "models": ["Recraft"],
946
- "date": "2025-03-01"
947
- },
948
- {
949
- "name": "api_runway_text_to_image",
950
- "title": "Runway: Text to Image",
951
- "description": "Generate high-quality images from text prompts using Runway's AI model.",
952
- "mediaType": "image",
953
- "mediaSubtype": "webp",
954
- "tags": ["Text to Image", "Image", "API"],
955
- "models": ["Runway"],
956
- "date": "2025-03-01"
957
- },
958
- {
959
- "name": "api_runway_reference_to_image",
960
- "title": "Runway: Reference to Image",
961
- "description": "Generate new images based on reference styles and compositions with Runway's AI.",
962
- "mediaType": "image",
963
- "thumbnailVariant": "compareSlider",
964
- "mediaSubtype": "webp",
965
- "tags": ["Image to Image", "Image", "API", "Style Transfer"],
966
- "models": ["Runway"],
967
- "date": "2025-03-01"
968
- },
969
- {
970
- "name": "api_stability_ai_stable_image_ultra_t2i",
971
- "title": "Stability AI: Stable Image Ultra Text to Image",
972
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
973
- "mediaType": "image",
974
- "mediaSubtype": "webp",
975
- "tags": ["Text to Image", "Image", "API"],
976
- "models": ["Stable Image Ultra"],
977
- "date": "2025-03-01"
978
- },
979
- {
980
- "name": "api_stability_ai_i2i",
981
- "title": "Stability AI: Image to Image",
982
- "description": "Transform images with high-quality generation using Stability AI, perfect for professional editing and style transfer.",
983
- "mediaType": "image",
984
- "thumbnailVariant": "compareSlider",
985
- "mediaSubtype": "webp",
986
- "tags": ["Image to Image", "Image", "API"],
987
- "models": ["Stability AI"],
988
- "date": "2025-03-01"
989
- },
990
- {
991
- "name": "api_stability_ai_sd3.5_t2i",
992
- "title": "Stability AI: SD3.5 Text to Image",
993
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
994
- "mediaType": "image",
995
- "mediaSubtype": "webp",
996
- "tags": ["Text to Image", "Image", "API"],
997
- "models": ["SD3.5"],
998
- "date": "2025-03-01"
999
- },
1000
- {
1001
- "name": "api_stability_ai_sd3.5_i2i",
1002
- "title": "Stability AI: SD3.5 Image to Image",
1003
- "description": "Generate high quality images with excellent prompt adherence. Perfect for professional use cases at 1 megapixel resolution.",
1004
- "mediaType": "image",
1005
- "thumbnailVariant": "compareSlider",
1006
- "mediaSubtype": "webp",
1007
- "tags": ["Image to Image", "Image", "API"],
1008
- "models": ["SD3.5"],
1009
- "date": "2025-03-01"
1010
- },
1011
- {
1012
- "name": "api_ideogram_v3_t2i",
1013
- "title": "Ideogram V3: Text to Image",
1014
- "description": "Generate professional-quality images with excellent prompt alignment, photorealism, and text rendering using Ideogram V3.",
1015
- "mediaType": "image",
1016
- "mediaSubtype": "webp",
1017
- "tags": ["Text to Image", "Image", "API", "Text Rendering"],
1018
- "models": ["Ideogram V3"],
1019
- "date": "2025-03-01"
1020
- },
1021
- {
1022
- "name": "api_openai_image_1_t2i",
1023
- "title": "OpenAI: GPT-Image-1 Text to Image",
1024
- "description": "Generate images from text prompts using OpenAI GPT Image 1 API.",
1025
- "mediaType": "image",
1026
- "mediaSubtype": "webp",
1027
- "tags": ["Text to Image", "Image", "API"],
1028
- "models": ["GPT-Image-1"],
1029
- "date": "2025-03-01",
1030
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
1031
- },
1032
- {
1033
- "name": "api_openai_image_1_i2i",
1034
- "title": "OpenAI: GPT-Image-1 Image to Image",
1035
- "description": "Generate images from input images using OpenAI GPT Image 1 API.",
1036
- "mediaType": "image",
1037
- "mediaSubtype": "webp",
1038
- "thumbnailVariant": "compareSlider",
1039
- "tags": ["Image to Image", "Image", "API"],
1040
- "models": ["GPT-Image-1"],
1041
- "date": "2025-03-01",
1042
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
1043
- },
1044
- {
1045
- "name": "api_openai_image_1_inpaint",
1046
- "title": "OpenAI: GPT-Image-1 Inpaint",
1047
- "description": "Edit images using inpainting with OpenAI GPT Image 1 API.",
1048
- "mediaType": "image",
1049
- "mediaSubtype": "webp",
1050
- "thumbnailVariant": "compareSlider",
1051
- "tags": ["Inpaint", "Image", "API"],
1052
- "models": ["GPT-Image-1"],
1053
- "date": "2025-03-01",
1054
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
1055
- },
1056
- {
1057
- "name": "api_openai_image_1_multi_inputs",
1058
- "title": "OpenAI: GPT-Image-1 Multi Inputs",
1059
- "description": "Generate images from multiple inputs using OpenAI GPT Image 1 API.",
1060
- "mediaType": "image",
1061
- "mediaSubtype": "webp",
1062
- "thumbnailVariant": "compareSlider",
1063
- "tags": ["Text to Image", "Image", "API", "Multi Input"],
1064
- "models": ["GPT-Image-1"],
1065
- "date": "2025-03-01",
1066
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/gpt-image-1"
1067
- },
1068
- {
1069
- "name": "api_openai_dall_e_2_t2i",
1070
- "title": "OpenAI: Dall-E 2 Text to Image",
1071
- "description": "Generate images from text prompts using OpenAI Dall-E 2 API.",
1072
- "mediaType": "image",
1073
- "mediaSubtype": "webp",
1074
- "tags": ["Text to Image", "Image", "API"],
1075
- "models": ["Dall-E 2"],
1076
- "date": "2025-03-01",
1077
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
1078
- },
1079
- {
1080
- "name": "api_openai_dall_e_2_inpaint",
1081
- "title": "OpenAI: Dall-E 2 Inpaint",
1082
- "description": "Edit images using inpainting with OpenAI Dall-E 2 API.",
1083
- "mediaType": "image",
1084
- "mediaSubtype": "webp",
1085
- "thumbnailVariant": "compareSlider",
1086
- "tags": ["Inpaint", "Image", "API"],
1087
- "models": ["Dall-E 2"],
1088
- "date": "2025-03-01",
1089
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-2"
1090
- },
1091
- {
1092
- "name": "api_openai_dall_e_3_t2i",
1093
- "title": "OpenAI: Dall-E 3 Text to Image",
1094
- "description": "Generate images from text prompts using OpenAI Dall-E 3 API.",
1095
- "mediaType": "image",
1096
- "mediaSubtype": "webp",
1097
- "tags": ["Text to Image", "Image", "API"],
1098
- "models": ["Dall-E 3"],
1099
- "date": "2025-03-01",
1100
- "tutorialUrl": "https://docs.comfy.org/tutorials/api-nodes/openai/dall-e-3"
1101
- }
1102
- ]
1103
- },
1104
- {
1105
- "moduleName": "default",
1106
- "category": "TOOLS & BUILDING",
1107
- "title": "Video API",
1108
- "type": "video",
1109
- "templates": [
1110
- {
1111
- "name": "api_kling_i2v",
1112
- "title": "Kling: Image to Video",
1113
- "description": "Generate videos with excellent prompt adherence for actions, expressions, and camera movements using Kling.",
1114
- "mediaType": "image",
1115
- "mediaSubtype": "webp",
1116
- "tags": ["Image to Video", "Video", "API"],
1117
- "models": ["Kling"],
1118
- "date": "2025-03-01",
1119
- "tutorialUrl": ""
1120
- },
1121
- {
1122
- "name": "api_kling_effects",
1123
- "title": "Kling: Video Effects",
1124
- "description": "Generate dynamic videos by applying visual effects to images using Kling.",
1125
- "mediaType": "image",
1126
- "mediaSubtype": "webp",
1127
- "tags": ["Video Effects", "Video", "API"],
1128
- "models": ["Kling"],
1129
- "date": "2025-03-01",
1130
- "tutorialUrl": ""
1131
- },
1132
- {
1133
- "name": "api_kling_flf",
1134
- "title": "Kling: FLF2V",
1135
- "description": "Generate videos through controlling the first and last frames.",
1136
- "mediaType": "image",
1137
- "mediaSubtype": "webp",
1138
- "tags": ["Video Generation", "Video", "API", "Frame Control"],
1139
- "models": ["Kling"],
1140
- "date": "2025-03-01",
1141
- "tutorialUrl": ""
1142
- },
1143
- {
1144
- "name": "api_luma_i2v",
1145
- "title": "Luma: Image to Video",
1146
- "description": "Take static images and instantly create magical high quality animations.",
1147
- "mediaType": "image",
1148
- "mediaSubtype": "webp",
1149
- "tags": ["Image to Video", "Video", "API"],
1150
- "models": ["Luma"],
1151
- "date": "2025-03-01",
1152
- "tutorialUrl": ""
1153
- },
1154
- {
1155
- "name": "api_luma_t2v",
1156
- "title": "Luma: Text to Video",
1157
- "description": "High-quality videos can be generated using simple prompts.",
1158
- "mediaType": "image",
1159
- "mediaSubtype": "webp",
1160
- "tags": ["Text to Video", "Video", "API"],
1161
- "models": ["Luma"],
1162
- "date": "2025-03-01",
1163
- "tutorialUrl": ""
1164
- },
1165
- {
1166
- "name": "api_moonvalley_text_to_video",
1167
- "title": "Moonvalley: Text to Video",
1168
- "description": "Generate cinematic, 1080p videos from text prompts through a model trained exclusively on licensed data.",
1169
- "mediaType": "image",
1170
- "mediaSubtype": "webp",
1171
- "tags": ["Text to Video", "Video", "API"],
1172
- "models": ["Moonvalley"],
1173
- "date": "2025-03-01",
1174
- "tutorialUrl": ""
1175
- },
1176
- {
1177
- "name": "api_moonvalley_image_to_video",
1178
- "title": "Moonvalley: Image to Video",
1179
- "description": "Generate cinematic, 1080p videos with an image through a model trained exclusively on licensed data.",
1180
- "mediaType": "image",
1181
- "mediaSubtype": "webp",
1182
- "tags": ["Image to Video", "Video", "API"],
1183
- "models": ["Moonvalley"],
1184
- "date": "2025-03-01",
1185
- "tutorialUrl": ""
1186
- },
1187
- {
1188
- "name": "api_moonvalley_video_to_video_motion_transfer",
1189
- "title": "Moonvalley: Motion Transfer",
1190
- "description": "Apply motion from one video to another.",
1191
- "mediaType": "image",
1192
- "thumbnailVariant": "hoverDissolve",
1193
- "mediaSubtype": "webp",
1194
- "tags": ["Video to Video", "Video", "API", "Motion Transfer"],
1195
- "models": ["Moonvalley"],
1196
- "date": "2025-03-01",
1197
- "tutorialUrl": ""
1198
- },
1199
- {
1200
- "name": "api_moonvalley_video_to_video_pose_control",
1201
- "title": "Moonvalley: Pose Control",
1202
- "description": "Apply human pose and movement from one video to another.",
1203
- "mediaType": "image",
1204
- "thumbnailVariant": "hoverDissolve",
1205
- "mediaSubtype": "webp",
1206
- "tags": ["Video to Video", "Video", "API", "Pose Control"],
1207
- "models": ["Moonvalley"],
1208
- "date": "2025-03-01",
1209
- "tutorialUrl": ""
1210
- },
1211
- {
1212
- "name": "api_hailuo_minimax_t2v",
1213
- "title": "MiniMax: Text to Video",
1214
- "description": "Generate high-quality videos directly from text prompts. Explore MiniMax's advanced AI capabilities to create diverse visual narratives with professional CGI effects and stylistic elements to bring your descriptions to life.",
1215
- "mediaType": "image",
1216
- "mediaSubtype": "webp",
1217
- "tags": ["Text to Video", "Video", "API"],
1218
- "models": ["MiniMax"],
1219
- "date": "2025-03-01",
1220
- "tutorialUrl": ""
1221
- },
1222
- {
1223
- "name": "api_hailuo_minimax_i2v",
1224
- "title": "MiniMax: Image to Video",
1225
- "description": "Generate refined videos from images and text with CGI integration using MiniMax.",
1226
- "mediaType": "image",
1227
- "mediaSubtype": "webp",
1228
- "tags": ["Image to Video", "Video", "API"],
1229
- "models": ["MiniMax"],
1230
- "date": "2025-03-01",
1231
- "tutorialUrl": ""
1232
- },
1233
- {
1234
- "name": "api_pixverse_i2v",
1235
- "title": "PixVerse: Image to Video",
1236
- "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
1237
- "mediaType": "image",
1238
- "mediaSubtype": "webp",
1239
- "tags": ["Image to Video", "Video", "API"],
1240
- "models": ["PixVerse"],
1241
- "date": "2025-03-01",
1242
- "tutorialUrl": ""
1243
- },
1244
- {
1245
- "name": "api_pixverse_template_i2v",
1246
- "title": "PixVerse Templates: Image to Video",
1247
- "description": "Generate dynamic videos from static images with motion and effects using PixVerse.",
1248
- "mediaType": "image",
1249
- "mediaSubtype": "webp",
1250
- "tags": ["Image to Video", "Video", "API", "Templates"],
1251
- "models": ["PixVerse"],
1252
- "date": "2025-03-01",
1253
- "tutorialUrl": ""
1254
- },
1255
- {
1256
- "name": "api_pixverse_t2v",
1257
- "title": "PixVerse: Text to Video",
1258
- "description": "Generate videos with accurate prompt interpretation and stunning video dynamics.",
1259
- "mediaType": "image",
1260
- "mediaSubtype": "webp",
1261
- "tags": ["Text to Video", "Video", "API"],
1262
- "models": ["PixVerse"],
1263
- "date": "2025-03-01",
1264
- "tutorialUrl": ""
1265
- },
1266
- {
1267
- "name": "api_runway_gen3a_turbo_image_to_video",
1268
- "title": "Runway: Gen3a Turbo Image to Video",
1269
- "description": "Generate cinematic videos from static images using Runway Gen3a Turbo.",
1270
- "mediaType": "image",
1271
- "mediaSubtype": "webp",
1272
- "tags": ["Image to Video", "Video", "API"],
1273
- "models": ["Runway Gen3a Turbo"],
1274
- "date": "2025-03-01",
1275
- "tutorialUrl": ""
1276
- },
1277
- {
1278
- "name": "api_runway_gen4_turo_image_to_video",
1279
- "title": "Runway: Gen4 Turbo Image to Video",
1280
- "description": "Generate dynamic videos from images using Runway Gen4 Turbo.",
1281
- "mediaType": "image",
1282
- "mediaSubtype": "webp",
1283
- "tags": ["Image to Video", "Video", "API"],
1284
- "models": ["Runway Gen4 Turbo"],
1285
- "date": "2025-03-01",
1286
- "tutorialUrl": ""
1287
- },
1288
- {
1289
- "name": "api_runway_first_last_frame",
1290
- "title": "Runway: First Last Frame to Video",
1291
- "description": "Generate smooth video transitions between two keyframes with Runway's precision.",
1292
- "mediaType": "image",
1293
- "mediaSubtype": "webp",
1294
- "tags": ["Video Generation", "Video", "API", "Frame Control"],
1295
- "models": ["Runway"],
1296
- "date": "2025-03-01",
1297
- "tutorialUrl": ""
1298
- },
1299
- {
1300
- "name": "api_pika_i2v",
1301
- "title": "Pika: Image to Video",
1302
- "description": "Generate smooth animated videos from single static images using Pika AI.",
1303
- "mediaType": "image",
1304
- "mediaSubtype": "webp",
1305
- "tags": ["Image to Video", "Video", "API"],
1306
- "models": ["Pika"],
1307
- "date": "2025-03-01",
1308
- "tutorialUrl": ""
1309
- },
1310
- {
1311
- "name": "api_pika_scene",
1312
- "title": "Pika Scenes: Images to Video",
1313
- "description": "Generate videos that incorporate multiple input images using Pika Scenes.",
1314
- "mediaType": "image",
1315
- "mediaSubtype": "webp",
1316
- "tags": ["Image to Video", "Video", "API", "Multi Image"],
1317
- "models": ["Pika Scenes"],
1318
- "date": "2025-03-01",
1319
- "tutorialUrl": ""
1320
- },
1321
- {
1322
- "name": "api_veo2_i2v",
1323
- "title": "Veo2: Image to Video",
1324
- "description": "Generate videos from images using Google Veo2 API.",
1325
- "mediaType": "image",
1326
- "mediaSubtype": "webp",
1327
- "tags": ["Image to Video", "Video", "API"],
1328
- "models": ["Veo2"],
1329
- "date": "2025-03-01",
1330
- "tutorialUrl": ""
1331
- }
1332
- ]
1333
- },
1334
- {
1335
- "moduleName": "default",
1336
- "category": "TOOLS & BUILDING",
1337
- "title": "3D API",
1338
- "type": "image",
1339
- "templates": [
1340
- {
1341
- "name": "api_rodin_image_to_model",
1342
- "title": "Rodin: Image to Model",
1343
- "description": "Generate detailed 3D models from single photos using Rodin AI.",
1344
- "mediaType": "image",
1345
- "thumbnailVariant": "compareSlider",
1346
- "mediaSubtype": "webp",
1347
- "tags": ["Image to Model", "3D", "API"],
1348
- "models": ["Rodin"],
1349
- "date": "2025-03-01",
1350
- "tutorialUrl": ""
1351
- },
1352
- {
1353
- "name": "api_rodin_multiview_to_model",
1354
- "title": "Rodin: Multiview to Model",
1355
- "description": "Sculpt comprehensive 3D models using Rodin's multi-angle reconstruction.",
1356
- "mediaType": "image",
1357
- "thumbnailVariant": "compareSlider",
1358
- "mediaSubtype": "webp",
1359
- "tags": ["Multiview to Model", "3D", "API"],
1360
- "models": ["Rodin"],
1361
- "date": "2025-03-01",
1362
- "tutorialUrl": ""
1363
- },
1364
- {
1365
- "name": "api_tripo_text_to_model",
1366
- "title": "Tripo: Text to Model",
1367
- "description": "Craft 3D objects from descriptions with Tripo's text-driven modeling.",
1368
- "mediaType": "image",
1369
- "mediaSubtype": "webp",
1370
- "tags": ["Text to Model", "3D", "API"],
1371
- "models": ["Tripo"],
1372
- "date": "2025-03-01",
1373
- "tutorialUrl": ""
1374
- },
1375
- {
1376
- "name": "api_tripo_image_to_model",
1377
- "title": "Tripo: Image to Model",
1378
- "description": "Generate professional 3D assets from 2D images using Tripo engine.",
1379
- "mediaType": "image",
1380
- "thumbnailVariant": "compareSlider",
1381
- "mediaSubtype": "webp",
1382
- "tags": ["Image to Model", "3D", "API"],
1383
- "models": ["Tripo"],
1384
- "date": "2025-03-01",
1385
- "tutorialUrl": ""
1386
- },
1387
- {
1388
- "name": "api_tripo_multiview_to_model",
1389
- "title": "Tripo: Multiview to Model",
1390
- "description": "Build 3D models from multiple angles with Tripo's advanced scanner.",
1391
- "mediaType": "image",
1392
- "thumbnailVariant": "compareSlider",
1393
- "mediaSubtype": "webp",
1394
- "tags": ["Multiview to Model", "3D", "API"],
1395
- "models": ["Tripo"],
1396
- "date": "2025-03-01",
1397
- "tutorialUrl": ""
1398
- }
1399
- ]
1400
- },
1401
- {
1402
- "moduleName": "default",
1403
- "category": "TOOLS & BUILDING",
1404
- "title": "LLM API",
1405
- "type": "image",
1406
- "templates": [
1407
- {
1408
- "name": "api_openai_chat",
1409
- "title": "OpenAI: Chat",
1410
- "description": "Engage with OpenAI's advanced language models for intelligent conversations.",
1411
- "mediaType": "image",
1412
- "mediaSubtype": "webp",
1413
- "tags": ["Chat", "LLM", "API"],
1414
- "models": ["OpenAI"],
1415
- "date": "2025-03-01",
1416
- "tutorialUrl": ""
1417
- },
1418
- {
1419
- "name": "api_google_gemini",
1420
- "title": "Google Gemini: Chat",
1421
- "description": "Experience Google's multimodal AI with Gemini's reasoning capabilities.",
1422
- "mediaType": "image",
1423
- "mediaSubtype": "webp",
1424
- "tags": ["Chat", "LLM", "API"],
1425
- "models": ["Google Gemini"],
1426
- "date": "2025-03-01",
1427
- "tutorialUrl": ""
1428
- }
1429
- ]
1430
- },
1431
- {
1432
- "moduleName": "default",
1433
- "category": "TOOLS & BUILDING",
1434
- "title": "Upscaling",
1435
- "type": "image",
1436
- "templates": [
1437
- {
1438
- "name": "hiresfix_latent_workflow",
1439
- "title": "Upscale",
1440
- "mediaType": "image",
1441
- "mediaSubtype": "webp",
1442
- "description": "Upscale images by enhancing quality in latent space.",
1443
- "thumbnailVariant": "compareSlider",
1444
- "tags": ["Upscale", "Image"],
1445
- "models": ["SD1.5"],
1446
- "date": "2025-03-01",
1447
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/"
1448
- },
1449
- {
1450
- "name": "esrgan_example",
1451
- "title": "ESRGAN",
1452
- "mediaType": "image",
1453
- "mediaSubtype": "webp",
1454
- "description": "Upscale images using ESRGAN models to enhance quality.",
1455
- "thumbnailVariant": "compareSlider",
1456
- "tags": ["Upscale", "Image"],
1457
- "models": ["SD1.5"],
1458
- "date": "2025-03-01",
1459
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/"
1460
- },
1461
- {
1462
- "name": "hiresfix_esrgan_workflow",
1463
- "title": "HiresFix ESRGAN Workflow",
1464
- "mediaType": "image",
1465
- "mediaSubtype": "webp",
1466
- "description": "Upscale images using ESRGAN models during intermediate generation steps.",
1467
- "thumbnailVariant": "compareSlider",
1468
- "tags": ["Upscale", "Image"],
1469
- "models": ["SD1.5"],
1470
- "date": "2025-03-01",
1471
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#non-latent-upscaling"
1472
- },
1473
- {
1474
- "name": "latent_upscale_different_prompt_model",
1475
- "title": "Latent Upscale Different Prompt Model",
1476
- "mediaType": "image",
1477
- "mediaSubtype": "webp",
1478
- "description": "Upscale images while changing prompts across generation passes.",
1479
- "thumbnailVariant": "zoomHover",
1480
- "tags": ["Upscale", "Image"],
1481
- "models": ["SD1.5"],
1482
- "date": "2025-03-01",
1483
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/#more-examples"
1484
- }
1485
- ]
1486
- },
1487
- {
1488
- "moduleName": "default",
1489
- "category": "TOOLS & BUILDING",
1490
- "title": "ControlNet",
1491
- "type": "image",
1492
- "templates": [
1493
- {
1494
- "name": "controlnet_example",
1495
- "title": "Scribble ControlNet",
1496
- "mediaType": "image",
1497
- "mediaSubtype": "webp",
1498
- "description": "Generate images guided by scribble reference images using ControlNet.",
1499
- "thumbnailVariant": "hoverDissolve",
1500
- "tags": ["ControlNet", "Image"],
1501
- "models": ["SD1.5"],
1502
- "date": "2025-03-01",
1503
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/"
1504
- },
1505
- {
1506
- "name": "2_pass_pose_worship",
1507
- "title": "Pose ControlNet 2 Pass",
1508
- "mediaType": "image",
1509
- "mediaSubtype": "webp",
1510
- "description": "Generate images guided by pose references using ControlNet.",
1511
- "thumbnailVariant": "hoverDissolve",
1512
- "tags": ["ControlNet", "Image"],
1513
- "models": ["SD1.5"],
1514
- "date": "2025-03-01",
1515
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#pose-controlnet"
1516
- },
1517
- {
1518
- "name": "depth_controlnet",
1519
- "title": "Depth ControlNet",
1520
- "mediaType": "image",
1521
- "mediaSubtype": "webp",
1522
- "description": "Generate images guided by depth information using ControlNet.",
1523
- "thumbnailVariant": "hoverDissolve",
1524
- "tags": ["ControlNet", "Image"],
1525
- "models": ["SD1.5"],
1526
- "date": "2025-03-01",
1527
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
1528
- },
1529
- {
1530
- "name": "depth_t2i_adapter",
1531
- "title": "Depth T2I Adapter",
1532
- "mediaType": "image",
1533
- "mediaSubtype": "webp",
1534
- "description": "Generate images guided by depth information using T2I adapter.",
1535
- "thumbnailVariant": "hoverDissolve",
1536
- "tags": ["T2I Adapter", "Image"],
1537
- "models": ["SD1.5"],
1538
- "date": "2025-03-01",
1539
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets"
1540
- },
1541
- {
1542
- "name": "mixing_controlnets",
1543
- "title": "Mixing ControlNets",
1544
- "mediaType": "image",
1545
- "mediaSubtype": "webp",
1546
- "description": "Generate images by combining multiple ControlNet models.",
1547
- "thumbnailVariant": "hoverDissolve",
1548
- "tags": ["ControlNet", "Image"],
1549
- "models": ["SD1.5"],
1550
- "date": "2025-03-01",
1551
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets"
1552
- }
1553
- ]
1554
- },
1555
- {
1556
- "moduleName": "default",
1557
- "category": "TOOLS & BUILDING",
1558
- "title": "Area Composition",
1559
- "type": "image",
1560
- "templates": [
1561
- {
1562
- "name": "area_composition",
1563
- "title": "Area Composition",
1564
- "mediaType": "image",
1565
- "mediaSubtype": "webp",
1566
- "description": "Generate images by controlling composition with defined areas.",
1567
- "tags": ["Area Composition", "Image"],
1568
- "models": ["SD1.5"],
1569
- "date": "2025-03-01",
1570
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/"
1571
- },
1572
- {
1573
- "name": "area_composition_square_area_for_subject",
1574
- "title": "Area Composition Square Area for Subject",
1575
- "mediaType": "image",
1576
- "mediaSubtype": "webp",
1577
- "description": "Generate images with consistent subject placement using area composition.",
1578
- "tags": ["Area Composition", "Image"],
1579
- "models": ["SD1.5"],
1580
- "date": "2025-03-01",
1581
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/area_composition/#increasing-consistency-of-images-with-area-composition"
1582
- }
1583
- ]
1584
- },
1585
- {
1586
- "moduleName": "default",
1587
- "category": "USE CASES",
1588
- "title": "3D",
1589
- "type": "3d",
1590
- "templates": [
1591
- {
1592
- "name": "3d_hunyuan3d_image_to_model",
1593
- "title": "Hunyuan3D 2.0",
1594
- "mediaType": "image",
1595
- "mediaSubtype": "webp",
1596
- "description": "Generate 3D models from single images using Hunyuan3D 2.0.",
1597
- "tags": ["Image to Model", "3D"],
1598
- "models": ["Hunyuan3D 2.0"],
1599
- "date": "2025-03-01",
1600
- "tutorialUrl": ""
1601
- },
1602
- {
1603
- "name": "3d_hunyuan3d_multiview_to_model",
1604
- "title": "Hunyuan3D 2.0 MV",
1605
- "mediaType": "image",
1606
- "mediaSubtype": "webp",
1607
- "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV.",
1608
- "tags": ["Multiview to Model", "3D"],
1609
- "models": ["Hunyuan3D 2.0 MV"],
1610
- "date": "2025-03-01",
1611
- "tutorialUrl": "",
1612
- "thumbnailVariant": "hoverDissolve"
1613
- },
1614
- {
1615
- "name": "3d_hunyuan3d_multiview_to_model_turbo",
1616
- "title": "Hunyuan3D 2.0 MV Turbo",
1617
- "mediaType": "image",
1618
- "mediaSubtype": "webp",
1619
- "description": "Generate 3D models from multiple views using Hunyuan3D 2.0 MV Turbo.",
1620
- "tags": ["Multiview to Model", "3D"],
1621
- "models": ["Hunyuan3D 2.0 MV Turbo"],
1622
- "date": "2025-03-01",
1623
- "tutorialUrl": "",
1624
- "thumbnailVariant": "hoverDissolve"
1625
- },
1626
- {
1627
- "name": "stable_zero123_example",
1628
- "title": "Stable Zero123",
1629
- "mediaType": "image",
1630
- "mediaSubtype": "webp",
1631
- "description": "Generate 3D views from single images using Stable Zero123.",
1632
- "tags": ["Image to 3D", "3D"],
1633
- "models": ["Stable Zero123"],
1634
- "date": "2025-03-01",
1635
- "tutorialUrl": "https://comfyanonymous.github.io/ComfyUI_examples/3d/"
1636
- }
1637
- ]
1638
- }
1639
- ]