diffusers-0.27.1-py3-none-any.whl → diffusers-0.32.2-py3-none-any.whl

Files changed (445)
  1. diffusers/__init__.py +233 -6
  2. diffusers/callbacks.py +209 -0
  3. diffusers/commands/env.py +102 -6
  4. diffusers/configuration_utils.py +45 -16
  5. diffusers/dependency_versions_table.py +4 -3
  6. diffusers/image_processor.py +434 -110
  7. diffusers/loaders/__init__.py +42 -9
  8. diffusers/loaders/ip_adapter.py +626 -36
  9. diffusers/loaders/lora_base.py +900 -0
  10. diffusers/loaders/lora_conversion_utils.py +991 -125
  11. diffusers/loaders/lora_pipeline.py +3812 -0
  12. diffusers/loaders/peft.py +571 -7
  13. diffusers/loaders/single_file.py +405 -173
  14. diffusers/loaders/single_file_model.py +385 -0
  15. diffusers/loaders/single_file_utils.py +1783 -713
  16. diffusers/loaders/textual_inversion.py +41 -23
  17. diffusers/loaders/transformer_flux.py +181 -0
  18. diffusers/loaders/transformer_sd3.py +89 -0
  19. diffusers/loaders/unet.py +464 -540
  20. diffusers/loaders/unet_loader_utils.py +163 -0
  21. diffusers/models/__init__.py +76 -7
  22. diffusers/models/activations.py +65 -10
  23. diffusers/models/adapter.py +53 -53
  24. diffusers/models/attention.py +605 -18
  25. diffusers/models/attention_flax.py +1 -1
  26. diffusers/models/attention_processor.py +4304 -687
  27. diffusers/models/autoencoders/__init__.py +8 -0
  28. diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
  29. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  30. diffusers/models/autoencoders/autoencoder_kl.py +110 -28
  31. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  32. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
  33. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  34. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  35. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  36. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
  37. diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
  38. diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
  39. diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
  40. diffusers/models/autoencoders/vae.py +41 -29
  41. diffusers/models/autoencoders/vq_model.py +182 -0
  42. diffusers/models/controlnet.py +47 -800
  43. diffusers/models/controlnet_flux.py +70 -0
  44. diffusers/models/controlnet_sd3.py +68 -0
  45. diffusers/models/controlnet_sparsectrl.py +116 -0
  46. diffusers/models/controlnets/__init__.py +23 -0
  47. diffusers/models/controlnets/controlnet.py +872 -0
  48. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
  49. diffusers/models/controlnets/controlnet_flux.py +536 -0
  50. diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
  51. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  52. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  53. diffusers/models/controlnets/controlnet_union.py +832 -0
  54. diffusers/models/controlnets/controlnet_xs.py +1946 -0
  55. diffusers/models/controlnets/multicontrolnet.py +183 -0
  56. diffusers/models/downsampling.py +85 -18
  57. diffusers/models/embeddings.py +1856 -158
  58. diffusers/models/embeddings_flax.py +23 -9
  59. diffusers/models/model_loading_utils.py +480 -0
  60. diffusers/models/modeling_flax_pytorch_utils.py +2 -1
  61. diffusers/models/modeling_flax_utils.py +2 -7
  62. diffusers/models/modeling_outputs.py +14 -0
  63. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  64. diffusers/models/modeling_utils.py +611 -146
  65. diffusers/models/normalization.py +361 -20
  66. diffusers/models/resnet.py +18 -23
  67. diffusers/models/transformers/__init__.py +16 -0
  68. diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
  69. diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
  70. diffusers/models/transformers/dit_transformer_2d.py +240 -0
  71. diffusers/models/transformers/dual_transformer_2d.py +9 -8
  72. diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
  73. diffusers/models/transformers/latte_transformer_3d.py +327 -0
  74. diffusers/models/transformers/lumina_nextdit2d.py +340 -0
  75. diffusers/models/transformers/pixart_transformer_2d.py +445 -0
  76. diffusers/models/transformers/prior_transformer.py +13 -13
  77. diffusers/models/transformers/sana_transformer.py +488 -0
  78. diffusers/models/transformers/stable_audio_transformer.py +458 -0
  79. diffusers/models/transformers/t5_film_transformer.py +17 -19
  80. diffusers/models/transformers/transformer_2d.py +297 -187
  81. diffusers/models/transformers/transformer_allegro.py +422 -0
  82. diffusers/models/transformers/transformer_cogview3plus.py +386 -0
  83. diffusers/models/transformers/transformer_flux.py +593 -0
  84. diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
  85. diffusers/models/transformers/transformer_ltx.py +469 -0
  86. diffusers/models/transformers/transformer_mochi.py +499 -0
  87. diffusers/models/transformers/transformer_sd3.py +461 -0
  88. diffusers/models/transformers/transformer_temporal.py +21 -19
  89. diffusers/models/unets/unet_1d.py +8 -8
  90. diffusers/models/unets/unet_1d_blocks.py +31 -31
  91. diffusers/models/unets/unet_2d.py +17 -10
  92. diffusers/models/unets/unet_2d_blocks.py +225 -149
  93. diffusers/models/unets/unet_2d_condition.py +41 -40
  94. diffusers/models/unets/unet_2d_condition_flax.py +6 -5
  95. diffusers/models/unets/unet_3d_blocks.py +192 -1057
  96. diffusers/models/unets/unet_3d_condition.py +22 -27
  97. diffusers/models/unets/unet_i2vgen_xl.py +22 -18
  98. diffusers/models/unets/unet_kandinsky3.py +2 -2
  99. diffusers/models/unets/unet_motion_model.py +1413 -89
  100. diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
  101. diffusers/models/unets/unet_stable_cascade.py +19 -18
  102. diffusers/models/unets/uvit_2d.py +2 -2
  103. diffusers/models/upsampling.py +95 -26
  104. diffusers/models/vq_model.py +12 -164
  105. diffusers/optimization.py +1 -1
  106. diffusers/pipelines/__init__.py +202 -3
  107. diffusers/pipelines/allegro/__init__.py +48 -0
  108. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  109. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  110. diffusers/pipelines/amused/pipeline_amused.py +12 -12
  111. diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
  112. diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
  113. diffusers/pipelines/animatediff/__init__.py +8 -0
  114. diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
  115. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
  116. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
  117. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
  118. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
  119. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
  120. diffusers/pipelines/animatediff/pipeline_output.py +3 -2
  121. diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
  122. diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
  123. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
  124. diffusers/pipelines/aura_flow/__init__.py +48 -0
  125. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
  126. diffusers/pipelines/auto_pipeline.py +196 -28
  127. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  128. diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
  129. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
  130. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
  131. diffusers/pipelines/cogvideo/__init__.py +54 -0
  132. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
  133. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
  134. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
  135. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
  136. diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
  137. diffusers/pipelines/cogview3/__init__.py +47 -0
  138. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
  139. diffusers/pipelines/cogview3/pipeline_output.py +21 -0
  140. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
  141. diffusers/pipelines/controlnet/__init__.py +86 -80
  142. diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
  143. diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
  144. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
  145. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
  146. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
  147. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
  148. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
  149. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
  150. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  151. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  152. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  153. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  154. diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
  155. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
  156. diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
  157. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
  158. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
  159. diffusers/pipelines/controlnet_xs/__init__.py +68 -0
  160. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
  161. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
  162. diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
  163. diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
  164. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
  165. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
  166. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
  167. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
  168. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
  169. diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
  170. diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
  171. diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
  172. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
  173. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
  174. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
  175. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
  176. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
  177. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
  178. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
  179. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
  180. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
  181. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
  182. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
  183. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
  184. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
  185. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
  186. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
  187. diffusers/pipelines/dit/pipeline_dit.py +7 -4
  188. diffusers/pipelines/flux/__init__.py +69 -0
  189. diffusers/pipelines/flux/modeling_flux.py +47 -0
  190. diffusers/pipelines/flux/pipeline_flux.py +957 -0
  191. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  192. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  193. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  194. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
  195. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
  196. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
  197. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  198. diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
  199. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
  200. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  201. diffusers/pipelines/flux/pipeline_output.py +37 -0
  202. diffusers/pipelines/free_init_utils.py +41 -38
  203. diffusers/pipelines/free_noise_utils.py +596 -0
  204. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  205. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  206. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  207. diffusers/pipelines/hunyuandit/__init__.py +48 -0
  208. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
  209. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
  210. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
  211. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
  212. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
  213. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
  214. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
  215. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
  216. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
  217. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
  218. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
  219. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
  220. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
  221. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
  222. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
  223. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
  224. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
  225. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
  226. diffusers/pipelines/kolors/__init__.py +54 -0
  227. diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
  228. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
  229. diffusers/pipelines/kolors/pipeline_output.py +21 -0
  230. diffusers/pipelines/kolors/text_encoder.py +889 -0
  231. diffusers/pipelines/kolors/tokenizer.py +338 -0
  232. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
  233. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
  234. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
  235. diffusers/pipelines/latte/__init__.py +48 -0
  236. diffusers/pipelines/latte/pipeline_latte.py +881 -0
  237. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
  238. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
  239. diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
  240. diffusers/pipelines/ltx/__init__.py +50 -0
  241. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  242. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  243. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  244. diffusers/pipelines/lumina/__init__.py +48 -0
  245. diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
  246. diffusers/pipelines/marigold/__init__.py +50 -0
  247. diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
  248. diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
  249. diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
  250. diffusers/pipelines/mochi/__init__.py +48 -0
  251. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  252. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  253. diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
  254. diffusers/pipelines/pag/__init__.py +80 -0
  255. diffusers/pipelines/pag/pag_utils.py +243 -0
  256. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
  257. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
  258. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
  259. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
  260. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
  261. diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
  262. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
  263. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  264. diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
  265. diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
  266. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  267. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
  268. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
  269. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  270. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
  271. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
  272. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
  273. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
  274. diffusers/pipelines/pia/pipeline_pia.py +74 -164
  275. diffusers/pipelines/pipeline_flax_utils.py +5 -10
  276. diffusers/pipelines/pipeline_loading_utils.py +515 -53
  277. diffusers/pipelines/pipeline_utils.py +411 -222
  278. diffusers/pipelines/pixart_alpha/__init__.py +8 -1
  279. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
  280. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
  281. diffusers/pipelines/sana/__init__.py +47 -0
  282. diffusers/pipelines/sana/pipeline_output.py +21 -0
  283. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  284. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
  285. diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
  286. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
  287. diffusers/pipelines/shap_e/renderer.py +1 -1
  288. diffusers/pipelines/stable_audio/__init__.py +50 -0
  289. diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
  290. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
  291. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
  292. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
  293. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
  294. diffusers/pipelines/stable_diffusion/__init__.py +0 -1
  295. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
  296. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  297. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
  298. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
  299. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
  300. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
  301. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
  302. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
  303. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
  304. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
  305. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
  306. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
  307. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
  308. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
  309. diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
  310. diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
  311. diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
  312. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
  313. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
  314. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
  315. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
  316. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
  317. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
  318. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
  319. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
  320. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
  321. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
  322. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
  323. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
  324. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  325. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
  326. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
  327. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
  328. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
  329. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
  330. diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
  331. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
  332. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
  333. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
  334. diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
  335. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
  336. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
  337. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
  338. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
  339. diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
  340. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
  341. diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
  342. diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
  343. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
  344. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
  345. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
  346. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
  347. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
  348. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
  349. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
  350. diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
  351. diffusers/quantizers/auto.py +139 -0
  352. diffusers/quantizers/base.py +233 -0
  353. diffusers/quantizers/bitsandbytes/__init__.py +2 -0
  354. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
  355. diffusers/quantizers/bitsandbytes/utils.py +306 -0
  356. diffusers/quantizers/gguf/__init__.py +1 -0
  357. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  358. diffusers/quantizers/gguf/utils.py +456 -0
  359. diffusers/quantizers/quantization_config.py +669 -0
  360. diffusers/quantizers/torchao/__init__.py +15 -0
  361. diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
  362. diffusers/schedulers/__init__.py +12 -2
  363. diffusers/schedulers/deprecated/__init__.py +1 -1
  364. diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
  365. diffusers/schedulers/scheduling_amused.py +5 -5
  366. diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
  367. diffusers/schedulers/scheduling_consistency_models.py +23 -25
  368. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
  369. diffusers/schedulers/scheduling_ddim.py +27 -26
  370. diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
  371. diffusers/schedulers/scheduling_ddim_flax.py +2 -1
  372. diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
  373. diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
  374. diffusers/schedulers/scheduling_ddpm.py +27 -30
  375. diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
  376. diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
  377. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
  378. diffusers/schedulers/scheduling_deis_multistep.py +150 -50
  379. diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
  380. diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
  381. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
  382. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
  383. diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
  384. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
  385. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
  386. diffusers/schedulers/scheduling_edm_euler.py +62 -39
  387. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
  388. diffusers/schedulers/scheduling_euler_discrete.py +255 -74
  389. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
  390. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
  391. diffusers/schedulers/scheduling_heun_discrete.py +174 -46
  392. diffusers/schedulers/scheduling_ipndm.py +9 -9
  393. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
  394. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
  395. diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
  396. diffusers/schedulers/scheduling_lcm.py +23 -29
  397. diffusers/schedulers/scheduling_lms_discrete.py +105 -28
  398. diffusers/schedulers/scheduling_pndm.py +20 -20
  399. diffusers/schedulers/scheduling_repaint.py +21 -21
  400. diffusers/schedulers/scheduling_sasolver.py +157 -60
  401. diffusers/schedulers/scheduling_sde_ve.py +19 -19
  402. diffusers/schedulers/scheduling_tcd.py +41 -36
  403. diffusers/schedulers/scheduling_unclip.py +19 -16
  404. diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
  405. diffusers/schedulers/scheduling_utils.py +12 -5
  406. diffusers/schedulers/scheduling_utils_flax.py +1 -3
  407. diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
  408. diffusers/training_utils.py +214 -30
  409. diffusers/utils/__init__.py +17 -1
  410. diffusers/utils/constants.py +3 -0
  411. diffusers/utils/doc_utils.py +1 -0
  412. diffusers/utils/dummy_pt_objects.py +592 -7
  413. diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
  414. diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
  415. diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
  416. diffusers/utils/dynamic_modules_utils.py +34 -29
  417. diffusers/utils/export_utils.py +50 -6
  418. diffusers/utils/hub_utils.py +131 -17
  419. diffusers/utils/import_utils.py +210 -8
  420. diffusers/utils/loading_utils.py +118 -5
  421. diffusers/utils/logging.py +4 -2
  422. diffusers/utils/peft_utils.py +37 -7
  423. diffusers/utils/state_dict_utils.py +13 -2
  424. diffusers/utils/testing_utils.py +193 -11
  425. diffusers/utils/torch_utils.py +4 -0
  426. diffusers/video_processor.py +113 -0
  427. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
  428. diffusers-0.32.2.dist-info/RECORD +550 -0
  429. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
  430. diffusers/loaders/autoencoder.py +0 -146
  431. diffusers/loaders/controlnet.py +0 -136
  432. diffusers/loaders/lora.py +0 -1349
  433. diffusers/models/prior_transformer.py +0 -12
  434. diffusers/models/t5_film_transformer.py +0 -70
  435. diffusers/models/transformer_2d.py +0 -25
  436. diffusers/models/transformer_temporal.py +0 -34
  437. diffusers/models/unet_1d.py +0 -26
  438. diffusers/models/unet_1d_blocks.py +0 -203
  439. diffusers/models/unet_2d.py +0 -27
  440. diffusers/models/unet_2d_blocks.py +0 -375
  441. diffusers/models/unet_2d_condition.py +0 -25
  442. diffusers-0.27.1.dist-info/RECORD +0 -399
  443. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
  444. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
  445. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
diffusers/pipelines/free_noise_utils.py
@@ -0,0 +1,596 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+
+from ..models.attention import BasicTransformerBlock, FreeNoiseTransformerBlock
+from ..models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
+from ..models.transformers.transformer_2d import Transformer2DModel
+from ..models.unets.unet_motion_model import (
+    AnimateDiffTransformer3D,
+    CrossAttnDownBlockMotion,
+    DownBlockMotion,
+    UpBlockMotion,
+)
+from ..pipelines.pipeline_utils import DiffusionPipeline
+from ..utils import logging
+from ..utils.torch_utils import randn_tensor
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+class SplitInferenceModule(nn.Module):
+    r"""
+    A wrapper module class that splits inputs along a specified dimension before performing a forward pass.
+
+    This module is useful when you need to perform inference on large tensors in a memory-efficient way by breaking
+    them into smaller chunks, processing each chunk separately, and then reassembling the results.
+
+    Args:
+        module (`nn.Module`):
+            The underlying PyTorch module that will be applied to each chunk of split inputs.
+        split_size (`int`, defaults to `1`):
+            The size of each chunk after splitting the input tensor.
+        split_dim (`int`, defaults to `0`):
+            The dimension along which the input tensors are split.
+        input_kwargs_to_split (`List[str]`, defaults to `["hidden_states"]`):
+            A list of keyword arguments (strings) that represent the input tensors to be split.
+
+    Workflow:
+        1. The keyword arguments specified in `input_kwargs_to_split` are split into smaller chunks using
+           `torch.split()` along the dimension `split_dim` and with a chunk size of `split_size`.
+        2. The `module` is invoked once for each split with both the split inputs and any unchanged arguments
+           that were passed.
+        3. The output tensors from each split are concatenated back together along `split_dim` before returning.
+
+    Example:
+    ```python
+    >>> import torch
+    >>> import torch.nn as nn
+
+    >>> model = nn.Linear(1000, 1000)
+    >>> split_module = SplitInferenceModule(model, split_size=2, split_dim=0, input_kwargs_to_split=["input"])
+
+    >>> input_tensor = torch.randn(42, 1000)
+    >>> # Will split the tensor into 21 slices of shape [2, 1000].
+    >>> output = split_module(input=input_tensor)
+    ```
+
+    It is also possible to nest `SplitInferenceModule` across different split dimensions for more complex
+    multi-dimensional splitting.
+    """
+
+    def __init__(
+        self,
+        module: nn.Module,
+        split_size: int = 1,
+        split_dim: int = 0,
+        input_kwargs_to_split: List[str] = ["hidden_states"],
+    ) -> None:
+        super().__init__()
+
+        self.module = module
+        self.split_size = split_size
+        self.split_dim = split_dim
+        self.input_kwargs_to_split = set(input_kwargs_to_split)
+
+    def forward(self, *args, **kwargs) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
+        r"""Forward method for the `SplitInferenceModule`.
+
+        This method processes the input by splitting specified keyword arguments along a given dimension, running the
+        underlying module on each split, and then concatenating the results. The splitting is controlled by the
+        `split_size` and `split_dim` parameters specified during initialization.
+
+        Args:
+            *args (`Any`):
+                Positional arguments that are passed directly to the `module` without modification.
+            **kwargs (`Dict[str, torch.Tensor]`):
+                Keyword arguments passed to the underlying `module`. Only keyword arguments whose names match the
+                entries in `input_kwargs_to_split` and are of type `torch.Tensor` will be split. The remaining keyword
+                arguments are passed unchanged.
+
+        Returns:
+            `Union[torch.Tensor, Tuple[torch.Tensor]]`:
+                The outputs obtained from `SplitInferenceModule` are the same as if the underlying module was inferred
+                without it.
+                - If the underlying module returns a single tensor, the result will be a single concatenated tensor
+                  along the same `split_dim` after processing all splits.
+                - If the underlying module returns a tuple of tensors, each element of the tuple will be concatenated
+                  along the `split_dim` across all splits, and the final result will be a tuple of concatenated tensors.
+        """
+        split_inputs = {}
+
+        # 1. Split inputs that were specified during initialization and also present in passed kwargs
+        for key in list(kwargs.keys()):
+            if key not in self.input_kwargs_to_split or not torch.is_tensor(kwargs[key]):
+                continue
+            split_inputs[key] = torch.split(kwargs[key], self.split_size, self.split_dim)
+            kwargs.pop(key)
+
+        # 2. Invoke forward pass across each split
+        results = []
+        for split_input in zip(*split_inputs.values()):
+            inputs = dict(zip(split_inputs.keys(), split_input))
+            inputs.update(kwargs)
+
+            intermediate_tensor_or_tensor_tuple = self.module(*args, **inputs)
+            results.append(intermediate_tensor_or_tensor_tuple)
+
+        # 3. Concatenate split results to obtain final outputs
+        if isinstance(results[0], torch.Tensor):
+            return torch.cat(results, dim=self.split_dim)
+        elif isinstance(results[0], tuple):
+            return tuple([torch.cat(x, dim=self.split_dim) for x in zip(*results)])
+        else:
+            raise ValueError(
+                "In order to use the SplitInferenceModule, it is necessary for the underlying `module` to either return a torch.Tensor or a tuple of torch.Tensor's."
+            )
+
+
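As the docstring notes, `SplitInferenceModule` wrappers can be nested to split along several dimensions at once. A minimal sketch of that pattern, assuming the class is imported from the module added above (the `nn.Linear` model and all sizes are illustrative, not taken from the diff):

```python
import torch
import torch.nn as nn

from diffusers.pipelines.free_noise_utils import SplitInferenceModule

model = nn.Linear(64, 64)

# The inner wrapper chunks the sequence dimension (dim=1); the outer wrapper
# chunks the batch dimension (dim=0). Each underlying forward call therefore
# sees at most a [2, 8, 64] slice instead of the full [4, 32, 64] tensor.
inner = SplitInferenceModule(model, split_size=8, split_dim=1, input_kwargs_to_split=["input"])
outer = SplitInferenceModule(inner, split_size=2, split_dim=0, input_kwargs_to_split=["input"])

x = torch.randn(4, 32, 64)
out = outer(input=x)
assert out.shape == x.shape
```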
+class AnimateDiffFreeNoiseMixin:
+    r"""Mixin class for [FreeNoise](https://arxiv.org/abs/2310.15169)."""
+
+    def _enable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]):
+        r"""Helper function to enable FreeNoise in transformer blocks."""
+
+        for motion_module in block.motion_modules:
+            num_transformer_blocks = len(motion_module.transformer_blocks)
+
+            for i in range(num_transformer_blocks):
+                if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock):
+                    motion_module.transformer_blocks[i].set_free_noise_properties(
+                        self._free_noise_context_length,
+                        self._free_noise_context_stride,
+                        self._free_noise_weighting_scheme,
+                    )
+                else:
+                    assert isinstance(motion_module.transformer_blocks[i], BasicTransformerBlock)
+                    basic_transfomer_block = motion_module.transformer_blocks[i]
+
+                    motion_module.transformer_blocks[i] = FreeNoiseTransformerBlock(
+                        dim=basic_transfomer_block.dim,
+                        num_attention_heads=basic_transfomer_block.num_attention_heads,
+                        attention_head_dim=basic_transfomer_block.attention_head_dim,
+                        dropout=basic_transfomer_block.dropout,
+                        cross_attention_dim=basic_transfomer_block.cross_attention_dim,
+                        activation_fn=basic_transfomer_block.activation_fn,
+                        attention_bias=basic_transfomer_block.attention_bias,
+                        only_cross_attention=basic_transfomer_block.only_cross_attention,
+                        double_self_attention=basic_transfomer_block.double_self_attention,
+                        positional_embeddings=basic_transfomer_block.positional_embeddings,
+                        num_positional_embeddings=basic_transfomer_block.num_positional_embeddings,
+                        context_length=self._free_noise_context_length,
+                        context_stride=self._free_noise_context_stride,
+                        weighting_scheme=self._free_noise_weighting_scheme,
+                    ).to(device=self.device, dtype=self.dtype)
+
+                    motion_module.transformer_blocks[i].load_state_dict(
+                        basic_transfomer_block.state_dict(), strict=True
+                    )
+                    motion_module.transformer_blocks[i].set_chunk_feed_forward(
+                        basic_transfomer_block._chunk_size, basic_transfomer_block._chunk_dim
+                    )
+
+    def _disable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]):
+        r"""Helper function to disable FreeNoise in transformer blocks."""
+
+        for motion_module in block.motion_modules:
+            num_transformer_blocks = len(motion_module.transformer_blocks)
+
+            for i in range(num_transformer_blocks):
+                if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock):
+                    free_noise_transfomer_block = motion_module.transformer_blocks[i]
+
+                    motion_module.transformer_blocks[i] = BasicTransformerBlock(
+                        dim=free_noise_transfomer_block.dim,
+                        num_attention_heads=free_noise_transfomer_block.num_attention_heads,
+                        attention_head_dim=free_noise_transfomer_block.attention_head_dim,
+                        dropout=free_noise_transfomer_block.dropout,
+                        cross_attention_dim=free_noise_transfomer_block.cross_attention_dim,
+                        activation_fn=free_noise_transfomer_block.activation_fn,
+                        attention_bias=free_noise_transfomer_block.attention_bias,
+                        only_cross_attention=free_noise_transfomer_block.only_cross_attention,
+                        double_self_attention=free_noise_transfomer_block.double_self_attention,
+                        positional_embeddings=free_noise_transfomer_block.positional_embeddings,
+                        num_positional_embeddings=free_noise_transfomer_block.num_positional_embeddings,
+                    ).to(device=self.device, dtype=self.dtype)
+
+                    motion_module.transformer_blocks[i].load_state_dict(
+                        free_noise_transfomer_block.state_dict(), strict=True
+                    )
+                    motion_module.transformer_blocks[i].set_chunk_feed_forward(
+                        free_noise_transfomer_block._chunk_size, free_noise_transfomer_block._chunk_dim
+                    )
+
+    def _check_inputs_free_noise(
+        self,
+        prompt,
+        negative_prompt,
+        prompt_embeds,
+        negative_prompt_embeds,
+        num_frames,
+    ) -> None:
+        if not isinstance(prompt, (str, dict)):
+            raise ValueError(f"Expected `prompt` to have type `str` or `dict` but found {type(prompt)=}")
+
+        if negative_prompt is not None:
+            if not isinstance(negative_prompt, (str, dict)):
+                raise ValueError(
+                    f"Expected `negative_prompt` to have type `str` or `dict` but found {type(negative_prompt)=}"
+                )
+
+        if prompt_embeds is not None or negative_prompt_embeds is not None:
+            raise ValueError("`prompt_embeds` and `negative_prompt_embeds` are not supported in FreeNoise yet.")
+
+        frame_indices = [isinstance(x, int) for x in prompt.keys()]
+        frame_prompts = [isinstance(x, str) for x in prompt.values()]
+        min_frame = min(list(prompt.keys()))
+        max_frame = max(list(prompt.keys()))
+
+        if not all(frame_indices):
+            raise ValueError("Expected integer keys in `prompt` dict for FreeNoise.")
+        if not all(frame_prompts):
+            raise ValueError("Expected str values in `prompt` dict for FreeNoise.")
+        if min_frame != 0:
+            raise ValueError("The minimum frame index in `prompt` dict must be 0 as a starting prompt is necessary.")
+        if max_frame >= num_frames:
+            raise ValueError(
+                f"The maximum frame index in `prompt` dict must be less than {num_frames=} and follow 0-based indexing."
+            )
+
+    def _encode_prompt_free_noise(
+        self,
+        prompt: Union[str, Dict[int, str]],
+        num_frames: int,
+        device: torch.device,
+        num_videos_per_prompt: int,
+        do_classifier_free_guidance: bool,
+        negative_prompt: Optional[Union[str, Dict[int, str]]] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        lora_scale: Optional[float] = None,
+        clip_skip: Optional[int] = None,
+    ) -> torch.Tensor:
+        if negative_prompt is None:
+            negative_prompt = ""
+
+        # Ensure that we have a dictionary of prompts
+        if isinstance(prompt, str):
+            prompt = {0: prompt}
+        if isinstance(negative_prompt, str):
+            negative_prompt = {0: negative_prompt}
+
+        self._check_inputs_free_noise(prompt, negative_prompt, prompt_embeds, negative_prompt_embeds, num_frames)
+
+        # Sort the prompts based on frame indices
+        prompt = dict(sorted(prompt.items()))
+        negative_prompt = dict(sorted(negative_prompt.items()))
+
+        # Ensure that we have a prompt for the last frame index
+        prompt[num_frames - 1] = prompt[list(prompt.keys())[-1]]
+        negative_prompt[num_frames - 1] = negative_prompt[list(negative_prompt.keys())[-1]]
+
+        frame_indices = list(prompt.keys())
+        frame_prompts = list(prompt.values())
+        frame_negative_indices = list(negative_prompt.keys())
+        frame_negative_prompts = list(negative_prompt.values())
+
+        # Generate and interpolate positive prompts
+        prompt_embeds, _ = self.encode_prompt(
+            prompt=frame_prompts,
+            device=device,
+            num_images_per_prompt=num_videos_per_prompt,
+            do_classifier_free_guidance=False,
+            negative_prompt=None,
+            prompt_embeds=None,
+            negative_prompt_embeds=None,
+            lora_scale=lora_scale,
+            clip_skip=clip_skip,
+        )
+
+        shape = (num_frames, *prompt_embeds.shape[1:])
+        prompt_interpolation_embeds = prompt_embeds.new_zeros(shape)
+
+        for i in range(len(frame_indices) - 1):
+            start_frame = frame_indices[i]
+            end_frame = frame_indices[i + 1]
+            start_tensor = prompt_embeds[i].unsqueeze(0)
+            end_tensor = prompt_embeds[i + 1].unsqueeze(0)
+
+            prompt_interpolation_embeds[start_frame : end_frame + 1] = self._free_noise_prompt_interpolation_callback(
+                start_frame, end_frame, start_tensor, end_tensor
+            )
+
+        # Generate and interpolate negative prompts
+        negative_prompt_embeds = None
+        negative_prompt_interpolation_embeds = None
+
+        if do_classifier_free_guidance:
+            _, negative_prompt_embeds = self.encode_prompt(
+                prompt=[""] * len(frame_negative_prompts),
+                device=device,
+                num_images_per_prompt=num_videos_per_prompt,
+                do_classifier_free_guidance=True,
+                negative_prompt=frame_negative_prompts,
+                prompt_embeds=None,
+                negative_prompt_embeds=None,
+                lora_scale=lora_scale,
+                clip_skip=clip_skip,
+            )
+
+            negative_prompt_interpolation_embeds = negative_prompt_embeds.new_zeros(shape)
+
+            for i in range(len(frame_negative_indices) - 1):
+                start_frame = frame_negative_indices[i]
+                end_frame = frame_negative_indices[i + 1]
+                start_tensor = negative_prompt_embeds[i].unsqueeze(0)
+                end_tensor = negative_prompt_embeds[i + 1].unsqueeze(0)
+
+                negative_prompt_interpolation_embeds[
+                    start_frame : end_frame + 1
+                ] = self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor)
+
+        prompt_embeds = prompt_interpolation_embeds
+        negative_prompt_embeds = negative_prompt_interpolation_embeds
+
+        if do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        return prompt_embeds, negative_prompt_embeds
+
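For context, `_encode_prompt_free_noise` accepts either a plain string or a dict mapping 0-based frame indices to prompts. A small sketch of the preprocessing it applies before interpolation (the prompt values are made up for illustration):

```python
num_frames = 128
prompt = {64: "a panda surfing a wave", 0: "a panda dancing"}

# Mirrors the method above: sort by frame index, then repeat the last prompt
# at the final frame so every frame falls inside an interpolation interval.
prompt = dict(sorted(prompt.items()))
prompt[num_frames - 1] = prompt[list(prompt.keys())[-1]]
print(prompt)
# {0: 'a panda dancing', 64: 'a panda surfing a wave', 127: 'a panda surfing a wave'}
```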
+    def _prepare_latents_free_noise(
+        self,
+        batch_size: int,
+        num_channels_latents: int,
+        num_frames: int,
+        height: int,
+        width: int,
+        dtype: torch.dtype,
+        device: torch.device,
+        generator: Optional[torch.Generator] = None,
+        latents: Optional[torch.Tensor] = None,
+    ):
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        context_num_frames = (
+            self._free_noise_context_length if self._free_noise_noise_type == "repeat_context" else num_frames
+        )
+
+        shape = (
+            batch_size,
+            num_channels_latents,
+            context_num_frames,
+            height // self.vae_scale_factor,
+            width // self.vae_scale_factor,
+        )
+
+        if latents is None:
+            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+            if self._free_noise_noise_type == "random":
+                return latents
+        else:
+            if latents.size(2) == num_frames:
+                return latents
+            elif latents.size(2) != self._free_noise_context_length:
+                raise ValueError(
+                    f"You have passed `latents` as a parameter to FreeNoise. The expected number of frames is either {num_frames} or {self._free_noise_context_length}, but found {latents.size(2)}"
+                )
+            latents = latents.to(device)
+
+        if self._free_noise_noise_type == "shuffle_context":
+            for i in range(self._free_noise_context_length, num_frames, self._free_noise_context_stride):
+                # ensure window is within bounds
+                window_start = max(0, i - self._free_noise_context_length)
+                window_end = min(num_frames, window_start + self._free_noise_context_stride)
+                window_length = window_end - window_start
+
+                if window_length == 0:
+                    break
+
+                indices = torch.LongTensor(list(range(window_start, window_end)))
+                shuffled_indices = indices[torch.randperm(window_length, generator=generator)]
+
+                current_start = i
+                current_end = min(num_frames, current_start + window_length)
+                if current_end == current_start + window_length:
+                    # batch of frames perfectly fits the window
+                    latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices]
+                else:
+                    # handle the case where the last batch of frames does not fit perfectly with the window
+                    prefix_length = current_end - current_start
+                    shuffled_indices = shuffled_indices[:prefix_length]
+                    latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices]
+
+        elif self._free_noise_noise_type == "repeat_context":
+            num_repeats = (num_frames + self._free_noise_context_length - 1) // self._free_noise_context_length
+            latents = torch.cat([latents] * num_repeats, dim=2)
+
+        latents = latents[:, :, :num_frames]
+        return latents
+
+    def _lerp(
+        self, start_index: int, end_index: int, start_tensor: torch.Tensor, end_tensor: torch.Tensor
+    ) -> torch.Tensor:
+        num_indices = end_index - start_index + 1
+        interpolated_tensors = []
+
+        for i in range(num_indices):
+            alpha = i / (num_indices - 1)
+            interpolated_tensor = (1 - alpha) * start_tensor + alpha * end_tensor
+            interpolated_tensors.append(interpolated_tensor)
+
+        interpolated_tensors = torch.cat(interpolated_tensors)
+        return interpolated_tensors
+
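`_lerp` is the default prompt-interpolation callback. A standalone copy with a tiny worked example (the `lerp` name and the toy tensors are mine, not part of the diff):

```python
import torch


def lerp(start_index: int, end_index: int, start_tensor: torch.Tensor, end_tensor: torch.Tensor) -> torch.Tensor:
    # Standalone copy of `_lerp` above, for illustration only.
    num_indices = end_index - start_index + 1
    tensors = []
    for i in range(num_indices):
        alpha = i / (num_indices - 1)
        tensors.append((1 - alpha) * start_tensor + alpha * end_tensor)
    return torch.cat(tensors)


# Frames 0..4 blend linearly from the start embedding to the end embedding:
embeds = lerp(0, 4, torch.zeros(1, 2), torch.ones(1, 2))
print(embeds[:, 0])  # tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
```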
+    def enable_free_noise(
+        self,
+        context_length: Optional[int] = 16,
+        context_stride: int = 4,
+        weighting_scheme: str = "pyramid",
+        noise_type: str = "shuffle_context",
+        prompt_interpolation_callback: Optional[
+            Callable[[DiffusionPipeline, int, int, torch.Tensor, torch.Tensor], torch.Tensor]
+        ] = None,
+    ) -> None:
+        r"""
+        Enable long video generation using FreeNoise.
+
+        Args:
+            context_length (`int`, defaults to `16`, *optional*):
+                The number of video frames to process at once. It's recommended to set this to the maximum frames the
+                Motion Adapter was trained with (usually 16/24/32). If `None`, the default value from the motion
+                adapter config is used.
+            context_stride (`int`, *optional*):
+                Long videos are generated by processing many frames. FreeNoise processes these frames in sliding
+                windows of size `context_length`. Context stride allows you to specify how many frames to skip between
+                each window. For example, a context length of 16 and context stride of 4 would process 24 frames as:
+                [0, 15], [4, 19], [8, 23] (0-based indexing)
+            weighting_scheme (`str`, defaults to `pyramid`):
+                Weighting scheme for averaging latents after accumulation in FreeNoise blocks. The following weighting
+                schemes are supported currently:
+                    - "flat"
+                        Performs weighted averaging with a flat weight pattern: [1, 1, 1, 1, 1].
+                    - "pyramid"
+                        Performs weighted averaging with a pyramid-like weight pattern: [1, 2, 3, 2, 1].
+                    - "delayed_reverse_sawtooth"
+                        Performs weighted averaging with low weights for earlier frames and high-to-low weights for
+                        later frames: [0.01, 0.01, 3, 2, 1].
+            noise_type (`str`, defaults to "shuffle_context"):
+                Must be one of ["shuffle_context", "repeat_context", "random"].
+                    - "shuffle_context"
+                        Shuffles a fixed batch of `context_length` latents to create a final latent of size
+                        `num_frames`. This is usually the best setting for most generation scenarios. However, there
+                        might be visible repetition noticeable in the kinds of motion/animation generated.
+                    - "repeat_context"
+                        Repeats a fixed batch of `context_length` latents to create a final latent of size
+                        `num_frames`.
+                    - "random"
+                        The final latents are random without any repetition.
+        """
+
+        allowed_weighting_scheme = ["flat", "pyramid", "delayed_reverse_sawtooth"]
+        allowed_noise_type = ["shuffle_context", "repeat_context", "random"]
+
+        if context_length > self.motion_adapter.config.motion_max_seq_length:
+            logger.warning(
+                f"You have set {context_length=} which is greater than {self.motion_adapter.config.motion_max_seq_length=}. This can lead to bad generation results."
+            )
+        if weighting_scheme not in allowed_weighting_scheme:
+            raise ValueError(
+                f"The parameter `weighting_scheme` must be one of {allowed_weighting_scheme}, but got {weighting_scheme=}"
+            )
+        if noise_type not in allowed_noise_type:
+            raise ValueError(f"The parameter `noise_type` must be one of {allowed_noise_type}, but got {noise_type=}")
+
+        self._free_noise_context_length = context_length or self.motion_adapter.config.motion_max_seq_length
+        self._free_noise_context_stride = context_stride
+        self._free_noise_weighting_scheme = weighting_scheme
+        self._free_noise_noise_type = noise_type
+        self._free_noise_prompt_interpolation_callback = prompt_interpolation_callback or self._lerp
+
+        if hasattr(self.unet.mid_block, "motion_modules"):
+            blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks]
+        else:
+            blocks = [*self.unet.down_blocks, *self.unet.up_blocks]
+
+        for block in blocks:
+            self._enable_free_noise_in_block(block)
+
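A sketch of how this mixin is typically driven from `AnimateDiffPipeline`, which inherits it in this release. The checkpoint names are common Hub IDs from the AnimateDiff documentation, assumed here rather than taken from the diff:

```python
import torch

from diffusers import AnimateDiffPipeline, MotionAdapter

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
pipe = AnimateDiffPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter, torch_dtype=torch.float16
).to("cuda")

# Sliding windows of 16 frames, advanced 4 frames at a time.
pipe.enable_free_noise(context_length=16, context_stride=4)

# With FreeNoise enabled, `prompt` may be a dict of frame index -> prompt,
# which is routed through `_encode_prompt_free_noise` above.
video = pipe(
    prompt={0: "a panda dancing", 64: "a panda surfing a wave"},
    num_frames=128,
    num_inference_steps=25,
).frames[0]
```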
+    def disable_free_noise(self) -> None:
+        r"""Disable the FreeNoise sampling mechanism."""
+        self._free_noise_context_length = None
+
+        if hasattr(self.unet.mid_block, "motion_modules"):
+            blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks]
+        else:
+            blocks = [*self.unet.down_blocks, *self.unet.up_blocks]
+
+        for block in blocks:
+            self._disable_free_noise_in_block(block)
+
+    def _enable_split_inference_motion_modules_(
+        self, motion_modules: List[AnimateDiffTransformer3D], spatial_split_size: int
+    ) -> None:
+        for motion_module in motion_modules:
+            motion_module.proj_in = SplitInferenceModule(motion_module.proj_in, spatial_split_size, 0, ["input"])
+
+            for i in range(len(motion_module.transformer_blocks)):
+                motion_module.transformer_blocks[i] = SplitInferenceModule(
+                    motion_module.transformer_blocks[i],
+                    spatial_split_size,
+                    0,
+                    ["hidden_states", "encoder_hidden_states"],
+                )
+
+            motion_module.proj_out = SplitInferenceModule(motion_module.proj_out, spatial_split_size, 0, ["input"])
+
+    def _enable_split_inference_attentions_(
+        self, attentions: List[Transformer2DModel], temporal_split_size: int
+    ) -> None:
+        for i in range(len(attentions)):
+            attentions[i] = SplitInferenceModule(
+                attentions[i], temporal_split_size, 0, ["hidden_states", "encoder_hidden_states"]
+            )
+
+    def _enable_split_inference_resnets_(self, resnets: List[ResnetBlock2D], temporal_split_size: int) -> None:
+        for i in range(len(resnets)):
+            resnets[i] = SplitInferenceModule(resnets[i], temporal_split_size, 0, ["input_tensor", "temb"])
+
+    def _enable_split_inference_samplers_(
+        self, samplers: Union[List[Downsample2D], List[Upsample2D]], temporal_split_size: int
+    ) -> None:
+        for i in range(len(samplers)):
+            samplers[i] = SplitInferenceModule(samplers[i], temporal_split_size, 0, ["hidden_states"])
+
+    def enable_free_noise_split_inference(self, spatial_split_size: int = 256, temporal_split_size: int = 16) -> None:
+        r"""
+        Enable FreeNoise memory optimizations by utilizing
+        [`~diffusers.pipelines.free_noise_utils.SplitInferenceModule`] across different intermediate modeling blocks.
+
+        Args:
+            spatial_split_size (`int`, defaults to `256`):
+                The split size across spatial dimensions for internal blocks. This is used in facilitating split
+                inference across the effective batch dimension (`[B x H x W, F, C]`) of intermediate tensors in motion
+                modeling blocks.
+            temporal_split_size (`int`, defaults to `16`):
+                The split size across temporal dimensions for internal blocks. This is used in facilitating split
+                inference across the effective batch dimension (`[B x F, H x W, C]`) of intermediate tensors in
+                spatial attention, resnets, downsampling and upsampling blocks.
+        """
+        # TODO(aryan): Discuss on what's the best way to provide more control to users
+        blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks]
+        for block in blocks:
+            if getattr(block, "motion_modules", None) is not None:
+                self._enable_split_inference_motion_modules_(block.motion_modules, spatial_split_size)
+            if getattr(block, "attentions", None) is not None:
+                self._enable_split_inference_attentions_(block.attentions, temporal_split_size)
+            if getattr(block, "resnets", None) is not None:
+                self._enable_split_inference_resnets_(block.resnets, temporal_split_size)
+            if getattr(block, "downsamplers", None) is not None:
+                self._enable_split_inference_samplers_(block.downsamplers, temporal_split_size)
+            if getattr(block, "upsamplers", None) is not None:
+                self._enable_split_inference_samplers_(block.upsamplers, temporal_split_size)
+
+    @property
+    def free_noise_enabled(self):
+        return hasattr(self, "_free_noise_context_length") and self._free_noise_context_length is not None
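Continuing the pipeline sketch above, the split-inference optimization composes with FreeNoise; the values shown are simply the method's defaults:

```python
# Wrap motion modules, attentions, resnets and down/up-samplers in
# SplitInferenceModule so large intermediate batches are processed in chunks.
pipe.enable_free_noise_split_inference(spatial_split_size=256, temporal_split_size=16)
```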
diffusers/pipelines/hunyuan_video/__init__.py
@@ -0,0 +1,48 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_hunyuan_video"] = ["HunyuanVideoPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_hunyuan_video import HunyuanVideoPipeline
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
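This `__init__.py` follows the library's standard lazy-import pattern: at normal import time the submodule is only registered with `_LazyModule` and is materialized on first attribute access, while missing `torch`/`transformers` installs are replaced by dummy objects that raise a helpful error. A usage sketch:

```python
# Resolves `pipeline_hunyuan_video` lazily on first access; if torch or
# transformers is missing, the dummy object raises instead of the import.
from diffusers.pipelines.hunyuan_video import HunyuanVideoPipeline
```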