diffusers: 0.27.0-py3-none-any.whl → 0.32.2-py3-none-any.whl

Files changed (445)
  1. diffusers/__init__.py +233 -6
  2. diffusers/callbacks.py +209 -0
  3. diffusers/commands/env.py +102 -6
  4. diffusers/configuration_utils.py +45 -16
  5. diffusers/dependency_versions_table.py +4 -3
  6. diffusers/image_processor.py +434 -110
  7. diffusers/loaders/__init__.py +42 -9
  8. diffusers/loaders/ip_adapter.py +626 -36
  9. diffusers/loaders/lora_base.py +900 -0
  10. diffusers/loaders/lora_conversion_utils.py +991 -125
  11. diffusers/loaders/lora_pipeline.py +3812 -0
  12. diffusers/loaders/peft.py +571 -7
  13. diffusers/loaders/single_file.py +405 -173
  14. diffusers/loaders/single_file_model.py +385 -0
  15. diffusers/loaders/single_file_utils.py +1783 -713
  16. diffusers/loaders/textual_inversion.py +41 -23
  17. diffusers/loaders/transformer_flux.py +181 -0
  18. diffusers/loaders/transformer_sd3.py +89 -0
  19. diffusers/loaders/unet.py +464 -540
  20. diffusers/loaders/unet_loader_utils.py +163 -0
  21. diffusers/models/__init__.py +76 -7
  22. diffusers/models/activations.py +65 -10
  23. diffusers/models/adapter.py +53 -53
  24. diffusers/models/attention.py +605 -18
  25. diffusers/models/attention_flax.py +1 -1
  26. diffusers/models/attention_processor.py +4304 -687
  27. diffusers/models/autoencoders/__init__.py +8 -0
  28. diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
  29. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  30. diffusers/models/autoencoders/autoencoder_kl.py +110 -28
  31. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  32. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
  33. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  34. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  35. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  36. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
  37. diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
  38. diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
  39. diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
  40. diffusers/models/autoencoders/vae.py +41 -29
  41. diffusers/models/autoencoders/vq_model.py +182 -0
  42. diffusers/models/controlnet.py +47 -800
  43. diffusers/models/controlnet_flux.py +70 -0
  44. diffusers/models/controlnet_sd3.py +68 -0
  45. diffusers/models/controlnet_sparsectrl.py +116 -0
  46. diffusers/models/controlnets/__init__.py +23 -0
  47. diffusers/models/controlnets/controlnet.py +872 -0
  48. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
  49. diffusers/models/controlnets/controlnet_flux.py +536 -0
  50. diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
  51. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  52. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  53. diffusers/models/controlnets/controlnet_union.py +832 -0
  54. diffusers/models/controlnets/controlnet_xs.py +1946 -0
  55. diffusers/models/controlnets/multicontrolnet.py +183 -0
  56. diffusers/models/downsampling.py +85 -18
  57. diffusers/models/embeddings.py +1856 -158
  58. diffusers/models/embeddings_flax.py +23 -9
  59. diffusers/models/model_loading_utils.py +480 -0
  60. diffusers/models/modeling_flax_pytorch_utils.py +2 -1
  61. diffusers/models/modeling_flax_utils.py +2 -7
  62. diffusers/models/modeling_outputs.py +14 -0
  63. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  64. diffusers/models/modeling_utils.py +611 -146
  65. diffusers/models/normalization.py +361 -20
  66. diffusers/models/resnet.py +18 -23
  67. diffusers/models/transformers/__init__.py +16 -0
  68. diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
  69. diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
  70. diffusers/models/transformers/dit_transformer_2d.py +240 -0
  71. diffusers/models/transformers/dual_transformer_2d.py +9 -8
  72. diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
  73. diffusers/models/transformers/latte_transformer_3d.py +327 -0
  74. diffusers/models/transformers/lumina_nextdit2d.py +340 -0
  75. diffusers/models/transformers/pixart_transformer_2d.py +445 -0
  76. diffusers/models/transformers/prior_transformer.py +13 -13
  77. diffusers/models/transformers/sana_transformer.py +488 -0
  78. diffusers/models/transformers/stable_audio_transformer.py +458 -0
  79. diffusers/models/transformers/t5_film_transformer.py +17 -19
  80. diffusers/models/transformers/transformer_2d.py +297 -187
  81. diffusers/models/transformers/transformer_allegro.py +422 -0
  82. diffusers/models/transformers/transformer_cogview3plus.py +386 -0
  83. diffusers/models/transformers/transformer_flux.py +593 -0
  84. diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
  85. diffusers/models/transformers/transformer_ltx.py +469 -0
  86. diffusers/models/transformers/transformer_mochi.py +499 -0
  87. diffusers/models/transformers/transformer_sd3.py +461 -0
  88. diffusers/models/transformers/transformer_temporal.py +21 -19
  89. diffusers/models/unets/unet_1d.py +8 -8
  90. diffusers/models/unets/unet_1d_blocks.py +31 -31
  91. diffusers/models/unets/unet_2d.py +17 -10
  92. diffusers/models/unets/unet_2d_blocks.py +225 -149
  93. diffusers/models/unets/unet_2d_condition.py +50 -53
  94. diffusers/models/unets/unet_2d_condition_flax.py +6 -5
  95. diffusers/models/unets/unet_3d_blocks.py +192 -1057
  96. diffusers/models/unets/unet_3d_condition.py +22 -27
  97. diffusers/models/unets/unet_i2vgen_xl.py +22 -18
  98. diffusers/models/unets/unet_kandinsky3.py +2 -2
  99. diffusers/models/unets/unet_motion_model.py +1413 -89
  100. diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
  101. diffusers/models/unets/unet_stable_cascade.py +19 -18
  102. diffusers/models/unets/uvit_2d.py +2 -2
  103. diffusers/models/upsampling.py +95 -26
  104. diffusers/models/vq_model.py +12 -164
  105. diffusers/optimization.py +1 -1
  106. diffusers/pipelines/__init__.py +202 -3
  107. diffusers/pipelines/allegro/__init__.py +48 -0
  108. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  109. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  110. diffusers/pipelines/amused/pipeline_amused.py +12 -12
  111. diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
  112. diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
  113. diffusers/pipelines/animatediff/__init__.py +8 -0
  114. diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
  115. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
  116. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
  117. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
  118. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
  119. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
  120. diffusers/pipelines/animatediff/pipeline_output.py +3 -2
  121. diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
  122. diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
  123. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
  124. diffusers/pipelines/aura_flow/__init__.py +48 -0
  125. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
  126. diffusers/pipelines/auto_pipeline.py +196 -28
  127. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  128. diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
  129. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
  130. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
  131. diffusers/pipelines/cogvideo/__init__.py +54 -0
  132. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
  133. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
  134. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
  135. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
  136. diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
  137. diffusers/pipelines/cogview3/__init__.py +47 -0
  138. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
  139. diffusers/pipelines/cogview3/pipeline_output.py +21 -0
  140. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
  141. diffusers/pipelines/controlnet/__init__.py +86 -80
  142. diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
  143. diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
  144. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
  145. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
  146. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
  147. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
  148. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
  149. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
  150. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  151. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  152. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  153. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  154. diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
  155. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
  156. diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
  157. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
  158. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
  159. diffusers/pipelines/controlnet_xs/__init__.py +68 -0
  160. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
  161. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
  162. diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
  163. diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
  164. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
  165. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
  166. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
  167. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
  168. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
  169. diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
  170. diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
  171. diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
  172. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
  173. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
  174. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
  175. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
  176. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
  177. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
  178. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
  179. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
  180. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
  181. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
  182. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
  183. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
  184. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
  185. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
  186. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
  187. diffusers/pipelines/dit/pipeline_dit.py +7 -4
  188. diffusers/pipelines/flux/__init__.py +69 -0
  189. diffusers/pipelines/flux/modeling_flux.py +47 -0
  190. diffusers/pipelines/flux/pipeline_flux.py +957 -0
  191. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  192. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  193. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  194. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
  195. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
  196. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
  197. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  198. diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
  199. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
  200. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  201. diffusers/pipelines/flux/pipeline_output.py +37 -0
  202. diffusers/pipelines/free_init_utils.py +41 -38
  203. diffusers/pipelines/free_noise_utils.py +596 -0
  204. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  205. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  206. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  207. diffusers/pipelines/hunyuandit/__init__.py +48 -0
  208. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
  209. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
  210. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
  211. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
  212. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
  213. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
  214. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
  215. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
  216. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
  217. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
  218. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
  219. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
  220. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
  221. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
  222. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
  223. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
  224. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
  225. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
  226. diffusers/pipelines/kolors/__init__.py +54 -0
  227. diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
  228. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
  229. diffusers/pipelines/kolors/pipeline_output.py +21 -0
  230. diffusers/pipelines/kolors/text_encoder.py +889 -0
  231. diffusers/pipelines/kolors/tokenizer.py +338 -0
  232. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
  233. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
  234. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
  235. diffusers/pipelines/latte/__init__.py +48 -0
  236. diffusers/pipelines/latte/pipeline_latte.py +881 -0
  237. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
  238. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
  239. diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
  240. diffusers/pipelines/ltx/__init__.py +50 -0
  241. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  242. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  243. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  244. diffusers/pipelines/lumina/__init__.py +48 -0
  245. diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
  246. diffusers/pipelines/marigold/__init__.py +50 -0
  247. diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
  248. diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
  249. diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
  250. diffusers/pipelines/mochi/__init__.py +48 -0
  251. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  252. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  253. diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
  254. diffusers/pipelines/pag/__init__.py +80 -0
  255. diffusers/pipelines/pag/pag_utils.py +243 -0
  256. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
  257. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
  258. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
  259. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
  260. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
  261. diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
  262. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
  263. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  264. diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
  265. diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
  266. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  267. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
  268. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
  269. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  270. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
  271. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
  272. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
  273. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
  274. diffusers/pipelines/pia/pipeline_pia.py +74 -164
  275. diffusers/pipelines/pipeline_flax_utils.py +5 -10
  276. diffusers/pipelines/pipeline_loading_utils.py +515 -53
  277. diffusers/pipelines/pipeline_utils.py +411 -222
  278. diffusers/pipelines/pixart_alpha/__init__.py +8 -1
  279. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
  280. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
  281. diffusers/pipelines/sana/__init__.py +47 -0
  282. diffusers/pipelines/sana/pipeline_output.py +21 -0
  283. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  284. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
  285. diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
  286. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
  287. diffusers/pipelines/shap_e/renderer.py +1 -1
  288. diffusers/pipelines/stable_audio/__init__.py +50 -0
  289. diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
  290. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
  291. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
  292. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
  293. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
  294. diffusers/pipelines/stable_diffusion/__init__.py +0 -1
  295. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
  296. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  297. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
  298. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
  299. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
  300. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
  301. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
  302. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
  303. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
  304. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
  305. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
  306. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
  307. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
  308. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
  309. diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
  310. diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
  311. diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
  312. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
  313. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
  314. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
  315. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
  316. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
  317. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
  318. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
  319. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
  320. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
  321. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
  322. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
  323. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
  324. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  325. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
  326. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
  327. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
  328. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
  329. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
  330. diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
  331. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
  332. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
  333. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
  334. diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
  335. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
  336. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
  337. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
  338. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
  339. diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
  340. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
  341. diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
  342. diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
  343. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
  344. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
  345. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
  346. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
  347. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
  348. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
  349. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
  350. diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
  351. diffusers/quantizers/auto.py +139 -0
  352. diffusers/quantizers/base.py +233 -0
  353. diffusers/quantizers/bitsandbytes/__init__.py +2 -0
  354. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
  355. diffusers/quantizers/bitsandbytes/utils.py +306 -0
  356. diffusers/quantizers/gguf/__init__.py +1 -0
  357. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  358. diffusers/quantizers/gguf/utils.py +456 -0
  359. diffusers/quantizers/quantization_config.py +669 -0
  360. diffusers/quantizers/torchao/__init__.py +15 -0
  361. diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
  362. diffusers/schedulers/__init__.py +12 -2
  363. diffusers/schedulers/deprecated/__init__.py +1 -1
  364. diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
  365. diffusers/schedulers/scheduling_amused.py +5 -5
  366. diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
  367. diffusers/schedulers/scheduling_consistency_models.py +23 -25
  368. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
  369. diffusers/schedulers/scheduling_ddim.py +27 -26
  370. diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
  371. diffusers/schedulers/scheduling_ddim_flax.py +2 -1
  372. diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
  373. diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
  374. diffusers/schedulers/scheduling_ddpm.py +27 -30
  375. diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
  376. diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
  377. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
  378. diffusers/schedulers/scheduling_deis_multistep.py +150 -50
  379. diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
  380. diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
  381. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
  382. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
  383. diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
  384. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
  385. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
  386. diffusers/schedulers/scheduling_edm_euler.py +62 -39
  387. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
  388. diffusers/schedulers/scheduling_euler_discrete.py +255 -74
  389. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
  390. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
  391. diffusers/schedulers/scheduling_heun_discrete.py +174 -46
  392. diffusers/schedulers/scheduling_ipndm.py +9 -9
  393. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
  394. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
  395. diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
  396. diffusers/schedulers/scheduling_lcm.py +23 -29
  397. diffusers/schedulers/scheduling_lms_discrete.py +105 -28
  398. diffusers/schedulers/scheduling_pndm.py +20 -20
  399. diffusers/schedulers/scheduling_repaint.py +21 -21
  400. diffusers/schedulers/scheduling_sasolver.py +157 -60
  401. diffusers/schedulers/scheduling_sde_ve.py +19 -19
  402. diffusers/schedulers/scheduling_tcd.py +41 -36
  403. diffusers/schedulers/scheduling_unclip.py +19 -16
  404. diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
  405. diffusers/schedulers/scheduling_utils.py +12 -5
  406. diffusers/schedulers/scheduling_utils_flax.py +1 -3
  407. diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
  408. diffusers/training_utils.py +214 -30
  409. diffusers/utils/__init__.py +17 -1
  410. diffusers/utils/constants.py +3 -0
  411. diffusers/utils/doc_utils.py +1 -0
  412. diffusers/utils/dummy_pt_objects.py +592 -7
  413. diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
  414. diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
  415. diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
  416. diffusers/utils/dynamic_modules_utils.py +34 -29
  417. diffusers/utils/export_utils.py +50 -6
  418. diffusers/utils/hub_utils.py +131 -17
  419. diffusers/utils/import_utils.py +210 -8
  420. diffusers/utils/loading_utils.py +118 -5
  421. diffusers/utils/logging.py +4 -2
  422. diffusers/utils/peft_utils.py +37 -7
  423. diffusers/utils/state_dict_utils.py +13 -2
  424. diffusers/utils/testing_utils.py +193 -11
  425. diffusers/utils/torch_utils.py +4 -0
  426. diffusers/video_processor.py +113 -0
  427. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
  428. diffusers-0.32.2.dist-info/RECORD +550 -0
  429. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
  430. diffusers/loaders/autoencoder.py +0 -146
  431. diffusers/loaders/controlnet.py +0 -136
  432. diffusers/loaders/lora.py +0 -1349
  433. diffusers/models/prior_transformer.py +0 -12
  434. diffusers/models/t5_film_transformer.py +0 -70
  435. diffusers/models/transformer_2d.py +0 -25
  436. diffusers/models/transformer_temporal.py +0 -34
  437. diffusers/models/unet_1d.py +0 -26
  438. diffusers/models/unet_1d_blocks.py +0 -203
  439. diffusers/models/unet_2d.py +0 -27
  440. diffusers/models/unet_2d_blocks.py +0 -375
  441. diffusers/models/unet_2d_condition.py +0 -25
  442. diffusers-0.27.0.dist-info/RECORD +0 -399
  443. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
  444. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
  445. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
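Several entries above are renames (the {old → new} notation), e.g. model-level ControlNet code moving under diffusers/models/controlnets/ while diffusers/models/controlnet.py shrinks to a thin remainder (+47 -800). A hedged import sketch of what that layout change implies; treating the old deep path as a deprecation shim is an assumption based on the residual file shown above, not something this diff verifies:

    # Stable, version-independent surface: works on both 0.27.0 and 0.32.2.
    from diffusers import ControlNetModel

    # New 0.32 location, per the controlnets/ entries in the file list above.
    from diffusers.models.controlnets.controlnet import ControlNetModel

    # Old deep path; assumed to keep resolving (with a deprecation warning)
    # given the slimmed-down diffusers/models/controlnet.py above.
    from diffusers.models.controlnet import ControlNetModel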
diffusers/models/transformers/stable_audio_transformer.py
@@ -0,0 +1,458 @@
+# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Any, Dict, Optional, Union
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint
+
+from ...configuration_utils import ConfigMixin, register_to_config
+from ...models.attention import FeedForward
+from ...models.attention_processor import (
+    Attention,
+    AttentionProcessor,
+    StableAudioAttnProcessor2_0,
+)
+from ...models.modeling_utils import ModelMixin
+from ...models.transformers.transformer_2d import Transformer2DModelOutput
+from ...utils import is_torch_version, logging
+from ...utils.torch_utils import maybe_allow_in_graph
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+class StableAudioGaussianFourierProjection(nn.Module):
+    """Gaussian Fourier embeddings for noise levels."""
+
+    # Copied from diffusers.models.embeddings.GaussianFourierProjection.__init__
+    def __init__(
+        self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
+    ):
+        super().__init__()
+        self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
+        self.log = log
+        self.flip_sin_to_cos = flip_sin_to_cos
+
+        if set_W_to_weight:
+            # to delete later
+            del self.weight
+            self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
+            self.weight = self.W
+            del self.W
+
+    def forward(self, x):
+        if self.log:
+            x = torch.log(x)
+
+        x_proj = 2 * np.pi * x[:, None] @ self.weight[None, :]
+
+        if self.flip_sin_to_cos:
+            out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1)
+        else:
+            out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
+        return out
+
+
+@maybe_allow_in_graph
+class StableAudioDiTBlock(nn.Module):
+    r"""
+    Transformer block used in Stable Audio model (https://github.com/Stability-AI/stable-audio-tools). Allow skip
+    connection and QKNorm
+
+    Parameters:
+        dim (`int`): The number of channels in the input and output.
+        num_attention_heads (`int`): The number of heads to use for the query states.
+        num_key_value_attention_heads (`int`): The number of heads to use for the key and value states.
+        attention_head_dim (`int`): The number of channels in each head.
+        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
+        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
+        upcast_attention (`bool`, *optional*):
+            Whether to upcast the attention computation to float32. This is useful for mixed precision training.
+    """
+
+    def __init__(
+        self,
+        dim: int,
+        num_attention_heads: int,
+        num_key_value_attention_heads: int,
+        attention_head_dim: int,
+        dropout=0.0,
+        cross_attention_dim: Optional[int] = None,
+        upcast_attention: bool = False,
+        norm_eps: float = 1e-5,
+        ff_inner_dim: Optional[int] = None,
+    ):
+        super().__init__()
+        # Define 3 blocks. Each block has its own normalization layer.
+        # 1. Self-Attn
+        self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=norm_eps)
+        self.attn1 = Attention(
+            query_dim=dim,
+            heads=num_attention_heads,
+            dim_head=attention_head_dim,
+            dropout=dropout,
+            bias=False,
+            upcast_attention=upcast_attention,
+            out_bias=False,
+            processor=StableAudioAttnProcessor2_0(),
+        )
+
+        # 2. Cross-Attn
+        self.norm2 = nn.LayerNorm(dim, norm_eps, True)
+
+        self.attn2 = Attention(
+            query_dim=dim,
+            cross_attention_dim=cross_attention_dim,
+            heads=num_attention_heads,
+            dim_head=attention_head_dim,
+            kv_heads=num_key_value_attention_heads,
+            dropout=dropout,
+            bias=False,
+            upcast_attention=upcast_attention,
+            out_bias=False,
+            processor=StableAudioAttnProcessor2_0(),
+        )  # is self-attn if encoder_hidden_states is none
+
+        # 3. Feed-forward
+        self.norm3 = nn.LayerNorm(dim, norm_eps, True)
+        self.ff = FeedForward(
+            dim,
+            dropout=dropout,
+            activation_fn="swiglu",
+            final_dropout=False,
+            inner_dim=ff_inner_dim,
+            bias=True,
+        )
+
+        # let chunk size default to None
+        self._chunk_size = None
+        self._chunk_dim = 0
+
+    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
+        # Sets chunk feed-forward
+        self._chunk_size = chunk_size
+        self._chunk_dim = dim
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        encoder_hidden_states: Optional[torch.Tensor] = None,
+        encoder_attention_mask: Optional[torch.Tensor] = None,
+        rotary_embedding: Optional[torch.FloatTensor] = None,
+    ) -> torch.Tensor:
+        # Notice that normalization is always applied before the real computation in the following blocks.
+        # 0. Self-Attention
+        norm_hidden_states = self.norm1(hidden_states)
+
+        attn_output = self.attn1(
+            norm_hidden_states,
+            attention_mask=attention_mask,
+            rotary_emb=rotary_embedding,
+        )
+
+        hidden_states = attn_output + hidden_states
+
+        # 2. Cross-Attention
+        norm_hidden_states = self.norm2(hidden_states)
+
+        attn_output = self.attn2(
+            norm_hidden_states,
+            encoder_hidden_states=encoder_hidden_states,
+            attention_mask=encoder_attention_mask,
+        )
+        hidden_states = attn_output + hidden_states
+
+        # 3. Feed-forward
+        norm_hidden_states = self.norm3(hidden_states)
+        ff_output = self.ff(norm_hidden_states)
+
+        hidden_states = ff_output + hidden_states
+
+        return hidden_states
+
+
+class StableAudioDiTModel(ModelMixin, ConfigMixin):
+    """
+    The Diffusion Transformer model introduced in Stable Audio.
+
+    Reference: https://github.com/Stability-AI/stable-audio-tools
+
+    Parameters:
+        sample_size ( `int`, *optional*, defaults to 1024): The size of the input sample.
+        in_channels (`int`, *optional*, defaults to 64): The number of channels in the input.
+        num_layers (`int`, *optional*, defaults to 24): The number of layers of Transformer blocks to use.
+        attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
+        num_attention_heads (`int`, *optional*, defaults to 24): The number of heads to use for the query states.
+        num_key_value_attention_heads (`int`, *optional*, defaults to 12):
+            The number of heads to use for the key and value states.
+        out_channels (`int`, defaults to 64): Number of output channels.
+        cross_attention_dim ( `int`, *optional*, defaults to 768): Dimension of the cross-attention projection.
+        time_proj_dim ( `int`, *optional*, defaults to 256): Dimension of the timestep inner projection.
+        global_states_input_dim ( `int`, *optional*, defaults to 1536):
+            Input dimension of the global hidden states projection.
+        cross_attention_input_dim ( `int`, *optional*, defaults to 768):
+            Input dimension of the cross-attention projection
+    """
+
+    _supports_gradient_checkpointing = True
+
+    @register_to_config
+    def __init__(
+        self,
+        sample_size: int = 1024,
+        in_channels: int = 64,
+        num_layers: int = 24,
+        attention_head_dim: int = 64,
+        num_attention_heads: int = 24,
+        num_key_value_attention_heads: int = 12,
+        out_channels: int = 64,
+        cross_attention_dim: int = 768,
+        time_proj_dim: int = 256,
+        global_states_input_dim: int = 1536,
+        cross_attention_input_dim: int = 768,
+    ):
+        super().__init__()
+        self.sample_size = sample_size
+        self.out_channels = out_channels
+        self.inner_dim = num_attention_heads * attention_head_dim
+
+        self.time_proj = StableAudioGaussianFourierProjection(
+            embedding_size=time_proj_dim // 2,
+            flip_sin_to_cos=True,
+            log=False,
+            set_W_to_weight=False,
+        )
+
+        self.timestep_proj = nn.Sequential(
+            nn.Linear(time_proj_dim, self.inner_dim, bias=True),
+            nn.SiLU(),
+            nn.Linear(self.inner_dim, self.inner_dim, bias=True),
+        )
+
+        self.global_proj = nn.Sequential(
+            nn.Linear(global_states_input_dim, self.inner_dim, bias=False),
+            nn.SiLU(),
+            nn.Linear(self.inner_dim, self.inner_dim, bias=False),
+        )
+
+        self.cross_attention_proj = nn.Sequential(
+            nn.Linear(cross_attention_input_dim, cross_attention_dim, bias=False),
+            nn.SiLU(),
+            nn.Linear(cross_attention_dim, cross_attention_dim, bias=False),
+        )
+
+        self.preprocess_conv = nn.Conv1d(in_channels, in_channels, 1, bias=False)
+        self.proj_in = nn.Linear(in_channels, self.inner_dim, bias=False)
+
+        self.transformer_blocks = nn.ModuleList(
+            [
+                StableAudioDiTBlock(
+                    dim=self.inner_dim,
+                    num_attention_heads=num_attention_heads,
+                    num_key_value_attention_heads=num_key_value_attention_heads,
+                    attention_head_dim=attention_head_dim,
+                    cross_attention_dim=cross_attention_dim,
+                )
+                for i in range(num_layers)
+            ]
+        )
+
+        self.proj_out = nn.Linear(self.inner_dim, self.out_channels, bias=False)
+        self.postprocess_conv = nn.Conv1d(self.out_channels, self.out_channels, 1, bias=False)
+
+        self.gradient_checkpointing = False
+
+    @property
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
+    def attn_processors(self) -> Dict[str, AttentionProcessor]:
+        r"""
+        Returns:
+            `dict` of attention processors: A dictionary containing all attention processors used in the model with
+            indexed by its weight name.
+        """
+        # set recursively
+        processors = {}
+
+        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+            if hasattr(module, "get_processor"):
+                processors[f"{name}.processor"] = module.get_processor()
+
+            for sub_name, child in module.named_children():
+                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+            return processors
+
+        for name, module in self.named_children():
+            fn_recursive_add_processors(name, module, processors)
+
+        return processors
+
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
+    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+        r"""
+        Sets the attention processor to use to compute attention.
+
+        Parameters:
+            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+                The instantiated processor class or a dictionary of processor classes that will be set as the processor
+                for **all** `Attention` layers.
+
+                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+                processor. This is strongly recommended when setting trainable attention processors.
+
+        """
+        count = len(self.attn_processors.keys())
+
+        if isinstance(processor, dict) and len(processor) != count:
+            raise ValueError(
+                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+            )
+
+        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+            if hasattr(module, "set_processor"):
+                if not isinstance(processor, dict):
+                    module.set_processor(processor)
+                else:
+                    module.set_processor(processor.pop(f"{name}.processor"))
+
+            for sub_name, child in module.named_children():
+                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+        for name, module in self.named_children():
+            fn_recursive_attn_processor(name, module, processor)
+
+    # Copied from diffusers.models.transformers.hunyuan_transformer_2d.HunyuanDiT2DModel.set_default_attn_processor with Hunyuan->StableAudio
+    def set_default_attn_processor(self):
+        """
+        Disables custom attention processors and sets the default attention implementation.
+        """
+        self.set_attn_processor(StableAudioAttnProcessor2_0())
+
+    def _set_gradient_checkpointing(self, module, value=False):
+        if hasattr(module, "gradient_checkpointing"):
+            module.gradient_checkpointing = value
+
+    def forward(
+        self,
+        hidden_states: torch.FloatTensor,
+        timestep: torch.LongTensor = None,
+        encoder_hidden_states: torch.FloatTensor = None,
+        global_hidden_states: torch.FloatTensor = None,
+        rotary_embedding: torch.FloatTensor = None,
+        return_dict: bool = True,
+        attention_mask: Optional[torch.LongTensor] = None,
+        encoder_attention_mask: Optional[torch.LongTensor] = None,
+    ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
+        """
+        The [`StableAudioDiTModel`] forward method.
+
+        Args:
+            hidden_states (`torch.FloatTensor` of shape `(batch size, in_channels, sequence_len)`):
+                Input `hidden_states`.
+            timestep ( `torch.LongTensor`):
+                Used to indicate denoising step.
+            encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, encoder_sequence_len, cross_attention_input_dim)`):
+                Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
+            global_hidden_states (`torch.FloatTensor` of shape `(batch size, global_sequence_len, global_states_input_dim)`):
+                Global embeddings that will be prepended to the hidden states.
+            rotary_embedding (`torch.Tensor`):
+                The rotary embeddings to apply on query and key tensors during attention calculation.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
+                tuple.
+            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_len)`, *optional*):
+                Mask to avoid performing attention on padding token indices, formed by concatenating the attention
+                masks
+                for the two text encoders together. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+            encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_len)`, *optional*):
+                Mask to avoid performing attention on padding token cross-attention indices, formed by concatenating
+                the attention masks
+                for the two text encoders together. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+        Returns:
+            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
+            `tuple` where the first element is the sample tensor.
+        """
+        cross_attention_hidden_states = self.cross_attention_proj(encoder_hidden_states)
+        global_hidden_states = self.global_proj(global_hidden_states)
+        time_hidden_states = self.timestep_proj(self.time_proj(timestep.to(self.dtype)))
+
+        global_hidden_states = global_hidden_states + time_hidden_states.unsqueeze(1)
+
+        hidden_states = self.preprocess_conv(hidden_states) + hidden_states
+        # (batch_size, dim, sequence_length) -> (batch_size, sequence_length, dim)
+        hidden_states = hidden_states.transpose(1, 2)
+
+        hidden_states = self.proj_in(hidden_states)
+
+        # prepend global states to hidden states
+        hidden_states = torch.cat([global_hidden_states, hidden_states], dim=-2)
+        if attention_mask is not None:
+            prepend_mask = torch.ones((hidden_states.shape[0], 1), device=hidden_states.device, dtype=torch.bool)
+            attention_mask = torch.cat([prepend_mask, attention_mask], dim=-1)
+
+        for block in self.transformer_blocks:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
+
+                def create_custom_forward(module, return_dict=None):
+                    def custom_forward(*inputs):
+                        if return_dict is not None:
+                            return module(*inputs, return_dict=return_dict)
+                        else:
+                            return module(*inputs)
+
+                    return custom_forward
+
+                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+                hidden_states = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(block),
+                    hidden_states,
+                    attention_mask,
+                    cross_attention_hidden_states,
+                    encoder_attention_mask,
+                    rotary_embedding,
+                    **ckpt_kwargs,
+                )
+
+            else:
+                hidden_states = block(
+                    hidden_states=hidden_states,
+                    attention_mask=attention_mask,
+                    encoder_hidden_states=cross_attention_hidden_states,
+                    encoder_attention_mask=encoder_attention_mask,
+                    rotary_embedding=rotary_embedding,
+                )
+
+        hidden_states = self.proj_out(hidden_states)
+
+        # (batch_size, sequence_length, dim) -> (batch_size, dim, sequence_length)
+        # remove prepend length that has been added by global hidden states
+        hidden_states = hidden_states.transpose(1, 2)[:, :, 1:]
+        hidden_states = self.postprocess_conv(hidden_states) + hidden_states
+
+        if not return_dict:
+            return (hidden_states,)
+
+        return Transformer2DModelOutput(sample=hidden_states)
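The forward docstring above pins down the expected shapes, so the new model can be smoke-tested directly. A minimal sketch, assuming a diffusers 0.32.x install that exposes the module at the path shown in the file list; the tiny config values are illustrative (not the 1024-sample, 24-layer defaults), and rotary_embedding=None is taken only to exercise the code path shown above, whereas a real pipeline would pass precomputed rotary embeddings:

    import torch
    from diffusers.models.transformers.stable_audio_transformer import StableAudioDiTModel

    model = StableAudioDiTModel(
        in_channels=64,
        num_layers=2,                      # default is 24; kept tiny for a quick check
        attention_head_dim=8,
        num_attention_heads=4,
        num_key_value_attention_heads=2,   # GQA: fewer key/value heads than query heads
        out_channels=64,
        cross_attention_dim=32,
        time_proj_dim=16,
        global_states_input_dim=48,
        cross_attention_input_dim=24,
    )
    # attn_processors (documented above) collects one entry per Attention module:
    print(len(model.attn_processors))      # attn1 + attn2 per block -> 4

    batch, seq_len = 2, 128
    out = model(
        hidden_states=torch.randn(batch, 64, seq_len),     # (batch, in_channels, sequence_len)
        timestep=torch.tensor([1, 2]),
        encoder_hidden_states=torch.randn(batch, 10, 24),  # (batch, enc_seq, cross_attention_input_dim)
        global_hidden_states=torch.randn(batch, 1, 48),    # one global token, prepended then stripped
        rotary_embedding=None,                             # illustration only; see note above
    ).sample
    print(out.shape)                                       # torch.Size([2, 64, 128])

Note how the single prepended global token matches the `[:, :, 1:]` slice at the end of forward, so the output sequence length equals the input's.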
diffusers/models/transformers/t5_film_transformer.py
@@ -86,7 +86,7 @@ class T5FilmDecoder(ModelMixin, ConfigMixin):
         self.post_dropout = nn.Dropout(p=dropout_rate)
         self.spec_out = nn.Linear(d_model, input_dims, bias=False)
 
-    def encoder_decoder_mask(self, query_input: torch.FloatTensor, key_input: torch.FloatTensor) -> torch.FloatTensor:
+    def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
         mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
         return mask.unsqueeze(-3)
 
@@ -195,13 +195,13 @@ class DecoderLayer(nn.Module):
 
     def forward(
         self,
-        hidden_states: torch.FloatTensor,
-        conditioning_emb: Optional[torch.FloatTensor] = None,
-        attention_mask: Optional[torch.FloatTensor] = None,
+        hidden_states: torch.Tensor,
+        conditioning_emb: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
         encoder_hidden_states: Optional[torch.Tensor] = None,
         encoder_attention_mask: Optional[torch.Tensor] = None,
         encoder_decoder_position_bias=None,
-    ) -> Tuple[torch.FloatTensor]:
+    ) -> Tuple[torch.Tensor]:
         hidden_states = self.layer[0](
             hidden_states,
             conditioning_emb=conditioning_emb,
@@ -249,10 +249,10 @@ class T5LayerSelfAttentionCond(nn.Module):
 
     def forward(
         self,
-        hidden_states: torch.FloatTensor,
-        conditioning_emb: Optional[torch.FloatTensor] = None,
-        attention_mask: Optional[torch.FloatTensor] = None,
-    ) -> torch.FloatTensor:
+        hidden_states: torch.Tensor,
+        conditioning_emb: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+    ) -> torch.Tensor:
         # pre_self_attention_layer_norm
         normed_hidden_states = self.layer_norm(hidden_states)
 
@@ -292,10 +292,10 @@ class T5LayerCrossAttention(nn.Module):
 
     def forward(
         self,
-        hidden_states: torch.FloatTensor,
-        key_value_states: Optional[torch.FloatTensor] = None,
-        attention_mask: Optional[torch.FloatTensor] = None,
-    ) -> torch.FloatTensor:
+        hidden_states: torch.Tensor,
+        key_value_states: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+    ) -> torch.Tensor:
         normed_hidden_states = self.layer_norm(hidden_states)
         attention_output = self.attention(
             normed_hidden_states,
@@ -328,9 +328,7 @@ class T5LayerFFCond(nn.Module):
         self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
         self.dropout = nn.Dropout(dropout_rate)
 
-    def forward(
-        self, hidden_states: torch.FloatTensor, conditioning_emb: Optional[torch.FloatTensor] = None
-    ) -> torch.FloatTensor:
+    def forward(self, hidden_states: torch.Tensor, conditioning_emb: Optional[torch.Tensor] = None) -> torch.Tensor:
         forwarded_states = self.layer_norm(hidden_states)
         if conditioning_emb is not None:
             forwarded_states = self.film(forwarded_states, conditioning_emb)
@@ -361,7 +359,7 @@ class T5DenseGatedActDense(nn.Module):
         self.dropout = nn.Dropout(dropout_rate)
         self.act = NewGELUActivation()
 
-    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         hidden_gelu = self.act(self.wi_0(hidden_states))
         hidden_linear = self.wi_1(hidden_states)
         hidden_states = hidden_gelu * hidden_linear
@@ -390,7 +388,7 @@ class T5LayerNorm(nn.Module):
         self.weight = nn.Parameter(torch.ones(hidden_size))
         self.variance_epsilon = eps
 
-    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
         # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
         # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
@@ -431,7 +429,7 @@ class T5FiLMLayer(nn.Module):
         super().__init__()
         self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
 
-    def forward(self, x: torch.FloatTensor, conditioning_emb: torch.FloatTensor) -> torch.FloatTensor:
+    def forward(self, x: torch.Tensor, conditioning_emb: torch.Tensor) -> torch.Tensor:
         emb = self.scale_bias(conditioning_emb)
         scale, shift = torch.chunk(emb, 2, -1)
         x = x * (1 + scale) + shift
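The last hunk touches T5FiLMLayer, whose forward is classic feature-wise linear modulation: project the conditioning embedding, chunk it into scale and shift, then apply x * (1 + scale) + shift. A tiny shape sketch to make that concrete (the dimensions are made up; the import path follows the t5_film_transformer.py entry in the file list above):

    import torch
    from diffusers.models.transformers.t5_film_transformer import T5FiLMLayer

    film = T5FiLMLayer(in_features=32, out_features=16)  # scale_bias: Linear(32 -> 32)
    x = torch.randn(2, 10, 16)       # features to modulate
    cond = torch.randn(2, 1, 32)     # conditioning embedding, broadcast over the sequence
    out = film(x, cond)              # chunk into scale/shift, then x * (1 + scale) + shift
    print(out.shape)                 # torch.Size([2, 10, 16])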