diffusers 0.32.2__py3-none-any.whl → 0.33.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (389)
  1. diffusers/__init__.py +186 -3
  2. diffusers/configuration_utils.py +40 -12
  3. diffusers/dependency_versions_table.py +9 -2
  4. diffusers/hooks/__init__.py +9 -0
  5. diffusers/hooks/faster_cache.py +653 -0
  6. diffusers/hooks/group_offloading.py +793 -0
  7. diffusers/hooks/hooks.py +236 -0
  8. diffusers/hooks/layerwise_casting.py +245 -0
  9. diffusers/hooks/pyramid_attention_broadcast.py +311 -0
  10. diffusers/loaders/__init__.py +6 -0
  11. diffusers/loaders/ip_adapter.py +38 -30
  12. diffusers/loaders/lora_base.py +121 -86
  13. diffusers/loaders/lora_conversion_utils.py +504 -44
  14. diffusers/loaders/lora_pipeline.py +1769 -181
  15. diffusers/loaders/peft.py +167 -57
  16. diffusers/loaders/single_file.py +17 -2
  17. diffusers/loaders/single_file_model.py +53 -5
  18. diffusers/loaders/single_file_utils.py +646 -72
  19. diffusers/loaders/textual_inversion.py +9 -9
  20. diffusers/loaders/transformer_flux.py +8 -9
  21. diffusers/loaders/transformer_sd3.py +120 -39
  22. diffusers/loaders/unet.py +20 -7
  23. diffusers/models/__init__.py +22 -0
  24. diffusers/models/activations.py +9 -9
  25. diffusers/models/attention.py +0 -1
  26. diffusers/models/attention_processor.py +163 -25
  27. diffusers/models/auto_model.py +169 -0
  28. diffusers/models/autoencoders/__init__.py +2 -0
  29. diffusers/models/autoencoders/autoencoder_asym_kl.py +2 -0
  30. diffusers/models/autoencoders/autoencoder_dc.py +106 -4
  31. diffusers/models/autoencoders/autoencoder_kl.py +0 -4
  32. diffusers/models/autoencoders/autoencoder_kl_allegro.py +5 -23
  33. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +17 -55
  34. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +17 -97
  35. diffusers/models/autoencoders/autoencoder_kl_ltx.py +326 -107
  36. diffusers/models/autoencoders/autoencoder_kl_magvit.py +1094 -0
  37. diffusers/models/autoencoders/autoencoder_kl_mochi.py +21 -56
  38. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +11 -42
  39. diffusers/models/autoencoders/autoencoder_kl_wan.py +855 -0
  40. diffusers/models/autoencoders/autoencoder_oobleck.py +1 -0
  41. diffusers/models/autoencoders/autoencoder_tiny.py +0 -4
  42. diffusers/models/autoencoders/consistency_decoder_vae.py +3 -1
  43. diffusers/models/autoencoders/vae.py +31 -141
  44. diffusers/models/autoencoders/vq_model.py +3 -0
  45. diffusers/models/cache_utils.py +108 -0
  46. diffusers/models/controlnets/__init__.py +1 -0
  47. diffusers/models/controlnets/controlnet.py +3 -8
  48. diffusers/models/controlnets/controlnet_flux.py +14 -42
  49. diffusers/models/controlnets/controlnet_sd3.py +58 -34
  50. diffusers/models/controlnets/controlnet_sparsectrl.py +4 -7
  51. diffusers/models/controlnets/controlnet_union.py +27 -18
  52. diffusers/models/controlnets/controlnet_xs.py +7 -46
  53. diffusers/models/controlnets/multicontrolnet_union.py +196 -0
  54. diffusers/models/embeddings.py +18 -7
  55. diffusers/models/model_loading_utils.py +122 -80
  56. diffusers/models/modeling_flax_pytorch_utils.py +1 -1
  57. diffusers/models/modeling_flax_utils.py +1 -1
  58. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  59. diffusers/models/modeling_utils.py +617 -272
  60. diffusers/models/normalization.py +67 -14
  61. diffusers/models/resnet.py +1 -1
  62. diffusers/models/transformers/__init__.py +6 -0
  63. diffusers/models/transformers/auraflow_transformer_2d.py +9 -35
  64. diffusers/models/transformers/cogvideox_transformer_3d.py +13 -24
  65. diffusers/models/transformers/consisid_transformer_3d.py +789 -0
  66. diffusers/models/transformers/dit_transformer_2d.py +5 -19
  67. diffusers/models/transformers/hunyuan_transformer_2d.py +4 -3
  68. diffusers/models/transformers/latte_transformer_3d.py +20 -15
  69. diffusers/models/transformers/lumina_nextdit2d.py +3 -1
  70. diffusers/models/transformers/pixart_transformer_2d.py +4 -19
  71. diffusers/models/transformers/prior_transformer.py +5 -1
  72. diffusers/models/transformers/sana_transformer.py +144 -40
  73. diffusers/models/transformers/stable_audio_transformer.py +5 -20
  74. diffusers/models/transformers/transformer_2d.py +7 -22
  75. diffusers/models/transformers/transformer_allegro.py +9 -17
  76. diffusers/models/transformers/transformer_cogview3plus.py +6 -17
  77. diffusers/models/transformers/transformer_cogview4.py +462 -0
  78. diffusers/models/transformers/transformer_easyanimate.py +527 -0
  79. diffusers/models/transformers/transformer_flux.py +68 -110
  80. diffusers/models/transformers/transformer_hunyuan_video.py +404 -46
  81. diffusers/models/transformers/transformer_ltx.py +53 -35
  82. diffusers/models/transformers/transformer_lumina2.py +548 -0
  83. diffusers/models/transformers/transformer_mochi.py +6 -17
  84. diffusers/models/transformers/transformer_omnigen.py +469 -0
  85. diffusers/models/transformers/transformer_sd3.py +56 -86
  86. diffusers/models/transformers/transformer_temporal.py +5 -11
  87. diffusers/models/transformers/transformer_wan.py +469 -0
  88. diffusers/models/unets/unet_1d.py +3 -1
  89. diffusers/models/unets/unet_2d.py +21 -20
  90. diffusers/models/unets/unet_2d_blocks.py +19 -243
  91. diffusers/models/unets/unet_2d_condition.py +4 -6
  92. diffusers/models/unets/unet_3d_blocks.py +14 -127
  93. diffusers/models/unets/unet_3d_condition.py +8 -12
  94. diffusers/models/unets/unet_i2vgen_xl.py +5 -13
  95. diffusers/models/unets/unet_kandinsky3.py +0 -4
  96. diffusers/models/unets/unet_motion_model.py +20 -114
  97. diffusers/models/unets/unet_spatio_temporal_condition.py +7 -8
  98. diffusers/models/unets/unet_stable_cascade.py +8 -35
  99. diffusers/models/unets/uvit_2d.py +1 -4
  100. diffusers/optimization.py +2 -2
  101. diffusers/pipelines/__init__.py +57 -8
  102. diffusers/pipelines/allegro/pipeline_allegro.py +22 -2
  103. diffusers/pipelines/amused/pipeline_amused.py +15 -2
  104. diffusers/pipelines/amused/pipeline_amused_img2img.py +15 -2
  105. diffusers/pipelines/amused/pipeline_amused_inpaint.py +15 -2
  106. diffusers/pipelines/animatediff/pipeline_animatediff.py +15 -2
  107. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +15 -3
  108. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +24 -4
  109. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +15 -2
  110. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +16 -4
  111. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +16 -4
  112. diffusers/pipelines/audioldm/pipeline_audioldm.py +13 -2
  113. diffusers/pipelines/audioldm2/modeling_audioldm2.py +13 -68
  114. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +39 -9
  115. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +63 -7
  116. diffusers/pipelines/auto_pipeline.py +35 -14
  117. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  118. diffusers/pipelines/blip_diffusion/modeling_blip2.py +5 -8
  119. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +12 -0
  120. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +22 -6
  121. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +22 -6
  122. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +22 -5
  123. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +22 -6
  124. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +12 -4
  125. diffusers/pipelines/cogview4/__init__.py +49 -0
  126. diffusers/pipelines/cogview4/pipeline_cogview4.py +684 -0
  127. diffusers/pipelines/cogview4/pipeline_cogview4_control.py +732 -0
  128. diffusers/pipelines/cogview4/pipeline_output.py +21 -0
  129. diffusers/pipelines/consisid/__init__.py +49 -0
  130. diffusers/pipelines/consisid/consisid_utils.py +357 -0
  131. diffusers/pipelines/consisid/pipeline_consisid.py +974 -0
  132. diffusers/pipelines/consisid/pipeline_output.py +20 -0
  133. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +11 -0
  134. diffusers/pipelines/controlnet/pipeline_controlnet.py +6 -5
  135. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +13 -0
  136. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +17 -5
  137. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +31 -12
  138. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +26 -7
  139. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +20 -3
  140. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +22 -3
  141. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +26 -25
  142. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +224 -109
  143. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +25 -29
  144. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +7 -4
  145. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +3 -5
  146. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +121 -10
  147. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +122 -11
  148. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +12 -1
  149. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +20 -3
  150. diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +14 -2
  151. diffusers/pipelines/ddim/pipeline_ddim.py +14 -1
  152. diffusers/pipelines/ddpm/pipeline_ddpm.py +15 -1
  153. diffusers/pipelines/deepfloyd_if/pipeline_if.py +12 -0
  154. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +12 -0
  155. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +14 -1
  156. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +12 -0
  157. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +14 -1
  158. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +14 -1
  159. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +11 -7
  160. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +11 -7
  161. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +1 -1
  162. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +10 -6
  163. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +2 -2
  164. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +11 -7
  165. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +1 -1
  166. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +1 -1
  167. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +1 -1
  168. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +10 -105
  169. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +1 -1
  170. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +1 -1
  171. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +1 -1
  172. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +1 -1
  173. diffusers/pipelines/dit/pipeline_dit.py +15 -2
  174. diffusers/pipelines/easyanimate/__init__.py +52 -0
  175. diffusers/pipelines/easyanimate/pipeline_easyanimate.py +770 -0
  176. diffusers/pipelines/easyanimate/pipeline_easyanimate_control.py +994 -0
  177. diffusers/pipelines/easyanimate/pipeline_easyanimate_inpaint.py +1234 -0
  178. diffusers/pipelines/easyanimate/pipeline_output.py +20 -0
  179. diffusers/pipelines/flux/pipeline_flux.py +53 -21
  180. diffusers/pipelines/flux/pipeline_flux_control.py +9 -12
  181. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +6 -10
  182. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +8 -10
  183. diffusers/pipelines/flux/pipeline_flux_controlnet.py +185 -13
  184. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +8 -10
  185. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +16 -16
  186. diffusers/pipelines/flux/pipeline_flux_fill.py +107 -39
  187. diffusers/pipelines/flux/pipeline_flux_img2img.py +193 -15
  188. diffusers/pipelines/flux/pipeline_flux_inpaint.py +199 -19
  189. diffusers/pipelines/free_noise_utils.py +3 -3
  190. diffusers/pipelines/hunyuan_video/__init__.py +4 -0
  191. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py +804 -0
  192. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +90 -23
  193. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py +924 -0
  194. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +3 -5
  195. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +13 -1
  196. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +12 -0
  197. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +1 -1
  198. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +12 -0
  199. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +13 -1
  200. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +12 -0
  201. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +12 -1
  202. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +13 -0
  203. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +12 -0
  204. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +12 -1
  205. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +12 -1
  206. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +12 -0
  207. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +12 -0
  208. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +12 -0
  209. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +12 -0
  210. diffusers/pipelines/kolors/pipeline_kolors.py +10 -8
  211. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +6 -4
  212. diffusers/pipelines/kolors/text_encoder.py +7 -34
  213. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +12 -1
  214. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +13 -1
  215. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +14 -13
  216. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py +12 -1
  217. diffusers/pipelines/latte/pipeline_latte.py +36 -7
  218. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +67 -13
  219. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +60 -15
  220. diffusers/pipelines/ltx/__init__.py +2 -0
  221. diffusers/pipelines/ltx/pipeline_ltx.py +25 -13
  222. diffusers/pipelines/ltx/pipeline_ltx_condition.py +1194 -0
  223. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +31 -17
  224. diffusers/pipelines/lumina/__init__.py +2 -2
  225. diffusers/pipelines/lumina/pipeline_lumina.py +83 -20
  226. diffusers/pipelines/lumina2/__init__.py +48 -0
  227. diffusers/pipelines/lumina2/pipeline_lumina2.py +790 -0
  228. diffusers/pipelines/marigold/__init__.py +2 -0
  229. diffusers/pipelines/marigold/marigold_image_processing.py +127 -14
  230. diffusers/pipelines/marigold/pipeline_marigold_depth.py +31 -16
  231. diffusers/pipelines/marigold/pipeline_marigold_intrinsics.py +721 -0
  232. diffusers/pipelines/marigold/pipeline_marigold_normals.py +31 -16
  233. diffusers/pipelines/mochi/pipeline_mochi.py +14 -18
  234. diffusers/pipelines/musicldm/pipeline_musicldm.py +16 -1
  235. diffusers/pipelines/omnigen/__init__.py +50 -0
  236. diffusers/pipelines/omnigen/pipeline_omnigen.py +512 -0
  237. diffusers/pipelines/omnigen/processor_omnigen.py +327 -0
  238. diffusers/pipelines/onnx_utils.py +5 -3
  239. diffusers/pipelines/pag/pag_utils.py +1 -1
  240. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +12 -1
  241. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +15 -4
  242. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +20 -3
  243. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +20 -3
  244. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +1 -3
  245. diffusers/pipelines/pag/pipeline_pag_kolors.py +6 -4
  246. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +16 -3
  247. diffusers/pipelines/pag/pipeline_pag_sana.py +65 -8
  248. diffusers/pipelines/pag/pipeline_pag_sd.py +23 -7
  249. diffusers/pipelines/pag/pipeline_pag_sd_3.py +3 -5
  250. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +3 -5
  251. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +13 -1
  252. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +23 -7
  253. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +26 -10
  254. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +12 -4
  255. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +7 -3
  256. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +10 -6
  257. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +13 -3
  258. diffusers/pipelines/pia/pipeline_pia.py +13 -1
  259. diffusers/pipelines/pipeline_flax_utils.py +7 -7
  260. diffusers/pipelines/pipeline_loading_utils.py +193 -83
  261. diffusers/pipelines/pipeline_utils.py +221 -106
  262. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +17 -5
  263. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +17 -4
  264. diffusers/pipelines/sana/__init__.py +2 -0
  265. diffusers/pipelines/sana/pipeline_sana.py +183 -58
  266. diffusers/pipelines/sana/pipeline_sana_sprint.py +889 -0
  267. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +12 -2
  268. diffusers/pipelines/shap_e/pipeline_shap_e.py +12 -0
  269. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +12 -0
  270. diffusers/pipelines/shap_e/renderer.py +6 -6
  271. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +1 -1
  272. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +15 -4
  273. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +12 -8
  274. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +12 -1
  275. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +3 -2
  276. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +14 -10
  277. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +3 -3
  278. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py +14 -10
  279. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
  280. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +4 -3
  281. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +5 -4
  282. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +2 -2
  283. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +18 -13
  284. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +30 -8
  285. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +24 -10
  286. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +28 -12
  287. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +39 -18
  288. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +17 -6
  289. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +13 -3
  290. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +20 -3
  291. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +14 -2
  292. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +13 -1
  293. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +16 -17
  294. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +136 -18
  295. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +150 -21
  296. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +15 -3
  297. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +26 -11
  298. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +15 -3
  299. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +22 -4
  300. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -13
  301. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +12 -4
  302. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +15 -3
  303. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +15 -3
  304. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +26 -12
  305. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +16 -4
  306. diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py +1 -1
  307. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +12 -4
  308. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +7 -3
  309. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +10 -6
  310. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +11 -4
  311. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +13 -2
  312. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +18 -4
  313. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +26 -5
  314. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +13 -1
  315. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +13 -1
  316. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +28 -6
  317. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +26 -4
  318. diffusers/pipelines/transformers_loading_utils.py +121 -0
  319. diffusers/pipelines/unclip/pipeline_unclip.py +11 -1
  320. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +11 -1
  321. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +19 -2
  322. diffusers/pipelines/wan/__init__.py +51 -0
  323. diffusers/pipelines/wan/pipeline_output.py +20 -0
  324. diffusers/pipelines/wan/pipeline_wan.py +595 -0
  325. diffusers/pipelines/wan/pipeline_wan_i2v.py +724 -0
  326. diffusers/pipelines/wan/pipeline_wan_video2video.py +727 -0
  327. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +7 -31
  328. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +12 -1
  329. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +12 -1
  330. diffusers/quantizers/auto.py +5 -1
  331. diffusers/quantizers/base.py +5 -9
  332. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +41 -29
  333. diffusers/quantizers/bitsandbytes/utils.py +30 -20
  334. diffusers/quantizers/gguf/gguf_quantizer.py +1 -0
  335. diffusers/quantizers/gguf/utils.py +4 -2
  336. diffusers/quantizers/quantization_config.py +59 -4
  337. diffusers/quantizers/quanto/__init__.py +1 -0
  338. diffusers/quantizers/quanto/quanto_quantizer.py +177 -0
  339. diffusers/quantizers/quanto/utils.py +60 -0
  340. diffusers/quantizers/torchao/__init__.py +1 -1
  341. diffusers/quantizers/torchao/torchao_quantizer.py +47 -2
  342. diffusers/schedulers/__init__.py +2 -1
  343. diffusers/schedulers/scheduling_consistency_models.py +1 -2
  344. diffusers/schedulers/scheduling_ddim_inverse.py +1 -1
  345. diffusers/schedulers/scheduling_ddpm.py +2 -3
  346. diffusers/schedulers/scheduling_ddpm_parallel.py +1 -2
  347. diffusers/schedulers/scheduling_dpmsolver_multistep.py +12 -4
  348. diffusers/schedulers/scheduling_edm_euler.py +45 -10
  349. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +116 -28
  350. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +7 -6
  351. diffusers/schedulers/scheduling_heun_discrete.py +1 -1
  352. diffusers/schedulers/scheduling_lcm.py +1 -2
  353. diffusers/schedulers/scheduling_lms_discrete.py +1 -1
  354. diffusers/schedulers/scheduling_repaint.py +5 -1
  355. diffusers/schedulers/scheduling_scm.py +265 -0
  356. diffusers/schedulers/scheduling_tcd.py +1 -2
  357. diffusers/schedulers/scheduling_utils.py +2 -1
  358. diffusers/training_utils.py +14 -7
  359. diffusers/utils/__init__.py +9 -1
  360. diffusers/utils/constants.py +13 -1
  361. diffusers/utils/deprecation_utils.py +1 -1
  362. diffusers/utils/dummy_bitsandbytes_objects.py +17 -0
  363. diffusers/utils/dummy_gguf_objects.py +17 -0
  364. diffusers/utils/dummy_optimum_quanto_objects.py +17 -0
  365. diffusers/utils/dummy_pt_objects.py +233 -0
  366. diffusers/utils/dummy_torch_and_transformers_and_opencv_objects.py +17 -0
  367. diffusers/utils/dummy_torch_and_transformers_objects.py +270 -0
  368. diffusers/utils/dummy_torchao_objects.py +17 -0
  369. diffusers/utils/dynamic_modules_utils.py +1 -1
  370. diffusers/utils/export_utils.py +28 -3
  371. diffusers/utils/hub_utils.py +52 -102
  372. diffusers/utils/import_utils.py +121 -221
  373. diffusers/utils/loading_utils.py +2 -1
  374. diffusers/utils/logging.py +1 -2
  375. diffusers/utils/peft_utils.py +6 -14
  376. diffusers/utils/remote_utils.py +425 -0
  377. diffusers/utils/source_code_parsing_utils.py +52 -0
  378. diffusers/utils/state_dict_utils.py +15 -1
  379. diffusers/utils/testing_utils.py +243 -13
  380. diffusers/utils/torch_utils.py +10 -0
  381. diffusers/utils/typing_utils.py +91 -0
  382. diffusers/video_processor.py +1 -1
  383. {diffusers-0.32.2.dist-info → diffusers-0.33.1.dist-info}/METADATA +21 -4
  384. diffusers-0.33.1.dist-info/RECORD +608 -0
  385. {diffusers-0.32.2.dist-info → diffusers-0.33.1.dist-info}/WHEEL +1 -1
  386. diffusers-0.32.2.dist-info/RECORD +0 -550
  387. {diffusers-0.32.2.dist-info → diffusers-0.33.1.dist-info}/LICENSE +0 -0
  388. {diffusers-0.32.2.dist-info → diffusers-0.33.1.dist-info}/entry_points.txt +0 -0
  389. {diffusers-0.32.2.dist-info → diffusers-0.33.1.dist-info}/top_level.txt +0 -0
diffusers/utils/testing_utils.py

@@ -14,10 +14,11 @@ import tempfile
  import time
  import unittest
  import urllib.parse
+ from collections import UserDict
  from contextlib import contextmanager
  from io import BytesIO, StringIO
  from pathlib import Path
- from typing import Callable, Dict, List, Optional, Union
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

  import numpy as np
  import PIL.Image
@@ -26,6 +27,7 @@ import requests
  from numpy.linalg import norm
  from packaging import version

+ from .constants import DIFFUSERS_REQUEST_TIMEOUT
  from .import_utils import (
      BACKENDS_MAPPING,
      is_accelerate_available,
@@ -47,6 +49,17 @@ from .import_utils import (
  from .logging import get_logger


+ if is_torch_available():
+     import torch
+
+     IS_ROCM_SYSTEM = torch.version.hip is not None
+     IS_CUDA_SYSTEM = torch.version.cuda is not None
+     IS_XPU_SYSTEM = getattr(torch.version, "xpu", None) is not None
+ else:
+     IS_ROCM_SYSTEM = False
+     IS_CUDA_SYSTEM = False
+     IS_XPU_SYSTEM = False
+
  global_rng = random.Random()

  logger = get_logger(__name__)
@@ -86,7 +99,12 @@ if is_torch_available():
              ) from e
          logger.info(f"torch_device overrode to {torch_device}")
      else:
-         torch_device = "cuda" if torch.cuda.is_available() else "cpu"
+         if torch.cuda.is_available():
+             torch_device = "cuda"
+         elif torch.xpu.is_available():
+             torch_device = "xpu"
+         else:
+             torch_device = "cpu"
      is_torch_higher_equal_than_1_12 = version.parse(
          version.parse(torch.__version__).base_version
      ) >= version.parse("1.12")
@@ -96,6 +114,8 @@ if is_torch_available():
      mps_backend_registered = hasattr(torch.backends, "mps")
      torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device

+     from .torch_utils import get_torch_cuda_device_capability
+

  def torch_all_close(a, b, *args, **kwargs):
      if not is_torch_available():
@@ -277,6 +297,20 @@ def require_torch_gpu(test_case):
      )


+ def require_torch_cuda_compatibility(expected_compute_capability):
+     def decorator(test_case):
+         if not torch.cuda.is_available():
+             return unittest.skip(test_case)
+         else:
+             current_compute_capability = get_torch_cuda_device_capability()
+             return unittest.skipUnless(
+                 float(current_compute_capability) == float(expected_compute_capability),
+                 "Test not supported for this compute capability.",
+             )
+
+     return decorator
+
+
  # These decorators are for accelerator-specific behaviours that are not GPU-specific
  def require_torch_accelerator(test_case):
      """Decorator marking a test that requires an accelerator backend and PyTorch."""
@@ -299,6 +333,21 @@ def require_torch_multi_gpu(test_case):
      return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


+ def require_torch_multi_accelerator(test_case):
+     """
+     Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine
+     without multiple hardware accelerators.
+     """
+     if not is_torch_available():
+         return unittest.skip("test requires PyTorch")(test_case)
+
+     import torch
+
+     return unittest.skipUnless(
+         torch.cuda.device_count() > 1 or torch.xpu.device_count() > 1, "test requires multiple hardware accelerators"
+     )(test_case)
+
+
  def require_torch_accelerator_with_fp16(test_case):
      """Decorator marking a test that requires an accelerator with support for the FP16 data type."""
      return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")(
@@ -333,6 +382,31 @@ def require_big_gpu_with_torch_cuda(test_case):
      )(test_case)


+ def require_big_accelerator(test_case):
+     """
+     Decorator marking a test that requires a bigger hardware accelerator (24GB) for execution. Some example pipelines:
+     Flux, SD3, Cog, etc.
+     """
+     if not is_torch_available():
+         return unittest.skip("test requires PyTorch")(test_case)
+
+     import torch
+
+     if not (torch.cuda.is_available() or torch.xpu.is_available()):
+         return unittest.skip("test requires PyTorch CUDA")(test_case)
+
+     if torch.xpu.is_available():
+         device_properties = torch.xpu.get_device_properties(0)
+     else:
+         device_properties = torch.cuda.get_device_properties(0)
+
+     total_memory = device_properties.total_memory / (1024**3)
+     return unittest.skipUnless(
+         total_memory >= BIG_GPU_MEMORY,
+         f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory",
+     )(test_case)
+
+
  def require_torch_accelerator_with_training(test_case):
      """Decorator marking a test that requires an accelerator with support for training."""
      return unittest.skipUnless(
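For orientation (not part of the diff): these decorators follow the same skip-based pattern as the existing require_torch_gpu helpers. A minimal sketch of how they might be applied; the test class, test names, and the 8.0 capability value are hypothetical:

# Hypothetical test module illustrating the decorators added above.
import unittest

from diffusers.utils.testing_utils import (
    require_big_accelerator,
    require_torch_cuda_compatibility,
    require_torch_multi_accelerator,
)


class ExamplePipelineSlowTests(unittest.TestCase):
    @require_big_accelerator
    def test_full_model_inference(self):
        # Skipped unless a CUDA or XPU device with at least BIG_GPU_MEMORY GB of memory is present.
        ...

    @require_torch_multi_accelerator
    def test_sharded_inference(self):
        # Skipped unless more than one CUDA or XPU device is visible.
        ...

    @require_torch_cuda_compatibility(8.0)
    def test_kernel_for_sm80(self):
        # Skipped unless the reported CUDA compute capability is exactly 8.0.
        ...
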
@@ -478,6 +552,18 @@ def require_bitsandbytes_version_greater(bnb_version):
      return decorator


+ def require_hf_hub_version_greater(hf_hub_version):
+     def decorator(test_case):
+         correct_hf_hub_version = version.parse(
+             version.parse(importlib.metadata.version("huggingface_hub")).base_version
+         ) > version.parse(hf_hub_version)
+         return unittest.skipUnless(
+             correct_hf_hub_version, f"Test requires huggingface_hub with the version greater than {hf_hub_version}."
+         )(test_case)
+
+     return decorator
+
+
  def require_gguf_version_greater_or_equal(gguf_version):
      def decorator(test_case):
          correct_gguf_version = is_gguf_available() and version.parse(
@@ -521,7 +607,7 @@ def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -
              # local_path can be passed to correct images of tests
              return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix()
          elif arry.startswith("http://") or arry.startswith("https://"):
-             response = requests.get(arry)
+             response = requests.get(arry, timeout=DIFFUSERS_REQUEST_TIMEOUT)
              response.raise_for_status()
              arry = np.load(BytesIO(response.content))
          elif os.path.isfile(arry):
@@ -541,10 +627,10 @@ def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -
      return arry


- def load_pt(url: str):
-     response = requests.get(url)
+ def load_pt(url: str, map_location: str):
+     response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT)
      response.raise_for_status()
-     arry = torch.load(BytesIO(response.content))
+     arry = torch.load(BytesIO(response.content), map_location=map_location)
      return arry


@@ -561,7 +647,7 @@ def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
      """
      if isinstance(image, str):
          if image.startswith("http://") or image.startswith("https://"):
-             image = PIL.Image.open(requests.get(image, stream=True).raw)
+             image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw)
          elif os.path.isfile(image):
              image = PIL.Image.open(image)
          else:
@@ -796,7 +882,7 @@ def pytest_terminal_summary_main(tr, id):
              f.write("slowest durations\n")
              for i, rep in enumerate(dlist):
                  if rep.duration < durations_min:
-                     f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
+                     f.write(f"{len(dlist) - i} durations < {durations_min} secs were omitted")
                      break
                  f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

@@ -941,7 +1027,7 @@ def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
      process.join(timeout=timeout)

      if results["error"] is not None:
-         test_case.fail(f'{results["error"]}')
+         test_case.fail(f"{results['error']}")


  class CaptureLogger:
@@ -1055,12 +1141,51 @@ def _is_torch_fp64_available(device):
  # Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch
  if is_torch_available():
      # Behaviour flags
-     BACKEND_SUPPORTS_TRAINING = {"cuda": True, "cpu": True, "mps": False, "default": True}
+     BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True}

      # Function definitions
-     BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "mps": None, "default": None}
-     BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "mps": lambda: 0, "default": 0}
-     BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
+     BACKEND_EMPTY_CACHE = {
+         "cuda": torch.cuda.empty_cache,
+         "xpu": torch.xpu.empty_cache,
+         "cpu": None,
+         "mps": torch.mps.empty_cache,
+         "default": None,
+     }
+     BACKEND_DEVICE_COUNT = {
+         "cuda": torch.cuda.device_count,
+         "xpu": torch.xpu.device_count,
+         "cpu": lambda: 0,
+         "mps": lambda: 0,
+         "default": 0,
+     }
+     BACKEND_MANUAL_SEED = {
+         "cuda": torch.cuda.manual_seed,
+         "xpu": torch.xpu.manual_seed,
+         "cpu": torch.manual_seed,
+         "mps": torch.mps.manual_seed,
+         "default": torch.manual_seed,
+     }
+     BACKEND_RESET_PEAK_MEMORY_STATS = {
+         "cuda": torch.cuda.reset_peak_memory_stats,
+         "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None),
+         "cpu": None,
+         "mps": None,
+         "default": None,
+     }
+     BACKEND_RESET_MAX_MEMORY_ALLOCATED = {
+         "cuda": torch.cuda.reset_max_memory_allocated,
+         "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None),
+         "cpu": None,
+         "mps": None,
+         "default": None,
+     }
+     BACKEND_MAX_MEMORY_ALLOCATED = {
+         "cuda": torch.cuda.max_memory_allocated,
+         "xpu": getattr(torch.xpu, "max_memory_allocated", None),
+         "cpu": 0,
+         "mps": 0,
+         "default": 0,
+     }


  # This dispatches a defined function according to the accelerator from the function definitions.
@@ -1091,6 +1216,18 @@ def backend_device_count(device: str):
      return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)


+ def backend_reset_peak_memory_stats(device: str):
+     return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS)
+
+
+ def backend_reset_max_memory_allocated(device: str):
+     return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED)
+
+
+ def backend_max_memory_allocated(device: str):
+     return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED)
+
+
  # These are callables which return boolean behaviour flags and can be used to specify some
  # device agnostic alternative where the feature is unsupported.
  def backend_supports_training(device: str):
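For orientation (not part of the diff): the new dispatch tables plug into the existing _device_agnostic_dispatch helper, so tests can reset and read peak memory without branching on the backend. A rough sketch, assuming a CUDA or XPU test device; the measure_peak_memory_gb helper is hypothetical:

# Hypothetical device-agnostic peak-memory probe built on the new helpers.
import torch

from diffusers.utils.testing_utils import (
    backend_empty_cache,
    backend_max_memory_allocated,
    backend_reset_peak_memory_stats,
    torch_device,
)


def measure_peak_memory_gb(fn):
    """Run `fn` and report the peak memory allocated on the active test device, in GB."""
    backend_empty_cache(torch_device)
    backend_reset_peak_memory_stats(torch_device)
    fn()
    return backend_max_memory_allocated(torch_device) / 1024**3


peak = measure_peak_memory_gb(lambda: torch.zeros(4096, 4096, device=torch_device))
print(f"peak memory: {peak:.3f} GB")
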
@@ -1147,3 +1284,96 @@ if is_torch_available():
      update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
      update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
      update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING")
+     update_mapping_from_spec(BACKEND_RESET_PEAK_MEMORY_STATS, "RESET_PEAK_MEMORY_STATS_FN")
+     update_mapping_from_spec(BACKEND_RESET_MAX_MEMORY_ALLOCATED, "RESET_MAX_MEMORY_ALLOCATED_FN")
+     update_mapping_from_spec(BACKEND_MAX_MEMORY_ALLOCATED, "MAX_MEMORY_ALLOCATED_FN")
+
+
+ # Modified from https://github.com/huggingface/transformers/blob/cdfb018d0300fef3b07d9220f3efe9c2a9974662/src/transformers/testing_utils.py#L3090
+
+ # Type definition of key used in `Expectations` class.
+ DeviceProperties = Tuple[Union[str, None], Union[int, None]]
+
+
+ @functools.lru_cache
+ def get_device_properties() -> DeviceProperties:
+     """
+     Get environment device properties.
+     """
+     if IS_CUDA_SYSTEM or IS_ROCM_SYSTEM:
+         import torch
+
+         major, _ = torch.cuda.get_device_capability()
+         if IS_ROCM_SYSTEM:
+             return ("rocm", major)
+         else:
+             return ("cuda", major)
+     elif IS_XPU_SYSTEM:
+         import torch
+
+         # To get more info of the architecture meaning and bit allocation, refer to https://github.com/intel/llvm/blob/sycl/sycl/include/sycl/ext/oneapi/experimental/device_architecture.def
+         arch = torch.xpu.get_device_capability()["architecture"]
+         gen_mask = 0x000000FF00000000
+         gen = (arch & gen_mask) >> 32
+         return ("xpu", gen)
+     else:
+         return (torch_device, None)
+
+
+ if TYPE_CHECKING:
+     DevicePropertiesUserDict = UserDict[DeviceProperties, Any]
+ else:
+     DevicePropertiesUserDict = UserDict
+
+
+ class Expectations(DevicePropertiesUserDict):
+     def get_expectation(self) -> Any:
+         """
+         Find best matching expectation based on environment device properties.
+         """
+         return self.find_expectation(get_device_properties())
+
+     @staticmethod
+     def is_default(key: DeviceProperties) -> bool:
+         return all(p is None for p in key)
+
+     @staticmethod
+     def score(key: DeviceProperties, other: DeviceProperties) -> int:
+         """
+         Returns score indicating how similar two instances of the `Properties` tuple are. Points are calculated using
+         bits, but documented as int. Rules are as follows:
+         * Matching `type` gives 8 points.
+         * Semi-matching `type`, for example cuda and rocm, gives 4 points.
+         * Matching `major` (compute capability major version) gives 2 points.
+         * Default expectation (if present) gives 1 points.
+         """
+         (device_type, major) = key
+         (other_device_type, other_major) = other
+
+         score = 0b0
+         if device_type == other_device_type:
+             score |= 0b1000
+         elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]:
+             score |= 0b100
+
+         if major == other_major and other_major is not None:
+             score |= 0b10
+
+         if Expectations.is_default(other):
+             score |= 0b1
+
+         return int(score)
+
+     def find_expectation(self, key: DeviceProperties = (None, None)) -> Any:
+         """
+         Find best matching expectation based on provided device properties.
+         """
+         (result_key, result) = max(self.data.items(), key=lambda x: Expectations.score(key, x[0]))
+
+         if Expectations.score(key, result_key) == 0:
+             raise ValueError(f"No matching expectation found for {key}")
+
+         return result
+
+     def __repr__(self):
+         return f"{self.data}"
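For orientation (not part of the diff): Expectations is a UserDict keyed by (device_type, major_version) tuples, and get_expectation() returns the entry that scores highest against get_device_properties() using the rules in the score docstring. A small sketch with invented values:

# Illustrative Expectations usage; the numeric slices are made up.
from diffusers.utils.testing_utils import Expectations

expected_slices = Expectations(
    {
        ("cuda", 8): [0.1233, 0.5432, 0.8765],  # compute capability 8.x (e.g. A100)
        ("cuda", 7): [0.1231, 0.5440, 0.8771],  # compute capability 7.x (e.g. V100, T4)
        ("rocm", 9): [0.1235, 0.5428, 0.8762],  # rocm/cuda keys semi-match each other when scoring
        (None, None): [0.1232, 0.5433, 0.8764],  # default fallback for any other device
    }
)

# Resolves to the entry whose key scores highest for the current machine.
expected_slice = expected_slices.get_expectation()
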
diffusers/utils/torch_utils.py

@@ -149,3 +149,13 @@ def apply_freeu(
          res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"])

      return hidden_states, res_hidden_states
+
+
+ def get_torch_cuda_device_capability():
+     if torch.cuda.is_available():
+         device = torch.device("cuda")
+         compute_capability = torch.cuda.get_device_capability(device)
+         compute_capability = f"{compute_capability[0]}.{compute_capability[1]}"
+         return float(compute_capability)
+     else:
+         return None
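For orientation (not part of the diff): this helper returns the CUDA compute capability as a float, e.g. roughly 8.6 on an RTX 3090 or 9.0 on an H100, and None when CUDA is unavailable; the require_torch_cuda_compatibility decorator above compares that float against its argument. A minimal sketch:

# Minimal sketch: branch on the reported CUDA compute capability (hardware values are illustrative).
from diffusers.utils.torch_utils import get_torch_cuda_device_capability

capability = get_torch_cuda_device_capability()  # e.g. 8.6 on an RTX 3090, None without CUDA
if capability is not None and capability >= 8.0:
    print("Ampere-class or newer GPU detected.")
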
diffusers/utils/typing_utils.py (new file)

@@ -0,0 +1,91 @@
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Typing utilities: Utilities related to type checking and validation
+ """
+
+ from typing import Any, Dict, List, Set, Tuple, Type, Union, get_args, get_origin
+
+
+ def _is_valid_type(obj: Any, class_or_tuple: Union[Type, Tuple[Type, ...]]) -> bool:
+     """
+     Checks if an object is an instance of any of the provided types. For collections, it checks if every element is of
+     the correct type as well.
+     """
+     if not isinstance(class_or_tuple, tuple):
+         class_or_tuple = (class_or_tuple,)
+
+     # Unpack unions
+     unpacked_class_or_tuple = []
+     for t in class_or_tuple:
+         if get_origin(t) is Union:
+             unpacked_class_or_tuple.extend(get_args(t))
+         else:
+             unpacked_class_or_tuple.append(t)
+     class_or_tuple = tuple(unpacked_class_or_tuple)
+
+     if Any in class_or_tuple:
+         return True
+
+     obj_type = type(obj)
+     # Classes with obj's type
+     class_or_tuple = {t for t in class_or_tuple if isinstance(obj, get_origin(t) or t)}
+
+     # Singular types (e.g. int, ControlNet, ...)
+     # Untyped collections (e.g. List, but not List[int])
+     elem_class_or_tuple = {get_args(t) for t in class_or_tuple}
+     if () in elem_class_or_tuple:
+         return True
+     # Typed lists or sets
+     elif obj_type in (list, set):
+         return any(all(_is_valid_type(x, t) for x in obj) for t in elem_class_or_tuple)
+     # Typed tuples
+     elif obj_type is tuple:
+         return any(
+             # Tuples with any length and single type (e.g. Tuple[int, ...])
+             (len(t) == 2 and t[-1] is Ellipsis and all(_is_valid_type(x, t[0]) for x in obj))
+             or
+             # Tuples with fixed length and any types (e.g. Tuple[int, str])
+             (len(obj) == len(t) and all(_is_valid_type(x, tt) for x, tt in zip(obj, t)))
+             for t in elem_class_or_tuple
+         )
+     # Typed dicts
+     elif obj_type is dict:
+         return any(
+             all(_is_valid_type(k, kt) and _is_valid_type(v, vt) for k, v in obj.items())
+             for kt, vt in elem_class_or_tuple
+         )
+
+     else:
+         return False
+
+
+ def _get_detailed_type(obj: Any) -> Type:
+     """
+     Gets a detailed type for an object, including nested types for collections.
+     """
+     obj_type = type(obj)
+
+     if obj_type in (list, set):
+         obj_origin_type = List if obj_type is list else Set
+         elems_type = Union[tuple({_get_detailed_type(x) for x in obj})]
+         return obj_origin_type[elems_type]
+     elif obj_type is tuple:
+         return Tuple[tuple(_get_detailed_type(x) for x in obj)]
+     elif obj_type is dict:
+         keys_type = Union[tuple({_get_detailed_type(k) for k in obj.keys()})]
+         values_type = Union[tuple({_get_detailed_type(k) for k in obj.values()})]
+         return Dict[keys_type, values_type]
+     else:
+         return obj_type
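For orientation (not part of the diff): a short sketch of what the two helpers report; the concrete values are arbitrary:

# Illustrative checks with the new typing helpers.
from typing import Dict, List, Union

from diffusers.utils.typing_utils import _get_detailed_type, _is_valid_type

print(_is_valid_type([1, 2, 3], List[int]))          # True
print(_is_valid_type([1, "a"], List[int]))           # False: mixed element types
print(_is_valid_type({"k": 1.0}, Dict[str, float]))  # True
print(_is_valid_type((1, "a"), Union[int, str]))     # False: a tuple is neither int nor str

print(_get_detailed_type([1, "a"]))                  # e.g. typing.List[typing.Union[int, str]]
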
diffusers/video_processor.py

@@ -67,7 +67,7 @@ class VideoProcessor(VaeImageProcessor):

          # ensure the input is a list of videos:
          # - if it is a batch of videos (5d torch.Tensor or np.ndarray), it is converted to a list of videos (a list of 4d torch.Tensor or np.ndarray)
-         # - if it is is a single video, it is convereted to a list of one video.
+         # - if it is a single video, it is convereted to a list of one video.
          if isinstance(video, (np.ndarray, torch.Tensor)) and video.ndim == 5:
              video = list(video)
          elif isinstance(video, list) and is_valid_image(video[0]) or is_valid_image_imagelist(video):
{diffusers-0.32.2.dist-info → diffusers-0.33.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: diffusers
- Version: 0.32.2
+ Version: 0.33.1
  Summary: State-of-the-art diffusion in PyTorch and JAX.
  Home-page: https://github.com/huggingface/diffusers
  Author: The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)
@@ -18,21 +18,25 @@ Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
  Requires-Python: >=3.8.0
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: importlib-metadata
  Requires-Dist: filelock
- Requires-Dist: huggingface-hub>=0.23.2
+ Requires-Dist: huggingface-hub>=0.27.0
  Requires-Dist: numpy
  Requires-Dist: regex!=2019.12.17
  Requires-Dist: requests
  Requires-Dist: safetensors>=0.3.1
  Requires-Dist: Pillow
+ Provides-Extra: bitsandbytes
+ Requires-Dist: bitsandbytes>=0.43.3; extra == "bitsandbytes"
+ Requires-Dist: accelerate>=0.31.0; extra == "bitsandbytes"
  Provides-Extra: dev
  Requires-Dist: urllib3<=2.0.0; extra == "dev"
  Requires-Dist: isort>=5.5.4; extra == "dev"
- Requires-Dist: ruff==0.1.5; extra == "dev"
+ Requires-Dist: ruff==0.9.10; extra == "dev"
  Requires-Dist: hf-doc-builder>=0.3.0; extra == "dev"
  Requires-Dist: compel==0.1.8; extra == "dev"
  Requires-Dist: GitPython<3.1.19; extra == "dev"
@@ -49,8 +53,10 @@ Requires-Dist: requests-mock==1.10.0; extra == "dev"
  Requires-Dist: safetensors>=0.3.1; extra == "dev"
  Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "dev"
  Requires-Dist: scipy; extra == "dev"
+ Requires-Dist: tiktoken>=0.7.0; extra == "dev"
  Requires-Dist: torchvision; extra == "dev"
  Requires-Dist: transformers>=4.41.2; extra == "dev"
+ Requires-Dist: phonemizer; extra == "dev"
  Requires-Dist: accelerate>=0.31.0; extra == "dev"
  Requires-Dist: protobuf<4,>=3.20.3; extra == "dev"
  Requires-Dist: tensorboard; extra == "dev"
@@ -65,10 +71,16 @@ Provides-Extra: flax
  Requires-Dist: jax>=0.4.1; extra == "flax"
  Requires-Dist: jaxlib>=0.4.1; extra == "flax"
  Requires-Dist: flax>=0.4.1; extra == "flax"
+ Provides-Extra: gguf
+ Requires-Dist: gguf>=0.10.0; extra == "gguf"
+ Requires-Dist: accelerate>=0.31.0; extra == "gguf"
+ Provides-Extra: optimum_quanto
+ Requires-Dist: optimum-quanto>=0.2.6; extra == "optimum-quanto"
+ Requires-Dist: accelerate>=0.31.0; extra == "optimum-quanto"
  Provides-Extra: quality
  Requires-Dist: urllib3<=2.0.0; extra == "quality"
  Requires-Dist: isort>=5.5.4; extra == "quality"
- Requires-Dist: ruff==0.1.5; extra == "quality"
+ Requires-Dist: ruff==0.9.10; extra == "quality"
  Requires-Dist: hf-doc-builder>=0.3.0; extra == "quality"
  Provides-Extra: test
  Requires-Dist: compel==0.1.8; extra == "test"
@@ -86,11 +98,16 @@ Requires-Dist: requests-mock==1.10.0; extra == "test"
  Requires-Dist: safetensors>=0.3.1; extra == "test"
  Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "test"
  Requires-Dist: scipy; extra == "test"
+ Requires-Dist: tiktoken>=0.7.0; extra == "test"
  Requires-Dist: torchvision; extra == "test"
  Requires-Dist: transformers>=4.41.2; extra == "test"
+ Requires-Dist: phonemizer; extra == "test"
  Provides-Extra: torch
  Requires-Dist: torch>=1.4; extra == "torch"
  Requires-Dist: accelerate>=0.31.0; extra == "torch"
+ Provides-Extra: torchao
+ Requires-Dist: torchao>=0.7.0; extra == "torchao"
+ Requires-Dist: accelerate>=0.31.0; extra == "torchao"
  Provides-Extra: training
  Requires-Dist: accelerate>=0.31.0; extra == "training"
  Requires-Dist: datasets; extra == "training"