diffusers 0.27.0__py3-none-any.whl → 0.32.2__py3-none-any.whl

Files changed (445)
  1. diffusers/__init__.py +233 -6
  2. diffusers/callbacks.py +209 -0
  3. diffusers/commands/env.py +102 -6
  4. diffusers/configuration_utils.py +45 -16
  5. diffusers/dependency_versions_table.py +4 -3
  6. diffusers/image_processor.py +434 -110
  7. diffusers/loaders/__init__.py +42 -9
  8. diffusers/loaders/ip_adapter.py +626 -36
  9. diffusers/loaders/lora_base.py +900 -0
  10. diffusers/loaders/lora_conversion_utils.py +991 -125
  11. diffusers/loaders/lora_pipeline.py +3812 -0
  12. diffusers/loaders/peft.py +571 -7
  13. diffusers/loaders/single_file.py +405 -173
  14. diffusers/loaders/single_file_model.py +385 -0
  15. diffusers/loaders/single_file_utils.py +1783 -713
  16. diffusers/loaders/textual_inversion.py +41 -23
  17. diffusers/loaders/transformer_flux.py +181 -0
  18. diffusers/loaders/transformer_sd3.py +89 -0
  19. diffusers/loaders/unet.py +464 -540
  20. diffusers/loaders/unet_loader_utils.py +163 -0
  21. diffusers/models/__init__.py +76 -7
  22. diffusers/models/activations.py +65 -10
  23. diffusers/models/adapter.py +53 -53
  24. diffusers/models/attention.py +605 -18
  25. diffusers/models/attention_flax.py +1 -1
  26. diffusers/models/attention_processor.py +4304 -687
  27. diffusers/models/autoencoders/__init__.py +8 -0
  28. diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
  29. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  30. diffusers/models/autoencoders/autoencoder_kl.py +110 -28
  31. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  32. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
  33. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  34. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  35. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  36. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
  37. diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
  38. diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
  39. diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
  40. diffusers/models/autoencoders/vae.py +41 -29
  41. diffusers/models/autoencoders/vq_model.py +182 -0
  42. diffusers/models/controlnet.py +47 -800
  43. diffusers/models/controlnet_flux.py +70 -0
  44. diffusers/models/controlnet_sd3.py +68 -0
  45. diffusers/models/controlnet_sparsectrl.py +116 -0
  46. diffusers/models/controlnets/__init__.py +23 -0
  47. diffusers/models/controlnets/controlnet.py +872 -0
  48. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
  49. diffusers/models/controlnets/controlnet_flux.py +536 -0
  50. diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
  51. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  52. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  53. diffusers/models/controlnets/controlnet_union.py +832 -0
  54. diffusers/models/controlnets/controlnet_xs.py +1946 -0
  55. diffusers/models/controlnets/multicontrolnet.py +183 -0
  56. diffusers/models/downsampling.py +85 -18
  57. diffusers/models/embeddings.py +1856 -158
  58. diffusers/models/embeddings_flax.py +23 -9
  59. diffusers/models/model_loading_utils.py +480 -0
  60. diffusers/models/modeling_flax_pytorch_utils.py +2 -1
  61. diffusers/models/modeling_flax_utils.py +2 -7
  62. diffusers/models/modeling_outputs.py +14 -0
  63. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  64. diffusers/models/modeling_utils.py +611 -146
  65. diffusers/models/normalization.py +361 -20
  66. diffusers/models/resnet.py +18 -23
  67. diffusers/models/transformers/__init__.py +16 -0
  68. diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
  69. diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
  70. diffusers/models/transformers/dit_transformer_2d.py +240 -0
  71. diffusers/models/transformers/dual_transformer_2d.py +9 -8
  72. diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
  73. diffusers/models/transformers/latte_transformer_3d.py +327 -0
  74. diffusers/models/transformers/lumina_nextdit2d.py +340 -0
  75. diffusers/models/transformers/pixart_transformer_2d.py +445 -0
  76. diffusers/models/transformers/prior_transformer.py +13 -13
  77. diffusers/models/transformers/sana_transformer.py +488 -0
  78. diffusers/models/transformers/stable_audio_transformer.py +458 -0
  79. diffusers/models/transformers/t5_film_transformer.py +17 -19
  80. diffusers/models/transformers/transformer_2d.py +297 -187
  81. diffusers/models/transformers/transformer_allegro.py +422 -0
  82. diffusers/models/transformers/transformer_cogview3plus.py +386 -0
  83. diffusers/models/transformers/transformer_flux.py +593 -0
  84. diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
  85. diffusers/models/transformers/transformer_ltx.py +469 -0
  86. diffusers/models/transformers/transformer_mochi.py +499 -0
  87. diffusers/models/transformers/transformer_sd3.py +461 -0
  88. diffusers/models/transformers/transformer_temporal.py +21 -19
  89. diffusers/models/unets/unet_1d.py +8 -8
  90. diffusers/models/unets/unet_1d_blocks.py +31 -31
  91. diffusers/models/unets/unet_2d.py +17 -10
  92. diffusers/models/unets/unet_2d_blocks.py +225 -149
  93. diffusers/models/unets/unet_2d_condition.py +50 -53
  94. diffusers/models/unets/unet_2d_condition_flax.py +6 -5
  95. diffusers/models/unets/unet_3d_blocks.py +192 -1057
  96. diffusers/models/unets/unet_3d_condition.py +22 -27
  97. diffusers/models/unets/unet_i2vgen_xl.py +22 -18
  98. diffusers/models/unets/unet_kandinsky3.py +2 -2
  99. diffusers/models/unets/unet_motion_model.py +1413 -89
  100. diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
  101. diffusers/models/unets/unet_stable_cascade.py +19 -18
  102. diffusers/models/unets/uvit_2d.py +2 -2
  103. diffusers/models/upsampling.py +95 -26
  104. diffusers/models/vq_model.py +12 -164
  105. diffusers/optimization.py +1 -1
  106. diffusers/pipelines/__init__.py +202 -3
  107. diffusers/pipelines/allegro/__init__.py +48 -0
  108. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  109. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  110. diffusers/pipelines/amused/pipeline_amused.py +12 -12
  111. diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
  112. diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
  113. diffusers/pipelines/animatediff/__init__.py +8 -0
  114. diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
  115. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
  116. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
  117. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
  118. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
  119. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
  120. diffusers/pipelines/animatediff/pipeline_output.py +3 -2
  121. diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
  122. diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
  123. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
  124. diffusers/pipelines/aura_flow/__init__.py +48 -0
  125. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
  126. diffusers/pipelines/auto_pipeline.py +196 -28
  127. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  128. diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
  129. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
  130. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
  131. diffusers/pipelines/cogvideo/__init__.py +54 -0
  132. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
  133. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
  134. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
  135. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
  136. diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
  137. diffusers/pipelines/cogview3/__init__.py +47 -0
  138. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
  139. diffusers/pipelines/cogview3/pipeline_output.py +21 -0
  140. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
  141. diffusers/pipelines/controlnet/__init__.py +86 -80
  142. diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
  143. diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
  144. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
  145. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
  146. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
  147. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
  148. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
  149. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
  150. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  151. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  152. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  153. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  154. diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
  155. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
  156. diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
  157. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
  158. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
  159. diffusers/pipelines/controlnet_xs/__init__.py +68 -0
  160. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
  161. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
  162. diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
  163. diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
  164. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
  165. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
  166. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
  167. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
  168. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
  169. diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
  170. diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
  171. diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
  172. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
  173. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
  174. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
  175. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
  176. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
  177. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
  178. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
  179. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
  180. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
  181. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
  182. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
  183. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
  184. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
  185. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
  186. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
  187. diffusers/pipelines/dit/pipeline_dit.py +7 -4
  188. diffusers/pipelines/flux/__init__.py +69 -0
  189. diffusers/pipelines/flux/modeling_flux.py +47 -0
  190. diffusers/pipelines/flux/pipeline_flux.py +957 -0
  191. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  192. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  193. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  194. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
  195. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
  196. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
  197. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  198. diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
  199. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
  200. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  201. diffusers/pipelines/flux/pipeline_output.py +37 -0
  202. diffusers/pipelines/free_init_utils.py +41 -38
  203. diffusers/pipelines/free_noise_utils.py +596 -0
  204. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  205. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  206. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  207. diffusers/pipelines/hunyuandit/__init__.py +48 -0
  208. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
  209. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
  210. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
  211. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
  212. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
  213. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
  214. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
  215. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
  216. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
  217. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
  218. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
  219. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
  220. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
  221. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
  222. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
  223. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
  224. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
  225. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
  226. diffusers/pipelines/kolors/__init__.py +54 -0
  227. diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
  228. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
  229. diffusers/pipelines/kolors/pipeline_output.py +21 -0
  230. diffusers/pipelines/kolors/text_encoder.py +889 -0
  231. diffusers/pipelines/kolors/tokenizer.py +338 -0
  232. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
  233. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
  234. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
  235. diffusers/pipelines/latte/__init__.py +48 -0
  236. diffusers/pipelines/latte/pipeline_latte.py +881 -0
  237. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
  238. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
  239. diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
  240. diffusers/pipelines/ltx/__init__.py +50 -0
  241. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  242. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  243. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  244. diffusers/pipelines/lumina/__init__.py +48 -0
  245. diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
  246. diffusers/pipelines/marigold/__init__.py +50 -0
  247. diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
  248. diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
  249. diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
  250. diffusers/pipelines/mochi/__init__.py +48 -0
  251. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  252. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  253. diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
  254. diffusers/pipelines/pag/__init__.py +80 -0
  255. diffusers/pipelines/pag/pag_utils.py +243 -0
  256. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
  257. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
  258. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
  259. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
  260. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
  261. diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
  262. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
  263. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  264. diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
  265. diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
  266. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  267. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
  268. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
  269. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  270. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
  271. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
  272. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
  273. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
  274. diffusers/pipelines/pia/pipeline_pia.py +74 -164
  275. diffusers/pipelines/pipeline_flax_utils.py +5 -10
  276. diffusers/pipelines/pipeline_loading_utils.py +515 -53
  277. diffusers/pipelines/pipeline_utils.py +411 -222
  278. diffusers/pipelines/pixart_alpha/__init__.py +8 -1
  279. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
  280. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
  281. diffusers/pipelines/sana/__init__.py +47 -0
  282. diffusers/pipelines/sana/pipeline_output.py +21 -0
  283. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  284. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
  285. diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
  286. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
  287. diffusers/pipelines/shap_e/renderer.py +1 -1
  288. diffusers/pipelines/stable_audio/__init__.py +50 -0
  289. diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
  290. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
  291. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
  292. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
  293. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
  294. diffusers/pipelines/stable_diffusion/__init__.py +0 -1
  295. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
  296. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  297. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
  298. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
  299. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
  300. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
  301. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
  302. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
  303. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
  304. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
  305. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
  306. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
  307. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
  308. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
  309. diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
  310. diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
  311. diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
  312. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
  313. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
  314. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
  315. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
  316. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
  317. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
  318. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
  319. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
  320. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
  321. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
  322. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
  323. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
  324. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  325. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
  326. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
  327. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
  328. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
  329. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
  330. diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
  331. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
  332. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
  333. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
  334. diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
  335. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
  336. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
  337. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
  338. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
  339. diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
  340. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
  341. diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
  342. diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
  343. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
  344. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
  345. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
  346. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
  347. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
  348. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
  349. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
  350. diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
  351. diffusers/quantizers/auto.py +139 -0
  352. diffusers/quantizers/base.py +233 -0
  353. diffusers/quantizers/bitsandbytes/__init__.py +2 -0
  354. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
  355. diffusers/quantizers/bitsandbytes/utils.py +306 -0
  356. diffusers/quantizers/gguf/__init__.py +1 -0
  357. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  358. diffusers/quantizers/gguf/utils.py +456 -0
  359. diffusers/quantizers/quantization_config.py +669 -0
  360. diffusers/quantizers/torchao/__init__.py +15 -0
  361. diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
  362. diffusers/schedulers/__init__.py +12 -2
  363. diffusers/schedulers/deprecated/__init__.py +1 -1
  364. diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
  365. diffusers/schedulers/scheduling_amused.py +5 -5
  366. diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
  367. diffusers/schedulers/scheduling_consistency_models.py +23 -25
  368. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
  369. diffusers/schedulers/scheduling_ddim.py +27 -26
  370. diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
  371. diffusers/schedulers/scheduling_ddim_flax.py +2 -1
  372. diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
  373. diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
  374. diffusers/schedulers/scheduling_ddpm.py +27 -30
  375. diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
  376. diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
  377. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
  378. diffusers/schedulers/scheduling_deis_multistep.py +150 -50
  379. diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
  380. diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
  381. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
  382. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
  383. diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
  384. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
  385. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
  386. diffusers/schedulers/scheduling_edm_euler.py +62 -39
  387. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
  388. diffusers/schedulers/scheduling_euler_discrete.py +255 -74
  389. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
  390. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
  391. diffusers/schedulers/scheduling_heun_discrete.py +174 -46
  392. diffusers/schedulers/scheduling_ipndm.py +9 -9
  393. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
  394. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
  395. diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
  396. diffusers/schedulers/scheduling_lcm.py +23 -29
  397. diffusers/schedulers/scheduling_lms_discrete.py +105 -28
  398. diffusers/schedulers/scheduling_pndm.py +20 -20
  399. diffusers/schedulers/scheduling_repaint.py +21 -21
  400. diffusers/schedulers/scheduling_sasolver.py +157 -60
  401. diffusers/schedulers/scheduling_sde_ve.py +19 -19
  402. diffusers/schedulers/scheduling_tcd.py +41 -36
  403. diffusers/schedulers/scheduling_unclip.py +19 -16
  404. diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
  405. diffusers/schedulers/scheduling_utils.py +12 -5
  406. diffusers/schedulers/scheduling_utils_flax.py +1 -3
  407. diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
  408. diffusers/training_utils.py +214 -30
  409. diffusers/utils/__init__.py +17 -1
  410. diffusers/utils/constants.py +3 -0
  411. diffusers/utils/doc_utils.py +1 -0
  412. diffusers/utils/dummy_pt_objects.py +592 -7
  413. diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
  414. diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
  415. diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
  416. diffusers/utils/dynamic_modules_utils.py +34 -29
  417. diffusers/utils/export_utils.py +50 -6
  418. diffusers/utils/hub_utils.py +131 -17
  419. diffusers/utils/import_utils.py +210 -8
  420. diffusers/utils/loading_utils.py +118 -5
  421. diffusers/utils/logging.py +4 -2
  422. diffusers/utils/peft_utils.py +37 -7
  423. diffusers/utils/state_dict_utils.py +13 -2
  424. diffusers/utils/testing_utils.py +193 -11
  425. diffusers/utils/torch_utils.py +4 -0
  426. diffusers/video_processor.py +113 -0
  427. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
  428. diffusers-0.32.2.dist-info/RECORD +550 -0
  429. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
  430. diffusers/loaders/autoencoder.py +0 -146
  431. diffusers/loaders/controlnet.py +0 -136
  432. diffusers/loaders/lora.py +0 -1349
  433. diffusers/models/prior_transformer.py +0 -12
  434. diffusers/models/t5_film_transformer.py +0 -70
  435. diffusers/models/transformer_2d.py +0 -25
  436. diffusers/models/transformer_temporal.py +0 -34
  437. diffusers/models/unet_1d.py +0 -26
  438. diffusers/models/unet_1d_blocks.py +0 -203
  439. diffusers/models/unet_2d.py +0 -27
  440. diffusers/models/unet_2d_blocks.py +0 -375
  441. diffusers/models/unet_2d_condition.py +0 -25
  442. diffusers-0.27.0.dist-info/RECORD +0 -399
  443. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
  444. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
  445. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
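
Among the files added above, 0.32 ships a new diffusers/quantizers package (bitsandbytes, GGUF and torchao backends plus quantization_config.py). As a rough orientation only, here is a minimal sketch of loading one model component through the new quantization config; the checkpoint id and the 4-bit settings are illustrative assumptions, not taken from this changelog, and the bitsandbytes backend must be installed:

    import torch
    from diffusers import BitsAndBytesConfig, FluxTransformer2DModel

    # Hypothetical example: 4-bit NF4 quantization of a Flux transformer.
    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev",  # illustrative checkpoint
        subfolder="transformer",
        quantization_config=quant_config,
        torch_dtype=torch.bfloat16,
    )
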
diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py
@@ -30,17 +30,17 @@ from transformers import (
 
 from diffusers.utils.import_utils import is_invisible_watermark_available
 
+from ...callbacks import MultiPipelineCallbacks, PipelineCallback
 from ...image_processor import PipelineImageInput, VaeImageProcessor
 from ...loaders import (
+    FromSingleFileMixin,
     IPAdapterMixin,
     StableDiffusionXLLoraLoaderMixin,
     TextualInversionLoaderMixin,
 )
-from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
+from ...models import AutoencoderKL, ControlNetModel, ImageProjection, MultiControlNetModel, UNet2DConditionModel
 from ...models.attention_processor import (
     AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
     XFormersAttnProcessor,
 )
 from ...models.lora import adjust_lora_scale_text_encoder
@@ -61,8 +61,6 @@ from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
 if is_invisible_watermark_available():
     from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
 
-from .multicontrolnet import MultiControlNetModel
-
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
 
@@ -76,20 +74,20 @@ EXAMPLE_DOC_STRING = """
         >>> import numpy as np
         >>> from PIL import Image
 
-        >>> from transformers import DPTFeatureExtractor, DPTForDepthEstimation
+        >>> from transformers import DPTImageProcessor, DPTForDepthEstimation
         >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL
         >>> from diffusers.utils import load_image
 
 
         >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda")
-        >>> feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-hybrid-midas")
+        >>> feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
         >>> controlnet = ControlNetModel.from_pretrained(
         ...     "diffusers/controlnet-depth-sdxl-1.0-small",
         ...     variant="fp16",
         ...     use_safetensors=True,
         ...     torch_dtype=torch.float16,
-        ... ).to("cuda")
-        >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to("cuda")
+        ... )
+        >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
         >>> pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
         ...     "stabilityai/stable-diffusion-xl-base-1.0",
         ...     controlnet=controlnet,
@@ -161,6 +159,7 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
     StableDiffusionMixin,
     TextualInversionLoaderMixin,
     StableDiffusionXLLoraLoaderMixin,
+    FromSingleFileMixin,
     IPAdapterMixin,
 ):
     r"""
@@ -225,7 +224,15 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
         "feature_extractor",
         "image_encoder",
     ]
-    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
+    _callback_tensor_inputs = [
+        "latents",
+        "prompt_embeds",
+        "negative_prompt_embeds",
+        "add_text_embeds",
+        "add_time_ids",
+        "negative_pooled_prompt_embeds",
+        "add_neg_time_ids",
+    ]
 
     def __init__(
         self,
@@ -285,10 +292,10 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
         do_classifier_free_guidance: bool = True,
         negative_prompt: Optional[str] = None,
         negative_prompt_2: Optional[str] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
         lora_scale: Optional[float] = None,
         clip_skip: Optional[int] = None,
     ):
@@ -314,17 +321,17 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
             negative_prompt_2 (`str` or `List[str]`, *optional*):
                 The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                 `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                 argument.
-            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                 If not provided, pooled text embeddings will be generated from `prompt` input argument.
-            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                 weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                 input argument.
@@ -539,6 +546,9 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
     def prepare_ip_adapter_image_embeds(
         self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
     ):
+        image_embeds = []
+        if do_classifier_free_guidance:
+            negative_image_embeds = []
         if ip_adapter_image_embeds is None:
             if not isinstance(ip_adapter_image, list):
                 ip_adapter_image = [ip_adapter_image]
@@ -548,7 +558,6 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                     f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                 )
 
-            image_embeds = []
             for single_ip_adapter_image, image_proj_layer in zip(
                 ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
             ):
@@ -556,36 +565,28 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                 single_image_embeds, single_negative_image_embeds = self.encode_image(
                     single_ip_adapter_image, device, 1, output_hidden_state
                 )
-                single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
-                single_negative_image_embeds = torch.stack(
-                    [single_negative_image_embeds] * num_images_per_prompt, dim=0
-                )
 
+                image_embeds.append(single_image_embeds[None, :])
                 if do_classifier_free_guidance:
-                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
-                    single_image_embeds = single_image_embeds.to(device)
-
-                image_embeds.append(single_image_embeds)
+                    negative_image_embeds.append(single_negative_image_embeds[None, :])
         else:
-            repeat_dims = [1]
-            image_embeds = []
             for single_image_embeds in ip_adapter_image_embeds:
                 if do_classifier_free_guidance:
                     single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
-                    single_image_embeds = single_image_embeds.repeat(
-                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
-                    )
-                    single_negative_image_embeds = single_negative_image_embeds.repeat(
-                        num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
-                    )
-                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
-                else:
-                    single_image_embeds = single_image_embeds.repeat(
-                        num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
-                    )
+                    negative_image_embeds.append(single_negative_image_embeds)
                 image_embeds.append(single_image_embeds)
 
-        return image_embeds
+        ip_adapter_image_embeds = []
+        for i, single_image_embeds in enumerate(image_embeds):
+            single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
+            if do_classifier_free_guidance:
+                single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
+                single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
+
+            single_image_embeds = single_image_embeds.to(device=device)
+            ip_adapter_image_embeds.append(single_image_embeds)
+
+        return ip_adapter_image_embeds
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
     def prepare_extra_step_kwargs(self, generator, eta):
@@ -896,6 +897,12 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                 f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
             )
 
+        latents_mean = latents_std = None
+        if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None:
+            latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1)
+        if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None:
+            latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1)
+
         # Offload text encoder if `enable_model_cpu_offload` was enabled
         if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
             self.text_encoder_2.to("cpu")
@@ -921,6 +928,13 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                 )
 
             elif isinstance(generator, list):
+                if image.shape[0] < batch_size and batch_size % image.shape[0] == 0:
+                    image = torch.cat([image] * (batch_size // image.shape[0]), dim=0)
+                elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0:
+                    raise ValueError(
+                        f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} "
+                    )
+
                 init_latents = [
                     retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                     for i in range(batch_size)
@@ -933,7 +947,12 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                 self.vae.to(dtype)
 
             init_latents = init_latents.to(dtype)
-            init_latents = self.vae.config.scaling_factor * init_latents
+            if latents_mean is not None and latents_std is not None:
+                latents_mean = latents_mean.to(device=device, dtype=dtype)
+                latents_std = latents_std.to(device=device, dtype=dtype)
+                init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std
+            else:
+                init_latents = self.vae.config.scaling_factor * init_latents
 
         if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
             # expand init_latents for batch_size
@@ -1017,8 +1036,6 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
             (
                 AttnProcessor2_0,
                 XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
             ),
         )
         # if xformers or torch_2_0 is used attention block does not need
@@ -1051,6 +1068,10 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
     def num_timesteps(self):
         return self._num_timesteps
 
+    @property
+    def interrupt(self):
+        return self._interrupt
+
     @torch.no_grad()
     @replace_example_docstring(EXAMPLE_DOC_STRING)
     def __call__(
@@ -1069,13 +1090,13 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
-        ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
+        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
@@ -1092,7 +1113,9 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
         aesthetic_score: float = 6.0,
         negative_aesthetic_score: float = 2.5,
         clip_skip: Optional[int] = None,
-        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end: Optional[
+            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+        ] = None,
         callback_on_step_end_tensor_inputs: List[str] = ["latents"],
         **kwargs,
     ):
@@ -1106,18 +1129,18 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
             prompt_2 (`str` or `List[str]`, *optional*):
                 The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                 used in both text-encoders
-            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
-                    `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
+            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
+                    `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
                 The initial image will be used as the starting point for the image generation process. Can also accept
                 image latents as `image`, if passing latents directly, it will not be encoded again.
-            control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
-                    `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
+            control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
+                    `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
                 The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
-                the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
-                also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
-                height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
-                specified in init, images must be passed as a list such that each element of the list can be correctly
-                batched for input to a single controlnet.
+                the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
+                be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
+                and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in
+                init, images must be passed as a list such that each element of the list can be correctly batched for
+                input to a single controlnet.
             height (`int`, *optional*, defaults to the size of control_image):
                 The height in pixels of the generated image. Anything below 512 pixels won't work well for
                 [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
@@ -1156,30 +1179,30 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                 One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                 to make generation deterministic.
-            latents (`torch.FloatTensor`, *optional*):
+            latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                 tensor will ge generated by sampling using the supplied random `generator`.
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                 argument.
-            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                 If not provided, pooled text embeddings will be generated from `prompt` input argument.
-            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                 weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                 input argument.
             ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
-            ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
-                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
-                Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
-                if `do_classifier_free_guidance` is set to `True`.
-                If not provided, embeddings are computed from the `ip_adapter_image` input argument.
+            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
+                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
+                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
+                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
+                provided, embeddings are computed from the `ip_adapter_image` input argument.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generate image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
@@ -1241,15 +1264,15 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
             clip_skip (`int`, *optional*):
                 Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                 the output of the pre-final layer will be used for computing the prompt embeddings.
-            callback_on_step_end (`Callable`, *optional*):
-                A function that calls at the end of each denoising steps during the inference. The function is called
-                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
-                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
-                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+                each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
+                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
             callback_on_step_end_tensor_inputs (`List`, *optional*):
                 The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                 will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
-                `._callback_tensor_inputs` attribute of your pipeine class.
+                `._callback_tensor_inputs` attribute of your pipeline class.
 
         Examples:
 
@@ -1275,6 +1298,9 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                 "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
             )
 
+        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
         controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
 
         # align format for control guidance
@@ -1314,6 +1340,7 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
         self._guidance_scale = guidance_scale
         self._clip_skip = clip_skip
         self._cross_attention_kwargs = cross_attention_kwargs
+        self._interrupt = False
 
         # 2. Define call parameters
         if prompt is not None and isinstance(prompt, str):
@@ -1416,16 +1443,17 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
         self._num_timesteps = len(timesteps)
 
         # 6. Prepare latent variables
-        latents = self.prepare_latents(
-            image,
-            latent_timestep,
-            batch_size,
-            num_images_per_prompt,
-            prompt_embeds.dtype,
-            device,
-            generator,
-            True,
-        )
+        if latents is None:
+            latents = self.prepare_latents(
+                image,
+                latent_timestep,
+                batch_size,
+                num_images_per_prompt,
+                prompt_embeds.dtype,
+                device,
+                generator,
+                True,
+            )
 
         # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
@@ -1485,6 +1513,9 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
         with self.progress_bar(total=num_inference_steps) as progress_bar:
             for i, t in enumerate(timesteps):
+                if self.interrupt:
+                    continue
+
                 # expand the latents if we are doing classifier free guidance
                 latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
@@ -1513,7 +1544,6 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                     if isinstance(controlnet_cond_scale, list):
                         controlnet_cond_scale = controlnet_cond_scale[0]
                     cond_scale = controlnet_cond_scale * controlnet_keep[i]
-
                 down_block_res_samples, mid_block_res_sample = self.controlnet(
                     control_model_input,
                     t,
@@ -1526,7 +1556,7 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                 )
 
                 if guess_mode and self.do_classifier_free_guidance:
-                    # Infered ControlNet only for the conditional batch.
+                    # Inferred ControlNet only for the conditional batch.
                     # To apply the output of ControlNet to both the unconditional and conditional batches,
                     # add 0 to the unconditional batch to keep it unchanged.
                     down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
@@ -1564,6 +1594,12 @@ class StableDiffusionXLControlNetImg2ImgPipeline(
                     latents = callback_outputs.pop("latents", latents)
                     prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                     negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+                    add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
+                    negative_pooled_prompt_embeds = callback_outputs.pop(
+                        "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
+                    )
+                    add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
+                    add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids)
 
                 # call the callback, if provided
                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
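
The hunks above give this pipeline PipelineCallback/MultiPipelineCallbacks support, extra callback tensor inputs, and an `interrupt` property checked at the top of the denoising loop. A minimal sketch of a plain-callable callback using these hooks follows; the 20-step cutoff and the idea of setting the private `_interrupt` flag from inside a callback are illustrative assumptions, while the callback signature and the `interrupt` check come from the diff itself:

    # Per-step callback matching the documented signature:
    # callback_on_step_end(pipeline, step, timestep, callback_kwargs) -> dict
    def stop_early(pipeline, step, timestep, callback_kwargs):
        if step >= 20:  # arbitrary cutoff for illustration
            # Sets the flag read by the new `interrupt` property, so the
            # remaining denoising iterations are skipped via `continue`.
            pipeline._interrupt = True
        return callback_kwargs

    # Usage, with `pipe` built as in the EXAMPLE_DOC_STRING shown earlier:
    # result = pipe(prompt, image=init_image, control_image=control_image,
    #               callback_on_step_end=stop_early,
    #               callback_on_step_end_tensor_inputs=["latents"])
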