diffusers 0.27.0__py3-none-any.whl → 0.32.2__py3-none-any.whl

Files changed (445)
  1. diffusers/__init__.py +233 -6
  2. diffusers/callbacks.py +209 -0
  3. diffusers/commands/env.py +102 -6
  4. diffusers/configuration_utils.py +45 -16
  5. diffusers/dependency_versions_table.py +4 -3
  6. diffusers/image_processor.py +434 -110
  7. diffusers/loaders/__init__.py +42 -9
  8. diffusers/loaders/ip_adapter.py +626 -36
  9. diffusers/loaders/lora_base.py +900 -0
  10. diffusers/loaders/lora_conversion_utils.py +991 -125
  11. diffusers/loaders/lora_pipeline.py +3812 -0
  12. diffusers/loaders/peft.py +571 -7
  13. diffusers/loaders/single_file.py +405 -173
  14. diffusers/loaders/single_file_model.py +385 -0
  15. diffusers/loaders/single_file_utils.py +1783 -713
  16. diffusers/loaders/textual_inversion.py +41 -23
  17. diffusers/loaders/transformer_flux.py +181 -0
  18. diffusers/loaders/transformer_sd3.py +89 -0
  19. diffusers/loaders/unet.py +464 -540
  20. diffusers/loaders/unet_loader_utils.py +163 -0
  21. diffusers/models/__init__.py +76 -7
  22. diffusers/models/activations.py +65 -10
  23. diffusers/models/adapter.py +53 -53
  24. diffusers/models/attention.py +605 -18
  25. diffusers/models/attention_flax.py +1 -1
  26. diffusers/models/attention_processor.py +4304 -687
  27. diffusers/models/autoencoders/__init__.py +8 -0
  28. diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
  29. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  30. diffusers/models/autoencoders/autoencoder_kl.py +110 -28
  31. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  32. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
  33. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  34. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  35. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  36. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
  37. diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
  38. diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
  39. diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
  40. diffusers/models/autoencoders/vae.py +41 -29
  41. diffusers/models/autoencoders/vq_model.py +182 -0
  42. diffusers/models/controlnet.py +47 -800
  43. diffusers/models/controlnet_flux.py +70 -0
  44. diffusers/models/controlnet_sd3.py +68 -0
  45. diffusers/models/controlnet_sparsectrl.py +116 -0
  46. diffusers/models/controlnets/__init__.py +23 -0
  47. diffusers/models/controlnets/controlnet.py +872 -0
  48. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
  49. diffusers/models/controlnets/controlnet_flux.py +536 -0
  50. diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
  51. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  52. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  53. diffusers/models/controlnets/controlnet_union.py +832 -0
  54. diffusers/models/controlnets/controlnet_xs.py +1946 -0
  55. diffusers/models/controlnets/multicontrolnet.py +183 -0
  56. diffusers/models/downsampling.py +85 -18
  57. diffusers/models/embeddings.py +1856 -158
  58. diffusers/models/embeddings_flax.py +23 -9
  59. diffusers/models/model_loading_utils.py +480 -0
  60. diffusers/models/modeling_flax_pytorch_utils.py +2 -1
  61. diffusers/models/modeling_flax_utils.py +2 -7
  62. diffusers/models/modeling_outputs.py +14 -0
  63. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  64. diffusers/models/modeling_utils.py +611 -146
  65. diffusers/models/normalization.py +361 -20
  66. diffusers/models/resnet.py +18 -23
  67. diffusers/models/transformers/__init__.py +16 -0
  68. diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
  69. diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
  70. diffusers/models/transformers/dit_transformer_2d.py +240 -0
  71. diffusers/models/transformers/dual_transformer_2d.py +9 -8
  72. diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
  73. diffusers/models/transformers/latte_transformer_3d.py +327 -0
  74. diffusers/models/transformers/lumina_nextdit2d.py +340 -0
  75. diffusers/models/transformers/pixart_transformer_2d.py +445 -0
  76. diffusers/models/transformers/prior_transformer.py +13 -13
  77. diffusers/models/transformers/sana_transformer.py +488 -0
  78. diffusers/models/transformers/stable_audio_transformer.py +458 -0
  79. diffusers/models/transformers/t5_film_transformer.py +17 -19
  80. diffusers/models/transformers/transformer_2d.py +297 -187
  81. diffusers/models/transformers/transformer_allegro.py +422 -0
  82. diffusers/models/transformers/transformer_cogview3plus.py +386 -0
  83. diffusers/models/transformers/transformer_flux.py +593 -0
  84. diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
  85. diffusers/models/transformers/transformer_ltx.py +469 -0
  86. diffusers/models/transformers/transformer_mochi.py +499 -0
  87. diffusers/models/transformers/transformer_sd3.py +461 -0
  88. diffusers/models/transformers/transformer_temporal.py +21 -19
  89. diffusers/models/unets/unet_1d.py +8 -8
  90. diffusers/models/unets/unet_1d_blocks.py +31 -31
  91. diffusers/models/unets/unet_2d.py +17 -10
  92. diffusers/models/unets/unet_2d_blocks.py +225 -149
  93. diffusers/models/unets/unet_2d_condition.py +50 -53
  94. diffusers/models/unets/unet_2d_condition_flax.py +6 -5
  95. diffusers/models/unets/unet_3d_blocks.py +192 -1057
  96. diffusers/models/unets/unet_3d_condition.py +22 -27
  97. diffusers/models/unets/unet_i2vgen_xl.py +22 -18
  98. diffusers/models/unets/unet_kandinsky3.py +2 -2
  99. diffusers/models/unets/unet_motion_model.py +1413 -89
  100. diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
  101. diffusers/models/unets/unet_stable_cascade.py +19 -18
  102. diffusers/models/unets/uvit_2d.py +2 -2
  103. diffusers/models/upsampling.py +95 -26
  104. diffusers/models/vq_model.py +12 -164
  105. diffusers/optimization.py +1 -1
  106. diffusers/pipelines/__init__.py +202 -3
  107. diffusers/pipelines/allegro/__init__.py +48 -0
  108. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  109. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  110. diffusers/pipelines/amused/pipeline_amused.py +12 -12
  111. diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
  112. diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
  113. diffusers/pipelines/animatediff/__init__.py +8 -0
  114. diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
  115. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
  116. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
  117. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
  118. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
  119. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
  120. diffusers/pipelines/animatediff/pipeline_output.py +3 -2
  121. diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
  122. diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
  123. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
  124. diffusers/pipelines/aura_flow/__init__.py +48 -0
  125. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
  126. diffusers/pipelines/auto_pipeline.py +196 -28
  127. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  128. diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
  129. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
  130. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
  131. diffusers/pipelines/cogvideo/__init__.py +54 -0
  132. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
  133. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
  134. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
  135. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
  136. diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
  137. diffusers/pipelines/cogview3/__init__.py +47 -0
  138. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
  139. diffusers/pipelines/cogview3/pipeline_output.py +21 -0
  140. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
  141. diffusers/pipelines/controlnet/__init__.py +86 -80
  142. diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
  143. diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
  144. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
  145. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
  146. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
  147. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
  148. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
  149. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
  150. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  151. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  152. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  153. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  154. diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
  155. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
  156. diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
  157. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
  158. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
  159. diffusers/pipelines/controlnet_xs/__init__.py +68 -0
  160. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
  161. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
  162. diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
  163. diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
  164. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
  165. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
  166. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
  167. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
  168. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
  169. diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
  170. diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
  171. diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
  172. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
  173. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
  174. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
  175. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
  176. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
  177. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
  178. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
  179. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
  180. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
  181. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
  182. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
  183. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
  184. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
  185. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
  186. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
  187. diffusers/pipelines/dit/pipeline_dit.py +7 -4
  188. diffusers/pipelines/flux/__init__.py +69 -0
  189. diffusers/pipelines/flux/modeling_flux.py +47 -0
  190. diffusers/pipelines/flux/pipeline_flux.py +957 -0
  191. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  192. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  193. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  194. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
  195. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
  196. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
  197. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  198. diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
  199. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
  200. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  201. diffusers/pipelines/flux/pipeline_output.py +37 -0
  202. diffusers/pipelines/free_init_utils.py +41 -38
  203. diffusers/pipelines/free_noise_utils.py +596 -0
  204. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  205. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  206. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  207. diffusers/pipelines/hunyuandit/__init__.py +48 -0
  208. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
  209. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
  210. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
  211. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
  212. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
  213. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
  214. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
  215. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
  216. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
  217. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
  218. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
  219. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
  220. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
  221. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
  222. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
  223. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
  224. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
  225. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
  226. diffusers/pipelines/kolors/__init__.py +54 -0
  227. diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
  228. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
  229. diffusers/pipelines/kolors/pipeline_output.py +21 -0
  230. diffusers/pipelines/kolors/text_encoder.py +889 -0
  231. diffusers/pipelines/kolors/tokenizer.py +338 -0
  232. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
  233. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
  234. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
  235. diffusers/pipelines/latte/__init__.py +48 -0
  236. diffusers/pipelines/latte/pipeline_latte.py +881 -0
  237. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
  238. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
  239. diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
  240. diffusers/pipelines/ltx/__init__.py +50 -0
  241. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  242. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  243. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  244. diffusers/pipelines/lumina/__init__.py +48 -0
  245. diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
  246. diffusers/pipelines/marigold/__init__.py +50 -0
  247. diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
  248. diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
  249. diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
  250. diffusers/pipelines/mochi/__init__.py +48 -0
  251. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  252. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  253. diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
  254. diffusers/pipelines/pag/__init__.py +80 -0
  255. diffusers/pipelines/pag/pag_utils.py +243 -0
  256. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
  257. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
  258. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
  259. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
  260. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
  261. diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
  262. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
  263. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  264. diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
  265. diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
  266. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  267. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
  268. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
  269. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  270. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
  271. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
  272. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
  273. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
  274. diffusers/pipelines/pia/pipeline_pia.py +74 -164
  275. diffusers/pipelines/pipeline_flax_utils.py +5 -10
  276. diffusers/pipelines/pipeline_loading_utils.py +515 -53
  277. diffusers/pipelines/pipeline_utils.py +411 -222
  278. diffusers/pipelines/pixart_alpha/__init__.py +8 -1
  279. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
  280. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
  281. diffusers/pipelines/sana/__init__.py +47 -0
  282. diffusers/pipelines/sana/pipeline_output.py +21 -0
  283. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  284. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
  285. diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
  286. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
  287. diffusers/pipelines/shap_e/renderer.py +1 -1
  288. diffusers/pipelines/stable_audio/__init__.py +50 -0
  289. diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
  290. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
  291. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
  292. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
  293. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
  294. diffusers/pipelines/stable_diffusion/__init__.py +0 -1
  295. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
  296. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  297. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
  298. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
  299. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
  300. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
  301. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
  302. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
  303. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
  304. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
  305. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
  306. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
  307. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
  308. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
  309. diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
  310. diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
  311. diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
  312. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
  313. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
  314. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
  315. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
  316. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
  317. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
  318. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
  319. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
  320. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
  321. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
  322. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
  323. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
  324. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  325. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
  326. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
  327. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
  328. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
  329. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
  330. diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
  331. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
  332. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
  333. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
  334. diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
  335. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
  336. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
  337. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
  338. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
  339. diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
  340. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
  341. diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
  342. diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
  343. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
  344. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
  345. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
  346. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
  347. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
  348. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
  349. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
  350. diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
  351. diffusers/quantizers/auto.py +139 -0
  352. diffusers/quantizers/base.py +233 -0
  353. diffusers/quantizers/bitsandbytes/__init__.py +2 -0
  354. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
  355. diffusers/quantizers/bitsandbytes/utils.py +306 -0
  356. diffusers/quantizers/gguf/__init__.py +1 -0
  357. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  358. diffusers/quantizers/gguf/utils.py +456 -0
  359. diffusers/quantizers/quantization_config.py +669 -0
  360. diffusers/quantizers/torchao/__init__.py +15 -0
  361. diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
  362. diffusers/schedulers/__init__.py +12 -2
  363. diffusers/schedulers/deprecated/__init__.py +1 -1
  364. diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
  365. diffusers/schedulers/scheduling_amused.py +5 -5
  366. diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
  367. diffusers/schedulers/scheduling_consistency_models.py +23 -25
  368. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
  369. diffusers/schedulers/scheduling_ddim.py +27 -26
  370. diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
  371. diffusers/schedulers/scheduling_ddim_flax.py +2 -1
  372. diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
  373. diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
  374. diffusers/schedulers/scheduling_ddpm.py +27 -30
  375. diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
  376. diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
  377. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
  378. diffusers/schedulers/scheduling_deis_multistep.py +150 -50
  379. diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
  380. diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
  381. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
  382. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
  383. diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
  384. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
  385. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
  386. diffusers/schedulers/scheduling_edm_euler.py +62 -39
  387. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
  388. diffusers/schedulers/scheduling_euler_discrete.py +255 -74
  389. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
  390. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
  391. diffusers/schedulers/scheduling_heun_discrete.py +174 -46
  392. diffusers/schedulers/scheduling_ipndm.py +9 -9
  393. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
  394. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
  395. diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
  396. diffusers/schedulers/scheduling_lcm.py +23 -29
  397. diffusers/schedulers/scheduling_lms_discrete.py +105 -28
  398. diffusers/schedulers/scheduling_pndm.py +20 -20
  399. diffusers/schedulers/scheduling_repaint.py +21 -21
  400. diffusers/schedulers/scheduling_sasolver.py +157 -60
  401. diffusers/schedulers/scheduling_sde_ve.py +19 -19
  402. diffusers/schedulers/scheduling_tcd.py +41 -36
  403. diffusers/schedulers/scheduling_unclip.py +19 -16
  404. diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
  405. diffusers/schedulers/scheduling_utils.py +12 -5
  406. diffusers/schedulers/scheduling_utils_flax.py +1 -3
  407. diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
  408. diffusers/training_utils.py +214 -30
  409. diffusers/utils/__init__.py +17 -1
  410. diffusers/utils/constants.py +3 -0
  411. diffusers/utils/doc_utils.py +1 -0
  412. diffusers/utils/dummy_pt_objects.py +592 -7
  413. diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
  414. diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
  415. diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
  416. diffusers/utils/dynamic_modules_utils.py +34 -29
  417. diffusers/utils/export_utils.py +50 -6
  418. diffusers/utils/hub_utils.py +131 -17
  419. diffusers/utils/import_utils.py +210 -8
  420. diffusers/utils/loading_utils.py +118 -5
  421. diffusers/utils/logging.py +4 -2
  422. diffusers/utils/peft_utils.py +37 -7
  423. diffusers/utils/state_dict_utils.py +13 -2
  424. diffusers/utils/testing_utils.py +193 -11
  425. diffusers/utils/torch_utils.py +4 -0
  426. diffusers/video_processor.py +113 -0
  427. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
  428. diffusers-0.32.2.dist-info/RECORD +550 -0
  429. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
  430. diffusers/loaders/autoencoder.py +0 -146
  431. diffusers/loaders/controlnet.py +0 -136
  432. diffusers/loaders/lora.py +0 -1349
  433. diffusers/models/prior_transformer.py +0 -12
  434. diffusers/models/t5_film_transformer.py +0 -70
  435. diffusers/models/transformer_2d.py +0 -25
  436. diffusers/models/transformer_temporal.py +0 -34
  437. diffusers/models/unet_1d.py +0 -26
  438. diffusers/models/unet_1d_blocks.py +0 -203
  439. diffusers/models/unet_2d.py +0 -27
  440. diffusers/models/unet_2d_blocks.py +0 -375
  441. diffusers/models/unet_2d_condition.py +0 -25
  442. diffusers-0.27.0.dist-info/RECORD +0 -399
  443. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
  444. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
  445. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
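
The largest structural addition is the new `diffusers/quantizers` package (bitsandbytes, GGUF, and torchao backends, visible in entries 350-361 above). A minimal sketch of what it enables, assuming the 0.32 top-level API, a bitsandbytes install, and using the same SD3 repo that appears in the pipeline example below; treat the exact arguments as illustrative:

```py
import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

# 4-bit NF4 weights via the new bitsandbytes quantizer backend.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
transformer = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="transformer",
    quantization_config=quant_config,
)
```

Below is the diff of the new `diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py` (entry 157).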
@@ -0,0 +1,1133 @@
+# Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+from transformers import (
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    T5EncoderModel,
+    T5TokenizerFast,
+)
+
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin
+from ...models.autoencoders import AutoencoderKL
+from ...models.controlnets.controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel
+from ...models.transformers import SD3Transformer2DModel
+from ...schedulers import FlowMatchEulerDiscreteScheduler
+from ...utils import (
+    USE_PEFT_BACKEND,
+    is_torch_xla_available,
+    logging,
+    replace_example_docstring,
+    scale_lora_layers,
+    unscale_lora_layers,
+)
+from ...utils.torch_utils import randn_tensor
+from ..pipeline_utils import DiffusionPipeline
+from ..stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput
+
+
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+
+    XLA_AVAILABLE = True
+else:
+    XLA_AVAILABLE = False
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import torch
+        >>> from diffusers import StableDiffusion3ControlNetPipeline
+        >>> from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel
+        >>> from diffusers.utils import load_image
+
+        >>> controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
+
+        >>> pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+        ...     "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
+        ... )
+        >>> pipe.to("cuda")
+        >>> control_image = load_image(
+        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
+        ... )
+        >>> prompt = "A bird in space"
+        >>> image = pipe(
+        ...     prompt, control_image=control_image, height=1024, width=768, controlnet_conditioning_scale=0.7
+        ... ).images[0]
+        >>> image.save("sd3.png")
+        ```
+"""
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    r"""
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+            must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+            `num_inference_steps` and `sigmas` must be `None`.
+        sigmas (`List[float]`, *optional*):
+            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+            `num_inference_steps` and `timesteps` must be `None`.
+
+    Returns:
+        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+        second element is the number of inference steps.
+    """
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accept_sigmas:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" sigma schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
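For context, a small sketch (not part of the diff) of the two ways this helper is driven; the standalone scheduler instance and the five-entry sigma list are illustrative assumptions:

```py
from diffusers import FlowMatchEulerDiscreteScheduler

scheduler = FlowMatchEulerDiscreteScheduler()

# Default path: the scheduler derives its own spacing for 28 steps.
timesteps, num_steps = retrieve_timesteps(scheduler, num_inference_steps=28, device="cpu")

# Custom path: a hand-picked sigma schedule overrides the spacing, and the
# step count is read back from the schedule's length instead.
timesteps, num_steps = retrieve_timesteps(scheduler, sigmas=[1.0, 0.75, 0.5, 0.25, 0.05], device="cpu")
assert num_steps == 5
```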
+class StableDiffusion3ControlNetPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin):
+    r"""
+    Args:
+        transformer ([`SD3Transformer2DModel`]):
+            Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
+        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+        text_encoder ([`CLIPTextModelWithProjection`]):
+            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
+            specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant,
+            with an additional projection layer that is initialized with a diagonal matrix with the `hidden_size`
+            as its dimension.
+        text_encoder_2 ([`CLIPTextModelWithProjection`]):
+            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
+            specifically the
+            [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
+            variant.
+        text_encoder_3 ([`T5EncoderModel`]):
+            Frozen text-encoder. Stable Diffusion 3 uses
+            [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the
+            [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
+        tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        tokenizer_2 (`CLIPTokenizer`):
+            Second Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        tokenizer_3 (`T5TokenizerFast`):
+            Tokenizer of class
+            [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
+        controlnet ([`SD3ControlNetModel`] or `List[SD3ControlNetModel]` or [`SD3MultiControlNetModel`]):
+            Provides additional conditioning to the `transformer` during the denoising process. If you set multiple
+            ControlNets as a list, the outputs from each ControlNet are added together to create one combined
+            additional conditioning.
+    """
+
+    model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae"
+    _optional_components = []
+    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"]
+
+    def __init__(
+        self,
+        transformer: SD3Transformer2DModel,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModelWithProjection,
+        tokenizer: CLIPTokenizer,
+        text_encoder_2: CLIPTextModelWithProjection,
+        tokenizer_2: CLIPTokenizer,
+        text_encoder_3: T5EncoderModel,
+        tokenizer_3: T5TokenizerFast,
+        controlnet: Union[
+            SD3ControlNetModel, List[SD3ControlNetModel], Tuple[SD3ControlNetModel], SD3MultiControlNetModel
+        ],
+    ):
+        super().__init__()
+        if isinstance(controlnet, (list, tuple)):
+            controlnet = SD3MultiControlNetModel(controlnet)
+        if isinstance(controlnet, SD3MultiControlNetModel):
+            for controlnet_model in controlnet.nets:
+                # for SD3.5 8b controlnet, it shares the pos_embed with the transformer
+                if (
+                    hasattr(controlnet_model.config, "use_pos_embed")
+                    and controlnet_model.config.use_pos_embed is False
+                ):
+                    pos_embed = controlnet_model._get_pos_embed_from_transformer(transformer)
+                    controlnet_model.pos_embed = pos_embed.to(controlnet_model.dtype).to(controlnet_model.device)
+        elif isinstance(controlnet, SD3ControlNetModel):
+            if hasattr(controlnet.config, "use_pos_embed") and controlnet.config.use_pos_embed is False:
+                pos_embed = controlnet._get_pos_embed_from_transformer(transformer)
+                controlnet.pos_embed = pos_embed.to(controlnet.dtype).to(controlnet.device)
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            text_encoder_2=text_encoder_2,
+            text_encoder_3=text_encoder_3,
+            tokenizer=tokenizer,
+            tokenizer_2=tokenizer_2,
+            tokenizer_3=tokenizer_3,
+            transformer=transformer,
+            scheduler=scheduler,
+            controlnet=controlnet,
+        )
+        self.vae_scale_factor = (
+            2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
+        )
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.tokenizer_max_length = (
+            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
+        )
+        self.default_sample_size = (
+            self.transformer.config.sample_size
+            if hasattr(self, "transformer") and self.transformer is not None
+            else 128
+        )
+
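The list/tuple branch in `__init__` above is what makes multi-ControlNet setups work with no extra user code: the constructor wraps the list into an `SD3MultiControlNetModel` whose per-net outputs are summed. A sketch under the assumption that a second InstantX SD3 ControlNet checkpoint is available (the Pose repo ID below is illustrative):

```py
import torch
from diffusers import StableDiffusion3ControlNetPipeline
from diffusers.models import SD3ControlNetModel

canny = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
pose = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Pose", torch_dtype=torch.float16)

pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    controlnet=[canny, pose],  # wrapped into SD3MultiControlNetModel by __init__
    torch_dtype=torch.float16,
)
```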
+    # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds
+    def _get_t5_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]] = None,
+        num_images_per_prompt: int = 1,
+        max_sequence_length: int = 256,
+        device: Optional[torch.device] = None,
+        dtype: Optional[torch.dtype] = None,
+    ):
+        device = device or self._execution_device
+        dtype = dtype or self.text_encoder.dtype
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        if self.text_encoder_3 is None:
+            return torch.zeros(
+                (
+                    batch_size * num_images_per_prompt,
+                    self.tokenizer_max_length,
+                    self.transformer.config.joint_attention_dim,
+                ),
+                device=device,
+                dtype=dtype,
+            )
+
+        text_inputs = self.tokenizer_3(
+            prompt,
+            padding="max_length",
+            max_length=max_sequence_length,
+            truncation=True,
+            add_special_tokens=True,
+            return_tensors="pt",
+        )
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids
+
+        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+            removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+            logger.warning(
+                "The following part of your input was truncated because `max_sequence_length` is set to"
+                f" {max_sequence_length} tokens: {removed_text}"
+            )
+
+        prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0]
+
+        dtype = self.text_encoder_3.dtype
+        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+
+        _, seq_len, _ = prompt_embeds.shape
+
+        # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+        return prompt_embeds
+
+    # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds
+    def _get_clip_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]],
+        num_images_per_prompt: int = 1,
+        device: Optional[torch.device] = None,
+        clip_skip: Optional[int] = None,
+        clip_model_index: int = 0,
+    ):
+        device = device or self._execution_device
+
+        clip_tokenizers = [self.tokenizer, self.tokenizer_2]
+        clip_text_encoders = [self.text_encoder, self.text_encoder_2]
+
+        tokenizer = clip_tokenizers[clip_model_index]
+        text_encoder = clip_text_encoders[clip_model_index]
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        text_inputs = tokenizer(
+            prompt,
+            padding="max_length",
+            max_length=self.tokenizer_max_length,
+            truncation=True,
+            return_tensors="pt",
+        )
+
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+            removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+            logger.warning(
+                "The following part of your input was truncated because CLIP can only handle sequences up to"
+                f" {self.tokenizer_max_length} tokens: {removed_text}"
+            )
+        prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+        pooled_prompt_embeds = prompt_embeds[0]
+
+        if clip_skip is None:
+            prompt_embeds = prompt_embeds.hidden_states[-2]
+        else:
+            prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
+
+        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+        _, seq_len, _ = prompt_embeds.shape
+        # duplicate text embeddings for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1)
+
+        return prompt_embeds, pooled_prompt_embeds
+
+    # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt
+    def encode_prompt(
+        self,
+        prompt: Union[str, List[str]],
+        prompt_2: Union[str, List[str]],
+        prompt_3: Union[str, List[str]],
+        device: Optional[torch.device] = None,
+        num_images_per_prompt: int = 1,
+        do_classifier_free_guidance: bool = True,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        negative_prompt_2: Optional[Union[str, List[str]]] = None,
+        negative_prompt_3: Optional[Union[str, List[str]]] = None,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        clip_skip: Optional[int] = None,
+        max_sequence_length: int = 256,
+        lora_scale: Optional[float] = None,
+    ):
+        r"""
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                prompt to be encoded
+            prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+                used in all text-encoders
+            prompt_3 (`str` or `List[str]`, *optional*):
+                The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is
+                used in all text-encoders
+            device (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            do_classifier_free_guidance (`bool`):
+                whether to use classifier free guidance or not
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
+            negative_prompt_3 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+                `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            lora_scale (`float`, *optional*):
+                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
+        """
+        device = device or self._execution_device
+
+        # set lora scale so that monkey patched LoRA
+        # function of text encoder can correctly access it
+        if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin):
+            self._lora_scale = lora_scale
+
+            # dynamically adjust the LoRA scale
+            if self.text_encoder is not None and USE_PEFT_BACKEND:
+                scale_lora_layers(self.text_encoder, lora_scale)
+            if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
+                scale_lora_layers(self.text_encoder_2, lora_scale)
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        if prompt is not None:
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        if prompt_embeds is None:
+            prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
+            prompt_3 = prompt_3 or prompt
+            prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3
+
+            prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds(
+                prompt=prompt,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+                clip_skip=clip_skip,
+                clip_model_index=0,
+            )
+            prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds(
+                prompt=prompt_2,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+                clip_skip=clip_skip,
+                clip_model_index=1,
+            )
+            clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1)
+
+            t5_prompt_embed = self._get_t5_prompt_embeds(
+                prompt=prompt_3,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+                device=device,
+            )
+
+            clip_prompt_embeds = torch.nn.functional.pad(
+                clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])
+            )
+
+            prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2)
+            pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1)
+
+        if do_classifier_free_guidance and negative_prompt_embeds is None:
+            negative_prompt = negative_prompt or ""
+            negative_prompt_2 = negative_prompt_2 or negative_prompt
+            negative_prompt_3 = negative_prompt_3 or negative_prompt
+
+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+            negative_prompt_3 = (
+                batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3
+            )
+
+            if prompt is not None and type(prompt) is not type(negative_prompt):
+                raise TypeError(
+                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                    f" {type(prompt)}."
+                )
+            elif batch_size != len(negative_prompt):
+                raise ValueError(
+                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+                    " the batch size of `prompt`."
+                )
+
+            negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds(
+                negative_prompt,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+                clip_skip=None,
+                clip_model_index=0,
+            )
+            negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds(
+                negative_prompt_2,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+                clip_skip=None,
+                clip_model_index=1,
+            )
+            negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
+
+            t5_negative_prompt_embed = self._get_t5_prompt_embeds(
+                prompt=negative_prompt_3,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+                device=device,
+            )
+
+            negative_clip_prompt_embeds = torch.nn.functional.pad(
+                negative_clip_prompt_embeds,
+                (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]),
+            )
+
+            negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2)
+            negative_pooled_prompt_embeds = torch.cat(
+                [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1
+            )
+
+        if self.text_encoder is not None:
+            if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder, lora_scale)
+
+        if self.text_encoder_2 is not None:
+            if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder_2, lora_scale)
+
+        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
+
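A sketch of how these four tensors are consumed downstream for classifier-free guidance, mirroring what the SD3 pipelines do in `__call__` (with `pipe` as in the docstring example above; illustrative, not part of the diff):

```py
import torch

(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt(prompt="A bird in space", prompt_2=None, prompt_3=None)

# Unconditional embeddings are stacked in front of the conditional ones so a
# single transformer forward pass serves both CFG branches.
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
```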
547
+ def check_inputs(
548
+ self,
549
+ prompt,
550
+ prompt_2,
551
+ prompt_3,
552
+ height,
553
+ width,
554
+ negative_prompt=None,
555
+ negative_prompt_2=None,
556
+ negative_prompt_3=None,
557
+ prompt_embeds=None,
558
+ negative_prompt_embeds=None,
559
+ pooled_prompt_embeds=None,
560
+ negative_pooled_prompt_embeds=None,
561
+ callback_on_step_end_tensor_inputs=None,
562
+ max_sequence_length=None,
563
+ ):
564
+ if height % 8 != 0 or width % 8 != 0:
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+ if callback_on_step_end_tensor_inputs is not None and not all(
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+ ):
+ raise ValueError(
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+ )
+
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_2 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_3 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+ )
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
+ elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)):
+ raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}")
+
+ if negative_prompt is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+ elif negative_prompt_3 is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
+ raise ValueError(
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+ f" {negative_prompt_embeds.shape}."
+ )
+
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
+ raise ValueError(
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
+ )
+
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
+ raise ValueError(
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
+ )
+
+ if max_sequence_length is not None and max_sequence_length > 512:
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
+
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents
+ def prepare_latents(
+ self,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
+ dtype,
+ device,
+ generator,
+ latents=None,
+ ):
+ if latents is not None:
+ return latents.to(device=device, dtype=dtype)
+
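+ # Latents are sampled at 1/vae_scale_factor of the pixel resolution (a factor of 8 here);
+ # the channel count is supplied by the caller from the transformer config.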
+ shape = (
+ batch_size,
+ num_channels_latents,
+ int(height) // self.vae_scale_factor,
+ int(width) // self.vae_scale_factor,
+ )
+
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+
+ return latents
+
+ def prepare_image(
+ self,
+ image,
+ width,
+ height,
+ batch_size,
+ num_images_per_prompt,
+ device,
+ dtype,
+ do_classifier_free_guidance=False,
+ guess_mode=False,
+ ):
+ if isinstance(image, torch.Tensor):
+ pass
+ else:
+ image = self.image_processor.preprocess(image, height=height, width=width)
+
+ image_batch_size = image.shape[0]
+
+ if image_batch_size == 1:
+ repeat_by = batch_size
+ else:
+ # image batch size is the same as prompt batch size
+ repeat_by = num_images_per_prompt
+
+ image = image.repeat_interleave(repeat_by, dim=0)
+
+ image = image.to(device=device, dtype=dtype)
+
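+ # Under classifier-free guidance the control image is duplicated so that the conditional and
+ # unconditional halves of the batch receive the same conditioning.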
+ if do_classifier_free_guidance and not guess_mode:
+ image = torch.cat([image] * 2)
+
+ return image
+
+ @property
+ def guidance_scale(self):
+ return self._guidance_scale
+
+ @property
+ def clip_skip(self):
+ return self._clip_skip
+
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier-free guidance.
+ @property
+ def do_classifier_free_guidance(self):
+ return self._guidance_scale > 1
+
+ @property
+ def joint_attention_kwargs(self):
+ return self._joint_attention_kwargs
+
+ @property
+ def num_timesteps(self):
+ return self._num_timesteps
+
+ @property
+ def interrupt(self):
+ return self._interrupt
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ prompt_2: Optional[Union[str, List[str]]] = None,
+ prompt_3: Optional[Union[str, List[str]]] = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 28,
+ sigmas: Optional[List[float]] = None,
+ guidance_scale: float = 7.0,
+ control_guidance_start: Union[float, List[float]] = 0.0,
+ control_guidance_end: Union[float, List[float]] = 1.0,
+ control_image: PipelineImageInput = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+ controlnet_pooled_projections: Optional[torch.FloatTensor] = None,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
+ negative_prompt_3: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+ clip_skip: Optional[int] = None,
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ max_sequence_length: int = 256,
+ ):
+ r"""
766
+ Function invoked when calling the pipeline for generation.
767
+
768
+ Args:
769
+ prompt (`str` or `List[str]`, *optional*):
770
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
771
+ instead.
772
+ prompt_2 (`str` or `List[str]`, *optional*):
773
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
774
+ will be used instead
775
+ prompt_3 (`str` or `List[str]`, *optional*):
776
+ The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is
777
+ will be used instead
778
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
779
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
780
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
781
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
782
+ num_inference_steps (`int`, *optional*, defaults to 50):
783
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
784
+ expense of slower inference.
785
+ sigmas (`List[float]`, *optional*):
786
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
787
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
788
+ will be used.
789
+ guidance_scale (`float`, *optional*, defaults to 5.0):
790
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
791
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
792
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
793
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
794
+ usually at the expense of lower image quality.
795
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
796
+ The percentage of total steps at which the ControlNet starts applying.
797
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
798
+ The percentage of total steps at which the ControlNet stops applying.
799
+ control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
800
+ `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
801
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
802
+ specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
803
+ as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or
804
+ width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
805
+ images must be passed as a list such that each element of the list can be correctly batched for input
806
+ to a single ControlNet.
807
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
808
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
809
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
810
+ the corresponding scale as a list.
811
+ controlnet_pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`):
812
+ Embeddings projected from the embeddings of controlnet input conditions.
813
+ negative_prompt (`str` or `List[str]`, *optional*):
814
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
815
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
816
+ less than `1`).
817
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
818
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
819
+ `text_encoder_2`. If not defined, `negative_prompt` is used instead
820
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
821
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
822
+ `text_encoder_3`. If not defined, `negative_prompt` is used instead
823
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
824
+ The number of images to generate per prompt.
825
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
826
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
827
+ to make generation deterministic.
828
+ latents (`torch.FloatTensor`, *optional*):
829
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
830
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
831
+ tensor will ge generated by sampling using the supplied random `generator`.
832
+ prompt_embeds (`torch.FloatTensor`, *optional*):
833
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
834
+ provided, text embeddings will be generated from `prompt` input argument.
835
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
836
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
837
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
838
+ argument.
839
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
840
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
841
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
842
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
843
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
844
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
845
+ input argument.
846
+ output_type (`str`, *optional*, defaults to `"pil"`):
847
+ The output format of the generate image. Choose between
848
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
849
+ return_dict (`bool`, *optional*, defaults to `True`):
850
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
851
+ of a plain tuple.
852
+ joint_attention_kwargs (`dict`, *optional*):
853
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
854
+ `self.processor` in
855
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
856
+ callback_on_step_end (`Callable`, *optional*):
857
+ A function that calls at the end of each denoising steps during the inference. The function is called
858
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
859
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
860
+ `callback_on_step_end_tensor_inputs`.
861
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
862
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
863
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
864
+ `._callback_tensor_inputs` attribute of your pipeline class.
865
+ max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`.
866
+
867
+ Examples:
868
+
869
+ Returns:
870
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
871
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
872
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
873
+ """
874
+
+ height = height or self.default_sample_size * self.vae_scale_factor
+ width = width or self.default_sample_size * self.vae_scale_factor
+
+ controlnet_config = (
+ self.controlnet.config
+ if isinstance(self.controlnet, SD3ControlNetModel)
+ else self.controlnet.nets[0].config
+ )
+
+ # align format for control guidance
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+ mult = len(self.controlnet.nets) if isinstance(self.controlnet, SD3MultiControlNetModel) else 1
+ control_guidance_start, control_guidance_end = (
+ mult * [control_guidance_start],
+ mult * [control_guidance_end],
+ )
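+ # control_guidance_start/end now hold one (start, end) pair per ControlNet, defining the
+ # fraction of the denoising schedule over which each net is active.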
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ prompt_2,
+ prompt_3,
+ height,
+ width,
+ negative_prompt=negative_prompt,
+ negative_prompt_2=negative_prompt_2,
+ negative_prompt_3=negative_prompt_3,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+ max_sequence_length=max_sequence_length,
+ )
+
+ self._guidance_scale = guidance_scale
+ self._clip_skip = clip_skip
+ self._joint_attention_kwargs = joint_attention_kwargs
+ self._interrupt = False
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ dtype = self.transformer.dtype
+
+ (
+ prompt_embeds,
+ negative_prompt_embeds,
+ pooled_prompt_embeds,
+ negative_pooled_prompt_embeds,
+ ) = self.encode_prompt(
+ prompt=prompt,
+ prompt_2=prompt_2,
+ prompt_3=prompt_3,
+ negative_prompt=negative_prompt,
+ negative_prompt_2=negative_prompt_2,
+ negative_prompt_3=negative_prompt_3,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ device=device,
+ clip_skip=self.clip_skip,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ )
+
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+ pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
+
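+ # With classifier-free guidance enabled, the negative embeddings are stacked ahead of the
+ # positive ones so a single forward pass of the transformer serves both branches.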
+ # 3. Prepare control image
+ if controlnet_config.force_zeros_for_pooled_projection:
+ # instantx sd3 controlnet does not apply shift factor
+ vae_shift_factor = 0
+ else:
+ vae_shift_factor = self.vae.config.shift_factor
+ if isinstance(self.controlnet, SD3ControlNetModel):
+ control_image = self.prepare_image(
+ image=control_image,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=False,
+ )
+ height, width = control_image.shape[-2:]
+
+ control_image = self.vae.encode(control_image).latent_dist.sample()
+ control_image = (control_image - vae_shift_factor) * self.vae.config.scaling_factor
+ elif isinstance(self.controlnet, SD3MultiControlNetModel):
+ control_images = []
+
+ for control_image_ in control_image:
+ control_image_ = self.prepare_image(
+ image=control_image_,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=False,
+ )
+
+ control_image_ = self.vae.encode(control_image_).latent_dist.sample()
+ control_image_ = (control_image_ - vae_shift_factor) * self.vae.config.scaling_factor
+
+ control_images.append(control_image_)
+
+ control_image = control_images
+ else:
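+ # presumably unreachable: the constructor only accepts SD3ControlNetModel or SD3MultiControlNetModel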
+ assert False
+
+ # 4. Prepare timesteps
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas)
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+ self._num_timesteps = len(timesteps)
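+ # retrieve_timesteps defers to the scheduler's set_timesteps, honoring custom `sigmas` when given.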
+
+ # 5. Prepare latent variables
+ num_channels_latents = self.transformer.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ # 6. Create tensor stating which controlnets to keep
+ controlnet_keep = []
+ for i in range(len(timesteps)):
+ keeps = [
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
+ for s, e in zip(control_guidance_start, control_guidance_end)
+ ]
+ controlnet_keep.append(keeps[0] if isinstance(self.controlnet, SD3ControlNetModel) else keeps)
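+ # controlnet_keep[i] is 1.0 while step i falls inside a net's (start, end) window and 0.0 outside it.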
+
+ if controlnet_config.force_zeros_for_pooled_projection:
+ # instantx sd3 controlnet uses zero pooled projections
+ controlnet_pooled_projections = torch.zeros_like(pooled_prompt_embeds)
+ else:
+ controlnet_pooled_projections = controlnet_pooled_projections or pooled_prompt_embeds
+
+ if controlnet_config.joint_attention_dim is not None:
+ controlnet_encoder_hidden_states = prompt_embeds
+ else:
+ # SD35 official 8b controlnet does not use encoder_hidden_states
+ controlnet_encoder_hidden_states = None
+
+ # 7. Denoising loop
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ if self.interrupt:
+ continue
+
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timestep = t.expand(latent_model_input.shape[0])
+
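+ # scale each ControlNet's output by its conditioning scale, gated by controlnet_keep for this step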
+ if isinstance(controlnet_keep[i], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+ # controlnet(s) inference
+ control_block_samples = self.controlnet(
+ hidden_states=latent_model_input,
+ timestep=timestep,
+ encoder_hidden_states=controlnet_encoder_hidden_states,
+ pooled_projections=controlnet_pooled_projections,
+ joint_attention_kwargs=self.joint_attention_kwargs,
+ controlnet_cond=control_image,
+ conditioning_scale=cond_scale,
+ return_dict=False,
+ )[0]
+
+ noise_pred = self.transformer(
+ hidden_states=latent_model_input,
+ timestep=timestep,
+ encoder_hidden_states=prompt_embeds,
+ pooled_projections=pooled_prompt_embeds,
+ block_controlnet_hidden_states=control_block_samples,
+ joint_attention_kwargs=self.joint_attention_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents_dtype = latents.dtype
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+ if latents.dtype != latents_dtype:
+ if torch.backends.mps.is_available():
+ # some platforms (e.g. Apple MPS) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+ latents = latents.to(latents_dtype)
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+ negative_pooled_prompt_embeds = callback_outputs.pop(
+ "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
+ )
+
+ # update the progress bar
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+
+ if XLA_AVAILABLE:
+ xm.mark_step()
+
+ if output_type == "latent":
+ image = latents
+
+ else:
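+ # undo the VAE's scaling/shift normalization before decoding latents back to pixels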
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+
+ image = self.vae.decode(latents, return_dict=False)[0]
+ image = self.image_processor.postprocess(image, output_type=output_type)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image,)
+
+ return StableDiffusion3PipelineOutput(images=image)
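+
+ # A minimal usage sketch (model id is illustrative and `canny_image` is assumed to be a
+ # preprocessed PIL control image):
+ #
+ #   import torch
+ #   from diffusers import SD3ControlNetModel, StableDiffusion3ControlNetPipeline
+ #
+ #   controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
+ #   pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+ #       "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
+ #   ).to("cuda")
+ #   image = pipe(prompt="a cat", control_image=canny_image, controlnet_conditioning_scale=0.7).images[0]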