diffusers 0.27.1__py3-none-any.whl → 0.32.2__py3-none-any.whl

Files changed (445)
  1. diffusers/__init__.py +233 -6
  2. diffusers/callbacks.py +209 -0
  3. diffusers/commands/env.py +102 -6
  4. diffusers/configuration_utils.py +45 -16
  5. diffusers/dependency_versions_table.py +4 -3
  6. diffusers/image_processor.py +434 -110
  7. diffusers/loaders/__init__.py +42 -9
  8. diffusers/loaders/ip_adapter.py +626 -36
  9. diffusers/loaders/lora_base.py +900 -0
  10. diffusers/loaders/lora_conversion_utils.py +991 -125
  11. diffusers/loaders/lora_pipeline.py +3812 -0
  12. diffusers/loaders/peft.py +571 -7
  13. diffusers/loaders/single_file.py +405 -173
  14. diffusers/loaders/single_file_model.py +385 -0
  15. diffusers/loaders/single_file_utils.py +1783 -713
  16. diffusers/loaders/textual_inversion.py +41 -23
  17. diffusers/loaders/transformer_flux.py +181 -0
  18. diffusers/loaders/transformer_sd3.py +89 -0
  19. diffusers/loaders/unet.py +464 -540
  20. diffusers/loaders/unet_loader_utils.py +163 -0
  21. diffusers/models/__init__.py +76 -7
  22. diffusers/models/activations.py +65 -10
  23. diffusers/models/adapter.py +53 -53
  24. diffusers/models/attention.py +605 -18
  25. diffusers/models/attention_flax.py +1 -1
  26. diffusers/models/attention_processor.py +4304 -687
  27. diffusers/models/autoencoders/__init__.py +8 -0
  28. diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
  29. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  30. diffusers/models/autoencoders/autoencoder_kl.py +110 -28
  31. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  32. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
  33. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  34. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  35. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  36. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
  37. diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
  38. diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
  39. diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
  40. diffusers/models/autoencoders/vae.py +41 -29
  41. diffusers/models/autoencoders/vq_model.py +182 -0
  42. diffusers/models/controlnet.py +47 -800
  43. diffusers/models/controlnet_flux.py +70 -0
  44. diffusers/models/controlnet_sd3.py +68 -0
  45. diffusers/models/controlnet_sparsectrl.py +116 -0
  46. diffusers/models/controlnets/__init__.py +23 -0
  47. diffusers/models/controlnets/controlnet.py +872 -0
  48. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
  49. diffusers/models/controlnets/controlnet_flux.py +536 -0
  50. diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
  51. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  52. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  53. diffusers/models/controlnets/controlnet_union.py +832 -0
  54. diffusers/models/controlnets/controlnet_xs.py +1946 -0
  55. diffusers/models/controlnets/multicontrolnet.py +183 -0
  56. diffusers/models/downsampling.py +85 -18
  57. diffusers/models/embeddings.py +1856 -158
  58. diffusers/models/embeddings_flax.py +23 -9
  59. diffusers/models/model_loading_utils.py +480 -0
  60. diffusers/models/modeling_flax_pytorch_utils.py +2 -1
  61. diffusers/models/modeling_flax_utils.py +2 -7
  62. diffusers/models/modeling_outputs.py +14 -0
  63. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  64. diffusers/models/modeling_utils.py +611 -146
  65. diffusers/models/normalization.py +361 -20
  66. diffusers/models/resnet.py +18 -23
  67. diffusers/models/transformers/__init__.py +16 -0
  68. diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
  69. diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
  70. diffusers/models/transformers/dit_transformer_2d.py +240 -0
  71. diffusers/models/transformers/dual_transformer_2d.py +9 -8
  72. diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
  73. diffusers/models/transformers/latte_transformer_3d.py +327 -0
  74. diffusers/models/transformers/lumina_nextdit2d.py +340 -0
  75. diffusers/models/transformers/pixart_transformer_2d.py +445 -0
  76. diffusers/models/transformers/prior_transformer.py +13 -13
  77. diffusers/models/transformers/sana_transformer.py +488 -0
  78. diffusers/models/transformers/stable_audio_transformer.py +458 -0
  79. diffusers/models/transformers/t5_film_transformer.py +17 -19
  80. diffusers/models/transformers/transformer_2d.py +297 -187
  81. diffusers/models/transformers/transformer_allegro.py +422 -0
  82. diffusers/models/transformers/transformer_cogview3plus.py +386 -0
  83. diffusers/models/transformers/transformer_flux.py +593 -0
  84. diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
  85. diffusers/models/transformers/transformer_ltx.py +469 -0
  86. diffusers/models/transformers/transformer_mochi.py +499 -0
  87. diffusers/models/transformers/transformer_sd3.py +461 -0
  88. diffusers/models/transformers/transformer_temporal.py +21 -19
  89. diffusers/models/unets/unet_1d.py +8 -8
  90. diffusers/models/unets/unet_1d_blocks.py +31 -31
  91. diffusers/models/unets/unet_2d.py +17 -10
  92. diffusers/models/unets/unet_2d_blocks.py +225 -149
  93. diffusers/models/unets/unet_2d_condition.py +41 -40
  94. diffusers/models/unets/unet_2d_condition_flax.py +6 -5
  95. diffusers/models/unets/unet_3d_blocks.py +192 -1057
  96. diffusers/models/unets/unet_3d_condition.py +22 -27
  97. diffusers/models/unets/unet_i2vgen_xl.py +22 -18
  98. diffusers/models/unets/unet_kandinsky3.py +2 -2
  99. diffusers/models/unets/unet_motion_model.py +1413 -89
  100. diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
  101. diffusers/models/unets/unet_stable_cascade.py +19 -18
  102. diffusers/models/unets/uvit_2d.py +2 -2
  103. diffusers/models/upsampling.py +95 -26
  104. diffusers/models/vq_model.py +12 -164
  105. diffusers/optimization.py +1 -1
  106. diffusers/pipelines/__init__.py +202 -3
  107. diffusers/pipelines/allegro/__init__.py +48 -0
  108. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  109. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  110. diffusers/pipelines/amused/pipeline_amused.py +12 -12
  111. diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
  112. diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
  113. diffusers/pipelines/animatediff/__init__.py +8 -0
  114. diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
  115. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
  116. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
  117. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
  118. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
  119. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
  120. diffusers/pipelines/animatediff/pipeline_output.py +3 -2
  121. diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
  122. diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
  123. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
  124. diffusers/pipelines/aura_flow/__init__.py +48 -0
  125. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
  126. diffusers/pipelines/auto_pipeline.py +196 -28
  127. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  128. diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
  129. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
  130. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
  131. diffusers/pipelines/cogvideo/__init__.py +54 -0
  132. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
  133. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
  134. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
  135. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
  136. diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
  137. diffusers/pipelines/cogview3/__init__.py +47 -0
  138. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
  139. diffusers/pipelines/cogview3/pipeline_output.py +21 -0
  140. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
  141. diffusers/pipelines/controlnet/__init__.py +86 -80
  142. diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
  143. diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
  144. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
  145. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
  146. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
  147. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
  148. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
  149. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
  150. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  151. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  152. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  153. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  154. diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
  155. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
  156. diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
  157. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
  158. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
  159. diffusers/pipelines/controlnet_xs/__init__.py +68 -0
  160. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
  161. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
  162. diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
  163. diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
  164. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
  165. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
  166. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
  167. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
  168. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
  169. diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
  170. diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
  171. diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
  172. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
  173. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
  174. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
  175. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
  176. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
  177. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
  178. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
  179. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
  180. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
  181. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
  182. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
  183. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
  184. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
  185. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
  186. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
  187. diffusers/pipelines/dit/pipeline_dit.py +7 -4
  188. diffusers/pipelines/flux/__init__.py +69 -0
  189. diffusers/pipelines/flux/modeling_flux.py +47 -0
  190. diffusers/pipelines/flux/pipeline_flux.py +957 -0
  191. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  192. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  193. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  194. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
  195. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
  196. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
  197. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  198. diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
  199. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
  200. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  201. diffusers/pipelines/flux/pipeline_output.py +37 -0
  202. diffusers/pipelines/free_init_utils.py +41 -38
  203. diffusers/pipelines/free_noise_utils.py +596 -0
  204. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  205. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  206. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  207. diffusers/pipelines/hunyuandit/__init__.py +48 -0
  208. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
  209. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
  210. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
  211. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
  212. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
  213. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
  214. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
  215. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
  216. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
  217. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
  218. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
  219. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
  220. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
  221. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
  222. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
  223. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
  224. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
  225. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
  226. diffusers/pipelines/kolors/__init__.py +54 -0
  227. diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
  228. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
  229. diffusers/pipelines/kolors/pipeline_output.py +21 -0
  230. diffusers/pipelines/kolors/text_encoder.py +889 -0
  231. diffusers/pipelines/kolors/tokenizer.py +338 -0
  232. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
  233. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
  234. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
  235. diffusers/pipelines/latte/__init__.py +48 -0
  236. diffusers/pipelines/latte/pipeline_latte.py +881 -0
  237. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
  238. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
  239. diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
  240. diffusers/pipelines/ltx/__init__.py +50 -0
  241. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  242. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  243. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  244. diffusers/pipelines/lumina/__init__.py +48 -0
  245. diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
  246. diffusers/pipelines/marigold/__init__.py +50 -0
  247. diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
  248. diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
  249. diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
  250. diffusers/pipelines/mochi/__init__.py +48 -0
  251. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  252. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  253. diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
  254. diffusers/pipelines/pag/__init__.py +80 -0
  255. diffusers/pipelines/pag/pag_utils.py +243 -0
  256. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
  257. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
  258. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
  259. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
  260. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
  261. diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
  262. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
  263. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  264. diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
  265. diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
  266. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  267. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
  268. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
  269. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  270. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
  271. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
  272. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
  273. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
  274. diffusers/pipelines/pia/pipeline_pia.py +74 -164
  275. diffusers/pipelines/pipeline_flax_utils.py +5 -10
  276. diffusers/pipelines/pipeline_loading_utils.py +515 -53
  277. diffusers/pipelines/pipeline_utils.py +411 -222
  278. diffusers/pipelines/pixart_alpha/__init__.py +8 -1
  279. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
  280. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
  281. diffusers/pipelines/sana/__init__.py +47 -0
  282. diffusers/pipelines/sana/pipeline_output.py +21 -0
  283. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  284. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
  285. diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
  286. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
  287. diffusers/pipelines/shap_e/renderer.py +1 -1
  288. diffusers/pipelines/stable_audio/__init__.py +50 -0
  289. diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
  290. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
  291. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
  292. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
  293. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
  294. diffusers/pipelines/stable_diffusion/__init__.py +0 -1
  295. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
  296. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  297. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
  298. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
  299. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
  300. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
  301. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
  302. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
  303. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
  304. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
  305. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
  306. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
  307. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
  308. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
  309. diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
  310. diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
  311. diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
  312. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
  313. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
  314. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
  315. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
  316. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
  317. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
  318. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
  319. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
  320. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
  321. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
  322. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
  323. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
  324. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  325. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
  326. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
  327. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
  328. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
  329. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
  330. diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
  331. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
  332. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
  333. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
  334. diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
  335. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
  336. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
  337. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
  338. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
  339. diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
  340. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
  341. diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
  342. diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
  343. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
  344. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
  345. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
  346. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
  347. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
  348. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
  349. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
  350. diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
  351. diffusers/quantizers/auto.py +139 -0
  352. diffusers/quantizers/base.py +233 -0
  353. diffusers/quantizers/bitsandbytes/__init__.py +2 -0
  354. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
  355. diffusers/quantizers/bitsandbytes/utils.py +306 -0
  356. diffusers/quantizers/gguf/__init__.py +1 -0
  357. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  358. diffusers/quantizers/gguf/utils.py +456 -0
  359. diffusers/quantizers/quantization_config.py +669 -0
  360. diffusers/quantizers/torchao/__init__.py +15 -0
  361. diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
  362. diffusers/schedulers/__init__.py +12 -2
  363. diffusers/schedulers/deprecated/__init__.py +1 -1
  364. diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
  365. diffusers/schedulers/scheduling_amused.py +5 -5
  366. diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
  367. diffusers/schedulers/scheduling_consistency_models.py +23 -25
  368. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
  369. diffusers/schedulers/scheduling_ddim.py +27 -26
  370. diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
  371. diffusers/schedulers/scheduling_ddim_flax.py +2 -1
  372. diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
  373. diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
  374. diffusers/schedulers/scheduling_ddpm.py +27 -30
  375. diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
  376. diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
  377. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
  378. diffusers/schedulers/scheduling_deis_multistep.py +150 -50
  379. diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
  380. diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
  381. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
  382. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
  383. diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
  384. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
  385. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
  386. diffusers/schedulers/scheduling_edm_euler.py +62 -39
  387. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
  388. diffusers/schedulers/scheduling_euler_discrete.py +255 -74
  389. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
  390. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
  391. diffusers/schedulers/scheduling_heun_discrete.py +174 -46
  392. diffusers/schedulers/scheduling_ipndm.py +9 -9
  393. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
  394. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
  395. diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
  396. diffusers/schedulers/scheduling_lcm.py +23 -29
  397. diffusers/schedulers/scheduling_lms_discrete.py +105 -28
  398. diffusers/schedulers/scheduling_pndm.py +20 -20
  399. diffusers/schedulers/scheduling_repaint.py +21 -21
  400. diffusers/schedulers/scheduling_sasolver.py +157 -60
  401. diffusers/schedulers/scheduling_sde_ve.py +19 -19
  402. diffusers/schedulers/scheduling_tcd.py +41 -36
  403. diffusers/schedulers/scheduling_unclip.py +19 -16
  404. diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
  405. diffusers/schedulers/scheduling_utils.py +12 -5
  406. diffusers/schedulers/scheduling_utils_flax.py +1 -3
  407. diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
  408. diffusers/training_utils.py +214 -30
  409. diffusers/utils/__init__.py +17 -1
  410. diffusers/utils/constants.py +3 -0
  411. diffusers/utils/doc_utils.py +1 -0
  412. diffusers/utils/dummy_pt_objects.py +592 -7
  413. diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
  414. diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
  415. diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
  416. diffusers/utils/dynamic_modules_utils.py +34 -29
  417. diffusers/utils/export_utils.py +50 -6
  418. diffusers/utils/hub_utils.py +131 -17
  419. diffusers/utils/import_utils.py +210 -8
  420. diffusers/utils/loading_utils.py +118 -5
  421. diffusers/utils/logging.py +4 -2
  422. diffusers/utils/peft_utils.py +37 -7
  423. diffusers/utils/state_dict_utils.py +13 -2
  424. diffusers/utils/testing_utils.py +193 -11
  425. diffusers/utils/torch_utils.py +4 -0
  426. diffusers/video_processor.py +113 -0
  427. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
  428. diffusers-0.32.2.dist-info/RECORD +550 -0
  429. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
  430. diffusers/loaders/autoencoder.py +0 -146
  431. diffusers/loaders/controlnet.py +0 -136
  432. diffusers/loaders/lora.py +0 -1349
  433. diffusers/models/prior_transformer.py +0 -12
  434. diffusers/models/t5_film_transformer.py +0 -70
  435. diffusers/models/transformer_2d.py +0 -25
  436. diffusers/models/transformer_temporal.py +0 -34
  437. diffusers/models/unet_1d.py +0 -26
  438. diffusers/models/unet_1d_blocks.py +0 -203
  439. diffusers/models/unet_2d.py +0 -27
  440. diffusers/models/unet_2d_blocks.py +0 -375
  441. diffusers/models/unet_2d_condition.py +0 -25
  442. diffusers-0.27.1.dist-info/RECORD +0 -399
  443. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
  444. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
  445. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
@@ -16,29 +16,37 @@ from pathlib import Path
 from typing import Dict, List, Optional, Union
 
 import torch
+import torch.nn.functional as F
 from huggingface_hub.utils import validate_hf_hub_args
 from safetensors import safe_open
 
-from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT
+from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict
 from ..utils import (
+    USE_PEFT_BACKEND,
     _get_model_file,
     is_accelerate_available,
     is_torch_version,
     is_transformers_available,
     logging,
 )
+from .unet_loader_utils import _maybe_expand_lora_scales
 
 
 if is_transformers_available():
-    from transformers import (
-        CLIPImageProcessor,
-        CLIPVisionModelWithProjection,
-    )
+    from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, SiglipImageProcessor, SiglipVisionModel
+
+    from ..models.attention_processor import (
+        AttnProcessor,
+        AttnProcessor2_0,
+        FluxAttnProcessor2_0,
+        FluxIPAdapterJointAttnProcessor2_0,
+        IPAdapterAttnProcessor,
+        IPAdapterAttnProcessor2_0,
+        IPAdapterXFormersAttnProcessor,
+        JointAttnProcessor2_0,
+        SD3IPAdapterJointAttnProcessor2_0,
+    )
 
-    from ..models.attention_processor import (
-        IPAdapterAttnProcessor,
-        IPAdapterAttnProcessor2_0,
-    )
 
 logger = logging.get_logger(__name__)
 
@@ -67,26 +75,25 @@ class IPAdapterMixin:
                 - A [torch state
                   dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
             subfolder (`str` or `List[str]`):
-                The subfolder location of a model file within a larger model repository on the Hub or locally.
-                If a list is passed, it should have the same length as `weight_name`.
+                The subfolder location of a model file within a larger model repository on the Hub or locally. If a
+                list is passed, it should have the same length as `weight_name`.
             weight_name (`str` or `List[str]`):
                 The name of the weight file to load. If a list is passed, it should have the same length as
-                `weight_name`.
+                `subfolder`.
             image_encoder_folder (`str`, *optional*, defaults to `image_encoder`):
                 The subfolder location of the image encoder within a larger model repository on the Hub or locally.
-                Pass `None` to not load the image encoder. If the image encoder is located in a folder inside `subfolder`,
-                you only need to pass the name of the folder that contains image encoder weights, e.g. `image_encoder_folder="image_encoder"`.
-                If the image encoder is located in a folder other than `subfolder`, you should pass the path to the folder that contains image encoder weights,
-                for example, `image_encoder_folder="different_subfolder/image_encoder"`.
+                Pass `None` to not load the image encoder. If the image encoder is located in a folder inside
+                `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g.
+                `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than
+                `subfolder`, you should pass the path to the folder that contains image encoder weights, for example,
+                `image_encoder_folder="different_subfolder/image_encoder"`.
             cache_dir (`Union[str, os.PathLike]`, *optional*):
                 Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                 is not used.
             force_download (`bool`, *optional*, defaults to `False`):
                 Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                 cached versions if they exist.
-            resume_download (`bool`, *optional*, defaults to `False`):
-                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
-                incompletely downloaded files are deleted.
+
             proxies (`Dict[str, str]`, *optional*):
                 A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
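
Note how the updated docstring ties `weight_name` to `subfolder`: both now accept equal-length lists, which is what enables loading several IP-Adapters in one call. A minimal sketch of the list form (the `h94/IP-Adapter` repo layout and file names are assumptions for illustration, not part of this diff):

```py
import torch
from diffusers import AutoPipelineForText2Image

# Hypothetical checkpoint ids; any pipeline exposing IPAdapterMixin works the same way.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder=["models", "models"],  # one entry per weight file
    weight_name=["ip-adapter_sd15.safetensors", "ip-adapter-plus_sd15.safetensors"],
)
pipeline.set_ip_adapter_scale([0.6, 0.4])  # one scale per loaded adapter
```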
@@ -129,7 +136,6 @@ class IPAdapterMixin:
         # Load the main state dict first.
         cache_dir = kwargs.pop("cache_dir", None)
         force_download = kwargs.pop("force_download", False)
-        resume_download = kwargs.pop("resume_download", False)
         proxies = kwargs.pop("proxies", None)
         local_files_only = kwargs.pop("local_files_only", None)
         token = kwargs.pop("token", None)
@@ -165,7 +171,6 @@ class IPAdapterMixin:
                     weights_name=weight_name,
                     cache_dir=cache_dir,
                     force_download=force_download,
-                    resume_download=resume_download,
                     proxies=proxies,
                     local_files_only=local_files_only,
                     token=token,
@@ -182,12 +187,12 @@ class IPAdapterMixin:
                            elif key.startswith("ip_adapter."):
                                state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
                 else:
-                    state_dict = torch.load(model_file, map_location="cpu")
+                    state_dict = load_state_dict(model_file)
             else:
                 state_dict = pretrained_model_name_or_path_or_dict
 
             keys = list(state_dict.keys())
-            if keys != ["image_proj", "ip_adapter"]:
+            if "image_proj" not in keys and "ip_adapter" not in keys:
                 raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
 
             state_dicts.append(state_dict)
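
The relaxed membership test above accepts any state dict that exposes at least one of the two expected groups, rather than requiring exactly `["image_proj", "ip_adapter"]` in that order. For orientation, a sketch of the layout being validated (group names are real, the weights are elided):

```py
# Illustrative structure only: each group maps parameter names to tensors.
ip_adapter_state_dict = {
    "image_proj": {},  # weights of the image projection model
    "ip_adapter": {},  # per-attention-layer weights (to_k_ip / to_v_ip)
}
```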
@@ -206,6 +211,8 @@ class IPAdapterMixin:
                        pretrained_model_name_or_path_or_dict,
                        subfolder=image_encoder_subfolder,
                        low_cpu_mem_usage=low_cpu_mem_usage,
+                        cache_dir=cache_dir,
+                        local_files_only=local_files_only,
                    ).to(self.device, dtype=self.dtype)
                    self.register_modules(image_encoder=image_encoder)
                 else:
@@ -220,34 +227,83 @@ class IPAdapterMixin:
 
         # create feature extractor if it has not been registered to the pipeline yet
         if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
-            feature_extractor = CLIPImageProcessor()
+            # FaceID IP adapters don't need the image encoder so it's not present, in this case we default to 224
+            default_clip_size = 224
+            clip_image_size = (
+                self.image_encoder.config.image_size if self.image_encoder is not None else default_clip_size
+            )
+            feature_extractor = CLIPImageProcessor(size=clip_image_size, crop_size=clip_image_size)
             self.register_modules(feature_extractor=feature_extractor)
 
         # load ip-adapter into unet
         unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
         unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)
 
+        extra_loras = unet._load_ip_adapter_loras(state_dicts)
+        if extra_loras != {}:
+            if not USE_PEFT_BACKEND:
+                logger.warning("PEFT backend is required to load these weights.")
+            else:
+                # apply the IP Adapter Face ID LoRA weights
+                peft_config = getattr(unet, "peft_config", {})
+                for k, lora in extra_loras.items():
+                    if f"faceid_{k}" not in peft_config:
+                        self.load_lora_weights(lora, adapter_name=f"faceid_{k}")
+                        self.set_adapters([f"faceid_{k}"], adapter_weights=[1.0])
+
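
The new `_load_ip_adapter_loras` path is what makes FaceID checkpoints usable: their LoRA weights are applied through PEFT under adapter names like `faceid_0`. A hedged sketch of such a load (the checkpoint names follow the `h94/IP-Adapter-FaceID` release and are assumptions here; `pip install peft` is required for the LoRA branch):

```py
# Hypothetical FaceID load; FaceID checkpoints ship no image encoder,
# so `image_encoder_folder=None` and embeddings are supplied at call time.
pipeline.load_ip_adapter(
    "h94/IP-Adapter-FaceID",
    subfolder=None,
    weight_name="ip-adapter-faceid_sd15.bin",
    image_encoder_folder=None,
)
# Face embeddings then go in via `ip_adapter_image_embeds` when calling the pipeline.
```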
     def set_ip_adapter_scale(self, scale):
         """
-        Sets the conditioning scale between text and image.
+        Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for
+        granular control over each IP-Adapter behavior. A config can be a float or a dictionary.
 
         Example:
 
         ```py
-        pipeline.set_ip_adapter_scale(0.5)
+        # To use original IP-Adapter
+        scale = 1.0
+        pipeline.set_ip_adapter_scale(scale)
+
+        # To use style block only
+        scale = {
+            "up": {"block_0": [0.0, 1.0, 0.0]},
+        }
+        pipeline.set_ip_adapter_scale(scale)
+
+        # To use style+layout blocks
+        scale = {
+            "down": {"block_2": [0.0, 1.0]},
+            "up": {"block_0": [0.0, 1.0, 0.0]},
+        }
+        pipeline.set_ip_adapter_scale(scale)
+
+        # To use style and layout from 2 reference images
+        scales = [{"down": {"block_2": [0.0, 1.0]}}, {"up": {"block_0": [0.0, 1.0, 0.0]}}]
+        pipeline.set_ip_adapter_scale(scales)
         ```
         """
         unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
-        for attn_processor in unet.attn_processors.values():
-            if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
-                if not isinstance(scale, list):
-                    scale = [scale] * len(attn_processor.scale)
-                if len(attn_processor.scale) != len(scale):
+        if not isinstance(scale, list):
+            scale = [scale]
+        scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0)
+
+        for attn_name, attn_processor in unet.attn_processors.items():
+            if isinstance(
+                attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor)
+            ):
+                if len(scale_configs) != len(attn_processor.scale):
                     raise ValueError(
-                        f"`scale` should be a list of same length as the number if ip-adapters "
-                        f"Expected {len(attn_processor.scale)} but got {len(scale)}."
+                        f"Cannot assign {len(scale_configs)} scale_configs to "
+                        f"{len(attn_processor.scale)} IP-Adapter."
                     )
-                attn_processor.scale = scale
+                elif len(scale_configs) == 1:
+                    scale_configs = scale_configs * len(attn_processor.scale)
+                for i, scale_config in enumerate(scale_configs):
+                    if isinstance(scale_config, dict):
+                        for k, s in scale_config.items():
+                            if attn_name.startswith(k):
+                                attn_processor.scale[i] = s
+                    else:
+                        attn_processor.scale[i] = scale_config
 
     def unload_ip_adapter(self):
         """
@@ -275,7 +331,541 @@ class IPAdapterMixin:
 
         # remove hidden encoder
         self.unet.encoder_hid_proj = None
-        self.config.encoder_hid_dim_type = None
+        self.unet.config.encoder_hid_dim_type = None
+
+        # Kolors: restore `encoder_hid_proj` with `text_encoder_hid_proj`
+        if hasattr(self.unet, "text_encoder_hid_proj") and self.unet.text_encoder_hid_proj is not None:
+            self.unet.encoder_hid_proj = self.unet.text_encoder_hid_proj
+            self.unet.text_encoder_hid_proj = None
+            self.unet.config.encoder_hid_dim_type = "text_proj"
 
         # restore original Unet attention processors layers
-        self.unet.set_default_attn_processor()
+        attn_procs = {}
+        for name, value in self.unet.attn_processors.items():
+            attn_processor_class = (
+                AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnProcessor()
+            )
+            attn_procs[name] = (
+                attn_processor_class
+                if isinstance(
+                    value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor)
+                )
+                else value.__class__()
+            )
+        self.unet.set_attn_processor(attn_procs)
+
+
+class FluxIPAdapterMixin:
+    """Mixin for handling Flux IP Adapters."""
+
+    @validate_hf_hub_args
+    def load_ip_adapter(
+        self,
+        pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]],
+        weight_name: Union[str, List[str]],
+        subfolder: Optional[Union[str, List[str]]] = "",
+        image_encoder_pretrained_model_name_or_path: Optional[str] = "image_encoder",
+        image_encoder_subfolder: Optional[str] = "",
+        image_encoder_dtype: torch.dtype = torch.float16,
+        **kwargs,
+    ):
+        """
+        Parameters:
+            pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`):
+                Can be either:
+
+                - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
+                  the Hub.
+                - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
+                  with [`ModelMixin.save_pretrained`].
+                - A [torch state
+                  dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
+            subfolder (`str` or `List[str]`):
+                The subfolder location of a model file within a larger model repository on the Hub or locally. If a
+                list is passed, it should have the same length as `weight_name`.
+            weight_name (`str` or `List[str]`):
+                The name of the weight file to load. If a list is passed, it should have the same length as
+                `weight_name`.
+            image_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `./image_encoder`):
+                Can be either:
+
+                - A string, the *model id* (for example `openai/clip-vit-large-patch14`) of a pretrained model
+                  hosted on the Hub.
+                - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
+                  with [`ModelMixin.save_pretrained`].
+            cache_dir (`Union[str, os.PathLike]`, *optional*):
+                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
+                is not used.
+            force_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+                cached versions if they exist.
+
+            proxies (`Dict[str, str]`, *optional*):
+                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
+                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+            local_files_only (`bool`, *optional*, defaults to `False`):
+                Whether to only load local model weights and configuration files or not. If set to `True`, the model
+                won't be downloaded from the Hub.
+            token (`str` or *bool*, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
+                `diffusers-cli login` (stored in `~/.huggingface`) is used.
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
+                allowed by Git.
+            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
+                Speed up model loading only loading the pretrained weights and not initializing the weights. This also
+                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
+                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
+                argument to `True` will raise an error.
+        """
+
+        # handle the list inputs for multiple IP Adapters
+        if not isinstance(weight_name, list):
+            weight_name = [weight_name]
+
+        if not isinstance(pretrained_model_name_or_path_or_dict, list):
+            pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict]
+        if len(pretrained_model_name_or_path_or_dict) == 1:
+            pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name)
+
+        if not isinstance(subfolder, list):
+            subfolder = [subfolder]
+        if len(subfolder) == 1:
+            subfolder = subfolder * len(weight_name)
+
+        if len(weight_name) != len(pretrained_model_name_or_path_or_dict):
+            raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.")
+
+        if len(weight_name) != len(subfolder):
+            raise ValueError("`weight_name` and `subfolder` must have the same length.")
+
+        # Load the main state dict first.
+        cache_dir = kwargs.pop("cache_dir", None)
+        force_download = kwargs.pop("force_download", False)
+        proxies = kwargs.pop("proxies", None)
+        local_files_only = kwargs.pop("local_files_only", None)
+        token = kwargs.pop("token", None)
+        revision = kwargs.pop("revision", None)
+        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
+
+        if low_cpu_mem_usage and not is_accelerate_available():
+            low_cpu_mem_usage = False
+            logger.warning(
+                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
+                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
+                " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
+                " install accelerate\n```\n."
+            )
+
+        if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
+            raise NotImplementedError(
+                "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
+                " `low_cpu_mem_usage=False`."
+            )
+
+        user_agent = {
+            "file_type": "attn_procs_weights",
+            "framework": "pytorch",
+        }
+        state_dicts = []
+        for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
+            pretrained_model_name_or_path_or_dict, weight_name, subfolder
+        ):
+            if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+                model_file = _get_model_file(
+                    pretrained_model_name_or_path_or_dict,
+                    weights_name=weight_name,
+                    cache_dir=cache_dir,
+                    force_download=force_download,
+                    proxies=proxies,
+                    local_files_only=local_files_only,
+                    token=token,
+                    revision=revision,
+                    subfolder=subfolder,
+                    user_agent=user_agent,
+                )
+                if weight_name.endswith(".safetensors"):
+                    state_dict = {"image_proj": {}, "ip_adapter": {}}
+                    with safe_open(model_file, framework="pt", device="cpu") as f:
+                        image_proj_keys = ["ip_adapter_proj_model.", "image_proj."]
+                        ip_adapter_keys = ["double_blocks.", "ip_adapter."]
+                        for key in f.keys():
+                            if any(key.startswith(prefix) for prefix in image_proj_keys):
+                                diffusers_name = ".".join(key.split(".")[1:])
+                                state_dict["image_proj"][diffusers_name] = f.get_tensor(key)
+                            elif any(key.startswith(prefix) for prefix in ip_adapter_keys):
+                                diffusers_name = (
+                                    ".".join(key.split(".")[1:])
+                                    .replace("ip_adapter_double_stream_k_proj", "to_k_ip")
+                                    .replace("ip_adapter_double_stream_v_proj", "to_v_ip")
+                                    .replace("processor.", "")
+                                )
+                                state_dict["ip_adapter"][diffusers_name] = f.get_tensor(key)
+                else:
+                    state_dict = load_state_dict(model_file)
+            else:
+                state_dict = pretrained_model_name_or_path_or_dict
+
+            keys = list(state_dict.keys())
+            if keys != ["image_proj", "ip_adapter"]:
+                raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
+
+            state_dicts.append(state_dict)
+
+        # load CLIP image encoder here if it has not been registered to the pipeline yet
+        if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
+            if image_encoder_pretrained_model_name_or_path is not None:
+                if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+                    logger.info(f"loading image_encoder from {image_encoder_pretrained_model_name_or_path}")
+                    image_encoder = (
+                        CLIPVisionModelWithProjection.from_pretrained(
+                            image_encoder_pretrained_model_name_or_path,
+                            subfolder=image_encoder_subfolder,
+                            low_cpu_mem_usage=low_cpu_mem_usage,
+                            cache_dir=cache_dir,
+                            local_files_only=local_files_only,
+                        )
+                        .to(self.device, dtype=image_encoder_dtype)
+                        .eval()
+                    )
+                    self.register_modules(image_encoder=image_encoder)
+                else:
+                    raise ValueError(
+                        "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict."
+                    )
+            else:
+                logger.warning(
+                    "image_encoder is not loaded since `image_encoder_folder=None` passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter."
+                    "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead."
+                )
+
+        # create feature extractor if it has not been registered to the pipeline yet
+        if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
+            # FaceID IP adapters don't need the image encoder so it's not present, in this case we default to 224
+            default_clip_size = 224
+            clip_image_size = (
+                self.image_encoder.config.image_size if self.image_encoder is not None else default_clip_size
+            )
+            feature_extractor = CLIPImageProcessor(size=clip_image_size, crop_size=clip_image_size)
+            self.register_modules(feature_extractor=feature_extractor)
+
+        # load ip-adapter into transformer
+        self.transformer._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)
+
+    def set_ip_adapter_scale(self, scale: Union[float, List[float], List[List[float]]]):
+        """
+        Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for
+        granular control over each IP-Adapter behavior. A config can be a float or a list.
+
+        `float` is converted to list and repeated for the number of blocks and the number of IP adapters. `List[float]`
+        length match the number of blocks, it is repeated for each IP adapter. `List[List[float]]` must match the
+        number of IP adapters and each must match the number of blocks.
+
+        Example:
+
+        ```py
+        # To use original IP-Adapter
+        scale = 1.0
+        pipeline.set_ip_adapter_scale(scale)
+
+
+        def LinearStrengthModel(start, finish, size):
+            return [(start + (finish - start) * (i / (size - 1))) for i in range(size)]
+
+
+        ip_strengths = LinearStrengthModel(0.3, 0.92, 19)
+        pipeline.set_ip_adapter_scale(ip_strengths)
+        ```
+        """
+        transformer = self.transformer
+        if not isinstance(scale, list):
+            scale = [[scale] * transformer.config.num_layers]
+        elif isinstance(scale, list) and isinstance(scale[0], int) or isinstance(scale[0], float):
+            if len(scale) != transformer.config.num_layers:
+                raise ValueError(f"Expected list of {transformer.config.num_layers} scales, got {len(scale)}.")
+            scale = [scale]
+
+        scale_configs = scale
+
+        key_id = 0
+        for attn_name, attn_processor in transformer.attn_processors.items():
+            if isinstance(attn_processor, (FluxIPAdapterJointAttnProcessor2_0)):
+                if len(scale_configs) != len(attn_processor.scale):
+                    raise ValueError(
+                        f"Cannot assign {len(scale_configs)} scale_configs to "
+                        f"{len(attn_processor.scale)} IP-Adapter."
+                    )
+                elif len(scale_configs) == 1:
+                    scale_configs = scale_configs * len(attn_processor.scale)
+                for i, scale_config in enumerate(scale_configs):
+                    attn_processor.scale[i] = scale_config[key_id]
+                key_id += 1
+
+    def unload_ip_adapter(self):
+        """
+        Unloads the IP Adapter weights
+
+        Examples:
+
+        ```python
+        >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
+        >>> pipeline.unload_ip_adapter()
+        >>> ...
+        ```
+        """
+        # remove CLIP image encoder
+        if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
+            self.image_encoder = None
+            self.register_to_config(image_encoder=[None, None])
+
+        # remove feature extractor only when safety_checker is None as safety_checker uses
+        # the feature_extractor later
+        if not hasattr(self, "safety_checker"):
+            if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
+                self.feature_extractor = None
+                self.register_to_config(feature_extractor=[None, None])
+
+        # remove hidden encoder
+        self.transformer.encoder_hid_proj = None
+        self.transformer.config.encoder_hid_dim_type = None
+
+        # restore original Transformer attention processors layers
+        attn_procs = {}
+        for name, value in self.transformer.attn_processors.items():
+            attn_processor_class = FluxAttnProcessor2_0()
+            attn_procs[name] = (
+                attn_processor_class if isinstance(value, (FluxIPAdapterJointAttnProcessor2_0)) else value.__class__()
+            )
+        self.transformer.set_attn_processor(attn_procs)
+
+
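End-to-end, the new Flux mixin is driven the same way as the UNet one. A hedged sketch (the model ids mirror the Black Forest Labs and XLabs releases and are assumptions, not pinned by this diff):

```py
import torch
from diffusers import FluxPipeline

# Hypothetical checkpoint ids for illustration.
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_ip_adapter(
    "XLabs-AI/flux-ip-adapter",
    weight_name="ip_adapter.safetensors",
    image_encoder_pretrained_model_name_or_path="openai/clip-vit-large-patch14",
)
pipe.set_ip_adapter_scale(1.0)  # a float is broadcast to every transformer block
```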
642
+ class SD3IPAdapterMixin:
643
+ """Mixin for handling StableDiffusion 3 IP Adapters."""
644
+
645
+ @property
646
+ def is_ip_adapter_active(self) -> bool:
647
+ """Checks if IP-Adapter is loaded and scale > 0.
648
+
649
+ IP-Adapter scale controls the influence of the image prompt versus text prompt. When this value is set to 0,
650
+ the image context is irrelevant.
651
+
652
+ Returns:
653
+ `bool`: True when IP-Adapter is loaded and any layer has scale > 0.
654
+ """
655
+ scales = [
656
+ attn_proc.scale
657
+ for attn_proc in self.transformer.attn_processors.values()
658
+ if isinstance(attn_proc, SD3IPAdapterJointAttnProcessor2_0)
659
+ ]
660
+
661
+ return len(scales) > 0 and any(scale > 0 for scale in scales)
662
+
663
+ @validate_hf_hub_args
664
+ def load_ip_adapter(
665
+ self,
666
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
667
+ weight_name: str = "ip-adapter.safetensors",
668
+ subfolder: Optional[str] = None,
669
+ image_encoder_folder: Optional[str] = "image_encoder",
670
+ **kwargs,
671
+ ) -> None:
672
+ """
673
+ Parameters:
674
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
675
+ Can be either:
676
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
677
+ the Hub.
678
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
679
+ with [`ModelMixin.save_pretrained`].
680
+ - A [torch state
681
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
682
+ weight_name (`str`, defaults to "ip-adapter.safetensors"):
683
+ The name of the weight file to load. If a list is passed, it should have the same length as
684
+ `subfolder`.
685
+ subfolder (`str`, *optional*):
686
+ The subfolder location of a model file within a larger model repository on the Hub or locally. If a
687
+ list is passed, it should have the same length as `weight_name`.
688
+ image_encoder_folder (`str`, *optional*, defaults to `image_encoder`):
689
+ The subfolder location of the image encoder within a larger model repository on the Hub or locally.
690
+ Pass `None` to not load the image encoder. If the image encoder is located in a folder inside
691
+ `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g.
692
+ `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than
693
+ `subfolder`, you should pass the path to the folder that contains image encoder weights, for example,
694
+ `image_encoder_folder="different_subfolder/image_encoder"`.
695
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
696
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
697
+ is not used.
698
+ force_download (`bool`, *optional*, defaults to `False`):
699
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
700
+ cached versions if they exist.
701
+ proxies (`Dict[str, str]`, *optional*):
702
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
703
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
704
+ local_files_only (`bool`, *optional*, defaults to `False`):
705
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
706
+ won't be downloaded from the Hub.
707
+ token (`str` or *bool*, *optional*):
708
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
709
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
710
+ revision (`str`, *optional*, defaults to `"main"`):
711
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
712
+ allowed by Git.
713
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
714
+ Speed up model loading only loading the pretrained weights and not initializing the weights. This also
715
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
716
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
717
+ argument to `True` will raise an error.
718
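+
+         Example:
+
+         ```python
+         >>> # Illustrative sketch; the repository id and file layout below are assumptions,
+         >>> # not fixed by this method.
+         >>> pipeline.load_ip_adapter(
+         ...     "your-org/sd3-ip-adapter-repo",  # hypothetical Hub repo
+         ...     weight_name="ip-adapter.safetensors",
+         ...     subfolder="sd3",
+         ...     image_encoder_folder="image_encoder",
+         ... )
+         ```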
+ """
719
+         # Load the main state dict first
+         cache_dir = kwargs.pop("cache_dir", None)
+         force_download = kwargs.pop("force_download", False)
+         proxies = kwargs.pop("proxies", None)
+         local_files_only = kwargs.pop("local_files_only", None)
+         token = kwargs.pop("token", None)
+         revision = kwargs.pop("revision", None)
+         low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
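+         # Note: any entries left in `kwargs` at this point are not forwarded to the loader; the
+         # name is reused below for the image-encoder loading arguments.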
+
+         if low_cpu_mem_usage and not is_accelerate_available():
+             low_cpu_mem_usage = False
+             logger.warning(
+                 "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
+                 " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
+                 " `accelerate` for faster and less memory-intensive model loading. You can do so with: \n```\npip"
+                 " install accelerate\n```\n."
+             )
+
+         if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
+             raise NotImplementedError(
+                 "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
+                 " `low_cpu_mem_usage=False`."
+             )
+
+         user_agent = {
+             "file_type": "attn_procs_weights",
+             "framework": "pytorch",
+         }
+
+         if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+             model_file = _get_model_file(
+                 pretrained_model_name_or_path_or_dict,
+                 weights_name=weight_name,
+                 cache_dir=cache_dir,
+                 force_download=force_download,
+                 proxies=proxies,
+                 local_files_only=local_files_only,
+                 token=token,
+                 revision=revision,
+                 subfolder=subfolder,
+                 user_agent=user_agent,
+             )
+             if weight_name.endswith(".safetensors"):
+                 state_dict = {"image_proj": {}, "ip_adapter": {}}
+                 with safe_open(model_file, framework="pt", device="cpu") as f:
+                     for key in f.keys():
+                         if key.startswith("image_proj."):
+                             state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
+                         elif key.startswith("ip_adapter."):
+                             state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
+             else:
+                 state_dict = load_state_dict(model_file)
+         else:
+             state_dict = pretrained_model_name_or_path_or_dict
+
+         keys = list(state_dict.keys())
+         if "image_proj" not in keys and "ip_adapter" not in keys:
+             raise ValueError("Required keys (`image_proj` and `ip_adapter`) are missing from the state dict.")
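+         # Expected two-level layout of the checkpoint (illustrative sketch; the keys inside each
+         # sub-dict depend on the concrete adapter):
+         #   {"image_proj": {...}, "ip_adapter": {...}}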
+
+         # Load image_encoder and feature_extractor here if they haven't been registered to the pipeline yet
+         if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
+             if image_encoder_folder is not None:
+                 if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+                     logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
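+                     # Resolve the image encoder location; for example (illustrative values):
+                     #   subfolder="sd3", image_encoder_folder="image_encoder" -> "sd3/image_encoder"
+                     #   image_encoder_folder="other/image_encoder"            -> "other/image_encoder"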
+                     if image_encoder_folder.count("/") == 0:
+                         image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix()
+                     else:
+                         image_encoder_subfolder = Path(image_encoder_folder).as_posix()
+
+                     # Common args for loading the image encoder and image processor
+                     kwargs = {
+                         "low_cpu_mem_usage": low_cpu_mem_usage,
+                         "cache_dir": cache_dir,
+                         "local_files_only": local_files_only,
+                     }
+
+                     # The feature extractor is a plain image processor with no parameters, so only
+                     # the image encoder is moved to the pipeline's device and dtype
+                     self.register_modules(
+                         feature_extractor=SiglipImageProcessor.from_pretrained(image_encoder_subfolder, **kwargs),
+                         image_encoder=SiglipVisionModel.from_pretrained(image_encoder_subfolder, **kwargs).to(
+                             self.device, dtype=self.dtype
+                         ),
+                     )
+                 else:
+                     raise ValueError(
+                         "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict."
+                     )
+             else:
+                 logger.warning(
+                     "image_encoder is not loaded since `image_encoder_folder=None` was passed. You will not be able"
+                     " to use `ip_adapter_image` when calling the pipeline with IP-Adapter."
+                     " Use `ip_adapter_image_embeds` to pass pre-generated image embeddings instead."
+                 )
+
+         # Load IP-Adapter into transformer
+         self.transformer._load_ip_adapter_weights(state_dict, low_cpu_mem_usage=low_cpu_mem_usage)
+
+     def set_ip_adapter_scale(self, scale: float) -> None:
+         """
+         Set IP-Adapter scale, which controls image prompt conditioning. A value of 1.0 means the model is
+         conditioned only on the image prompt, and 0.0 means it is conditioned only on the text prompt. Lowering
+         this value encourages the model to produce more diverse images, but they may not be as aligned with the
+         image prompt.
+
+         Example:
+
+         ```python
+         >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
+         >>> pipeline.set_ip_adapter_scale(0.6)
+         >>> ...
+         ```
+
+         Args:
+             scale (float):
+                 IP-Adapter scale to be set.
+         """
+         for attn_processor in self.transformer.attn_processors.values():
+             if isinstance(attn_processor, SD3IPAdapterJointAttnProcessor2_0):
+                 attn_processor.scale = scale
+
+     def unload_ip_adapter(self) -> None:
+         """
+         Unloads the IP Adapter weights.
+
+         Example:
+
+         ```python
+         >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
+         >>> pipeline.unload_ip_adapter()
+         >>> ...
+         ```
+         """
+         # Remove image encoder
+         if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
+             self.image_encoder = None
+             self.register_to_config(image_encoder=None)
+
+         # Remove feature extractor
+         if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
+             self.feature_extractor = None
+             self.register_to_config(feature_extractor=None)
+
+         # Remove image projection
+         self.transformer.image_proj = None
+
+         # Restore the original attention processors
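+         # IP-Adapter processors are swapped back to the default joint processor; any other
+         # processor is re-created from its own class, so custom processor types are kept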
+         attn_procs = {
+             name: (
+                 JointAttnProcessor2_0() if isinstance(value, SD3IPAdapterJointAttnProcessor2_0) else value.__class__()
+             )
+             for name, value in self.transformer.attn_processors.items()
+         }
+         self.transformer.set_attn_processor(attn_procs)