diffusers 0.27.1__py3-none-any.whl → 0.32.2__py3-none-any.whl

Files changed (445)
  1. diffusers/__init__.py +233 -6
  2. diffusers/callbacks.py +209 -0
  3. diffusers/commands/env.py +102 -6
  4. diffusers/configuration_utils.py +45 -16
  5. diffusers/dependency_versions_table.py +4 -3
  6. diffusers/image_processor.py +434 -110
  7. diffusers/loaders/__init__.py +42 -9
  8. diffusers/loaders/ip_adapter.py +626 -36
  9. diffusers/loaders/lora_base.py +900 -0
  10. diffusers/loaders/lora_conversion_utils.py +991 -125
  11. diffusers/loaders/lora_pipeline.py +3812 -0
  12. diffusers/loaders/peft.py +571 -7
  13. diffusers/loaders/single_file.py +405 -173
  14. diffusers/loaders/single_file_model.py +385 -0
  15. diffusers/loaders/single_file_utils.py +1783 -713
  16. diffusers/loaders/textual_inversion.py +41 -23
  17. diffusers/loaders/transformer_flux.py +181 -0
  18. diffusers/loaders/transformer_sd3.py +89 -0
  19. diffusers/loaders/unet.py +464 -540
  20. diffusers/loaders/unet_loader_utils.py +163 -0
  21. diffusers/models/__init__.py +76 -7
  22. diffusers/models/activations.py +65 -10
  23. diffusers/models/adapter.py +53 -53
  24. diffusers/models/attention.py +605 -18
  25. diffusers/models/attention_flax.py +1 -1
  26. diffusers/models/attention_processor.py +4304 -687
  27. diffusers/models/autoencoders/__init__.py +8 -0
  28. diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
  29. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  30. diffusers/models/autoencoders/autoencoder_kl.py +110 -28
  31. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  32. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
  33. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  34. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  35. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  36. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
  37. diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
  38. diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
  39. diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
  40. diffusers/models/autoencoders/vae.py +41 -29
  41. diffusers/models/autoencoders/vq_model.py +182 -0
  42. diffusers/models/controlnet.py +47 -800
  43. diffusers/models/controlnet_flux.py +70 -0
  44. diffusers/models/controlnet_sd3.py +68 -0
  45. diffusers/models/controlnet_sparsectrl.py +116 -0
  46. diffusers/models/controlnets/__init__.py +23 -0
  47. diffusers/models/controlnets/controlnet.py +872 -0
  48. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
  49. diffusers/models/controlnets/controlnet_flux.py +536 -0
  50. diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
  51. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  52. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  53. diffusers/models/controlnets/controlnet_union.py +832 -0
  54. diffusers/models/controlnets/controlnet_xs.py +1946 -0
  55. diffusers/models/controlnets/multicontrolnet.py +183 -0
  56. diffusers/models/downsampling.py +85 -18
  57. diffusers/models/embeddings.py +1856 -158
  58. diffusers/models/embeddings_flax.py +23 -9
  59. diffusers/models/model_loading_utils.py +480 -0
  60. diffusers/models/modeling_flax_pytorch_utils.py +2 -1
  61. diffusers/models/modeling_flax_utils.py +2 -7
  62. diffusers/models/modeling_outputs.py +14 -0
  63. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  64. diffusers/models/modeling_utils.py +611 -146
  65. diffusers/models/normalization.py +361 -20
  66. diffusers/models/resnet.py +18 -23
  67. diffusers/models/transformers/__init__.py +16 -0
  68. diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
  69. diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
  70. diffusers/models/transformers/dit_transformer_2d.py +240 -0
  71. diffusers/models/transformers/dual_transformer_2d.py +9 -8
  72. diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
  73. diffusers/models/transformers/latte_transformer_3d.py +327 -0
  74. diffusers/models/transformers/lumina_nextdit2d.py +340 -0
  75. diffusers/models/transformers/pixart_transformer_2d.py +445 -0
  76. diffusers/models/transformers/prior_transformer.py +13 -13
  77. diffusers/models/transformers/sana_transformer.py +488 -0
  78. diffusers/models/transformers/stable_audio_transformer.py +458 -0
  79. diffusers/models/transformers/t5_film_transformer.py +17 -19
  80. diffusers/models/transformers/transformer_2d.py +297 -187
  81. diffusers/models/transformers/transformer_allegro.py +422 -0
  82. diffusers/models/transformers/transformer_cogview3plus.py +386 -0
  83. diffusers/models/transformers/transformer_flux.py +593 -0
  84. diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
  85. diffusers/models/transformers/transformer_ltx.py +469 -0
  86. diffusers/models/transformers/transformer_mochi.py +499 -0
  87. diffusers/models/transformers/transformer_sd3.py +461 -0
  88. diffusers/models/transformers/transformer_temporal.py +21 -19
  89. diffusers/models/unets/unet_1d.py +8 -8
  90. diffusers/models/unets/unet_1d_blocks.py +31 -31
  91. diffusers/models/unets/unet_2d.py +17 -10
  92. diffusers/models/unets/unet_2d_blocks.py +225 -149
  93. diffusers/models/unets/unet_2d_condition.py +41 -40
  94. diffusers/models/unets/unet_2d_condition_flax.py +6 -5
  95. diffusers/models/unets/unet_3d_blocks.py +192 -1057
  96. diffusers/models/unets/unet_3d_condition.py +22 -27
  97. diffusers/models/unets/unet_i2vgen_xl.py +22 -18
  98. diffusers/models/unets/unet_kandinsky3.py +2 -2
  99. diffusers/models/unets/unet_motion_model.py +1413 -89
  100. diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
  101. diffusers/models/unets/unet_stable_cascade.py +19 -18
  102. diffusers/models/unets/uvit_2d.py +2 -2
  103. diffusers/models/upsampling.py +95 -26
  104. diffusers/models/vq_model.py +12 -164
  105. diffusers/optimization.py +1 -1
  106. diffusers/pipelines/__init__.py +202 -3
  107. diffusers/pipelines/allegro/__init__.py +48 -0
  108. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  109. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  110. diffusers/pipelines/amused/pipeline_amused.py +12 -12
  111. diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
  112. diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
  113. diffusers/pipelines/animatediff/__init__.py +8 -0
  114. diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
  115. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
  116. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
  117. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
  118. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
  119. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
  120. diffusers/pipelines/animatediff/pipeline_output.py +3 -2
  121. diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
  122. diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
  123. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
  124. diffusers/pipelines/aura_flow/__init__.py +48 -0
  125. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
  126. diffusers/pipelines/auto_pipeline.py +196 -28
  127. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  128. diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
  129. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
  130. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
  131. diffusers/pipelines/cogvideo/__init__.py +54 -0
  132. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
  133. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
  134. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
  135. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
  136. diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
  137. diffusers/pipelines/cogview3/__init__.py +47 -0
  138. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
  139. diffusers/pipelines/cogview3/pipeline_output.py +21 -0
  140. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
  141. diffusers/pipelines/controlnet/__init__.py +86 -80
  142. diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
  143. diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
  144. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
  145. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
  146. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
  147. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
  148. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
  149. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
  150. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  151. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  152. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  153. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  154. diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
  155. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
  156. diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
  157. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
  158. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
  159. diffusers/pipelines/controlnet_xs/__init__.py +68 -0
  160. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
  161. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
  162. diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
  163. diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
  164. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
  165. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
  166. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
  167. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
  168. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
  169. diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
  170. diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
  171. diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
  172. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
  173. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
  174. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
  175. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
  176. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
  177. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
  178. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
  179. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
  180. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
  181. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
  182. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
  183. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
  184. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
  185. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
  186. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
  187. diffusers/pipelines/dit/pipeline_dit.py +7 -4
  188. diffusers/pipelines/flux/__init__.py +69 -0
  189. diffusers/pipelines/flux/modeling_flux.py +47 -0
  190. diffusers/pipelines/flux/pipeline_flux.py +957 -0
  191. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  192. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  193. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  194. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
  195. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
  196. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
  197. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  198. diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
  199. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
  200. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  201. diffusers/pipelines/flux/pipeline_output.py +37 -0
  202. diffusers/pipelines/free_init_utils.py +41 -38
  203. diffusers/pipelines/free_noise_utils.py +596 -0
  204. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  205. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  206. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  207. diffusers/pipelines/hunyuandit/__init__.py +48 -0
  208. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
  209. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
  210. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
  211. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
  212. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
  213. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
  214. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
  215. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
  216. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
  217. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
  218. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
  219. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
  220. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
  221. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
  222. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
  223. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
  224. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
  225. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
  226. diffusers/pipelines/kolors/__init__.py +54 -0
  227. diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
  228. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
  229. diffusers/pipelines/kolors/pipeline_output.py +21 -0
  230. diffusers/pipelines/kolors/text_encoder.py +889 -0
  231. diffusers/pipelines/kolors/tokenizer.py +338 -0
  232. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
  233. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
  234. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
  235. diffusers/pipelines/latte/__init__.py +48 -0
  236. diffusers/pipelines/latte/pipeline_latte.py +881 -0
  237. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
  238. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
  239. diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
  240. diffusers/pipelines/ltx/__init__.py +50 -0
  241. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  242. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  243. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  244. diffusers/pipelines/lumina/__init__.py +48 -0
  245. diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
  246. diffusers/pipelines/marigold/__init__.py +50 -0
  247. diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
  248. diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
  249. diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
  250. diffusers/pipelines/mochi/__init__.py +48 -0
  251. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  252. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  253. diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
  254. diffusers/pipelines/pag/__init__.py +80 -0
  255. diffusers/pipelines/pag/pag_utils.py +243 -0
  256. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
  257. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
  258. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
  259. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
  260. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
  261. diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
  262. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
  263. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  264. diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
  265. diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
  266. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  267. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
  268. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
  269. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  270. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
  271. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
  272. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
  273. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
  274. diffusers/pipelines/pia/pipeline_pia.py +74 -164
  275. diffusers/pipelines/pipeline_flax_utils.py +5 -10
  276. diffusers/pipelines/pipeline_loading_utils.py +515 -53
  277. diffusers/pipelines/pipeline_utils.py +411 -222
  278. diffusers/pipelines/pixart_alpha/__init__.py +8 -1
  279. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
  280. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
  281. diffusers/pipelines/sana/__init__.py +47 -0
  282. diffusers/pipelines/sana/pipeline_output.py +21 -0
  283. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  284. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
  285. diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
  286. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
  287. diffusers/pipelines/shap_e/renderer.py +1 -1
  288. diffusers/pipelines/stable_audio/__init__.py +50 -0
  289. diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
  290. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
  291. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
  292. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
  293. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
  294. diffusers/pipelines/stable_diffusion/__init__.py +0 -1
  295. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
  296. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  297. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
  298. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
  299. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
  300. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
  301. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
  302. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
  303. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
  304. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
  305. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
  306. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
  307. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
  308. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
  309. diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
  310. diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
  311. diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
  312. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
  313. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
  314. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
  315. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
  316. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
  317. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
  318. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
  319. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
  320. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
  321. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
  322. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
  323. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
  324. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  325. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
  326. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
  327. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
  328. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
  329. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
  330. diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
  331. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
  332. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
  333. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
  334. diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
  335. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
  336. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
  337. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
  338. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
  339. diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
  340. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
  341. diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
  342. diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
  343. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
  344. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
  345. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
  346. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
  347. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
  348. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
  349. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
  350. diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
  351. diffusers/quantizers/auto.py +139 -0
  352. diffusers/quantizers/base.py +233 -0
  353. diffusers/quantizers/bitsandbytes/__init__.py +2 -0
  354. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
  355. diffusers/quantizers/bitsandbytes/utils.py +306 -0
  356. diffusers/quantizers/gguf/__init__.py +1 -0
  357. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  358. diffusers/quantizers/gguf/utils.py +456 -0
  359. diffusers/quantizers/quantization_config.py +669 -0
  360. diffusers/quantizers/torchao/__init__.py +15 -0
  361. diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
  362. diffusers/schedulers/__init__.py +12 -2
  363. diffusers/schedulers/deprecated/__init__.py +1 -1
  364. diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
  365. diffusers/schedulers/scheduling_amused.py +5 -5
  366. diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
  367. diffusers/schedulers/scheduling_consistency_models.py +23 -25
  368. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
  369. diffusers/schedulers/scheduling_ddim.py +27 -26
  370. diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
  371. diffusers/schedulers/scheduling_ddim_flax.py +2 -1
  372. diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
  373. diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
  374. diffusers/schedulers/scheduling_ddpm.py +27 -30
  375. diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
  376. diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
  377. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
  378. diffusers/schedulers/scheduling_deis_multistep.py +150 -50
  379. diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
  380. diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
  381. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
  382. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
  383. diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
  384. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
  385. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
  386. diffusers/schedulers/scheduling_edm_euler.py +62 -39
  387. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
  388. diffusers/schedulers/scheduling_euler_discrete.py +255 -74
  389. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
  390. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
  391. diffusers/schedulers/scheduling_heun_discrete.py +174 -46
  392. diffusers/schedulers/scheduling_ipndm.py +9 -9
  393. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
  394. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
  395. diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
  396. diffusers/schedulers/scheduling_lcm.py +23 -29
  397. diffusers/schedulers/scheduling_lms_discrete.py +105 -28
  398. diffusers/schedulers/scheduling_pndm.py +20 -20
  399. diffusers/schedulers/scheduling_repaint.py +21 -21
  400. diffusers/schedulers/scheduling_sasolver.py +157 -60
  401. diffusers/schedulers/scheduling_sde_ve.py +19 -19
  402. diffusers/schedulers/scheduling_tcd.py +41 -36
  403. diffusers/schedulers/scheduling_unclip.py +19 -16
  404. diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
  405. diffusers/schedulers/scheduling_utils.py +12 -5
  406. diffusers/schedulers/scheduling_utils_flax.py +1 -3
  407. diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
  408. diffusers/training_utils.py +214 -30
  409. diffusers/utils/__init__.py +17 -1
  410. diffusers/utils/constants.py +3 -0
  411. diffusers/utils/doc_utils.py +1 -0
  412. diffusers/utils/dummy_pt_objects.py +592 -7
  413. diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
  414. diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
  415. diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
  416. diffusers/utils/dynamic_modules_utils.py +34 -29
  417. diffusers/utils/export_utils.py +50 -6
  418. diffusers/utils/hub_utils.py +131 -17
  419. diffusers/utils/import_utils.py +210 -8
  420. diffusers/utils/loading_utils.py +118 -5
  421. diffusers/utils/logging.py +4 -2
  422. diffusers/utils/peft_utils.py +37 -7
  423. diffusers/utils/state_dict_utils.py +13 -2
  424. diffusers/utils/testing_utils.py +193 -11
  425. diffusers/utils/torch_utils.py +4 -0
  426. diffusers/video_processor.py +113 -0
  427. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
  428. diffusers-0.32.2.dist-info/RECORD +550 -0
  429. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
  430. diffusers/loaders/autoencoder.py +0 -146
  431. diffusers/loaders/controlnet.py +0 -136
  432. diffusers/loaders/lora.py +0 -1349
  433. diffusers/models/prior_transformer.py +0 -12
  434. diffusers/models/t5_film_transformer.py +0 -70
  435. diffusers/models/transformer_2d.py +0 -25
  436. diffusers/models/transformer_temporal.py +0 -34
  437. diffusers/models/unet_1d.py +0 -26
  438. diffusers/models/unet_1d_blocks.py +0 -203
  439. diffusers/models/unet_2d.py +0 -27
  440. diffusers/models/unet_2d_blocks.py +0 -375
  441. diffusers/models/unet_2d_condition.py +0 -25
  442. diffusers-0.27.1.dist-info/RECORD +0 -399
  443. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
  444. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
  445. {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py
@@ -0,0 +1,1288 @@
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import inspect
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ import torch
+ from transformers import (
+     CLIPImageProcessor,
+     CLIPTextModel,
+     CLIPTextModelWithProjection,
+     CLIPTokenizer,
+     CLIPVisionModelWithProjection,
+ )
+
+ from ...image_processor import PipelineImageInput
+ from ...loaders import (
+     FromSingleFileMixin,
+     IPAdapterMixin,
+     StableDiffusionXLLoraLoaderMixin,
+     TextualInversionLoaderMixin,
+ )
+ from ...models import AutoencoderKL, ImageProjection, MotionAdapter, UNet2DConditionModel, UNetMotionModel
+ from ...models.attention_processor import (
+     AttnProcessor2_0,
+     FusedAttnProcessor2_0,
+     XFormersAttnProcessor,
+ )
+ from ...models.lora import adjust_lora_scale_text_encoder
+ from ...schedulers import (
+     DDIMScheduler,
+     DPMSolverMultistepScheduler,
+     EulerAncestralDiscreteScheduler,
+     EulerDiscreteScheduler,
+     LMSDiscreteScheduler,
+     PNDMScheduler,
+ )
+ from ...utils import (
+     USE_PEFT_BACKEND,
+     logging,
+     replace_example_docstring,
+     scale_lora_layers,
+     unscale_lora_layers,
+ )
+ from ...utils.torch_utils import randn_tensor
+ from ...video_processor import VideoProcessor
+ from ..free_init_utils import FreeInitMixin
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
+ from .pipeline_output import AnimateDiffPipelineOutput
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+ EXAMPLE_DOC_STRING = """
+     Examples:
+         ```py
+         >>> import torch
+         >>> from diffusers.models import MotionAdapter
+         >>> from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler
+         >>> from diffusers.utils import export_to_gif
+
+         >>> adapter = MotionAdapter.from_pretrained(
+         ...     "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16
+         ... )
+
+         >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+         >>> scheduler = DDIMScheduler.from_pretrained(
+         ...     model_id,
+         ...     subfolder="scheduler",
+         ...     clip_sample=False,
+         ...     timestep_spacing="linspace",
+         ...     beta_schedule="linear",
+         ...     steps_offset=1,
+         ... )
+         >>> pipe = AnimateDiffSDXLPipeline.from_pretrained(
+         ...     model_id,
+         ...     motion_adapter=adapter,
+         ...     scheduler=scheduler,
+         ...     torch_dtype=torch.float16,
+         ...     variant="fp16",
+         ... ).to("cuda")
+
+         >>> # enable memory savings
+         >>> pipe.enable_vae_slicing()
+         >>> pipe.enable_vae_tiling()
+
+         >>> output = pipe(
+         ...     prompt="a panda surfing in the ocean, realistic, high quality",
+         ...     negative_prompt="low quality, worst quality",
+         ...     num_inference_steps=20,
+         ...     guidance_scale=8,
+         ...     width=1024,
+         ...     height=1024,
+         ...     num_frames=16,
+         ... )
+
+         >>> frames = output.frames[0]
+         >>> export_to_gif(frames, "animation.gif")
+         ```
+ """
+
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
+     r"""
+     Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
+     Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
+     Flawed](https://arxiv.org/pdf/2305.08891.pdf).
+
+     Args:
+         noise_cfg (`torch.Tensor`):
+             The predicted noise tensor for the guided diffusion process.
+         noise_pred_text (`torch.Tensor`):
+             The predicted noise tensor for the text-guided diffusion process.
+         guidance_rescale (`float`, *optional*, defaults to 0.0):
+             A rescale factor applied to the noise predictions.
+
+     Returns:
+         noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor.
+     """
+     std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
+     std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
+     # rescale the results from guidance (fixes overexposure)
+     noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
+     # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
+     noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
+     return noise_cfg
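
For reference, a minimal sketch of how this helper is typically applied right after classifier-free guidance, assuming `rescale_noise_cfg` as defined above is in scope; the tensor shapes below are illustrative, not required by the function:

```py
import torch

# Hypothetical stand-ins for a UNet's unconditional and text-conditioned
# noise predictions: shape (batch, channels, frames, height, width).
noise_pred_uncond = torch.randn(1, 4, 16, 128, 128)
noise_pred_text = torch.randn(1, 4, 16, 128, 128)

guidance_scale = 8.0
noise_cfg = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# guidance_rescale=0.7 blends the std-matched prediction with the raw CFG output.
rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)
assert rescaled.shape == noise_cfg.shape
```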
+
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+ def retrieve_timesteps(
+     scheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     sigmas: Optional[List[float]] = None,
+     **kwargs,
+ ):
+     r"""
+     Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+     custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+     Args:
+         scheduler (`SchedulerMixin`):
+             The scheduler to get timesteps from.
+         num_inference_steps (`int`):
+             The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+             must be `None`.
+         device (`str` or `torch.device`, *optional*):
+             The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+         timesteps (`List[int]`, *optional*):
+             Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+             `num_inference_steps` and `sigmas` must be `None`.
+         sigmas (`List[float]`, *optional*):
+             Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+             `num_inference_steps` and `timesteps` must be `None`.
+
+     Returns:
+         `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+         second element is the number of inference steps.
+     """
+     if timesteps is not None and sigmas is not None:
+         raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+     if timesteps is not None:
+         accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accepts_timesteps:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" timestep schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     elif sigmas is not None:
+         accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accept_sigmas:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" sigma schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+     return timesteps, num_inference_steps
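
A short usage sketch of the default path, assuming `retrieve_timesteps` as defined above is in scope; the printed schedule values are an example and depend on the scheduler config:

```py
from diffusers import DDIMScheduler

# Any SchedulerMixin works; DDIMScheduler matches the docstring example above.
scheduler = DDIMScheduler.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler"
)

timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=20, device="cpu")
print(num_inference_steps)  # 20
print(timesteps[:3])        # descending schedule, e.g. tensor([951, 901, 851])
```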
+
+
+ class AnimateDiffSDXLPipeline(
+     DiffusionPipeline,
+     StableDiffusionMixin,
+     FromSingleFileMixin,
+     StableDiffusionXLLoraLoaderMixin,
+     TextualInversionLoaderMixin,
+     IPAdapterMixin,
+     FreeInitMixin,
+ ):
+     r"""
+     Pipeline for text-to-video generation using Stable Diffusion XL.
+
+     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+     The pipeline also inherits the following loading methods:
+         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+         - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
+         - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+         - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+         - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
+
+     Args:
+         vae ([`AutoencoderKL`]):
+             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+         text_encoder ([`CLIPTextModel`]):
+             Frozen text-encoder. Stable Diffusion XL uses the text portion of
+             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+         text_encoder_2 ([`CLIPTextModelWithProjection`]):
+             Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
+             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
+             specifically the
+             [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
+             variant.
+         tokenizer (`CLIPTokenizer`):
+             Tokenizer of class
+             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+         tokenizer_2 (`CLIPTokenizer`):
+             Second tokenizer of class
+             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+         unet ([`UNet2DConditionModel`]):
+             Conditional U-Net architecture to denoise the encoded image latents.
+         scheduler ([`SchedulerMixin`]):
+             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
+             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+         force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
+             Whether the negative prompt embeddings should always be forced to 0. Also see the config of
+             `stabilityai/stable-diffusion-xl-base-1.0`.
+     """
+
+     model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
+     _optional_components = [
+         "tokenizer",
+         "tokenizer_2",
+         "text_encoder",
+         "text_encoder_2",
+         "image_encoder",
+         "feature_extractor",
+     ]
+     _callback_tensor_inputs = [
+         "latents",
+         "prompt_embeds",
+         "negative_prompt_embeds",
+         "add_text_embeds",
+         "add_time_ids",
+         "negative_pooled_prompt_embeds",
+         "negative_add_time_ids",
+     ]
+
+     def __init__(
+         self,
+         vae: AutoencoderKL,
+         text_encoder: CLIPTextModel,
+         text_encoder_2: CLIPTextModelWithProjection,
+         tokenizer: CLIPTokenizer,
+         tokenizer_2: CLIPTokenizer,
+         unet: Union[UNet2DConditionModel, UNetMotionModel],
+         motion_adapter: MotionAdapter,
+         scheduler: Union[
+             DDIMScheduler,
+             PNDMScheduler,
+             LMSDiscreteScheduler,
+             EulerDiscreteScheduler,
+             EulerAncestralDiscreteScheduler,
+             DPMSolverMultistepScheduler,
+         ],
+         image_encoder: CLIPVisionModelWithProjection = None,
+         feature_extractor: CLIPImageProcessor = None,
+         force_zeros_for_empty_prompt: bool = True,
+     ):
+         super().__init__()
+
+         if isinstance(unet, UNet2DConditionModel):
+             unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
+
+         self.register_modules(
+             vae=vae,
+             text_encoder=text_encoder,
+             text_encoder_2=text_encoder_2,
+             tokenizer=tokenizer,
+             tokenizer_2=tokenizer_2,
+             unet=unet,
+             motion_adapter=motion_adapter,
+             scheduler=scheduler,
+             image_encoder=image_encoder,
+             feature_extractor=feature_extractor,
+         )
+         self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
+         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+         self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor)
+
+         self.default_sample_size = self.unet.config.sample_size
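
The `vae_scale_factor` arithmetic above determines how pixel dimensions map to latent dimensions. A small sketch of that relationship, assuming SDXL's typical four-level VAE config:

```py
# SDXL's AutoencoderKL typically has block_out_channels = (128, 256, 512, 512),
# i.e. three downsampling stages between four levels (assumed config).
block_out_channels = (128, 256, 512, 512)
vae_scale_factor = 2 ** (len(block_out_channels) - 1)  # 2**3 == 8

# A 1024x1024 video frame therefore becomes a 128x128 latent.
height, width = 1024, 1024
print(height // vae_scale_factor, width // vae_scale_factor)  # 128 128
```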
+
+     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt with num_images_per_prompt->num_videos_per_prompt
+     def encode_prompt(
+         self,
+         prompt: str,
+         prompt_2: Optional[str] = None,
+         device: Optional[torch.device] = None,
+         num_videos_per_prompt: int = 1,
+         do_classifier_free_guidance: bool = True,
+         negative_prompt: Optional[str] = None,
+         negative_prompt_2: Optional[str] = None,
+         prompt_embeds: Optional[torch.Tensor] = None,
+         negative_prompt_embeds: Optional[torch.Tensor] = None,
+         pooled_prompt_embeds: Optional[torch.Tensor] = None,
+         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
+         lora_scale: Optional[float] = None,
+         clip_skip: Optional[int] = None,
+     ):
+         r"""
+         Encodes the prompt into text encoder hidden states.
+
+         Args:
+             prompt (`str` or `List[str]`, *optional*):
+                 The prompt to be encoded.
+             prompt_2 (`str` or `List[str]`, *optional*):
+                 The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+                 used in both text-encoders.
+             device: (`torch.device`):
+                 torch device
+             num_videos_per_prompt (`int`):
+                 The number of videos that should be generated per prompt.
+             do_classifier_free_guidance (`bool`):
+                 Whether to use classifier-free guidance or not.
+             negative_prompt (`str` or `List[str]`, *optional*):
+                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                 less than `1`).
+             negative_prompt_2 (`str` or `List[str]`, *optional*):
+                 The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                 `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
+             prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                 provided, text embeddings will be generated from the `prompt` input argument.
+             negative_prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                 weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input
+                 argument.
+             pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                 If not provided, pooled text embeddings will be generated from the `prompt` input argument.
+             negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                 weighting. If not provided, pooled `negative_prompt_embeds` will be generated from the
+                 `negative_prompt` input argument.
+             lora_scale (`float`, *optional*):
+                 A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
+             clip_skip (`int`, *optional*):
+                 Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                 the output of the pre-final layer will be used for computing the prompt embeddings.
+         """
+         device = device or self._execution_device
+
+         # set lora scale so that monkey patched LoRA
+         # function of text encoder can correctly access it
+         if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
+             self._lora_scale = lora_scale
+
+             # dynamically adjust the LoRA scale
+             if self.text_encoder is not None:
+                 if not USE_PEFT_BACKEND:
+                     adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
+                 else:
+                     scale_lora_layers(self.text_encoder, lora_scale)
+
+             if self.text_encoder_2 is not None:
+                 if not USE_PEFT_BACKEND:
+                     adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
+                 else:
+                     scale_lora_layers(self.text_encoder_2, lora_scale)
+
+         prompt = [prompt] if isinstance(prompt, str) else prompt
+
+         if prompt is not None:
+             batch_size = len(prompt)
+         else:
+             batch_size = prompt_embeds.shape[0]
+
+         # Define tokenizers and text encoders
+         tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
+         text_encoders = (
+             [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
+         )
+
+         if prompt_embeds is None:
+             prompt_2 = prompt_2 or prompt
+             prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
+             # textual inversion: process multi-vector tokens if necessary
+             prompt_embeds_list = []
+             prompts = [prompt, prompt_2]
+             for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
+                 if isinstance(self, TextualInversionLoaderMixin):
+                     prompt = self.maybe_convert_prompt(prompt, tokenizer)
+
+                 text_inputs = tokenizer(
+                     prompt,
+                     padding="max_length",
+                     max_length=tokenizer.model_max_length,
+                     truncation=True,
+                     return_tensors="pt",
+                 )
+
+                 text_input_ids = text_inputs.input_ids
+                 untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+
+                 if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
+                     text_input_ids, untruncated_ids
+                 ):
+                     removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
+                     logger.warning(
+                         "The following part of your input was truncated because CLIP can only handle sequences up to"
+                         f" {tokenizer.model_max_length} tokens: {removed_text}"
+                     )
+
+                 prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+
+                 # We are always interested only in the pooled output of the final text encoder
+                 pooled_prompt_embeds = prompt_embeds[0]
+                 if clip_skip is None:
+                     prompt_embeds = prompt_embeds.hidden_states[-2]
+                 else:
+                     # "2" because SDXL always indexes from the penultimate layer.
+                     prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
+
+                 prompt_embeds_list.append(prompt_embeds)
+
+             prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
+
+         # get unconditional embeddings for classifier free guidance
+         zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
+         if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
+             negative_prompt_embeds = torch.zeros_like(prompt_embeds)
+             negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
+         elif do_classifier_free_guidance and negative_prompt_embeds is None:
+             negative_prompt = negative_prompt or ""
+             negative_prompt_2 = negative_prompt_2 or negative_prompt
+
+             # normalize str to list
+             negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+             negative_prompt_2 = (
+                 batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+             )
+
+             uncond_tokens: List[str]
+             if prompt is not None and type(prompt) is not type(negative_prompt):
+                 raise TypeError(
+                     f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                     f" {type(prompt)}."
+                 )
+             elif batch_size != len(negative_prompt):
+                 raise ValueError(
+                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+                     " the batch size of `prompt`."
+                 )
+             else:
+                 uncond_tokens = [negative_prompt, negative_prompt_2]
+
+             negative_prompt_embeds_list = []
+             for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
+                 if isinstance(self, TextualInversionLoaderMixin):
+                     negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
+
+                 max_length = prompt_embeds.shape[1]
+                 uncond_input = tokenizer(
+                     negative_prompt,
+                     padding="max_length",
+                     max_length=max_length,
+                     truncation=True,
+                     return_tensors="pt",
+                 )
+
+                 negative_prompt_embeds = text_encoder(
+                     uncond_input.input_ids.to(device),
+                     output_hidden_states=True,
+                 )
+                 # We are always interested only in the pooled output of the final text encoder
+                 negative_pooled_prompt_embeds = negative_prompt_embeds[0]
+                 negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
+
+                 negative_prompt_embeds_list.append(negative_prompt_embeds)
+
+             negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
+
+         if self.text_encoder_2 is not None:
+             prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
+         else:
+             prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
+
+         bs_embed, seq_len, _ = prompt_embeds.shape
+         # duplicate text embeddings for each generation per prompt, using mps friendly method
+         prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
+         prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1)
+
+         if do_classifier_free_guidance:
+             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+             seq_len = negative_prompt_embeds.shape[1]
+
+             if self.text_encoder_2 is not None:
+                 negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
+             else:
+                 negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
+
+             negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1)
+             negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
+
+         pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_videos_per_prompt).view(
+             bs_embed * num_videos_per_prompt, -1
+         )
+         if do_classifier_free_guidance:
+             negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_videos_per_prompt).view(
+                 bs_embed * num_videos_per_prompt, -1
+             )
+
+         if self.text_encoder is not None:
+             if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
+                 # Retrieve the original scale by scaling back the LoRA layers
+                 unscale_lora_layers(self.text_encoder, lora_scale)
+
+         if self.text_encoder_2 is not None:
+             if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
+                 # Retrieve the original scale by scaling back the LoRA layers
+                 unscale_lora_layers(self.text_encoder_2, lora_scale)
+
+         return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
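
For illustration, a minimal sketch of calling this method directly on a loaded pipeline (`pipe` as constructed in the docstring example above); the shapes are what SDXL's dual encoders typically produce (77 tokens, 768 + 1280 = 2048 channels):

```py
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt(
    prompt="a panda surfing in the ocean",
    negative_prompt="low quality",
    num_videos_per_prompt=1,
    do_classifier_free_guidance=True,
)

print(prompt_embeds.shape)         # torch.Size([1, 77, 2048]) -- 768 + 1280 concatenated
print(pooled_prompt_embeds.shape)  # torch.Size([1, 1280])     -- pooled output of text_encoder_2
```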
+
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+     def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+         dtype = next(self.image_encoder.parameters()).dtype
+
+         if not isinstance(image, torch.Tensor):
+             image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+         image = image.to(device=device, dtype=dtype)
+         if output_hidden_states:
+             image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+             image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_enc_hidden_states = self.image_encoder(
+                 torch.zeros_like(image), output_hidden_states=True
+             ).hidden_states[-2]
+             uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                 num_images_per_prompt, dim=0
+             )
+             return image_enc_hidden_states, uncond_image_enc_hidden_states
+         else:
+             image_embeds = self.image_encoder(image).image_embeds
+             image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_embeds = torch.zeros_like(image_embeds)
+
+             return image_embeds, uncond_image_embeds
+
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
+     def prepare_ip_adapter_image_embeds(
+         self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
+     ):
+         image_embeds = []
+         if do_classifier_free_guidance:
+             negative_image_embeds = []
+         if ip_adapter_image_embeds is None:
+             if not isinstance(ip_adapter_image, list):
+                 ip_adapter_image = [ip_adapter_image]
+
+             if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
+                 raise ValueError(
+                     f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
+                 )
+
+             for single_ip_adapter_image, image_proj_layer in zip(
+                 ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
+             ):
+                 output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
+                 single_image_embeds, single_negative_image_embeds = self.encode_image(
+                     single_ip_adapter_image, device, 1, output_hidden_state
+                 )
+
+                 image_embeds.append(single_image_embeds[None, :])
+                 if do_classifier_free_guidance:
+                     negative_image_embeds.append(single_negative_image_embeds[None, :])
+         else:
+             for single_image_embeds in ip_adapter_image_embeds:
+                 if do_classifier_free_guidance:
+                     single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
+                     negative_image_embeds.append(single_negative_image_embeds)
+                 image_embeds.append(single_image_embeds)
+
+         ip_adapter_image_embeds = []
+         for i, single_image_embeds in enumerate(image_embeds):
+             single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
+             if do_classifier_free_guidance:
+                 single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
+                 single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
+
+             single_image_embeds = single_image_embeds.to(device=device)
+             ip_adapter_image_embeds.append(single_image_embeds)
+
+         return ip_adapter_image_embeds
620
+
621
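+    # Illustrative sketch of the layout expected when embeddings are passed in directly and
+    # classifier-free guidance is enabled: each tensor in `ip_adapter_image_embeds` must be
+    # the negative and positive halves concatenated along dim 0, which is what the
+    # `.chunk(2)` above undoes (shapes below are hypothetical):
+    #
+    #     neg, pos = torch.randn(1, 1, 1280), torch.randn(1, 1, 1280)
+    #     pipe(..., ip_adapter_image_embeds=[torch.cat([neg, pos], dim=0)])
+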
+    # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
+    def decode_latents(self, latents):
+        latents = 1 / self.vae.config.scaling_factor * latents
+
+        batch_size, channels, num_frames, height, width = latents.shape
+        latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
+
+        image = self.vae.decode(latents).sample
+        video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4)
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+        video = video.float()
+        return video
+
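+    # Shape walk-through for `decode_latents` (illustrative numbers): latents of shape
+    # (batch, channels, frames, h, w) = (1, 4, 16, 128, 128) are flattened to
+    # (16, 4, 128, 128) so the image VAE can decode frame by frame; the decoded
+    # (16, 3, 1024, 1024) frames are then folded back into a (1, 3, 16, 1024, 1024) video.
+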
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+    def prepare_extra_step_kwargs(self, generator, eta):
+        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+        # and should be between [0, 1]
+
+        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        extra_step_kwargs = {}
+        if accepts_eta:
+            extra_step_kwargs["eta"] = eta
+
+        # check if the scheduler accepts generator
+        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        if accepts_generator:
+            extra_step_kwargs["generator"] = generator
+        return extra_step_kwargs
+
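+    # A minimal sketch of the introspection pattern used above, with a hypothetical
+    # scheduler `sched`: only forward kwargs that the scheduler's `step` actually accepts.
+    #
+    #     import inspect
+    #     accepts_eta = "eta" in inspect.signature(sched.step).parameters
+    #     extra = {"eta": 0.0} if accepts_eta else {}
+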
+    def check_inputs(
+        self,
+        prompt,
+        prompt_2,
+        height,
+        width,
+        negative_prompt=None,
+        negative_prompt_2=None,
+        prompt_embeds=None,
+        negative_prompt_embeds=None,
+        pooled_prompt_embeds=None,
+        negative_pooled_prompt_embeds=None,
+        callback_on_step_end_tensor_inputs=None,
+    ):
+        if height % 8 != 0 or width % 8 != 0:
+            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+        if callback_on_step_end_tensor_inputs is not None and not all(
+            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+        ):
+            raise ValueError(
+                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+            )
+
+        if prompt is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt_2 is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt is None and prompt_embeds is None:
+            raise ValueError(
+                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+            )
+        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
+            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
+
+        if negative_prompt is not None and negative_prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+            )
+        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
+                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+            )
+
+        if prompt_embeds is not None and negative_prompt_embeds is not None:
+            if prompt_embeds.shape != negative_prompt_embeds.shape:
+                raise ValueError(
+                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+                    f" {negative_prompt_embeds.shape}."
+                )
+
+        if prompt_embeds is not None and pooled_prompt_embeds is None:
+            raise ValueError(
+                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
+            )
+
+        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
+            raise ValueError(
+                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
+            )
+
+    # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
+    def prepare_latents(
+        self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
+    ):
+        shape = (
+            batch_size,
+            num_channels_latents,
+            num_frames,
+            height // self.vae_scale_factor,
+            width // self.vae_scale_factor,
+        )
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        if latents is None:
+            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+        else:
+            latents = latents.to(device)
+
+        # scale the initial noise by the standard deviation required by the scheduler
+        latents = latents * self.scheduler.init_noise_sigma
+        return latents
+
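+    # Worked example of the latent shape above (illustrative numbers): with
+    # height = width = 1024, vae_scale_factor = 8, num_frames = 16 and 4 latent channels,
+    # `shape` comes out as (batch_size, 4, 16, 128, 128).
+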
+    def _get_add_time_ids(
+        self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
+    ):
+        add_time_ids = list(original_size + crops_coords_top_left + target_size)
+
+        passed_add_embed_dim = (
+            self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
+        )
+        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
+
+        if expected_add_embed_dim != passed_add_embed_dim:
+            raise ValueError(
+                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
+            )
+
+        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
+        return add_time_ids
+
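+    # Dimensionality check illustrated (typical SDXL values, stated as an assumption): with
+    # addition_time_embed_dim = 256, six time ids (original_size + crop + target) and
+    # text_encoder_projection_dim = 1280, the expected embedding width is
+    # 6 * 256 + 1280 = 2816, which must equal `unet.add_embedding.linear_1.in_features`.
+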
+    def upcast_vae(self):
+        dtype = self.vae.dtype
+        self.vae.to(dtype=torch.float32)
+        use_torch_2_0_or_xformers = isinstance(
+            self.vae.decoder.mid_block.attentions[0].processor,
+            (
+                AttnProcessor2_0,
+                XFormersAttnProcessor,
+                FusedAttnProcessor2_0,
+            ),
+        )
+        # if xformers or torch_2_0 is used attention block does not need
+        # to be in float32 which can save lots of memory
+        if use_torch_2_0_or_xformers:
+            self.vae.post_quant_conv.to(dtype)
+            self.vae.decoder.conv_in.to(dtype)
+            self.vae.decoder.mid_block.to(dtype)
+
+    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
+    def get_guidance_scale_embedding(
+        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
+    ) -> torch.Tensor:
+        """
+        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
+
+        Args:
+            w (`torch.Tensor`):
+                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
+            embedding_dim (`int`, *optional*, defaults to 512):
+                Dimension of the embeddings to generate.
+            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
+                Data type of the generated embeddings.
+
+        Returns:
+            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
+        """
+        assert len(w.shape) == 1
+        w = w * 1000.0
+
+        half_dim = embedding_dim // 2
+        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
+        emb = w.to(dtype)[:, None] * emb[None, :]
+        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
+        if embedding_dim % 2 == 1:  # zero pad
+            emb = torch.nn.functional.pad(emb, (0, 1))
+        assert emb.shape == (w.shape[0], embedding_dim)
+        return emb
+
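+    # Worked example (illustrative): for w = torch.tensor([7.5]) and embedding_dim = 256,
+    # w is scaled to 7500.0, half_dim = 128 log-spaced frequencies are built, and the
+    # concatenated sin/cos features yield an embedding of shape (1, 256).
+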
+    @property
+    def guidance_scale(self):
+        return self._guidance_scale
+
+    @property
+    def guidance_rescale(self):
+        return self._guidance_rescale
+
+    @property
+    def clip_skip(self):
+        return self._clip_skip
+
+    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+    # corresponds to doing no classifier free guidance.
+    @property
+    def do_classifier_free_guidance(self):
+        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
+
+    @property
+    def cross_attention_kwargs(self):
+        return self._cross_attention_kwargs
+
+    @property
+    def denoising_end(self):
+        return self._denoising_end
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @property
+    def interrupt(self):
+        return self._interrupt
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Union[str, List[str]] = None,
+        prompt_2: Optional[Union[str, List[str]]] = None,
+        num_frames: int = 16,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        num_inference_steps: int = 50,
+        timesteps: List[int] = None,
+        sigmas: List[float] = None,
+        denoising_end: Optional[float] = None,
+        guidance_scale: float = 5.0,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        negative_prompt_2: Optional[Union[str, List[str]]] = None,
+        num_videos_per_prompt: Optional[int] = 1,
+        eta: float = 0.0,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
+        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        guidance_rescale: float = 0.0,
+        original_size: Optional[Tuple[int, int]] = None,
+        crops_coords_top_left: Tuple[int, int] = (0, 0),
+        target_size: Optional[Tuple[int, int]] = None,
+        negative_original_size: Optional[Tuple[int, int]] = None,
+        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
+        negative_target_size: Optional[Tuple[int, int]] = None,
+        clip_skip: Optional[int] = None,
+        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+    ):
+ r"""
893
+ Function invoked when calling the pipeline for generation.
894
+
895
+ Args:
896
+ prompt (`str` or `List[str]`, *optional*):
897
+ The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`.
898
+ instead.
899
+ prompt_2 (`str` or `List[str]`, *optional*):
900
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
901
+ used in both text-encoders
902
+ num_frames:
903
+ The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds
904
+ amounts to 2 seconds of video.
905
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
906
+ The height in pixels of the generated video. This is set to 1024 by default for the best results.
907
+ Anything below 512 pixels won't work well for
908
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
909
+ and checkpoints that are not specifically fine-tuned on low resolutions.
910
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
911
+ The width in pixels of the generated video. This is set to 1024 by default for the best results.
912
+ Anything below 512 pixels won't work well for
913
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
914
+ and checkpoints that are not specifically fine-tuned on low resolutions.
915
+ num_inference_steps (`int`, *optional*, defaults to 50):
916
+ The number of denoising steps. More denoising steps usually lead to a higher quality video at the
917
+ expense of slower inference.
918
+ timesteps (`List[int]`, *optional*):
919
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
920
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
921
+ passed will be used. Must be in descending order.
922
+ sigmas (`List[float]`, *optional*):
923
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
924
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
925
+ will be used.
926
+ denoising_end (`float`, *optional*):
927
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
928
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
929
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
930
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
931
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
932
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
933
+ guidance_scale (`float`, *optional*, defaults to 5.0):
934
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
935
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
936
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
937
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
938
+ usually at the expense of lower video quality.
939
+ negative_prompt (`str` or `List[str]`, *optional*):
940
+ The prompt or prompts not to guide the video generation. If not defined, one has to pass
941
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
942
+ less than `1`).
943
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
944
+ The prompt or prompts not to guide the video generation to be sent to `tokenizer_2` and
945
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
946
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
947
+ The number of videos to generate per prompt.
948
+ eta (`float`, *optional*, defaults to 0.0):
949
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
950
+ [`schedulers.DDIMScheduler`], will be ignored for others.
951
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
952
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
953
+ to make generation deterministic.
954
+ latents (`torch.Tensor`, *optional*):
955
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
956
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
957
+ tensor will ge generated by sampling using the supplied random `generator`.
958
+ prompt_embeds (`torch.Tensor`, *optional*):
959
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
960
+ provided, text embeddings will be generated from `prompt` input argument.
961
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
962
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
963
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
964
+ argument.
965
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
966
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
967
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
968
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
969
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
970
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
971
+ input argument.
972
+            ip_adapter_image (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
+            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
+                Pre-generated image embeddings for IP-Adapter. If not provided, embeddings are computed from the
+                `ip_adapter_image` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated video. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] instead
+                of a plain tuple.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            guidance_rescale (`float`, *optional*, defaults to 0.0):
+                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
+                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
+                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
+                Guidance rescale factor should fix overexposure when using zero terminal SNR.
+            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
+                explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                For most cases, `target_size` should be set to the desired height and width of the generated image. If
+                not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
+                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
+                micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
+                micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                To negatively condition the generation process based on a target image resolution. For most cases, it
+                should be the same as `target_size`. Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in
+                the `._callback_tensor_inputs` attribute of your pipeline class.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
+                returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
+        """
+
+        # 0. Default height and width to unet
+        height = height or self.default_sample_size * self.vae_scale_factor
+        width = width or self.default_sample_size * self.vae_scale_factor
+
+        num_videos_per_prompt = 1  # hard-coded: the pipeline currently generates a single video per prompt
+
+        original_size = original_size or (height, width)
+        target_size = target_size or (height, width)
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            height,
+            width,
+            negative_prompt,
+            negative_prompt_2,
+            prompt_embeds,
+            negative_prompt_embeds,
+            pooled_prompt_embeds,
+            negative_pooled_prompt_embeds,
+            callback_on_step_end_tensor_inputs,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._guidance_rescale = guidance_rescale
+        self._clip_skip = clip_skip
+        self._cross_attention_kwargs = cross_attention_kwargs
+        self._denoising_end = denoising_end
+        self._interrupt = False
+
+        # 2. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self._execution_device
+
+        # 3. Encode input prompt
+        lora_scale = (
+            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+        )
+
+        (
+            prompt_embeds,
+            negative_prompt_embeds,
+            pooled_prompt_embeds,
+            negative_pooled_prompt_embeds,
+        ) = self.encode_prompt(
+            prompt=prompt,
+            prompt_2=prompt_2,
+            device=device,
+            num_videos_per_prompt=num_videos_per_prompt,
+            do_classifier_free_guidance=self.do_classifier_free_guidance,
+            negative_prompt=negative_prompt,
+            negative_prompt_2=negative_prompt_2,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            lora_scale=lora_scale,
+            clip_skip=self.clip_skip,
+        )
+
+        # 4. Prepare timesteps
+        timesteps, num_inference_steps = retrieve_timesteps(
+            self.scheduler, num_inference_steps, device, timesteps, sigmas
+        )
+
+        # 5. Prepare latent variables
+        num_channels_latents = self.unet.config.in_channels
+        latents = self.prepare_latents(
+            batch_size * num_videos_per_prompt,
+            num_channels_latents,
+            num_frames,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+
+        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+        # 7. Prepare added time ids & embeddings
+        add_text_embeds = pooled_prompt_embeds
+        if self.text_encoder_2 is None:
+            text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
+        else:
+            text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
+
+        add_time_ids = self._get_add_time_ids(
+            original_size,
+            crops_coords_top_left,
+            target_size,
+            dtype=prompt_embeds.dtype,
+            text_encoder_projection_dim=text_encoder_projection_dim,
+        )
+        if negative_original_size is not None and negative_target_size is not None:
+            negative_add_time_ids = self._get_add_time_ids(
+                negative_original_size,
+                negative_crops_coords_top_left,
+                negative_target_size,
+                dtype=prompt_embeds.dtype,
+                text_encoder_projection_dim=text_encoder_projection_dim,
+            )
+        else:
+            negative_add_time_ids = add_time_ids
+
+        if self.do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+            add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
+            add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
+
+        # duplicate text embeddings for each frame, since frames are processed in the batch dimension
+        prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0)
+
+        prompt_embeds = prompt_embeds.to(device)
+        add_text_embeds = add_text_embeds.to(device)
+        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_videos_per_prompt, 1)
+
+        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+            image_embeds = self.prepare_ip_adapter_image_embeds(
+                ip_adapter_image,
+                ip_adapter_image_embeds,
+                device,
+                batch_size * num_videos_per_prompt,
+                self.do_classifier_free_guidance,
+            )
+
+        # 7.1 Apply denoising_end
+        if (
+            self.denoising_end is not None
+            and isinstance(self.denoising_end, float)
+            and self.denoising_end > 0
+            and self.denoising_end < 1
+        ):
+            discrete_timestep_cutoff = int(
+                round(
+                    self.scheduler.config.num_train_timesteps
+                    - (self.denoising_end * self.scheduler.config.num_train_timesteps)
+                )
+            )
+            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
+            timesteps = timesteps[:num_inference_steps]
+
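+        # Worked example (illustrative): with denoising_end = 0.8 and
+        # num_train_timesteps = 1000, the cutoff is round(1000 - 0.8 * 1000) = 200, so only
+        # timesteps >= 200 are kept and roughly the first 80% of the steps are run.
+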
+        # 8. Optionally get Guidance Scale Embedding
+        timestep_cond = None
+        if self.unet.config.time_cond_proj_dim is not None:
+            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_videos_per_prompt)
+            timestep_cond = self.get_guidance_scale_embedding(
+                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+            ).to(device=device, dtype=latents.dtype)
+
+        num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1
+        for free_init_iter in range(num_free_init_iters):
+            if self.free_init_enabled:
+                latents, timesteps = self._apply_free_init(
+                    latents, free_init_iter, num_inference_steps, device, latents.dtype, generator
+                )
+
+            self._num_timesteps = len(timesteps)
+
+            # 9. Denoising loop
+            with self.progress_bar(total=self._num_timesteps) as progress_bar:
+                for i, t in enumerate(timesteps):
+                    if self.interrupt:
+                        continue
+
+                    # expand the latents if we are doing classifier free guidance
+                    latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+
+                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                    # predict the noise residual
+                    added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+                    if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+                        added_cond_kwargs["image_embeds"] = image_embeds
+
+                    noise_pred = self.unet(
+                        latent_model_input,
+                        t,
+                        encoder_hidden_states=prompt_embeds,
+                        timestep_cond=timestep_cond,
+                        cross_attention_kwargs=self.cross_attention_kwargs,
+                        added_cond_kwargs=added_cond_kwargs,
+                        return_dict=False,
+                    )[0]
+
+                    # perform guidance
+                    if self.do_classifier_free_guidance:
+                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                        noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                    if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
+                        # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+                        noise_pred = rescale_noise_cfg(
+                            noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale
+                        )
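+                        # Sketch of what `rescale_noise_cfg` computes (per section 3.4 of
+                        # the paper): the guided prediction is rescaled toward the std of
+                        # the text-conditioned prediction and blended back, roughly:
+                        #     rescaled = noise_pred * (noise_pred_text.std() / noise_pred.std())
+                        #     noise_pred = guidance_rescale * rescaled + (1 - guidance_rescale) * noise_pred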
+
+                    # compute the previous noisy sample x_t -> x_t-1
+                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+                    if callback_on_step_end is not None:
+                        callback_kwargs = {}
+                        for k in callback_on_step_end_tensor_inputs:
+                            callback_kwargs[k] = locals()[k]
+                        callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                        latents = callback_outputs.pop("latents", latents)
+                        prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                        negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+                        add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
+                        negative_pooled_prompt_embeds = callback_outputs.pop(
+                            "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
+                        )
+                        add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
+                        negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
+
+                    progress_bar.update()
+
+        # make sure the VAE is in float32 mode, as it overflows in float16
+        needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+
+        if needs_upcasting:
+            self.upcast_vae()
+            latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
+
+        # 10. Post processing
+        if output_type == "latent":
+            video = latents
+        else:
+            video_tensor = self.decode_latents(latents)
+            video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type)
+
+        # cast back to fp16 if needed
+        if needs_upcasting:
+            self.vae.to(dtype=torch.float16)
+
+        # 11. Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (video,)
+
+        return AnimateDiffPipelineOutput(frames=video)
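+
+# Illustrative end-to-end usage sketch; the model ids below are assumptions and may differ
+# from this file's EXAMPLE_DOC_STRING:
+#
+#     import torch
+#     from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler, MotionAdapter
+#     from diffusers.utils import export_to_gif
+#
+#     adapter = MotionAdapter.from_pretrained(
+#         "guoyww/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16
+#     )
+#     pipe = AnimateDiffSDXLPipeline.from_pretrained(
+#         "stabilityai/stable-diffusion-xl-base-1.0", motion_adapter=adapter, torch_dtype=torch.float16
+#     ).to("cuda")
+#     pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+#
+#     frames = pipe(prompt="a panda surfing a wave, high quality", num_frames=16).frames[0]
+#     export_to_gif(frames, "animation.gif")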