diffusers 0.27.0__py3-none-any.whl → 0.32.2__py3-none-any.whl

Files changed (445)
  1. diffusers/__init__.py +233 -6
  2. diffusers/callbacks.py +209 -0
  3. diffusers/commands/env.py +102 -6
  4. diffusers/configuration_utils.py +45 -16
  5. diffusers/dependency_versions_table.py +4 -3
  6. diffusers/image_processor.py +434 -110
  7. diffusers/loaders/__init__.py +42 -9
  8. diffusers/loaders/ip_adapter.py +626 -36
  9. diffusers/loaders/lora_base.py +900 -0
  10. diffusers/loaders/lora_conversion_utils.py +991 -125
  11. diffusers/loaders/lora_pipeline.py +3812 -0
  12. diffusers/loaders/peft.py +571 -7
  13. diffusers/loaders/single_file.py +405 -173
  14. diffusers/loaders/single_file_model.py +385 -0
  15. diffusers/loaders/single_file_utils.py +1783 -713
  16. diffusers/loaders/textual_inversion.py +41 -23
  17. diffusers/loaders/transformer_flux.py +181 -0
  18. diffusers/loaders/transformer_sd3.py +89 -0
  19. diffusers/loaders/unet.py +464 -540
  20. diffusers/loaders/unet_loader_utils.py +163 -0
  21. diffusers/models/__init__.py +76 -7
  22. diffusers/models/activations.py +65 -10
  23. diffusers/models/adapter.py +53 -53
  24. diffusers/models/attention.py +605 -18
  25. diffusers/models/attention_flax.py +1 -1
  26. diffusers/models/attention_processor.py +4304 -687
  27. diffusers/models/autoencoders/__init__.py +8 -0
  28. diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
  29. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  30. diffusers/models/autoencoders/autoencoder_kl.py +110 -28
  31. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  32. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
  33. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  34. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  35. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  36. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
  37. diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
  38. diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
  39. diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
  40. diffusers/models/autoencoders/vae.py +41 -29
  41. diffusers/models/autoencoders/vq_model.py +182 -0
  42. diffusers/models/controlnet.py +47 -800
  43. diffusers/models/controlnet_flux.py +70 -0
  44. diffusers/models/controlnet_sd3.py +68 -0
  45. diffusers/models/controlnet_sparsectrl.py +116 -0
  46. diffusers/models/controlnets/__init__.py +23 -0
  47. diffusers/models/controlnets/controlnet.py +872 -0
  48. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
  49. diffusers/models/controlnets/controlnet_flux.py +536 -0
  50. diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
  51. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  52. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  53. diffusers/models/controlnets/controlnet_union.py +832 -0
  54. diffusers/models/controlnets/controlnet_xs.py +1946 -0
  55. diffusers/models/controlnets/multicontrolnet.py +183 -0
  56. diffusers/models/downsampling.py +85 -18
  57. diffusers/models/embeddings.py +1856 -158
  58. diffusers/models/embeddings_flax.py +23 -9
  59. diffusers/models/model_loading_utils.py +480 -0
  60. diffusers/models/modeling_flax_pytorch_utils.py +2 -1
  61. diffusers/models/modeling_flax_utils.py +2 -7
  62. diffusers/models/modeling_outputs.py +14 -0
  63. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  64. diffusers/models/modeling_utils.py +611 -146
  65. diffusers/models/normalization.py +361 -20
  66. diffusers/models/resnet.py +18 -23
  67. diffusers/models/transformers/__init__.py +16 -0
  68. diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
  69. diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
  70. diffusers/models/transformers/dit_transformer_2d.py +240 -0
  71. diffusers/models/transformers/dual_transformer_2d.py +9 -8
  72. diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
  73. diffusers/models/transformers/latte_transformer_3d.py +327 -0
  74. diffusers/models/transformers/lumina_nextdit2d.py +340 -0
  75. diffusers/models/transformers/pixart_transformer_2d.py +445 -0
  76. diffusers/models/transformers/prior_transformer.py +13 -13
  77. diffusers/models/transformers/sana_transformer.py +488 -0
  78. diffusers/models/transformers/stable_audio_transformer.py +458 -0
  79. diffusers/models/transformers/t5_film_transformer.py +17 -19
  80. diffusers/models/transformers/transformer_2d.py +297 -187
  81. diffusers/models/transformers/transformer_allegro.py +422 -0
  82. diffusers/models/transformers/transformer_cogview3plus.py +386 -0
  83. diffusers/models/transformers/transformer_flux.py +593 -0
  84. diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
  85. diffusers/models/transformers/transformer_ltx.py +469 -0
  86. diffusers/models/transformers/transformer_mochi.py +499 -0
  87. diffusers/models/transformers/transformer_sd3.py +461 -0
  88. diffusers/models/transformers/transformer_temporal.py +21 -19
  89. diffusers/models/unets/unet_1d.py +8 -8
  90. diffusers/models/unets/unet_1d_blocks.py +31 -31
  91. diffusers/models/unets/unet_2d.py +17 -10
  92. diffusers/models/unets/unet_2d_blocks.py +225 -149
  93. diffusers/models/unets/unet_2d_condition.py +50 -53
  94. diffusers/models/unets/unet_2d_condition_flax.py +6 -5
  95. diffusers/models/unets/unet_3d_blocks.py +192 -1057
  96. diffusers/models/unets/unet_3d_condition.py +22 -27
  97. diffusers/models/unets/unet_i2vgen_xl.py +22 -18
  98. diffusers/models/unets/unet_kandinsky3.py +2 -2
  99. diffusers/models/unets/unet_motion_model.py +1413 -89
  100. diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
  101. diffusers/models/unets/unet_stable_cascade.py +19 -18
  102. diffusers/models/unets/uvit_2d.py +2 -2
  103. diffusers/models/upsampling.py +95 -26
  104. diffusers/models/vq_model.py +12 -164
  105. diffusers/optimization.py +1 -1
  106. diffusers/pipelines/__init__.py +202 -3
  107. diffusers/pipelines/allegro/__init__.py +48 -0
  108. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  109. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  110. diffusers/pipelines/amused/pipeline_amused.py +12 -12
  111. diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
  112. diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
  113. diffusers/pipelines/animatediff/__init__.py +8 -0
  114. diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
  115. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
  116. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
  117. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
  118. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
  119. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
  120. diffusers/pipelines/animatediff/pipeline_output.py +3 -2
  121. diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
  122. diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
  123. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
  124. diffusers/pipelines/aura_flow/__init__.py +48 -0
  125. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
  126. diffusers/pipelines/auto_pipeline.py +196 -28
  127. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  128. diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
  129. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
  130. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
  131. diffusers/pipelines/cogvideo/__init__.py +54 -0
  132. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
  133. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
  134. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
  135. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
  136. diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
  137. diffusers/pipelines/cogview3/__init__.py +47 -0
  138. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
  139. diffusers/pipelines/cogview3/pipeline_output.py +21 -0
  140. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
  141. diffusers/pipelines/controlnet/__init__.py +86 -80
  142. diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
  143. diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
  144. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
  145. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
  146. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
  147. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
  148. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
  149. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
  150. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  151. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  152. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  153. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  154. diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
  155. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
  156. diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
  157. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
  158. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
  159. diffusers/pipelines/controlnet_xs/__init__.py +68 -0
  160. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
  161. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
  162. diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
  163. diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
  164. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
  165. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
  166. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
  167. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
  168. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
  169. diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
  170. diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
  171. diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
  172. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
  173. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
  174. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
  175. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
  176. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
  177. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
  178. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
  179. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
  180. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
  181. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
  182. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
  183. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
  184. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
  185. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
  186. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
  187. diffusers/pipelines/dit/pipeline_dit.py +7 -4
  188. diffusers/pipelines/flux/__init__.py +69 -0
  189. diffusers/pipelines/flux/modeling_flux.py +47 -0
  190. diffusers/pipelines/flux/pipeline_flux.py +957 -0
  191. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  192. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  193. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  194. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
  195. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
  196. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
  197. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  198. diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
  199. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
  200. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  201. diffusers/pipelines/flux/pipeline_output.py +37 -0
  202. diffusers/pipelines/free_init_utils.py +41 -38
  203. diffusers/pipelines/free_noise_utils.py +596 -0
  204. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  205. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  206. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  207. diffusers/pipelines/hunyuandit/__init__.py +48 -0
  208. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
  209. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
  210. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
  211. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
  212. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
  213. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
  214. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
  215. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
  216. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
  217. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
  218. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
  219. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
  220. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
  221. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
  222. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
  223. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
  224. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
  225. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
  226. diffusers/pipelines/kolors/__init__.py +54 -0
  227. diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
  228. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
  229. diffusers/pipelines/kolors/pipeline_output.py +21 -0
  230. diffusers/pipelines/kolors/text_encoder.py +889 -0
  231. diffusers/pipelines/kolors/tokenizer.py +338 -0
  232. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
  233. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
  234. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
  235. diffusers/pipelines/latte/__init__.py +48 -0
  236. diffusers/pipelines/latte/pipeline_latte.py +881 -0
  237. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
  238. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
  239. diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
  240. diffusers/pipelines/ltx/__init__.py +50 -0
  241. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  242. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  243. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  244. diffusers/pipelines/lumina/__init__.py +48 -0
  245. diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
  246. diffusers/pipelines/marigold/__init__.py +50 -0
  247. diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
  248. diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
  249. diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
  250. diffusers/pipelines/mochi/__init__.py +48 -0
  251. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  252. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  253. diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
  254. diffusers/pipelines/pag/__init__.py +80 -0
  255. diffusers/pipelines/pag/pag_utils.py +243 -0
  256. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
  257. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
  258. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
  259. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
  260. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
  261. diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
  262. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
  263. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  264. diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
  265. diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
  266. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  267. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
  268. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
  269. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  270. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
  271. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
  272. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
  273. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
  274. diffusers/pipelines/pia/pipeline_pia.py +74 -164
  275. diffusers/pipelines/pipeline_flax_utils.py +5 -10
  276. diffusers/pipelines/pipeline_loading_utils.py +515 -53
  277. diffusers/pipelines/pipeline_utils.py +411 -222
  278. diffusers/pipelines/pixart_alpha/__init__.py +8 -1
  279. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
  280. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
  281. diffusers/pipelines/sana/__init__.py +47 -0
  282. diffusers/pipelines/sana/pipeline_output.py +21 -0
  283. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  284. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
  285. diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
  286. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
  287. diffusers/pipelines/shap_e/renderer.py +1 -1
  288. diffusers/pipelines/stable_audio/__init__.py +50 -0
  289. diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
  290. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
  291. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
  292. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
  293. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
  294. diffusers/pipelines/stable_diffusion/__init__.py +0 -1
  295. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
  296. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  297. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
  298. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
  299. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
  300. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
  301. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
  302. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
  303. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
  304. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
  305. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
  306. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
  307. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
  308. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
  309. diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
  310. diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
  311. diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
  312. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
  313. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
  314. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
  315. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
  316. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
  317. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
  318. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
  319. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
  320. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
  321. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
  322. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
  323. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
  324. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  325. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
  326. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
  327. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
  328. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
  329. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
  330. diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
  331. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
  332. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
  333. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
  334. diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
  335. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
  336. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
  337. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
  338. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
  339. diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
  340. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
  341. diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
  342. diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
  343. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
  344. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
  345. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
  346. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
  347. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
  348. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
  349. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
  350. diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
  351. diffusers/quantizers/auto.py +139 -0
  352. diffusers/quantizers/base.py +233 -0
  353. diffusers/quantizers/bitsandbytes/__init__.py +2 -0
  354. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
  355. diffusers/quantizers/bitsandbytes/utils.py +306 -0
  356. diffusers/quantizers/gguf/__init__.py +1 -0
  357. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  358. diffusers/quantizers/gguf/utils.py +456 -0
  359. diffusers/quantizers/quantization_config.py +669 -0
  360. diffusers/quantizers/torchao/__init__.py +15 -0
  361. diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
  362. diffusers/schedulers/__init__.py +12 -2
  363. diffusers/schedulers/deprecated/__init__.py +1 -1
  364. diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
  365. diffusers/schedulers/scheduling_amused.py +5 -5
  366. diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
  367. diffusers/schedulers/scheduling_consistency_models.py +23 -25
  368. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
  369. diffusers/schedulers/scheduling_ddim.py +27 -26
  370. diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
  371. diffusers/schedulers/scheduling_ddim_flax.py +2 -1
  372. diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
  373. diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
  374. diffusers/schedulers/scheduling_ddpm.py +27 -30
  375. diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
  376. diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
  377. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
  378. diffusers/schedulers/scheduling_deis_multistep.py +150 -50
  379. diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
  380. diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
  381. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
  382. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
  383. diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
  384. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
  385. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
  386. diffusers/schedulers/scheduling_edm_euler.py +62 -39
  387. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
  388. diffusers/schedulers/scheduling_euler_discrete.py +255 -74
  389. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
  390. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
  391. diffusers/schedulers/scheduling_heun_discrete.py +174 -46
  392. diffusers/schedulers/scheduling_ipndm.py +9 -9
  393. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
  394. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
  395. diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
  396. diffusers/schedulers/scheduling_lcm.py +23 -29
  397. diffusers/schedulers/scheduling_lms_discrete.py +105 -28
  398. diffusers/schedulers/scheduling_pndm.py +20 -20
  399. diffusers/schedulers/scheduling_repaint.py +21 -21
  400. diffusers/schedulers/scheduling_sasolver.py +157 -60
  401. diffusers/schedulers/scheduling_sde_ve.py +19 -19
  402. diffusers/schedulers/scheduling_tcd.py +41 -36
  403. diffusers/schedulers/scheduling_unclip.py +19 -16
  404. diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
  405. diffusers/schedulers/scheduling_utils.py +12 -5
  406. diffusers/schedulers/scheduling_utils_flax.py +1 -3
  407. diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
  408. diffusers/training_utils.py +214 -30
  409. diffusers/utils/__init__.py +17 -1
  410. diffusers/utils/constants.py +3 -0
  411. diffusers/utils/doc_utils.py +1 -0
  412. diffusers/utils/dummy_pt_objects.py +592 -7
  413. diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
  414. diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
  415. diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
  416. diffusers/utils/dynamic_modules_utils.py +34 -29
  417. diffusers/utils/export_utils.py +50 -6
  418. diffusers/utils/hub_utils.py +131 -17
  419. diffusers/utils/import_utils.py +210 -8
  420. diffusers/utils/loading_utils.py +118 -5
  421. diffusers/utils/logging.py +4 -2
  422. diffusers/utils/peft_utils.py +37 -7
  423. diffusers/utils/state_dict_utils.py +13 -2
  424. diffusers/utils/testing_utils.py +193 -11
  425. diffusers/utils/torch_utils.py +4 -0
  426. diffusers/video_processor.py +113 -0
  427. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
  428. diffusers-0.32.2.dist-info/RECORD +550 -0
  429. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
  430. diffusers/loaders/autoencoder.py +0 -146
  431. diffusers/loaders/controlnet.py +0 -136
  432. diffusers/loaders/lora.py +0 -1349
  433. diffusers/models/prior_transformer.py +0 -12
  434. diffusers/models/t5_film_transformer.py +0 -70
  435. diffusers/models/transformer_2d.py +0 -25
  436. diffusers/models/transformer_temporal.py +0 -34
  437. diffusers/models/unet_1d.py +0 -26
  438. diffusers/models/unet_1d_blocks.py +0 -203
  439. diffusers/models/unet_2d.py +0 -27
  440. diffusers/models/unet_2d_blocks.py +0 -375
  441. diffusers/models/unet_2d_condition.py +0 -25
  442. diffusers-0.27.0.dist-info/RECORD +0 -399
  443. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
  444. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
  445. {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py
@@ -0,0 +1,851 @@
+ # Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import inspect
+ import math
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ import torch
+ from PIL import Image
+ from transformers import T5EncoderModel, T5Tokenizer
+
+ from ...callbacks import MultiPipelineCallbacks, PipelineCallback
+ from ...loaders import CogVideoXLoraLoaderMixin
+ from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
+ from ...models.embeddings import get_3d_rotary_pos_embed
+ from ...pipelines.pipeline_utils import DiffusionPipeline
+ from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
+ from ...utils import logging, replace_example_docstring
+ from ...utils.torch_utils import randn_tensor
+ from ...video_processor import VideoProcessor
+ from .pipeline_output import CogVideoXPipelineOutput
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ EXAMPLE_DOC_STRING = """
+     Examples:
+         ```python
+         >>> import torch
+         >>> from diffusers import CogVideoXDPMScheduler, CogVideoXVideoToVideoPipeline
+         >>> from diffusers.utils import export_to_video, load_video
+
+         >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"
+         >>> pipe = CogVideoXVideoToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
+         >>> pipe.to("cuda")
+         >>> pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config)
+
+         >>> input_video = load_video(
+         ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hiker.mp4"
+         ... )
+         >>> prompt = (
+         ...     "An astronaut stands triumphantly at the peak of a towering mountain. Panorama of rugged peaks and "
+         ...     "valleys. Very futuristic vibe and animated aesthetic. Highlights of purple and golden colors in "
+         ...     "the scene. The sky looks like an animated/cartoonish dream of galaxies, nebulae, stars, planets, "
+         ...     "moons, but the remainder of the scene is mostly realistic."
+         ... )
+
+         >>> video = pipe(
+         ...     video=input_video, prompt=prompt, strength=0.8, guidance_scale=6, num_inference_steps=50
+         ... ).frames[0]
+         >>> export_to_video(video, "output.mp4", fps=8)
+         ```
+ """
+
+
+ # Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
+ def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
+     tw = tgt_width
+     th = tgt_height
+     h, w = src
+     r = h / w
+     if r > (th / tw):
+         resize_height = th
+         resize_width = int(round(th / h * w))
+     else:
+         resize_width = tw
+         resize_height = int(round(tw / w * h))
+
+     crop_top = int(round((th - resize_height) / 2.0))
+     crop_left = int(round((tw - resize_width) / 2.0))
+
+     return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
+
+
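The helper scales the source grid to fit inside the target extent while preserving aspect ratio, then centers the resulting region. For intuition, a minimal sketch with made-up grid sizes; the function above is assumed to be in scope:

```python
# A tall 60x30 source grid fit into a 45-wide, 30-tall target: the aspect ratio
# is preserved (height capped at 30, width scaled down to 15) and the region is
# centered horizontally in the target grid.
region = get_resize_crop_region_for_grid(src=(60, 30), tgt_width=45, tgt_height=30)
print(region)  # ((0, 15), (30, 30)) -> top-left and bottom-right corners
```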
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+ def retrieve_timesteps(
+     scheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     sigmas: Optional[List[float]] = None,
+     **kwargs,
+ ):
+     r"""
+     Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+     custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+     Args:
+         scheduler (`SchedulerMixin`):
+             The scheduler to get timesteps from.
+         num_inference_steps (`int`):
+             The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+             must be `None`.
+         device (`str` or `torch.device`, *optional*):
+             The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+         timesteps (`List[int]`, *optional*):
+             Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+             `num_inference_steps` and `sigmas` must be `None`.
+         sigmas (`List[float]`, *optional*):
+             Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+             `num_inference_steps` and `timesteps` must be `None`.
+
+     Returns:
+         `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+         second element is the number of inference steps.
+     """
+     if timesteps is not None and sigmas is not None:
+         raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+     if timesteps is not None:
+         accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accepts_timesteps:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" timestep schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     elif sigmas is not None:
+         accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accept_sigmas:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" sigmas schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+     return timesteps, num_inference_steps
+
+
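A minimal usage sketch, assuming `diffusers` is installed and using the stock `DDIMScheduler` (which takes the plain `num_inference_steps` path; whether a scheduler supports custom `timesteps`/`sigmas` is discovered with the same `inspect.signature` check the helper uses):

```python
import inspect

from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
ts, n = retrieve_timesteps(scheduler, num_inference_steps=50, device="cpu")
print(n, ts[:3])  # 50 and the first few timesteps (exact values depend on spacing config)

# DDIM's set_timesteps has no `timesteps` kwarg, so the custom-timesteps path
# would raise a ValueError for this scheduler:
print("timesteps" in inspect.signature(scheduler.set_timesteps).parameters)  # False
```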
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+ def retrieve_latents(
+     encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+ ):
+     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+         return encoder_output.latent_dist.sample(generator)
+     elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+         return encoder_output.latent_dist.mode()
+     elif hasattr(encoder_output, "latents"):
+         return encoder_output.latents
+     else:
+         raise AttributeError("Could not access latents of provided encoder_output")
+
+
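The helper duck-types on the encoder output: a `latent_dist` attribute (as on `vae.encode(...)` outputs) is sampled or collapsed to its mode, while a plain `latents` attribute passes through. A sketch with stand-in objects, since real inputs come from a VAE:

```python
from types import SimpleNamespace

import torch

class FakeDist:
    """Stand-in for a DiagonalGaussianDistribution-style object (illustration only)."""
    def __init__(self, mean):
        self.mean = mean
    def sample(self, generator=None):
        return self.mean + torch.randn(self.mean.shape, generator=generator)
    def mode(self):
        return self.mean

mean = torch.zeros(1, 16, 4, 8, 8)
out = SimpleNamespace(latent_dist=FakeDist(mean))
retrieve_latents(out)                            # stochastic: mean + noise
retrieve_latents(out, sample_mode="argmax")      # deterministic: the mode
retrieve_latents(SimpleNamespace(latents=mean))  # precomputed latents pass through
```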
+ class CogVideoXVideoToVideoPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
+     r"""
+     Pipeline for video-to-video generation using CogVideoX.
+
+     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+     Args:
+         vae ([`AutoencoderKLCogVideoX`]):
+             Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
+         text_encoder ([`T5EncoderModel`]):
+             Frozen text-encoder. CogVideoX uses
+             [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
+             [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
+         tokenizer (`T5Tokenizer`):
+             Tokenizer of class
+             [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
+         transformer ([`CogVideoXTransformer3DModel`]):
+             A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
+         scheduler ([`SchedulerMixin`]):
+             A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
+     """
+
+     _optional_components = []
+     model_cpu_offload_seq = "text_encoder->transformer->vae"
+
+     _callback_tensor_inputs = [
+         "latents",
+         "prompt_embeds",
+         "negative_prompt_embeds",
+     ]
+
+     def __init__(
+         self,
+         tokenizer: T5Tokenizer,
+         text_encoder: T5EncoderModel,
+         vae: AutoencoderKLCogVideoX,
+         transformer: CogVideoXTransformer3DModel,
+         scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
+     ):
+         super().__init__()
+
+         self.register_modules(
+             tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
+         )
+
+         self.vae_scale_factor_spatial = (
+             2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
+         )
+         self.vae_scale_factor_temporal = (
+             self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4
+         )
+         self.vae_scaling_factor_image = (
+             self.vae.config.scaling_factor if hasattr(self, "vae") and self.vae is not None else 0.7
+         )
+
+         self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
+
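The fallback values (8, 4, 0.7) mirror the published CogVideoX VAE; the spatial factor follows directly from the config arithmetic. A sketch, assuming a stock four-stage `block_out_channels`:

```python
block_out_channels = (128, 256, 256, 512)  # assumed CogVideoX VAE config
vae_scale_factor_spatial = 2 ** (len(block_out_channels) - 1)
print(vae_scale_factor_spatial)  # 8: three downsampling stages, each halving H and W
```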
+     # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._get_t5_prompt_embeds
+     def _get_t5_prompt_embeds(
+         self,
+         prompt: Union[str, List[str]] = None,
+         num_videos_per_prompt: int = 1,
+         max_sequence_length: int = 226,
+         device: Optional[torch.device] = None,
+         dtype: Optional[torch.dtype] = None,
+     ):
+         device = device or self._execution_device
+         dtype = dtype or self.text_encoder.dtype
+
+         prompt = [prompt] if isinstance(prompt, str) else prompt
+         batch_size = len(prompt)
+
+         text_inputs = self.tokenizer(
+             prompt,
+             padding="max_length",
+             max_length=max_sequence_length,
+             truncation=True,
+             add_special_tokens=True,
+             return_tensors="pt",
+         )
+         text_input_ids = text_inputs.input_ids
+         untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+
+         if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+             removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
+             logger.warning(
+                 "The following part of your input was truncated because `max_sequence_length` is set to"
+                 f" {max_sequence_length} tokens: {removed_text}"
+             )
+
+         prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
+         prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+
+         # duplicate text embeddings for each generation per prompt, using mps friendly method
+         _, seq_len, _ = prompt_embeds.shape
+         prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
+         prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
+
+         return prompt_embeds
+
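The closing `repeat`/`view` pair is the usual mps-friendly way to duplicate each prompt's embedding `num_videos_per_prompt` times while keeping copies of the same prompt adjacent. A standalone sketch with a small dummy tensor:

```python
import torch

batch_size, seq_len, dim, num_videos_per_prompt = 2, 5, 8, 3
prompt_embeds = torch.randn(batch_size, seq_len, dim)

out = prompt_embeds.repeat(1, num_videos_per_prompt, 1)          # (2, 15, 8)
out = out.view(batch_size * num_videos_per_prompt, seq_len, -1)  # (6, 5, 8)

# Rows 0-2 are copies of prompt 0, rows 3-5 copies of prompt 1.
assert torch.equal(out[0], out[2]) and torch.equal(out[3], prompt_embeds[1])
```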
+     # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.encode_prompt
+     def encode_prompt(
+         self,
+         prompt: Union[str, List[str]],
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         do_classifier_free_guidance: bool = True,
+         num_videos_per_prompt: int = 1,
+         prompt_embeds: Optional[torch.Tensor] = None,
+         negative_prompt_embeds: Optional[torch.Tensor] = None,
+         max_sequence_length: int = 226,
+         device: Optional[torch.device] = None,
+         dtype: Optional[torch.dtype] = None,
+     ):
+         r"""
+         Encodes the prompt into text encoder hidden states.
+
+         Args:
+             prompt (`str` or `List[str]`, *optional*):
+                 prompt to be encoded
+             negative_prompt (`str` or `List[str]`, *optional*):
+                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                 less than `1`).
+             do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
+                 Whether to use classifier free guidance or not.
+             num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                 Number of videos that should be generated per prompt.
+             prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                 provided, text embeddings will be generated from `prompt` input argument.
+             negative_prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                 argument.
+             device: (`torch.device`, *optional*):
+                 torch device
+             dtype: (`torch.dtype`, *optional*):
+                 torch dtype
+         """
+         device = device or self._execution_device
+
+         prompt = [prompt] if isinstance(prompt, str) else prompt
+         if prompt is not None:
+             batch_size = len(prompt)
+         else:
+             batch_size = prompt_embeds.shape[0]
+
+         if prompt_embeds is None:
+             prompt_embeds = self._get_t5_prompt_embeds(
+                 prompt=prompt,
+                 num_videos_per_prompt=num_videos_per_prompt,
+                 max_sequence_length=max_sequence_length,
+                 device=device,
+                 dtype=dtype,
+             )
+
+         if do_classifier_free_guidance and negative_prompt_embeds is None:
+             negative_prompt = negative_prompt or ""
+             negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+
+             if prompt is not None and type(prompt) is not type(negative_prompt):
+                 raise TypeError(
+                     f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                     f" {type(prompt)}."
+                 )
+             elif batch_size != len(negative_prompt):
+                 raise ValueError(
+                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+                     " the batch size of `prompt`."
+                 )
+
+             negative_prompt_embeds = self._get_t5_prompt_embeds(
+                 prompt=negative_prompt,
+                 num_videos_per_prompt=num_videos_per_prompt,
+                 max_sequence_length=max_sequence_length,
+                 device=device,
+                 dtype=dtype,
+             )
+
+         return prompt_embeds, negative_prompt_embeds
+
+     def prepare_latents(
+         self,
+         video: Optional[torch.Tensor] = None,
+         batch_size: int = 1,
+         num_channels_latents: int = 16,
+         height: int = 60,
+         width: int = 90,
+         dtype: Optional[torch.dtype] = None,
+         device: Optional[torch.device] = None,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.Tensor] = None,
+         timestep: Optional[torch.Tensor] = None,
+     ):
+         if isinstance(generator, list) and len(generator) != batch_size:
+             raise ValueError(
+                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+             )
+
+         num_frames = (video.size(2) - 1) // self.vae_scale_factor_temporal + 1 if latents is None else latents.size(1)
+
+         shape = (
+             batch_size,
+             num_frames,
+             num_channels_latents,
+             height // self.vae_scale_factor_spatial,
+             width // self.vae_scale_factor_spatial,
+         )
+
+         if latents is None:
+             if isinstance(generator, list):
+                 init_latents = [
+                     retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator[i]) for i in range(batch_size)
+                 ]
+             else:
+                 init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video]
+
+             init_latents = torch.cat(init_latents, dim=0).to(dtype).permute(0, 2, 1, 3, 4)  # [B, F, C, H, W]
+             init_latents = self.vae_scaling_factor_image * init_latents
+
+             noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+             latents = self.scheduler.add_noise(init_latents, noise, timestep)
+         else:
+             latents = latents.to(device)
+
+             # scale the initial noise by the standard deviation required by the scheduler
+             latents = latents * self.scheduler.init_noise_sigma
+         return latents
+
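To make the shape arithmetic concrete, here is the latent geometry for a 49-frame, 480x720 input, assuming the stock compression factors of 8 (spatial) and 4 (temporal):

```python
batch_size, num_channels_latents = 1, 16
height, width, video_frames = 480, 720, 49
spatial, temporal = 8, 4  # assumed vae_scale_factor_spatial / _temporal

num_frames = (video_frames - 1) // temporal + 1  # 13 latent frames
shape = (batch_size, num_frames, num_channels_latents, height // spatial, width // spatial)
print(shape)  # (1, 13, 16, 60, 90)
```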
+     # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.decode_latents
+     def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
+         latents = latents.permute(0, 2, 1, 3, 4)  # [batch_size, num_channels, num_frames, height, width]
+         latents = 1 / self.vae_scaling_factor_image * latents
+
+         frames = self.vae.decode(latents).sample
+         return frames
+
+     # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.get_timesteps
+     def get_timesteps(self, num_inference_steps, timesteps, strength, device):
+         # get the original timestep using init_timestep
+         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
+
+         t_start = max(num_inference_steps - init_timestep, 0)
+         timesteps = timesteps[t_start * self.scheduler.order :]
+
+         return timesteps, num_inference_steps - t_start
+
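`strength` works by dropping the front of the schedule: with 50 steps and `strength=0.8`, only the last 40 steps run, so the input video is noised to that point and denoised from there. The arithmetic, assuming a first-order scheduler:

```python
num_inference_steps, strength, scheduler_order = 50, 0.8, 1  # order assumed to be 1

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
# timesteps = timesteps[t_start * scheduler_order:]  -> 40 steps remain
print(t_start, num_inference_steps - t_start)  # 10 40
```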
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+     def prepare_extra_step_kwargs(self, generator, eta):
+         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+         # and should be between [0, 1]
+
+         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+         extra_step_kwargs = {}
+         if accepts_eta:
+             extra_step_kwargs["eta"] = eta
+
+         # check if the scheduler accepts generator
+         accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+         if accepts_generator:
+             extra_step_kwargs["generator"] = generator
+         return extra_step_kwargs
+
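The same feature-detection pattern works on any callable: inspect its signature and only pass a kwarg if the parameter exists. A self-contained sketch with a hypothetical `step` function:

```python
import inspect

def step(sample, t, generator=None):  # hypothetical scheduler step: no `eta` param
    return sample

extra = {}
if "eta" in inspect.signature(step).parameters:
    extra["eta"] = 0.0         # skipped here
if "generator" in inspect.signature(step).parameters:
    extra["generator"] = None  # included here
print(extra)  # {'generator': None}
```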
+     def check_inputs(
+         self,
+         prompt,
+         height,
+         width,
+         strength,
+         negative_prompt,
+         callback_on_step_end_tensor_inputs,
+         video=None,
+         latents=None,
+         prompt_embeds=None,
+         negative_prompt_embeds=None,
+     ):
+         if height % 8 != 0 or width % 8 != 0:
+             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+         if strength < 0 or strength > 1:
+             raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
+
+         if callback_on_step_end_tensor_inputs is not None and not all(
+             k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+         ):
+             raise ValueError(
+                 f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+             )
+         if prompt is not None and prompt_embeds is not None:
+             raise ValueError(
+                 f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                 " only forward one of the two."
+             )
+         elif prompt is None and prompt_embeds is None:
+             raise ValueError(
+                 "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+             )
+         elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+         if prompt is not None and negative_prompt_embeds is not None:
+             raise ValueError(
+                 f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
+                 f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+             )
+
+         if negative_prompt is not None and negative_prompt_embeds is not None:
+             raise ValueError(
+                 f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+                 f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+             )
+
+         if prompt_embeds is not None and negative_prompt_embeds is not None:
+             if prompt_embeds.shape != negative_prompt_embeds.shape:
+                 raise ValueError(
+                     "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+                     f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+                     f" {negative_prompt_embeds.shape}."
+                 )
+
+         if video is not None and latents is not None:
+             raise ValueError("Only one of `video` or `latents` should be provided")
+
+     # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.fuse_qkv_projections
+     def fuse_qkv_projections(self) -> None:
+         r"""Enables fused QKV projections."""
+         self.fusing_transformer = True
+         self.transformer.fuse_qkv_projections()
+
+     # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.unfuse_qkv_projections
+     def unfuse_qkv_projections(self) -> None:
+         r"""Disable QKV projection fusion if enabled."""
+         if not self.fusing_transformer:
+             logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
+         else:
+             self.transformer.unfuse_qkv_projections()
+             self.fusing_transformer = False
+
+     # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._prepare_rotary_positional_embeddings
+     def _prepare_rotary_positional_embeddings(
+         self,
+         height: int,
+         width: int,
+         num_frames: int,
+         device: torch.device,
+     ) -> Tuple[torch.Tensor, torch.Tensor]:
+         grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
+         grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
+
+         p = self.transformer.config.patch_size
+         p_t = self.transformer.config.patch_size_t
+
+         base_size_width = self.transformer.config.sample_width // p
+         base_size_height = self.transformer.config.sample_height // p
+
+         if p_t is None:
+             # CogVideoX 1.0
+             grid_crops_coords = get_resize_crop_region_for_grid(
+                 (grid_height, grid_width), base_size_width, base_size_height
+             )
+             freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
+                 embed_dim=self.transformer.config.attention_head_dim,
+                 crops_coords=grid_crops_coords,
+                 grid_size=(grid_height, grid_width),
+                 temporal_size=num_frames,
+                 device=device,
+             )
+         else:
+             # CogVideoX 1.5
+             base_num_frames = (num_frames + p_t - 1) // p_t
+
+             freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
+                 embed_dim=self.transformer.config.attention_head_dim,
+                 crops_coords=None,
+                 grid_size=(grid_height, grid_width),
+                 temporal_size=base_num_frames,
+                 grid_type="slice",
+                 max_size=(base_size_height, base_size_width),
+                 device=device,
+             )
+
+         return freqs_cos, freqs_sin
+
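For a sense of scale, the rotary grid for a 480x720 generation, assuming CogVideoX 1.0 config values (`patch_size=2`, spatial VAE factor 8):

```python
height, width = 480, 720
vae_scale_factor_spatial, patch_size = 8, 2  # assumed config values

grid_height = height // (vae_scale_factor_spatial * patch_size)
grid_width = width // (vae_scale_factor_spatial * patch_size)
print(grid_height, grid_width)  # 30 45: one rotary position per transformer patch
```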
550
+ @property
551
+ def guidance_scale(self):
552
+ return self._guidance_scale
553
+
554
+ @property
555
+ def num_timesteps(self):
556
+ return self._num_timesteps
557
+
558
+ @property
559
+ def attention_kwargs(self):
560
+ return self._attention_kwargs
561
+
562
+ @property
563
+ def interrupt(self):
564
+ return self._interrupt
565
+
566
+ @torch.no_grad()
567
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
568
+ def __call__(
569
+ self,
570
+ video: List[Image.Image] = None,
571
+ prompt: Optional[Union[str, List[str]]] = None,
572
+ negative_prompt: Optional[Union[str, List[str]]] = None,
573
+ height: Optional[int] = None,
574
+ width: Optional[int] = None,
575
+ num_inference_steps: int = 50,
576
+ timesteps: Optional[List[int]] = None,
577
+ strength: float = 0.8,
578
+ guidance_scale: float = 6,
579
+ use_dynamic_cfg: bool = False,
580
+ num_videos_per_prompt: int = 1,
581
+ eta: float = 0.0,
582
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
583
+ latents: Optional[torch.FloatTensor] = None,
584
+ prompt_embeds: Optional[torch.FloatTensor] = None,
585
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
586
+ output_type: str = "pil",
587
+ return_dict: bool = True,
588
+ attention_kwargs: Optional[Dict[str, Any]] = None,
589
+ callback_on_step_end: Optional[
590
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
591
+ ] = None,
592
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
593
+ max_sequence_length: int = 226,
594
+ ) -> Union[CogVideoXPipelineOutput, Tuple]:
+        """
+        Function invoked when calling the pipeline for generation.
+
+        Args:
+            video (`List[PIL.Image.Image]`):
+                The input video to condition the generation on. Must be a list of images/frames of the video.
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`
+                instead.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the video generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            height (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial):
+                The height in pixels of the generated video. This is set to 480 by default for the best results.
+            width (`int`, *optional*, defaults to self.transformer.config.sample_width * self.vae_scale_factor_spatial):
+                The width in pixels of the generated video. This is set to 720 by default for the best results.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
+                expense of slower inference.
+            timesteps (`List[int]`, *optional*):
+                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                passed will be used. Must be in descending order.
+            strength (`float`, *optional*, defaults to 0.8):
+                Indicates how much to transform the reference `video`. Higher strength leads to more differences
+                between the original video and the generated video.
+            guidance_scale (`float`, *optional*, defaults to 6):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. Higher guidance scale encourages the model to generate videos that are closely linked to the text
+                `prompt`, usually at the expense of lower video quality.
+            use_dynamic_cfg (`bool`, *optional*, defaults to `False`):
+                Whether to rescale the guidance scale along a cosine schedule during denoising instead of keeping it
+                fixed.
+            num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                The number of videos to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                to schedulers whose `step` method accepts an `eta` argument, and is ignored otherwise.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated video. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] instead of
+                a plain tuple.
+            attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int`, defaults to `226`):
+                Maximum sequence length in the encoded prompt. Must be consistent with
+                `self.transformer.config.max_text_seq_length`, otherwise results may be poor.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] or `tuple`:
+            [`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] if `return_dict` is True, otherwise a
+            `tuple`. When returning a tuple, the first element is a list with the generated frames.
+        """
+
+        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+        height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
+        width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
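+        # Latents are laid out as [batch, frames, channels, height, width], so dim 1 is the frame count.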
+        num_frames = len(video) if latents is None else latents.size(1)
+
+        num_videos_per_prompt = 1
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt=prompt,
+            height=height,
+            width=width,
+            strength=strength,
+            negative_prompt=negative_prompt,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            video=video,
+            latents=latents,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+        )
+        self._guidance_scale = guidance_scale
+        self._attention_kwargs = attention_kwargs
+        self._interrupt = False
+
+        # 2. Default call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self._execution_device
+
+        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+        # corresponds to doing no classifier free guidance.
+        do_classifier_free_guidance = guidance_scale > 1.0
+
+        # 3. Encode input prompt
+        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+            prompt,
+            negative_prompt,
+            do_classifier_free_guidance,
+            num_videos_per_prompt=num_videos_per_prompt,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            max_sequence_length=max_sequence_length,
+            device=device,
+        )
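+        # Fuse the unconditional and conditional embeddings into one batch so a single
+        # transformer forward pass per step serves both branches of classifier-free guidance.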
+        if do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+
+        # 4. Prepare timesteps
+        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
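+        # `strength` trims the schedule to its tail: roughly the last `strength * num_inference_steps`
+        # steps are kept, so denoising starts from a partially noised copy of the input video.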
+        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device)
+        latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)
+        self._num_timesteps = len(timesteps)
+
+        # 5. Prepare latents
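+        # The causal VAE keeps the first frame and compresses the rest by vae_scale_factor_temporal.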
+        latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
+
+        # For CogVideoX 1.5, the number of latent frames must be divisible by patch_size_t
+        patch_size_t = self.transformer.config.patch_size_t
+        if patch_size_t is not None and latent_frames % patch_size_t != 0:
+            raise ValueError(
+                f"The number of latent frames must be divisible by `{patch_size_t=}` but the given video "
+                f"contains {latent_frames=}, which is not divisible."
+            )
+
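+        # Encode the reference video into VAE latents and noise them to the first kept timestep
+        # (selected by `strength`); pre-supplied `latents` bypass the preprocessing above.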
+        if latents is None:
+            video = self.video_processor.preprocess_video(video, height=height, width=width)
+            video = video.to(device=device, dtype=prompt_embeds.dtype)
+
+        latent_channels = self.transformer.config.in_channels
+        latents = self.prepare_latents(
+            video,
+            batch_size * num_videos_per_prompt,
+            latent_channels,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+            latent_timestep,
+        )
+
+        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+        # 7. Create rotary embeds if required
+        image_rotary_emb = (
+            self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
+            if self.transformer.config.use_rotary_positional_embeddings
+            else None
+        )
+
+        # 8. Denoising loop
+        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            # for DPM-solver++: carries the previous x0 prediction across multistep updates
+            old_pred_original_sample = None
+            for i, t in enumerate(timesteps):
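+                # `self._interrupt` can be set (e.g., by a step-end callback) to cancel the remaining steps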
+                if self.interrupt:
+                    continue
+
+                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+                timestep = t.expand(latent_model_input.shape[0])
+
+                # predict noise model_output
+                noise_pred = self.transformer(
+                    hidden_states=latent_model_input,
+                    encoder_hidden_states=prompt_embeds,
+                    timestep=timestep,
+                    image_rotary_emb=image_rotary_emb,
+                    attention_kwargs=attention_kwargs,
+                    return_dict=False,
+                )[0]
+                noise_pred = noise_pred.float()
+
+                # perform guidance
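+                # with use_dynamic_cfg, the effective guidance weight follows a cosine schedule over the timesteps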
+                if use_dynamic_cfg:
+                    self._guidance_scale = 1 + guidance_scale * (
+                        (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
+                    )
+                if do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                if not isinstance(self.scheduler, CogVideoXDPMScheduler):
+                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+                else:
+                    latents, old_pred_original_sample = self.scheduler.step(
+                        noise_pred,
+                        old_pred_original_sample,
+                        t,
+                        timesteps[i - 1] if i > 0 else None,
+                        latents,
+                        **extra_step_kwargs,
+                        return_dict=False,
+                    )
+                latents = latents.to(prompt_embeds.dtype)
+
+                # call the callback, if provided
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+
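+        # Decode latents back to pixel frames unless the caller asked for raw latents.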
+        if output_type != "latent":
+            video = self.decode_latents(latents)
+            video = self.video_processor.postprocess_video(video=video, output_type=output_type)
+        else:
+            video = latents
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (video,)
+
+        return CogVideoXPipelineOutput(frames=video)
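
For reference, a minimal usage sketch of the video-to-video pipeline this diff adds. The checkpoint name, input file, and dtype are illustrative assumptions; `load_video` and `export_to_video` are helpers from `diffusers.utils`.

```python
import torch

from diffusers import CogVideoXVideoToVideoPipeline
from diffusers.utils import export_to_video, load_video

# Assumed checkpoint for illustration; any compatible CogVideoX checkpoint should work.
pipe = CogVideoXVideoToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
pipe.to("cuda")

input_video = load_video("input.mp4")  # list of PIL.Image frames, as expected by `video`
output = pipe(
    video=input_video,
    prompt="A panda dancing in a snowy forest",
    strength=0.8,            # how far the result may drift from the input video
    guidance_scale=6.0,
    num_inference_steps=50,
)
export_to_video(output.frames[0], "output.mp4", fps=8)
```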