diffusers 0.27.0__py3-none-any.whl → 0.32.2__py3-none-any.whl
- diffusers/__init__.py +233 -6
- diffusers/callbacks.py +209 -0
- diffusers/commands/env.py +102 -6
- diffusers/configuration_utils.py +45 -16
- diffusers/dependency_versions_table.py +4 -3
- diffusers/image_processor.py +434 -110
- diffusers/loaders/__init__.py +42 -9
- diffusers/loaders/ip_adapter.py +626 -36
- diffusers/loaders/lora_base.py +900 -0
- diffusers/loaders/lora_conversion_utils.py +991 -125
- diffusers/loaders/lora_pipeline.py +3812 -0
- diffusers/loaders/peft.py +571 -7
- diffusers/loaders/single_file.py +405 -173
- diffusers/loaders/single_file_model.py +385 -0
- diffusers/loaders/single_file_utils.py +1783 -713
- diffusers/loaders/textual_inversion.py +41 -23
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +464 -540
- diffusers/loaders/unet_loader_utils.py +163 -0
- diffusers/models/__init__.py +76 -7
- diffusers/models/activations.py +65 -10
- diffusers/models/adapter.py +53 -53
- diffusers/models/attention.py +605 -18
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +4304 -687
- diffusers/models/autoencoders/__init__.py +8 -0
- diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +110 -28
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
- diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
- diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
- diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
- diffusers/models/autoencoders/vae.py +41 -29
- diffusers/models/autoencoders/vq_model.py +182 -0
- diffusers/models/controlnet.py +47 -800
- diffusers/models/controlnet_flux.py +70 -0
- diffusers/models/controlnet_sd3.py +68 -0
- diffusers/models/controlnet_sparsectrl.py +116 -0
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/controlnets/controlnet_xs.py +1946 -0
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/downsampling.py +85 -18
- diffusers/models/embeddings.py +1856 -158
- diffusers/models/embeddings_flax.py +23 -9
- diffusers/models/model_loading_utils.py +480 -0
- diffusers/models/modeling_flax_pytorch_utils.py +2 -1
- diffusers/models/modeling_flax_utils.py +2 -7
- diffusers/models/modeling_outputs.py +14 -0
- diffusers/models/modeling_pytorch_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +611 -146
- diffusers/models/normalization.py +361 -20
- diffusers/models/resnet.py +18 -23
- diffusers/models/transformers/__init__.py +16 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
- diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
- diffusers/models/transformers/dit_transformer_2d.py +240 -0
- diffusers/models/transformers/dual_transformer_2d.py +9 -8
- diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
- diffusers/models/transformers/latte_transformer_3d.py +327 -0
- diffusers/models/transformers/lumina_nextdit2d.py +340 -0
- diffusers/models/transformers/pixart_transformer_2d.py +445 -0
- diffusers/models/transformers/prior_transformer.py +13 -13
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +458 -0
- diffusers/models/transformers/t5_film_transformer.py +17 -19
- diffusers/models/transformers/transformer_2d.py +297 -187
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +386 -0
- diffusers/models/transformers/transformer_flux.py +593 -0
- diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +461 -0
- diffusers/models/transformers/transformer_temporal.py +21 -19
- diffusers/models/unets/unet_1d.py +8 -8
- diffusers/models/unets/unet_1d_blocks.py +31 -31
- diffusers/models/unets/unet_2d.py +17 -10
- diffusers/models/unets/unet_2d_blocks.py +225 -149
- diffusers/models/unets/unet_2d_condition.py +50 -53
- diffusers/models/unets/unet_2d_condition_flax.py +6 -5
- diffusers/models/unets/unet_3d_blocks.py +192 -1057
- diffusers/models/unets/unet_3d_condition.py +22 -27
- diffusers/models/unets/unet_i2vgen_xl.py +22 -18
- diffusers/models/unets/unet_kandinsky3.py +2 -2
- diffusers/models/unets/unet_motion_model.py +1413 -89
- diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
- diffusers/models/unets/unet_stable_cascade.py +19 -18
- diffusers/models/unets/uvit_2d.py +2 -2
- diffusers/models/upsampling.py +95 -26
- diffusers/models/vq_model.py +12 -164
- diffusers/optimization.py +1 -1
- diffusers/pipelines/__init__.py +202 -3
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/amused/pipeline_amused.py +12 -12
- diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
- diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
- diffusers/pipelines/animatediff/__init__.py +8 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
- diffusers/pipelines/animatediff/pipeline_output.py +3 -2
- diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
- diffusers/pipelines/aura_flow/__init__.py +48 -0
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
- diffusers/pipelines/auto_pipeline.py +196 -28
- diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
- diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
- diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
- diffusers/pipelines/cogvideo/__init__.py +54 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
- diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
- diffusers/pipelines/cogview3/__init__.py +47 -0
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
- diffusers/pipelines/cogview3/pipeline_output.py +21 -0
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
- diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
- diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
- diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
- diffusers/pipelines/controlnet_xs/__init__.py +68 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
- diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
- diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
- diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
- diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
- diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
- diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
- diffusers/pipelines/dit/pipeline_dit.py +7 -4
- diffusers/pipelines/flux/__init__.py +69 -0
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +957 -0
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +37 -0
- diffusers/pipelines/free_init_utils.py +41 -38
- diffusers/pipelines/free_noise_utils.py +596 -0
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/__init__.py +48 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
- diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
- diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
- diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
- diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
- diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
- diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
- diffusers/pipelines/kolors/__init__.py +54 -0
- diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
- diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
- diffusers/pipelines/kolors/pipeline_output.py +21 -0
- diffusers/pipelines/kolors/text_encoder.py +889 -0
- diffusers/pipelines/kolors/tokenizer.py +338 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
- diffusers/pipelines/latte/__init__.py +48 -0
- diffusers/pipelines/latte/pipeline_latte.py +881 -0
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
- diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/__init__.py +48 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
- diffusers/pipelines/marigold/__init__.py +50 -0
- diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
- diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
- diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
- diffusers/pipelines/pag/__init__.py +80 -0
- diffusers/pipelines/pag/pag_utils.py +243 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
- diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
- diffusers/pipelines/pia/pipeline_pia.py +74 -164
- diffusers/pipelines/pipeline_flax_utils.py +5 -10
- diffusers/pipelines/pipeline_loading_utils.py +515 -53
- diffusers/pipelines/pipeline_utils.py +411 -222
- diffusers/pipelines/pixart_alpha/__init__.py +8 -1
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
- diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
- diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
- diffusers/pipelines/shap_e/renderer.py +1 -1
- diffusers/pipelines/stable_audio/__init__.py +50 -0
- diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
- diffusers/pipelines/stable_diffusion/__init__.py +0 -1
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
- diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
- diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
- diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
- diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
- diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
- diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
- diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
- diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
- diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
- diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
- diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
- diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
- diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
- diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
- diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
- diffusers/quantizers/auto.py +139 -0
- diffusers/quantizers/base.py +233 -0
- diffusers/quantizers/bitsandbytes/__init__.py +2 -0
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
- diffusers/quantizers/bitsandbytes/utils.py +306 -0
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +669 -0
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
- diffusers/schedulers/__init__.py +12 -2
- diffusers/schedulers/deprecated/__init__.py +1 -1
- diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
- diffusers/schedulers/scheduling_amused.py +5 -5
- diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
- diffusers/schedulers/scheduling_consistency_models.py +23 -25
- diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
- diffusers/schedulers/scheduling_ddim.py +27 -26
- diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
- diffusers/schedulers/scheduling_ddim_flax.py +2 -1
- diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
- diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
- diffusers/schedulers/scheduling_ddpm.py +27 -30
- diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
- diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
- diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
- diffusers/schedulers/scheduling_deis_multistep.py +150 -50
- diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
- diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
- diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
- diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
- diffusers/schedulers/scheduling_edm_euler.py +62 -39
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
- diffusers/schedulers/scheduling_euler_discrete.py +255 -74
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
- diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
- diffusers/schedulers/scheduling_heun_discrete.py +174 -46
- diffusers/schedulers/scheduling_ipndm.py +9 -9
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
- diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
- diffusers/schedulers/scheduling_lcm.py +23 -29
- diffusers/schedulers/scheduling_lms_discrete.py +105 -28
- diffusers/schedulers/scheduling_pndm.py +20 -20
- diffusers/schedulers/scheduling_repaint.py +21 -21
- diffusers/schedulers/scheduling_sasolver.py +157 -60
- diffusers/schedulers/scheduling_sde_ve.py +19 -19
- diffusers/schedulers/scheduling_tcd.py +41 -36
- diffusers/schedulers/scheduling_unclip.py +19 -16
- diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
- diffusers/schedulers/scheduling_utils.py +12 -5
- diffusers/schedulers/scheduling_utils_flax.py +1 -3
- diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
- diffusers/training_utils.py +214 -30
- diffusers/utils/__init__.py +17 -1
- diffusers/utils/constants.py +3 -0
- diffusers/utils/doc_utils.py +1 -0
- diffusers/utils/dummy_pt_objects.py +592 -7
- diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
- diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
- diffusers/utils/dynamic_modules_utils.py +34 -29
- diffusers/utils/export_utils.py +50 -6
- diffusers/utils/hub_utils.py +131 -17
- diffusers/utils/import_utils.py +210 -8
- diffusers/utils/loading_utils.py +118 -5
- diffusers/utils/logging.py +4 -2
- diffusers/utils/peft_utils.py +37 -7
- diffusers/utils/state_dict_utils.py +13 -2
- diffusers/utils/testing_utils.py +193 -11
- diffusers/utils/torch_utils.py +4 -0
- diffusers/video_processor.py +113 -0
- {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
- diffusers-0.32.2.dist-info/RECORD +550 -0
- {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
- diffusers/loaders/autoencoder.py +0 -146
- diffusers/loaders/controlnet.py +0 -136
- diffusers/loaders/lora.py +0 -1349
- diffusers/models/prior_transformer.py +0 -12
- diffusers/models/t5_film_transformer.py +0 -70
- diffusers/models/transformer_2d.py +0 -25
- diffusers/models/transformer_temporal.py +0 -34
- diffusers/models/unet_1d.py +0 -26
- diffusers/models/unet_1d_blocks.py +0 -203
- diffusers/models/unet_2d.py +0 -27
- diffusers/models/unet_2d_blocks.py +0 -375
- diffusers/models/unet_2d_condition.py +0 -25
- diffusers-0.27.0.dist-info/RECORD +0 -399
- {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
- {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
- {diffusers-0.27.0.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
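Among the headline additions in this release are the new `diffusers/quantizers` package (bitsandbytes, GGUF and torchao backends), the Flux, SD3 and CogVideoX pipelines, and the split of LoRA loading into `loaders/lora_base.py` and `loaders/lora_pipeline.py`. A minimal sketch of how the new quantizer entry point is used, assuming the `BitsAndBytesConfig` export added alongside `quantizers/quantization_config.py` and the standard FLUX.1-dev checkpoint layout (both taken from the 0.32 documentation rather than anything shown in this diff):

```py
# Illustrative only: quantize just the Flux transformer to 4-bit and build the
# pipeline around it; every name here is assumed from the 0.32 docs, not this diff.
import torch
from diffusers import BitsAndBytesConfig, FluxPipeline, FluxTransformer2DModel

quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # documented memory-saving option; optional
```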
diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py (new file)
@@ -0,0 +1,885 @@
|
|
1
|
+
# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
|
2
|
+
# All rights reserved.
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
|
16
|
+
import inspect
|
17
|
+
import math
|
18
|
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
19
|
+
|
20
|
+
import PIL
|
21
|
+
import torch
|
22
|
+
from transformers import T5EncoderModel, T5Tokenizer
|
23
|
+
|
24
|
+
from ...callbacks import MultiPipelineCallbacks, PipelineCallback
|
25
|
+
from ...image_processor import PipelineImageInput
|
26
|
+
from ...loaders import CogVideoXLoraLoaderMixin
|
27
|
+
from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
|
28
|
+
from ...models.embeddings import get_3d_rotary_pos_embed
|
29
|
+
from ...pipelines.pipeline_utils import DiffusionPipeline
|
30
|
+
from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
|
31
|
+
from ...utils import (
|
32
|
+
logging,
|
33
|
+
replace_example_docstring,
|
34
|
+
)
|
35
|
+
from ...utils.torch_utils import randn_tensor
|
36
|
+
from ...video_processor import VideoProcessor
|
37
|
+
from .pipeline_output import CogVideoXPipelineOutput
|
38
|
+
|
39
|
+
|
40
|
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
41
|
+
|
42
|
+
|
43
|
+
EXAMPLE_DOC_STRING = """
|
44
|
+
Examples:
|
45
|
+
```py
|
46
|
+
>>> import torch
|
47
|
+
>>> from diffusers import CogVideoXImageToVideoPipeline
|
48
|
+
>>> from diffusers.utils import export_to_video, load_image
|
49
|
+
|
50
|
+
>>> pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16)
|
51
|
+
>>> pipe.to("cuda")
|
52
|
+
|
53
|
+
>>> prompt = "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
|
54
|
+
>>> image = load_image(
|
55
|
+
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
|
56
|
+
... )
|
57
|
+
>>> video = pipe(image, prompt, use_dynamic_cfg=True)
|
58
|
+
>>> export_to_video(video.frames[0], "output.mp4", fps=8)
|
59
|
+
```
|
60
|
+
"""
|
61
|
+
|
62
|
+
|
63
|
+
# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
|
64
|
+
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
|
65
|
+
tw = tgt_width
|
66
|
+
th = tgt_height
|
67
|
+
h, w = src
|
68
|
+
r = h / w
|
69
|
+
if r > (th / tw):
|
70
|
+
resize_height = th
|
71
|
+
resize_width = int(round(th / h * w))
|
72
|
+
else:
|
73
|
+
resize_width = tw
|
74
|
+
resize_height = int(round(tw / w * h))
|
75
|
+
|
76
|
+
crop_top = int(round((th - resize_height) / 2.0))
|
77
|
+
crop_left = int(round((tw - resize_width) / 2.0))
|
78
|
+
|
79
|
+
return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
|
80
|
+
|
81
|
+
|
82
|
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
|
83
|
+
def retrieve_timesteps(
|
84
|
+
scheduler,
|
85
|
+
num_inference_steps: Optional[int] = None,
|
86
|
+
device: Optional[Union[str, torch.device]] = None,
|
87
|
+
timesteps: Optional[List[int]] = None,
|
88
|
+
sigmas: Optional[List[float]] = None,
|
89
|
+
**kwargs,
|
90
|
+
):
|
91
|
+
r"""
|
92
|
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
93
|
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
94
|
+
|
95
|
+
Args:
|
96
|
+
scheduler (`SchedulerMixin`):
|
97
|
+
The scheduler to get timesteps from.
|
98
|
+
num_inference_steps (`int`):
|
99
|
+
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
|
100
|
+
must be `None`.
|
101
|
+
device (`str` or `torch.device`, *optional*):
|
102
|
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
103
|
+
timesteps (`List[int]`, *optional*):
|
104
|
+
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
|
105
|
+
`num_inference_steps` and `sigmas` must be `None`.
|
106
|
+
sigmas (`List[float]`, *optional*):
|
107
|
+
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
|
108
|
+
`num_inference_steps` and `timesteps` must be `None`.
|
109
|
+
|
110
|
+
Returns:
|
111
|
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
112
|
+
second element is the number of inference steps.
|
113
|
+
"""
|
114
|
+
if timesteps is not None and sigmas is not None:
|
115
|
+
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
|
116
|
+
if timesteps is not None:
|
117
|
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
118
|
+
if not accepts_timesteps:
|
119
|
+
raise ValueError(
|
120
|
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
121
|
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
122
|
+
)
|
123
|
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
124
|
+
timesteps = scheduler.timesteps
|
125
|
+
num_inference_steps = len(timesteps)
|
126
|
+
elif sigmas is not None:
|
127
|
+
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
128
|
+
if not accept_sigmas:
|
129
|
+
raise ValueError(
|
130
|
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
131
|
+
f" sigmas schedules. Please check whether you are using the correct scheduler."
|
132
|
+
)
|
133
|
+
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
|
134
|
+
timesteps = scheduler.timesteps
|
135
|
+
num_inference_steps = len(timesteps)
|
136
|
+
else:
|
137
|
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
138
|
+
timesteps = scheduler.timesteps
|
139
|
+
return timesteps, num_inference_steps
|
140
|
+
|
141
|
+
|
142
|
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
|
143
|
+
def retrieve_latents(
|
144
|
+
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
|
145
|
+
):
|
146
|
+
if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
|
147
|
+
return encoder_output.latent_dist.sample(generator)
|
148
|
+
elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
|
149
|
+
return encoder_output.latent_dist.mode()
|
150
|
+
elif hasattr(encoder_output, "latents"):
|
151
|
+
return encoder_output.latents
|
152
|
+
else:
|
153
|
+
raise AttributeError("Could not access latents of provided encoder_output")
|
154
|
+
|
155
|
+
|
156
|
+
class CogVideoXImageToVideoPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
|
157
|
+
r"""
|
158
|
+
Pipeline for image-to-video generation using CogVideoX.
|
159
|
+
|
160
|
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
161
|
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
162
|
+
|
163
|
+
Args:
|
164
|
+
vae ([`AutoencoderKL`]):
|
165
|
+
Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
|
166
|
+
text_encoder ([`T5EncoderModel`]):
|
167
|
+
Frozen text-encoder. CogVideoX uses
|
168
|
+
[T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
|
169
|
+
[t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
|
170
|
+
tokenizer (`T5Tokenizer`):
|
171
|
+
Tokenizer of class
|
172
|
+
[T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
|
173
|
+
transformer ([`CogVideoXTransformer3DModel`]):
|
174
|
+
A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
|
175
|
+
scheduler ([`SchedulerMixin`]):
|
176
|
+
A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
|
177
|
+
"""
|
178
|
+
|
179
|
+
_optional_components = []
|
180
|
+
model_cpu_offload_seq = "text_encoder->transformer->vae"
|
181
|
+
|
182
|
+
_callback_tensor_inputs = [
|
183
|
+
"latents",
|
184
|
+
"prompt_embeds",
|
185
|
+
"negative_prompt_embeds",
|
186
|
+
]
|
187
|
+
|
188
|
+
def __init__(
|
189
|
+
self,
|
190
|
+
tokenizer: T5Tokenizer,
|
191
|
+
text_encoder: T5EncoderModel,
|
192
|
+
vae: AutoencoderKLCogVideoX,
|
193
|
+
transformer: CogVideoXTransformer3DModel,
|
194
|
+
scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
|
195
|
+
):
|
196
|
+
super().__init__()
|
197
|
+
|
198
|
+
self.register_modules(
|
199
|
+
tokenizer=tokenizer,
|
200
|
+
text_encoder=text_encoder,
|
201
|
+
vae=vae,
|
202
|
+
transformer=transformer,
|
203
|
+
scheduler=scheduler,
|
204
|
+
)
|
205
|
+
self.vae_scale_factor_spatial = (
|
206
|
+
2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
|
207
|
+
)
|
208
|
+
self.vae_scale_factor_temporal = (
|
209
|
+
self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4
|
210
|
+
)
|
211
|
+
self.vae_scaling_factor_image = (
|
212
|
+
self.vae.config.scaling_factor if hasattr(self, "vae") and self.vae is not None else 0.7
|
213
|
+
)
|
214
|
+
|
215
|
+
self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
|
216
|
+
|
217
|
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._get_t5_prompt_embeds
|
218
|
+
def _get_t5_prompt_embeds(
|
219
|
+
self,
|
220
|
+
prompt: Union[str, List[str]] = None,
|
221
|
+
num_videos_per_prompt: int = 1,
|
222
|
+
max_sequence_length: int = 226,
|
223
|
+
device: Optional[torch.device] = None,
|
224
|
+
dtype: Optional[torch.dtype] = None,
|
225
|
+
):
|
226
|
+
device = device or self._execution_device
|
227
|
+
dtype = dtype or self.text_encoder.dtype
|
228
|
+
|
229
|
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
230
|
+
batch_size = len(prompt)
|
231
|
+
|
232
|
+
text_inputs = self.tokenizer(
|
233
|
+
prompt,
|
234
|
+
padding="max_length",
|
235
|
+
max_length=max_sequence_length,
|
236
|
+
truncation=True,
|
237
|
+
add_special_tokens=True,
|
238
|
+
return_tensors="pt",
|
239
|
+
)
|
240
|
+
text_input_ids = text_inputs.input_ids
|
241
|
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
242
|
+
|
243
|
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
|
244
|
+
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
|
245
|
+
logger.warning(
|
246
|
+
"The following part of your input was truncated because `max_sequence_length` is set to "
|
247
|
+
f" {max_sequence_length} tokens: {removed_text}"
|
248
|
+
)
|
249
|
+
|
250
|
+
prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
|
251
|
+
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
|
252
|
+
|
253
|
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
254
|
+
_, seq_len, _ = prompt_embeds.shape
|
255
|
+
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
|
256
|
+
prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
|
257
|
+
|
258
|
+
return prompt_embeds
|
259
|
+
|
260
|
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.encode_prompt
|
261
|
+
def encode_prompt(
|
262
|
+
self,
|
263
|
+
prompt: Union[str, List[str]],
|
264
|
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
265
|
+
do_classifier_free_guidance: bool = True,
|
266
|
+
num_videos_per_prompt: int = 1,
|
267
|
+
prompt_embeds: Optional[torch.Tensor] = None,
|
268
|
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
269
|
+
max_sequence_length: int = 226,
|
270
|
+
device: Optional[torch.device] = None,
|
271
|
+
dtype: Optional[torch.dtype] = None,
|
272
|
+
):
|
273
|
+
r"""
|
274
|
+
Encodes the prompt into text encoder hidden states.
|
275
|
+
|
276
|
+
Args:
|
277
|
+
prompt (`str` or `List[str]`, *optional*):
|
278
|
+
prompt to be encoded
|
279
|
+
negative_prompt (`str` or `List[str]`, *optional*):
|
280
|
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
281
|
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
282
|
+
less than `1`).
|
283
|
+
do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
|
284
|
+
Whether to use classifier free guidance or not.
|
285
|
+
num_videos_per_prompt (`int`, *optional*, defaults to 1):
|
286
|
+
Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
|
287
|
+
prompt_embeds (`torch.Tensor`, *optional*):
|
288
|
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
289
|
+
provided, text embeddings will be generated from `prompt` input argument.
|
290
|
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
291
|
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
292
|
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
293
|
+
argument.
|
294
|
+
device: (`torch.device`, *optional*):
|
295
|
+
torch device
|
296
|
+
dtype: (`torch.dtype`, *optional*):
|
297
|
+
torch dtype
|
298
|
+
"""
|
299
|
+
device = device or self._execution_device
|
300
|
+
|
301
|
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
302
|
+
if prompt is not None:
|
303
|
+
batch_size = len(prompt)
|
304
|
+
else:
|
305
|
+
batch_size = prompt_embeds.shape[0]
|
306
|
+
|
307
|
+
if prompt_embeds is None:
|
308
|
+
prompt_embeds = self._get_t5_prompt_embeds(
|
309
|
+
prompt=prompt,
|
310
|
+
num_videos_per_prompt=num_videos_per_prompt,
|
311
|
+
max_sequence_length=max_sequence_length,
|
312
|
+
device=device,
|
313
|
+
dtype=dtype,
|
314
|
+
)
|
315
|
+
|
316
|
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
317
|
+
negative_prompt = negative_prompt or ""
|
318
|
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
319
|
+
|
320
|
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
321
|
+
raise TypeError(
|
322
|
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
323
|
+
f" {type(prompt)}."
|
324
|
+
)
|
325
|
+
elif batch_size != len(negative_prompt):
|
326
|
+
raise ValueError(
|
327
|
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
328
|
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
329
|
+
" the batch size of `prompt`."
|
330
|
+
)
|
331
|
+
|
332
|
+
negative_prompt_embeds = self._get_t5_prompt_embeds(
|
333
|
+
prompt=negative_prompt,
|
334
|
+
num_videos_per_prompt=num_videos_per_prompt,
|
335
|
+
max_sequence_length=max_sequence_length,
|
336
|
+
device=device,
|
337
|
+
dtype=dtype,
|
338
|
+
)
|
339
|
+
|
340
|
+
return prompt_embeds, negative_prompt_embeds
|
341
|
+
|
342
|
+
def prepare_latents(
|
343
|
+
self,
|
344
|
+
image: torch.Tensor,
|
345
|
+
batch_size: int = 1,
|
346
|
+
num_channels_latents: int = 16,
|
347
|
+
num_frames: int = 13,
|
348
|
+
height: int = 60,
|
349
|
+
width: int = 90,
|
350
|
+
dtype: Optional[torch.dtype] = None,
|
351
|
+
device: Optional[torch.device] = None,
|
352
|
+
generator: Optional[torch.Generator] = None,
|
353
|
+
latents: Optional[torch.Tensor] = None,
|
354
|
+
):
|
355
|
+
if isinstance(generator, list) and len(generator) != batch_size:
|
356
|
+
raise ValueError(
|
357
|
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
358
|
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
359
|
+
)
|
360
|
+
|
361
|
+
num_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
|
362
|
+
shape = (
|
363
|
+
batch_size,
|
364
|
+
num_frames,
|
365
|
+
num_channels_latents,
|
366
|
+
height // self.vae_scale_factor_spatial,
|
367
|
+
width // self.vae_scale_factor_spatial,
|
368
|
+
)
|
369
|
+
|
370
|
+
# For CogVideoX1.5, the latent should add 1 for padding (Not use)
|
371
|
+
if self.transformer.config.patch_size_t is not None:
|
372
|
+
shape = shape[:1] + (shape[1] + shape[1] % self.transformer.config.patch_size_t,) + shape[2:]
|
373
|
+
|
374
|
+
image = image.unsqueeze(2) # [B, C, F, H, W]
|
375
|
+
|
376
|
+
if isinstance(generator, list):
|
377
|
+
image_latents = [
|
378
|
+
retrieve_latents(self.vae.encode(image[i].unsqueeze(0)), generator[i]) for i in range(batch_size)
|
379
|
+
]
|
380
|
+
else:
|
381
|
+
image_latents = [retrieve_latents(self.vae.encode(img.unsqueeze(0)), generator) for img in image]
|
382
|
+
|
383
|
+
image_latents = torch.cat(image_latents, dim=0).to(dtype).permute(0, 2, 1, 3, 4) # [B, F, C, H, W]
|
384
|
+
|
385
|
+
if not self.vae.config.invert_scale_latents:
|
386
|
+
image_latents = self.vae_scaling_factor_image * image_latents
|
387
|
+
else:
|
388
|
+
# This is awkward but required because the CogVideoX team forgot to multiply the
|
389
|
+
# scaling factor during training :)
|
390
|
+
image_latents = 1 / self.vae_scaling_factor_image * image_latents
|
391
|
+
|
392
|
+
padding_shape = (
|
393
|
+
batch_size,
|
394
|
+
num_frames - 1,
|
395
|
+
num_channels_latents,
|
396
|
+
height // self.vae_scale_factor_spatial,
|
397
|
+
width // self.vae_scale_factor_spatial,
|
398
|
+
)
|
399
|
+
|
400
|
+
latent_padding = torch.zeros(padding_shape, device=device, dtype=dtype)
|
401
|
+
image_latents = torch.cat([image_latents, latent_padding], dim=1)
|
402
|
+
|
403
|
+
# Select the first frame along the second dimension
|
404
|
+
if self.transformer.config.patch_size_t is not None:
|
405
|
+
first_frame = image_latents[:, : image_latents.size(1) % self.transformer.config.patch_size_t, ...]
|
406
|
+
image_latents = torch.cat([first_frame, image_latents], dim=1)
|
407
|
+
|
408
|
+
if latents is None:
|
409
|
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
410
|
+
else:
|
411
|
+
latents = latents.to(device)
|
412
|
+
|
413
|
+
# scale the initial noise by the standard deviation required by the scheduler
|
414
|
+
latents = latents * self.scheduler.init_noise_sigma
|
415
|
+
return latents, image_latents
|
416
|
+
|
417
|
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.decode_latents
|
418
|
+
def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
|
419
|
+
latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width]
|
420
|
+
latents = 1 / self.vae_scaling_factor_image * latents
|
421
|
+
|
422
|
+
frames = self.vae.decode(latents).sample
|
423
|
+
return frames
|
424
|
+
|
425
|
+
# Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.get_timesteps
|
426
|
+
def get_timesteps(self, num_inference_steps, timesteps, strength, device):
|
427
|
+
# get the original timestep using init_timestep
|
428
|
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
429
|
+
|
430
|
+
t_start = max(num_inference_steps - init_timestep, 0)
|
431
|
+
timesteps = timesteps[t_start * self.scheduler.order :]
|
432
|
+
|
433
|
+
return timesteps, num_inference_steps - t_start
|
434
|
+
|
435
|
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
436
|
+
def prepare_extra_step_kwargs(self, generator, eta):
|
437
|
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
438
|
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
439
|
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
440
|
+
# and should be between [0, 1]
|
441
|
+
|
442
|
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
443
|
+
extra_step_kwargs = {}
|
444
|
+
if accepts_eta:
|
445
|
+
extra_step_kwargs["eta"] = eta
|
446
|
+
|
447
|
+
# check if the scheduler accepts generator
|
448
|
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
449
|
+
if accepts_generator:
|
450
|
+
extra_step_kwargs["generator"] = generator
|
451
|
+
return extra_step_kwargs
|
452
|
+
|
453
|
+
def check_inputs(
|
454
|
+
self,
|
455
|
+
image,
|
456
|
+
prompt,
|
457
|
+
height,
|
458
|
+
width,
|
459
|
+
negative_prompt,
|
460
|
+
callback_on_step_end_tensor_inputs,
|
461
|
+
latents=None,
|
462
|
+
prompt_embeds=None,
|
463
|
+
negative_prompt_embeds=None,
|
464
|
+
):
|
465
|
+
if (
|
466
|
+
not isinstance(image, torch.Tensor)
|
467
|
+
and not isinstance(image, PIL.Image.Image)
|
468
|
+
and not isinstance(image, list)
|
469
|
+
):
|
470
|
+
raise ValueError(
|
471
|
+
"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
|
472
|
+
f" {type(image)}"
|
473
|
+
)
|
474
|
+
|
475
|
+
if height % 8 != 0 or width % 8 != 0:
|
476
|
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
477
|
+
|
478
|
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
479
|
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
480
|
+
):
|
481
|
+
raise ValueError(
|
482
|
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
483
|
+
)
|
484
|
+
if prompt is not None and prompt_embeds is not None:
|
485
|
+
raise ValueError(
|
486
|
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
487
|
+
" only forward one of the two."
|
488
|
+
)
|
489
|
+
elif prompt is None and prompt_embeds is None:
|
490
|
+
raise ValueError(
|
491
|
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
492
|
+
)
|
493
|
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
494
|
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
495
|
+
|
496
|
+
if prompt is not None and negative_prompt_embeds is not None:
|
497
|
+
raise ValueError(
|
498
|
+
f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
|
499
|
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
500
|
+
)
|
501
|
+
|
502
|
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
503
|
+
raise ValueError(
|
504
|
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
505
|
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
506
|
+
)
|
507
|
+
|
508
|
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
509
|
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
510
|
+
raise ValueError(
|
511
|
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
512
|
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
513
|
+
f" {negative_prompt_embeds.shape}."
|
514
|
+
)
|
515
|
+
|
516
|
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.fuse_qkv_projections
|
517
|
+
def fuse_qkv_projections(self) -> None:
|
518
|
+
r"""Enables fused QKV projections."""
|
519
|
+
self.fusing_transformer = True
|
520
|
+
self.transformer.fuse_qkv_projections()
|
521
|
+
|
522
|
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.unfuse_qkv_projections
|
523
|
+
def unfuse_qkv_projections(self) -> None:
|
524
|
+
r"""Disable QKV projection fusion if enabled."""
|
525
|
+
if not self.fusing_transformer:
|
526
|
+
logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
|
527
|
+
else:
|
528
|
+
self.transformer.unfuse_qkv_projections()
|
529
|
+
self.fusing_transformer = False
|
530
|
+
|
531
|
+
# Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._prepare_rotary_positional_embeddings
|
532
|
+
def _prepare_rotary_positional_embeddings(
|
533
|
+
self,
|
534
|
+
height: int,
|
535
|
+
width: int,
|
536
|
+
num_frames: int,
|
537
|
+
device: torch.device,
|
538
|
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
539
|
+
grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
|
540
|
+
grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
|
541
|
+
|
542
|
+
p = self.transformer.config.patch_size
|
543
|
+
p_t = self.transformer.config.patch_size_t
|
544
|
+
|
545
|
+
base_size_width = self.transformer.config.sample_width // p
|
546
|
+
base_size_height = self.transformer.config.sample_height // p
|
547
|
+
|
548
|
+
if p_t is None:
|
549
|
+
# CogVideoX 1.0
|
550
|
+
grid_crops_coords = get_resize_crop_region_for_grid(
|
551
|
+
(grid_height, grid_width), base_size_width, base_size_height
|
552
|
+
)
|
553
|
+
freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
|
554
|
+
embed_dim=self.transformer.config.attention_head_dim,
|
555
|
+
crops_coords=grid_crops_coords,
|
556
|
+
grid_size=(grid_height, grid_width),
|
557
|
+
temporal_size=num_frames,
|
558
|
+
device=device,
|
559
|
+
)
|
560
|
+
else:
|
561
|
+
# CogVideoX 1.5
|
562
|
+
base_num_frames = (num_frames + p_t - 1) // p_t
|
563
|
+
|
564
|
+
freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
|
565
|
+
embed_dim=self.transformer.config.attention_head_dim,
|
566
|
+
crops_coords=None,
|
567
|
+
grid_size=(grid_height, grid_width),
|
568
|
+
temporal_size=base_num_frames,
|
569
|
+
grid_type="slice",
|
570
|
+
max_size=(base_size_height, base_size_width),
|
571
|
+
device=device,
|
572
|
+
)
|
573
|
+
|
574
|
+
return freqs_cos, freqs_sin
|
575
|
+
|
576
|
+
@property
|
577
|
+
def guidance_scale(self):
|
578
|
+
return self._guidance_scale
|
579
|
+
|
580
|
+
@property
|
581
|
+
def num_timesteps(self):
|
582
|
+
return self._num_timesteps
|
583
|
+
|
584
|
+
@property
|
585
|
+
def attention_kwargs(self):
|
586
|
+
return self._attention_kwargs
|
587
|
+
|
588
|
+
@property
|
589
|
+
def interrupt(self):
|
590
|
+
return self._interrupt
|
591
|
+
|
592
|
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput,
        prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_frames: int = 49,
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        guidance_scale: float = 6,
        use_dynamic_cfg: bool = False,
        num_videos_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: str = "pil",
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 226,
    ) -> Union[CogVideoXPipelineOutput, Tuple]:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            image (`PipelineImageInput`):
                The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            height (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial):
                The height in pixels of the generated image. This is set to 480 by default for the best results.
            width (`int`, *optional*, defaults to self.transformer.config.sample_width * self.vae_scale_factor_spatial):
                The width in pixels of the generated image. This is set to 720 by default for the best results.
            num_frames (`int`, defaults to `49`):
                Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. The generated video
                will contain 1 extra frame because CogVideoX is conditioned with (num_seconds * fps + 1) frames, where
                num_seconds is 6 and fps is 8. However, since videos can be saved at any fps, the only condition that
                needs to be satisfied is the divisibility mentioned above.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 6):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] instead
                of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in
                the `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `226`):
                Maximum sequence length in the encoded prompt. Must be consistent with
                `self.transformer.config.max_text_seq_length`, otherwise poor results may occur.

        Examples:

        Returns:
            [`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] or `tuple`:
            [`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """

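The `Examples:` placeholder above is filled in from `EXAMPLE_DOC_STRING` by the decorator. As rough orientation, a call to this pipeline typically looks like the minimal sketch below; it assumes the `CogVideoXImageToVideoPipeline` class exported by this module and the `THUDM/CogVideoX-5b-I2V` checkpoint, so adjust the names to whatever you actually load.

# Minimal usage sketch (assumed class/checkpoint names; not the canonical EXAMPLE_DOC_STRING).
import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

pipe = CogVideoXImageToVideoPipeline.from_pretrained(
    "THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # keeps peak VRAM manageable

image = load_image("input.png")  # the conditioning frame
video = pipe(
    image=image,
    prompt="A panda strumming a guitar by a quiet lake",
    num_frames=49,               # 6 s at 8 fps, plus the conditioning frame
    num_inference_steps=50,
    guidance_scale=6,
    use_dynamic_cfg=True,
    generator=torch.Generator(device="cuda").manual_seed(42),
).frames[0]

export_to_video(video, "output.mp4", fps=8)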
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
        width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
        num_frames = num_frames or self.transformer.config.sample_frames

        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            image=image,
            prompt=prompt,
            height=height,
            width=width,
            negative_prompt=negative_prompt,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            latents=latents,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._interrupt = False

        # 2. Default call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
        self._num_timesteps = len(timesteps)

        # 5. Prepare latents
        latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1

        # For CogVideoX 1.5, the latent frames should be padded to make them divisible by patch_size_t
        patch_size_t = self.transformer.config.patch_size_t
        additional_frames = 0
        if patch_size_t is not None and latent_frames % patch_size_t != 0:
            additional_frames = patch_size_t - latent_frames % patch_size_t
            num_frames += additional_frames * self.vae_scale_factor_temporal

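A standalone check of the frame-padding arithmetic above; the temporal compression factor and temporal patch size used here (4 and 2) are illustrative assumptions rather than values read from a model config.

# Illustrative only: how the CogVideoX 1.5 branch pads the requested frame count.
num_frames = 49
vae_scale_factor_temporal = 4   # assumed temporal compression factor of the VAE
patch_size_t = 2                # assumed temporal patch size of the transformer

latent_frames = (num_frames - 1) // vae_scale_factor_temporal + 1       # 13
additional_frames = 0
if latent_frames % patch_size_t != 0:
    additional_frames = patch_size_t - latent_frames % patch_size_t     # 1
    num_frames += additional_frames * vae_scale_factor_temporal         # 49 -> 53

print(latent_frames, additional_frames, num_frames)  # 13 1 53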
        image = self.video_processor.preprocess(image, height=height, width=width).to(
            device, dtype=prompt_embeds.dtype
        )

        latent_channels = self.transformer.config.in_channels // 2
        latents, image_latents = self.prepare_latents(
            image,
            batch_size * num_videos_per_prompt,
            latent_channels,
            num_frames,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Create rotary embeds if required
        image_rotary_emb = (
            self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
            if self.transformer.config.use_rotary_positional_embeddings
            else None
        )

        # 8. Create ofs embeds if required
        ofs_emb = None if self.transformer.config.ofs_embed_dim is None else latents.new_full((1,), fill_value=2.0)

        # 9. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            # for DPM-solver++
            old_pred_original_sample = None
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                latent_image_input = torch.cat([image_latents] * 2) if do_classifier_free_guidance else image_latents
                latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=2)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0])

                # predict noise model_output
                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timestep,
                    ofs=ofs_emb,
                    image_rotary_emb=image_rotary_emb,
                    attention_kwargs=attention_kwargs,
                    return_dict=False,
                )[0]
                noise_pred = noise_pred.float()

                # perform guidance
                if use_dynamic_cfg:
                    self._guidance_scale = 1 + guidance_scale * (
                        (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
                    )
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

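As an aside, the dynamic-CFG expression above is easier to read with a normalized progress variable; the sketch below restates the cosine ramp in that form. Keep in mind that in the loop `t.item()` is a raw scheduler timestep rather than a step index, so the actual mapping depends on the scheduler's timestep range.

import math

def dynamic_cfg(progress: float, guidance_scale: float = 6.0) -> float:
    # progress in [0, 1]: 0 = start of sampling, 1 = end (illustrative reparameterization
    # of (num_inference_steps - t) / num_inference_steps from the loop above)
    return 1 + guidance_scale * (1 - math.cos(math.pi * progress**5.0)) / 2

print(dynamic_cfg(0.0))  # 1.0   -> essentially unguided at the start
print(dynamic_cfg(0.5))  # ~1.01 -> still weak at the midpoint because of the **5 exponent
print(dynamic_cfg(1.0))  # 7.0   -> full 1 + guidance_scale at the end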
                # compute the previous noisy sample x_t -> x_t-1
                if not isinstance(self.scheduler, CogVideoXDPMScheduler):
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
                else:
                    latents, old_pred_original_sample = self.scheduler.step(
                        noise_pred,
                        old_pred_original_sample,
                        t,
                        timesteps[i - 1] if i > 0 else None,
                        latents,
                        **extra_step_kwargs,
                        return_dict=False,
                    )
                latents = latents.to(prompt_embeds.dtype)

                # call the callback, if provided
                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        if not output_type == "latent":
            # Discard any padding frames that were added for CogVideoX 1.5
            latents = latents[:, additional_frames:]
            video = self.decode_latents(latents)
            video = self.video_processor.postprocess_video(video=video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return CogVideoXPipelineOutput(frames=video)
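Since the loop above pops callback outputs back into `latents`, `prompt_embeds`, and `negative_prompt_embeds`, a per-step callback can observe or modify those tensors. A small sketch, reusing the same assumed class and checkpoint names as the earlier usage example:

import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import load_image

def log_latents(pipe, step, timestep, callback_kwargs):
    # Called once per denoising step with the tensors requested below.
    latents = callback_kwargs["latents"]
    if step % 10 == 0:
        print(f"step {step:3d}  t={int(timestep)}  latent std={latents.std().item():.4f}")
    # Returning the dict unchanged leaves the sampling trajectory untouched.
    return callback_kwargs

pipe = CogVideoXImageToVideoPipeline.from_pretrained(
    "THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()

video = pipe(
    image=load_image("input.png"),
    prompt="A panda strumming a guitar by a quiet lake",
    callback_on_step_end=log_latents,
    callback_on_step_end_tensor_inputs=["latents"],
).frames[0]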