diffusers-0.27.1-py3-none-any.whl → diffusers-0.32.2-py3-none-any.whl
- diffusers/__init__.py +233 -6
- diffusers/callbacks.py +209 -0
- diffusers/commands/env.py +102 -6
- diffusers/configuration_utils.py +45 -16
- diffusers/dependency_versions_table.py +4 -3
- diffusers/image_processor.py +434 -110
- diffusers/loaders/__init__.py +42 -9
- diffusers/loaders/ip_adapter.py +626 -36
- diffusers/loaders/lora_base.py +900 -0
- diffusers/loaders/lora_conversion_utils.py +991 -125
- diffusers/loaders/lora_pipeline.py +3812 -0
- diffusers/loaders/peft.py +571 -7
- diffusers/loaders/single_file.py +405 -173
- diffusers/loaders/single_file_model.py +385 -0
- diffusers/loaders/single_file_utils.py +1783 -713
- diffusers/loaders/textual_inversion.py +41 -23
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +464 -540
- diffusers/loaders/unet_loader_utils.py +163 -0
- diffusers/models/__init__.py +76 -7
- diffusers/models/activations.py +65 -10
- diffusers/models/adapter.py +53 -53
- diffusers/models/attention.py +605 -18
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +4304 -687
- diffusers/models/autoencoders/__init__.py +8 -0
- diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +110 -28
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
- diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
- diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
- diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
- diffusers/models/autoencoders/vae.py +41 -29
- diffusers/models/autoencoders/vq_model.py +182 -0
- diffusers/models/controlnet.py +47 -800
- diffusers/models/controlnet_flux.py +70 -0
- diffusers/models/controlnet_sd3.py +68 -0
- diffusers/models/controlnet_sparsectrl.py +116 -0
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/controlnets/controlnet_xs.py +1946 -0
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/downsampling.py +85 -18
- diffusers/models/embeddings.py +1856 -158
- diffusers/models/embeddings_flax.py +23 -9
- diffusers/models/model_loading_utils.py +480 -0
- diffusers/models/modeling_flax_pytorch_utils.py +2 -1
- diffusers/models/modeling_flax_utils.py +2 -7
- diffusers/models/modeling_outputs.py +14 -0
- diffusers/models/modeling_pytorch_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +611 -146
- diffusers/models/normalization.py +361 -20
- diffusers/models/resnet.py +18 -23
- diffusers/models/transformers/__init__.py +16 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
- diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
- diffusers/models/transformers/dit_transformer_2d.py +240 -0
- diffusers/models/transformers/dual_transformer_2d.py +9 -8
- diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
- diffusers/models/transformers/latte_transformer_3d.py +327 -0
- diffusers/models/transformers/lumina_nextdit2d.py +340 -0
- diffusers/models/transformers/pixart_transformer_2d.py +445 -0
- diffusers/models/transformers/prior_transformer.py +13 -13
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +458 -0
- diffusers/models/transformers/t5_film_transformer.py +17 -19
- diffusers/models/transformers/transformer_2d.py +297 -187
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +386 -0
- diffusers/models/transformers/transformer_flux.py +593 -0
- diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +461 -0
- diffusers/models/transformers/transformer_temporal.py +21 -19
- diffusers/models/unets/unet_1d.py +8 -8
- diffusers/models/unets/unet_1d_blocks.py +31 -31
- diffusers/models/unets/unet_2d.py +17 -10
- diffusers/models/unets/unet_2d_blocks.py +225 -149
- diffusers/models/unets/unet_2d_condition.py +41 -40
- diffusers/models/unets/unet_2d_condition_flax.py +6 -5
- diffusers/models/unets/unet_3d_blocks.py +192 -1057
- diffusers/models/unets/unet_3d_condition.py +22 -27
- diffusers/models/unets/unet_i2vgen_xl.py +22 -18
- diffusers/models/unets/unet_kandinsky3.py +2 -2
- diffusers/models/unets/unet_motion_model.py +1413 -89
- diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
- diffusers/models/unets/unet_stable_cascade.py +19 -18
- diffusers/models/unets/uvit_2d.py +2 -2
- diffusers/models/upsampling.py +95 -26
- diffusers/models/vq_model.py +12 -164
- diffusers/optimization.py +1 -1
- diffusers/pipelines/__init__.py +202 -3
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/amused/pipeline_amused.py +12 -12
- diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
- diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
- diffusers/pipelines/animatediff/__init__.py +8 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
- diffusers/pipelines/animatediff/pipeline_output.py +3 -2
- diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
- diffusers/pipelines/aura_flow/__init__.py +48 -0
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
- diffusers/pipelines/auto_pipeline.py +196 -28
- diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
- diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
- diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
- diffusers/pipelines/cogvideo/__init__.py +54 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
- diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
- diffusers/pipelines/cogview3/__init__.py +47 -0
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
- diffusers/pipelines/cogview3/pipeline_output.py +21 -0
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
- diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
- diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
- diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
- diffusers/pipelines/controlnet_xs/__init__.py +68 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
- diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
- diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
- diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
- diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
- diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
- diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
- diffusers/pipelines/dit/pipeline_dit.py +7 -4
- diffusers/pipelines/flux/__init__.py +69 -0
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +957 -0
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +37 -0
- diffusers/pipelines/free_init_utils.py +41 -38
- diffusers/pipelines/free_noise_utils.py +596 -0
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/__init__.py +48 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
- diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
- diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
- diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
- diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
- diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
- diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
- diffusers/pipelines/kolors/__init__.py +54 -0
- diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
- diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
- diffusers/pipelines/kolors/pipeline_output.py +21 -0
- diffusers/pipelines/kolors/text_encoder.py +889 -0
- diffusers/pipelines/kolors/tokenizer.py +338 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
- diffusers/pipelines/latte/__init__.py +48 -0
- diffusers/pipelines/latte/pipeline_latte.py +881 -0
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
- diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/__init__.py +48 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
- diffusers/pipelines/marigold/__init__.py +50 -0
- diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
- diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
- diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
- diffusers/pipelines/pag/__init__.py +80 -0
- diffusers/pipelines/pag/pag_utils.py +243 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
- diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
- diffusers/pipelines/pia/pipeline_pia.py +74 -164
- diffusers/pipelines/pipeline_flax_utils.py +5 -10
- diffusers/pipelines/pipeline_loading_utils.py +515 -53
- diffusers/pipelines/pipeline_utils.py +411 -222
- diffusers/pipelines/pixart_alpha/__init__.py +8 -1
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
- diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
- diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
- diffusers/pipelines/shap_e/renderer.py +1 -1
- diffusers/pipelines/stable_audio/__init__.py +50 -0
- diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
- diffusers/pipelines/stable_diffusion/__init__.py +0 -1
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
- diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
- diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
- diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
- diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
- diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
- diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
- diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
- diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
- diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
- diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
- diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
- diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
- diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
- diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
- diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
- diffusers/quantizers/auto.py +139 -0
- diffusers/quantizers/base.py +233 -0
- diffusers/quantizers/bitsandbytes/__init__.py +2 -0
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
- diffusers/quantizers/bitsandbytes/utils.py +306 -0
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +669 -0
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
- diffusers/schedulers/__init__.py +12 -2
- diffusers/schedulers/deprecated/__init__.py +1 -1
- diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
- diffusers/schedulers/scheduling_amused.py +5 -5
- diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
- diffusers/schedulers/scheduling_consistency_models.py +23 -25
- diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
- diffusers/schedulers/scheduling_ddim.py +27 -26
- diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
- diffusers/schedulers/scheduling_ddim_flax.py +2 -1
- diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
- diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
- diffusers/schedulers/scheduling_ddpm.py +27 -30
- diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
- diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
- diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
- diffusers/schedulers/scheduling_deis_multistep.py +150 -50
- diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
- diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
- diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
- diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
- diffusers/schedulers/scheduling_edm_euler.py +62 -39
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
- diffusers/schedulers/scheduling_euler_discrete.py +255 -74
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
- diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
- diffusers/schedulers/scheduling_heun_discrete.py +174 -46
- diffusers/schedulers/scheduling_ipndm.py +9 -9
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
- diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
- diffusers/schedulers/scheduling_lcm.py +23 -29
- diffusers/schedulers/scheduling_lms_discrete.py +105 -28
- diffusers/schedulers/scheduling_pndm.py +20 -20
- diffusers/schedulers/scheduling_repaint.py +21 -21
- diffusers/schedulers/scheduling_sasolver.py +157 -60
- diffusers/schedulers/scheduling_sde_ve.py +19 -19
- diffusers/schedulers/scheduling_tcd.py +41 -36
- diffusers/schedulers/scheduling_unclip.py +19 -16
- diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
- diffusers/schedulers/scheduling_utils.py +12 -5
- diffusers/schedulers/scheduling_utils_flax.py +1 -3
- diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
- diffusers/training_utils.py +214 -30
- diffusers/utils/__init__.py +17 -1
- diffusers/utils/constants.py +3 -0
- diffusers/utils/doc_utils.py +1 -0
- diffusers/utils/dummy_pt_objects.py +592 -7
- diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
- diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
- diffusers/utils/dynamic_modules_utils.py +34 -29
- diffusers/utils/export_utils.py +50 -6
- diffusers/utils/hub_utils.py +131 -17
- diffusers/utils/import_utils.py +210 -8
- diffusers/utils/loading_utils.py +118 -5
- diffusers/utils/logging.py +4 -2
- diffusers/utils/peft_utils.py +37 -7
- diffusers/utils/state_dict_utils.py +13 -2
- diffusers/utils/testing_utils.py +193 -11
- diffusers/utils/torch_utils.py +4 -0
- diffusers/video_processor.py +113 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
- diffusers-0.32.2.dist-info/RECORD +550 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
- diffusers/loaders/autoencoder.py +0 -146
- diffusers/loaders/controlnet.py +0 -136
- diffusers/loaders/lora.py +0 -1349
- diffusers/models/prior_transformer.py +0 -12
- diffusers/models/t5_film_transformer.py +0 -70
- diffusers/models/transformer_2d.py +0 -25
- diffusers/models/transformer_temporal.py +0 -34
- diffusers/models/unet_1d.py +0 -26
- diffusers/models/unet_1d_blocks.py +0 -203
- diffusers/models/unet_2d.py +0 -27
- diffusers/models/unet_2d_blocks.py +0 -375
- diffusers/models/unet_2d_condition.py +0 -25
- diffusers-0.27.1.dist-info/RECORD +0 -399
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
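
The headline changes in this range are visible in the list above: a new `diffusers/quantizers` package (bitsandbytes, GGUF, and torchao backends), the LoRA loader split into `lora_base.py`/`lora_pipeline.py`, and a large batch of new model families and pipelines (Flux, SD3, CogVideoX, Mochi, LTX, HunyuanVideo, Sana, Kolors, PAG). As a quick orientation to the quantizers addition, here is a minimal sketch of the new `quantization_config` path through `from_pretrained`; the checkpoint name is illustrative and not taken from this diff:

```py
import torch

from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

# 4-bit NF4 weights via the new diffusers/quantizers/bitsandbytes backend
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
transformer = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",  # illustrative checkpoint
    subfolder="transformer",
    quantization_config=nf4_config,
)
```

The hunk that follows is one of these additions: the new AnimateDiff video-to-video ControlNet pipeline.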
diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py (new file)
@@ -0,0 +1,1341 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

from ...image_processor import PipelineImageInput
from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
from ...models import (
    AutoencoderKL,
    ControlNetModel,
    ImageProjection,
    MultiControlNetModel,
    UNet2DConditionModel,
    UNetMotionModel,
)
from ...models.lora import adjust_lora_scale_text_encoder
from ...models.unets.unet_motion_model import MotionAdapter
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
from ...utils.torch_utils import is_compiled_module, randn_tensor
from ...video_processor import VideoProcessor
from ..free_init_utils import FreeInitMixin
from ..free_noise_utils import AnimateDiffFreeNoiseMixin
from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from .pipeline_output import AnimateDiffPipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from PIL import Image
        >>> from tqdm.auto import tqdm

        >>> from diffusers import AnimateDiffVideoToVideoControlNetPipeline
        >>> from diffusers.utils import export_to_gif, load_video
        >>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter, LCMScheduler

        >>> controlnet = ControlNetModel.from_pretrained(
        ...     "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
        ... )
        >>> motion_adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM")
        >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

        >>> pipe = AnimateDiffVideoToVideoControlNetPipeline.from_pretrained(
        ...     "SG161222/Realistic_Vision_V5.1_noVAE",
        ...     motion_adapter=motion_adapter,
        ...     controlnet=controlnet,
        ...     vae=vae,
        ... ).to(device="cuda", dtype=torch.float16)

        >>> pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")
        >>> pipe.load_lora_weights(
        ...     "wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora"
        ... )
        >>> pipe.set_adapters(["lcm-lora"], [0.8])

        >>> video = load_video(
        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/dance.gif"
        ... )
        >>> video = [frame.convert("RGB") for frame in video]

        >>> from controlnet_aux.processor import OpenposeDetector

        >>> open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators").to("cuda")
        >>> conditioning_frames = []
        >>> for frame in tqdm(video):
        ...     conditioning_frames.append(open_pose(frame))

        >>> prompt = "astronaut in space, dancing"
        >>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"

        >>> strength = 0.8
        >>> with torch.inference_mode():
        ...     video = pipe(
        ...         video=video,
        ...         prompt=prompt,
        ...         negative_prompt=negative_prompt,
        ...         num_inference_steps=10,
        ...         guidance_scale=2.0,
        ...         controlnet_conditioning_scale=0.75,
        ...         conditioning_frames=conditioning_frames,
        ...         strength=strength,
        ...         generator=torch.Generator().manual_seed(42),
        ...     ).frames[0]

        >>> video = [frame.resize(conditioning_frames[0].size) for frame in video]
        >>> export_to_gif(video, "animatediff_vid2vid_controlnet.gif", fps=8)
        ```
"""

# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")

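# `retrieve_latents` above smooths over the encoder output conventions used by
# different diffusers autoencoders: `AutoencoderKL.encode` returns an object
# carrying a `latent_dist` (sampled, or reduced to its mode with
# sample_mode="argmax"), while e.g. `AutoencoderTiny` returns plain `latents`.
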
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps

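# Illustrative use of the `sigmas` override accepted by `retrieve_timesteps`
# (the values are made up; any scheduler whose `set_timesteps` takes `sigmas`
# will work):
#
#     timesteps, num_inference_steps = retrieve_timesteps(
#         pipe.scheduler, sigmas=[1.0, 0.75, 0.5, 0.25, 0.0], device="cuda"
#     )
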
class AnimateDiffVideoToVideoControlNetPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    TextualInversionLoaderMixin,
    IPAdapterMixin,
    StableDiffusionLoraLoaderMixin,
    FreeInitMixin,
    AnimateDiffFreeNoiseMixin,
):
    r"""
    Pipeline for video-to-video generation with ControlNet guidance.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer (`CLIPTokenizer`):
            A [`~transformers.CLIPTokenizer`] to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
        motion_adapter ([`MotionAdapter`]):
            A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
        controlnet ([`ControlNetModel`] or `List[ControlNetModel]` or `Tuple[ControlNetModel]` or `MultiControlNetModel`):
            Provides additional conditioning to the `unet` during the denoising process. If you set multiple
            ControlNets as a list, the outputs from each ControlNet are added together to create one combined
            additional conditioning.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    """

    model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
    _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"]
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        motion_adapter: MotionAdapter,
        controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
        feature_extractor: CLIPImageProcessor = None,
        image_encoder: CLIPVisionModelWithProjection = None,
    ):
        super().__init__()
        if isinstance(unet, UNet2DConditionModel):
            unet = UNetMotionModel.from_unet2d(unet, motion_adapter)

        if isinstance(controlnet, (list, tuple)):
            controlnet = MultiControlNetModel(controlnet)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            motion_adapter=motion_adapter,
            controlnet=controlnet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor)
        self.control_video_processor = VideoProcessor(
            vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
        )

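    # For the standard Stable Diffusion VAE, `block_out_channels` has four entries,
    # so `vae_scale_factor` is 2 ** 3 = 8: each latent pixel covers an 8x8 patch of
    # the decoded frame, which is why `check_inputs` below requires `height` and
    # `width` to be divisible by 8.
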
    # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        if prompt is not None and isinstance(prompt, (str, dict)):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            if clip_skip is None:
                prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                prompt_embeds = prompt_embeds[0]
            else:
                prompt_embeds = self.text_encoder(
                    text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                )
                # Access the `hidden_states` first, that contains a tuple of
                # all the hidden states from the encoder layers. Then index into
                # the tuple to access the hidden states from the desired layer.
                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
                # We also need to apply the final LayerNorm here to not mess with the
                # representations. The `last_hidden_states` that we typically use for
                # obtaining the final prompt representations passes through the LayerNorm
                # layer.
                prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if self.text_encoder is not None:
            if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds

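    # `encode_image` serves both IP-Adapter variants: the plain adapter projects the
    # pooled `image_embeds`, while IP-Adapter Plus consumes the penultimate hidden
    # states (`hidden_states[-2]`); `prepare_ip_adapter_image_embeds` below selects
    # the mode per projection layer.
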
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        image_embeds = []
        if do_classifier_free_guidance:
            negative_image_embeds = []
        if ip_adapter_image_embeds is None:
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )

                image_embeds.append(single_image_embeds[None, :])
                if do_classifier_free_guidance:
                    negative_image_embeds.append(single_negative_image_embeds[None, :])
        else:
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    negative_image_embeds.append(single_negative_image_embeds)
                image_embeds.append(single_image_embeds)

        ip_adapter_image_embeds = []
        for i, single_image_embeds in enumerate(image_embeds):
            single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
            if do_classifier_free_guidance:
                single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
                single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)

            single_image_embeds = single_image_embeds.to(device=device)
            ip_adapter_image_embeds.append(single_image_embeds)

        return ip_adapter_image_embeds

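    # Note the classifier-free guidance packing convention: precomputed
    # `ip_adapter_image_embeds` are expected as [negative, positive] concatenated
    # along the batch dimension, which is what the `chunk(2)` above unpacks.
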
    # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.encode_video
    def encode_video(self, video, generator, decode_chunk_size: int = 16) -> torch.Tensor:
        latents = []
        for i in range(0, len(video), decode_chunk_size):
            batch_video = video[i : i + decode_chunk_size]
            batch_video = retrieve_latents(self.vae.encode(batch_video), generator=generator)
            latents.append(batch_video)
        return torch.cat(latents)

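    # Encoding the input frames in slices of `decode_chunk_size` keeps peak VAE
    # memory independent of the clip length.
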
    # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.AnimateDiffPipeline.decode_latents
    def decode_latents(self, latents, decode_chunk_size: int = 16):
        latents = 1 / self.vae.config.scaling_factor * latents

        batch_size, channels, num_frames, height, width = latents.shape
        latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)

        video = []
        for i in range(0, latents.shape[0], decode_chunk_size):
            batch_latents = latents[i : i + decode_chunk_size]
            batch_latents = self.vae.decode(batch_latents).sample
            video.append(batch_latents)

        video = torch.cat(video)
        video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        video = video.float()
        return video

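    # The 2D VAE treats frames as an image batch: the [B, C, F, H, W] latents are
    # flattened to [B * F, C, H, W] for decoding and then reshaped back into a video.
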
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
562
|
+
def prepare_extra_step_kwargs(self, generator, eta):
|
563
|
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
564
|
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
565
|
+
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
566
|
+
# and should be between [0, 1]
|
567
|
+
|
568
|
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
569
|
+
extra_step_kwargs = {}
|
570
|
+
if accepts_eta:
|
571
|
+
extra_step_kwargs["eta"] = eta
|
572
|
+
|
573
|
+
# check if the scheduler accepts generator
|
574
|
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
575
|
+
if accepts_generator:
|
576
|
+
extra_step_kwargs["generator"] = generator
|
577
|
+
return extra_step_kwargs
|
578
|
+
|
579
|
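    # Editorial sketch: the introspection above amounts to checking, for a given scheduler class,
    # something like
    #   "eta" in inspect.signature(DDIMScheduler.step).parameters
    # so `eta` and `generator` are forwarded only to schedulers whose `step` signature declares
    # them (e.g. DDIM accepts `eta`; most other schedulers do not).
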
    def check_inputs(
        self,
        prompt,
        strength,
        height,
        width,
        video=None,
        conditioning_frames=None,
        latents=None,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        ip_adapter_image=None,
        ip_adapter_image_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        controlnet_conditioning_scale=1.0,
        control_guidance_start=0.0,
        control_guidance_end=1.0,
    ):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and not isinstance(prompt, (str, list, dict)):
            raise ValueError(f"`prompt` has to be of type `str`, `list` or `dict` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if video is not None and latents is not None:
            raise ValueError("Only one of `video` or `latents` should be provided")

        if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
            raise ValueError(
                "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
            )

        if ip_adapter_image_embeds is not None:
            if not isinstance(ip_adapter_image_embeds, list):
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
                )
            elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
                )

        if isinstance(self.controlnet, MultiControlNetModel):
            if isinstance(prompt, list):
                logger.warning(
                    f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
                    " prompts. The conditionings will be fixed across the prompts."
                )
        is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
            self.controlnet, torch._dynamo.eval_frame.OptimizedModule
        )

        num_frames = len(video) if latents is None else latents.shape[2]

        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(conditioning_frames, list):
                raise TypeError(
                    f"For single controlnet, `image` must be of type `list` but got {type(conditioning_frames)}"
                )
            if len(conditioning_frames) != num_frames:
                raise ValueError(f"Expected image to have length {num_frames} but got {len(conditioning_frames)=}")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if not isinstance(conditioning_frames, list) or not isinstance(conditioning_frames[0], list):
                raise TypeError(
                    f"For multiple controlnets: `image` must be type list of lists but got {type(conditioning_frames)=}"
                )
            if len(conditioning_frames[0]) != num_frames:
                raise ValueError(
                    f"Expected length of image sublist as {num_frames} but got {len(conditioning_frames)=}"
                )
            if any(len(img) != len(conditioning_frames[0]) for img in conditioning_frames):
                raise ValueError("All conditioning frame batches for multicontrolnet must be the same size")
        else:
            assert False

        # Check `controlnet_conditioning_scale`
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(controlnet_conditioning_scale, float):
                raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if isinstance(controlnet_conditioning_scale, list):
                if any(isinstance(i, list) for i in controlnet_conditioning_scale):
                    raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
            elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
                self.controlnet.nets
            ):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
        else:
            assert False

        if not isinstance(control_guidance_start, (tuple, list)):
            control_guidance_start = [control_guidance_start]

        if not isinstance(control_guidance_end, (tuple, list)):
            control_guidance_end = [control_guidance_end]

        if len(control_guidance_start) != len(control_guidance_end):
            raise ValueError(
                f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
            )

        if isinstance(self.controlnet, MultiControlNetModel):
            if len(control_guidance_start) != len(self.controlnet.nets):
                raise ValueError(
                    f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
                )

        for start, end in zip(control_guidance_start, control_guidance_end):
            if start >= end:
                raise ValueError(
                    f"control guidance start: {start} cannot be larger than or equal to control guidance end: {end}."
                )
            if start < 0.0:
                raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
            if end > 1.0:
                raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")

    # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, timesteps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = timesteps[t_start * self.scheduler.order :]

        return timesteps, num_inference_steps - t_start

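    # Editorial sketch (illustrative numbers): with num_inference_steps=50 and strength=0.8,
    # init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 50 - 40 = 10, so the first 10
    # timesteps are skipped and denoising runs for the remaining 40 steps on the noised video.
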
    # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.prepare_latents
    def prepare_latents(
        self,
        video: Optional[torch.Tensor] = None,
        height: int = 64,
        width: int = 64,
        num_channels_latents: int = 4,
        batch_size: int = 1,
        timestep: Optional[int] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        decode_chunk_size: int = 16,
        add_noise: bool = False,
    ) -> torch.Tensor:
        num_frames = video.shape[1] if latents is None else latents.shape[2]
        shape = (
            batch_size,
            num_channels_latents,
            num_frames,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            # make sure the VAE is in float32 mode, as it overflows in float16
            if self.vae.config.force_upcast:
                video = video.float()
                self.vae.to(dtype=torch.float32)

            if isinstance(generator, list):
                init_latents = [
                    self.encode_video(video[i], generator[i], decode_chunk_size).unsqueeze(0)
                    for i in range(batch_size)
                ]
            else:
                init_latents = [self.encode_video(vid, generator, decode_chunk_size).unsqueeze(0) for vid in video]

            init_latents = torch.cat(init_latents, dim=0)

            # restore vae to original dtype
            if self.vae.config.force_upcast:
                self.vae.to(dtype)

            init_latents = init_latents.to(dtype)
            init_latents = self.vae.config.scaling_factor * init_latents

            if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
                # expand init_latents for batch_size
                error_message = (
                    f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
                    " images (`image`). Please make sure to update your script to pass as many initial images as text prompts"
                )
                raise ValueError(error_message)
            elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
                raise ValueError(
                    f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
                )
            else:
                init_latents = torch.cat([init_latents], dim=0)

            noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
            latents = self.scheduler.add_noise(init_latents, noise, timestep).permute(0, 2, 1, 3, 4)
        else:
            if shape != latents.shape:
                # [B, C, F, H, W]
                raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}")

            latents = latents.to(device, dtype=dtype)

            if add_noise:
                noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
                latents = self.scheduler.add_noise(latents, noise, timestep)

        return latents

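    # Editorial note: this is the usual img2img construction: the input video is VAE-encoded,
    # scaled by `vae.config.scaling_factor`, and then noised to `timestep` (the first retained
    # timestep from `get_timesteps`) via `scheduler.add_noise`, so lower `strength` preserves
    # more of the source video.
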
    # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_controlnet.AnimateDiffControlNetPipeline.prepare_video
    def prepare_conditioning_frames(
        self,
        video,
        width,
        height,
        batch_size,
        num_videos_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        guess_mode=False,
    ):
        video = self.control_video_processor.preprocess_video(video, height=height, width=width).to(
            dtype=torch.float32
        )
        video = video.permute(0, 2, 1, 3, 4).flatten(0, 1)
        video_batch_size = video.shape[0]

        if video_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_videos_per_prompt

        video = video.repeat_interleave(repeat_by, dim=0)
        video = video.to(device=device, dtype=dtype)

        if do_classifier_free_guidance and not guess_mode:
            video = torch.cat([video] * 2)

        return video

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    def __call__(
        self,
        video: List[List[PipelineImageInput]] = None,
        prompt: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        enforce_inference_steps: bool = False,
        timesteps: Optional[List[int]] = None,
        sigmas: Optional[List[float]] = None,
        guidance_scale: float = 7.5,
        strength: float = 0.8,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_videos_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        conditioning_frames: Optional[List[PipelineImageInput]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
        guess_mode: bool = False,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        decode_chunk_size: int = 16,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            video (`List[PipelineImageInput]`):
                The input video to condition the generation on. Must be a list of images/frames of the video.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated video.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated video.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to higher quality videos at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            strength (`float`, *optional*, defaults to 0.8):
                Higher strength leads to more differences between the original video and the generated video.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale <= 1`).
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
                `(batch_size, num_channel, num_frames, height, width)`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list with length equal to the number of
                IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
                provided, embeddings are computed from the `ip_adapter_image` input argument.
            conditioning_frames (`List[PipelineImageInput]`, *optional*):
                The ControlNet input condition to provide guidance to the `unet` for generation. If multiple
                ControlNets are specified, images must be passed as a list such that each element of the list can be
                correctly batched for input to a single ControlNet.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return an [`AnimateDiffPipelineOutput`] instead of a plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
                The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
                the corresponding scale as a list.
            guess_mode (`bool`, *optional*, defaults to `False`):
                The ControlNet encoder tries to recognize the content of the input image even if you remove all
                prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
            control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
                The percentage of total steps at which the ControlNet starts applying.
            control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
                The percentage of total steps at which the ControlNet stops applying.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            decode_chunk_size (`int`, defaults to `16`):
                The number of frames to decode at a time when calling the `decode_latents` method.

        Examples:
            (A usage sketch is appended after this pipeline definition.)

        Returns:
            [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
                returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
        """

        controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet

        # align format for control guidance
        if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
            control_guidance_start = len(control_guidance_end) * [control_guidance_start]
        elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
            control_guidance_end = len(control_guidance_start) * [control_guidance_end]
        elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
            mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
            control_guidance_start, control_guidance_end = (
                mult * [control_guidance_start],
                mult * [control_guidance_end],
            )

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            strength=strength,
            height=height,
            width=width,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            video=video,
            conditioning_frames=conditioning_frames,
            latents=latents,
            ip_adapter_image=ip_adapter_image,
            ip_adapter_image_embeds=ip_adapter_image_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            control_guidance_start=control_guidance_start,
            control_guidance_end=control_guidance_end,
        )

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, (str, dict)):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        dtype = self.dtype

        # 3. Prepare timesteps
        if not enforce_inference_steps:
            timesteps, num_inference_steps = retrieve_timesteps(
                self.scheduler, num_inference_steps, device, timesteps, sigmas
            )
            timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device)
            latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)
        else:
            denoising_inference_steps = int(num_inference_steps / strength)
            timesteps, denoising_inference_steps = retrieve_timesteps(
                self.scheduler, denoising_inference_steps, device, timesteps, sigmas
            )
            timesteps = timesteps[-num_inference_steps:]
            latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)

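        # Editorial sketch (illustrative numbers): with `enforce_inference_steps=True`,
        # num_inference_steps=50 and strength=0.8 give int(50 / 0.8) = 62 scheduler steps, of
        # which the last 50 are kept, so exactly `num_inference_steps` denoising steps run while
        # the starting point still reflects `strength`.
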
        # 4. Prepare latent variables
        if latents is None:
            video = self.video_processor.preprocess_video(video, height=height, width=width)
            # Move the number of frames before the number of channels.
            video = video.permute(0, 2, 1, 3, 4)
            video = video.to(device=device, dtype=dtype)

            num_channels_latents = self.unet.config.in_channels
            latents = self.prepare_latents(
                video=video,
                height=height,
                width=width,
                num_channels_latents=num_channels_latents,
                batch_size=batch_size * num_videos_per_prompt,
                timestep=latent_timestep,
                dtype=dtype,
                device=device,
                generator=generator,
                latents=latents,
                decode_chunk_size=decode_chunk_size,
                add_noise=enforce_inference_steps,
            )

        # 5. Encode input prompt
        text_encoder_lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )
        num_frames = latents.shape[2]
        if self.free_noise_enabled:
            prompt_embeds, negative_prompt_embeds = self._encode_prompt_free_noise(
                prompt=prompt,
                num_frames=num_frames,
                device=device,
                num_videos_per_prompt=num_videos_per_prompt,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
                negative_prompt=negative_prompt,
                prompt_embeds=prompt_embeds,
                negative_prompt_embeds=negative_prompt_embeds,
                lora_scale=text_encoder_lora_scale,
                clip_skip=self.clip_skip,
            )
        else:
            prompt_embeds, negative_prompt_embeds = self.encode_prompt(
                prompt,
                device,
                num_videos_per_prompt,
                self.do_classifier_free_guidance,
                negative_prompt,
                prompt_embeds=prompt_embeds,
                negative_prompt_embeds=negative_prompt_embeds,
                lora_scale=text_encoder_lora_scale,
                clip_skip=self.clip_skip,
            )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            if self.do_classifier_free_guidance:
                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

            prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0)

        # 6. Prepare IP-Adapter embeddings
        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
            image_embeds = self.prepare_ip_adapter_image_embeds(
                ip_adapter_image,
                ip_adapter_image_embeds,
                device,
                batch_size * num_videos_per_prompt,
                self.do_classifier_free_guidance,
            )

        # 7. Prepare ControlNet conditions
        if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
            controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)

        global_pool_conditions = (
            controlnet.config.global_pool_conditions
            if isinstance(controlnet, ControlNetModel)
            else controlnet.nets[0].config.global_pool_conditions
        )
        guess_mode = guess_mode or global_pool_conditions

        controlnet_keep = []
        for i in range(len(timesteps)):
            keeps = [
                1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
                for s, e in zip(control_guidance_start, control_guidance_end)
            ]
            controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)

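        # Editorial sketch (illustrative numbers): with 10 timesteps, control_guidance_start=0.0
        # and control_guidance_end=0.5, `controlnet_keep[i]` is 1.0 while (i + 1) / 10 <= 0.5
        # (i.e. the first 5 steps) and 0.0 afterwards, which zeroes the ControlNet residuals
        # outside the configured guidance window.
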
        if isinstance(controlnet, ControlNetModel):
            conditioning_frames = self.prepare_conditioning_frames(
                video=conditioning_frames,
                width=width,
                height=height,
                batch_size=batch_size * num_videos_per_prompt * num_frames,
                num_videos_per_prompt=num_videos_per_prompt,
                device=device,
                dtype=controlnet.dtype,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
                guess_mode=guess_mode,
            )
        elif isinstance(controlnet, MultiControlNetModel):
            cond_prepared_videos = []
            for frame_ in conditioning_frames:
                prepared_video = self.prepare_conditioning_frames(
                    video=frame_,
                    width=width,
                    height=height,
                    batch_size=batch_size * num_videos_per_prompt * num_frames,
                    num_videos_per_prompt=num_videos_per_prompt,
                    device=device,
                    dtype=controlnet.dtype,
                    do_classifier_free_guidance=self.do_classifier_free_guidance,
                    guess_mode=guess_mode,
                )
                cond_prepared_videos.append(prepared_video)
            conditioning_frames = cond_prepared_videos
        else:
            assert False

        # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 9. Add image embeds for IP-Adapter
        added_cond_kwargs = (
            {"image_embeds": image_embeds}
            if ip_adapter_image is not None or ip_adapter_image_embeds is not None
            else None
        )

        num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1
        for free_init_iter in range(num_free_init_iters):
            if self.free_init_enabled:
                latents, timesteps = self._apply_free_init(
                    latents, free_init_iter, num_inference_steps, device, latents.dtype, generator
                )
                num_inference_steps = len(timesteps)
                # make sure to readjust timesteps based on strength
                timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device)

            self._num_timesteps = len(timesteps)
            num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order

            # 10. Denoising loop
            with self.progress_bar(total=self._num_timesteps) as progress_bar:
                for i, t in enumerate(timesteps):
                    if self.interrupt:
                        continue

                    # expand the latents if we are doing classifier free guidance
                    latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                    if guess_mode and self.do_classifier_free_guidance:
                        # Infer ControlNet only for the conditional batch.
                        control_model_input = latents
                        control_model_input = self.scheduler.scale_model_input(control_model_input, t)
                        controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
                    else:
                        control_model_input = latent_model_input
                        controlnet_prompt_embeds = prompt_embeds

                    if isinstance(controlnet_keep[i], list):
                        cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
                    else:
                        controlnet_cond_scale = controlnet_conditioning_scale
                        if isinstance(controlnet_cond_scale, list):
                            controlnet_cond_scale = controlnet_cond_scale[0]
                        cond_scale = controlnet_cond_scale * controlnet_keep[i]

                    control_model_input = torch.transpose(control_model_input, 1, 2)
                    control_model_input = control_model_input.reshape(
                        (-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])
                    )

                    down_block_res_samples, mid_block_res_sample = self.controlnet(
                        control_model_input,
                        t,
                        encoder_hidden_states=controlnet_prompt_embeds,
                        controlnet_cond=conditioning_frames,
                        conditioning_scale=cond_scale,
                        guess_mode=guess_mode,
                        return_dict=False,
                    )

                    # predict the noise residual
                    noise_pred = self.unet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=prompt_embeds,
                        cross_attention_kwargs=self.cross_attention_kwargs,
                        added_cond_kwargs=added_cond_kwargs,
                        down_block_additional_residuals=down_block_res_samples,
                        mid_block_additional_residual=mid_block_res_sample,
                    ).sample

                    # perform guidance
                    if self.do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                    # compute the previous noisy sample x_t -> x_t-1
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                    if callback_on_step_end is not None:
                        callback_kwargs = {}
                        for k in callback_on_step_end_tensor_inputs:
                            callback_kwargs[k] = locals()[k]
                        callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                        latents = callback_outputs.pop("latents", latents)
                        prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                        negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                    # update the progress bar on the last step or after each full scheduler-order cycle
                    if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                        progress_bar.update()

        # 11. Post-processing
        if output_type == "latent":
            video = latents
        else:
            video_tensor = self.decode_latents(latents, decode_chunk_size)
            video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type)

        # 12. Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return AnimateDiffPipelineOutput(frames=video)
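
The docstring's `Examples:` section is empty in this diff, so here is a minimal usage sketch for the pipeline this file defines. The model IDs, the input video path, and the choice to reuse the input frames as `conditioning_frames` are illustrative assumptions, not part of this diff; in practice the conditioning frames would be per-frame control images (e.g. openpose maps) produced by a preprocessor of your choice.

import torch
from diffusers import (
    AnimateDiffVideoToVideoControlNetPipeline,
    ControlNetModel,
    MotionAdapter,
)
from diffusers.utils import load_video

# Illustrative checkpoints -- substitute the models you actually use.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
pipe = AnimateDiffVideoToVideoControlNetPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",
    motion_adapter=adapter,
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

video = load_video("input.mp4")  # list of PIL frames
conditioning_frames = video  # placeholder: normally per-frame control images

output = pipe(
    video=video,
    conditioning_frames=conditioning_frames,  # must match the number of input frames
    prompt="a panda playing a guitar, high quality",
    negative_prompt="low quality, blurry",
    strength=0.8,  # how far to move away from the source video
    guidance_scale=7.5,  # classifier-free guidance weight
    controlnet_conditioning_scale=1.0,
    generator=torch.Generator("cuda").manual_seed(42),
)
frames = output.frames[0]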