diffusers 0.27.1__py3-none-any.whl → 0.32.2__py3-none-any.whl
- diffusers/__init__.py +233 -6
- diffusers/callbacks.py +209 -0
- diffusers/commands/env.py +102 -6
- diffusers/configuration_utils.py +45 -16
- diffusers/dependency_versions_table.py +4 -3
- diffusers/image_processor.py +434 -110
- diffusers/loaders/__init__.py +42 -9
- diffusers/loaders/ip_adapter.py +626 -36
- diffusers/loaders/lora_base.py +900 -0
- diffusers/loaders/lora_conversion_utils.py +991 -125
- diffusers/loaders/lora_pipeline.py +3812 -0
- diffusers/loaders/peft.py +571 -7
- diffusers/loaders/single_file.py +405 -173
- diffusers/loaders/single_file_model.py +385 -0
- diffusers/loaders/single_file_utils.py +1783 -713
- diffusers/loaders/textual_inversion.py +41 -23
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +464 -540
- diffusers/loaders/unet_loader_utils.py +163 -0
- diffusers/models/__init__.py +76 -7
- diffusers/models/activations.py +65 -10
- diffusers/models/adapter.py +53 -53
- diffusers/models/attention.py +605 -18
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +4304 -687
- diffusers/models/autoencoders/__init__.py +8 -0
- diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +110 -28
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
- diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
- diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
- diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
- diffusers/models/autoencoders/vae.py +41 -29
- diffusers/models/autoencoders/vq_model.py +182 -0
- diffusers/models/controlnet.py +47 -800
- diffusers/models/controlnet_flux.py +70 -0
- diffusers/models/controlnet_sd3.py +68 -0
- diffusers/models/controlnet_sparsectrl.py +116 -0
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/controlnets/controlnet_xs.py +1946 -0
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/downsampling.py +85 -18
- diffusers/models/embeddings.py +1856 -158
- diffusers/models/embeddings_flax.py +23 -9
- diffusers/models/model_loading_utils.py +480 -0
- diffusers/models/modeling_flax_pytorch_utils.py +2 -1
- diffusers/models/modeling_flax_utils.py +2 -7
- diffusers/models/modeling_outputs.py +14 -0
- diffusers/models/modeling_pytorch_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +611 -146
- diffusers/models/normalization.py +361 -20
- diffusers/models/resnet.py +18 -23
- diffusers/models/transformers/__init__.py +16 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
- diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
- diffusers/models/transformers/dit_transformer_2d.py +240 -0
- diffusers/models/transformers/dual_transformer_2d.py +9 -8
- diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
- diffusers/models/transformers/latte_transformer_3d.py +327 -0
- diffusers/models/transformers/lumina_nextdit2d.py +340 -0
- diffusers/models/transformers/pixart_transformer_2d.py +445 -0
- diffusers/models/transformers/prior_transformer.py +13 -13
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +458 -0
- diffusers/models/transformers/t5_film_transformer.py +17 -19
- diffusers/models/transformers/transformer_2d.py +297 -187
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +386 -0
- diffusers/models/transformers/transformer_flux.py +593 -0
- diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +461 -0
- diffusers/models/transformers/transformer_temporal.py +21 -19
- diffusers/models/unets/unet_1d.py +8 -8
- diffusers/models/unets/unet_1d_blocks.py +31 -31
- diffusers/models/unets/unet_2d.py +17 -10
- diffusers/models/unets/unet_2d_blocks.py +225 -149
- diffusers/models/unets/unet_2d_condition.py +41 -40
- diffusers/models/unets/unet_2d_condition_flax.py +6 -5
- diffusers/models/unets/unet_3d_blocks.py +192 -1057
- diffusers/models/unets/unet_3d_condition.py +22 -27
- diffusers/models/unets/unet_i2vgen_xl.py +22 -18
- diffusers/models/unets/unet_kandinsky3.py +2 -2
- diffusers/models/unets/unet_motion_model.py +1413 -89
- diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
- diffusers/models/unets/unet_stable_cascade.py +19 -18
- diffusers/models/unets/uvit_2d.py +2 -2
- diffusers/models/upsampling.py +95 -26
- diffusers/models/vq_model.py +12 -164
- diffusers/optimization.py +1 -1
- diffusers/pipelines/__init__.py +202 -3
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/amused/pipeline_amused.py +12 -12
- diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
- diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
- diffusers/pipelines/animatediff/__init__.py +8 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
- diffusers/pipelines/animatediff/pipeline_output.py +3 -2
- diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
- diffusers/pipelines/aura_flow/__init__.py +48 -0
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
- diffusers/pipelines/auto_pipeline.py +196 -28
- diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
- diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
- diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
- diffusers/pipelines/cogvideo/__init__.py +54 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
- diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
- diffusers/pipelines/cogview3/__init__.py +47 -0
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
- diffusers/pipelines/cogview3/pipeline_output.py +21 -0
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
- diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
- diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
- diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
- diffusers/pipelines/controlnet_xs/__init__.py +68 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
- diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
- diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
- diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
- diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
- diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
- diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
- diffusers/pipelines/dit/pipeline_dit.py +7 -4
- diffusers/pipelines/flux/__init__.py +69 -0
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +957 -0
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +37 -0
- diffusers/pipelines/free_init_utils.py +41 -38
- diffusers/pipelines/free_noise_utils.py +596 -0
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/__init__.py +48 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
- diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
- diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
- diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
- diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
- diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
- diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
- diffusers/pipelines/kolors/__init__.py +54 -0
- diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
- diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
- diffusers/pipelines/kolors/pipeline_output.py +21 -0
- diffusers/pipelines/kolors/text_encoder.py +889 -0
- diffusers/pipelines/kolors/tokenizer.py +338 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
- diffusers/pipelines/latte/__init__.py +48 -0
- diffusers/pipelines/latte/pipeline_latte.py +881 -0
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
- diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/__init__.py +48 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
- diffusers/pipelines/marigold/__init__.py +50 -0
- diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
- diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
- diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
- diffusers/pipelines/pag/__init__.py +80 -0
- diffusers/pipelines/pag/pag_utils.py +243 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
- diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
- diffusers/pipelines/pia/pipeline_pia.py +74 -164
- diffusers/pipelines/pipeline_flax_utils.py +5 -10
- diffusers/pipelines/pipeline_loading_utils.py +515 -53
- diffusers/pipelines/pipeline_utils.py +411 -222
- diffusers/pipelines/pixart_alpha/__init__.py +8 -1
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
- diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
- diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
- diffusers/pipelines/shap_e/renderer.py +1 -1
- diffusers/pipelines/stable_audio/__init__.py +50 -0
- diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
- diffusers/pipelines/stable_diffusion/__init__.py +0 -1
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
- diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
- diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
- diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
- diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
- diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
- diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
- diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
- diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
- diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
- diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
- diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
- diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
- diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
- diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
- diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
- diffusers/quantizers/auto.py +139 -0
- diffusers/quantizers/base.py +233 -0
- diffusers/quantizers/bitsandbytes/__init__.py +2 -0
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
- diffusers/quantizers/bitsandbytes/utils.py +306 -0
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +669 -0
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
- diffusers/schedulers/__init__.py +12 -2
- diffusers/schedulers/deprecated/__init__.py +1 -1
- diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
- diffusers/schedulers/scheduling_amused.py +5 -5
- diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
- diffusers/schedulers/scheduling_consistency_models.py +23 -25
- diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
- diffusers/schedulers/scheduling_ddim.py +27 -26
- diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
- diffusers/schedulers/scheduling_ddim_flax.py +2 -1
- diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
- diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
- diffusers/schedulers/scheduling_ddpm.py +27 -30
- diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
- diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
- diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
- diffusers/schedulers/scheduling_deis_multistep.py +150 -50
- diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
- diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
- diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
- diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
- diffusers/schedulers/scheduling_edm_euler.py +62 -39
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
- diffusers/schedulers/scheduling_euler_discrete.py +255 -74
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
- diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
- diffusers/schedulers/scheduling_heun_discrete.py +174 -46
- diffusers/schedulers/scheduling_ipndm.py +9 -9
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
- diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
- diffusers/schedulers/scheduling_lcm.py +23 -29
- diffusers/schedulers/scheduling_lms_discrete.py +105 -28
- diffusers/schedulers/scheduling_pndm.py +20 -20
- diffusers/schedulers/scheduling_repaint.py +21 -21
- diffusers/schedulers/scheduling_sasolver.py +157 -60
- diffusers/schedulers/scheduling_sde_ve.py +19 -19
- diffusers/schedulers/scheduling_tcd.py +41 -36
- diffusers/schedulers/scheduling_unclip.py +19 -16
- diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
- diffusers/schedulers/scheduling_utils.py +12 -5
- diffusers/schedulers/scheduling_utils_flax.py +1 -3
- diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
- diffusers/training_utils.py +214 -30
- diffusers/utils/__init__.py +17 -1
- diffusers/utils/constants.py +3 -0
- diffusers/utils/doc_utils.py +1 -0
- diffusers/utils/dummy_pt_objects.py +592 -7
- diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
- diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
- diffusers/utils/dynamic_modules_utils.py +34 -29
- diffusers/utils/export_utils.py +50 -6
- diffusers/utils/hub_utils.py +131 -17
- diffusers/utils/import_utils.py +210 -8
- diffusers/utils/loading_utils.py +118 -5
- diffusers/utils/logging.py +4 -2
- diffusers/utils/peft_utils.py +37 -7
- diffusers/utils/state_dict_utils.py +13 -2
- diffusers/utils/testing_utils.py +193 -11
- diffusers/utils/torch_utils.py +4 -0
- diffusers/video_processor.py +113 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
- diffusers-0.32.2.dist-info/RECORD +550 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
- diffusers/loaders/autoencoder.py +0 -146
- diffusers/loaders/controlnet.py +0 -136
- diffusers/loaders/lora.py +0 -1349
- diffusers/models/prior_transformer.py +0 -12
- diffusers/models/t5_film_transformer.py +0 -70
- diffusers/models/transformer_2d.py +0 -25
- diffusers/models/transformer_temporal.py +0 -34
- diffusers/models/unet_1d.py +0 -26
- diffusers/models/unet_1d_blocks.py +0 -203
- diffusers/models/unet_2d.py +0 -27
- diffusers/models/unet_2d_blocks.py +0 -375
- diffusers/models/unet_2d_condition.py +0 -25
- diffusers-0.27.1.dist-info/RECORD +0 -399
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
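Taken together, the list above shows the major structural changes in this release: a new `diffusers/quantizers` package (a shared `quantization_config` entry point with bitsandbytes, GGUF, and torchao backends), the ControlNet models regrouped under `diffusers/models/controlnets/`, the monolithic `loaders/lora.py` replaced by `lora_base.py` and `lora_pipeline.py`, and a large batch of new pipelines (Flux, Stable Diffusion 3, CogVideoX, HunyuanVideo, LTX, Mochi, Sana, Kolors, Latte, Marigold, Stable Audio, PAG, and others). A minimal sketch of the new quantized-loading path, assuming `bitsandbytes` is installed; the SD3 checkpoint is only an example:

import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

# Load a transformer with 4-bit weights via the new diffusers.quantizers machinery.
quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
transformer = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # example (gated) checkpoint
    subfolder="transformer",
    quantization_config=quant_config,
)

The hunks below reproduce the per-file diffs for four of the changed files, all under `diffusers/pipelines/text_to_video_synthesis/`.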
--- diffusers/pipelines/text_to_video_synthesis/pipeline_output.py (0.27.1)
+++ diffusers/pipelines/text_to_video_synthesis/pipeline_output.py (0.32.2)
@@ -15,9 +15,10 @@ class TextToVideoSDPipelineOutput(BaseOutput):
     """
     Output class for text-to-video pipelines.
 
-
+    Args:
         frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
-            List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing
+            List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing
+            denoised
             PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape
             `(batch_size, num_frames, channels, height, width)`
     """
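Two repo-wide renames recur throughout the three pipeline diffs below: `LoraLoaderMixin` became `StableDiffusionLoraLoaderMixin` (0.32 still ships a deprecated `LoraLoaderMixin` shim), and `torch.FloatTensor` in signatures and docstrings was widened to `torch.Tensor`. For downstream code the migration is a one-for-one import swap:

from diffusers.loaders import StableDiffusionLoraLoaderMixin  # 0.32.x; was `LoraLoaderMixin` in 0.27.x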
--- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py (0.27.1)
+++ diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py (0.32.2)
@@ -15,12 +15,10 @@
 import inspect
 from typing import Any, Callable, Dict, List, Optional, Union
 
-import numpy as np
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer
 
-from ...image_processor import VaeImageProcessor
-from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
+from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
 from ...models import AutoencoderKL, UNet3DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers
@@ -33,6 +31,7 @@ from ...utils import (
     unscale_lora_layers,
 )
 from ...utils.torch_utils import randn_tensor
+from ...video_processor import VideoProcessor
 from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from . import TextToVideoSDPipelineOutput
 
@@ -59,29 +58,9 @@ EXAMPLE_DOC_STRING = """
 """
 
 
-# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
-def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"):
-    batch_size, channels, num_frames, height, width = video.shape
-    outputs = []
-    for batch_idx in range(batch_size):
-        batch_vid = video[batch_idx].permute(1, 0, 2, 3)
-        batch_output = processor.postprocess(batch_vid, output_type)
-
-        outputs.append(batch_output)
-
-    if output_type == "np":
-        outputs = np.stack(outputs)
-
-    elif output_type == "pt":
-        outputs = torch.stack(outputs)
-
-    elif not output_type == "pil":
-        raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
-
-    return outputs
-
-
-class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
+class TextToVideoSDPipeline(
+    DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin
+):
     r"""
     Pipeline for text-to-video generation.
 
@@ -90,8 +69,8 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
 
     The pipeline also inherits the following loading methods:
         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
-        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
-        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
 
     Args:
         vae ([`AutoencoderKL`]):
@@ -127,7 +106,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             scheduler=scheduler,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor)
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
     def _encode_prompt(
@@ -137,8 +116,8 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         num_images_per_prompt,
         do_classifier_free_guidance,
         negative_prompt=None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
         lora_scale: Optional[float] = None,
         **kwargs,
     ):
@@ -170,8 +149,8 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
         lora_scale: Optional[float] = None,
         clip_skip: Optional[int] = None,
     ):
@@ -191,10 +170,10 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                 less than `1`).
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                 argument.
@@ -206,7 +185,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         """
         # set lora scale so that monkey patched LoRA
         # function of text encoder can correctly access it
-        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
+        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale
 
        # dynamically adjust the LoRA scale
@@ -338,9 +317,10 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
         negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
 
-        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
-            # Retrieve the original scale by scaling back the LoRA layers
-            unscale_lora_layers(self.text_encoder, lora_scale)
+        if self.text_encoder is not None:
+            if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder, lora_scale)
 
         return prompt_embeds, negative_prompt_embeds
 
@@ -465,12 +445,12 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         negative_prompt: Optional[Union[str, List[str]]] = None,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
         output_type: Optional[str] = "np",
         return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         clip_skip: Optional[int] = None,
@@ -505,25 +485,25 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                 A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                 generation deterministic.
-            latents (`torch.FloatTensor`, *optional*):
+            latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                 tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
                 `(batch_size, num_channel, num_frames, height, width)`.
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                 provided, text embeddings are generated from the `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
             output_type (`str`, *optional*, defaults to `"np"`):
-                The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.
+                The output format of the generated video. Choose between `torch.Tensor` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
                 Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
                 of a plain tuple.
             callback (`Callable`, *optional*):
                 A function that calls every `callback_steps` steps during inference. The function is called with the
-                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
             callback_steps (`int`, *optional*, defaults to 1):
                 The frequency at which the `callback` function is called. If not specified, the callback is called at
                 every step.
@@ -652,7 +632,7 @@ class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             video = latents
         else:
             video_tensor = self.decode_latents(latents)
-            video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
+            video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type)
 
         # 9. Offload all models
         self.maybe_free_model_hooks()
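The `tensor2vid` helper deleted above is subsumed by the shared `VideoProcessor` class added in `diffusers/video_processor.py` (new in this release, per the file list). Code that post-processed decoded video latents by hand can switch to `postprocess_video`; a minimal sketch with illustrative shapes:

import torch
from diffusers.video_processor import VideoProcessor

video_processor = VideoProcessor(do_resize=False, vae_scale_factor=8)

# Stand-in for `self.decode_latents(latents)`: one 16-frame RGB clip in [-1, 1],
# shaped (batch, channels, num_frames, height, width).
video_tensor = torch.rand(1, 3, 16, 256, 256) * 2.0 - 1.0

frames = video_processor.postprocess_video(video=video_tensor, output_type="np")
print(frames.shape)  # (1, 16, 256, 256, 3)

As with the old helper, `output_type` accepts "np", "pt", or "pil".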
--- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py (0.27.1)
+++ diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py (0.32.2)
@@ -16,12 +16,10 @@ import inspect
 from typing import Any, Callable, Dict, List, Optional, Union
 
 import numpy as np
-import PIL.Image
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer
 
-from ...image_processor import VaeImageProcessor
-from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
+from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
 from ...models import AutoencoderKL, UNet3DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers
@@ -34,6 +32,7 @@ from ...utils import (
     unscale_lora_layers,
 )
 from ...utils.torch_utils import randn_tensor
+from ...video_processor import VideoProcessor
 from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from . import TextToVideoSDPipelineOutput
 
@@ -94,70 +93,9 @@ def retrieve_latents(
     raise AttributeError("Could not access latents of provided encoder_output")
 
 
-# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
-def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"):
-    batch_size, channels, num_frames, height, width = video.shape
-    outputs = []
-    for batch_idx in range(batch_size):
-        batch_vid = video[batch_idx].permute(1, 0, 2, 3)
-        batch_output = processor.postprocess(batch_vid, output_type)
-
-        outputs.append(batch_output)
-
-    if output_type == "np":
-        outputs = np.stack(outputs)
-
-    elif output_type == "pt":
-        outputs = torch.stack(outputs)
-
-    elif not output_type == "pil":
-        raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
-
-    return outputs
-
-
-def preprocess_video(video):
-    supported_formats = (np.ndarray, torch.Tensor, PIL.Image.Image)
-
-    if isinstance(video, supported_formats):
-        video = [video]
-    elif not (isinstance(video, list) and all(isinstance(i, supported_formats) for i in video)):
-        raise ValueError(
-            f"Input is in incorrect format: {[type(i) for i in video]}. Currently, we only support {', '.join(supported_formats)}"
-        )
-
-    if isinstance(video[0], PIL.Image.Image):
-        video = [np.array(frame) for frame in video]
-
-    if isinstance(video[0], np.ndarray):
-        video = np.concatenate(video, axis=0) if video[0].ndim == 5 else np.stack(video, axis=0)
-
-        if video.dtype == np.uint8:
-            video = np.array(video).astype(np.float32) / 255.0
-
-        if video.ndim == 4:
-            video = video[None, ...]
-
-        video = torch.from_numpy(video.transpose(0, 4, 1, 2, 3))
-
-    elif isinstance(video[0], torch.Tensor):
-        video = torch.cat(video, axis=0) if video[0].ndim == 5 else torch.stack(video, axis=0)
-
-        # don't need any preprocess if the video is latents
-        channel = video.shape[1]
-        if channel == 4:
-            return video
-
-        # move channels before num_frames
-        video = video.permute(0, 2, 1, 3, 4)
-
-        # normalize video
-        video = 2.0 * video - 1.0
-
-    return video
-
-
-class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
+class VideoToVideoSDPipeline(
+    DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin
+):
     r"""
     Pipeline for text-guided video-to-video generation.
 
@@ -166,8 +104,8 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
 
     The pipeline also inherits the following loading methods:
         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
-        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
-        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
 
     Args:
         vae ([`AutoencoderKL`]):
@@ -203,7 +141,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             scheduler=scheduler,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor)
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
     def _encode_prompt(
@@ -213,8 +151,8 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         num_images_per_prompt,
         do_classifier_free_guidance,
         negative_prompt=None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
         lora_scale: Optional[float] = None,
         **kwargs,
     ):
@@ -246,8 +184,8 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         num_images_per_prompt,
         do_classifier_free_guidance,
         negative_prompt=None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
         lora_scale: Optional[float] = None,
         clip_skip: Optional[int] = None,
     ):
@@ -267,10 +205,10 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                 less than `1`).
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                 argument.
@@ -282,7 +220,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         """
         # set lora scale so that monkey patched LoRA
         # function of text encoder can correctly access it
-        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
+        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale
 
         # dynamically adjust the LoRA scale
@@ -414,9 +352,10 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
         negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
 
-        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
-            # Retrieve the original scale by scaling back the LoRA layers
-            unscale_lora_layers(self.text_encoder, lora_scale)
+        if self.text_encoder is not None:
+            if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder, lora_scale)
 
         return prompt_embeds, negative_prompt_embeds
 
@@ -563,19 +502,19 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
     def __call__(
         self,
         prompt: Union[str, List[str]] = None,
-        video: Union[List[np.ndarray], torch.FloatTensor] = None,
+        video: Union[List[np.ndarray], torch.Tensor] = None,
         strength: float = 0.6,
         num_inference_steps: int = 50,
         guidance_scale: float = 15.0,
         negative_prompt: Optional[Union[str, List[str]]] = None,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
         output_type: Optional[str] = "np",
         return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         clip_skip: Optional[int] = None,
@@ -586,7 +525,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         Args:
             prompt (`str` or `List[str]`, *optional*):
                 The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
-            video (`List[np.ndarray]` or `torch.FloatTensor`):
+            video (`List[np.ndarray]` or `torch.Tensor`):
                 `video` frames or tensor representing a video batch to be used as the starting point for the process.
                 Can also accept video latents as `image`, if passing latents directly, it will not be encoded again.
             strength (`float`, *optional*, defaults to 0.8):
@@ -610,25 +549,25 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                 A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                 generation deterministic.
-            latents (`torch.FloatTensor`, *optional*):
+            latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                 tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
                 `(batch_size, num_channel, num_frames, height, width)`.
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                 provided, text embeddings are generated from the `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
             output_type (`str`, *optional*, defaults to `"np"`):
-                The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.
+                The output format of the generated video. Choose between `torch.Tensor` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
                 Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
                 of a plain tuple.
             callback (`Callable`, *optional*):
                 A function that calls every `callback_steps` steps during inference. The function is called with the
-                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
             callback_steps (`int`, *optional*, defaults to 1):
                 The frequency at which the `callback` function is called. If not specified, the callback is called at
                 every step.
@@ -687,7 +626,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
 
         # 4. Preprocess video
-        video = preprocess_video(video)
+        video = self.video_processor.preprocess_video(video)
 
         # 5. Prepare timesteps
         self.scheduler.set_timesteps(num_inference_steps, device=device)
@@ -749,7 +688,7 @@ class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             video = latents
         else:
             video_tensor = self.decode_latents(latents)
-            video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
+            video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type)
 
         # 10. Offload all models
         self.maybe_free_model_hooks()
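The module-level `preprocess_video` removed above moves the same way: its logic now lives in `VideoProcessor.preprocess_video`, which accepts PIL frames, NumPy arrays, or tensors and returns a `(batch, channels, num_frames, height, width)` tensor normalized to [-1, 1]. A sketch, with random PIL frames standing in for a real clip:

import numpy as np
from PIL import Image

from diffusers.video_processor import VideoProcessor

video_processor = VideoProcessor(do_resize=False, vae_scale_factor=8)

# 16 RGB frames of a fake input clip.
frames = [
    Image.fromarray(np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8))
    for _ in range(16)
]

video = video_processor.preprocess_video(frames)
print(video.shape)  # torch.Size([1, 3, 16, 256, 256])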
--- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py (0.27.1)
+++ diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py (0.32.2)
@@ -11,7 +11,7 @@ from torch.nn.functional import grid_sample
 from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
 
 from ...image_processor import VaeImageProcessor
-from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
+from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
 from ...models import AutoencoderKL, UNet2DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers
@@ -281,7 +281,9 @@ def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents):
     return warped_latents
 
 
-class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
+class TextToVideoZeroPipeline(
+    DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin
+):
     r"""
     Pipeline for zero-shot text-to-video generation using Stable Diffusion.
 
@@ -329,13 +331,6 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
         )
-        processor = (
-            CrossFrameAttnProcessor2_0(batch_size=2)
-            if hasattr(F, "scaled_dot_product_attention")
-            else CrossFrameAttnProcessor(batch_size=2)
-        )
-        self.unet.set_attn_processor(processor)
-
         if safety_checker is None and requires_safety_checker:
             logger.warning(
                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
@@ -399,7 +394,7 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
             callback (`Callable`, *optional*):
                 A function that calls every `callback_steps` steps during inference. The function is called with the
-                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
             callback_steps (`int`, *optional*, defaults to 1):
                 The frequency at which the `callback` function is called. If not specified, the callback is called at
                 every step.
@@ -502,7 +497,12 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
     def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
-        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
+        shape = (
+            batch_size,
+            num_channels_latents,
+            int(height) // self.vae_scale_factor,
+            int(width) // self.vae_scale_factor,
+        )
         if isinstance(generator, list) and len(generator) != batch_size:
             raise ValueError(
                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
@@ -531,12 +531,12 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         num_videos_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
+        latents: Optional[torch.Tensor] = None,
         motion_field_strength_x: float = 12,
         motion_field_strength_y: float = 12,
         output_type: Optional[str] = "tensor",
         return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: Optional[int] = 1,
         t0: int = 44,
         t1: int = 47,
@@ -571,19 +571,19 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                 A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                 generation deterministic.
-            latents (`torch.FloatTensor`, *optional*):
+            latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                 tensor is generated by sampling using the supplied random `generator`.
-            output_type (`str`, *optional*, defaults to `"tensor"`):
-                The output format of the generated video. Choose between `"latent"` and `"tensor"`.
+            output_type (`str`, *optional*, defaults to `"np"`):
+                The output format of the generated video. Choose between `"latent"` and `"np"`.
             return_dict (`bool`, *optional*, defaults to `True`):
                 Whether or not to return a
                 [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of
                 a plain tuple.
             callback (`Callable`, *optional*):
                 A function that calls every `callback_steps` steps during inference. The function is called with the
-                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
             callback_steps (`int`, *optional*, defaults to 1):
                 The frequency at which the `callback` function is called. If not specified, the callback is called at
                 every step.
@@ -616,6 +616,15 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
 
         assert num_videos_per_prompt == 1
 
+        # set the processor
+        original_attn_proc = self.unet.attn_processors
+        processor = (
+            CrossFrameAttnProcessor2_0(batch_size=2)
+            if hasattr(F, "scaled_dot_product_attention")
+            else CrossFrameAttnProcessor(batch_size=2)
+        )
+        self.unet.set_attn_processor(processor)
+
         if isinstance(prompt, str):
             prompt = [prompt]
         if isinstance(negative_prompt, str):
@@ -739,6 +748,8 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
 
         # Offload all models
         self.maybe_free_model_hooks()
+        # make sure to set the original attention processors back
+        self.unet.set_attn_processor(original_attn_proc)
 
         if not return_dict:
             return (image, has_nsfw_concept)
@@ -786,8 +797,8 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         num_images_per_prompt,
         do_classifier_free_guidance,
         negative_prompt=None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
         lora_scale: Optional[float] = None,
         clip_skip: Optional[int] = None,
     ):
@@ -807,10 +818,10 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                 less than `1`).
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                 provided, text embeddings will be generated from `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                 argument.
@@ -822,7 +833,7 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         """
         # set lora scale so that monkey patched LoRA
         # function of text encoder can correctly access it
-        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
+        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale
 
         # dynamically adjust the LoRA scale
@@ -954,9 +965,10 @@ class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin):
         negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
         negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
 
-        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
-            # Retrieve the original scale by scaling back the LoRA layers
-            unscale_lora_layers(self.text_encoder, lora_scale)
+        if self.text_encoder is not None:
+            if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder, lora_scale)
 
         return prompt_embeds, negative_prompt_embeds
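The last three hunks relocate the cross-frame attention setup of `TextToVideoZeroPipeline`: instead of permanently mutating the UNet in `__init__`, `__call__` now snapshots `unet.attn_processors`, swaps in the cross-frame processors, and restores the originals on the way out, so a UNet shared with other pipelines is left untouched. The same save/swap/restore pattern is useful in user code; a sketch (the helper name is hypothetical, and `pipe` is assumed to be any loaded Stable-Diffusion-style pipeline):

import torch.nn.functional as F

from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import (
    CrossFrameAttnProcessor,
    CrossFrameAttnProcessor2_0,
)


def run_with_cross_frame_attention(pipe, **call_kwargs):
    # Snapshot the current processors, exactly as the new __call__ does.
    original_attn_proc = pipe.unet.attn_processors
    processor = (
        CrossFrameAttnProcessor2_0(batch_size=2)
        if hasattr(F, "scaled_dot_product_attention")  # PyTorch >= 2.0
        else CrossFrameAttnProcessor(batch_size=2)
    )
    pipe.unet.set_attn_processor(processor)
    try:
        return pipe(**call_kwargs)
    finally:
        # Restore the UNet exactly as we found it.
        pipe.unet.set_attn_processor(original_attn_proc)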