diffusers 0.27.1__py3-none-any.whl → 0.32.2__py3-none-any.whl
- diffusers/__init__.py +233 -6
- diffusers/callbacks.py +209 -0
- diffusers/commands/env.py +102 -6
- diffusers/configuration_utils.py +45 -16
- diffusers/dependency_versions_table.py +4 -3
- diffusers/image_processor.py +434 -110
- diffusers/loaders/__init__.py +42 -9
- diffusers/loaders/ip_adapter.py +626 -36
- diffusers/loaders/lora_base.py +900 -0
- diffusers/loaders/lora_conversion_utils.py +991 -125
- diffusers/loaders/lora_pipeline.py +3812 -0
- diffusers/loaders/peft.py +571 -7
- diffusers/loaders/single_file.py +405 -173
- diffusers/loaders/single_file_model.py +385 -0
- diffusers/loaders/single_file_utils.py +1783 -713
- diffusers/loaders/textual_inversion.py +41 -23
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +464 -540
- diffusers/loaders/unet_loader_utils.py +163 -0
- diffusers/models/__init__.py +76 -7
- diffusers/models/activations.py +65 -10
- diffusers/models/adapter.py +53 -53
- diffusers/models/attention.py +605 -18
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +4304 -687
- diffusers/models/autoencoders/__init__.py +8 -0
- diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +110 -28
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
- diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
- diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
- diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
- diffusers/models/autoencoders/vae.py +41 -29
- diffusers/models/autoencoders/vq_model.py +182 -0
- diffusers/models/controlnet.py +47 -800
- diffusers/models/controlnet_flux.py +70 -0
- diffusers/models/controlnet_sd3.py +68 -0
- diffusers/models/controlnet_sparsectrl.py +116 -0
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/controlnets/controlnet_xs.py +1946 -0
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/downsampling.py +85 -18
- diffusers/models/embeddings.py +1856 -158
- diffusers/models/embeddings_flax.py +23 -9
- diffusers/models/model_loading_utils.py +480 -0
- diffusers/models/modeling_flax_pytorch_utils.py +2 -1
- diffusers/models/modeling_flax_utils.py +2 -7
- diffusers/models/modeling_outputs.py +14 -0
- diffusers/models/modeling_pytorch_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +611 -146
- diffusers/models/normalization.py +361 -20
- diffusers/models/resnet.py +18 -23
- diffusers/models/transformers/__init__.py +16 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
- diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
- diffusers/models/transformers/dit_transformer_2d.py +240 -0
- diffusers/models/transformers/dual_transformer_2d.py +9 -8
- diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
- diffusers/models/transformers/latte_transformer_3d.py +327 -0
- diffusers/models/transformers/lumina_nextdit2d.py +340 -0
- diffusers/models/transformers/pixart_transformer_2d.py +445 -0
- diffusers/models/transformers/prior_transformer.py +13 -13
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +458 -0
- diffusers/models/transformers/t5_film_transformer.py +17 -19
- diffusers/models/transformers/transformer_2d.py +297 -187
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +386 -0
- diffusers/models/transformers/transformer_flux.py +593 -0
- diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +461 -0
- diffusers/models/transformers/transformer_temporal.py +21 -19
- diffusers/models/unets/unet_1d.py +8 -8
- diffusers/models/unets/unet_1d_blocks.py +31 -31
- diffusers/models/unets/unet_2d.py +17 -10
- diffusers/models/unets/unet_2d_blocks.py +225 -149
- diffusers/models/unets/unet_2d_condition.py +41 -40
- diffusers/models/unets/unet_2d_condition_flax.py +6 -5
- diffusers/models/unets/unet_3d_blocks.py +192 -1057
- diffusers/models/unets/unet_3d_condition.py +22 -27
- diffusers/models/unets/unet_i2vgen_xl.py +22 -18
- diffusers/models/unets/unet_kandinsky3.py +2 -2
- diffusers/models/unets/unet_motion_model.py +1413 -89
- diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
- diffusers/models/unets/unet_stable_cascade.py +19 -18
- diffusers/models/unets/uvit_2d.py +2 -2
- diffusers/models/upsampling.py +95 -26
- diffusers/models/vq_model.py +12 -164
- diffusers/optimization.py +1 -1
- diffusers/pipelines/__init__.py +202 -3
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/amused/pipeline_amused.py +12 -12
- diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
- diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
- diffusers/pipelines/animatediff/__init__.py +8 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
- diffusers/pipelines/animatediff/pipeline_output.py +3 -2
- diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
- diffusers/pipelines/aura_flow/__init__.py +48 -0
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
- diffusers/pipelines/auto_pipeline.py +196 -28
- diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
- diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
- diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
- diffusers/pipelines/cogvideo/__init__.py +54 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
- diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
- diffusers/pipelines/cogview3/__init__.py +47 -0
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
- diffusers/pipelines/cogview3/pipeline_output.py +21 -0
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
- diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
- diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
- diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
- diffusers/pipelines/controlnet_xs/__init__.py +68 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
- diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
- diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
- diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
- diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
- diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
- diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
- diffusers/pipelines/dit/pipeline_dit.py +7 -4
- diffusers/pipelines/flux/__init__.py +69 -0
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +957 -0
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +37 -0
- diffusers/pipelines/free_init_utils.py +41 -38
- diffusers/pipelines/free_noise_utils.py +596 -0
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/__init__.py +48 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
- diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
- diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
- diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
- diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
- diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
- diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
- diffusers/pipelines/kolors/__init__.py +54 -0
- diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
- diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
- diffusers/pipelines/kolors/pipeline_output.py +21 -0
- diffusers/pipelines/kolors/text_encoder.py +889 -0
- diffusers/pipelines/kolors/tokenizer.py +338 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
- diffusers/pipelines/latte/__init__.py +48 -0
- diffusers/pipelines/latte/pipeline_latte.py +881 -0
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
- diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/__init__.py +48 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
- diffusers/pipelines/marigold/__init__.py +50 -0
- diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
- diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
- diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
- diffusers/pipelines/pag/__init__.py +80 -0
- diffusers/pipelines/pag/pag_utils.py +243 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
- diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
- diffusers/pipelines/pia/pipeline_pia.py +74 -164
- diffusers/pipelines/pipeline_flax_utils.py +5 -10
- diffusers/pipelines/pipeline_loading_utils.py +515 -53
- diffusers/pipelines/pipeline_utils.py +411 -222
- diffusers/pipelines/pixart_alpha/__init__.py +8 -1
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
- diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
- diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
- diffusers/pipelines/shap_e/renderer.py +1 -1
- diffusers/pipelines/stable_audio/__init__.py +50 -0
- diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
- diffusers/pipelines/stable_diffusion/__init__.py +0 -1
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
- diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
- diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
- diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
- diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
- diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
- diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
- diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
- diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
- diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
- diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
- diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
- diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
- diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
- diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
- diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
- diffusers/quantizers/auto.py +139 -0
- diffusers/quantizers/base.py +233 -0
- diffusers/quantizers/bitsandbytes/__init__.py +2 -0
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
- diffusers/quantizers/bitsandbytes/utils.py +306 -0
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +669 -0
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
- diffusers/schedulers/__init__.py +12 -2
- diffusers/schedulers/deprecated/__init__.py +1 -1
- diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
- diffusers/schedulers/scheduling_amused.py +5 -5
- diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
- diffusers/schedulers/scheduling_consistency_models.py +23 -25
- diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
- diffusers/schedulers/scheduling_ddim.py +27 -26
- diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
- diffusers/schedulers/scheduling_ddim_flax.py +2 -1
- diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
- diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
- diffusers/schedulers/scheduling_ddpm.py +27 -30
- diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
- diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
- diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
- diffusers/schedulers/scheduling_deis_multistep.py +150 -50
- diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
- diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
- diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
- diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
- diffusers/schedulers/scheduling_edm_euler.py +62 -39
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
- diffusers/schedulers/scheduling_euler_discrete.py +255 -74
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
- diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
- diffusers/schedulers/scheduling_heun_discrete.py +174 -46
- diffusers/schedulers/scheduling_ipndm.py +9 -9
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
- diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
- diffusers/schedulers/scheduling_lcm.py +23 -29
- diffusers/schedulers/scheduling_lms_discrete.py +105 -28
- diffusers/schedulers/scheduling_pndm.py +20 -20
- diffusers/schedulers/scheduling_repaint.py +21 -21
- diffusers/schedulers/scheduling_sasolver.py +157 -60
- diffusers/schedulers/scheduling_sde_ve.py +19 -19
- diffusers/schedulers/scheduling_tcd.py +41 -36
- diffusers/schedulers/scheduling_unclip.py +19 -16
- diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
- diffusers/schedulers/scheduling_utils.py +12 -5
- diffusers/schedulers/scheduling_utils_flax.py +1 -3
- diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
- diffusers/training_utils.py +214 -30
- diffusers/utils/__init__.py +17 -1
- diffusers/utils/constants.py +3 -0
- diffusers/utils/doc_utils.py +1 -0
- diffusers/utils/dummy_pt_objects.py +592 -7
- diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
- diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
- diffusers/utils/dynamic_modules_utils.py +34 -29
- diffusers/utils/export_utils.py +50 -6
- diffusers/utils/hub_utils.py +131 -17
- diffusers/utils/import_utils.py +210 -8
- diffusers/utils/loading_utils.py +118 -5
- diffusers/utils/logging.py +4 -2
- diffusers/utils/peft_utils.py +37 -7
- diffusers/utils/state_dict_utils.py +13 -2
- diffusers/utils/testing_utils.py +193 -11
- diffusers/utils/torch_utils.py +4 -0
- diffusers/video_processor.py +113 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
- diffusers-0.32.2.dist-info/RECORD +550 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
- diffusers/loaders/autoencoder.py +0 -146
- diffusers/loaders/controlnet.py +0 -136
- diffusers/loaders/lora.py +0 -1349
- diffusers/models/prior_transformer.py +0 -12
- diffusers/models/t5_film_transformer.py +0 -70
- diffusers/models/transformer_2d.py +0 -25
- diffusers/models/transformer_temporal.py +0 -34
- diffusers/models/unet_1d.py +0 -26
- diffusers/models/unet_1d_blocks.py +0 -203
- diffusers/models/unet_2d.py +0 -27
- diffusers/models/unet_2d_blocks.py +0 -375
- diffusers/models/unet_2d_condition.py +0 -25
- diffusers-0.27.1.dist-info/RECORD +0 -399
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
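The file list above adds several new pipeline families (Flux, Stable Diffusion 3, CogVideoX, Mochi, LTX, HunyuanVideo, Sana, Kolors, AuraFlow, among others). As a rough orientation, a minimal text-to-image sketch with the new `FluxPipeline` might look like the following; the checkpoint id and the generation settings are illustrative assumptions, not part of this diff.

```python
# Minimal sketch (assumed usage): text-to-image with FluxPipeline, new in this release range.
# The repo id "black-forest-labs/FLUX.1-schnell" and the step/guidance settings are assumptions.
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()  # keep VRAM usage manageable on consumer GPUs

image = pipe(
    "A cat holding a sign that says hello world",
    num_inference_steps=4,    # the schnell variant is distilled for few-step sampling
    guidance_scale=0.0,       # schnell does not use classifier-free guidance
    max_sequence_length=256,
).images[0]
image.save("flux-schnell.png")
```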
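The new `diffusers/quantizers` package (bitsandbytes, GGUF and torchao backends plus `quantization_config.py`) lets large transformer backbones be loaded in reduced precision. A hedged sketch of 4-bit NF4 loading via the bitsandbytes backend, assuming a Flux checkpoint that exposes a `transformer` subfolder:

```python
# Hedged sketch: 4-bit loading through the new quantizers package (requires bitsandbytes installed).
# The checkpoint id is an assumption; any model with a "transformer" subfolder loads the same way.
import torch
from diffusers import BitsAndBytesConfig, FluxTransformer2DModel

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
print(transformer.dtype, sum(p.numel() for p in transformer.parameters()))
```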
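The `diffusers/pipelines/pag` family added above wires perturbed-attention guidance (PAG) into the standard pipelines. A hedged sketch of enabling it through `AutoPipelineForText2Image`, assuming the SDXL base checkpoint and illustrative PAG settings:

```python
# Hedged sketch: perturbed-attention guidance via the new pag pipelines.
# The checkpoint, pag_applied_layers and pag_scale values are illustrative assumptions.
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    enable_pag=True,               # routes to the PAG variant of the SDXL pipeline
    pag_applied_layers=["mid"],    # which attention layers get perturbed
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    "an astronaut riding a horse",
    guidance_scale=7.0,
    pag_scale=3.0,                 # strength of the perturbed-attention term
).images[0]
```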
diffusers/pipelines/audioldm2/pipeline_audioldm2.py

@@ -27,6 +27,8 @@ from transformers import (
     T5EncoderModel,
     T5Tokenizer,
     T5TokenizerFast,
+    VitsModel,
+    VitsTokenizer,
 )
 
 from ...models import AutoencoderKL
@@ -79,6 +81,37 @@ EXAMPLE_DOC_STRING = """
         >>> # save the best audio sample (index 0) as a .wav file
         >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio[0])
         ```
+        ```
+        #Using AudioLDM2 for Text To Speech
+        >>> import scipy
+        >>> import torch
+        >>> from diffusers import AudioLDM2Pipeline
+
+        >>> repo_id = "anhnct/audioldm2_gigaspeech"
+        >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
+        >>> pipe = pipe.to("cuda")
+
+        >>> # define the prompts
+        >>> prompt = "A female reporter is speaking"
+        >>> transcript = "wish you have a good day"
+
+        >>> # set the seed for generator
+        >>> generator = torch.Generator("cuda").manual_seed(0)
+
+        >>> # run the generation
+        >>> audio = pipe(
+        ...     prompt,
+        ...     transcription=transcript,
+        ...     num_inference_steps=200,
+        ...     audio_length_in_s=10.0,
+        ...     num_waveforms_per_prompt=2,
+        ...     generator=generator,
+        ...     max_new_tokens=512,          #Must set max_new_tokens equa to 512 for TTS
+        ... ).audios
+
+        >>> # save the best audio sample (index 0) as a .wav file
+        >>> scipy.io.wavfile.write("tts.wav", rate=16000, data=audio[0])
+        ```
 """
 
 
@@ -116,20 +149,23 @@ class AudioLDM2Pipeline(DiffusionPipeline):
             specifically the [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. The
             text branch is used to encode the text prompt to a prompt embedding. The full audio-text model is used to
             rank generated waveforms against the text prompt by computing similarity scores.
-        text_encoder_2 ([`~transformers.T5EncoderModel`]):
+        text_encoder_2 ([`~transformers.T5EncoderModel`, `~transformers.VitsModel`]):
             Second frozen text-encoder. AudioLDM2 uses the encoder of
             [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the
-            [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) variant.
+            [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) variant. Second frozen text-encoder use
+            for TTS. AudioLDM2 uses the encoder of
+            [Vits](https://huggingface.co/docs/transformers/model_doc/vits#transformers.VitsModel).
         projection_model ([`AudioLDM2ProjectionModel`]):
             A trained model used to linearly project the hidden-states from the first and second text encoder models
             and insert learned SOS and EOS token embeddings. The projected hidden-states from the two text encoders are
-            concatenated to give the input to the language model.
+            concatenated to give the input to the language model. A Learned Position Embedding for the Vits
+            hidden-states
         language_model ([`~transformers.GPT2Model`]):
             An auto-regressive language model used to generate a sequence of hidden-states conditioned on the projected
             outputs from the two text encoders.
         tokenizer ([`~transformers.RobertaTokenizer`]):
             Tokenizer to tokenize text for the first frozen text-encoder.
-        tokenizer_2 ([`~transformers.T5Tokenizer`]):
+        tokenizer_2 ([`~transformers.T5Tokenizer`, `~transformers.VitsTokenizer`]):
             Tokenizer to tokenize text for the second frozen text-encoder.
         feature_extractor ([`~transformers.ClapFeatureExtractor`]):
             Feature extractor to pre-process generated audio waveforms to log-mel spectrograms for automatic scoring.
@@ -146,11 +182,11 @@ class AudioLDM2Pipeline(DiffusionPipeline):
         self,
         vae: AutoencoderKL,
         text_encoder: ClapModel,
-        text_encoder_2: T5EncoderModel,
+        text_encoder_2: Union[T5EncoderModel, VitsModel],
         projection_model: AudioLDM2ProjectionModel,
         language_model: GPT2Model,
         tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast],
-        tokenizer_2: Union[T5Tokenizer, T5TokenizerFast],
+        tokenizer_2: Union[T5Tokenizer, T5TokenizerFast, VitsTokenizer],
         feature_extractor: ClapFeatureExtractor,
         unet: AudioLDM2UNet2DConditionModel,
         scheduler: KarrasDiffusionSchedulers,
@@ -237,7 +273,7 @@ class AudioLDM2Pipeline(DiffusionPipeline):
         Generates a sequence of hidden-states from the language model, conditioned on the embedding inputs.
 
         Parameters:
-            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+            inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
                 The sequence used as a prompt for the generation.
             max_new_tokens (`int`):
                 Number of new tokens to generate.
@@ -246,10 +282,11 @@ class AudioLDM2Pipeline(DiffusionPipeline):
             function of the model.
 
         Return:
-            `inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+            `inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
                 The sequence of generated hidden-states.
         """
         max_new_tokens = max_new_tokens if max_new_tokens is not None else self.language_model.config.max_new_tokens
+        model_kwargs = self.language_model._get_initial_cache_position(inputs_embeds, model_kwargs)
         for _ in range(max_new_tokens):
             # prepare model inputs
             model_inputs = prepare_inputs_for_generation(inputs_embeds, **model_kwargs)
@@ -273,11 +310,12 @@ class AudioLDM2Pipeline(DiffusionPipeline):
         device,
         num_waveforms_per_prompt,
         do_classifier_free_guidance,
+        transcription=None,
         negative_prompt=None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        generated_prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_generated_prompt_embeds: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        generated_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_generated_prompt_embeds: Optional[torch.Tensor] = None,
         attention_mask: Optional[torch.LongTensor] = None,
         negative_attention_mask: Optional[torch.LongTensor] = None,
         max_new_tokens: Optional[int] = None,
@@ -288,6 +326,8 @@ class AudioLDM2Pipeline(DiffusionPipeline):
         Args:
             prompt (`str` or `List[str]`, *optional*):
                 prompt to be encoded
+            transcription (`str` or `List[str]`):
+                transcription of text to speech
             device (`torch.device`):
                 torch device
             num_waveforms_per_prompt (`int`):
@@ -298,18 +338,18 @@ class AudioLDM2Pipeline(DiffusionPipeline):
                 The prompt or prompts not to guide the audio generation. If not defined, one has to pass
                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                 less than `1`).
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-computed text embeddings from the Flan T5 model. Can be used to easily tweak text inputs, *e.g.*
                 prompt weighting. If not provided, text embeddings will be computed from `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-computed negative text embeddings from the Flan T5 model. Can be used to easily tweak text inputs,
                 *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from
                 `negative_prompt` input argument.
-            generated_prompt_embeds (`torch.FloatTensor`, *optional*):
+            generated_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings from the GPT2 langauge model. Can be used to easily tweak text inputs,
                 *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input
                 argument.
-            negative_generated_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_generated_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings from the GPT2 language model. Can be used to easily tweak text
                 inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from
                 `negative_prompt` input argument.
@@ -322,11 +362,11 @@ class AudioLDM2Pipeline(DiffusionPipeline):
             max_new_tokens (`int`, *optional*, defaults to None):
                 The number of new tokens to generate with the GPT2 language model.
         Returns:
-            prompt_embeds (`torch.FloatTensor`):
+            prompt_embeds (`torch.Tensor`):
                 Text embeddings from the Flan T5 model.
             attention_mask (`torch.LongTensor`):
                 Attention mask to be applied to the `prompt_embeds`.
-            generated_prompt_embeds (`torch.FloatTensor`):
+            generated_prompt_embeds (`torch.Tensor`):
                 Text embeddings generated from the GPT2 langauge model.
 
         Example:
@@ -368,16 +408,26 @@ class AudioLDM2Pipeline(DiffusionPipeline):
 
         # Define tokenizers and text encoders
         tokenizers = [self.tokenizer, self.tokenizer_2]
-        text_encoders = [self.text_encoder, self.text_encoder_2]
+        is_vits_text_encoder = isinstance(self.text_encoder_2, VitsModel)
+
+        if is_vits_text_encoder:
+            text_encoders = [self.text_encoder, self.text_encoder_2.text_encoder]
+        else:
+            text_encoders = [self.text_encoder, self.text_encoder_2]
 
         if prompt_embeds is None:
             prompt_embeds_list = []
             attention_mask_list = []
 
             for tokenizer, text_encoder in zip(tokenizers, text_encoders):
+                use_prompt = isinstance(
+                    tokenizer, (RobertaTokenizer, RobertaTokenizerFast, T5Tokenizer, T5TokenizerFast)
+                )
                 text_inputs = tokenizer(
-                    prompt,
-                    padding="max_length" if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast)) else True,
+                    prompt if use_prompt else transcription,
+                    padding="max_length"
+                    if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, VitsTokenizer))
+                    else True,
                     max_length=tokenizer.model_max_length,
                     truncation=True,
                     return_tensors="pt",
@@ -407,6 +457,18 @@ class AudioLDM2Pipeline(DiffusionPipeline):
                     prompt_embeds = prompt_embeds[:, None, :]
                     # make sure that we attend to this single hidden-state
                     attention_mask = attention_mask.new_ones((batch_size, 1))
+                elif is_vits_text_encoder:
+                    # Add end_token_id and attention mask in the end of sequence phonemes
+                    for text_input_id, text_attention_mask in zip(text_input_ids, attention_mask):
+                        for idx, phoneme_id in enumerate(text_input_id):
+                            if phoneme_id == 0:
+                                text_input_id[idx] = 182
+                                text_attention_mask[idx] = 1
+                                break
+                    prompt_embeds = text_encoder(
+                        text_input_ids, attention_mask=attention_mask, padding_mask=attention_mask.unsqueeze(-1)
+                    )
+                    prompt_embeds = prompt_embeds[0]
                 else:
                     prompt_embeds = text_encoder(
                         text_input_ids,
@@ -485,7 +547,7 @@ class AudioLDM2Pipeline(DiffusionPipeline):
                     uncond_tokens,
                     padding="max_length",
                     max_length=tokenizer.model_max_length
-                    if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
+                    if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, VitsTokenizer))
                    else max_length,
                     truncation=True,
                     return_tensors="pt",
@@ -503,6 +565,15 @@ class AudioLDM2Pipeline(DiffusionPipeline):
                     negative_prompt_embeds = negative_prompt_embeds[:, None, :]
                     # make sure that we attend to this single hidden-state
                     negative_attention_mask = negative_attention_mask.new_ones((batch_size, 1))
+                elif is_vits_text_encoder:
+                    negative_prompt_embeds = torch.zeros(
+                        batch_size,
+                        tokenizer.model_max_length,
+                        text_encoder.config.hidden_size,
+                    ).to(dtype=self.text_encoder_2.dtype, device=device)
+                    negative_attention_mask = torch.zeros(batch_size, tokenizer.model_max_length).to(
+                        dtype=self.text_encoder_2.dtype, device=device
+                    )
                 else:
                     negative_prompt_embeds = text_encoder(
                         uncond_input_ids,
@@ -623,6 +694,7 @@ class AudioLDM2Pipeline(DiffusionPipeline):
         audio_length_in_s,
         vocoder_upsample_factor,
         callback_steps,
+        transcription=None,
         negative_prompt=None,
         prompt_embeds=None,
         negative_prompt_embeds=None,
@@ -690,6 +762,14 @@ class AudioLDM2Pipeline(DiffusionPipeline):
                     f"`attention_mask: {attention_mask.shape} != `prompt_embeds` {prompt_embeds.shape}"
                 )
 
+        if transcription is None:
+            if self.text_encoder_2.config.model_type == "vits":
+                raise ValueError("Cannot forward without transcription. Please make sure to" " have transcription")
+        elif transcription is not None and (
+            not isinstance(transcription, str) and not isinstance(transcription, list)
+        ):
+            raise ValueError(f"`transcription` has to be of type `str` or `list` but is {type(transcription)}")
+
         if generated_prompt_embeds is not None and negative_generated_prompt_embeds is not None:
             if generated_prompt_embeds.shape != negative_generated_prompt_embeds.shape:
                 raise ValueError(
@@ -711,8 +791,8 @@ class AudioLDM2Pipeline(DiffusionPipeline):
         shape = (
             batch_size,
             num_channels_latents,
-            height // self.vae_scale_factor,
-            self.vocoder.config.model_in_dim // self.vae_scale_factor,
+            int(height) // self.vae_scale_factor,
+            int(self.vocoder.config.model_in_dim) // self.vae_scale_factor,
         )
         if isinstance(generator, list) and len(generator) != batch_size:
             raise ValueError(
@@ -734,6 +814,7 @@ class AudioLDM2Pipeline(DiffusionPipeline):
     def __call__(
         self,
         prompt: Union[str, List[str]] = None,
+        transcription: Union[str, List[str]] = None,
         audio_length_in_s: Optional[float] = None,
         num_inference_steps: int = 200,
         guidance_scale: float = 3.5,
@@ -741,16 +822,16 @@ class AudioLDM2Pipeline(DiffusionPipeline):
         num_waveforms_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.FloatTensor] = None,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        generated_prompt_embeds: Optional[torch.FloatTensor] = None,
-        negative_generated_prompt_embeds: Optional[torch.FloatTensor] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        generated_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_generated_prompt_embeds: Optional[torch.Tensor] = None,
         attention_mask: Optional[torch.LongTensor] = None,
         negative_attention_mask: Optional[torch.LongTensor] = None,
         max_new_tokens: Optional[int] = None,
         return_dict: bool = True,
-        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: Optional[int] = 1,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         output_type: Optional[str] = "np",
@@ -761,6 +842,8 @@ class AudioLDM2Pipeline(DiffusionPipeline):
         Args:
             prompt (`str` or `List[str]`, *optional*):
                 The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`.
+            transcription (`str` or `List[str]`, *optional*):\
+                The transcript for text to speech.
             audio_length_in_s (`int`, *optional*, defaults to 10.24):
                 The length of the generated audio sample in seconds.
             num_inference_steps (`int`, *optional*, defaults to 200):
@@ -783,21 +866,21 @@ class AudioLDM2Pipeline(DiffusionPipeline):
             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                 A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                 generation deterministic.
-            latents (`torch.FloatTensor`, *optional*):
+            latents (`torch.Tensor`, *optional*):
                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for spectrogram
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                 tensor is generated by sampling using the supplied random `generator`.
-            prompt_embeds (`torch.FloatTensor`, *optional*):
+            prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                 provided, text embeddings are generated from the `prompt` input argument.
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
-            generated_prompt_embeds (`torch.FloatTensor`, *optional*):
+            generated_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated text embeddings from the GPT2 langauge model. Can be used to easily tweak text inputs,
                 *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input
                 argument.
-            negative_generated_prompt_embeds (`torch.FloatTensor`, *optional*):
+            negative_generated_prompt_embeds (`torch.Tensor`, *optional*):
                 Pre-generated negative text embeddings from the GPT2 language model. Can be used to easily tweak text
                 inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from
                 `negative_prompt` input argument.
@@ -815,7 +898,7 @@ class AudioLDM2Pipeline(DiffusionPipeline):
                 plain tuple.
             callback (`Callable`, *optional*):
                 A function that calls every `callback_steps` steps during inference. The function is called with the
-                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
             callback_steps (`int`, *optional*, defaults to 1):
                 The frequency at which the `callback` function is called. If not specified, the callback is called at
                 every step.
@@ -857,6 +940,7 @@ class AudioLDM2Pipeline(DiffusionPipeline):
             audio_length_in_s,
             vocoder_upsample_factor,
             callback_steps,
+            transcription,
             negative_prompt,
             prompt_embeds,
             negative_prompt_embeds,
@@ -886,6 +970,7 @@ class AudioLDM2Pipeline(DiffusionPipeline):
             device,
             num_waveforms_per_prompt,
             do_classifier_free_guidance,
+            transcription,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
diffusers/pipelines/aura_flow/__init__.py (new file)

@@ -0,0 +1,48 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_aura_flow"] = ["AuraFlowPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_aura_flow import AuraFlowPipeline
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)