diffusers 0.27.1__py3-none-any.whl → 0.32.2__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- diffusers/__init__.py +233 -6
- diffusers/callbacks.py +209 -0
- diffusers/commands/env.py +102 -6
- diffusers/configuration_utils.py +45 -16
- diffusers/dependency_versions_table.py +4 -3
- diffusers/image_processor.py +434 -110
- diffusers/loaders/__init__.py +42 -9
- diffusers/loaders/ip_adapter.py +626 -36
- diffusers/loaders/lora_base.py +900 -0
- diffusers/loaders/lora_conversion_utils.py +991 -125
- diffusers/loaders/lora_pipeline.py +3812 -0
- diffusers/loaders/peft.py +571 -7
- diffusers/loaders/single_file.py +405 -173
- diffusers/loaders/single_file_model.py +385 -0
- diffusers/loaders/single_file_utils.py +1783 -713
- diffusers/loaders/textual_inversion.py +41 -23
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +464 -540
- diffusers/loaders/unet_loader_utils.py +163 -0
- diffusers/models/__init__.py +76 -7
- diffusers/models/activations.py +65 -10
- diffusers/models/adapter.py +53 -53
- diffusers/models/attention.py +605 -18
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +4304 -687
- diffusers/models/autoencoders/__init__.py +8 -0
- diffusers/models/autoencoders/autoencoder_asym_kl.py +15 -17
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +110 -28
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1482 -0
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +19 -24
- diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
- diffusers/models/autoencoders/autoencoder_tiny.py +21 -18
- diffusers/models/autoencoders/consistency_decoder_vae.py +45 -20
- diffusers/models/autoencoders/vae.py +41 -29
- diffusers/models/autoencoders/vq_model.py +182 -0
- diffusers/models/controlnet.py +47 -800
- diffusers/models/controlnet_flux.py +70 -0
- diffusers/models/controlnet_sd3.py +68 -0
- diffusers/models/controlnet_sparsectrl.py +116 -0
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +9 -9
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/controlnets/controlnet_hunyuan.py +401 -0
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/controlnets/controlnet_xs.py +1946 -0
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/downsampling.py +85 -18
- diffusers/models/embeddings.py +1856 -158
- diffusers/models/embeddings_flax.py +23 -9
- diffusers/models/model_loading_utils.py +480 -0
- diffusers/models/modeling_flax_pytorch_utils.py +2 -1
- diffusers/models/modeling_flax_utils.py +2 -7
- diffusers/models/modeling_outputs.py +14 -0
- diffusers/models/modeling_pytorch_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +611 -146
- diffusers/models/normalization.py +361 -20
- diffusers/models/resnet.py +18 -23
- diffusers/models/transformers/__init__.py +16 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +544 -0
- diffusers/models/transformers/cogvideox_transformer_3d.py +542 -0
- diffusers/models/transformers/dit_transformer_2d.py +240 -0
- diffusers/models/transformers/dual_transformer_2d.py +9 -8
- diffusers/models/transformers/hunyuan_transformer_2d.py +578 -0
- diffusers/models/transformers/latte_transformer_3d.py +327 -0
- diffusers/models/transformers/lumina_nextdit2d.py +340 -0
- diffusers/models/transformers/pixart_transformer_2d.py +445 -0
- diffusers/models/transformers/prior_transformer.py +13 -13
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +458 -0
- diffusers/models/transformers/t5_film_transformer.py +17 -19
- diffusers/models/transformers/transformer_2d.py +297 -187
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +386 -0
- diffusers/models/transformers/transformer_flux.py +593 -0
- diffusers/models/transformers/transformer_hunyuan_video.py +791 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +461 -0
- diffusers/models/transformers/transformer_temporal.py +21 -19
- diffusers/models/unets/unet_1d.py +8 -8
- diffusers/models/unets/unet_1d_blocks.py +31 -31
- diffusers/models/unets/unet_2d.py +17 -10
- diffusers/models/unets/unet_2d_blocks.py +225 -149
- diffusers/models/unets/unet_2d_condition.py +41 -40
- diffusers/models/unets/unet_2d_condition_flax.py +6 -5
- diffusers/models/unets/unet_3d_blocks.py +192 -1057
- diffusers/models/unets/unet_3d_condition.py +22 -27
- diffusers/models/unets/unet_i2vgen_xl.py +22 -18
- diffusers/models/unets/unet_kandinsky3.py +2 -2
- diffusers/models/unets/unet_motion_model.py +1413 -89
- diffusers/models/unets/unet_spatio_temporal_condition.py +40 -16
- diffusers/models/unets/unet_stable_cascade.py +19 -18
- diffusers/models/unets/uvit_2d.py +2 -2
- diffusers/models/upsampling.py +95 -26
- diffusers/models/vq_model.py +12 -164
- diffusers/optimization.py +1 -1
- diffusers/pipelines/__init__.py +202 -3
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/amused/pipeline_amused.py +12 -12
- diffusers/pipelines/amused/pipeline_amused_img2img.py +14 -12
- diffusers/pipelines/amused/pipeline_amused_inpaint.py +13 -11
- diffusers/pipelines/animatediff/__init__.py +8 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +122 -109
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1106 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +1288 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1010 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +236 -180
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
- diffusers/pipelines/animatediff/pipeline_output.py +3 -2
- diffusers/pipelines/audioldm/pipeline_audioldm.py +14 -14
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +58 -39
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +121 -36
- diffusers/pipelines/aura_flow/__init__.py +48 -0
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +584 -0
- diffusers/pipelines/auto_pipeline.py +196 -28
- diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +6 -6
- diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +1 -1
- diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
- diffusers/pipelines/cogvideo/__init__.py +54 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +772 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +885 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +851 -0
- diffusers/pipelines/cogvideo/pipeline_output.py +20 -0
- diffusers/pipelines/cogview3/__init__.py +47 -0
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
- diffusers/pipelines/cogview3/pipeline_output.py +21 -0
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +6 -6
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -182
- diffusers/pipelines/controlnet/pipeline_controlnet.py +134 -87
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +93 -77
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +88 -197
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +136 -90
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +176 -80
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +125 -89
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
- diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1060 -0
- diffusers/pipelines/controlnet_sd3/__init__.py +57 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1133 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
- diffusers/pipelines/controlnet_xs/__init__.py +68 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +916 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +1111 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +16 -30
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +20 -35
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +23 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +22 -38
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +25 -41
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +19 -34
- diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
- diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
- diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +11 -11
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +70 -30
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +48 -25
- diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +2 -2
- diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +7 -7
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +21 -20
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +27 -29
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +33 -27
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +33 -23
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +36 -30
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +102 -69
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +13 -13
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +10 -5
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +11 -6
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +10 -5
- diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +5 -5
- diffusers/pipelines/dit/pipeline_dit.py +7 -4
- diffusers/pipelines/flux/__init__.py +69 -0
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +957 -0
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +37 -0
- diffusers/pipelines/free_init_utils.py +41 -38
- diffusers/pipelines/free_noise_utils.py +596 -0
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/__init__.py +48 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +916 -0
- diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +33 -48
- diffusers/pipelines/kandinsky/pipeline_kandinsky.py +8 -8
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +32 -29
- diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +11 -11
- diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +12 -12
- diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +34 -31
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +8 -8
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +7 -7
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +6 -6
- diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +3 -3
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +22 -35
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +26 -37
- diffusers/pipelines/kolors/__init__.py +54 -0
- diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
- diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1250 -0
- diffusers/pipelines/kolors/pipeline_output.py +21 -0
- diffusers/pipelines/kolors/text_encoder.py +889 -0
- diffusers/pipelines/kolors/tokenizer.py +338 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +82 -62
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +77 -60
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +12 -12
- diffusers/pipelines/latte/__init__.py +48 -0
- diffusers/pipelines/latte/pipeline_latte.py +881 -0
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +80 -74
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +85 -76
- diffusers/pipelines/ledits_pp/pipeline_output.py +2 -2
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/__init__.py +48 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +890 -0
- diffusers/pipelines/marigold/__init__.py +50 -0
- diffusers/pipelines/marigold/marigold_image_processing.py +576 -0
- diffusers/pipelines/marigold/pipeline_marigold_depth.py +813 -0
- diffusers/pipelines/marigold/pipeline_marigold_normals.py +690 -0
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/musicldm/pipeline_musicldm.py +14 -14
- diffusers/pipelines/pag/__init__.py +80 -0
- diffusers/pipelines/pag/pag_utils.py +243 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1328 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1610 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +969 -0
- diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +865 -0
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd.py +1062 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +994 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +866 -0
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1345 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1544 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1776 -0
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +17 -12
- diffusers/pipelines/pia/pipeline_pia.py +74 -164
- diffusers/pipelines/pipeline_flax_utils.py +5 -10
- diffusers/pipelines/pipeline_loading_utils.py +515 -53
- diffusers/pipelines/pipeline_utils.py +411 -222
- diffusers/pipelines/pixart_alpha/__init__.py +8 -1
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +76 -93
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +873 -0
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +27 -23
- diffusers/pipelines/shap_e/pipeline_shap_e.py +3 -3
- diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +14 -14
- diffusers/pipelines/shap_e/renderer.py +1 -1
- diffusers/pipelines/stable_audio/__init__.py +50 -0
- diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +756 -0
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +71 -25
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +23 -19
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +35 -34
- diffusers/pipelines/stable_diffusion/__init__.py +0 -1
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +20 -11
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +6 -6
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +145 -79
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +43 -28
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +13 -8
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +100 -68
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +109 -201
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +131 -32
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +247 -87
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +30 -29
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +35 -27
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +49 -42
- diffusers/pipelines/stable_diffusion/safety_checker.py +2 -1
- diffusers/pipelines/stable_diffusion_3/__init__.py +54 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +1140 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +1036 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1250 -0
- diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +29 -20
- diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +59 -58
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +31 -25
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +38 -22
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +30 -24
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +24 -23
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +107 -67
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +316 -69
- diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +10 -5
- diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +98 -30
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +121 -83
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +161 -105
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +142 -218
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +45 -29
- diffusers/pipelines/stable_diffusion_xl/watermark.py +9 -3
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +110 -57
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +69 -39
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +105 -74
- diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +3 -2
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +29 -49
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +32 -93
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +37 -25
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +54 -40
- diffusers/pipelines/unclip/pipeline_unclip.py +6 -6
- diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +6 -6
- diffusers/pipelines/unidiffuser/modeling_text_decoder.py +1 -1
- diffusers/pipelines/unidiffuser/modeling_uvit.py +12 -12
- diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +29 -28
- diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +5 -5
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +5 -10
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +6 -8
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +4 -4
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +12 -12
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +15 -14
- diffusers/{models/dual_transformer_2d.py → quantizers/__init__.py} +2 -6
- diffusers/quantizers/auto.py +139 -0
- diffusers/quantizers/base.py +233 -0
- diffusers/quantizers/bitsandbytes/__init__.py +2 -0
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
- diffusers/quantizers/bitsandbytes/utils.py +306 -0
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +669 -0
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
- diffusers/schedulers/__init__.py +12 -2
- diffusers/schedulers/deprecated/__init__.py +1 -1
- diffusers/schedulers/deprecated/scheduling_karras_ve.py +25 -25
- diffusers/schedulers/scheduling_amused.py +5 -5
- diffusers/schedulers/scheduling_consistency_decoder.py +11 -11
- diffusers/schedulers/scheduling_consistency_models.py +23 -25
- diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
- diffusers/schedulers/scheduling_ddim.py +27 -26
- diffusers/schedulers/scheduling_ddim_cogvideox.py +452 -0
- diffusers/schedulers/scheduling_ddim_flax.py +2 -1
- diffusers/schedulers/scheduling_ddim_inverse.py +16 -16
- diffusers/schedulers/scheduling_ddim_parallel.py +32 -31
- diffusers/schedulers/scheduling_ddpm.py +27 -30
- diffusers/schedulers/scheduling_ddpm_flax.py +7 -3
- diffusers/schedulers/scheduling_ddpm_parallel.py +33 -36
- diffusers/schedulers/scheduling_ddpm_wuerstchen.py +14 -14
- diffusers/schedulers/scheduling_deis_multistep.py +150 -50
- diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +221 -84
- diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +2 -2
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +158 -52
- diffusers/schedulers/scheduling_dpmsolver_sde.py +153 -34
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +275 -86
- diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +81 -57
- diffusers/schedulers/scheduling_edm_euler.py +62 -39
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +30 -29
- diffusers/schedulers/scheduling_euler_discrete.py +255 -74
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +458 -0
- diffusers/schedulers/scheduling_flow_match_heun_discrete.py +320 -0
- diffusers/schedulers/scheduling_heun_discrete.py +174 -46
- diffusers/schedulers/scheduling_ipndm.py +9 -9
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +138 -29
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +132 -26
- diffusers/schedulers/scheduling_karras_ve_flax.py +6 -6
- diffusers/schedulers/scheduling_lcm.py +23 -29
- diffusers/schedulers/scheduling_lms_discrete.py +105 -28
- diffusers/schedulers/scheduling_pndm.py +20 -20
- diffusers/schedulers/scheduling_repaint.py +21 -21
- diffusers/schedulers/scheduling_sasolver.py +157 -60
- diffusers/schedulers/scheduling_sde_ve.py +19 -19
- diffusers/schedulers/scheduling_tcd.py +41 -36
- diffusers/schedulers/scheduling_unclip.py +19 -16
- diffusers/schedulers/scheduling_unipc_multistep.py +243 -47
- diffusers/schedulers/scheduling_utils.py +12 -5
- diffusers/schedulers/scheduling_utils_flax.py +1 -3
- diffusers/schedulers/scheduling_vq_diffusion.py +10 -10
- diffusers/training_utils.py +214 -30
- diffusers/utils/__init__.py +17 -1
- diffusers/utils/constants.py +3 -0
- diffusers/utils/doc_utils.py +1 -0
- diffusers/utils/dummy_pt_objects.py +592 -7
- diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
- diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +1001 -71
- diffusers/utils/dynamic_modules_utils.py +34 -29
- diffusers/utils/export_utils.py +50 -6
- diffusers/utils/hub_utils.py +131 -17
- diffusers/utils/import_utils.py +210 -8
- diffusers/utils/loading_utils.py +118 -5
- diffusers/utils/logging.py +4 -2
- diffusers/utils/peft_utils.py +37 -7
- diffusers/utils/state_dict_utils.py +13 -2
- diffusers/utils/testing_utils.py +193 -11
- diffusers/utils/torch_utils.py +4 -0
- diffusers/video_processor.py +113 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/METADATA +82 -91
- diffusers-0.32.2.dist-info/RECORD +550 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/WHEEL +1 -1
- diffusers/loaders/autoencoder.py +0 -146
- diffusers/loaders/controlnet.py +0 -136
- diffusers/loaders/lora.py +0 -1349
- diffusers/models/prior_transformer.py +0 -12
- diffusers/models/t5_film_transformer.py +0 -70
- diffusers/models/transformer_2d.py +0 -25
- diffusers/models/transformer_temporal.py +0 -34
- diffusers/models/unet_1d.py +0 -26
- diffusers/models/unet_1d_blocks.py +0 -203
- diffusers/models/unet_2d.py +0 -27
- diffusers/models/unet_2d_blocks.py +0 -375
- diffusers/models/unet_2d_condition.py +0 -25
- diffusers-0.27.1.dist-info/RECORD +0 -399
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/LICENSE +0 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/entry_points.txt +0 -0
- {diffusers-0.27.1.dist-info → diffusers-0.32.2.dist-info}/top_level.txt +0 -0
@@ -209,7 +209,7 @@ class WuerstchenDecoderPipeline(DiffusionPipeline):
|
|
209
209
|
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
210
210
|
def __call__(
|
211
211
|
self,
|
212
|
-
image_embeddings: Union[torch.
|
212
|
+
image_embeddings: Union[torch.Tensor, List[torch.Tensor]],
|
213
213
|
prompt: Union[str, List[str]] = None,
|
214
214
|
num_inference_steps: int = 12,
|
215
215
|
timesteps: Optional[List[float]] = None,
|
@@ -217,7 +217,7 @@ class WuerstchenDecoderPipeline(DiffusionPipeline):
|
|
217
217
|
negative_prompt: Optional[Union[str, List[str]]] = None,
|
218
218
|
num_images_per_prompt: int = 1,
|
219
219
|
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
220
|
-
latents: Optional[torch.
|
220
|
+
latents: Optional[torch.Tensor] = None,
|
221
221
|
output_type: Optional[str] = "pil",
|
222
222
|
return_dict: bool = True,
|
223
223
|
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
@@ -228,7 +228,7 @@ class WuerstchenDecoderPipeline(DiffusionPipeline):
|
|
228
228
|
Function invoked when calling the pipeline for generation.
|
229
229
|
|
230
230
|
Args:
|
231
|
-
image_embedding (`torch.
|
231
|
+
image_embedding (`torch.Tensor` or `List[torch.Tensor]`):
|
232
232
|
Image Embeddings either extracted from an image or generated by a Prior Model.
|
233
233
|
prompt (`str` or `List[str]`):
|
234
234
|
The prompt or prompts to guide the image generation.
|
@@ -252,7 +252,7 @@ class WuerstchenDecoderPipeline(DiffusionPipeline):
|
|
252
252
|
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
253
253
|
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
254
254
|
to make generation deterministic.
|
255
|
-
latents (`torch.
|
255
|
+
latents (`torch.Tensor`, *optional*):
|
256
256
|
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
257
257
|
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
258
258
|
tensor will be generated by sampling using the supplied random `generator`.
|
@@ -112,25 +112,25 @@ class WuerstchenCombinedPipeline(DiffusionPipeline):
|
|
112
112
|
def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
|
113
113
|
self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)
|
114
114
|
|
115
|
-
def enable_model_cpu_offload(self, gpu_id=
|
115
|
+
def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
|
116
116
|
r"""
|
117
117
|
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
|
118
118
|
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
|
119
119
|
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
|
120
120
|
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
|
121
121
|
"""
|
122
|
-
self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id)
|
123
|
-
self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id)
|
122
|
+
self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device)
|
123
|
+
self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device)
|
124
124
|
|
125
|
-
def enable_sequential_cpu_offload(self, gpu_id=
|
125
|
+
def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
|
126
126
|
r"""
|
127
127
|
Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗
|
128
128
|
Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a
|
129
129
|
GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis.
|
130
130
|
Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower.
|
131
131
|
"""
|
132
|
-
self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)
|
133
|
-
self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)
|
132
|
+
self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device)
|
133
|
+
self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device)
|
134
134
|
|
135
135
|
def progress_bar(self, iterable=None, total=None):
|
136
136
|
self.prior_pipe.progress_bar(iterable=iterable, total=total)
|
@@ -154,11 +154,11 @@ class WuerstchenCombinedPipeline(DiffusionPipeline):
|
|
154
154
|
decoder_timesteps: Optional[List[float]] = None,
|
155
155
|
decoder_guidance_scale: float = 0.0,
|
156
156
|
negative_prompt: Optional[Union[str, List[str]]] = None,
|
157
|
-
prompt_embeds: Optional[torch.
|
158
|
-
negative_prompt_embeds: Optional[torch.
|
157
|
+
prompt_embeds: Optional[torch.Tensor] = None,
|
158
|
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
159
159
|
num_images_per_prompt: int = 1,
|
160
160
|
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
161
|
-
latents: Optional[torch.
|
161
|
+
latents: Optional[torch.Tensor] = None,
|
162
162
|
output_type: Optional[str] = "pil",
|
163
163
|
return_dict: bool = True,
|
164
164
|
prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
@@ -176,10 +176,10 @@ class WuerstchenCombinedPipeline(DiffusionPipeline):
|
|
176
176
|
negative_prompt (`str` or `List[str]`, *optional*):
|
177
177
|
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
178
178
|
if `guidance_scale` is less than `1`).
|
179
|
-
prompt_embeds (`torch.
|
179
|
+
prompt_embeds (`torch.Tensor`, *optional*):
|
180
180
|
Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt
|
181
181
|
weighting. If not provided, text embeddings will be generated from `prompt` input argument.
|
182
|
-
negative_prompt_embeds (`torch.
|
182
|
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
183
183
|
Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.*
|
184
184
|
prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt`
|
185
185
|
input argument.
|
@@ -218,7 +218,7 @@ class WuerstchenCombinedPipeline(DiffusionPipeline):
|
|
218
218
|
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
219
219
|
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
220
220
|
to make generation deterministic.
|
221
|
-
latents (`torch.
|
221
|
+
latents (`torch.Tensor`, *optional*):
|
222
222
|
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
223
223
|
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
224
224
|
tensor will ge generated by sampling using the supplied random `generator`.
|
@@ -20,7 +20,7 @@ import numpy as np
|
|
20
20
|
import torch
|
21
21
|
from transformers import CLIPTextModel, CLIPTokenizer
|
22
22
|
|
23
|
-
from ...loaders import
|
23
|
+
from ...loaders import StableDiffusionLoraLoaderMixin
|
24
24
|
from ...schedulers import DDPMWuerstchenScheduler
|
25
25
|
from ...utils import BaseOutput, deprecate, logging, replace_example_docstring
|
26
26
|
from ...utils.torch_utils import randn_tensor
|
@@ -54,15 +54,15 @@ class WuerstchenPriorPipelineOutput(BaseOutput):
|
|
54
54
|
Output class for WuerstchenPriorPipeline.
|
55
55
|
|
56
56
|
Args:
|
57
|
-
image_embeddings (`torch.
|
57
|
+
image_embeddings (`torch.Tensor` or `np.ndarray`)
|
58
58
|
Prior image embeddings for text prompt
|
59
59
|
|
60
60
|
"""
|
61
61
|
|
62
|
-
image_embeddings: Union[torch.
|
62
|
+
image_embeddings: Union[torch.Tensor, np.ndarray]
|
63
63
|
|
64
64
|
|
65
|
-
class WuerstchenPriorPipeline(DiffusionPipeline,
|
65
|
+
class WuerstchenPriorPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin):
|
66
66
|
"""
|
67
67
|
Pipeline for generating image prior for Wuerstchen.
|
68
68
|
|
@@ -70,8 +70,8 @@ class WuerstchenPriorPipeline(DiffusionPipeline, LoraLoaderMixin):
|
|
70
70
|
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
71
71
|
|
72
72
|
The pipeline also inherits the following loading methods:
|
73
|
-
- [`~loaders.
|
74
|
-
- [`~loaders.
|
73
|
+
- [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
74
|
+
- [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
75
75
|
|
76
76
|
Args:
|
77
77
|
prior ([`Prior`]):
|
@@ -95,6 +95,7 @@ class WuerstchenPriorPipeline(DiffusionPipeline, LoraLoaderMixin):
|
|
95
95
|
text_encoder_name = "text_encoder"
|
96
96
|
model_cpu_offload_seq = "text_encoder->prior"
|
97
97
|
_callback_tensor_inputs = ["latents", "text_encoder_hidden_states", "negative_prompt_embeds"]
|
98
|
+
_lora_loadable_modules = ["prior", "text_encoder"]
|
98
99
|
|
99
100
|
def __init__(
|
100
101
|
self,
|
@@ -136,8 +137,8 @@ class WuerstchenPriorPipeline(DiffusionPipeline, LoraLoaderMixin):
|
|
136
137
|
do_classifier_free_guidance,
|
137
138
|
prompt=None,
|
138
139
|
negative_prompt=None,
|
139
|
-
prompt_embeds: Optional[torch.
|
140
|
-
negative_prompt_embeds: Optional[torch.
|
140
|
+
prompt_embeds: Optional[torch.Tensor] = None,
|
141
|
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
141
142
|
):
|
142
143
|
if prompt is not None and isinstance(prompt, str):
|
143
144
|
batch_size = 1
|
@@ -288,11 +289,11 @@ class WuerstchenPriorPipeline(DiffusionPipeline, LoraLoaderMixin):
|
|
288
289
|
timesteps: List[float] = None,
|
289
290
|
guidance_scale: float = 8.0,
|
290
291
|
negative_prompt: Optional[Union[str, List[str]]] = None,
|
291
|
-
prompt_embeds: Optional[torch.
|
292
|
-
negative_prompt_embeds: Optional[torch.
|
292
|
+
prompt_embeds: Optional[torch.Tensor] = None,
|
293
|
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
293
294
|
num_images_per_prompt: Optional[int] = 1,
|
294
295
|
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
295
|
-
latents: Optional[torch.
|
296
|
+
latents: Optional[torch.Tensor] = None,
|
296
297
|
output_type: Optional[str] = "pt",
|
297
298
|
return_dict: bool = True,
|
298
299
|
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
@@ -324,10 +325,10 @@ class WuerstchenPriorPipeline(DiffusionPipeline, LoraLoaderMixin):
|
|
324
325
|
negative_prompt (`str` or `List[str]`, *optional*):
|
325
326
|
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
326
327
|
if `decoder_guidance_scale` is less than `1`).
|
327
|
-
prompt_embeds (`torch.
|
328
|
+
prompt_embeds (`torch.Tensor`, *optional*):
|
328
329
|
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
329
330
|
provided, text embeddings will be generated from `prompt` input argument.
|
330
|
-
negative_prompt_embeds (`torch.
|
331
|
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
331
332
|
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
332
333
|
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
333
334
|
argument.
|
@@ -336,7 +337,7 @@ class WuerstchenPriorPipeline(DiffusionPipeline, LoraLoaderMixin):
|
|
336
337
|
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
337
338
|
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
338
339
|
to make generation deterministic.
|
339
|
-
latents (`torch.
|
340
|
+
latents (`torch.Tensor`, *optional*):
|
340
341
|
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
341
342
|
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
342
343
|
tensor will ge generated by sampling using the supplied random `generator`.
|
@@ -11,10 +11,6 @@
|
|
11
11
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
12
|
# See the License for the specific language governing permissions and
|
13
13
|
# limitations under the License.
|
14
|
-
from ..utils import deprecate
|
15
|
-
from .transformers.dual_transformer_2d import DualTransformer2DModel
|
16
14
|
|
17
|
-
|
18
|
-
|
19
|
-
deprecation_message = "Importing `DualTransformer2DModel` from `diffusers.models.dual_transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.transformers.dual_transformer_2d import DualTransformer2DModel`, instead."
|
20
|
-
deprecate("DualTransformer2DModel", "0.29", deprecation_message)
|
15
|
+
from .auto import DiffusersAutoQuantizer
|
16
|
+
from .base import DiffusersQuantizer
|
@@ -0,0 +1,139 @@
|
|
1
|
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""
|
15
|
+
Adapted from
|
16
|
+
https://github.com/huggingface/transformers/blob/c409cd81777fb27aadc043ed3d8339dbc020fb3b/src/transformers/quantizers/auto.py
|
17
|
+
"""
|
18
|
+
|
19
|
+
import warnings
|
20
|
+
from typing import Dict, Optional, Union
|
21
|
+
|
22
|
+
from .bitsandbytes import BnB4BitDiffusersQuantizer, BnB8BitDiffusersQuantizer
|
23
|
+
from .gguf import GGUFQuantizer
|
24
|
+
from .quantization_config import (
|
25
|
+
BitsAndBytesConfig,
|
26
|
+
GGUFQuantizationConfig,
|
27
|
+
QuantizationConfigMixin,
|
28
|
+
QuantizationMethod,
|
29
|
+
TorchAoConfig,
|
30
|
+
)
|
31
|
+
from .torchao import TorchAoHfQuantizer
|
32
|
+
|
33
|
+
|
34
|
+
AUTO_QUANTIZER_MAPPING = {
|
35
|
+
"bitsandbytes_4bit": BnB4BitDiffusersQuantizer,
|
36
|
+
"bitsandbytes_8bit": BnB8BitDiffusersQuantizer,
|
37
|
+
"gguf": GGUFQuantizer,
|
38
|
+
"torchao": TorchAoHfQuantizer,
|
39
|
+
}
|
40
|
+
|
41
|
+
AUTO_QUANTIZATION_CONFIG_MAPPING = {
|
42
|
+
"bitsandbytes_4bit": BitsAndBytesConfig,
|
43
|
+
"bitsandbytes_8bit": BitsAndBytesConfig,
|
44
|
+
"gguf": GGUFQuantizationConfig,
|
45
|
+
"torchao": TorchAoConfig,
|
46
|
+
}
|
47
|
+
|
48
|
+
|
49
|
+
class DiffusersAutoQuantizer:
|
50
|
+
"""
|
51
|
+
The auto diffusers quantizer class that takes care of automatically instantiating to the correct
|
52
|
+
`DiffusersQuantizer` given the `QuantizationConfig`.
|
53
|
+
"""
|
54
|
+
|
55
|
+
@classmethod
|
56
|
+
def from_dict(cls, quantization_config_dict: Dict):
|
57
|
+
quant_method = quantization_config_dict.get("quant_method", None)
|
58
|
+
# We need a special care for bnb models to make sure everything is BC ..
|
59
|
+
if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
|
60
|
+
suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
|
61
|
+
quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
|
62
|
+
elif quant_method is None:
|
63
|
+
raise ValueError(
|
64
|
+
"The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
|
65
|
+
)
|
66
|
+
|
67
|
+
if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
|
68
|
+
raise ValueError(
|
69
|
+
f"Unknown quantization type, got {quant_method} - supported types are:"
|
70
|
+
f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
|
71
|
+
)
|
72
|
+
|
73
|
+
target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
|
74
|
+
return target_cls.from_dict(quantization_config_dict)
|
75
|
+
|
76
|
+
@classmethod
|
77
|
+
def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs):
|
78
|
+
# Convert it to a QuantizationConfig if the q_config is a dict
|
79
|
+
if isinstance(quantization_config, dict):
|
80
|
+
quantization_config = cls.from_dict(quantization_config)
|
81
|
+
|
82
|
+
quant_method = quantization_config.quant_method
|
83
|
+
|
84
|
+
# Again, we need a special care for bnb as we have a single quantization config
|
85
|
+
# class for both 4-bit and 8-bit quantization
|
86
|
+
if quant_method == QuantizationMethod.BITS_AND_BYTES:
|
87
|
+
if quantization_config.load_in_8bit:
|
88
|
+
quant_method += "_8bit"
|
89
|
+
else:
|
90
|
+
quant_method += "_4bit"
|
91
|
+
|
92
|
+
if quant_method not in AUTO_QUANTIZER_MAPPING.keys():
|
93
|
+
raise ValueError(
|
94
|
+
f"Unknown quantization type, got {quant_method} - supported types are:"
|
95
|
+
f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
|
96
|
+
)
|
97
|
+
|
98
|
+
target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
|
99
|
+
return target_cls(quantization_config, **kwargs)
|
100
|
+
|
101
|
+
@classmethod
|
102
|
+
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
|
103
|
+
model_config = cls.load_config(pretrained_model_name_or_path, **kwargs)
|
104
|
+
if getattr(model_config, "quantization_config", None) is None:
|
105
|
+
raise ValueError(
|
106
|
+
f"Did not found a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
|
107
|
+
)
|
108
|
+
quantization_config_dict = model_config.quantization_config
|
109
|
+
quantization_config = cls.from_dict(quantization_config_dict)
|
110
|
+
# Update with potential kwargs that are passed through from_pretrained.
|
111
|
+
quantization_config.update(kwargs)
|
112
|
+
|
113
|
+
return cls.from_config(quantization_config)
|
114
|
+
|
115
|
+
@classmethod
|
116
|
+
def merge_quantization_configs(
|
117
|
+
cls,
|
118
|
+
quantization_config: Union[dict, QuantizationConfigMixin],
|
119
|
+
quantization_config_from_args: Optional[QuantizationConfigMixin],
|
120
|
+
):
|
121
|
+
"""
|
122
|
+
handles situations where both quantization_config from args and quantization_config from model config are
|
123
|
+
present.
|
124
|
+
"""
|
125
|
+
if quantization_config_from_args is not None:
|
126
|
+
warning_msg = (
|
127
|
+
"You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
|
128
|
+
" already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
|
129
|
+
)
|
130
|
+
else:
|
131
|
+
warning_msg = ""
|
132
|
+
|
133
|
+
if isinstance(quantization_config, dict):
|
134
|
+
quantization_config = cls.from_dict(quantization_config)
|
135
|
+
|
136
|
+
if warning_msg != "":
|
137
|
+
warnings.warn(warning_msg)
|
138
|
+
|
139
|
+
return quantization_config
|
@@ -0,0 +1,233 @@
|
|
1
|
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""
|
16
|
+
Adapted from
|
17
|
+
https://github.com/huggingface/transformers/blob/52cb4034ada381fe1ffe8d428a1076e5411a8026/src/transformers/quantizers/base.py
|
18
|
+
"""
|
19
|
+
|
20
|
+
from abc import ABC, abstractmethod
|
21
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
|
22
|
+
|
23
|
+
from ..utils import is_torch_available
|
24
|
+
from .quantization_config import QuantizationConfigMixin
|
25
|
+
|
26
|
+
|
27
|
+
if TYPE_CHECKING:
|
28
|
+
from ..models.modeling_utils import ModelMixin
|
29
|
+
|
30
|
+
if is_torch_available():
|
31
|
+
import torch
|
32
|
+
|
33
|
+
|
34
|
+
class DiffusersQuantizer(ABC):
|
35
|
+
"""
|
36
|
+
Abstract class of the HuggingFace quantizer. Supports for now quantizing HF diffusers models for inference and/or
|
37
|
+
quantization. This class is used only for diffusers.models.modeling_utils.ModelMixin.from_pretrained and cannot be
|
38
|
+
easily used outside the scope of that method yet.
|
39
|
+
|
40
|
+
Attributes
|
41
|
+
quantization_config (`diffusers.quantizers.quantization_config.QuantizationConfigMixin`):
|
42
|
+
The quantization config that defines the quantization parameters of your model that you want to quantize.
|
43
|
+
modules_to_not_convert (`List[str]`, *optional*):
|
44
|
+
The list of module names to not convert when quantizing the model.
|
45
|
+
required_packages (`List[str]`, *optional*):
|
46
|
+
The list of required pip packages to install prior to using the quantizer
|
47
|
+
requires_calibration (`bool`):
|
48
|
+
Whether the quantization method requires to calibrate the model before using it.
|
49
|
+
"""
|
50
|
+
|
51
|
+
requires_calibration = False
|
52
|
+
required_packages = None
|
53
|
+
|
54
|
+
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
|
55
|
+
self.quantization_config = quantization_config
|
56
|
+
|
57
|
+
# -- Handle extra kwargs below --
|
58
|
+
self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
|
59
|
+
self.pre_quantized = kwargs.pop("pre_quantized", True)
|
60
|
+
|
61
|
+
if not self.pre_quantized and self.requires_calibration:
|
62
|
+
raise ValueError(
|
63
|
+
f"The quantization method {quantization_config.quant_method} does require the model to be pre-quantized."
|
64
|
+
f" You explicitly passed `pre_quantized=False` meaning your model weights are not quantized. Make sure to "
|
65
|
+
f"pass `pre_quantized=True` while knowing what you are doing."
|
66
|
+
)
|
67
|
+
|
68
|
+
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
|
69
|
+
"""
|
70
|
+
Some quantization methods require to explicitly set the dtype of the model to a target dtype. You need to
|
71
|
+
override this method in case you want to make sure that behavior is preserved
|
72
|
+
|
73
|
+
Args:
|
74
|
+
torch_dtype (`torch.dtype`):
|
75
|
+
The input dtype that is passed in `from_pretrained`
|
76
|
+
"""
|
77
|
+
return torch_dtype
|
78
|
+
|
79
|
+
def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
|
80
|
+
"""
|
81
|
+
Override this method if you want to pass a override the existing device map with a new one. E.g. for
|
82
|
+
bitsandbytes, since `accelerate` is a hard requirement, if no device_map is passed, the device_map is set to
|
83
|
+
`"auto"``
|
84
|
+
|
85
|
+
Args:
|
86
|
+
device_map (`Union[dict, str]`, *optional*):
|
87
|
+
The device_map that is passed through the `from_pretrained` method.
|
88
|
+
"""
|
89
|
+
return device_map
|
90
|
+
|
91
|
+
def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
|
92
|
+
"""
|
93
|
+
Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained` to compute the
|
94
|
+
device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype` to `torch.int8`
|
95
|
+
and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.
|
96
|
+
|
97
|
+
Args:
|
98
|
+
torch_dtype (`torch.dtype`, *optional*):
|
99
|
+
The torch_dtype that is used to compute the device_map.
|
100
|
+
"""
|
101
|
+
return torch_dtype
|
102
|
+
|
103
|
+
def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
|
104
|
+
"""
|
105
|
+
Override this method if you want to adjust the `missing_keys`.
|
106
|
+
|
107
|
+
Args:
|
108
|
+
missing_keys (`List[str]`, *optional*):
|
109
|
+
The list of missing keys in the checkpoint compared to the state dict of the model
|
110
|
+
"""
|
111
|
+
return missing_keys
|
112
|
+
|
113
|
+
def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
|
114
|
+
"""
|
115
|
+
returns dtypes for modules that are not quantized - used for the computation of the device_map in case one
|
116
|
+
passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified in
|
117
|
+
`_process_model_before_weight_loading`. `diffusers` models don't have any `modules_to_not_convert` attributes
|
118
|
+
yet but this can change soon in the future.
|
119
|
+
|
120
|
+
Args:
|
121
|
+
model (`~diffusers.models.modeling_utils.ModelMixin`):
|
122
|
+
The model to quantize
|
123
|
+
torch_dtype (`torch.dtype`):
|
124
|
+
The dtype passed in `from_pretrained` method.
|
125
|
+
"""
|
126
|
+
|
127
|
+
return {
|
128
|
+
name: torch_dtype
|
129
|
+
for name, _ in model.named_parameters()
|
130
|
+
if any(m in name for m in self.modules_to_not_convert)
|
131
|
+
}
|
132
|
+
|
133
|
+
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
|
134
|
+
"""adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization"""
|
135
|
+
return max_memory
|
136
|
+
|
137
|
+
def check_if_quantized_param(
|
138
|
+
self,
|
139
|
+
model: "ModelMixin",
|
140
|
+
param_value: "torch.Tensor",
|
141
|
+
param_name: str,
|
142
|
+
state_dict: Dict[str, Any],
|
143
|
+
**kwargs,
|
144
|
+
) -> bool:
|
145
|
+
"""
|
146
|
+
checks if a loaded state_dict component is part of quantized param + some validation; only defined for
|
147
|
+
quantization methods that require to create a new parameters for quantization.
|
148
|
+
"""
|
149
|
+
return False
|
150
|
+
|
151
|
+
def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
|
152
|
+
"""
|
153
|
+
takes needed components from state_dict and creates quantized param.
|
154
|
+
"""
|
155
|
+
return
|
156
|
+
|
157
|
+
def check_quantized_param_shape(self, *args, **kwargs):
|
158
|
+
"""
|
159
|
+
checks if the quantized param has expected shape.
|
160
|
+
"""
|
161
|
+
return True
|
162
|
+
|
163
|
+
def validate_environment(self, *args, **kwargs):
|
164
|
+
"""
|
165
|
+
This method is used to potentially check for potential conflicts with arguments that are passed in
|
166
|
+
`from_pretrained`. You need to define it for all future quantizers that are integrated with diffusers. If no
|
167
|
+
explicit check are needed, simply return nothing.
|
168
|
+
"""
|
169
|
+
return
|
170
|
+
|
171
|
+
def preprocess_model(self, model: "ModelMixin", **kwargs):
|
172
|
+
"""
|
173
|
+
Setting model attributes and/or converting model before weights loading. At this point the model should be
|
174
|
+
initialized on the meta device so you can freely manipulate the skeleton of the model in order to replace
|
175
|
+
modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
|
176
|
+
|
177
|
+
Args:
|
178
|
+
model (`~diffusers.models.modeling_utils.ModelMixin`):
|
179
|
+
The model to quantize
|
180
|
+
kwargs (`dict`, *optional*):
|
181
|
+
The keyword arguments that are passed along `_process_model_before_weight_loading`.
|
182
|
+
"""
|
183
|
+
model.is_quantized = True
|
184
|
+
model.quantization_method = self.quantization_config.quant_method
|
185
|
+
return self._process_model_before_weight_loading(model, **kwargs)
|
186
|
+
|
187
|
+
def postprocess_model(self, model: "ModelMixin", **kwargs):
|
188
|
+
"""
|
189
|
+
Post-process the model post weights loading. Make sure to override the abstract method
|
190
|
+
`_process_model_after_weight_loading`.
|
191
|
+
|
192
|
+
Args:
|
193
|
+
model (`~diffusers.models.modeling_utils.ModelMixin`):
|
194
|
+
The model to quantize
|
195
|
+
kwargs (`dict`, *optional*):
|
196
|
+
The keyword arguments that are passed along `_process_model_after_weight_loading`.
|
197
|
+
"""
|
198
|
+
return self._process_model_after_weight_loading(model, **kwargs)
|
199
|
+
|
200
|
+
def dequantize(self, model):
|
201
|
+
"""
|
202
|
+
Potentially dequantize the model to retrive the original model, with some loss in accuracy / performance. Note
|
203
|
+
not all quantization schemes support this.
|
204
|
+
"""
|
205
|
+
model = self._dequantize(model)
|
206
|
+
|
207
|
+
# Delete quantizer and quantization config
|
208
|
+
del model.hf_quantizer
|
209
|
+
|
210
|
+
return model
|
211
|
+
|
212
|
+
def _dequantize(self, model):
|
213
|
+
raise NotImplementedError(
|
214
|
+
f"{self.quantization_config.quant_method} has no implementation of `dequantize`, please raise an issue on GitHub."
|
215
|
+
)
|
216
|
+
|
217
|
+
@abstractmethod
|
218
|
+
def _process_model_before_weight_loading(self, model, **kwargs):
|
219
|
+
...
|
220
|
+
|
221
|
+
@abstractmethod
|
222
|
+
def _process_model_after_weight_loading(self, model, **kwargs):
|
223
|
+
...
|
224
|
+
|
225
|
+
@property
|
226
|
+
@abstractmethod
|
227
|
+
def is_serializable(self):
|
228
|
+
...
|
229
|
+
|
230
|
+
@property
|
231
|
+
@abstractmethod
|
232
|
+
def is_trainable(self):
|
233
|
+
...
|