diffusers 0.33.1__py3-none-any.whl → 0.35.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- diffusers/__init__.py +145 -1
- diffusers/callbacks.py +35 -0
- diffusers/commands/__init__.py +1 -1
- diffusers/commands/custom_blocks.py +134 -0
- diffusers/commands/diffusers_cli.py +3 -1
- diffusers/commands/env.py +1 -1
- diffusers/commands/fp16_safetensors.py +2 -2
- diffusers/configuration_utils.py +11 -2
- diffusers/dependency_versions_check.py +1 -1
- diffusers/dependency_versions_table.py +3 -3
- diffusers/experimental/rl/value_guided_sampling.py +1 -1
- diffusers/guiders/__init__.py +41 -0
- diffusers/guiders/adaptive_projected_guidance.py +188 -0
- diffusers/guiders/auto_guidance.py +190 -0
- diffusers/guiders/classifier_free_guidance.py +141 -0
- diffusers/guiders/classifier_free_zero_star_guidance.py +152 -0
- diffusers/guiders/frequency_decoupled_guidance.py +327 -0
- diffusers/guiders/guider_utils.py +309 -0
- diffusers/guiders/perturbed_attention_guidance.py +271 -0
- diffusers/guiders/skip_layer_guidance.py +262 -0
- diffusers/guiders/smoothed_energy_guidance.py +251 -0
- diffusers/guiders/tangential_classifier_free_guidance.py +143 -0
- diffusers/hooks/__init__.py +17 -0
- diffusers/hooks/_common.py +56 -0
- diffusers/hooks/_helpers.py +293 -0
- diffusers/hooks/faster_cache.py +9 -8
- diffusers/hooks/first_block_cache.py +259 -0
- diffusers/hooks/group_offloading.py +332 -227
- diffusers/hooks/hooks.py +58 -3
- diffusers/hooks/layer_skip.py +263 -0
- diffusers/hooks/layerwise_casting.py +5 -10
- diffusers/hooks/pyramid_attention_broadcast.py +15 -12
- diffusers/hooks/smoothed_energy_guidance_utils.py +167 -0
- diffusers/hooks/utils.py +43 -0
- diffusers/image_processor.py +7 -2
- diffusers/loaders/__init__.py +10 -0
- diffusers/loaders/ip_adapter.py +260 -18
- diffusers/loaders/lora_base.py +261 -127
- diffusers/loaders/lora_conversion_utils.py +657 -35
- diffusers/loaders/lora_pipeline.py +2778 -1246
- diffusers/loaders/peft.py +78 -112
- diffusers/loaders/single_file.py +2 -2
- diffusers/loaders/single_file_model.py +64 -15
- diffusers/loaders/single_file_utils.py +395 -7
- diffusers/loaders/textual_inversion.py +3 -2
- diffusers/loaders/transformer_flux.py +10 -11
- diffusers/loaders/transformer_sd3.py +8 -3
- diffusers/loaders/unet.py +24 -21
- diffusers/loaders/unet_loader_utils.py +6 -3
- diffusers/loaders/utils.py +1 -1
- diffusers/models/__init__.py +23 -1
- diffusers/models/activations.py +5 -5
- diffusers/models/adapter.py +2 -3
- diffusers/models/attention.py +488 -7
- diffusers/models/attention_dispatch.py +1218 -0
- diffusers/models/attention_flax.py +10 -10
- diffusers/models/attention_processor.py +113 -667
- diffusers/models/auto_model.py +49 -12
- diffusers/models/autoencoders/__init__.py +2 -0
- diffusers/models/autoencoders/autoencoder_asym_kl.py +4 -4
- diffusers/models/autoencoders/autoencoder_dc.py +17 -4
- diffusers/models/autoencoders/autoencoder_kl.py +5 -5
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +4 -4
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +6 -6
- diffusers/models/autoencoders/autoencoder_kl_cosmos.py +1110 -0
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +2 -2
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +3 -3
- diffusers/models/autoencoders/autoencoder_kl_magvit.py +4 -4
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +3 -3
- diffusers/models/autoencoders/autoencoder_kl_qwenimage.py +1070 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +4 -4
- diffusers/models/autoencoders/autoencoder_kl_wan.py +626 -62
- diffusers/models/autoencoders/autoencoder_oobleck.py +1 -1
- diffusers/models/autoencoders/autoencoder_tiny.py +3 -3
- diffusers/models/autoencoders/consistency_decoder_vae.py +1 -1
- diffusers/models/autoencoders/vae.py +13 -2
- diffusers/models/autoencoders/vq_model.py +2 -2
- diffusers/models/cache_utils.py +32 -10
- diffusers/models/controlnet.py +1 -1
- diffusers/models/controlnet_flux.py +1 -1
- diffusers/models/controlnet_sd3.py +1 -1
- diffusers/models/controlnet_sparsectrl.py +1 -1
- diffusers/models/controlnets/__init__.py +1 -0
- diffusers/models/controlnets/controlnet.py +3 -3
- diffusers/models/controlnets/controlnet_flax.py +1 -1
- diffusers/models/controlnets/controlnet_flux.py +21 -20
- diffusers/models/controlnets/controlnet_hunyuan.py +2 -2
- diffusers/models/controlnets/controlnet_sana.py +290 -0
- diffusers/models/controlnets/controlnet_sd3.py +1 -1
- diffusers/models/controlnets/controlnet_sparsectrl.py +2 -2
- diffusers/models/controlnets/controlnet_union.py +5 -5
- diffusers/models/controlnets/controlnet_xs.py +7 -7
- diffusers/models/controlnets/multicontrolnet.py +4 -5
- diffusers/models/controlnets/multicontrolnet_union.py +5 -6
- diffusers/models/downsampling.py +2 -2
- diffusers/models/embeddings.py +36 -46
- diffusers/models/embeddings_flax.py +2 -2
- diffusers/models/lora.py +3 -3
- diffusers/models/model_loading_utils.py +233 -1
- diffusers/models/modeling_flax_utils.py +1 -2
- diffusers/models/modeling_utils.py +203 -108
- diffusers/models/normalization.py +4 -4
- diffusers/models/resnet.py +2 -2
- diffusers/models/resnet_flax.py +1 -1
- diffusers/models/transformers/__init__.py +7 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +70 -24
- diffusers/models/transformers/cogvideox_transformer_3d.py +1 -1
- diffusers/models/transformers/consisid_transformer_3d.py +1 -1
- diffusers/models/transformers/dit_transformer_2d.py +2 -2
- diffusers/models/transformers/dual_transformer_2d.py +1 -1
- diffusers/models/transformers/hunyuan_transformer_2d.py +2 -2
- diffusers/models/transformers/latte_transformer_3d.py +4 -5
- diffusers/models/transformers/lumina_nextdit2d.py +2 -2
- diffusers/models/transformers/pixart_transformer_2d.py +3 -3
- diffusers/models/transformers/prior_transformer.py +1 -1
- diffusers/models/transformers/sana_transformer.py +8 -3
- diffusers/models/transformers/stable_audio_transformer.py +5 -9
- diffusers/models/transformers/t5_film_transformer.py +3 -3
- diffusers/models/transformers/transformer_2d.py +1 -1
- diffusers/models/transformers/transformer_allegro.py +1 -1
- diffusers/models/transformers/transformer_chroma.py +641 -0
- diffusers/models/transformers/transformer_cogview3plus.py +5 -10
- diffusers/models/transformers/transformer_cogview4.py +353 -27
- diffusers/models/transformers/transformer_cosmos.py +586 -0
- diffusers/models/transformers/transformer_flux.py +376 -138
- diffusers/models/transformers/transformer_hidream_image.py +942 -0
- diffusers/models/transformers/transformer_hunyuan_video.py +12 -8
- diffusers/models/transformers/transformer_hunyuan_video_framepack.py +416 -0
- diffusers/models/transformers/transformer_ltx.py +105 -24
- diffusers/models/transformers/transformer_lumina2.py +1 -1
- diffusers/models/transformers/transformer_mochi.py +1 -1
- diffusers/models/transformers/transformer_omnigen.py +2 -2
- diffusers/models/transformers/transformer_qwenimage.py +645 -0
- diffusers/models/transformers/transformer_sd3.py +7 -7
- diffusers/models/transformers/transformer_skyreels_v2.py +607 -0
- diffusers/models/transformers/transformer_temporal.py +1 -1
- diffusers/models/transformers/transformer_wan.py +316 -87
- diffusers/models/transformers/transformer_wan_vace.py +387 -0
- diffusers/models/unets/unet_1d.py +1 -1
- diffusers/models/unets/unet_1d_blocks.py +1 -1
- diffusers/models/unets/unet_2d.py +1 -1
- diffusers/models/unets/unet_2d_blocks.py +1 -1
- diffusers/models/unets/unet_2d_blocks_flax.py +8 -7
- diffusers/models/unets/unet_2d_condition.py +4 -3
- diffusers/models/unets/unet_2d_condition_flax.py +2 -2
- diffusers/models/unets/unet_3d_blocks.py +1 -1
- diffusers/models/unets/unet_3d_condition.py +3 -3
- diffusers/models/unets/unet_i2vgen_xl.py +3 -3
- diffusers/models/unets/unet_kandinsky3.py +1 -1
- diffusers/models/unets/unet_motion_model.py +2 -2
- diffusers/models/unets/unet_stable_cascade.py +1 -1
- diffusers/models/upsampling.py +2 -2
- diffusers/models/vae_flax.py +2 -2
- diffusers/models/vq_model.py +1 -1
- diffusers/modular_pipelines/__init__.py +83 -0
- diffusers/modular_pipelines/components_manager.py +1068 -0
- diffusers/modular_pipelines/flux/__init__.py +66 -0
- diffusers/modular_pipelines/flux/before_denoise.py +689 -0
- diffusers/modular_pipelines/flux/decoders.py +109 -0
- diffusers/modular_pipelines/flux/denoise.py +227 -0
- diffusers/modular_pipelines/flux/encoders.py +412 -0
- diffusers/modular_pipelines/flux/modular_blocks.py +181 -0
- diffusers/modular_pipelines/flux/modular_pipeline.py +59 -0
- diffusers/modular_pipelines/modular_pipeline.py +2446 -0
- diffusers/modular_pipelines/modular_pipeline_utils.py +672 -0
- diffusers/modular_pipelines/node_utils.py +665 -0
- diffusers/modular_pipelines/stable_diffusion_xl/__init__.py +77 -0
- diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +1874 -0
- diffusers/modular_pipelines/stable_diffusion_xl/decoders.py +208 -0
- diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +771 -0
- diffusers/modular_pipelines/stable_diffusion_xl/encoders.py +887 -0
- diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py +380 -0
- diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py +365 -0
- diffusers/modular_pipelines/wan/__init__.py +66 -0
- diffusers/modular_pipelines/wan/before_denoise.py +365 -0
- diffusers/modular_pipelines/wan/decoders.py +105 -0
- diffusers/modular_pipelines/wan/denoise.py +261 -0
- diffusers/modular_pipelines/wan/encoders.py +242 -0
- diffusers/modular_pipelines/wan/modular_blocks.py +144 -0
- diffusers/modular_pipelines/wan/modular_pipeline.py +90 -0
- diffusers/pipelines/__init__.py +68 -6
- diffusers/pipelines/allegro/pipeline_allegro.py +11 -11
- diffusers/pipelines/amused/pipeline_amused.py +7 -6
- diffusers/pipelines/amused/pipeline_amused_img2img.py +6 -5
- diffusers/pipelines/amused/pipeline_amused_inpaint.py +6 -5
- diffusers/pipelines/animatediff/pipeline_animatediff.py +6 -6
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +6 -6
- diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +16 -15
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +6 -6
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +5 -5
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +5 -5
- diffusers/pipelines/audioldm/pipeline_audioldm.py +8 -7
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +1 -1
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +22 -13
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +48 -11
- diffusers/pipelines/auto_pipeline.py +23 -20
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
- diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +2 -2
- diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +11 -10
- diffusers/pipelines/chroma/__init__.py +49 -0
- diffusers/pipelines/chroma/pipeline_chroma.py +949 -0
- diffusers/pipelines/chroma/pipeline_chroma_img2img.py +1034 -0
- diffusers/pipelines/chroma/pipeline_output.py +21 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +17 -16
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +17 -16
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +18 -17
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +17 -16
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +9 -9
- diffusers/pipelines/cogview4/pipeline_cogview4.py +23 -22
- diffusers/pipelines/cogview4/pipeline_cogview4_control.py +7 -7
- diffusers/pipelines/consisid/consisid_utils.py +2 -2
- diffusers/pipelines/consisid/pipeline_consisid.py +8 -8
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -1
- diffusers/pipelines/controlnet/pipeline_controlnet.py +7 -7
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +11 -10
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +7 -7
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +7 -7
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +14 -14
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +10 -6
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +13 -13
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +226 -107
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +12 -8
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +207 -105
- diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +1 -1
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +8 -8
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +7 -7
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +7 -7
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +12 -10
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +9 -7
- diffusers/pipelines/cosmos/__init__.py +54 -0
- diffusers/pipelines/cosmos/pipeline_cosmos2_text2image.py +673 -0
- diffusers/pipelines/cosmos/pipeline_cosmos2_video2world.py +792 -0
- diffusers/pipelines/cosmos/pipeline_cosmos_text2world.py +664 -0
- diffusers/pipelines/cosmos/pipeline_cosmos_video2world.py +826 -0
- diffusers/pipelines/cosmos/pipeline_output.py +40 -0
- diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +5 -4
- diffusers/pipelines/ddim/pipeline_ddim.py +4 -4
- diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +10 -10
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +10 -10
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +10 -10
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +10 -10
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +10 -10
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +10 -10
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +8 -8
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +5 -5
- diffusers/pipelines/deprecated/audio_diffusion/mel.py +1 -1
- diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +3 -3
- diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +1 -1
- diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +2 -2
- diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +4 -3
- diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +1 -1
- diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py +1 -1
- diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py +1 -1
- diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py +1 -1
- diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +1 -1
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +8 -8
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +9 -9
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +10 -10
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +10 -8
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +5 -5
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +18 -18
- diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +1 -1
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +2 -2
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +6 -6
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +5 -5
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +5 -5
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +5 -5
- diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +1 -1
- diffusers/pipelines/dit/pipeline_dit.py +4 -2
- diffusers/pipelines/easyanimate/pipeline_easyanimate.py +4 -4
- diffusers/pipelines/easyanimate/pipeline_easyanimate_control.py +4 -4
- diffusers/pipelines/easyanimate/pipeline_easyanimate_inpaint.py +7 -6
- diffusers/pipelines/flux/__init__.py +4 -0
- diffusers/pipelines/flux/modeling_flux.py +1 -1
- diffusers/pipelines/flux/pipeline_flux.py +37 -36
- diffusers/pipelines/flux/pipeline_flux_control.py +9 -9
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +7 -7
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +7 -7
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +7 -7
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +31 -23
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +3 -2
- diffusers/pipelines/flux/pipeline_flux_fill.py +7 -7
- diffusers/pipelines/flux/pipeline_flux_img2img.py +40 -7
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +12 -7
- diffusers/pipelines/flux/pipeline_flux_kontext.py +1134 -0
- diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +1460 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +2 -2
- diffusers/pipelines/flux/pipeline_output.py +6 -4
- diffusers/pipelines/free_init_utils.py +2 -2
- diffusers/pipelines/free_noise_utils.py +3 -3
- diffusers/pipelines/hidream_image/__init__.py +47 -0
- diffusers/pipelines/hidream_image/pipeline_hidream_image.py +1026 -0
- diffusers/pipelines/hidream_image/pipeline_output.py +35 -0
- diffusers/pipelines/hunyuan_video/__init__.py +2 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py +8 -8
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +26 -25
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_framepack.py +1114 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py +71 -15
- diffusers/pipelines/hunyuan_video/pipeline_output.py +19 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +8 -8
- diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +10 -8
- diffusers/pipelines/kandinsky/pipeline_kandinsky.py +6 -6
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +34 -34
- diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +19 -26
- diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +7 -7
- diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +11 -11
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +35 -35
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +6 -6
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +17 -39
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +17 -45
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +7 -7
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +10 -10
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +10 -10
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +7 -7
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +17 -38
- diffusers/pipelines/kolors/pipeline_kolors.py +10 -10
- diffusers/pipelines/kolors/pipeline_kolors_img2img.py +12 -12
- diffusers/pipelines/kolors/text_encoder.py +3 -3
- diffusers/pipelines/kolors/tokenizer.py +1 -1
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +2 -2
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +2 -2
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py +3 -3
- diffusers/pipelines/latte/pipeline_latte.py +12 -12
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +13 -13
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +17 -16
- diffusers/pipelines/ltx/__init__.py +4 -0
- diffusers/pipelines/ltx/modeling_latent_upsampler.py +188 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +64 -18
- diffusers/pipelines/ltx/pipeline_ltx_condition.py +117 -38
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +63 -18
- diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py +277 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +13 -13
- diffusers/pipelines/lumina2/pipeline_lumina2.py +10 -10
- diffusers/pipelines/marigold/marigold_image_processing.py +2 -2
- diffusers/pipelines/mochi/pipeline_mochi.py +15 -14
- diffusers/pipelines/musicldm/pipeline_musicldm.py +16 -13
- diffusers/pipelines/omnigen/pipeline_omnigen.py +13 -11
- diffusers/pipelines/omnigen/processor_omnigen.py +8 -3
- diffusers/pipelines/onnx_utils.py +15 -2
- diffusers/pipelines/pag/pag_utils.py +2 -2
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +12 -8
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +7 -7
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +10 -6
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +14 -14
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +8 -8
- diffusers/pipelines/pag/pipeline_pag_kolors.py +10 -10
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +11 -11
- diffusers/pipelines/pag/pipeline_pag_sana.py +18 -12
- diffusers/pipelines/pag/pipeline_pag_sd.py +8 -8
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +7 -7
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +7 -7
- diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +6 -6
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +5 -5
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +8 -8
- diffusers/pipelines/pag/pipeline_pag_sd_xl.py +16 -15
- diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +18 -17
- diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +12 -12
- diffusers/pipelines/paint_by_example/image_encoder.py +1 -1
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +8 -7
- diffusers/pipelines/pia/pipeline_pia.py +8 -6
- diffusers/pipelines/pipeline_flax_utils.py +5 -6
- diffusers/pipelines/pipeline_loading_utils.py +113 -15
- diffusers/pipelines/pipeline_utils.py +127 -48
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +14 -12
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +31 -11
- diffusers/pipelines/qwenimage/__init__.py +55 -0
- diffusers/pipelines/qwenimage/pipeline_output.py +21 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage.py +726 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +882 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +829 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +1015 -0
- diffusers/pipelines/sana/__init__.py +4 -0
- diffusers/pipelines/sana/pipeline_sana.py +23 -21
- diffusers/pipelines/sana/pipeline_sana_controlnet.py +1106 -0
- diffusers/pipelines/sana/pipeline_sana_sprint.py +23 -19
- diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py +981 -0
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +7 -6
- diffusers/pipelines/shap_e/camera.py +1 -1
- diffusers/pipelines/shap_e/pipeline_shap_e.py +1 -1
- diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +1 -1
- diffusers/pipelines/shap_e/renderer.py +3 -3
- diffusers/pipelines/skyreels_v2/__init__.py +59 -0
- diffusers/pipelines/skyreels_v2/pipeline_output.py +20 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py +610 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py +978 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_i2v.py +1059 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_v2v.py +1063 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_i2v.py +745 -0
- diffusers/pipelines/stable_audio/modeling_stable_audio.py +1 -1
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +5 -5
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +8 -8
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +13 -13
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +9 -9
- diffusers/pipelines/stable_diffusion/__init__.py +0 -7
- diffusers/pipelines/stable_diffusion/clip_image_project_model.py +1 -1
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +11 -4
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +12 -11
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +10 -10
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +11 -11
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +10 -10
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +10 -9
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +5 -5
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +5 -5
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +5 -5
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +5 -5
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +5 -5
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +4 -4
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +5 -5
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +7 -7
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +5 -5
- diffusers/pipelines/stable_diffusion/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion/safety_checker_flax.py +1 -1
- diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py +1 -1
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +13 -12
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +7 -7
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +7 -7
- diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +12 -8
- diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +15 -9
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +11 -9
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +11 -9
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +18 -12
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +11 -8
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +11 -8
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +15 -12
- diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +8 -6
- diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +15 -11
- diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py +1 -1
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +16 -15
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +18 -17
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +12 -12
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +16 -15
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +3 -3
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +12 -12
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +18 -17
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +12 -7
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +12 -7
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +15 -13
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +24 -21
- diffusers/pipelines/unclip/pipeline_unclip.py +4 -3
- diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +4 -3
- diffusers/pipelines/unclip/text_proj.py +2 -2
- diffusers/pipelines/unidiffuser/modeling_text_decoder.py +2 -2
- diffusers/pipelines/unidiffuser/modeling_uvit.py +1 -1
- diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +8 -7
- diffusers/pipelines/visualcloze/__init__.py +52 -0
- diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py +444 -0
- diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py +952 -0
- diffusers/pipelines/visualcloze/visualcloze_utils.py +251 -0
- diffusers/pipelines/wan/__init__.py +2 -0
- diffusers/pipelines/wan/pipeline_wan.py +91 -30
- diffusers/pipelines/wan/pipeline_wan_i2v.py +145 -45
- diffusers/pipelines/wan/pipeline_wan_vace.py +975 -0
- diffusers/pipelines/wan/pipeline_wan_video2video.py +14 -16
- diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py +1 -1
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +8 -8
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +16 -15
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +6 -6
- diffusers/quantizers/__init__.py +3 -1
- diffusers/quantizers/base.py +17 -1
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +4 -0
- diffusers/quantizers/bitsandbytes/utils.py +10 -7
- diffusers/quantizers/gguf/gguf_quantizer.py +13 -4
- diffusers/quantizers/gguf/utils.py +108 -16
- diffusers/quantizers/pipe_quant_config.py +202 -0
- diffusers/quantizers/quantization_config.py +18 -16
- diffusers/quantizers/quanto/quanto_quantizer.py +4 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +31 -1
- diffusers/schedulers/__init__.py +3 -1
- diffusers/schedulers/deprecated/scheduling_karras_ve.py +4 -3
- diffusers/schedulers/deprecated/scheduling_sde_vp.py +1 -1
- diffusers/schedulers/scheduling_consistency_models.py +1 -1
- diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +10 -5
- diffusers/schedulers/scheduling_ddim.py +8 -8
- diffusers/schedulers/scheduling_ddim_cogvideox.py +5 -5
- diffusers/schedulers/scheduling_ddim_flax.py +6 -6
- diffusers/schedulers/scheduling_ddim_inverse.py +6 -6
- diffusers/schedulers/scheduling_ddim_parallel.py +22 -22
- diffusers/schedulers/scheduling_ddpm.py +9 -9
- diffusers/schedulers/scheduling_ddpm_flax.py +7 -7
- diffusers/schedulers/scheduling_ddpm_parallel.py +18 -18
- diffusers/schedulers/scheduling_ddpm_wuerstchen.py +2 -2
- diffusers/schedulers/scheduling_deis_multistep.py +16 -9
- diffusers/schedulers/scheduling_dpm_cogvideox.py +5 -5
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +18 -12
- diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +22 -20
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +11 -11
- diffusers/schedulers/scheduling_dpmsolver_sde.py +2 -2
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +19 -13
- diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +13 -8
- diffusers/schedulers/scheduling_edm_euler.py +20 -11
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +3 -3
- diffusers/schedulers/scheduling_euler_discrete.py +3 -3
- diffusers/schedulers/scheduling_euler_discrete_flax.py +3 -3
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +20 -5
- diffusers/schedulers/scheduling_flow_match_heun_discrete.py +1 -1
- diffusers/schedulers/scheduling_flow_match_lcm.py +561 -0
- diffusers/schedulers/scheduling_heun_discrete.py +2 -2
- diffusers/schedulers/scheduling_ipndm.py +2 -2
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +2 -2
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +2 -2
- diffusers/schedulers/scheduling_karras_ve_flax.py +5 -5
- diffusers/schedulers/scheduling_lcm.py +3 -3
- diffusers/schedulers/scheduling_lms_discrete.py +2 -2
- diffusers/schedulers/scheduling_lms_discrete_flax.py +1 -1
- diffusers/schedulers/scheduling_pndm.py +4 -4
- diffusers/schedulers/scheduling_pndm_flax.py +4 -4
- diffusers/schedulers/scheduling_repaint.py +9 -9
- diffusers/schedulers/scheduling_sasolver.py +15 -15
- diffusers/schedulers/scheduling_scm.py +1 -2
- diffusers/schedulers/scheduling_sde_ve.py +1 -1
- diffusers/schedulers/scheduling_sde_ve_flax.py +2 -2
- diffusers/schedulers/scheduling_tcd.py +3 -3
- diffusers/schedulers/scheduling_unclip.py +5 -5
- diffusers/schedulers/scheduling_unipc_multistep.py +21 -12
- diffusers/schedulers/scheduling_utils.py +3 -3
- diffusers/schedulers/scheduling_utils_flax.py +2 -2
- diffusers/schedulers/scheduling_vq_diffusion.py +1 -1
- diffusers/training_utils.py +91 -5
- diffusers/utils/__init__.py +15 -0
- diffusers/utils/accelerate_utils.py +1 -1
- diffusers/utils/constants.py +4 -0
- diffusers/utils/doc_utils.py +1 -1
- diffusers/utils/dummy_pt_objects.py +432 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +480 -0
- diffusers/utils/dynamic_modules_utils.py +85 -8
- diffusers/utils/export_utils.py +1 -1
- diffusers/utils/hub_utils.py +33 -17
- diffusers/utils/import_utils.py +151 -18
- diffusers/utils/logging.py +1 -1
- diffusers/utils/outputs.py +2 -1
- diffusers/utils/peft_utils.py +96 -10
- diffusers/utils/state_dict_utils.py +20 -3
- diffusers/utils/testing_utils.py +195 -17
- diffusers/utils/torch_utils.py +43 -5
- diffusers/video_processor.py +2 -2
- {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/METADATA +72 -57
- diffusers-0.35.0.dist-info/RECORD +703 -0
- {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/WHEEL +1 -1
- diffusers-0.33.1.dist-info/RECORD +0 -608
- {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/LICENSE +0 -0
- {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/entry_points.txt +0 -0
- {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/top_level.txt +0 -0
diffusers/hooks/utils.py
ADDED
@@ -0,0 +1,43 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+from ._common import _ALL_TRANSFORMER_BLOCK_IDENTIFIERS, _ATTENTION_CLASSES, _FEEDFORWARD_CLASSES
+
+
+def _get_identifiable_transformer_blocks_in_module(module: torch.nn.Module):
+    module_list_with_transformer_blocks = []
+    for name, submodule in module.named_modules():
+        name_endswith_identifier = any(name.endswith(identifier) for identifier in _ALL_TRANSFORMER_BLOCK_IDENTIFIERS)
+        is_modulelist = isinstance(submodule, torch.nn.ModuleList)
+        if name_endswith_identifier and is_modulelist:
+            module_list_with_transformer_blocks.append((name, submodule))
+    return module_list_with_transformer_blocks
+
+
+def _get_identifiable_attention_layers_in_module(module: torch.nn.Module):
+    attention_layers = []
+    for name, submodule in module.named_modules():
+        if isinstance(submodule, _ATTENTION_CLASSES):
+            attention_layers.append((name, submodule))
+    return attention_layers
+
+
+def _get_identifiable_feedforward_layers_in_module(module: torch.nn.Module):
+    feedforward_layers = []
+    for name, submodule in module.named_modules():
+        if isinstance(submodule, _FEEDFORWARD_CLASSES):
+            feedforward_layers.append((name, submodule))
+    return feedforward_layers
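These new helpers simply walk `named_modules()` and filter submodules by name suffix or by class, using the identifier tuples defined in `diffusers.hooks._common`. A minimal, self-contained sketch of the same pattern on a toy model (the toy classes and the `_ATTENTION_CLASSES` stand-in below are illustrative, not part of diffusers):

```python
import torch.nn as nn

# Toy stand-in: in diffusers the real class tuples live in diffusers.hooks._common.
_ATTENTION_CLASSES = (nn.MultiheadAttention,)


class ToyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim=8, num_heads=2)
        self.ff = nn.Linear(8, 8)


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # A ModuleList whose name ends with "transformer_blocks" is the kind of
        # container the transformer-block helper looks for.
        self.transformer_blocks = nn.ModuleList([ToyBlock() for _ in range(2)])


def find_attention_layers(module: nn.Module):
    # Same pattern as _get_identifiable_attention_layers_in_module:
    # walk named_modules() and keep submodules of the known attention classes.
    return [(name, sub) for name, sub in module.named_modules() if isinstance(sub, _ATTENTION_CLASSES)]


model = ToyModel()
print([name for name, _ in find_attention_layers(model)])
# ['transformer_blocks.0.attn', 'transformer_blocks.1.attn']
```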
diffusers/image_processor.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2025 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -116,6 +116,7 @@ class VaeImageProcessor(ConfigMixin):
         vae_scale_factor: int = 8,
         vae_latent_channels: int = 4,
         resample: str = "lanczos",
+        reducing_gap: int = None,
         do_normalize: bool = True,
         do_binarize: bool = False,
         do_convert_rgb: bool = False,
@@ -498,7 +499,11 @@ class VaeImageProcessor(ConfigMixin):
             raise ValueError(f"Only PIL image input is supported for resize_mode {resize_mode}")
         if isinstance(image, PIL.Image.Image):
             if resize_mode == "default":
-                image = image.resize(
+                image = image.resize(
+                    (width, height),
+                    resample=PIL_INTERPOLATION[self.config.resample],
+                    reducing_gap=self.config.reducing_gap,
+                )
             elif resize_mode == "fill":
                 image = self._resize_and_fill(image, width, height)
             elif resize_mode == "crop":
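The new `reducing_gap` config value is stored on the processor and forwarded to `PIL.Image.resize` in the default resize mode, which lets PIL downscale large images in cheaper integer steps first. A hedged usage sketch (argument values are illustrative):

```python
from PIL import Image
from diffusers.image_processor import VaeImageProcessor

# reducing_gap is a new constructor argument in 0.35.0; it is passed through to PIL.Image.resize.
processor = VaeImageProcessor(resample="lanczos", reducing_gap=2)

image = Image.new("RGB", (2048, 2048))
resized = processor.resize(image, height=512, width=512)
print(resized.size)  # (512, 512)
```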
diffusers/loaders/__init__.py
CHANGED
@@ -65,6 +65,7 @@ if is_torch_available():
         "AmusedLoraLoaderMixin",
         "StableDiffusionLoraLoaderMixin",
         "SD3LoraLoaderMixin",
+        "AuraFlowLoraLoaderMixin",
         "StableDiffusionXLLoraLoaderMixin",
         "LTXVideoLoraLoaderMixin",
         "LoraLoaderMixin",
@@ -76,12 +77,16 @@ if is_torch_available():
         "SanaLoraLoaderMixin",
         "Lumina2LoraLoaderMixin",
         "WanLoraLoaderMixin",
+        "HiDreamImageLoraLoaderMixin",
+        "SkyReelsV2LoraLoaderMixin",
+        "QwenImageLoraLoaderMixin",
     ]
     _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
     _import_structure["ip_adapter"] = [
         "IPAdapterMixin",
         "FluxIPAdapterMixin",
         "SD3IPAdapterMixin",
+        "ModularIPAdapterMixin",
     ]

     _import_structure["peft"] = ["PeftAdapterMixin"]
@@ -99,20 +104,25 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
     from .ip_adapter import (
         FluxIPAdapterMixin,
         IPAdapterMixin,
+        ModularIPAdapterMixin,
         SD3IPAdapterMixin,
     )
     from .lora_pipeline import (
         AmusedLoraLoaderMixin,
+        AuraFlowLoraLoaderMixin,
         CogVideoXLoraLoaderMixin,
         CogView4LoraLoaderMixin,
         FluxLoraLoaderMixin,
+        HiDreamImageLoraLoaderMixin,
         HunyuanVideoLoraLoaderMixin,
         LoraLoaderMixin,
         LTXVideoLoraLoaderMixin,
         Lumina2LoraLoaderMixin,
         Mochi1LoraLoaderMixin,
+        QwenImageLoraLoaderMixin,
         SanaLoraLoaderMixin,
         SD3LoraLoaderMixin,
+        SkyReelsV2LoraLoaderMixin,
         StableDiffusionLoraLoaderMixin,
         StableDiffusionXLLoraLoaderMixin,
         WanLoraLoaderMixin,
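In practice this means the new LoRA loader mixins and `ModularIPAdapterMixin` become importable from `diffusers.loaders` in 0.35.0. A quick, illustrative import check (assumes diffusers >= 0.35.0 is installed):

```python
# These names are new exports in this release.
from diffusers.loaders import (
    AuraFlowLoraLoaderMixin,
    HiDreamImageLoraLoaderMixin,
    ModularIPAdapterMixin,
    QwenImageLoraLoaderMixin,
    SkyReelsV2LoraLoaderMixin,
)

for mixin in (AuraFlowLoraLoaderMixin, QwenImageLoraLoaderMixin, ModularIPAdapterMixin):
    print(mixin.__name__)
```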
diffusers/loaders/ip_adapter.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2025 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -40,8 +40,6 @@ if is_transformers_available():
     from ..models.attention_processor import (
         AttnProcessor,
         AttnProcessor2_0,
-        FluxAttnProcessor2_0,
-        FluxIPAdapterJointAttnProcessor2_0,
         IPAdapterAttnProcessor,
         IPAdapterAttnProcessor2_0,
         IPAdapterXFormersAttnProcessor,
@@ -159,10 +157,7 @@ class IPAdapterMixin:
                 " `low_cpu_mem_usage=False`."
             )

-        user_agent = {
-            "file_type": "attn_procs_weights",
-            "framework": "pytorch",
-        }
+        user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}
         state_dicts = []
         for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
             pretrained_model_name_or_path_or_dict, weight_name, subfolder
@@ -357,6 +352,256 @@ class IPAdapterMixin:
         self.unet.set_attn_processor(attn_procs)


+class ModularIPAdapterMixin:
+    """Mixin for handling IP Adapters."""
+
+    @validate_hf_hub_args
+    def load_ip_adapter(
+        self,
+        pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]],
+        subfolder: Union[str, List[str]],
+        weight_name: Union[str, List[str]],
+        **kwargs,
+    ):
+        """
+        Parameters:
+            pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`):
+                Can be either:
+
+                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
+                      the Hub.
+                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
+                      with [`ModelMixin.save_pretrained`].
+                    - A [torch state
+                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
+            subfolder (`str` or `List[str]`):
+                The subfolder location of a model file within a larger model repository on the Hub or locally. If a
+                list is passed, it should have the same length as `weight_name`.
+            weight_name (`str` or `List[str]`):
+                The name of the weight file to load. If a list is passed, it should have the same length as
+                `subfolder`.
+            cache_dir (`Union[str, os.PathLike]`, *optional*):
+                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
+                is not used.
+            force_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+                cached versions if they exist.
+
+            proxies (`Dict[str, str]`, *optional*):
+                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
+                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+            local_files_only (`bool`, *optional*, defaults to `False`):
+                Whether to only load local model weights and configuration files or not. If set to `True`, the model
+                won't be downloaded from the Hub.
+            token (`str` or *bool*, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
+                `diffusers-cli login` (stored in `~/.huggingface`) is used.
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
+                allowed by Git.
+            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
+                Speed up model loading only loading the pretrained weights and not initializing the weights. This also
+                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
+                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
+                argument to `True` will raise an error.
+        """
+
+        # handle the list inputs for multiple IP Adapters
+        if not isinstance(weight_name, list):
+            weight_name = [weight_name]
+
+        if not isinstance(pretrained_model_name_or_path_or_dict, list):
+            pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict]
+        if len(pretrained_model_name_or_path_or_dict) == 1:
+            pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name)
+
+        if not isinstance(subfolder, list):
+            subfolder = [subfolder]
+        if len(subfolder) == 1:
+            subfolder = subfolder * len(weight_name)
+
+        if len(weight_name) != len(pretrained_model_name_or_path_or_dict):
+            raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.")
+
+        if len(weight_name) != len(subfolder):
+            raise ValueError("`weight_name` and `subfolder` must have the same length.")
+
+        # Load the main state dict first.
+        cache_dir = kwargs.pop("cache_dir", None)
+        force_download = kwargs.pop("force_download", False)
+        proxies = kwargs.pop("proxies", None)
+        local_files_only = kwargs.pop("local_files_only", None)
+        token = kwargs.pop("token", None)
+        revision = kwargs.pop("revision", None)
+        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
+
+        if low_cpu_mem_usage and not is_accelerate_available():
+            low_cpu_mem_usage = False
+            logger.warning(
+                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
+                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
+                " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
+                " install accelerate\n```\n."
+            )
+
+        if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
+            raise NotImplementedError(
+                "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
+                " `low_cpu_mem_usage=False`."
+            )
+
+        user_agent = {
+            "file_type": "attn_procs_weights",
+            "framework": "pytorch",
+        }
+        state_dicts = []
+        for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
+            pretrained_model_name_or_path_or_dict, weight_name, subfolder
+        ):
+            if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+                model_file = _get_model_file(
+                    pretrained_model_name_or_path_or_dict,
+                    weights_name=weight_name,
+                    cache_dir=cache_dir,
+                    force_download=force_download,
+                    proxies=proxies,
+                    local_files_only=local_files_only,
+                    token=token,
+                    revision=revision,
+                    subfolder=subfolder,
+                    user_agent=user_agent,
+                )
+                if weight_name.endswith(".safetensors"):
+                    state_dict = {"image_proj": {}, "ip_adapter": {}}
+                    with safe_open(model_file, framework="pt", device="cpu") as f:
+                        for key in f.keys():
+                            if key.startswith("image_proj."):
+                                state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
+                            elif key.startswith("ip_adapter."):
+                                state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
+                else:
+                    state_dict = load_state_dict(model_file)
+            else:
+                state_dict = pretrained_model_name_or_path_or_dict
+
+            keys = list(state_dict.keys())
+            if "image_proj" not in keys and "ip_adapter" not in keys:
+                raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
+
+            state_dicts.append(state_dict)
+
+        unet_name = getattr(self, "unet_name", "unet")
+        unet = getattr(self, unet_name)
+        unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)
+
+        extra_loras = unet._load_ip_adapter_loras(state_dicts)
+        if extra_loras != {}:
+            if not USE_PEFT_BACKEND:
+                logger.warning("PEFT backend is required to load these weights.")
+            else:
+                # apply the IP Adapter Face ID LoRA weights
+                peft_config = getattr(unet, "peft_config", {})
+                for k, lora in extra_loras.items():
+                    if f"faceid_{k}" not in peft_config:
+                        self.load_lora_weights(lora, adapter_name=f"faceid_{k}")
+                        self.set_adapters([f"faceid_{k}"], adapter_weights=[1.0])
+
+    def set_ip_adapter_scale(self, scale):
+        """
+        Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for
+        granular control over each IP-Adapter behavior. A config can be a float or a dictionary.
+
+        Example:
+
+        ```py
+        # To use original IP-Adapter
+        scale = 1.0
+        pipeline.set_ip_adapter_scale(scale)
+
+        # To use style block only
+        scale = {
+            "up": {"block_0": [0.0, 1.0, 0.0]},
+        }
+        pipeline.set_ip_adapter_scale(scale)
+
+        # To use style+layout blocks
+        scale = {
+            "down": {"block_2": [0.0, 1.0]},
+            "up": {"block_0": [0.0, 1.0, 0.0]},
+        }
+        pipeline.set_ip_adapter_scale(scale)
+
+        # To use style and layout from 2 reference images
+        scales = [{"down": {"block_2": [0.0, 1.0]}}, {"up": {"block_0": [0.0, 1.0, 0.0]}}]
+        pipeline.set_ip_adapter_scale(scales)
+        ```
+        """
+        unet_name = getattr(self, "unet_name", "unet")
+        unet = getattr(self, unet_name)
+        if not isinstance(scale, list):
+            scale = [scale]
+        scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0)
+
+        for attn_name, attn_processor in unet.attn_processors.items():
+            if isinstance(
+                attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor)
+            ):
+                if len(scale_configs) != len(attn_processor.scale):
+                    raise ValueError(
+                        f"Cannot assign {len(scale_configs)} scale_configs to {len(attn_processor.scale)} IP-Adapter."
+                    )
+                elif len(scale_configs) == 1:
+                    scale_configs = scale_configs * len(attn_processor.scale)
+                for i, scale_config in enumerate(scale_configs):
+                    if isinstance(scale_config, dict):
+                        for k, s in scale_config.items():
+                            if attn_name.startswith(k):
+                                attn_processor.scale[i] = s
+                    else:
+                        attn_processor.scale[i] = scale_config
+
+    def unload_ip_adapter(self):
+        """
+        Unloads the IP Adapter weights
+
+        Examples:
+
+        ```python
+        >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
+        >>> pipeline.unload_ip_adapter()
+        >>> ...
+        ```
+        """
+
+        # remove hidden encoder
+        if self.unet is None:
+            return
+
+        self.unet.encoder_hid_proj = None
+        self.unet.config.encoder_hid_dim_type = None
+
+        # Kolors: restore `encoder_hid_proj` with `text_encoder_hid_proj`
+        if hasattr(self.unet, "text_encoder_hid_proj") and self.unet.text_encoder_hid_proj is not None:
+            self.unet.encoder_hid_proj = self.unet.text_encoder_hid_proj
+            self.unet.text_encoder_hid_proj = None
+            self.unet.config.encoder_hid_dim_type = "text_proj"
+
+        # restore original Unet attention processors layers
+        attn_procs = {}
+        for name, value in self.unet.attn_processors.items():
+            attn_processor_class = (
+                AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnProcessor()
+            )
+            attn_procs[name] = (
+                attn_processor_class
+                if isinstance(
+                    value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor)
+                )
+                else value.__class__()
+            )
+        self.unet.set_attn_processor(attn_procs)
+
+
 class FluxIPAdapterMixin:
     """Mixin for handling Flux IP Adapters."""

@@ -465,10 +710,7 @@ class FluxIPAdapterMixin:
                 " `low_cpu_mem_usage=False`."
             )

-        user_agent = {
-            "file_type": "attn_procs_weights",
-            "framework": "pytorch",
-        }
+        user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}
         state_dicts = []
         for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
             pretrained_model_name_or_path_or_dict, weight_name, subfolder
@@ -526,7 +768,7 @@ class FluxIPAdapterMixin:
                     low_cpu_mem_usage=low_cpu_mem_usage,
                     cache_dir=cache_dir,
                     local_files_only=local_files_only,
-
+                    torch_dtype=image_encoder_dtype,
                 )
                 .to(self.device)
                 .eval()
@@ -623,6 +865,9 @@ class FluxIPAdapterMixin:
         >>> ...
         ```
         """
+        # TODO: once the 1.0.0 deprecations are in, we can move the imports to top-level
+        from ..models.transformers.transformer_flux import FluxAttnProcessor, FluxIPAdapterAttnProcessor
+
         # remove CLIP image encoder
         if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
             self.image_encoder = None
@@ -642,9 +887,9 @@ class FluxIPAdapterMixin:
         # restore original Transformer attention processors layers
         attn_procs = {}
         for name, value in self.transformer.attn_processors.items():
-            attn_processor_class =
+            attn_processor_class = FluxAttnProcessor()
             attn_procs[name] = (
-                attn_processor_class if isinstance(value,
+                attn_processor_class if isinstance(value, FluxIPAdapterAttnProcessor) else value.__class__()
             )
         self.transformer.set_attn_processor(attn_procs)

@@ -750,10 +995,7 @@ class SD3IPAdapterMixin:
                 " `low_cpu_mem_usage=False`."
             )

-        user_agent = {
-            "file_type": "attn_procs_weights",
-            "framework": "pytorch",
-        }
+        user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}

         if not isinstance(pretrained_model_name_or_path_or_dict, dict):
             model_file = _get_model_file(
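The public IP-Adapter workflow is otherwise unchanged by this diff for UNet-based pipelines; the visible differences are the new `ModularIPAdapterMixin` and the Flux unload path importing `FluxAttnProcessor`/`FluxIPAdapterAttnProcessor` from `transformer_flux`. A hedged sketch of the usual load/scale/unload round trip (model ids are placeholders, not taken from this diff):

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder checkpoint ids: any SD 1.5-style pipeline plus the usual
# h94/IP-Adapter repository layout; adjust to whatever you actually use.
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

# Per-block scale config, as documented in set_ip_adapter_scale above.
pipe.set_ip_adapter_scale({"up": {"block_0": [0.0, 1.0, 0.0]}})

# ... run inference with ip_adapter_image=..., then restore the original attention processors.
pipe.unload_ip_adapter()
```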