diffusers 0.28.0__tar.gz → 0.28.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {diffusers-0.28.0/src/diffusers.egg-info → diffusers-0.28.1}/PKG-INFO +76 -2
- {diffusers-0.28.0 → diffusers-0.28.1}/setup.py +1 -1
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/__init__.py +9 -1
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/configuration_utils.py +17 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/__init__.py +6 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/activations.py +12 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/attention_processor.py +108 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/embeddings.py +216 -8
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/model_loading_utils.py +28 -0
- diffusers-0.28.1/src/diffusers/models/modeling_outputs.py +31 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/modeling_utils.py +57 -1
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/normalization.py +2 -1
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/transformers/__init__.py +3 -0
- diffusers-0.28.1/src/diffusers/models/transformers/dit_transformer_2d.py +240 -0
- diffusers-0.28.1/src/diffusers/models/transformers/hunyuan_transformer_2d.py +427 -0
- diffusers-0.28.1/src/diffusers/models/transformers/pixart_transformer_2d.py +336 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/transformers/transformer_2d.py +37 -45
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/__init__.py +2 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/dit/pipeline_dit.py +4 -4
- diffusers-0.28.1/src/diffusers/pipelines/hunyuandit/__init__.py +48 -0
- diffusers-0.28.1/src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +881 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/pipeline_loading_utils.py +1 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +4 -4
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +2 -2
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_pt_objects.py +45 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_torch_and_transformers_objects.py +15 -0
- {diffusers-0.28.0 → diffusers-0.28.1/src/diffusers.egg-info}/PKG-INFO +76 -2
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers.egg-info/SOURCES.txt +5 -0
- diffusers-0.28.0/src/diffusers/models/modeling_outputs.py +0 -17
- {diffusers-0.28.0 → diffusers-0.28.1}/LICENSE +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/MANIFEST.in +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/README.md +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/pyproject.toml +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/setup.cfg +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/callbacks.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/commands/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/commands/diffusers_cli.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/commands/env.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/commands/fp16_safetensors.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/dependency_versions_check.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/dependency_versions_table.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/experimental/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/experimental/rl/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/experimental/rl/value_guided_sampling.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/image_processor.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/autoencoder.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/controlnet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/ip_adapter.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/lora.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/lora_conversion_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/peft.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/single_file.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/single_file_model.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/single_file_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/textual_inversion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/unet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/unet_loader_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/loaders/utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/adapter.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/attention.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/attention_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/autoencoders/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/autoencoders/autoencoder_asym_kl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/autoencoders/autoencoder_kl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/autoencoders/autoencoder_tiny.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/autoencoders/consistency_decoder_vae.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/autoencoders/vae.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/controlnet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/controlnet_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/controlnet_xs.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/downsampling.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/dual_transformer_2d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/embeddings_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/lora.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/modeling_flax_pytorch_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/modeling_flax_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/modeling_pytorch_flax_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/prior_transformer.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/resnet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/resnet_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/t5_film_transformer.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/transformer_2d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/transformer_temporal.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/transformers/dual_transformer_2d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/transformers/prior_transformer.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/transformers/t5_film_transformer.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/transformers/transformer_temporal.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unet_1d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unet_1d_blocks.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unet_2d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unet_2d_blocks.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unet_2d_condition.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_1d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_1d_blocks.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_2d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_2d_blocks.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_2d_blocks_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_2d_condition.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_2d_condition_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_3d_blocks.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_3d_condition.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_i2vgen_xl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_kandinsky3.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_motion_model.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_spatio_temporal_condition.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/unet_stable_cascade.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/unets/uvit_2d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/upsampling.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/vae_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/models/vq_model.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/optimization.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/amused/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/amused/pipeline_amused.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/amused/pipeline_amused_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/animatediff/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/animatediff/pipeline_animatediff.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/animatediff/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/audioldm/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/audioldm/pipeline_audioldm.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/audioldm2/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/audioldm2/modeling_audioldm2.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/auto_pipeline.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/blip_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/blip_diffusion/blip_image_processing.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/consistency_models/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/multicontrolnet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/pipeline_controlnet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet_xs/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/dance_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/ddim/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/ddim/pipeline_ddim.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/ddpm/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/ddpm/pipeline_ddpm.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/safety_checker.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/timesteps.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deepfloyd_if/watermark.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/pndm/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/repaint/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/dit/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/free_init_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/i2vgen_xl/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky/text_encoder.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky3/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/latent_consistency_models/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/latent_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/ledits_pp/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/ledits_pp/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/marigold/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/marigold/marigold_image_processing.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/musicldm/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/musicldm/pipeline_musicldm.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/onnx_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/paint_by_example/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/paint_by_example/image_encoder.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/pia/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/pia/pipeline_pia.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/pipeline_flax_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/pipeline_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/pixart_alpha/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/shap_e/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/shap_e/camera.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/shap_e/pipeline_shap_e.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/shap_e/renderer.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_cascade/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/clip_image_project_model.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/safety_checker.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_safe/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_sag/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_xl/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_xl/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_diffusion_xl/watermark.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_video_diffusion/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/t2i_adapter/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/text_to_video_synthesis/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/unclip/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/unclip/pipeline_unclip.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/unclip/text_proj.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/unidiffuser/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/unidiffuser/modeling_uvit.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/wuerstchen/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/py.typed +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/deprecated/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_amused.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_consistency_decoder.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_consistency_models.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ddim.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ddim_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ddim_inverse.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ddim_parallel.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ddpm.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ddpm_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ddpm_parallel.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_deis_multistep.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_dpmsolver_sde.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_edm_euler.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_euler_discrete.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_euler_discrete_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_heun_discrete.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_ipndm.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_karras_ve_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_lcm.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_lms_discrete.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_lms_discrete_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_pndm.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_pndm_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_repaint.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_sasolver.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_sde_ve.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_sde_ve_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_tcd.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_unclip.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_unipc_multistep.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_utils_flax.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/schedulers/scheduling_vq_diffusion.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/training_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/__init__.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/accelerate_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/constants.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/deprecation_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/doc_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_flax_and_transformers_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_flax_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_note_seq_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_onnx_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_torch_and_librosa_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_torch_and_scipy_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_torch_and_torchsde_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/dynamic_modules_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/export_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/hub_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/import_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/loading_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/logging.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/model_card_template.md +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/outputs.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/peft_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/pil_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/state_dict_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/testing_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/torch_utils.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/utils/versions.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers/video_processor.py +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers.egg-info/dependency_links.txt +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers.egg-info/entry_points.txt +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers.egg-info/requires.txt +0 -0
- {diffusers-0.28.0 → diffusers-0.28.1}/src/diffusers.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: diffusers
|
3
|
-
Version: 0.28.
|
3
|
+
Version: 0.28.1
|
4
4
|
Summary: State-of-the-art diffusion in PyTorch and JAX.
|
5
5
|
Home-page: https://github.com/huggingface/diffusers
|
6
6
|
Author: The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)
|
@@ -20,14 +20,88 @@ Classifier: Programming Language :: Python :: 3.9
|
|
20
20
|
Classifier: Programming Language :: Python :: 3.10
|
21
21
|
Requires-Python: >=3.8.0
|
22
22
|
Description-Content-Type: text/markdown
|
23
|
+
License-File: LICENSE
|
24
|
+
Requires-Dist: importlib_metadata
|
25
|
+
Requires-Dist: filelock
|
26
|
+
Requires-Dist: huggingface-hub>=0.20.2
|
27
|
+
Requires-Dist: numpy
|
28
|
+
Requires-Dist: regex!=2019.12.17
|
29
|
+
Requires-Dist: requests
|
30
|
+
Requires-Dist: safetensors>=0.3.1
|
31
|
+
Requires-Dist: Pillow
|
23
32
|
Provides-Extra: quality
|
33
|
+
Requires-Dist: urllib3<=2.0.0; extra == "quality"
|
34
|
+
Requires-Dist: isort>=5.5.4; extra == "quality"
|
35
|
+
Requires-Dist: ruff==0.1.5; extra == "quality"
|
36
|
+
Requires-Dist: hf-doc-builder>=0.3.0; extra == "quality"
|
24
37
|
Provides-Extra: docs
|
38
|
+
Requires-Dist: hf-doc-builder>=0.3.0; extra == "docs"
|
25
39
|
Provides-Extra: training
|
40
|
+
Requires-Dist: accelerate>=0.29.3; extra == "training"
|
41
|
+
Requires-Dist: datasets; extra == "training"
|
42
|
+
Requires-Dist: protobuf<4,>=3.20.3; extra == "training"
|
43
|
+
Requires-Dist: tensorboard; extra == "training"
|
44
|
+
Requires-Dist: Jinja2; extra == "training"
|
45
|
+
Requires-Dist: peft>=0.6.0; extra == "training"
|
26
46
|
Provides-Extra: test
|
47
|
+
Requires-Dist: compel==0.1.8; extra == "test"
|
48
|
+
Requires-Dist: GitPython<3.1.19; extra == "test"
|
49
|
+
Requires-Dist: datasets; extra == "test"
|
50
|
+
Requires-Dist: Jinja2; extra == "test"
|
51
|
+
Requires-Dist: invisible-watermark>=0.2.0; extra == "test"
|
52
|
+
Requires-Dist: k-diffusion>=0.0.12; extra == "test"
|
53
|
+
Requires-Dist: librosa; extra == "test"
|
54
|
+
Requires-Dist: parameterized; extra == "test"
|
55
|
+
Requires-Dist: pytest; extra == "test"
|
56
|
+
Requires-Dist: pytest-timeout; extra == "test"
|
57
|
+
Requires-Dist: pytest-xdist; extra == "test"
|
58
|
+
Requires-Dist: requests-mock==1.10.0; extra == "test"
|
59
|
+
Requires-Dist: safetensors>=0.3.1; extra == "test"
|
60
|
+
Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "test"
|
61
|
+
Requires-Dist: scipy; extra == "test"
|
62
|
+
Requires-Dist: torchvision; extra == "test"
|
63
|
+
Requires-Dist: transformers>=4.25.1; extra == "test"
|
27
64
|
Provides-Extra: torch
|
65
|
+
Requires-Dist: torch>=1.4; extra == "torch"
|
66
|
+
Requires-Dist: accelerate>=0.29.3; extra == "torch"
|
28
67
|
Provides-Extra: flax
|
68
|
+
Requires-Dist: jax>=0.4.1; extra == "flax"
|
69
|
+
Requires-Dist: jaxlib>=0.4.1; extra == "flax"
|
70
|
+
Requires-Dist: flax>=0.4.1; extra == "flax"
|
29
71
|
Provides-Extra: dev
|
30
|
-
|
72
|
+
Requires-Dist: urllib3<=2.0.0; extra == "dev"
|
73
|
+
Requires-Dist: isort>=5.5.4; extra == "dev"
|
74
|
+
Requires-Dist: ruff==0.1.5; extra == "dev"
|
75
|
+
Requires-Dist: hf-doc-builder>=0.3.0; extra == "dev"
|
76
|
+
Requires-Dist: compel==0.1.8; extra == "dev"
|
77
|
+
Requires-Dist: GitPython<3.1.19; extra == "dev"
|
78
|
+
Requires-Dist: datasets; extra == "dev"
|
79
|
+
Requires-Dist: Jinja2; extra == "dev"
|
80
|
+
Requires-Dist: invisible-watermark>=0.2.0; extra == "dev"
|
81
|
+
Requires-Dist: k-diffusion>=0.0.12; extra == "dev"
|
82
|
+
Requires-Dist: librosa; extra == "dev"
|
83
|
+
Requires-Dist: parameterized; extra == "dev"
|
84
|
+
Requires-Dist: pytest; extra == "dev"
|
85
|
+
Requires-Dist: pytest-timeout; extra == "dev"
|
86
|
+
Requires-Dist: pytest-xdist; extra == "dev"
|
87
|
+
Requires-Dist: requests-mock==1.10.0; extra == "dev"
|
88
|
+
Requires-Dist: safetensors>=0.3.1; extra == "dev"
|
89
|
+
Requires-Dist: sentencepiece!=0.1.92,>=0.1.91; extra == "dev"
|
90
|
+
Requires-Dist: scipy; extra == "dev"
|
91
|
+
Requires-Dist: torchvision; extra == "dev"
|
92
|
+
Requires-Dist: transformers>=4.25.1; extra == "dev"
|
93
|
+
Requires-Dist: accelerate>=0.29.3; extra == "dev"
|
94
|
+
Requires-Dist: datasets; extra == "dev"
|
95
|
+
Requires-Dist: protobuf<4,>=3.20.3; extra == "dev"
|
96
|
+
Requires-Dist: tensorboard; extra == "dev"
|
97
|
+
Requires-Dist: Jinja2; extra == "dev"
|
98
|
+
Requires-Dist: peft>=0.6.0; extra == "dev"
|
99
|
+
Requires-Dist: hf-doc-builder>=0.3.0; extra == "dev"
|
100
|
+
Requires-Dist: torch>=1.4; extra == "dev"
|
101
|
+
Requires-Dist: accelerate>=0.29.3; extra == "dev"
|
102
|
+
Requires-Dist: jax>=0.4.1; extra == "dev"
|
103
|
+
Requires-Dist: jaxlib>=0.4.1; extra == "dev"
|
104
|
+
Requires-Dist: flax>=0.4.1; extra == "dev"
|
31
105
|
|
32
106
|
<!---
|
33
107
|
Copyright 2022 - The HuggingFace Team. All rights reserved.
|
@@ -254,7 +254,7 @@ version_range_max = max(sys.version_info[1], 10) + 1
|
|
254
254
|
|
255
255
|
setup(
|
256
256
|
name="diffusers",
|
257
|
-
version="0.28.
|
257
|
+
version="0.28.1", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
|
258
258
|
description="State-of-the-art diffusion in PyTorch and JAX.",
|
259
259
|
long_description=open("README.md", "r", encoding="utf-8").read(),
|
260
260
|
long_description_content_type="text/markdown",
|
@@ -1,4 +1,4 @@
|
|
1
|
-
__version__ = "0.28.
|
1
|
+
__version__ = "0.28.1"
|
2
2
|
|
3
3
|
from typing import TYPE_CHECKING
|
4
4
|
|
@@ -82,11 +82,14 @@ else:
|
|
82
82
|
"ConsistencyDecoderVAE",
|
83
83
|
"ControlNetModel",
|
84
84
|
"ControlNetXSAdapter",
|
85
|
+
"DiTTransformer2DModel",
|
86
|
+
"HunyuanDiT2DModel",
|
85
87
|
"I2VGenXLUNet",
|
86
88
|
"Kandinsky3UNet",
|
87
89
|
"ModelMixin",
|
88
90
|
"MotionAdapter",
|
89
91
|
"MultiAdapter",
|
92
|
+
"PixArtTransformer2DModel",
|
90
93
|
"PriorTransformer",
|
91
94
|
"StableCascadeUNet",
|
92
95
|
"T2IAdapter",
|
@@ -227,6 +230,7 @@ else:
|
|
227
230
|
"BlipDiffusionPipeline",
|
228
231
|
"CLIPImageProjection",
|
229
232
|
"CycleDiffusionPipeline",
|
233
|
+
"HunyuanDiTPipeline",
|
230
234
|
"I2VGenXLPipeline",
|
231
235
|
"IFImg2ImgPipeline",
|
232
236
|
"IFImg2ImgSuperResolutionPipeline",
|
@@ -484,11 +488,14 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
|
484
488
|
ConsistencyDecoderVAE,
|
485
489
|
ControlNetModel,
|
486
490
|
ControlNetXSAdapter,
|
491
|
+
DiTTransformer2DModel,
|
492
|
+
HunyuanDiT2DModel,
|
487
493
|
I2VGenXLUNet,
|
488
494
|
Kandinsky3UNet,
|
489
495
|
ModelMixin,
|
490
496
|
MotionAdapter,
|
491
497
|
MultiAdapter,
|
498
|
+
PixArtTransformer2DModel,
|
492
499
|
PriorTransformer,
|
493
500
|
T2IAdapter,
|
494
501
|
T5FilmDecoder,
|
@@ -607,6 +614,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
|
607
614
|
AudioLDMPipeline,
|
608
615
|
CLIPImageProjection,
|
609
616
|
CycleDiffusionPipeline,
|
617
|
+
HunyuanDiTPipeline,
|
610
618
|
I2VGenXLPipeline,
|
611
619
|
IFImg2ImgPipeline,
|
612
620
|
IFImg2ImgSuperResolutionPipeline,
|
@@ -706,3 +706,20 @@ def flax_register_to_config(cls):
|
|
706
706
|
|
707
707
|
cls.__init__ = init
|
708
708
|
return cls
|
709
|
+
|
710
|
+
|
711
|
+
class LegacyConfigMixin(ConfigMixin):
|
712
|
+
r"""
|
713
|
+
A subclass of `ConfigMixin` to resolve class mapping from legacy classes (like `Transformer2DModel`) to more
|
714
|
+
pipeline-specific classes (like `DiTTransformer2DModel`).
|
715
|
+
"""
|
716
|
+
|
717
|
+
@classmethod
|
718
|
+
def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):
|
719
|
+
# To prevent depedency import problem.
|
720
|
+
from .models.model_loading_utils import _fetch_remapped_cls_from_config
|
721
|
+
|
722
|
+
# resolve remapping
|
723
|
+
remapped_class = _fetch_remapped_cls_from_config(config, cls)
|
724
|
+
|
725
|
+
return remapped_class.from_config(config, return_unused_kwargs, **kwargs)
|
@@ -36,6 +36,9 @@ if is_torch_available():
|
|
36
36
|
_import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"]
|
37
37
|
_import_structure["embeddings"] = ["ImageProjection"]
|
38
38
|
_import_structure["modeling_utils"] = ["ModelMixin"]
|
39
|
+
_import_structure["transformers.dit_transformer_2d"] = ["DiTTransformer2DModel"]
|
40
|
+
_import_structure["transformers.hunyuan_transformer_2d"] = ["HunyuanDiT2DModel"]
|
41
|
+
_import_structure["transformers.pixart_transformer_2d"] = ["PixArtTransformer2DModel"]
|
39
42
|
_import_structure["transformers.prior_transformer"] = ["PriorTransformer"]
|
40
43
|
_import_structure["transformers.t5_film_transformer"] = ["T5FilmDecoder"]
|
41
44
|
_import_structure["transformers.transformer_2d"] = ["Transformer2DModel"]
|
@@ -73,7 +76,10 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
|
|
73
76
|
from .embeddings import ImageProjection
|
74
77
|
from .modeling_utils import ModelMixin
|
75
78
|
from .transformers import (
|
79
|
+
DiTTransformer2DModel,
|
76
80
|
DualTransformer2DModel,
|
81
|
+
HunyuanDiT2DModel,
|
82
|
+
PixArtTransformer2DModel,
|
77
83
|
PriorTransformer,
|
78
84
|
T5FilmDecoder,
|
79
85
|
Transformer2DModel,
|
@@ -50,6 +50,18 @@ def get_activation(act_fn: str) -> nn.Module:
|
|
50
50
|
raise ValueError(f"Unsupported activation function: {act_fn}")
|
51
51
|
|
52
52
|
|
53
|
+
class FP32SiLU(nn.Module):
|
54
|
+
r"""
|
55
|
+
SiLU activation function with input upcasted to torch.float32.
|
56
|
+
"""
|
57
|
+
|
58
|
+
def __init__(self):
|
59
|
+
super().__init__()
|
60
|
+
|
61
|
+
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
|
62
|
+
return F.silu(inputs.float(), inplace=False).to(inputs.dtype)
|
63
|
+
|
64
|
+
|
53
65
|
class GELU(nn.Module):
|
54
66
|
r"""
|
55
67
|
GELU activation function with tanh approximation support with `approximate="tanh"`.
|
@@ -103,6 +103,7 @@ class Attention(nn.Module):
|
|
103
103
|
upcast_softmax: bool = False,
|
104
104
|
cross_attention_norm: Optional[str] = None,
|
105
105
|
cross_attention_norm_num_groups: int = 32,
|
106
|
+
qk_norm: Optional[str] = None,
|
106
107
|
added_kv_proj_dim: Optional[int] = None,
|
107
108
|
norm_num_groups: Optional[int] = None,
|
108
109
|
spatial_norm_dim: Optional[int] = None,
|
@@ -161,6 +162,15 @@ class Attention(nn.Module):
|
|
161
162
|
else:
|
162
163
|
self.spatial_norm = None
|
163
164
|
|
165
|
+
if qk_norm is None:
|
166
|
+
self.norm_q = None
|
167
|
+
self.norm_k = None
|
168
|
+
elif qk_norm == "layer_norm":
|
169
|
+
self.norm_q = nn.LayerNorm(dim_head, eps=eps)
|
170
|
+
self.norm_k = nn.LayerNorm(dim_head, eps=eps)
|
171
|
+
else:
|
172
|
+
raise ValueError(f"unknown qk_norm: {qk_norm}. Should be None or 'layer_norm'")
|
173
|
+
|
164
174
|
if cross_attention_norm is None:
|
165
175
|
self.norm_cross = None
|
166
176
|
elif cross_attention_norm == "layer_norm":
|
@@ -1426,6 +1436,104 @@ class AttnProcessor2_0:
|
|
1426
1436
|
return hidden_states
|
1427
1437
|
|
1428
1438
|
|
1439
|
+
class HunyuanAttnProcessor2_0:
|
1440
|
+
r"""
|
1441
|
+
Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is
|
1442
|
+
used in the HunyuanDiT model. It applies a s normalization layer and rotary embedding on query and key vector.
|
1443
|
+
"""
|
1444
|
+
|
1445
|
+
def __init__(self):
|
1446
|
+
if not hasattr(F, "scaled_dot_product_attention"):
|
1447
|
+
raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
|
1448
|
+
|
1449
|
+
def __call__(
|
1450
|
+
self,
|
1451
|
+
attn: Attention,
|
1452
|
+
hidden_states: torch.Tensor,
|
1453
|
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
1454
|
+
attention_mask: Optional[torch.Tensor] = None,
|
1455
|
+
temb: Optional[torch.Tensor] = None,
|
1456
|
+
image_rotary_emb: Optional[torch.Tensor] = None,
|
1457
|
+
) -> torch.Tensor:
|
1458
|
+
from .embeddings import apply_rotary_emb
|
1459
|
+
|
1460
|
+
residual = hidden_states
|
1461
|
+
if attn.spatial_norm is not None:
|
1462
|
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
1463
|
+
|
1464
|
+
input_ndim = hidden_states.ndim
|
1465
|
+
|
1466
|
+
if input_ndim == 4:
|
1467
|
+
batch_size, channel, height, width = hidden_states.shape
|
1468
|
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
1469
|
+
|
1470
|
+
batch_size, sequence_length, _ = (
|
1471
|
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
1472
|
+
)
|
1473
|
+
|
1474
|
+
if attention_mask is not None:
|
1475
|
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
1476
|
+
# scaled_dot_product_attention expects attention_mask shape to be
|
1477
|
+
# (batch, heads, source_length, target_length)
|
1478
|
+
attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
|
1479
|
+
|
1480
|
+
if attn.group_norm is not None:
|
1481
|
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
1482
|
+
|
1483
|
+
query = attn.to_q(hidden_states)
|
1484
|
+
|
1485
|
+
if encoder_hidden_states is None:
|
1486
|
+
encoder_hidden_states = hidden_states
|
1487
|
+
elif attn.norm_cross:
|
1488
|
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
1489
|
+
|
1490
|
+
key = attn.to_k(encoder_hidden_states)
|
1491
|
+
value = attn.to_v(encoder_hidden_states)
|
1492
|
+
|
1493
|
+
inner_dim = key.shape[-1]
|
1494
|
+
head_dim = inner_dim // attn.heads
|
1495
|
+
|
1496
|
+
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
1497
|
+
|
1498
|
+
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
1499
|
+
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
1500
|
+
|
1501
|
+
if attn.norm_q is not None:
|
1502
|
+
query = attn.norm_q(query)
|
1503
|
+
if attn.norm_k is not None:
|
1504
|
+
key = attn.norm_k(key)
|
1505
|
+
|
1506
|
+
# Apply RoPE if needed
|
1507
|
+
if image_rotary_emb is not None:
|
1508
|
+
query = apply_rotary_emb(query, image_rotary_emb)
|
1509
|
+
if not attn.is_cross_attention:
|
1510
|
+
key = apply_rotary_emb(key, image_rotary_emb)
|
1511
|
+
|
1512
|
+
# the output of sdp = (batch, num_heads, seq_len, head_dim)
|
1513
|
+
# TODO: add support for attn.scale when we move to Torch 2.1
|
1514
|
+
hidden_states = F.scaled_dot_product_attention(
|
1515
|
+
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
|
1516
|
+
)
|
1517
|
+
|
1518
|
+
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
|
1519
|
+
hidden_states = hidden_states.to(query.dtype)
|
1520
|
+
|
1521
|
+
# linear proj
|
1522
|
+
hidden_states = attn.to_out[0](hidden_states)
|
1523
|
+
# dropout
|
1524
|
+
hidden_states = attn.to_out[1](hidden_states)
|
1525
|
+
|
1526
|
+
if input_ndim == 4:
|
1527
|
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
1528
|
+
|
1529
|
+
if attn.residual_connection:
|
1530
|
+
hidden_states = hidden_states + residual
|
1531
|
+
|
1532
|
+
hidden_states = hidden_states / attn.rescale_output_factor
|
1533
|
+
|
1534
|
+
return hidden_states
|
1535
|
+
|
1536
|
+
|
1429
1537
|
class FusedAttnProcessor2_0:
|
1430
1538
|
r"""
|
1431
1539
|
Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). It uses
|
@@ -16,10 +16,11 @@ from typing import List, Optional, Tuple, Union
|
|
16
16
|
|
17
17
|
import numpy as np
|
18
18
|
import torch
|
19
|
+
import torch.nn.functional as F
|
19
20
|
from torch import nn
|
20
21
|
|
21
22
|
from ..utils import deprecate
|
22
|
-
from .activations import get_activation
|
23
|
+
from .activations import FP32SiLU, get_activation
|
23
24
|
from .attention_processor import Attention
|
24
25
|
|
25
26
|
|
@@ -135,6 +136,7 @@ class PatchEmbed(nn.Module):
|
|
135
136
|
flatten=True,
|
136
137
|
bias=True,
|
137
138
|
interpolation_scale=1,
|
139
|
+
pos_embed_type="sincos",
|
138
140
|
):
|
139
141
|
super().__init__()
|
140
142
|
|
@@ -156,10 +158,18 @@ class PatchEmbed(nn.Module):
|
|
156
158
|
self.height, self.width = height // patch_size, width // patch_size
|
157
159
|
self.base_size = height // patch_size
|
158
160
|
self.interpolation_scale = interpolation_scale
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
161
|
+
if pos_embed_type is None:
|
162
|
+
self.pos_embed = None
|
163
|
+
elif pos_embed_type == "sincos":
|
164
|
+
pos_embed = get_2d_sincos_pos_embed(
|
165
|
+
embed_dim,
|
166
|
+
int(num_patches**0.5),
|
167
|
+
base_size=self.base_size,
|
168
|
+
interpolation_scale=self.interpolation_scale,
|
169
|
+
)
|
170
|
+
self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False)
|
171
|
+
else:
|
172
|
+
raise ValueError(f"Unsupported pos_embed_type: {pos_embed_type}")
|
163
173
|
|
164
174
|
def forward(self, latent):
|
165
175
|
height, width = latent.shape[-2] // self.patch_size, latent.shape[-1] // self.patch_size
|
@@ -169,6 +179,8 @@ class PatchEmbed(nn.Module):
|
|
169
179
|
latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC
|
170
180
|
if self.layer_norm:
|
171
181
|
latent = self.norm(latent)
|
182
|
+
if self.pos_embed is None:
|
183
|
+
return latent.to(latent.dtype)
|
172
184
|
|
173
185
|
# Interpolate positional embeddings if needed.
|
174
186
|
# (For PixArt-Alpha: https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L162C151-L162C160)
|
@@ -187,6 +199,113 @@ class PatchEmbed(nn.Module):
|
|
187
199
|
return (latent + pos_embed).to(latent.dtype)
|
188
200
|
|
189
201
|
|
202
|
+
def get_2d_rotary_pos_embed(embed_dim, crops_coords, grid_size, use_real=True):
|
203
|
+
"""
|
204
|
+
RoPE for image tokens with 2d structure.
|
205
|
+
|
206
|
+
Args:
|
207
|
+
embed_dim: (`int`):
|
208
|
+
The embedding dimension size
|
209
|
+
crops_coords (`Tuple[int]`)
|
210
|
+
The top-left and bottom-right coordinates of the crop.
|
211
|
+
grid_size (`Tuple[int]`):
|
212
|
+
The grid size of the positional embedding.
|
213
|
+
use_real (`bool`):
|
214
|
+
If True, return real part and imaginary part separately. Otherwise, return complex numbers.
|
215
|
+
|
216
|
+
Returns:
|
217
|
+
`torch.Tensor`: positional embdding with shape `( grid_size * grid_size, embed_dim/2)`.
|
218
|
+
"""
|
219
|
+
start, stop = crops_coords
|
220
|
+
grid_h = np.linspace(start[0], stop[0], grid_size[0], endpoint=False, dtype=np.float32)
|
221
|
+
grid_w = np.linspace(start[1], stop[1], grid_size[1], endpoint=False, dtype=np.float32)
|
222
|
+
grid = np.meshgrid(grid_w, grid_h) # here w goes first
|
223
|
+
grid = np.stack(grid, axis=0) # [2, W, H]
|
224
|
+
|
225
|
+
grid = grid.reshape([2, 1, *grid.shape[1:]])
|
226
|
+
pos_embed = get_2d_rotary_pos_embed_from_grid(embed_dim, grid, use_real=use_real)
|
227
|
+
return pos_embed
|
228
|
+
|
229
|
+
|
230
|
+
def get_2d_rotary_pos_embed_from_grid(embed_dim, grid, use_real=False):
|
231
|
+
assert embed_dim % 4 == 0
|
232
|
+
|
233
|
+
# use half of dimensions to encode grid_h
|
234
|
+
emb_h = get_1d_rotary_pos_embed(embed_dim // 2, grid[0].reshape(-1), use_real=use_real) # (H*W, D/4)
|
235
|
+
emb_w = get_1d_rotary_pos_embed(embed_dim // 2, grid[1].reshape(-1), use_real=use_real) # (H*W, D/4)
|
236
|
+
|
237
|
+
if use_real:
|
238
|
+
cos = torch.cat([emb_h[0], emb_w[0]], dim=1) # (H*W, D/2)
|
239
|
+
sin = torch.cat([emb_h[1], emb_w[1]], dim=1) # (H*W, D/2)
|
240
|
+
return cos, sin
|
241
|
+
else:
|
242
|
+
emb = torch.cat([emb_h, emb_w], dim=1) # (H*W, D/2)
|
243
|
+
return emb
|
244
|
+
|
245
|
+
|
246
|
+
def get_1d_rotary_pos_embed(dim: int, pos: Union[np.ndarray, int], theta: float = 10000.0, use_real=False):
    """
    Precompute the frequency tensor for complex exponentials (cis) with given dimensions.

    This function calculates a frequency tensor with complex exponentials using the given
    dimension 'dim' for the positions in 'pos'. The 'theta' parameter scales the
    frequencies. The returned complex tensor uses the complex64 data type.

    Args:
        dim (`int`): Dimension of the frequency tensor.
        pos (`np.ndarray` or `int`): Position indices for the frequency tensor. [S] or scalar
        theta (`float`, *optional*, defaults to 10000.0):
            Scaling factor for frequency computation. Defaults to 10000.0.
        use_real (`bool`, *optional*):
            If True, return real part and imaginary part separately. Otherwise, return complex numbers.

    Returns:
        `torch.Tensor`: Precomputed frequency tensor with complex exponentials. [S, D/2]
        (or a `(cos, sin)` pair of shape [S, D] each when `use_real` is True).
    """
    if isinstance(pos, int):
        pos = np.arange(pos)

    # Inverse frequencies theta^(-2i/dim) for i in [0, dim/2).
    inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))  # [D/2]
    positions = torch.from_numpy(pos).to(inv_freq.device)  # type: ignore  # [S]
    angles = torch.outer(positions, inv_freq).float()  # type: ignore  # [S, D/2]

    if use_real:
        # Duplicate each angle so cos/sin line up with interleaved (real, imag) feature pairs.
        freqs_cos = angles.cos().repeat_interleave(2, dim=1)  # [S, D]
        freqs_sin = angles.sin().repeat_interleave(2, dim=1)  # [S, D]
        return freqs_cos, freqs_sin

    # Unit-modulus complex numbers e^{i * angle}.
    return torch.polar(torch.ones_like(angles), angles)  # complex64  # [S, D/2]
|
277
|
+
|
278
|
+
|
279
|
+
def apply_rotary_emb(
    x: torch.Tensor,
    freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]],
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Apply rotary embeddings to a query or key tensor.

    The last dimension of `x` is treated as interleaved (real, imag) pairs; each pair is
    rotated by the per-position angle encoded in the provided (cos, sin) tensors. The
    rotation is computed in float32 and cast back to the input dtype.

    Args:
        x (`torch.Tensor`):
            Query or key tensor to apply rotary embeddings to, of shape [B, H, S, D].
        freqs_cis (`Tuple[torch.Tensor]`):
            Precomputed (cos, sin) frequency tensors, each of shape [S, D].

    Returns:
        `torch.Tensor`: tensor of the same shape and dtype as `x` with rotary embeddings applied.
    """
    cos_part, sin_part = freqs_cis  # [S, D] each
    # Insert broadcast dims for batch and head, and move onto the input's device.
    cos_part = cos_part[None, None].to(x.device)
    sin_part = sin_part[None, None].to(x.device)

    # Split interleaved pairs, build the 90-degree-rotated companion (-imag, real),
    # and flatten back to the original feature layout.
    real, imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1)  # [B, H, S, D//2] each
    rotated = torch.stack([-imag, real], dim=-1).flatten(3)

    # x * cos + rotate_half(x) * sin, in float32 for stability.
    return (x.float() * cos_part + rotated.float() * sin_part).to(x.dtype)
|
307
|
+
|
308
|
+
|
190
309
|
class TimestepEmbedding(nn.Module):
|
191
310
|
def __init__(
|
192
311
|
self,
|
@@ -507,6 +626,88 @@ class CombinedTimestepLabelEmbeddings(nn.Module):
|
|
507
626
|
return conditioning
|
508
627
|
|
509
628
|
|
629
|
+
class HunyuanDiTAttentionPool(nn.Module):
    # Copied from https://github.com/Tencent/HunyuanDiT/blob/cb709308d92e6c7e8d59d0dff41b74d35088db6a/hydit/modules/poolers.py#L6
    # Attention pooling: reduces a token sequence (N, L, C) to a single vector per sample
    # by letting a learned query (the mean token, plus a positional embedding) attend over
    # the sequence. Output width is `output_dim` (defaults to `embed_dim`).

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # One positional embedding per input token plus one for the prepended mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim + 1, embed_dim) / embed_dim**0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        # multi_head_attention_forward expects sequence-first layout.
        x = x.permute(1, 0, 2)  # NLC -> LNC
        # Prepend the mean token; it serves as the pooling query below.
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (L+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (L+1)NC
        # Only the mean token (x[:1]) queries; keys/values cover the full sequence.
        x, _ = F.multi_head_attention_forward(
            query=x[:1],
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )
        # (1, N, output_dim) -> (N, output_dim)
        return x.squeeze(0)
|
667
|
+
|
668
|
+
|
669
|
+
class HunyuanCombinedTimestepTextSizeStyleEmbedding(nn.Module):
    """
    Combined conditioning embedding for HunyuanDiT.

    Fuses the diffusion timestep with three extra conditions — attention-pooled text
    embeddings, sinusoidally-encoded image meta sizes, and a learned style embedding —
    into a single conditioning vector of width `embedding_dim`.
    """

    def __init__(self, embedding_dim, pooled_projection_dim=1024, seq_len=256, cross_attention_dim=2048):
        super().__init__()

        # Sinusoidal timestep features followed by an MLP projection.
        self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)

        # Attention-pool the text encoder hidden states into one vector per sample.
        self.pooler = HunyuanDiTAttentionPool(
            seq_len, cross_attention_dim, num_heads=8, output_dim=pooled_projection_dim
        )
        # Here we use a default learned embedder layer for future extension.
        self.style_embedder = nn.Embedding(1, embedding_dim)

        # 6 meta-size scalars x 256 sinusoidal dims + style + pooled text, fused by an MLP.
        extra_in_dim = 256 * 6 + embedding_dim + pooled_projection_dim
        self.extra_embedder = PixArtAlphaTextProjection(
            in_features=extra_in_dim,
            hidden_size=embedding_dim * 4,
            out_features=embedding_dim,
            act_fn="silu_fp32",
        )

    def forward(self, timestep, encoder_hidden_states, image_meta_size, style, hidden_dtype=None):
        # Timestep embedding.
        timesteps_emb = self.timestep_embedder(self.time_proj(timestep).to(dtype=hidden_dtype))  # (N, 256)

        # extra condition1: text
        pooled_projections = self.pooler(encoder_hidden_states)  # (N, 1024)

        # extra condition2: image meta size embedding
        size_emb = get_timestep_embedding(image_meta_size.view(-1), 256, True, 0)
        size_emb = size_emb.to(dtype=hidden_dtype).view(-1, 6 * 256)  # (N, 1536)

        # extra condition3: style embedding
        style_emb = self.style_embedder(style)  # (N, embedding_dim)

        # Fuse all extra conditions and add the result onto the timestep embedding.
        extra_cond = torch.cat([pooled_projections, size_emb, style_emb], dim=1)
        return timesteps_emb + self.extra_embedder(extra_cond)  # [B, D]
|
709
|
+
|
710
|
+
|
510
711
|
class TextTimeEmbedding(nn.Module):
|
511
712
|
def __init__(self, encoder_dim: int, time_embed_dim: int, num_heads: int = 64):
|
512
713
|
super().__init__()
|
@@ -793,11 +994,18 @@ class PixArtAlphaTextProjection(nn.Module):
|
|
793
994
|
Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
|
794
995
|
"""
|
795
996
|
|
796
|
-
def __init__(self, in_features, hidden_size,
|
997
|
+
    def __init__(self, in_features, hidden_size, out_features=None, act_fn="gelu_tanh"):
        super().__init__()
        # Default the output width to the hidden width so the projection behaves as a
        # same-width two-layer MLP when no explicit `out_features` is given.
        if out_features is None:
            out_features = hidden_size
        self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True)
        # Select the hidden activation; "silu_fp32" runs SiLU in float32 for numerical
        # stability (used e.g. by the HunyuanDiT extra-condition embedder).
        if act_fn == "gelu_tanh":
            self.act_1 = nn.GELU(approximate="tanh")
        elif act_fn == "silu_fp32":
            self.act_1 = FP32SiLU()
        else:
            raise ValueError(f"Unknown activation function: {act_fn}")
        self.linear_2 = nn.Linear(in_features=hidden_size, out_features=out_features, bias=True)
|
801
1009
|
|
802
1010
|
def forward(self, caption):
|
803
1011
|
hidden_states = self.linear_1(caption)
|
@@ -14,6 +14,7 @@
|
|
14
14
|
# See the License for the specific language governing permissions and
|
15
15
|
# limitations under the License.
|
16
16
|
|
17
|
+
import importlib
|
17
18
|
import inspect
|
18
19
|
import os
|
19
20
|
from collections import OrderedDict
|
@@ -32,6 +33,13 @@ from ..utils import (
|
|
32
33
|
|
33
34
|
logger = logging.get_logger(__name__)
|
34
35
|
|
36
|
+
# Maps a deprecated model class name to its replacement class name, keyed by the
# value of the model config's "norm_type" field (consumed by
# `_fetch_remapped_cls_from_config` at load time).
_CLASS_REMAPPING_DICT = {
    "Transformer2DModel": {
        "ada_norm_zero": "DiTTransformer2DModel",
        "ada_norm_single": "PixArtTransformer2DModel",
    }
}
|
42
|
+
|
35
43
|
|
36
44
|
if is_accelerate_available():
|
37
45
|
from accelerate import infer_auto_device_map
|
@@ -61,6 +69,26 @@ def _determine_device_map(model: torch.nn.Module, device_map, max_memory, torch_
|
|
61
69
|
return device_map
|
62
70
|
|
63
71
|
|
72
|
+
def _fetch_remapped_cls_from_config(config, old_class):
    """
    Return the class to actually instantiate in place of `old_class`.

    Deprecated classes (e.g. `Transformer2DModel`) are remapped to their replacement
    based on the model config's `norm_type` value, per `_CLASS_REMAPPING_DICT`.

    Args:
        config: Loaded model config mapping; `config["norm_type"]` (if present)
            selects the replacement class.
        old_class: The originally requested model class.

    Returns:
        The remapped class when a mapping exists, otherwise `old_class` unchanged.
    """
    previous_class_name = old_class.__name__
    # Fall back to empty dicts so classes without a remapping entry (or configs
    # without a "norm_type" key) resolve to `old_class` instead of raising
    # AttributeError / KeyError.
    remapped_class_name = _CLASS_REMAPPING_DICT.get(previous_class_name, {}).get(config.get("norm_type"), None)

    # Details:
    # https://github.com/huggingface/diffusers/pull/7647#discussion_r1621344818
    if remapped_class_name:
        # load diffusers library to import compatible and original scheduler
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        remapped_class = getattr(diffusers_library, remapped_class_name)
        logger.info(
            f"Changing class object to be of `{remapped_class_name}` type from `{previous_class_name}` type. "
            f"This is because `{previous_class_name}` is scheduled to be deprecated in a future version. Note that this"
            " DOESN'T affect the final results."
        )
        return remapped_class
    else:
        return old_class
|
90
|
+
|
91
|
+
|
64
92
|
def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str] = None):
|
65
93
|
"""
|
66
94
|
Reads a checkpoint file, returning properly formatted errors if they arise.
|