diffusers 0.26.3__py3-none-any.whl → 0.27.0__py3-none-any.whl
- diffusers/__init__.py +20 -1
- diffusers/commands/__init__.py +1 -1
- diffusers/commands/diffusers_cli.py +1 -1
- diffusers/commands/env.py +1 -1
- diffusers/commands/fp16_safetensors.py +1 -1
- diffusers/configuration_utils.py +7 -3
- diffusers/dependency_versions_check.py +1 -1
- diffusers/dependency_versions_table.py +2 -2
- diffusers/experimental/rl/value_guided_sampling.py +1 -1
- diffusers/image_processor.py +110 -4
- diffusers/loaders/autoencoder.py +7 -8
- diffusers/loaders/controlnet.py +17 -8
- diffusers/loaders/ip_adapter.py +86 -23
- diffusers/loaders/lora.py +105 -310
- diffusers/loaders/lora_conversion_utils.py +1 -1
- diffusers/loaders/peft.py +1 -1
- diffusers/loaders/single_file.py +51 -12
- diffusers/loaders/single_file_utils.py +274 -49
- diffusers/loaders/textual_inversion.py +23 -4
- diffusers/loaders/unet.py +195 -41
- diffusers/loaders/utils.py +1 -1
- diffusers/models/__init__.py +3 -1
- diffusers/models/activations.py +9 -9
- diffusers/models/attention.py +26 -36
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +171 -114
- diffusers/models/autoencoders/autoencoder_asym_kl.py +1 -1
- diffusers/models/autoencoders/autoencoder_kl.py +3 -1
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +1 -1
- diffusers/models/autoencoders/autoencoder_tiny.py +4 -2
- diffusers/models/autoencoders/consistency_decoder_vae.py +1 -1
- diffusers/models/autoencoders/vae.py +1 -1
- diffusers/models/controlnet.py +1 -1
- diffusers/models/controlnet_flax.py +1 -1
- diffusers/models/downsampling.py +8 -12
- diffusers/models/dual_transformer_2d.py +1 -1
- diffusers/models/embeddings.py +3 -4
- diffusers/models/embeddings_flax.py +1 -1
- diffusers/models/lora.py +33 -10
- diffusers/models/modeling_flax_pytorch_utils.py +1 -1
- diffusers/models/modeling_flax_utils.py +1 -1
- diffusers/models/modeling_pytorch_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +4 -6
- diffusers/models/normalization.py +1 -1
- diffusers/models/resnet.py +31 -58
- diffusers/models/resnet_flax.py +1 -1
- diffusers/models/t5_film_transformer.py +1 -1
- diffusers/models/transformer_2d.py +1 -1
- diffusers/models/transformer_temporal.py +1 -1
- diffusers/models/transformers/dual_transformer_2d.py +1 -1
- diffusers/models/transformers/t5_film_transformer.py +1 -1
- diffusers/models/transformers/transformer_2d.py +29 -31
- diffusers/models/transformers/transformer_temporal.py +1 -1
- diffusers/models/unet_1d.py +1 -1
- diffusers/models/unet_1d_blocks.py +1 -1
- diffusers/models/unet_2d.py +1 -1
- diffusers/models/unet_2d_blocks.py +1 -1
- diffusers/models/unet_2d_condition.py +1 -1
- diffusers/models/unets/__init__.py +1 -0
- diffusers/models/unets/unet_1d.py +1 -1
- diffusers/models/unets/unet_1d_blocks.py +1 -1
- diffusers/models/unets/unet_2d.py +4 -4
- diffusers/models/unets/unet_2d_blocks.py +238 -98
- diffusers/models/unets/unet_2d_blocks_flax.py +1 -1
- diffusers/models/unets/unet_2d_condition.py +420 -323
- diffusers/models/unets/unet_2d_condition_flax.py +21 -12
- diffusers/models/unets/unet_3d_blocks.py +50 -40
- diffusers/models/unets/unet_3d_condition.py +47 -8
- diffusers/models/unets/unet_i2vgen_xl.py +75 -30
- diffusers/models/unets/unet_kandinsky3.py +1 -1
- diffusers/models/unets/unet_motion_model.py +48 -8
- diffusers/models/unets/unet_spatio_temporal_condition.py +1 -1
- diffusers/models/unets/unet_stable_cascade.py +610 -0
- diffusers/models/unets/uvit_2d.py +1 -1
- diffusers/models/upsampling.py +10 -16
- diffusers/models/vae_flax.py +1 -1
- diffusers/models/vq_model.py +1 -1
- diffusers/optimization.py +1 -1
- diffusers/pipelines/__init__.py +26 -0
- diffusers/pipelines/amused/pipeline_amused.py +1 -1
- diffusers/pipelines/amused/pipeline_amused_img2img.py +1 -1
- diffusers/pipelines/amused/pipeline_amused_inpaint.py +1 -1
- diffusers/pipelines/animatediff/pipeline_animatediff.py +162 -417
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +165 -137
- diffusers/pipelines/animatediff/pipeline_output.py +7 -6
- diffusers/pipelines/audioldm/pipeline_audioldm.py +3 -19
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +1 -1
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +3 -3
- diffusers/pipelines/auto_pipeline.py +7 -16
- diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
- diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +2 -2
- diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -1
- diffusers/pipelines/controlnet/pipeline_controlnet.py +90 -90
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +98 -90
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +92 -90
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +145 -70
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +126 -89
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +108 -96
- diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
- diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +1 -1
- diffusers/pipelines/ddim/pipeline_ddim.py +1 -1
- diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +4 -4
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +4 -4
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +5 -5
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +4 -4
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +5 -5
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +5 -5
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +10 -120
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +10 -91
- diffusers/pipelines/deprecated/audio_diffusion/mel.py +1 -1
- diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +1 -1
- diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +1 -1
- diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +1 -1
- diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +1 -1
- diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +1 -1
- diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py +1 -1
- diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py +1 -1
- diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py +1 -1
- diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +1 -1
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +5 -4
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +5 -4
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +7 -22
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +5 -39
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +5 -5
- diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +1 -1
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +31 -22
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +1 -1
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +1 -1
- diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +1 -2
- diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +1 -1
- diffusers/pipelines/dit/pipeline_dit.py +1 -1
- diffusers/pipelines/free_init_utils.py +184 -0
- diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +22 -104
- diffusers/pipelines/kandinsky/pipeline_kandinsky.py +1 -1
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +1 -1
- diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +1 -1
- diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +2 -2
- diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +2 -2
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +104 -93
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +112 -74
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
- diffusers/pipelines/ledits_pp/__init__.py +55 -0
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +1505 -0
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +1797 -0
- diffusers/pipelines/ledits_pp/pipeline_output.py +43 -0
- diffusers/pipelines/musicldm/pipeline_musicldm.py +3 -19
- diffusers/pipelines/onnx_utils.py +1 -1
- diffusers/pipelines/paint_by_example/image_encoder.py +1 -1
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +3 -3
- diffusers/pipelines/pia/pipeline_pia.py +168 -327
- diffusers/pipelines/pipeline_flax_utils.py +1 -1
- diffusers/pipelines/pipeline_loading_utils.py +508 -0
- diffusers/pipelines/pipeline_utils.py +188 -534
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +56 -10
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +3 -3
- diffusers/pipelines/shap_e/camera.py +1 -1
- diffusers/pipelines/shap_e/pipeline_shap_e.py +1 -1
- diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +1 -1
- diffusers/pipelines/shap_e/renderer.py +1 -1
- diffusers/pipelines/stable_cascade/__init__.py +50 -0
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +482 -0
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +311 -0
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +638 -0
- diffusers/pipelines/stable_diffusion/clip_image_project_model.py +1 -1
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +4 -1
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +90 -146
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +5 -4
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +4 -32
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +92 -119
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +92 -119
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +13 -59
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +3 -31
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +5 -33
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +5 -21
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +7 -21
- diffusers/pipelines/stable_diffusion/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion/safety_checker_flax.py +1 -1
- diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py +1 -1
- diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +5 -21
- diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +9 -38
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +5 -34
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +6 -35
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +7 -6
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +4 -124
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +282 -80
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +94 -46
- diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +3 -3
- diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
- diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +6 -22
- diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py +1 -1
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +96 -148
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +98 -154
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +98 -153
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +25 -87
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +89 -80
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +5 -49
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +80 -88
- diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +8 -6
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +15 -86
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +20 -93
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +5 -5
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +3 -19
- diffusers/pipelines/unclip/pipeline_unclip.py +1 -1
- diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +1 -1
- diffusers/pipelines/unclip/text_proj.py +1 -1
- diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +35 -35
- diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +4 -21
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py +2 -2
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +4 -5
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +8 -8
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +1 -1
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +2 -2
- diffusers/schedulers/__init__.py +7 -1
- diffusers/schedulers/deprecated/scheduling_karras_ve.py +1 -1
- diffusers/schedulers/deprecated/scheduling_sde_vp.py +1 -1
- diffusers/schedulers/scheduling_consistency_models.py +42 -19
- diffusers/schedulers/scheduling_ddim.py +2 -4
- diffusers/schedulers/scheduling_ddim_flax.py +13 -5
- diffusers/schedulers/scheduling_ddim_inverse.py +2 -4
- diffusers/schedulers/scheduling_ddim_parallel.py +2 -4
- diffusers/schedulers/scheduling_ddpm.py +2 -4
- diffusers/schedulers/scheduling_ddpm_flax.py +1 -1
- diffusers/schedulers/scheduling_ddpm_parallel.py +2 -4
- diffusers/schedulers/scheduling_ddpm_wuerstchen.py +1 -1
- diffusers/schedulers/scheduling_deis_multistep.py +46 -19
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +107 -21
- diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +1 -1
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +9 -7
- diffusers/schedulers/scheduling_dpmsolver_sde.py +35 -35
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +49 -18
- diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +683 -0
- diffusers/schedulers/scheduling_edm_euler.py +381 -0
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +43 -15
- diffusers/schedulers/scheduling_euler_discrete.py +42 -17
- diffusers/schedulers/scheduling_euler_discrete_flax.py +1 -1
- diffusers/schedulers/scheduling_heun_discrete.py +35 -35
- diffusers/schedulers/scheduling_ipndm.py +37 -11
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +44 -44
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +44 -44
- diffusers/schedulers/scheduling_karras_ve_flax.py +1 -1
- diffusers/schedulers/scheduling_lcm.py +38 -14
- diffusers/schedulers/scheduling_lms_discrete.py +43 -15
- diffusers/schedulers/scheduling_lms_discrete_flax.py +1 -1
- diffusers/schedulers/scheduling_pndm.py +2 -4
- diffusers/schedulers/scheduling_pndm_flax.py +2 -4
- diffusers/schedulers/scheduling_repaint.py +1 -1
- diffusers/schedulers/scheduling_sasolver.py +41 -9
- diffusers/schedulers/scheduling_sde_ve.py +1 -1
- diffusers/schedulers/scheduling_sde_ve_flax.py +1 -1
- diffusers/schedulers/scheduling_tcd.py +686 -0
- diffusers/schedulers/scheduling_unclip.py +1 -1
- diffusers/schedulers/scheduling_unipc_multistep.py +46 -19
- diffusers/schedulers/scheduling_utils.py +2 -1
- diffusers/schedulers/scheduling_utils_flax.py +1 -1
- diffusers/schedulers/scheduling_vq_diffusion.py +1 -1
- diffusers/training_utils.py +9 -2
- diffusers/utils/__init__.py +2 -1
- diffusers/utils/accelerate_utils.py +1 -1
- diffusers/utils/constants.py +1 -1
- diffusers/utils/doc_utils.py +1 -1
- diffusers/utils/dummy_pt_objects.py +60 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +75 -0
- diffusers/utils/dynamic_modules_utils.py +1 -1
- diffusers/utils/export_utils.py +3 -3
- diffusers/utils/hub_utils.py +60 -16
- diffusers/utils/import_utils.py +15 -1
- diffusers/utils/loading_utils.py +2 -0
- diffusers/utils/logging.py +1 -1
- diffusers/utils/model_card_template.md +24 -0
- diffusers/utils/outputs.py +14 -7
- diffusers/utils/peft_utils.py +1 -1
- diffusers/utils/state_dict_utils.py +1 -1
- diffusers/utils/testing_utils.py +2 -0
- diffusers/utils/torch_utils.py +1 -1
- {diffusers-0.26.3.dist-info → diffusers-0.27.0.dist-info}/METADATA +46 -46
- diffusers-0.27.0.dist-info/RECORD +399 -0
- {diffusers-0.26.3.dist-info → diffusers-0.27.0.dist-info}/WHEEL +1 -1
- diffusers-0.26.3.dist-info/RECORD +0 -384
- {diffusers-0.26.3.dist-info → diffusers-0.27.0.dist-info}/LICENSE +0 -0
- {diffusers-0.26.3.dist-info → diffusers-0.27.0.dist-info}/entry_points.txt +0 -0
- {diffusers-0.26.3.dist-info → diffusers-0.27.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -126,9 +126,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
             The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
             Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
         steps_offset (`int`, defaults to 0):
-            An offset added to the inference steps
-            `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
-            Diffusion.
+            An offset added to the inference steps, as required by some model families.
     """

     _compatibles = [e.name for e in KarrasDiffusionSchedulers]
@@ -198,6 +196,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
         self.solver_p = solver_p
         self.last_sample = None
         self._step_index = None
+        self._begin_index = None
         self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication

     @property
@@ -207,6 +206,24 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
         """
         return self._step_index

+    @property
+    def begin_index(self):
+        """
+        The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
+        """
+        return self._begin_index
+
+    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
+    def set_begin_index(self, begin_index: int = 0):
+        """
+        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
+
+        Args:
+            begin_index (`int`):
+                The begin index for the scheduler.
+        """
+        self._begin_index = begin_index
+
     def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
         """
         Sets the discrete timesteps used for the diffusion chain (to be run before inference).
@@ -269,6 +286,7 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):

         # add an index counter for schedulers that allow duplicated timesteps
         self._step_index = None
+        self._begin_index = None
         self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication

     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
@@ -698,11 +716,12 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
         x_t = x_t.to(x.dtype)
         return x_t

-    def _init_step_index(self, timestep):
-        if isinstance(timestep, torch.Tensor):
-            timestep = timestep.to(self.timesteps.device)
+    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep
+    def index_for_timestep(self, timestep, schedule_timesteps=None):
+        if schedule_timesteps is None:
+            schedule_timesteps = self.timesteps

-        index_candidates = (self.timesteps == timestep).nonzero()
+        index_candidates = (schedule_timesteps == timestep).nonzero()

         if len(index_candidates) == 0:
             step_index = len(self.timesteps) - 1
@@ -715,7 +734,20 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
         else:
             step_index = index_candidates[0].item()

-        self._step_index = step_index
+        return step_index
+
+    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index
+    def _init_step_index(self, timestep):
+        """
+        Initialize the step_index counter for the scheduler.
+        """
+
+        if self.begin_index is None:
+            if isinstance(timestep, torch.Tensor):
+                timestep = timestep.to(self.timesteps.device)
+            self._step_index = self.index_for_timestep(timestep)
+        else:
+            self._step_index = self._begin_index

     def step(
         self,
@@ -830,16 +862,11 @@ class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
         schedule_timesteps = self.timesteps.to(original_samples.device)
         timesteps = timesteps.to(original_samples.device)

-        step_indices = []
-        for timestep in timesteps:
-            index_candidates = (schedule_timesteps == timestep).nonzero()
-            if len(index_candidates) == 0:
-                step_index = len(schedule_timesteps) - 1
-            elif len(index_candidates) > 1:
-                step_index = index_candidates[1].item()
-            else:
-                step_index = index_candidates[0].item()
-            step_indices.append(step_index)
+        # begin_index is None when the scheduler is used for training
+        if self.begin_index is None:
+            step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
+        else:
+            step_indices = [self.begin_index] * timesteps.shape[0]

         sigma = sigmas[step_indices].flatten()
         while len(sigma.shape) < len(original_samples.shape):
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -45,6 +45,7 @@ class KarrasDiffusionSchedulers(Enum):
     DEISMultistepScheduler = 12
     UniPCMultistepScheduler = 13
     DPMSolverSDEScheduler = 14
+    EDMEulerScheduler = 15


 @dataclass
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2024 Microsoft and The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
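These UniPC changes mirror the `begin_index` plumbing added across the schedulers in this release: a pipeline can now tell the scheduler which timestep index denoising starts from, instead of the scheduler searching `timesteps` on the first `step()` call. A minimal sketch of the new calls (the step count and strength handling below are illustrative, not taken from this diff):

```python
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler()  # default config; other 0.27 schedulers with set_begin_index work the same way
scheduler.set_timesteps(num_inference_steps=30)

# e.g. an img2img-style caller that skips the first 40% of the schedule
strength = 0.6
t_start = int(30 * (1 - strength))

scheduler.set_begin_index(t_start)   # new in 0.27.0; step() starts counting from this index
print(scheduler.begin_index)         # -> 12
timesteps = scheduler.timesteps[t_start:]
```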
diffusers/training_utils.py
CHANGED
@@ -12,6 +12,7 @@ from .utils import (
     convert_state_dict_to_peft,
     deprecate,
     is_peft_available,
+    is_torch_npu_available,
     is_torchvision_available,
     is_transformers_available,
 )
@@ -26,6 +27,9 @@ if is_peft_available():
 if is_torchvision_available():
     from torchvision import transforms

+if is_torch_npu_available():
+    import torch_npu  # noqa: F401
+

 def set_seed(seed: int):
     """
@@ -36,8 +40,11 @@ def set_seed(seed: int):
     random.seed(seed)
     np.random.seed(seed)
     torch.manual_seed(seed)
-    torch.cuda.manual_seed_all(seed)
-    # ^^ safe to call this function even if cuda is not available
+    if is_torch_npu_available():
+        torch.npu.manual_seed_all(seed)
+    else:
+        torch.cuda.manual_seed_all(seed)
+        # ^^ safe to call this function even if cuda is not available


 def compute_snr(noise_scheduler, timesteps):
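With the `torch_npu` hook, `set_seed` seeds Ascend NPU devices when `torch_npu` is installed and otherwise falls back to the existing CUDA path. A short usage sketch:

```python
from diffusers.training_utils import set_seed
from diffusers.utils import is_torch_npu_available

set_seed(42)  # seeds python `random`, numpy, torch, and CUDA or NPU depending on what is available

# pick a device accordingly (the "npu" device string assumes torch_npu is installed)
device = "npu" if is_torch_npu_available() else "cuda"
```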
diffusers/utils/__init__.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -72,6 +72,7 @@ from .import_utils import (
     is_scipy_available,
     is_tensorboard_available,
     is_torch_available,
+    is_torch_npu_available,
     is_torch_version,
     is_torch_xla_available,
     is_torchsde_available,
diffusers/utils/constants.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diffusers/utils/dummy_pt_objects.py
CHANGED
@@ -675,6 +675,21 @@ class ScoreSdeVePipeline(metaclass=DummyObject):
         requires_backends(cls, ["torch"])


+class StableDiffusionMixin(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
 class AmusedScheduler(metaclass=DummyObject):
     _backends = ["torch"]

@@ -855,6 +870,36 @@ class DPMSolverSinglestepScheduler(metaclass=DummyObject):
         requires_backends(cls, ["torch"])


+class EDMDPMSolverMultistepScheduler(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
+class EDMEulerScheduler(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
 class EulerAncestralDiscreteScheduler(metaclass=DummyObject):
     _backends = ["torch"]

@@ -1050,6 +1095,21 @@ class ScoreSdeVeScheduler(metaclass=DummyObject):
         requires_backends(cls, ["torch"])


+class TCDScheduler(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
 class UnCLIPScheduler(metaclass=DummyObject):
     _backends = ["torch"]

diffusers/utils/dummy_torch_and_transformers_objects.py
CHANGED
@@ -647,6 +647,36 @@ class LDMTextToImagePipeline(metaclass=DummyObject):
         requires_backends(cls, ["torch", "transformers"])


+class LEditsPPPipelineStableDiffusion(metaclass=DummyObject):
+    _backends = ["torch", "transformers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+
+class LEditsPPPipelineStableDiffusionXL(metaclass=DummyObject):
+    _backends = ["torch", "transformers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+
 class MusicLDMPipeline(metaclass=DummyObject):
     _backends = ["torch", "transformers"]

@@ -752,6 +782,51 @@ class ShapEPipeline(metaclass=DummyObject):
         requires_backends(cls, ["torch", "transformers"])


+class StableCascadeCombinedPipeline(metaclass=DummyObject):
+    _backends = ["torch", "transformers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+
+class StableCascadeDecoderPipeline(metaclass=DummyObject):
+    _backends = ["torch", "transformers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+
+class StableCascadePriorPipeline(metaclass=DummyObject):
+    _backends = ["torch", "transformers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+
 class StableDiffusionAdapterPipeline(metaclass=DummyObject):
     _backends = ["torch", "transformers"]

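These dummy objects back the pipelines added in 0.27.0 (Stable Cascade, LEDITS++); they only raise a helpful install error when `torch`/`transformers` are missing. A rough sketch of running Stable Cascade with the real classes; the checkpoint ids and call arguments follow the 0.27 release examples and are assumptions, not something shown in this diff:

```python
import torch
from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline

# assumed checkpoint ids from the Stable Cascade release
prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16).to("cuda")
decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", torch_dtype=torch.float16).to("cuda")

prompt = "an astronaut riding a horse on mars"
prior_output = prior(prompt=prompt, num_inference_steps=20, guidance_scale=4.0)
image = decoder(
    image_embeddings=prior_output.image_embeddings.to(torch.float16),
    prompt=prompt,
    num_inference_steps=10,
    guidance_scale=0.0,
).images[0]
image.save("cascade.png")
```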
diffusers/utils/export_utils.py
CHANGED
@@ -28,7 +28,7 @@ def buffered_writer(raw_f):
     f.flush()


-def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str:
+def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None, fps: int = 10) -> str:
     if output_gif_path is None:
         output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name

@@ -37,7 +37,7 @@ def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) ->
         save_all=True,
         append_images=image[1:],
         optimize=False,
-        duration=
+        duration=1000 // fps,
         loop=0,
     )
     return output_gif_path
@@ -116,7 +116,7 @@ def export_to_obj(mesh, output_obj_path: str = None):


 def export_to_video(
-    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int =
+    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 10
 ) -> str:
     if is_opencv_available():
         import cv2
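Both exporters now take an `fps` argument (default 10): `export_to_gif` derives the per-frame duration as `1000 // fps`, and `export_to_video` forwards it to OpenCV. A quick sketch; the frames here are synthetic placeholders, normally they come from a video pipeline such as AnimateDiff:

```python
from PIL import Image
from diffusers.utils import export_to_gif, export_to_video

frames = [Image.new("RGB", (64, 64), (16 * i, 0, 0)) for i in range(16)]  # placeholder frames

export_to_gif(frames, "sample.gif", fps=8)    # each frame displayed for 1000 // 8 = 125 ms
export_to_video(frames, "sample.mp4", fps=8)  # requires opencv-python
```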
diffusers/utils/hub_utils.py
CHANGED
@@ -1,5 +1,5 @@
 # coding=utf-8
-# Copyright
+# Copyright 2024 The HuggingFace Inc. team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import tempfile
 import traceback
 import warnings
 from pathlib import Path
-from typing import Dict, Optional, Union
+from typing import Dict, List, Optional, Union
 from uuid import uuid4

 from huggingface_hub import (
@@ -65,7 +65,7 @@ from .logging import get_logger

 logger = get_logger(__name__)

-
+MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
 SESSION_ID = uuid4().hex

@@ -94,43 +94,87 @@ def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:


 def load_or_create_model_card(
-    repo_id_or_path:
+    repo_id_or_path: str = None,
+    token: Optional[str] = None,
+    is_pipeline: bool = False,
+    from_training: bool = False,
+    model_description: Optional[str] = None,
+    base_model: str = None,
+    prompt: Optional[str] = None,
+    license: Optional[str] = None,
+    widget: Optional[List[dict]] = None,
+    inference: Optional[bool] = None,
 ) -> ModelCard:
     """
     Loads or creates a model card.

     Args:
-
-        The
+        repo_id_or_path (`str`):
+            The repo id (e.g., "runwayml/stable-diffusion-v1-5") or local path where to look for the model card.
         token (`str`, *optional*):
             Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more details.
-        is_pipeline (`bool
+        is_pipeline (`bool`):
             Boolean to indicate if we're adding tag to a [`DiffusionPipeline`].
+        from_training: (`bool`): Boolean flag to denote if the model card is being created from a training script.
+        model_description (`str`, *optional*): Model description to add to the model card. Helpful when using
+            `load_or_create_model_card` from a training script.
+        base_model (`str`): Base model identifier (e.g., "stabilityai/stable-diffusion-xl-base-1.0"). Useful
+            for DreamBooth-like training.
+        prompt (`str`, *optional*): Prompt used for training. Useful for DreamBooth-like training.
+        license: (`str`, *optional*): License of the output artifact. Helpful when using
+            `load_or_create_model_card` from a training script.
+        widget (`List[dict]`, *optional*): Widget to accompany a gallery template.
+        inference: (`bool`, optional): Whether to turn on inference widget. Helpful when using
+            `load_or_create_model_card` from a training script.
     """
     if not is_jinja_available():
         raise ValueError(
             "Modelcard rendering is based on Jinja templates."
-            " Please make sure to have `jinja` installed before using `
+            " Please make sure to have `jinja` installed before using `load_or_create_model_card`."
             " To install it, please run `pip install Jinja2`."
         )

     try:
         # Check if the model card is present on the remote repo
         model_card = ModelCard.load(repo_id_or_path, token=token)
-    except EntryNotFoundError:
-        # Otherwise create a
-
-
-
-
+    except (EntryNotFoundError, RepositoryNotFoundError):
+        # Otherwise create a model card from template
+        if from_training:
+            model_card = ModelCard.from_template(
+                card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
+                    license=license,
+                    library_name="diffusers",
+                    inference=inference,
+                    base_model=base_model,
+                    instance_prompt=prompt,
+                    widget=widget,
+                ),
+                template_path=MODEL_CARD_TEMPLATE_PATH,
+                model_description=model_description,
+            )
+        else:
+            card_data = ModelCardData()
+            component = "pipeline" if is_pipeline else "model"
+            if model_description is None:
+                model_description = f"This is the model card of a 🧨 diffusers {component} that has been pushed on the Hub. This model card has been automatically generated."
+            model_card = ModelCard.from_template(card_data, model_description=model_description)

     return model_card


-def populate_model_card(model_card: ModelCard) -> ModelCard:
-    """Populates the `model_card` with library name."""
+def populate_model_card(model_card: ModelCard, tags: Union[str, List[str]] = None) -> ModelCard:
+    """Populates the `model_card` with library name and optional tags."""
     if model_card.data.library_name is None:
         model_card.data.library_name = "diffusers"
+
+    if tags is not None:
+        if isinstance(tags, str):
+            tags = [tags]
+        if model_card.data.tags is None:
+            model_card.data.tags = []
+        for tag in tags:
+            model_card.data.tags.append(tag)
+
     return model_card

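The expanded `load_or_create_model_card` signature is aimed at training scripts: with `from_training=True` it renders the new `model_card_template.md` with the supplied metadata, and `populate_model_card` can now attach tags. A rough sketch of how a script might use the two helpers (the repo id, prompt, and tags below are illustrative):

```python
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card

card = load_or_create_model_card(
    repo_id_or_path="my-user/sdxl-dreambooth-dog",  # illustrative repo id; rendering requires `jinja2`
    from_training=True,
    license="openrail++",
    base_model="stabilityai/stable-diffusion-xl-base-1.0",
    prompt="a photo of sks dog",
    model_description="LoRA weights trained with a diffusers example script.",
    inference=True,
)
card = populate_model_card(card, tags=["text-to-image", "lora", "diffusers"])
card.save("README.md")  # or push it with huggingface_hub's card.push_to_hub(...)
```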
diffusers/utils/import_utils.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
 """
 Import utilities: Utilities related to imports and our lazy inits.
 """
+
 import importlib.util
 import operator as op
 import os
@@ -72,6 +73,15 @@ if _torch_xla_available:
     except ImportError:
         _torch_xla_available = False

+# check whether torch_npu is available
+_torch_npu_available = importlib.util.find_spec("torch_npu") is not None
+if _torch_npu_available:
+    try:
+        _torch_npu_version = importlib_metadata.version("torch_npu")
+        logger.info(f"torch_npu version {_torch_npu_version} available.")
+    except ImportError:
+        _torch_npu_available = False
+
 _jax_version = "N/A"
 _flax_version = "N/A"
 if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
@@ -294,6 +304,10 @@ def is_torch_xla_available():
     return _torch_xla_available


+def is_torch_npu_available():
+    return _torch_npu_available
+
+
 def is_flax_available():
     return _flax_available

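`is_torch_npu_available()` follows the same lazy-detection pattern as the other backend checks: probe the module with `importlib.util.find_spec`, then confirm it is actually installed by reading its version. A generic sketch of that pattern for a hypothetical backend named `foo_backend`:

```python
import importlib.metadata
import importlib.util

# "foo_backend" is a made-up package name, used only to illustrate the pattern.
_foo_available = importlib.util.find_spec("foo_backend") is not None
if _foo_available:
    try:
        _foo_version = importlib.metadata.version("foo_backend")
    except importlib.metadata.PackageNotFoundError:
        _foo_available = False


def is_foo_available() -> bool:
    return _foo_available
```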
diffusers/utils/loading_utils.py
CHANGED
@@ -32,6 +32,8 @@ def load_image(
             raise ValueError(
                 f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {image} is not a valid path."
             )
+    elif isinstance(image, PIL.Image.Image):
+        image = image
     else:
         raise ValueError(
             "Incorrect format used for the image. Should be a URL linking to an image, a local path, or a PIL image."
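`load_image` now passes through an already-opened `PIL.Image.Image` in addition to URLs and local paths, so helper code can accept any of the three forms. Small sketch (the URL is the usual diffusers documentation image and is only illustrative):

```python
import PIL.Image

from diffusers.utils import load_image

img_from_url = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
)
img_from_pil = load_image(PIL.Image.new("RGB", (512, 512)))  # accepted directly, then normalized like the other inputs
```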
diffusers/utils/model_card_template.md
CHANGED
@@ -0,0 +1,24 @@
+---
+{{ card_data }}
+---
+
+<!-- This model card has been generated automatically according to the information the training script had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+{{ model_description }}
+
+## Intended uses & limitations
+
+#### How to use
+
+```python
+# TODO: add an example code snippet for running this diffusion pipeline
+```
+
+#### Limitations and bias
+
+[TODO: provide examples of latent issues and potential remediations]
+
+## Training details
+
+[TODO: describe the data used to train the model]
diffusers/utils/outputs.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from typing import Any, Tuple

 import numpy as np

-from .import_utils import is_torch_available
+from .import_utils import is_torch_available, is_torch_version


 def is_tensor(x) -> bool:
@@ -60,11 +60,18 @@ class BaseOutput(OrderedDict):
         if is_torch_available():
             import torch.utils._pytree

-            torch.utils._pytree._register_pytree_node(
-                cls,
-                torch.utils._pytree._dict_flatten,
-                lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
-            )
+            if is_torch_version("<", "2.2"):
+                torch.utils._pytree._register_pytree_node(
+                    cls,
+                    torch.utils._pytree._dict_flatten,
+                    lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
+                )
+            else:
+                torch.utils._pytree.register_pytree_node(
+                    cls,
+                    torch.utils._pytree._dict_flatten,
+                    lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
+                )

     def __post_init__(self) -> None:
         class_fields = fields(self)