diffusers 0.31.0__py3-none-any.whl → 0.32.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- diffusers/__init__.py +66 -5
- diffusers/callbacks.py +56 -3
- diffusers/configuration_utils.py +1 -1
- diffusers/dependency_versions_table.py +1 -1
- diffusers/image_processor.py +25 -17
- diffusers/loaders/__init__.py +22 -3
- diffusers/loaders/ip_adapter.py +538 -15
- diffusers/loaders/lora_base.py +124 -118
- diffusers/loaders/lora_conversion_utils.py +318 -3
- diffusers/loaders/lora_pipeline.py +1688 -368
- diffusers/loaders/peft.py +379 -0
- diffusers/loaders/single_file_model.py +71 -4
- diffusers/loaders/single_file_utils.py +519 -9
- diffusers/loaders/textual_inversion.py +3 -3
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +17 -4
- diffusers/models/__init__.py +47 -14
- diffusers/models/activations.py +22 -9
- diffusers/models/attention.py +13 -4
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +2059 -281
- diffusers/models/autoencoders/__init__.py +5 -0
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +2 -1
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +36 -27
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +3 -10
- diffusers/models/autoencoders/autoencoder_tiny.py +4 -2
- diffusers/models/autoencoders/vae.py +18 -5
- diffusers/models/controlnet.py +47 -802
- diffusers/models/controlnet_flux.py +29 -495
- diffusers/models/controlnet_sd3.py +25 -379
- diffusers/models/controlnet_sparsectrl.py +46 -718
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +5 -5
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/{controlnet_hunyuan.py → controlnets/controlnet_hunyuan.py} +7 -7
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/{controlnet_xs.py → controlnets/controlnet_xs.py} +14 -13
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/embeddings.py +838 -43
- diffusers/models/model_loading_utils.py +88 -6
- diffusers/models/modeling_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +72 -26
- diffusers/models/normalization.py +78 -13
- diffusers/models/transformers/__init__.py +5 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +2 -2
- diffusers/models/transformers/cogvideox_transformer_3d.py +46 -11
- diffusers/models/transformers/dit_transformer_2d.py +1 -1
- diffusers/models/transformers/latte_transformer_3d.py +4 -4
- diffusers/models/transformers/pixart_transformer_2d.py +1 -1
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +1 -1
- diffusers/models/transformers/transformer_2d.py +1 -1
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +1 -1
- diffusers/models/transformers/transformer_flux.py +30 -9
- diffusers/models/transformers/transformer_hunyuan_video.py +789 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +105 -17
- diffusers/models/transformers/transformer_temporal.py +1 -1
- diffusers/models/unets/unet_1d_blocks.py +1 -1
- diffusers/models/unets/unet_2d.py +8 -1
- diffusers/models/unets/unet_2d_blocks.py +88 -21
- diffusers/models/unets/unet_2d_condition.py +1 -1
- diffusers/models/unets/unet_3d_blocks.py +9 -7
- diffusers/models/unets/unet_motion_model.py +5 -5
- diffusers/models/unets/unet_spatio_temporal_condition.py +23 -0
- diffusers/models/unets/unet_stable_cascade.py +2 -2
- diffusers/models/unets/uvit_2d.py +1 -1
- diffusers/models/upsampling.py +8 -0
- diffusers/pipelines/__init__.py +34 -0
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +8 -2
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1 -1
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +0 -6
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +8 -8
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +3 -3
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +1 -8
- diffusers/pipelines/auto_pipeline.py +53 -6
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +50 -22
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +51 -20
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +69 -21
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +47 -21
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +1 -1
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -178
- diffusers/pipelines/controlnet/pipeline_controlnet.py +11 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +1 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +1 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +1 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +3 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +1 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +5 -1
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +53 -19
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +7 -7
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +31 -8
- diffusers/pipelines/flux/__init__.py +13 -1
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +204 -29
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +49 -27
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +40 -30
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +78 -56
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +33 -27
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +36 -29
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +16 -0
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +5 -1
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +9 -9
- diffusers/pipelines/kolors/text_encoder.py +2 -2
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +1 -8
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/pag/__init__.py +7 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1 -2
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1 -2
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1 -3
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1 -3
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +5 -1
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +6 -13
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +6 -6
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +3 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pipeline_flax_utils.py +1 -1
- diffusers/pipelines/pipeline_loading_utils.py +25 -4
- diffusers/pipelines/pipeline_utils.py +35 -6
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +6 -13
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +6 -13
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +12 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +18 -3
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +216 -20
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +62 -9
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +57 -8
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +11 -1
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +0 -8
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +0 -8
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +0 -8
- diffusers/pipelines/unidiffuser/modeling_uvit.py +2 -2
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
- diffusers/quantizers/auto.py +14 -1
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +4 -1
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +280 -2
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
- diffusers/schedulers/scheduling_ddpm.py +2 -6
- diffusers/schedulers/scheduling_ddpm_parallel.py +2 -6
- diffusers/schedulers/scheduling_deis_multistep.py +28 -9
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +35 -9
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +35 -8
- diffusers/schedulers/scheduling_dpmsolver_sde.py +4 -4
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +48 -10
- diffusers/schedulers/scheduling_euler_discrete.py +4 -4
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +153 -6
- diffusers/schedulers/scheduling_heun_discrete.py +4 -4
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +4 -4
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +4 -4
- diffusers/schedulers/scheduling_lcm.py +2 -6
- diffusers/schedulers/scheduling_lms_discrete.py +4 -4
- diffusers/schedulers/scheduling_repaint.py +1 -1
- diffusers/schedulers/scheduling_sasolver.py +28 -9
- diffusers/schedulers/scheduling_tcd.py +2 -6
- diffusers/schedulers/scheduling_unipc_multistep.py +53 -8
- diffusers/training_utils.py +16 -2
- diffusers/utils/__init__.py +5 -0
- diffusers/utils/constants.py +1 -0
- diffusers/utils/dummy_pt_objects.py +180 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +270 -0
- diffusers/utils/dynamic_modules_utils.py +3 -3
- diffusers/utils/hub_utils.py +31 -39
- diffusers/utils/import_utils.py +67 -0
- diffusers/utils/peft_utils.py +3 -0
- diffusers/utils/testing_utils.py +56 -1
- diffusers/utils/torch_utils.py +3 -0
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/METADATA +6 -6
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/RECORD +214 -162
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/WHEEL +1 -1
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/LICENSE +0 -0
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/entry_points.txt +0 -0
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/top_level.txt +0 -0
diffusers/utils/hub_utils.py
CHANGED
@@ -455,48 +455,39 @@ def _get_checkpoint_shard_files(
     allow_patterns = [os.path.join(subfolder, p) for p in allow_patterns]
 
     ignore_patterns = ["*.json", "*.md"]
-
-
-
-
-
-            if not shard_file_present:
-                raise EnvironmentError(
-                    f"{shards_path} does not appear to have a file named {shard_file} which is "
-                    "required according to the checkpoint index."
-                )
-
-        try:
-            # Load from URL
-            cached_folder = snapshot_download(
-                pretrained_model_name_or_path,
-                cache_dir=cache_dir,
-                proxies=proxies,
-                local_files_only=local_files_only,
-                token=token,
-                revision=revision,
-                allow_patterns=allow_patterns,
-                ignore_patterns=ignore_patterns,
-                user_agent=user_agent,
-            )
-            if subfolder is not None:
-                cached_folder = os.path.join(cached_folder, subfolder)
-
-            # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so
-            # we don't have to catch them here. We have also dealt with EntryNotFoundError.
-        except HTTPError as e:
+    # `model_info` call must guarded with the above condition.
+    model_files_info = model_info(pretrained_model_name_or_path, revision=revision, token=token)
+    for shard_file in original_shard_filenames:
+        shard_file_present = any(shard_file in k.rfilename for k in model_files_info.siblings)
+        if not shard_file_present:
             raise EnvironmentError(
-                f"
-                "
-            )
+                f"{shards_path} does not appear to have a file named {shard_file} which is "
+                "required according to the checkpoint index."
+            )
 
-
-
-
-
+    try:
+        # Load from URL
+        cached_folder = snapshot_download(
+            pretrained_model_name_or_path,
+            cache_dir=cache_dir,
+            proxies=proxies,
+            local_files_only=local_files_only,
+            token=token,
+            revision=revision,
+            allow_patterns=allow_patterns,
+            ignore_patterns=ignore_patterns,
+            user_agent=user_agent,
         )
         if subfolder is not None:
-            cached_folder = os.path.join(
+            cached_folder = os.path.join(cached_folder, subfolder)
+
+        # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so
+        # we don't have to catch them here. We have also dealt with EntryNotFoundError.
+    except HTTPError as e:
+        raise EnvironmentError(
+            f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {pretrained_model_name_or_path}. You should try"
+            " again after checking your internet connection."
+        ) from e
 
     return cached_folder, sharded_metadata
 
@@ -564,7 +555,8 @@ class PushToHubMixin:
             commit_message (`str`, *optional*):
                 Message to commit while pushing. Default to `"Upload {object}"`.
             private (`bool`, *optional*):
-                Whether
+                Whether to make the repo private. If `None` (default), the repo will be public unless the
+                organization's default is private. This value is ignored if the repo already exists.
             token (`str`, *optional*):
                 The token to use as HTTP bearer authorization for remote files. The token generated when running
                 `huggingface-cli login` (stored in `~/.huggingface`).
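
The first hunk above amounts to: verify that every expected shard is listed in the repo (via `model_info`) before falling back to `snapshot_download`. A minimal standalone sketch of that verification pattern, using the public `huggingface_hub` API with a hypothetical repo id and shard list:

    from huggingface_hub import model_info

    # Hypothetical inputs; in diffusers these come from the checkpoint index file.
    repo_id = "some-org/some-sharded-model"
    original_shard_filenames = [
        "diffusion_pytorch_model-00001-of-00002.safetensors",
        "diffusion_pytorch_model-00002-of-00002.safetensors",
    ]

    # Fail early if the Hub repo does not actually contain one of the expected shards.
    files_info = model_info(repo_id)
    for shard_file in original_shard_filenames:
        if not any(shard_file in sibling.rfilename for sibling in files_info.siblings):
            raise EnvironmentError(f"{repo_id} does not appear to have a file named {shard_file}.")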
diffusers/utils/import_utils.py
CHANGED
@@ -339,6 +339,23 @@ if _imageio_available:
     except importlib_metadata.PackageNotFoundError:
         _imageio_available = False
 
+_is_gguf_available = importlib.util.find_spec("gguf") is not None
+if _is_gguf_available:
+    try:
+        _gguf_version = importlib_metadata.version("gguf")
+        logger.debug(f"Successfully import gguf version {_gguf_version}")
+    except importlib_metadata.PackageNotFoundError:
+        _is_gguf_available = False
+
+
+_is_torchao_available = importlib.util.find_spec("torchao") is not None
+if _is_torchao_available:
+    try:
+        _torchao_version = importlib_metadata.version("torchao")
+        logger.debug(f"Successfully import torchao version {_torchao_version}")
+    except importlib_metadata.PackageNotFoundError:
+        _is_torchao_available = False
+
 
 def is_torch_available():
     return _torch_available
@@ -460,6 +477,14 @@ def is_imageio_available():
     return _imageio_available
 
 
+def is_gguf_available():
+    return _is_gguf_available
+
+
+def is_torchao_available():
+    return _is_torchao_available
+
+
 # docstyle-ignore
 FLAX_IMPORT_ERROR = """
 {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
@@ -593,6 +618,16 @@ IMAGEIO_IMPORT_ERROR = """
 {0} requires the imageio library and ffmpeg but it was not found in your environment. You can install it with pip: `pip install imageio imageio-ffmpeg`
 """
 
+# docstyle-ignore
+GGUF_IMPORT_ERROR = """
+{0} requires the gguf library but it was not found in your environment. You can install it with pip: `pip install gguf`
+"""
+
+TORCHAO_IMPORT_ERROR = """
+{0} requires the torchao library but it was not found in your environment. You can install it with pip: `pip install
+torchao`
+"""
+
 BACKENDS_MAPPING = OrderedDict(
     [
         ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
@@ -618,6 +653,8 @@ BACKENDS_MAPPING = OrderedDict(
         ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)),
         ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
         ("imageio", (is_imageio_available, IMAGEIO_IMPORT_ERROR)),
+        ("gguf", (is_gguf_available, GGUF_IMPORT_ERROR)),
+        ("torchao", (is_torchao_available, TORCHAO_IMPORT_ERROR)),
     ]
 )
 
@@ -700,6 +737,21 @@ def is_torch_version(operation: str, version: str):
     return compare_versions(parse(_torch_version), operation, version)
 
 
+def is_torch_xla_version(operation: str, version: str):
+    """
+    Compares the current torch_xla version to a given reference with an operation.
+
+    Args:
+        operation (`str`):
+            A string representation of an operator, such as `">"` or `"<="`
+        version (`str`):
+            A string version of torch_xla
+    """
+    if not is_torch_xla_available:
+        return False
+    return compare_versions(parse(_torch_xla_version), operation, version)
+
+
 def is_transformers_version(operation: str, version: str):
     """
     Compares the current Transformers version to a given reference with an operation.
@@ -759,6 +811,21 @@ def is_bitsandbytes_version(operation: str, version: str):
     return compare_versions(parse(_bitsandbytes_version), operation, version)
 
 
+def is_gguf_version(operation: str, version: str):
+    """
+    Compares the current Accelerate version to a given reference with an operation.
+
+    Args:
+        operation (`str`):
+            A string representation of an operator, such as `">"` or `"<="`
+        version (`str`):
+            A version string
+    """
+    if not _is_gguf_available:
+        return False
+    return compare_versions(parse(_gguf_version), operation, version)
+
+
 def is_k_diffusion_version(operation: str, version: str):
     """
     Compares the current k-diffusion version to a given reference with an operation.
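
The gguf and torchao additions reuse the soft-dependency pattern this module applies to every optional backend: probe for the package with `importlib.util.find_spec`, confirm its installed version, and expose `is_*_available()` / `is_*_version()` helpers. A self-contained sketch of the same pattern for an arbitrary package name (the `"0.10.0"` threshold below is only illustrative):

    import importlib.metadata
    import importlib.util

    from packaging.version import parse


    def probe(package_name: str):
        """Return (available, version) without importing the package itself."""
        available = importlib.util.find_spec(package_name) is not None
        pkg_version = None
        if available:
            try:
                pkg_version = importlib.metadata.version(package_name)
            except importlib.metadata.PackageNotFoundError:
                available = False
        return available, pkg_version


    available, gguf_version = probe("gguf")
    # Version gating, analogous to is_gguf_version(">=", "0.10.0") built on compare_versions.
    if available and parse(gguf_version) >= parse("0.10.0"):
        print(f"gguf {gguf_version} is available and new enough")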
diffusers/utils/peft_utils.py
CHANGED
@@ -180,6 +180,8 @@ def get_peft_kwargs(rank_dict, network_alpha_dict, peft_state_dict, is_unet=True):
     # layer names without the Diffusers specific
     target_modules = list({name.split(".lora")[0] for name in peft_state_dict.keys()})
     use_dora = any("lora_magnitude_vector" in k for k in peft_state_dict)
+    # for now we know that the "bias" keys are only associated with `lora_B`.
+    lora_bias = any("lora_B" in k and k.endswith(".bias") for k in peft_state_dict)
 
     lora_config_kwargs = {
         "r": r,
@@ -188,6 +190,7 @@ def get_peft_kwargs(rank_dict, network_alpha_dict, peft_state_dict, is_unet=True):
         "alpha_pattern": alpha_pattern,
         "target_modules": target_modules,
         "use_dora": use_dora,
+        "lora_bias": lora_bias,
     }
     return lora_config_kwargs
 
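
The new `lora_bias` flag is inferred purely from state-dict keys, mirroring how `use_dora` is detected one line above it. A tiny illustration with made-up keys, showing when the heuristic flips to True:

    # Toy state dicts; real keys come from a loaded LoRA checkpoint.
    without_bias = {
        "down_blocks.0.attn.to_q.lora_A.weight": 0,
        "down_blocks.0.attn.to_q.lora_B.weight": 0,
    }
    with_bias = dict(without_bias, **{"down_blocks.0.attn.to_q.lora_B.bias": 0})


    def has_lora_bias(peft_state_dict):
        # Same heuristic as the diff: bias keys are only expected on `lora_B` modules.
        return any("lora_B" in k and k.endswith(".bias") for k in peft_state_dict)


    assert has_lora_bias(without_bias) is False
    assert has_lora_bias(with_bias) is True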
diffusers/utils/testing_utils.py
CHANGED
@@ -32,6 +32,7 @@ from .import_utils import (
     is_bitsandbytes_available,
     is_compel_available,
     is_flax_available,
+    is_gguf_available,
     is_note_seq_available,
     is_onnx_available,
     is_opencv_available,
@@ -39,6 +40,7 @@ from .import_utils import (
     is_timm_available,
     is_torch_available,
     is_torch_version,
+    is_torchao_available,
     is_torchsde_available,
     is_transformers_available,
 )
@@ -57,6 +59,7 @@ _required_transformers_version = is_transformers_available() and version.parse(
 ) > version.parse("4.33")
 
 USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
+BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", 40))
 
 if is_torch_available():
     import torch
@@ -310,6 +313,26 @@ def require_torch_accelerator_with_fp64(test_case):
     )
 
 
+def require_big_gpu_with_torch_cuda(test_case):
+    """
+    Decorator marking a test that requires a bigger GPU (24GB) for execution. Some example pipelines: Flux, SD3, Cog,
+    etc.
+    """
+    if not is_torch_available():
+        return unittest.skip("test requires PyTorch")(test_case)
+
+    import torch
+
+    if not torch.cuda.is_available():
+        return unittest.skip("test requires PyTorch CUDA")(test_case)
+
+    device_properties = torch.cuda.get_device_properties(0)
+    total_memory = device_properties.total_memory / (1024**3)
+    return unittest.skipUnless(
+        total_memory >= BIG_GPU_MEMORY, f"test requires a GPU with at least {BIG_GPU_MEMORY} GB memory"
+    )(test_case)
+
+
 def require_torch_accelerator_with_training(test_case):
     """Decorator marking a test that requires an accelerator with support for training."""
     return unittest.skipUnless(
@@ -352,6 +375,14 @@ def require_note_seq(test_case):
     return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)
 
 
+def require_accelerator(test_case):
+    """
+    Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no
+    hardware accelerator available.
+    """
+    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
+
+
 def require_torchsde(test_case):
     """
     Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
@@ -425,7 +456,7 @@ def require_transformers_version_greater(transformers_version):
 
 def require_accelerate_version_greater(accelerate_version):
     def decorator(test_case):
-        correct_accelerate_version =
+        correct_accelerate_version = is_accelerate_available() and version.parse(
             version.parse(importlib.metadata.version("accelerate")).base_version
         ) > version.parse(accelerate_version)
         return unittest.skipUnless(
@@ -447,6 +478,30 @@ def require_bitsandbytes_version_greater(bnb_version):
     return decorator
 
 
+def require_gguf_version_greater_or_equal(gguf_version):
+    def decorator(test_case):
+        correct_gguf_version = is_gguf_available() and version.parse(
+            version.parse(importlib.metadata.version("gguf")).base_version
+        ) >= version.parse(gguf_version)
+        return unittest.skipUnless(
+            correct_gguf_version, f"Test requires gguf with the version greater than {gguf_version}."
+        )(test_case)
+
+    return decorator
+
+
+def require_torchao_version_greater_or_equal(torchao_version):
+    def decorator(test_case):
+        correct_torchao_version = is_torchao_available() and version.parse(
+            version.parse(importlib.metadata.version("torchao")).base_version
+        ) >= version.parse(torchao_version)
+        return unittest.skipUnless(
+            correct_torchao_version, f"Test requires torchao with version greater than {torchao_version}."
+        )(test_case)
+
+    return decorator
+
+
 def deprecate_after_peft_backend(test_case):
     """
     Decorator marking a test that will be skipped after PEFT backend
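
As a usage sketch (assuming these helpers remain importable from `diffusers.utils.testing_utils`, like the existing `require_*` markers), the new decorators compose with `unittest` in the usual way; the class name and the gguf version threshold below are illustrative:

    import unittest

    from diffusers.utils.testing_utils import (
        require_big_gpu_with_torch_cuda,
        require_gguf_version_greater_or_equal,
    )


    class ExampleQuantizedPipelineTests(unittest.TestCase):
        @require_big_gpu_with_torch_cuda
        def test_needs_large_gpu(self):
            # Skipped unless a CUDA GPU with at least BIG_GPU_MEMORY GB (default 40) is present.
            ...

        @require_gguf_version_greater_or_equal("0.10.0")
        def test_needs_recent_gguf(self):
            # Skipped unless gguf >= 0.10.0 is installed.
            ...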
diffusers/utils/torch_utils.py
CHANGED
@@ -102,6 +102,9 @@ def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: int) -> "torch.Tensor":
     # Non-power of 2 images must be float32
     if (W & (W - 1)) != 0 or (H & (H - 1)) != 0:
         x = x.to(dtype=torch.float32)
+    # fftn does not support bfloat16
+    elif x.dtype == torch.bfloat16:
+        x = x.to(dtype=torch.float32)
 
     # FFT
     x_freq = fftn(x, dim=(-2, -1))
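
The inline comment carries the whole rationale: `torch.fft.fftn` does not accept bfloat16 input, so `fourier_filter` now upcasts it exactly as it already upcasts non-power-of-two images. A minimal sketch of the same cast-then-FFT pattern outside diffusers:

    import torch
    from torch.fft import fftn

    x = torch.randn(1, 4, 64, 64, dtype=torch.bfloat16)

    # Upcast before the FFT; per the comment in the diff, fftn does not support bfloat16.
    if x.dtype == torch.bfloat16:
        x = x.to(dtype=torch.float32)

    x_freq = fftn(x, dim=(-2, -1))
    print(x_freq.dtype)  # torch.complex64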
{diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: diffusers
-Version: 0.31.0
+Version: 0.32.1
 Summary: State-of-the-art diffusion in PyTorch and JAX.
 Home-page: https://github.com/huggingface/diffusers
 Author: The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)
@@ -55,7 +55,7 @@ Requires-Dist: accelerate>=0.31.0; extra == "dev"
 Requires-Dist: protobuf<4,>=3.20.3; extra == "dev"
 Requires-Dist: tensorboard; extra == "dev"
 Requires-Dist: peft>=0.6.0; extra == "dev"
-Requires-Dist: torch
+Requires-Dist: torch>=1.4; extra == "dev"
 Requires-Dist: jax>=0.4.1; extra == "dev"
 Requires-Dist: jaxlib>=0.4.1; extra == "dev"
 Requires-Dist: flax>=0.4.1; extra == "dev"
@@ -89,7 +89,7 @@ Requires-Dist: scipy; extra == "test"
 Requires-Dist: torchvision; extra == "test"
 Requires-Dist: transformers>=4.41.2; extra == "test"
 Provides-Extra: torch
-Requires-Dist: torch
+Requires-Dist: torch>=1.4; extra == "torch"
 Requires-Dist: accelerate>=0.31.0; extra == "torch"
 Provides-Extra: training
 Requires-Dist: accelerate>=0.31.0; extra == "training"
@@ -213,9 +213,9 @@ Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to l
 | **Documentation** | **What can I learn?** |
 |---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. |
-| [Loading](https://huggingface.co/docs/diffusers/using-diffusers/
-| [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/
-| [Optimization](https://huggingface.co/docs/diffusers/optimization/
+| [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
+| [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/overview_techniques) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
+| [Optimization](https://huggingface.co/docs/diffusers/optimization/fp16) | Guides for how to optimize your diffusion model to run faster and consume less memory. |
 | [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. |
 ## Contribution
 