diffusers-0.31.0-py3-none-any.whl → diffusers-0.32.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (214)
  1. diffusers/__init__.py +66 -5
  2. diffusers/callbacks.py +56 -3
  3. diffusers/configuration_utils.py +1 -1
  4. diffusers/dependency_versions_table.py +1 -1
  5. diffusers/image_processor.py +25 -17
  6. diffusers/loaders/__init__.py +22 -3
  7. diffusers/loaders/ip_adapter.py +538 -15
  8. diffusers/loaders/lora_base.py +124 -118
  9. diffusers/loaders/lora_conversion_utils.py +318 -3
  10. diffusers/loaders/lora_pipeline.py +1688 -368
  11. diffusers/loaders/peft.py +379 -0
  12. diffusers/loaders/single_file_model.py +71 -4
  13. diffusers/loaders/single_file_utils.py +519 -9
  14. diffusers/loaders/textual_inversion.py +3 -3
  15. diffusers/loaders/transformer_flux.py +181 -0
  16. diffusers/loaders/transformer_sd3.py +89 -0
  17. diffusers/loaders/unet.py +17 -4
  18. diffusers/models/__init__.py +47 -14
  19. diffusers/models/activations.py +22 -9
  20. diffusers/models/attention.py +13 -4
  21. diffusers/models/attention_flax.py +1 -1
  22. diffusers/models/attention_processor.py +2059 -281
  23. diffusers/models/autoencoders/__init__.py +5 -0
  24. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  25. diffusers/models/autoencoders/autoencoder_kl.py +2 -1
  26. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  27. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +36 -27
  28. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  29. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  30. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  31. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +3 -10
  32. diffusers/models/autoencoders/autoencoder_tiny.py +4 -2
  33. diffusers/models/autoencoders/vae.py +18 -5
  34. diffusers/models/controlnet.py +47 -802
  35. diffusers/models/controlnet_flux.py +29 -495
  36. diffusers/models/controlnet_sd3.py +25 -379
  37. diffusers/models/controlnet_sparsectrl.py +46 -718
  38. diffusers/models/controlnets/__init__.py +23 -0
  39. diffusers/models/controlnets/controlnet.py +872 -0
  40. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +5 -5
  41. diffusers/models/controlnets/controlnet_flux.py +536 -0
  42. diffusers/models/{controlnet_hunyuan.py → controlnets/controlnet_hunyuan.py} +7 -7
  43. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  44. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  45. diffusers/models/controlnets/controlnet_union.py +832 -0
  46. diffusers/models/{controlnet_xs.py → controlnets/controlnet_xs.py} +14 -13
  47. diffusers/models/controlnets/multicontrolnet.py +183 -0
  48. diffusers/models/embeddings.py +838 -43
  49. diffusers/models/model_loading_utils.py +88 -6
  50. diffusers/models/modeling_flax_utils.py +1 -1
  51. diffusers/models/modeling_utils.py +74 -28
  52. diffusers/models/normalization.py +78 -13
  53. diffusers/models/transformers/__init__.py +5 -0
  54. diffusers/models/transformers/auraflow_transformer_2d.py +2 -2
  55. diffusers/models/transformers/cogvideox_transformer_3d.py +46 -11
  56. diffusers/models/transformers/dit_transformer_2d.py +1 -1
  57. diffusers/models/transformers/latte_transformer_3d.py +4 -4
  58. diffusers/models/transformers/pixart_transformer_2d.py +1 -1
  59. diffusers/models/transformers/sana_transformer.py +488 -0
  60. diffusers/models/transformers/stable_audio_transformer.py +1 -1
  61. diffusers/models/transformers/transformer_2d.py +1 -1
  62. diffusers/models/transformers/transformer_allegro.py +422 -0
  63. diffusers/models/transformers/transformer_cogview3plus.py +1 -1
  64. diffusers/models/transformers/transformer_flux.py +30 -9
  65. diffusers/models/transformers/transformer_hunyuan_video.py +789 -0
  66. diffusers/models/transformers/transformer_ltx.py +469 -0
  67. diffusers/models/transformers/transformer_mochi.py +499 -0
  68. diffusers/models/transformers/transformer_sd3.py +105 -17
  69. diffusers/models/transformers/transformer_temporal.py +1 -1
  70. diffusers/models/unets/unet_1d_blocks.py +1 -1
  71. diffusers/models/unets/unet_2d.py +8 -1
  72. diffusers/models/unets/unet_2d_blocks.py +88 -21
  73. diffusers/models/unets/unet_2d_condition.py +1 -1
  74. diffusers/models/unets/unet_3d_blocks.py +9 -7
  75. diffusers/models/unets/unet_motion_model.py +5 -5
  76. diffusers/models/unets/unet_spatio_temporal_condition.py +23 -0
  77. diffusers/models/unets/unet_stable_cascade.py +2 -2
  78. diffusers/models/unets/uvit_2d.py +1 -1
  79. diffusers/models/upsampling.py +8 -0
  80. diffusers/pipelines/__init__.py +34 -0
  81. diffusers/pipelines/allegro/__init__.py +48 -0
  82. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  83. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  84. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +8 -2
  85. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1 -1
  86. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +0 -6
  87. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +8 -8
  88. diffusers/pipelines/audioldm2/modeling_audioldm2.py +3 -3
  89. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +1 -8
  90. diffusers/pipelines/auto_pipeline.py +53 -6
  91. diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
  92. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +50 -22
  93. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +51 -20
  94. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +69 -21
  95. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +47 -21
  96. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +1 -1
  97. diffusers/pipelines/controlnet/__init__.py +86 -80
  98. diffusers/pipelines/controlnet/multicontrolnet.py +7 -178
  99. diffusers/pipelines/controlnet/pipeline_controlnet.py +11 -2
  100. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +1 -2
  101. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +1 -2
  102. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +1 -2
  103. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +3 -3
  104. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +1 -3
  105. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  106. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  107. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  108. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +5 -1
  109. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +53 -19
  110. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +7 -7
  111. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +31 -8
  112. diffusers/pipelines/flux/__init__.py +13 -1
  113. diffusers/pipelines/flux/modeling_flux.py +47 -0
  114. diffusers/pipelines/flux/pipeline_flux.py +204 -29
  115. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  116. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  117. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  118. diffusers/pipelines/flux/pipeline_flux_controlnet.py +49 -27
  119. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +40 -30
  120. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +78 -56
  121. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  122. diffusers/pipelines/flux/pipeline_flux_img2img.py +33 -27
  123. diffusers/pipelines/flux/pipeline_flux_inpaint.py +36 -29
  124. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  125. diffusers/pipelines/flux/pipeline_output.py +16 -0
  126. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  127. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  128. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  129. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +5 -1
  130. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +9 -9
  131. diffusers/pipelines/kolors/text_encoder.py +2 -2
  132. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
  133. diffusers/pipelines/ltx/__init__.py +50 -0
  134. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  135. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  136. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  137. diffusers/pipelines/lumina/pipeline_lumina.py +1 -8
  138. diffusers/pipelines/mochi/__init__.py +48 -0
  139. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  140. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  141. diffusers/pipelines/pag/__init__.py +7 -0
  142. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1 -2
  143. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1 -2
  144. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1 -3
  145. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1 -3
  146. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +5 -1
  147. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +6 -13
  148. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  149. diffusers/pipelines/pag/pipeline_pag_sd_3.py +6 -6
  150. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  151. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +3 -0
  152. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  153. diffusers/pipelines/pipeline_flax_utils.py +1 -1
  154. diffusers/pipelines/pipeline_loading_utils.py +25 -4
  155. diffusers/pipelines/pipeline_utils.py +35 -6
  156. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +6 -13
  157. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +6 -13
  158. diffusers/pipelines/sana/__init__.py +47 -0
  159. diffusers/pipelines/sana/pipeline_output.py +21 -0
  160. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  161. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +12 -1
  162. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +18 -3
  163. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +216 -20
  164. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +62 -9
  165. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +57 -8
  166. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +11 -1
  167. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +0 -8
  168. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +0 -8
  169. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +0 -8
  170. diffusers/pipelines/unidiffuser/modeling_uvit.py +2 -2
  171. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
  172. diffusers/quantizers/auto.py +14 -1
  173. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +4 -1
  174. diffusers/quantizers/gguf/__init__.py +1 -0
  175. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  176. diffusers/quantizers/gguf/utils.py +456 -0
  177. diffusers/quantizers/quantization_config.py +280 -2
  178. diffusers/quantizers/torchao/__init__.py +15 -0
  179. diffusers/quantizers/torchao/torchao_quantizer.py +285 -0
  180. diffusers/schedulers/scheduling_ddpm.py +2 -6
  181. diffusers/schedulers/scheduling_ddpm_parallel.py +2 -6
  182. diffusers/schedulers/scheduling_deis_multistep.py +28 -9
  183. diffusers/schedulers/scheduling_dpmsolver_multistep.py +35 -9
  184. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +35 -8
  185. diffusers/schedulers/scheduling_dpmsolver_sde.py +4 -4
  186. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +48 -10
  187. diffusers/schedulers/scheduling_euler_discrete.py +4 -4
  188. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +153 -6
  189. diffusers/schedulers/scheduling_heun_discrete.py +4 -4
  190. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +4 -4
  191. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +4 -4
  192. diffusers/schedulers/scheduling_lcm.py +2 -6
  193. diffusers/schedulers/scheduling_lms_discrete.py +4 -4
  194. diffusers/schedulers/scheduling_repaint.py +1 -1
  195. diffusers/schedulers/scheduling_sasolver.py +28 -9
  196. diffusers/schedulers/scheduling_tcd.py +2 -6
  197. diffusers/schedulers/scheduling_unipc_multistep.py +53 -8
  198. diffusers/training_utils.py +16 -2
  199. diffusers/utils/__init__.py +5 -0
  200. diffusers/utils/constants.py +1 -0
  201. diffusers/utils/dummy_pt_objects.py +180 -0
  202. diffusers/utils/dummy_torch_and_transformers_objects.py +270 -0
  203. diffusers/utils/dynamic_modules_utils.py +3 -3
  204. diffusers/utils/hub_utils.py +31 -39
  205. diffusers/utils/import_utils.py +67 -0
  206. diffusers/utils/peft_utils.py +3 -0
  207. diffusers/utils/testing_utils.py +56 -1
  208. diffusers/utils/torch_utils.py +3 -0
  209. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/METADATA +69 -69
  210. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/RECORD +214 -162
  211. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/WHEEL +1 -1
  212. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/LICENSE +0 -0
  213. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/entry_points.txt +0 -0
  214. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/top_level.txt +0 -0
diffusers/quantizers/quantization_config.py

@@ -22,15 +22,17 @@ https://github.com/huggingface/transformers/blob/52cb4034ada381fe1ffe8d428a1076e
 
 import copy
 import importlib.metadata
+import inspect
 import json
 import os
 from dataclasses import dataclass
 from enum import Enum
-from typing import Any, Dict, Union
+from functools import partial
+from typing import Any, Dict, List, Optional, Union
 
 from packaging import version
 
-from ..utils import is_torch_available, logging
+from ..utils import is_torch_available, is_torchao_available, logging
 
 
 if is_torch_available():
@@ -41,6 +43,8 @@ logger = logging.get_logger(__name__)
 
 class QuantizationMethod(str, Enum):
     BITS_AND_BYTES = "bitsandbytes"
+    GGUF = "gguf"
+    TORCHAO = "torchao"
 
 
 @dataclass
@@ -389,3 +393,277 @@ class BitsAndBytesConfig(QuantizationConfigMixin):
             serializable_config_dict[key] = value
 
         return serializable_config_dict
+
+
+@dataclass
+class GGUFQuantizationConfig(QuantizationConfigMixin):
+    """This is a config class for GGUF Quantization techniques.
+
+    Args:
+        compute_dtype: (`torch.dtype`, defaults to `torch.float32`):
+            This sets the computational type which might be different than the input type. For example, inputs might be
+            fp32, but computation can be set to bf16 for speedups.
+
+    """
+
+    def __init__(self, compute_dtype: Optional["torch.dtype"] = None):
+        self.quant_method = QuantizationMethod.GGUF
+        self.compute_dtype = compute_dtype
+        self.pre_quantized = True
+
+        # TODO: (Dhruv) Add this as an init argument when we can support loading unquantized checkpoints.
+        self.modules_to_not_convert = None
+
+        if self.compute_dtype is None:
+            self.compute_dtype = torch.float32
+
+
+@dataclass
+class TorchAoConfig(QuantizationConfigMixin):
+    """This is a config class for torchao quantization/sparsity techniques.
+
+    Args:
+        quant_type (`str`):
+            The type of quantization we want to use, currently supporting:
+                - **Integer quantization:**
+                    - Full function names: `int4_weight_only`, `int8_dynamic_activation_int4_weight`,
+                      `int8_weight_only`, `int8_dynamic_activation_int8_weight`
+                    - Shorthands: `int4wo`, `int4dq`, `int8wo`, `int8dq`
+
+                - **Floating point 8-bit quantization:**
+                    - Full function names: `float8_weight_only`, `float8_dynamic_activation_float8_weight`,
+                      `float8_static_activation_float8_weight`
+                    - Shorthands: `float8wo`, `float8wo_e5m2`, `float8wo_e4m3`, `float8dq`, `float8dq_e4m3`,
+                      `float8_e4m3_tensor`, `float8_e4m3_row`,
+
+                - **Floating point X-bit quantization:**
+                    - Full function names: `fpx_weight_only`
+                    - Shorthands: `fpX_eAwB`, where `X` is the number of bits (between `1` to `7`), `A` is the number
+                      of exponent bits and `B` is the number of mantissa bits. The constraint of `X == A + B + 1` must
+                      be satisfied for a given shorthand notation.
+
+                - **Unsigned Integer quantization:**
+                    - Full function names: `uintx_weight_only`
+                    - Shorthands: `uint1wo`, `uint2wo`, `uint3wo`, `uint4wo`, `uint5wo`, `uint6wo`, `uint7wo`
+        modules_to_not_convert (`List[str]`, *optional*, default to `None`):
+            The list of modules to not quantize, useful for quantizing models that explicitly require to have some
+            modules left in their original precision.
+        kwargs (`Dict[str, Any]`, *optional*):
+            The keyword arguments for the chosen type of quantization, for example, int4_weight_only quantization
+            supports two keyword arguments `group_size` and `inner_k_tiles` currently. More API examples and
+            documentation of arguments can be found in
+            https://github.com/pytorch/ao/tree/main/torchao/quantization#other-available-quantization-techniques
+
+    Example:
+        ```python
+        from diffusers import FluxTransformer2DModel, TorchAoConfig
+
+        quantization_config = TorchAoConfig("int8wo")
+        transformer = FluxTransformer2DModel.from_pretrained(
+            "black-forest-labs/Flux.1-Dev",
+            subfolder="transformer",
+            quantization_config=quantization_config,
+            torch_dtype=torch.bfloat16,
+        )
+        ```
+    """
+
+    def __init__(self, quant_type: str, modules_to_not_convert: Optional[List[str]] = None, **kwargs) -> None:
+        self.quant_method = QuantizationMethod.TORCHAO
+        self.quant_type = quant_type
+        self.modules_to_not_convert = modules_to_not_convert
+
+        # When we load from serialized config, "quant_type_kwargs" will be the key
+        if "quant_type_kwargs" in kwargs:
+            self.quant_type_kwargs = kwargs["quant_type_kwargs"]
+        else:
+            self.quant_type_kwargs = kwargs
+
+        TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method()
+        if self.quant_type not in TORCHAO_QUANT_TYPE_METHODS.keys():
+            raise ValueError(
+                f"Requested quantization type: {self.quant_type} is not supported yet or is incorrect. If you think the "
+                f"provided quantization type should be supported, please open an issue at https://github.com/huggingface/diffusers/issues."
+            )
+
+        method = TORCHAO_QUANT_TYPE_METHODS[self.quant_type]
+        signature = inspect.signature(method)
+        all_kwargs = {
+            param.name
+            for param in signature.parameters.values()
+            if param.kind in [inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]
+        }
+        unsupported_kwargs = list(self.quant_type_kwargs.keys() - all_kwargs)
+
+        if len(unsupported_kwargs) > 0:
+            raise ValueError(
+                f'The quantization method "{quant_type}" does not support the following keyword arguments: '
+                f"{unsupported_kwargs}. The following keywords arguments are supported: {all_kwargs}."
+            )
+
+    @classmethod
+    def _get_torchao_quant_type_to_method(cls):
+        r"""
+        Returns supported torchao quantization types with all commonly used notations.
+        """
+
+        if is_torchao_available():
+            # TODO(aryan): Support autoquant and sparsify
+            from torchao.quantization import (
+                float8_dynamic_activation_float8_weight,
+                float8_static_activation_float8_weight,
+                float8_weight_only,
+                fpx_weight_only,
+                int4_weight_only,
+                int8_dynamic_activation_int4_weight,
+                int8_dynamic_activation_int8_weight,
+                int8_weight_only,
+                uintx_weight_only,
+            )
+
+            # TODO(aryan): Add a note on how to use PerAxis and PerGroup observers
+            from torchao.quantization.observer import PerRow, PerTensor
+
+            def generate_float8dq_types(dtype: torch.dtype):
+                name = "e5m2" if dtype == torch.float8_e5m2 else "e4m3"
+                types = {}
+
+                for granularity_cls in [PerTensor, PerRow]:
+                    # Note: Activation and Weights cannot have different granularities
+                    granularity_name = "tensor" if granularity_cls is PerTensor else "row"
+                    types[f"float8dq_{name}_{granularity_name}"] = partial(
+                        float8_dynamic_activation_float8_weight,
+                        activation_dtype=dtype,
+                        weight_dtype=dtype,
+                        granularity=(granularity_cls(), granularity_cls()),
+                    )
+
+                return types
+
+            def generate_fpx_quantization_types(bits: int):
+                types = {}
+
+                for ebits in range(1, bits):
+                    mbits = bits - ebits - 1
+                    types[f"fp{bits}_e{ebits}m{mbits}"] = partial(fpx_weight_only, ebits=ebits, mbits=mbits)
+
+                non_sign_bits = bits - 1
+                default_ebits = (non_sign_bits + 1) // 2
+                default_mbits = non_sign_bits - default_ebits
+                types[f"fp{bits}"] = partial(fpx_weight_only, ebits=default_ebits, mbits=default_mbits)
+
+                return types
+
+            INT4_QUANTIZATION_TYPES = {
+                # int4 weight + bfloat16/float16 activation
+                "int4wo": int4_weight_only,
+                "int4_weight_only": int4_weight_only,
+                # int4 weight + int8 activation
+                "int4dq": int8_dynamic_activation_int4_weight,
+                "int8_dynamic_activation_int4_weight": int8_dynamic_activation_int4_weight,
+            }
+
+            INT8_QUANTIZATION_TYPES = {
+                # int8 weight + bfloat16/float16 activation
+                "int8wo": int8_weight_only,
+                "int8_weight_only": int8_weight_only,
+                # int8 weight + int8 activation
+                "int8dq": int8_dynamic_activation_int8_weight,
+                "int8_dynamic_activation_int8_weight": int8_dynamic_activation_int8_weight,
+            }
+
+            # TODO(aryan): handle torch 2.2/2.3
+            FLOATX_QUANTIZATION_TYPES = {
+                # float8_e5m2 weight + bfloat16/float16 activation
+                "float8wo": partial(float8_weight_only, weight_dtype=torch.float8_e5m2),
+                "float8_weight_only": float8_weight_only,
+                "float8wo_e5m2": partial(float8_weight_only, weight_dtype=torch.float8_e5m2),
+                # float8_e4m3 weight + bfloat16/float16 activation
+                "float8wo_e4m3": partial(float8_weight_only, weight_dtype=torch.float8_e4m3fn),
+                # float8_e5m2 weight + float8 activation (dynamic)
+                "float8dq": float8_dynamic_activation_float8_weight,
+                "float8_dynamic_activation_float8_weight": float8_dynamic_activation_float8_weight,
+                # ===== Matrix multiplication is not supported in float8_e5m2 so the following errors out.
+                # However, changing activation_dtype=torch.float8_e4m3 might work here =====
+                # "float8dq_e5m2": partial(
+                #     float8_dynamic_activation_float8_weight,
+                #     activation_dtype=torch.float8_e5m2,
+                #     weight_dtype=torch.float8_e5m2,
+                # ),
+                # **generate_float8dq_types(torch.float8_e5m2),
+                # ===== =====
+                # float8_e4m3 weight + float8 activation (dynamic)
+                "float8dq_e4m3": partial(
+                    float8_dynamic_activation_float8_weight,
+                    activation_dtype=torch.float8_e4m3fn,
+                    weight_dtype=torch.float8_e4m3fn,
+                ),
+                **generate_float8dq_types(torch.float8_e4m3fn),
+                # float8 weight + float8 activation (static)
+                "float8_static_activation_float8_weight": float8_static_activation_float8_weight,
+                # For fpx, only x <= 8 is supported by default. Other dtypes can be explored by users directly
+                # fpx weight + bfloat16/float16 activation
+                **generate_fpx_quantization_types(3),
+                **generate_fpx_quantization_types(4),
+                **generate_fpx_quantization_types(5),
+                **generate_fpx_quantization_types(6),
+                **generate_fpx_quantization_types(7),
+            }
+
+            UINTX_QUANTIZATION_DTYPES = {
+                "uintx_weight_only": uintx_weight_only,
+                "uint1wo": partial(uintx_weight_only, dtype=torch.uint1),
+                "uint2wo": partial(uintx_weight_only, dtype=torch.uint2),
+                "uint3wo": partial(uintx_weight_only, dtype=torch.uint3),
+                "uint4wo": partial(uintx_weight_only, dtype=torch.uint4),
+                "uint5wo": partial(uintx_weight_only, dtype=torch.uint5),
+                "uint6wo": partial(uintx_weight_only, dtype=torch.uint6),
+                "uint7wo": partial(uintx_weight_only, dtype=torch.uint7),
+                # "uint8wo": partial(uintx_weight_only, dtype=torch.uint8), # uint8 quantization is not supported
+            }
+
+            QUANTIZATION_TYPES = {}
+            QUANTIZATION_TYPES.update(INT4_QUANTIZATION_TYPES)
+            QUANTIZATION_TYPES.update(INT8_QUANTIZATION_TYPES)
+            QUANTIZATION_TYPES.update(UINTX_QUANTIZATION_DTYPES)
+
+            if cls._is_cuda_capability_atleast_8_9():
+                QUANTIZATION_TYPES.update(FLOATX_QUANTIZATION_TYPES)
+
+            return QUANTIZATION_TYPES
+        else:
+            raise ValueError(
+                "TorchAoConfig requires torchao to be installed, please install with `pip install torchao`"
+            )
+
+    @staticmethod
+    def _is_cuda_capability_atleast_8_9() -> bool:
+        if not torch.cuda.is_available():
+            raise RuntimeError("TorchAO requires a CUDA compatible GPU and installation of PyTorch.")
+
+        major, minor = torch.cuda.get_device_capability()
+        if major == 8:
+            return minor >= 9
+        return major >= 9
+
+    def get_apply_tensor_subclass(self):
+        TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method()
+        return TORCHAO_QUANT_TYPE_METHODS[self.quant_type](**self.quant_type_kwargs)
+
+    def __repr__(self):
+        r"""
+        Example of how this looks for `TorchAoConfig("uint_a16w4", group_size=32)`:
+
+        ```
+        TorchAoConfig {
+            "modules_to_not_convert": null,
+            "quant_method": "torchao",
+            "quant_type": "uint_a16w4",
+            "quant_type_kwargs": {
+                "group_size": 32
+            }
+        }
+        ```
+        """
+        config_dict = self.to_dict()
+        return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n"
diffusers/quantizers/torchao/__init__.py (new file)

@@ -0,0 +1,15 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .torchao_quantizer import TorchAoHfQuantizer
diffusers/quantizers/torchao/torchao_quantizer.py (new file)

@@ -0,0 +1,285 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Adapted from
+https://github.com/huggingface/transformers/blob/3a8eb74668e9c2cc563b2f5c62fac174797063e0/src/transformers/quantizers/quantizer_torchao.py
+"""
+
+import importlib
+import types
+from typing import TYPE_CHECKING, Any, Dict, List, Union
+
+from packaging import version
+
+from ...utils import get_module_from_name, is_torch_available, is_torchao_available, logging
+from ..base import DiffusersQuantizer
+
+
+if TYPE_CHECKING:
+    from ...models.modeling_utils import ModelMixin
+
+
+if is_torch_available():
+    import torch
+    import torch.nn as nn
+
+    SUPPORTED_TORCH_DTYPES_FOR_QUANTIZATION = (
+        # At the moment, only int8 is supported for integer quantization dtypes.
+        # In Torch 2.6, int1-int7 will be introduced, so this can be visited in the future
+        # to support more quantization methods, such as intx_weight_only.
+        torch.int8,
+        torch.float8_e4m3fn,
+        torch.float8_e5m2,
+        torch.uint1,
+        torch.uint2,
+        torch.uint3,
+        torch.uint4,
+        torch.uint5,
+        torch.uint6,
+        torch.uint7,
+    )
+
+if is_torchao_available():
+    from torchao.quantization import quantize_
+
+
+logger = logging.get_logger(__name__)
+
+
+def _quantization_type(weight):
+    from torchao.dtypes import AffineQuantizedTensor
+    from torchao.quantization.linear_activation_quantized_tensor import LinearActivationQuantizedTensor
+
+    if isinstance(weight, AffineQuantizedTensor):
+        return f"{weight.__class__.__name__}({weight._quantization_type()})"
+
+    if isinstance(weight, LinearActivationQuantizedTensor):
+        return f"{weight.__class__.__name__}(activation={weight.input_quant_func}, weight={_quantization_type(weight.original_weight_tensor)})"
+
+
+def _linear_extra_repr(self):
+    weight = _quantization_type(self.weight)
+    if weight is None:
+        return f"in_features={self.weight.shape[1]}, out_features={self.weight.shape[0]}, weight=None"
+    else:
+        return f"in_features={self.weight.shape[1]}, out_features={self.weight.shape[0]}, weight={weight}"
+
+
+class TorchAoHfQuantizer(DiffusersQuantizer):
+    r"""
+    Diffusers Quantizer for TorchAO: https://github.com/pytorch/ao/.
+    """
+
+    requires_calibration = False
+    required_packages = ["torchao"]
+
+    def __init__(self, quantization_config, **kwargs):
+        super().__init__(quantization_config, **kwargs)
+
+    def validate_environment(self, *args, **kwargs):
+        if not is_torchao_available():
+            raise ImportError(
+                "Loading a TorchAO quantized model requires the torchao library. Please install with `pip install torchao`"
+            )
+        torchao_version = version.parse(importlib.metadata.version("torch"))
+        if torchao_version < version.parse("0.7.0"):
+            raise RuntimeError(
+                f"The minimum required version of `torchao` is 0.7.0, but the current version is {torchao_version}. Please upgrade with `pip install -U torchao`."
+            )
+
+        self.offload = False
+
+        device_map = kwargs.get("device_map", None)
+        if isinstance(device_map, dict):
+            if "cpu" in device_map.values() or "disk" in device_map.values():
+                if self.pre_quantized:
+                    raise ValueError(
+                        "You are attempting to perform cpu/disk offload with a pre-quantized torchao model "
+                        "This is not supported yet. Please remove the CPU or disk device from the `device_map` argument."
+                    )
+                else:
+                    self.offload = True
+
+        if self.pre_quantized:
+            weights_only = kwargs.get("weights_only", None)
+            if weights_only:
+                torch_version = version.parse(importlib.metadata.version("torch"))
+                if torch_version < version.parse("2.5.0"):
+                    # TODO(aryan): TorchAO is compatible with Pytorch >= 2.2 for certain quantization types. Try to see if we can support it in future
+                    raise RuntimeError(
+                        f"In order to use TorchAO pre-quantized model, you need to have torch>=2.5.0. However, the current version is {torch_version}."
+                    )
+
+    def update_torch_dtype(self, torch_dtype):
+        quant_type = self.quantization_config.quant_type
+
+        if quant_type.startswith("int"):
+            if torch_dtype is not None and torch_dtype != torch.bfloat16:
+                logger.warning(
+                    f"You are trying to set torch_dtype to {torch_dtype} for int4/int8/uintx quantization, but "
+                    f"only bfloat16 is supported right now. Please set `torch_dtype=torch.bfloat16`."
+                )
+
+        if torch_dtype is None:
+            # We need to set the torch_dtype, otherwise we have dtype mismatch when performing the quantized linear op
+            logger.warning(
+                "Overriding `torch_dtype` with `torch_dtype=torch.bfloat16` due to requirements of `torchao` "
+                "to enable model loading in different precisions. Pass your own `torch_dtype` to specify the "
+                "dtype of the remaining non-linear layers, or pass torch_dtype=torch.bfloat16, to remove this warning."
+            )
+            torch_dtype = torch.bfloat16
+
+        return torch_dtype
+
+    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
+        quant_type = self.quantization_config.quant_type
+
+        if quant_type.startswith("int8") or quant_type.startswith("int4"):
+            # Note that int4 weights are created by packing into torch.int8, but since there is no torch.int4, we use torch.int8
+            return torch.int8
+        elif quant_type == "uintx_weight_only":
+            return self.quantization_config.quant_type_kwargs.get("dtype", torch.uint8)
+        elif quant_type.startswith("uint"):
+            return {
+                1: torch.uint1,
+                2: torch.uint2,
+                3: torch.uint3,
+                4: torch.uint4,
+                5: torch.uint5,
+                6: torch.uint6,
+                7: torch.uint7,
+            }[int(quant_type[4])]
+        elif quant_type.startswith("float") or quant_type.startswith("fp"):
+            return torch.bfloat16
+
+        if isinstance(target_dtype, SUPPORTED_TORCH_DTYPES_FOR_QUANTIZATION):
+            return target_dtype
+
+        # We need one of the supported dtypes to be selected in order for accelerate to determine
+        # the total size of modules/parameters for auto device placement.
+        possible_device_maps = ["auto", "balanced", "balanced_low_0", "sequential"]
+        raise ValueError(
+            f"You have set `device_map` as one of {possible_device_maps} on a TorchAO quantized model but a suitable target dtype "
+            f"could not be inferred. The supported target_dtypes are: {SUPPORTED_TORCH_DTYPES_FOR_QUANTIZATION}. If you think the "
+            f"dtype you are using should be supported, please open an issue at https://github.com/huggingface/diffusers/issues."
+        )
+
+    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+        max_memory = {key: val * 0.9 for key, val in max_memory.items()}
+        return max_memory
+
+    def check_if_quantized_param(
+        self,
+        model: "ModelMixin",
+        param_value: "torch.Tensor",
+        param_name: str,
+        state_dict: Dict[str, Any],
+        **kwargs,
+    ) -> bool:
+        param_device = kwargs.pop("param_device", None)
+        # Check if the param_name is not in self.modules_to_not_convert
+        if any((key + "." in param_name) or (key == param_name) for key in self.modules_to_not_convert):
+            return False
+        elif param_device == "cpu" and self.offload:
+            # We don't quantize weights that we offload
+            return False
+        else:
+            # We only quantize the weight of nn.Linear
+            module, tensor_name = get_module_from_name(model, param_name)
+            return isinstance(module, torch.nn.Linear) and (tensor_name == "weight")
+
+    def create_quantized_param(
+        self,
+        model: "ModelMixin",
+        param_value: "torch.Tensor",
+        param_name: str,
+        target_device: "torch.device",
+        state_dict: Dict[str, Any],
+        unexpected_keys: List[str],
+    ):
+        r"""
+        Each nn.Linear layer that needs to be quantized is processsed here. First, we set the value the weight tensor,
+        then we move it to the target device. Finally, we quantize the module.
+        """
+        module, tensor_name = get_module_from_name(model, param_name)
+
+        if self.pre_quantized:
+            # If we're loading pre-quantized weights, replace the repr of linear layers for pretty printing info
+            # about AffineQuantizedTensor
+            module._parameters[tensor_name] = torch.nn.Parameter(param_value.to(device=target_device))
+            if isinstance(module, nn.Linear):
+                module.extra_repr = types.MethodType(_linear_extra_repr, module)
+        else:
+            # As we perform quantization here, the repr of linear layers is that of AQT, so we don't have to do it ourselves
+            module._parameters[tensor_name] = torch.nn.Parameter(param_value).to(device=target_device)
+            quantize_(module, self.quantization_config.get_apply_tensor_subclass())
+
+    def _process_model_before_weight_loading(
+        self,
+        model: "ModelMixin",
+        device_map,
+        keep_in_fp32_modules: List[str] = [],
+        **kwargs,
+    ):
+        self.modules_to_not_convert = self.quantization_config.modules_to_not_convert
+
+        if not isinstance(self.modules_to_not_convert, list):
+            self.modules_to_not_convert = [self.modules_to_not_convert]
+
+        self.modules_to_not_convert.extend(keep_in_fp32_modules)
+
+        # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
+        if isinstance(device_map, dict) and len(device_map.keys()) > 1:
+            keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
+            self.modules_to_not_convert.extend(keys_on_cpu)
+
+        # Purge `None`.
+        # Unlike `transformers`, we don't know if we should always keep certain modules in FP32
+        # in case of diffusion transformer models. For language models and others alike, `lm_head`
+        # and tied modules are usually kept in FP32.
+        self.modules_to_not_convert = [module for module in self.modules_to_not_convert if module is not None]
+
+        model.config.quantization_config = self.quantization_config
+
+    def _process_model_after_weight_loading(self, model: "ModelMixin"):
+        return model
+
+    def is_serializable(self, safe_serialization=None):
+        # TODO(aryan): needs to be tested
+        if safe_serialization:
+            logger.warning(
+                "torchao quantized model does not support safe serialization, please set `safe_serialization` to False."
+            )
+            return False
+
+        _is_torchao_serializable = version.parse(importlib.metadata.version("huggingface_hub")) >= version.parse(
+            "0.25.0"
+        )
+
+        if not _is_torchao_serializable:
+            logger.warning("torchao quantized model is only serializable after huggingface_hub >= 0.25.0 ")
+
+        if self.offload and self.quantization_config.modules_to_not_convert is None:
+            logger.warning(
+                "The model contains offloaded modules and these modules are not quantized. We don't recommend saving the model as we won't be able to reload them."
+                "If you want to specify modules to not quantize, please specify modules_to_not_convert in the quantization_config."
+            )
+            return False
+
+        return _is_torchao_serializable
+
+    @property
+    def is_trainable(self):
+        return self.quantization_config.quant_type.startswith("int8")
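At its core, `create_quantized_param` places each eligible `nn.Linear` weight on its target device and then calls torchao's `quantize_` with the callable returned by `TorchAoConfig.get_apply_tensor_subclass()`. A minimal standalone sketch of that flow (illustrative, not from the diff; assumes torchao >= 0.7.0 and a CUDA-capable GPU, since building the quant-type table checks CUDA capability):

```python
import torch
import torch.nn as nn
from torchao.quantization import quantize_

from diffusers import TorchAoConfig

config = TorchAoConfig("int8wo")  # shorthand for int8_weight_only

# Stand-in for a single linear layer of a transformer; the quantizer repeats this per nn.Linear weight.
linear = nn.Linear(64, 64, bias=False).to(device="cuda", dtype=torch.bfloat16)

quantize_(linear, config.get_apply_tensor_subclass())
print(linear.weight)  # the weight is now a torchao quantized tensor subclass
```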
diffusers/schedulers/scheduling_ddpm.py

@@ -548,16 +548,12 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
         return self.config.num_train_timesteps
 
     def previous_timestep(self, timestep):
-        if self.custom_timesteps:
+        if self.custom_timesteps or self.num_inference_steps:
             index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0]
             if index == self.timesteps.shape[0] - 1:
                 prev_t = torch.tensor(-1)
             else:
                 prev_t = self.timesteps[index + 1]
         else:
-            num_inference_steps = (
-                self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
-            )
-            prev_t = timestep - self.config.num_train_timesteps // num_inference_steps
-
+            prev_t = timestep - 1
         return prev_t
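The net effect: once `set_timesteps()` has been called, `previous_timestep()` now follows the actual `self.timesteps` schedule instead of assuming a uniform stride, and without an inference schedule it simply returns `timestep - 1`. A quick illustrative check (not part of the diff):

```python
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)

# Training-style usage: no inference schedule has been set, so the previous timestep is t - 1.
assert scheduler.previous_timestep(999) == 998

# Inference usage: after set_timesteps(), the previous timestep is read from the schedule itself.
scheduler.set_timesteps(num_inference_steps=10)
t = scheduler.timesteps[0]  # e.g. 900 with the default spacing
assert scheduler.previous_timestep(t) == scheduler.timesteps[1]
```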
diffusers/schedulers/scheduling_ddpm_parallel.py

@@ -639,16 +639,12 @@ class DDPMParallelScheduler(SchedulerMixin, ConfigMixin):
 
     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep
     def previous_timestep(self, timestep):
-        if self.custom_timesteps:
+        if self.custom_timesteps or self.num_inference_steps:
             index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0]
             if index == self.timesteps.shape[0] - 1:
                 prev_t = torch.tensor(-1)
             else:
                 prev_t = self.timesteps[index + 1]
         else:
-            num_inference_steps = (
-                self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
-            )
-            prev_t = timestep - self.config.num_train_timesteps // num_inference_steps
-
+            prev_t = timestep - 1
         return prev_t