diffusers 0.34.0__py3-none-any.whl → 0.35.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (191)
  1. diffusers/__init__.py +98 -1
  2. diffusers/callbacks.py +35 -0
  3. diffusers/commands/custom_blocks.py +134 -0
  4. diffusers/commands/diffusers_cli.py +2 -0
  5. diffusers/commands/fp16_safetensors.py +1 -1
  6. diffusers/configuration_utils.py +11 -2
  7. diffusers/dependency_versions_table.py +3 -3
  8. diffusers/guiders/__init__.py +41 -0
  9. diffusers/guiders/adaptive_projected_guidance.py +188 -0
  10. diffusers/guiders/auto_guidance.py +190 -0
  11. diffusers/guiders/classifier_free_guidance.py +141 -0
  12. diffusers/guiders/classifier_free_zero_star_guidance.py +152 -0
  13. diffusers/guiders/frequency_decoupled_guidance.py +327 -0
  14. diffusers/guiders/guider_utils.py +309 -0
  15. diffusers/guiders/perturbed_attention_guidance.py +271 -0
  16. diffusers/guiders/skip_layer_guidance.py +262 -0
  17. diffusers/guiders/smoothed_energy_guidance.py +251 -0
  18. diffusers/guiders/tangential_classifier_free_guidance.py +143 -0
  19. diffusers/hooks/__init__.py +17 -0
  20. diffusers/hooks/_common.py +56 -0
  21. diffusers/hooks/_helpers.py +293 -0
  22. diffusers/hooks/faster_cache.py +7 -6
  23. diffusers/hooks/first_block_cache.py +259 -0
  24. diffusers/hooks/group_offloading.py +292 -286
  25. diffusers/hooks/hooks.py +56 -1
  26. diffusers/hooks/layer_skip.py +263 -0
  27. diffusers/hooks/layerwise_casting.py +2 -7
  28. diffusers/hooks/pyramid_attention_broadcast.py +14 -11
  29. diffusers/hooks/smoothed_energy_guidance_utils.py +167 -0
  30. diffusers/hooks/utils.py +43 -0
  31. diffusers/loaders/__init__.py +6 -0
  32. diffusers/loaders/ip_adapter.py +255 -4
  33. diffusers/loaders/lora_base.py +63 -30
  34. diffusers/loaders/lora_conversion_utils.py +434 -53
  35. diffusers/loaders/lora_pipeline.py +834 -37
  36. diffusers/loaders/peft.py +28 -5
  37. diffusers/loaders/single_file_model.py +44 -11
  38. diffusers/loaders/single_file_utils.py +170 -2
  39. diffusers/loaders/transformer_flux.py +9 -10
  40. diffusers/loaders/transformer_sd3.py +6 -1
  41. diffusers/loaders/unet.py +22 -5
  42. diffusers/loaders/unet_loader_utils.py +5 -2
  43. diffusers/models/__init__.py +8 -0
  44. diffusers/models/attention.py +484 -3
  45. diffusers/models/attention_dispatch.py +1218 -0
  46. diffusers/models/attention_processor.py +105 -663
  47. diffusers/models/auto_model.py +2 -2
  48. diffusers/models/autoencoders/__init__.py +1 -0
  49. diffusers/models/autoencoders/autoencoder_dc.py +14 -1
  50. diffusers/models/autoencoders/autoencoder_kl.py +1 -1
  51. diffusers/models/autoencoders/autoencoder_kl_cosmos.py +3 -1
  52. diffusers/models/autoencoders/autoencoder_kl_qwenimage.py +1070 -0
  53. diffusers/models/autoencoders/autoencoder_kl_wan.py +370 -40
  54. diffusers/models/cache_utils.py +31 -9
  55. diffusers/models/controlnets/controlnet_flux.py +5 -5
  56. diffusers/models/controlnets/controlnet_union.py +4 -4
  57. diffusers/models/embeddings.py +26 -34
  58. diffusers/models/model_loading_utils.py +233 -1
  59. diffusers/models/modeling_flax_utils.py +1 -2
  60. diffusers/models/modeling_utils.py +159 -94
  61. diffusers/models/transformers/__init__.py +2 -0
  62. diffusers/models/transformers/transformer_chroma.py +16 -117
  63. diffusers/models/transformers/transformer_cogview4.py +36 -2
  64. diffusers/models/transformers/transformer_cosmos.py +11 -4
  65. diffusers/models/transformers/transformer_flux.py +372 -132
  66. diffusers/models/transformers/transformer_hunyuan_video.py +6 -0
  67. diffusers/models/transformers/transformer_ltx.py +104 -23
  68. diffusers/models/transformers/transformer_qwenimage.py +645 -0
  69. diffusers/models/transformers/transformer_skyreels_v2.py +607 -0
  70. diffusers/models/transformers/transformer_wan.py +298 -85
  71. diffusers/models/transformers/transformer_wan_vace.py +15 -21
  72. diffusers/models/unets/unet_2d_condition.py +2 -1
  73. diffusers/modular_pipelines/__init__.py +83 -0
  74. diffusers/modular_pipelines/components_manager.py +1068 -0
  75. diffusers/modular_pipelines/flux/__init__.py +66 -0
  76. diffusers/modular_pipelines/flux/before_denoise.py +689 -0
  77. diffusers/modular_pipelines/flux/decoders.py +109 -0
  78. diffusers/modular_pipelines/flux/denoise.py +227 -0
  79. diffusers/modular_pipelines/flux/encoders.py +412 -0
  80. diffusers/modular_pipelines/flux/modular_blocks.py +181 -0
  81. diffusers/modular_pipelines/flux/modular_pipeline.py +59 -0
  82. diffusers/modular_pipelines/modular_pipeline.py +2446 -0
  83. diffusers/modular_pipelines/modular_pipeline_utils.py +672 -0
  84. diffusers/modular_pipelines/node_utils.py +665 -0
  85. diffusers/modular_pipelines/stable_diffusion_xl/__init__.py +77 -0
  86. diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +1874 -0
  87. diffusers/modular_pipelines/stable_diffusion_xl/decoders.py +208 -0
  88. diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +771 -0
  89. diffusers/modular_pipelines/stable_diffusion_xl/encoders.py +887 -0
  90. diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py +380 -0
  91. diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py +365 -0
  92. diffusers/modular_pipelines/wan/__init__.py +66 -0
  93. diffusers/modular_pipelines/wan/before_denoise.py +365 -0
  94. diffusers/modular_pipelines/wan/decoders.py +105 -0
  95. diffusers/modular_pipelines/wan/denoise.py +261 -0
  96. diffusers/modular_pipelines/wan/encoders.py +242 -0
  97. diffusers/modular_pipelines/wan/modular_blocks.py +144 -0
  98. diffusers/modular_pipelines/wan/modular_pipeline.py +90 -0
  99. diffusers/pipelines/__init__.py +31 -0
  100. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +2 -3
  101. diffusers/pipelines/auto_pipeline.py +17 -13
  102. diffusers/pipelines/chroma/pipeline_chroma.py +5 -5
  103. diffusers/pipelines/chroma/pipeline_chroma_img2img.py +5 -5
  104. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +9 -8
  105. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +9 -8
  106. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +10 -9
  107. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +9 -8
  108. diffusers/pipelines/cogview4/pipeline_cogview4.py +16 -15
  109. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +3 -2
  110. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +212 -93
  111. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +7 -3
  112. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +194 -92
  113. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +1 -1
  114. diffusers/pipelines/dit/pipeline_dit.py +3 -1
  115. diffusers/pipelines/flux/__init__.py +4 -0
  116. diffusers/pipelines/flux/pipeline_flux.py +34 -26
  117. diffusers/pipelines/flux/pipeline_flux_control.py +8 -8
  118. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +1 -1
  119. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1 -1
  120. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1 -1
  121. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +1 -1
  122. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1 -1
  123. diffusers/pipelines/flux/pipeline_flux_fill.py +1 -1
  124. diffusers/pipelines/flux/pipeline_flux_img2img.py +1 -1
  125. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1 -1
  126. diffusers/pipelines/flux/pipeline_flux_kontext.py +1134 -0
  127. diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +1460 -0
  128. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +1 -1
  129. diffusers/pipelines/flux/pipeline_output.py +6 -4
  130. diffusers/pipelines/hidream_image/pipeline_hidream_image.py +5 -5
  131. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +25 -24
  132. diffusers/pipelines/ltx/pipeline_ltx.py +13 -12
  133. diffusers/pipelines/ltx/pipeline_ltx_condition.py +10 -9
  134. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +13 -12
  135. diffusers/pipelines/mochi/pipeline_mochi.py +9 -8
  136. diffusers/pipelines/pipeline_flax_utils.py +2 -2
  137. diffusers/pipelines/pipeline_loading_utils.py +24 -2
  138. diffusers/pipelines/pipeline_utils.py +22 -15
  139. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +3 -1
  140. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +20 -0
  141. diffusers/pipelines/qwenimage/__init__.py +55 -0
  142. diffusers/pipelines/qwenimage/pipeline_output.py +21 -0
  143. diffusers/pipelines/qwenimage/pipeline_qwenimage.py +726 -0
  144. diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +849 -0
  145. diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +829 -0
  146. diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +1015 -0
  147. diffusers/pipelines/sana/pipeline_sana_sprint.py +5 -5
  148. diffusers/pipelines/skyreels_v2/__init__.py +59 -0
  149. diffusers/pipelines/skyreels_v2/pipeline_output.py +20 -0
  150. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py +610 -0
  151. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py +978 -0
  152. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_i2v.py +1059 -0
  153. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_v2v.py +1063 -0
  154. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_i2v.py +745 -0
  155. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -1
  156. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -1
  157. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +1 -1
  158. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +2 -1
  159. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +6 -5
  160. diffusers/pipelines/wan/pipeline_wan.py +78 -20
  161. diffusers/pipelines/wan/pipeline_wan_i2v.py +112 -32
  162. diffusers/pipelines/wan/pipeline_wan_vace.py +1 -2
  163. diffusers/quantizers/__init__.py +1 -177
  164. diffusers/quantizers/base.py +11 -0
  165. diffusers/quantizers/gguf/utils.py +92 -3
  166. diffusers/quantizers/pipe_quant_config.py +202 -0
  167. diffusers/quantizers/torchao/torchao_quantizer.py +26 -0
  168. diffusers/schedulers/scheduling_deis_multistep.py +8 -1
  169. diffusers/schedulers/scheduling_dpmsolver_multistep.py +6 -0
  170. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +6 -0
  171. diffusers/schedulers/scheduling_scm.py +0 -1
  172. diffusers/schedulers/scheduling_unipc_multistep.py +10 -1
  173. diffusers/schedulers/scheduling_utils.py +2 -2
  174. diffusers/schedulers/scheduling_utils_flax.py +1 -1
  175. diffusers/training_utils.py +78 -0
  176. diffusers/utils/__init__.py +10 -0
  177. diffusers/utils/constants.py +4 -0
  178. diffusers/utils/dummy_pt_objects.py +312 -0
  179. diffusers/utils/dummy_torch_and_transformers_objects.py +255 -0
  180. diffusers/utils/dynamic_modules_utils.py +84 -25
  181. diffusers/utils/hub_utils.py +33 -17
  182. diffusers/utils/import_utils.py +70 -0
  183. diffusers/utils/peft_utils.py +11 -8
  184. diffusers/utils/testing_utils.py +136 -10
  185. diffusers/utils/torch_utils.py +18 -0
  186. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/METADATA +6 -6
  187. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/RECORD +191 -127
  188. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/LICENSE +0 -0
  189. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/WHEEL +0 -0
  190. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/entry_points.txt +0 -0
  191. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/top_level.txt +0 -0
diffusers/guiders/perturbed_attention_guidance.py
@@ -0,0 +1,271 @@
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import math
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+
+ import torch
+
+ from ..configuration_utils import register_to_config
+ from ..hooks import HookRegistry, LayerSkipConfig
+ from ..hooks.layer_skip import _apply_layer_skip_hook
+ from ..utils import get_logger
+ from .guider_utils import BaseGuidance, rescale_noise_cfg
+
+
+ if TYPE_CHECKING:
+     from ..modular_pipelines.modular_pipeline import BlockState
+
+
+ logger = get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ class PerturbedAttentionGuidance(BaseGuidance):
+     """
+     Perturbed Attention Guidance (PAG): https://huggingface.co/papers/2403.17377
+
+     The intuition behind PAG can be thought of as moving the CFG predicted distribution estimates further away from
+     worse versions of the conditional distribution estimates. PAG was one of the first techniques to introduce the idea
+     of using a worse version of the trained model to better guide itself in the denoising process. It perturbs the
+     attention scores of the latent stream by replacing the score matrix with an identity matrix for selectively chosen
+     layers.
+
+     Additional reading:
+     - [Guiding a Diffusion Model with a Bad Version of Itself](https://huggingface.co/papers/2406.02507)
+
+     PAG is implemented similarly to SkipLayerGuidance due to the overlap in configuration parameters and
+     implementation details.
+
+     Args:
+         guidance_scale (`float`, defaults to `7.5`):
+             The scale parameter for classifier-free guidance. Higher values result in stronger conditioning on the text
+             prompt, while lower values allow for more freedom in generation. Higher values may lead to saturation and
+             deterioration of image quality.
+         perturbed_guidance_scale (`float`, defaults to `2.8`):
+             The scale parameter for perturbed attention guidance.
+         perturbed_guidance_start (`float`, defaults to `0.01`):
+             The fraction of the total number of denoising steps after which perturbed attention guidance starts.
+         perturbed_guidance_stop (`float`, defaults to `0.2`):
+             The fraction of the total number of denoising steps after which perturbed attention guidance stops.
+         perturbed_guidance_layers (`int` or `List[int]`, *optional*):
+             The layer indices to apply perturbed attention guidance to. Can be a single integer or a list of integers.
+             If not provided, `perturbed_guidance_config` must be provided.
+         perturbed_guidance_config (`LayerSkipConfig` or `List[LayerSkipConfig]`, *optional*):
+             The configuration for the perturbed attention guidance. Can be a single `LayerSkipConfig` or a list of
+             `LayerSkipConfig`. If not provided, `perturbed_guidance_layers` must be provided.
+         guidance_rescale (`float`, defaults to `0.0`):
+             The rescale factor applied to the noise predictions. This is used to improve image quality and fix
+             overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
+             Flawed](https://huggingface.co/papers/2305.08891).
+         use_original_formulation (`bool`, defaults to `False`):
+             Whether to use the original formulation of classifier-free guidance as proposed in the paper. By default,
+             we use the diffusers-native implementation that has been in the codebase for a long time. See
+             [~guiders.classifier_free_guidance.ClassifierFreeGuidance] for more details.
+         start (`float`, defaults to `0.0`):
+             The fraction of the total number of denoising steps after which guidance starts.
+         stop (`float`, defaults to `1.0`):
+             The fraction of the total number of denoising steps after which guidance stops.
+     """
+
+     # NOTE: The current implementation does not account for joint latent conditioning (text + image/video tokens in
+     # the same latent stream). It assumes the entire latent is a single stream of visual tokens. It would be very
+     # complex to support joint latent conditioning in a model-agnostic manner without specializing the implementation
+     # for each model architecture.
+
+     _input_predictions = ["pred_cond", "pred_uncond", "pred_cond_skip"]
+
+     @register_to_config
+     def __init__(
+         self,
+         guidance_scale: float = 7.5,
+         perturbed_guidance_scale: float = 2.8,
+         perturbed_guidance_start: float = 0.01,
+         perturbed_guidance_stop: float = 0.2,
+         perturbed_guidance_layers: Optional[Union[int, List[int]]] = None,
+         perturbed_guidance_config: Union[LayerSkipConfig, List[LayerSkipConfig], Dict[str, Any]] = None,
+         guidance_rescale: float = 0.0,
+         use_original_formulation: bool = False,
+         start: float = 0.0,
+         stop: float = 1.0,
+     ):
+         super().__init__(start, stop)
+
+         self.guidance_scale = guidance_scale
+         self.skip_layer_guidance_scale = perturbed_guidance_scale
+         self.skip_layer_guidance_start = perturbed_guidance_start
+         self.skip_layer_guidance_stop = perturbed_guidance_stop
+         self.guidance_rescale = guidance_rescale
+         self.use_original_formulation = use_original_formulation
+
+         if perturbed_guidance_config is None:
+             if perturbed_guidance_layers is None:
+                 raise ValueError(
+                     "`perturbed_guidance_layers` must be provided if `perturbed_guidance_config` is not specified."
+                 )
+             perturbed_guidance_config = LayerSkipConfig(
+                 indices=perturbed_guidance_layers,
+                 fqn="auto",
+                 skip_attention=False,
+                 skip_attention_scores=True,
+                 skip_ff=False,
+             )
+         else:
+             if perturbed_guidance_layers is not None:
+                 raise ValueError(
+                     "`perturbed_guidance_layers` should not be provided if `perturbed_guidance_config` is specified."
+                 )
+
+         if isinstance(perturbed_guidance_config, dict):
+             perturbed_guidance_config = LayerSkipConfig.from_dict(perturbed_guidance_config)
+
+         if isinstance(perturbed_guidance_config, LayerSkipConfig):
+             perturbed_guidance_config = [perturbed_guidance_config]
+
+         if not isinstance(perturbed_guidance_config, list):
+             raise ValueError(
+                 "`perturbed_guidance_config` must be a `LayerSkipConfig`, a list of `LayerSkipConfig`, or a dict that can be converted to a `LayerSkipConfig`."
+             )
+         elif isinstance(next(iter(perturbed_guidance_config), None), dict):
+             perturbed_guidance_config = [LayerSkipConfig.from_dict(config) for config in perturbed_guidance_config]
+
+         for config in perturbed_guidance_config:
+             if config.skip_attention or not config.skip_attention_scores or config.skip_ff:
+                 logger.warning(
+                     "Perturbed Attention Guidance is designed to perturb attention scores, so `skip_attention` should be False, `skip_attention_scores` should be True, and `skip_ff` should be False. "
+                     "Please check your configuration. Modifying the config to match the expected values."
+                 )
+             config.skip_attention = False
+             config.skip_attention_scores = True
+             config.skip_ff = False
+
+         self.skip_layer_config = perturbed_guidance_config
+         self._skip_layer_hook_names = [f"SkipLayerGuidance_{i}" for i in range(len(self.skip_layer_config))]
+
+     # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance.prepare_models
+     def prepare_models(self, denoiser: torch.nn.Module) -> None:
+         self._count_prepared += 1
+         if self._is_slg_enabled() and self.is_conditional and self._count_prepared > 1:
+             for name, config in zip(self._skip_layer_hook_names, self.skip_layer_config):
+                 _apply_layer_skip_hook(denoiser, config, name=name)
+
+     # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance.cleanup_models
+     def cleanup_models(self, denoiser: torch.nn.Module) -> None:
+         if self._is_slg_enabled() and self.is_conditional and self._count_prepared > 1:
+             registry = HookRegistry.check_if_exists_or_initialize(denoiser)
+             # Remove the hooks after inference
+             for hook_name in self._skip_layer_hook_names:
+                 registry.remove_hook(hook_name, recurse=True)
+
+     # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance.prepare_inputs
+     def prepare_inputs(
+         self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None
+     ) -> List["BlockState"]:
+         if input_fields is None:
+             input_fields = self._input_fields
+
+         if self.num_conditions == 1:
+             tuple_indices = [0]
+             input_predictions = ["pred_cond"]
+         elif self.num_conditions == 2:
+             tuple_indices = [0, 1]
+             input_predictions = (
+                 ["pred_cond", "pred_uncond"] if self._is_cfg_enabled() else ["pred_cond", "pred_cond_skip"]
+             )
+         else:
+             tuple_indices = [0, 1, 0]
+             input_predictions = ["pred_cond", "pred_uncond", "pred_cond_skip"]
+         data_batches = []
+         for i in range(self.num_conditions):
+             data_batch = self._prepare_batch(input_fields, data, tuple_indices[i], input_predictions[i])
+             data_batches.append(data_batch)
+         return data_batches
+
+     # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance.forward
+     def forward(
+         self,
+         pred_cond: torch.Tensor,
+         pred_uncond: Optional[torch.Tensor] = None,
+         pred_cond_skip: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         pred = None
+
+         if not self._is_cfg_enabled() and not self._is_slg_enabled():
+             pred = pred_cond
+         elif not self._is_cfg_enabled():
+             shift = pred_cond - pred_cond_skip
+             pred = pred_cond if self.use_original_formulation else pred_cond_skip
+             pred = pred + self.skip_layer_guidance_scale * shift
+         elif not self._is_slg_enabled():
+             shift = pred_cond - pred_uncond
+             pred = pred_cond if self.use_original_formulation else pred_uncond
+             pred = pred + self.guidance_scale * shift
+         else:
+             shift = pred_cond - pred_uncond
+             shift_skip = pred_cond - pred_cond_skip
+             pred = pred_cond if self.use_original_formulation else pred_uncond
+             pred = pred + self.guidance_scale * shift + self.skip_layer_guidance_scale * shift_skip
+
+         if self.guidance_rescale > 0.0:
+             pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale)
+
+         return pred, {}
+
+     @property
+     # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance.is_conditional
+     def is_conditional(self) -> bool:
+         return self._count_prepared == 1 or self._count_prepared == 3
+
+     @property
+     # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance.num_conditions
+     def num_conditions(self) -> int:
+         num_conditions = 1
+         if self._is_cfg_enabled():
+             num_conditions += 1
+         if self._is_slg_enabled():
+             num_conditions += 1
+         return num_conditions
+
+     # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance._is_cfg_enabled
+     def _is_cfg_enabled(self) -> bool:
+         if not self._enabled:
+             return False
+
+         is_within_range = True
+         if self._num_inference_steps is not None:
+             skip_start_step = int(self._start * self._num_inference_steps)
+             skip_stop_step = int(self._stop * self._num_inference_steps)
+             is_within_range = skip_start_step <= self._step < skip_stop_step
+
+         is_close = False
+         if self.use_original_formulation:
+             is_close = math.isclose(self.guidance_scale, 0.0)
+         else:
+             is_close = math.isclose(self.guidance_scale, 1.0)
+
+         return is_within_range and not is_close
+
+     # Copied from diffusers.guiders.skip_layer_guidance.SkipLayerGuidance._is_slg_enabled
+     def _is_slg_enabled(self) -> bool:
+         if not self._enabled:
+             return False
+
+         is_within_range = True
+         if self._num_inference_steps is not None:
+             skip_start_step = int(self.skip_layer_guidance_start * self._num_inference_steps)
+             skip_stop_step = int(self.skip_layer_guidance_stop * self._num_inference_steps)
+             is_within_range = skip_start_step < self._step < skip_stop_step
+
+         is_zero = math.isclose(self.skip_layer_guidance_scale, 0.0)
+
+         return is_within_range and not is_zero
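To make the perturbation concrete: the hunk above replaces the attention score matrix with an identity matrix in the selected layers, so each token attends only to itself and the attention output collapses to the value projection. Below is a minimal, self-contained sketch of that idea using toy shapes and a single head; it illustrates the concept only and is not the library's internal hook code.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
seq_len, dim = 4, 8
q, k, v = (torch.randn(seq_len, dim) for _ in range(3))

# Standard attention: scores = softmax(Q @ K^T / sqrt(d)), output = scores @ V.
scores = F.softmax(q @ k.T / dim**0.5, dim=-1)
standard_out = scores @ v

# PAG's perturbed branch: the score matrix becomes the identity, so the
# output is just V -- every token attends only to itself.
perturbed_out = torch.eye(seq_len) @ v
assert torch.allclose(perturbed_out, v)

# The two outputs differ; in `forward` above (diffusers-native formulation),
# the final prediction is pushed away from the degraded branch by
# skip_layer_guidance_scale * (pred_cond - pred_cond_skip).
print((standard_out - perturbed_out).abs().mean())
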
diffusers/guiders/skip_layer_guidance.py
@@ -0,0 +1,262 @@
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import math
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+
+ import torch
+
+ from ..configuration_utils import register_to_config
+ from ..hooks import HookRegistry, LayerSkipConfig
+ from ..hooks.layer_skip import _apply_layer_skip_hook
+ from .guider_utils import BaseGuidance, rescale_noise_cfg
+
+
+ if TYPE_CHECKING:
+     from ..modular_pipelines.modular_pipeline import BlockState
+
+
+ class SkipLayerGuidance(BaseGuidance):
+     """
+     Skip Layer Guidance (SLG): https://github.com/Stability-AI/sd3.5
+
+     Spatio-Temporal Guidance (STG): https://huggingface.co/papers/2411.18664
+
+     SLG was introduced by StabilityAI for improving structure and anatomy coherence in generated images. It works by
+     skipping the forward pass of specified transformer blocks during the denoising process on an additional conditional
+     batch of data, apart from the conditional and unconditional batches already used in CFG
+     ([~guiders.classifier_free_guidance.ClassifierFreeGuidance]), and then scaling and shifting the CFG predictions
+     based on the difference between the conditional predictions with and without layer skipping.
+
+     The intuition behind SLG can be thought of as moving the CFG predicted distribution estimates further away from
+     worse versions of the conditional distribution estimates (because skipping layers is equivalent to using a worse
+     version of the model for the conditional prediction).
+
+     STG is a follow-up work that combines ideas from SLG, PAG, and similar techniques to improve generation quality
+     in video diffusion models.
+
+     Additional reading:
+     - [Guiding a Diffusion Model with a Bad Version of Itself](https://huggingface.co/papers/2406.02507)
+
+     The values for `skip_layer_guidance_scale`, `skip_layer_guidance_start`, and `skip_layer_guidance_stop` are
+     defaulted to the recommendations by StabilityAI for Stable Diffusion 3.5 Medium.
+
+     Args:
+         guidance_scale (`float`, defaults to `7.5`):
+             The scale parameter for classifier-free guidance. Higher values result in stronger conditioning on the text
+             prompt, while lower values allow for more freedom in generation. Higher values may lead to saturation and
+             deterioration of image quality.
+         skip_layer_guidance_scale (`float`, defaults to `2.8`):
+             The scale parameter for skip layer guidance. Anatomy and structure coherence may improve with higher
+             values, but it may also lead to overexposure and saturation.
+         skip_layer_guidance_start (`float`, defaults to `0.01`):
+             The fraction of the total number of denoising steps after which skip layer guidance starts.
+         skip_layer_guidance_stop (`float`, defaults to `0.2`):
+             The fraction of the total number of denoising steps after which skip layer guidance stops.
+         skip_layer_guidance_layers (`int` or `List[int]`, *optional*):
+             The layer indices to apply skip layer guidance to. Can be a single integer or a list of integers. If not
+             provided, `skip_layer_config` must be provided. The recommended values are `[7, 8, 9]` for Stable Diffusion
+             3.5 Medium.
+         skip_layer_config (`LayerSkipConfig` or `List[LayerSkipConfig]`, *optional*):
+             The configuration for the skip layer guidance. Can be a single `LayerSkipConfig` or a list of
+             `LayerSkipConfig`. If not provided, `skip_layer_guidance_layers` must be provided.
+         guidance_rescale (`float`, defaults to `0.0`):
+             The rescale factor applied to the noise predictions. This is used to improve image quality and fix
+             overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
+             Flawed](https://huggingface.co/papers/2305.08891).
+         use_original_formulation (`bool`, defaults to `False`):
+             Whether to use the original formulation of classifier-free guidance as proposed in the paper. By default,
+             we use the diffusers-native implementation that has been in the codebase for a long time. See
+             [~guiders.classifier_free_guidance.ClassifierFreeGuidance] for more details.
+         start (`float`, defaults to `0.0`):
+             The fraction of the total number of denoising steps after which guidance starts.
+         stop (`float`, defaults to `1.0`):
+             The fraction of the total number of denoising steps after which guidance stops.
+     """
+
+     _input_predictions = ["pred_cond", "pred_uncond", "pred_cond_skip"]
+
+     @register_to_config
+     def __init__(
+         self,
+         guidance_scale: float = 7.5,
+         skip_layer_guidance_scale: float = 2.8,
+         skip_layer_guidance_start: float = 0.01,
+         skip_layer_guidance_stop: float = 0.2,
+         skip_layer_guidance_layers: Optional[Union[int, List[int]]] = None,
+         skip_layer_config: Union[LayerSkipConfig, List[LayerSkipConfig], Dict[str, Any]] = None,
+         guidance_rescale: float = 0.0,
+         use_original_formulation: bool = False,
+         start: float = 0.0,
+         stop: float = 1.0,
+     ):
+         super().__init__(start, stop)
+
+         self.guidance_scale = guidance_scale
+         self.skip_layer_guidance_scale = skip_layer_guidance_scale
+         self.skip_layer_guidance_start = skip_layer_guidance_start
+         self.skip_layer_guidance_stop = skip_layer_guidance_stop
+         self.guidance_rescale = guidance_rescale
+         self.use_original_formulation = use_original_formulation
+
+         if not (0.0 <= skip_layer_guidance_start < 1.0):
+             raise ValueError(
+                 f"Expected `skip_layer_guidance_start` to be between 0.0 and 1.0, but got {skip_layer_guidance_start}."
+             )
+         if not (skip_layer_guidance_start <= skip_layer_guidance_stop <= 1.0):
+             raise ValueError(
+                 f"Expected `skip_layer_guidance_stop` to be between `skip_layer_guidance_start` and 1.0, but got {skip_layer_guidance_stop}."
+             )
+
+         if skip_layer_guidance_layers is None and skip_layer_config is None:
+             raise ValueError(
+                 "Either `skip_layer_guidance_layers` or `skip_layer_config` must be provided to enable Skip Layer Guidance."
+             )
+         if skip_layer_guidance_layers is not None and skip_layer_config is not None:
+             raise ValueError("Only one of `skip_layer_guidance_layers` or `skip_layer_config` can be provided.")
+
+         if skip_layer_guidance_layers is not None:
+             if isinstance(skip_layer_guidance_layers, int):
+                 skip_layer_guidance_layers = [skip_layer_guidance_layers]
+             if not isinstance(skip_layer_guidance_layers, list):
+                 raise ValueError(
+                     f"Expected `skip_layer_guidance_layers` to be an int or a list of ints, but got {type(skip_layer_guidance_layers)}."
+                 )
+             skip_layer_config = [LayerSkipConfig(layer, fqn="auto") for layer in skip_layer_guidance_layers]
+
+         if isinstance(skip_layer_config, dict):
+             skip_layer_config = LayerSkipConfig.from_dict(skip_layer_config)
+
+         if isinstance(skip_layer_config, LayerSkipConfig):
+             skip_layer_config = [skip_layer_config]
+
+         if not isinstance(skip_layer_config, list):
+             raise ValueError(
+                 f"Expected `skip_layer_config` to be a LayerSkipConfig or a list of LayerSkipConfig, but got {type(skip_layer_config)}."
+             )
+         elif isinstance(next(iter(skip_layer_config), None), dict):
+             skip_layer_config = [LayerSkipConfig.from_dict(config) for config in skip_layer_config]
+
+         self.skip_layer_config = skip_layer_config
+         self._skip_layer_hook_names = [f"SkipLayerGuidance_{i}" for i in range(len(self.skip_layer_config))]
+
+     def prepare_models(self, denoiser: torch.nn.Module) -> None:
+         self._count_prepared += 1
+         if self._is_slg_enabled() and self.is_conditional and self._count_prepared > 1:
+             for name, config in zip(self._skip_layer_hook_names, self.skip_layer_config):
+                 _apply_layer_skip_hook(denoiser, config, name=name)
+
+     def cleanup_models(self, denoiser: torch.nn.Module) -> None:
+         if self._is_slg_enabled() and self.is_conditional and self._count_prepared > 1:
+             registry = HookRegistry.check_if_exists_or_initialize(denoiser)
+             # Remove the hooks after inference
+             for hook_name in self._skip_layer_hook_names:
+                 registry.remove_hook(hook_name, recurse=True)
+
+     def prepare_inputs(
+         self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None
+     ) -> List["BlockState"]:
+         if input_fields is None:
+             input_fields = self._input_fields
+
+         if self.num_conditions == 1:
+             tuple_indices = [0]
+             input_predictions = ["pred_cond"]
+         elif self.num_conditions == 2:
+             tuple_indices = [0, 1]
+             input_predictions = (
+                 ["pred_cond", "pred_uncond"] if self._is_cfg_enabled() else ["pred_cond", "pred_cond_skip"]
+             )
+         else:
+             tuple_indices = [0, 1, 0]
+             input_predictions = ["pred_cond", "pred_uncond", "pred_cond_skip"]
+         data_batches = []
+         for i in range(self.num_conditions):
+             data_batch = self._prepare_batch(input_fields, data, tuple_indices[i], input_predictions[i])
+             data_batches.append(data_batch)
+         return data_batches
+
+     def forward(
+         self,
+         pred_cond: torch.Tensor,
+         pred_uncond: Optional[torch.Tensor] = None,
+         pred_cond_skip: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         pred = None
+
+         if not self._is_cfg_enabled() and not self._is_slg_enabled():
+             pred = pred_cond
+         elif not self._is_cfg_enabled():
+             shift = pred_cond - pred_cond_skip
+             pred = pred_cond if self.use_original_formulation else pred_cond_skip
+             pred = pred + self.skip_layer_guidance_scale * shift
+         elif not self._is_slg_enabled():
+             shift = pred_cond - pred_uncond
+             pred = pred_cond if self.use_original_formulation else pred_uncond
+             pred = pred + self.guidance_scale * shift
+         else:
+             shift = pred_cond - pred_uncond
+             shift_skip = pred_cond - pred_cond_skip
+             pred = pred_cond if self.use_original_formulation else pred_uncond
+             pred = pred + self.guidance_scale * shift + self.skip_layer_guidance_scale * shift_skip
+
+         if self.guidance_rescale > 0.0:
+             pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale)
+
+         return pred, {}
+
+     @property
+     def is_conditional(self) -> bool:
+         return self._count_prepared == 1 or self._count_prepared == 3
+
+     @property
+     def num_conditions(self) -> int:
+         num_conditions = 1
+         if self._is_cfg_enabled():
+             num_conditions += 1
+         if self._is_slg_enabled():
+             num_conditions += 1
+         return num_conditions
+
+     def _is_cfg_enabled(self) -> bool:
+         if not self._enabled:
+             return False
+
+         is_within_range = True
+         if self._num_inference_steps is not None:
+             skip_start_step = int(self._start * self._num_inference_steps)
+             skip_stop_step = int(self._stop * self._num_inference_steps)
+             is_within_range = skip_start_step <= self._step < skip_stop_step
+
+         is_close = False
+         if self.use_original_formulation:
+             is_close = math.isclose(self.guidance_scale, 0.0)
+         else:
+             is_close = math.isclose(self.guidance_scale, 1.0)
+
+         return is_within_range and not is_close
+
+     def _is_slg_enabled(self) -> bool:
+         if not self._enabled:
+             return False
+
+         is_within_range = True
+         if self._num_inference_steps is not None:
+             skip_start_step = int(self.skip_layer_guidance_start * self._num_inference_steps)
+             skip_stop_step = int(self.skip_layer_guidance_stop * self._num_inference_steps)
+             is_within_range = skip_start_step < self._step < skip_stop_step
+
+         is_zero = math.isclose(self.skip_layer_guidance_scale, 0.0)
+
+         return is_within_range and not is_zero
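For orientation, here is a hedged usage sketch: constructing the guider from the StabilityAI-recommended layer indices for Stable Diffusion 3.5 Medium (per the docstring above), plus a scalar re-derivation of the fully-enabled branch of `forward`. The `from diffusers.guiders import SkipLayerGuidance` path assumes these classes are re-exported by the new `diffusers/guiders/__init__.py` listed in this diff; the numeric values are illustrative only.

# A sketch, not official usage docs: build the guider from layer indices;
# per __init__ above, this expands to one LayerSkipConfig(layer, fqn="auto")
# per listed layer.
from diffusers.guiders import SkipLayerGuidance

guider = SkipLayerGuidance(
    guidance_scale=7.5,
    skip_layer_guidance_scale=2.8,
    skip_layer_guidance_layers=[7, 8, 9],  # SD 3.5 Medium recommendation
)

# Scalar stand-ins for the three noise predictions show how the fully-enabled
# branch of forward() combines them (diffusers-native formulation):
pred_cond, pred_uncond, pred_cond_skip = 1.0, 0.2, 0.7
pred = (
    pred_uncond
    + 7.5 * (pred_cond - pred_uncond)      # CFG shift
    + 2.8 * (pred_cond - pred_cond_skip)   # skip-layer shift
)
print(round(pred, 2))  # 7.04

Calling `guider.forward(...)` with real tensors follows the same arithmetic, and then optionally applies `rescale_noise_cfg` when `guidance_rescale > 0`.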