diffusers 0.34.0__py3-none-any.whl → 0.35.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (191)
  1. diffusers/__init__.py +98 -1
  2. diffusers/callbacks.py +35 -0
  3. diffusers/commands/custom_blocks.py +134 -0
  4. diffusers/commands/diffusers_cli.py +2 -0
  5. diffusers/commands/fp16_safetensors.py +1 -1
  6. diffusers/configuration_utils.py +11 -2
  7. diffusers/dependency_versions_table.py +3 -3
  8. diffusers/guiders/__init__.py +41 -0
  9. diffusers/guiders/adaptive_projected_guidance.py +188 -0
  10. diffusers/guiders/auto_guidance.py +190 -0
  11. diffusers/guiders/classifier_free_guidance.py +141 -0
  12. diffusers/guiders/classifier_free_zero_star_guidance.py +152 -0
  13. diffusers/guiders/frequency_decoupled_guidance.py +327 -0
  14. diffusers/guiders/guider_utils.py +309 -0
  15. diffusers/guiders/perturbed_attention_guidance.py +271 -0
  16. diffusers/guiders/skip_layer_guidance.py +262 -0
  17. diffusers/guiders/smoothed_energy_guidance.py +251 -0
  18. diffusers/guiders/tangential_classifier_free_guidance.py +143 -0
  19. diffusers/hooks/__init__.py +17 -0
  20. diffusers/hooks/_common.py +56 -0
  21. diffusers/hooks/_helpers.py +293 -0
  22. diffusers/hooks/faster_cache.py +7 -6
  23. diffusers/hooks/first_block_cache.py +259 -0
  24. diffusers/hooks/group_offloading.py +292 -286
  25. diffusers/hooks/hooks.py +56 -1
  26. diffusers/hooks/layer_skip.py +263 -0
  27. diffusers/hooks/layerwise_casting.py +2 -7
  28. diffusers/hooks/pyramid_attention_broadcast.py +14 -11
  29. diffusers/hooks/smoothed_energy_guidance_utils.py +167 -0
  30. diffusers/hooks/utils.py +43 -0
  31. diffusers/loaders/__init__.py +6 -0
  32. diffusers/loaders/ip_adapter.py +255 -4
  33. diffusers/loaders/lora_base.py +63 -30
  34. diffusers/loaders/lora_conversion_utils.py +434 -53
  35. diffusers/loaders/lora_pipeline.py +834 -37
  36. diffusers/loaders/peft.py +28 -5
  37. diffusers/loaders/single_file_model.py +44 -11
  38. diffusers/loaders/single_file_utils.py +170 -2
  39. diffusers/loaders/transformer_flux.py +9 -10
  40. diffusers/loaders/transformer_sd3.py +6 -1
  41. diffusers/loaders/unet.py +22 -5
  42. diffusers/loaders/unet_loader_utils.py +5 -2
  43. diffusers/models/__init__.py +8 -0
  44. diffusers/models/attention.py +484 -3
  45. diffusers/models/attention_dispatch.py +1218 -0
  46. diffusers/models/attention_processor.py +105 -663
  47. diffusers/models/auto_model.py +2 -2
  48. diffusers/models/autoencoders/__init__.py +1 -0
  49. diffusers/models/autoencoders/autoencoder_dc.py +14 -1
  50. diffusers/models/autoencoders/autoencoder_kl.py +1 -1
  51. diffusers/models/autoencoders/autoencoder_kl_cosmos.py +3 -1
  52. diffusers/models/autoencoders/autoencoder_kl_qwenimage.py +1070 -0
  53. diffusers/models/autoencoders/autoencoder_kl_wan.py +370 -40
  54. diffusers/models/cache_utils.py +31 -9
  55. diffusers/models/controlnets/controlnet_flux.py +5 -5
  56. diffusers/models/controlnets/controlnet_union.py +4 -4
  57. diffusers/models/embeddings.py +26 -34
  58. diffusers/models/model_loading_utils.py +233 -1
  59. diffusers/models/modeling_flax_utils.py +1 -2
  60. diffusers/models/modeling_utils.py +159 -94
  61. diffusers/models/transformers/__init__.py +2 -0
  62. diffusers/models/transformers/transformer_chroma.py +16 -117
  63. diffusers/models/transformers/transformer_cogview4.py +36 -2
  64. diffusers/models/transformers/transformer_cosmos.py +11 -4
  65. diffusers/models/transformers/transformer_flux.py +372 -132
  66. diffusers/models/transformers/transformer_hunyuan_video.py +6 -0
  67. diffusers/models/transformers/transformer_ltx.py +104 -23
  68. diffusers/models/transformers/transformer_qwenimage.py +645 -0
  69. diffusers/models/transformers/transformer_skyreels_v2.py +607 -0
  70. diffusers/models/transformers/transformer_wan.py +298 -85
  71. diffusers/models/transformers/transformer_wan_vace.py +15 -21
  72. diffusers/models/unets/unet_2d_condition.py +2 -1
  73. diffusers/modular_pipelines/__init__.py +83 -0
  74. diffusers/modular_pipelines/components_manager.py +1068 -0
  75. diffusers/modular_pipelines/flux/__init__.py +66 -0
  76. diffusers/modular_pipelines/flux/before_denoise.py +689 -0
  77. diffusers/modular_pipelines/flux/decoders.py +109 -0
  78. diffusers/modular_pipelines/flux/denoise.py +227 -0
  79. diffusers/modular_pipelines/flux/encoders.py +412 -0
  80. diffusers/modular_pipelines/flux/modular_blocks.py +181 -0
  81. diffusers/modular_pipelines/flux/modular_pipeline.py +59 -0
  82. diffusers/modular_pipelines/modular_pipeline.py +2446 -0
  83. diffusers/modular_pipelines/modular_pipeline_utils.py +672 -0
  84. diffusers/modular_pipelines/node_utils.py +665 -0
  85. diffusers/modular_pipelines/stable_diffusion_xl/__init__.py +77 -0
  86. diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +1874 -0
  87. diffusers/modular_pipelines/stable_diffusion_xl/decoders.py +208 -0
  88. diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +771 -0
  89. diffusers/modular_pipelines/stable_diffusion_xl/encoders.py +887 -0
  90. diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py +380 -0
  91. diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py +365 -0
  92. diffusers/modular_pipelines/wan/__init__.py +66 -0
  93. diffusers/modular_pipelines/wan/before_denoise.py +365 -0
  94. diffusers/modular_pipelines/wan/decoders.py +105 -0
  95. diffusers/modular_pipelines/wan/denoise.py +261 -0
  96. diffusers/modular_pipelines/wan/encoders.py +242 -0
  97. diffusers/modular_pipelines/wan/modular_blocks.py +144 -0
  98. diffusers/modular_pipelines/wan/modular_pipeline.py +90 -0
  99. diffusers/pipelines/__init__.py +31 -0
  100. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +2 -3
  101. diffusers/pipelines/auto_pipeline.py +17 -13
  102. diffusers/pipelines/chroma/pipeline_chroma.py +5 -5
  103. diffusers/pipelines/chroma/pipeline_chroma_img2img.py +5 -5
  104. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +9 -8
  105. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +9 -8
  106. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +10 -9
  107. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +9 -8
  108. diffusers/pipelines/cogview4/pipeline_cogview4.py +16 -15
  109. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +3 -2
  110. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +212 -93
  111. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +7 -3
  112. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +194 -92
  113. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +1 -1
  114. diffusers/pipelines/dit/pipeline_dit.py +3 -1
  115. diffusers/pipelines/flux/__init__.py +4 -0
  116. diffusers/pipelines/flux/pipeline_flux.py +34 -26
  117. diffusers/pipelines/flux/pipeline_flux_control.py +8 -8
  118. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +1 -1
  119. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1 -1
  120. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1 -1
  121. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +1 -1
  122. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1 -1
  123. diffusers/pipelines/flux/pipeline_flux_fill.py +1 -1
  124. diffusers/pipelines/flux/pipeline_flux_img2img.py +1 -1
  125. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1 -1
  126. diffusers/pipelines/flux/pipeline_flux_kontext.py +1134 -0
  127. diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +1460 -0
  128. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +1 -1
  129. diffusers/pipelines/flux/pipeline_output.py +6 -4
  130. diffusers/pipelines/hidream_image/pipeline_hidream_image.py +5 -5
  131. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +25 -24
  132. diffusers/pipelines/ltx/pipeline_ltx.py +13 -12
  133. diffusers/pipelines/ltx/pipeline_ltx_condition.py +10 -9
  134. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +13 -12
  135. diffusers/pipelines/mochi/pipeline_mochi.py +9 -8
  136. diffusers/pipelines/pipeline_flax_utils.py +2 -2
  137. diffusers/pipelines/pipeline_loading_utils.py +24 -2
  138. diffusers/pipelines/pipeline_utils.py +22 -15
  139. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +3 -1
  140. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +20 -0
  141. diffusers/pipelines/qwenimage/__init__.py +55 -0
  142. diffusers/pipelines/qwenimage/pipeline_output.py +21 -0
  143. diffusers/pipelines/qwenimage/pipeline_qwenimage.py +726 -0
  144. diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +882 -0
  145. diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +829 -0
  146. diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +1015 -0
  147. diffusers/pipelines/sana/pipeline_sana_sprint.py +5 -5
  148. diffusers/pipelines/skyreels_v2/__init__.py +59 -0
  149. diffusers/pipelines/skyreels_v2/pipeline_output.py +20 -0
  150. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py +610 -0
  151. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py +978 -0
  152. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_i2v.py +1059 -0
  153. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_v2v.py +1063 -0
  154. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_i2v.py +745 -0
  155. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -1
  156. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -1
  157. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +1 -1
  158. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +2 -1
  159. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +6 -5
  160. diffusers/pipelines/wan/pipeline_wan.py +78 -20
  161. diffusers/pipelines/wan/pipeline_wan_i2v.py +112 -32
  162. diffusers/pipelines/wan/pipeline_wan_vace.py +1 -2
  163. diffusers/quantizers/__init__.py +1 -177
  164. diffusers/quantizers/base.py +11 -0
  165. diffusers/quantizers/gguf/utils.py +92 -3
  166. diffusers/quantizers/pipe_quant_config.py +202 -0
  167. diffusers/quantizers/torchao/torchao_quantizer.py +26 -0
  168. diffusers/schedulers/scheduling_deis_multistep.py +8 -1
  169. diffusers/schedulers/scheduling_dpmsolver_multistep.py +6 -0
  170. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +6 -0
  171. diffusers/schedulers/scheduling_scm.py +0 -1
  172. diffusers/schedulers/scheduling_unipc_multistep.py +10 -1
  173. diffusers/schedulers/scheduling_utils.py +2 -2
  174. diffusers/schedulers/scheduling_utils_flax.py +1 -1
  175. diffusers/training_utils.py +78 -0
  176. diffusers/utils/__init__.py +10 -0
  177. diffusers/utils/constants.py +4 -0
  178. diffusers/utils/dummy_pt_objects.py +312 -0
  179. diffusers/utils/dummy_torch_and_transformers_objects.py +255 -0
  180. diffusers/utils/dynamic_modules_utils.py +84 -25
  181. diffusers/utils/hub_utils.py +33 -17
  182. diffusers/utils/import_utils.py +70 -0
  183. diffusers/utils/peft_utils.py +11 -8
  184. diffusers/utils/testing_utils.py +136 -10
  185. diffusers/utils/torch_utils.py +18 -0
  186. {diffusers-0.34.0.dist-info → diffusers-0.35.0.dist-info}/METADATA +6 -6
  187. {diffusers-0.34.0.dist-info → diffusers-0.35.0.dist-info}/RECORD +191 -127
  188. {diffusers-0.34.0.dist-info → diffusers-0.35.0.dist-info}/LICENSE +0 -0
  189. {diffusers-0.34.0.dist-info → diffusers-0.35.0.dist-info}/WHEEL +0 -0
  190. {diffusers-0.34.0.dist-info → diffusers-0.35.0.dist-info}/entry_points.txt +0 -0
  191. {diffusers-0.34.0.dist-info → diffusers-0.35.0.dist-info}/top_level.txt +0 -0
diffusers/guiders/frequency_decoupled_guidance.py
@@ -0,0 +1,327 @@
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import math
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+ import torch
+
+ from ..configuration_utils import register_to_config
+ from ..utils import is_kornia_available
+ from .guider_utils import BaseGuidance, rescale_noise_cfg
+
+
+ if TYPE_CHECKING:
+     from ..modular_pipelines.modular_pipeline import BlockState
+
+
+ _CAN_USE_KORNIA = is_kornia_available()
+
+
+ if _CAN_USE_KORNIA:
+     from kornia.geometry import pyrup as upsample_and_blur_func
+     from kornia.geometry.transform import build_laplacian_pyramid as build_laplacian_pyramid_func
+ else:
+     upsample_and_blur_func = None
+     build_laplacian_pyramid_func = None
+
+
+ def project(v0: torch.Tensor, v1: torch.Tensor, upcast_to_double: bool = True) -> Tuple[torch.Tensor, torch.Tensor]:
+     """
+     Project vector v0 onto vector v1, returning the parallel and orthogonal components of v0. Implementation from
+     the paper (Algorithm 2).
+     """
+     # v0 shape: [B, ...]
+     # v1 shape: [B, ...]
+     # Assume first dim is a batch dim and all other dims are channel or "spatial" dims
+     all_dims_but_first = list(range(1, len(v0.shape)))
+     if upcast_to_double:
+         dtype = v0.dtype
+         v0, v1 = v0.double(), v1.double()
+     v1 = torch.nn.functional.normalize(v1, dim=all_dims_but_first)
+     v0_parallel = (v0 * v1).sum(dim=all_dims_but_first, keepdim=True) * v1
+     v0_orthogonal = v0 - v0_parallel
+     if upcast_to_double:
+         v0_parallel = v0_parallel.to(dtype)
+         v0_orthogonal = v0_orthogonal.to(dtype)
+     return v0_parallel, v0_orthogonal
+
+
+ def build_image_from_pyramid(pyramid: List[torch.Tensor]) -> torch.Tensor:
+     """
+     Recovers the data space latents from the Laplacian pyramid frequency space. Implementation from the paper
+     (Algorithm 2).
+     """
+     # pyramid shapes: [[B, C, H, W], [B, C, H/2, W/2], ...]
+     img = pyramid[-1]
+     for i in range(len(pyramid) - 2, -1, -1):
+         img = upsample_and_blur_func(img) + pyramid[i]
+     return img
+
+
+ class FrequencyDecoupledGuidance(BaseGuidance):
+     """
+     Frequency-Decoupled Guidance (FDG): https://huggingface.co/papers/2506.19713
+
+     FDG is a technique similar to (and based on) classifier-free guidance (CFG), which is used to improve generation
+     quality and condition-following in diffusion models. As with CFG, the model is jointly trained on both
+     conditional and unconditional data, and a combination of the two predictions is used during inference. (If you
+     want more details on how CFG works, you can check out the CFG guider.)
+
+     FDG differs from CFG in that the normal CFG prediction is instead decoupled into low- and high-frequency
+     components using a frequency transform (such as a Laplacian pyramid). The CFG update is then performed in
+     frequency space separately for the low- and high-frequency components with different guidance scales. Finally,
+     the inverse frequency transform is used to map the CFG frequency predictions back to data space (e.g. pixel
+     space for images) to form the final FDG prediction.
+
+     For images, the FDG authors found that using low guidance scales for the low-frequency components retains sample
+     diversity and realistic color composition, while using high guidance scales for high-frequency components
+     enhances sample quality (such as better visual details). Therefore, they recommend using low guidance scales
+     (low w_low) for the low-frequency components and high guidance scales (high w_high) for the high-frequency
+     components. As an example, they suggest w_low = 5.0 and w_high = 10.0 for Stable Diffusion XL (see Table 8 in
+     the paper).
+
+     As with CFG, Diffusers implements the scaling and shifting on the unconditional prediction based on the [Imagen
+     paper](https://huggingface.co/papers/2205.11487), which is equivalent to what the original CFG paper proposed in
+     theory. [x_pred = x_uncond + scale * (x_cond - x_uncond)]
+
+     The `use_original_formulation` argument can be set to `True` to use the original CFG formulation mentioned in
+     the paper. By default, we use the diffusers-native implementation that has been in the codebase for a long time.
+
+     Args:
+         guidance_scales (`List[float]`, defaults to `[10.0, 5.0]`):
+             The scale parameter for frequency-decoupled guidance for each frequency component, listed from the
+             highest frequency level to the lowest. Higher values result in stronger conditioning on the text prompt,
+             while lower values allow for more freedom in generation. Higher values may lead to saturation and
+             deterioration of image quality. The FDG authors recommend using higher guidance scales for higher
+             frequency components and lower guidance scales for lower frequency components (so `guidance_scales`
+             should typically be sorted in descending order).
+         guidance_rescale (`float` or `List[float]`, defaults to `0.0`):
+             The rescale factor applied to the noise predictions. This is used to improve image quality and fix
+             overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
+             Flawed](https://huggingface.co/papers/2305.08891). If a list is supplied, it should be the same length
+             as `guidance_scales`.
+         parallel_weights (`float` or `List[float]`, *optional*):
+             Optional weights for the parallel component of each frequency component of the projected CFG shift. If
+             not set, the weights default to `1.0` for all components, which corresponds to using the normal CFG
+             shift (that is, equal weights for the parallel and orthogonal components). If set, a value in `[0, 1]`
+             is recommended. If a list is supplied, it should be the same length as `guidance_scales`.
+         use_original_formulation (`bool`, defaults to `False`):
+             Whether to use the original formulation of classifier-free guidance as proposed in the paper. By
+             default, we use the diffusers-native implementation that has been in the codebase for a long time. See
+             [`~guiders.classifier_free_guidance.ClassifierFreeGuidance`] for more details.
+         start (`float` or `List[float]`, defaults to `0.0`):
+             The fraction of the total number of denoising steps after which guidance starts. If a list is supplied,
+             it should be the same length as `guidance_scales`.
+         stop (`float` or `List[float]`, defaults to `1.0`):
+             The fraction of the total number of denoising steps after which guidance stops. If a list is supplied,
+             it should be the same length as `guidance_scales`.
+         guidance_rescale_space (`str`, defaults to `"data"`):
+             Whether to perform guidance rescaling in `"data"` space (after the full FDG update in data space) or in
+             `"freq"` space (right after the CFG update, for each frequency level). Note that frequency space
+             rescaling is speculative and may not produce expected results. If `"data"` is set, the first
+             `guidance_rescale` value will be used; otherwise, per-frequency-level guidance rescale values will be
+             used if available.
+         upcast_to_double (`bool`, defaults to `True`):
+             Whether to upcast certain operations, such as the projection operation when using `parallel_weights`, to
+             float64 when performing guidance. This may result in better numerical precision at the cost of increased
+             runtime.
+     """
+
+     _input_predictions = ["pred_cond", "pred_uncond"]
+
+     @register_to_config
+     def __init__(
+         self,
+         guidance_scales: Union[List[float], Tuple[float]] = [10.0, 5.0],
+         guidance_rescale: Union[float, List[float], Tuple[float]] = 0.0,
+         parallel_weights: Optional[Union[float, List[float], Tuple[float]]] = None,
+         use_original_formulation: bool = False,
+         start: Union[float, List[float], Tuple[float]] = 0.0,
+         stop: Union[float, List[float], Tuple[float]] = 1.0,
+         guidance_rescale_space: str = "data",
+         upcast_to_double: bool = True,
+     ):
+         if not _CAN_USE_KORNIA:
+             raise ImportError(
+                 "The `FrequencyDecoupledGuidance` guider cannot be instantiated because the `kornia` library on which "
+                 "it depends is not available in the current environment. You can install `kornia` with `pip install "
+                 "kornia`."
+             )
+
+         # Set start to earliest start for any freq component and stop to latest stop for any freq component
+         min_start = start if isinstance(start, float) else min(start)
+         max_stop = stop if isinstance(stop, float) else max(stop)
+         super().__init__(min_start, max_stop)
+
+         self.guidance_scales = guidance_scales
+         self.levels = len(guidance_scales)
+
+         if isinstance(guidance_rescale, float):
+             self.guidance_rescale = [guidance_rescale] * self.levels
+         elif len(guidance_rescale) == self.levels:
+             self.guidance_rescale = guidance_rescale
+         else:
+             raise ValueError(
+                 f"`guidance_rescale` has length {len(guidance_rescale)} but should have the same length as "
+                 f"`guidance_scales` ({len(self.guidance_scales)})"
+             )
+         # Whether to perform guidance rescaling in frequency space (right after the CFG update) or data space (after
+         # transforming from frequency space back to data space)
+         if guidance_rescale_space not in ["data", "freq"]:
+             raise ValueError(
+                 f"Guidance rescale space is {guidance_rescale_space} but must be one of `data` or `freq`."
+             )
+         self.guidance_rescale_space = guidance_rescale_space
+
+         if parallel_weights is None:
+             # Use normal CFG shift (equal weights for parallel and orthogonal components)
+             self.parallel_weights = [1.0] * self.levels
+         elif isinstance(parallel_weights, float):
+             self.parallel_weights = [parallel_weights] * self.levels
+         elif len(parallel_weights) == self.levels:
+             self.parallel_weights = parallel_weights
+         else:
+             raise ValueError(
+                 f"`parallel_weights` has length {len(parallel_weights)} but should have the same length as "
+                 f"`guidance_scales` ({len(self.guidance_scales)})"
+             )
+
+         self.use_original_formulation = use_original_formulation
+         self.upcast_to_double = upcast_to_double
+
+         if isinstance(start, float):
+             self.guidance_start = [start] * self.levels
+         elif len(start) == self.levels:
+             self.guidance_start = start
+         else:
+             raise ValueError(
+                 f"`start` has length {len(start)} but should have the same length as `guidance_scales` "
+                 f"({len(self.guidance_scales)})"
+             )
+         if isinstance(stop, float):
+             self.guidance_stop = [stop] * self.levels
+         elif len(stop) == self.levels:
+             self.guidance_stop = stop
+         else:
+             raise ValueError(
+                 f"`stop` has length {len(stop)} but should have the same length as `guidance_scales` "
+                 f"({len(self.guidance_scales)})"
+             )
+
+     def prepare_inputs(
+         self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None
+     ) -> List["BlockState"]:
+         if input_fields is None:
+             input_fields = self._input_fields
+
+         tuple_indices = [0] if self.num_conditions == 1 else [0, 1]
+         data_batches = []
+         for i in range(self.num_conditions):
+             data_batch = self._prepare_batch(input_fields, data, tuple_indices[i], self._input_predictions[i])
+             data_batches.append(data_batch)
+         return data_batches
+
+     def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> torch.Tensor:
+         pred = None
+
+         if not self._is_fdg_enabled():
+             pred = pred_cond
+         else:
+             # Apply the frequency transform (e.g. Laplacian pyramid) to the conditional and unconditional predictions.
+             pred_cond_pyramid = build_laplacian_pyramid_func(pred_cond, self.levels)
+             pred_uncond_pyramid = build_laplacian_pyramid_func(pred_uncond, self.levels)
+
+             # From high frequencies to low frequencies, following the paper implementation
+             pred_guided_pyramid = []
+             parameters = zip(self.guidance_scales, self.parallel_weights, self.guidance_rescale)
+             for level, (guidance_scale, parallel_weight, guidance_rescale) in enumerate(parameters):
+                 if self._is_fdg_enabled_for_level(level):
+                     # Get the cond/uncond preds (in freq space) at the current frequency level
+                     pred_cond_freq = pred_cond_pyramid[level]
+                     pred_uncond_freq = pred_uncond_pyramid[level]
+
+                     shift = pred_cond_freq - pred_uncond_freq
+
+                     # Apply parallel weights, if used (1.0 corresponds to using the normal CFG shift)
+                     if not math.isclose(parallel_weight, 1.0):
+                         shift_parallel, shift_orthogonal = project(shift, pred_cond_freq, self.upcast_to_double)
+                         shift = parallel_weight * shift_parallel + shift_orthogonal
+
+                     # Apply CFG update for the current frequency level
+                     pred = pred_cond_freq if self.use_original_formulation else pred_uncond_freq
+                     pred = pred + guidance_scale * shift
+
+                     if self.guidance_rescale_space == "freq" and guidance_rescale > 0.0:
+                         pred = rescale_noise_cfg(pred, pred_cond_freq, guidance_rescale)
+
+                     # Add the current FDG guided level to the FDG prediction pyramid
+                     pred_guided_pyramid.append(pred)
+                 else:
+                     # Add the current pred_cond_pyramid level as the "non-FDG" prediction
+                     pred_guided_pyramid.append(pred_cond_pyramid[level])
+
+             # Convert from frequency space back to data (e.g. pixel) space by applying the inverse freq transform
+             pred = build_image_from_pyramid(pred_guided_pyramid)
+
+             # If rescaling in data space, use the first elem of self.guidance_rescale as the "global" rescale value
+             # across all freq levels
+             if self.guidance_rescale_space == "data" and self.guidance_rescale[0] > 0.0:
+                 pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale[0])
+
+         return pred, {}
+
+     @property
+     def is_conditional(self) -> bool:
+         return self._count_prepared == 1
+
+     @property
+     def num_conditions(self) -> int:
+         num_conditions = 1
+         if self._is_fdg_enabled():
+             num_conditions += 1
+         return num_conditions
+
+     def _is_fdg_enabled(self) -> bool:
+         if not self._enabled:
+             return False
+
+         is_within_range = True
+         if self._num_inference_steps is not None:
+             skip_start_step = int(self._start * self._num_inference_steps)
+             skip_stop_step = int(self._stop * self._num_inference_steps)
+             is_within_range = skip_start_step <= self._step < skip_stop_step
+
+         is_close = False
+         if self.use_original_formulation:
+             is_close = all(math.isclose(guidance_scale, 0.0) for guidance_scale in self.guidance_scales)
+         else:
+             is_close = all(math.isclose(guidance_scale, 1.0) for guidance_scale in self.guidance_scales)
+
+         return is_within_range and not is_close
+
+     def _is_fdg_enabled_for_level(self, level: int) -> bool:
+         if not self._enabled:
+             return False
+
+         is_within_range = True
+         if self._num_inference_steps is not None:
+             skip_start_step = int(self.guidance_start[level] * self._num_inference_steps)
+             skip_stop_step = int(self.guidance_stop[level] * self._num_inference_steps)
+             is_within_range = skip_start_step <= self._step < skip_stop_step
+
+         is_close = False
+         if self.use_original_formulation:
+             is_close = math.isclose(self.guidance_scales[level], 0.0)
+         else:
+             is_close = math.isclose(self.guidance_scales[level], 1.0)
+
+         return is_within_range and not is_close
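
The `FrequencyDecoupledGuidance` guider added above is configured entirely through its `@register_to_config` constructor and driven through the `BaseGuidance` API shown in the next hunk. As a minimal usage sketch, assuming the class is re-exported from the new `diffusers.guiders` package (suggested by the `diffusers/guiders/__init__.py` entry in the file list above, but not shown in this diff), and noting that `kornia` must be installed or the constructor raises `ImportError`:

```python
from diffusers.guiders import FrequencyDecoupledGuidance  # import path assumed from diffusers/guiders/__init__.py

# Two frequency levels, ordered highest to lowest; w_high = 10.0 and w_low = 5.0
# is the docstring's suggestion for Stable Diffusion XL (Table 8 of the FDG paper).
guider = FrequencyDecoupledGuidance(
    guidance_scales=[10.0, 5.0],
    guidance_rescale=0.0,
    parallel_weights=None,  # None keeps the plain CFG shift at every level
    start=0.0,
    stop=1.0,
)

# Per-step bookkeeping inherited from BaseGuidance (see the guider_utils.py hunk below):
guider.set_state(step=0, num_inference_steps=50, timestep=None)
print(guider.num_conditions)  # 2 while FDG is active (conditional + unconditional batches)
```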
diffusers/guiders/guider_utils.py
@@ -0,0 +1,309 @@
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+
+ import torch
+ from huggingface_hub.utils import validate_hf_hub_args
+ from typing_extensions import Self
+
+ from ..configuration_utils import ConfigMixin
+ from ..utils import PushToHubMixin, get_logger
+
+
+ if TYPE_CHECKING:
+     from ..modular_pipelines.modular_pipeline import BlockState
+
+
+ GUIDER_CONFIG_NAME = "guider_config.json"
+
+
+ logger = get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ class BaseGuidance(ConfigMixin, PushToHubMixin):
+     r"""Base class providing the skeleton for implementing guidance techniques."""
+
+     config_name = GUIDER_CONFIG_NAME
+     _input_predictions = None
+     _identifier_key = "__guidance_identifier__"
+
+     def __init__(self, start: float = 0.0, stop: float = 1.0):
+         self._start = start
+         self._stop = stop
+         self._step: int = None
+         self._num_inference_steps: int = None
+         self._timestep: torch.LongTensor = None
+         self._count_prepared = 0
+         self._input_fields: Dict[str, Union[str, Tuple[str, str]]] = None
+         self._enabled = True
+
+         if not (0.0 <= start < 1.0):
+             raise ValueError(f"Expected `start` to be between 0.0 and 1.0, but got {start}.")
+         if not (start <= stop <= 1.0):
+             raise ValueError(f"Expected `stop` to be between {start} and 1.0, but got {stop}.")
+
+         if self._input_predictions is None or not isinstance(self._input_predictions, list):
+             raise ValueError(
+                 "`_input_predictions` must be a list of required prediction names for the guidance technique."
+             )
+
+     def disable(self):
+         self._enabled = False
+
+     def enable(self):
+         self._enabled = True
+
+     def set_state(self, step: int, num_inference_steps: int, timestep: torch.LongTensor) -> None:
+         self._step = step
+         self._num_inference_steps = num_inference_steps
+         self._timestep = timestep
+         self._count_prepared = 0
+
+     def set_input_fields(self, **kwargs: Dict[str, Union[str, Tuple[str, str]]]) -> None:
+         """
+         Set the input fields for the guidance technique. The input fields are used to specify the names of the
+         returned attributes containing the prepared data after `prepare_inputs` is called. The prepared data is
+         obtained from the values of the provided keyword arguments to this method.
+
+         Args:
+             **kwargs (`Dict[str, Union[str, Tuple[str, str]]]`):
+                 A dictionary where the keys are the names of the fields that will be used to store the data once it
+                 is prepared with `prepare_inputs`. The values can be either a string or a tuple of length 2, which
+                 is used to look up the required data provided for preparation.
+
+                 If a string is provided, it will be used as the conditional data (or unconditional if used with a
+                 guidance method that requires it). If a tuple of length 2 is provided, the first element must be the
+                 conditional data identifier and the second element must be the unconditional data identifier or None.
+
+         Example:
+         ```
+         data = {"prompt_embeds": <some tensor>, "negative_prompt_embeds": <some tensor>, "latents": <some tensor>}
+
+         BaseGuidance.set_input_fields(
+             latents="latents",
+             prompt_embeds=("prompt_embeds", "negative_prompt_embeds"),
+         )
+         ```
+         """
+         for key, value in kwargs.items():
+             is_string = isinstance(value, str)
+             is_tuple_of_str_with_len_2 = (
+                 isinstance(value, tuple) and len(value) == 2 and all(isinstance(v, str) for v in value)
+             )
+             if not (is_string or is_tuple_of_str_with_len_2):
+                 raise ValueError(
+                     f"Expected `set_input_fields` to be called with a string or a tuple of strings of length 2, but got {type(value)} for key {key}."
+                 )
+         self._input_fields = kwargs
+
+     def prepare_models(self, denoiser: torch.nn.Module) -> None:
+         """
+         Prepares the models for the guidance technique on a given batch of data. This method should be overridden in
+         subclasses to implement specific model preparation logic.
+         """
+         self._count_prepared += 1
+
+     def cleanup_models(self, denoiser: torch.nn.Module) -> None:
+         """
+         Cleans up the models for the guidance technique after a given batch of data. This method should be
+         overridden in subclasses to implement specific model cleanup logic. It is useful for removing any hooks or
+         other stateful modifications made during `prepare_models`.
+         """
+         pass
+
+     def prepare_inputs(self, data: "BlockState") -> List["BlockState"]:
+         raise NotImplementedError("BaseGuidance::prepare_inputs must be implemented in subclasses.")
+
+     def __call__(self, data: List["BlockState"]) -> Any:
+         if not all(hasattr(d, "noise_pred") for d in data):
+             raise ValueError("Expected all data to have `noise_pred` attribute.")
+         if len(data) != self.num_conditions:
+             raise ValueError(
+                 f"Expected {self.num_conditions} data items, but got {len(data)}. Please check the input data."
+             )
+         forward_inputs = {getattr(d, self._identifier_key): d.noise_pred for d in data}
+         return self.forward(**forward_inputs)
+
+     def forward(self, *args, **kwargs) -> Any:
+         raise NotImplementedError("BaseGuidance::forward must be implemented in subclasses.")
+
+     @property
+     def is_conditional(self) -> bool:
+         raise NotImplementedError("BaseGuidance::is_conditional must be implemented in subclasses.")
+
+     @property
+     def is_unconditional(self) -> bool:
+         return not self.is_conditional
+
+     @property
+     def num_conditions(self) -> int:
+         raise NotImplementedError("BaseGuidance::num_conditions must be implemented in subclasses.")
+
+     @classmethod
+     def _prepare_batch(
+         cls,
+         input_fields: Dict[str, Union[str, Tuple[str, str]]],
+         data: "BlockState",
+         tuple_index: int,
+         identifier: str,
+     ) -> "BlockState":
+         """
+         Prepares a batch of data for the guidance technique. This method is used in the `prepare_inputs` method of
+         the `BaseGuidance` class. It prepares the batch based on the provided tuple index.
+
+         Args:
+             input_fields (`Dict[str, Union[str, Tuple[str, str]]]`):
+                 A dictionary where the keys are the names of the fields that will be used to store the data once it
+                 is prepared with `prepare_inputs`. The values can be either a string or a tuple of length 2, which
+                 is used to look up the required data provided for preparation. If a string is provided, it will be
+                 used as the conditional data (or unconditional if used with a guidance method that requires it). If
+                 a tuple of length 2 is provided, the first element must be the conditional data identifier and the
+                 second element must be the unconditional data identifier or None.
+             data (`BlockState`):
+                 The input data to be prepared.
+             tuple_index (`int`):
+                 The index to use when accessing input fields that are tuples.
+
+         Returns:
+             `BlockState`: The prepared batch of data.
+         """
+         from ..modular_pipelines.modular_pipeline import BlockState
+
+         if input_fields is None:
+             raise ValueError(
+                 "Input fields cannot be None. Please pass `input_fields` to `prepare_inputs` or call `set_input_fields` before preparing inputs."
+             )
+         data_batch = {}
+         for key, value in input_fields.items():
+             try:
+                 if isinstance(value, str):
+                     data_batch[key] = getattr(data, value)
+                 elif isinstance(value, tuple):
+                     data_batch[key] = getattr(data, value[tuple_index])
+                 else:
+                     # We've already checked that value is a string or a tuple of strings with length 2
+                     pass
+             except AttributeError:
+                 logger.debug(f"`data` does not have attribute(s) {value}, skipping.")
+         data_batch[cls._identifier_key] = identifier
+         return BlockState(**data_batch)
+
+     @classmethod
+     @validate_hf_hub_args
+     def from_pretrained(
+         cls,
+         pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
+         subfolder: Optional[str] = None,
+         return_unused_kwargs=False,
+         **kwargs,
+     ) -> Self:
+         r"""
+         Instantiate a guider from a pre-defined JSON configuration file in a local directory or Hub repository.
+
+         Parameters:
+             pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
+                 Can be either:
+
+                     - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted
+                       on the Hub.
+                     - A path to a *directory* (for example `./my_model_directory`) containing the guider
+                       configuration saved with [`~BaseGuidance.save_pretrained`].
+             subfolder (`str`, *optional*):
+                 The subfolder location of a model file within a larger model repository on the Hub or locally.
+             return_unused_kwargs (`bool`, *optional*, defaults to `False`):
+                 Whether kwargs that are not consumed by the Python class should be returned or not.
+             cache_dir (`Union[str, os.PathLike]`, *optional*):
+                 Path to a directory where a downloaded pretrained model configuration is cached if the standard
+                 cache is not used.
+             force_download (`bool`, *optional*, defaults to `False`):
+                 Whether or not to force the (re-)download of the model weights and configuration files, overriding
+                 the cached versions if they exist.
+             proxies (`Dict[str, str]`, *optional*):
+                 A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
+                 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+             output_loading_info (`bool`, *optional*, defaults to `False`):
+                 Whether or not to also return a dictionary containing missing keys, unexpected keys and error
+                 messages.
+             local_files_only (`bool`, *optional*, defaults to `False`):
+                 Whether to only load local model weights and configuration files or not. If set to `True`, the model
+                 won't be downloaded from the Hub.
+             token (`str` or *bool*, *optional*):
+                 The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
+                 `diffusers-cli login` (stored in `~/.huggingface`) is used.
+             revision (`str`, *optional*, defaults to `"main"`):
+                 The specific model version to use. It can be a branch name, a tag name, a commit id, or any
+                 identifier allowed by Git.
+
+         <Tip>
+
+         To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log in with `hf
+         auth login`. You can also activate the special
+         ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
+         firewalled environment.
+
+         </Tip>
+
+         """
+         config, kwargs, commit_hash = cls.load_config(
+             pretrained_model_name_or_path=pretrained_model_name_or_path,
+             subfolder=subfolder,
+             return_unused_kwargs=True,
+             return_commit_hash=True,
+             **kwargs,
+         )
+         return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)
+
+     def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
+         """
+         Save a guider configuration object to a directory so that it can be reloaded using the
+         [`~BaseGuidance.from_pretrained`] class method.
+
+         Args:
+             save_directory (`str` or `os.PathLike`):
+                 Directory where the configuration JSON file will be saved (will be created if it does not exist).
+             push_to_hub (`bool`, *optional*, defaults to `False`):
+                 Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
+                 repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+                 namespace).
+             kwargs (`Dict[str, Any]`, *optional*):
+                 Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
+         """
+         self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
+
+
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
+     r"""
+     Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
+     Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
+     Flawed](https://arxiv.org/pdf/2305.08891.pdf).
+
+     Args:
+         noise_cfg (`torch.Tensor`):
+             The predicted noise tensor for the guided diffusion process.
+         noise_pred_text (`torch.Tensor`):
+             The predicted noise tensor for the text-guided diffusion process.
+         guidance_rescale (`float`, *optional*, defaults to 0.0):
+             A rescale factor applied to the noise predictions.
+
+     Returns:
+         noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor.
+     """
+     std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
+     std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
+     # rescale the results from guidance (fixes overexposure)
+     noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
+     # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
+     noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
+     return noise_cfg
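
The `rescale_noise_cfg` helper at the end of this hunk rescales the guided prediction so that its per-sample standard deviation matches the text-conditioned prediction, then blends the rescaled and raw predictions by `guidance_rescale`. A small self-contained check of that behavior, importing directly from the module added in this diff (whether the helper is also re-exported elsewhere is not shown here):

```python
import torch

from diffusers.guiders.guider_utils import rescale_noise_cfg  # module path taken from this hunk

torch.manual_seed(0)
noise_pred_text = torch.randn(2, 4, 64, 64)
noise_cfg = 3.0 * noise_pred_text  # stand-in for an over-amplified CFG output

out = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)

# Because the two inputs are parallel in this toy example, the output is
# 0.7 * noise_pred_text + 0.3 * noise_cfg = 1.6 * noise_pred_text exactly.
print(noise_cfg.std().item(), out.std().item(), noise_pred_text.std().item())
```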