diffusers 0.34.0__py3-none-any.whl → 0.35.1__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (191)
  1. diffusers/__init__.py +98 -1
  2. diffusers/callbacks.py +35 -0
  3. diffusers/commands/custom_blocks.py +134 -0
  4. diffusers/commands/diffusers_cli.py +2 -0
  5. diffusers/commands/fp16_safetensors.py +1 -1
  6. diffusers/configuration_utils.py +11 -2
  7. diffusers/dependency_versions_table.py +3 -3
  8. diffusers/guiders/__init__.py +41 -0
  9. diffusers/guiders/adaptive_projected_guidance.py +188 -0
  10. diffusers/guiders/auto_guidance.py +190 -0
  11. diffusers/guiders/classifier_free_guidance.py +141 -0
  12. diffusers/guiders/classifier_free_zero_star_guidance.py +152 -0
  13. diffusers/guiders/frequency_decoupled_guidance.py +327 -0
  14. diffusers/guiders/guider_utils.py +309 -0
  15. diffusers/guiders/perturbed_attention_guidance.py +271 -0
  16. diffusers/guiders/skip_layer_guidance.py +262 -0
  17. diffusers/guiders/smoothed_energy_guidance.py +251 -0
  18. diffusers/guiders/tangential_classifier_free_guidance.py +143 -0
  19. diffusers/hooks/__init__.py +17 -0
  20. diffusers/hooks/_common.py +56 -0
  21. diffusers/hooks/_helpers.py +293 -0
  22. diffusers/hooks/faster_cache.py +7 -6
  23. diffusers/hooks/first_block_cache.py +259 -0
  24. diffusers/hooks/group_offloading.py +292 -286
  25. diffusers/hooks/hooks.py +56 -1
  26. diffusers/hooks/layer_skip.py +263 -0
  27. diffusers/hooks/layerwise_casting.py +2 -7
  28. diffusers/hooks/pyramid_attention_broadcast.py +14 -11
  29. diffusers/hooks/smoothed_energy_guidance_utils.py +167 -0
  30. diffusers/hooks/utils.py +43 -0
  31. diffusers/loaders/__init__.py +6 -0
  32. diffusers/loaders/ip_adapter.py +255 -4
  33. diffusers/loaders/lora_base.py +63 -30
  34. diffusers/loaders/lora_conversion_utils.py +434 -53
  35. diffusers/loaders/lora_pipeline.py +834 -37
  36. diffusers/loaders/peft.py +28 -5
  37. diffusers/loaders/single_file_model.py +44 -11
  38. diffusers/loaders/single_file_utils.py +170 -2
  39. diffusers/loaders/transformer_flux.py +9 -10
  40. diffusers/loaders/transformer_sd3.py +6 -1
  41. diffusers/loaders/unet.py +22 -5
  42. diffusers/loaders/unet_loader_utils.py +5 -2
  43. diffusers/models/__init__.py +8 -0
  44. diffusers/models/attention.py +484 -3
  45. diffusers/models/attention_dispatch.py +1218 -0
  46. diffusers/models/attention_processor.py +105 -663
  47. diffusers/models/auto_model.py +2 -2
  48. diffusers/models/autoencoders/__init__.py +1 -0
  49. diffusers/models/autoencoders/autoencoder_dc.py +14 -1
  50. diffusers/models/autoencoders/autoencoder_kl.py +1 -1
  51. diffusers/models/autoencoders/autoencoder_kl_cosmos.py +3 -1
  52. diffusers/models/autoencoders/autoencoder_kl_qwenimage.py +1070 -0
  53. diffusers/models/autoencoders/autoencoder_kl_wan.py +370 -40
  54. diffusers/models/cache_utils.py +31 -9
  55. diffusers/models/controlnets/controlnet_flux.py +5 -5
  56. diffusers/models/controlnets/controlnet_union.py +4 -4
  57. diffusers/models/embeddings.py +26 -34
  58. diffusers/models/model_loading_utils.py +233 -1
  59. diffusers/models/modeling_flax_utils.py +1 -2
  60. diffusers/models/modeling_utils.py +159 -94
  61. diffusers/models/transformers/__init__.py +2 -0
  62. diffusers/models/transformers/transformer_chroma.py +16 -117
  63. diffusers/models/transformers/transformer_cogview4.py +36 -2
  64. diffusers/models/transformers/transformer_cosmos.py +11 -4
  65. diffusers/models/transformers/transformer_flux.py +372 -132
  66. diffusers/models/transformers/transformer_hunyuan_video.py +6 -0
  67. diffusers/models/transformers/transformer_ltx.py +104 -23
  68. diffusers/models/transformers/transformer_qwenimage.py +645 -0
  69. diffusers/models/transformers/transformer_skyreels_v2.py +607 -0
  70. diffusers/models/transformers/transformer_wan.py +298 -85
  71. diffusers/models/transformers/transformer_wan_vace.py +15 -21
  72. diffusers/models/unets/unet_2d_condition.py +2 -1
  73. diffusers/modular_pipelines/__init__.py +83 -0
  74. diffusers/modular_pipelines/components_manager.py +1068 -0
  75. diffusers/modular_pipelines/flux/__init__.py +66 -0
  76. diffusers/modular_pipelines/flux/before_denoise.py +689 -0
  77. diffusers/modular_pipelines/flux/decoders.py +109 -0
  78. diffusers/modular_pipelines/flux/denoise.py +227 -0
  79. diffusers/modular_pipelines/flux/encoders.py +412 -0
  80. diffusers/modular_pipelines/flux/modular_blocks.py +181 -0
  81. diffusers/modular_pipelines/flux/modular_pipeline.py +59 -0
  82. diffusers/modular_pipelines/modular_pipeline.py +2446 -0
  83. diffusers/modular_pipelines/modular_pipeline_utils.py +672 -0
  84. diffusers/modular_pipelines/node_utils.py +665 -0
  85. diffusers/modular_pipelines/stable_diffusion_xl/__init__.py +77 -0
  86. diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +1874 -0
  87. diffusers/modular_pipelines/stable_diffusion_xl/decoders.py +208 -0
  88. diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +771 -0
  89. diffusers/modular_pipelines/stable_diffusion_xl/encoders.py +887 -0
  90. diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py +380 -0
  91. diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py +365 -0
  92. diffusers/modular_pipelines/wan/__init__.py +66 -0
  93. diffusers/modular_pipelines/wan/before_denoise.py +365 -0
  94. diffusers/modular_pipelines/wan/decoders.py +105 -0
  95. diffusers/modular_pipelines/wan/denoise.py +261 -0
  96. diffusers/modular_pipelines/wan/encoders.py +242 -0
  97. diffusers/modular_pipelines/wan/modular_blocks.py +144 -0
  98. diffusers/modular_pipelines/wan/modular_pipeline.py +90 -0
  99. diffusers/pipelines/__init__.py +31 -0
  100. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +2 -3
  101. diffusers/pipelines/auto_pipeline.py +17 -13
  102. diffusers/pipelines/chroma/pipeline_chroma.py +5 -5
  103. diffusers/pipelines/chroma/pipeline_chroma_img2img.py +5 -5
  104. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +9 -8
  105. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +9 -8
  106. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +10 -9
  107. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +9 -8
  108. diffusers/pipelines/cogview4/pipeline_cogview4.py +16 -15
  109. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +3 -2
  110. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +212 -93
  111. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +7 -3
  112. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +194 -92
  113. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +1 -1
  114. diffusers/pipelines/dit/pipeline_dit.py +3 -1
  115. diffusers/pipelines/flux/__init__.py +4 -0
  116. diffusers/pipelines/flux/pipeline_flux.py +34 -26
  117. diffusers/pipelines/flux/pipeline_flux_control.py +8 -8
  118. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +1 -1
  119. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1 -1
  120. diffusers/pipelines/flux/pipeline_flux_controlnet.py +1 -1
  121. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +1 -1
  122. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1 -1
  123. diffusers/pipelines/flux/pipeline_flux_fill.py +1 -1
  124. diffusers/pipelines/flux/pipeline_flux_img2img.py +1 -1
  125. diffusers/pipelines/flux/pipeline_flux_inpaint.py +1 -1
  126. diffusers/pipelines/flux/pipeline_flux_kontext.py +1134 -0
  127. diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +1460 -0
  128. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +1 -1
  129. diffusers/pipelines/flux/pipeline_output.py +6 -4
  130. diffusers/pipelines/hidream_image/pipeline_hidream_image.py +5 -5
  131. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +25 -24
  132. diffusers/pipelines/ltx/pipeline_ltx.py +13 -12
  133. diffusers/pipelines/ltx/pipeline_ltx_condition.py +10 -9
  134. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +13 -12
  135. diffusers/pipelines/mochi/pipeline_mochi.py +9 -8
  136. diffusers/pipelines/pipeline_flax_utils.py +2 -2
  137. diffusers/pipelines/pipeline_loading_utils.py +24 -2
  138. diffusers/pipelines/pipeline_utils.py +22 -15
  139. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +3 -1
  140. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +20 -0
  141. diffusers/pipelines/qwenimage/__init__.py +55 -0
  142. diffusers/pipelines/qwenimage/pipeline_output.py +21 -0
  143. diffusers/pipelines/qwenimage/pipeline_qwenimage.py +726 -0
  144. diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +849 -0
  145. diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +829 -0
  146. diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +1015 -0
  147. diffusers/pipelines/sana/pipeline_sana_sprint.py +5 -5
  148. diffusers/pipelines/skyreels_v2/__init__.py +59 -0
  149. diffusers/pipelines/skyreels_v2/pipeline_output.py +20 -0
  150. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py +610 -0
  151. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py +978 -0
  152. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_i2v.py +1059 -0
  153. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_v2v.py +1063 -0
  154. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_i2v.py +745 -0
  155. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -1
  156. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -1
  157. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +1 -1
  158. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +2 -1
  159. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +6 -5
  160. diffusers/pipelines/wan/pipeline_wan.py +78 -20
  161. diffusers/pipelines/wan/pipeline_wan_i2v.py +112 -32
  162. diffusers/pipelines/wan/pipeline_wan_vace.py +1 -2
  163. diffusers/quantizers/__init__.py +1 -177
  164. diffusers/quantizers/base.py +11 -0
  165. diffusers/quantizers/gguf/utils.py +92 -3
  166. diffusers/quantizers/pipe_quant_config.py +202 -0
  167. diffusers/quantizers/torchao/torchao_quantizer.py +26 -0
  168. diffusers/schedulers/scheduling_deis_multistep.py +8 -1
  169. diffusers/schedulers/scheduling_dpmsolver_multistep.py +6 -0
  170. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +6 -0
  171. diffusers/schedulers/scheduling_scm.py +0 -1
  172. diffusers/schedulers/scheduling_unipc_multistep.py +10 -1
  173. diffusers/schedulers/scheduling_utils.py +2 -2
  174. diffusers/schedulers/scheduling_utils_flax.py +1 -1
  175. diffusers/training_utils.py +78 -0
  176. diffusers/utils/__init__.py +10 -0
  177. diffusers/utils/constants.py +4 -0
  178. diffusers/utils/dummy_pt_objects.py +312 -0
  179. diffusers/utils/dummy_torch_and_transformers_objects.py +255 -0
  180. diffusers/utils/dynamic_modules_utils.py +84 -25
  181. diffusers/utils/hub_utils.py +33 -17
  182. diffusers/utils/import_utils.py +70 -0
  183. diffusers/utils/peft_utils.py +11 -8
  184. diffusers/utils/testing_utils.py +136 -10
  185. diffusers/utils/torch_utils.py +18 -0
  186. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/METADATA +6 -6
  187. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/RECORD +191 -127
  188. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/LICENSE +0 -0
  189. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/WHEEL +0 -0
  190. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/entry_points.txt +0 -0
  191. {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/top_level.txt +0 -0
diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py (new file)
@@ -0,0 +1,1874 @@
1
+ # Copyright 2025 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, List, Optional, Tuple, Union
17
+
18
+ import PIL
19
+ import torch
20
+
21
+ from ...configuration_utils import FrozenDict
22
+ from ...guiders import ClassifierFreeGuidance
23
+ from ...image_processor import VaeImageProcessor
24
+ from ...models import AutoencoderKL, ControlNetModel, ControlNetUnionModel, UNet2DConditionModel
25
+ from ...pipelines.controlnet.multicontrolnet import MultiControlNetModel
26
+ from ...schedulers import EulerDiscreteScheduler
27
+ from ...utils import logging
28
+ from ...utils.torch_utils import randn_tensor, unwrap_module
29
+ from ..modular_pipeline import (
30
+ ModularPipelineBlocks,
31
+ PipelineState,
32
+ )
33
+ from ..modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, OutputParam
34
+ from .modular_pipeline import StableDiffusionXLModularPipeline
35
+
36
+
37
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
38
+
39
+
40
+ # TODO(yiyi, aryan): We need another step before the text encoder to set the `num_inference_steps` attribute for the guider, so that
41
+ # things like when to apply guidance and how many conditions need to be prepared can be determined. Currently, this is done by
42
+ # always assuming that guidance will be applied in the Guiders. So, negative embeddings are prepared regardless of the
43
+ # guider's configuration.
44
+
45
+
46
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
47
+ def retrieve_timesteps(
48
+ scheduler,
49
+ num_inference_steps: Optional[int] = None,
50
+ device: Optional[Union[str, torch.device]] = None,
51
+ timesteps: Optional[List[int]] = None,
52
+ sigmas: Optional[List[float]] = None,
53
+ **kwargs,
54
+ ):
55
+ r"""
56
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
57
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
58
+
59
+ Args:
60
+ scheduler (`SchedulerMixin`):
61
+ The scheduler to get timesteps from.
62
+ num_inference_steps (`int`):
63
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
64
+ must be `None`.
65
+ device (`str` or `torch.device`, *optional*):
66
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
67
+ timesteps (`List[int]`, *optional*):
68
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
69
+ `num_inference_steps` and `sigmas` must be `None`.
70
+ sigmas (`List[float]`, *optional*):
71
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
72
+ `num_inference_steps` and `timesteps` must be `None`.
73
+
74
+ Returns:
75
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
76
+ second element is the number of inference steps.
77
+ """
78
+ if timesteps is not None and sigmas is not None:
79
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
80
+ if timesteps is not None:
81
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
82
+ if not accepts_timesteps:
83
+ raise ValueError(
84
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
85
+ f" timestep schedules. Please check whether you are using the correct scheduler."
86
+ )
87
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
88
+ timesteps = scheduler.timesteps
89
+ num_inference_steps = len(timesteps)
90
+ elif sigmas is not None:
91
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
92
+ if not accept_sigmas:
93
+ raise ValueError(
94
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
95
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
96
+ )
97
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
98
+ timesteps = scheduler.timesteps
99
+ num_inference_steps = len(timesteps)
100
+ else:
101
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
102
+ timesteps = scheduler.timesteps
103
+ return timesteps, num_inference_steps
104
+
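A minimal usage sketch of the `retrieve_timesteps` helper above (not part of the diff). It assumes the helper is in scope and uses an `EulerDiscreteScheduler` configured roughly like SDXL's; the step counts and sigma values are illustrative.

```python
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

# Default path: the scheduler spaces the 30 steps itself.
timesteps, num_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")
print(num_steps, timesteps.shape)  # 30 torch.Size([30])

# Custom sigma schedule: the number of steps is derived from the schedule instead.
timesteps, num_steps = retrieve_timesteps(scheduler, sigmas=[14.6, 7.0, 3.0, 1.0, 0.0], device="cpu")
print(num_steps)  # 4 with this trailing-0.0 convention

# Passing both `timesteps` and `sigmas` raises a ValueError.
```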
105
+
106
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
107
+ def retrieve_latents(
108
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
109
+ ):
110
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
111
+ return encoder_output.latent_dist.sample(generator)
112
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
113
+ return encoder_output.latent_dist.mode()
114
+ elif hasattr(encoder_output, "latents"):
115
+ return encoder_output.latents
116
+ else:
117
+ raise AttributeError("Could not access latents of provided encoder_output")
118
+
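A small sketch of what `retrieve_latents` returns for the two posterior modes (not part of the diff). The tiny `AutoencoderKL` configuration is an assumption chosen only to keep the example cheap to run.

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    in_channels=3,
    out_channels=3,
    down_block_types=("DownEncoderBlock2D",),
    up_block_types=("UpDecoderBlock2D",),
    block_out_channels=(32,),
    latent_channels=4,
    sample_size=32,
)

image = torch.randn(1, 3, 32, 32)
generator = torch.Generator().manual_seed(0)

with torch.no_grad():
    encoder_output = vae.encode(image)

sampled = retrieve_latents(encoder_output, generator=generator)  # stochastic sample from the posterior
mode = retrieve_latents(encoder_output, sample_mode="argmax")    # deterministic mode of the posterior
print(sampled.shape, mode.shape)  # both torch.Size([1, 4, 32, 32]) for this single-block VAE
```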
119
+
120
+ def prepare_latents_img2img(
121
+ vae, scheduler, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
122
+ ):
123
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
124
+ raise ValueError(f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
125
+
126
+ image = image.to(device=device, dtype=dtype)
127
+
128
+ batch_size = batch_size * num_images_per_prompt
129
+
130
+ if image.shape[1] == 4:
131
+ init_latents = image
132
+
133
+ else:
134
+ latents_mean = latents_std = None
135
+ if hasattr(vae.config, "latents_mean") and vae.config.latents_mean is not None:
136
+ latents_mean = torch.tensor(vae.config.latents_mean).view(1, 4, 1, 1)
137
+ if hasattr(vae.config, "latents_std") and vae.config.latents_std is not None:
138
+ latents_std = torch.tensor(vae.config.latents_std).view(1, 4, 1, 1)
139
+ # make sure the VAE is in float32 mode, as it overflows in float16
140
+ if vae.config.force_upcast:
141
+ image = image.float()
142
+ vae.to(dtype=torch.float32)
143
+
144
+ if isinstance(generator, list) and len(generator) != batch_size:
145
+ raise ValueError(
146
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
147
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
148
+ )
149
+
150
+ elif isinstance(generator, list):
151
+ if image.shape[0] < batch_size and batch_size % image.shape[0] == 0:
152
+ image = torch.cat([image] * (batch_size // image.shape[0]), dim=0)
153
+ elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0:
154
+ raise ValueError(
155
+ f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} "
156
+ )
157
+
158
+ init_latents = [
159
+ retrieve_latents(vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size)
160
+ ]
161
+ init_latents = torch.cat(init_latents, dim=0)
162
+ else:
163
+ init_latents = retrieve_latents(vae.encode(image), generator=generator)
164
+
165
+ if vae.config.force_upcast:
166
+ vae.to(dtype)
167
+
168
+ init_latents = init_latents.to(dtype)
169
+ if latents_mean is not None and latents_std is not None:
170
+ latents_mean = latents_mean.to(device=device, dtype=dtype)
171
+ latents_std = latents_std.to(device=device, dtype=dtype)
172
+ init_latents = (init_latents - latents_mean) * vae.config.scaling_factor / latents_std
173
+ else:
174
+ init_latents = vae.config.scaling_factor * init_latents
175
+
176
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
177
+ # expand init_latents for batch_size
178
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
179
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
180
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
181
+ raise ValueError(
182
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
183
+ )
184
+ else:
185
+ init_latents = torch.cat([init_latents], dim=0)
186
+
187
+ if add_noise:
188
+ shape = init_latents.shape
189
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
190
+ # get latents
191
+ init_latents = scheduler.add_noise(init_latents, noise, timestep)
192
+
193
+ latents = init_latents
194
+
195
+ return latents
196
+
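A sketch of the two latent-scaling branches in `prepare_latents_img2img` above (not part of the diff). The scaling factor and statistics are illustrative assumptions; most SDXL VAEs only define `scaling_factor`, while VAEs that also provide `latents_mean`/`latents_std` take the normalization branch.

```python
import torch

scaling_factor = 0.13025  # illustrative SDXL-style value
init_latents = torch.randn(1, 4, 128, 128)

# Branch 1: plain scaling (no latents_mean/latents_std in the VAE config).
scaled = scaling_factor * init_latents

# Branch 2: per-channel normalization when the VAE config provides the statistics.
latents_mean = torch.zeros(1, 4, 1, 1)
latents_std = torch.ones(1, 4, 1, 1)
normalized = (init_latents - latents_mean) * scaling_factor / latents_std
print(scaled.shape, normalized.shape)
```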
197
+
198
+ class StableDiffusionXLInputStep(ModularPipelineBlocks):
199
+ model_name = "stable-diffusion-xl"
200
+
201
+ @property
202
+ def description(self) -> str:
203
+ return (
204
+ "Input processing step that:\n"
205
+ " 1. Determines `batch_size` and `dtype` based on `prompt_embeds`\n"
206
+ " 2. Adjusts input tensor shapes based on `batch_size` (number of prompts) and `num_images_per_prompt`\n\n"
207
+ "All input tensors are expected to have either batch_size=1 or match the batch_size\n"
208
+ "of prompt_embeds. The tensors will be duplicated across the batch dimension to\n"
209
+ "have a final batch_size of batch_size * num_images_per_prompt."
210
+ )
211
+
212
+ @property
213
+ def inputs(self) -> List[InputParam]:
214
+ return [
215
+ InputParam("num_images_per_prompt", default=1),
216
+ InputParam(
217
+ "prompt_embeds",
218
+ required=True,
219
+ type_hint=torch.Tensor,
220
+ description="Pre-generated text embeddings. Can be generated from text_encoder step.",
221
+ ),
222
+ InputParam(
223
+ "negative_prompt_embeds",
224
+ type_hint=torch.Tensor,
225
+ description="Pre-generated negative text embeddings. Can be generated from text_encoder step.",
226
+ ),
227
+ InputParam(
228
+ "pooled_prompt_embeds",
229
+ required=True,
230
+ type_hint=torch.Tensor,
231
+ description="Pre-generated pooled text embeddings. Can be generated from text_encoder step.",
232
+ ),
233
+ InputParam(
234
+ "negative_pooled_prompt_embeds",
235
+ description="Pre-generated negative pooled text embeddings. Can be generated from text_encoder step.",
236
+ ),
237
+ InputParam(
238
+ "ip_adapter_embeds",
239
+ type_hint=List[torch.Tensor],
240
+ description="Pre-generated image embeddings for IP-Adapter. Can be generated from ip_adapter step.",
241
+ ),
242
+ InputParam(
243
+ "negative_ip_adapter_embeds",
244
+ type_hint=List[torch.Tensor],
245
+ description="Pre-generated negative image embeddings for IP-Adapter. Can be generated from ip_adapter step.",
246
+ ),
247
+ ]
248
+
249
+ @property
250
+ def intermediate_outputs(self) -> List[str]:
251
+ return [
252
+ OutputParam(
253
+ "batch_size",
254
+ type_hint=int,
255
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt",
256
+ ),
257
+ OutputParam(
258
+ "dtype",
259
+ type_hint=torch.dtype,
260
+ description="Data type of model tensor inputs (determined by `prompt_embeds`)",
261
+ ),
262
+ OutputParam(
263
+ "prompt_embeds",
264
+ type_hint=torch.Tensor,
265
+ kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields
266
+ description="text embeddings used to guide the image generation",
267
+ ),
268
+ OutputParam(
269
+ "negative_prompt_embeds",
270
+ type_hint=torch.Tensor,
271
+ kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields
272
+ description="negative text embeddings used to guide the image generation",
273
+ ),
274
+ OutputParam(
275
+ "pooled_prompt_embeds",
276
+ type_hint=torch.Tensor,
277
+ kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields
278
+ description="pooled text embeddings used to guide the image generation",
279
+ ),
280
+ OutputParam(
281
+ "negative_pooled_prompt_embeds",
282
+ type_hint=torch.Tensor,
283
+ kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields
284
+ description="negative pooled text embeddings used to guide the image generation",
285
+ ),
286
+ OutputParam(
287
+ "ip_adapter_embeds",
288
+ type_hint=List[torch.Tensor],
289
+ kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields
290
+ description="image embeddings for IP-Adapter",
291
+ ),
292
+ OutputParam(
293
+ "negative_ip_adapter_embeds",
294
+ type_hint=List[torch.Tensor],
295
+ kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields
296
+ description="negative image embeddings for IP-Adapter",
297
+ ),
298
+ ]
299
+
300
+ def check_inputs(self, components, block_state):
301
+ if block_state.prompt_embeds is not None and block_state.negative_prompt_embeds is not None:
302
+ if block_state.prompt_embeds.shape != block_state.negative_prompt_embeds.shape:
303
+ raise ValueError(
304
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
305
+ f" got: `prompt_embeds` {block_state.prompt_embeds.shape} != `negative_prompt_embeds`"
306
+ f" {block_state.negative_prompt_embeds.shape}."
307
+ )
308
+
309
+ if block_state.prompt_embeds is not None and block_state.pooled_prompt_embeds is None:
310
+ raise ValueError(
311
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
312
+ )
313
+
314
+ if block_state.negative_prompt_embeds is not None and block_state.negative_pooled_prompt_embeds is None:
315
+ raise ValueError(
316
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
317
+ )
318
+
319
+ if block_state.ip_adapter_embeds is not None and not isinstance(block_state.ip_adapter_embeds, list):
320
+ raise ValueError("`ip_adapter_embeds` must be a list")
321
+
322
+ if block_state.negative_ip_adapter_embeds is not None and not isinstance(
323
+ block_state.negative_ip_adapter_embeds, list
324
+ ):
325
+ raise ValueError("`negative_ip_adapter_embeds` must be a list")
326
+
327
+ if block_state.ip_adapter_embeds is not None and block_state.negative_ip_adapter_embeds is not None:
328
+ for i, ip_adapter_embed in enumerate(block_state.ip_adapter_embeds):
329
+ if ip_adapter_embed.shape != block_state.negative_ip_adapter_embeds[i].shape:
330
+ raise ValueError(
331
+ "`ip_adapter_embeds` and `negative_ip_adapter_embeds` must have the same shape when passed directly, but"
332
+ f" got: `ip_adapter_embeds` {ip_adapter_embed.shape} != `negative_ip_adapter_embeds`"
333
+ f" {block_state.negative_ip_adapter_embeds[i].shape}."
334
+ )
335
+
336
+ @torch.no_grad()
337
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
338
+ block_state = self.get_block_state(state)
339
+ self.check_inputs(components, block_state)
340
+
341
+ block_state.batch_size = block_state.prompt_embeds.shape[0]
342
+ block_state.dtype = block_state.prompt_embeds.dtype
343
+
344
+ _, seq_len, _ = block_state.prompt_embeds.shape
345
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
346
+ block_state.prompt_embeds = block_state.prompt_embeds.repeat(1, block_state.num_images_per_prompt, 1)
347
+ block_state.prompt_embeds = block_state.prompt_embeds.view(
348
+ block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1
349
+ )
350
+
351
+ if block_state.negative_prompt_embeds is not None:
352
+ _, seq_len, _ = block_state.negative_prompt_embeds.shape
353
+ block_state.negative_prompt_embeds = block_state.negative_prompt_embeds.repeat(
354
+ 1, block_state.num_images_per_prompt, 1
355
+ )
356
+ block_state.negative_prompt_embeds = block_state.negative_prompt_embeds.view(
357
+ block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1
358
+ )
359
+
360
+ block_state.pooled_prompt_embeds = block_state.pooled_prompt_embeds.repeat(
361
+ 1, block_state.num_images_per_prompt, 1
362
+ )
363
+ block_state.pooled_prompt_embeds = block_state.pooled_prompt_embeds.view(
364
+ block_state.batch_size * block_state.num_images_per_prompt, -1
365
+ )
366
+
367
+ if block_state.negative_pooled_prompt_embeds is not None:
368
+ block_state.negative_pooled_prompt_embeds = block_state.negative_pooled_prompt_embeds.repeat(
369
+ 1, block_state.num_images_per_prompt, 1
370
+ )
371
+ block_state.negative_pooled_prompt_embeds = block_state.negative_pooled_prompt_embeds.view(
372
+ block_state.batch_size * block_state.num_images_per_prompt, -1
373
+ )
374
+
375
+ if block_state.ip_adapter_embeds is not None:
376
+ for i, ip_adapter_embed in enumerate(block_state.ip_adapter_embeds):
377
+ block_state.ip_adapter_embeds[i] = torch.cat(
378
+ [ip_adapter_embed] * block_state.num_images_per_prompt, dim=0
379
+ )
380
+
381
+ if block_state.negative_ip_adapter_embeds is not None:
382
+ for i, negative_ip_adapter_embed in enumerate(block_state.negative_ip_adapter_embeds):
383
+ block_state.negative_ip_adapter_embeds[i] = torch.cat(
384
+ [negative_ip_adapter_embed] * block_state.num_images_per_prompt, dim=0
385
+ )
386
+
387
+ self.set_block_state(state, block_state)
388
+
389
+ return components, state
390
+
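A sketch of the repeat/view duplication rule described in `StableDiffusionXLInputStep` (not part of the diff); the embedding shapes are illustrative SDXL-like values.

```python
import torch

batch_size, seq_len, dim = 2, 77, 2048
num_images_per_prompt = 3

# Sequence embeddings: (batch, seq, dim) -> (batch * num_images_per_prompt, seq, dim),
# duplicating each prompt's embedding num_images_per_prompt times.
prompt_embeds = torch.randn(batch_size, seq_len, dim)
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
print(prompt_embeds.shape)  # torch.Size([6, 77, 2048])

# Pooled embeddings follow the same pattern with a 2D tensor.
pooled_prompt_embeds = torch.randn(batch_size, 1280)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1)
pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1)
print(pooled_prompt_embeds.shape)  # torch.Size([6, 1280])
```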
391
+
392
+ class StableDiffusionXLImg2ImgSetTimestepsStep(ModularPipelineBlocks):
393
+ model_name = "stable-diffusion-xl"
394
+
395
+ @property
396
+ def expected_components(self) -> List[ComponentSpec]:
397
+ return [
398
+ ComponentSpec("scheduler", EulerDiscreteScheduler),
399
+ ]
400
+
401
+ @property
402
+ def description(self) -> str:
403
+ return (
404
+ "Step that sets the timesteps for the scheduler and determines the initial noise level (latent_timestep) for image-to-image/inpainting generation.\n"
405
+ + "The latent_timestep is calculated from the `strength` parameter - higher strength means starting from a noisier version of the input image."
406
+ )
407
+
408
+ @property
409
+ def inputs(self) -> List[InputParam]:
410
+ return [
411
+ InputParam("num_inference_steps", default=50),
412
+ InputParam("timesteps"),
413
+ InputParam("sigmas"),
414
+ InputParam("denoising_end"),
415
+ InputParam("strength", default=0.3),
416
+ InputParam("denoising_start"),
417
+ # YiYi TODO: do we need num_images_per_prompt here?
418
+ InputParam("num_images_per_prompt", default=1),
419
+ InputParam(
420
+ "batch_size",
421
+ required=True,
422
+ type_hint=int,
423
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt",
424
+ ),
425
+ ]
426
+
427
+ @property
428
+ def intermediate_outputs(self) -> List[str]:
429
+ return [
430
+ OutputParam("timesteps", type_hint=torch.Tensor, description="The timesteps to use for inference"),
431
+ OutputParam(
432
+ "num_inference_steps",
433
+ type_hint=int,
434
+ description="The number of denoising steps to perform at inference time",
435
+ ),
436
+ OutputParam(
437
+ "latent_timestep",
438
+ type_hint=torch.Tensor,
439
+ description="The timestep that represents the initial noise level for image-to-image generation",
440
+ ),
441
+ ]
442
+
443
+ @staticmethod
444
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps with self->components
445
+ def get_timesteps(components, num_inference_steps, strength, device, denoising_start=None):
446
+ # get the original timestep using init_timestep
447
+ if denoising_start is None:
448
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
449
+ t_start = max(num_inference_steps - init_timestep, 0)
450
+
451
+ timesteps = components.scheduler.timesteps[t_start * components.scheduler.order :]
452
+ if hasattr(components.scheduler, "set_begin_index"):
453
+ components.scheduler.set_begin_index(t_start * components.scheduler.order)
454
+
455
+ return timesteps, num_inference_steps - t_start
456
+
457
+ else:
458
+ # Strength is irrelevant if we directly request a timestep to start at;
459
+ # that is, strength is determined by the denoising_start instead.
460
+ discrete_timestep_cutoff = int(
461
+ round(
462
+ components.scheduler.config.num_train_timesteps
463
+ - (denoising_start * components.scheduler.config.num_train_timesteps)
464
+ )
465
+ )
466
+
467
+ num_inference_steps = (components.scheduler.timesteps < discrete_timestep_cutoff).sum().item()
468
+ if components.scheduler.order == 2 and num_inference_steps % 2 == 0:
469
+ # if the scheduler is a 2nd order scheduler we might have to do +1
470
+ # because `num_inference_steps` might be even given that every timestep
471
+ # (except the highest one) is duplicated. If `num_inference_steps` is even it would
472
+ # mean that we cut the timesteps in the middle of the denoising step
473
+ # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
474
+ # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
475
+ num_inference_steps = num_inference_steps + 1
476
+
477
+ # because t_n+1 >= t_n, we slice the timesteps starting from the end
478
+ t_start = len(components.scheduler.timesteps) - num_inference_steps
479
+ timesteps = components.scheduler.timesteps[t_start:]
480
+ if hasattr(components.scheduler, "set_begin_index"):
481
+ components.scheduler.set_begin_index(t_start)
482
+ return timesteps, num_inference_steps
483
+
484
+ @torch.no_grad()
485
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
486
+ block_state = self.get_block_state(state)
487
+
488
+ block_state.device = components._execution_device
489
+
490
+ block_state.timesteps, block_state.num_inference_steps = retrieve_timesteps(
491
+ components.scheduler,
492
+ block_state.num_inference_steps,
493
+ block_state.device,
494
+ block_state.timesteps,
495
+ block_state.sigmas,
496
+ )
497
+
498
+ def denoising_value_valid(dnv):
499
+ return isinstance(dnv, float) and 0 < dnv < 1
500
+
501
+ block_state.timesteps, block_state.num_inference_steps = self.get_timesteps(
502
+ components,
503
+ block_state.num_inference_steps,
504
+ block_state.strength,
505
+ block_state.device,
506
+ denoising_start=block_state.denoising_start
507
+ if denoising_value_valid(block_state.denoising_start)
508
+ else None,
509
+ )
510
+ block_state.latent_timestep = block_state.timesteps[:1].repeat(
511
+ block_state.batch_size * block_state.num_images_per_prompt
512
+ )
513
+
514
+ if (
515
+ block_state.denoising_end is not None
516
+ and isinstance(block_state.denoising_end, float)
517
+ and block_state.denoising_end > 0
518
+ and block_state.denoising_end < 1
519
+ ):
520
+ block_state.discrete_timestep_cutoff = int(
521
+ round(
522
+ components.scheduler.config.num_train_timesteps
523
+ - (block_state.denoising_end * components.scheduler.config.num_train_timesteps)
524
+ )
525
+ )
526
+ block_state.num_inference_steps = len(
527
+ list(filter(lambda ts: ts >= block_state.discrete_timestep_cutoff, block_state.timesteps))
528
+ )
529
+ block_state.timesteps = block_state.timesteps[: block_state.num_inference_steps]
530
+
531
+ self.set_block_state(state, block_state)
532
+
533
+ return components, state
534
+
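A worked example of the `strength` arithmetic in `get_timesteps` above (not part of the diff); the numbers are illustrative.

```python
num_inference_steps = 30
strength = 0.5

# Same computation as the denoising_start is None branch above.
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 15
t_start = max(num_inference_steps - init_timestep, 0)                          # 15

# With a first-order scheduler, only timesteps[t_start:] are run (15 of the 30 steps),
# and the first of those becomes `latent_timestep`: the noise level the input image
# is pushed to before denoising starts.
print(init_timestep, t_start, num_inference_steps - t_start)  # 15 15 15
```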
535
+
536
+ class StableDiffusionXLSetTimestepsStep(ModularPipelineBlocks):
537
+ model_name = "stable-diffusion-xl"
538
+
539
+ @property
540
+ def expected_components(self) -> List[ComponentSpec]:
541
+ return [
542
+ ComponentSpec("scheduler", EulerDiscreteScheduler),
543
+ ]
544
+
545
+ @property
546
+ def description(self) -> str:
547
+ return "Step that sets the scheduler's timesteps for inference"
548
+
549
+ @property
550
+ def inputs(self) -> List[InputParam]:
551
+ return [
552
+ InputParam("num_inference_steps", default=50),
553
+ InputParam("timesteps"),
554
+ InputParam("sigmas"),
555
+ InputParam("denoising_end"),
556
+ ]
557
+
558
+ @property
559
+ def intermediate_outputs(self) -> List[OutputParam]:
560
+ return [
561
+ OutputParam("timesteps", type_hint=torch.Tensor, description="The timesteps to use for inference"),
562
+ OutputParam(
563
+ "num_inference_steps",
564
+ type_hint=int,
565
+ description="The number of denoising steps to perform at inference time",
566
+ ),
567
+ ]
568
+
569
+ @torch.no_grad()
570
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
571
+ block_state = self.get_block_state(state)
572
+
573
+ block_state.device = components._execution_device
574
+
575
+ block_state.timesteps, block_state.num_inference_steps = retrieve_timesteps(
576
+ components.scheduler,
577
+ block_state.num_inference_steps,
578
+ block_state.device,
579
+ block_state.timesteps,
580
+ block_state.sigmas,
581
+ )
582
+
583
+ if (
584
+ block_state.denoising_end is not None
585
+ and isinstance(block_state.denoising_end, float)
586
+ and block_state.denoising_end > 0
587
+ and block_state.denoising_end < 1
588
+ ):
589
+ block_state.discrete_timestep_cutoff = int(
590
+ round(
591
+ components.scheduler.config.num_train_timesteps
592
+ - (block_state.denoising_end * components.scheduler.config.num_train_timesteps)
593
+ )
594
+ )
595
+ block_state.num_inference_steps = len(
596
+ list(filter(lambda ts: ts >= block_state.discrete_timestep_cutoff, block_state.timesteps))
597
+ )
598
+ block_state.timesteps = block_state.timesteps[: block_state.num_inference_steps]
599
+
600
+ self.set_block_state(state, block_state)
601
+ return components, state
602
+
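A worked example of the `denoising_end` cutoff applied above (not part of the diff), using a stand-in linear timestep schedule; the values are illustrative.

```python
import torch

num_train_timesteps = 1000
timesteps = torch.linspace(999, 0, 30)  # stand-in for scheduler.timesteps

denoising_end = 0.8
cutoff = int(round(num_train_timesteps - denoising_end * num_train_timesteps))  # 200
kept = timesteps[timesteps >= cutoff]
print(len(kept))  # 24: this pass runs only the first 80% of the 30 steps

# A second pipeline run with denoising_start=0.8 computes the same cutoff and runs the
# remaining timesteps (those below it), picking up where this pass stopped.
```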
603
+
604
+ class StableDiffusionXLInpaintPrepareLatentsStep(ModularPipelineBlocks):
605
+ model_name = "stable-diffusion-xl"
606
+
607
+ @property
608
+ def expected_components(self) -> List[ComponentSpec]:
609
+ return [
610
+ ComponentSpec("scheduler", EulerDiscreteScheduler),
611
+ ]
612
+
613
+ @property
614
+ def description(self) -> str:
615
+ return "Step that prepares the latents for the inpainting process"
616
+
617
+ @property
618
+ def inputs(self) -> List[Tuple[str, Any]]:
619
+ return [
620
+ InputParam("latents"),
621
+ InputParam("num_images_per_prompt", default=1),
622
+ InputParam("denoising_start"),
623
+ InputParam(
624
+ "strength",
625
+ default=0.9999,
626
+ description="Conceptually, indicates how much to transform the reference `image` (the masked portion of image for inpainting). Must be between 0 and 1. `image` "
627
+ "will be used as a starting point, adding more noise to it the larger the `strength`. The number of "
628
+ "denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will "
629
+ "be maximum and the denoising process will run for the full number of iterations specified in "
630
+ "`num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that in the case of "
631
+ "`denoising_start` being declared as an integer, the value of `strength` will be ignored.",
632
+ ),
633
+ InputParam("generator"),
634
+ InputParam(
635
+ "batch_size",
636
+ required=True,
637
+ type_hint=int,
638
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.",
639
+ ),
640
+ InputParam(
641
+ "latent_timestep",
642
+ required=True,
643
+ type_hint=torch.Tensor,
644
+ description="The timestep that represents the initial noise level for image-to-image/inpainting generation. Can be generated in set_timesteps step.",
645
+ ),
646
+ InputParam(
647
+ "image_latents",
648
+ required=True,
649
+ type_hint=torch.Tensor,
650
+ description="The latents representing the reference image for image-to-image/inpainting generation. Can be generated in vae_encode step.",
651
+ ),
652
+ InputParam(
653
+ "mask",
654
+ required=True,
655
+ type_hint=torch.Tensor,
656
+ description="The mask for the inpainting generation. Can be generated in vae_encode step.",
657
+ ),
658
+ InputParam(
659
+ "masked_image_latents",
660
+ type_hint=torch.Tensor,
661
+ description="The masked image latents for the inpainting generation (only for inpainting-specific unet). Can be generated in vae_encode step.",
662
+ ),
663
+ InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"),
664
+ ]
665
+
666
+ @property
667
+ def intermediate_outputs(self) -> List[str]:
668
+ return [
669
+ OutputParam(
670
+ "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process"
671
+ ),
672
+ OutputParam(
673
+ "noise",
674
+ type_hint=torch.Tensor,
675
+ description="The noise added to the image latents, used for inpainting generation",
676
+ ),
677
+ ]
678
+
679
+ # Modified from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline._encode_vae_image with self->components
680
+ # YiYi TODO: update the _encode_vae_image so that we can use #Copied from
681
+ @staticmethod
682
+ def _encode_vae_image(components, image: torch.Tensor, generator: torch.Generator):
683
+ latents_mean = latents_std = None
684
+ if hasattr(components.vae.config, "latents_mean") and components.vae.config.latents_mean is not None:
685
+ latents_mean = torch.tensor(components.vae.config.latents_mean).view(1, 4, 1, 1)
686
+ if hasattr(components.vae.config, "latents_std") and components.vae.config.latents_std is not None:
687
+ latents_std = torch.tensor(components.vae.config.latents_std).view(1, 4, 1, 1)
688
+
689
+ dtype = image.dtype
690
+ if components.vae.config.force_upcast:
691
+ image = image.float()
692
+ components.vae.to(dtype=torch.float32)
693
+
694
+ if isinstance(generator, list):
695
+ image_latents = [
696
+ retrieve_latents(components.vae.encode(image[i : i + 1]), generator=generator[i])
697
+ for i in range(image.shape[0])
698
+ ]
699
+ image_latents = torch.cat(image_latents, dim=0)
700
+ else:
701
+ image_latents = retrieve_latents(components.vae.encode(image), generator=generator)
702
+
703
+ if components.vae.config.force_upcast:
704
+ components.vae.to(dtype)
705
+
706
+ image_latents = image_latents.to(dtype)
707
+ if latents_mean is not None and latents_std is not None:
708
+ latents_mean = latents_mean.to(device=image_latents.device, dtype=dtype)
709
+ latents_std = latents_std.to(device=image_latents.device, dtype=dtype)
710
+ image_latents = (image_latents - latents_mean) * components.vae.config.scaling_factor / latents_std
711
+ else:
712
+ image_latents = components.vae.config.scaling_factor * image_latents
713
+
714
+ return image_latents
715
+
716
+ # Modified from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline.prepare_latents adding components as first argument
717
+ def prepare_latents_inpaint(
718
+ self,
719
+ components,
720
+ batch_size,
721
+ num_channels_latents,
722
+ height,
723
+ width,
724
+ dtype,
725
+ device,
726
+ generator,
727
+ latents=None,
728
+ image=None,
729
+ timestep=None,
730
+ is_strength_max=True,
731
+ add_noise=True,
732
+ ):
733
+ shape = (
734
+ batch_size,
735
+ num_channels_latents,
736
+ int(height) // components.vae_scale_factor,
737
+ int(width) // components.vae_scale_factor,
738
+ )
739
+ if isinstance(generator, list) and len(generator) != batch_size:
740
+ raise ValueError(
741
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
742
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
743
+ )
744
+
745
+ if (image is None or timestep is None) and not is_strength_max:
746
+ raise ValueError(
747
+ "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
748
+ "However, either the image or the noise timestep has not been provided."
749
+ )
750
+
751
+ if image.shape[1] == 4:
752
+ image_latents = image.to(device=device, dtype=dtype)
753
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
754
+ elif latents is None and not is_strength_max:
755
+ image = image.to(device=device, dtype=dtype)
756
+ image_latents = self._encode_vae_image(components, image=image, generator=generator)
757
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
758
+
759
+ if latents is None and add_noise:
760
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
761
+ # if strength is 1.0 then initialise the latents to noise, else initialise to image + noise
762
+ latents = noise if is_strength_max else components.scheduler.add_noise(image_latents, noise, timestep)
763
+ # if pure noise then scale the initial latents by the Scheduler's init sigma
764
+ latents = latents * components.scheduler.init_noise_sigma if is_strength_max else latents
765
+ elif add_noise:
766
+ noise = latents.to(device)
767
+ latents = noise * components.scheduler.init_noise_sigma
768
+ else:
769
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
770
+ latents = image_latents.to(device)
771
+
772
+ outputs = (latents, noise, image_latents)
773
+
774
+ return outputs
775
+
776
+ # modified from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline.prepare_mask_latents
777
+ # do not accept do_classifier_free_guidance
778
+ def prepare_mask_latents(
779
+ self, components, mask, masked_image, batch_size, height, width, dtype, device, generator
780
+ ):
781
+ # resize the mask to latents shape as we concatenate the mask to the latents
782
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
783
+ # and half precision
784
+ mask = torch.nn.functional.interpolate(
785
+ mask, size=(height // components.vae_scale_factor, width // components.vae_scale_factor)
786
+ )
787
+ mask = mask.to(device=device, dtype=dtype)
788
+
789
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
790
+ if mask.shape[0] < batch_size:
791
+ if not batch_size % mask.shape[0] == 0:
792
+ raise ValueError(
793
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
794
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
795
+ " of masks that you pass is divisible by the total requested batch size."
796
+ )
797
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
798
+
799
+ if masked_image is not None and masked_image.shape[1] == 4:
800
+ masked_image_latents = masked_image
801
+ else:
802
+ masked_image_latents = None
803
+
804
+ if masked_image is not None:
805
+ if masked_image_latents is None:
806
+ masked_image = masked_image.to(device=device, dtype=dtype)
807
+ masked_image_latents = self._encode_vae_image(components, masked_image, generator=generator)
808
+
809
+ if masked_image_latents.shape[0] < batch_size:
810
+ if not batch_size % masked_image_latents.shape[0] == 0:
811
+ raise ValueError(
812
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
813
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
814
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
815
+ )
816
+ masked_image_latents = masked_image_latents.repeat(
817
+ batch_size // masked_image_latents.shape[0], 1, 1, 1
818
+ )
819
+
820
+ # aligning device to prevent device errors when concatenating it with the latent model input
821
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
822
+
823
+ return mask, masked_image_latents
824
+
825
+ @torch.no_grad()
826
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
827
+ block_state = self.get_block_state(state)
828
+
829
+ block_state.dtype = block_state.dtype if block_state.dtype is not None else components.vae.dtype
830
+ block_state.device = components._execution_device
831
+
832
+ block_state.is_strength_max = block_state.strength == 1.0
833
+
834
+ # for a non-inpainting-specific unet, we do not need masked_image_latents
835
+ if hasattr(components, "unet") and components.unet is not None:
836
+ if components.unet.config.in_channels == 4:
837
+ block_state.masked_image_latents = None
838
+
839
+ block_state.add_noise = True if block_state.denoising_start is None else False
840
+
841
+ block_state.height = block_state.image_latents.shape[-2] * components.vae_scale_factor
842
+ block_state.width = block_state.image_latents.shape[-1] * components.vae_scale_factor
843
+
844
+ block_state.latents, block_state.noise, block_state.image_latents = self.prepare_latents_inpaint(
845
+ components,
846
+ block_state.batch_size * block_state.num_images_per_prompt,
847
+ components.num_channels_latents,
848
+ block_state.height,
849
+ block_state.width,
850
+ block_state.dtype,
851
+ block_state.device,
852
+ block_state.generator,
853
+ block_state.latents,
854
+ image=block_state.image_latents,
855
+ timestep=block_state.latent_timestep,
856
+ is_strength_max=block_state.is_strength_max,
857
+ add_noise=block_state.add_noise,
858
+ )
859
+
860
+ # 7. Prepare mask latent variables
861
+ block_state.mask, block_state.masked_image_latents = self.prepare_mask_latents(
862
+ components,
863
+ block_state.mask,
864
+ block_state.masked_image_latents,
865
+ block_state.batch_size * block_state.num_images_per_prompt,
866
+ block_state.height,
867
+ block_state.width,
868
+ block_state.dtype,
869
+ block_state.device,
870
+ block_state.generator,
871
+ )
872
+
873
+ self.set_block_state(state, block_state)
874
+
875
+ return components, state
876
+
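A sketch of the mask preparation performed by `prepare_mask_latents` above (not part of the diff): the mask is resized to the latent grid and tiled to the effective batch size. The SDXL-like numbers (vae_scale_factor of 8, 1024x1024 input) are assumptions.

```python
import torch
import torch.nn.functional as F

vae_scale_factor = 8
height, width = 1024, 1024
batch_size = 4  # batch_size * num_images_per_prompt

mask = (torch.rand(1, 1, height, width) > 0.5).float()
mask = F.interpolate(mask, size=(height // vae_scale_factor, width // vae_scale_factor))
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
print(mask.shape)  # torch.Size([4, 1, 128, 128])
```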
877
+
878
+ class StableDiffusionXLImg2ImgPrepareLatentsStep(ModularPipelineBlocks):
879
+ model_name = "stable-diffusion-xl"
880
+
881
+ @property
882
+ def expected_components(self) -> List[ComponentSpec]:
883
+ return [
884
+ ComponentSpec("vae", AutoencoderKL),
885
+ ComponentSpec("scheduler", EulerDiscreteScheduler),
886
+ ]
887
+
888
+ @property
889
+ def description(self) -> str:
890
+ return "Step that prepares the latents for the image-to-image generation process"
891
+
892
+ @property
893
+ def inputs(self) -> List[Tuple[str, Any]]:
894
+ return [
895
+ InputParam("latents"),
896
+ InputParam("num_images_per_prompt", default=1),
897
+ InputParam("denoising_start"),
898
+ InputParam("generator"),
899
+ InputParam(
900
+ "latent_timestep",
901
+ required=True,
902
+ type_hint=torch.Tensor,
903
+ description="The timestep that represents the initial noise level for image-to-image/inpainting generation. Can be generated in set_timesteps step.",
904
+ ),
905
+ InputParam(
906
+ "image_latents",
907
+ required=True,
908
+ type_hint=torch.Tensor,
909
+ description="The latents representing the reference image for image-to-image/inpainting generation. Can be generated in vae_encode step.",
910
+ ),
911
+ InputParam(
912
+ "batch_size",
913
+ required=True,
914
+ type_hint=int,
915
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.",
916
+ ),
917
+ InputParam("dtype", required=True, type_hint=torch.dtype, description="The dtype of the model inputs"),
918
+ ]
919
+
920
+ @property
921
+ def intermediate_outputs(self) -> List[OutputParam]:
922
+ return [
923
+ OutputParam(
924
+ "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process"
925
+ )
926
+ ]
927
+
928
+ @torch.no_grad()
929
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
930
+ block_state = self.get_block_state(state)
931
+
932
+ block_state.dtype = block_state.dtype if block_state.dtype is not None else components.vae.dtype
933
+ block_state.device = components._execution_device
934
+ block_state.add_noise = True if block_state.denoising_start is None else False
935
+ if block_state.latents is None:
936
+ block_state.latents = prepare_latents_img2img(
937
+ components.vae,
938
+ components.scheduler,
939
+ block_state.image_latents,
940
+ block_state.latent_timestep,
941
+ block_state.batch_size,
942
+ block_state.num_images_per_prompt,
943
+ block_state.dtype,
944
+ block_state.device,
945
+ block_state.generator,
946
+ block_state.add_noise,
947
+ )
948
+
949
+ self.set_block_state(state, block_state)
950
+
951
+ return components, state
952
+
953
+
954
+ class StableDiffusionXLPrepareLatentsStep(ModularPipelineBlocks):
955
+ model_name = "stable-diffusion-xl"
956
+
957
+ @property
958
+ def expected_components(self) -> List[ComponentSpec]:
959
+ return [
960
+ ComponentSpec("scheduler", EulerDiscreteScheduler),
961
+ ComponentSpec("vae", AutoencoderKL),
962
+ ]
963
+
964
+ @property
965
+ def description(self) -> str:
966
+ return "Prepare latents step that prepares the latents for the text-to-image generation process"
967
+
968
+ @property
969
+ def inputs(self) -> List[InputParam]:
970
+ return [
971
+ InputParam("height"),
972
+ InputParam("width"),
973
+ InputParam("latents"),
974
+ InputParam("num_images_per_prompt", default=1),
975
+ InputParam("generator"),
976
+ InputParam(
977
+ "batch_size",
978
+ required=True,
979
+ type_hint=int,
980
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.",
981
+ ),
982
+ InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"),
983
+ ]
984
+
985
+ @property
986
+ def intermediate_outputs(self) -> List[OutputParam]:
987
+ return [
988
+ OutputParam(
989
+ "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process"
990
+ )
991
+ ]
992
+
993
+ @staticmethod
994
+ def check_inputs(components, block_state):
995
+ if (
996
+ block_state.height is not None
997
+ and block_state.height % components.vae_scale_factor != 0
998
+ or block_state.width is not None
999
+ and block_state.width % components.vae_scale_factor != 0
1000
+ ):
1001
+ raise ValueError(
1002
+ f"`height` and `width` have to be divisible by {components.vae_scale_factor} but are {block_state.height} and {block_state.width}."
1003
+ )
1004
+
1005
+ @staticmethod
1006
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with self->comp
1007
+ def prepare_latents(comp, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
1008
+ shape = (
1009
+ batch_size,
1010
+ num_channels_latents,
1011
+ int(height) // comp.vae_scale_factor,
1012
+ int(width) // comp.vae_scale_factor,
1013
+ )
1014
+ if isinstance(generator, list) and len(generator) != batch_size:
1015
+ raise ValueError(
1016
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
1017
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
1018
+ )
1019
+
1020
+ if latents is None:
1021
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
1022
+ else:
1023
+ latents = latents.to(device)
1024
+
1025
+ # scale the initial noise by the standard deviation required by the scheduler
1026
+ latents = latents * comp.scheduler.init_noise_sigma
1027
+ return latents
1028
+
1029
+ @torch.no_grad()
1030
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
1031
+ block_state = self.get_block_state(state)
1032
+
1033
+ if block_state.dtype is None:
1034
+ block_state.dtype = components.vae.dtype
1035
+
1036
+ block_state.device = components._execution_device
1037
+
1038
+ self.check_inputs(components, block_state)
1039
+
1040
+ block_state.height = block_state.height or components.default_sample_size * components.vae_scale_factor
1041
+ block_state.width = block_state.width or components.default_sample_size * components.vae_scale_factor
1042
+ block_state.num_channels_latents = components.num_channels_latents
1043
+ block_state.latents = self.prepare_latents(
1044
+ components,
1045
+ block_state.batch_size * block_state.num_images_per_prompt,
1046
+ block_state.num_channels_latents,
1047
+ block_state.height,
1048
+ block_state.width,
1049
+ block_state.dtype,
1050
+ block_state.device,
1051
+ block_state.generator,
1052
+ block_state.latents,
1053
+ )
1054
+
1055
+ self.set_block_state(state, block_state)
1056
+
1057
+ return components, state
1058
+
1059
+
1060
+ class StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep(ModularPipelineBlocks):
1061
+ model_name = "stable-diffusion-xl"
1062
+
1063
+ @property
1064
+ def expected_configs(self) -> List[ConfigSpec]:
1065
+ return [
1066
+ ConfigSpec("requires_aesthetics_score", False),
1067
+ ]
1068
+
1069
+ @property
1070
+ def expected_components(self) -> List[ComponentSpec]:
1071
+ return [
1072
+ ComponentSpec("unet", UNet2DConditionModel),
1073
+ ComponentSpec(
1074
+ "guider",
1075
+ ClassifierFreeGuidance,
1076
+ config=FrozenDict({"guidance_scale": 7.5}),
1077
+ default_creation_method="from_config",
1078
+ ),
1079
+ ]
1080
+
1081
+ @property
1082
+ def description(self) -> str:
1083
+ return "Step that prepares the additional conditioning for the image-to-image/inpainting generation process"
1084
+
1085
+ @property
1086
+ def inputs(self) -> List[Tuple[str, Any]]:
1087
+ return [
1088
+ InputParam("original_size"),
1089
+ InputParam("target_size"),
1090
+ InputParam("negative_original_size"),
1091
+ InputParam("negative_target_size"),
1092
+ InputParam("crops_coords_top_left", default=(0, 0)),
1093
+ InputParam("negative_crops_coords_top_left", default=(0, 0)),
1094
+ InputParam("num_images_per_prompt", default=1),
1095
+ InputParam("aesthetic_score", default=6.0),
1096
+ InputParam("negative_aesthetic_score", default=2.0),
1097
+ InputParam(
1098
+ "latents",
1099
+ required=True,
1100
+ type_hint=torch.Tensor,
1101
+ description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.",
1102
+ ),
1103
+ InputParam(
1104
+ "pooled_prompt_embeds",
1105
+ required=True,
1106
+ type_hint=torch.Tensor,
1107
+ description="The pooled prompt embeddings to use for the denoising process (used to determine shapes and dtypes for other additional conditioning inputs). Can be generated in text_encoder step.",
1108
+ ),
1109
+ InputParam(
1110
+ "batch_size",
1111
+ required=True,
1112
+ type_hint=int,
1113
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.",
1114
+ ),
1115
+ ]
1116
+
1117
+ @property
1118
+ def intermediate_outputs(self) -> List[OutputParam]:
1119
+ return [
1120
+ OutputParam(
1121
+ "add_time_ids",
1122
+ type_hint=torch.Tensor,
1123
+ kwargs_type="guider_input_fields",
1124
+ description="The time ids to condition the denoising process",
1125
+ ),
1126
+ OutputParam(
1127
+ "negative_add_time_ids",
1128
+ type_hint=torch.Tensor,
1129
+ kwargs_type="guider_input_fields",
1130
+ description="The negative time ids to condition the denoising process",
1131
+ ),
1132
+ OutputParam("timestep_cond", type_hint=torch.Tensor, description="The timestep cond to use for LCM"),
1133
+ ]
1134
+
1135
+ @staticmethod
1136
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids with self->components
1137
+ def _get_add_time_ids(
1138
+ components,
1139
+ original_size,
1140
+ crops_coords_top_left,
1141
+ target_size,
1142
+ aesthetic_score,
1143
+ negative_aesthetic_score,
1144
+ negative_original_size,
1145
+ negative_crops_coords_top_left,
1146
+ negative_target_size,
1147
+ dtype,
1148
+ text_encoder_projection_dim=None,
1149
+ ):
1150
+ if components.config.requires_aesthetics_score:
1151
+ add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
1152
+ add_neg_time_ids = list(
1153
+ negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
1154
+ )
1155
+ else:
1156
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
1157
+ add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
1158
+
1159
+ passed_add_embed_dim = (
1160
+ components.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
1161
+ )
1162
+ expected_add_embed_dim = components.unet.add_embedding.linear_1.in_features
1163
+
1164
+ if (
1165
+ expected_add_embed_dim > passed_add_embed_dim
1166
+ and (expected_add_embed_dim - passed_add_embed_dim) == components.unet.config.addition_time_embed_dim
1167
+ ):
1168
+ raise ValueError(
1169
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
1170
+ )
1171
+ elif (
1172
+ expected_add_embed_dim < passed_add_embed_dim
1173
+ and (passed_add_embed_dim - expected_add_embed_dim) == components.unet.config.addition_time_embed_dim
1174
+ ):
1175
+ raise ValueError(
1176
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
1177
+ )
1178
+ elif expected_add_embed_dim != passed_add_embed_dim:
1179
+ raise ValueError(
1180
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
1181
+ )
1182
+
1183
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1184
+ add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
1185
+
1186
+ return add_time_ids, add_neg_time_ids
1187
+
1188
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
1189
+ def get_guidance_scale_embedding(
1190
+ self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
1191
+ ) -> torch.Tensor:
1192
+ """
1193
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
1194
+
1195
+ Args:
1196
+ w (`torch.Tensor`):
1197
+ Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
1198
+ embedding_dim (`int`, *optional*, defaults to 512):
1199
+ Dimension of the embeddings to generate.
1200
+ dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
1201
+ Data type of the generated embeddings.
1202
+
1203
+ Returns:
1204
+ `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
1205
+ """
1206
+ assert len(w.shape) == 1
1207
+ w = w * 1000.0
1208
+
1209
+ half_dim = embedding_dim // 2
1210
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
1211
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
1212
+ emb = w.to(dtype)[:, None] * emb[None, :]
1213
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
1214
+ if embedding_dim % 2 == 1: # zero pad
1215
+ emb = torch.nn.functional.pad(emb, (0, 1))
1216
+ assert emb.shape == (w.shape[0], embedding_dim)
1217
+ return emb
1218
+
1219
+ @torch.no_grad()
1220
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
1221
+ block_state = self.get_block_state(state)
1222
+ block_state.device = components._execution_device
1223
+
1224
+ block_state.vae_scale_factor = components.vae_scale_factor
1225
+
1226
+ block_state.height, block_state.width = block_state.latents.shape[-2:]
1227
+ block_state.height = block_state.height * block_state.vae_scale_factor
1228
+ block_state.width = block_state.width * block_state.vae_scale_factor
1229
+
1230
+ block_state.original_size = block_state.original_size or (block_state.height, block_state.width)
1231
+ block_state.target_size = block_state.target_size or (block_state.height, block_state.width)
1232
+
1233
+ block_state.text_encoder_projection_dim = int(block_state.pooled_prompt_embeds.shape[-1])
1234
+
1235
+ if block_state.negative_original_size is None:
1236
+ block_state.negative_original_size = block_state.original_size
1237
+ if block_state.negative_target_size is None:
1238
+ block_state.negative_target_size = block_state.target_size
1239
+
1240
+ block_state.add_time_ids, block_state.negative_add_time_ids = self._get_add_time_ids(
1241
+ components,
1242
+ block_state.original_size,
1243
+ block_state.crops_coords_top_left,
1244
+ block_state.target_size,
1245
+ block_state.aesthetic_score,
1246
+ block_state.negative_aesthetic_score,
1247
+ block_state.negative_original_size,
1248
+ block_state.negative_crops_coords_top_left,
1249
+ block_state.negative_target_size,
1250
+ dtype=block_state.pooled_prompt_embeds.dtype,
1251
+ text_encoder_projection_dim=block_state.text_encoder_projection_dim,
1252
+ )
1253
+ block_state.add_time_ids = block_state.add_time_ids.repeat(
1254
+ block_state.batch_size * block_state.num_images_per_prompt, 1
1255
+ ).to(device=block_state.device)
1256
+ block_state.negative_add_time_ids = block_state.negative_add_time_ids.repeat(
1257
+ block_state.batch_size * block_state.num_images_per_prompt, 1
1258
+ ).to(device=block_state.device)
1259
+
1260
+ # Optionally get Guidance Scale Embedding for LCM
1261
+ block_state.timestep_cond = None
1262
+ if (
1263
+ hasattr(components, "unet")
1264
+ and components.unet is not None
1265
+ and components.unet.config.time_cond_proj_dim is not None
1266
+ ):
1267
+ # TODO(yiyi, aryan): Ideally, this should be `embedded_guidance_scale` instead of pulling from guider. Guider scales should be different from this!
1268
+ block_state.guidance_scale_tensor = torch.tensor(components.guider.guidance_scale - 1).repeat(
1269
+ block_state.batch_size * block_state.num_images_per_prompt
1270
+ )
1271
+ block_state.timestep_cond = self.get_guidance_scale_embedding(
1272
+ block_state.guidance_scale_tensor, embedding_dim=components.unet.config.time_cond_proj_dim
1273
+ ).to(device=block_state.device, dtype=block_state.latents.dtype)
1274
+
1275
+ self.set_block_state(state, block_state)
1276
+ return components, state
1277
+
1278
+
1279
+ class StableDiffusionXLPrepareAdditionalConditioningStep(ModularPipelineBlocks):
1280
+ model_name = "stable-diffusion-xl"
1281
+
1282
+ @property
1283
+ def description(self) -> str:
1284
+ return "Step that prepares the additional conditioning for the text-to-image generation process"
1285
+
1286
+ @property
1287
+ def expected_components(self) -> List[ComponentSpec]:
1288
+ return [
1289
+ ComponentSpec("unet", UNet2DConditionModel),
1290
+ ComponentSpec(
1291
+ "guider",
1292
+ ClassifierFreeGuidance,
1293
+ config=FrozenDict({"guidance_scale": 7.5}),
1294
+ default_creation_method="from_config",
1295
+ ),
1296
+ ]
1297
+
1298
+ @property
1299
+ def inputs(self) -> List[Tuple[str, Any]]:
1300
+ return [
1301
+ InputParam("original_size"),
1302
+ InputParam("target_size"),
1303
+ InputParam("negative_original_size"),
1304
+ InputParam("negative_target_size"),
1305
+ InputParam("crops_coords_top_left", default=(0, 0)),
1306
+ InputParam("negative_crops_coords_top_left", default=(0, 0)),
1307
+ InputParam("num_images_per_prompt", default=1),
1308
+ InputParam(
1309
+ "latents",
1310
+ required=True,
1311
+ type_hint=torch.Tensor,
1312
+ description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.",
1313
+ ),
1314
+ InputParam(
1315
+ "pooled_prompt_embeds",
1316
+ required=True,
1317
+ type_hint=torch.Tensor,
1318
+ description="The pooled prompt embeddings to use for the denoising process (used to determine shapes and dtypes for other additional conditioning inputs). Can be generated in text_encoder step.",
1319
+ ),
1320
+ InputParam(
1321
+ "batch_size",
1322
+ required=True,
1323
+ type_hint=int,
1324
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.",
1325
+ ),
1326
+ ]
1327
+
1328
+ @property
1329
+ def intermediate_outputs(self) -> List[OutputParam]:
1330
+ return [
1331
+ OutputParam(
1332
+ "add_time_ids",
1333
+ type_hint=torch.Tensor,
1334
+ kwargs_type="guider_input_fields",
1335
+ description="The time ids to condition the denoising process",
1336
+ ),
1337
+ OutputParam(
1338
+ "negative_add_time_ids",
1339
+ type_hint=torch.Tensor,
1340
+ kwargs_type="guider_input_fields",
1341
+ description="The negative time ids to condition the denoising process",
1342
+ ),
1343
+ OutputParam("timestep_cond", type_hint=torch.Tensor, description="The timestep cond to use for LCM"),
1344
+ ]
1345
+
1346
+ @staticmethod
1347
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids with self->components
1348
+ def _get_add_time_ids(
1349
+ components, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
1350
+ ):
1351
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
1352
+
1353
+ passed_add_embed_dim = (
1354
+ components.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
1355
+ )
1356
+ expected_add_embed_dim = components.unet.add_embedding.linear_1.in_features
1357
+
1358
+ if expected_add_embed_dim != passed_add_embed_dim:
1359
+ raise ValueError(
1360
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
1361
+ )
1362
+
1363
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1364
+ return add_time_ids
1365
+
1366
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
1367
+ def get_guidance_scale_embedding(
1368
+ self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
1369
+ ) -> torch.Tensor:
1370
+ """
1371
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
1372
+
1373
+ Args:
1374
+ w (`torch.Tensor`):
1375
+ Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
1376
+ embedding_dim (`int`, *optional*, defaults to 512):
1377
+ Dimension of the embeddings to generate.
1378
+ dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
1379
+ Data type of the generated embeddings.
1380
+
1381
+ Returns:
1382
+ `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
1383
+ """
1384
+ assert len(w.shape) == 1
1385
+ w = w * 1000.0
1386
+
1387
+ half_dim = embedding_dim // 2
1388
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
1389
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
1390
+ emb = w.to(dtype)[:, None] * emb[None, :]
1391
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
1392
+ if embedding_dim % 2 == 1: # zero pad
1393
+ emb = torch.nn.functional.pad(emb, (0, 1))
1394
+ assert emb.shape == (w.shape[0], embedding_dim)
1395
+ return emb
1396
+
1397
+ @torch.no_grad()
1398
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
1399
+ block_state = self.get_block_state(state)
1400
+ block_state.device = components._execution_device
1401
+
1402
+ block_state.height, block_state.width = block_state.latents.shape[-2:]
1403
+ block_state.height = block_state.height * components.vae_scale_factor
1404
+ block_state.width = block_state.width * components.vae_scale_factor
1405
+
1406
+ block_state.original_size = block_state.original_size or (block_state.height, block_state.width)
1407
+ block_state.target_size = block_state.target_size or (block_state.height, block_state.width)
1408
+
1409
+ block_state.text_encoder_projection_dim = int(block_state.pooled_prompt_embeds.shape[-1])
1410
+
1411
+ block_state.add_time_ids = self._get_add_time_ids(
1412
+ components,
1413
+ block_state.original_size,
1414
+ block_state.crops_coords_top_left,
1415
+ block_state.target_size,
1416
+ block_state.pooled_prompt_embeds.dtype,
1417
+ text_encoder_projection_dim=block_state.text_encoder_projection_dim,
1418
+ )
1419
+ if block_state.negative_original_size is not None and block_state.negative_target_size is not None:
1420
+ block_state.negative_add_time_ids = self._get_add_time_ids(
1421
+ components,
1422
+ block_state.negative_original_size,
1423
+ block_state.negative_crops_coords_top_left,
1424
+ block_state.negative_target_size,
1425
+ block_state.pooled_prompt_embeds.dtype,
1426
+ text_encoder_projection_dim=block_state.text_encoder_projection_dim,
1427
+ )
1428
+ else:
1429
+ block_state.negative_add_time_ids = block_state.add_time_ids
1430
+
1431
+ block_state.add_time_ids = block_state.add_time_ids.repeat(
1432
+ block_state.batch_size * block_state.num_images_per_prompt, 1
1433
+ ).to(device=block_state.device)
1434
+ block_state.negative_add_time_ids = block_state.negative_add_time_ids.repeat(
1435
+ block_state.batch_size * block_state.num_images_per_prompt, 1
1436
+ ).to(device=block_state.device)
1437
+
1438
+ # Optionally get Guidance Scale Embedding for LCM
1439
+ block_state.timestep_cond = None
1440
+ if (
1441
+ hasattr(components, "unet")
1442
+ and components.unet is not None
1443
+ and components.unet.config.time_cond_proj_dim is not None
1444
+ ):
1445
+ # TODO(yiyi, aryan): Ideally, this should be `embedded_guidance_scale` instead of pulling from guider. Guider scales should be different from this!
1446
+ block_state.guidance_scale_tensor = torch.tensor(components.guider.guidance_scale - 1).repeat(
1447
+ block_state.batch_size * block_state.num_images_per_prompt
1448
+ )
1449
+ block_state.timestep_cond = self.get_guidance_scale_embedding(
1450
+ block_state.guidance_scale_tensor, embedding_dim=components.unet.config.time_cond_proj_dim
1451
+ ).to(device=block_state.device, dtype=block_state.latents.dtype)
1452
+
1453
+ self.set_block_state(state, block_state)
1454
+ return components, state
1455
+
1456
+
1457
+ class StableDiffusionXLControlNetInputStep(ModularPipelineBlocks):
1458
+ model_name = "stable-diffusion-xl"
1459
+
1460
+ @property
1461
+ def expected_components(self) -> List[ComponentSpec]:
1462
+ return [
1463
+ ComponentSpec("controlnet", ControlNetModel),
1464
+ ComponentSpec(
1465
+ "control_image_processor",
1466
+ VaeImageProcessor,
1467
+ config=FrozenDict({"do_convert_rgb": True, "do_normalize": False}),
1468
+ default_creation_method="from_config",
1469
+ ),
1470
+ ]
1471
+
1472
+ @property
1473
+ def description(self) -> str:
1474
+ return "step that prepare inputs for controlnet"
1475
+
1476
+ @property
1477
+ def inputs(self) -> List[Tuple[str, Any]]:
1478
+ return [
1479
+ InputParam("control_image", required=True),
1480
+ InputParam("control_guidance_start", default=0.0),
1481
+ InputParam("control_guidance_end", default=1.0),
1482
+ InputParam("controlnet_conditioning_scale", default=1.0),
1483
+ InputParam("guess_mode", default=False),
1484
+ InputParam("num_images_per_prompt", default=1),
1485
+ InputParam(
1486
+ "latents",
1487
+ required=True,
1488
+ type_hint=torch.Tensor,
1489
+ description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.",
1490
+ ),
1491
+ InputParam(
1492
+ "batch_size",
1493
+ required=True,
1494
+ type_hint=int,
1495
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.",
1496
+ ),
1497
+ InputParam(
1498
+ "timesteps",
1499
+ required=True,
1500
+ type_hint=torch.Tensor,
1501
+ description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.",
1502
+ ),
1503
+ InputParam(
1504
+ "crops_coords",
1505
+ type_hint=Optional[Tuple[int]],
1506
+ description="The crop coordinates to use for preprocess/postprocess the image and mask, for inpainting task only. Can be generated in vae_encode step.",
1507
+ ),
1508
+ ]
1509
+
1510
+ @property
1511
+ def intermediate_outputs(self) -> List[OutputParam]:
1512
+ return [
1513
+ OutputParam("controlnet_cond", type_hint=torch.Tensor, description="The processed control image"),
1514
+ OutputParam(
1515
+ "control_guidance_start", type_hint=List[float], description="The controlnet guidance start values"
1516
+ ),
1517
+ OutputParam(
1518
+ "control_guidance_end", type_hint=List[float], description="The controlnet guidance end values"
1519
+ ),
1520
+ OutputParam(
1521
+ "conditioning_scale", type_hint=List[float], description="The controlnet conditioning scale values"
1522
+ ),
1523
+ OutputParam("guess_mode", type_hint=bool, description="Whether guess mode is used"),
1524
+ OutputParam("controlnet_keep", type_hint=List[float], description="The controlnet keep values"),
1525
+ ]
1526
+
1527
+ # Modified from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
1528
+ # 1. return image without apply any guidance
1529
+ # 2. add crops_coords and resize_mode to preprocess()
1530
+ @staticmethod
1531
+ def prepare_control_image(
1532
+ components,
1533
+ image,
1534
+ width,
1535
+ height,
1536
+ batch_size,
1537
+ num_images_per_prompt,
1538
+ device,
1539
+ dtype,
1540
+ crops_coords=None,
1541
+ ):
1542
+ if crops_coords is not None:
1543
+ image = components.control_image_processor.preprocess(
1544
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode="fill"
1545
+ ).to(dtype=torch.float32)
1546
+ else:
1547
+ image = components.control_image_processor.preprocess(image, height=height, width=width).to(
1548
+ dtype=torch.float32
1549
+ )
1550
+
1551
+ image_batch_size = image.shape[0]
1552
+ if image_batch_size == 1:
1553
+ repeat_by = batch_size
1554
+ else:
1555
+ # image batch size is the same as prompt batch size
1556
+ repeat_by = num_images_per_prompt
1557
+
1558
+ image = image.repeat_interleave(repeat_by, dim=0)
1559
+ image = image.to(device=device, dtype=dtype)
1560
+ return image
1561
+
1562
+ @torch.no_grad()
1563
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
1564
+ block_state = self.get_block_state(state)
1565
+
1566
+ # (1) prepare controlnet inputs
1567
+ block_state.device = components._execution_device
1568
+ block_state.height, block_state.width = block_state.latents.shape[-2:]
1569
+ block_state.height = block_state.height * components.vae_scale_factor
1570
+ block_state.width = block_state.width * components.vae_scale_factor
1571
+
1572
+ controlnet = unwrap_module(components.controlnet)
1573
+
1574
+ # (1.1)
1575
+ # control_guidance_start/control_guidance_end (align format)
1576
+ if not isinstance(block_state.control_guidance_start, list) and isinstance(
1577
+ block_state.control_guidance_end, list
1578
+ ):
1579
+ block_state.control_guidance_start = len(block_state.control_guidance_end) * [
1580
+ block_state.control_guidance_start
1581
+ ]
1582
+ elif not isinstance(block_state.control_guidance_end, list) and isinstance(
1583
+ block_state.control_guidance_start, list
1584
+ ):
1585
+ block_state.control_guidance_end = len(block_state.control_guidance_start) * [
1586
+ block_state.control_guidance_end
1587
+ ]
1588
+ elif not isinstance(block_state.control_guidance_start, list) and not isinstance(
1589
+ block_state.control_guidance_end, list
1590
+ ):
1591
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1592
+ block_state.control_guidance_start, block_state.control_guidance_end = (
1593
+ mult * [block_state.control_guidance_start],
1594
+ mult * [block_state.control_guidance_end],
1595
+ )
1596
+
1597
+ # (1.2)
1598
+ # controlnet_conditioning_scale (align format)
1599
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(
1600
+ block_state.controlnet_conditioning_scale, float
1601
+ ):
1602
+ block_state.controlnet_conditioning_scale = [block_state.controlnet_conditioning_scale] * len(
1603
+ controlnet.nets
1604
+ )
1605
+
1606
+ # (1.3)
1607
+ # global_pool_conditions
1608
+ block_state.global_pool_conditions = (
1609
+ controlnet.config.global_pool_conditions
1610
+ if isinstance(controlnet, ControlNetModel)
1611
+ else controlnet.nets[0].config.global_pool_conditions
1612
+ )
1613
+ # (1.4)
1614
+ # guess_mode
1615
+ block_state.guess_mode = block_state.guess_mode or block_state.global_pool_conditions
1616
+
1617
+ # (1.5)
1618
+ # control_image
1619
+ if isinstance(controlnet, ControlNetModel):
1620
+ block_state.control_image = self.prepare_control_image(
1621
+ components,
1622
+ image=block_state.control_image,
1623
+ width=block_state.width,
1624
+ height=block_state.height,
1625
+ batch_size=block_state.batch_size * block_state.num_images_per_prompt,
1626
+ num_images_per_prompt=block_state.num_images_per_prompt,
1627
+ device=block_state.device,
1628
+ dtype=controlnet.dtype,
1629
+ crops_coords=block_state.crops_coords,
1630
+ )
1631
+ elif isinstance(controlnet, MultiControlNetModel):
1632
+ control_images = []
1633
+
1634
+ for control_image_ in block_state.control_image:
1635
+ control_image = self.prepare_control_image(
1636
+ components,
1637
+ image=control_image_,
1638
+ width=block_state.width,
1639
+ height=block_state.height,
1640
+ batch_size=block_state.batch_size * block_state.num_images_per_prompt,
1641
+ num_images_per_prompt=block_state.num_images_per_prompt,
1642
+ device=block_state.device,
1643
+ dtype=controlnet.dtype,
1644
+ crops_coords=block_state.crops_coords,
1645
+ )
1646
+
1647
+ control_images.append(control_image)
1648
+
1649
+ block_state.control_image = control_images
1650
+ else:
1651
+ assert False
1652
+
1653
+ # (1.6)
1654
+ # controlnet_keep
1655
+ block_state.controlnet_keep = []
1656
+ for i in range(len(block_state.timesteps)):
1657
+ keeps = [
1658
+ 1.0 - float(i / len(block_state.timesteps) < s or (i + 1) / len(block_state.timesteps) > e)
1659
+ for s, e in zip(block_state.control_guidance_start, block_state.control_guidance_end)
1660
+ ]
1661
+ block_state.controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1662
+
1663
+ block_state.controlnet_cond = block_state.control_image
1664
+ block_state.conditioning_scale = block_state.controlnet_conditioning_scale
1665
+
1666
+ self.set_block_state(state, block_state)
1667
+
1668
+ return components, state
1669
+
1670
+
1671
+ class StableDiffusionXLControlNetUnionInputStep(ModularPipelineBlocks):
1672
+ model_name = "stable-diffusion-xl"
1673
+
1674
+ @property
1675
+ def expected_components(self) -> List[ComponentSpec]:
1676
+ return [
1677
+ ComponentSpec("controlnet", ControlNetUnionModel),
1678
+ ComponentSpec(
1679
+ "control_image_processor",
1680
+ VaeImageProcessor,
1681
+ config=FrozenDict({"do_convert_rgb": True, "do_normalize": False}),
1682
+ default_creation_method="from_config",
1683
+ ),
1684
+ ]
1685
+
1686
+ @property
1687
+ def description(self) -> str:
1688
+ return "step that prepares inputs for the ControlNetUnion model"
1689
+
1690
+ @property
1691
+ def inputs(self) -> List[Tuple[str, Any]]:
1692
+ return [
1693
+ InputParam("control_image", required=True),
1694
+ InputParam("control_mode", required=True),
1695
+ InputParam("control_guidance_start", default=0.0),
1696
+ InputParam("control_guidance_end", default=1.0),
1697
+ InputParam("controlnet_conditioning_scale", default=1.0),
1698
+ InputParam("guess_mode", default=False),
1699
+ InputParam("num_images_per_prompt", default=1),
1700
+ InputParam(
1701
+ "latents",
1702
+ required=True,
1703
+ type_hint=torch.Tensor,
1704
+ description="The initial latents to use for the denoising process. Used to determine the shape of the control images. Can be generated in prepare_latent step.",
1705
+ ),
1706
+ InputParam(
1707
+ "batch_size",
1708
+ required=True,
1709
+ type_hint=int,
1710
+ description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.",
1711
+ ),
1712
+ InputParam(
1713
+ "dtype",
1714
+ required=True,
1715
+ type_hint=torch.dtype,
1716
+ description="The dtype of model tensor inputs. Can be generated in input step.",
1717
+ ),
1718
+ InputParam(
1719
+ "timesteps",
1720
+ required=True,
1721
+ type_hint=torch.Tensor,
1722
+ description="The timesteps to use for the denoising process. Needed to determine `controlnet_keep`. Can be generated in set_timesteps step.",
1723
+ ),
1724
+ InputParam(
1725
+ "crops_coords",
1726
+ type_hint=Optional[Tuple[int]],
1727
+ description="The crop coordinates to use for preprocess/postprocess the image and mask, for inpainting task only. Can be generated in vae_encode step.",
1728
+ ),
1729
+ ]
1730
+
1731
+ @property
1732
+ def intermediate_outputs(self) -> List[OutputParam]:
1733
+ return [
1734
+ OutputParam("controlnet_cond", type_hint=List[torch.Tensor], description="The processed control images"),
1735
+ OutputParam(
1736
+ "control_type_idx",
1737
+ type_hint=List[int],
1738
+ description="The control mode indices",
1739
+ kwargs_type="controlnet_kwargs",
1740
+ ),
1741
+ OutputParam(
1742
+ "control_type",
1743
+ type_hint=torch.Tensor,
1744
+ description="The control type tensor that specifies which control type is active",
1745
+ kwargs_type="controlnet_kwargs",
1746
+ ),
1747
+ OutputParam("control_guidance_start", type_hint=float, description="The controlnet guidance start value"),
1748
+ OutputParam("control_guidance_end", type_hint=float, description="The controlnet guidance end value"),
1749
+ OutputParam(
1750
+ "conditioning_scale", type_hint=List[float], description="The controlnet conditioning scale values"
1751
+ ),
1752
+ OutputParam("guess_mode", type_hint=bool, description="Whether guess mode is used"),
1753
+ OutputParam("controlnet_keep", type_hint=List[float], description="The controlnet keep values"),
1754
+ ]
1755
+
1756
+ # Modified from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
1757
+ # 1. return image without apply any guidance
1758
+ # 2. add crops_coords and resize_mode to preprocess()
1759
+ @staticmethod
1760
+ def prepare_control_image(
1761
+ components,
1762
+ image,
1763
+ width,
1764
+ height,
1765
+ batch_size,
1766
+ num_images_per_prompt,
1767
+ device,
1768
+ dtype,
1769
+ crops_coords=None,
1770
+ ):
1771
+ if crops_coords is not None:
1772
+ image = components.control_image_processor.preprocess(
1773
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode="fill"
1774
+ ).to(dtype=torch.float32)
1775
+ else:
1776
+ image = components.control_image_processor.preprocess(image, height=height, width=width).to(
1777
+ dtype=torch.float32
1778
+ )
1779
+
1780
+ image_batch_size = image.shape[0]
1781
+ if image_batch_size == 1:
1782
+ repeat_by = batch_size
1783
+ else:
1784
+ # image batch size is the same as prompt batch size
1785
+ repeat_by = num_images_per_prompt
1786
+
1787
+ image = image.repeat_interleave(repeat_by, dim=0)
1788
+ image = image.to(device=device, dtype=dtype)
1789
+ return image
1790
+
1791
+ @torch.no_grad()
1792
+ def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState:
1793
+ block_state = self.get_block_state(state)
1794
+
1795
+ controlnet = unwrap_module(components.controlnet)
1796
+
1797
+ device = components._execution_device
1798
+ dtype = block_state.dtype or components.controlnet.dtype
1799
+
1800
+ block_state.height, block_state.width = block_state.latents.shape[-2:]
1801
+ block_state.height = block_state.height * components.vae_scale_factor
1802
+ block_state.width = block_state.width * components.vae_scale_factor
1803
+
1804
+ # control_guidance_start/control_guidance_end (align format)
1805
+ if not isinstance(block_state.control_guidance_start, list) and isinstance(
1806
+ block_state.control_guidance_end, list
1807
+ ):
1808
+ block_state.control_guidance_start = len(block_state.control_guidance_end) * [
1809
+ block_state.control_guidance_start
1810
+ ]
1811
+ elif not isinstance(block_state.control_guidance_end, list) and isinstance(
1812
+ block_state.control_guidance_start, list
1813
+ ):
1814
+ block_state.control_guidance_end = len(block_state.control_guidance_start) * [
1815
+ block_state.control_guidance_end
1816
+ ]
1817
+
1818
+ # guess_mode
1819
+ block_state.global_pool_conditions = controlnet.config.global_pool_conditions
1820
+ block_state.guess_mode = block_state.guess_mode or block_state.global_pool_conditions
1821
+
1822
+ # control_image
1823
+ if not isinstance(block_state.control_image, list):
1824
+ block_state.control_image = [block_state.control_image]
1825
+ # control_mode
1826
+ if not isinstance(block_state.control_mode, list):
1827
+ block_state.control_mode = [block_state.control_mode]
1828
+
1829
+ if len(block_state.control_image) != len(block_state.control_mode):
1830
+ raise ValueError("Expected len(control_image) == len(control_type)")
1831
+
1832
+ # control_type
1833
+ block_state.num_control_type = controlnet.config.num_control_type
1834
+ block_state.control_type = [0 for _ in range(block_state.num_control_type)]
1835
+ for control_idx in block_state.control_mode:
1836
+ block_state.control_type[control_idx] = 1
1837
+ block_state.control_type = torch.Tensor(block_state.control_type)
1838
+
1839
+ block_state.control_type = block_state.control_type.reshape(1, -1).to(device, dtype=block_state.dtype)
1840
+ repeat_by = block_state.batch_size * block_state.num_images_per_prompt // block_state.control_type.shape[0]
1841
+ block_state.control_type = block_state.control_type.repeat_interleave(repeat_by, dim=0)
1842
+
1843
+ # prepare control_image
1844
+ for idx, _ in enumerate(block_state.control_image):
1845
+ block_state.control_image[idx] = self.prepare_control_image(
1846
+ components,
1847
+ image=block_state.control_image[idx],
1848
+ width=block_state.width,
1849
+ height=block_state.height,
1850
+ batch_size=block_state.batch_size * block_state.num_images_per_prompt,
1851
+ num_images_per_prompt=block_state.num_images_per_prompt,
1852
+ device=device,
1853
+ dtype=dtype,
1854
+ crops_coords=block_state.crops_coords,
1855
+ )
1856
+ block_state.height, block_state.width = block_state.control_image[idx].shape[-2:]
1857
+
1858
+ # controlnet_keep
1859
+ block_state.controlnet_keep = []
1860
+ for i in range(len(block_state.timesteps)):
1861
+ block_state.controlnet_keep.append(
1862
+ 1.0
1863
+ - float(
1864
+ i / len(block_state.timesteps) < block_state.control_guidance_start
1865
+ or (i + 1) / len(block_state.timesteps) > block_state.control_guidance_end
1866
+ )
1867
+ )
1868
+ block_state.control_type_idx = block_state.control_mode
1869
+ block_state.controlnet_cond = block_state.control_image
1870
+ block_state.conditioning_scale = block_state.controlnet_conditioning_scale
1871
+
1872
+ self.set_block_state(state, block_state)
1873
+
1874
+ return components, state