diffusers 0.28.2__py3-none-any.whl → 0.29.0__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (118)
  1. diffusers/__init__.py +9 -1
  2. diffusers/commands/env.py +1 -5
  3. diffusers/dependency_versions_table.py +1 -1
  4. diffusers/image_processor.py +2 -1
  5. diffusers/loaders/__init__.py +2 -2
  6. diffusers/loaders/lora.py +406 -140
  7. diffusers/loaders/lora_conversion_utils.py +7 -1
  8. diffusers/loaders/single_file.py +1 -1
  9. diffusers/loaders/single_file_model.py +5 -0
  10. diffusers/loaders/single_file_utils.py +242 -2
  11. diffusers/loaders/unet.py +307 -272
  12. diffusers/models/__init__.py +5 -3
  13. diffusers/models/attention.py +125 -1
  14. diffusers/models/attention_processor.py +169 -1
  15. diffusers/models/autoencoders/__init__.py +1 -0
  16. diffusers/models/autoencoders/autoencoder_asym_kl.py +1 -1
  17. diffusers/models/autoencoders/autoencoder_kl.py +17 -6
  18. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +4 -2
  19. diffusers/models/autoencoders/consistency_decoder_vae.py +9 -9
  20. diffusers/models/autoencoders/vq_model.py +182 -0
  21. diffusers/models/controlnet_xs.py +6 -6
  22. diffusers/models/embeddings.py +112 -84
  23. diffusers/models/model_loading_utils.py +55 -0
  24. diffusers/models/modeling_utils.py +128 -17
  25. diffusers/models/normalization.py +11 -6
  26. diffusers/models/transformers/__init__.py +1 -0
  27. diffusers/models/transformers/dual_transformer_2d.py +5 -4
  28. diffusers/models/transformers/hunyuan_transformer_2d.py +149 -2
  29. diffusers/models/transformers/prior_transformer.py +5 -5
  30. diffusers/models/transformers/transformer_2d.py +2 -2
  31. diffusers/models/transformers/transformer_sd3.py +344 -0
  32. diffusers/models/transformers/transformer_temporal.py +12 -10
  33. diffusers/models/unets/unet_1d.py +3 -3
  34. diffusers/models/unets/unet_2d.py +3 -3
  35. diffusers/models/unets/unet_2d_condition.py +4 -15
  36. diffusers/models/unets/unet_3d_condition.py +5 -17
  37. diffusers/models/unets/unet_i2vgen_xl.py +4 -4
  38. diffusers/models/unets/unet_motion_model.py +4 -4
  39. diffusers/models/unets/unet_spatio_temporal_condition.py +3 -3
  40. diffusers/models/vq_model.py +8 -165
  41. diffusers/pipelines/__init__.py +2 -0
  42. diffusers/pipelines/animatediff/pipeline_animatediff.py +4 -3
  43. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +4 -3
  44. diffusers/pipelines/controlnet/pipeline_controlnet.py +4 -3
  45. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +4 -3
  46. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +4 -3
  47. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +4 -3
  48. diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
  49. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +4 -3
  50. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +4 -3
  51. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +4 -3
  52. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +4 -3
  53. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +4 -3
  54. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +24 -5
  55. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +4 -3
  56. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +4 -3
  57. diffusers/pipelines/marigold/marigold_image_processing.py +35 -20
  58. diffusers/pipelines/pia/pipeline_pia.py +4 -3
  59. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +1 -1
  60. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +1 -1
  61. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +17 -17
  62. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +4 -3
  63. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +5 -4
  64. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +4 -3
  65. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +4 -3
  66. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +4 -3
  67. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +4 -3
  68. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +7 -6
  69. diffusers/pipelines/stable_diffusion_3/__init__.py +52 -0
  70. diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
  71. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +886 -0
  72. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +923 -0
  73. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +4 -3
  74. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +10 -11
  75. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +4 -3
  76. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +4 -3
  77. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +4 -3
  78. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +4 -3
  79. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +4 -3
  80. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +4 -3
  81. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +4 -3
  82. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +4 -3
  83. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +4 -3
  84. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +4 -3
  85. diffusers/pipelines/unidiffuser/modeling_uvit.py +1 -1
  86. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +4 -3
  87. diffusers/schedulers/__init__.py +2 -0
  88. diffusers/schedulers/scheduling_dpmsolver_sde.py +2 -2
  89. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +2 -3
  90. diffusers/schedulers/scheduling_edm_euler.py +2 -4
  91. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +287 -0
  92. diffusers/schedulers/scheduling_lms_discrete.py +2 -2
  93. diffusers/training_utils.py +4 -4
  94. diffusers/utils/__init__.py +3 -0
  95. diffusers/utils/constants.py +2 -0
  96. diffusers/utils/dummy_pt_objects.py +30 -0
  97. diffusers/utils/dummy_torch_and_transformers_objects.py +30 -0
  98. diffusers/utils/dynamic_modules_utils.py +15 -13
  99. diffusers/utils/hub_utils.py +106 -0
  100. diffusers/utils/import_utils.py +0 -1
  101. diffusers/utils/logging.py +3 -1
  102. diffusers/utils/state_dict_utils.py +2 -0
  103. {diffusers-0.28.2.dist-info → diffusers-0.29.0.dist-info}/METADATA +45 -45
  104. {diffusers-0.28.2.dist-info → diffusers-0.29.0.dist-info}/RECORD +108 -111
  105. {diffusers-0.28.2.dist-info → diffusers-0.29.0.dist-info}/WHEEL +1 -1
  106. diffusers/models/dual_transformer_2d.py +0 -20
  107. diffusers/models/prior_transformer.py +0 -12
  108. diffusers/models/t5_film_transformer.py +0 -70
  109. diffusers/models/transformer_2d.py +0 -25
  110. diffusers/models/transformer_temporal.py +0 -34
  111. diffusers/models/unet_1d.py +0 -26
  112. diffusers/models/unet_1d_blocks.py +0 -203
  113. diffusers/models/unet_2d.py +0 -27
  114. diffusers/models/unet_2d_blocks.py +0 -375
  115. diffusers/models/unet_2d_condition.py +0 -25
  116. {diffusers-0.28.2.dist-info → diffusers-0.29.0.dist-info}/LICENSE +0 -0
  117. {diffusers-0.28.2.dist-info → diffusers-0.29.0.dist-info}/entry_points.txt +0 -0
  118. {diffusers-0.28.2.dist-info → diffusers-0.29.0.dist-info}/top_level.txt +0 -0
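
The headline addition in 0.29.0 is initial Stable Diffusion 3 support, visible above as the new `transformer_sd3.py`, `scheduling_flow_match_euler_discrete.py`, and `pipelines/stable_diffusion_3/` files. As a quick orientation before the hunks (the first hunk below is the new `diffusers/models/transformers/transformer_sd3.py`), here is a minimal usage sketch of the new pipeline; the checkpoint id and sampling settings are illustrative and not part of this diff.

```python
# Minimal sketch of the SD3 entry point added in this release (illustrative only:
# the checkpoint id, dtype, and sampling settings are not taken from this diff).
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # illustrative checkpoint id
    torch_dtype=torch.float16,
)
pipe.to("cuda")

image = pipe(
    "a photo of an astronaut riding a horse on the moon",
    num_inference_steps=28,  # SD3 samples with the new FlowMatchEulerDiscreteScheduler
    guidance_scale=7.0,
).images[0]
image.save("sd3.png")
```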
@@ -0,0 +1,344 @@
+ # Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from typing import Any, Dict, Optional, Union
+
+ import torch
+ import torch.nn as nn
+
+ from ...configuration_utils import ConfigMixin, register_to_config
+ from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
+ from ...models.attention import JointTransformerBlock
+ from ...models.attention_processor import Attention, AttentionProcessor
+ from ...models.modeling_utils import ModelMixin
+ from ...models.normalization import AdaLayerNormContinuous
+ from ...utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers
+ from ..embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
+ from .transformer_2d import Transformer2DModelOutput
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ class SD3Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
+     """
+     The Transformer model introduced in Stable Diffusion 3.
+
+     Reference: https://arxiv.org/abs/2403.03206
+
+     Parameters:
+         sample_size (`int`): The width of the latent images. This is fixed during training since
+             it is used to learn a number of position embeddings.
+         patch_size (`int`): Patch size to turn the input data into small patches.
+         in_channels (`int`, *optional*, defaults to 16): The number of channels in the input.
+         num_layers (`int`, *optional*, defaults to 18): The number of layers of Transformer blocks to use.
+         attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
+         num_attention_heads (`int`, *optional*, defaults to 18): The number of heads to use for multi-head attention.
+         joint_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
+         caption_projection_dim (`int`): Number of dimensions to use when projecting the `encoder_hidden_states`.
+         pooled_projection_dim (`int`): Number of dimensions to use when projecting the `pooled_projections`.
+         out_channels (`int`, defaults to 16): Number of output channels.
+
+     """
+
+     _supports_gradient_checkpointing = True
+
+     @register_to_config
+     def __init__(
+         self,
+         sample_size: int = 128,
+         patch_size: int = 2,
+         in_channels: int = 16,
+         num_layers: int = 18,
+         attention_head_dim: int = 64,
+         num_attention_heads: int = 18,
+         joint_attention_dim: int = 4096,
+         caption_projection_dim: int = 1152,
+         pooled_projection_dim: int = 2048,
+         out_channels: int = 16,
+         pos_embed_max_size: int = 96,
+     ):
+         super().__init__()
+         default_out_channels = in_channels
+         self.out_channels = out_channels if out_channels is not None else default_out_channels
+         self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
+
+         self.pos_embed = PatchEmbed(
+             height=self.config.sample_size,
+             width=self.config.sample_size,
+             patch_size=self.config.patch_size,
+             in_channels=self.config.in_channels,
+             embed_dim=self.inner_dim,
+             pos_embed_max_size=pos_embed_max_size,  # hard-code for now.
+         )
+         self.time_text_embed = CombinedTimestepTextProjEmbeddings(
+             embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim
+         )
+         self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.config.caption_projection_dim)
+
+         # `attention_head_dim` is doubled to account for the mixing.
+         # It needs to be crafted when we get the actual checkpoints.
+         self.transformer_blocks = nn.ModuleList(
+             [
+                 JointTransformerBlock(
+                     dim=self.inner_dim,
+                     num_attention_heads=self.config.num_attention_heads,
+                     attention_head_dim=self.inner_dim,
+                     context_pre_only=i == num_layers - 1,
+                 )
+                 for i in range(self.config.num_layers)
+             ]
+         )
+
+         self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
+         self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
+
+         self.gradient_checkpointing = False
+
+     # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
+     def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
+         """
+         Sets the attention processor to use [feed forward
+         chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).
+
+         Parameters:
+             chunk_size (`int`, *optional*):
+                 The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
+                 over each tensor of dim=`dim`.
+             dim (`int`, *optional*, defaults to `0`):
+                 The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
+                 or dim=1 (sequence length).
+         """
+         if dim not in [0, 1]:
+             raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")
+
+         # By default chunk size is 1
+         chunk_size = chunk_size or 1
+
+         def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
+             if hasattr(module, "set_chunk_feed_forward"):
+                 module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
+
+             for child in module.children():
+                 fn_recursive_feed_forward(child, chunk_size, dim)
+
+         for module in self.children():
+             fn_recursive_feed_forward(module, chunk_size, dim)
+
+     @property
+     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
+     def attn_processors(self) -> Dict[str, AttentionProcessor]:
+         r"""
+         Returns:
+             `dict` of attention processors: A dictionary containing all attention processors used in the model,
+             indexed by its weight name.
+         """
+         # set recursively
+         processors = {}
+
+         def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+             if hasattr(module, "get_processor"):
+                 processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+             return processors
+
+         for name, module in self.named_children():
+             fn_recursive_add_processors(name, module, processors)
+
+         return processors
+
+     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
+     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+         r"""
+         Sets the attention processor to use to compute attention.
+
+         Parameters:
+             processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+                 The instantiated processor class or a dictionary of processor classes that will be set as the processor
+                 for **all** `Attention` layers.
+
+                 If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+                 processor. This is strongly recommended when setting trainable attention processors.
+
+         """
+         count = len(self.attn_processors.keys())
+
+         if isinstance(processor, dict) and len(processor) != count:
+             raise ValueError(
+                 f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+                 f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+             )
+
+         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+             if hasattr(module, "set_processor"):
+                 if not isinstance(processor, dict):
+                     module.set_processor(processor)
+                 else:
+                     module.set_processor(processor.pop(f"{name}.processor"))
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+         for name, module in self.named_children():
+             fn_recursive_attn_processor(name, module, processor)
+
+     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
+     def fuse_qkv_projections(self):
+         """
+         Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
+         are fused. For cross-attention modules, key and value projection matrices are fused.
+
+         <Tip warning={true}>
+
+         This API is 🧪 experimental.
+
+         </Tip>
+         """
+         self.original_attn_processors = None
+
+         for _, attn_processor in self.attn_processors.items():
+             if "Added" in str(attn_processor.__class__.__name__):
+                 raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
+
+         self.original_attn_processors = self.attn_processors
+
+         for module in self.modules():
+             if isinstance(module, Attention):
+                 module.fuse_projections(fuse=True)
+
+     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
+     def unfuse_qkv_projections(self):
+         """Disables the fused QKV projection if enabled.
+
+         <Tip warning={true}>
+
+         This API is 🧪 experimental.
+
+         </Tip>
+
+         """
+         if self.original_attn_processors is not None:
+             self.set_attn_processor(self.original_attn_processors)
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if hasattr(module, "gradient_checkpointing"):
+             module.gradient_checkpointing = value
+
+     def forward(
+         self,
+         hidden_states: torch.FloatTensor,
+         encoder_hidden_states: torch.FloatTensor = None,
+         pooled_projections: torch.FloatTensor = None,
+         timestep: torch.LongTensor = None,
+         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+         return_dict: bool = True,
+     ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
+         """
+         The [`SD3Transformer2DModel`] forward method.
+
+         Args:
+             hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`):
+                 Input `hidden_states`.
+             encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`):
+                 Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
+             pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected
+                 from the embeddings of input conditions.
+             timestep (`torch.LongTensor`):
+                 Used to indicate denoising step.
+             joint_attention_kwargs (`dict`, *optional*):
+                 A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                 `self.processor` in
+                 [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
+                 tuple.
+
+         Returns:
+             If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
+             `tuple` where the first element is the sample tensor.
+         """
+         if joint_attention_kwargs is not None:
+             joint_attention_kwargs = joint_attention_kwargs.copy()
+             lora_scale = joint_attention_kwargs.pop("scale", 1.0)
+         else:
+             lora_scale = 1.0
+
+         if USE_PEFT_BACKEND:
+             # weight the lora layers by setting `lora_scale` for each PEFT layer
+             scale_lora_layers(self, lora_scale)
+         else:
+             logger.warning(
+                 "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
+             )
+
+         height, width = hidden_states.shape[-2:]
+
+         hidden_states = self.pos_embed(hidden_states)  # takes care of adding positional embeddings too.
+         temb = self.time_text_embed(timestep, pooled_projections)
+         encoder_hidden_states = self.context_embedder(encoder_hidden_states)
+
+         for block in self.transformer_blocks:
+             if self.training and self.gradient_checkpointing:
+
+                 def create_custom_forward(module, return_dict=None):
+                     def custom_forward(*inputs):
+                         if return_dict is not None:
+                             return module(*inputs, return_dict=return_dict)
+                         else:
+                             return module(*inputs)
+
+                     return custom_forward
+
+                 ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+                 hidden_states = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(block),
+                     hidden_states,
+                     encoder_hidden_states,
+                     temb,
+                     **ckpt_kwargs,
+                 )
+
+             else:
+                 encoder_hidden_states, hidden_states = block(
+                     hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb
+                 )
+
+         hidden_states = self.norm_out(hidden_states, temb)
+         hidden_states = self.proj_out(hidden_states)
+
+         # unpatchify
+         patch_size = self.config.patch_size
+         height = height // patch_size
+         width = width // patch_size
+
+         hidden_states = hidden_states.reshape(
+             shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels)
+         )
+         hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
+         output = hidden_states.reshape(
+             shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
+         )
+
+         if USE_PEFT_BACKEND:
+             # remove `lora_scale` from each PEFT layer
+             unscale_lora_layers(self, lora_scale)
+
+         if not return_dict:
+             return (output,)
+
+         return Transformer2DModelOutput(sample=output)
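
To make the new model file above easier to sanity-check, the following is a small smoke-test sketch that instantiates `SD3Transformer2DModel` with a deliberately tiny, non-default configuration and runs a single forward pass. The shapes and config values are illustrative; the real SD3 checkpoint uses the defaults shown in `__init__` above.

```python
# Smoke-test sketch for the new SD3Transformer2DModel (tiny illustrative config,
# not the SD3 checkpoint's; see the __init__ defaults above for the real sizes).
import torch
from diffusers import SD3Transformer2DModel

model = SD3Transformer2DModel(
    sample_size=32,
    patch_size=2,
    in_channels=4,
    num_layers=1,
    attention_head_dim=8,
    num_attention_heads=2,      # inner_dim = 2 * 8 = 16
    joint_attention_dim=32,
    caption_projection_dim=16,  # should equal inner_dim so the joint blocks line up
    pooled_projection_dim=64,
    out_channels=4,
    pos_embed_max_size=64,
)

hidden_states = torch.randn(1, 4, 32, 32)        # latents (B, C, H, W)
encoder_hidden_states = torch.randn(1, 77, 32)   # prompt embeddings (B, seq, joint_attention_dim)
pooled_projections = torch.randn(1, 64)          # pooled text embedding (B, pooled_projection_dim)
timestep = torch.tensor([500])

out = model(hidden_states, encoder_hidden_states, pooled_projections, timestep)
print(out.sample.shape)  # expected: torch.Size([1, 4, 32, 32])
```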
@@ -149,13 +149,14 @@ class TransformerTemporalModel(ModelMixin, ConfigMixin):
                  `self.processor` in
                  [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
              return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
-                 tuple.
+                 Whether or not to return a [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`]
+                 instead of a plain tuple.

          Returns:
-             [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
-                 If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is
-                 returned, otherwise a `tuple` where the first element is the sample tensor.
+             [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
+                 If `return_dict` is True, an
+                 [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a
+                 `tuple` where the first element is the sample tensor.

          """
          # 1. Input
          batch_frames, channel, height, width = hidden_states.shape
@@ -294,13 +295,14 @@ class TransformerSpatioTemporalModel(nn.Module):
                  A tensor indicating whether the input contains only images. 1 indicates that the input contains only
                  images, 0 indicates that the input contains video frames.
              return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~models.transformer_temporal.TransformerTemporalModelOutput`] instead of a
-                 plain tuple.
+                 Whether or not to return a [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`]
+                 instead of a plain tuple.

          Returns:
-             [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
-                 If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is
-                 returned, otherwise a `tuple` where the first element is the sample tensor.
+             [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
+                 If `return_dict` is True, an
+                 [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a
+                 `tuple` where the first element is the sample tensor.
          """
          # 1. Input
          batch_frames, _, height, width = hidden_states.shape
@@ -206,11 +206,11 @@ class UNet1DModel(ModelMixin, ConfigMixin):
                  The noisy input tensor with the following shape `(batch_size, num_channels, sample_size)`.
              timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
              return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~models.unet_1d.UNet1DOutput`] instead of a plain tuple.
+                 Whether or not to return a [`~models.unets.unet_1d.UNet1DOutput`] instead of a plain tuple.

          Returns:
-             [`~models.unet_1d.UNet1DOutput`] or `tuple`:
-                 If `return_dict` is True, an [`~models.unet_1d.UNet1DOutput`] is returned, otherwise a `tuple` is
+             [`~models.unets.unet_1d.UNet1DOutput`] or `tuple`:
+                 If `return_dict` is True, an [`~models.unets.unet_1d.UNet1DOutput`] is returned, otherwise a `tuple` is
                  returned where the first element is the sample tensor.
          """

@@ -257,11 +257,11 @@ class UNet2DModel(ModelMixin, ConfigMixin):
              class_labels (`torch.Tensor`, *optional*, defaults to `None`):
                  Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
              return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple.
+                 Whether or not to return a [`~models.unets.unet_2d.UNet2DOutput`] instead of a plain tuple.

          Returns:
-             [`~models.unet_2d.UNet2DOutput`] or `tuple`:
-                 If `return_dict` is True, an [`~models.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is
+             [`~models.unets.unet_2d.UNet2DOutput`] or `tuple`:
+                 If `return_dict` is True, an [`~models.unets.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is
                  returned where the first element is the sample tensor.
          """
          # 0. center input if necessary
@@ -110,13 +110,13 @@ class UNet2DConditionModel(
              The dimension of the cross attention features.
          transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
              The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
-             [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
-             [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
+             [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
+             [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
          reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None):
              The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
              blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
-             [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
-             [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
+             [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
+             [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
          encoder_hid_dim (`int`, *optional*, defaults to None):
              If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
              dimension to `cross_attention_dim`.
@@ -903,17 +903,6 @@ class UNet2DConditionModel(
          if self.original_attn_processors is not None:
              self.set_attn_processor(self.original_attn_processors)

-     def unload_lora(self):
-         """Unloads LoRA weights."""
-         deprecate(
-             "unload_lora",
-             "0.28.0",
-             "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().",
-         )
-         for module in self.modules():
-             if hasattr(module, "set_lora_layer"):
-                 module.set_lora_layer(None)
-
      def get_time_embed(
          self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]
      ) -> Optional[torch.Tensor]:
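
The `unload_lora()` method removed above was only a deprecation shim; as its own message says, the supported path is the PEFT backend. A hedged migration sketch follows (the checkpoint and LoRA repo ids below are placeholders, not taken from this diff):

```python
# Migration sketch for code that still called unet.unload_lora(); assumes the
# `peft` backend is installed. The repo ids are placeholders, not from this diff.
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.load_lora_weights("some-user/some-lora")  # placeholder LoRA repo id

pipe.unet.disable_adapters()  # PEFT-backed replacement: switch LoRA layers off
pipe.unet.enable_adapters()   # ...and back on
pipe.unload_lora_weights()    # or drop the LoRA weights from the pipeline entirely
```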
@@ -22,7 +22,7 @@ import torch.utils.checkpoint

  from ...configuration_utils import ConfigMixin, register_to_config
  from ...loaders import UNet2DConditionLoadersMixin
- from ...utils import BaseOutput, deprecate, logging
+ from ...utils import BaseOutput, logging
  from ..activations import get_activation
  from ..attention_processor import (
      ADDED_KV_ATTENTION_PROCESSORS,
@@ -546,18 +546,6 @@ class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin)
          if self.original_attn_processors is not None:
              self.set_attn_processor(self.original_attn_processors)

-     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unload_lora
-     def unload_lora(self):
-         """Unloads LoRA weights."""
-         deprecate(
-             "unload_lora",
-             "0.28.0",
-             "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().",
-         )
-         for module in self.modules():
-             if hasattr(module, "set_lora_layer"):
-                 module.set_lora_layer(None)
-
      def forward(
          self,
          sample: torch.Tensor,
@@ -598,15 +586,15 @@ class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin)
              mid_block_additional_residual: (`torch.Tensor`, *optional*):
                  A tensor that if specified is added to the residual of the middle unet block.
              return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~models.unet_3d_condition.UNet3DConditionOutput`] instead of a plain
+                 Whether or not to return a [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] instead of a plain
                  tuple.
              cross_attention_kwargs (`dict`, *optional*):
                  A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].

          Returns:
-             [`~models.unet_3d_condition.UNet3DConditionOutput`] or `tuple`:
-                 If `return_dict` is True, an [`~models.unet_3d_condition.UNet3DConditionOutput`] is returned, otherwise
-                 a `tuple` is returned where the first element is the sample tensor.
+             [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] or `tuple`:
+                 If `return_dict` is True, an [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] is returned,
+                 otherwise a `tuple` is returned where the first element is the sample tensor.
          """
          # By default samples have to be at least a multiple of the overall upsampling factor.
          # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
@@ -542,13 +542,13 @@ class I2VGenXLUNet(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
                  `self.processor` in
                  [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
              return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~models.unet_3d_condition.UNet3DConditionOutput`] instead of a plain
+                 Whether or not to return a [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] instead of a plain
                  tuple.

          Returns:
-             [`~models.unet_3d_condition.UNet3DConditionOutput`] or `tuple`:
-                 If `return_dict` is True, an [`~models.unet_3d_condition.UNet3DConditionOutput`] is returned, otherwise
-                 a `tuple` is returned where the first element is the sample tensor.
+             [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] or `tuple`:
+                 If `return_dict` is True, an [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] is returned,
+                 otherwise a `tuple` is returned where the first element is the sample tensor.
          """
          batch_size, channels, num_frames, height, width = sample.shape

@@ -856,13 +856,13 @@ class UNetMotionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
              mid_block_additional_residual: (`torch.Tensor`, *optional*):
                  A tensor that if specified is added to the residual of the middle unet block.
              return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~models.unet_3d_condition.UNet3DConditionOutput`] instead of a plain
+                 Whether or not to return a [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] instead of a plain
                  tuple.

          Returns:
-             [`~models.unet_3d_condition.UNet3DConditionOutput`] or `tuple`:
-                 If `return_dict` is True, an [`~models.unet_3d_condition.UNet3DConditionOutput`] is returned, otherwise
-                 a `tuple` is returned where the first element is the sample tensor.
+             [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] or `tuple`:
+                 If `return_dict` is True, an [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] is returned,
+                 otherwise a `tuple` is returned where the first element is the sample tensor.
          """
          # By default samples have to be at least a multiple of the overall upsampling factor.
          # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
@@ -57,9 +57,9 @@ class UNetSpatioTemporalConditionModel(ModelMixin, ConfigMixin, UNet2DConditionL
              The dimension of the cross attention features.
          transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
              The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
-             [`~models.unet_3d_blocks.CrossAttnDownBlockSpatioTemporal`],
-             [`~models.unet_3d_blocks.CrossAttnUpBlockSpatioTemporal`],
-             [`~models.unet_3d_blocks.UNetMidBlockSpatioTemporal`].
+             [`~models.unets.unet_3d_blocks.CrossAttnDownBlockSpatioTemporal`],
+             [`~models.unets.unet_3d_blocks.CrossAttnUpBlockSpatioTemporal`],
+             [`~models.unets.unet_3d_blocks.UNetMidBlockSpatioTemporal`].
          num_attention_heads (`int`, `Tuple[int]`, defaults to `(5, 10, 10, 20)`):
              The number of attention heads.
          dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.