diffusers 0.28.2__py3-none-any.whl → 0.29.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
- diffusers/__init__.py +15 -1
- diffusers/commands/env.py +1 -5
- diffusers/dependency_versions_table.py +1 -1
- diffusers/image_processor.py +2 -1
- diffusers/loaders/__init__.py +2 -2
- diffusers/loaders/lora.py +406 -140
- diffusers/loaders/lora_conversion_utils.py +7 -1
- diffusers/loaders/single_file.py +13 -1
- diffusers/loaders/single_file_model.py +15 -8
- diffusers/loaders/single_file_utils.py +267 -17
- diffusers/loaders/unet.py +307 -272
- diffusers/models/__init__.py +7 -3
- diffusers/models/attention.py +125 -1
- diffusers/models/attention_processor.py +169 -1
- diffusers/models/autoencoders/__init__.py +1 -0
- diffusers/models/autoencoders/autoencoder_asym_kl.py +1 -1
- diffusers/models/autoencoders/autoencoder_kl.py +17 -6
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +4 -2
- diffusers/models/autoencoders/consistency_decoder_vae.py +9 -9
- diffusers/models/autoencoders/vq_model.py +182 -0
- diffusers/models/controlnet_sd3.py +418 -0
- diffusers/models/controlnet_xs.py +6 -6
- diffusers/models/embeddings.py +112 -84
- diffusers/models/model_loading_utils.py +55 -0
- diffusers/models/modeling_utils.py +138 -20
- diffusers/models/normalization.py +11 -6
- diffusers/models/transformers/__init__.py +1 -0
- diffusers/models/transformers/dual_transformer_2d.py +5 -4
- diffusers/models/transformers/hunyuan_transformer_2d.py +149 -2
- diffusers/models/transformers/prior_transformer.py +5 -5
- diffusers/models/transformers/transformer_2d.py +2 -2
- diffusers/models/transformers/transformer_sd3.py +353 -0
- diffusers/models/transformers/transformer_temporal.py +12 -10
- diffusers/models/unets/unet_1d.py +3 -3
- diffusers/models/unets/unet_2d.py +3 -3
- diffusers/models/unets/unet_2d_condition.py +4 -15
- diffusers/models/unets/unet_3d_condition.py +5 -17
- diffusers/models/unets/unet_i2vgen_xl.py +4 -4
- diffusers/models/unets/unet_motion_model.py +4 -4
- diffusers/models/unets/unet_spatio_temporal_condition.py +3 -3
- diffusers/models/vq_model.py +8 -165
- diffusers/pipelines/__init__.py +11 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +4 -3
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +4 -3
- diffusers/pipelines/auto_pipeline.py +8 -0
- diffusers/pipelines/controlnet/pipeline_controlnet.py +4 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +4 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +4 -3
- diffusers/pipelines/controlnet_sd3/__init__.py +53 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +1062 -0
- diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +4 -3
- diffusers/pipelines/deepfloyd_if/watermark.py +1 -1
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +4 -3
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +4 -3
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +4 -3
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +4 -3
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +4 -3
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +24 -5
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +4 -3
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +4 -3
- diffusers/pipelines/marigold/marigold_image_processing.py +35 -20
- diffusers/pipelines/pia/pipeline_pia.py +4 -3
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +1 -1
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +1 -1
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +17 -17
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +4 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +5 -4
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +4 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +4 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +4 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +4 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +7 -6
- diffusers/pipelines/stable_diffusion_3/__init__.py +52 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_output.py +21 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +904 -0
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +941 -0
- diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +4 -3
- diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +10 -11
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +4 -3
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +4 -3
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +4 -3
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +4 -3
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +4 -3
- diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +4 -3
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +4 -3
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +4 -3
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +4 -3
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +4 -3
- diffusers/pipelines/unidiffuser/modeling_uvit.py +1 -1
- diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +4 -3
- diffusers/schedulers/__init__.py +2 -0
- diffusers/schedulers/scheduling_dpmsolver_sde.py +2 -2
- diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +2 -3
- diffusers/schedulers/scheduling_edm_euler.py +2 -4
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +287 -0
- diffusers/schedulers/scheduling_lms_discrete.py +2 -2
- diffusers/training_utils.py +4 -4
- diffusers/utils/__init__.py +3 -0
- diffusers/utils/constants.py +2 -0
- diffusers/utils/dummy_pt_objects.py +60 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +45 -0
- diffusers/utils/dynamic_modules_utils.py +15 -13
- diffusers/utils/hub_utils.py +106 -0
- diffusers/utils/import_utils.py +0 -1
- diffusers/utils/logging.py +3 -1
- diffusers/utils/state_dict_utils.py +2 -0
- {diffusers-0.28.2.dist-info → diffusers-0.29.1.dist-info}/METADATA +3 -3
- {diffusers-0.28.2.dist-info → diffusers-0.29.1.dist-info}/RECORD +112 -112
- {diffusers-0.28.2.dist-info → diffusers-0.29.1.dist-info}/WHEEL +1 -1
- diffusers/models/dual_transformer_2d.py +0 -20
- diffusers/models/prior_transformer.py +0 -12
- diffusers/models/t5_film_transformer.py +0 -70
- diffusers/models/transformer_2d.py +0 -25
- diffusers/models/transformer_temporal.py +0 -34
- diffusers/models/unet_1d.py +0 -26
- diffusers/models/unet_1d_blocks.py +0 -203
- diffusers/models/unet_2d.py +0 -27
- diffusers/models/unet_2d_blocks.py +0 -375
- diffusers/models/unet_2d_condition.py +0 -25
- {diffusers-0.28.2.dist-info → diffusers-0.29.1.dist-info}/LICENSE +0 -0
- {diffusers-0.28.2.dist-info → diffusers-0.29.1.dist-info}/entry_points.txt +0 -0
- {diffusers-0.28.2.dist-info → diffusers-0.29.1.dist-info}/top_level.txt +0 -0
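The headline change in 0.29.x is Stable Diffusion 3 support, visible above as the new `transformer_sd3.py`, the `stable_diffusion_3` and `controlnet_sd3` pipelines, and `scheduling_flow_match_euler_discrete.py`. A minimal, hedged usage sketch (the checkpoint id is illustrative and not part of this diff; any SD3-format repository should work):

```python
# Sketch only: assumes diffusers>=0.29 and access to an SD3 checkpoint.
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # illustrative repo id
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    "a photo of an astronaut riding a horse",
    num_inference_steps=28,
    guidance_scale=7.0,
).images[0]
image.save("sd3.png")
```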
diffusers/models/normalization.py

```diff
@@ -57,10 +57,12 @@ class AdaLayerNormZero(nn.Module):
         num_embeddings (`int`): The size of the embeddings dictionary.
     """
 
-    def __init__(self, embedding_dim: int, num_embeddings: int):
+    def __init__(self, embedding_dim: int, num_embeddings: Optional[int] = None):
         super().__init__()
-
-        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
+        if num_embeddings is not None:
+            self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
+        else:
+            self.emb = None
 
         self.silu = nn.SiLU()
         self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
@@ -69,11 +71,14 @@ class AdaLayerNormZero(nn.Module):
     def forward(
         self,
         x: torch.Tensor,
-        timestep: torch.Tensor,
-        class_labels: torch.LongTensor,
+        timestep: Optional[torch.Tensor] = None,
+        class_labels: Optional[torch.LongTensor] = None,
         hidden_dtype: Optional[torch.dtype] = None,
+        emb: Optional[torch.Tensor] = None,
     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
-        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
+        if self.emb is not None:
+            emb = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)
+        emb = self.linear(self.silu(emb))
         shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
         x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
         return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
```
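The hunks above make `num_embeddings` optional so `AdaLayerNormZero` can consume an externally computed conditioning embedding (the pattern the new SD3 blocks rely on) instead of always building one from `timestep`/`class_labels`. A small sketch of both call patterns; shapes are illustrative:

```python
# Sketch of the 0.29.x AdaLayerNormZero API; shapes are illustrative.
import torch
from diffusers.models.normalization import AdaLayerNormZero

batch, seq, dim = 2, 16, 64
x = torch.randn(batch, seq, dim)

# Existing pattern: the layer computes the conditioning embedding itself.
norm = AdaLayerNormZero(embedding_dim=dim, num_embeddings=10)
out, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm(
    x,
    timestep=torch.tensor([1, 2]),
    class_labels=torch.tensor([0, 3]),
    hidden_dtype=x.dtype,
)

# New pattern: no internal embedder; pass a precomputed `emb` directly.
norm_ext = AdaLayerNormZero(embedding_dim=dim)  # num_embeddings=None
out, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm_ext(x, emb=torch.randn(batch, dim))
```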
diffusers/models/transformers/__init__.py

```diff
@@ -9,4 +9,5 @@ if is_torch_available():
     from .prior_transformer import PriorTransformer
     from .t5_film_transformer import T5FilmDecoder
     from .transformer_2d import Transformer2DModel
+    from .transformer_sd3 import SD3Transformer2DModel
     from .transformer_temporal import TransformerTemporalModel
```
diffusers/models/transformers/dual_transformer_2d.py

```diff
@@ -15,7 +15,8 @@ from typing import Optional
 
 from torch import nn
 
-from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
+from ..modeling_outputs import Transformer2DModelOutput
+from .transformer_2d import Transformer2DModel
 
 
 class DualTransformer2DModel(nn.Module):
@@ -123,9 +124,9 @@ class DualTransformer2DModel(nn.Module):
             tuple.
 
         Returns:
-            [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
-            [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
-            returning a tuple, the first element is the sample tensor.
+            [`~models.transformers.transformer_2d.Transformer2DModelOutput`] or `tuple`:
+            [`~models.transformers.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a
+            `tuple`. When returning a tuple, the first element is the sample tensor.
         """
         input_states = hidden_states
 
```
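For downstream code, the practical effect of the import hunk above is that `Transformer2DModelOutput` now canonically lives in `diffusers.models.modeling_outputs`; the old `transformer_2d` path keeps resolving via a re-export, which the new `transformer_sd3.py` below still uses. A one-line sketch of the preferred import:

```python
# Preferred in 0.29.x; the old transformer_2d import path still resolves.
from diffusers.models.modeling_outputs import Transformer2DModelOutput
```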
diffusers/models/transformers/hunyuan_transformer_2d.py

```diff
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Optional
+from typing import Dict, Optional, Union
 
 import torch
 import torch.nn.functional as F
@@ -21,7 +21,7 @@ from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
 from ...utils.torch_utils import maybe_allow_in_graph
 from ..attention import FeedForward
-from ..attention_processor import Attention, HunyuanAttnProcessor2_0
+from ..attention_processor import Attention, AttentionProcessor, HunyuanAttnProcessor2_0
 from ..embeddings import (
     HunyuanCombinedTimestepTextSizeStyleEmbedding,
     PatchEmbed,
@@ -166,6 +166,7 @@ class HunyuanDiTBlock(nn.Module):
         self._chunk_size = None
         self._chunk_dim = 0
 
+    # Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward
     def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
         # Sets chunk feed-forward
         self._chunk_size = chunk_size
@@ -321,6 +322,110 @@ class HunyuanDiT2DModel(ModelMixin, ConfigMixin):
         self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
         self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
 
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
+    def fuse_qkv_projections(self):
+        """
+        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
+        are fused. For cross-attention modules, key and value projection matrices are fused.
+
+        <Tip warning={true}>
+
+        This API is 🧪 experimental.
+
+        </Tip>
+        """
+        self.original_attn_processors = None
+
+        for _, attn_processor in self.attn_processors.items():
+            if "Added" in str(attn_processor.__class__.__name__):
+                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
+
+        self.original_attn_processors = self.attn_processors
+
+        for module in self.modules():
+            if isinstance(module, Attention):
+                module.fuse_projections(fuse=True)
+
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
+    def unfuse_qkv_projections(self):
+        """Disables the fused QKV projection if enabled.
+
+        <Tip warning={true}>
+
+        This API is 🧪 experimental.
+
+        </Tip>
+
+        """
+        if self.original_attn_processors is not None:
+            self.set_attn_processor(self.original_attn_processors)
+
+    @property
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
+    def attn_processors(self) -> Dict[str, AttentionProcessor]:
+        r"""
+        Returns:
+            `dict` of attention processors: A dictionary containing all attention processors used in the model with
+            indexed by its weight name.
+        """
+        # set recursively
+        processors = {}
+
+        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+            if hasattr(module, "get_processor"):
+                processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
+
+            for sub_name, child in module.named_children():
+                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+            return processors
+
+        for name, module in self.named_children():
+            fn_recursive_add_processors(name, module, processors)
+
+        return processors
+
+    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
+    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+        r"""
+        Sets the attention processor to use to compute attention.
+
+        Parameters:
+            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+                The instantiated processor class or a dictionary of processor classes that will be set as the processor
+                for **all** `Attention` layers.
+
+                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+                processor. This is strongly recommended when setting trainable attention processors.
+
+        """
+        count = len(self.attn_processors.keys())
+
+        if isinstance(processor, dict) and len(processor) != count:
+            raise ValueError(
+                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+            )
+
+        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+            if hasattr(module, "set_processor"):
+                if not isinstance(processor, dict):
+                    module.set_processor(processor)
+                else:
+                    module.set_processor(processor.pop(f"{name}.processor"))
+
+            for sub_name, child in module.named_children():
+                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+        for name, module in self.named_children():
+            fn_recursive_attn_processor(name, module, processor)
+
+    def set_default_attn_processor(self):
+        """
+        Disables custom attention processors and sets the default attention implementation.
+        """
+        self.set_attn_processor(HunyuanAttnProcessor2_0())
+
     def forward(
         self,
         hidden_states,
@@ -425,3 +530,45 @@ class HunyuanDiT2DModel(ModelMixin, ConfigMixin):
         if not return_dict:
             return (output,)
         return Transformer2DModelOutput(sample=output)
+
+    # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
+    def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
+        """
+        Sets the attention processor to use [feed forward
+        chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).
+
+        Parameters:
+            chunk_size (`int`, *optional*):
+                The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
+                over each tensor of dim=`dim`.
+            dim (`int`, *optional*, defaults to `0`):
+                The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
+                or dim=1 (sequence length).
+        """
+        if dim not in [0, 1]:
+            raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")
+
+        # By default chunk size is 1
+        chunk_size = chunk_size or 1
+
+        def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
+            if hasattr(module, "set_chunk_feed_forward"):
+                module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
+
+            for child in module.children():
+                fn_recursive_feed_forward(child, chunk_size, dim)
+
+        for module in self.children():
+            fn_recursive_feed_forward(module, chunk_size, dim)
+
+    # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking
+    def disable_forward_chunking(self):
+        def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
+            if hasattr(module, "set_chunk_feed_forward"):
+                module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
+
+            for child in module.children():
+                fn_recursive_feed_forward(child, chunk_size, dim)
+
+        for module in self.children():
+            fn_recursive_feed_forward(module, None, 0)
```
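Together these hunks give `HunyuanDiT2DModel` the same attention-processor plumbing and memory knobs the UNets already expose. A hedged sketch of the new surface (the checkpoint id is illustrative):

```python
# Sketch only: exercises the methods added in the hunks above.
import torch
from diffusers import HunyuanDiT2DModel

model = HunyuanDiT2DModel.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers",  # illustrative repo id
    subfolder="transformer",
    torch_dtype=torch.float16,
)

print(len(model.attn_processors))   # new property: inspect every attention processor
model.set_default_attn_processor()  # reset everything to HunyuanAttnProcessor2_0

model.fuse_qkv_projections()        # experimental: fuse q/k/v projection matmuls
model.unfuse_qkv_projections()      # restore the saved processors

model.enable_forward_chunking(chunk_size=1, dim=0)  # trade speed for memory
model.disable_forward_chunking()
```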
diffusers/models/transformers/prior_transformer.py

```diff
@@ -266,13 +266,13 @@ class PriorTransformer(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin):
             attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`):
                 Text mask for the text embeddings.
             return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~models.prior_transformer.PriorTransformerOutput`] instead of a plain
-                tuple.
+                Whether or not to return a [`~models.transformers.prior_transformer.PriorTransformerOutput`] instead of
+                a plain tuple.
 
         Returns:
-            [`~models.prior_transformer.PriorTransformerOutput`] or `tuple`:
-                If return_dict is True, a [`~models.prior_transformer.PriorTransformerOutput`] is returned, otherwise a
-                tuple is returned where the first element is the sample tensor.
+            [`~models.transformers.prior_transformer.PriorTransformerOutput`] or `tuple`:
+                If return_dict is True, a [`~models.transformers.prior_transformer.PriorTransformerOutput`] is
+                returned, otherwise a tuple is returned where the first element is the sample tensor.
         """
         batch_size = hidden_states.shape[0]
 
```
diffusers/models/transformers/transformer_2d.py

```diff
@@ -369,8 +369,8 @@ class Transformer2DModel(LegacyModelMixin, LegacyConfigMixin):
             tuple.
 
         Returns:
-            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
-            `tuple` where the first element is the sample tensor.
+            If `return_dict` is True, an [`~models.transformers.transformer_2d.Transformer2DModelOutput`] is returned,
+            otherwise a `tuple` where the first element is the sample tensor.
         """
         if cross_attention_kwargs is not None:
             if cross_attention_kwargs.get("scale", None) is not None:
```
diffusers/models/transformers/transformer_sd3.py (new file, +353 lines)

```python
# Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import Any, Dict, List, Optional, Union

import torch
import torch.nn as nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
from ...models.attention import JointTransformerBlock
from ...models.attention_processor import Attention, AttentionProcessor
from ...models.modeling_utils import ModelMixin
from ...models.normalization import AdaLayerNormContinuous
from ...utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers
from ..embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
from .transformer_2d import Transformer2DModelOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SD3Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
    """
    The Transformer model introduced in Stable Diffusion 3.

    Reference: https://arxiv.org/abs/2403.03206

    Parameters:
        sample_size (`int`): The width of the latent images. This is fixed during training since
            it is used to learn a number of position embeddings.
        patch_size (`int`): Patch size to turn the input data into small patches.
        in_channels (`int`, *optional*, defaults to 16): The number of channels in the input.
        num_layers (`int`, *optional*, defaults to 18): The number of layers of Transformer blocks to use.
        attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
        num_attention_heads (`int`, *optional*, defaults to 18): The number of heads to use for multi-head attention.
        cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
        caption_projection_dim (`int`): Number of dimensions to use when projecting the `encoder_hidden_states`.
        pooled_projection_dim (`int`): Number of dimensions to use when projecting the `pooled_projections`.
        out_channels (`int`, defaults to 16): Number of output channels.

    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        sample_size: int = 128,
        patch_size: int = 2,
        in_channels: int = 16,
        num_layers: int = 18,
        attention_head_dim: int = 64,
        num_attention_heads: int = 18,
        joint_attention_dim: int = 4096,
        caption_projection_dim: int = 1152,
        pooled_projection_dim: int = 2048,
        out_channels: int = 16,
        pos_embed_max_size: int = 96,
    ):
        super().__init__()
        default_out_channels = in_channels
        self.out_channels = out_channels if out_channels is not None else default_out_channels
        self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim

        self.pos_embed = PatchEmbed(
            height=self.config.sample_size,
            width=self.config.sample_size,
            patch_size=self.config.patch_size,
            in_channels=self.config.in_channels,
            embed_dim=self.inner_dim,
            pos_embed_max_size=pos_embed_max_size,  # hard-code for now.
        )
        self.time_text_embed = CombinedTimestepTextProjEmbeddings(
            embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim
        )
        self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.config.caption_projection_dim)

        # `attention_head_dim` is doubled to account for the mixing.
        # It needs to crafted when we get the actual checkpoints.
        self.transformer_blocks = nn.ModuleList(
            [
                JointTransformerBlock(
                    dim=self.inner_dim,
                    num_attention_heads=self.config.num_attention_heads,
                    attention_head_dim=self.inner_dim,
                    context_pre_only=i == num_layers - 1,
                )
                for i in range(self.config.num_layers)
            ]
        )

        self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
        self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)

        self.gradient_checkpointing = False

    # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
    def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
        """
        Sets the attention processor to use [feed forward
        chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).

        Parameters:
            chunk_size (`int`, *optional*):
                The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
                over each tensor of dim=`dim`.
            dim (`int`, *optional*, defaults to `0`):
                The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
                or dim=1 (sequence length).
        """
        if dim not in [0, 1]:
            raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")

        # By default chunk size is 1
        chunk_size = chunk_size or 1

        def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
            if hasattr(module, "set_chunk_feed_forward"):
                module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)

            for child in module.children():
                fn_recursive_feed_forward(child, chunk_size, dim)

        for module in self.children():
            fn_recursive_feed_forward(module, chunk_size, dim)

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>

        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)

    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: torch.FloatTensor = None,
        pooled_projections: torch.FloatTensor = None,
        timestep: torch.LongTensor = None,
        block_controlnet_hidden_states: List = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
        """
        The [`SD3Transformer2DModel`] forward method.

        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`):
                Input `hidden_states`.
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`):
                Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
            pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected
                from the embeddings of input conditions.
            timestep ( `torch.LongTensor`):
                Used to indicate denoising step.
            block_controlnet_hidden_states: (`list` of `torch.Tensor`):
                A list of tensors that if specified are added to the residuals of transformer blocks.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
                tuple.

        Returns:
            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
            `tuple` where the first element is the sample tensor.
        """
        if joint_attention_kwargs is not None:
            joint_attention_kwargs = joint_attention_kwargs.copy()
            lora_scale = joint_attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        if USE_PEFT_BACKEND:
            # weight the lora layers by setting `lora_scale` for each PEFT layer
            scale_lora_layers(self, lora_scale)
        else:
            if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
                logger.warning(
                    "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
                )

        height, width = hidden_states.shape[-2:]

        hidden_states = self.pos_embed(hidden_states)  # takes care of adding positional embeddings too.
        temb = self.time_text_embed(timestep, pooled_projections)
        encoder_hidden_states = self.context_embedder(encoder_hidden_states)

        for index_block, block in enumerate(self.transformer_blocks):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    temb,
                    **ckpt_kwargs,
                )

            else:
                encoder_hidden_states, hidden_states = block(
                    hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb
                )

            # controlnet residual
            if block_controlnet_hidden_states is not None and block.context_pre_only is False:
                interval_control = len(self.transformer_blocks) // len(block_controlnet_hidden_states)
                hidden_states = hidden_states + block_controlnet_hidden_states[index_block // interval_control]

        hidden_states = self.norm_out(hidden_states, temb)
        hidden_states = self.proj_out(hidden_states)

        # unpatchify
        patch_size = self.config.patch_size
        height = height // patch_size
        width = width // patch_size

        hidden_states = hidden_states.reshape(
            shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels)
        )
        hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
        )

        if USE_PEFT_BACKEND:
            # remove `lora_scale` from each PEFT layer
            unscale_lora_layers(self, lora_scale)

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
```
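A minimal shape-check sketch for the new model, using a deliberately tiny config rather than the shipped SD3 defaults. Note that `caption_projection_dim` must equal `num_attention_heads * attention_head_dim`, since `context_embedder` feeds the joint blocks at `inner_dim` width:

```python
# Smoke-test sketch; config values are illustrative, not the SD3 release config.
import torch
from diffusers import SD3Transformer2DModel

model = SD3Transformer2DModel(
    sample_size=32,
    patch_size=2,
    in_channels=16,
    num_layers=2,
    attention_head_dim=32,
    num_attention_heads=4,
    joint_attention_dim=64,
    caption_projection_dim=128,  # must equal 4 * 32 = inner_dim
    pooled_projection_dim=48,
    out_channels=16,
    pos_embed_max_size=32,
)

sample = torch.randn(1, 16, 32, 32)     # latent input
prompt_embeds = torch.randn(1, 77, 64)  # joint text-encoder states
pooled = torch.randn(1, 48)             # pooled text embedding
timestep = torch.tensor([500])

out = model(
    hidden_states=sample,
    encoder_hidden_states=prompt_embeds,
    pooled_projections=pooled,
    timestep=timestep,
).sample

assert out.shape == sample.shape  # (1, 16, 32, 32)
```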
diffusers/models/transformers/transformer_temporal.py

```diff
@@ -149,13 +149,14 @@ class TransformerTemporalModel(ModelMixin, ConfigMixin):
                 `self.processor` in
                 [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
             return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~models.transformer_temporal.TransformerTemporalModelOutput`] instead of a plain
-                tuple.
+                Whether or not to return a [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`]
+                instead of a plain tuple.
 
         Returns:
-            [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
-                If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is
-                returned, otherwise a `tuple` where the first element is the sample tensor.
+            [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
+                If `return_dict` is True, an
+                [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a
+                `tuple` where the first element is the sample tensor.
         """
         # 1. Input
         batch_frames, channel, height, width = hidden_states.shape
@@ -294,13 +295,14 @@ class TransformerSpatioTemporalModel(nn.Module):
                 A tensor indicating whether the input contains only images. 1 indicates that the input contains only
                 images, 0 indicates that the input contains video frames.
             return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~models.transformer_temporal.TransformerTemporalModelOutput`] instead of a
-                plain tuple.
+                Whether or not to return a [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`]
+                instead of a plain tuple.
 
         Returns:
-            [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
-                If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is
-                returned, otherwise a `tuple` where the first element is the sample tensor.
+            [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
+                If `return_dict` is True, an
+                [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a
+                `tuple` where the first element is the sample tensor.
         """
         # 1. Input
         batch_frames, _, height, width = hidden_states.shape
```