diffusers 0.23.1__py3-none-any.whl → 0.25.0__py3-none-any.whl
- diffusers/__init__.py +26 -2
- diffusers/commands/fp16_safetensors.py +10 -11
- diffusers/configuration_utils.py +13 -8
- diffusers/dependency_versions_check.py +0 -1
- diffusers/dependency_versions_table.py +5 -5
- diffusers/experimental/rl/value_guided_sampling.py +1 -1
- diffusers/image_processor.py +463 -51
- diffusers/loaders/__init__.py +82 -0
- diffusers/loaders/ip_adapter.py +159 -0
- diffusers/loaders/lora.py +1553 -0
- diffusers/loaders/lora_conversion_utils.py +284 -0
- diffusers/loaders/single_file.py +637 -0
- diffusers/loaders/textual_inversion.py +455 -0
- diffusers/loaders/unet.py +828 -0
- diffusers/loaders/utils.py +59 -0
- diffusers/models/__init__.py +26 -9
- diffusers/models/activations.py +9 -6
- diffusers/models/attention.py +301 -29
- diffusers/models/attention_flax.py +9 -1
- diffusers/models/attention_processor.py +378 -6
- diffusers/models/autoencoders/__init__.py +5 -0
- diffusers/models/{autoencoder_asym_kl.py → autoencoders/autoencoder_asym_kl.py} +17 -12
- diffusers/models/{autoencoder_kl.py → autoencoders/autoencoder_kl.py} +47 -23
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +402 -0
- diffusers/models/{autoencoder_tiny.py → autoencoders/autoencoder_tiny.py} +24 -28
- diffusers/models/{consistency_decoder_vae.py → autoencoders/consistency_decoder_vae.py} +51 -44
- diffusers/models/{vae.py → autoencoders/vae.py} +71 -17
- diffusers/models/controlnet.py +59 -39
- diffusers/models/controlnet_flax.py +19 -18
- diffusers/models/downsampling.py +338 -0
- diffusers/models/embeddings.py +112 -29
- diffusers/models/embeddings_flax.py +2 -0
- diffusers/models/lora.py +131 -1
- diffusers/models/modeling_flax_utils.py +14 -8
- diffusers/models/modeling_outputs.py +17 -0
- diffusers/models/modeling_utils.py +37 -29
- diffusers/models/normalization.py +110 -4
- diffusers/models/resnet.py +299 -652
- diffusers/models/transformer_2d.py +22 -5
- diffusers/models/transformer_temporal.py +183 -1
- diffusers/models/unet_2d_blocks_flax.py +5 -0
- diffusers/models/unet_2d_condition.py +46 -0
- diffusers/models/unet_2d_condition_flax.py +13 -13
- diffusers/models/unet_3d_blocks.py +957 -173
- diffusers/models/unet_3d_condition.py +16 -8
- diffusers/models/unet_kandinsky3.py +535 -0
- diffusers/models/unet_motion_model.py +48 -33
- diffusers/models/unet_spatio_temporal_condition.py +489 -0
- diffusers/models/upsampling.py +454 -0
- diffusers/models/uvit_2d.py +471 -0
- diffusers/models/vae_flax.py +7 -0
- diffusers/models/vq_model.py +12 -3
- diffusers/optimization.py +16 -9
- diffusers/pipelines/__init__.py +137 -76
- diffusers/pipelines/amused/__init__.py +62 -0
- diffusers/pipelines/amused/pipeline_amused.py +328 -0
- diffusers/pipelines/amused/pipeline_amused_img2img.py +347 -0
- diffusers/pipelines/amused/pipeline_amused_inpaint.py +378 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +66 -8
- diffusers/pipelines/audioldm/pipeline_audioldm.py +1 -0
- diffusers/pipelines/auto_pipeline.py +23 -13
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -0
- diffusers/pipelines/controlnet/pipeline_controlnet.py +238 -35
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +148 -37
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +155 -41
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +123 -43
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +216 -39
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +106 -34
- diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +1 -0
- diffusers/pipelines/ddim/pipeline_ddim.py +1 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -0
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +13 -1
- diffusers/pipelines/deprecated/__init__.py +153 -0
- diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/__init__.py +3 -3
- diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion.py +177 -34
- diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion_img2img.py +182 -37
- diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_output.py +1 -1
- diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/__init__.py +1 -1
- diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/mel.py +2 -2
- diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/pipeline_audio_diffusion.py +4 -4
- diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/__init__.py +1 -1
- diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/pipeline_latent_diffusion_uncond.py +4 -4
- diffusers/pipelines/{pndm → deprecated/pndm}/__init__.py +1 -1
- diffusers/pipelines/{pndm → deprecated/pndm}/pipeline_pndm.py +4 -4
- diffusers/pipelines/{repaint → deprecated/repaint}/__init__.py +1 -1
- diffusers/pipelines/{repaint → deprecated/repaint}/pipeline_repaint.py +5 -5
- diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/__init__.py +1 -1
- diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/pipeline_score_sde_ve.py +5 -4
- diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/__init__.py +6 -6
- diffusers/pipelines/{spectrogram_diffusion/continous_encoder.py → deprecated/spectrogram_diffusion/continuous_encoder.py} +2 -2
- diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/midi_utils.py +1 -1
- diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/notes_encoder.py +2 -2
- diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/pipeline_spectrogram_diffusion.py +8 -7
- diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py +55 -0
- diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_cycle_diffusion.py +34 -13
- diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_onnx_stable_diffusion_inpaint_legacy.py +7 -6
- diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_inpaint_legacy.py +12 -11
- diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_model_editing.py +17 -11
- diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_paradigms.py +11 -10
- diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_pix2pix_zero.py +14 -13
- diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/__init__.py +1 -1
- diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/pipeline_stochastic_karras_ve.py +4 -4
- diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/__init__.py +3 -3
- diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/modeling_text_unet.py +83 -51
- diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion.py +4 -4
- diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_dual_guided.py +7 -6
- diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_image_variation.py +7 -6
- diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_text_to_image.py +7 -6
- diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/__init__.py +3 -3
- diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/pipeline_vq_diffusion.py +5 -5
- diffusers/pipelines/dit/pipeline_dit.py +1 -0
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +3 -3
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +1 -1
- diffusers/pipelines/kandinsky3/__init__.py +49 -0
- diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +98 -0
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +589 -0
- diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +654 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +111 -11
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +102 -9
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -0
- diffusers/pipelines/musicldm/pipeline_musicldm.py +1 -1
- diffusers/pipelines/onnx_utils.py +8 -5
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +7 -2
- diffusers/pipelines/pipeline_flax_utils.py +11 -8
- diffusers/pipelines/pipeline_utils.py +63 -42
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +247 -38
- diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +3 -3
- diffusers/pipelines/stable_diffusion/__init__.py +37 -65
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +75 -78
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +2 -4
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +174 -11
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +8 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +178 -11
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +224 -13
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +74 -20
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +4 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +7 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +5 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +5 -0
- diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +48 -0
- diffusers/pipelines/{stable_diffusion → stable_diffusion_attend_and_excite}/pipeline_stable_diffusion_attend_and_excite.py +6 -2
- diffusers/pipelines/stable_diffusion_diffedit/__init__.py +48 -0
- diffusers/pipelines/{stable_diffusion → stable_diffusion_diffedit}/pipeline_stable_diffusion_diffedit.py +3 -3
- diffusers/pipelines/stable_diffusion_gligen/__init__.py +50 -0
- diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen.py +3 -2
- diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen_text_image.py +4 -3
- diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +60 -0
- diffusers/pipelines/{stable_diffusion → stable_diffusion_k_diffusion}/pipeline_stable_diffusion_k_diffusion.py +7 -1
- diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +48 -0
- diffusers/pipelines/{stable_diffusion → stable_diffusion_ldm3d}/pipeline_stable_diffusion_ldm3d.py +51 -7
- diffusers/pipelines/stable_diffusion_panorama/__init__.py +48 -0
- diffusers/pipelines/{stable_diffusion → stable_diffusion_panorama}/pipeline_stable_diffusion_panorama.py +57 -8
- diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +58 -6
- diffusers/pipelines/stable_diffusion_sag/__init__.py +48 -0
- diffusers/pipelines/{stable_diffusion → stable_diffusion_sag}/pipeline_stable_diffusion_sag.py +68 -10
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +194 -17
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +205 -16
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +206 -17
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +23 -17
- diffusers/pipelines/stable_video_diffusion/__init__.py +58 -0
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +652 -0
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +108 -12
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +115 -14
- diffusers/pipelines/text_to_video_synthesis/__init__.py +2 -0
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +6 -0
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +23 -3
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +334 -10
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +1331 -0
- diffusers/pipelines/unclip/pipeline_unclip.py +2 -1
- diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +1 -0
- diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +14 -4
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +9 -5
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +1 -1
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +2 -2
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +5 -1
- diffusers/schedulers/__init__.py +4 -4
- diffusers/schedulers/deprecated/__init__.py +50 -0
- diffusers/schedulers/{scheduling_karras_ve.py → deprecated/scheduling_karras_ve.py} +4 -4
- diffusers/schedulers/{scheduling_sde_vp.py → deprecated/scheduling_sde_vp.py} +4 -6
- diffusers/schedulers/scheduling_amused.py +162 -0
- diffusers/schedulers/scheduling_consistency_models.py +2 -0
- diffusers/schedulers/scheduling_ddim.py +1 -3
- diffusers/schedulers/scheduling_ddim_inverse.py +2 -7
- diffusers/schedulers/scheduling_ddim_parallel.py +1 -3
- diffusers/schedulers/scheduling_ddpm.py +47 -3
- diffusers/schedulers/scheduling_ddpm_parallel.py +47 -3
- diffusers/schedulers/scheduling_deis_multistep.py +28 -6
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +28 -6
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +28 -6
- diffusers/schedulers/scheduling_dpmsolver_sde.py +3 -3
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +28 -6
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +59 -3
- diffusers/schedulers/scheduling_euler_discrete.py +102 -16
- diffusers/schedulers/scheduling_heun_discrete.py +17 -5
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +17 -5
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +17 -5
- diffusers/schedulers/scheduling_lcm.py +123 -29
- diffusers/schedulers/scheduling_lms_discrete.py +3 -3
- diffusers/schedulers/scheduling_pndm.py +1 -3
- diffusers/schedulers/scheduling_repaint.py +1 -3
- diffusers/schedulers/scheduling_unipc_multistep.py +28 -6
- diffusers/schedulers/scheduling_utils.py +3 -1
- diffusers/schedulers/scheduling_utils_flax.py +3 -1
- diffusers/training_utils.py +1 -1
- diffusers/utils/__init__.py +1 -2
- diffusers/utils/constants.py +10 -12
- diffusers/utils/dummy_pt_objects.py +75 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +105 -0
- diffusers/utils/dynamic_modules_utils.py +18 -22
- diffusers/utils/export_utils.py +8 -3
- diffusers/utils/hub_utils.py +24 -36
- diffusers/utils/logging.py +11 -11
- diffusers/utils/outputs.py +5 -5
- diffusers/utils/peft_utils.py +88 -44
- diffusers/utils/state_dict_utils.py +8 -0
- diffusers/utils/testing_utils.py +199 -1
- diffusers/utils/torch_utils.py +4 -4
- {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/METADATA +86 -69
- diffusers-0.25.0.dist-info/RECORD +360 -0
- {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/WHEEL +1 -1
- {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/entry_points.txt +0 -1
- diffusers/loaders.py +0 -3336
- diffusers-0.23.1.dist-info/RECORD +0 -323
- /diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/modeling_roberta_series.py +0 -0
- {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/LICENSE +0 -0
- {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/top_level.txt +0 -0
diffusers/models/upsampling.py
@@ -0,0 +1,454 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..utils import USE_PEFT_BACKEND
+from .lora import LoRACompatibleConv
+from .normalization import RMSNorm
+
+
+class Upsample1D(nn.Module):
+    """A 1D upsampling layer with an optional convolution.
+
+    Parameters:
+        channels (`int`):
+            number of channels in the inputs and outputs.
+        use_conv (`bool`, default `False`):
+            option to use a convolution.
+        use_conv_transpose (`bool`, default `False`):
+            option to use a convolution transpose.
+        out_channels (`int`, optional):
+            number of output channels. Defaults to `channels`.
+        name (`str`, default `conv`):
+            name of the upsampling 1D layer.
+    """
+
+    def __init__(
+        self,
+        channels: int,
+        use_conv: bool = False,
+        use_conv_transpose: bool = False,
+        out_channels: Optional[int] = None,
+        name: str = "conv",
+    ):
+        super().__init__()
+        self.channels = channels
+        self.out_channels = out_channels or channels
+        self.use_conv = use_conv
+        self.use_conv_transpose = use_conv_transpose
+        self.name = name
+
+        self.conv = None
+        if use_conv_transpose:
+            self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1)
+        elif use_conv:
+            self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1)
+
+    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+        assert inputs.shape[1] == self.channels
+        if self.use_conv_transpose:
+            return self.conv(inputs)
+
+        outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest")
+
+        if self.use_conv:
+            outputs = self.conv(outputs)
+
+        return outputs
+
+
+class Upsample2D(nn.Module):
+    """A 2D upsampling layer with an optional convolution.
+
+    Parameters:
+        channels (`int`):
+            number of channels in the inputs and outputs.
+        use_conv (`bool`, default `False`):
+            option to use a convolution.
+        use_conv_transpose (`bool`, default `False`):
+            option to use a convolution transpose.
+        out_channels (`int`, optional):
+            number of output channels. Defaults to `channels`.
+        name (`str`, default `conv`):
+            name of the upsampling 2D layer.
+    """
+
+    def __init__(
+        self,
+        channels: int,
+        use_conv: bool = False,
+        use_conv_transpose: bool = False,
+        out_channels: Optional[int] = None,
+        name: str = "conv",
+        kernel_size: Optional[int] = None,
+        padding=1,
+        norm_type=None,
+        eps=None,
+        elementwise_affine=None,
+        bias=True,
+        interpolate=True,
+    ):
+        super().__init__()
+        self.channels = channels
+        self.out_channels = out_channels or channels
+        self.use_conv = use_conv
+        self.use_conv_transpose = use_conv_transpose
+        self.name = name
+        self.interpolate = interpolate
+        conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv
+
+        if norm_type == "ln_norm":
+            self.norm = nn.LayerNorm(channels, eps, elementwise_affine)
+        elif norm_type == "rms_norm":
+            self.norm = RMSNorm(channels, eps, elementwise_affine)
+        elif norm_type is None:
+            self.norm = None
+        else:
+            raise ValueError(f"unknown norm_type: {norm_type}")
+
+        conv = None
+        if use_conv_transpose:
+            if kernel_size is None:
+                kernel_size = 4
+            conv = nn.ConvTranspose2d(
+                channels, self.out_channels, kernel_size=kernel_size, stride=2, padding=padding, bias=bias
+            )
+        elif use_conv:
+            if kernel_size is None:
+                kernel_size = 3
+            conv = conv_cls(self.channels, self.out_channels, kernel_size=kernel_size, padding=padding, bias=bias)
+
+        # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
+        if name == "conv":
+            self.conv = conv
+        else:
+            self.Conv2d_0 = conv
+
+    def forward(
+        self,
+        hidden_states: torch.FloatTensor,
+        output_size: Optional[int] = None,
+        scale: float = 1.0,
+    ) -> torch.FloatTensor:
+        assert hidden_states.shape[1] == self.channels
+
+        if self.norm is not None:
+            hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+
+        if self.use_conv_transpose:
+            return self.conv(hidden_states)
+
+        # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16
+        # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch
+        # https://github.com/pytorch/pytorch/issues/86679
+        dtype = hidden_states.dtype
+        if dtype == torch.bfloat16:
+            hidden_states = hidden_states.to(torch.float32)
+
+        # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
+        if hidden_states.shape[0] >= 64:
+            hidden_states = hidden_states.contiguous()
+
+        # if `output_size` is passed we force the interpolation output
+        # size and do not make use of `scale_factor=2`
+        if self.interpolate:
+            if output_size is None:
+                hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
+            else:
+                hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
+
+        # If the input is bfloat16, we cast back to bfloat16
+        if dtype == torch.bfloat16:
+            hidden_states = hidden_states.to(dtype)
+
+        # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
+        if self.use_conv:
+            if self.name == "conv":
+                if isinstance(self.conv, LoRACompatibleConv) and not USE_PEFT_BACKEND:
+                    hidden_states = self.conv(hidden_states, scale)
+                else:
+                    hidden_states = self.conv(hidden_states)
+            else:
+                if isinstance(self.Conv2d_0, LoRACompatibleConv) and not USE_PEFT_BACKEND:
+                    hidden_states = self.Conv2d_0(hidden_states, scale)
+                else:
+                    hidden_states = self.Conv2d_0(hidden_states)
+
+        return hidden_states
+
+
+class FirUpsample2D(nn.Module):
+    """A 2D FIR upsampling layer with an optional convolution.
+
+    Parameters:
+        channels (`int`, optional):
+            number of channels in the inputs and outputs.
+        use_conv (`bool`, default `False`):
+            option to use a convolution.
+        out_channels (`int`, optional):
+            number of output channels. Defaults to `channels`.
+        fir_kernel (`tuple`, default `(1, 3, 3, 1)`):
+            kernel for the FIR filter.
+    """
+
+    def __init__(
+        self,
+        channels: Optional[int] = None,
+        out_channels: Optional[int] = None,
+        use_conv: bool = False,
+        fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1),
+    ):
+        super().__init__()
+        out_channels = out_channels if out_channels else channels
+        if use_conv:
+            self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
+        self.use_conv = use_conv
+        self.fir_kernel = fir_kernel
+        self.out_channels = out_channels
+
+    def _upsample_2d(
+        self,
+        hidden_states: torch.FloatTensor,
+        weight: Optional[torch.FloatTensor] = None,
+        kernel: Optional[torch.FloatTensor] = None,
+        factor: int = 2,
+        gain: float = 1,
+    ) -> torch.FloatTensor:
+        """Fused `upsample_2d()` followed by `Conv2d()`.
+
+        Padding is performed only once at the beginning, not between the operations. The fused op is considerably more
+        efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of
+        arbitrary order.
+
+        Args:
+            hidden_states (`torch.FloatTensor`):
+                Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
+            weight (`torch.FloatTensor`, *optional*):
+                Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be
+                performed by `inChannels = x.shape[0] // numGroups`.
+            kernel (`torch.FloatTensor`, *optional*):
+                FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which
+                corresponds to nearest-neighbor upsampling.
+            factor (`int`, *optional*): Integer upsampling factor (default: 2).
+            gain (`float`, *optional*): Scaling factor for signal magnitude (default: 1.0).
+
+        Returns:
+            output (`torch.FloatTensor`):
+                Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same
+                datatype as `hidden_states`.
+        """
+
+        assert isinstance(factor, int) and factor >= 1
+
+        # Setup filter kernel.
+        if kernel is None:
+            kernel = [1] * factor
+
+        # setup kernel
+        kernel = torch.tensor(kernel, dtype=torch.float32)
+        if kernel.ndim == 1:
+            kernel = torch.outer(kernel, kernel)
+        kernel /= torch.sum(kernel)
+
+        kernel = kernel * (gain * (factor**2))
+
+        if self.use_conv:
+            convH = weight.shape[2]
+            convW = weight.shape[3]
+            inC = weight.shape[1]
+
+            pad_value = (kernel.shape[0] - factor) - (convW - 1)
+
+            stride = (factor, factor)
+            # Determine data dimensions.
+            output_shape = (
+                (hidden_states.shape[2] - 1) * factor + convH,
+                (hidden_states.shape[3] - 1) * factor + convW,
+            )
+            output_padding = (
+                output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH,
+                output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW,
+            )
+            assert output_padding[0] >= 0 and output_padding[1] >= 0
+            num_groups = hidden_states.shape[1] // inC
+
+            # Transpose weights.
+            weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW))
+            weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4)
+            weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW))
+
+            inverse_conv = F.conv_transpose2d(
+                hidden_states,
+                weight,
+                stride=stride,
+                output_padding=output_padding,
+                padding=0,
+            )
+
+            output = upfirdn2d_native(
+                inverse_conv,
+                torch.tensor(kernel, device=inverse_conv.device),
+                pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1),
+            )
+        else:
+            pad_value = kernel.shape[0] - factor
+            output = upfirdn2d_native(
+                hidden_states,
+                torch.tensor(kernel, device=hidden_states.device),
+                up=factor,
+                pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),
+            )
+
+        return output
+
+    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+        if self.use_conv:
+            height = self._upsample_2d(hidden_states, self.Conv2d_0.weight, kernel=self.fir_kernel)
+            height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
+        else:
+            height = self._upsample_2d(hidden_states, kernel=self.fir_kernel, factor=2)
+
+        return height
+
+
+class KUpsample2D(nn.Module):
+    r"""A 2D K-upsampling layer.
+
+    Parameters:
+        pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use.
+    """
+
+    def __init__(self, pad_mode: str = "reflect"):
+        super().__init__()
+        self.pad_mode = pad_mode
+        kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) * 2
+        self.pad = kernel_1d.shape[1] // 2 - 1
+        self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False)
+
+    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+        inputs = F.pad(inputs, ((self.pad + 1) // 2,) * 4, self.pad_mode)
+        weight = inputs.new_zeros(
+            [
+                inputs.shape[1],
+                inputs.shape[1],
+                self.kernel.shape[0],
+                self.kernel.shape[1],
+            ]
+        )
+        indices = torch.arange(inputs.shape[1], device=inputs.device)
+        kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1)
+        weight[indices, indices] = kernel
+        return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1)
+
+
+def upfirdn2d_native(
+    tensor: torch.Tensor,
+    kernel: torch.Tensor,
+    up: int = 1,
+    down: int = 1,
+    pad: Tuple[int, int] = (0, 0),
+) -> torch.Tensor:
+    up_x = up_y = up
+    down_x = down_y = down
+    pad_x0 = pad_y0 = pad[0]
+    pad_x1 = pad_y1 = pad[1]
+
+    _, channel, in_h, in_w = tensor.shape
+    tensor = tensor.reshape(-1, in_h, in_w, 1)
+
+    _, in_h, in_w, minor = tensor.shape
+    kernel_h, kernel_w = kernel.shape
+
+    out = tensor.view(-1, in_h, 1, in_w, 1, minor)
+    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
+    out = out.view(-1, in_h * up_y, in_w * up_x, minor)
+
+    out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
+    out = out.to(tensor.device)  # Move back to mps if necessary
+    out = out[
+        :,
+        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
+        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
+        :,
+    ]
+
+    out = out.permute(0, 3, 1, 2)
+    out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
+    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
+    out = F.conv2d(out, w)
+    out = out.reshape(
+        -1,
+        minor,
+        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
+        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
+    )
+    out = out.permute(0, 2, 3, 1)
+    out = out[:, ::down_y, ::down_x, :]
+
+    out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
+    out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
+
+    return out.view(-1, channel, out_h, out_w)
+
+
+def upsample_2d(
+    hidden_states: torch.FloatTensor,
+    kernel: Optional[torch.FloatTensor] = None,
+    factor: int = 2,
+    gain: float = 1,
+) -> torch.FloatTensor:
+    r"""Upsample2D a batch of 2D images with the given filter.
+    Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given
+    filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified
+    `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is
+    a: multiple of the upsampling factor.
+
+    Args:
+        hidden_states (`torch.FloatTensor`):
+            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
+        kernel (`torch.FloatTensor`, *optional*):
+            FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which
+            corresponds to nearest-neighbor upsampling.
+        factor (`int`, *optional*, default to `2`):
+            Integer upsampling factor.
+        gain (`float`, *optional*, default to `1.0`):
+            Scaling factor for signal magnitude (default: 1.0).
+
+    Returns:
+        output (`torch.FloatTensor`):
+            Tensor of the shape `[N, C, H * factor, W * factor]`
+    """
+    assert isinstance(factor, int) and factor >= 1
+    if kernel is None:
+        kernel = [1] * factor
+
+    kernel = torch.tensor(kernel, dtype=torch.float32)
+    if kernel.ndim == 1:
+        kernel = torch.outer(kernel, kernel)
+    kernel /= torch.sum(kernel)
+
+    kernel = kernel * (gain * (factor**2))
+    pad_value = kernel.shape[0] - factor
+    output = upfirdn2d_native(
+        hidden_states,
+        kernel.to(device=hidden_states.device),
+        up=factor,
+        pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),
+    )
+    return output