diffusers 0.29.2__py3-none-any.whl → 0.30.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registry. It is provided for informational purposes only.
Files changed (220)
  1. diffusers/__init__.py +94 -3
  2. diffusers/commands/env.py +1 -5
  3. diffusers/configuration_utils.py +4 -9
  4. diffusers/dependency_versions_table.py +2 -2
  5. diffusers/image_processor.py +1 -2
  6. diffusers/loaders/__init__.py +17 -2
  7. diffusers/loaders/ip_adapter.py +10 -7
  8. diffusers/loaders/lora_base.py +752 -0
  9. diffusers/loaders/lora_pipeline.py +2222 -0
  10. diffusers/loaders/peft.py +213 -5
  11. diffusers/loaders/single_file.py +1 -12
  12. diffusers/loaders/single_file_model.py +31 -10
  13. diffusers/loaders/single_file_utils.py +262 -2
  14. diffusers/loaders/textual_inversion.py +1 -6
  15. diffusers/loaders/unet.py +23 -208
  16. diffusers/models/__init__.py +20 -0
  17. diffusers/models/activations.py +22 -0
  18. diffusers/models/attention.py +386 -7
  19. diffusers/models/attention_processor.py +1795 -629
  20. diffusers/models/autoencoders/__init__.py +2 -0
  21. diffusers/models/autoencoders/autoencoder_kl.py +14 -3
  22. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +1035 -0
  23. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +1 -1
  24. diffusers/models/autoencoders/autoencoder_oobleck.py +464 -0
  25. diffusers/models/autoencoders/autoencoder_tiny.py +1 -0
  26. diffusers/models/autoencoders/consistency_decoder_vae.py +1 -1
  27. diffusers/models/autoencoders/vq_model.py +4 -4
  28. diffusers/models/controlnet.py +2 -3
  29. diffusers/models/controlnet_hunyuan.py +401 -0
  30. diffusers/models/controlnet_sd3.py +11 -11
  31. diffusers/models/controlnet_sparsectrl.py +789 -0
  32. diffusers/models/controlnet_xs.py +40 -10
  33. diffusers/models/downsampling.py +68 -0
  34. diffusers/models/embeddings.py +319 -36
  35. diffusers/models/model_loading_utils.py +1 -3
  36. diffusers/models/modeling_flax_utils.py +1 -6
  37. diffusers/models/modeling_utils.py +4 -16
  38. diffusers/models/normalization.py +203 -12
  39. diffusers/models/transformers/__init__.py +6 -0
  40. diffusers/models/transformers/auraflow_transformer_2d.py +527 -0
  41. diffusers/models/transformers/cogvideox_transformer_3d.py +345 -0
  42. diffusers/models/transformers/hunyuan_transformer_2d.py +19 -15
  43. diffusers/models/transformers/latte_transformer_3d.py +327 -0
  44. diffusers/models/transformers/lumina_nextdit2d.py +340 -0
  45. diffusers/models/transformers/pixart_transformer_2d.py +102 -1
  46. diffusers/models/transformers/prior_transformer.py +1 -1
  47. diffusers/models/transformers/stable_audio_transformer.py +458 -0
  48. diffusers/models/transformers/transformer_flux.py +455 -0
  49. diffusers/models/transformers/transformer_sd3.py +18 -4
  50. diffusers/models/unets/unet_1d_blocks.py +1 -1
  51. diffusers/models/unets/unet_2d_condition.py +8 -1
  52. diffusers/models/unets/unet_3d_blocks.py +51 -920
  53. diffusers/models/unets/unet_3d_condition.py +4 -1
  54. diffusers/models/unets/unet_i2vgen_xl.py +4 -1
  55. diffusers/models/unets/unet_kandinsky3.py +1 -1
  56. diffusers/models/unets/unet_motion_model.py +1330 -84
  57. diffusers/models/unets/unet_spatio_temporal_condition.py +1 -1
  58. diffusers/models/unets/unet_stable_cascade.py +1 -3
  59. diffusers/models/unets/uvit_2d.py +1 -1
  60. diffusers/models/upsampling.py +64 -0
  61. diffusers/models/vq_model.py +8 -4
  62. diffusers/optimization.py +1 -1
  63. diffusers/pipelines/__init__.py +100 -3
  64. diffusers/pipelines/animatediff/__init__.py +4 -0
  65. diffusers/pipelines/animatediff/pipeline_animatediff.py +50 -40
  66. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +1076 -0
  67. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +17 -27
  68. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1008 -0
  69. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +51 -38
  70. diffusers/pipelines/audioldm2/modeling_audioldm2.py +1 -1
  71. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +1 -0
  72. diffusers/pipelines/aura_flow/__init__.py +48 -0
  73. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +591 -0
  74. diffusers/pipelines/auto_pipeline.py +97 -19
  75. diffusers/pipelines/cogvideo/__init__.py +48 -0
  76. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +687 -0
  77. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -1
  78. diffusers/pipelines/controlnet/pipeline_controlnet.py +24 -30
  79. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +31 -30
  80. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +24 -153
  81. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +19 -28
  82. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +18 -28
  83. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +29 -32
  84. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  85. diffusers/pipelines/controlnet_hunyuandit/__init__.py +48 -0
  86. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +1042 -0
  87. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +35 -0
  88. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +10 -6
  89. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +0 -4
  90. diffusers/pipelines/deepfloyd_if/pipeline_if.py +2 -2
  91. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +2 -2
  92. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +2 -2
  93. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +2 -2
  94. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +2 -2
  95. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +2 -2
  96. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +11 -6
  97. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +11 -6
  98. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +6 -6
  99. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +6 -6
  100. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +10 -10
  101. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +10 -6
  102. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +3 -3
  103. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +1 -1
  104. diffusers/pipelines/flux/__init__.py +47 -0
  105. diffusers/pipelines/flux/pipeline_flux.py +749 -0
  106. diffusers/pipelines/flux/pipeline_output.py +21 -0
  107. diffusers/pipelines/free_init_utils.py +2 -0
  108. diffusers/pipelines/free_noise_utils.py +236 -0
  109. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +2 -2
  110. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +2 -2
  111. diffusers/pipelines/kolors/__init__.py +54 -0
  112. diffusers/pipelines/kolors/pipeline_kolors.py +1070 -0
  113. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +1247 -0
  114. diffusers/pipelines/kolors/pipeline_output.py +21 -0
  115. diffusers/pipelines/kolors/text_encoder.py +889 -0
  116. diffusers/pipelines/kolors/tokenizer.py +334 -0
  117. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +30 -29
  118. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +23 -29
  119. diffusers/pipelines/latte/__init__.py +48 -0
  120. diffusers/pipelines/latte/pipeline_latte.py +881 -0
  121. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +4 -4
  122. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +0 -4
  123. diffusers/pipelines/lumina/__init__.py +48 -0
  124. diffusers/pipelines/lumina/pipeline_lumina.py +897 -0
  125. diffusers/pipelines/pag/__init__.py +67 -0
  126. diffusers/pipelines/pag/pag_utils.py +237 -0
  127. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1329 -0
  128. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1612 -0
  129. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +953 -0
  130. diffusers/pipelines/pag/pipeline_pag_kolors.py +1136 -0
  131. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +872 -0
  132. diffusers/pipelines/pag/pipeline_pag_sd.py +1050 -0
  133. diffusers/pipelines/pag/pipeline_pag_sd_3.py +985 -0
  134. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +862 -0
  135. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +1333 -0
  136. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +1529 -0
  137. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +1753 -0
  138. diffusers/pipelines/pia/pipeline_pia.py +30 -37
  139. diffusers/pipelines/pipeline_flax_utils.py +4 -9
  140. diffusers/pipelines/pipeline_loading_utils.py +0 -3
  141. diffusers/pipelines/pipeline_utils.py +2 -14
  142. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +0 -1
  143. diffusers/pipelines/stable_audio/__init__.py +50 -0
  144. diffusers/pipelines/stable_audio/modeling_stable_audio.py +158 -0
  145. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +745 -0
  146. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +2 -0
  147. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  148. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +23 -29
  149. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +15 -8
  150. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +30 -29
  151. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +23 -152
  152. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +8 -4
  153. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +11 -11
  154. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +8 -6
  155. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +6 -6
  156. diffusers/pipelines/stable_diffusion_3/__init__.py +2 -0
  157. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +34 -3
  158. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +33 -7
  159. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +1201 -0
  160. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +3 -3
  161. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +6 -6
  162. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +5 -5
  163. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +5 -5
  164. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +6 -6
  165. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +0 -4
  166. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +23 -29
  167. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +27 -29
  168. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +3 -3
  169. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +17 -27
  170. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +26 -29
  171. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +17 -145
  172. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +0 -4
  173. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +6 -6
  174. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +18 -28
  175. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +8 -6
  176. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +8 -6
  177. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +6 -4
  178. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +0 -4
  179. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +3 -3
  180. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
  181. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +5 -4
  182. diffusers/schedulers/__init__.py +8 -0
  183. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +572 -0
  184. diffusers/schedulers/scheduling_ddim.py +1 -1
  185. diffusers/schedulers/scheduling_ddim_cogvideox.py +449 -0
  186. diffusers/schedulers/scheduling_ddpm.py +1 -1
  187. diffusers/schedulers/scheduling_ddpm_parallel.py +1 -1
  188. diffusers/schedulers/scheduling_deis_multistep.py +2 -2
  189. diffusers/schedulers/scheduling_dpm_cogvideox.py +489 -0
  190. diffusers/schedulers/scheduling_dpmsolver_multistep.py +1 -1
  191. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +1 -1
  192. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +64 -19
  193. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +2 -2
  194. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +63 -39
  195. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +321 -0
  196. diffusers/schedulers/scheduling_ipndm.py +1 -1
  197. diffusers/schedulers/scheduling_unipc_multistep.py +1 -1
  198. diffusers/schedulers/scheduling_utils.py +1 -3
  199. diffusers/schedulers/scheduling_utils_flax.py +1 -3
  200. diffusers/training_utils.py +99 -14
  201. diffusers/utils/__init__.py +2 -2
  202. diffusers/utils/dummy_pt_objects.py +210 -0
  203. diffusers/utils/dummy_torch_and_torchsde_objects.py +15 -0
  204. diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py +47 -0
  205. diffusers/utils/dummy_torch_and_transformers_objects.py +315 -0
  206. diffusers/utils/dynamic_modules_utils.py +1 -11
  207. diffusers/utils/export_utils.py +1 -4
  208. diffusers/utils/hub_utils.py +45 -42
  209. diffusers/utils/import_utils.py +19 -16
  210. diffusers/utils/loading_utils.py +76 -3
  211. diffusers/utils/testing_utils.py +11 -8
  212. {diffusers-0.29.2.dist-info → diffusers-0.30.0.dist-info}/METADATA +73 -83
  213. {diffusers-0.29.2.dist-info → diffusers-0.30.0.dist-info}/RECORD +217 -164
  214. {diffusers-0.29.2.dist-info → diffusers-0.30.0.dist-info}/WHEEL +1 -1
  215. diffusers/loaders/autoencoder.py +0 -146
  216. diffusers/loaders/controlnet.py +0 -136
  217. diffusers/loaders/lora.py +0 -1728
  218. {diffusers-0.29.2.dist-info → diffusers-0.30.0.dist-info}/LICENSE +0 -0
  219. {diffusers-0.29.2.dist-info → diffusers-0.30.0.dist-info}/entry_points.txt +0 -0
  220. {diffusers-0.29.2.dist-info → diffusers-0.30.0.dist-info}/top_level.txt +0 -0
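
Most of the churn above is new model and pipeline code (CogVideoX, Flux, AuraFlow, Kolors, Latte, Lumina, Stable Audio, the PAG pipelines) plus a reorganization of the LoRA loaders into lora_base.py and lora_pipeline.py. As a quick orientation, the sketch below shows how the headline CogVideoX text-to-video addition might be exercised; it assumes the new pipeline is exported as CogVideoXPipeline from the top-level diffusers namespace and that a compatible checkpoint such as THUDM/CogVideoX-2b is published, neither of which this file list confirms on its own.

import torch
from diffusers import CogVideoXPipeline  # assumed top-level export added by this release

# Hypothetical checkpoint id; any repository with the expected CogVideoX layout would do.
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16)
pipe.to("cuda")

# .frames is expected to hold one list of decoded frames per prompt.
frames = pipe(
    prompt="A panda strumming a guitar in a bamboo grove",
    num_inference_steps=50,
    guidance_scale=6.0,
).frames[0]
print(len(frames))
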
diffusers/models/autoencoders/autoencoder_kl_cogvideox.py (new file)
@@ -0,0 +1,1035 @@
1
+ # Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from ...configuration_utils import ConfigMixin, register_to_config
24
+ from ...loaders.single_file_model import FromOriginalModelMixin
25
+ from ...utils import logging
26
+ from ...utils.accelerate_utils import apply_forward_hook
27
+ from ..activations import get_activation
28
+ from ..downsampling import CogVideoXDownsample3D
29
+ from ..modeling_outputs import AutoencoderKLOutput
30
+ from ..modeling_utils import ModelMixin
31
+ from ..upsampling import CogVideoXUpsample3D
32
+ from .vae import DecoderOutput, DiagonalGaussianDistribution
33
+
34
+
35
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
+
37
+
38
+ class CogVideoXSafeConv3d(nn.Conv3d):
39
+ """
40
+ A 3D convolution layer that splits the input tensor into smaller parts to avoid out-of-memory errors in the CogVideoX model.
41
+ """
42
+
43
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
44
+ memory_count = torch.prod(torch.tensor(input.shape)).item() * 2 / 1024**3
45
+
46
+ # Chunk the input once it exceeds ~2 GB (2 bytes per element), a size that works well with cuDNN
47
+ if memory_count > 2:
48
+ kernel_size = self.kernel_size[0]
49
+ part_num = int(memory_count / 2) + 1
50
+ input_chunks = torch.chunk(input, part_num, dim=2)
51
+
52
+ if kernel_size > 1:
53
+ input_chunks = [input_chunks[0]] + [
54
+ torch.cat((input_chunks[i - 1][:, :, -kernel_size + 1 :], input_chunks[i]), dim=2)
55
+ for i in range(1, len(input_chunks))
56
+ ]
57
+
58
+ output_chunks = []
59
+ for input_chunk in input_chunks:
60
+ output_chunks.append(super().forward(input_chunk))
61
+ output = torch.cat(output_chunks, dim=2)
62
+ return output
63
+ else:
64
+ return super().forward(input)
65
+
66
+
67
+ class CogVideoXCausalConv3d(nn.Module):
68
+ r"""A 3D causal convolution layer that pads the input tensor to ensure causality in CogVideoX Model.
69
+
70
+ Args:
71
+ in_channels (int): Number of channels in the input tensor.
72
+ out_channels (int): Number of output channels.
73
+ kernel_size (Union[int, Tuple[int, int, int]]): Size of the convolutional kernel.
74
+ stride (int, optional): Stride of the convolution. Default is 1.
75
+ dilation (int, optional): Dilation rate of the convolution. Default is 1.
76
+ pad_mode (str, optional): Padding mode. Default is "constant".
77
+ """
78
+
79
+ def __init__(
80
+ self,
81
+ in_channels: int,
82
+ out_channels: int,
83
+ kernel_size: Union[int, Tuple[int, int, int]],
84
+ stride: int = 1,
85
+ dilation: int = 1,
86
+ pad_mode: str = "constant",
87
+ ):
88
+ super().__init__()
89
+
90
+ if isinstance(kernel_size, int):
91
+ kernel_size = (kernel_size,) * 3
92
+
93
+ time_kernel_size, height_kernel_size, width_kernel_size = kernel_size
94
+
95
+ self.pad_mode = pad_mode
96
+ time_pad = dilation * (time_kernel_size - 1) + (1 - stride)
97
+ height_pad = height_kernel_size // 2
98
+ width_pad = width_kernel_size // 2
99
+
100
+ self.height_pad = height_pad
101
+ self.width_pad = width_pad
102
+ self.time_pad = time_pad
103
+ self.time_causal_padding = (width_pad, width_pad, height_pad, height_pad, time_pad, 0)
104
+
105
+ self.temporal_dim = 2
106
+ self.time_kernel_size = time_kernel_size
107
+
108
+ stride = (stride, 1, 1)
109
+ dilation = (dilation, 1, 1)
110
+ self.conv = CogVideoXSafeConv3d(
111
+ in_channels=in_channels,
112
+ out_channels=out_channels,
113
+ kernel_size=kernel_size,
114
+ stride=stride,
115
+ dilation=dilation,
116
+ )
117
+
118
+ self.conv_cache = None
119
+
120
+ def fake_context_parallel_forward(self, inputs: torch.Tensor) -> torch.Tensor:
121
+ dim = self.temporal_dim
122
+ kernel_size = self.time_kernel_size
123
+ if kernel_size == 1:
124
+ return inputs
125
+
126
+ inputs = inputs.transpose(0, dim)
127
+
128
+ if self.conv_cache is not None:
129
+ inputs = torch.cat([self.conv_cache.transpose(0, dim).to(inputs.device), inputs], dim=0)
130
+ else:
131
+ inputs = torch.cat([inputs[:1]] * (kernel_size - 1) + [inputs], dim=0)
132
+
133
+ inputs = inputs.transpose(0, dim).contiguous()
134
+ return inputs
135
+
136
+ def _clear_fake_context_parallel_cache(self):
137
+ del self.conv_cache
138
+ self.conv_cache = None
139
+
140
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
141
+ input_parallel = self.fake_context_parallel_forward(inputs)
142
+
143
+ self._clear_fake_context_parallel_cache()
144
+ self.conv_cache = input_parallel[:, :, -self.time_kernel_size + 1 :].contiguous().detach().clone().cpu()
145
+
146
+ padding_2d = (self.width_pad, self.width_pad, self.height_pad, self.height_pad)
147
+ input_parallel = F.pad(input_parallel, padding_2d, mode="constant", value=0)
148
+
149
+ output_parallel = self.conv(input_parallel)
150
+ output = output_parallel
151
+ return output
152
+
153
+
154
+ class CogVideoXSpatialNorm3D(nn.Module):
155
+ r"""
156
+ Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002. This implementation is specific
157
+ to 3D-video like data.
158
+
159
+ CogVideoXSafeConv3d is used instead of nn.Conv3d to avoid out-of-memory errors in the CogVideoX model.
160
+
161
+ Args:
162
+ f_channels (`int`):
163
+ The number of channels for input to group normalization layer, and output of the spatial norm layer.
164
+ zq_channels (`int`):
165
+ The number of channels for the quantized vector as described in the paper.
166
+ """
167
+
168
+ def __init__(
169
+ self,
170
+ f_channels: int,
171
+ zq_channels: int,
172
+ groups: int = 32,
173
+ ):
174
+ super().__init__()
175
+ self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=groups, eps=1e-6, affine=True)
176
+ self.conv_y = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1)
177
+ self.conv_b = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1)
178
+
179
+ def forward(self, f: torch.Tensor, zq: torch.Tensor) -> torch.Tensor:
180
+ if f.shape[2] > 1 and f.shape[2] % 2 == 1:
181
+ f_first, f_rest = f[:, :, :1], f[:, :, 1:]
182
+ f_first_size, f_rest_size = f_first.shape[-3:], f_rest.shape[-3:]
183
+ z_first, z_rest = zq[:, :, :1], zq[:, :, 1:]
184
+ z_first = F.interpolate(z_first, size=f_first_size)
185
+ z_rest = F.interpolate(z_rest, size=f_rest_size)
186
+ zq = torch.cat([z_first, z_rest], dim=2)
187
+ else:
188
+ zq = F.interpolate(zq, size=f.shape[-3:])
189
+
190
+ norm_f = self.norm_layer(f)
191
+ new_f = norm_f * self.conv_y(zq) + self.conv_b(zq)
192
+ return new_f
193
+
194
+
195
+ class CogVideoXResnetBlock3D(nn.Module):
196
+ r"""
197
+ A 3D ResNet block used in the CogVideoX model.
198
+
199
+ Args:
200
+ in_channels (int): Number of input channels.
201
+ out_channels (Optional[int], optional):
202
+ Number of output channels. If None, defaults to `in_channels`. Default is None.
203
+ dropout (float, optional): Dropout rate. Default is 0.0.
204
+ temb_channels (int, optional): Number of time embedding channels. Default is 512.
205
+ groups (int, optional): Number of groups for group normalization. Default is 32.
206
+ eps (float, optional): Epsilon value for normalization layers. Default is 1e-6.
207
+ non_linearity (str, optional): Activation function to use. Default is "swish".
208
+ conv_shortcut (bool, optional): If True, use a convolutional shortcut. Default is False.
209
+ spatial_norm_dim (Optional[int], optional): Dimension of the spatial normalization. Default is None.
210
+ pad_mode (str, optional): Padding mode. Default is "first".
211
+ """
212
+
213
+ def __init__(
214
+ self,
215
+ in_channels: int,
216
+ out_channels: Optional[int] = None,
217
+ dropout: float = 0.0,
218
+ temb_channels: int = 512,
219
+ groups: int = 32,
220
+ eps: float = 1e-6,
221
+ non_linearity: str = "swish",
222
+ conv_shortcut: bool = False,
223
+ spatial_norm_dim: Optional[int] = None,
224
+ pad_mode: str = "first",
225
+ ):
226
+ super().__init__()
227
+
228
+ out_channels = out_channels or in_channels
229
+
230
+ self.in_channels = in_channels
231
+ self.out_channels = out_channels
232
+ self.nonlinearity = get_activation(non_linearity)
233
+ self.use_conv_shortcut = conv_shortcut
234
+
235
+ if spatial_norm_dim is None:
236
+ self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=groups, eps=eps)
237
+ self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=groups, eps=eps)
238
+ else:
239
+ self.norm1 = CogVideoXSpatialNorm3D(
240
+ f_channels=in_channels,
241
+ zq_channels=spatial_norm_dim,
242
+ groups=groups,
243
+ )
244
+ self.norm2 = CogVideoXSpatialNorm3D(
245
+ f_channels=out_channels,
246
+ zq_channels=spatial_norm_dim,
247
+ groups=groups,
248
+ )
249
+
250
+ self.conv1 = CogVideoXCausalConv3d(
251
+ in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
252
+ )
253
+
254
+ if temb_channels > 0:
255
+ self.temb_proj = nn.Linear(in_features=temb_channels, out_features=out_channels)
256
+
257
+ self.dropout = nn.Dropout(dropout)
258
+ self.conv2 = CogVideoXCausalConv3d(
259
+ in_channels=out_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
260
+ )
261
+
262
+ if self.in_channels != self.out_channels:
263
+ if self.use_conv_shortcut:
264
+ self.conv_shortcut = CogVideoXCausalConv3d(
265
+ in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
266
+ )
267
+ else:
268
+ self.conv_shortcut = CogVideoXSafeConv3d(
269
+ in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0
270
+ )
271
+
272
+ def forward(
273
+ self,
274
+ inputs: torch.Tensor,
275
+ temb: Optional[torch.Tensor] = None,
276
+ zq: Optional[torch.Tensor] = None,
277
+ ) -> torch.Tensor:
278
+ hidden_states = inputs
279
+
280
+ if zq is not None:
281
+ hidden_states = self.norm1(hidden_states, zq)
282
+ else:
283
+ hidden_states = self.norm1(hidden_states)
284
+
285
+ hidden_states = self.nonlinearity(hidden_states)
286
+ hidden_states = self.conv1(hidden_states)
287
+
288
+ if temb is not None:
289
+ hidden_states = hidden_states + self.temb_proj(self.nonlinearity(temb))[:, :, None, None, None]
290
+
291
+ if zq is not None:
292
+ hidden_states = self.norm2(hidden_states, zq)
293
+ else:
294
+ hidden_states = self.norm2(hidden_states)
295
+
296
+ hidden_states = self.nonlinearity(hidden_states)
297
+ hidden_states = self.dropout(hidden_states)
298
+ hidden_states = self.conv2(hidden_states)
299
+
300
+ if self.in_channels != self.out_channels:
301
+ inputs = self.conv_shortcut(inputs)
302
+
303
+ hidden_states = hidden_states + inputs
304
+ return hidden_states
305
+
306
+
307
+ class CogVideoXDownBlock3D(nn.Module):
308
+ r"""
309
+ A downsampling block used in the CogVideoX model.
310
+
311
+ Args:
312
+ in_channels (int): Number of input channels.
313
+ out_channels (int): Number of output channels.
314
+ temb_channels (int): Number of time embedding channels.
315
+ dropout (float, optional): Dropout rate. Default is 0.0.
316
+ num_layers (int, optional): Number of layers in the block. Default is 1.
317
+ resnet_eps (float, optional): Epsilon value for the ResNet layers. Default is 1e-6.
318
+ resnet_act_fn (str, optional): Activation function for the ResNet layers. Default is "swish".
319
+ resnet_groups (int, optional): Number of groups for group normalization in the ResNet layers. Default is 32.
320
+ add_downsample (bool, optional): If True, add a downsampling layer at the end of the block. Default is True.
321
+ downsample_padding (int, optional): Padding for the downsampling layer. Default is 0.
322
+ compress_time (bool, optional): If True, apply temporal compression. Default is False.
323
+ pad_mode (str, optional): Padding mode. Default is "first".
324
+ """
325
+
326
+ _supports_gradient_checkpointing = True
327
+
328
+ def __init__(
329
+ self,
330
+ in_channels: int,
331
+ out_channels: int,
332
+ temb_channels: int,
333
+ dropout: float = 0.0,
334
+ num_layers: int = 1,
335
+ resnet_eps: float = 1e-6,
336
+ resnet_act_fn: str = "swish",
337
+ resnet_groups: int = 32,
338
+ add_downsample: bool = True,
339
+ downsample_padding: int = 0,
340
+ compress_time: bool = False,
341
+ pad_mode: str = "first",
342
+ ):
343
+ super().__init__()
344
+
345
+ resnets = []
346
+ for i in range(num_layers):
347
+ in_channel = in_channels if i == 0 else out_channels
348
+ resnets.append(
349
+ CogVideoXResnetBlock3D(
350
+ in_channels=in_channel,
351
+ out_channels=out_channels,
352
+ dropout=dropout,
353
+ temb_channels=temb_channels,
354
+ groups=resnet_groups,
355
+ eps=resnet_eps,
356
+ non_linearity=resnet_act_fn,
357
+ pad_mode=pad_mode,
358
+ )
359
+ )
360
+
361
+ self.resnets = nn.ModuleList(resnets)
362
+ self.downsamplers = None
363
+
364
+ if add_downsample:
365
+ self.downsamplers = nn.ModuleList(
366
+ [
367
+ CogVideoXDownsample3D(
368
+ out_channels, out_channels, padding=downsample_padding, compress_time=compress_time
369
+ )
370
+ ]
371
+ )
372
+
373
+ self.gradient_checkpointing = False
374
+
375
+ def forward(
376
+ self,
377
+ hidden_states: torch.Tensor,
378
+ temb: Optional[torch.Tensor] = None,
379
+ zq: Optional[torch.Tensor] = None,
380
+ ) -> torch.Tensor:
381
+ for resnet in self.resnets:
382
+ if self.training and self.gradient_checkpointing:
383
+
384
+ def create_custom_forward(module):
385
+ def create_forward(*inputs):
386
+ return module(*inputs)
387
+
388
+ return create_forward
389
+
390
+ hidden_states = torch.utils.checkpoint.checkpoint(
391
+ create_custom_forward(resnet), hidden_states, temb, zq
392
+ )
393
+ else:
394
+ hidden_states = resnet(hidden_states, temb, zq)
395
+
396
+ if self.downsamplers is not None:
397
+ for downsampler in self.downsamplers:
398
+ hidden_states = downsampler(hidden_states)
399
+
400
+ return hidden_states
401
+
402
+
403
+ class CogVideoXMidBlock3D(nn.Module):
404
+ r"""
405
+ A middle block used in the CogVideoX model.
406
+
407
+ Args:
408
+ in_channels (int): Number of input channels.
409
+ temb_channels (int): Number of time embedding channels.
410
+ dropout (float, optional): Dropout rate. Default is 0.0.
411
+ num_layers (int, optional): Number of layers in the block. Default is 1.
412
+ resnet_eps (float, optional): Epsilon value for the ResNet layers. Default is 1e-6.
413
+ resnet_act_fn (str, optional): Activation function for the ResNet layers. Default is "swish".
414
+ resnet_groups (int, optional): Number of groups for group normalization in the ResNet layers. Default is 32.
415
+ spatial_norm_dim (Optional[int], optional): Dimension of the spatial normalization. Default is None.
416
+ pad_mode (str, optional): Padding mode. Default is "first".
417
+ """
418
+
419
+ _supports_gradient_checkpointing = True
420
+
421
+ def __init__(
422
+ self,
423
+ in_channels: int,
424
+ temb_channels: int,
425
+ dropout: float = 0.0,
426
+ num_layers: int = 1,
427
+ resnet_eps: float = 1e-6,
428
+ resnet_act_fn: str = "swish",
429
+ resnet_groups: int = 32,
430
+ spatial_norm_dim: Optional[int] = None,
431
+ pad_mode: str = "first",
432
+ ):
433
+ super().__init__()
434
+
435
+ resnets = []
436
+ for _ in range(num_layers):
437
+ resnets.append(
438
+ CogVideoXResnetBlock3D(
439
+ in_channels=in_channels,
440
+ out_channels=in_channels,
441
+ dropout=dropout,
442
+ temb_channels=temb_channels,
443
+ groups=resnet_groups,
444
+ eps=resnet_eps,
445
+ spatial_norm_dim=spatial_norm_dim,
446
+ non_linearity=resnet_act_fn,
447
+ pad_mode=pad_mode,
448
+ )
449
+ )
450
+ self.resnets = nn.ModuleList(resnets)
451
+
452
+ self.gradient_checkpointing = False
453
+
454
+ def forward(
455
+ self,
456
+ hidden_states: torch.Tensor,
457
+ temb: Optional[torch.Tensor] = None,
458
+ zq: Optional[torch.Tensor] = None,
459
+ ) -> torch.Tensor:
460
+ for resnet in self.resnets:
461
+ if self.training and self.gradient_checkpointing:
462
+
463
+ def create_custom_forward(module):
464
+ def create_forward(*inputs):
465
+ return module(*inputs)
466
+
467
+ return create_forward
468
+
469
+ hidden_states = torch.utils.checkpoint.checkpoint(
470
+ create_custom_forward(resnet), hidden_states, temb, zq
471
+ )
472
+ else:
473
+ hidden_states = resnet(hidden_states, temb, zq)
474
+
475
+ return hidden_states
476
+
477
+
478
+ class CogVideoXUpBlock3D(nn.Module):
479
+ r"""
480
+ An upsampling block used in the CogVideoX model.
481
+
482
+ Args:
483
+ in_channels (int): Number of input channels.
484
+ out_channels (int): Number of output channels.
485
+ temb_channels (int): Number of time embedding channels.
486
+ dropout (float, optional): Dropout rate. Default is 0.0.
487
+ num_layers (int, optional): Number of layers in the block. Default is 1.
488
+ resnet_eps (float, optional): Epsilon value for the ResNet layers. Default is 1e-6.
489
+ resnet_act_fn (str, optional): Activation function for the ResNet layers. Default is "swish".
490
+ resnet_groups (int, optional): Number of groups for group normalization in the ResNet layers. Default is 32.
491
+ spatial_norm_dim (int, optional): Dimension of the spatial normalization. Default is 16.
492
+ add_upsample (bool, optional): If True, add an upsampling layer at the end of the block. Default is True.
493
+ upsample_padding (int, optional): Padding for the upsampling layer. Default is 1.
494
+ compress_time (bool, optional): If True, apply temporal compression. Default is False.
495
+ pad_mode (str, optional): Padding mode. Default is "first".
496
+ """
497
+
498
+ def __init__(
499
+ self,
500
+ in_channels: int,
501
+ out_channels: int,
502
+ temb_channels: int,
503
+ dropout: float = 0.0,
504
+ num_layers: int = 1,
505
+ resnet_eps: float = 1e-6,
506
+ resnet_act_fn: str = "swish",
507
+ resnet_groups: int = 32,
508
+ spatial_norm_dim: int = 16,
509
+ add_upsample: bool = True,
510
+ upsample_padding: int = 1,
511
+ compress_time: bool = False,
512
+ pad_mode: str = "first",
513
+ ):
514
+ super().__init__()
515
+
516
+ resnets = []
517
+ for i in range(num_layers):
518
+ in_channel = in_channels if i == 0 else out_channels
519
+ resnets.append(
520
+ CogVideoXResnetBlock3D(
521
+ in_channels=in_channel,
522
+ out_channels=out_channels,
523
+ dropout=dropout,
524
+ temb_channels=temb_channels,
525
+ groups=resnet_groups,
526
+ eps=resnet_eps,
527
+ non_linearity=resnet_act_fn,
528
+ spatial_norm_dim=spatial_norm_dim,
529
+ pad_mode=pad_mode,
530
+ )
531
+ )
532
+
533
+ self.resnets = nn.ModuleList(resnets)
534
+ self.upsamplers = None
535
+
536
+ if add_upsample:
537
+ self.upsamplers = nn.ModuleList(
538
+ [
539
+ CogVideoXUpsample3D(
540
+ out_channels, out_channels, padding=upsample_padding, compress_time=compress_time
541
+ )
542
+ ]
543
+ )
544
+
545
+ self.gradient_checkpointing = False
546
+
547
+ def forward(
548
+ self,
549
+ hidden_states: torch.Tensor,
550
+ temb: Optional[torch.Tensor] = None,
551
+ zq: Optional[torch.Tensor] = None,
552
+ ) -> torch.Tensor:
553
+ r"""Forward method of the `CogVideoXUpBlock3D` class."""
554
+ for resnet in self.resnets:
555
+ if self.training and self.gradient_checkpointing:
556
+
557
+ def create_custom_forward(module):
558
+ def create_forward(*inputs):
559
+ return module(*inputs)
560
+
561
+ return create_forward
562
+
563
+ hidden_states = torch.utils.checkpoint.checkpoint(
564
+ create_custom_forward(resnet), hidden_states, temb, zq
565
+ )
566
+ else:
567
+ hidden_states = resnet(hidden_states, temb, zq)
568
+
569
+ if self.upsamplers is not None:
570
+ for upsampler in self.upsamplers:
571
+ hidden_states = upsampler(hidden_states)
572
+
573
+ return hidden_states
574
+
575
+
576
+ class CogVideoXEncoder3D(nn.Module):
577
+ r"""
578
+ The `CogVideoXEncoder3D` layer of a variational autoencoder that encodes its input into a latent representation.
579
+
580
+ Args:
581
+ in_channels (`int`, *optional*, defaults to 3):
582
+ The number of input channels.
583
+ out_channels (`int`, *optional*, defaults to 16):
584
+ The number of output channels.
585
+ down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
586
+ The types of down blocks to use. See `~diffusers.models.unet_2d_blocks.get_down_block` for available
587
+ options.
588
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
589
+ The number of output channels for each block.
590
+ layers_per_block (`int`, *optional*, defaults to 2):
591
+ The number of layers per block.
592
+ norm_num_groups (`int`, *optional*, defaults to 32):
593
+ The number of groups for normalization.
594
+ act_fn (`str`, *optional*, defaults to `"silu"`):
595
+ The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
596
+ double_z (`bool`, *optional*, defaults to `True`):
597
+ Whether to double the number of output channels for the last block.
598
+ """
599
+
600
+ _supports_gradient_checkpointing = True
601
+
602
+ def __init__(
603
+ self,
604
+ in_channels: int = 3,
605
+ out_channels: int = 16,
606
+ down_block_types: Tuple[str, ...] = (
607
+ "CogVideoXDownBlock3D",
608
+ "CogVideoXDownBlock3D",
609
+ "CogVideoXDownBlock3D",
610
+ "CogVideoXDownBlock3D",
611
+ ),
612
+ block_out_channels: Tuple[int, ...] = (128, 256, 256, 512),
613
+ layers_per_block: int = 3,
614
+ act_fn: str = "silu",
615
+ norm_eps: float = 1e-6,
616
+ norm_num_groups: int = 32,
617
+ dropout: float = 0.0,
618
+ pad_mode: str = "first",
619
+ temporal_compression_ratio: float = 4,
620
+ ):
621
+ super().__init__()
622
+
623
+ # log2 of temporal_compress_times
624
+ temporal_compress_level = int(np.log2(temporal_compression_ratio))
625
+
626
+ self.conv_in = CogVideoXCausalConv3d(in_channels, block_out_channels[0], kernel_size=3, pad_mode=pad_mode)
627
+ self.down_blocks = nn.ModuleList([])
628
+
629
+ # down blocks
630
+ output_channel = block_out_channels[0]
631
+ for i, down_block_type in enumerate(down_block_types):
632
+ input_channel = output_channel
633
+ output_channel = block_out_channels[i]
634
+ is_final_block = i == len(block_out_channels) - 1
635
+ compress_time = i < temporal_compress_level
636
+
637
+ if down_block_type == "CogVideoXDownBlock3D":
638
+ down_block = CogVideoXDownBlock3D(
639
+ in_channels=input_channel,
640
+ out_channels=output_channel,
641
+ temb_channels=0,
642
+ dropout=dropout,
643
+ num_layers=layers_per_block,
644
+ resnet_eps=norm_eps,
645
+ resnet_act_fn=act_fn,
646
+ resnet_groups=norm_num_groups,
647
+ add_downsample=not is_final_block,
648
+ compress_time=compress_time,
649
+ )
650
+ else:
651
+ raise ValueError("Invalid `down_block_type` encountered. Must be `CogVideoXDownBlock3D`")
652
+
653
+ self.down_blocks.append(down_block)
654
+
655
+ # mid block
656
+ self.mid_block = CogVideoXMidBlock3D(
657
+ in_channels=block_out_channels[-1],
658
+ temb_channels=0,
659
+ dropout=dropout,
660
+ num_layers=2,
661
+ resnet_eps=norm_eps,
662
+ resnet_act_fn=act_fn,
663
+ resnet_groups=norm_num_groups,
664
+ pad_mode=pad_mode,
665
+ )
666
+
667
+ self.norm_out = nn.GroupNorm(norm_num_groups, block_out_channels[-1], eps=1e-6)
668
+ self.conv_act = nn.SiLU()
669
+ self.conv_out = CogVideoXCausalConv3d(
670
+ block_out_channels[-1], 2 * out_channels, kernel_size=3, pad_mode=pad_mode
671
+ )
672
+
673
+ self.gradient_checkpointing = False
674
+
675
+ def forward(self, sample: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor:
676
+ r"""The forward method of the `CogVideoXEncoder3D` class."""
677
+ hidden_states = self.conv_in(sample)
678
+
679
+ if self.training and self.gradient_checkpointing:
680
+
681
+ def create_custom_forward(module):
682
+ def custom_forward(*inputs):
683
+ return module(*inputs)
684
+
685
+ return custom_forward
686
+
687
+ # 1. Down
688
+ for down_block in self.down_blocks:
689
+ hidden_states = torch.utils.checkpoint.checkpoint(
690
+ create_custom_forward(down_block), hidden_states, temb, None
691
+ )
692
+
693
+ # 2. Mid
694
+ hidden_states = torch.utils.checkpoint.checkpoint(
695
+ create_custom_forward(self.mid_block), hidden_states, temb, None
696
+ )
697
+ else:
698
+ # 1. Down
699
+ for down_block in self.down_blocks:
700
+ hidden_states = down_block(hidden_states, temb, None)
701
+
702
+ # 2. Mid
703
+ hidden_states = self.mid_block(hidden_states, temb, None)
704
+
705
+ # 3. Post-process
706
+ hidden_states = self.norm_out(hidden_states)
707
+ hidden_states = self.conv_act(hidden_states)
708
+ hidden_states = self.conv_out(hidden_states)
709
+ return hidden_states
710
+
711
+
712
+ class CogVideoXDecoder3D(nn.Module):
713
+ r"""
714
+ The `CogVideoXDecoder3D` layer of a variational autoencoder that decodes its latent representation into an output
715
+ sample.
716
+
717
+ Args:
718
+ in_channels (`int`, *optional*, defaults to 16):
719
+ The number of input channels.
720
+ out_channels (`int`, *optional*, defaults to 3):
721
+ The number of output channels.
722
+ up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
723
+ The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
724
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
725
+ The number of output channels for each block.
726
+ layers_per_block (`int`, *optional*, defaults to 2):
727
+ The number of layers per block.
728
+ norm_num_groups (`int`, *optional*, defaults to 32):
729
+ The number of groups for normalization.
730
+ act_fn (`str`, *optional*, defaults to `"silu"`):
731
+ The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
732
+ norm_type (`str`, *optional*, defaults to `"group"`):
733
+ The normalization type to use. Can be either `"group"` or `"spatial"`.
734
+ """
735
+
736
+ _supports_gradient_checkpointing = True
737
+
738
+ def __init__(
739
+ self,
740
+ in_channels: int = 16,
741
+ out_channels: int = 3,
742
+ up_block_types: Tuple[str, ...] = (
743
+ "CogVideoXUpBlock3D",
744
+ "CogVideoXUpBlock3D",
745
+ "CogVideoXUpBlock3D",
746
+ "CogVideoXUpBlock3D",
747
+ ),
748
+ block_out_channels: Tuple[int, ...] = (128, 256, 256, 512),
749
+ layers_per_block: int = 3,
750
+ act_fn: str = "silu",
751
+ norm_eps: float = 1e-6,
752
+ norm_num_groups: int = 32,
753
+ dropout: float = 0.0,
754
+ pad_mode: str = "first",
755
+ temporal_compression_ratio: float = 4,
756
+ ):
757
+ super().__init__()
758
+
759
+ reversed_block_out_channels = list(reversed(block_out_channels))
760
+
761
+ self.conv_in = CogVideoXCausalConv3d(
762
+ in_channels, reversed_block_out_channels[0], kernel_size=3, pad_mode=pad_mode
763
+ )
764
+
765
+ # mid block
766
+ self.mid_block = CogVideoXMidBlock3D(
767
+ in_channels=reversed_block_out_channels[0],
768
+ temb_channels=0,
769
+ num_layers=2,
770
+ resnet_eps=norm_eps,
771
+ resnet_act_fn=act_fn,
772
+ resnet_groups=norm_num_groups,
773
+ spatial_norm_dim=in_channels,
774
+ pad_mode=pad_mode,
775
+ )
776
+
777
+ # up blocks
778
+ self.up_blocks = nn.ModuleList([])
779
+
780
+ output_channel = reversed_block_out_channels[0]
781
+ temporal_compress_level = int(np.log2(temporal_compression_ratio))
782
+
783
+ for i, up_block_type in enumerate(up_block_types):
784
+ prev_output_channel = output_channel
785
+ output_channel = reversed_block_out_channels[i]
786
+ is_final_block = i == len(block_out_channels) - 1
787
+ compress_time = i < temporal_compress_level
788
+
789
+ if up_block_type == "CogVideoXUpBlock3D":
790
+ up_block = CogVideoXUpBlock3D(
791
+ in_channels=prev_output_channel,
792
+ out_channels=output_channel,
793
+ temb_channels=0,
794
+ dropout=dropout,
795
+ num_layers=layers_per_block + 1,
796
+ resnet_eps=norm_eps,
797
+ resnet_act_fn=act_fn,
798
+ resnet_groups=norm_num_groups,
799
+ spatial_norm_dim=in_channels,
800
+ add_upsample=not is_final_block,
801
+ compress_time=compress_time,
802
+ pad_mode=pad_mode,
803
+ )
804
+ prev_output_channel = output_channel
805
+ else:
806
+ raise ValueError("Invalid `up_block_type` encountered. Must be `CogVideoXUpBlock3D`")
807
+
808
+ self.up_blocks.append(up_block)
809
+
810
+ self.norm_out = CogVideoXSpatialNorm3D(reversed_block_out_channels[-1], in_channels, groups=norm_num_groups)
811
+ self.conv_act = nn.SiLU()
812
+ self.conv_out = CogVideoXCausalConv3d(
813
+ reversed_block_out_channels[-1], out_channels, kernel_size=3, pad_mode=pad_mode
814
+ )
815
+
816
+ self.gradient_checkpointing = False
817
+
818
+ def forward(self, sample: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor:
819
+ r"""The forward method of the `CogVideoXDecoder3D` class."""
820
+ hidden_states = self.conv_in(sample)
821
+
822
+ if self.training and self.gradient_checkpointing:
823
+
824
+ def create_custom_forward(module):
825
+ def custom_forward(*inputs):
826
+ return module(*inputs)
827
+
828
+ return custom_forward
829
+
830
+ # 1. Mid
831
+ hidden_states = torch.utils.checkpoint.checkpoint(
832
+ create_custom_forward(self.mid_block), hidden_states, temb, sample
833
+ )
834
+
835
+ # 2. Up
836
+ for up_block in self.up_blocks:
837
+ hidden_states = torch.utils.checkpoint.checkpoint(
838
+ create_custom_forward(up_block), hidden_states, temb, sample
839
+ )
840
+ else:
841
+ # 1. Mid
842
+ hidden_states = self.mid_block(hidden_states, temb, sample)
843
+
844
+ # 2. Up
845
+ for up_block in self.up_blocks:
846
+ hidden_states = up_block(hidden_states, temb, sample)
847
+
848
+ # 3. Post-process
849
+ hidden_states = self.norm_out(hidden_states, sample)
850
+ hidden_states = self.conv_act(hidden_states)
851
+ hidden_states = self.conv_out(hidden_states)
852
+ return hidden_states
853
+
854
+
855
+ class AutoencoderKLCogVideoX(ModelMixin, ConfigMixin, FromOriginalModelMixin):
856
+ r"""
857
+ A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in
858
+ [CogVideoX](https://github.com/THUDM/CogVideo).
859
+
860
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
861
+ for all models (such as downloading or saving).
862
+
863
+ Parameters:
864
+ in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
865
+ out_channels (int, *optional*, defaults to 3): Number of channels in the output.
866
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
867
+ Tuple of downsample block types.
868
+ up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
869
+ Tuple of upsample block types.
870
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
871
+ Tuple of block output channels.
872
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
873
+ sample_size (`int`, *optional*, defaults to `32`): Sample input size.
874
+ scaling_factor (`float`, *optional*, defaults to 0.18215):
875
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
876
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
877
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
878
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
879
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
880
+ Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
881
+ force_upcast (`bool`, *optional*, defaults to `True`):
882
+ If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
883
+ can be fine-tuned / trained to a lower range without losing too much precision, in which case
884
+ `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
885
+ """
886
+
887
+ _supports_gradient_checkpointing = True
888
+ _no_split_modules = ["CogVideoXResnetBlock3D"]
889
+
890
+ @register_to_config
891
+ def __init__(
892
+ self,
893
+ in_channels: int = 3,
894
+ out_channels: int = 3,
895
+ down_block_types: Tuple[str] = (
896
+ "CogVideoXDownBlock3D",
897
+ "CogVideoXDownBlock3D",
898
+ "CogVideoXDownBlock3D",
899
+ "CogVideoXDownBlock3D",
900
+ ),
901
+ up_block_types: Tuple[str] = (
902
+ "CogVideoXUpBlock3D",
903
+ "CogVideoXUpBlock3D",
904
+ "CogVideoXUpBlock3D",
905
+ "CogVideoXUpBlock3D",
906
+ ),
907
+ block_out_channels: Tuple[int] = (128, 256, 256, 512),
908
+ latent_channels: int = 16,
909
+ layers_per_block: int = 3,
910
+ act_fn: str = "silu",
911
+ norm_eps: float = 1e-6,
912
+ norm_num_groups: int = 32,
913
+ temporal_compression_ratio: float = 4,
914
+ sample_size: int = 256,
915
+ scaling_factor: float = 1.15258426,
916
+ shift_factor: Optional[float] = None,
917
+ latents_mean: Optional[Tuple[float]] = None,
918
+ latents_std: Optional[Tuple[float]] = None,
919
+ force_upcast: bool = True,
920
+ use_quant_conv: bool = False,
921
+ use_post_quant_conv: bool = False,
922
+ ):
923
+ super().__init__()
924
+
925
+ self.encoder = CogVideoXEncoder3D(
926
+ in_channels=in_channels,
927
+ out_channels=latent_channels,
928
+ down_block_types=down_block_types,
929
+ block_out_channels=block_out_channels,
930
+ layers_per_block=layers_per_block,
931
+ act_fn=act_fn,
932
+ norm_eps=norm_eps,
933
+ norm_num_groups=norm_num_groups,
934
+ temporal_compression_ratio=temporal_compression_ratio,
935
+ )
936
+ self.decoder = CogVideoXDecoder3D(
937
+ in_channels=latent_channels,
938
+ out_channels=out_channels,
939
+ up_block_types=up_block_types,
940
+ block_out_channels=block_out_channels,
941
+ layers_per_block=layers_per_block,
942
+ act_fn=act_fn,
943
+ norm_eps=norm_eps,
944
+ norm_num_groups=norm_num_groups,
945
+ temporal_compression_ratio=temporal_compression_ratio,
946
+ )
947
+ self.quant_conv = CogVideoXSafeConv3d(2 * out_channels, 2 * out_channels, 1) if use_quant_conv else None
948
+ self.post_quant_conv = CogVideoXSafeConv3d(out_channels, out_channels, 1) if use_post_quant_conv else None
949
+
950
+ self.use_slicing = False
951
+ self.use_tiling = False
952
+
953
+ self.tile_sample_min_size = self.config.sample_size
954
+ sample_size = (
955
+ self.config.sample_size[0]
956
+ if isinstance(self.config.sample_size, (list, tuple))
957
+ else self.config.sample_size
958
+ )
959
+ self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
960
+ self.tile_overlap_factor = 0.25
961
+
962
+ def _set_gradient_checkpointing(self, module, value=False):
963
+ if isinstance(module, (CogVideoXEncoder3D, CogVideoXDecoder3D)):
964
+ module.gradient_checkpointing = value
965
+
966
+ def clear_fake_context_parallel_cache(self):
967
+ for name, module in self.named_modules():
968
+ if isinstance(module, CogVideoXCausalConv3d):
969
+ logger.debug(f"Clearing fake Context Parallel cache for layer: {name}")
970
+ module._clear_fake_context_parallel_cache()
971
+
972
+ @apply_forward_hook
973
+ def encode(
974
+ self, x: torch.Tensor, return_dict: bool = True
975
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
976
+ """
977
+ Encode a batch of images into latents.
978
+
979
+ Args:
980
+ x (`torch.Tensor`): Input batch of images.
981
+ return_dict (`bool`, *optional*, defaults to `True`):
982
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
983
+
984
+ Returns:
985
+ The latent representations of the encoded images. If `return_dict` is True, a
986
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
987
+ """
988
+ h = self.encoder(x)
989
+ if self.quant_conv is not None:
990
+ h = self.quant_conv(h)
991
+ posterior = DiagonalGaussianDistribution(h)
992
+ if not return_dict:
993
+ return (posterior,)
994
+ return AutoencoderKLOutput(latent_dist=posterior)
995
+
996
+ @apply_forward_hook
997
+ def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
998
+ """
999
+ Decode a batch of images.
1000
+
1001
+ Args:
1002
+ z (`torch.Tensor`): Input batch of latent vectors.
1003
+ return_dict (`bool`, *optional*, defaults to `True`):
1004
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
1005
+
1006
+ Returns:
1007
+ [`~models.vae.DecoderOutput`] or `tuple`:
1008
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
1009
+ returned.
1010
+
1011
+ """
1012
+ if self.post_quant_conv is not None:
1013
+ z = self.post_quant_conv(z)
1014
+ dec = self.decoder(z)
1015
+ if not return_dict:
1016
+ return (dec,)
1017
+ return DecoderOutput(sample=dec)
1018
+
1019
+ def forward(
1020
+ self,
1021
+ sample: torch.Tensor,
1022
+ sample_posterior: bool = False,
1023
+ return_dict: bool = True,
1024
+ generator: Optional[torch.Generator] = None,
1025
+ ) -> Union[torch.Tensor, torch.Tensor]:
1026
+ x = sample
1027
+ posterior = self.encode(x).latent_dist
1028
+ if sample_posterior:
1029
+ z = posterior.sample(generator=generator)
1030
+ else:
1031
+ z = posterior.mode()
1032
+ dec = self.decode(z)
1033
+ if not return_dict:
1034
+ return (dec,)
1035
+ return dec
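
Putting the pieces above together, a minimal smoke test of the new VAE could look like the sketch below. It is only a sketch: the model is built with the default (randomly initialized) configuration from the signature above rather than a released checkpoint, the import path follows the file location in this diff, and the 9-frame, 64x64 input is an assumed shape based on the (batch, channels, frames, height, width) layout implied by the temporal padding on dim 2.

import torch
from diffusers.models.autoencoders.autoencoder_kl_cogvideox import AutoencoderKLCogVideoX

vae = AutoencoderKLCogVideoX()  # default config from the signature above, random weights

# Video tensors are laid out as (batch, channels, frames, height, width);
# the causal convolutions pad and cache along dim 2 (frames).
video = torch.randn(1, 3, 9, 64, 64)

with torch.no_grad():
    posterior = vae.encode(video).latent_dist      # DiagonalGaussianDistribution
    latents = posterior.sample()
    recon = vae.decode(latents).sample             # DecoderOutput.sample

# The causal convolutions keep a per-layer cache for the "fake context parallel"
# path; clear it before encoding an unrelated clip.
vae.clear_fake_context_parallel_cache()

print(latents.shape, recon.shape)

The cache being cleared here is the same conv_cache that each CogVideoXCausalConv3d stores on the CPU after its forward pass, which is what lets long videos be processed in temporal chunks without re-padding from scratch.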