diffusers 0.31.0__py3-none-any.whl → 0.32.1__py3-none-any.whl
This diff compares publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- diffusers/__init__.py +66 -5
- diffusers/callbacks.py +56 -3
- diffusers/configuration_utils.py +1 -1
- diffusers/dependency_versions_table.py +1 -1
- diffusers/image_processor.py +25 -17
- diffusers/loaders/__init__.py +22 -3
- diffusers/loaders/ip_adapter.py +538 -15
- diffusers/loaders/lora_base.py +124 -118
- diffusers/loaders/lora_conversion_utils.py +318 -3
- diffusers/loaders/lora_pipeline.py +1688 -368
- diffusers/loaders/peft.py +379 -0
- diffusers/loaders/single_file_model.py +71 -4
- diffusers/loaders/single_file_utils.py +519 -9
- diffusers/loaders/textual_inversion.py +3 -3
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +17 -4
- diffusers/models/__init__.py +47 -14
- diffusers/models/activations.py +22 -9
- diffusers/models/attention.py +13 -4
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +2059 -281
- diffusers/models/autoencoders/__init__.py +5 -0
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +2 -1
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +36 -27
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +3 -10
- diffusers/models/autoencoders/autoencoder_tiny.py +4 -2
- diffusers/models/autoencoders/vae.py +18 -5
- diffusers/models/controlnet.py +47 -802
- diffusers/models/controlnet_flux.py +29 -495
- diffusers/models/controlnet_sd3.py +25 -379
- diffusers/models/controlnet_sparsectrl.py +46 -718
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +5 -5
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/{controlnet_hunyuan.py → controlnets/controlnet_hunyuan.py} +7 -7
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/{controlnet_xs.py → controlnets/controlnet_xs.py} +14 -13
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/embeddings.py +838 -43
- diffusers/models/model_loading_utils.py +88 -6
- diffusers/models/modeling_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +72 -26
- diffusers/models/normalization.py +78 -13
- diffusers/models/transformers/__init__.py +5 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +2 -2
- diffusers/models/transformers/cogvideox_transformer_3d.py +46 -11
- diffusers/models/transformers/dit_transformer_2d.py +1 -1
- diffusers/models/transformers/latte_transformer_3d.py +4 -4
- diffusers/models/transformers/pixart_transformer_2d.py +1 -1
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +1 -1
- diffusers/models/transformers/transformer_2d.py +1 -1
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +1 -1
- diffusers/models/transformers/transformer_flux.py +30 -9
- diffusers/models/transformers/transformer_hunyuan_video.py +789 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +105 -17
- diffusers/models/transformers/transformer_temporal.py +1 -1
- diffusers/models/unets/unet_1d_blocks.py +1 -1
- diffusers/models/unets/unet_2d.py +8 -1
- diffusers/models/unets/unet_2d_blocks.py +88 -21
- diffusers/models/unets/unet_2d_condition.py +1 -1
- diffusers/models/unets/unet_3d_blocks.py +9 -7
- diffusers/models/unets/unet_motion_model.py +5 -5
- diffusers/models/unets/unet_spatio_temporal_condition.py +23 -0
- diffusers/models/unets/unet_stable_cascade.py +2 -2
- diffusers/models/unets/uvit_2d.py +1 -1
- diffusers/models/upsampling.py +8 -0
- diffusers/pipelines/__init__.py +34 -0
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +8 -2
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1 -1
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +0 -6
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +8 -8
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +3 -3
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +1 -8
- diffusers/pipelines/auto_pipeline.py +53 -6
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +50 -22
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +51 -20
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +69 -21
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +47 -21
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +1 -1
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -178
- diffusers/pipelines/controlnet/pipeline_controlnet.py +11 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +1 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +1 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +1 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +3 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +1 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +5 -1
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +53 -19
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +7 -7
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +31 -8
- diffusers/pipelines/flux/__init__.py +13 -1
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +204 -29
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +49 -27
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +40 -30
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +78 -56
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +33 -27
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +36 -29
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +16 -0
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +5 -1
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +9 -9
- diffusers/pipelines/kolors/text_encoder.py +2 -2
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +1 -8
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/pag/__init__.py +7 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1 -2
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1 -2
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1 -3
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1 -3
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +5 -1
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +6 -13
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +6 -6
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +3 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pipeline_flax_utils.py +1 -1
- diffusers/pipelines/pipeline_loading_utils.py +25 -4
- diffusers/pipelines/pipeline_utils.py +35 -6
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +6 -13
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +6 -13
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +12 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +18 -3
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +216 -20
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +62 -9
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +57 -8
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +11 -1
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +0 -8
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +0 -8
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +0 -8
- diffusers/pipelines/unidiffuser/modeling_uvit.py +2 -2
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
- diffusers/quantizers/auto.py +14 -1
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +4 -1
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +280 -2
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +292 -0
- diffusers/schedulers/scheduling_ddpm.py +2 -6
- diffusers/schedulers/scheduling_ddpm_parallel.py +2 -6
- diffusers/schedulers/scheduling_deis_multistep.py +28 -9
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +35 -9
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +35 -8
- diffusers/schedulers/scheduling_dpmsolver_sde.py +4 -4
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +48 -10
- diffusers/schedulers/scheduling_euler_discrete.py +4 -4
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +153 -6
- diffusers/schedulers/scheduling_heun_discrete.py +4 -4
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +4 -4
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +4 -4
- diffusers/schedulers/scheduling_lcm.py +2 -6
- diffusers/schedulers/scheduling_lms_discrete.py +4 -4
- diffusers/schedulers/scheduling_repaint.py +1 -1
- diffusers/schedulers/scheduling_sasolver.py +28 -9
- diffusers/schedulers/scheduling_tcd.py +2 -6
- diffusers/schedulers/scheduling_unipc_multistep.py +53 -8
- diffusers/training_utils.py +16 -2
- diffusers/utils/__init__.py +5 -0
- diffusers/utils/constants.py +1 -0
- diffusers/utils/dummy_pt_objects.py +180 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +270 -0
- diffusers/utils/dynamic_modules_utils.py +3 -3
- diffusers/utils/hub_utils.py +31 -39
- diffusers/utils/import_utils.py +67 -0
- diffusers/utils/peft_utils.py +3 -0
- diffusers/utils/testing_utils.py +56 -1
- diffusers/utils/torch_utils.py +3 -0
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/METADATA +6 -6
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/RECORD +214 -162
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/WHEEL +1 -1
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/LICENSE +0 -0
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/entry_points.txt +0 -0
- {diffusers-0.31.0.dist-info → diffusers-0.32.1.dist-info}/top_level.txt +0 -0
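Among the larger additions in 0.32 are two new quantization backends (`diffusers/quantizers/gguf/` and `diffusers/quantizers/torchao/`). As a minimal sketch of the GGUF path — the checkpoint URL below is an assumption (a community GGUF export of FLUX.1-dev by city96), not something shipped with this package:

```python
import torch

from diffusers import FluxPipeline, FluxTransformer2DModel, GGUFQuantizationConfig

# Assumed community checkpoint; any single-file GGUF export of the transformer should work the same way.
ckpt_path = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf"

# GGUFQuantizationConfig is new in this release; weights stay quantized in memory
# and are dequantized on the fly to `compute_dtype` during the forward pass.
transformer = FluxTransformer2DModel.from_single_file(
    ckpt_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()
image = pipe("A cat holding a sign that says hello world").images[0]
```

The largest single file addition, the new HunyuanVideo text-to-video pipeline (`diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py`, +687 lines), is shown in full below.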
diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py (new file)
@@ -0,0 +1,687 @@
+# Copyright 2024 The HunyuanVideo Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast
+
+from ...callbacks import MultiPipelineCallbacks, PipelineCallback
+from ...loaders import HunyuanVideoLoraLoaderMixin
+from ...models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3DModel
+from ...schedulers import FlowMatchEulerDiscreteScheduler
+from ...utils import logging, replace_example_docstring
+from ...utils.torch_utils import randn_tensor
+from ...video_processor import VideoProcessor
+from ..pipeline_utils import DiffusionPipeline
+from .pipeline_output import HunyuanVideoPipelineOutput
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```python
+        >>> import torch
+        >>> from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
+        >>> from diffusers.utils import export_to_video
+
+        >>> model_id = "hunyuanvideo-community/HunyuanVideo"
+        >>> transformer = HunyuanVideoTransformer3DModel.from_pretrained(
+        ...     model_id, subfolder="transformer", torch_dtype=torch.bfloat16
+        ... )
+        >>> pipe = HunyuanVideoPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.float16)
+        >>> pipe.vae.enable_tiling()
+        >>> pipe.to("cuda")
+
+        >>> output = pipe(
+        ...     prompt="A cat walks on the grass, realistic",
+        ...     height=320,
+        ...     width=512,
+        ...     num_frames=61,
+        ...     num_inference_steps=30,
+        ... ).frames[0]
+        >>> export_to_video(output, "output.mp4", fps=15)
+        ```
+"""
+
+
+DEFAULT_PROMPT_TEMPLATE = {
+    "template": (
+        "<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: "
+        "1. The main content and theme of the video."
+        "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
+        "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
+        "4. background environment, light, style and atmosphere."
+        "5. camera angles, movements, and transitions used in the video:<|eot_id|>"
+        "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
+    ),
+    "crop_start": 95,
+}
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    r"""
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+            must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+            `num_inference_steps` and `sigmas` must be `None`.
+        sigmas (`List[float]`, *optional*):
+            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+            `num_inference_steps` and `timesteps` must be `None`.
+
+    Returns:
+        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+        second element is the number of inference steps.
+    """
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accept_sigmas:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" sigmas schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
+class HunyuanVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin):
+    r"""
+    Pipeline for text-to-video generation using HunyuanVideo.
+
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
+    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
+
+    Args:
+        text_encoder ([`LlamaModel`]):
+            [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers).
+        tokenizer (`LlamaTokenizer`):
+            Tokenizer from [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers).
+        transformer ([`HunyuanVideoTransformer3DModel`]):
+            Conditional Transformer to denoise the encoded image latents.
+        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+        vae ([`AutoencoderKLHunyuanVideo`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
+        text_encoder_2 ([`CLIPTextModel`]):
+            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+        tokenizer_2 (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
+    """
+
+    model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
+    _callback_tensor_inputs = ["latents", "prompt_embeds"]
+
+    def __init__(
+        self,
+        text_encoder: LlamaModel,
+        tokenizer: LlamaTokenizerFast,
+        transformer: HunyuanVideoTransformer3DModel,
+        vae: AutoencoderKLHunyuanVideo,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        text_encoder_2: CLIPTextModel,
+        tokenizer_2: CLIPTokenizer,
+    ):
+        super().__init__()
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+            transformer=transformer,
+            scheduler=scheduler,
+            text_encoder_2=text_encoder_2,
+            tokenizer_2=tokenizer_2,
+        )
+
+        self.vae_scale_factor_temporal = (
+            self.vae.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4
+        )
+        self.vae_scale_factor_spatial = (
+            self.vae.spatial_compression_ratio if hasattr(self, "vae") and self.vae is not None else 8
+        )
+        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
+
+    def _get_llama_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]],
+        prompt_template: Dict[str, Any],
+        num_videos_per_prompt: int = 1,
+        device: Optional[torch.device] = None,
+        dtype: Optional[torch.dtype] = None,
+        max_sequence_length: int = 256,
+        num_hidden_layers_to_skip: int = 2,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        device = device or self._execution_device
+        dtype = dtype or self.text_encoder.dtype
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        prompt = [prompt_template["template"].format(p) for p in prompt]
+
+        crop_start = prompt_template.get("crop_start", None)
+        if crop_start is None:
+            prompt_template_input = self.tokenizer(
+                prompt_template["template"],
+                padding="max_length",
+                return_tensors="pt",
+                return_length=False,
+                return_overflowing_tokens=False,
+                return_attention_mask=False,
+            )
+            crop_start = prompt_template_input["input_ids"].shape[-1]
+            # Remove <|eot_id|> token and placeholder {}
+            crop_start -= 2
+
+        max_sequence_length += crop_start
+        text_inputs = self.tokenizer(
+            prompt,
+            max_length=max_sequence_length,
+            padding="max_length",
+            truncation=True,
+            return_tensors="pt",
+            return_length=False,
+            return_overflowing_tokens=False,
+            return_attention_mask=True,
+        )
+        text_input_ids = text_inputs.input_ids.to(device=device)
+        prompt_attention_mask = text_inputs.attention_mask.to(device=device)
+
+        prompt_embeds = self.text_encoder(
+            input_ids=text_input_ids,
+            attention_mask=prompt_attention_mask,
+            output_hidden_states=True,
+        ).hidden_states[-(num_hidden_layers_to_skip + 1)]
+        prompt_embeds = prompt_embeds.to(dtype=dtype)
+
+        if crop_start is not None and crop_start > 0:
+            prompt_embeds = prompt_embeds[:, crop_start:]
+            prompt_attention_mask = prompt_attention_mask[:, crop_start:]
+
+        # duplicate text embeddings for each generation per prompt, using mps friendly method
+        _, seq_len, _ = prompt_embeds.shape
+        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
+        prompt_attention_mask = prompt_attention_mask.repeat(1, num_videos_per_prompt)
+        prompt_attention_mask = prompt_attention_mask.view(batch_size * num_videos_per_prompt, seq_len)
+
+        return prompt_embeds, prompt_attention_mask
+
+    def _get_clip_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]],
+        num_videos_per_prompt: int = 1,
+        device: Optional[torch.device] = None,
+        dtype: Optional[torch.dtype] = None,
+        max_sequence_length: int = 77,
+    ) -> torch.Tensor:
+        device = device or self._execution_device
+        dtype = dtype or self.text_encoder_2.dtype
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        text_inputs = self.tokenizer_2(
+            prompt,
+            padding="max_length",
+            max_length=max_sequence_length,
+            truncation=True,
+            return_tensors="pt",
+        )
+
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
+        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+            removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
+            logger.warning(
+                "The following part of your input was truncated because CLIP can only handle sequences up to"
+                f" {max_sequence_length} tokens: {removed_text}"
+            )
+
+        prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output
+
+        # duplicate text embeddings for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt)
+        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, -1)
+
+        return prompt_embeds
+
+    def encode_prompt(
+        self,
+        prompt: Union[str, List[str]],
+        prompt_2: Union[str, List[str]] = None,
+        prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
+        num_videos_per_prompt: int = 1,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        prompt_attention_mask: Optional[torch.Tensor] = None,
+        device: Optional[torch.device] = None,
+        dtype: Optional[torch.dtype] = None,
+        max_sequence_length: int = 256,
+    ):
+        if prompt_embeds is None:
+            prompt_embeds, prompt_attention_mask = self._get_llama_prompt_embeds(
+                prompt,
+                prompt_template,
+                num_videos_per_prompt,
+                device=device,
+                dtype=dtype,
+                max_sequence_length=max_sequence_length,
+            )
+
+        if pooled_prompt_embeds is None:
+            if prompt_2 is None:
+                prompt_2 = prompt
+            pooled_prompt_embeds = self._get_clip_prompt_embeds(
+                prompt_2,
+                num_videos_per_prompt,
+                device=device,
+                dtype=dtype,
+                max_sequence_length=77,
+            )
+
+        return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask
+
+    def check_inputs(
+        self,
+        prompt,
+        prompt_2,
+        height,
+        width,
+        prompt_embeds=None,
+        callback_on_step_end_tensor_inputs=None,
+        prompt_template=None,
+    ):
+        if height % 16 != 0 or width % 16 != 0:
+            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")
+
+        if callback_on_step_end_tensor_inputs is not None and not all(
+            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+        ):
+            raise ValueError(
+                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+            )
+
+        if prompt is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt_2 is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt is None and prompt_embeds is None:
+            raise ValueError(
+                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+            )
+        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
+            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
+
+        if prompt_template is not None:
+            if not isinstance(prompt_template, dict):
+                raise ValueError(f"`prompt_template` has to be of type `dict` but is {type(prompt_template)}")
+            if "template" not in prompt_template:
+                raise ValueError(
+                    f"`prompt_template` has to contain a key `template` but only found {prompt_template.keys()}"
+                )
+
+    def prepare_latents(
+        self,
+        batch_size: int,
+        num_channels_latents: int = 32,
+        height: int = 720,
+        width: int = 1280,
+        num_frames: int = 129,
+        dtype: Optional[torch.dtype] = None,
+        device: Optional[torch.device] = None,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+    ) -> torch.Tensor:
+        if latents is not None:
+            return latents.to(device=device, dtype=dtype)
+
+        shape = (
+            batch_size,
+            num_channels_latents,
+            num_frames,
+            int(height) // self.vae_scale_factor_spatial,
+            int(width) // self.vae_scale_factor_spatial,
+        )
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+        return latents
+
+    def enable_vae_slicing(self):
+        r"""
+        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
+        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+        """
+        self.vae.enable_slicing()
+
+    def disable_vae_slicing(self):
+        r"""
+        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
+        computing decoding in one step.
+        """
+        self.vae.disable_slicing()
+
+    def enable_vae_tiling(self):
+        r"""
+        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
+        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
+        processing larger images.
+        """
+        self.vae.enable_tiling()
+
+    def disable_vae_tiling(self):
+        r"""
+        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
+        computing decoding in one step.
+        """
+        self.vae.disable_tiling()
+
+    @property
+    def guidance_scale(self):
+        return self._guidance_scale
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @property
+    def attention_kwargs(self):
+        return self._attention_kwargs
+
+    @property
+    def interrupt(self):
+        return self._interrupt
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Union[str, List[str]] = None,
+        prompt_2: Union[str, List[str]] = None,
+        height: int = 720,
+        width: int = 1280,
+        num_frames: int = 129,
+        num_inference_steps: int = 50,
+        sigmas: List[float] = None,
+        guidance_scale: float = 6.0,
+        num_videos_per_prompt: Optional[int] = 1,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        prompt_attention_mask: Optional[torch.Tensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        attention_kwargs: Optional[Dict[str, Any]] = None,
+        callback_on_step_end: Optional[
+            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+        ] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
+        max_sequence_length: int = 256,
+    ):
+        r"""
+        The call function to the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`
+                instead.
+            prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt`
+                will be used instead.
+            height (`int`, defaults to `720`):
+                The height in pixels of the generated video.
+            width (`int`, defaults to `1280`):
+                The width in pixels of the generated video.
+            num_frames (`int`, defaults to `129`):
+                The number of frames in the generated video.
+            num_inference_steps (`int`, defaults to `50`):
+                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
+                expense of slower inference.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+                will be used.
+            guidance_scale (`float`, defaults to `6.0`):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate videos that are closely linked to the
+                text `prompt`, usually at the expense of lower quality. Note that the only available HunyuanVideo
+                model is CFG-distilled, which means that traditional guidance between the unconditional and
+                conditional latents is not applied.
+            num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                The number of videos to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor is generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated video. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a plain tuple.
+            attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            prompt_template (`Dict[str, Any]`, *optional*, defaults to `DEFAULT_PROMPT_TEMPLATE`):
+                The template applied to `prompt` before it is passed to the Llama text encoder. The first
+                `crop_start` tokens of the template are cropped from the resulting embeddings.
+            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+                each denoising step during inference, with the following arguments: `callback_on_step_end(self:
+                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+
+        Examples:
+
+        Returns:
+            [`~HunyuanVideoPipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned; otherwise, a `tuple` is
+                returned where the first element is a list with the generated video frames.
+        """
+
+        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            height,
+            width,
+            prompt_embeds,
+            callback_on_step_end_tensor_inputs,
+            prompt_template,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._attention_kwargs = attention_kwargs
+        self._interrupt = False
+
+        device = self._execution_device
+
+        # 2. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        # 3. Encode input prompt
+        prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = self.encode_prompt(
+            prompt=prompt,
+            prompt_2=prompt_2,
+            prompt_template=prompt_template,
+            num_videos_per_prompt=num_videos_per_prompt,
+            prompt_embeds=prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            prompt_attention_mask=prompt_attention_mask,
+            device=device,
+            max_sequence_length=max_sequence_length,
+        )
+
+        transformer_dtype = self.transformer.dtype
+        prompt_embeds = prompt_embeds.to(transformer_dtype)
+        prompt_attention_mask = prompt_attention_mask.to(transformer_dtype)
+        if pooled_prompt_embeds is not None:
+            pooled_prompt_embeds = pooled_prompt_embeds.to(transformer_dtype)
+
+        # 4. Prepare timesteps
+        sigmas = np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas
+        timesteps, num_inference_steps = retrieve_timesteps(
+            self.scheduler,
+            num_inference_steps,
+            device,
+            sigmas=sigmas,
+        )
+
+        # 5. Prepare latent variables
+        num_channels_latents = self.transformer.config.in_channels
+        num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
+        latents = self.prepare_latents(
+            batch_size * num_videos_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            num_latent_frames,
+            torch.float32,
+            device,
+            generator,
+            latents,
+        )
+
+        # 6. Prepare guidance condition
+        guidance = torch.tensor([guidance_scale] * latents.shape[0], dtype=transformer_dtype, device=device) * 1000.0
+
+        # 7. Denoising loop
+        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+        self._num_timesteps = len(timesteps)
+
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                if self.interrupt:
+                    continue
+
+                latent_model_input = latents.to(transformer_dtype)
+                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+                timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+                noise_pred = self.transformer(
+                    hidden_states=latent_model_input,
+                    timestep=timestep,
+                    encoder_hidden_states=prompt_embeds,
+                    encoder_attention_mask=prompt_attention_mask,
+                    pooled_projections=pooled_prompt_embeds,
+                    guidance=guidance,
+                    attention_kwargs=attention_kwargs,
+                    return_dict=False,
+                )[0]
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+
+        if not output_type == "latent":
+            latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor
+            video = self.vae.decode(latents, return_dict=False)[0]
+            video = self.video_processor.postprocess_video(video, output_type=output_type)
+        else:
+            video = latents
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (video,)
+
+        return HunyuanVideoPipelineOutput(frames=video)
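Since the pipeline's default sigma schedule is `np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1]` (step 4 above), the `sigmas` argument can be used to substitute a custom schedule: `retrieve_timesteps` forwards it to `FlowMatchEulerDiscreteScheduler.set_timesteps` and derives the step count from its length. A minimal sketch, borrowing the checkpoint and memory-saving settings from the docstring example above:

```python
import numpy as np
import torch

from diffusers import HunyuanVideoPipeline
from diffusers.utils import export_to_video

pipe = HunyuanVideoPipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo", torch_dtype=torch.float16
)
pipe.vae.enable_tiling()
pipe.to("cuda")

# Same formula the pipeline uses internally, just with fewer steps.
num_steps = 20
sigmas = np.linspace(1.0, 0.0, num_steps + 1)[:-1].tolist()

video = pipe(
    prompt="A cat walks on the grass, realistic",
    height=320,  # height and width must be divisible by 16 (see check_inputs)
    width=512,
    num_frames=61,
    sigmas=sigmas,  # overrides the default 50-step schedule
).frames[0]
export_to_video(video, "output.mp4", fps=15)
```

Note that `guidance_scale` here is not classifier-free guidance: the denoising loop makes a single conditional transformer call per step and passes `guidance_scale * 1000` as an embedded conditioning signal (step 6 above), reflecting the CFG-distilled model.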