diffusers 0.34.0__py3-none-any.whl → 0.35.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- diffusers/__init__.py +98 -1
- diffusers/callbacks.py +35 -0
- diffusers/commands/custom_blocks.py +134 -0
- diffusers/commands/diffusers_cli.py +2 -0
- diffusers/commands/fp16_safetensors.py +1 -1
- diffusers/configuration_utils.py +11 -2
- diffusers/dependency_versions_table.py +3 -3
- diffusers/guiders/__init__.py +41 -0
- diffusers/guiders/adaptive_projected_guidance.py +188 -0
- diffusers/guiders/auto_guidance.py +190 -0
- diffusers/guiders/classifier_free_guidance.py +141 -0
- diffusers/guiders/classifier_free_zero_star_guidance.py +152 -0
- diffusers/guiders/frequency_decoupled_guidance.py +327 -0
- diffusers/guiders/guider_utils.py +309 -0
- diffusers/guiders/perturbed_attention_guidance.py +271 -0
- diffusers/guiders/skip_layer_guidance.py +262 -0
- diffusers/guiders/smoothed_energy_guidance.py +251 -0
- diffusers/guiders/tangential_classifier_free_guidance.py +143 -0
- diffusers/hooks/__init__.py +17 -0
- diffusers/hooks/_common.py +56 -0
- diffusers/hooks/_helpers.py +293 -0
- diffusers/hooks/faster_cache.py +7 -6
- diffusers/hooks/first_block_cache.py +259 -0
- diffusers/hooks/group_offloading.py +292 -286
- diffusers/hooks/hooks.py +56 -1
- diffusers/hooks/layer_skip.py +263 -0
- diffusers/hooks/layerwise_casting.py +2 -7
- diffusers/hooks/pyramid_attention_broadcast.py +14 -11
- diffusers/hooks/smoothed_energy_guidance_utils.py +167 -0
- diffusers/hooks/utils.py +43 -0
- diffusers/loaders/__init__.py +6 -0
- diffusers/loaders/ip_adapter.py +255 -4
- diffusers/loaders/lora_base.py +63 -30
- diffusers/loaders/lora_conversion_utils.py +434 -53
- diffusers/loaders/lora_pipeline.py +834 -37
- diffusers/loaders/peft.py +28 -5
- diffusers/loaders/single_file_model.py +44 -11
- diffusers/loaders/single_file_utils.py +170 -2
- diffusers/loaders/transformer_flux.py +9 -10
- diffusers/loaders/transformer_sd3.py +6 -1
- diffusers/loaders/unet.py +22 -5
- diffusers/loaders/unet_loader_utils.py +5 -2
- diffusers/models/__init__.py +8 -0
- diffusers/models/attention.py +484 -3
- diffusers/models/attention_dispatch.py +1218 -0
- diffusers/models/attention_processor.py +105 -663
- diffusers/models/auto_model.py +2 -2
- diffusers/models/autoencoders/__init__.py +1 -0
- diffusers/models/autoencoders/autoencoder_dc.py +14 -1
- diffusers/models/autoencoders/autoencoder_kl.py +1 -1
- diffusers/models/autoencoders/autoencoder_kl_cosmos.py +3 -1
- diffusers/models/autoencoders/autoencoder_kl_qwenimage.py +1070 -0
- diffusers/models/autoencoders/autoencoder_kl_wan.py +370 -40
- diffusers/models/cache_utils.py +31 -9
- diffusers/models/controlnets/controlnet_flux.py +5 -5
- diffusers/models/controlnets/controlnet_union.py +4 -4
- diffusers/models/embeddings.py +26 -34
- diffusers/models/model_loading_utils.py +233 -1
- diffusers/models/modeling_flax_utils.py +1 -2
- diffusers/models/modeling_utils.py +159 -94
- diffusers/models/transformers/__init__.py +2 -0
- diffusers/models/transformers/transformer_chroma.py +16 -117
- diffusers/models/transformers/transformer_cogview4.py +36 -2
- diffusers/models/transformers/transformer_cosmos.py +11 -4
- diffusers/models/transformers/transformer_flux.py +372 -132
- diffusers/models/transformers/transformer_hunyuan_video.py +6 -0
- diffusers/models/transformers/transformer_ltx.py +104 -23
- diffusers/models/transformers/transformer_qwenimage.py +645 -0
- diffusers/models/transformers/transformer_skyreels_v2.py +607 -0
- diffusers/models/transformers/transformer_wan.py +298 -85
- diffusers/models/transformers/transformer_wan_vace.py +15 -21
- diffusers/models/unets/unet_2d_condition.py +2 -1
- diffusers/modular_pipelines/__init__.py +83 -0
- diffusers/modular_pipelines/components_manager.py +1068 -0
- diffusers/modular_pipelines/flux/__init__.py +66 -0
- diffusers/modular_pipelines/flux/before_denoise.py +689 -0
- diffusers/modular_pipelines/flux/decoders.py +109 -0
- diffusers/modular_pipelines/flux/denoise.py +227 -0
- diffusers/modular_pipelines/flux/encoders.py +412 -0
- diffusers/modular_pipelines/flux/modular_blocks.py +181 -0
- diffusers/modular_pipelines/flux/modular_pipeline.py +59 -0
- diffusers/modular_pipelines/modular_pipeline.py +2446 -0
- diffusers/modular_pipelines/modular_pipeline_utils.py +672 -0
- diffusers/modular_pipelines/node_utils.py +665 -0
- diffusers/modular_pipelines/stable_diffusion_xl/__init__.py +77 -0
- diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +1874 -0
- diffusers/modular_pipelines/stable_diffusion_xl/decoders.py +208 -0
- diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +771 -0
- diffusers/modular_pipelines/stable_diffusion_xl/encoders.py +887 -0
- diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py +380 -0
- diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py +365 -0
- diffusers/modular_pipelines/wan/__init__.py +66 -0
- diffusers/modular_pipelines/wan/before_denoise.py +365 -0
- diffusers/modular_pipelines/wan/decoders.py +105 -0
- diffusers/modular_pipelines/wan/denoise.py +261 -0
- diffusers/modular_pipelines/wan/encoders.py +242 -0
- diffusers/modular_pipelines/wan/modular_blocks.py +144 -0
- diffusers/modular_pipelines/wan/modular_pipeline.py +90 -0
- diffusers/pipelines/__init__.py +31 -0
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +2 -3
- diffusers/pipelines/auto_pipeline.py +17 -13
- diffusers/pipelines/chroma/pipeline_chroma.py +5 -5
- diffusers/pipelines/chroma/pipeline_chroma_img2img.py +5 -5
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +9 -8
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +9 -8
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +10 -9
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +9 -8
- diffusers/pipelines/cogview4/pipeline_cogview4.py +16 -15
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +3 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +212 -93
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +7 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +194 -92
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +1 -1
- diffusers/pipelines/dit/pipeline_dit.py +3 -1
- diffusers/pipelines/flux/__init__.py +4 -0
- diffusers/pipelines/flux/pipeline_flux.py +34 -26
- diffusers/pipelines/flux/pipeline_flux_control.py +8 -8
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_fill.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_img2img.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_kontext.py +1134 -0
- diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +1460 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +1 -1
- diffusers/pipelines/flux/pipeline_output.py +6 -4
- diffusers/pipelines/hidream_image/pipeline_hidream_image.py +5 -5
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +25 -24
- diffusers/pipelines/ltx/pipeline_ltx.py +13 -12
- diffusers/pipelines/ltx/pipeline_ltx_condition.py +10 -9
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +13 -12
- diffusers/pipelines/mochi/pipeline_mochi.py +9 -8
- diffusers/pipelines/pipeline_flax_utils.py +2 -2
- diffusers/pipelines/pipeline_loading_utils.py +24 -2
- diffusers/pipelines/pipeline_utils.py +22 -15
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +3 -1
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +20 -0
- diffusers/pipelines/qwenimage/__init__.py +55 -0
- diffusers/pipelines/qwenimage/pipeline_output.py +21 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage.py +726 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +849 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +829 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +1015 -0
- diffusers/pipelines/sana/pipeline_sana_sprint.py +5 -5
- diffusers/pipelines/skyreels_v2/__init__.py +59 -0
- diffusers/pipelines/skyreels_v2/pipeline_output.py +20 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py +610 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py +978 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_i2v.py +1059 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_v2v.py +1063 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_i2v.py +745 -0
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +2 -1
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +6 -5
- diffusers/pipelines/wan/pipeline_wan.py +78 -20
- diffusers/pipelines/wan/pipeline_wan_i2v.py +112 -32
- diffusers/pipelines/wan/pipeline_wan_vace.py +1 -2
- diffusers/quantizers/__init__.py +1 -177
- diffusers/quantizers/base.py +11 -0
- diffusers/quantizers/gguf/utils.py +92 -3
- diffusers/quantizers/pipe_quant_config.py +202 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +26 -0
- diffusers/schedulers/scheduling_deis_multistep.py +8 -1
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +6 -0
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +6 -0
- diffusers/schedulers/scheduling_scm.py +0 -1
- diffusers/schedulers/scheduling_unipc_multistep.py +10 -1
- diffusers/schedulers/scheduling_utils.py +2 -2
- diffusers/schedulers/scheduling_utils_flax.py +1 -1
- diffusers/training_utils.py +78 -0
- diffusers/utils/__init__.py +10 -0
- diffusers/utils/constants.py +4 -0
- diffusers/utils/dummy_pt_objects.py +312 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +255 -0
- diffusers/utils/dynamic_modules_utils.py +84 -25
- diffusers/utils/hub_utils.py +33 -17
- diffusers/utils/import_utils.py +70 -0
- diffusers/utils/peft_utils.py +11 -8
- diffusers/utils/testing_utils.py +136 -10
- diffusers/utils/torch_utils.py +18 -0
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/METADATA +6 -6
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/RECORD +191 -127
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/LICENSE +0 -0
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/WHEEL +0 -0
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/entry_points.txt +0 -0
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/top_level.txt +0 -0
diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py
@@ -0,0 +1,1015 @@
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import numpy as np
+import PIL.Image
+import torch
+from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer
+
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import QwenImageLoraLoaderMixin
+from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel
+from ...schedulers import FlowMatchEulerDiscreteScheduler
+from ...utils import is_torch_xla_available, logging, replace_example_docstring
+from ...utils.torch_utils import randn_tensor
+from ..pipeline_utils import DiffusionPipeline
+from .pipeline_output import QwenImagePipelineOutput
+
+
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+
+    XLA_AVAILABLE = True
+else:
+    XLA_AVAILABLE = False
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import torch
+        >>> from diffusers import QwenImageInpaintPipeline
+        >>> from diffusers.utils import load_image
+
+        >>> pipe = QwenImageInpaintPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16)
+        >>> pipe.to("cuda")
+        >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
+        >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
+        >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
+        >>> source = load_image(img_url)
+        >>> mask = load_image(mask_url)
+        >>> image = pipe(prompt=prompt, negative_prompt=" ", image=source, mask_image=mask, strength=0.85).images[0]
+        >>> image.save("qwenimage_inpainting.png")
+        ```
+"""
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+def retrieve_latents(
+    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+        return encoder_output.latent_dist.sample(generator)
+    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+        return encoder_output.latent_dist.mode()
+    elif hasattr(encoder_output, "latents"):
+        return encoder_output.latents
+    else:
+        raise AttributeError("Could not access latents of provided encoder_output")
+
+
+# Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
+def calculate_shift(
+    image_seq_len,
+    base_seq_len: int = 256,
+    max_seq_len: int = 4096,
+    base_shift: float = 0.5,
+    max_shift: float = 1.15,
+):
+    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+    b = base_shift - m * base_seq_len
+    mu = image_seq_len * m + b
+    return mu
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    r"""
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+            must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+            `num_inference_steps` and `sigmas` must be `None`.
+        sigmas (`List[float]`, *optional*):
+            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+            `num_inference_steps` and `timesteps` must be `None`.
+
+    Returns:
+        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+        second element is the number of inference steps.
+    """
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accept_sigmas:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" sigmas schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
+class QwenImageInpaintPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
+    r"""
+    The QwenImage pipeline for text-to-image generation.
+
+    Args:
+        transformer ([`QwenImageTransformer2DModel`]):
+            Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
+        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+        text_encoder ([`Qwen2.5-VL-7B-Instruct`]):
+            [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the
+            [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant.
+        tokenizer (`QwenTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
+    """
+
+    model_cpu_offload_seq = "text_encoder->transformer->vae"
+    _callback_tensor_inputs = ["latents", "prompt_embeds"]
+
+    def __init__(
+        self,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        vae: AutoencoderKLQwenImage,
+        text_encoder: Qwen2_5_VLForConditionalGeneration,
+        tokenizer: Qwen2Tokenizer,
+        transformer: QwenImageTransformer2DModel,
+    ):
+        super().__init__()
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+            transformer=transformer,
+            scheduler=scheduler,
+        )
+        self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8
+        # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible
+        # by the patch size. So the vae scale factor is multiplied by the patch size to account for this
+        self.latent_channels = self.vae.config.z_dim if getattr(self, "vae", None) else 16
+        self.image_processor = VaeImageProcessor(
+            vae_scale_factor=self.vae_scale_factor * 2, vae_latent_channels=self.latent_channels
+        )
+        self.mask_processor = VaeImageProcessor(
+            vae_scale_factor=self.vae_scale_factor * 2,
+            vae_latent_channels=self.latent_channels,
+            do_normalize=False,
+            do_binarize=True,
+            do_convert_grayscale=True,
+        )
+        self.tokenizer_max_length = 1024
+        self.prompt_template_encode = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
+        self.prompt_template_encode_start_idx = 34
+        self.default_sample_size = 128
+
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden
+    def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor):
+        bool_mask = mask.bool()
+        valid_lengths = bool_mask.sum(dim=1)
+        selected = hidden_states[bool_mask]
+        split_result = torch.split(selected, valid_lengths.tolist(), dim=0)
+
+        return split_result
+
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._get_qwen_prompt_embeds
+    def _get_qwen_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]] = None,
+        device: Optional[torch.device] = None,
+        dtype: Optional[torch.dtype] = None,
+    ):
+        device = device or self._execution_device
+        dtype = dtype or self.text_encoder.dtype
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        template = self.prompt_template_encode
+        drop_idx = self.prompt_template_encode_start_idx
+        txt = [template.format(e) for e in prompt]
+        txt_tokens = self.tokenizer(
+            txt, max_length=self.tokenizer_max_length + drop_idx, padding=True, truncation=True, return_tensors="pt"
+        ).to(device)
+        encoder_hidden_states = self.text_encoder(
+            input_ids=txt_tokens.input_ids,
+            attention_mask=txt_tokens.attention_mask,
+            output_hidden_states=True,
+        )
+        hidden_states = encoder_hidden_states.hidden_states[-1]
+        split_hidden_states = self._extract_masked_hidden(hidden_states, txt_tokens.attention_mask)
+        split_hidden_states = [e[drop_idx:] for e in split_hidden_states]
+        attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states]
+        max_seq_len = max([e.size(0) for e in split_hidden_states])
+        prompt_embeds = torch.stack(
+            [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states]
+        )
+        encoder_attention_mask = torch.stack(
+            [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list]
+        )
+
+        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+
+        return prompt_embeds, encoder_attention_mask
+
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_img2img.QwenImageImg2ImgPipeline._encode_vae_image
+    def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
+        if isinstance(generator, list):
+            image_latents = [
+                retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
+                for i in range(image.shape[0])
+            ]
+            image_latents = torch.cat(image_latents, dim=0)
+        else:
+            image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
+
+        latents_mean = (
+            torch.tensor(self.vae.config.latents_mean)
+            .view(1, self.vae.config.z_dim, 1, 1, 1)
+            .to(image_latents.device, image_latents.dtype)
+        )
+        latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
+            image_latents.device, image_latents.dtype
+        )
+
+        image_latents = (image_latents - latents_mean) * latents_std
+
+        return image_latents
+
+    # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps
+    def get_timesteps(self, num_inference_steps, strength, device):
+        # get the original timestep using init_timestep
+        init_timestep = min(num_inference_steps * strength, num_inference_steps)
+
+        t_start = int(max(num_inference_steps - init_timestep, 0))
+        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+        if hasattr(self.scheduler, "set_begin_index"):
+            self.scheduler.set_begin_index(t_start * self.scheduler.order)
+
+        return timesteps, num_inference_steps - t_start
+
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.encode_prompt
+    def encode_prompt(
+        self,
+        prompt: Union[str, List[str]],
+        device: Optional[torch.device] = None,
+        num_images_per_prompt: int = 1,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        prompt_embeds_mask: Optional[torch.Tensor] = None,
+        max_sequence_length: int = 1024,
+    ):
+        r"""
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                prompt to be encoded
+            device: (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+        """
+        device = device or self._execution_device
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt) if prompt_embeds is None else prompt_embeds.shape[0]
+
+        if prompt_embeds is None:
+            prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, device)
+
+        prompt_embeds = prompt_embeds[:, :max_sequence_length]
+        prompt_embeds_mask = prompt_embeds_mask[:, :max_sequence_length]
+
+        _, seq_len, _ = prompt_embeds.shape
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+        prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len)
+
+        return prompt_embeds, prompt_embeds_mask
+
+    def check_inputs(
+        self,
+        prompt,
+        image,
+        mask_image,
+        strength,
+        height,
+        width,
+        output_type,
+        negative_prompt=None,
+        prompt_embeds=None,
+        negative_prompt_embeds=None,
+        prompt_embeds_mask=None,
+        negative_prompt_embeds_mask=None,
+        callback_on_step_end_tensor_inputs=None,
+        padding_mask_crop=None,
+        max_sequence_length=None,
+    ):
+        if strength < 0 or strength > 1:
+            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
+
+        if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
+            logger.warning(
+                f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
+            )
+
+        if callback_on_step_end_tensor_inputs is not None and not all(
+            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+        ):
+            raise ValueError(
+                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+            )
+
+        if prompt is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt is None and prompt_embeds is None:
+            raise ValueError(
+                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+            )
+        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+        if negative_prompt is not None and negative_prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+            )
+
+        if prompt_embeds is not None and prompt_embeds_mask is None:
+            raise ValueError(
+                "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`."
+            )
+        if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None:
+            raise ValueError(
+                "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`."
+            )
+        if padding_mask_crop is not None:
+            if not isinstance(image, PIL.Image.Image):
+                raise ValueError(
+                    f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
+                )
+            if not isinstance(mask_image, PIL.Image.Image):
+                raise ValueError(
+                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
+                    f" {type(mask_image)}."
+                )
+            if output_type != "pil":
+                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")
+
+        if max_sequence_length is not None and max_sequence_length > 1024:
+            raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}")
+
+    @staticmethod
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents
+    def _pack_latents(latents, batch_size, num_channels_latents, height, width):
+        latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
+        latents = latents.permute(0, 2, 4, 1, 3, 5)
+        latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
+
+        return latents
+
+    @staticmethod
+    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents
+    def _unpack_latents(latents, height, width, vae_scale_factor):
+        batch_size, num_patches, channels = latents.shape
+
+        # VAE applies 8x compression on images but we must also account for packing which requires
+        # latent height and width to be divisible by 2.
+        height = 2 * (int(height) // (vae_scale_factor * 2))
+        width = 2 * (int(width) // (vae_scale_factor * 2))
+
+        latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
+        latents = latents.permute(0, 3, 1, 4, 2, 5)
+
+        latents = latents.reshape(batch_size, channels // (2 * 2), 1, height, width)
+
+        return latents
+
+    def enable_vae_slicing(self):
+        r"""
+        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
+        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+        """
+        self.vae.enable_slicing()
+
+    def disable_vae_slicing(self):
+        r"""
+        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
+        computing decoding in one step.
+        """
+        self.vae.disable_slicing()
+
+    def enable_vae_tiling(self):
+        r"""
+        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
+        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
+        processing larger images.
+        """
+        self.vae.enable_tiling()
+
+    def disable_vae_tiling(self):
+        r"""
+        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
+        computing decoding in one step.
+        """
+        self.vae.disable_tiling()
+
+    def prepare_latents(
+        self,
+        image,
+        timestep,
+        batch_size,
+        num_channels_latents,
+        height,
+        width,
+        dtype,
+        device,
+        generator,
+        latents=None,
+    ):
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+        # VAE applies 8x compression on images but we must also account for packing which requires
+        # latent height and width to be divisible by 2.
+        height = 2 * (int(height) // (self.vae_scale_factor * 2))
+        width = 2 * (int(width) // (self.vae_scale_factor * 2))
+
+        shape = (batch_size, 1, num_channels_latents, height, width)
+
+        # If image is [B,C,H,W] -> add T=1. If it's already [B,C,T,H,W], leave it.
+        if image.dim() == 4:
+            image = image.unsqueeze(2)
+        elif image.dim() != 5:
+            raise ValueError(f"Expected image dims 4 or 5, got {image.dim()}.")
+
+        if latents is not None:
+            return latents.to(device=device, dtype=dtype)
+
+        image = image.to(device=device, dtype=dtype)
+        if image.shape[1] != self.latent_channels:
+            image_latents = self._encode_vae_image(image=image, generator=generator)  # [B,z,1,H',W']
+        else:
+            image_latents = image
+        if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
+            # expand init_latents for batch_size
+            additional_image_per_prompt = batch_size // image_latents.shape[0]
+            image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
+        elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
+            raise ValueError(
+                f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
+            )
+        else:
+            image_latents = torch.cat([image_latents], dim=0)
+
+        image_latents = image_latents.transpose(1, 2)  # [B,1,z,H',W']
+
+        if latents is None:
+            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+            latents = self.scheduler.scale_noise(image_latents, timestep, noise)
+        else:
+            noise = latents.to(device)
+            latents = noise
+
+        noise = self._pack_latents(noise, batch_size, num_channels_latents, height, width)
+        image_latents = self._pack_latents(image_latents, batch_size, num_channels_latents, height, width)
+        latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
+
+        return latents, noise, image_latents
+
+    def prepare_mask_latents(
+        self,
+        mask,
+        masked_image,
+        batch_size,
+        num_channels_latents,
+        num_images_per_prompt,
+        height,
+        width,
+        dtype,
+        device,
+        generator,
+    ):
+        # VAE applies 8x compression on images but we must also account for packing which requires
+        # latent height and width to be divisible by 2.
+        height = 2 * (int(height) // (self.vae_scale_factor * 2))
+        width = 2 * (int(width) // (self.vae_scale_factor * 2))
+        # resize the mask to latents shape as we concatenate the mask to the latents
+        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
+        # and half precision
+        mask = torch.nn.functional.interpolate(mask, size=(height, width))
+        mask = mask.to(device=device, dtype=dtype)
+
+        batch_size = batch_size * num_images_per_prompt
+
+        if masked_image.dim() == 4:
+            masked_image = masked_image.unsqueeze(2)
+        elif masked_image.dim() != 5:
+            raise ValueError(f"Expected image dims 4 or 5, got {masked_image.dim()}.")
+
+        masked_image = masked_image.to(device=device, dtype=dtype)
+
+        if masked_image.shape[1] == self.latent_channels:
+            masked_image_latents = masked_image
+        else:
+            masked_image_latents = self._encode_vae_image(image=masked_image, generator=generator)
+
+        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
+        if mask.shape[0] < batch_size:
+            if not batch_size % mask.shape[0] == 0:
+                raise ValueError(
+                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
+                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
+                    " of masks that you pass is divisible by the total requested batch size."
+                )
+            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
+        if masked_image_latents.shape[0] < batch_size:
+            if not batch_size % masked_image_latents.shape[0] == 0:
+                raise ValueError(
+                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
+                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
+                    " Make sure the number of images that you pass is divisible by the total requested batch size."
+                )
+            masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1, 1)
+
+        # aligning device to prevent device errors when concating it with the latent model input
+        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
+
+        masked_image_latents = self._pack_latents(
+            masked_image_latents,
+            batch_size,
+            num_channels_latents,
+            height,
+            width,
+        )
+        mask = self._pack_latents(
+            mask.repeat(1, num_channels_latents, 1, 1),
+            batch_size,
+            num_channels_latents,
+            height,
+            width,
+        )
+
+        return mask, masked_image_latents
+
+    @property
+    def guidance_scale(self):
+        return self._guidance_scale
+
+    @property
+    def attention_kwargs(self):
+        return self._attention_kwargs
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @property
+    def current_timestep(self):
+        return self._current_timestep
+
+    @property
+    def interrupt(self):
+        return self._interrupt
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Union[str, List[str]] = None,
+        negative_prompt: Union[str, List[str]] = None,
+        true_cfg_scale: float = 4.0,
+        image: PipelineImageInput = None,
+        mask_image: PipelineImageInput = None,
+        masked_image_latents: PipelineImageInput = None,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        padding_mask_crop: Optional[int] = None,
+        strength: float = 0.6,
+        num_inference_steps: int = 50,
+        sigmas: Optional[List[float]] = None,
+        guidance_scale: float = 1.0,
+        num_images_per_prompt: int = 1,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        prompt_embeds_mask: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds_mask: Optional[torch.Tensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        attention_kwargs: Optional[Dict[str, Any]] = None,
+        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        max_sequence_length: int = 512,
+    ):
+        r"""
+        Function invoked when calling the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
+                instead.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
+                not greater than `1`).
+            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+                `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
+                numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
+                or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
+                list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image
+                latents as `image`, but if passing latents directly it is not encoded again.
+            true_cfg_scale (`float`, *optional*, defaults to 1.0):
+                When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance.
+            mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+                `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask
+                are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
+                single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one
+                color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B,
+                H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W,
+                1)`, or `(H, W)`.
+            mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`):
+                `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask
+                latents tensor will ge generated by `mask_image`.
+            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+                The height in pixels of the generated image. This is set to 1024 by default for the best results.
+            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+                The width in pixels of the generated image. This is set to 1024 by default for the best results.
+            padding_mask_crop (`int`, *optional*, defaults to `None`):
+                The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to
+                image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region
+                with the same aspect ration of the image and contains all masked area, and then expand that area based
+                on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before
+                resizing to the original image size for inpainting. This is useful when the masked area is small while
+                the image is large and contain information irrelevant for inpainting, such as background.
+            strength (`float`, *optional*, defaults to 1.0):
+                Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+                essentially ignores `image`.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+                will be used.
+            guidance_scale (`float`, *optional*, defaults to 3.5):
+                Guidance scale as defined in [Classifier-Free Diffusion
+                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
+                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
+                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
+                the text `prompt`, usually at the expense of lower image quality.
+
+                This parameter in the pipeline is there to support future guidance-distilled models when they come up.
+                Note that passing `guidance_scale` to the pipeline is ineffective. To enable classifier-free guidance,
+                please pass `true_cfg_scale` and `negative_prompt` (even an empty negative prompt like " ") should
+                enable classifier-free guidance computations.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generate image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple.
+            attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            callback_on_step_end (`Callable`, *optional*):
+                A function that calls at the end of each denoising steps during the inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`:
+            [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
+            returning a tuple, the first element is a list with the generated images.
+        """
+
+        height = height or self.default_sample_size * self.vae_scale_factor
+        width = width or self.default_sample_size * self.vae_scale_factor
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            image,
+            mask_image,
+            strength,
+            height,
+            width,
+            output_type=output_type,
+            negative_prompt=negative_prompt,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            prompt_embeds_mask=prompt_embeds_mask,
+            negative_prompt_embeds_mask=negative_prompt_embeds_mask,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            padding_mask_crop=padding_mask_crop,
+            max_sequence_length=max_sequence_length,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._attention_kwargs = attention_kwargs
+        self._current_timestep = None
+        self._interrupt = False
+
+        # 2. Preprocess image
+        if padding_mask_crop is not None:
+            crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
+            resize_mode = "fill"
+        else:
+            crops_coords = None
+            resize_mode = "default"
+
+        original_image = image
+        init_image = self.image_processor.preprocess(
+            image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
+        )
+        init_image = init_image.to(dtype=torch.float32)
+
+        # 3. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self._execution_device
+
+        has_neg_prompt = negative_prompt is not None or (
+            negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None
+        )
+        do_true_cfg = true_cfg_scale > 1 and has_neg_prompt
+        prompt_embeds, prompt_embeds_mask = self.encode_prompt(
+            prompt=prompt,
+            prompt_embeds=prompt_embeds,
+            prompt_embeds_mask=prompt_embeds_mask,
+            device=device,
+            num_images_per_prompt=num_images_per_prompt,
+            max_sequence_length=max_sequence_length,
+        )
+        if do_true_cfg:
+            negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt(
+                prompt=negative_prompt,
+                prompt_embeds=negative_prompt_embeds,
+                prompt_embeds_mask=negative_prompt_embeds_mask,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+            )
+
+        # 4. Prepare timesteps
+        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
+        image_seq_len = (int(height) // self.vae_scale_factor // 2) * (int(width) // self.vae_scale_factor // 2)
+        mu = calculate_shift(
+            image_seq_len,
+            self.scheduler.config.get("base_image_seq_len", 256),
+            self.scheduler.config.get("max_image_seq_len", 4096),
+            self.scheduler.config.get("base_shift", 0.5),
+            self.scheduler.config.get("max_shift", 1.15),
+        )
+        timesteps, num_inference_steps = retrieve_timesteps(
+            self.scheduler,
+            num_inference_steps,
+            device,
+            sigmas=sigmas,
+            mu=mu,
+        )
+        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
+
+        if num_inference_steps < 1:
+            raise ValueError(
+                f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
+                f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
+            )
+        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
+
+        # 5. Prepare latent variables
+        num_channels_latents = self.transformer.config.in_channels // 4
+
+        latents, noise, image_latents = self.prepare_latents(
+            init_image,
+            latent_timestep,
+            batch_size * num_images_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+
+        mask_condition = self.mask_processor.preprocess(
+            mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
+        )
+
+        if masked_image_latents is None:
+            masked_image = init_image * (mask_condition < 0.5)
+        else:
+            masked_image = masked_image_latents
+
+        mask, masked_image_latents = self.prepare_mask_latents(
+            mask_condition,
+            masked_image,
+            batch_size,
+            num_channels_latents,
+            num_images_per_prompt,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+        )
+
+        img_shapes = [[(1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2)]] * batch_size
+
+        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+        self._num_timesteps = len(timesteps)
+
+        # handle guidance
+        if self.transformer.config.guidance_embeds:
+            guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
+            guidance = guidance.expand(latents.shape[0])
+        else:
+            guidance = None
+
+        if self.attention_kwargs is None:
+            self._attention_kwargs = {}
+
+        txt_seq_lens = prompt_embeds_mask.sum(dim=1).tolist() if prompt_embeds_mask is not None else None
+        negative_txt_seq_lens = (
+            negative_prompt_embeds_mask.sum(dim=1).tolist() if negative_prompt_embeds_mask is not None else None
+        )
+
+        # 6. Denoising loop
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                if self.interrupt:
+                    continue
+
+                self._current_timestep = t
+                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+                timestep = t.expand(latents.shape[0]).to(latents.dtype)
+                with self.transformer.cache_context("cond"):
+                    noise_pred = self.transformer(
+                        hidden_states=latents,
+                        timestep=timestep / 1000,
+                        guidance=guidance,
+                        encoder_hidden_states_mask=prompt_embeds_mask,
+                        encoder_hidden_states=prompt_embeds,
+                        img_shapes=img_shapes,
+                        txt_seq_lens=txt_seq_lens,
+                        attention_kwargs=self.attention_kwargs,
+                        return_dict=False,
+                    )[0]
+
+                if do_true_cfg:
+                    with self.transformer.cache_context("uncond"):
+                        neg_noise_pred = self.transformer(
+                            hidden_states=latents,
+                            timestep=timestep / 1000,
+                            guidance=guidance,
+                            encoder_hidden_states_mask=negative_prompt_embeds_mask,
+                            encoder_hidden_states=negative_prompt_embeds,
+                            img_shapes=img_shapes,
+                            txt_seq_lens=negative_txt_seq_lens,
+                            attention_kwargs=self.attention_kwargs,
+                            return_dict=False,
+                        )[0]
+                    comb_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)
+
+                    cond_norm = torch.norm(noise_pred, dim=-1, keepdim=True)
+                    noise_norm = torch.norm(comb_pred, dim=-1, keepdim=True)
+                    noise_pred = comb_pred * (cond_norm / noise_norm)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents_dtype = latents.dtype
+                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+                # for 64 channel transformer only.
+                init_latents_proper = image_latents
+                init_mask = mask
+
+                if i < len(timesteps) - 1:
+                    noise_timestep = timesteps[i + 1]
+                    init_latents_proper = self.scheduler.scale_noise(
+                        init_latents_proper, torch.tensor([noise_timestep]), noise
+                    )
+
+                latents = (1 - init_mask) * init_latents_proper + init_mask * latents
+
+                if latents.dtype != latents_dtype:
+                    if torch.backends.mps.is_available():
+                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                        latents = latents.to(latents_dtype)
+
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+
+                if XLA_AVAILABLE:
+                    xm.mark_step()
+
+        self._current_timestep = None
+        if output_type == "latent":
+            image = latents
+        else:
+            latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+            latents = latents.to(self.vae.dtype)
+            latents_mean = (
+                torch.tensor(self.vae.config.latents_mean)
+                .view(1, self.vae.config.z_dim, 1, 1, 1)
+                .to(latents.device, latents.dtype)
+            )
+            latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
+                latents.device, latents.dtype
+            )
+
+            latents = latents / latents_std + latents_mean
+            image = self.vae.decode(latents, return_dict=False)[0][:, :, 0]
+            image = self.image_processor.postprocess(image, output_type=output_type)
+
+            if padding_mask_crop is not None:
+                image = [
+                    self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image
+                ]
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image,)
+
+        return QwenImagePipelineOutput(images=image)