diffusers 0.34.0__py3-none-any.whl → 0.35.1__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- diffusers/__init__.py +98 -1
- diffusers/callbacks.py +35 -0
- diffusers/commands/custom_blocks.py +134 -0
- diffusers/commands/diffusers_cli.py +2 -0
- diffusers/commands/fp16_safetensors.py +1 -1
- diffusers/configuration_utils.py +11 -2
- diffusers/dependency_versions_table.py +3 -3
- diffusers/guiders/__init__.py +41 -0
- diffusers/guiders/adaptive_projected_guidance.py +188 -0
- diffusers/guiders/auto_guidance.py +190 -0
- diffusers/guiders/classifier_free_guidance.py +141 -0
- diffusers/guiders/classifier_free_zero_star_guidance.py +152 -0
- diffusers/guiders/frequency_decoupled_guidance.py +327 -0
- diffusers/guiders/guider_utils.py +309 -0
- diffusers/guiders/perturbed_attention_guidance.py +271 -0
- diffusers/guiders/skip_layer_guidance.py +262 -0
- diffusers/guiders/smoothed_energy_guidance.py +251 -0
- diffusers/guiders/tangential_classifier_free_guidance.py +143 -0
- diffusers/hooks/__init__.py +17 -0
- diffusers/hooks/_common.py +56 -0
- diffusers/hooks/_helpers.py +293 -0
- diffusers/hooks/faster_cache.py +7 -6
- diffusers/hooks/first_block_cache.py +259 -0
- diffusers/hooks/group_offloading.py +292 -286
- diffusers/hooks/hooks.py +56 -1
- diffusers/hooks/layer_skip.py +263 -0
- diffusers/hooks/layerwise_casting.py +2 -7
- diffusers/hooks/pyramid_attention_broadcast.py +14 -11
- diffusers/hooks/smoothed_energy_guidance_utils.py +167 -0
- diffusers/hooks/utils.py +43 -0
- diffusers/loaders/__init__.py +6 -0
- diffusers/loaders/ip_adapter.py +255 -4
- diffusers/loaders/lora_base.py +63 -30
- diffusers/loaders/lora_conversion_utils.py +434 -53
- diffusers/loaders/lora_pipeline.py +834 -37
- diffusers/loaders/peft.py +28 -5
- diffusers/loaders/single_file_model.py +44 -11
- diffusers/loaders/single_file_utils.py +170 -2
- diffusers/loaders/transformer_flux.py +9 -10
- diffusers/loaders/transformer_sd3.py +6 -1
- diffusers/loaders/unet.py +22 -5
- diffusers/loaders/unet_loader_utils.py +5 -2
- diffusers/models/__init__.py +8 -0
- diffusers/models/attention.py +484 -3
- diffusers/models/attention_dispatch.py +1218 -0
- diffusers/models/attention_processor.py +105 -663
- diffusers/models/auto_model.py +2 -2
- diffusers/models/autoencoders/__init__.py +1 -0
- diffusers/models/autoencoders/autoencoder_dc.py +14 -1
- diffusers/models/autoencoders/autoencoder_kl.py +1 -1
- diffusers/models/autoencoders/autoencoder_kl_cosmos.py +3 -1
- diffusers/models/autoencoders/autoencoder_kl_qwenimage.py +1070 -0
- diffusers/models/autoencoders/autoencoder_kl_wan.py +370 -40
- diffusers/models/cache_utils.py +31 -9
- diffusers/models/controlnets/controlnet_flux.py +5 -5
- diffusers/models/controlnets/controlnet_union.py +4 -4
- diffusers/models/embeddings.py +26 -34
- diffusers/models/model_loading_utils.py +233 -1
- diffusers/models/modeling_flax_utils.py +1 -2
- diffusers/models/modeling_utils.py +159 -94
- diffusers/models/transformers/__init__.py +2 -0
- diffusers/models/transformers/transformer_chroma.py +16 -117
- diffusers/models/transformers/transformer_cogview4.py +36 -2
- diffusers/models/transformers/transformer_cosmos.py +11 -4
- diffusers/models/transformers/transformer_flux.py +372 -132
- diffusers/models/transformers/transformer_hunyuan_video.py +6 -0
- diffusers/models/transformers/transformer_ltx.py +104 -23
- diffusers/models/transformers/transformer_qwenimage.py +645 -0
- diffusers/models/transformers/transformer_skyreels_v2.py +607 -0
- diffusers/models/transformers/transformer_wan.py +298 -85
- diffusers/models/transformers/transformer_wan_vace.py +15 -21
- diffusers/models/unets/unet_2d_condition.py +2 -1
- diffusers/modular_pipelines/__init__.py +83 -0
- diffusers/modular_pipelines/components_manager.py +1068 -0
- diffusers/modular_pipelines/flux/__init__.py +66 -0
- diffusers/modular_pipelines/flux/before_denoise.py +689 -0
- diffusers/modular_pipelines/flux/decoders.py +109 -0
- diffusers/modular_pipelines/flux/denoise.py +227 -0
- diffusers/modular_pipelines/flux/encoders.py +412 -0
- diffusers/modular_pipelines/flux/modular_blocks.py +181 -0
- diffusers/modular_pipelines/flux/modular_pipeline.py +59 -0
- diffusers/modular_pipelines/modular_pipeline.py +2446 -0
- diffusers/modular_pipelines/modular_pipeline_utils.py +672 -0
- diffusers/modular_pipelines/node_utils.py +665 -0
- diffusers/modular_pipelines/stable_diffusion_xl/__init__.py +77 -0
- diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +1874 -0
- diffusers/modular_pipelines/stable_diffusion_xl/decoders.py +208 -0
- diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +771 -0
- diffusers/modular_pipelines/stable_diffusion_xl/encoders.py +887 -0
- diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py +380 -0
- diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py +365 -0
- diffusers/modular_pipelines/wan/__init__.py +66 -0
- diffusers/modular_pipelines/wan/before_denoise.py +365 -0
- diffusers/modular_pipelines/wan/decoders.py +105 -0
- diffusers/modular_pipelines/wan/denoise.py +261 -0
- diffusers/modular_pipelines/wan/encoders.py +242 -0
- diffusers/modular_pipelines/wan/modular_blocks.py +144 -0
- diffusers/modular_pipelines/wan/modular_pipeline.py +90 -0
- diffusers/pipelines/__init__.py +31 -0
- diffusers/pipelines/audioldm2/pipeline_audioldm2.py +2 -3
- diffusers/pipelines/auto_pipeline.py +17 -13
- diffusers/pipelines/chroma/pipeline_chroma.py +5 -5
- diffusers/pipelines/chroma/pipeline_chroma_img2img.py +5 -5
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +9 -8
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +9 -8
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +10 -9
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +9 -8
- diffusers/pipelines/cogview4/pipeline_cogview4.py +16 -15
- diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +3 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +212 -93
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +7 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +194 -92
- diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +1 -1
- diffusers/pipelines/dit/pipeline_dit.py +3 -1
- diffusers/pipelines/flux/__init__.py +4 -0
- diffusers/pipelines/flux/pipeline_flux.py +34 -26
- diffusers/pipelines/flux/pipeline_flux_control.py +8 -8
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_fill.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_img2img.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1 -1
- diffusers/pipelines/flux/pipeline_flux_kontext.py +1134 -0
- diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +1460 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +1 -1
- diffusers/pipelines/flux/pipeline_output.py +6 -4
- diffusers/pipelines/hidream_image/pipeline_hidream_image.py +5 -5
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +25 -24
- diffusers/pipelines/ltx/pipeline_ltx.py +13 -12
- diffusers/pipelines/ltx/pipeline_ltx_condition.py +10 -9
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +13 -12
- diffusers/pipelines/mochi/pipeline_mochi.py +9 -8
- diffusers/pipelines/pipeline_flax_utils.py +2 -2
- diffusers/pipelines/pipeline_loading_utils.py +24 -2
- diffusers/pipelines/pipeline_utils.py +22 -15
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +3 -1
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +20 -0
- diffusers/pipelines/qwenimage/__init__.py +55 -0
- diffusers/pipelines/qwenimage/pipeline_output.py +21 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage.py +726 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +849 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +829 -0
- diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +1015 -0
- diffusers/pipelines/sana/pipeline_sana_sprint.py +5 -5
- diffusers/pipelines/skyreels_v2/__init__.py +59 -0
- diffusers/pipelines/skyreels_v2/pipeline_output.py +20 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py +610 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py +978 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_i2v.py +1059 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_v2v.py +1063 -0
- diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_i2v.py +745 -0
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +2 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +2 -1
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +6 -5
- diffusers/pipelines/wan/pipeline_wan.py +78 -20
- diffusers/pipelines/wan/pipeline_wan_i2v.py +112 -32
- diffusers/pipelines/wan/pipeline_wan_vace.py +1 -2
- diffusers/quantizers/__init__.py +1 -177
- diffusers/quantizers/base.py +11 -0
- diffusers/quantizers/gguf/utils.py +92 -3
- diffusers/quantizers/pipe_quant_config.py +202 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +26 -0
- diffusers/schedulers/scheduling_deis_multistep.py +8 -1
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +6 -0
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +6 -0
- diffusers/schedulers/scheduling_scm.py +0 -1
- diffusers/schedulers/scheduling_unipc_multistep.py +10 -1
- diffusers/schedulers/scheduling_utils.py +2 -2
- diffusers/schedulers/scheduling_utils_flax.py +1 -1
- diffusers/training_utils.py +78 -0
- diffusers/utils/__init__.py +10 -0
- diffusers/utils/constants.py +4 -0
- diffusers/utils/dummy_pt_objects.py +312 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +255 -0
- diffusers/utils/dynamic_modules_utils.py +84 -25
- diffusers/utils/hub_utils.py +33 -17
- diffusers/utils/import_utils.py +70 -0
- diffusers/utils/peft_utils.py +11 -8
- diffusers/utils/testing_utils.py +136 -10
- diffusers/utils/torch_utils.py +18 -0
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/METADATA +6 -6
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/RECORD +191 -127
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/LICENSE +0 -0
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/WHEEL +0 -0
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/entry_points.txt +0 -0
- {diffusers-0.34.0.dist-info → diffusers-0.35.1.dist-info}/top_level.txt +0 -0
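Among the headline additions above are several new user-facing pipelines. As a quick orientation, here is a hedged sketch of driving one of them, the new Flux Kontext editing pipeline (`pipeline_flux_kontext.py`); the checkpoint id `black-forest-labs/FLUX.1-Kontext-dev` and the exact call pattern come from the upstream diffusers documentation, not from this diff, so treat them as assumptions.

```python
# Hedged sketch: image editing with the new FluxKontextPipeline (added in 0.35.x).
# The checkpoint id and argument names are assumptions from upstream docs, not this diff.
import torch

from diffusers import FluxKontextPipeline
from diffusers.utils import load_image

pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
).to("cuda")

input_image = load_image("https://example.com/scene.png")  # any RGB image
edited = pipe(
    image=input_image,
    prompt="Turn the sky into a deep sunset orange",
    guidance_scale=2.5,
).images[0]
edited.save("kontext_edit.png")
```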
diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py (new file)
@@ -0,0 +1,365 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import PIL
+import torch
+
+from ...image_processor import PipelineImageInput
+from ...loaders import ModularIPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
+from ...pipelines.pipeline_utils import StableDiffusionMixin
+from ...pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
+from ...utils import logging
+from ..modular_pipeline import ModularPipeline
+from ..modular_pipeline_utils import InputParam, OutputParam
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+# YiYi TODO: move to a different file? stable_diffusion_xl_module should have its own folder?
+# YiYi Notes: model specific components:
+## (1) it should inherit from ModularPipeline
+## (2) acts like a container that holds components and configs
+## (3) define default config (related to components), e.g. default_sample_size, vae_scale_factor, num_channels_unet, num_channels_latents
+## (4) inherit from model-specic loader class (e.g. StableDiffusionXLLoraLoaderMixin)
+## (5) how to use together with Components_manager?
+class StableDiffusionXLModularPipeline(
+    ModularPipeline,
+    StableDiffusionMixin,
+    TextualInversionLoaderMixin,
+    StableDiffusionXLLoraLoaderMixin,
+    ModularIPAdapterMixin,
+):
+    """
+    A ModularPipeline for Stable Diffusion XL.
+
+    <Tip warning={true}>
+
+    This is an experimental feature and is likely to change in the future.
+
+    </Tip>
+    """
+
+    @property
+    def default_height(self):
+        return self.default_sample_size * self.vae_scale_factor
+
+    @property
+    def default_width(self):
+        return self.default_sample_size * self.vae_scale_factor
+
+    @property
+    def default_sample_size(self):
+        default_sample_size = 128
+        if hasattr(self, "unet") and self.unet is not None:
+            default_sample_size = self.unet.config.sample_size
+        return default_sample_size
+
+    @property
+    def vae_scale_factor(self):
+        vae_scale_factor = 8
+        if hasattr(self, "vae") and self.vae is not None:
+            vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        return vae_scale_factor
+
+    @property
+    def num_channels_unet(self):
+        num_channels_unet = 4
+        if hasattr(self, "unet") and self.unet is not None:
+            num_channels_unet = self.unet.config.in_channels
+        return num_channels_unet
+
+    @property
+    def num_channels_latents(self):
+        num_channels_latents = 4
+        if hasattr(self, "vae") and self.vae is not None:
+            num_channels_latents = self.vae.config.latent_channels
+        return num_channels_latents
+
+
+# YiYi/Sayak TODO: not used yet, maintain a list of schema that can be used across all pipeline blocks
+# auto_docstring
+SDXL_INPUTS_SCHEMA = {
+    "prompt": InputParam(
+        "prompt", type_hint=Union[str, List[str]], description="The prompt or prompts to guide the image generation"
+    ),
+    "prompt_2": InputParam(
+        "prompt_2",
+        type_hint=Union[str, List[str]],
+        description="The prompt or prompts to be sent to the tokenizer_2 and text_encoder_2",
+    ),
+    "negative_prompt": InputParam(
+        "negative_prompt",
+        type_hint=Union[str, List[str]],
+        description="The prompt or prompts not to guide the image generation",
+    ),
+    "negative_prompt_2": InputParam(
+        "negative_prompt_2",
+        type_hint=Union[str, List[str]],
+        description="The negative prompt or prompts for text_encoder_2",
+    ),
+    "cross_attention_kwargs": InputParam(
+        "cross_attention_kwargs",
+        type_hint=Optional[dict],
+        description="Kwargs dictionary passed to the AttentionProcessor",
+    ),
+    "clip_skip": InputParam(
+        "clip_skip", type_hint=Optional[int], description="Number of layers to skip in CLIP text encoder"
+    ),
+    "image": InputParam(
+        "image",
+        type_hint=PipelineImageInput,
+        required=True,
+        description="The image(s) to modify for img2img or inpainting",
+    ),
+    "mask_image": InputParam(
+        "mask_image",
+        type_hint=PipelineImageInput,
+        required=True,
+        description="Mask image for inpainting, white pixels will be repainted",
+    ),
+    "generator": InputParam(
+        "generator",
+        type_hint=Optional[Union[torch.Generator, List[torch.Generator]]],
+        description="Generator(s) for deterministic generation",
+    ),
+    "height": InputParam("height", type_hint=Optional[int], description="Height in pixels of the generated image"),
+    "width": InputParam("width", type_hint=Optional[int], description="Width in pixels of the generated image"),
+    "num_images_per_prompt": InputParam(
+        "num_images_per_prompt", type_hint=int, default=1, description="Number of images to generate per prompt"
+    ),
+    "num_inference_steps": InputParam(
+        "num_inference_steps", type_hint=int, default=50, description="Number of denoising steps"
+    ),
+    "timesteps": InputParam(
+        "timesteps", type_hint=Optional[torch.Tensor], description="Custom timesteps for the denoising process"
+    ),
+    "sigmas": InputParam(
+        "sigmas", type_hint=Optional[torch.Tensor], description="Custom sigmas for the denoising process"
+    ),
+    "denoising_end": InputParam(
+        "denoising_end",
+        type_hint=Optional[float],
+        description="Fraction of denoising process to complete before termination",
+    ),
+    # YiYi Notes: img2img defaults to 0.3, inpainting defaults to 0.9999
+    "strength": InputParam(
+        "strength", type_hint=float, default=0.3, description="How much to transform the reference image"
+    ),
+    "denoising_start": InputParam(
+        "denoising_start", type_hint=Optional[float], description="Starting point of the denoising process"
+    ),
+    "latents": InputParam(
+        "latents", type_hint=Optional[torch.Tensor], description="Pre-generated noisy latents for image generation"
+    ),
+    "padding_mask_crop": InputParam(
+        "padding_mask_crop",
+        type_hint=Optional[Tuple[int, int]],
+        description="Size of margin in crop for image and mask",
+    ),
+    "original_size": InputParam(
+        "original_size",
+        type_hint=Optional[Tuple[int, int]],
+        description="Original size of the image for SDXL's micro-conditioning",
+    ),
+    "target_size": InputParam(
+        "target_size", type_hint=Optional[Tuple[int, int]], description="Target size for SDXL's micro-conditioning"
+    ),
+    "negative_original_size": InputParam(
+        "negative_original_size",
+        type_hint=Optional[Tuple[int, int]],
+        description="Negative conditioning based on image resolution",
+    ),
+    "negative_target_size": InputParam(
+        "negative_target_size",
+        type_hint=Optional[Tuple[int, int]],
+        description="Negative conditioning based on target resolution",
+    ),
+    "crops_coords_top_left": InputParam(
+        "crops_coords_top_left",
+        type_hint=Tuple[int, int],
+        default=(0, 0),
+        description="Top-left coordinates for SDXL's micro-conditioning",
+    ),
+    "negative_crops_coords_top_left": InputParam(
+        "negative_crops_coords_top_left",
+        type_hint=Tuple[int, int],
+        default=(0, 0),
+        description="Negative conditioning crop coordinates",
+    ),
+    "aesthetic_score": InputParam(
+        "aesthetic_score", type_hint=float, default=6.0, description="Simulates aesthetic score of generated image"
+    ),
+    "negative_aesthetic_score": InputParam(
+        "negative_aesthetic_score", type_hint=float, default=2.0, description="Simulates negative aesthetic score"
+    ),
+    "eta": InputParam("eta", type_hint=float, default=0.0, description="Parameter η in the DDIM paper"),
+    "output_type": InputParam(
+        "output_type", type_hint=str, default="pil", description="Output format (pil/tensor/np.array)"
+    ),
+    "ip_adapter_image": InputParam(
+        "ip_adapter_image",
+        type_hint=PipelineImageInput,
+        required=True,
+        description="Image(s) to be used as IP adapter",
+    ),
+    "control_image": InputParam(
+        "control_image", type_hint=PipelineImageInput, required=True, description="ControlNet input condition"
+    ),
+    "control_guidance_start": InputParam(
+        "control_guidance_start",
+        type_hint=Union[float, List[float]],
+        default=0.0,
+        description="When ControlNet starts applying",
+    ),
+    "control_guidance_end": InputParam(
+        "control_guidance_end",
+        type_hint=Union[float, List[float]],
+        default=1.0,
+        description="When ControlNet stops applying",
+    ),
+    "controlnet_conditioning_scale": InputParam(
+        "controlnet_conditioning_scale",
+        type_hint=Union[float, List[float]],
+        default=1.0,
+        description="Scale factor for ControlNet outputs",
+    ),
+    "guess_mode": InputParam(
+        "guess_mode",
+        type_hint=bool,
+        default=False,
+        description="Enables ControlNet encoder to recognize input without prompts",
+    ),
+    "control_mode": InputParam(
+        "control_mode", type_hint=List[int], required=True, description="Control mode for union controlnet"
+    ),
+    "prompt_embeds": InputParam(
+        "prompt_embeds",
+        type_hint=torch.Tensor,
+        required=True,
+        description="Text embeddings used to guide image generation",
+    ),
+    "negative_prompt_embeds": InputParam(
+        "negative_prompt_embeds", type_hint=torch.Tensor, description="Negative text embeddings"
+    ),
+    "pooled_prompt_embeds": InputParam(
+        "pooled_prompt_embeds", type_hint=torch.Tensor, required=True, description="Pooled text embeddings"
+    ),
+    "negative_pooled_prompt_embeds": InputParam(
+        "negative_pooled_prompt_embeds", type_hint=torch.Tensor, description="Negative pooled text embeddings"
+    ),
+    "batch_size": InputParam("batch_size", type_hint=int, required=True, description="Number of prompts"),
+    "dtype": InputParam("dtype", type_hint=torch.dtype, description="Data type of model tensor inputs"),
+    "preprocess_kwargs": InputParam(
+        "preprocess_kwargs", type_hint=Optional[dict], description="Kwargs for ImageProcessor"
+    ),
+    "latent_timestep": InputParam(
+        "latent_timestep", type_hint=torch.Tensor, required=True, description="Initial noise level timestep"
+    ),
+    "image_latents": InputParam(
+        "image_latents", type_hint=torch.Tensor, required=True, description="Latents representing reference image"
+    ),
+    "mask": InputParam("mask", type_hint=torch.Tensor, required=True, description="Mask for inpainting"),
+    "masked_image_latents": InputParam(
+        "masked_image_latents", type_hint=torch.Tensor, description="Masked image latents for inpainting"
+    ),
+    "add_time_ids": InputParam(
+        "add_time_ids", type_hint=torch.Tensor, required=True, description="Time ids for conditioning"
+    ),
+    "negative_add_time_ids": InputParam(
+        "negative_add_time_ids", type_hint=torch.Tensor, description="Negative time ids"
+    ),
+    "timestep_cond": InputParam("timestep_cond", type_hint=torch.Tensor, description="Timestep conditioning for LCM"),
+    "noise": InputParam("noise", type_hint=torch.Tensor, description="Noise added to image latents"),
+    "crops_coords": InputParam("crops_coords", type_hint=Optional[Tuple[int]], description="Crop coordinates"),
+    "ip_adapter_embeds": InputParam(
+        "ip_adapter_embeds", type_hint=List[torch.Tensor], description="Image embeddings for IP-Adapter"
+    ),
+    "negative_ip_adapter_embeds": InputParam(
+        "negative_ip_adapter_embeds",
+        type_hint=List[torch.Tensor],
+        description="Negative image embeddings for IP-Adapter",
+    ),
+    "images": InputParam(
+        "images",
+        type_hint=Union[List[PIL.Image.Image], List[torch.Tensor], List[np.array]],
+        required=True,
+        description="Generated images",
+    ),
+}
+
+
+SDXL_INTERMEDIATE_OUTPUTS_SCHEMA = {
+    "prompt_embeds": OutputParam(
+        "prompt_embeds", type_hint=torch.Tensor, description="Text embeddings used to guide image generation"
+    ),
+    "negative_prompt_embeds": OutputParam(
+        "negative_prompt_embeds", type_hint=torch.Tensor, description="Negative text embeddings"
+    ),
+    "pooled_prompt_embeds": OutputParam(
+        "pooled_prompt_embeds", type_hint=torch.Tensor, description="Pooled text embeddings"
+    ),
+    "negative_pooled_prompt_embeds": OutputParam(
+        "negative_pooled_prompt_embeds", type_hint=torch.Tensor, description="Negative pooled text embeddings"
+    ),
+    "batch_size": OutputParam("batch_size", type_hint=int, description="Number of prompts"),
+    "dtype": OutputParam("dtype", type_hint=torch.dtype, description="Data type of model tensor inputs"),
+    "image_latents": OutputParam(
+        "image_latents", type_hint=torch.Tensor, description="Latents representing reference image"
+    ),
+    "mask": OutputParam("mask", type_hint=torch.Tensor, description="Mask for inpainting"),
+    "masked_image_latents": OutputParam(
+        "masked_image_latents", type_hint=torch.Tensor, description="Masked image latents for inpainting"
+    ),
+    "crops_coords": OutputParam("crops_coords", type_hint=Optional[Tuple[int]], description="Crop coordinates"),
+    "timesteps": OutputParam("timesteps", type_hint=torch.Tensor, description="Timesteps for inference"),
+    "num_inference_steps": OutputParam("num_inference_steps", type_hint=int, description="Number of denoising steps"),
+    "latent_timestep": OutputParam(
+        "latent_timestep", type_hint=torch.Tensor, description="Initial noise level timestep"
+    ),
+    "add_time_ids": OutputParam("add_time_ids", type_hint=torch.Tensor, description="Time ids for conditioning"),
+    "negative_add_time_ids": OutputParam(
+        "negative_add_time_ids", type_hint=torch.Tensor, description="Negative time ids"
+    ),
+    "timestep_cond": OutputParam("timestep_cond", type_hint=torch.Tensor, description="Timestep conditioning for LCM"),
+    "latents": OutputParam("latents", type_hint=torch.Tensor, description="Denoised latents"),
+    "noise": OutputParam("noise", type_hint=torch.Tensor, description="Noise added to image latents"),
+    "ip_adapter_embeds": OutputParam(
+        "ip_adapter_embeds", type_hint=List[torch.Tensor], description="Image embeddings for IP-Adapter"
+    ),
+    "negative_ip_adapter_embeds": OutputParam(
+        "negative_ip_adapter_embeds",
+        type_hint=List[torch.Tensor],
+        description="Negative image embeddings for IP-Adapter",
+    ),
+    "images": OutputParam(
+        "images",
+        type_hint=Union[List[PIL.Image.Image], List[torch.Tensor], List[np.array]],
+        description="Generated images",
+    ),
+}
+
+
+SDXL_OUTPUTS_SCHEMA = {
+    "images": OutputParam(
+        "images",
+        type_hint=Union[
+            Tuple[Union[List[PIL.Image.Image], List[torch.Tensor], List[np.array]]], StableDiffusionXLPipelineOutput
+        ],
+        description="The final generated images",
+    )
+}
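The `SDXL_*_SCHEMA` dicts above are declarative metadata rather than executable pipeline logic: each entry records a parameter's name, type hint, default, and whether it is required. Below is a minimal sketch of how such a schema can be introspected; it assumes `InputParam` exposes its constructor arguments as same-named attributes, which this diff shows only as constructor calls.

```python
# Minimal sketch: introspecting the declarative InputParam schema style used above.
# Assumes InputParam stores its constructor arguments as same-named attributes;
# this diff only shows the constructor calls, so treat that as an assumption.
from typing import Optional

from diffusers.modular_pipelines.modular_pipeline_utils import InputParam

schema = {
    "num_inference_steps": InputParam(
        "num_inference_steps", type_hint=int, default=50, description="Number of denoising steps"
    ),
    "clip_skip": InputParam(
        "clip_skip", type_hint=Optional[int], description="Number of layers to skip in CLIP text encoder"
    ),
}

for name, param in schema.items():
    # default/required assumed to mirror the constructor kwargs
    print(f"{name}: default={param.default!r}, required={param.required}")
```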
diffusers/modular_pipelines/wan/__init__.py (new file)
@@ -0,0 +1,66 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["encoders"] = ["WanTextEncoderStep"]
+    _import_structure["modular_blocks"] = [
+        "ALL_BLOCKS",
+        "AUTO_BLOCKS",
+        "TEXT2VIDEO_BLOCKS",
+        "WanAutoBeforeDenoiseStep",
+        "WanAutoBlocks",
+        "WanAutoBlocks",
+        "WanAutoDecodeStep",
+        "WanAutoDenoiseStep",
+    ]
+    _import_structure["modular_pipeline"] = ["WanModularPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
+    else:
+        from .encoders import WanTextEncoderStep
+        from .modular_blocks import (
+            ALL_BLOCKS,
+            AUTO_BLOCKS,
+            TEXT2VIDEO_BLOCKS,
+            WanAutoBeforeDenoiseStep,
+            WanAutoBlocks,
+            WanAutoDecodeStep,
+            WanAutoDenoiseStep,
+        )
+        from .modular_pipeline import WanModularPipeline
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
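This `__init__.py` follows the lazy-import pattern used throughout diffusers: `_import_structure` declares what each submodule exports, and `_LazyModule` replaces the module object so the heavy imports run only on first attribute access (with dummy objects substituted when torch or transformers is missing). A small sketch of the observable effect, assuming both dependencies are installed:

```python
# Minimal sketch: with the _LazyModule pattern above, importing the package is
# cheap; the submodule loads only when an attribute is first accessed.
import diffusers.modular_pipelines.wan as wan

# Attribute access triggers the import of wan.modular_pipeline.
pipeline_cls = wan.WanModularPipeline
print(pipeline_cls.__module__)  # diffusers.modular_pipelines.wan.modular_pipeline
```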