diffusers 0.30.3__py3-none-any.whl → 0.32.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- diffusers/__init__.py +97 -4
- diffusers/callbacks.py +56 -3
- diffusers/configuration_utils.py +13 -1
- diffusers/image_processor.py +282 -71
- diffusers/loaders/__init__.py +24 -3
- diffusers/loaders/ip_adapter.py +543 -16
- diffusers/loaders/lora_base.py +138 -125
- diffusers/loaders/lora_conversion_utils.py +647 -0
- diffusers/loaders/lora_pipeline.py +2216 -230
- diffusers/loaders/peft.py +380 -0
- diffusers/loaders/single_file_model.py +71 -4
- diffusers/loaders/single_file_utils.py +597 -10
- diffusers/loaders/textual_inversion.py +5 -3
- diffusers/loaders/transformer_flux.py +181 -0
- diffusers/loaders/transformer_sd3.py +89 -0
- diffusers/loaders/unet.py +56 -12
- diffusers/models/__init__.py +49 -12
- diffusers/models/activations.py +22 -9
- diffusers/models/adapter.py +53 -53
- diffusers/models/attention.py +98 -13
- diffusers/models/attention_flax.py +1 -1
- diffusers/models/attention_processor.py +2160 -346
- diffusers/models/autoencoders/__init__.py +5 -0
- diffusers/models/autoencoders/autoencoder_dc.py +620 -0
- diffusers/models/autoencoders/autoencoder_kl.py +73 -12
- diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
- diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +213 -105
- diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
- diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
- diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +3 -10
- diffusers/models/autoencoders/autoencoder_tiny.py +4 -2
- diffusers/models/autoencoders/vae.py +18 -5
- diffusers/models/controlnet.py +47 -802
- diffusers/models/controlnet_flux.py +70 -0
- diffusers/models/controlnet_sd3.py +26 -376
- diffusers/models/controlnet_sparsectrl.py +46 -719
- diffusers/models/controlnets/__init__.py +23 -0
- diffusers/models/controlnets/controlnet.py +872 -0
- diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +5 -5
- diffusers/models/controlnets/controlnet_flux.py +536 -0
- diffusers/models/{controlnet_hunyuan.py → controlnets/controlnet_hunyuan.py} +7 -7
- diffusers/models/controlnets/controlnet_sd3.py +489 -0
- diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
- diffusers/models/controlnets/controlnet_union.py +832 -0
- diffusers/models/{controlnet_xs.py → controlnets/controlnet_xs.py} +14 -13
- diffusers/models/controlnets/multicontrolnet.py +183 -0
- diffusers/models/embeddings.py +996 -92
- diffusers/models/embeddings_flax.py +23 -9
- diffusers/models/model_loading_utils.py +264 -14
- diffusers/models/modeling_flax_utils.py +1 -1
- diffusers/models/modeling_utils.py +334 -51
- diffusers/models/normalization.py +157 -13
- diffusers/models/transformers/__init__.py +6 -0
- diffusers/models/transformers/auraflow_transformer_2d.py +3 -2
- diffusers/models/transformers/cogvideox_transformer_3d.py +69 -13
- diffusers/models/transformers/dit_transformer_2d.py +1 -1
- diffusers/models/transformers/latte_transformer_3d.py +4 -4
- diffusers/models/transformers/pixart_transformer_2d.py +10 -2
- diffusers/models/transformers/sana_transformer.py +488 -0
- diffusers/models/transformers/stable_audio_transformer.py +1 -1
- diffusers/models/transformers/transformer_2d.py +1 -1
- diffusers/models/transformers/transformer_allegro.py +422 -0
- diffusers/models/transformers/transformer_cogview3plus.py +386 -0
- diffusers/models/transformers/transformer_flux.py +189 -51
- diffusers/models/transformers/transformer_hunyuan_video.py +789 -0
- diffusers/models/transformers/transformer_ltx.py +469 -0
- diffusers/models/transformers/transformer_mochi.py +499 -0
- diffusers/models/transformers/transformer_sd3.py +112 -18
- diffusers/models/transformers/transformer_temporal.py +1 -1
- diffusers/models/unets/unet_1d_blocks.py +1 -1
- diffusers/models/unets/unet_2d.py +8 -1
- diffusers/models/unets/unet_2d_blocks.py +88 -21
- diffusers/models/unets/unet_2d_condition.py +9 -9
- diffusers/models/unets/unet_3d_blocks.py +9 -7
- diffusers/models/unets/unet_motion_model.py +46 -68
- diffusers/models/unets/unet_spatio_temporal_condition.py +23 -0
- diffusers/models/unets/unet_stable_cascade.py +2 -2
- diffusers/models/unets/uvit_2d.py +1 -1
- diffusers/models/upsampling.py +14 -6
- diffusers/pipelines/__init__.py +69 -6
- diffusers/pipelines/allegro/__init__.py +48 -0
- diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
- diffusers/pipelines/allegro/pipeline_output.py +23 -0
- diffusers/pipelines/animatediff/__init__.py +2 -0
- diffusers/pipelines/animatediff/pipeline_animatediff.py +45 -21
- diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +52 -22
- diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +18 -4
- diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +3 -1
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +104 -72
- diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +1341 -0
- diffusers/pipelines/audioldm2/modeling_audioldm2.py +3 -3
- diffusers/pipelines/aura_flow/pipeline_aura_flow.py +2 -9
- diffusers/pipelines/auto_pipeline.py +88 -10
- diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
- diffusers/pipelines/cogvideo/__init__.py +2 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox.py +80 -39
- diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +825 -0
- diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +108 -50
- diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +89 -50
- diffusers/pipelines/cogview3/__init__.py +47 -0
- diffusers/pipelines/cogview3/pipeline_cogview3plus.py +674 -0
- diffusers/pipelines/cogview3/pipeline_output.py +21 -0
- diffusers/pipelines/controlnet/__init__.py +86 -80
- diffusers/pipelines/controlnet/multicontrolnet.py +7 -178
- diffusers/pipelines/controlnet/pipeline_controlnet.py +20 -3
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +9 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +9 -2
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +37 -15
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +12 -4
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +9 -4
- diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
- diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
- diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +22 -4
- diffusers/pipelines/controlnet_sd3/__init__.py +4 -0
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +56 -20
- diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +1153 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +2 -2
- diffusers/pipelines/deepfloyd_if/pipeline_output.py +6 -5
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +16 -4
- diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +1 -1
- diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +32 -9
- diffusers/pipelines/flux/__init__.py +23 -1
- diffusers/pipelines/flux/modeling_flux.py +47 -0
- diffusers/pipelines/flux/pipeline_flux.py +256 -48
- diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
- diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
- diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet.py +1006 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +998 -0
- diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +1204 -0
- diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
- diffusers/pipelines/flux/pipeline_flux_img2img.py +856 -0
- diffusers/pipelines/flux/pipeline_flux_inpaint.py +1022 -0
- diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
- diffusers/pipelines/flux/pipeline_output.py +16 -0
- diffusers/pipelines/free_noise_utils.py +365 -5
- diffusers/pipelines/hunyuan_video/__init__.py +48 -0
- diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
- diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
- diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +20 -4
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +9 -9
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +2 -2
- diffusers/pipelines/kolors/pipeline_kolors.py +1 -1
- diffusers/pipelines/kolors/pipeline_kolors_img2img.py +14 -11
- diffusers/pipelines/kolors/text_encoder.py +2 -2
- diffusers/pipelines/kolors/tokenizer.py +4 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +1 -1
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +1 -1
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
- diffusers/pipelines/latte/pipeline_latte.py +2 -2
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +15 -3
- diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +15 -3
- diffusers/pipelines/ltx/__init__.py +50 -0
- diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
- diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
- diffusers/pipelines/ltx/pipeline_output.py +20 -0
- diffusers/pipelines/lumina/pipeline_lumina.py +3 -10
- diffusers/pipelines/mochi/__init__.py +48 -0
- diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
- diffusers/pipelines/mochi/pipeline_output.py +20 -0
- diffusers/pipelines/pag/__init__.py +13 -0
- diffusers/pipelines/pag/pag_utils.py +8 -2
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +2 -3
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1543 -0
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +3 -5
- diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1683 -0
- diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +22 -6
- diffusers/pipelines/pag/pipeline_pag_kolors.py +1 -1
- diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +7 -14
- diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
- diffusers/pipelines/pag/pipeline_pag_sd.py +18 -6
- diffusers/pipelines/pag/pipeline_pag_sd_3.py +18 -9
- diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
- diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +5 -1
- diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +1094 -0
- diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
- diffusers/pipelines/pag/pipeline_pag_sd_xl.py +18 -6
- diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +31 -16
- diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +42 -19
- diffusers/pipelines/pia/pipeline_pia.py +2 -0
- diffusers/pipelines/pipeline_flax_utils.py +1 -1
- diffusers/pipelines/pipeline_loading_utils.py +250 -31
- diffusers/pipelines/pipeline_utils.py +158 -186
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +7 -14
- diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +7 -14
- diffusers/pipelines/sana/__init__.py +47 -0
- diffusers/pipelines/sana/pipeline_output.py +21 -0
- diffusers/pipelines/sana/pipeline_sana.py +884 -0
- diffusers/pipelines/stable_audio/pipeline_stable_audio.py +12 -1
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +35 -3
- diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +46 -9
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +1 -1
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +241 -81
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +228 -23
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +82 -13
- diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +60 -11
- diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +11 -1
- diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +1 -1
- diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +16 -4
- diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +16 -4
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +16 -12
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +29 -22
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +29 -22
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +1 -1
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +1 -1
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +16 -4
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +15 -3
- diffusers/pipelines/unidiffuser/modeling_uvit.py +2 -2
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
- diffusers/quantizers/__init__.py +16 -0
- diffusers/quantizers/auto.py +139 -0
- diffusers/quantizers/base.py +233 -0
- diffusers/quantizers/bitsandbytes/__init__.py +2 -0
- diffusers/quantizers/bitsandbytes/bnb_quantizer.py +561 -0
- diffusers/quantizers/bitsandbytes/utils.py +306 -0
- diffusers/quantizers/gguf/__init__.py +1 -0
- diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
- diffusers/quantizers/gguf/utils.py +456 -0
- diffusers/quantizers/quantization_config.py +669 -0
- diffusers/quantizers/torchao/__init__.py +15 -0
- diffusers/quantizers/torchao/torchao_quantizer.py +285 -0
- diffusers/schedulers/scheduling_ddim.py +4 -1
- diffusers/schedulers/scheduling_ddim_cogvideox.py +4 -1
- diffusers/schedulers/scheduling_ddim_parallel.py +4 -1
- diffusers/schedulers/scheduling_ddpm.py +6 -7
- diffusers/schedulers/scheduling_ddpm_parallel.py +6 -7
- diffusers/schedulers/scheduling_deis_multistep.py +102 -6
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +113 -6
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +111 -5
- diffusers/schedulers/scheduling_dpmsolver_sde.py +125 -10
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +126 -7
- diffusers/schedulers/scheduling_edm_euler.py +8 -6
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +4 -1
- diffusers/schedulers/scheduling_euler_discrete.py +92 -7
- diffusers/schedulers/scheduling_flow_match_euler_discrete.py +153 -6
- diffusers/schedulers/scheduling_flow_match_heun_discrete.py +4 -5
- diffusers/schedulers/scheduling_heun_discrete.py +114 -8
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +116 -11
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +110 -8
- diffusers/schedulers/scheduling_lcm.py +2 -6
- diffusers/schedulers/scheduling_lms_discrete.py +76 -1
- diffusers/schedulers/scheduling_repaint.py +1 -1
- diffusers/schedulers/scheduling_sasolver.py +102 -6
- diffusers/schedulers/scheduling_tcd.py +2 -6
- diffusers/schedulers/scheduling_unclip.py +4 -1
- diffusers/schedulers/scheduling_unipc_multistep.py +127 -5
- diffusers/training_utils.py +63 -19
- diffusers/utils/__init__.py +7 -1
- diffusers/utils/constants.py +1 -0
- diffusers/utils/dummy_pt_objects.py +240 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +435 -0
- diffusers/utils/dynamic_modules_utils.py +3 -3
- diffusers/utils/hub_utils.py +44 -40
- diffusers/utils/import_utils.py +98 -8
- diffusers/utils/loading_utils.py +28 -4
- diffusers/utils/peft_utils.py +6 -3
- diffusers/utils/testing_utils.py +115 -1
- diffusers/utils/torch_utils.py +3 -0
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/METADATA +73 -72
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/RECORD +268 -193
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/WHEEL +1 -1
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/LICENSE +0 -0
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/entry_points.txt +0 -0
- {diffusers-0.30.3.dist-info → diffusers-0.32.0.dist-info}/top_level.txt +0 -0
diffusers/pipelines/flux/pipeline_flux_controlnet.py (new file)
@@ -0,0 +1,1006 @@
+# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from transformers import (
+    CLIPTextModel,
+    CLIPTokenizer,
+    T5EncoderModel,
+    T5TokenizerFast,
+)
+
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin
+from ...models.autoencoders import AutoencoderKL
+from ...models.controlnets.controlnet_flux import FluxControlNetModel, FluxMultiControlNetModel
+from ...models.transformers import FluxTransformer2DModel
+from ...schedulers import FlowMatchEulerDiscreteScheduler
+from ...utils import (
+    USE_PEFT_BACKEND,
+    is_torch_xla_available,
+    logging,
+    replace_example_docstring,
+    scale_lora_layers,
+    unscale_lora_layers,
+)
+from ...utils.torch_utils import randn_tensor
+from ..pipeline_utils import DiffusionPipeline
+from .pipeline_output import FluxPipelineOutput
+
+
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+
+    XLA_AVAILABLE = True
+else:
+    XLA_AVAILABLE = False
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import torch
+        >>> from diffusers.utils import load_image
+        >>> from diffusers import FluxControlNetPipeline
+        >>> from diffusers import FluxControlNetModel
+
+        >>> controlnet_model = "InstantX/FLUX.1-dev-controlnet-canny"
+        >>> controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
+        >>> pipe = FluxControlNetPipeline.from_pretrained(
+        ...     base_model, controlnet=controlnet, torch_dtype=torch.bfloat16
+        ... )
+        >>> pipe.to("cuda")
+        >>> control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")
+        >>> prompt = "A girl in city, 25 years old, cool, futuristic"
+        >>> image = pipe(
+        ...     prompt,
+        ...     control_image=control_image,
+        ...     control_guidance_start=0.2,
+        ...     control_guidance_end=0.8,
+        ...     controlnet_conditioning_scale=1.0,
+        ...     num_inference_steps=28,
+        ...     guidance_scale=3.5,
+        ... ).images[0]
+        >>> image.save("flux.png")
+        ```
+"""
+
+
+# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
+def calculate_shift(
+    image_seq_len,
+    base_seq_len: int = 256,
+    max_seq_len: int = 4096,
+    base_shift: float = 0.5,
+    max_shift: float = 1.16,
+):
+    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+    b = base_shift - m * base_seq_len
+    mu = image_seq_len * m + b
+    return mu
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+def retrieve_latents(
+    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+        return encoder_output.latent_dist.sample(generator)
+    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+        return encoder_output.latent_dist.mode()
+    elif hasattr(encoder_output, "latents"):
+        return encoder_output.latents
+    else:
+        raise AttributeError("Could not access latents of provided encoder_output")
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    r"""
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+            must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+            `num_inference_steps` and `sigmas` must be `None`.
+        sigmas (`List[float]`, *optional*):
+            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+            `num_inference_steps` and `timesteps` must be `None`.
+
+    Returns:
+        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+        second element is the number of inference steps.
+    """
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accept_sigmas:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" sigmas schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
+class FluxControlNetPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin):
+    r"""
+    The Flux pipeline for text-to-image generation.
+
+    Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
+
+    Args:
+        transformer ([`FluxTransformer2DModel`]):
+            Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
+        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+        text_encoder ([`CLIPTextModel`]):
+            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+        text_encoder_2 ([`T5EncoderModel`]):
+            [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
+            the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
+        tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
+        tokenizer_2 (`T5TokenizerFast`):
+            Second Tokenizer of class
+            [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
+    """
+
+    model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
+    _optional_components = []
+    _callback_tensor_inputs = ["latents", "prompt_embeds"]
+
+    def __init__(
+        self,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModel,
+        tokenizer: CLIPTokenizer,
+        text_encoder_2: T5EncoderModel,
+        tokenizer_2: T5TokenizerFast,
+        transformer: FluxTransformer2DModel,
+        controlnet: Union[
+            FluxControlNetModel, List[FluxControlNetModel], Tuple[FluxControlNetModel], FluxMultiControlNetModel
+        ],
+    ):
+        super().__init__()
+        if isinstance(controlnet, (list, tuple)):
+            controlnet = FluxMultiControlNetModel(controlnet)
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            text_encoder_2=text_encoder_2,
+            tokenizer=tokenizer,
+            tokenizer_2=tokenizer_2,
+            transformer=transformer,
+            scheduler=scheduler,
+            controlnet=controlnet,
+        )
+        self.vae_scale_factor = (
+            2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
+        )
+        # Flux latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible
+        # by the patch size. So the vae scale factor is multiplied by the patch size to account for this
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
+        self.tokenizer_max_length = (
+            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
+        )
+        self.default_sample_size = 128
+
+    def _get_t5_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]] = None,
+        num_images_per_prompt: int = 1,
+        max_sequence_length: int = 512,
+        device: Optional[torch.device] = None,
+        dtype: Optional[torch.dtype] = None,
+    ):
+        device = device or self._execution_device
+        dtype = dtype or self.text_encoder.dtype
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
+        text_inputs = self.tokenizer_2(
+            prompt,
+            padding="max_length",
+            max_length=max_sequence_length,
+            truncation=True,
+            return_length=False,
+            return_overflowing_tokens=False,
+            return_tensors="pt",
+        )
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
+
+        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+            removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+            logger.warning(
+                "The following part of your input was truncated because `max_sequence_length` is set to "
+                f" {max_sequence_length} tokens: {removed_text}"
+            )
+
+        prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0]
+
+        dtype = self.text_encoder_2.dtype
+        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+
+        _, seq_len, _ = prompt_embeds.shape
+
+        # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+        return prompt_embeds
+
+    def _get_clip_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]],
+        num_images_per_prompt: int = 1,
+        device: Optional[torch.device] = None,
+    ):
+        device = device or self._execution_device
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
+        text_inputs = self.tokenizer(
+            prompt,
+            padding="max_length",
+            max_length=self.tokenizer_max_length,
+            truncation=True,
+            return_overflowing_tokens=False,
+            return_length=False,
+            return_tensors="pt",
+        )
+
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+            logger.warning(
+                "The following part of your input was truncated because CLIP can only handle sequences up to"
+                f" {self.tokenizer_max_length} tokens: {removed_text}"
+            )
+        prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
+
+        # Use pooled output of CLIPTextModel
+        prompt_embeds = prompt_embeds.pooler_output
+        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+        # duplicate text embeddings for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
+        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)
+
+        return prompt_embeds
+
+    def encode_prompt(
+        self,
+        prompt: Union[str, List[str]],
+        prompt_2: Union[str, List[str]],
+        device: Optional[torch.device] = None,
+        num_images_per_prompt: int = 1,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        max_sequence_length: int = 512,
+        lora_scale: Optional[float] = None,
+    ):
+        r"""
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                prompt to be encoded
+            prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+                used in all text-encoders
+            device: (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            lora_scale (`float`, *optional*):
+                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
+        """
+        device = device or self._execution_device
+
+        # set lora scale so that monkey patched LoRA
+        # function of text encoder can correctly access it
+        if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
+            self._lora_scale = lora_scale
+
+            # dynamically adjust the LoRA scale
+            if self.text_encoder is not None and USE_PEFT_BACKEND:
+                scale_lora_layers(self.text_encoder, lora_scale)
+            if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
+                scale_lora_layers(self.text_encoder_2, lora_scale)
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt_embeds is None:
+            prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
+            # We only use the pooled prompt output from the CLIPTextModel
+            pooled_prompt_embeds = self._get_clip_prompt_embeds(
+                prompt=prompt,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+            )
+            prompt_embeds = self._get_t5_prompt_embeds(
+                prompt=prompt_2,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+                device=device,
+            )
+
+        if self.text_encoder is not None:
+            if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder, lora_scale)
+
+        if self.text_encoder_2 is not None:
+            if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder_2, lora_scale)
+
+        dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
+        text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)
+
+        return prompt_embeds, pooled_prompt_embeds, text_ids
+
+    def check_inputs(
+        self,
+        prompt,
+        prompt_2,
+        height,
+        width,
+        prompt_embeds=None,
+        pooled_prompt_embeds=None,
+        callback_on_step_end_tensor_inputs=None,
+        max_sequence_length=None,
+    ):
+        if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
+            logger.warning(
+                f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
+            )
+
+        if callback_on_step_end_tensor_inputs is not None and not all(
+            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+        ):
+            raise ValueError(
+                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+            )
+
+        if prompt is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt_2 is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt is None and prompt_embeds is None:
+            raise ValueError(
+                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+            )
+        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
+            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
+
+        if prompt_embeds is not None and pooled_prompt_embeds is None:
+            raise ValueError(
+                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
+            )
+
+        if max_sequence_length is not None and max_sequence_length > 512:
+            raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
+
+    @staticmethod
+    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids
+    def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
+        latent_image_ids = torch.zeros(height, width, 3)
+        latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None]
+        latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :]
+
+        latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
+
+        latent_image_ids = latent_image_ids.reshape(
+            latent_image_id_height * latent_image_id_width, latent_image_id_channels
+        )
+
+        return latent_image_ids.to(device=device, dtype=dtype)
+
+    @staticmethod
+    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents
+    def _pack_latents(latents, batch_size, num_channels_latents, height, width):
+        latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
+        latents = latents.permute(0, 2, 4, 1, 3, 5)
+        latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
+
+        return latents
+
+    @staticmethod
+    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents
+    def _unpack_latents(latents, height, width, vae_scale_factor):
+        batch_size, num_patches, channels = latents.shape
+
+        # VAE applies 8x compression on images but we must also account for packing which requires
+        # latent height and width to be divisible by 2.
+        height = 2 * (int(height) // (vae_scale_factor * 2))
+        width = 2 * (int(width) // (vae_scale_factor * 2))
+
+        latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
+        latents = latents.permute(0, 3, 1, 4, 2, 5)
+
+        latents = latents.reshape(batch_size, channels // (2 * 2), height, width)
+
+        return latents
+
+    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents
+    def prepare_latents(
+        self,
+        batch_size,
+        num_channels_latents,
+        height,
+        width,
+        dtype,
+        device,
+        generator,
+        latents=None,
+    ):
+        # VAE applies 8x compression on images but we must also account for packing which requires
+        # latent height and width to be divisible by 2.
+        height = 2 * (int(height) // (self.vae_scale_factor * 2))
+        width = 2 * (int(width) // (self.vae_scale_factor * 2))
+
+        shape = (batch_size, num_channels_latents, height, width)
+
+        if latents is not None:
+            latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
+            return latents.to(device=device, dtype=dtype), latent_image_ids
+
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+        latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
+
+        latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
+
+        return latents, latent_image_ids
+
+    # Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image
+    def prepare_image(
+        self,
+        image,
+        width,
+        height,
+        batch_size,
+        num_images_per_prompt,
+        device,
+        dtype,
+        do_classifier_free_guidance=False,
+        guess_mode=False,
+    ):
+        if isinstance(image, torch.Tensor):
+            pass
+        else:
+            image = self.image_processor.preprocess(image, height=height, width=width)
+
+        image_batch_size = image.shape[0]
+
+        if image_batch_size == 1:
+            repeat_by = batch_size
+        else:
+            # image batch size is the same as prompt batch size
+            repeat_by = num_images_per_prompt
+
+        image = image.repeat_interleave(repeat_by, dim=0)
+
+        image = image.to(device=device, dtype=dtype)
+
+        if do_classifier_free_guidance and not guess_mode:
+            image = torch.cat([image] * 2)
+
+        return image
+
+    @property
+    def guidance_scale(self):
+        return self._guidance_scale
+
+    @property
+    def joint_attention_kwargs(self):
+        return self._joint_attention_kwargs
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @property
+    def interrupt(self):
+        return self._interrupt
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Union[str, List[str]] = None,
+        prompt_2: Optional[Union[str, List[str]]] = None,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        num_inference_steps: int = 28,
+        sigmas: Optional[List[float]] = None,
+        guidance_scale: float = 7.0,
+        control_guidance_start: Union[float, List[float]] = 0.0,
+        control_guidance_end: Union[float, List[float]] = 1.0,
+        control_image: PipelineImageInput = None,
+        control_mode: Optional[Union[int, List[int]]] = None,
+        controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+        num_images_per_prompt: Optional[int] = 1,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        max_sequence_length: int = 512,
+    ):
+        r"""
+        Function invoked when calling the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
+                instead.
+            prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+                will be used instead
+            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+                The height in pixels of the generated image. This is set to 1024 by default for the best results.
+            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+                The width in pixels of the generated image. This is set to 1024 by default for the best results.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+                will be used.
+            guidance_scale (`float`, *optional*, defaults to 7.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+                usually at the expense of lower image quality.
+            control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
+                The percentage of total steps at which the ControlNet starts applying.
+            control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
+                The percentage of total steps at which the ControlNet stops applying.
+            control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
+                `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
+                The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
+                specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
+                as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or
+                width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
+                images must be passed as a list such that each element of the list can be correctly batched for input
+                to a single ControlNet.
+            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
+                The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
+                to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
+                the corresponding scale as a list.
+            control_mode (`int` or `List[int]`,, *optional*, defaults to None):
+                The control mode when applying ControlNet-Union.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will ge generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generate image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
+            joint_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            callback_on_step_end (`Callable`, *optional*):
+                A function that calls at the end of each denoising steps during the inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
+            is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
+            images.
+        """
+
height = height or self.default_sample_size * self.vae_scale_factor
|
713
|
+
width = width or self.default_sample_size * self.vae_scale_factor
|
714
|
+
|
715
|
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
716
|
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
717
|
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
718
|
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
719
|
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
720
|
+
mult = len(self.controlnet.nets) if isinstance(self.controlnet, FluxMultiControlNetModel) else 1
|
721
|
+
control_guidance_start, control_guidance_end = (
|
722
|
+
mult * [control_guidance_start],
|
723
|
+
mult * [control_guidance_end],
|
724
|
+
)
|
725
|
+
|
726
|
+
# 1. Check inputs. Raise error if not correct
|
727
|
+
self.check_inputs(
|
728
|
+
prompt,
|
729
|
+
prompt_2,
|
730
|
+
height,
|
731
|
+
width,
|
732
|
+
prompt_embeds=prompt_embeds,
|
733
|
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
734
|
+
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
|
735
|
+
max_sequence_length=max_sequence_length,
|
736
|
+
)
|
737
|
+
|
738
|
+
self._guidance_scale = guidance_scale
|
739
|
+
self._joint_attention_kwargs = joint_attention_kwargs
|
740
|
+
self._interrupt = False
|
741
|
+
|
742
|
+
# 2. Define call parameters
|
743
|
+
if prompt is not None and isinstance(prompt, str):
|
744
|
+
batch_size = 1
|
745
|
+
elif prompt is not None and isinstance(prompt, list):
|
746
|
+
batch_size = len(prompt)
|
747
|
+
else:
|
748
|
+
batch_size = prompt_embeds.shape[0]
|
749
|
+
|
750
|
+
device = self._execution_device
|
751
|
+
dtype = self.transformer.dtype
|
752
|
+
|
753
|
+
# 3. Prepare text embeddings
|
754
|
+
lora_scale = (
|
755
|
+
self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
|
756
|
+
)
|
757
|
+
(
|
758
|
+
prompt_embeds,
|
759
|
+
pooled_prompt_embeds,
|
760
|
+
text_ids,
|
761
|
+
) = self.encode_prompt(
|
762
|
+
prompt=prompt,
|
763
|
+
prompt_2=prompt_2,
|
764
|
+
prompt_embeds=prompt_embeds,
|
765
|
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
766
|
+
device=device,
|
767
|
+
num_images_per_prompt=num_images_per_prompt,
|
768
|
+
max_sequence_length=max_sequence_length,
|
769
|
+
lora_scale=lora_scale,
|
770
|
+
)
|
771
|
+
|
772
|
+
# 3. Prepare control image
|
773
|
+
num_channels_latents = self.transformer.config.in_channels // 4
|
774
|
+
if isinstance(self.controlnet, FluxControlNetModel):
|
775
|
+
control_image = self.prepare_image(
|
776
|
+
image=control_image,
|
777
|
+
width=width,
|
778
|
+
height=height,
|
779
|
+
batch_size=batch_size * num_images_per_prompt,
|
780
|
+
num_images_per_prompt=num_images_per_prompt,
|
781
|
+
device=device,
|
782
|
+
dtype=self.vae.dtype,
|
783
|
+
)
|
784
|
+
height, width = control_image.shape[-2:]
|
785
|
+
|
786
|
+
# xlab controlnet has a input_hint_block and instantx controlnet does not
|
787
|
+
controlnet_blocks_repeat = False if self.controlnet.input_hint_block is None else True
|
788
|
+
+            if self.controlnet.input_hint_block is None:
+                # vae encode
+                control_image = retrieve_latents(self.vae.encode(control_image), generator=generator)
+                control_image = (control_image - self.vae.config.shift_factor) * self.vae.config.scaling_factor
+
+                # pack
+                height_control_image, width_control_image = control_image.shape[2:]
+                control_image = self._pack_latents(
+                    control_image,
+                    batch_size * num_images_per_prompt,
+                    num_channels_latents,
+                    height_control_image,
+                    width_control_image,
+                )
+
+            # Here we ensure that `control_mode` has the same length as the control_image.
+            if control_mode is not None:
+                if not isinstance(control_mode, int):
+                    raise ValueError(" For `FluxControlNet`, `control_mode` should be an `int` or `None`")
+                control_mode = torch.tensor(control_mode).to(device, dtype=torch.long)
+                control_mode = control_mode.view(-1, 1).expand(control_image.shape[0], 1)
+
+        elif isinstance(self.controlnet, FluxMultiControlNetModel):
+            control_images = []
+            # xlab controlnet has a input_hint_block and instantx controlnet does not
+            controlnet_blocks_repeat = False if self.controlnet.nets[0].input_hint_block is None else True
+            for i, control_image_ in enumerate(control_image):
+                control_image_ = self.prepare_image(
+                    image=control_image_,
+                    width=width,
+                    height=height,
+                    batch_size=batch_size * num_images_per_prompt,
+                    num_images_per_prompt=num_images_per_prompt,
+                    device=device,
+                    dtype=self.vae.dtype,
+                )
+                height, width = control_image_.shape[-2:]
+
+                if self.controlnet.nets[0].input_hint_block is None:
+                    # vae encode
+                    control_image_ = retrieve_latents(self.vae.encode(control_image_), generator=generator)
+                    control_image_ = (control_image_ - self.vae.config.shift_factor) * self.vae.config.scaling_factor
+
+                    # pack
+                    height_control_image, width_control_image = control_image_.shape[2:]
+                    control_image_ = self._pack_latents(
+                        control_image_,
+                        batch_size * num_images_per_prompt,
+                        num_channels_latents,
+                        height_control_image,
+                        width_control_image,
+                    )
+                control_images.append(control_image_)
+
+            control_image = control_images
+
+            # Here we ensure that `control_mode` has the same length as the control_image.
+            if isinstance(control_mode, list) and len(control_mode) != len(control_image):
+                raise ValueError(
+                    "For Multi-ControlNet, `control_mode` must be a list of the same "
+                    + " length as the number of controlnets (control images) specified"
+                )
+            if not isinstance(control_mode, list):
+                control_mode = [control_mode] * len(control_image)
+            # set control mode
+            control_modes = []
+            for cmode in control_mode:
+                if cmode is None:
+                    cmode = -1
+                control_mode = torch.tensor(cmode).expand(control_images[0].shape[0]).to(device, dtype=torch.long)
+                control_modes.append(control_mode)
+            control_mode = control_modes
+
+        # 4. Prepare latent variables
+        num_channels_latents = self.transformer.config.in_channels // 4
+        latents, latent_image_ids = self.prepare_latents(
+            batch_size * num_images_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+
+        # 5. Prepare timesteps
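+        # note: `mu` shifts the flow-matching sigmas based on the packed image sequence length,
+        # interpolating between `base_shift` and `max_shift` so larger resolutions receive a
+        # stronger timestep shift.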
+        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
+        image_seq_len = latents.shape[1]
+        mu = calculate_shift(
+            image_seq_len,
+            self.scheduler.config.base_image_seq_len,
+            self.scheduler.config.max_image_seq_len,
+            self.scheduler.config.base_shift,
+            self.scheduler.config.max_shift,
+        )
+        timesteps, num_inference_steps = retrieve_timesteps(
+            self.scheduler,
+            num_inference_steps,
+            device,
+            sigmas=sigmas,
+            mu=mu,
+        )
+
+        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+        self._num_timesteps = len(timesteps)
+
+        # 6. Create tensor stating which controlnets to keep
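+        # note: each entry is 1.0 while the step index falls inside the
+        # [control_guidance_start, control_guidance_end] fraction of the schedule and 0.0
+        # otherwise, zeroing the conditioning scale for steps where the controlnet is disabled.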
+        controlnet_keep = []
+        for i in range(len(timesteps)):
+            keeps = [
+                1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
+                for s, e in zip(control_guidance_start, control_guidance_end)
+            ]
+            controlnet_keep.append(keeps[0] if isinstance(self.controlnet, FluxControlNetModel) else keeps)
+
+        # 7. Denoising loop
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                if self.interrupt:
+                    continue
+
+                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+                timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+                if isinstance(self.controlnet, FluxMultiControlNetModel):
+                    use_guidance = self.controlnet.nets[0].config.guidance_embeds
+                else:
+                    use_guidance = self.controlnet.config.guidance_embeds
+
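+                # note: the guidance scale is injected as an embedded conditioning signal rather
+                # than via a separate classifier-free guidance pass, and only for checkpoints
+                # configured with `guidance_embeds`.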
+                guidance = torch.tensor([guidance_scale], device=device) if use_guidance else None
+                guidance = guidance.expand(latents.shape[0]) if guidance is not None else None
+
+                if isinstance(controlnet_keep[i], list):
+                    cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+                else:
+                    controlnet_cond_scale = controlnet_conditioning_scale
+                    if isinstance(controlnet_cond_scale, list):
+                        controlnet_cond_scale = controlnet_cond_scale[0]
+                    cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+                # controlnet
+                controlnet_block_samples, controlnet_single_block_samples = self.controlnet(
+                    hidden_states=latents,
+                    controlnet_cond=control_image,
+                    controlnet_mode=control_mode,
+                    conditioning_scale=cond_scale,
+                    timestep=timestep / 1000,
+                    guidance=guidance,
+                    pooled_projections=pooled_prompt_embeds,
+                    encoder_hidden_states=prompt_embeds,
+                    txt_ids=text_ids,
+                    img_ids=latent_image_ids,
+                    joint_attention_kwargs=self.joint_attention_kwargs,
+                    return_dict=False,
+                )
+
+                guidance = (
+                    torch.tensor([guidance_scale], device=device) if self.transformer.config.guidance_embeds else None
+                )
+                guidance = guidance.expand(latents.shape[0]) if guidance is not None else None
+
+                noise_pred = self.transformer(
+                    hidden_states=latents,
+                    timestep=timestep / 1000,
+                    guidance=guidance,
+                    pooled_projections=pooled_prompt_embeds,
+                    encoder_hidden_states=prompt_embeds,
+                    controlnet_block_samples=controlnet_block_samples,
+                    controlnet_single_block_samples=controlnet_single_block_samples,
+                    txt_ids=text_ids,
+                    img_ids=latent_image_ids,
+                    joint_attention_kwargs=self.joint_attention_kwargs,
+                    return_dict=False,
+                    controlnet_blocks_repeat=controlnet_blocks_repeat,
+                )[0]
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents_dtype = latents.dtype
+                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+                if latents.dtype != latents_dtype:
+                    if torch.backends.mps.is_available():
+                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                        latents = latents.to(latents_dtype)
+
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+
+                if XLA_AVAILABLE:
+                    xm.mark_step()
+
+        if output_type == "latent":
+            image = latents
+
+        else:
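+            # note: unpack the latent token sequence back to (batch, channels, height, width)
+            # and undo the VAE normalization before decoding to pixels.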
+            latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+
+            image = self.vae.decode(latents, return_dict=False)[0]
+            image = self.image_processor.postprocess(image, output_type=output_type)
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image,)
+
+        return FluxPipelineOutput(images=image)