diffusers 0.19.3__py3-none-any.whl → 0.20.1__py3-none-any.whl
- diffusers/__init__.py +3 -1
- diffusers/commands/fp16_safetensors.py +2 -7
- diffusers/configuration_utils.py +23 -1
- diffusers/dependency_versions_table.py +1 -1
- diffusers/loaders.py +62 -64
- diffusers/models/__init__.py +1 -0
- diffusers/models/activations.py +2 -0
- diffusers/models/attention.py +45 -1
- diffusers/models/autoencoder_tiny.py +193 -0
- diffusers/models/controlnet.py +1 -1
- diffusers/models/embeddings.py +56 -0
- diffusers/models/lora.py +0 -6
- diffusers/models/modeling_flax_utils.py +28 -2
- diffusers/models/modeling_utils.py +33 -16
- diffusers/models/transformer_2d.py +26 -9
- diffusers/models/unet_1d.py +2 -2
- diffusers/models/unet_2d_blocks.py +106 -56
- diffusers/models/unet_2d_condition.py +20 -5
- diffusers/models/vae.py +106 -1
- diffusers/pipelines/__init__.py +1 -0
- diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py +10 -3
- diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py +10 -3
- diffusers/pipelines/audioldm/pipeline_audioldm.py +1 -1
- diffusers/pipelines/auto_pipeline.py +33 -43
- diffusers/pipelines/controlnet/multicontrolnet.py +4 -2
- diffusers/pipelines/controlnet/pipeline_controlnet.py +20 -4
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +15 -7
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +14 -4
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +157 -10
- diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -10
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +1 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +1 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +1 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +1 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +1 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +1 -1
- diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +43 -2
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +44 -2
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +1 -1
- diffusers/pipelines/pipeline_flax_utils.py +41 -4
- diffusers/pipelines/pipeline_utils.py +60 -16
- diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +2 -2
- diffusers/pipelines/stable_diffusion/__init__.py +1 -0
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +81 -37
- diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_attend_and_excite.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py +12 -5
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_gligen.py +832 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py +9 -2
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py +17 -8
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_sag.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +10 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +10 -3
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +3 -5
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +75 -3
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +76 -6
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +1 -2
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +10 -3
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +10 -3
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +11 -4
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +1 -1
- diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +131 -28
- diffusers/schedulers/scheduling_consistency_models.py +70 -57
- diffusers/schedulers/scheduling_ddim.py +76 -71
- diffusers/schedulers/scheduling_ddim_inverse.py +76 -44
- diffusers/schedulers/scheduling_ddim_parallel.py +11 -8
- diffusers/schedulers/scheduling_ddpm.py +68 -67
- diffusers/schedulers/scheduling_ddpm_parallel.py +18 -15
- diffusers/schedulers/scheduling_deis_multistep.py +93 -85
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +118 -120
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +116 -109
- diffusers/schedulers/scheduling_dpmsolver_sde.py +57 -43
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +122 -121
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +54 -44
- diffusers/schedulers/scheduling_euler_discrete.py +63 -56
- diffusers/schedulers/scheduling_heun_discrete.py +57 -45
- diffusers/schedulers/scheduling_ipndm.py +27 -22
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +54 -41
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +52 -41
- diffusers/schedulers/scheduling_karras_ve.py +55 -45
- diffusers/schedulers/scheduling_lms_discrete.py +58 -52
- diffusers/schedulers/scheduling_pndm.py +77 -62
- diffusers/schedulers/scheduling_repaint.py +56 -38
- diffusers/schedulers/scheduling_sde_ve.py +62 -50
- diffusers/schedulers/scheduling_sde_vp.py +32 -11
- diffusers/schedulers/scheduling_unclip.py +3 -3
- diffusers/schedulers/scheduling_unipc_multistep.py +131 -91
- diffusers/schedulers/scheduling_utils.py +41 -35
- diffusers/schedulers/scheduling_utils_flax.py +8 -2
- diffusers/schedulers/scheduling_vq_diffusion.py +39 -68
- diffusers/utils/__init__.py +2 -2
- diffusers/utils/dummy_pt_objects.py +15 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +15 -0
- diffusers/utils/hub_utils.py +105 -2
- diffusers/utils/import_utils.py +0 -4
- diffusers/utils/pil_utils.py +19 -0
- {diffusers-0.19.3.dist-info → diffusers-0.20.1.dist-info}/METADATA +5 -7
- {diffusers-0.19.3.dist-info → diffusers-0.20.1.dist-info}/RECORD +113 -112
- {diffusers-0.19.3.dist-info → diffusers-0.20.1.dist-info}/WHEEL +1 -1
- {diffusers-0.19.3.dist-info → diffusers-0.20.1.dist-info}/entry_points.txt +0 -1
- diffusers/models/cross_attention.py +0 -94
- {diffusers-0.19.3.dist-info → diffusers-0.20.1.dist-info}/LICENSE +0 -0
- {diffusers-0.19.3.dist-info → diffusers-0.20.1.dist-info}/top_level.txt +0 -0
diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_gligen.py (new file)
@@ -0,0 +1,832 @@
# Copyright 2023 The GLIGEN Authors and HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import warnings
from typing import Any, Callable, Dict, List, Optional, Union

import PIL
import torch
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

from ...image_processor import VaeImageProcessor
from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.attention import GatedSelfAttentionDense
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline
from . import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionGLIGENPipeline
        >>> from diffusers.utils import load_image

        >>> # Insert objects described by text at the region defined by bounding boxes
        >>> pipe = StableDiffusionGLIGENPipeline.from_pretrained(
        ...     "masterful/gligen-1-4-inpainting-text-box", variant="fp16", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> input_image = load_image(
        ...     "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png"
        ... )
        >>> prompt = "a birthday cake"
        >>> boxes = [[0.2676, 0.6088, 0.4773, 0.7183]]
        >>> phrases = ["a birthday cake"]

        >>> images = pipe(
        ...     prompt=prompt,
        ...     gligen_phrases=phrases,
        ...     gligen_inpaint_image=input_image,
        ...     gligen_boxes=boxes,
        ...     gligen_scheduled_sampling_beta=1,
        ...     output_type="pil",
        ...     num_inference_steps=50,
        ... ).images

        >>> images[0].save("./gligen-1-4-inpainting-text-box.jpg")

        >>> # Generate an image described by the prompt and
        >>> # insert objects described by text at the region defined by bounding boxes
        >>> pipe = StableDiffusionGLIGENPipeline.from_pretrained(
        ...     "masterful/gligen-1-4-generation-text-box", variant="fp16", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a waterfall and a modern high speed train running through the tunnel in a beautiful forest with fall foliage"
        >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]]
        >>> phrases = ["a waterfall", "a modern high speed train running through the tunnel"]

        >>> images = pipe(
        ...     prompt=prompt,
        ...     gligen_phrases=phrases,
        ...     gligen_boxes=boxes,
        ...     gligen_scheduled_sampling_beta=1,
        ...     output_type="pil",
        ...     num_inference_steps=50,
        ... ).images

        >>> images[0].save("./gligen-1-4-generation-text-box.jpg")
        ```
"""


class StableDiffusionGLIGENPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion with Grounded-Language-to-Image Generation (GLIGEN).

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """
    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.
        """
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_tiling()

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
        time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
        Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
        iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: procecss multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: procecss multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        gligen_phrases,
        gligen_boxes,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if len(gligen_phrases) != len(gligen_boxes):
            ValueError(
                "length of `gligen_phrases` and `gligen_boxes` has to be same, but"
                f" got: `gligen_phrases` {len(gligen_phrases)} != `gligen_boxes` {len(gligen_boxes)}"
            )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def enable_fuser(self, enabled=True):
        for module in self.unet.modules():
            if type(module) is GatedSelfAttentionDense:
                module.enabled = enabled

    def draw_inpaint_mask_from_boxes(self, boxes, size):
        inpaint_mask = torch.ones(size[0], size[1])
        for box in boxes:
            x0, x1 = box[0] * size[0], box[2] * size[0]
            y0, y1 = box[1] * size[1], box[3] * size[1]
            inpaint_mask[int(y0) : int(y1), int(x0) : int(x1)] = 0
        return inpaint_mask

    def crop(self, im, new_width, new_height):
        width, height = im.size
        left = (width - new_width) / 2
        top = (height - new_height) / 2
        right = (width + new_width) / 2
        bottom = (height + new_height) / 2
        return im.crop((left, top, right, bottom))

    def target_size_center_crop(self, im, new_hw):
        width, height = im.size
        if width != height:
            im = self.crop(im, min(height, width), min(height, width))
        return im.resize((new_hw, new_hw), PIL.Image.LANCZOS)

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        gligen_scheduled_sampling_beta: float = 0.3,
        gligen_phrases: List[str] = None,
        gligen_boxes: List[List[float]] = None,
        gligen_inpaint_image: Optional[PIL.Image.Image] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            gligen_phrases (`List[str]`):
                The phrases to guide what to include in each of the regions defined by the corresponding
                `gligen_boxes`. There should only be one phrase per bounding box.
            gligen_boxes (`List[List[float]]`):
                The bounding boxes that identify rectangular regions of the image that are going to be filled with the
                content described by the corresponding `gligen_phrases`. Each rectangular box is defined as a
                `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1].
            gligen_inpaint_image (`PIL.Image.Image`, *optional*):
                The input image, if provided, is inpainted with objects described by the `gligen_boxes` and
                `gligen_phrases`. Otherwise, it is treated as a generation task on a blank input image.
            gligen_scheduled_sampling_beta (`float`, defaults to 0.3):
                Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image
                Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for
                scheduled sampling during inference for improved quality and controllability.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that calls every `callback_steps` steps during inference. The function is called with the
                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.7):
                Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
                using zero terminal SNR.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            callback_steps,
            gligen_phrases,
            gligen_boxes,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 5.1 Prepare GLIGEN variables
        max_objs = 30
        if len(gligen_boxes) > max_objs:
            warnings.warn(
                f"More that {max_objs} objects found. Only first {max_objs} objects will be processed.",
                FutureWarning,
            )
            gligen_phrases = gligen_phrases[:max_objs]
            gligen_boxes = gligen_boxes[:max_objs]
        # prepare batched input to the PositionNet (boxes, phrases, mask)
        # Get tokens for phrases from pre-trained CLIPTokenizer
        tokenizer_inputs = self.tokenizer(gligen_phrases, padding=True, return_tensors="pt").to(device)
        # For the token, we use the same pre-trained text encoder
        # to obtain its text feature
        _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output
        n_objs = len(gligen_boxes)
        # For each entity, described in phrases, is denoted with a bounding box,
        # we represent the location information as (xmin,ymin,xmax,ymax)
        boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype)
        boxes[:n_objs] = torch.tensor(gligen_boxes)
        text_embeddings = torch.zeros(
            max_objs, self.unet.cross_attention_dim, device=device, dtype=self.text_encoder.dtype
        )
        text_embeddings[:n_objs] = _text_embeddings
        # Generate a mask for each object that is entity described by phrases
        masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype)
        masks[:n_objs] = 1

        repeat_batch = batch_size * num_images_per_prompt
        boxes = boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
        text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
        masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone()
        if do_classifier_free_guidance:
            repeat_batch = repeat_batch * 2
            boxes = torch.cat([boxes] * 2)
            text_embeddings = torch.cat([text_embeddings] * 2)
            masks = torch.cat([masks] * 2)
            masks[: repeat_batch // 2] = 0
        if cross_attention_kwargs is None:
            cross_attention_kwargs = {}
        cross_attention_kwargs["gligen"] = {"boxes": boxes, "positive_embeddings": text_embeddings, "masks": masks}

        # Prepare latent variables for GLIGEN inpainting
        if gligen_inpaint_image is not None:
            # if the given input image is not of the same size as expected by VAE
            # center crop and resize the input image to expected shape
            if gligen_inpaint_image.size != (self.vae.sample_size, self.vae.sample_size):
                gligen_inpaint_image = self.target_size_center_crop(gligen_inpaint_image, self.vae.sample_size)
            # Convert a single image into a batch of images with a batch size of 1
            # The resulting shape becomes (1, C, H, W), where C is the number of channels,
            # and H and W are the height and width of the image.
            # scales the pixel values to a range [-1, 1]
            gligen_inpaint_image = self.image_processor.preprocess(gligen_inpaint_image)
            gligen_inpaint_image = gligen_inpaint_image.to(dtype=self.vae.dtype, device=self.vae.device)
            # Run AutoEncoder to get corresponding latents
            gligen_inpaint_latent = self.vae.encode(gligen_inpaint_image).latent_dist.sample()
            gligen_inpaint_latent = self.vae.config.scaling_factor * gligen_inpaint_latent
            # Generate an inpainting mask
            # pixel value = 0, where the object is present (defined by bounding boxes above)
            #               1, everywhere else
            gligen_inpaint_mask = self.draw_inpaint_mask_from_boxes(gligen_boxes, gligen_inpaint_latent.shape[2:])
            gligen_inpaint_mask = gligen_inpaint_mask.to(
                dtype=gligen_inpaint_latent.dtype, device=gligen_inpaint_latent.device
            )
            gligen_inpaint_mask = gligen_inpaint_mask[None, None]
            gligen_inpaint_mask_addition = torch.cat(
                (gligen_inpaint_latent * gligen_inpaint_mask, gligen_inpaint_mask), dim=1
            )
            # Convert a single mask into a batch of masks with a batch size of 1
            gligen_inpaint_mask_addition = gligen_inpaint_mask_addition.expand(repeat_batch, -1, -1, -1).clone()

        num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps))
        self.enable_fuser(True)

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # Scheduled sampling
                if i == num_grounding_steps:
                    self.enable_fuser(False)

                if latents.shape[1] != 4:
                    latents = torch.randn_like(latents[:, :4])

                if gligen_inpaint_image is not None:
                    gligen_inpaint_latent_with_noise = (
                        self.scheduler.add_noise(gligen_inpaint_latent, torch.randn_like(gligen_inpaint_latent), t)
                        .expand(latents.shape[0], -1, -1, -1)
                        .clone()
                    )
                    latents = gligen_inpaint_latent_with_noise * gligen_inpaint_mask + latents * (
                        1 - gligen_inpaint_mask
                    )

                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                if gligen_inpaint_image is not None:
                    latent_model_input = torch.cat((latent_model_input, gligen_inpaint_mask_addition), dim=1)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)