diffusers 0.23.0__py3-none-any.whl → 0.24.0__py3-none-any.whl
- diffusers/__init__.py +16 -2
- diffusers/configuration_utils.py +1 -0
- diffusers/dependency_versions_check.py +1 -14
- diffusers/dependency_versions_table.py +5 -4
- diffusers/image_processor.py +186 -14
- diffusers/loaders/__init__.py +82 -0
- diffusers/loaders/ip_adapter.py +157 -0
- diffusers/loaders/lora.py +1415 -0
- diffusers/loaders/lora_conversion_utils.py +284 -0
- diffusers/loaders/single_file.py +631 -0
- diffusers/loaders/textual_inversion.py +459 -0
- diffusers/loaders/unet.py +735 -0
- diffusers/loaders/utils.py +59 -0
- diffusers/models/__init__.py +12 -1
- diffusers/models/attention.py +165 -14
- diffusers/models/attention_flax.py +9 -1
- diffusers/models/attention_processor.py +286 -1
- diffusers/models/autoencoder_asym_kl.py +14 -9
- diffusers/models/autoencoder_kl.py +3 -18
- diffusers/models/autoencoder_kl_temporal_decoder.py +402 -0
- diffusers/models/autoencoder_tiny.py +20 -24
- diffusers/models/consistency_decoder_vae.py +37 -30
- diffusers/models/controlnet.py +59 -39
- diffusers/models/controlnet_flax.py +19 -18
- diffusers/models/embeddings_flax.py +2 -0
- diffusers/models/lora.py +131 -1
- diffusers/models/modeling_flax_utils.py +2 -1
- diffusers/models/modeling_outputs.py +17 -0
- diffusers/models/modeling_utils.py +27 -19
- diffusers/models/normalization.py +2 -2
- diffusers/models/resnet.py +390 -59
- diffusers/models/transformer_2d.py +20 -3
- diffusers/models/transformer_temporal.py +183 -1
- diffusers/models/unet_2d_blocks_flax.py +5 -0
- diffusers/models/unet_2d_condition.py +9 -0
- diffusers/models/unet_2d_condition_flax.py +13 -13
- diffusers/models/unet_3d_blocks.py +957 -173
- diffusers/models/unet_3d_condition.py +16 -8
- diffusers/models/unet_kandi3.py +589 -0
- diffusers/models/unet_motion_model.py +48 -33
- diffusers/models/unet_spatio_temporal_condition.py +489 -0
- diffusers/models/vae.py +63 -13
- diffusers/models/vae_flax.py +7 -0
- diffusers/models/vq_model.py +3 -1
- diffusers/optimization.py +16 -9
- diffusers/pipelines/__init__.py +65 -12
- diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py +93 -23
- diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py +97 -25
- diffusers/pipelines/animatediff/pipeline_animatediff.py +34 -4
- diffusers/pipelines/audioldm/pipeline_audioldm.py +1 -0
- diffusers/pipelines/auto_pipeline.py +6 -0
- diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -0
- diffusers/pipelines/controlnet/pipeline_controlnet.py +217 -31
- diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +101 -32
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +136 -39
- diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +119 -37
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +196 -35
- diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +102 -31
- diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +1 -0
- diffusers/pipelines/ddim/pipeline_ddim.py +1 -0
- diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -0
- diffusers/pipelines/deepfloyd_if/pipeline_if.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +13 -1
- diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +13 -1
- diffusers/pipelines/dit/pipeline_dit.py +1 -0
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +3 -3
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +1 -1
- diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +1 -1
- diffusers/pipelines/kandinsky3/__init__.py +49 -0
- diffusers/pipelines/kandinsky3/kandinsky3_pipeline.py +452 -0
- diffusers/pipelines/kandinsky3/kandinsky3img2img_pipeline.py +460 -0
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +65 -6
- diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +55 -3
- diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -0
- diffusers/pipelines/musicldm/pipeline_musicldm.py +1 -1
- diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +7 -2
- diffusers/pipelines/pipeline_flax_utils.py +4 -2
- diffusers/pipelines/pipeline_utils.py +33 -13
- diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +196 -36
- diffusers/pipelines/score_sde_ve/pipeline_score_sde_ve.py +1 -0
- diffusers/pipelines/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +1 -0
- diffusers/pipelines/stable_diffusion/__init__.py +64 -21
- diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +8 -3
- diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py +18 -2
- diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +2 -2
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +2 -4
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +88 -9
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_attend_and_excite.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +8 -3
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_gligen.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_gligen_text_image.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +92 -9
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +92 -9
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +17 -13
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_sag.py +1 -0
- diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +1 -0
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +103 -8
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +113 -8
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +115 -9
- diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +16 -12
- diffusers/pipelines/stable_video_diffusion/__init__.py +58 -0
- diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +649 -0
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +108 -12
- diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +109 -14
- diffusers/pipelines/text_to_video_synthesis/__init__.py +2 -0
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +1 -0
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +18 -3
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +4 -2
- diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +872 -0
- diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +29 -40
- diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +1 -0
- diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +1 -0
- diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +1 -0
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +14 -4
- diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +9 -5
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +1 -1
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +2 -2
- diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +1 -1
- diffusers/schedulers/__init__.py +2 -4
- diffusers/schedulers/deprecated/__init__.py +50 -0
- diffusers/schedulers/{scheduling_karras_ve.py → deprecated/scheduling_karras_ve.py} +4 -4
- diffusers/schedulers/{scheduling_sde_vp.py → deprecated/scheduling_sde_vp.py} +4 -6
- diffusers/schedulers/scheduling_ddim.py +1 -3
- diffusers/schedulers/scheduling_ddim_inverse.py +1 -3
- diffusers/schedulers/scheduling_ddim_parallel.py +1 -3
- diffusers/schedulers/scheduling_ddpm.py +1 -3
- diffusers/schedulers/scheduling_ddpm_parallel.py +1 -3
- diffusers/schedulers/scheduling_deis_multistep.py +15 -5
- diffusers/schedulers/scheduling_dpmsolver_multistep.py +15 -5
- diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +15 -5
- diffusers/schedulers/scheduling_dpmsolver_sde.py +1 -3
- diffusers/schedulers/scheduling_dpmsolver_singlestep.py +15 -5
- diffusers/schedulers/scheduling_euler_ancestral_discrete.py +1 -3
- diffusers/schedulers/scheduling_euler_discrete.py +40 -13
- diffusers/schedulers/scheduling_heun_discrete.py +15 -5
- diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +15 -5
- diffusers/schedulers/scheduling_k_dpm_2_discrete.py +15 -5
- diffusers/schedulers/scheduling_lcm.py +123 -29
- diffusers/schedulers/scheduling_lms_discrete.py +1 -3
- diffusers/schedulers/scheduling_pndm.py +1 -3
- diffusers/schedulers/scheduling_repaint.py +1 -3
- diffusers/schedulers/scheduling_unipc_multistep.py +15 -5
- diffusers/utils/__init__.py +1 -0
- diffusers/utils/constants.py +11 -6
- diffusers/utils/dummy_pt_objects.py +45 -0
- diffusers/utils/dummy_torch_and_transformers_objects.py +60 -0
- diffusers/utils/dynamic_modules_utils.py +4 -4
- diffusers/utils/export_utils.py +8 -3
- diffusers/utils/logging.py +10 -10
- diffusers/utils/outputs.py +5 -5
- diffusers/utils/peft_utils.py +88 -44
- diffusers/utils/torch_utils.py +2 -2
- diffusers/utils/versions.py +117 -0
- {diffusers-0.23.0.dist-info → diffusers-0.24.0.dist-info}/METADATA +83 -64
- {diffusers-0.23.0.dist-info → diffusers-0.24.0.dist-info}/RECORD +176 -157
- {diffusers-0.23.0.dist-info → diffusers-0.24.0.dist-info}/WHEEL +1 -1
- {diffusers-0.23.0.dist-info → diffusers-0.24.0.dist-info}/entry_points.txt +1 -0
- diffusers/loaders.py +0 -3336
- {diffusers-0.23.0.dist-info → diffusers-0.24.0.dist-info}/LICENSE +0 -0
- {diffusers-0.23.0.dist-info → diffusers-0.24.0.dist-info}/top_level.txt +0 -0
--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
@@ -19,11 +19,11 @@ import numpy as np
 import PIL.Image
 import torch
 from packaging import version
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
 from ...configuration_utils import FrozenDict
 from ...image_processor import PipelineImageInput, VaeImageProcessor
-from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
 from ...models import AutoencoderKL, UNet2DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers
@@ -73,9 +73,13 @@ EXAMPLE_DOC_STRING = """
 """
 
 
-def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None):
-    if hasattr(encoder_output, "latent_dist"):
+def retrieve_latents(
+    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
+    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+        return encoder_output.latent_dist.mode()
     elif hasattr(encoder_output, "latents"):
         return encoder_output.latents
     else:
@@ -105,8 +109,53 @@ def preprocess(image):
     return image
 
 
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    **kwargs,
+):
+    """
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used,
+            `timesteps` must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
+            timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
+            must be `None`.
+
+    Returns:
+        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+        second element is the number of inference steps.
+    """
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
 class StableDiffusionImg2ImgPipeline(
-    DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
+    DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FromSingleFileMixin
 ):
     r"""
     Pipeline for text-guided image-to-image generation using Stable Diffusion.
@@ -119,6 +168,7 @@ class StableDiffusionImg2ImgPipeline(
         - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
         - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
         - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
 
     Args:
         vae ([`AutoencoderKL`]):
@@ -139,8 +189,9 @@ class StableDiffusionImg2ImgPipeline(
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
-    _optional_components = ["safety_checker", "feature_extractor"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
     _exclude_from_cpu_offload = ["safety_checker"]
     _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
 
@@ -153,6 +204,7 @@ class StableDiffusionImg2ImgPipeline(
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: StableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: CLIPVisionModelWithProjection = None,
         requires_safety_checker: bool = True,
     ):
         super().__init__()
@@ -229,6 +281,7 @@ class StableDiffusionImg2ImgPipeline(
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
@@ -449,6 +502,20 @@ class StableDiffusionImg2ImgPipeline(
 
         return prompt_embeds, negative_prompt_embeds
 
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        image_embeds = self.image_encoder(image).image_embeds
+        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+
+        uncond_image_embeds = torch.zeros_like(image_embeds)
+        return image_embeds, uncond_image_embeds
+
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
     def run_safety_checker(self, image, device, dtype):
         if self.safety_checker is None:
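`encode_image` is the IP-Adapter counterpart of prompt encoding: the conditional embedding comes from the CLIP vision tower, and the unconditional one is simply zeros. Under classifier-free guidance the two halves are concatenated negative-first, exactly like the text embeddings. A shape-level sketch with stand-in tensors (the 1024-dim width is an assumption matching common ViT-H based IP-Adapter encoders):

```py
import torch

num_images_per_prompt = 2
image_embeds = torch.randn(1, 1024)  # stand-in for image_encoder(image).image_embeds

# Same steps as encode_image: one copy per requested image, zeros for uncond.
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)

# Classifier-free guidance stacks negative first, positive second.
image_embeds = torch.cat([uncond_image_embeds, image_embeds])
print(image_embeds.shape)  # torch.Size([4, 1024])
```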
@@ -700,6 +767,7 @@ class StableDiffusionImg2ImgPipeline(
         image: PipelineImageInput = None,
         strength: float = 0.8,
         num_inference_steps: Optional[int] = 50,
+        timesteps: List[int] = None,
         guidance_scale: Optional[float] = 7.5,
         negative_prompt: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
@@ -707,6 +775,7 @@ class StableDiffusionImg2ImgPipeline(
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
@@ -736,6 +805,10 @@ class StableDiffusionImg2ImgPipeline(
             num_inference_steps (`int`, *optional*, defaults to 50):
                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                 expense of slower inference. This parameter is modulated by `strength`.
+            timesteps (`List[int]`, *optional*):
+                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                passed will be used. Must be in descending order.
             guidance_scale (`float`, *optional*, defaults to 7.5):
                 A higher guidance scale value encourages the model to generate images closely linked to the text
                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
@@ -756,6 +829,7 @@ class StableDiffusionImg2ImgPipeline(
             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -775,7 +849,7 @@ class StableDiffusionImg2ImgPipeline(
             callback_on_step_end_tensor_inputs (`List`, *optional*):
                 The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                 will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
-                `._callback_tensor_inputs` attribute of your
+                `._callback_tensor_inputs` attribute of your pipeline class.
         Examples:
 
         Returns:
@@ -848,11 +922,16 @@ class StableDiffusionImg2ImgPipeline(
         if self.do_classifier_free_guidance:
             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
 
+        if ip_adapter_image is not None:
+            image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
+            if self.do_classifier_free_guidance:
+                image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
         # 4. Preprocess image
         image = self.image_processor.preprocess(image)
 
         # 5. set timesteps
-        self.scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
         timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
         latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
 
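Note how the two schedule lines chain: `retrieve_timesteps` installs the full schedule on the scheduler, then the pre-existing `get_timesteps` trims it by `strength`, so img2img still only denoises the tail of the schedule. A back-of-envelope sketch mirroring `get_timesteps`' arithmetic (assuming a scheduler order of 1; the helper itself is not shown in this diff):

```py
num_inference_steps, strength = 50, 0.7

# get_timesteps keeps only the last `strength` fraction of the schedule.
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 35
t_start = max(num_inference_steps - init_timestep, 0)                          # 15

# The denoising loop then iterates scheduler.timesteps[t_start:], and
# latent_timestep (the noising level for the init image) is timesteps[0].
print(t_start, num_inference_steps - t_start)  # 15 35
```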
@@ -870,7 +949,10 @@ class StableDiffusionImg2ImgPipeline(
         # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
 
-        # 7.5 Optionally get Guidance Scale Embedding
+        # 7.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+
+        # 7.2 Optionally get Guidance Scale Embedding
         timestep_cond = None
         if self.unet.config.time_cond_proj_dim is not None:
             guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
@@ -894,6 +976,7 @@ class StableDiffusionImg2ImgPipeline(
                 encoder_hidden_states=prompt_embeds,
                 timestep_cond=timestep_cond,
                 cross_attention_kwargs=self.cross_attention_kwargs,
+                added_cond_kwargs=added_cond_kwargs,
                 return_dict=False,
             )[0]
 
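Net effect for img2img: two new `__call__` arguments, `ip_adapter_image` and `timesteps`, plus the optional `image_encoder` component they rely on. A minimal usage sketch; the `h94/IP-Adapter` repo, subfolder, weight filename, and image URLs are assumptions for illustration, not pinned by this diff:

```py
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# New in 0.24.0: IPAdapterMixin.load_ip_adapter wires in the adapter weights
# (and the CLIP image encoder) consumed via ip_adapter_image below.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

init_image = load_image("https://example.com/init.png")    # placeholder URL
style_image = load_image("https://example.com/style.png")  # placeholder URL

image = pipe(
    prompt="a fantasy landscape",
    image=init_image,
    ip_adapter_image=style_image,  # routed through encode_image -> added_cond_kwargs
    strength=0.7,
).images[0]
```

The new `timesteps` argument follows the same opt-in pattern: pass a descending list and `retrieve_timesteps` forwards it to the scheduler, provided that scheduler's `set_timesteps` accepts a `timesteps` keyword. The inpaint pipeline below receives the identical treatment.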
--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
@@ -19,11 +19,11 @@ import numpy as np
 import PIL.Image
 import torch
 from packaging import version
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
 from ...configuration_utils import FrozenDict
 from ...image_processor import PipelineImageInput, VaeImageProcessor
-from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
 from ...models import AsymmetricAutoencoderKL, AutoencoderKL, UNet2DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers
@@ -160,17 +160,66 @@ def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool
 
 
 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
-def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None):
-    if hasattr(encoder_output, "latent_dist"):
+def retrieve_latents(
+    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
+    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+        return encoder_output.latent_dist.mode()
     elif hasattr(encoder_output, "latents"):
         return encoder_output.latents
     else:
         raise AttributeError("Could not access latents of provided encoder_output")
 
 
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    **kwargs,
+):
+    """
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used,
+            `timesteps` must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
+            timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
+            must be `None`.
+
+    Returns:
+        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+        second element is the number of inference steps.
+    """
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
 class StableDiffusionInpaintPipeline(
-    DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
+    DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FromSingleFileMixin
 ):
     r"""
     Pipeline for text-guided image inpainting using Stable Diffusion.
@@ -182,6 +231,7 @@ class StableDiffusionInpaintPipeline(
         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
         - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
         - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
 
     Args:
         vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]):
@@ -202,8 +252,9 @@ class StableDiffusionInpaintPipeline(
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
-    _optional_components = ["safety_checker", "feature_extractor"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
     _exclude_from_cpu_offload = ["safety_checker"]
     _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "mask", "masked_image_latents"]
 
@@ -216,6 +267,7 @@ class StableDiffusionInpaintPipeline(
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: StableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: CLIPVisionModelWithProjection = None,
         requires_safety_checker: bool = True,
     ):
         super().__init__()
@@ -297,6 +349,7 @@ class StableDiffusionInpaintPipeline(
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
@@ -520,6 +573,20 @@ class StableDiffusionInpaintPipeline(
 
         return prompt_embeds, negative_prompt_embeds
 
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        image_embeds = self.image_encoder(image).image_embeds
+        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+
+        uncond_image_embeds = torch.zeros_like(image_embeds)
+        return image_embeds, uncond_image_embeds
+
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
     def run_safety_checker(self, image, device, dtype):
         if self.safety_checker is None:
@@ -828,6 +895,7 @@ class StableDiffusionInpaintPipeline(
         width: Optional[int] = None,
         strength: float = 1.0,
         num_inference_steps: int = 50,
+        timesteps: List[int] = None,
         guidance_scale: float = 7.5,
         negative_prompt: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
@@ -836,6 +904,7 @@ class StableDiffusionInpaintPipeline(
         latents: Optional[torch.FloatTensor] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
@@ -877,6 +946,10 @@ class StableDiffusionInpaintPipeline(
             num_inference_steps (`int`, *optional*, defaults to 50):
                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                 expense of slower inference. This parameter is modulated by `strength`.
+            timesteps (`List[int]`, *optional*):
+                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                passed will be used. Must be in descending order.
             guidance_scale (`float`, *optional*, defaults to 7.5):
                 A higher guidance scale value encourages the model to generate images closely linked to the text
                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
@@ -901,6 +974,7 @@ class StableDiffusionInpaintPipeline(
             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -920,7 +994,7 @@ class StableDiffusionInpaintPipeline(
             callback_on_step_end_tensor_inputs (`List`, *optional*):
                 The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                 will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
-                `._callback_tensor_inputs` attribute of your
+                `._callback_tensor_inputs` attribute of your pipeline class.
         Examples:
 
         ```py
@@ -1028,8 +1102,13 @@ class StableDiffusionInpaintPipeline(
         if self.do_classifier_free_guidance:
             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
 
+        if ip_adapter_image is not None:
+            image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
+            if self.do_classifier_free_guidance:
+                image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
         # 4. set timesteps
-        self.scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
         timesteps, num_inference_steps = self.get_timesteps(
             num_inference_steps=num_inference_steps, strength=strength, device=device
         )
@@ -1116,7 +1195,10 @@ class StableDiffusionInpaintPipeline(
         # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
 
-        # 9.5 Optionally get Guidance Scale Embedding
+        # 9.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+
+        # 9.2 Optionally get Guidance Scale Embedding
         timestep_cond = None
         if self.unet.config.time_cond_proj_dim is not None:
             guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
@@ -1145,6 +1227,7 @@ class StableDiffusionInpaintPipeline(
                 encoder_hidden_states=prompt_embeds,
                 timestep_cond=timestep_cond,
                 cross_attention_kwargs=self.cross_attention_kwargs,
+                added_cond_kwargs=added_cond_kwargs,
                 return_dict=False,
             )[0]
 
--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
@@ -115,6 +115,7 @@ class StableDiffusionInpaintPipelineLegacy(
         feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]
--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
@@ -58,6 +58,20 @@ def preprocess(image):
     return image
 
 
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+def retrieve_latents(
+    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+        return encoder_output.latent_dist.sample(generator)
+    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+        return encoder_output.latent_dist.mode()
+    elif hasattr(encoder_output, "latents"):
+        return encoder_output.latents
+    else:
+        raise AttributeError("Could not access latents of provided encoder_output")
+
+
 class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
     r"""
     Pipeline for pixel-level image editing by following text instructions (based on Stable Diffusion).
@@ -89,6 +103,7 @@ class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline, TextualInversion
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]
@@ -211,7 +226,7 @@ class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline, TextualInversion
             callback_on_step_end_tensor_inputs (`List`, *optional*):
                 The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                 will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
-                `._callback_tensor_inputs` attribute of your
+                `._callback_tensor_inputs` attribute of your pipeline class.
 
         Examples:
 
@@ -319,7 +334,6 @@ class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline, TextualInversion
             prompt_embeds.dtype,
             device,
             self.do_classifier_free_guidance,
-            generator,
         )
 
         height, width = image_latents.shape[-2:]
@@ -715,17 +729,7 @@ class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline, TextualInversion
         if image.shape[1] == 4:
             image_latents = image
         else:
-            if isinstance(generator, list) and len(generator) != batch_size:
-                raise ValueError(
-                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-                )
-
-            if isinstance(generator, list):
-                image_latents = [self.vae.encode(image[i : i + 1]).latent_dist.mode() for i in range(batch_size)]
-                image_latents = torch.cat(image_latents, dim=0)
-            else:
-                image_latents = self.vae.encode(image).latent_dist.mode()
+            image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax")
 
         if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
             # expand image_latents for batch_size
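The rewritten `prepare_image_latents` drops the generator-dependent branching (the `generator` argument is also removed from the call site above), since the image conditioning here is deterministic anyway: `sample_mode="argmax"` returns the mode of the VAE's latent distribution, matching the old `.latent_dist.mode()` call. A small sketch of the dispatch using stub objects (stub names are illustrative):

```py
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import retrieve_latents


class StubLatentDist:
    def sample(self, generator=None):
        return "stochastic sample"   # what img2img/inpaint want

    def mode(self):
        return "deterministic mode"  # what InstructPix2Pix wants


class StubEncoderOutput:
    latent_dist = StubLatentDist()


print(retrieve_latents(StubEncoderOutput()))                        # stochastic sample
print(retrieve_latents(StubEncoderOutput(), sample_mode="argmax"))  # deterministic mode
```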
--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py
@@ -80,6 +80,7 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline, TextualInversionLoade
         feature_extractor ([`CLIPImageProcessor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]

--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py
@@ -79,6 +79,7 @@ class StableDiffusionLatentUpscalePipeline(DiffusionPipeline, FromSingleFileMixi
         scheduler ([`SchedulerMixin`]):
             A [`EulerDiscreteScheduler`] to be used in combination with `unet` to denoise the encoded image latents.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
 
     def __init__(

--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py
@@ -115,6 +115,7 @@ class StableDiffusionLDM3DPipeline(
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]

--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py
@@ -66,6 +66,7 @@ class StableDiffusionModelEditingPipeline(DiffusionPipeline, TextualInversionLoa
         with_augs ([`list`]):
             Textual augmentations to apply while editing the text-to-image model. Set to `[]` for no augmentations.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]

--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py
@@ -85,6 +85,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]

--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py
@@ -96,6 +96,7 @@ class StableDiffusionParadigmsPipeline(
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]

--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py
@@ -310,6 +310,7 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline):
             Whether the pipeline requires a safety checker. We recommend setting it to True if you're using the
             pipeline publicly.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = [
         "safety_checker",

--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_sag.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_sag.py
@@ -124,6 +124,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]

--- a/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py
+++ b/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py
@@ -92,6 +92,7 @@ class StableDiffusionUpscalePipeline(
             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
     _optional_components = ["watermarker", "safety_checker", "feature_extractor"]
     _exclude_from_cpu_offload = ["safety_checker"]