diffusers 0.24.0__py3-none-any.whl → 0.25.0__py3-none-any.whl

Files changed (174)
  1. diffusers/__init__.py +11 -1
  2. diffusers/commands/fp16_safetensors.py +10 -11
  3. diffusers/configuration_utils.py +12 -8
  4. diffusers/dependency_versions_table.py +2 -1
  5. diffusers/experimental/rl/value_guided_sampling.py +1 -1
  6. diffusers/image_processor.py +286 -46
  7. diffusers/loaders/ip_adapter.py +11 -9
  8. diffusers/loaders/lora.py +198 -60
  9. diffusers/loaders/single_file.py +24 -18
  10. diffusers/loaders/textual_inversion.py +10 -14
  11. diffusers/loaders/unet.py +130 -37
  12. diffusers/models/__init__.py +18 -12
  13. diffusers/models/activations.py +9 -6
  14. diffusers/models/attention.py +137 -16
  15. diffusers/models/attention_processor.py +133 -46
  16. diffusers/models/autoencoders/__init__.py +5 -0
  17. diffusers/models/{autoencoder_asym_kl.py → autoencoders/autoencoder_asym_kl.py} +4 -4
  18. diffusers/models/{autoencoder_kl.py → autoencoders/autoencoder_kl.py} +45 -6
  19. diffusers/models/{autoencoder_kl_temporal_decoder.py → autoencoders/autoencoder_kl_temporal_decoder.py} +8 -8
  20. diffusers/models/{autoencoder_tiny.py → autoencoders/autoencoder_tiny.py} +4 -4
  21. diffusers/models/{consistency_decoder_vae.py → autoencoders/consistency_decoder_vae.py} +14 -14
  22. diffusers/models/{vae.py → autoencoders/vae.py} +9 -5
  23. diffusers/models/downsampling.py +338 -0
  24. diffusers/models/embeddings.py +112 -29
  25. diffusers/models/modeling_flax_utils.py +12 -7
  26. diffusers/models/modeling_utils.py +10 -10
  27. diffusers/models/normalization.py +108 -2
  28. diffusers/models/resnet.py +15 -699
  29. diffusers/models/transformer_2d.py +2 -2
  30. diffusers/models/unet_2d_condition.py +37 -0
  31. diffusers/models/{unet_kandi3.py → unet_kandinsky3.py} +105 -159
  32. diffusers/models/upsampling.py +454 -0
  33. diffusers/models/uvit_2d.py +471 -0
  34. diffusers/models/vq_model.py +9 -2
  35. diffusers/pipelines/__init__.py +81 -73
  36. diffusers/pipelines/amused/__init__.py +62 -0
  37. diffusers/pipelines/amused/pipeline_amused.py +328 -0
  38. diffusers/pipelines/amused/pipeline_amused_img2img.py +347 -0
  39. diffusers/pipelines/amused/pipeline_amused_inpaint.py +378 -0
  40. diffusers/pipelines/animatediff/pipeline_animatediff.py +38 -10
  41. diffusers/pipelines/auto_pipeline.py +17 -13
  42. diffusers/pipelines/controlnet/pipeline_controlnet.py +27 -10
  43. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +47 -5
  44. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +25 -8
  45. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +4 -6
  46. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +26 -10
  47. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +4 -3
  48. diffusers/pipelines/deprecated/__init__.py +153 -0
  49. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/__init__.py +3 -3
  50. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion.py +91 -18
  51. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion_img2img.py +91 -18
  52. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_output.py +1 -1
  53. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/__init__.py +1 -1
  54. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/mel.py +2 -2
  55. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/pipeline_audio_diffusion.py +4 -4
  56. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/__init__.py +1 -1
  57. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/pipeline_latent_diffusion_uncond.py +4 -4
  58. diffusers/pipelines/{pndm → deprecated/pndm}/__init__.py +1 -1
  59. diffusers/pipelines/{pndm → deprecated/pndm}/pipeline_pndm.py +4 -4
  60. diffusers/pipelines/{repaint → deprecated/repaint}/__init__.py +1 -1
  61. diffusers/pipelines/{repaint → deprecated/repaint}/pipeline_repaint.py +5 -5
  62. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/__init__.py +1 -1
  63. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/pipeline_score_sde_ve.py +4 -4
  64. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/__init__.py +6 -6
  65. diffusers/pipelines/{spectrogram_diffusion/continous_encoder.py → deprecated/spectrogram_diffusion/continuous_encoder.py} +2 -2
  66. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/midi_utils.py +1 -1
  67. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/notes_encoder.py +2 -2
  68. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/pipeline_spectrogram_diffusion.py +7 -7
  69. diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py +55 -0
  70. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_cycle_diffusion.py +16 -11
  71. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_onnx_stable_diffusion_inpaint_legacy.py +6 -6
  72. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_inpaint_legacy.py +11 -11
  73. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_model_editing.py +16 -11
  74. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_paradigms.py +10 -10
  75. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_pix2pix_zero.py +13 -13
  76. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/__init__.py +1 -1
  77. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/pipeline_stochastic_karras_ve.py +4 -4
  78. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/__init__.py +3 -3
  79. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/modeling_text_unet.py +54 -11
  80. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion.py +4 -4
  81. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_dual_guided.py +6 -6
  82. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_image_variation.py +6 -6
  83. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_text_to_image.py +6 -6
  84. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/__init__.py +3 -3
  85. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/pipeline_vq_diffusion.py +5 -5
  86. diffusers/pipelines/kandinsky3/__init__.py +4 -4
  87. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +98 -0
  88. diffusers/pipelines/kandinsky3/{kandinsky3_pipeline.py → pipeline_kandinsky3.py} +172 -35
  89. diffusers/pipelines/kandinsky3/{kandinsky3img2img_pipeline.py → pipeline_kandinsky3_img2img.py} +228 -34
  90. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +46 -5
  91. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +47 -6
  92. diffusers/pipelines/onnx_utils.py +8 -5
  93. diffusers/pipelines/pipeline_flax_utils.py +7 -6
  94. diffusers/pipelines/pipeline_utils.py +30 -29
  95. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +51 -2
  96. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +3 -3
  97. diffusers/pipelines/stable_diffusion/__init__.py +1 -72
  98. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +67 -75
  99. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +92 -8
  100. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +92 -8
  101. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +138 -10
  102. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +57 -7
  103. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +3 -0
  104. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +6 -0
  105. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +5 -0
  106. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +5 -0
  107. diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +48 -0
  108. diffusers/pipelines/{stable_diffusion → stable_diffusion_attend_and_excite}/pipeline_stable_diffusion_attend_and_excite.py +5 -2
  109. diffusers/pipelines/stable_diffusion_diffedit/__init__.py +48 -0
  110. diffusers/pipelines/{stable_diffusion → stable_diffusion_diffedit}/pipeline_stable_diffusion_diffedit.py +2 -3
  111. diffusers/pipelines/stable_diffusion_gligen/__init__.py +50 -0
  112. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen.py +2 -2
  113. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen_text_image.py +3 -3
  114. diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +60 -0
  115. diffusers/pipelines/{stable_diffusion → stable_diffusion_k_diffusion}/pipeline_stable_diffusion_k_diffusion.py +6 -1
  116. diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +48 -0
  117. diffusers/pipelines/{stable_diffusion → stable_diffusion_ldm3d}/pipeline_stable_diffusion_ldm3d.py +50 -7
  118. diffusers/pipelines/stable_diffusion_panorama/__init__.py +48 -0
  119. diffusers/pipelines/{stable_diffusion → stable_diffusion_panorama}/pipeline_stable_diffusion_panorama.py +56 -8
  120. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +58 -6
  121. diffusers/pipelines/stable_diffusion_sag/__init__.py +48 -0
  122. diffusers/pipelines/{stable_diffusion → stable_diffusion_sag}/pipeline_stable_diffusion_sag.py +67 -10
  123. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +97 -15
  124. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +98 -14
  125. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +97 -14
  126. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +7 -5
  127. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +12 -9
  128. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +6 -0
  129. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +5 -0
  130. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +5 -0
  131. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +331 -9
  132. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +468 -9
  133. diffusers/pipelines/unclip/pipeline_unclip.py +2 -1
  134. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +1 -0
  135. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
  136. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +4 -0
  137. diffusers/schedulers/__init__.py +2 -0
  138. diffusers/schedulers/scheduling_amused.py +162 -0
  139. diffusers/schedulers/scheduling_consistency_models.py +2 -0
  140. diffusers/schedulers/scheduling_ddim_inverse.py +1 -4
  141. diffusers/schedulers/scheduling_ddpm.py +46 -0
  142. diffusers/schedulers/scheduling_ddpm_parallel.py +46 -0
  143. diffusers/schedulers/scheduling_deis_multistep.py +13 -1
  144. diffusers/schedulers/scheduling_dpmsolver_multistep.py +13 -1
  145. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +13 -1
  146. diffusers/schedulers/scheduling_dpmsolver_sde.py +2 -0
  147. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +13 -1
  148. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +58 -0
  149. diffusers/schedulers/scheduling_euler_discrete.py +62 -3
  150. diffusers/schedulers/scheduling_heun_discrete.py +2 -0
  151. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +2 -0
  152. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +2 -0
  153. diffusers/schedulers/scheduling_lms_discrete.py +2 -0
  154. diffusers/schedulers/scheduling_unipc_multistep.py +13 -1
  155. diffusers/schedulers/scheduling_utils.py +3 -1
  156. diffusers/schedulers/scheduling_utils_flax.py +3 -1
  157. diffusers/training_utils.py +1 -1
  158. diffusers/utils/__init__.py +0 -2
  159. diffusers/utils/constants.py +2 -5
  160. diffusers/utils/dummy_pt_objects.py +30 -0
  161. diffusers/utils/dummy_torch_and_transformers_objects.py +45 -0
  162. diffusers/utils/dynamic_modules_utils.py +14 -18
  163. diffusers/utils/hub_utils.py +24 -36
  164. diffusers/utils/logging.py +1 -1
  165. diffusers/utils/state_dict_utils.py +8 -0
  166. diffusers/utils/testing_utils.py +199 -1
  167. diffusers/utils/torch_utils.py +3 -3
  168. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/METADATA +54 -53
  169. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/RECORD +174 -155
  170. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/WHEEL +1 -1
  171. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/entry_points.txt +0 -1
  172. /diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/modeling_roberta_series.py +0 -0
  173. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/LICENSE +0 -0
  174. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/top_level.txt +0 -0
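Most of the relocations above move long-deprecated pipelines (Alt Diffusion, Audio Diffusion, PNDM, RePaint, Score SDE VE, Spectrogram Diffusion, Stochastic Karras VE, Versatile Diffusion, VQ Diffusion, and several legacy Stable Diffusion variants) into a new diffusers/pipelines/deprecated/ subpackage, while several Stable Diffusion variants (attend-and-excite, DiffEdit, GLIGEN, k-diffusion, LDM3D, panorama, SAG) get dedicated subpackages. A minimal import sketch of what the move implies, assuming the top-level exports are still wired up by the lazy-import machinery; only the module paths are taken from the file list above, the rest is illustrative:

    # Old fully qualified path (0.24.0): diffusers.pipelines.alt_diffusion
    # New fully qualified path (0.25.0), per the file list above:
    from diffusers.pipelines.deprecated.alt_diffusion import AltDiffusionPipeline

    # The short form is assumed to keep working through diffusers' lazy module exports.
    from diffusers import AltDiffusionPipeline
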
diffusers/pipelines/stable_diffusion_sag/__init__.py

@@ -0,0 +1,48 @@
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     DIFFUSERS_SLOW_IMPORT,
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     get_objects_from_module,
+     is_torch_available,
+     is_transformers_available,
+ )
+
+
+ _dummy_objects = {}
+ _import_structure = {}
+
+
+ try:
+     if not (is_transformers_available() and is_torch_available()):
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     from ...utils import dummy_torch_and_transformers_objects # noqa F403
+
+     _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+ else:
+     _import_structure["pipeline_stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"]
+
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+     try:
+         if not (is_transformers_available() and is_torch_available()):
+             raise OptionalDependencyNotAvailable()
+
+     except OptionalDependencyNotAvailable:
+         from ...utils.dummy_torch_and_transformers_objects import *
+     else:
+         from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(
+         __name__,
+         globals()["__file__"],
+         _import_structure,
+         module_spec=__spec__,
+     )
+
+     for name, value in _dummy_objects.items():
+         setattr(sys.modules[__name__], name, value)
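The new stable_diffusion_sag/__init__.py shown above follows the library's usual lazy-import pattern: the real pipeline module is only imported on first attribute access (or eagerly under TYPE_CHECKING / DIFFUSERS_SLOW_IMPORT), and dummy objects are installed when torch or transformers is missing. A small sketch of the two import routes this sets up, assuming torch and transformers are installed:

    # Both imports are expected to resolve to the same class; the second uses
    # the dedicated subpackage introduced in this release.
    from diffusers import StableDiffusionSAGPipeline
    from diffusers.pipelines.stable_diffusion_sag import StableDiffusionSAGPipeline
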
diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py

@@ -17,11 +17,11 @@ from typing import Any, Callable, Dict, List, Optional, Union
 
  import torch
  import torch.nn.functional as F
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
- from ...image_processor import VaeImageProcessor
- from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
- from ...models import AutoencoderKL, UNet2DConditionModel
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
+ from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+ from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
  from ...models.lora import adjust_lora_scale_text_encoder
  from ...schedulers import KarrasDiffusionSchedulers
  from ...utils import (
@@ -34,8 +34,8 @@ from ...utils import (
  )
  from ...utils.torch_utils import randn_tensor
  from ..pipeline_utils import DiffusionPipeline
- from . import StableDiffusionPipelineOutput
- from .safety_checker import StableDiffusionSafetyChecker
+ from ..stable_diffusion import StableDiffusionPipelineOutput
+ from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 
 
  logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@@ -98,13 +98,17 @@ class CrossAttnStoreProcessor:
 
 
  # Modified to get self-attention guidance scale in this paper (https://arxiv.org/pdf/2210.00939.pdf) as an input
- class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin):
+ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin):
      r"""
      Pipeline for text-to-image generation using Stable Diffusion.
 
      This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
      implemented for all pipelines (downloading, saving, running on a particular device, etc.).
 
+     The pipeline also inherits the following loading methods:
+         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+         - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
+
      Args:
          vae ([`AutoencoderKL`]):
              Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
@@ -126,7 +130,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
      """
 
      model_cpu_offload_seq = "text_encoder->unet->vae"
-     _optional_components = ["safety_checker", "feature_extractor"]
+     _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
      _exclude_from_cpu_offload = ["safety_checker"]
 
      def __init__(
@@ -138,6 +142,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
          scheduler: KarrasDiffusionSchedulers,
          safety_checker: StableDiffusionSafetyChecker,
          feature_extractor: CLIPImageProcessor,
+         image_encoder: Optional[CLIPVisionModelWithProjection] = None,
          requires_safety_checker: bool = True,
      ):
          super().__init__()
@@ -150,6 +155,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
              scheduler=scheduler,
              safety_checker=safety_checker,
              feature_extractor=feature_extractor,
+             image_encoder=image_encoder,
          )
          self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
          self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
@@ -386,6 +392,31 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
 
          return prompt_embeds, negative_prompt_embeds
 
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+     def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+         dtype = next(self.image_encoder.parameters()).dtype
+
+         if not isinstance(image, torch.Tensor):
+             image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+         image = image.to(device=device, dtype=dtype)
+         if output_hidden_states:
+             image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+             image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_enc_hidden_states = self.image_encoder(
+                 torch.zeros_like(image), output_hidden_states=True
+             ).hidden_states[-2]
+             uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                 num_images_per_prompt, dim=0
+             )
+             return image_enc_hidden_states, uncond_image_enc_hidden_states
+         else:
+             image_embeds = self.image_encoder(image).image_embeds
+             image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_embeds = torch.zeros_like(image_embeds)
+
+             return image_embeds, uncond_image_embeds
+
      # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
      def run_safety_checker(self, image, device, dtype):
          if self.safety_checker is None:
@@ -519,6 +550,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
          latents: Optional[torch.FloatTensor] = None,
          prompt_embeds: Optional[torch.FloatTensor] = None,
          negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+         ip_adapter_image: Optional[PipelineImageInput] = None,
          output_type: Optional[str] = "pil",
          return_dict: bool = True,
          callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -565,6 +597,8 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -618,6 +652,14 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
          # `sag_scale = 0` means no self-attention guidance
          do_self_attention_guidance = sag_scale > 0.0
 
+         if ip_adapter_image is not None:
+             output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+             image_embeds, negative_image_embeds = self.encode_image(
+                 ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+             )
+             if do_classifier_free_guidance:
+                 image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
          # 3. Encode input prompt
          prompt_embeds, negative_prompt_embeds = self.encode_prompt(
              prompt,
@@ -655,6 +697,10 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
          # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
          extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
 
+         # 6.1 Add image embeds for IP-Adapter
+         added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+         added_uncond_kwargs = {"image_embeds": negative_image_embeds} if ip_adapter_image is not None else None
+
          # 7. Denoising loop
          store_processor = CrossAttnStoreProcessor()
          self.unet.mid_block.attentions[0].transformer_blocks[0].attn1.processor = store_processor
@@ -680,6 +726,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
                      t,
                      encoder_hidden_states=prompt_embeds,
                      cross_attention_kwargs=cross_attention_kwargs,
+                     added_cond_kwargs=added_cond_kwargs,
                  ).sample
 
                  # perform guidance
@@ -703,7 +750,12 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
                          )
                          uncond_emb, _ = prompt_embeds.chunk(2)
                          # forward and give guidance
-                         degraded_pred = self.unet(degraded_latents, t, encoder_hidden_states=uncond_emb).sample
+                         degraded_pred = self.unet(
+                             degraded_latents,
+                             t,
+                             encoder_hidden_states=uncond_emb,
+                             added_cond_kwargs=added_uncond_kwargs,
+                         ).sample
                          noise_pred += sag_scale * (noise_pred_uncond - degraded_pred)
                      else:
                          # DDIM-like prediction of x0
@@ -715,7 +767,12 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
                              pred_x0, cond_attn, map_size, t, self.pred_epsilon(latents, noise_pred, t)
                          )
                          # forward and give guidance
-                         degraded_pred = self.unet(degraded_latents, t, encoder_hidden_states=prompt_embeds).sample
+                         degraded_pred = self.unet(
+                             degraded_latents,
+                             t,
+                             encoder_hidden_states=prompt_embeds,
+                             added_cond_kwargs=added_cond_kwargs,
+                         ).sample
                          noise_pred += sag_scale * (noise_pred - degraded_pred)
 
              # compute the previous noisy sample x_t -> x_t-1
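Taken together, the pipeline_stable_diffusion_sag.py changes add IP-Adapter support to self-attention guidance: the pipeline gains an optional image_encoder and the IPAdapterMixin, encode_image can return either pooled embeddings or penultimate hidden states, and the resulting image_embeds are threaded through added_cond_kwargs in both the conditional and the degraded (SAG) UNet passes. A rough usage sketch; the checkpoint, adapter repository, weight file, and image URL are placeholders rather than values confirmed by this diff:

    import torch
    from diffusers import StableDiffusionSAGPipeline
    from diffusers.utils import load_image

    # Placeholder model and adapter ids; any SD 1.5-style base plus a matching IP-Adapter should work.
    pipe = StableDiffusionSAGPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

    reference = load_image("https://example.com/reference.png")  # placeholder URL
    image = pipe(
        "a fantasy landscape, detailed, vivid colors",
        ip_adapter_image=reference,  # new argument in 0.25.0
        sag_scale=0.75,              # self-attention guidance strength
    ).images[0]
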
diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py

@@ -31,9 +31,10 @@ from ...loaders import (
      StableDiffusionXLLoraLoaderMixin,
      TextualInversionLoaderMixin,
  )
- from ...models import AutoencoderKL, UNet2DConditionModel
+ from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
  from ...models.attention_processor import (
      AttnProcessor2_0,
+     FusedAttnProcessor2_0,
      LoRAAttnProcessor2_0,
      LoRAXFormersAttnProcessor,
      XFormersAttnProcessor,
@@ -158,12 +159,12 @@ class StableDiffusionXLPipeline(
      This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
      library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
 
-     In addition the pipeline inherits the following loading methods:
-         - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]
-         - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
-
-     as well as the following saving methods:
-         - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]
+     The pipeline also inherits the following loading methods:
+         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+         - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
+         - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+         - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+         - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
 
      Args:
          vae ([`AutoencoderKL`]):
@@ -197,7 +198,7 @@ class StableDiffusionXLPipeline(
          watermarker will be used.
      """
 
-     model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
+     model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
      _optional_components = [
          "tokenizer",
          "tokenizer_2",
@@ -524,18 +525,29 @@ class StableDiffusionXLPipeline(
          return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
 
      # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
-     def encode_image(self, image, device, num_images_per_prompt):
+     def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
          dtype = next(self.image_encoder.parameters()).dtype
 
          if not isinstance(image, torch.Tensor):
              image = self.feature_extractor(image, return_tensors="pt").pixel_values
 
          image = image.to(device=device, dtype=dtype)
-         image_embeds = self.image_encoder(image).image_embeds
-         image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+         if output_hidden_states:
+             image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+             image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_enc_hidden_states = self.image_encoder(
+                 torch.zeros_like(image), output_hidden_states=True
+             ).hidden_states[-2]
+             uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                 num_images_per_prompt, dim=0
+             )
+             return image_enc_hidden_states, uncond_image_enc_hidden_states
+         else:
+             image_embeds = self.image_encoder(image).image_embeds
+             image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_embeds = torch.zeros_like(image_embeds)
 
-         uncond_image_embeds = torch.zeros_like(image_embeds)
-         return image_embeds, uncond_image_embeds
+             return image_embeds, uncond_image_embeds
 
      # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
      def prepare_extra_step_kwargs(self, generator, eta):
@@ -670,7 +682,6 @@ class StableDiffusionXLPipeline(
          add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
          return add_time_ids
 
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
      def upcast_vae(self):
          dtype = self.vae.dtype
          self.vae.to(dtype=torch.float32)
@@ -681,6 +692,7 @@ class StableDiffusionXLPipeline(
                  XFormersAttnProcessor,
                  LoRAXFormersAttnProcessor,
                  LoRAAttnProcessor2_0,
+                 FusedAttnProcessor2_0,
              ),
          )
          # if xformers or torch_2_0 is used attention block does not need
@@ -718,6 +730,65 @@ class StableDiffusionXLPipeline(
          """Disables the FreeU mechanism if enabled."""
          self.unet.disable_freeu()
 
+     def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
+         """
+         Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
+         key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
+
+         <Tip warning={true}>
+
+         This API is 🧪 experimental.
+
+         </Tip>
+
+         Args:
+             unet (`bool`, defaults to `True`): To apply fusion on the UNet.
+             vae (`bool`, defaults to `True`): To apply fusion on the VAE.
+         """
+         self.fusing_unet = False
+         self.fusing_vae = False
+
+         if unet:
+             self.fusing_unet = True
+             self.unet.fuse_qkv_projections()
+             self.unet.set_attn_processor(FusedAttnProcessor2_0())
+
+         if vae:
+             if not isinstance(self.vae, AutoencoderKL):
+                 raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
+
+             self.fusing_vae = True
+             self.vae.fuse_qkv_projections()
+             self.vae.set_attn_processor(FusedAttnProcessor2_0())
+
+     def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
+         """Disable QKV projection fusion if enabled.
+
+         <Tip warning={true}>
+
+         This API is 🧪 experimental.
+
+         </Tip>
+
+         Args:
+             unet (`bool`, defaults to `True`): To apply fusion on the UNet.
+             vae (`bool`, defaults to `True`): To apply fusion on the VAE.
+
+         """
+         if unet:
+             if not self.fusing_unet:
+                 logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
+             else:
+                 self.unet.unfuse_qkv_projections()
+                 self.fusing_unet = False
+
+         if vae:
+             if not self.fusing_vae:
+                 logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
+             else:
+                 self.vae.unfuse_qkv_projections()
+                 self.fusing_vae = False
+
      # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
      def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
          """
@@ -778,6 +849,10 @@ class StableDiffusionXLPipeline(
      def num_timesteps(self):
          return self._num_timesteps
 
+     @property
+     def interrupt(self):
+         return self._interrupt
+
      @torch.no_grad()
      @replace_example_docstring(EXAMPLE_DOC_STRING)
      def __call__(
@@ -996,6 +1071,7 @@ class StableDiffusionXLPipeline(
          self._clip_skip = clip_skip
          self._cross_attention_kwargs = cross_attention_kwargs
          self._denoising_end = denoising_end
+         self._interrupt = False
 
          # 2. Define call parameters
          if prompt is not None and isinstance(prompt, str):
@@ -1087,7 +1163,10 @@ class StableDiffusionXLPipeline(
          add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
 
          if ip_adapter_image is not None:
-             image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
+             output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+             image_embeds, negative_image_embeds = self.encode_image(
+                 ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+             )
              if self.do_classifier_free_guidance:
                  image_embeds = torch.cat([negative_image_embeds, image_embeds])
                  image_embeds = image_embeds.to(device)
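With the change above, the SDXL text-to-image pipeline decides internally whether to feed the IP-Adapter pooled image embeddings or penultimate hidden states, depending on whether the UNet's encoder_hid_proj is a plain ImageProjection; the caller only passes ip_adapter_image. A rough usage sketch, with the checkpoint, adapter repository, weight file, and image URL as placeholders:

    import torch
    from diffusers import StableDiffusionXLPipeline
    from diffusers.utils import load_image

    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16  # placeholder checkpoint
    ).to("cuda")
    pipe.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")  # placeholder adapter

    style = load_image("https://example.com/style_reference.png")  # placeholder URL
    image = pipe("a cat wearing a spacesuit", ip_adapter_image=style).images[0]
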
@@ -1122,6 +1201,9 @@ class StableDiffusionXLPipeline(
          self._num_timesteps = len(timesteps)
          with self.progress_bar(total=num_inference_steps) as progress_bar:
              for i, t in enumerate(timesteps):
+                 if self.interrupt:
+                     continue
+
                  # expand the latents if we are doing classifier free guidance
                  latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
 
diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py

@@ -32,9 +32,10 @@ from ...loaders import (
      StableDiffusionXLLoraLoaderMixin,
      TextualInversionLoaderMixin,
  )
- from ...models import AutoencoderKL, UNet2DConditionModel
+ from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
  from ...models.attention_processor import (
      AttnProcessor2_0,
+     FusedAttnProcessor2_0,
      LoRAAttnProcessor2_0,
      LoRAXFormersAttnProcessor,
      XFormersAttnProcessor,
@@ -176,12 +177,12 @@ class StableDiffusionXLImg2ImgPipeline(
      This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
      library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
 
-     In addition the pipeline inherits the following loading methods:
-         - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]
-         - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
-
-     as well as the following saving methods:
-         - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]
+     The pipeline also inherits the following loading methods:
+         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+         - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
+         - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+         - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+         - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
 
      Args:
          vae ([`AutoencoderKL`]):
@@ -218,7 +219,7 @@ class StableDiffusionXLImg2ImgPipeline(
          watermarker will be used.
      """
 
-     model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
+     model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
      _optional_components = [
          "tokenizer",
          "tokenizer_2",
@@ -741,18 +742,29 @@ class StableDiffusionXLImg2ImgPipeline(
          return latents
 
      # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
-     def encode_image(self, image, device, num_images_per_prompt):
+     def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
          dtype = next(self.image_encoder.parameters()).dtype
 
          if not isinstance(image, torch.Tensor):
              image = self.feature_extractor(image, return_tensors="pt").pixel_values
 
          image = image.to(device=device, dtype=dtype)
-         image_embeds = self.image_encoder(image).image_embeds
-         image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+         if output_hidden_states:
+             image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+             image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_enc_hidden_states = self.image_encoder(
+                 torch.zeros_like(image), output_hidden_states=True
+             ).hidden_states[-2]
+             uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                 num_images_per_prompt, dim=0
+             )
+             return image_enc_hidden_states, uncond_image_enc_hidden_states
+         else:
+             image_embeds = self.image_encoder(image).image_embeds
+             image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_embeds = torch.zeros_like(image_embeds)
 
-         uncond_image_embeds = torch.zeros_like(image_embeds)
-         return image_embeds, uncond_image_embeds
+             return image_embeds, uncond_image_embeds
 
      def _get_add_time_ids(
          self,
@@ -853,6 +865,67 @@ class StableDiffusionXLImg2ImgPipeline(
          """Disables the FreeU mechanism if enabled."""
          self.unet.disable_freeu()
 
+     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
+     def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
+         """
+         Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
+         key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
+
+         <Tip warning={true}>
+
+         This API is 🧪 experimental.
+
+         </Tip>
+
+         Args:
+             unet (`bool`, defaults to `True`): To apply fusion on the UNet.
+             vae (`bool`, defaults to `True`): To apply fusion on the VAE.
+         """
+         self.fusing_unet = False
+         self.fusing_vae = False
+
+         if unet:
+             self.fusing_unet = True
+             self.unet.fuse_qkv_projections()
+             self.unet.set_attn_processor(FusedAttnProcessor2_0())
+
+         if vae:
+             if not isinstance(self.vae, AutoencoderKL):
+                 raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
+
+             self.fusing_vae = True
+             self.vae.fuse_qkv_projections()
+             self.vae.set_attn_processor(FusedAttnProcessor2_0())
+
+     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
+     def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
+         """Disable QKV projection fusion if enabled.
+
+         <Tip warning={true}>
+
+         This API is 🧪 experimental.
+
+         </Tip>
+
+         Args:
+             unet (`bool`, defaults to `True`): To apply fusion on the UNet.
+             vae (`bool`, defaults to `True`): To apply fusion on the VAE.
+
+         """
+         if unet:
+             if not self.fusing_unet:
+                 logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
+             else:
+                 self.unet.unfuse_qkv_projections()
+                 self.fusing_unet = False
+
+         if vae:
+             if not self.fusing_vae:
+                 logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
+             else:
+                 self.vae.unfuse_qkv_projections()
+                 self.fusing_vae = False
+
      # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
      def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
          """
@@ -917,6 +990,10 @@ class StableDiffusionXLImg2ImgPipeline(
      def num_timesteps(self):
          return self._num_timesteps
 
+     @property
+     def interrupt(self):
+         return self._interrupt
+
      @torch.no_grad()
      @replace_example_docstring(EXAMPLE_DOC_STRING)
      def __call__(
@@ -1148,6 +1225,7 @@ class StableDiffusionXLImg2ImgPipeline(
          self._cross_attention_kwargs = cross_attention_kwargs
          self._denoising_end = denoising_end
          self._denoising_start = denoising_start
+         self._interrupt = False
 
          # 2. Define call parameters
          if prompt is not None and isinstance(prompt, str):
@@ -1259,7 +1337,10 @@ class StableDiffusionXLImg2ImgPipeline(
          add_time_ids = add_time_ids.to(device)
 
          if ip_adapter_image is not None:
-             image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
+             output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+             image_embeds, negative_image_embeds = self.encode_image(
+                 ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+             )
              if self.do_classifier_free_guidance:
                  image_embeds = torch.cat([negative_image_embeds, image_embeds])
@@ -1300,6 +1381,9 @@ class StableDiffusionXLImg2ImgPipeline(
          self._num_timesteps = len(timesteps)
          with self.progress_bar(total=num_inference_steps) as progress_bar:
              for i, t in enumerate(timesteps):
+                 if self.interrupt:
+                     continue
+
                  # expand the latents if we are doing classifier free guidance
                  latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
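The new interrupt property, mirrored in both SDXL pipelines above, lets a callback stop the denoising loop early: once the flag is set, the remaining steps are skipped via `if self.interrupt: continue`. A rough sketch of cancelling a run from a step-end callback, assuming the private _interrupt attribute is the intended switch (this diff only shows the read side) and with the checkpoint id as a placeholder:

    import torch
    from diffusers import StableDiffusionXLPipeline

    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16  # placeholder checkpoint
    ).to("cuda")

    def stop_after_ten_steps(pipeline, step, timestep, callback_kwargs):
        # Setting the flag makes the loop's `if self.interrupt: continue` skip all remaining steps.
        if step >= 10:
            pipeline._interrupt = True
        return callback_kwargs

    image = pipe(
        "a watercolor painting of a lighthouse",
        num_inference_steps=50,
        callback_on_step_end=stop_after_ten_steps,
    ).images[0]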