diffusers 0.23.1__py3-none-any.whl → 0.25.0__py3-none-any.whl

Files changed (238)
  1. diffusers/__init__.py +26 -2
  2. diffusers/commands/fp16_safetensors.py +10 -11
  3. diffusers/configuration_utils.py +13 -8
  4. diffusers/dependency_versions_check.py +0 -1
  5. diffusers/dependency_versions_table.py +5 -5
  6. diffusers/experimental/rl/value_guided_sampling.py +1 -1
  7. diffusers/image_processor.py +463 -51
  8. diffusers/loaders/__init__.py +82 -0
  9. diffusers/loaders/ip_adapter.py +159 -0
  10. diffusers/loaders/lora.py +1553 -0
  11. diffusers/loaders/lora_conversion_utils.py +284 -0
  12. diffusers/loaders/single_file.py +637 -0
  13. diffusers/loaders/textual_inversion.py +455 -0
  14. diffusers/loaders/unet.py +828 -0
  15. diffusers/loaders/utils.py +59 -0
  16. diffusers/models/__init__.py +26 -9
  17. diffusers/models/activations.py +9 -6
  18. diffusers/models/attention.py +301 -29
  19. diffusers/models/attention_flax.py +9 -1
  20. diffusers/models/attention_processor.py +378 -6
  21. diffusers/models/autoencoders/__init__.py +5 -0
  22. diffusers/models/{autoencoder_asym_kl.py → autoencoders/autoencoder_asym_kl.py} +17 -12
  23. diffusers/models/{autoencoder_kl.py → autoencoders/autoencoder_kl.py} +47 -23
  24. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +402 -0
  25. diffusers/models/{autoencoder_tiny.py → autoencoders/autoencoder_tiny.py} +24 -28
  26. diffusers/models/{consistency_decoder_vae.py → autoencoders/consistency_decoder_vae.py} +51 -44
  27. diffusers/models/{vae.py → autoencoders/vae.py} +71 -17
  28. diffusers/models/controlnet.py +59 -39
  29. diffusers/models/controlnet_flax.py +19 -18
  30. diffusers/models/downsampling.py +338 -0
  31. diffusers/models/embeddings.py +112 -29
  32. diffusers/models/embeddings_flax.py +2 -0
  33. diffusers/models/lora.py +131 -1
  34. diffusers/models/modeling_flax_utils.py +14 -8
  35. diffusers/models/modeling_outputs.py +17 -0
  36. diffusers/models/modeling_utils.py +37 -29
  37. diffusers/models/normalization.py +110 -4
  38. diffusers/models/resnet.py +299 -652
  39. diffusers/models/transformer_2d.py +22 -5
  40. diffusers/models/transformer_temporal.py +183 -1
  41. diffusers/models/unet_2d_blocks_flax.py +5 -0
  42. diffusers/models/unet_2d_condition.py +46 -0
  43. diffusers/models/unet_2d_condition_flax.py +13 -13
  44. diffusers/models/unet_3d_blocks.py +957 -173
  45. diffusers/models/unet_3d_condition.py +16 -8
  46. diffusers/models/unet_kandinsky3.py +535 -0
  47. diffusers/models/unet_motion_model.py +48 -33
  48. diffusers/models/unet_spatio_temporal_condition.py +489 -0
  49. diffusers/models/upsampling.py +454 -0
  50. diffusers/models/uvit_2d.py +471 -0
  51. diffusers/models/vae_flax.py +7 -0
  52. diffusers/models/vq_model.py +12 -3
  53. diffusers/optimization.py +16 -9
  54. diffusers/pipelines/__init__.py +137 -76
  55. diffusers/pipelines/amused/__init__.py +62 -0
  56. diffusers/pipelines/amused/pipeline_amused.py +328 -0
  57. diffusers/pipelines/amused/pipeline_amused_img2img.py +347 -0
  58. diffusers/pipelines/amused/pipeline_amused_inpaint.py +378 -0
  59. diffusers/pipelines/animatediff/pipeline_animatediff.py +66 -8
  60. diffusers/pipelines/audioldm/pipeline_audioldm.py +1 -0
  61. diffusers/pipelines/auto_pipeline.py +23 -13
  62. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -0
  63. diffusers/pipelines/controlnet/pipeline_controlnet.py +238 -35
  64. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +148 -37
  65. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +155 -41
  66. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +123 -43
  67. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +216 -39
  68. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +106 -34
  69. diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +1 -0
  70. diffusers/pipelines/ddim/pipeline_ddim.py +1 -0
  71. diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -0
  72. diffusers/pipelines/deepfloyd_if/pipeline_if.py +13 -1
  73. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +13 -1
  74. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +13 -1
  75. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +13 -1
  76. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +13 -1
  77. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +13 -1
  78. diffusers/pipelines/deprecated/__init__.py +153 -0
  79. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/__init__.py +3 -3
  80. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion.py +177 -34
  81. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion_img2img.py +182 -37
  82. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_output.py +1 -1
  83. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/__init__.py +1 -1
  84. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/mel.py +2 -2
  85. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/pipeline_audio_diffusion.py +4 -4
  86. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/__init__.py +1 -1
  87. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/pipeline_latent_diffusion_uncond.py +4 -4
  88. diffusers/pipelines/{pndm → deprecated/pndm}/__init__.py +1 -1
  89. diffusers/pipelines/{pndm → deprecated/pndm}/pipeline_pndm.py +4 -4
  90. diffusers/pipelines/{repaint → deprecated/repaint}/__init__.py +1 -1
  91. diffusers/pipelines/{repaint → deprecated/repaint}/pipeline_repaint.py +5 -5
  92. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/__init__.py +1 -1
  93. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/pipeline_score_sde_ve.py +5 -4
  94. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/__init__.py +6 -6
  95. diffusers/pipelines/{spectrogram_diffusion/continous_encoder.py → deprecated/spectrogram_diffusion/continuous_encoder.py} +2 -2
  96. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/midi_utils.py +1 -1
  97. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/notes_encoder.py +2 -2
  98. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/pipeline_spectrogram_diffusion.py +8 -7
  99. diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py +55 -0
  100. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_cycle_diffusion.py +34 -13
  101. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_onnx_stable_diffusion_inpaint_legacy.py +7 -6
  102. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_inpaint_legacy.py +12 -11
  103. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_model_editing.py +17 -11
  104. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_paradigms.py +11 -10
  105. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_pix2pix_zero.py +14 -13
  106. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/__init__.py +1 -1
  107. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/pipeline_stochastic_karras_ve.py +4 -4
  108. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/__init__.py +3 -3
  109. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/modeling_text_unet.py +83 -51
  110. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion.py +4 -4
  111. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_dual_guided.py +7 -6
  112. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_image_variation.py +7 -6
  113. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_text_to_image.py +7 -6
  114. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/__init__.py +3 -3
  115. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/pipeline_vq_diffusion.py +5 -5
  116. diffusers/pipelines/dit/pipeline_dit.py +1 -0
  117. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +1 -1
  118. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +3 -3
  119. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +1 -1
  120. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +1 -1
  121. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +1 -1
  122. diffusers/pipelines/kandinsky3/__init__.py +49 -0
  123. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +98 -0
  124. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +589 -0
  125. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +654 -0
  126. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +111 -11
  127. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +102 -9
  128. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -0
  129. diffusers/pipelines/musicldm/pipeline_musicldm.py +1 -1
  130. diffusers/pipelines/onnx_utils.py +8 -5
  131. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +7 -2
  132. diffusers/pipelines/pipeline_flax_utils.py +11 -8
  133. diffusers/pipelines/pipeline_utils.py +63 -42
  134. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +247 -38
  135. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +3 -3
  136. diffusers/pipelines/stable_diffusion/__init__.py +37 -65
  137. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +75 -78
  138. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +2 -2
  139. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +2 -4
  140. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -0
  141. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +174 -11
  142. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +8 -3
  143. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +1 -0
  144. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +178 -11
  145. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +224 -13
  146. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +74 -20
  147. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +4 -0
  148. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +7 -0
  149. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +5 -0
  150. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +5 -0
  151. diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +48 -0
  152. diffusers/pipelines/{stable_diffusion → stable_diffusion_attend_and_excite}/pipeline_stable_diffusion_attend_and_excite.py +6 -2
  153. diffusers/pipelines/stable_diffusion_diffedit/__init__.py +48 -0
  154. diffusers/pipelines/{stable_diffusion → stable_diffusion_diffedit}/pipeline_stable_diffusion_diffedit.py +3 -3
  155. diffusers/pipelines/stable_diffusion_gligen/__init__.py +50 -0
  156. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen.py +3 -2
  157. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen_text_image.py +4 -3
  158. diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +60 -0
  159. diffusers/pipelines/{stable_diffusion → stable_diffusion_k_diffusion}/pipeline_stable_diffusion_k_diffusion.py +7 -1
  160. diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +48 -0
  161. diffusers/pipelines/{stable_diffusion → stable_diffusion_ldm3d}/pipeline_stable_diffusion_ldm3d.py +51 -7
  162. diffusers/pipelines/stable_diffusion_panorama/__init__.py +48 -0
  163. diffusers/pipelines/{stable_diffusion → stable_diffusion_panorama}/pipeline_stable_diffusion_panorama.py +57 -8
  164. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +58 -6
  165. diffusers/pipelines/stable_diffusion_sag/__init__.py +48 -0
  166. diffusers/pipelines/{stable_diffusion → stable_diffusion_sag}/pipeline_stable_diffusion_sag.py +68 -10
  167. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +194 -17
  168. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +205 -16
  169. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +206 -17
  170. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +23 -17
  171. diffusers/pipelines/stable_video_diffusion/__init__.py +58 -0
  172. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +652 -0
  173. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +108 -12
  174. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +115 -14
  175. diffusers/pipelines/text_to_video_synthesis/__init__.py +2 -0
  176. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +6 -0
  177. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +23 -3
  178. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +334 -10
  179. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +1331 -0
  180. diffusers/pipelines/unclip/pipeline_unclip.py +2 -1
  181. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +1 -0
  182. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
  183. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +14 -4
  184. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +9 -5
  185. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +1 -1
  186. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +2 -2
  187. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +5 -1
  188. diffusers/schedulers/__init__.py +4 -4
  189. diffusers/schedulers/deprecated/__init__.py +50 -0
  190. diffusers/schedulers/{scheduling_karras_ve.py → deprecated/scheduling_karras_ve.py} +4 -4
  191. diffusers/schedulers/{scheduling_sde_vp.py → deprecated/scheduling_sde_vp.py} +4 -6
  192. diffusers/schedulers/scheduling_amused.py +162 -0
  193. diffusers/schedulers/scheduling_consistency_models.py +2 -0
  194. diffusers/schedulers/scheduling_ddim.py +1 -3
  195. diffusers/schedulers/scheduling_ddim_inverse.py +2 -7
  196. diffusers/schedulers/scheduling_ddim_parallel.py +1 -3
  197. diffusers/schedulers/scheduling_ddpm.py +47 -3
  198. diffusers/schedulers/scheduling_ddpm_parallel.py +47 -3
  199. diffusers/schedulers/scheduling_deis_multistep.py +28 -6
  200. diffusers/schedulers/scheduling_dpmsolver_multistep.py +28 -6
  201. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +28 -6
  202. diffusers/schedulers/scheduling_dpmsolver_sde.py +3 -3
  203. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +28 -6
  204. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +59 -3
  205. diffusers/schedulers/scheduling_euler_discrete.py +102 -16
  206. diffusers/schedulers/scheduling_heun_discrete.py +17 -5
  207. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +17 -5
  208. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +17 -5
  209. diffusers/schedulers/scheduling_lcm.py +123 -29
  210. diffusers/schedulers/scheduling_lms_discrete.py +3 -3
  211. diffusers/schedulers/scheduling_pndm.py +1 -3
  212. diffusers/schedulers/scheduling_repaint.py +1 -3
  213. diffusers/schedulers/scheduling_unipc_multistep.py +28 -6
  214. diffusers/schedulers/scheduling_utils.py +3 -1
  215. diffusers/schedulers/scheduling_utils_flax.py +3 -1
  216. diffusers/training_utils.py +1 -1
  217. diffusers/utils/__init__.py +1 -2
  218. diffusers/utils/constants.py +10 -12
  219. diffusers/utils/dummy_pt_objects.py +75 -0
  220. diffusers/utils/dummy_torch_and_transformers_objects.py +105 -0
  221. diffusers/utils/dynamic_modules_utils.py +18 -22
  222. diffusers/utils/export_utils.py +8 -3
  223. diffusers/utils/hub_utils.py +24 -36
  224. diffusers/utils/logging.py +11 -11
  225. diffusers/utils/outputs.py +5 -5
  226. diffusers/utils/peft_utils.py +88 -44
  227. diffusers/utils/state_dict_utils.py +8 -0
  228. diffusers/utils/testing_utils.py +199 -1
  229. diffusers/utils/torch_utils.py +4 -4
  230. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/METADATA +86 -69
  231. diffusers-0.25.0.dist-info/RECORD +360 -0
  232. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/WHEEL +1 -1
  233. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/entry_points.txt +0 -1
  234. diffusers/loaders.py +0 -3336
  235. diffusers-0.23.1.dist-info/RECORD +0 -323
  236. /diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/modeling_roberta_series.py +0 -0
  237. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/LICENSE +0 -0
  238. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/top_level.txt +0 -0
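The most significant structural change above is the removal of the monolithic diffusers/loaders.py (entry 234, -3336 lines) in favour of the new diffusers/loaders/ package (entries 8-15). A quick sketch of what this means for imports, assuming the new loaders/__init__.py (+82 lines) re-exports the public mixins at the package root:

```py
# Paths that worked against 0.23.1 should keep resolving in 0.25.0,
# now served from the loaders/ package instead of one large module.
from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin

# New with the split: IP-Adapter loading support (see the pipeline diffs below).
from diffusers.loaders import IPAdapterMixin
```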
diffusers/pipelines/{stable_diffusion → stable_diffusion_panorama}/pipeline_stable_diffusion_panorama.py

@@ -16,11 +16,11 @@ import inspect
 from typing import Any, Callable, Dict, List, Optional, Union
 
 import torch
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
-from ...image_processor import VaeImageProcessor
-from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
-from ...models import AutoencoderKL, UNet2DConditionModel
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import DDIMScheduler
 from ...utils import (
@@ -33,8 +33,8 @@ from ...utils import (
 )
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline
-from . import StableDiffusionPipelineOutput
-from .safety_checker import StableDiffusionSafetyChecker
+from ..stable_diffusion import StableDiffusionPipelineOutput
+from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
@@ -59,13 +59,19 @@ EXAMPLE_DOC_STRING = """
 """
 
 
-class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
+class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin):
     r"""
     Pipeline for text-to-image generation using MultiDiffusion.
 
     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
     implemented for all pipelines (downloading, saving, running on a particular device, etc.).
 
+    The pipeline also inherits the following loading methods:
+        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
+
     Args:
         vae ([`AutoencoderKL`]):
             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
@@ -85,8 +91,9 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
-    _optional_components = ["safety_checker", "feature_extractor"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
     _exclude_from_cpu_offload = ["safety_checker"]
 
     def __init__(
@@ -98,6 +105,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         scheduler: DDIMScheduler,
         safety_checker: StableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: Optional[CLIPVisionModelWithProjection] = None,
         requires_safety_checker: bool = True,
     ):
         super().__init__()
@@ -126,6 +134,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
@@ -362,6 +371,31 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
 
         return prompt_embeds, negative_prompt_embeds
 
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        if output_hidden_states:
+            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_enc_hidden_states = self.image_encoder(
+                torch.zeros_like(image), output_hidden_states=True
+            ).hidden_states[-2]
+            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                num_images_per_prompt, dim=0
+            )
+            return image_enc_hidden_states, uncond_image_enc_hidden_states
+        else:
+            image_embeds = self.image_encoder(image).image_embeds
+            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_embeds = torch.zeros_like(image_embeds)
+
+            return image_embeds, uncond_image_embeds
+
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
     def run_safety_checker(self, image, device, dtype):
         if self.safety_checker is None:
@@ -528,6 +562,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         latents: Optional[torch.FloatTensor] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -577,6 +612,8 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -631,6 +668,14 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         # corresponds to doing no classifier free guidance.
         do_classifier_free_guidance = guidance_scale > 1.0
 
+        if ip_adapter_image is not None:
+            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+            image_embeds, negative_image_embeds = self.encode_image(
+                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+            )
+            if do_classifier_free_guidance:
+                image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
         # 3. Encode input prompt
         text_encoder_lora_scale = (
             cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
@@ -680,6 +725,9 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
 
+        # 7.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+
         # 8. Denoising loop
         # Each denoising step also includes refinement of the latents with respect to the
         # views.
@@ -742,6 +790,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
                         t,
                         encoder_hidden_states=prompt_embeds_input,
                         cross_attention_kwargs=cross_attention_kwargs,
+                        added_cond_kwargs=added_cond_kwargs,
                     ).sample
 
                 # perform guidance
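The hunks above thread IP-Adapter support through the panorama pipeline: an optional CLIP image encoder is registered as a component, encode_image builds conditional and unconditional image embeddings, and they reach the UNet through added_cond_kwargs. A minimal usage sketch; the checkpoint and weight names ("runwayml/stable-diffusion-v1-5", "h94/IP-Adapter", "ip-adapter_sd15.bin") are the community-standard ones and an assumption here, not something this diff pins down:

```py
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline
from diffusers.utils import load_image

pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# load_ip_adapter comes from the new IPAdapterMixin base class
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

style_image = load_image("style_reference.png")  # hypothetical local file

image = pipe(
    prompt="a photo of the dolomites",
    ip_adapter_image=style_image,  # the new __call__ argument added above
).images[0]
```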
diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py

@@ -5,10 +5,12 @@ from typing import Callable, List, Optional, Union
 import numpy as np
 import torch
 from packaging import version
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
 from ...configuration_utils import FrozenDict
-from ...models import AutoencoderKL, UNet2DConditionModel
+from ...image_processor import PipelineImageInput
+from ...loaders import IPAdapterMixin
+from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from ...schedulers import KarrasDiffusionSchedulers
 from ...utils import deprecate, logging
 from ...utils.torch_utils import randn_tensor
@@ -20,13 +22,16 @@ from .safety_checker import SafeStableDiffusionSafetyChecker
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
 
 
-class StableDiffusionPipelineSafe(DiffusionPipeline):
+class StableDiffusionPipelineSafe(DiffusionPipeline, IPAdapterMixin):
     r"""
     Pipeline based on the [`StableDiffusionPipeline`] for text-to-image generation using Safe Latent Diffusion.
 
     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
     implemented for all pipelines (downloading, saving, running on a particular device, etc.).
 
+    The pipeline also inherits the following loading methods:
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
+
     Args:
         vae ([`AutoencoderKL`]):
             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
@@ -48,7 +53,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
     """
 
     model_cpu_offload_seq = "text_encoder->unet->vae"
-    _optional_components = ["safety_checker", "feature_extractor"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
 
     def __init__(
         self,
@@ -59,6 +64,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: SafeStableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: Optional[CLIPVisionModelWithProjection] = None,
         requires_safety_checker: bool = True,
    ):
         super().__init__()
@@ -140,6 +146,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
         self._safety_text_concept = safety_concept
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
@@ -467,6 +474,31 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
             noise_guidance = noise_guidance - noise_guidance_safety
         return noise_guidance, safety_momentum
 
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        if output_hidden_states:
+            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_enc_hidden_states = self.image_encoder(
+                torch.zeros_like(image), output_hidden_states=True
+            ).hidden_states[-2]
+            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                num_images_per_prompt, dim=0
+            )
+            return image_enc_hidden_states, uncond_image_enc_hidden_states
+        else:
+            image_embeds = self.image_encoder(image).image_embeds
+            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_embeds = torch.zeros_like(image_embeds)
+
+            return image_embeds, uncond_image_embeds
+
     @torch.no_grad()
     def __call__(
         self,
@@ -480,6 +512,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         latents: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -521,6 +554,8 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                 tensor is generated by sampling using the supplied random `generator`.
+            ip_adapter_image: (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -560,10 +595,11 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         ```py
         import torch
         from diffusers import StableDiffusionPipelineSafe
+        from diffusers.pipelines.stable_diffusion_safe import SafetyConfig
 
         pipeline = StableDiffusionPipelineSafe.from_pretrained(
             "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16
-        )
+        ).to("cuda")
         prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker"
         image = pipeline(prompt=prompt, **SafetyConfig.MEDIUM).images[0]
         ```
@@ -588,6 +624,17 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         if not enable_safety_guidance:
             warnings.warn("Safety checker disabled!")
 
+        if ip_adapter_image is not None:
+            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+            image_embeds, negative_image_embeds = self.encode_image(
+                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+            )
+            if do_classifier_free_guidance:
+                if enable_safety_guidance:
+                    image_embeds = torch.cat([negative_image_embeds, image_embeds, image_embeds])
+                else:
+                    image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
         # 3. Encode input prompt
         prompt_embeds = self._encode_prompt(
             prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance
@@ -613,6 +660,9 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         # 6. Prepare extra step kwargs.
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
 
+        # 6.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+
         safety_momentum = None
 
         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
@@ -627,7 +677,9 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
 
             # predict the noise residual
-            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
+            noise_pred = self.unet(
+                latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs
+            ).sample
 
             # perform guidance
             if do_classifier_free_guidance:
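Same IP-Adapter plumbing in the safe pipeline, with one wrinkle worth noting: when safety guidance is enabled the latent batch is tripled (unconditional, conditional, safety concept), so the positive image embedding is concatenated twice to keep `image_embeds` aligned with `prompt_embeds`. The docstring example also gains the previously missing `SafetyConfig` import and a `.to("cuda")` call, so it now runs as written:

```py
import torch
from diffusers import StableDiffusionPipelineSafe
from diffusers.pipelines.stable_diffusion_safe import SafetyConfig

pipeline = StableDiffusionPipelineSafe.from_pretrained(
    "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16
).to("cuda")

# SafetyConfig.MEDIUM expands to the sld_* keyword arguments controlling
# the strength of safe latent diffusion guidance.
prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker"
image = pipeline(prompt=prompt, **SafetyConfig.MEDIUM).images[0]
```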
diffusers/pipelines/stable_diffusion_sag/__init__.py (new file)

@@ -0,0 +1,48 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
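The new __init__.py follows the lazy-import convention used across these packages: unless TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT forces eager imports, the module swaps itself in sys.modules for a _LazyModule, deferring the torch/transformers-heavy pipeline import to first attribute access (with dummy objects standing in when those dependencies are missing). A sketch of the observable behaviour, assuming torch and transformers are installed:

```py
import sys

# Importing the subpackage does not yet load pipeline_stable_diffusion_sag.
import diffusers.pipelines.stable_diffusion_sag as sag

print(type(sys.modules["diffusers.pipelines.stable_diffusion_sag"]))  # a _LazyModule

# First attribute access triggers the real import of the pipeline module.
pipeline_cls = sag.StableDiffusionSAGPipeline
```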
diffusers/pipelines/{stable_diffusion → stable_diffusion_sag}/pipeline_stable_diffusion_sag.py

@@ -17,11 +17,11 @@ from typing import Any, Callable, Dict, List, Optional, Union
 
 import torch
 import torch.nn.functional as F
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
-from ...image_processor import VaeImageProcessor
-from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
-from ...models import AutoencoderKL, UNet2DConditionModel
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers
 from ...utils import (
@@ -34,8 +34,8 @@ from ...utils import (
 )
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline
-from . import StableDiffusionPipelineOutput
-from .safety_checker import StableDiffusionSafetyChecker
+from ..stable_diffusion import StableDiffusionPipelineOutput
+from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
@@ -98,13 +98,17 @@ class CrossAttnStoreProcessor:
 
 
 # Modified to get self-attention guidance scale in this paper (https://arxiv.org/pdf/2210.00939.pdf) as an input
-class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin):
+class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin):
     r"""
     Pipeline for text-to-image generation using Stable Diffusion.
 
     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
     implemented for all pipelines (downloading, saving, running on a particular device, etc.).
 
+    The pipeline also inherits the following loading methods:
+        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
+
     Args:
         vae ([`AutoencoderKL`]):
             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
@@ -124,8 +128,9 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
         feature_extractor ([`~transformers.CLIPImageProcessor`]):
             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
     """
+
     model_cpu_offload_seq = "text_encoder->unet->vae"
-    _optional_components = ["safety_checker", "feature_extractor"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
     _exclude_from_cpu_offload = ["safety_checker"]
 
     def __init__(
@@ -137,6 +142,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: StableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: Optional[CLIPVisionModelWithProjection] = None,
         requires_safety_checker: bool = True,
     ):
         super().__init__()
@@ -149,6 +155,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
@@ -385,6 +392,31 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
 
         return prompt_embeds, negative_prompt_embeds
 
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        if output_hidden_states:
+            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_enc_hidden_states = self.image_encoder(
+                torch.zeros_like(image), output_hidden_states=True
+            ).hidden_states[-2]
+            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                num_images_per_prompt, dim=0
+            )
+            return image_enc_hidden_states, uncond_image_enc_hidden_states
+        else:
+            image_embeds = self.image_encoder(image).image_embeds
+            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_embeds = torch.zeros_like(image_embeds)
+
+            return image_embeds, uncond_image_embeds
+
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
     def run_safety_checker(self, image, device, dtype):
         if self.safety_checker is None:
@@ -518,6 +550,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
         latents: Optional[torch.FloatTensor] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -564,6 +597,8 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -617,6 +652,14 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
         # `sag_scale = 0` means no self-attention guidance
         do_self_attention_guidance = sag_scale > 0.0
 
+        if ip_adapter_image is not None:
+            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+            image_embeds, negative_image_embeds = self.encode_image(
+                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+            )
+            if do_classifier_free_guidance:
+                image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
         # 3. Encode input prompt
         prompt_embeds, negative_prompt_embeds = self.encode_prompt(
             prompt,
@@ -654,6 +697,10 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
         # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
 
+        # 6.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+        added_uncond_kwargs = {"image_embeds": negative_image_embeds} if ip_adapter_image is not None else None
+
         # 7. Denoising loop
         store_processor = CrossAttnStoreProcessor()
         self.unet.mid_block.attentions[0].transformer_blocks[0].attn1.processor = store_processor
@@ -679,6 +726,7 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
                     t,
                     encoder_hidden_states=prompt_embeds,
                     cross_attention_kwargs=cross_attention_kwargs,
+                    added_cond_kwargs=added_cond_kwargs,
                 ).sample
 
                 # perform guidance
@@ -702,7 +750,12 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
                         )
                         uncond_emb, _ = prompt_embeds.chunk(2)
                         # forward and give guidance
-                        degraded_pred = self.unet(degraded_latents, t, encoder_hidden_states=uncond_emb).sample
+                        degraded_pred = self.unet(
+                            degraded_latents,
+                            t,
+                            encoder_hidden_states=uncond_emb,
+                            added_cond_kwargs=added_uncond_kwargs,
+                        ).sample
                         noise_pred += sag_scale * (noise_pred_uncond - degraded_pred)
                     else:
                         # DDIM-like prediction of x0
@@ -714,7 +767,12 @@ class StableDiffusionSAGPipeline(DiffusionPipeline, TextualInversionLoaderMixin)
                             pred_x0, cond_attn, map_size, t, self.pred_epsilon(latents, noise_pred, t)
                         )
                         # forward and give guidance
-                        degraded_pred = self.unet(degraded_latents, t, encoder_hidden_states=prompt_embeds).sample
+                        degraded_pred = self.unet(
+                            degraded_latents,
+                            t,
+                            encoder_hidden_states=prompt_embeds,
+                            added_cond_kwargs=added_cond_kwargs,
+                        ).sample
                         noise_pred += sag_scale * (noise_pred - degraded_pred)
 
                 # compute the previous noisy sample x_t -> x_t-1
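To close out, a hedged end-to-end sketch of SAG combined with the new IP-Adapter path; repository and weight names are the usual community ones and assumptions here. Note the diff routes the SAG "degraded" UNet passes through added_uncond_kwargs / added_cond_kwargs so they stay image-conditioned too:

```py
import torch
from diffusers import StableDiffusionSAGPipeline
from diffusers.utils import load_image

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

ref = load_image("subject_reference.png")  # hypothetical local file

image = pipe(
    prompt="a portrait of the subject as an astronaut",
    ip_adapter_image=ref,  # routed through encode_image -> added_cond_kwargs
    sag_scale=0.75,        # self-attention guidance strength; 0.0 disables SAG
    guidance_scale=7.5,
).images[0]
```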