diffusers 0.23.1__py3-none-any.whl → 0.25.0__py3-none-any.whl

Files changed (238)
  1. diffusers/__init__.py +26 -2
  2. diffusers/commands/fp16_safetensors.py +10 -11
  3. diffusers/configuration_utils.py +13 -8
  4. diffusers/dependency_versions_check.py +0 -1
  5. diffusers/dependency_versions_table.py +5 -5
  6. diffusers/experimental/rl/value_guided_sampling.py +1 -1
  7. diffusers/image_processor.py +463 -51
  8. diffusers/loaders/__init__.py +82 -0
  9. diffusers/loaders/ip_adapter.py +159 -0
  10. diffusers/loaders/lora.py +1553 -0
  11. diffusers/loaders/lora_conversion_utils.py +284 -0
  12. diffusers/loaders/single_file.py +637 -0
  13. diffusers/loaders/textual_inversion.py +455 -0
  14. diffusers/loaders/unet.py +828 -0
  15. diffusers/loaders/utils.py +59 -0
  16. diffusers/models/__init__.py +26 -9
  17. diffusers/models/activations.py +9 -6
  18. diffusers/models/attention.py +301 -29
  19. diffusers/models/attention_flax.py +9 -1
  20. diffusers/models/attention_processor.py +378 -6
  21. diffusers/models/autoencoders/__init__.py +5 -0
  22. diffusers/models/{autoencoder_asym_kl.py → autoencoders/autoencoder_asym_kl.py} +17 -12
  23. diffusers/models/{autoencoder_kl.py → autoencoders/autoencoder_kl.py} +47 -23
  24. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +402 -0
  25. diffusers/models/{autoencoder_tiny.py → autoencoders/autoencoder_tiny.py} +24 -28
  26. diffusers/models/{consistency_decoder_vae.py → autoencoders/consistency_decoder_vae.py} +51 -44
  27. diffusers/models/{vae.py → autoencoders/vae.py} +71 -17
  28. diffusers/models/controlnet.py +59 -39
  29. diffusers/models/controlnet_flax.py +19 -18
  30. diffusers/models/downsampling.py +338 -0
  31. diffusers/models/embeddings.py +112 -29
  32. diffusers/models/embeddings_flax.py +2 -0
  33. diffusers/models/lora.py +131 -1
  34. diffusers/models/modeling_flax_utils.py +14 -8
  35. diffusers/models/modeling_outputs.py +17 -0
  36. diffusers/models/modeling_utils.py +37 -29
  37. diffusers/models/normalization.py +110 -4
  38. diffusers/models/resnet.py +299 -652
  39. diffusers/models/transformer_2d.py +22 -5
  40. diffusers/models/transformer_temporal.py +183 -1
  41. diffusers/models/unet_2d_blocks_flax.py +5 -0
  42. diffusers/models/unet_2d_condition.py +46 -0
  43. diffusers/models/unet_2d_condition_flax.py +13 -13
  44. diffusers/models/unet_3d_blocks.py +957 -173
  45. diffusers/models/unet_3d_condition.py +16 -8
  46. diffusers/models/unet_kandinsky3.py +535 -0
  47. diffusers/models/unet_motion_model.py +48 -33
  48. diffusers/models/unet_spatio_temporal_condition.py +489 -0
  49. diffusers/models/upsampling.py +454 -0
  50. diffusers/models/uvit_2d.py +471 -0
  51. diffusers/models/vae_flax.py +7 -0
  52. diffusers/models/vq_model.py +12 -3
  53. diffusers/optimization.py +16 -9
  54. diffusers/pipelines/__init__.py +137 -76
  55. diffusers/pipelines/amused/__init__.py +62 -0
  56. diffusers/pipelines/amused/pipeline_amused.py +328 -0
  57. diffusers/pipelines/amused/pipeline_amused_img2img.py +347 -0
  58. diffusers/pipelines/amused/pipeline_amused_inpaint.py +378 -0
  59. diffusers/pipelines/animatediff/pipeline_animatediff.py +66 -8
  60. diffusers/pipelines/audioldm/pipeline_audioldm.py +1 -0
  61. diffusers/pipelines/auto_pipeline.py +23 -13
  62. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -0
  63. diffusers/pipelines/controlnet/pipeline_controlnet.py +238 -35
  64. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +148 -37
  65. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +155 -41
  66. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +123 -43
  67. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +216 -39
  68. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +106 -34
  69. diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +1 -0
  70. diffusers/pipelines/ddim/pipeline_ddim.py +1 -0
  71. diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -0
  72. diffusers/pipelines/deepfloyd_if/pipeline_if.py +13 -1
  73. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +13 -1
  74. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +13 -1
  75. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +13 -1
  76. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +13 -1
  77. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +13 -1
  78. diffusers/pipelines/deprecated/__init__.py +153 -0
  79. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/__init__.py +3 -3
  80. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion.py +177 -34
  81. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion_img2img.py +182 -37
  82. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_output.py +1 -1
  83. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/__init__.py +1 -1
  84. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/mel.py +2 -2
  85. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/pipeline_audio_diffusion.py +4 -4
  86. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/__init__.py +1 -1
  87. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/pipeline_latent_diffusion_uncond.py +4 -4
  88. diffusers/pipelines/{pndm → deprecated/pndm}/__init__.py +1 -1
  89. diffusers/pipelines/{pndm → deprecated/pndm}/pipeline_pndm.py +4 -4
  90. diffusers/pipelines/{repaint → deprecated/repaint}/__init__.py +1 -1
  91. diffusers/pipelines/{repaint → deprecated/repaint}/pipeline_repaint.py +5 -5
  92. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/__init__.py +1 -1
  93. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/pipeline_score_sde_ve.py +5 -4
  94. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/__init__.py +6 -6
  95. diffusers/pipelines/{spectrogram_diffusion/continous_encoder.py → deprecated/spectrogram_diffusion/continuous_encoder.py} +2 -2
  96. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/midi_utils.py +1 -1
  97. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/notes_encoder.py +2 -2
  98. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/pipeline_spectrogram_diffusion.py +8 -7
  99. diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py +55 -0
  100. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_cycle_diffusion.py +34 -13
  101. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_onnx_stable_diffusion_inpaint_legacy.py +7 -6
  102. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_inpaint_legacy.py +12 -11
  103. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_model_editing.py +17 -11
  104. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_paradigms.py +11 -10
  105. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_pix2pix_zero.py +14 -13
  106. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/__init__.py +1 -1
  107. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/pipeline_stochastic_karras_ve.py +4 -4
  108. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/__init__.py +3 -3
  109. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/modeling_text_unet.py +83 -51
  110. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion.py +4 -4
  111. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_dual_guided.py +7 -6
  112. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_image_variation.py +7 -6
  113. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_text_to_image.py +7 -6
  114. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/__init__.py +3 -3
  115. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/pipeline_vq_diffusion.py +5 -5
  116. diffusers/pipelines/dit/pipeline_dit.py +1 -0
  117. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +1 -1
  118. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +3 -3
  119. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +1 -1
  120. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +1 -1
  121. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +1 -1
  122. diffusers/pipelines/kandinsky3/__init__.py +49 -0
  123. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +98 -0
  124. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +589 -0
  125. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +654 -0
  126. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +111 -11
  127. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +102 -9
  128. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -0
  129. diffusers/pipelines/musicldm/pipeline_musicldm.py +1 -1
  130. diffusers/pipelines/onnx_utils.py +8 -5
  131. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +7 -2
  132. diffusers/pipelines/pipeline_flax_utils.py +11 -8
  133. diffusers/pipelines/pipeline_utils.py +63 -42
  134. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +247 -38
  135. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +3 -3
  136. diffusers/pipelines/stable_diffusion/__init__.py +37 -65
  137. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +75 -78
  138. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +2 -2
  139. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +2 -4
  140. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -0
  141. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +174 -11
  142. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +8 -3
  143. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +1 -0
  144. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +178 -11
  145. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +224 -13
  146. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +74 -20
  147. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +4 -0
  148. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +7 -0
  149. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +5 -0
  150. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +5 -0
  151. diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +48 -0
  152. diffusers/pipelines/{stable_diffusion → stable_diffusion_attend_and_excite}/pipeline_stable_diffusion_attend_and_excite.py +6 -2
  153. diffusers/pipelines/stable_diffusion_diffedit/__init__.py +48 -0
  154. diffusers/pipelines/{stable_diffusion → stable_diffusion_diffedit}/pipeline_stable_diffusion_diffedit.py +3 -3
  155. diffusers/pipelines/stable_diffusion_gligen/__init__.py +50 -0
  156. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen.py +3 -2
  157. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen_text_image.py +4 -3
  158. diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +60 -0
  159. diffusers/pipelines/{stable_diffusion → stable_diffusion_k_diffusion}/pipeline_stable_diffusion_k_diffusion.py +7 -1
  160. diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +48 -0
  161. diffusers/pipelines/{stable_diffusion → stable_diffusion_ldm3d}/pipeline_stable_diffusion_ldm3d.py +51 -7
  162. diffusers/pipelines/stable_diffusion_panorama/__init__.py +48 -0
  163. diffusers/pipelines/{stable_diffusion → stable_diffusion_panorama}/pipeline_stable_diffusion_panorama.py +57 -8
  164. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +58 -6
  165. diffusers/pipelines/stable_diffusion_sag/__init__.py +48 -0
  166. diffusers/pipelines/{stable_diffusion → stable_diffusion_sag}/pipeline_stable_diffusion_sag.py +68 -10
  167. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +194 -17
  168. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +205 -16
  169. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +206 -17
  170. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +23 -17
  171. diffusers/pipelines/stable_video_diffusion/__init__.py +58 -0
  172. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +652 -0
  173. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +108 -12
  174. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +115 -14
  175. diffusers/pipelines/text_to_video_synthesis/__init__.py +2 -0
  176. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +6 -0
  177. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +23 -3
  178. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +334 -10
  179. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +1331 -0
  180. diffusers/pipelines/unclip/pipeline_unclip.py +2 -1
  181. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +1 -0
  182. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
  183. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +14 -4
  184. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +9 -5
  185. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +1 -1
  186. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +2 -2
  187. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +5 -1
  188. diffusers/schedulers/__init__.py +4 -4
  189. diffusers/schedulers/deprecated/__init__.py +50 -0
  190. diffusers/schedulers/{scheduling_karras_ve.py → deprecated/scheduling_karras_ve.py} +4 -4
  191. diffusers/schedulers/{scheduling_sde_vp.py → deprecated/scheduling_sde_vp.py} +4 -6
  192. diffusers/schedulers/scheduling_amused.py +162 -0
  193. diffusers/schedulers/scheduling_consistency_models.py +2 -0
  194. diffusers/schedulers/scheduling_ddim.py +1 -3
  195. diffusers/schedulers/scheduling_ddim_inverse.py +2 -7
  196. diffusers/schedulers/scheduling_ddim_parallel.py +1 -3
  197. diffusers/schedulers/scheduling_ddpm.py +47 -3
  198. diffusers/schedulers/scheduling_ddpm_parallel.py +47 -3
  199. diffusers/schedulers/scheduling_deis_multistep.py +28 -6
  200. diffusers/schedulers/scheduling_dpmsolver_multistep.py +28 -6
  201. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +28 -6
  202. diffusers/schedulers/scheduling_dpmsolver_sde.py +3 -3
  203. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +28 -6
  204. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +59 -3
  205. diffusers/schedulers/scheduling_euler_discrete.py +102 -16
  206. diffusers/schedulers/scheduling_heun_discrete.py +17 -5
  207. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +17 -5
  208. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +17 -5
  209. diffusers/schedulers/scheduling_lcm.py +123 -29
  210. diffusers/schedulers/scheduling_lms_discrete.py +3 -3
  211. diffusers/schedulers/scheduling_pndm.py +1 -3
  212. diffusers/schedulers/scheduling_repaint.py +1 -3
  213. diffusers/schedulers/scheduling_unipc_multistep.py +28 -6
  214. diffusers/schedulers/scheduling_utils.py +3 -1
  215. diffusers/schedulers/scheduling_utils_flax.py +3 -1
  216. diffusers/training_utils.py +1 -1
  217. diffusers/utils/__init__.py +1 -2
  218. diffusers/utils/constants.py +10 -12
  219. diffusers/utils/dummy_pt_objects.py +75 -0
  220. diffusers/utils/dummy_torch_and_transformers_objects.py +105 -0
  221. diffusers/utils/dynamic_modules_utils.py +18 -22
  222. diffusers/utils/export_utils.py +8 -3
  223. diffusers/utils/hub_utils.py +24 -36
  224. diffusers/utils/logging.py +11 -11
  225. diffusers/utils/outputs.py +5 -5
  226. diffusers/utils/peft_utils.py +88 -44
  227. diffusers/utils/state_dict_utils.py +8 -0
  228. diffusers/utils/testing_utils.py +199 -1
  229. diffusers/utils/torch_utils.py +4 -4
  230. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/METADATA +86 -69
  231. diffusers-0.25.0.dist-info/RECORD +360 -0
  232. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/WHEEL +1 -1
  233. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/entry_points.txt +0 -1
  234. diffusers/loaders.py +0 -3336
  235. diffusers-0.23.1.dist-info/RECORD +0 -323
  236. /diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/modeling_roberta_series.py +0 -0
  237. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/LICENSE +0 -0
  238. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/top_level.txt +0 -0
diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
@@ -17,12 +17,13 @@ from typing import Any, Callable, Dict, List, Optional, Union
  
  import torch
  from packaging import version
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
  
  from ...configuration_utils import FrozenDict
- from ...image_processor import VaeImageProcessor
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
- from ...models import AutoencoderKL, UNet2DConditionModel
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
+ from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+ from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
+ from ...models.attention_processor import FusedAttnProcessor2_0
  from ...models.lora import adjust_lora_scale_text_encoder
  from ...schedulers import KarrasDiffusionSchedulers
  from ...utils import (
@@ -70,7 +71,53 @@ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
      return noise_cfg
  
  
- class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin):
+ def retrieve_timesteps(
+     scheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     **kwargs,
+ ):
+     """
+     Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+     custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+ 
+     Args:
+         scheduler (`SchedulerMixin`):
+             The scheduler to get timesteps from.
+         num_inference_steps (`int`):
+             The number of diffusion steps used when generating samples with a pre-trained model. If used,
+             `timesteps` must be `None`.
+         device (`str` or `torch.device`, *optional*):
+             The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+         timesteps (`List[int]`, *optional*):
+             Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
+             timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
+             must be `None`.
+ 
+     Returns:
+         `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+         second element is the number of inference steps.
+     """
+     if timesteps is not None:
+         accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accepts_timesteps:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" timestep schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+     return timesteps, num_inference_steps
+ 
+ 
+ class StableDiffusionPipeline(
+     DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
+ ):
      r"""
      Pipeline for text-to-image generation using Stable Diffusion.
  
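Not part of the diff: a quick sketch of how the new `retrieve_timesteps` helper behaves in its two modes. `DDIMScheduler` is used only because it constructs without a download; as of this release its `set_timesteps` takes no `timesteps` keyword, so the custom branch raises.

```py
# Illustrative sketch of retrieve_timesteps; values are hypothetical.
from diffusers import DDIMScheduler
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import retrieve_timesteps

scheduler = DDIMScheduler()

# Default mode: the scheduler derives its own spacing from a step count.
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=50)
assert num_inference_steps == 50 and len(timesteps) == 50

# Custom mode: an explicit descending schedule; the returned step count is the
# schedule's length. Schedulers whose set_timesteps lacks a `timesteps` kwarg
# are rejected up front, which is what happens for DDIMScheduler here.
try:
    timesteps, num_inference_steps = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249, 0])
except ValueError:
    pass  # expected: DDIMScheduler.set_timesteps has no `timesteps` parameter
```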
@@ -82,6 +129,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
          - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
          - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
+         - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
  
      Args:
          vae ([`AutoencoderKL`]):
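Not part of the diff: a hedged end-to-end sketch of the new IP-Adapter path. The checkpoint coordinates follow the diffusers documentation for SD 1.5; the reference image path is a placeholder.

```py
# Illustrative IP-Adapter usage via the new IPAdapterMixin.
import torch
from diffusers import StableDiffusionPipeline
from diffusers.utils import load_image

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
# Checkpoint coordinates as used in the diffusers docs for SD 1.5.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

ref = load_image("path/or/url/to/reference.png")  # any RGB reference image
image = pipe(
    prompt="best quality, high quality",
    ip_adapter_image=ref,
    num_inference_steps=50,
).images[0]
```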
@@ -102,8 +150,9 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          feature_extractor ([`~transformers.CLIPImageProcessor`]):
              A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
      """
- 
-     model_cpu_offload_seq = "text_encoder->unet->vae"
-     _optional_components = ["safety_checker", "feature_extractor"]
+ 
+     model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
+     _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
      _exclude_from_cpu_offload = ["safety_checker"]
      _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
  
@@ -116,6 +165,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          scheduler: KarrasDiffusionSchedulers,
          safety_checker: StableDiffusionSafetyChecker,
          feature_extractor: CLIPImageProcessor,
+         image_encoder: CLIPVisionModelWithProjection = None,
          requires_safety_checker: bool = True,
      ):
          super().__init__()
@@ -192,6 +242,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
              scheduler=scheduler,
              safety_checker=safety_checker,
              feature_extractor=feature_extractor,
+             image_encoder=image_encoder,
          )
          self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
          self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
@@ -439,6 +490,30 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
  
          return prompt_embeds, negative_prompt_embeds
  
+     def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+         dtype = next(self.image_encoder.parameters()).dtype
+ 
+         if not isinstance(image, torch.Tensor):
+             image = self.feature_extractor(image, return_tensors="pt").pixel_values
+ 
+         image = image.to(device=device, dtype=dtype)
+         if output_hidden_states:
+             image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+             image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_enc_hidden_states = self.image_encoder(
+                 torch.zeros_like(image), output_hidden_states=True
+             ).hidden_states[-2]
+             uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                 num_images_per_prompt, dim=0
+             )
+             return image_enc_hidden_states, uncond_image_enc_hidden_states
+         else:
+             image_embeds = self.image_encoder(image).image_embeds
+             image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_embeds = torch.zeros_like(image_embeds)
+ 
+             return image_embeds, uncond_image_embeds
+ 
      def run_safety_checker(self, image, device, dtype):
          if self.safety_checker is None:
              has_nsfw_concept = None
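The `output_hidden_states` switch mirrors the UNet's projection head: as the `__call__` hunk further down shows, a plain `ImageProjection` head consumes the pooled `image_embeds`, while other heads (IP-Adapter "plus" style) take the penultimate hidden states. A sketch of that selection, assuming `pipe` already has an IP-Adapter loaded and `ref` is a reference image:

```py
# Sketch of the branch selection performed later in __call__.
from diffusers.models import ImageProjection

output_hidden_state = not isinstance(pipe.unet.encoder_hid_proj, ImageProjection)
image_embeds, negative_image_embeds = pipe.encode_image(
    ref, device="cuda", num_images_per_prompt=1, output_hidden_states=output_hidden_state
)
```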
@@ -576,6 +651,67 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          """Disables the FreeU mechanism if enabled."""
          self.unet.disable_freeu()
  
+     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
+     def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
+         """
+         Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
+         key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
+ 
+         <Tip warning={true}>
+ 
+         This API is 🧪 experimental.
+ 
+         </Tip>
+ 
+         Args:
+             unet (`bool`, defaults to `True`): To apply fusion on the UNet.
+             vae (`bool`, defaults to `True`): To apply fusion on the VAE.
+         """
+         self.fusing_unet = False
+         self.fusing_vae = False
+ 
+         if unet:
+             self.fusing_unet = True
+             self.unet.fuse_qkv_projections()
+             self.unet.set_attn_processor(FusedAttnProcessor2_0())
+ 
+         if vae:
+             if not isinstance(self.vae, AutoencoderKL):
+                 raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
+ 
+             self.fusing_vae = True
+             self.vae.fuse_qkv_projections()
+             self.vae.set_attn_processor(FusedAttnProcessor2_0())
+ 
+     # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
+     def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
+         """Disable QKV projection fusion if enabled.
+ 
+         <Tip warning={true}>
+ 
+         This API is 🧪 experimental.
+ 
+         </Tip>
+ 
+         Args:
+             unet (`bool`, defaults to `True`): To apply fusion on the UNet.
+             vae (`bool`, defaults to `True`): To apply fusion on the VAE.
+ 
+         """
+         if unet:
+             if not self.fusing_unet:
+                 logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
+             else:
+                 self.unet.unfuse_qkv_projections()
+                 self.fusing_unet = False
+ 
+         if vae:
+             if not self.fusing_vae:
+                 logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
+             else:
+                 self.vae.unfuse_qkv_projections()
+                 self.fusing_vae = False
+ 
      # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
      def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
          """
@@ -632,6 +768,10 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
      def num_timesteps(self):
          return self._num_timesteps
  
+     @property
+     def interrupt(self):
+         return self._interrupt
+ 
      @torch.no_grad()
      @replace_example_docstring(EXAMPLE_DOC_STRING)
      def __call__(
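Together with the `self._interrupt = False` reset and the in-loop check added below, this property enables early stopping from a step-end callback. A sketch of the pattern, assuming `pipe` is a loaded pipeline; the flag is private, which is why the callback sets `_interrupt` directly:

```py
# Illustrative early stop: skip the remaining denoising steps after step 20.
def stop_after_20(pipeline, i, t, callback_kwargs):
    if i == 20:
        pipeline._interrupt = True  # remaining steps fall through via `continue`
    return callback_kwargs

image = pipe(
    "a photo of a cat",
    num_inference_steps=50,
    callback_on_step_end=stop_after_20,
).images[0]
```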
@@ -640,6 +780,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          height: Optional[int] = None,
          width: Optional[int] = None,
          num_inference_steps: int = 50,
+         timesteps: List[int] = None,
          guidance_scale: float = 7.5,
          negative_prompt: Optional[Union[str, List[str]]] = None,
          num_images_per_prompt: Optional[int] = 1,
@@ -648,6 +789,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          latents: Optional[torch.FloatTensor] = None,
          prompt_embeds: Optional[torch.FloatTensor] = None,
          negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+         ip_adapter_image: Optional[PipelineImageInput] = None,
          output_type: Optional[str] = "pil",
          return_dict: bool = True,
          cross_attention_kwargs: Optional[Dict[str, Any]] = None,
@@ -670,6 +812,10 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
              num_inference_steps (`int`, *optional*, defaults to 50):
                  The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                  expense of slower inference.
+             timesteps (`List[int]`, *optional*):
+                 Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+                 in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                 passed will be used. Must be in descending order.
              guidance_scale (`float`, *optional*, defaults to 7.5):
                  A higher guidance scale value encourages the model to generate images closely linked to the text
                  `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
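Not part of the diff: with this argument a caller can hand a schedule straight to the pipeline; a sketch with hypothetical values, valid only for schedulers whose `set_timesteps` accepts a `timesteps` keyword (otherwise `retrieve_timesteps` raises a `ValueError`):

```py
# Illustrative custom schedule passed through __call__.
image = pipe(
    "a photo of a cat",
    timesteps=[999, 849, 699, 549, 399, 249, 99],  # descending, hypothetical
).images[0]
```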
@@ -694,6 +840,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
              negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                  Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                  not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+             ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
              output_type (`str`, *optional*, defaults to `"pil"`):
                  The output format of the generated image. Choose between `PIL.Image` or `np.array`.
              return_dict (`bool`, *optional*, defaults to `True`):
@@ -717,7 +864,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
              callback_on_step_end_tensor_inputs (`List`, *optional*):
                  The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                  will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
-                 `._callback_tensor_inputs` attribute of your pipeine class.
+                 `._callback_tensor_inputs` attribute of your pipeline class.
  
          Examples:
  
@@ -766,6 +913,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          self._guidance_rescale = guidance_rescale
          self._clip_skip = clip_skip
          self._cross_attention_kwargs = cross_attention_kwargs
+         self._interrupt = False
  
          # 2. Define call parameters
          if prompt is not None and isinstance(prompt, str):
@@ -793,15 +941,23 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
              lora_scale=lora_scale,
              clip_skip=self.clip_skip,
          )
+ 
          # For classifier free guidance, we need to do two forward passes.
          # Here we concatenate the unconditional and text embeddings into a single batch
          # to avoid doing two forward passes
          if self.do_classifier_free_guidance:
              prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
  
+         if ip_adapter_image is not None:
+             output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+             image_embeds, negative_image_embeds = self.encode_image(
+                 ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+             )
+             if self.do_classifier_free_guidance:
+                 image_embeds = torch.cat([negative_image_embeds, image_embeds])
+ 
          # 4. Prepare timesteps
-         self.scheduler.set_timesteps(num_inference_steps, device=device)
-         timesteps = self.scheduler.timesteps
+         timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
  
          # 5. Prepare latent variables
          num_channels_latents = self.unet.config.in_channels
@@ -819,7 +975,10 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
          extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
  
-         # 6.5 Optionally get Guidance Scale Embedding
+         # 6.1 Add image embeds for IP-Adapter
+         added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+ 
+         # 6.2 Optionally get Guidance Scale Embedding
          timestep_cond = None
          if self.unet.config.time_cond_proj_dim is not None:
              guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
@@ -832,6 +991,9 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
          self._num_timesteps = len(timesteps)
          with self.progress_bar(total=num_inference_steps) as progress_bar:
              for i, t in enumerate(timesteps):
+                 if self.interrupt:
+                     continue
+ 
                  # expand the latents if we are doing classifier free guidance
                  latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                  latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
@@ -843,6 +1005,7 @@ class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lo
                      encoder_hidden_states=prompt_embeds,
                      timestep_cond=timestep_cond,
                      cross_attention_kwargs=self.cross_attention_kwargs,
+                     added_cond_kwargs=added_cond_kwargs,
                      return_dict=False,
                  )[0]
  
diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py
@@ -37,9 +37,13 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
  
  
  # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
- def retrieve_latents(encoder_output, generator):
-     if hasattr(encoder_output, "latent_dist"):
+ def retrieve_latents(
+     encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+ ):
+     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
          return encoder_output.latent_dist.sample(generator)
+     elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+         return encoder_output.latent_dist.mode()
      elif hasattr(encoder_output, "latents"):
          return encoder_output.latents
      else:
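The new `sample_mode` makes the VAE encoding deterministic on demand. A sketch assuming `vae` is an `AutoencoderKL` and `pixel_values` a normalized image batch:

```py
# Illustrative use of the sample_mode switch.
import torch

enc = vae.encode(pixel_values)  # encoder output carrying a `latent_dist`

# "sample" (default): stochastic draw from the posterior, seedable.
latents = retrieve_latents(enc, generator=torch.Generator().manual_seed(0))

# "argmax": the distribution's mode, so repeated calls match exactly.
latents_det = retrieve_latents(enc, sample_mode="argmax")
```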
@@ -95,6 +99,7 @@ class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoader
              A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
              [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
      """
+ 
      model_cpu_offload_seq = "text_encoder->unet->vae"
      _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "depth_mask"]
  
@@ -674,7 +679,7 @@ class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoader
              callback_on_step_end_tensor_inputs (`List`, *optional*):
                  The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                  will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
-                 `._callback_tensor_inputs` attribute of your pipeine class.
+                 `._callback_tensor_inputs` attribute of your pipeline class.
          Examples:
  
          ```py
diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py
@@ -62,6 +62,7 @@ class StableDiffusionImageVariationPipeline(DiffusionPipeline):
          feature_extractor ([`~transformers.CLIPImageProcessor`]):
              A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
      """
+ 
      # TODO: feature_extractor is required to encode images (if they are in PIL format),
      # we should give a descriptive message if the pipeline doesn't have one.
      _optional_components = ["safety_checker"]