diffusers 0.23.1__py3-none-any.whl → 0.25.0__py3-none-any.whl

Files changed (238)
  1. diffusers/__init__.py +26 -2
  2. diffusers/commands/fp16_safetensors.py +10 -11
  3. diffusers/configuration_utils.py +13 -8
  4. diffusers/dependency_versions_check.py +0 -1
  5. diffusers/dependency_versions_table.py +5 -5
  6. diffusers/experimental/rl/value_guided_sampling.py +1 -1
  7. diffusers/image_processor.py +463 -51
  8. diffusers/loaders/__init__.py +82 -0
  9. diffusers/loaders/ip_adapter.py +159 -0
  10. diffusers/loaders/lora.py +1553 -0
  11. diffusers/loaders/lora_conversion_utils.py +284 -0
  12. diffusers/loaders/single_file.py +637 -0
  13. diffusers/loaders/textual_inversion.py +455 -0
  14. diffusers/loaders/unet.py +828 -0
  15. diffusers/loaders/utils.py +59 -0
  16. diffusers/models/__init__.py +26 -9
  17. diffusers/models/activations.py +9 -6
  18. diffusers/models/attention.py +301 -29
  19. diffusers/models/attention_flax.py +9 -1
  20. diffusers/models/attention_processor.py +378 -6
  21. diffusers/models/autoencoders/__init__.py +5 -0
  22. diffusers/models/{autoencoder_asym_kl.py → autoencoders/autoencoder_asym_kl.py} +17 -12
  23. diffusers/models/{autoencoder_kl.py → autoencoders/autoencoder_kl.py} +47 -23
  24. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +402 -0
  25. diffusers/models/{autoencoder_tiny.py → autoencoders/autoencoder_tiny.py} +24 -28
  26. diffusers/models/{consistency_decoder_vae.py → autoencoders/consistency_decoder_vae.py} +51 -44
  27. diffusers/models/{vae.py → autoencoders/vae.py} +71 -17
  28. diffusers/models/controlnet.py +59 -39
  29. diffusers/models/controlnet_flax.py +19 -18
  30. diffusers/models/downsampling.py +338 -0
  31. diffusers/models/embeddings.py +112 -29
  32. diffusers/models/embeddings_flax.py +2 -0
  33. diffusers/models/lora.py +131 -1
  34. diffusers/models/modeling_flax_utils.py +14 -8
  35. diffusers/models/modeling_outputs.py +17 -0
  36. diffusers/models/modeling_utils.py +37 -29
  37. diffusers/models/normalization.py +110 -4
  38. diffusers/models/resnet.py +299 -652
  39. diffusers/models/transformer_2d.py +22 -5
  40. diffusers/models/transformer_temporal.py +183 -1
  41. diffusers/models/unet_2d_blocks_flax.py +5 -0
  42. diffusers/models/unet_2d_condition.py +46 -0
  43. diffusers/models/unet_2d_condition_flax.py +13 -13
  44. diffusers/models/unet_3d_blocks.py +957 -173
  45. diffusers/models/unet_3d_condition.py +16 -8
  46. diffusers/models/unet_kandinsky3.py +535 -0
  47. diffusers/models/unet_motion_model.py +48 -33
  48. diffusers/models/unet_spatio_temporal_condition.py +489 -0
  49. diffusers/models/upsampling.py +454 -0
  50. diffusers/models/uvit_2d.py +471 -0
  51. diffusers/models/vae_flax.py +7 -0
  52. diffusers/models/vq_model.py +12 -3
  53. diffusers/optimization.py +16 -9
  54. diffusers/pipelines/__init__.py +137 -76
  55. diffusers/pipelines/amused/__init__.py +62 -0
  56. diffusers/pipelines/amused/pipeline_amused.py +328 -0
  57. diffusers/pipelines/amused/pipeline_amused_img2img.py +347 -0
  58. diffusers/pipelines/amused/pipeline_amused_inpaint.py +378 -0
  59. diffusers/pipelines/animatediff/pipeline_animatediff.py +66 -8
  60. diffusers/pipelines/audioldm/pipeline_audioldm.py +1 -0
  61. diffusers/pipelines/auto_pipeline.py +23 -13
  62. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -0
  63. diffusers/pipelines/controlnet/pipeline_controlnet.py +238 -35
  64. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +148 -37
  65. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +155 -41
  66. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +123 -43
  67. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +216 -39
  68. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +106 -34
  69. diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +1 -0
  70. diffusers/pipelines/ddim/pipeline_ddim.py +1 -0
  71. diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -0
  72. diffusers/pipelines/deepfloyd_if/pipeline_if.py +13 -1
  73. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +13 -1
  74. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +13 -1
  75. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +13 -1
  76. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +13 -1
  77. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +13 -1
  78. diffusers/pipelines/deprecated/__init__.py +153 -0
  79. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/__init__.py +3 -3
  80. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion.py +177 -34
  81. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion_img2img.py +182 -37
  82. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_output.py +1 -1
  83. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/__init__.py +1 -1
  84. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/mel.py +2 -2
  85. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/pipeline_audio_diffusion.py +4 -4
  86. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/__init__.py +1 -1
  87. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/pipeline_latent_diffusion_uncond.py +4 -4
  88. diffusers/pipelines/{pndm → deprecated/pndm}/__init__.py +1 -1
  89. diffusers/pipelines/{pndm → deprecated/pndm}/pipeline_pndm.py +4 -4
  90. diffusers/pipelines/{repaint → deprecated/repaint}/__init__.py +1 -1
  91. diffusers/pipelines/{repaint → deprecated/repaint}/pipeline_repaint.py +5 -5
  92. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/__init__.py +1 -1
  93. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/pipeline_score_sde_ve.py +5 -4
  94. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/__init__.py +6 -6
  95. diffusers/pipelines/{spectrogram_diffusion/continous_encoder.py → deprecated/spectrogram_diffusion/continuous_encoder.py} +2 -2
  96. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/midi_utils.py +1 -1
  97. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/notes_encoder.py +2 -2
  98. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/pipeline_spectrogram_diffusion.py +8 -7
  99. diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py +55 -0
  100. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_cycle_diffusion.py +34 -13
  101. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_onnx_stable_diffusion_inpaint_legacy.py +7 -6
  102. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_inpaint_legacy.py +12 -11
  103. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_model_editing.py +17 -11
  104. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_paradigms.py +11 -10
  105. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_pix2pix_zero.py +14 -13
  106. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/__init__.py +1 -1
  107. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/pipeline_stochastic_karras_ve.py +4 -4
  108. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/__init__.py +3 -3
  109. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/modeling_text_unet.py +83 -51
  110. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion.py +4 -4
  111. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_dual_guided.py +7 -6
  112. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_image_variation.py +7 -6
  113. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_text_to_image.py +7 -6
  114. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/__init__.py +3 -3
  115. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/pipeline_vq_diffusion.py +5 -5
  116. diffusers/pipelines/dit/pipeline_dit.py +1 -0
  117. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +1 -1
  118. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +3 -3
  119. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +1 -1
  120. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +1 -1
  121. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +1 -1
  122. diffusers/pipelines/kandinsky3/__init__.py +49 -0
  123. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +98 -0
  124. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +589 -0
  125. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +654 -0
  126. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +111 -11
  127. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +102 -9
  128. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -0
  129. diffusers/pipelines/musicldm/pipeline_musicldm.py +1 -1
  130. diffusers/pipelines/onnx_utils.py +8 -5
  131. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +7 -2
  132. diffusers/pipelines/pipeline_flax_utils.py +11 -8
  133. diffusers/pipelines/pipeline_utils.py +63 -42
  134. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +247 -38
  135. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +3 -3
  136. diffusers/pipelines/stable_diffusion/__init__.py +37 -65
  137. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +75 -78
  138. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +2 -2
  139. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +2 -4
  140. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -0
  141. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +174 -11
  142. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +8 -3
  143. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +1 -0
  144. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +178 -11
  145. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +224 -13
  146. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +74 -20
  147. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +4 -0
  148. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +7 -0
  149. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +5 -0
  150. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +5 -0
  151. diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +48 -0
  152. diffusers/pipelines/{stable_diffusion → stable_diffusion_attend_and_excite}/pipeline_stable_diffusion_attend_and_excite.py +6 -2
  153. diffusers/pipelines/stable_diffusion_diffedit/__init__.py +48 -0
  154. diffusers/pipelines/{stable_diffusion → stable_diffusion_diffedit}/pipeline_stable_diffusion_diffedit.py +3 -3
  155. diffusers/pipelines/stable_diffusion_gligen/__init__.py +50 -0
  156. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen.py +3 -2
  157. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen_text_image.py +4 -3
  158. diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +60 -0
  159. diffusers/pipelines/{stable_diffusion → stable_diffusion_k_diffusion}/pipeline_stable_diffusion_k_diffusion.py +7 -1
  160. diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +48 -0
  161. diffusers/pipelines/{stable_diffusion → stable_diffusion_ldm3d}/pipeline_stable_diffusion_ldm3d.py +51 -7
  162. diffusers/pipelines/stable_diffusion_panorama/__init__.py +48 -0
  163. diffusers/pipelines/{stable_diffusion → stable_diffusion_panorama}/pipeline_stable_diffusion_panorama.py +57 -8
  164. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +58 -6
  165. diffusers/pipelines/stable_diffusion_sag/__init__.py +48 -0
  166. diffusers/pipelines/{stable_diffusion → stable_diffusion_sag}/pipeline_stable_diffusion_sag.py +68 -10
  167. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +194 -17
  168. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +205 -16
  169. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +206 -17
  170. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +23 -17
  171. diffusers/pipelines/stable_video_diffusion/__init__.py +58 -0
  172. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +652 -0
  173. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +108 -12
  174. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +115 -14
  175. diffusers/pipelines/text_to_video_synthesis/__init__.py +2 -0
  176. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +6 -0
  177. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +23 -3
  178. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +334 -10
  179. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +1331 -0
  180. diffusers/pipelines/unclip/pipeline_unclip.py +2 -1
  181. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +1 -0
  182. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
  183. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +14 -4
  184. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +9 -5
  185. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +1 -1
  186. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +2 -2
  187. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +5 -1
  188. diffusers/schedulers/__init__.py +4 -4
  189. diffusers/schedulers/deprecated/__init__.py +50 -0
  190. diffusers/schedulers/{scheduling_karras_ve.py → deprecated/scheduling_karras_ve.py} +4 -4
  191. diffusers/schedulers/{scheduling_sde_vp.py → deprecated/scheduling_sde_vp.py} +4 -6
  192. diffusers/schedulers/scheduling_amused.py +162 -0
  193. diffusers/schedulers/scheduling_consistency_models.py +2 -0
  194. diffusers/schedulers/scheduling_ddim.py +1 -3
  195. diffusers/schedulers/scheduling_ddim_inverse.py +2 -7
  196. diffusers/schedulers/scheduling_ddim_parallel.py +1 -3
  197. diffusers/schedulers/scheduling_ddpm.py +47 -3
  198. diffusers/schedulers/scheduling_ddpm_parallel.py +47 -3
  199. diffusers/schedulers/scheduling_deis_multistep.py +28 -6
  200. diffusers/schedulers/scheduling_dpmsolver_multistep.py +28 -6
  201. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +28 -6
  202. diffusers/schedulers/scheduling_dpmsolver_sde.py +3 -3
  203. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +28 -6
  204. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +59 -3
  205. diffusers/schedulers/scheduling_euler_discrete.py +102 -16
  206. diffusers/schedulers/scheduling_heun_discrete.py +17 -5
  207. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +17 -5
  208. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +17 -5
  209. diffusers/schedulers/scheduling_lcm.py +123 -29
  210. diffusers/schedulers/scheduling_lms_discrete.py +3 -3
  211. diffusers/schedulers/scheduling_pndm.py +1 -3
  212. diffusers/schedulers/scheduling_repaint.py +1 -3
  213. diffusers/schedulers/scheduling_unipc_multistep.py +28 -6
  214. diffusers/schedulers/scheduling_utils.py +3 -1
  215. diffusers/schedulers/scheduling_utils_flax.py +3 -1
  216. diffusers/training_utils.py +1 -1
  217. diffusers/utils/__init__.py +1 -2
  218. diffusers/utils/constants.py +10 -12
  219. diffusers/utils/dummy_pt_objects.py +75 -0
  220. diffusers/utils/dummy_torch_and_transformers_objects.py +105 -0
  221. diffusers/utils/dynamic_modules_utils.py +18 -22
  222. diffusers/utils/export_utils.py +8 -3
  223. diffusers/utils/hub_utils.py +24 -36
  224. diffusers/utils/logging.py +11 -11
  225. diffusers/utils/outputs.py +5 -5
  226. diffusers/utils/peft_utils.py +88 -44
  227. diffusers/utils/state_dict_utils.py +8 -0
  228. diffusers/utils/testing_utils.py +199 -1
  229. diffusers/utils/torch_utils.py +4 -4
  230. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/METADATA +86 -69
  231. diffusers-0.25.0.dist-info/RECORD +360 -0
  232. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/WHEEL +1 -1
  233. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/entry_points.txt +0 -1
  234. diffusers/loaders.py +0 -3336
  235. diffusers-0.23.1.dist-info/RECORD +0 -323
  236. /diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/modeling_roberta_series.py +0 -0
  237. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/LICENSE +0 -0
  238. {diffusers-0.23.1.dist-info → diffusers-0.25.0.dist-info}/top_level.txt +0 -0
diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py

@@ -21,11 +21,11 @@ import numpy as np
  import PIL.Image
  import torch
  import torch.nn.functional as F
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

  from ...image_processor import PipelineImageInput, VaeImageProcessor
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
- from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
+ from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+ from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
  from ...models.lora import adjust_lora_scale_text_encoder
  from ...schedulers import KarrasDiffusionSchedulers
  from ...utils import (
@@ -104,9 +104,13 @@ EXAMPLE_DOC_STRING = """


  # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
- def retrieve_latents(encoder_output, generator):
-     if hasattr(encoder_output, "latent_dist"):
+ def retrieve_latents(
+     encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+ ):
+     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
          return encoder_output.latent_dist.sample(generator)
+     elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+         return encoder_output.latent_dist.mode()
      elif hasattr(encoder_output, "latents"):
          return encoder_output.latents
      else:
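
The new `sample_mode` switch lets callers choose between stochastic VAE encoding and the deterministic distribution mode. A minimal sketch of both paths, using the `retrieve_latents` helper defined above (the VAE checkpoint is a public Hub model; the random tensor stands in for a real preprocessed image):

    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
    image_tensor = torch.randn(1, 3, 512, 512)  # stand-in for a preprocessed image in [-1, 1]

    encoder_output = vae.encode(image_tensor)

    # Default behaviour: draw a stochastic sample from the latent distribution.
    stochastic = retrieve_latents(encoder_output, generator=torch.Generator().manual_seed(0))

    # New in 0.25.0: take the distribution mode instead, for deterministic encoding.
    deterministic = retrieve_latents(encoder_output, sample_mode="argmax")
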
@@ -237,7 +241,7 @@ def prepare_mask_and_masked_image(image, mask, height, width, return_image=False


  class StableDiffusionControlNetInpaintPipeline(
-     DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
+     DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
  ):
      r"""
      Pipeline for image inpainting using Stable Diffusion with ControlNet guidance.
@@ -247,6 +251,10 @@ class StableDiffusionControlNetInpaintPipeline(

      The pipeline also inherits the following loading methods:
          - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+         - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+         - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+         - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
+         - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

      <Tip>

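In practice these mixin methods are called directly on the assembled pipeline. A minimal sketch (the LoRA repository name is illustrative; the other model IDs are public Hub checkpoints):

    from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline

    controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint")
    pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet
    )

    pipe.load_textual_inversion("sd-concepts-library/cat-toy")  # TextualInversionLoaderMixin
    pipe.load_lora_weights("some-user/some-lora-repo")  # LoraLoaderMixin (illustrative repo)
    pipe.load_ip_adapter(  # IPAdapterMixin, new in this release
        "h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin"
    )
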
@@ -282,9 +290,11 @@ class StableDiffusionControlNetInpaintPipeline(
          feature_extractor ([`~transformers.CLIPImageProcessor`]):
              A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
      """
-     model_cpu_offload_seq = "text_encoder->unet->vae"
-     _optional_components = ["safety_checker", "feature_extractor"]
+
+     model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
+     _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
      _exclude_from_cpu_offload = ["safety_checker"]
+     _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

      def __init__(
          self,
@@ -296,6 +306,7 @@ class StableDiffusionControlNetInpaintPipeline(
          scheduler: KarrasDiffusionSchedulers,
          safety_checker: StableDiffusionSafetyChecker,
          feature_extractor: CLIPImageProcessor,
+         image_encoder: CLIPVisionModelWithProjection = None,
          requires_safety_checker: bool = True,
      ):
          super().__init__()
@@ -328,6 +339,7 @@ class StableDiffusionControlNetInpaintPipeline(
              scheduler=scheduler,
              safety_checker=safety_checker,
              feature_extractor=feature_extractor,
+             image_encoder=image_encoder,
          )
          self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
          self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
@@ -587,6 +599,31 @@ class StableDiffusionControlNetInpaintPipeline(

          return prompt_embeds, negative_prompt_embeds

+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+     def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+         dtype = next(self.image_encoder.parameters()).dtype
+
+         if not isinstance(image, torch.Tensor):
+             image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+         image = image.to(device=device, dtype=dtype)
+         if output_hidden_states:
+             image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+             image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_enc_hidden_states = self.image_encoder(
+                 torch.zeros_like(image), output_hidden_states=True
+             ).hidden_states[-2]
+             uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                 num_images_per_prompt, dim=0
+             )
+             return image_enc_hidden_states, uncond_image_enc_hidden_states
+         else:
+             image_embeds = self.image_encoder(image).image_embeds
+             image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_embeds = torch.zeros_like(image_embeds)
+
+             return image_embeds, uncond_image_embeds
+
      # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
      def run_safety_checker(self, image, device, dtype):
          if self.safety_checker is None:
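
`encode_image` returns a (conditional, unconditional) pair; plain IP-Adapter checkpoints consume the pooled CLIP projection, while the "plus" variants consume penultimate hidden states, which is why `output_hidden_states` selects the branch. A sketch of calling it directly, assuming `pipe` was built with an `image_encoder` loaded (the file path is illustrative):

    import torch
    from PIL import Image

    adapter_image = Image.open("reference.png")  # illustrative reference image

    image_embeds, uncond_image_embeds = pipe.encode_image(
        adapter_image, device="cuda", num_images_per_prompt=1, output_hidden_states=False
    )
    # For classifier-free guidance, concatenate the negative embeds first,
    # mirroring how prompt embeddings are batched in __call__.
    image_embeds = torch.cat([uncond_image_embeds, image_embeds])
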
@@ -655,18 +692,24 @@ class StableDiffusionControlNetInpaintPipeline(
          controlnet_conditioning_scale=1.0,
          control_guidance_start=0.0,
          control_guidance_end=1.0,
+         callback_on_step_end_tensor_inputs=None,
      ):
          if height is not None and height % 8 != 0 or width is not None and width % 8 != 0:
              raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

-         if (callback_steps is None) or (
-             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-         ):
+         if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
              raise ValueError(
                  f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                  f" {type(callback_steps)}."
              )

+         if callback_on_step_end_tensor_inputs is not None and not all(
+             k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+         ):
+             raise ValueError(
+                 f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+             )
+
          if prompt is not None and prompt_embeds is not None:
              raise ValueError(
                  f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
@@ -998,6 +1041,29 @@ class StableDiffusionControlNetInpaintPipeline(
          """Disables the FreeU mechanism if enabled."""
          self.unet.disable_freeu()

+     @property
+     def guidance_scale(self):
+         return self._guidance_scale
+
+     @property
+     def clip_skip(self):
+         return self._clip_skip
+
+     # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+     # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+     # corresponds to doing no classifier free guidance.
+     @property
+     def do_classifier_free_guidance(self):
+         return self._guidance_scale > 1
+
+     @property
+     def cross_attention_kwargs(self):
+         return self._cross_attention_kwargs
+
+     @property
+     def num_timesteps(self):
+         return self._num_timesteps
+
      @torch.no_grad()
      @replace_example_docstring(EXAMPLE_DOC_STRING)
      def __call__(
@@ -1018,16 +1084,18 @@ class StableDiffusionControlNetInpaintPipeline(
          latents: Optional[torch.FloatTensor] = None,
          prompt_embeds: Optional[torch.FloatTensor] = None,
          negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+         ip_adapter_image: Optional[PipelineImageInput] = None,
          output_type: Optional[str] = "pil",
          return_dict: bool = True,
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
          cross_attention_kwargs: Optional[Dict[str, Any]] = None,
          controlnet_conditioning_scale: Union[float, List[float]] = 0.5,
          guess_mode: bool = False,
          control_guidance_start: Union[float, List[float]] = 0.0,
          control_guidance_end: Union[float, List[float]] = 1.0,
          clip_skip: Optional[int] = None,
+         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+         callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+         **kwargs,
      ):
          r"""
          The call function to the pipeline for generation.
@@ -1095,17 +1163,12 @@ class StableDiffusionControlNetInpaintPipeline(
              negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                  Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                  not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+             ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
              output_type (`str`, *optional*, defaults to `"pil"`):
                  The output format of the generated image. Choose between `PIL.Image` or `np.array`.
              return_dict (`bool`, *optional*, defaults to `True`):
                  Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                  plain tuple.
-             callback (`Callable`, *optional*):
-                 A function that calls every `callback_steps` steps during inference. The function is called with the
-                 following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-             callback_steps (`int`, *optional*, defaults to 1):
-                 The frequency at which the `callback` function is called. If not specified, the callback is called at
-                 every step.
              cross_attention_kwargs (`dict`, *optional*):
                  A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                  [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
@@ -1123,6 +1186,15 @@ class StableDiffusionControlNetInpaintPipeline(
              clip_skip (`int`, *optional*):
                  Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                  the output of the pre-final layer will be used for computing the prompt embeddings.
+             callback_on_step_end (`Callable`, *optional*):
+                 A function that is called at the end of each denoising step during inference. The function is called
+                 with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                 callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                 `callback_on_step_end_tensor_inputs`.
+             callback_on_step_end_tensor_inputs (`List`, *optional*):
+                 The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                 will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in
+                 the `._callback_tensor_inputs` attribute of your pipeline class.

          Examples:

@@ -1133,6 +1205,23 @@ class StableDiffusionControlNetInpaintPipeline(
              second element is a list of `bool`s indicating whether the corresponding generated image contains
              "not-safe-for-work" (nsfw) content.
          """
+
+         callback = kwargs.pop("callback", None)
+         callback_steps = kwargs.pop("callback_steps", None)
+
+         if callback is not None:
+             deprecate(
+                 "callback",
+                 "1.0.0",
+                 "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+             )
+         if callback_steps is not None:
+             deprecate(
+                 "callback_steps",
+                 "1.0.0",
+                 "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+             )
+
          controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet

          # align format for control guidance
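
The legacy `callback`/`callback_steps` pair now arrives through `**kwargs` and only triggers a deprecation warning; new code should use `callback_on_step_end`, which can also feed modified tensors back into the denoising loop. A sketch of the migration (the callback body and the pipeline inputs are illustrative):

    def on_step_end(pipe, step_index, timestep, callback_kwargs):
        # Only tensors requested via `callback_on_step_end_tensor_inputs` show up here.
        latents = callback_kwargs["latents"]
        if step_index == pipe.num_timesteps - 1:
            print(f"final step, latent std: {latents.std().item():.3f}")
        # Whatever is returned replaces the loop's local tensors of the same name.
        return {"latents": latents}

    result = pipe(
        prompt="a cat sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        control_image=control_image,
        callback_on_step_end=on_step_end,
        callback_on_step_end_tensor_inputs=["latents"],
    )
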
@@ -1142,9 +1231,10 @@ class StableDiffusionControlNetInpaintPipeline(
              control_guidance_end = len(control_guidance_start) * [control_guidance_end]
          elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
              mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
-             control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
-                 control_guidance_end
-             ]
+             control_guidance_start, control_guidance_end = (
+                 mult * [control_guidance_start],
+                 mult * [control_guidance_end],
+             )

          # 1. Check inputs. Raise error if not correct
          self.check_inputs(
@@ -1159,8 +1249,13 @@ class StableDiffusionControlNetInpaintPipeline(
              controlnet_conditioning_scale,
              control_guidance_start,
              control_guidance_end,
+             callback_on_step_end_tensor_inputs,
          )

+         self._guidance_scale = guidance_scale
+         self._clip_skip = clip_skip
+         self._cross_attention_kwargs = cross_attention_kwargs
+
          # 2. Define call parameters
          if prompt is not None and isinstance(prompt, str):
              batch_size = 1
@@ -1170,10 +1265,6 @@ class StableDiffusionControlNetInpaintPipeline(
              batch_size = prompt_embeds.shape[0]

          device = self._execution_device
-         # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier free guidance.
-         do_classifier_free_guidance = guidance_scale > 1.0

          if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
              controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
@@ -1187,25 +1278,33 @@ class StableDiffusionControlNetInpaintPipeline(

          # 3. Encode input prompt
          text_encoder_lora_scale = (
-             cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+             self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
          )
          prompt_embeds, negative_prompt_embeds = self.encode_prompt(
              prompt,
              device,
              num_images_per_prompt,
-             do_classifier_free_guidance,
+             self.do_classifier_free_guidance,
              negative_prompt,
              prompt_embeds=prompt_embeds,
              negative_prompt_embeds=negative_prompt_embeds,
              lora_scale=text_encoder_lora_scale,
-             clip_skip=clip_skip,
+             clip_skip=self.clip_skip,
          )
          # For classifier free guidance, we need to do two forward passes.
          # Here we concatenate the unconditional and text embeddings into a single batch
          # to avoid doing two forward passes
-         if do_classifier_free_guidance:
+         if self.do_classifier_free_guidance:
              prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

+         if ip_adapter_image is not None:
+             output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+             image_embeds, negative_image_embeds = self.encode_image(
+                 ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+             )
+             if self.do_classifier_free_guidance:
+                 image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
          # 4. Prepare image
          if isinstance(controlnet, ControlNetModel):
              control_image = self.prepare_control_image(
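
With the embeddings prepared here, IP-Adapter conditioning is driven entirely by the new `ip_adapter_image` argument. End to end, assuming the adapter weights were loaded as in the earlier sketch (the input images are illustrative):

    output = pipe(
        prompt="best quality, high quality",
        image=init_image,
        mask_image=mask_image,
        control_image=control_image,
        ip_adapter_image=style_image,  # any PipelineImageInput, e.g. a PIL image
        num_inference_steps=30,
    ).images[0]
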
@@ -1216,7 +1315,7 @@ class StableDiffusionControlNetInpaintPipeline(
                  num_images_per_prompt=num_images_per_prompt,
                  device=device,
                  dtype=controlnet.dtype,
-                 do_classifier_free_guidance=do_classifier_free_guidance,
+                 do_classifier_free_guidance=self.do_classifier_free_guidance,
                  guess_mode=guess_mode,
              )
          elif isinstance(controlnet, MultiControlNetModel):
@@ -1231,7 +1330,7 @@ class StableDiffusionControlNetInpaintPipeline(
                      num_images_per_prompt=num_images_per_prompt,
                      device=device,
                      dtype=controlnet.dtype,
-                     do_classifier_free_guidance=do_classifier_free_guidance,
+                     do_classifier_free_guidance=self.do_classifier_free_guidance,
                      guess_mode=guess_mode,
                  )

@@ -1241,7 +1340,7 @@ class StableDiffusionControlNetInpaintPipeline(
          else:
              assert False

-         # 4. Preprocess mask and image - resizes image and mask w.r.t height and width
+         # 4.1 Preprocess mask and image - resizes image and mask w.r.t height and width
          init_image = self.image_processor.preprocess(image, height=height, width=width)
          init_image = init_image.to(dtype=torch.float32)

@@ -1259,6 +1358,7 @@ class StableDiffusionControlNetInpaintPipeline(
          latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
          # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
          is_strength_max = strength == 1.0
+         self._num_timesteps = len(timesteps)

          # 6. Prepare latent variables
          num_channels_latents = self.vae.config.latent_channels
@@ -1295,13 +1395,16 @@ class StableDiffusionControlNetInpaintPipeline(
              prompt_embeds.dtype,
              device,
              generator,
-             do_classifier_free_guidance,
+             self.do_classifier_free_guidance,
          )

          # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
          extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

-         # 7.1 Create tensor stating which controlnets to keep
+         # 7.1 Add image embeds for IP-Adapter
+         added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+
+         # 7.2 Create tensor stating which controlnets to keep
          controlnet_keep = []
          for i in range(len(timesteps)):
              keeps = [
@@ -1315,11 +1418,11 @@ class StableDiffusionControlNetInpaintPipeline(
          with self.progress_bar(total=num_inference_steps) as progress_bar:
              for i, t in enumerate(timesteps):
                  # expand the latents if we are doing classifier free guidance
-                 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+                 latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                  latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                  # controlnet(s) inference
-                 if guess_mode and do_classifier_free_guidance:
+                 if guess_mode and self.do_classifier_free_guidance:
                      # Infer ControlNet only for the conditional batch.
                      control_model_input = latents
                      control_model_input = self.scheduler.scale_model_input(control_model_input, t)
@@ -1346,7 +1449,7 @@ class StableDiffusionControlNetInpaintPipeline(
                      return_dict=False,
                  )

-                 if guess_mode and do_classifier_free_guidance:
+                 if guess_mode and self.do_classifier_free_guidance:
                      # Inferred ControlNet only for the conditional batch.
                      # To apply the output of ControlNet to both the unconditional and conditional batches,
                      # add 0 to the unconditional batch to keep it unchanged.
1361
1464
  latent_model_input,
1362
1465
  t,
1363
1466
  encoder_hidden_states=prompt_embeds,
1364
- cross_attention_kwargs=cross_attention_kwargs,
1467
+ cross_attention_kwargs=self.cross_attention_kwargs,
1365
1468
  down_block_additional_residuals=down_block_res_samples,
1366
1469
  mid_block_additional_residual=mid_block_res_sample,
1470
+ added_cond_kwargs=added_cond_kwargs,
1367
1471
  return_dict=False,
1368
1472
  )[0]
1369
1473
 
1370
1474
  # perform guidance
1371
- if do_classifier_free_guidance:
1475
+ if self.do_classifier_free_guidance:
1372
1476
  noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1373
1477
  noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1374
1478
 
@@ -1377,7 +1481,7 @@ class StableDiffusionControlNetInpaintPipeline(

                  if num_channels_unet == 4:
                      init_latents_proper = image_latents
-                     if do_classifier_free_guidance:
+                     if self.do_classifier_free_guidance:
                          init_mask, _ = mask.chunk(2)
                      else:
                          init_mask = mask
@@ -1390,6 +1494,16 @@ class StableDiffusionControlNetInpaintPipeline(

                  latents = (1 - init_mask) * init_latents_proper + init_mask * latents

+                 if callback_on_step_end is not None:
+                     callback_kwargs = {}
+                     for k in callback_on_step_end_tensor_inputs:
+                         callback_kwargs[k] = locals()[k]
+                     callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                     latents = callback_outputs.pop("latents", latents)
+                     prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                     negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
                  # call the callback, if provided
                  if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                      progress_bar.update()