diffusers-0.24.0-py3-none-any.whl → diffusers-0.25.0-py3-none-any.whl

Files changed (174)
  1. diffusers/__init__.py +11 -1
  2. diffusers/commands/fp16_safetensors.py +10 -11
  3. diffusers/configuration_utils.py +12 -8
  4. diffusers/dependency_versions_table.py +2 -1
  5. diffusers/experimental/rl/value_guided_sampling.py +1 -1
  6. diffusers/image_processor.py +286 -46
  7. diffusers/loaders/ip_adapter.py +11 -9
  8. diffusers/loaders/lora.py +198 -60
  9. diffusers/loaders/single_file.py +24 -18
  10. diffusers/loaders/textual_inversion.py +10 -14
  11. diffusers/loaders/unet.py +130 -37
  12. diffusers/models/__init__.py +18 -12
  13. diffusers/models/activations.py +9 -6
  14. diffusers/models/attention.py +137 -16
  15. diffusers/models/attention_processor.py +133 -46
  16. diffusers/models/autoencoders/__init__.py +5 -0
  17. diffusers/models/{autoencoder_asym_kl.py → autoencoders/autoencoder_asym_kl.py} +4 -4
  18. diffusers/models/{autoencoder_kl.py → autoencoders/autoencoder_kl.py} +45 -6
  19. diffusers/models/{autoencoder_kl_temporal_decoder.py → autoencoders/autoencoder_kl_temporal_decoder.py} +8 -8
  20. diffusers/models/{autoencoder_tiny.py → autoencoders/autoencoder_tiny.py} +4 -4
  21. diffusers/models/{consistency_decoder_vae.py → autoencoders/consistency_decoder_vae.py} +14 -14
  22. diffusers/models/{vae.py → autoencoders/vae.py} +9 -5
  23. diffusers/models/downsampling.py +338 -0
  24. diffusers/models/embeddings.py +112 -29
  25. diffusers/models/modeling_flax_utils.py +12 -7
  26. diffusers/models/modeling_utils.py +10 -10
  27. diffusers/models/normalization.py +108 -2
  28. diffusers/models/resnet.py +15 -699
  29. diffusers/models/transformer_2d.py +2 -2
  30. diffusers/models/unet_2d_condition.py +37 -0
  31. diffusers/models/{unet_kandi3.py → unet_kandinsky3.py} +105 -159
  32. diffusers/models/upsampling.py +454 -0
  33. diffusers/models/uvit_2d.py +471 -0
  34. diffusers/models/vq_model.py +9 -2
  35. diffusers/pipelines/__init__.py +81 -73
  36. diffusers/pipelines/amused/__init__.py +62 -0
  37. diffusers/pipelines/amused/pipeline_amused.py +328 -0
  38. diffusers/pipelines/amused/pipeline_amused_img2img.py +347 -0
  39. diffusers/pipelines/amused/pipeline_amused_inpaint.py +378 -0
  40. diffusers/pipelines/animatediff/pipeline_animatediff.py +38 -10
  41. diffusers/pipelines/auto_pipeline.py +17 -13
  42. diffusers/pipelines/controlnet/pipeline_controlnet.py +27 -10
  43. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +47 -5
  44. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +25 -8
  45. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +4 -6
  46. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +26 -10
  47. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +4 -3
  48. diffusers/pipelines/deprecated/__init__.py +153 -0
  49. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/__init__.py +3 -3
  50. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion.py +91 -18
  51. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_alt_diffusion_img2img.py +91 -18
  52. diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/pipeline_output.py +1 -1
  53. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/__init__.py +1 -1
  54. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/mel.py +2 -2
  55. diffusers/pipelines/{audio_diffusion → deprecated/audio_diffusion}/pipeline_audio_diffusion.py +4 -4
  56. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/__init__.py +1 -1
  57. diffusers/pipelines/{latent_diffusion_uncond → deprecated/latent_diffusion_uncond}/pipeline_latent_diffusion_uncond.py +4 -4
  58. diffusers/pipelines/{pndm → deprecated/pndm}/__init__.py +1 -1
  59. diffusers/pipelines/{pndm → deprecated/pndm}/pipeline_pndm.py +4 -4
  60. diffusers/pipelines/{repaint → deprecated/repaint}/__init__.py +1 -1
  61. diffusers/pipelines/{repaint → deprecated/repaint}/pipeline_repaint.py +5 -5
  62. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/__init__.py +1 -1
  63. diffusers/pipelines/{score_sde_ve → deprecated/score_sde_ve}/pipeline_score_sde_ve.py +4 -4
  64. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/__init__.py +6 -6
  65. diffusers/pipelines/{spectrogram_diffusion/continous_encoder.py → deprecated/spectrogram_diffusion/continuous_encoder.py} +2 -2
  66. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/midi_utils.py +1 -1
  67. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/notes_encoder.py +2 -2
  68. diffusers/pipelines/{spectrogram_diffusion → deprecated/spectrogram_diffusion}/pipeline_spectrogram_diffusion.py +7 -7
  69. diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py +55 -0
  70. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_cycle_diffusion.py +16 -11
  71. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_onnx_stable_diffusion_inpaint_legacy.py +6 -6
  72. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_inpaint_legacy.py +11 -11
  73. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_model_editing.py +16 -11
  74. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_paradigms.py +10 -10
  75. diffusers/pipelines/{stable_diffusion → deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_pix2pix_zero.py +13 -13
  76. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/__init__.py +1 -1
  77. diffusers/pipelines/{stochastic_karras_ve → deprecated/stochastic_karras_ve}/pipeline_stochastic_karras_ve.py +4 -4
  78. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/__init__.py +3 -3
  79. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/modeling_text_unet.py +54 -11
  80. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion.py +4 -4
  81. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_dual_guided.py +6 -6
  82. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_image_variation.py +6 -6
  83. diffusers/pipelines/{versatile_diffusion → deprecated/versatile_diffusion}/pipeline_versatile_diffusion_text_to_image.py +6 -6
  84. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/__init__.py +3 -3
  85. diffusers/pipelines/{vq_diffusion → deprecated/vq_diffusion}/pipeline_vq_diffusion.py +5 -5
  86. diffusers/pipelines/kandinsky3/__init__.py +4 -4
  87. diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +98 -0
  88. diffusers/pipelines/kandinsky3/{kandinsky3_pipeline.py → pipeline_kandinsky3.py} +172 -35
  89. diffusers/pipelines/kandinsky3/{kandinsky3img2img_pipeline.py → pipeline_kandinsky3_img2img.py} +228 -34
  90. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +46 -5
  91. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +47 -6
  92. diffusers/pipelines/onnx_utils.py +8 -5
  93. diffusers/pipelines/pipeline_flax_utils.py +7 -6
  94. diffusers/pipelines/pipeline_utils.py +30 -29
  95. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +51 -2
  96. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +3 -3
  97. diffusers/pipelines/stable_diffusion/__init__.py +1 -72
  98. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +67 -75
  99. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +92 -8
  100. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +92 -8
  101. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +138 -10
  102. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +57 -7
  103. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +3 -0
  104. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +6 -0
  105. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +5 -0
  106. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +5 -0
  107. diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +48 -0
  108. diffusers/pipelines/{stable_diffusion → stable_diffusion_attend_and_excite}/pipeline_stable_diffusion_attend_and_excite.py +5 -2
  109. diffusers/pipelines/stable_diffusion_diffedit/__init__.py +48 -0
  110. diffusers/pipelines/{stable_diffusion → stable_diffusion_diffedit}/pipeline_stable_diffusion_diffedit.py +2 -3
  111. diffusers/pipelines/stable_diffusion_gligen/__init__.py +50 -0
  112. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen.py +2 -2
  113. diffusers/pipelines/{stable_diffusion → stable_diffusion_gligen}/pipeline_stable_diffusion_gligen_text_image.py +3 -3
  114. diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py +60 -0
  115. diffusers/pipelines/{stable_diffusion → stable_diffusion_k_diffusion}/pipeline_stable_diffusion_k_diffusion.py +6 -1
  116. diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +48 -0
  117. diffusers/pipelines/{stable_diffusion → stable_diffusion_ldm3d}/pipeline_stable_diffusion_ldm3d.py +50 -7
  118. diffusers/pipelines/stable_diffusion_panorama/__init__.py +48 -0
  119. diffusers/pipelines/{stable_diffusion → stable_diffusion_panorama}/pipeline_stable_diffusion_panorama.py +56 -8
  120. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +58 -6
  121. diffusers/pipelines/stable_diffusion_sag/__init__.py +48 -0
  122. diffusers/pipelines/{stable_diffusion → stable_diffusion_sag}/pipeline_stable_diffusion_sag.py +67 -10
  123. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +97 -15
  124. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +98 -14
  125. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +97 -14
  126. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +7 -5
  127. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +12 -9
  128. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +6 -0
  129. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +5 -0
  130. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +5 -0
  131. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +331 -9
  132. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +468 -9
  133. diffusers/pipelines/unclip/pipeline_unclip.py +2 -1
  134. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +1 -0
  135. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
  136. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +4 -0
  137. diffusers/schedulers/__init__.py +2 -0
  138. diffusers/schedulers/scheduling_amused.py +162 -0
  139. diffusers/schedulers/scheduling_consistency_models.py +2 -0
  140. diffusers/schedulers/scheduling_ddim_inverse.py +1 -4
  141. diffusers/schedulers/scheduling_ddpm.py +46 -0
  142. diffusers/schedulers/scheduling_ddpm_parallel.py +46 -0
  143. diffusers/schedulers/scheduling_deis_multistep.py +13 -1
  144. diffusers/schedulers/scheduling_dpmsolver_multistep.py +13 -1
  145. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +13 -1
  146. diffusers/schedulers/scheduling_dpmsolver_sde.py +2 -0
  147. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +13 -1
  148. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +58 -0
  149. diffusers/schedulers/scheduling_euler_discrete.py +62 -3
  150. diffusers/schedulers/scheduling_heun_discrete.py +2 -0
  151. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +2 -0
  152. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +2 -0
  153. diffusers/schedulers/scheduling_lms_discrete.py +2 -0
  154. diffusers/schedulers/scheduling_unipc_multistep.py +13 -1
  155. diffusers/schedulers/scheduling_utils.py +3 -1
  156. diffusers/schedulers/scheduling_utils_flax.py +3 -1
  157. diffusers/training_utils.py +1 -1
  158. diffusers/utils/__init__.py +0 -2
  159. diffusers/utils/constants.py +2 -5
  160. diffusers/utils/dummy_pt_objects.py +30 -0
  161. diffusers/utils/dummy_torch_and_transformers_objects.py +45 -0
  162. diffusers/utils/dynamic_modules_utils.py +14 -18
  163. diffusers/utils/hub_utils.py +24 -36
  164. diffusers/utils/logging.py +1 -1
  165. diffusers/utils/state_dict_utils.py +8 -0
  166. diffusers/utils/testing_utils.py +199 -1
  167. diffusers/utils/torch_utils.py +3 -3
  168. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/METADATA +54 -53
  169. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/RECORD +174 -155
  170. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/WHEEL +1 -1
  171. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/entry_points.txt +0 -1
  172. /diffusers/pipelines/{alt_diffusion → deprecated/alt_diffusion}/modeling_roberta_series.py +0 -0
  173. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/LICENSE +0 -0
  174. {diffusers-0.24.0.dist-info → diffusers-0.25.0.dist-info}/top_level.txt +0 -0
diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py
@@ -36,8 +36,8 @@ from ...utils import (
 )
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline
-from . import StableDiffusionPipelineOutput
-from .safety_checker import StableDiffusionSafetyChecker
+from ..stable_diffusion import StableDiffusionPipelineOutput
+from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py
@@ -35,9 +35,9 @@ from ...schedulers import KarrasDiffusionSchedulers
 from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline
-from . import StableDiffusionPipelineOutput
-from .clip_image_project_model import CLIPImageProjection
-from .safety_checker import StableDiffusionSafetyChecker
+from ..stable_diffusion import StableDiffusionPipelineOutput
+from ..stable_diffusion.clip_image_project_model import CLIPImageProjection
+from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
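
The two GLIGEN hunks above illustrate the pattern this release applies across the board: pipelines that previously lived inside diffusers/pipelines/stable_diffusion/ move into dedicated packages (files 107-126 in the list above), so package-relative imports such as `from . import StableDiffusionPipelineOutput` become `from ..stable_diffusion import StableDiffusionPipelineOutput`. Top-level imports are unaffected; only deep module paths move. A minimal migration sketch, assuming downstream code imported from the old (private, never guaranteed stable) module path:

```py
# Unchanged between 0.24.0 and 0.25.0: the public, top-level import.
from diffusers import StableDiffusionPanoramaPipeline

# Deep imports must follow the package split.
# 0.24.0 (old private path):
#   from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_panorama import (
#       StableDiffusionPanoramaPipeline,
#   )
# 0.25.0 (new package; exported by the lazy __init__ files shown below):
from diffusers.pipelines.stable_diffusion_panorama import StableDiffusionPanoramaPipeline
```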
diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py (new file)
@@ -0,0 +1,60 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_k_diffusion_available,
+    is_k_diffusion_version,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+
+try:
+    if not (
+        is_transformers_available()
+        and is_torch_available()
+        and is_k_diffusion_available()
+        and is_k_diffusion_version(">=", "0.0.12")
+    ):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_and_k_diffusion_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects))
+else:
+    _import_structure["pipeline_stable_diffusion_k_diffusion"] = ["StableDiffusionKDiffusionPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (
+            is_transformers_available()
+            and is_torch_available()
+            and is_k_diffusion_available()
+            and is_k_diffusion_version(">=", "0.0.12")
+        ):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *
+    else:
+        from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
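
Each new package __init__.py (this one and the LDM3D and panorama ones below) follows the library's lazy-import pattern: at import time only `_import_structure` is registered with `_LazyModule`, the heavy submodule is imported on first attribute access, and dummy placeholder objects are installed when an optional dependency is missing. The observable behavior, sketched under the assumption that torch, transformers, and k-diffusion >= 0.0.12 are installed:

```py
# Importing the package is cheap: _LazyModule swaps itself in as the module
# object without importing the torch-heavy pipeline code.
import diffusers.pipelines.stable_diffusion_k_diffusion as sd_k

# The first attribute access triggers _LazyModule to import the real submodule.
pipeline_cls = sd_k.StableDiffusionKDiffusionPipeline

# If k-diffusion were missing, the same name would instead resolve to a dummy
# object (from dummy_torch_and_transformers_and_k_diffusion_objects) that
# raises an informative dependency error when used.
```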
diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py
@@ -27,7 +27,7 @@ from ...schedulers import LMSDiscreteScheduler
 from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline
-from . import StableDiffusionPipelineOutput
+from ..stable_diffusion import StableDiffusionPipelineOutput


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
@@ -54,6 +54,11 @@ class StableDiffusionKDiffusionPipeline(DiffusionPipeline, TextualInversionLoade
     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

+    The pipeline also inherits the following loading methods:
+        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+
     <Tip warning={true}>

     This is an experimental pipeline and is likely to change in the future.
diffusers/pipelines/stable_diffusion_ldm3d/__init__.py (new file)
@@ -0,0 +1,48 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py
@@ -19,11 +19,11 @@ from typing import Any, Callable, Dict, List, Optional, Union
 import numpy as np
 import PIL.Image
 import torch
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

-from ...image_processor import VaeImageProcessorLDM3D
-from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
-from ...models import AutoencoderKL, UNet2DConditionModel
+from ...image_processor import PipelineImageInput, VaeImageProcessorLDM3D
+from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers
 from ...utils import (
@@ -37,7 +37,7 @@ from ...utils import (
 )
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline
-from .safety_checker import StableDiffusionSafetyChecker
+from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
@@ -82,7 +82,7 @@ class LDM3DPipelineOutput(BaseOutput):


 class StableDiffusionLDM3DPipeline(
-    DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
+    DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FromSingleFileMixin
 ):
     r"""
     Pipeline for text-to-image and 3D generation using LDM3D.
@@ -95,6 +95,7 @@ class StableDiffusionLDM3DPipeline(
         - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
         - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
         - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

     Args:
         vae ([`AutoencoderKL`]):
@@ -117,7 +118,7 @@ class StableDiffusionLDM3DPipeline(
     """

     model_cpu_offload_seq = "text_encoder->unet->vae"
-    _optional_components = ["safety_checker", "feature_extractor"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
     _exclude_from_cpu_offload = ["safety_checker"]

     def __init__(
@@ -129,6 +130,7 @@ class StableDiffusionLDM3DPipeline(
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: StableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: Optional[CLIPVisionModelWithProjection],
         requires_safety_checker: bool = True,
     ):
         super().__init__()
@@ -157,6 +159,7 @@ class StableDiffusionLDM3DPipeline(
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
         self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor)
@@ -410,6 +413,31 @@ class StableDiffusionLDM3DPipeline(

         return prompt_embeds, negative_prompt_embeds

+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        if output_hidden_states:
+            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_enc_hidden_states = self.image_encoder(
+                torch.zeros_like(image), output_hidden_states=True
+            ).hidden_states[-2]
+            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                num_images_per_prompt, dim=0
+            )
+            return image_enc_hidden_states, uncond_image_enc_hidden_states
+        else:
+            image_embeds = self.image_encoder(image).image_embeds
+            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_embeds = torch.zeros_like(image_embeds)
+
+            return image_embeds, uncond_image_embeds
+
     def run_safety_checker(self, image, device, dtype):
         if self.safety_checker is None:
             has_nsfw_concept = None
@@ -529,6 +557,7 @@ class StableDiffusionLDM3DPipeline(
         latents: Optional[torch.FloatTensor] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -573,6 +602,8 @@ class StableDiffusionLDM3DPipeline(
             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -622,6 +653,14 @@ class StableDiffusionLDM3DPipeline(
         # corresponds to doing no classifier free guidance.
         do_classifier_free_guidance = guidance_scale > 1.0

+        if ip_adapter_image is not None:
+            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+            image_embeds, negative_image_embeds = self.encode_image(
+                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+            )
+            if do_classifier_free_guidance:
+                image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
         # 3. Encode input prompt
         prompt_embeds, negative_prompt_embeds = self.encode_prompt(
             prompt,
@@ -659,6 +698,9 @@ class StableDiffusionLDM3DPipeline(
         # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

+        # 6.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+
         # 7. Denoising loop
         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
         with self.progress_bar(total=num_inference_steps) as progress_bar:
@@ -673,6 +715,7 @@ class StableDiffusionLDM3DPipeline(
                     t,
                     encoder_hidden_states=prompt_embeds,
                     cross_attention_kwargs=cross_attention_kwargs,
+                    added_cond_kwargs=added_cond_kwargs,
                     return_dict=False,
                 )[0]

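Taken together, the LDM3D hunks wire in IP-Adapter support end to end: an optional `image_encoder` component, the copied `encode_image` helper, a new `ip_adapter_image` call argument, and `added_cond_kwargs={"image_embeds": ...}` threaded into the UNet call. A usage sketch; the checkpoint and adapter IDs are illustrative assumptions and the reference-image URL is a placeholder:

```py
import torch
from diffusers import StableDiffusionLDM3DPipeline
from diffusers.utils import load_image

# Illustrative model IDs; substitute the checkpoints you actually use.
pipe = StableDiffusionLDM3DPipeline.from_pretrained(
    "Intel/ldm3d-4c", torch_dtype=torch.float16
).to("cuda")
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

style_image = load_image("https://example.com/reference.png")  # placeholder URL
out = pipe(
    prompt="a cozy reading nook, warm light",
    ip_adapter_image=style_image,  # routed through encode_image -> added_cond_kwargs
    num_inference_steps=25,
)
rgb_image, depth_image = out.rgb[0], out.depth[0]
```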
diffusers/pipelines/stable_diffusion_panorama/__init__.py (new file)
@@ -0,0 +1,48 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ...utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py
@@ -16,11 +16,11 @@ import inspect
 from typing import Any, Callable, Dict, List, Optional, Union

 import torch
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

-from ...image_processor import VaeImageProcessor
-from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
-from ...models import AutoencoderKL, UNet2DConditionModel
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import DDIMScheduler
 from ...utils import (
@@ -33,8 +33,8 @@ from ...utils import (
 )
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline
-from . import StableDiffusionPipelineOutput
-from .safety_checker import StableDiffusionSafetyChecker
+from ..stable_diffusion import StableDiffusionPipelineOutput
+from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
@@ -59,13 +59,19 @@ EXAMPLE_DOC_STRING = """
 """


-class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
+class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin):
     r"""
     Pipeline for text-to-image generation using MultiDiffusion.

     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
     implemented for all pipelines (downloading, saving, running on a particular device, etc.).

+    The pipeline also inherits the following loading methods:
+        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
+
     Args:
         vae ([`AutoencoderKL`]):
             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
@@ -87,7 +93,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
     """

     model_cpu_offload_seq = "text_encoder->unet->vae"
-    _optional_components = ["safety_checker", "feature_extractor"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
     _exclude_from_cpu_offload = ["safety_checker"]

     def __init__(
@@ -99,6 +105,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         scheduler: DDIMScheduler,
         safety_checker: StableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: Optional[CLIPVisionModelWithProjection] = None,
         requires_safety_checker: bool = True,
     ):
         super().__init__()
@@ -127,6 +134,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
@@ -363,6 +371,31 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM

         return prompt_embeds, negative_prompt_embeds

+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        if output_hidden_states:
+            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_enc_hidden_states = self.image_encoder(
+                torch.zeros_like(image), output_hidden_states=True
+            ).hidden_states[-2]
+            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                num_images_per_prompt, dim=0
+            )
+            return image_enc_hidden_states, uncond_image_enc_hidden_states
+        else:
+            image_embeds = self.image_encoder(image).image_embeds
+            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_embeds = torch.zeros_like(image_embeds)
+
+            return image_embeds, uncond_image_embeds
+
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
     def run_safety_checker(self, image, device, dtype):
         if self.safety_checker is None:
@@ -529,6 +562,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         latents: Optional[torch.FloatTensor] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -578,6 +612,8 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -632,6 +668,14 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         # corresponds to doing no classifier free guidance.
         do_classifier_free_guidance = guidance_scale > 1.0

+        if ip_adapter_image is not None:
+            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+            image_embeds, negative_image_embeds = self.encode_image(
+                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+            )
+            if do_classifier_free_guidance:
+                image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
         # 3. Encode input prompt
         text_encoder_lora_scale = (
             cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
@@ -681,6 +725,9 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
         # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

+        # 7.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+
         # 8. Denoising loop
         # Each denoising step also includes refinement of the latents with respect to the
         # views.
@@ -743,6 +790,7 @@ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderM
                     t,
                     encoder_hidden_states=prompt_embeds_input,
                     cross_attention_kwargs=cross_attention_kwargs,
+                    added_cond_kwargs=added_cond_kwargs,
                 ).sample

                 # perform guidance
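
A detail shared by all three pipelines touched here: `encode_image` is asked for penultimate hidden states only when the UNet's `encoder_hid_proj` is not a plain `ImageProjection`. Plain IP-Adapter projects the pooled CLIP image embedding, while IP-Adapter Plus resamples hidden states, which is what the `isinstance` dispatch distinguishes. Condensed as a sketch (helper name is ours, not the library's):

```py
from diffusers.models import ImageProjection

def wants_hidden_states(unet) -> bool:
    # Plain IP-Adapter (ImageProjection): pooled image_embeds suffice.
    # IP-Adapter Plus (resampler-style projection): penultimate hidden states.
    return not isinstance(unet.encoder_hid_proj, ImageProjection)
```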
diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py
@@ -5,10 +5,12 @@ from typing import Callable, List, Optional, Union
 import numpy as np
 import torch
 from packaging import version
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

 from ...configuration_utils import FrozenDict
-from ...models import AutoencoderKL, UNet2DConditionModel
+from ...image_processor import PipelineImageInput
+from ...loaders import IPAdapterMixin
+from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from ...schedulers import KarrasDiffusionSchedulers
 from ...utils import deprecate, logging
 from ...utils.torch_utils import randn_tensor
@@ -20,13 +22,16 @@ from .safety_checker import SafeStableDiffusionSafetyChecker
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


-class StableDiffusionPipelineSafe(DiffusionPipeline):
+class StableDiffusionPipelineSafe(DiffusionPipeline, IPAdapterMixin):
     r"""
     Pipeline based on the [`StableDiffusionPipeline`] for text-to-image generation using Safe Latent Diffusion.

     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
     implemented for all pipelines (downloading, saving, running on a particular device, etc.).

+    The pipeline also inherits the following loading methods:
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
+
     Args:
         vae ([`AutoencoderKL`]):
             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
@@ -48,7 +53,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
     """

     model_cpu_offload_seq = "text_encoder->unet->vae"
-    _optional_components = ["safety_checker", "feature_extractor"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]

     def __init__(
         self,
@@ -59,6 +64,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: SafeStableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: Optional[CLIPVisionModelWithProjection] = None,
         requires_safety_checker: bool = True,
     ):
         super().__init__()
@@ -140,6 +146,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
         self._safety_text_concept = safety_concept
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
@@ -467,6 +474,31 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         noise_guidance = noise_guidance - noise_guidance_safety
         return noise_guidance, safety_momentum

+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        if output_hidden_states:
+            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_enc_hidden_states = self.image_encoder(
+                torch.zeros_like(image), output_hidden_states=True
+            ).hidden_states[-2]
+            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                num_images_per_prompt, dim=0
+            )
+            return image_enc_hidden_states, uncond_image_enc_hidden_states
+        else:
+            image_embeds = self.image_encoder(image).image_embeds
+            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_embeds = torch.zeros_like(image_embeds)
+
+            return image_embeds, uncond_image_embeds
+
     @torch.no_grad()
     def __call__(
         self,
@@ -480,6 +512,7 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         latents: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -521,6 +554,8 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                 tensor is generated by sampling using the supplied random `generator`.
+            ip_adapter_image: (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
             output_type (`str`, *optional*, defaults to `"pil"`):
                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
@@ -560,10 +595,11 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         ```py
         import torch
         from diffusers import StableDiffusionPipelineSafe
+        from diffusers.pipelines.stable_diffusion_safe import SafetyConfig

         pipeline = StableDiffusionPipelineSafe.from_pretrained(
             "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16
-        )
+        ).to("cuda")
         prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker"
         image = pipeline(prompt=prompt, **SafetyConfig.MEDIUM).images[0]
         ```
@@ -588,6 +624,17 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         if not enable_safety_guidance:
             warnings.warn("Safety checker disabled!")

+        if ip_adapter_image is not None:
+            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
+            image_embeds, negative_image_embeds = self.encode_image(
+                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
+            )
+            if do_classifier_free_guidance:
+                if enable_safety_guidance:
+                    image_embeds = torch.cat([negative_image_embeds, image_embeds, image_embeds])
+                else:
+                    image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
         # 3. Encode input prompt
         prompt_embeds = self._encode_prompt(
             prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance
@@ -613,6 +660,9 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
         # 6. Prepare extra step kwargs.
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

+        # 6.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
+
         safety_momentum = None

         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
@@ -627,7 +677,9 @@ class StableDiffusionPipelineSafe(DiffusionPipeline):
                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                 # predict the noise residual
-                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
+                noise_pred = self.unet(
+                    latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs
+                ).sample

                 # perform guidance
                 if do_classifier_free_guidance:
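
Note the three-way concat in the Safe pipeline: with safety guidance enabled, each UNet call processes three latent copies (unconditional, text-conditioned, and safety-concept-conditioned), so the IP-Adapter embeds are tripled to stay aligned with that batch layout, while plain classifier-free guidance keeps the usual two-way concat. Sketched batch layout (our annotation, inferred from the hunks above):

```py
# Per-step batch layout under safe latent diffusion (sketch):
#   latent_model_input = cat([latents, latents, latents])      # uncond | text | safety concept
#   image_embeds       = cat([negative, positive, positive])   # 3-way cat keeps rows aligned
# Without safety guidance (plain CFG):
#   latent_model_input = cat([latents, latents])               # uncond | text
#   image_embeds       = cat([negative, positive])
```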