diffusers 0.17.1__py3-none-any.whl → 0.18.2__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (120) hide show
  1. diffusers/__init__.py +26 -1
  2. diffusers/configuration_utils.py +34 -29
  3. diffusers/dependency_versions_table.py +4 -0
  4. diffusers/image_processor.py +125 -12
  5. diffusers/loaders.py +169 -203
  6. diffusers/models/attention.py +24 -1
  7. diffusers/models/attention_flax.py +10 -5
  8. diffusers/models/attention_processor.py +3 -0
  9. diffusers/models/autoencoder_kl.py +114 -33
  10. diffusers/models/controlnet.py +131 -14
  11. diffusers/models/controlnet_flax.py +37 -26
  12. diffusers/models/cross_attention.py +17 -17
  13. diffusers/models/embeddings.py +67 -0
  14. diffusers/models/modeling_flax_utils.py +64 -56
  15. diffusers/models/modeling_utils.py +193 -104
  16. diffusers/models/prior_transformer.py +207 -37
  17. diffusers/models/resnet.py +26 -26
  18. diffusers/models/transformer_2d.py +36 -41
  19. diffusers/models/transformer_temporal.py +24 -21
  20. diffusers/models/unet_1d.py +31 -25
  21. diffusers/models/unet_2d.py +43 -30
  22. diffusers/models/unet_2d_blocks.py +210 -89
  23. diffusers/models/unet_2d_blocks_flax.py +12 -12
  24. diffusers/models/unet_2d_condition.py +172 -64
  25. diffusers/models/unet_2d_condition_flax.py +38 -24
  26. diffusers/models/unet_3d_blocks.py +34 -31
  27. diffusers/models/unet_3d_condition.py +101 -34
  28. diffusers/models/vae.py +5 -5
  29. diffusers/models/vae_flax.py +37 -34
  30. diffusers/models/vq_model.py +23 -14
  31. diffusers/pipelines/__init__.py +24 -1
  32. diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py +1 -1
  33. diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py +5 -3
  34. diffusers/pipelines/consistency_models/__init__.py +1 -0
  35. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +337 -0
  36. diffusers/pipelines/controlnet/multicontrolnet.py +120 -1
  37. diffusers/pipelines/controlnet/pipeline_controlnet.py +59 -17
  38. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +60 -15
  39. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +60 -17
  40. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +1 -1
  41. diffusers/pipelines/kandinsky/__init__.py +1 -1
  42. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +4 -6
  43. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +1 -0
  44. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +1 -0
  45. diffusers/pipelines/kandinsky2_2/__init__.py +7 -0
  46. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +317 -0
  47. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +372 -0
  48. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +434 -0
  49. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +398 -0
  50. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +531 -0
  51. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +541 -0
  52. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +605 -0
  53. diffusers/pipelines/pipeline_flax_utils.py +2 -2
  54. diffusers/pipelines/pipeline_utils.py +124 -146
  55. diffusers/pipelines/shap_e/__init__.py +27 -0
  56. diffusers/pipelines/shap_e/camera.py +147 -0
  57. diffusers/pipelines/shap_e/pipeline_shap_e.py +390 -0
  58. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +349 -0
  59. diffusers/pipelines/shap_e/renderer.py +709 -0
  60. diffusers/pipelines/stable_diffusion/__init__.py +2 -0
  61. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +261 -66
  62. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +3 -3
  63. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +5 -3
  64. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +4 -2
  65. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +6 -6
  66. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +1 -1
  67. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py +1 -1
  68. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py +719 -0
  69. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py +1 -1
  70. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py +832 -0
  71. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +17 -7
  72. diffusers/pipelines/stable_diffusion_xl/__init__.py +26 -0
  73. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +823 -0
  74. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +896 -0
  75. diffusers/pipelines/stable_diffusion_xl/watermark.py +31 -0
  76. diffusers/pipelines/text_to_video_synthesis/__init__.py +2 -1
  77. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +5 -1
  78. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +771 -0
  79. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +92 -6
  80. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +3 -3
  81. diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +209 -91
  82. diffusers/schedulers/__init__.py +3 -0
  83. diffusers/schedulers/scheduling_consistency_models.py +380 -0
  84. diffusers/schedulers/scheduling_ddim.py +28 -6
  85. diffusers/schedulers/scheduling_ddim_inverse.py +19 -4
  86. diffusers/schedulers/scheduling_ddim_parallel.py +642 -0
  87. diffusers/schedulers/scheduling_ddpm.py +53 -7
  88. diffusers/schedulers/scheduling_ddpm_parallel.py +604 -0
  89. diffusers/schedulers/scheduling_deis_multistep.py +66 -11
  90. diffusers/schedulers/scheduling_dpmsolver_multistep.py +55 -13
  91. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +19 -4
  92. diffusers/schedulers/scheduling_dpmsolver_sde.py +73 -11
  93. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +23 -7
  94. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +58 -9
  95. diffusers/schedulers/scheduling_euler_discrete.py +58 -8
  96. diffusers/schedulers/scheduling_heun_discrete.py +89 -14
  97. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +73 -11
  98. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +73 -11
  99. diffusers/schedulers/scheduling_lms_discrete.py +57 -8
  100. diffusers/schedulers/scheduling_pndm.py +46 -10
  101. diffusers/schedulers/scheduling_repaint.py +19 -4
  102. diffusers/schedulers/scheduling_sde_ve.py +5 -1
  103. diffusers/schedulers/scheduling_unclip.py +43 -4
  104. diffusers/schedulers/scheduling_unipc_multistep.py +48 -7
  105. diffusers/training_utils.py +1 -1
  106. diffusers/utils/__init__.py +2 -1
  107. diffusers/utils/dummy_pt_objects.py +60 -0
  108. diffusers/utils/dummy_torch_and_transformers_and_invisible_watermark_objects.py +32 -0
  109. diffusers/utils/dummy_torch_and_transformers_objects.py +180 -0
  110. diffusers/utils/hub_utils.py +1 -1
  111. diffusers/utils/import_utils.py +20 -3
  112. diffusers/utils/logging.py +15 -18
  113. diffusers/utils/outputs.py +3 -3
  114. diffusers/utils/testing_utils.py +15 -0
  115. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/METADATA +4 -2
  116. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/RECORD +120 -94
  117. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/WHEEL +1 -1
  118. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/LICENSE +0 -0
  119. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/entry_points.txt +0 -0
  120. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,317 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import List, Optional, Union
16
+
17
+ import torch
18
+
19
+ from ...models import UNet2DConditionModel, VQModel
20
+ from ...pipelines import DiffusionPipeline
21
+ from ...pipelines.pipeline_utils import ImagePipelineOutput
22
+ from ...schedulers import DDPMScheduler
23
+ from ...utils import (
24
+ is_accelerate_available,
25
+ is_accelerate_version,
26
+ logging,
27
+ randn_tensor,
28
+ replace_example_docstring,
29
+ )
30
+
31
+
32
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Runnable usage example; it is injected into `__call__`'s docstring via the
# `@replace_example_docstring(EXAMPLE_DOC_STRING)` decorator, so keep the
# fenced ```py block intact.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
58
+
59
+
60
def downscale_height_and_width(height, width, scale_factor=8):
    """Snap a requested pixel size to the grid the unet/MoVQ pair can handle.

    Each dimension is divided by ``scale_factor**2`` rounding *up* (so the
    requested size is never undershot) and then multiplied back by
    ``scale_factor``. The returned pair is the height/width actually used to
    shape the latents.
    """
    block = scale_factor**2
    # -(-a // b) is ceiling division using only integer ops.
    latent_height = -(-height // block)
    latent_width = -(-width // block)
    return latent_height * scale_factor, latent_width * scale_factor
68
+
69
+
70
class KandinskyV22Pipeline(DiffusionPipeline):
    """
    Decoder pipeline for text-to-image generation using Kandinsky 2.2. It consumes CLIP image
    embeddings produced by the Kandinsky prior pipeline and decodes them into images.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        scheduler ([`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ Decoder to generate the image from the latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Spatial scale factor of the MoVQ autoencoder, derived from its number
        # of resolution levels; used to map pixel sizes to latent sizes.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        # Draw fresh Gaussian noise unless the caller supplied latents; a
        # user-provided tensor is validated and moved to the target device.
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # Scale by the scheduler's initial sigma so the starting noise has the
        # variance the scheduler expects at the first timestep.
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's
        models have their state dicts saved to CPU and then are moved to a `torch.device('meta')` and loaded to GPU
        only when their specific submodule has its `forward` method called.

        Args:
            gpu_id (`int`, *optional*, defaults to 0):
                Index of the CUDA device the submodules are loaded onto on demand.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.

        Args:
            gpu_id (`int`, *optional*, defaults to 0):
                Index of the CUDA device models are moved to while active.
        """
        # `cpu_offload_with_hook` (with `prev_module_hook` chaining) only exists
        # from accelerate 0.17.0 on.
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        # Each hook offloads its model when the next chained model starts running.
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
        hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        # With offloading enabled, the real execution device lives on the
        # accelerate hook of one of the unet's submodules.
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                The clip image embeddings for text prompt, that will be used to condition the image generation.
            negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                The clip image embeddings for negative text prompt, will be used to condition the image generation.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        # Normalize list inputs to a single batched tensor.
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            # Stack [negative, positive] so one unet pass serves both branches
            # of classifier-free guidance.
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        # Convert the requested pixel size to the latent spatial size.
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            # The Kandinsky unet is conditioned through `added_cond_kwargs`,
            # not through `encoder_hidden_states`.
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # The unet output carries extra variance channels; split them off,
                # apply guidance to the noise part only, then re-attach the
                # (text-conditioned) variance prediction.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            # Schedulers without learned variance expect only the noise channels.
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing: decode the final latents with the MoVQ decoder
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
@@ -0,0 +1,372 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import List, Optional, Union
16
+
17
+ import torch
18
+
19
+ from ...models import UNet2DConditionModel, VQModel
20
+ from ...pipelines import DiffusionPipeline
21
+ from ...pipelines.pipeline_utils import ImagePipelineOutput
22
+ from ...schedulers import DDPMScheduler
23
+ from ...utils import (
24
+ is_accelerate_available,
25
+ is_accelerate_version,
26
+ logging,
27
+ randn_tensor,
28
+ replace_example_docstring,
29
+ )
30
+
31
+
32
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Runnable depth-controlnet usage example; it is injected into `__call__`'s
# docstring via `@replace_example_docstring(EXAMPLE_DOC_STRING)`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""
97
+
98
+
99
# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width
# NOTE: the `# Copied from` marker means this body must stay byte-identical to
# the referenced function; edit the original and re-run the copy check instead
# of changing this one.
def downscale_height_and_width(height, width, scale_factor=8):
    # Ceil-divide each dimension by scale_factor**2, then scale back up by
    # scale_factor: the result never undershoots the requested size.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
108
+
109
+
110
+ class KandinskyV22ControlnetPipeline(DiffusionPipeline):
111
+ """
112
+ Pipeline for text-to-image generation using Kandinsky
113
+
114
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
115
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
116
+
117
+ Args:
118
+ scheduler ([`DDIMScheduler`]):
119
+ A scheduler to be used in combination with `unet` to generate image latents.
120
+ unet ([`UNet2DConditionModel`]):
121
+ Conditional U-Net architecture to denoise the image embedding.
122
+ movq ([`VQModel`]):
123
+ MoVQ Decoder to generate the image from the latents.
124
+ """
125
+
126
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        """Register the unet, scheduler and MoVQ decoder; see the class docstring for their roles."""
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Spatial scale factor of the MoVQ autoencoder, derived from its number
        # of resolution levels; used to map pixel sizes to latent sizes.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
140
+
141
    # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        # Draw fresh Gaussian noise unless the caller supplied latents; a
        # user-provided tensor is validated and moved to the target device.
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # Scale by the scheduler's initial sigma so the starting noise has the
        # variance the scheduler expects at the first timestep.
        latents = latents * scheduler.init_noise_sigma
        return latents
152
+
153
    # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.KandinskyV22Pipeline.enable_sequential_cpu_offload
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's
        models have their state dicts saved to CPU and then are moved to a `torch.device('meta')` and loaded to GPU
        only when their specific submodule has its `forward` method called.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
174
+
175
    # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.KandinskyV22Pipeline.enable_model_cpu_offload
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        # `cpu_offload_with_hook` (with `prev_module_hook` chaining) only exists
        # from accelerate 0.17.0 on.
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        # Each hook offloads its model when the next chained model starts running.
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
200
+
201
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
        hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        # With offloading enabled, the real execution device lives on the
        # accelerate hook of one of the unet's submodules.
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
219
+
220
+ @torch.no_grad()
221
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
222
+ def __call__(
223
+ self,
224
+ image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
225
+ negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
226
+ hint: torch.FloatTensor,
227
+ height: int = 512,
228
+ width: int = 512,
229
+ num_inference_steps: int = 100,
230
+ guidance_scale: float = 4.0,
231
+ num_images_per_prompt: int = 1,
232
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
233
+ latents: Optional[torch.FloatTensor] = None,
234
+ output_type: Optional[str] = "pil",
235
+ return_dict: bool = True,
236
+ ):
237
+ """
238
+ Function invoked when calling the pipeline for generation.
239
+
240
+ Args:
241
+ prompt (`str` or `List[str]`):
242
+ The prompt or prompts to guide the image generation.
243
+ hint (`torch.FloatTensor`):
244
+ The controlnet condition.
245
+ image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
246
+ The clip image embeddings for text prompt, that will be used to condition the image generation.
247
+ negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
248
+ The clip image embeddings for negative text prompt, will be used to condition the image generation.
249
+ negative_prompt (`str` or `List[str]`, *optional*):
250
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
251
+ if `guidance_scale` is less than `1`).
252
+ height (`int`, *optional*, defaults to 512):
253
+ The height in pixels of the generated image.
254
+ width (`int`, *optional*, defaults to 512):
255
+ The width in pixels of the generated image.
256
+ num_inference_steps (`int`, *optional*, defaults to 100):
257
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
258
+ expense of slower inference.
259
+ guidance_scale (`float`, *optional*, defaults to 4.0):
260
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
261
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
262
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
263
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
264
+ usually at the expense of lower image quality.
265
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
266
+ The number of images to generate per prompt.
267
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
268
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
269
+ to make generation deterministic.
270
+ latents (`torch.FloatTensor`, *optional*):
271
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
272
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
273
+ tensor will ge generated by sampling using the supplied random `generator`.
274
+ output_type (`str`, *optional*, defaults to `"pil"`):
275
+ The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
276
+ (`np.array`) or `"pt"` (`torch.Tensor`).
277
+ return_dict (`bool`, *optional*, defaults to `True`):
278
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
279
+
280
+ Examples:
281
+
282
+ Returns:
283
+ [`~pipelines.ImagePipelineOutput`] or `tuple`
284
+ """
285
+ device = self._execution_device
286
+
287
+ do_classifier_free_guidance = guidance_scale > 1.0
288
+
289
+ if isinstance(image_embeds, list):
290
+ image_embeds = torch.cat(image_embeds, dim=0)
291
+ if isinstance(negative_image_embeds, list):
292
+ negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
293
+ if isinstance(hint, list):
294
+ hint = torch.cat(hint, dim=0)
295
+
296
+ batch_size = image_embeds.shape[0] * num_images_per_prompt
297
+
298
+ if do_classifier_free_guidance:
299
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
300
+ negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
301
+ hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
302
+
303
+ image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
304
+ hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
305
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
306
+ timesteps_tensor = self.scheduler.timesteps
307
+
308
+ num_channels_latents = self.movq.config.latent_channels
309
+
310
+ height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
311
+
312
+ # create initial latent
313
+ latents = self.prepare_latents(
314
+ (batch_size, num_channels_latents, height, width),
315
+ image_embeds.dtype,
316
+ device,
317
+ generator,
318
+ latents,
319
+ self.scheduler,
320
+ )
321
+
322
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
323
+ # expand the latents if we are doing classifier free guidance
324
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
325
+
326
+ added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
327
+ noise_pred = self.unet(
328
+ sample=latent_model_input,
329
+ timestep=t,
330
+ encoder_hidden_states=None,
331
+ added_cond_kwargs=added_cond_kwargs,
332
+ return_dict=False,
333
+ )[0]
334
+
335
+ if do_classifier_free_guidance:
336
+ noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
337
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
338
+ _, variance_pred_text = variance_pred.chunk(2)
339
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
340
+ noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
341
+
342
+ if not (
343
+ hasattr(self.scheduler.config, "variance_type")
344
+ and self.scheduler.config.variance_type in ["learned", "learned_range"]
345
+ ):
346
+ noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
347
+
348
+ # compute the previous noisy sample x_t -> x_t-1
349
+ latents = self.scheduler.step(
350
+ noise_pred,
351
+ t,
352
+ latents,
353
+ generator=generator,
354
+ )[0]
355
+ # post-processing
356
+ image = self.movq.decode(latents, force_not_quantize=True)["sample"]
357
+
358
+ if output_type not in ["pt", "np", "pil"]:
359
+ raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
360
+
361
+ if output_type in ["np", "pil"]:
362
+ image = image * 0.5 + 0.5
363
+ image = image.clamp(0, 1)
364
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
365
+
366
+ if output_type == "pil":
367
+ image = self.numpy_to_pil(image)
368
+
369
+ if not return_dict:
370
+ return (image,)
371
+
372
+ return ImagePipelineOutput(images=image)