diffusers 0.17.1__py3-none-any.whl → 0.18.2__py3-none-any.whl

Files changed (120)
  1. diffusers/__init__.py +26 -1
  2. diffusers/configuration_utils.py +34 -29
  3. diffusers/dependency_versions_table.py +4 -0
  4. diffusers/image_processor.py +125 -12
  5. diffusers/loaders.py +169 -203
  6. diffusers/models/attention.py +24 -1
  7. diffusers/models/attention_flax.py +10 -5
  8. diffusers/models/attention_processor.py +3 -0
  9. diffusers/models/autoencoder_kl.py +114 -33
  10. diffusers/models/controlnet.py +131 -14
  11. diffusers/models/controlnet_flax.py +37 -26
  12. diffusers/models/cross_attention.py +17 -17
  13. diffusers/models/embeddings.py +67 -0
  14. diffusers/models/modeling_flax_utils.py +64 -56
  15. diffusers/models/modeling_utils.py +193 -104
  16. diffusers/models/prior_transformer.py +207 -37
  17. diffusers/models/resnet.py +26 -26
  18. diffusers/models/transformer_2d.py +36 -41
  19. diffusers/models/transformer_temporal.py +24 -21
  20. diffusers/models/unet_1d.py +31 -25
  21. diffusers/models/unet_2d.py +43 -30
  22. diffusers/models/unet_2d_blocks.py +210 -89
  23. diffusers/models/unet_2d_blocks_flax.py +12 -12
  24. diffusers/models/unet_2d_condition.py +172 -64
  25. diffusers/models/unet_2d_condition_flax.py +38 -24
  26. diffusers/models/unet_3d_blocks.py +34 -31
  27. diffusers/models/unet_3d_condition.py +101 -34
  28. diffusers/models/vae.py +5 -5
  29. diffusers/models/vae_flax.py +37 -34
  30. diffusers/models/vq_model.py +23 -14
  31. diffusers/pipelines/__init__.py +24 -1
  32. diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py +1 -1
  33. diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py +5 -3
  34. diffusers/pipelines/consistency_models/__init__.py +1 -0
  35. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +337 -0
  36. diffusers/pipelines/controlnet/multicontrolnet.py +120 -1
  37. diffusers/pipelines/controlnet/pipeline_controlnet.py +59 -17
  38. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +60 -15
  39. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +60 -17
  40. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +1 -1
  41. diffusers/pipelines/kandinsky/__init__.py +1 -1
  42. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +4 -6
  43. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +1 -0
  44. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +1 -0
  45. diffusers/pipelines/kandinsky2_2/__init__.py +7 -0
  46. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +317 -0
  47. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +372 -0
  48. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +434 -0
  49. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +398 -0
  50. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +531 -0
  51. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +541 -0
  52. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +605 -0
  53. diffusers/pipelines/pipeline_flax_utils.py +2 -2
  54. diffusers/pipelines/pipeline_utils.py +124 -146
  55. diffusers/pipelines/shap_e/__init__.py +27 -0
  56. diffusers/pipelines/shap_e/camera.py +147 -0
  57. diffusers/pipelines/shap_e/pipeline_shap_e.py +390 -0
  58. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +349 -0
  59. diffusers/pipelines/shap_e/renderer.py +709 -0
  60. diffusers/pipelines/stable_diffusion/__init__.py +2 -0
  61. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +261 -66
  62. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +3 -3
  63. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +5 -3
  64. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +4 -2
  65. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +6 -6
  66. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +1 -1
  67. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py +1 -1
  68. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py +719 -0
  69. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py +1 -1
  70. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py +832 -0
  71. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +17 -7
  72. diffusers/pipelines/stable_diffusion_xl/__init__.py +26 -0
  73. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +823 -0
  74. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +896 -0
  75. diffusers/pipelines/stable_diffusion_xl/watermark.py +31 -0
  76. diffusers/pipelines/text_to_video_synthesis/__init__.py +2 -1
  77. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +5 -1
  78. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +771 -0
  79. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +92 -6
  80. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +3 -3
  81. diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +209 -91
  82. diffusers/schedulers/__init__.py +3 -0
  83. diffusers/schedulers/scheduling_consistency_models.py +380 -0
  84. diffusers/schedulers/scheduling_ddim.py +28 -6
  85. diffusers/schedulers/scheduling_ddim_inverse.py +19 -4
  86. diffusers/schedulers/scheduling_ddim_parallel.py +642 -0
  87. diffusers/schedulers/scheduling_ddpm.py +53 -7
  88. diffusers/schedulers/scheduling_ddpm_parallel.py +604 -0
  89. diffusers/schedulers/scheduling_deis_multistep.py +66 -11
  90. diffusers/schedulers/scheduling_dpmsolver_multistep.py +55 -13
  91. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +19 -4
  92. diffusers/schedulers/scheduling_dpmsolver_sde.py +73 -11
  93. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +23 -7
  94. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +58 -9
  95. diffusers/schedulers/scheduling_euler_discrete.py +58 -8
  96. diffusers/schedulers/scheduling_heun_discrete.py +89 -14
  97. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +73 -11
  98. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +73 -11
  99. diffusers/schedulers/scheduling_lms_discrete.py +57 -8
  100. diffusers/schedulers/scheduling_pndm.py +46 -10
  101. diffusers/schedulers/scheduling_repaint.py +19 -4
  102. diffusers/schedulers/scheduling_sde_ve.py +5 -1
  103. diffusers/schedulers/scheduling_unclip.py +43 -4
  104. diffusers/schedulers/scheduling_unipc_multistep.py +48 -7
  105. diffusers/training_utils.py +1 -1
  106. diffusers/utils/__init__.py +2 -1
  107. diffusers/utils/dummy_pt_objects.py +60 -0
  108. diffusers/utils/dummy_torch_and_transformers_and_invisible_watermark_objects.py +32 -0
  109. diffusers/utils/dummy_torch_and_transformers_objects.py +180 -0
  110. diffusers/utils/hub_utils.py +1 -1
  111. diffusers/utils/import_utils.py +20 -3
  112. diffusers/utils/logging.py +15 -18
  113. diffusers/utils/outputs.py +3 -3
  114. diffusers/utils/testing_utils.py +15 -0
  115. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/METADATA +4 -2
  116. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/RECORD +120 -94
  117. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/WHEEL +1 -1
  118. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/LICENSE +0 -0
  119. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/entry_points.txt +0 -0
  120. {diffusers-0.17.1.dist-info → diffusers-0.18.2.dist-info}/top_level.txt +0 -0
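This release adds several new pipeline modules (consistency models, Kandinsky 2.2, Shap-E, Stable Diffusion XL, LDM3D, paradigms). As a quick orientation, a minimal sketch of importing a few of them is shown below; the class names are assumptions inferred from the new module paths and dummy-object files listed above, not verified against the published wheel.

```py
# Hedged sketch: top-level imports for pipelines newly added in this diff.
# The class names are inferred from the new pipeline modules, not confirmed exports.
from diffusers import (
    ConsistencyModelPipeline,   # pipelines/consistency_models
    KandinskyV22Pipeline,       # pipelines/kandinsky2_2
    ShapEImg2ImgPipeline,       # pipelines/shap_e (the file shown in full below)
    StableDiffusionXLPipeline,  # pipelines/stable_diffusion_xl
)
```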
diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py (new file)
@@ -0,0 +1,349 @@
+ # Copyright 2023 Open AI and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from dataclasses import dataclass
+ from typing import List, Optional, Union
+
+ import numpy as np
+ import PIL
+ import torch
+ from transformers import CLIPImageProcessor, CLIPVisionModel
+
+ from ...models import PriorTransformer
+ from ...pipelines import DiffusionPipeline
+ from ...schedulers import HeunDiscreteScheduler
+ from ...utils import (
+     BaseOutput,
+     is_accelerate_available,
+     logging,
+     randn_tensor,
+     replace_example_docstring,
+ )
+ from .renderer import ShapERenderer
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+ EXAMPLE_DOC_STRING = """
+     Examples:
+         ```py
+         >>> from PIL import Image
+         >>> import torch
+         >>> from diffusers import DiffusionPipeline
+         >>> from diffusers.utils import export_to_gif, load_image
+
+         >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+         >>> repo = "openai/shap-e-img2img"
+         >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
+         >>> pipe = pipe.to(device)
+
+         >>> guidance_scale = 3.0
+         >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
+         >>> image = load_image(image_url).convert("RGB")
+
+         >>> images = pipe(
+         ...     image,
+         ...     guidance_scale=guidance_scale,
+         ...     num_inference_steps=64,
+         ...     frame_size=256,
+         ... ).images
+
+         >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
+         ```
+ """
+
+
+ @dataclass
+ class ShapEPipelineOutput(BaseOutput):
+     """
+     Output class for ShapEImg2ImgPipeline.
+
+     Args:
+         images (`torch.FloatTensor`):
+             A list of images for 3D rendering.
+     """
+
+     images: Union[PIL.Image.Image, np.ndarray]
+
+
+ class ShapEImg2ImgPipeline(DiffusionPipeline):
+     """
+     Pipeline for generating a latent representation of a 3D asset from an image and rendering it with the NeRF
+     method using Shap-E.
+
+     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.).
+
+     Args:
+         prior ([`PriorTransformer`]):
+             The canonical unCLIP prior to approximate the latents from the image embedding.
+         image_encoder ([`CLIPVisionModel`]):
+             Frozen image encoder.
+         image_processor ([`CLIPImageProcessor`]):
+             A `CLIPImageProcessor` to prepare images for the image encoder.
+         scheduler ([`HeunDiscreteScheduler`]):
+             A scheduler to be used in combination with `prior` to generate the latents.
+         renderer ([`ShapERenderer`]):
+             Shap-E renderer that projects the generated latents into parameters of an MLP, which is used to create
+             3D objects with the NeRF rendering method.
+     """
+
+     def __init__(
+         self,
+         prior: PriorTransformer,
+         image_encoder: CLIPVisionModel,
+         image_processor: CLIPImageProcessor,
+         scheduler: HeunDiscreteScheduler,
+         renderer: ShapERenderer,
+     ):
+         super().__init__()
+
+         self.register_modules(
+             prior=prior,
+             image_encoder=image_encoder,
+             image_processor=image_processor,
+             scheduler=scheduler,
+             renderer=renderer,
+         )
+
+     # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
+     def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
+         if latents is None:
+             latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+         else:
+             if latents.shape != shape:
+                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
+             latents = latents.to(device)
+
+         latents = latents * scheduler.init_noise_sigma
+         return latents
+
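A short standalone sketch (not part of the diff) of what `prepare_latents` does: either sample fresh Gaussian noise with `randn_tensor` or validate user-supplied latents, then scale by the scheduler's `init_noise_sigma`. The latent shape below is hypothetical; it stands in for `(batch_size, num_embeddings * embedding_dim)`.

```py
# Minimal sketch of the prepare_latents logic, assuming diffusers 0.18.x where
# randn_tensor is importable from diffusers.utils.
import torch
from diffusers import HeunDiscreteScheduler
from diffusers.utils import randn_tensor

scheduler = HeunDiscreteScheduler()
scheduler.set_timesteps(25)  # the pipeline calls set_timesteps before preparing latents

generator = torch.Generator().manual_seed(0)
shape = (1, 1024 * 1024)  # hypothetical (batch_size, num_embeddings * embedding_dim)

latents = randn_tensor(shape, generator=generator, device=torch.device("cpu"), dtype=torch.float32)
latents = latents * scheduler.init_noise_sigma  # scale initial noise to the scheduler's starting sigma
```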
+     def enable_sequential_cpu_offload(self, gpu_id=0):
+         r"""
+         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the
+         pipeline's models have their state dicts saved to CPU and then are moved to a `torch.device('meta')` and
+         loaded to GPU only when their specific submodule has its `forward` method called.
+         """
+         if is_accelerate_available():
+             from accelerate import cpu_offload
+         else:
+             raise ImportError("Please install accelerate via `pip install accelerate`")
+
+         device = torch.device(f"cuda:{gpu_id}")
+
+         models = [self.image_encoder, self.prior]
+         for cpu_offloaded_model in models:
+             if cpu_offloaded_model is not None:
+                 cpu_offload(cpu_offloaded_model, device)
+
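A hedged usage sketch (not part of the diff): sequential CPU offload trades speed for memory by keeping `image_encoder` and `prior` on the CPU until their forward passes run. Loading via `DiffusionPipeline.from_pretrained` mirrors the example docstring above; the call assumes a CUDA device and an `accelerate` install.

```py
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16)
pipe.enable_sequential_cpu_offload()  # requires `pip install accelerate` and a CUDA device

# No explicit pipe.to("cuda") is needed afterwards: accelerate's hooks stream each
# submodule to cuda:0 on demand when its forward method is called.
```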
+     @property
+     def _execution_device(self):
+         r"""
+         Returns the device on which the pipeline's models will be executed. After calling
+         `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's
+         module hooks.
+         """
+         if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
+             return self.device
+         for module in self.image_encoder.modules():
+             if (
+                 hasattr(module, "_hf_hook")
+                 and hasattr(module._hf_hook, "execution_device")
+                 and module._hf_hook.execution_device is not None
+             ):
+                 return torch.device(module._hf_hook.execution_device)
+         return self.device
+
+     def _encode_image(
+         self,
+         image,
+         device,
+         num_images_per_prompt,
+         do_classifier_free_guidance,
+     ):
+         if isinstance(image, List) and isinstance(image[0], torch.Tensor):
+             image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
+
+         if not isinstance(image, torch.Tensor):
+             image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
+
+         image = image.to(dtype=self.image_encoder.dtype, device=device)
+
+         image_embeds = self.image_encoder(image)["last_hidden_state"]
+         image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, 256, dim
+
+         image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+
+         if do_classifier_free_guidance:
+             negative_image_embeds = torch.zeros_like(image_embeds)
+
+             # For classifier-free guidance, we need to do two forward passes.
+             # Here we concatenate the unconditional and image embeddings into a single batch
+             # to avoid doing two forward passes.
+             image_embeds = torch.cat([negative_image_embeds, image_embeds])
+
+         return image_embeds
+
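The unconditional/conditional batching set up here is undone later in `__call__`, where the stacked prediction is split with `chunk(2)` and recombined with the guidance weight, i.e. pred = uncond + guidance_scale * (cond - uncond). A tiny self-contained sketch of that arithmetic (not part of the diff, hypothetical tensor sizes):

```py
import torch

guidance_scale = 4.0
cond = torch.randn(1, 1024, 1024)    # prediction conditioned on the image embedding
uncond = torch.randn(1, 1024, 1024)  # prediction for the zeroed "negative" embedding

# The two predictions come back stacked in one batch; split and recombine.
noise_pred_uncond, noise_pred_cond = torch.cat([uncond, cond]).chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
```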
+     @torch.no_grad()
+     @replace_example_docstring(EXAMPLE_DOC_STRING)
+     def __call__(
+         self,
+         image: Union[PIL.Image.Image, List[PIL.Image.Image]],
+         num_images_per_prompt: int = 1,
+         num_inference_steps: int = 25,
+         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         guidance_scale: float = 4.0,
+         frame_size: int = 64,
+         output_type: Optional[str] = "pil",  # pil, np, latent
+         return_dict: bool = True,
+     ):
+         """
+         Function invoked when calling the pipeline for generation.
+
+         Args:
+             image (`PIL.Image.Image` or `List[PIL.Image.Image]`):
+                 The image or images to guide the 3D generation.
+             num_images_per_prompt (`int`, *optional*, defaults to 1):
+                 The number of images to generate per prompt.
+             num_inference_steps (`int`, *optional*, defaults to 25):
+                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                 expense of slower inference.
+             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                 One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                 to make generation deterministic.
+             latents (`torch.FloatTensor`, *optional*):
+                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                 generation. Can be used to tweak the same generation with different prompts. If not provided, a
+                 latents tensor will be generated by sampling using the supplied random `generator`.
+             guidance_scale (`float`, *optional*, defaults to 4.0):
+                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                 1`. A higher guidance scale encourages the model to generate output that is closely linked to the
+                 input `image`, usually at the expense of lower image quality.
+             frame_size (`int`, *optional*, defaults to 64):
+                 The width and height of each image frame of the generated 3D output.
+             output_type (`str`, *optional*, defaults to `"pil"`):
+                 The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                 (`np.array`) or `"latent"` (`torch.Tensor`).
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`ShapEPipelineOutput`] instead of a plain tuple.
+
+         Examples:
+
+         Returns:
+             [`ShapEPipelineOutput`] or `tuple`
+         """
+
+         if isinstance(image, PIL.Image.Image):
+             batch_size = 1
+         elif isinstance(image, torch.Tensor):
+             batch_size = image.shape[0]
+         elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
+             batch_size = len(image)
+         else:
+             raise ValueError(
+                 f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
+             )
+
+         device = self._execution_device
+
+         batch_size = batch_size * num_images_per_prompt
+
+         do_classifier_free_guidance = guidance_scale > 1.0
+         image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
+
+         # prior
+
+         self.scheduler.set_timesteps(num_inference_steps, device=device)
+         timesteps = self.scheduler.timesteps
+
+         num_embeddings = self.prior.config.num_embeddings
+         embedding_dim = self.prior.config.embedding_dim
+
+         latents = self.prepare_latents(
+             (batch_size, num_embeddings * embedding_dim),
+             image_embeds.dtype,
+             device,
+             generator,
+             latents,
+             self.scheduler,
+         )
+
+         # YiYi notes: for testing only to match ldm, we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
+         latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
+
+         for i, t in enumerate(self.progress_bar(timesteps)):
+             # expand the latents if we are doing classifier-free guidance
+             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+             scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+             noise_pred = self.prior(
+                 scaled_model_input,
+                 timestep=t,
+                 proj_embedding=image_embeds,
+             ).predicted_image_embedding
+
+             # remove the variance
+             noise_pred, _ = noise_pred.split(
+                 scaled_model_input.shape[2], dim=2
+             )  # batch_size, num_embeddings, embedding_dim
+
+             if do_classifier_free_guidance:
+                 noise_pred_uncond, noise_pred = noise_pred.chunk(2)
+                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
+
+             latents = self.scheduler.step(
+                 noise_pred,
+                 timestep=t,
+                 sample=latents,
+             ).prev_sample
+
+         if output_type == "latent":
+             return ShapEPipelineOutput(images=latents)
+
+         images = []
+         for i, latent in enumerate(latents):
+             image = self.renderer.decode(
+                 latent[None, :],
+                 device,
+                 size=frame_size,
+                 ray_batch_size=4096,
+                 n_coarse_samples=64,
+                 n_fine_samples=128,
+             )
+
+             images.append(image)
+
+         images = torch.stack(images)
+
+         if output_type not in ["np", "pil"]:
+             raise ValueError(f"Only the output types `pil` and `np` are supported, not output_type={output_type}")
+
+         images = images.cpu().numpy()
+
+         if output_type == "pil":
+             images = [self.numpy_to_pil(image) for image in images]
+
+         # Offload last model to CPU
+         if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+             self.final_offload_hook.offload()
+
+         if not return_dict:
+             return (images,)
+
+         return ShapEPipelineOutput(images=images)
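A hedged follow-up sketch (not part of the diff) showing how the returned output is typically consumed, continuing from the `pipe` and `image` set up in the example docstring above. With `output_type="pil"`, `images` holds one list of PIL frames per input image, which `export_to_gif` can turn into a GIF; the output filename here is illustrative.

```py
from diffusers.utils import export_to_gif

output = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=256)
frames = output.images[0]  # list of PIL.Image frames rendered for the first input
export_to_gif(frames, "shap_e_render.gif")

# With output_type="latent", the raw (batch, num_embeddings, embedding_dim) latents are
# returned instead and no NeRF rendering is performed.
```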