diffusers 0.31.0__py3-none-any.whl → 0.32.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (214)
  1. diffusers/__init__.py +66 -5
  2. diffusers/callbacks.py +56 -3
  3. diffusers/configuration_utils.py +1 -1
  4. diffusers/dependency_versions_table.py +1 -1
  5. diffusers/image_processor.py +25 -17
  6. diffusers/loaders/__init__.py +22 -3
  7. diffusers/loaders/ip_adapter.py +538 -15
  8. diffusers/loaders/lora_base.py +124 -118
  9. diffusers/loaders/lora_conversion_utils.py +318 -3
  10. diffusers/loaders/lora_pipeline.py +1688 -368
  11. diffusers/loaders/peft.py +379 -0
  12. diffusers/loaders/single_file_model.py +71 -4
  13. diffusers/loaders/single_file_utils.py +519 -9
  14. diffusers/loaders/textual_inversion.py +3 -3
  15. diffusers/loaders/transformer_flux.py +181 -0
  16. diffusers/loaders/transformer_sd3.py +89 -0
  17. diffusers/loaders/unet.py +17 -4
  18. diffusers/models/__init__.py +47 -14
  19. diffusers/models/activations.py +22 -9
  20. diffusers/models/attention.py +13 -4
  21. diffusers/models/attention_flax.py +1 -1
  22. diffusers/models/attention_processor.py +2059 -281
  23. diffusers/models/autoencoders/__init__.py +5 -0
  24. diffusers/models/autoencoders/autoencoder_dc.py +620 -0
  25. diffusers/models/autoencoders/autoencoder_kl.py +2 -1
  26. diffusers/models/autoencoders/autoencoder_kl_allegro.py +1149 -0
  27. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +36 -27
  28. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +1176 -0
  29. diffusers/models/autoencoders/autoencoder_kl_ltx.py +1338 -0
  30. diffusers/models/autoencoders/autoencoder_kl_mochi.py +1166 -0
  31. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +3 -10
  32. diffusers/models/autoencoders/autoencoder_tiny.py +4 -2
  33. diffusers/models/autoencoders/vae.py +18 -5
  34. diffusers/models/controlnet.py +47 -802
  35. diffusers/models/controlnet_flux.py +29 -495
  36. diffusers/models/controlnet_sd3.py +25 -379
  37. diffusers/models/controlnet_sparsectrl.py +46 -718
  38. diffusers/models/controlnets/__init__.py +23 -0
  39. diffusers/models/controlnets/controlnet.py +872 -0
  40. diffusers/models/{controlnet_flax.py → controlnets/controlnet_flax.py} +5 -5
  41. diffusers/models/controlnets/controlnet_flux.py +536 -0
  42. diffusers/models/{controlnet_hunyuan.py → controlnets/controlnet_hunyuan.py} +7 -7
  43. diffusers/models/controlnets/controlnet_sd3.py +489 -0
  44. diffusers/models/controlnets/controlnet_sparsectrl.py +788 -0
  45. diffusers/models/controlnets/controlnet_union.py +832 -0
  46. diffusers/models/{controlnet_xs.py → controlnets/controlnet_xs.py} +14 -13
  47. diffusers/models/controlnets/multicontrolnet.py +183 -0
  48. diffusers/models/embeddings.py +838 -43
  49. diffusers/models/model_loading_utils.py +88 -6
  50. diffusers/models/modeling_flax_utils.py +1 -1
  51. diffusers/models/modeling_utils.py +74 -28
  52. diffusers/models/normalization.py +78 -13
  53. diffusers/models/transformers/__init__.py +5 -0
  54. diffusers/models/transformers/auraflow_transformer_2d.py +2 -2
  55. diffusers/models/transformers/cogvideox_transformer_3d.py +46 -11
  56. diffusers/models/transformers/dit_transformer_2d.py +1 -1
  57. diffusers/models/transformers/latte_transformer_3d.py +4 -4
  58. diffusers/models/transformers/pixart_transformer_2d.py +1 -1
  59. diffusers/models/transformers/sana_transformer.py +488 -0
  60. diffusers/models/transformers/stable_audio_transformer.py +1 -1
  61. diffusers/models/transformers/transformer_2d.py +1 -1
  62. diffusers/models/transformers/transformer_allegro.py +422 -0
  63. diffusers/models/transformers/transformer_cogview3plus.py +1 -1
  64. diffusers/models/transformers/transformer_flux.py +30 -9
  65. diffusers/models/transformers/transformer_hunyuan_video.py +789 -0
  66. diffusers/models/transformers/transformer_ltx.py +469 -0
  67. diffusers/models/transformers/transformer_mochi.py +499 -0
  68. diffusers/models/transformers/transformer_sd3.py +105 -17
  69. diffusers/models/transformers/transformer_temporal.py +1 -1
  70. diffusers/models/unets/unet_1d_blocks.py +1 -1
  71. diffusers/models/unets/unet_2d.py +8 -1
  72. diffusers/models/unets/unet_2d_blocks.py +88 -21
  73. diffusers/models/unets/unet_2d_condition.py +1 -1
  74. diffusers/models/unets/unet_3d_blocks.py +9 -7
  75. diffusers/models/unets/unet_motion_model.py +5 -5
  76. diffusers/models/unets/unet_spatio_temporal_condition.py +23 -0
  77. diffusers/models/unets/unet_stable_cascade.py +2 -2
  78. diffusers/models/unets/uvit_2d.py +1 -1
  79. diffusers/models/upsampling.py +8 -0
  80. diffusers/pipelines/__init__.py +34 -0
  81. diffusers/pipelines/allegro/__init__.py +48 -0
  82. diffusers/pipelines/allegro/pipeline_allegro.py +938 -0
  83. diffusers/pipelines/allegro/pipeline_output.py +23 -0
  84. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +8 -2
  85. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +1 -1
  86. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +0 -6
  87. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +8 -8
  88. diffusers/pipelines/audioldm2/modeling_audioldm2.py +3 -3
  89. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +1 -8
  90. diffusers/pipelines/auto_pipeline.py +53 -6
  91. diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
  92. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +50 -22
  93. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +51 -20
  94. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +69 -21
  95. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +47 -21
  96. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +1 -1
  97. diffusers/pipelines/controlnet/__init__.py +86 -80
  98. diffusers/pipelines/controlnet/multicontrolnet.py +7 -178
  99. diffusers/pipelines/controlnet/pipeline_controlnet.py +11 -2
  100. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +1 -2
  101. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +1 -2
  102. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +1 -2
  103. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +3 -3
  104. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +1 -3
  105. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +1790 -0
  106. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +1501 -0
  107. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +1627 -0
  108. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +5 -1
  109. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +53 -19
  110. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +7 -7
  111. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +31 -8
  112. diffusers/pipelines/flux/__init__.py +13 -1
  113. diffusers/pipelines/flux/modeling_flux.py +47 -0
  114. diffusers/pipelines/flux/pipeline_flux.py +204 -29
  115. diffusers/pipelines/flux/pipeline_flux_control.py +889 -0
  116. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +945 -0
  117. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +1141 -0
  118. diffusers/pipelines/flux/pipeline_flux_controlnet.py +49 -27
  119. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +40 -30
  120. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +78 -56
  121. diffusers/pipelines/flux/pipeline_flux_fill.py +969 -0
  122. diffusers/pipelines/flux/pipeline_flux_img2img.py +33 -27
  123. diffusers/pipelines/flux/pipeline_flux_inpaint.py +36 -29
  124. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +492 -0
  125. diffusers/pipelines/flux/pipeline_output.py +16 -0
  126. diffusers/pipelines/hunyuan_video/__init__.py +48 -0
  127. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +687 -0
  128. diffusers/pipelines/hunyuan_video/pipeline_output.py +20 -0
  129. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +5 -1
  130. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +9 -9
  131. diffusers/pipelines/kolors/text_encoder.py +2 -2
  132. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
  133. diffusers/pipelines/ltx/__init__.py +50 -0
  134. diffusers/pipelines/ltx/pipeline_ltx.py +789 -0
  135. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +885 -0
  136. diffusers/pipelines/ltx/pipeline_output.py +20 -0
  137. diffusers/pipelines/lumina/pipeline_lumina.py +1 -8
  138. diffusers/pipelines/mochi/__init__.py +48 -0
  139. diffusers/pipelines/mochi/pipeline_mochi.py +748 -0
  140. diffusers/pipelines/mochi/pipeline_output.py +20 -0
  141. diffusers/pipelines/pag/__init__.py +7 -0
  142. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +1 -2
  143. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +1 -2
  144. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +1 -3
  145. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +1 -3
  146. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +5 -1
  147. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +6 -13
  148. diffusers/pipelines/pag/pipeline_pag_sana.py +886 -0
  149. diffusers/pipelines/pag/pipeline_pag_sd_3.py +6 -6
  150. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +1058 -0
  151. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +3 -0
  152. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +1356 -0
  153. diffusers/pipelines/pipeline_flax_utils.py +1 -1
  154. diffusers/pipelines/pipeline_loading_utils.py +25 -4
  155. diffusers/pipelines/pipeline_utils.py +35 -6
  156. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +6 -13
  157. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +6 -13
  158. diffusers/pipelines/sana/__init__.py +47 -0
  159. diffusers/pipelines/sana/pipeline_output.py +21 -0
  160. diffusers/pipelines/sana/pipeline_sana.py +884 -0
  161. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +12 -1
  162. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +18 -3
  163. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +216 -20
  164. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +62 -9
  165. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +57 -8
  166. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +11 -1
  167. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +0 -8
  168. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +0 -8
  169. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +0 -8
  170. diffusers/pipelines/unidiffuser/modeling_uvit.py +2 -2
  171. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
  172. diffusers/quantizers/auto.py +14 -1
  173. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +4 -1
  174. diffusers/quantizers/gguf/__init__.py +1 -0
  175. diffusers/quantizers/gguf/gguf_quantizer.py +159 -0
  176. diffusers/quantizers/gguf/utils.py +456 -0
  177. diffusers/quantizers/quantization_config.py +280 -2
  178. diffusers/quantizers/torchao/__init__.py +15 -0
  179. diffusers/quantizers/torchao/torchao_quantizer.py +285 -0
  180. diffusers/schedulers/scheduling_ddpm.py +2 -6
  181. diffusers/schedulers/scheduling_ddpm_parallel.py +2 -6
  182. diffusers/schedulers/scheduling_deis_multistep.py +28 -9
  183. diffusers/schedulers/scheduling_dpmsolver_multistep.py +35 -9
  184. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +35 -8
  185. diffusers/schedulers/scheduling_dpmsolver_sde.py +4 -4
  186. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +48 -10
  187. diffusers/schedulers/scheduling_euler_discrete.py +4 -4
  188. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +153 -6
  189. diffusers/schedulers/scheduling_heun_discrete.py +4 -4
  190. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +4 -4
  191. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +4 -4
  192. diffusers/schedulers/scheduling_lcm.py +2 -6
  193. diffusers/schedulers/scheduling_lms_discrete.py +4 -4
  194. diffusers/schedulers/scheduling_repaint.py +1 -1
  195. diffusers/schedulers/scheduling_sasolver.py +28 -9
  196. diffusers/schedulers/scheduling_tcd.py +2 -6
  197. diffusers/schedulers/scheduling_unipc_multistep.py +53 -8
  198. diffusers/training_utils.py +16 -2
  199. diffusers/utils/__init__.py +5 -0
  200. diffusers/utils/constants.py +1 -0
  201. diffusers/utils/dummy_pt_objects.py +180 -0
  202. diffusers/utils/dummy_torch_and_transformers_objects.py +270 -0
  203. diffusers/utils/dynamic_modules_utils.py +3 -3
  204. diffusers/utils/hub_utils.py +31 -39
  205. diffusers/utils/import_utils.py +67 -0
  206. diffusers/utils/peft_utils.py +3 -0
  207. diffusers/utils/testing_utils.py +56 -1
  208. diffusers/utils/torch_utils.py +3 -0
  209. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/METADATA +69 -69
  210. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/RECORD +214 -162
  211. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/WHEEL +1 -1
  212. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/LICENSE +0 -0
  213. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/entry_points.txt +0 -0
  214. {diffusers-0.31.0.dist-info → diffusers-0.32.0.dist-info}/top_level.txt +0 -0
diffusers/schedulers/scheduling_deis_multistep.py
@@ -149,6 +149,8 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
         use_karras_sigmas: Optional[bool] = False,
         use_exponential_sigmas: Optional[bool] = False,
         use_beta_sigmas: Optional[bool] = False,
+        use_flow_sigmas: Optional[bool] = False,
+        flow_shift: Optional[float] = 1.0,
         timestep_spacing: str = "linspace",
         steps_offset: int = 0,
     ):
@@ -266,18 +268,28 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
             )
 
         sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
+        log_sigmas = np.log(sigmas)
         if self.config.use_karras_sigmas:
-            log_sigmas = np.log(sigmas)
             sigmas = np.flip(sigmas).copy()
             sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
             sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         elif self.config.use_exponential_sigmas:
-            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = np.flip(sigmas).copy()
+            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
+            sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         elif self.config.use_beta_sigmas:
-            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = np.flip(sigmas).copy()
+            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
+            sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
+        elif self.config.use_flow_sigmas:
+            alphas = np.linspace(1, 1 / self.config.num_train_timesteps, num_inference_steps + 1)
+            sigmas = 1.0 - alphas
+            sigmas = np.flip(self.config.flow_shift * sigmas / (1 + (self.config.flow_shift - 1) * sigmas))[:-1].copy()
+            timesteps = (sigmas * self.config.num_train_timesteps).copy()
+            sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         else:
             sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
             sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5
@@ -358,8 +370,12 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
 
     # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t
     def _sigma_to_alpha_sigma_t(self, sigma):
-        alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
-        sigma_t = sigma * alpha_t
+        if self.config.use_flow_sigmas:
+            alpha_t = 1 - sigma
+            sigma_t = sigma
+        else:
+            alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
+            sigma_t = sigma * alpha_t
 
         return alpha_t, sigma_t
 
@@ -408,7 +424,7 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
+        sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
         return sigmas
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -432,7 +448,7 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.Tensor(
+        sigmas = np.array(
             [
                 sigma_min + (ppf * (sigma_max - sigma_min))
                 for ppf in [
@@ -486,10 +502,13 @@ class DEISMultistepScheduler(SchedulerMixin, ConfigMixin):
             x0_pred = model_output
         elif self.config.prediction_type == "v_prediction":
            x0_pred = alpha_t * sample - sigma_t * model_output
+        elif self.config.prediction_type == "flow_prediction":
+            sigma_t = self.sigmas[self.step_index]
+            x0_pred = sample - sigma_t * model_output
         else:
             raise ValueError(
-                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
-                " `v_prediction` for the DEISMultistepScheduler."
+                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, "
+                "`v_prediction`, or `flow_prediction` for the DEISMultistepScheduler."
             )
 
         if self.config.thresholding:
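The hunks above give DEISMultistepScheduler a flow-matching sigma schedule (`use_flow_sigmas`, `flow_shift`) and a matching `flow_prediction` branch in `convert_model_output`. A minimal sketch of turning the new options on, assuming diffusers 0.32.0 is installed; the step count and shift value below are illustrative, not recommendations:

from diffusers import DEISMultistepScheduler

# New config flags from this release: use_flow_sigmas enables the flow schedule,
# flow_shift stretches it toward higher noise, and prediction_type="flow_prediction"
# tells convert_model_output to treat the model output as a flow-style prediction.
scheduler = DEISMultistepScheduler(
    num_train_timesteps=1000,
    prediction_type="flow_prediction",
    use_flow_sigmas=True,
    flow_shift=3.0,
)
scheduler.set_timesteps(num_inference_steps=30)
print(scheduler.timesteps[:5])
print(scheduler.sigmas[:5])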
diffusers/schedulers/scheduling_dpmsolver_multistep.py
@@ -218,6 +218,8 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         use_exponential_sigmas: Optional[bool] = False,
         use_beta_sigmas: Optional[bool] = False,
         use_lu_lambdas: Optional[bool] = False,
+        use_flow_sigmas: Optional[bool] = False,
+        flow_shift: Optional[float] = 1.0,
         final_sigmas_type: Optional[str] = "zero",  # "zero", "sigma_min"
         lambda_min_clipped: float = -float("inf"),
         variance_type: Optional[str] = None,
@@ -400,11 +402,18 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
             sigmas = np.exp(lambdas)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
         elif self.config.use_exponential_sigmas:
-            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = np.flip(sigmas).copy()
+            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
         elif self.config.use_beta_sigmas:
-            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = np.flip(sigmas).copy()
+            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
+        elif self.config.use_flow_sigmas:
+            alphas = np.linspace(1, 1 / self.config.num_train_timesteps, num_inference_steps + 1)
+            sigmas = 1.0 - alphas
+            sigmas = np.flip(self.config.flow_shift * sigmas / (1 + (self.config.flow_shift - 1) * sigmas))[:-1].copy()
+            timesteps = (sigmas * self.config.num_train_timesteps).copy()
         else:
             sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
 
@@ -493,8 +502,12 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         return t
 
     def _sigma_to_alpha_sigma_t(self, sigma):
-        alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
-        sigma_t = sigma * alpha_t
+        if self.config.use_flow_sigmas:
+            alpha_t = 1 - sigma
+            sigma_t = sigma
+        else:
+            alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
+            sigma_t = sigma * alpha_t
 
         return alpha_t, sigma_t
 
@@ -556,7 +569,7 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
+        sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
         return sigmas
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -580,7 +593,7 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.Tensor(
+        sigmas = np.array(
             [
                 sigma_min + (ppf * (sigma_max - sigma_min))
                 for ppf in [
@@ -648,10 +661,13 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
                 sigma = self.sigmas[self.step_index]
                 alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
                 x0_pred = alpha_t * sample - sigma_t * model_output
+            elif self.config.prediction_type == "flow_prediction":
+                sigma_t = self.sigmas[self.step_index]
+                x0_pred = sample - sigma_t * model_output
             else:
                 raise ValueError(
-                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
-                    " `v_prediction` for the DPMSolverMultistepScheduler."
+                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, "
+                    "`v_prediction`, or `flow_prediction` for the DPMSolverMultistepScheduler."
                 )
 
             if self.config.thresholding:
@@ -887,6 +903,7 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         model_output_list: List[torch.Tensor],
         *args,
         sample: torch.Tensor = None,
+        noise: Optional[torch.Tensor] = None,
         **kwargs,
     ) -> torch.Tensor:
         """
@@ -965,6 +982,15 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
                 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
                 - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
             )
+        elif self.config.algorithm_type == "sde-dpmsolver++":
+            assert noise is not None
+            x_t = (
+                (sigma_t / sigma_s0 * torch.exp(-h)) * sample
+                + (alpha_t * (1.0 - torch.exp(-2.0 * h))) * D0
+                + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1
+                + (alpha_t * ((1.0 - torch.exp(-2.0 * h) - 2.0 * h) / (2.0 * h) ** 2 - 0.5)) * D2
+                + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
+            )
         return x_t
 
     def index_for_timestep(self, timestep, schedule_timesteps=None):
@@ -1071,7 +1097,7 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second:
             prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise)
         else:
-            prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample)
+            prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample, noise=noise)
 
         if self.lower_order_nums < self.config.solver_order:
             self.lower_order_nums += 1
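Every scheduler touched in this diff builds the flow sigma schedule the same way: start from a linear alpha grid, convert to sigmas, and apply the shift sigma' = flow_shift * sigma / (1 + (flow_shift - 1) * sigma). A standalone NumPy sketch of just that computation, lifted from the `use_flow_sigmas` branch above (the helper name is ours):

import numpy as np

def flow_sigmas(num_inference_steps, num_train_timesteps=1000, flow_shift=1.0):
    # linear alpha grid from 1 down to 1/num_train_timesteps, as in the new branch
    alphas = np.linspace(1, 1 / num_train_timesteps, num_inference_steps + 1)
    sigmas = 1.0 - alphas
    # time-shift, then flip so sigmas run from high noise to low noise
    sigmas = np.flip(flow_shift * sigmas / (1 + (flow_shift - 1) * sigmas))[:-1].copy()
    timesteps = sigmas * num_train_timesteps
    return sigmas, timesteps

for shift in (1.0, 3.0):
    sigmas, _ = flow_sigmas(num_inference_steps=5, flow_shift=shift)
    print(shift, np.round(sigmas, 3))  # larger shift keeps more of the steps at high noise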
diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py
@@ -169,6 +169,8 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
         use_karras_sigmas: Optional[bool] = False,
         use_exponential_sigmas: Optional[bool] = False,
         use_beta_sigmas: Optional[bool] = False,
+        use_flow_sigmas: Optional[bool] = False,
+        flow_shift: Optional[float] = 1.0,
         lambda_min_clipped: float = -float("inf"),
         variance_type: Optional[str] = None,
         timestep_spacing: str = "linspace",
@@ -287,11 +289,19 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
             timesteps = timesteps.copy().astype(np.int64)
             sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         elif self.config.use_exponential_sigmas:
-            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
+            sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         elif self.config.use_beta_sigmas:
-            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
+            sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
+        elif self.config.use_flow_sigmas:
+            alphas = np.linspace(1, 1 / self.config.num_train_timesteps, num_inference_steps + 1)
+            sigmas = 1.0 - alphas
+            sigmas = np.flip(self.config.flow_shift * sigmas / (1 + (self.config.flow_shift - 1) * sigmas))[:-1].copy()
+            timesteps = (sigmas * self.config.num_train_timesteps).copy()
+            sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         else:
             sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
             sigma_max = (
@@ -379,8 +389,12 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
 
     # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t
    def _sigma_to_alpha_sigma_t(self, sigma):
-        alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
-        sigma_t = sigma * alpha_t
+        if self.config.use_flow_sigmas:
+            alpha_t = 1 - sigma
+            sigma_t = sigma
+        else:
+            alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
+            sigma_t = sigma * alpha_t
 
         return alpha_t, sigma_t
 
@@ -429,7 +443,7 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
+        sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
         return sigmas
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -453,7 +467,7 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.Tensor(
+        sigmas = np.array(
             [
                 sigma_min + (ppf * (sigma_max - sigma_min))
                 for ppf in [
@@ -522,10 +536,13 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
                 sigma = self.sigmas[self.step_index]
                 alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
                 x0_pred = alpha_t * sample - sigma_t * model_output
+            elif self.config.prediction_type == "flow_prediction":
+                sigma_t = self.sigmas[self.step_index]
+                x0_pred = sample - sigma_t * model_output
             else:
                 raise ValueError(
-                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
-                    " `v_prediction` for the DPMSolverMultistepScheduler."
+                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, "
+                    "`v_prediction`, or `flow_prediction` for the DPMSolverMultistepScheduler."
                 )
 
             if self.config.thresholding:
@@ -764,6 +781,7 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
         model_output_list: List[torch.Tensor],
         *args,
         sample: torch.Tensor = None,
+        noise: Optional[torch.Tensor] = None,
         **kwargs,
     ) -> torch.Tensor:
         """
@@ -842,6 +860,15 @@ class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin):
                 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
                 - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
             )
+        elif self.config.algorithm_type == "sde-dpmsolver++":
+            assert noise is not None
+            x_t = (
+                (sigma_t / sigma_s0 * torch.exp(-h)) * sample
+                + (alpha_t * (1.0 - torch.exp(-2.0 * h))) * D0
+                + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1
+                + (alpha_t * ((1.0 - torch.exp(-2.0 * h) - 2.0 * h) / (2.0 * h) ** 2 - 0.5)) * D2
+                + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
+            )
         return x_t
 
     def _init_step_index(self, timestep):
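The other half of the flow-matching support sits in `_sigma_to_alpha_sigma_t` (alpha_t = 1 - sigma, sigma_t = sigma) and in the `flow_prediction` branch of `convert_model_output` (x0_pred = sample - sigma_t * model_output). A small self-contained check of that pairing, under the usual rectified-flow assumption that the model predicts the velocity v = noise - x0 (that convention is our assumption, not something spelled out in the diff):

import numpy as np

rng = np.random.default_rng(0)
x0 = rng.standard_normal(4)     # clean sample
eps = rng.standard_normal(4)    # noise
sigma = 0.7

alpha_t, sigma_t = 1.0 - sigma, sigma        # flow branch of _sigma_to_alpha_sigma_t
sample = alpha_t * x0 + sigma_t * eps        # flow-matching noisy sample
model_output = eps - x0                      # assumed velocity-style prediction
x0_pred = sample - sigma_t * model_output    # flow_prediction branch above
print(np.allclose(x0_pred, x0))              # True: the conversion recovers x0 exactly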
diffusers/schedulers/scheduling_dpmsolver_sde.py
@@ -380,10 +380,10 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
             sigmas = self._convert_to_karras(in_sigmas=sigmas)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
         elif self.config.use_exponential_sigmas:
-            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
         elif self.config.use_beta_sigmas:
-            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
 
         second_order_timesteps = self._second_order_timesteps(sigmas, log_sigmas)
@@ -484,7 +484,7 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
+        sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
         return sigmas
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -508,7 +508,7 @@ class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.Tensor(
+        sigmas = np.array(
             [
                 sigma_min + (ppf * (sigma_max - sigma_min))
                 for ppf in [
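For DPMSolverSDEScheduler the change is narrower: `_convert_to_exponential` and `_convert_to_beta` are now called with the `num_inference_steps` argument rather than `self.num_inference_steps`, and both helpers return NumPy arrays instead of torch tensors, matching the NumPy sigma handling in `set_timesteps`. A sketch of the exponential spacing the updated helper produces (the sigma_min/sigma_max values are just illustrative):

import math
import numpy as np

def exponential_sigmas(sigma_min, sigma_max, num_inference_steps):
    # log-linear (geometric) spacing from sigma_max down to sigma_min,
    # as returned by the updated _convert_to_exponential
    return np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))

print(np.round(exponential_sigmas(0.03, 14.6, 5), 3))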
diffusers/schedulers/scheduling_dpmsolver_singlestep.py
@@ -164,6 +164,8 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
         use_karras_sigmas: Optional[bool] = False,
         use_exponential_sigmas: Optional[bool] = False,
         use_beta_sigmas: Optional[bool] = False,
+        use_flow_sigmas: Optional[bool] = False,
+        flow_shift: Optional[float] = 1.0,
         final_sigmas_type: Optional[str] = "zero",  # "zero", "sigma_min"
         lambda_min_clipped: float = -float("inf"),
         variance_type: Optional[str] = None,
@@ -264,6 +266,10 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
                 orders = [1, 2] * (steps // 2)
             elif order == 1:
                 orders = [1] * steps
+
+        if self.config.final_sigmas_type == "zero":
+            orders[-1] = 1
+
         return orders
 
     @property
@@ -339,17 +345,24 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
             )
 
         sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
+        log_sigmas = np.log(sigmas)
         if self.config.use_karras_sigmas:
-            log_sigmas = np.log(sigmas)
             sigmas = np.flip(sigmas).copy()
             sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
         elif self.config.use_exponential_sigmas:
-            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = np.flip(sigmas).copy()
+            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
         elif self.config.use_beta_sigmas:
-            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = np.flip(sigmas).copy()
+            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
+        elif self.config.use_flow_sigmas:
+            alphas = np.linspace(1, 1 / self.config.num_train_timesteps, num_inference_steps + 1)
+            sigmas = 1.0 - alphas
+            sigmas = np.flip(self.config.flow_shift * sigmas / (1 + (self.config.flow_shift - 1) * sigmas))[:-1].copy()
+            timesteps = (sigmas * self.config.num_train_timesteps).copy()
         else:
             sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
 
@@ -448,8 +461,12 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
 
     # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t
     def _sigma_to_alpha_sigma_t(self, sigma):
-        alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
-        sigma_t = sigma * alpha_t
+        if self.config.use_flow_sigmas:
+            alpha_t = 1 - sigma
+            sigma_t = sigma
+        else:
+            alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
+            sigma_t = sigma * alpha_t
 
         return alpha_t, sigma_t
 
@@ -498,7 +515,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
+        sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
         return sigmas
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -522,7 +539,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.Tensor(
+        sigmas = np.array(
             [
                 sigma_min + (ppf * (sigma_max - sigma_min))
                 for ppf in [
@@ -589,10 +606,13 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
                 sigma = self.sigmas[self.step_index]
                 alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
                 x0_pred = alpha_t * sample - sigma_t * model_output
+            elif self.config.prediction_type == "flow_prediction":
+                sigma_t = self.sigmas[self.step_index]
+                x0_pred = sample - sigma_t * model_output
             else:
                 raise ValueError(
-                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
-                    " `v_prediction` for the DPMSolverSinglestepScheduler."
+                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, "
+                    "`v_prediction`, or `flow_prediction` for the DPMSolverSinglestepScheduler."
                 )
 
             if self.config.thresholding:
@@ -810,6 +830,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
         model_output_list: List[torch.Tensor],
         *args,
         sample: torch.Tensor = None,
+        noise: Optional[torch.Tensor] = None,
         **kwargs,
     ) -> torch.Tensor:
         """
@@ -907,6 +928,23 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
                 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
                 - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
             )
+        elif self.config.algorithm_type == "sde-dpmsolver++":
+            assert noise is not None
+            if self.config.solver_type == "midpoint":
+                x_t = (
+                    (sigma_t / sigma_s2 * torch.exp(-h)) * sample
+                    + (alpha_t * (1.0 - torch.exp(-2.0 * h))) * D0
+                    + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1_1
+                    + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
+                )
+            elif self.config.solver_type == "heun":
+                x_t = (
+                    (sigma_t / sigma_s2 * torch.exp(-h)) * sample
+                    + (alpha_t * (1.0 - torch.exp(-2.0 * h))) * D0
+                    + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1
+                    + (alpha_t * ((1.0 - torch.exp(-2.0 * h) + (-2.0 * h)) / (-2.0 * h) ** 2 - 0.5)) * D2
+                    + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
+                )
         return x_t
 
     def singlestep_dpm_solver_update(
@@ -968,7 +1006,7 @@ class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
         elif order == 2:
             return self.singlestep_dpm_solver_second_order_update(model_output_list, sample=sample, noise=noise)
         elif order == 3:
-            return self.singlestep_dpm_solver_third_order_update(model_output_list, sample=sample)
+            return self.singlestep_dpm_solver_third_order_update(model_output_list, sample=sample, noise=noise)
         else:
             raise ValueError(f"Order must be 1, 2, 3, got {order}")
 
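Besides the flow-sigma and SDE changes it shares with the multistep scheduler, DPMSolverSinglestepScheduler gains a guard in `get_order_list`: when `final_sigmas_type` is "zero", the last entry of the order list is forced to 1. A tiny sketch of the effect on an example order list (solver_order=2 over six steps, taking the branch shown in the hunk above):

# Example order list before the guard:
orders = [1, 2] * (6 // 2)        # [1, 2, 1, 2, 1, 2]

final_sigmas_type = "zero"
if final_sigmas_type == "zero":   # new guard from this release
    orders[-1] = 1                # the final singlestep update drops to first order

print(orders)                     # [1, 2, 1, 2, 1, 1]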
diffusers/schedulers/scheduling_euler_discrete.py
@@ -419,11 +419,11 @@ class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
 
         elif self.config.use_exponential_sigmas:
-            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
 
         elif self.config.use_beta_sigmas:
-            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
 
         if self.config.final_sigmas_type == "sigma_min":
@@ -517,7 +517,7 @@ class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
+        sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
         return sigmas
 
     def _convert_to_beta(
@@ -540,7 +540,7 @@ class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.Tensor(
+        sigmas = np.array(
             [
                 sigma_min + (ppf * (sigma_max - sigma_min))
                 for ppf in [
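EulerDiscreteScheduler is the source of the `_convert_to_exponential` and `_convert_to_beta` helpers the other schedulers copy, so it receives the same two fixes: the conversions use the `num_inference_steps` argument, and the beta spacing is built as a NumPy array rather than a torch.Tensor. A sketch of that beta spacing; it assumes the helper's beta-distribution parameters default to alpha = beta = 0.6 (treat those defaults as an assumption) and requires SciPy:

import numpy as np
import scipy.stats

def beta_sigmas(sigma_min, sigma_max, num_inference_steps, alpha=0.6, beta=0.6):
    # percent-point function of a Beta(alpha, beta) distribution, evaluated on a
    # reversed unit grid and rescaled into [sigma_min, sigma_max]
    ppfs = [scipy.stats.beta.ppf(t, alpha, beta) for t in 1 - np.linspace(0, 1, num_inference_steps)]
    return np.array([sigma_min + ppf * (sigma_max - sigma_min) for ppf in ppfs])

print(np.round(beta_sigmas(0.03, 14.6, 5), 3))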