diffusers 0.33.1__py3-none-any.whl → 0.35.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (551)
  1. diffusers/__init__.py +145 -1
  2. diffusers/callbacks.py +35 -0
  3. diffusers/commands/__init__.py +1 -1
  4. diffusers/commands/custom_blocks.py +134 -0
  5. diffusers/commands/diffusers_cli.py +3 -1
  6. diffusers/commands/env.py +1 -1
  7. diffusers/commands/fp16_safetensors.py +2 -2
  8. diffusers/configuration_utils.py +11 -2
  9. diffusers/dependency_versions_check.py +1 -1
  10. diffusers/dependency_versions_table.py +3 -3
  11. diffusers/experimental/rl/value_guided_sampling.py +1 -1
  12. diffusers/guiders/__init__.py +41 -0
  13. diffusers/guiders/adaptive_projected_guidance.py +188 -0
  14. diffusers/guiders/auto_guidance.py +190 -0
  15. diffusers/guiders/classifier_free_guidance.py +141 -0
  16. diffusers/guiders/classifier_free_zero_star_guidance.py +152 -0
  17. diffusers/guiders/frequency_decoupled_guidance.py +327 -0
  18. diffusers/guiders/guider_utils.py +309 -0
  19. diffusers/guiders/perturbed_attention_guidance.py +271 -0
  20. diffusers/guiders/skip_layer_guidance.py +262 -0
  21. diffusers/guiders/smoothed_energy_guidance.py +251 -0
  22. diffusers/guiders/tangential_classifier_free_guidance.py +143 -0
  23. diffusers/hooks/__init__.py +17 -0
  24. diffusers/hooks/_common.py +56 -0
  25. diffusers/hooks/_helpers.py +293 -0
  26. diffusers/hooks/faster_cache.py +9 -8
  27. diffusers/hooks/first_block_cache.py +259 -0
  28. diffusers/hooks/group_offloading.py +332 -227
  29. diffusers/hooks/hooks.py +58 -3
  30. diffusers/hooks/layer_skip.py +263 -0
  31. diffusers/hooks/layerwise_casting.py +5 -10
  32. diffusers/hooks/pyramid_attention_broadcast.py +15 -12
  33. diffusers/hooks/smoothed_energy_guidance_utils.py +167 -0
  34. diffusers/hooks/utils.py +43 -0
  35. diffusers/image_processor.py +7 -2
  36. diffusers/loaders/__init__.py +10 -0
  37. diffusers/loaders/ip_adapter.py +260 -18
  38. diffusers/loaders/lora_base.py +261 -127
  39. diffusers/loaders/lora_conversion_utils.py +657 -35
  40. diffusers/loaders/lora_pipeline.py +2778 -1246
  41. diffusers/loaders/peft.py +78 -112
  42. diffusers/loaders/single_file.py +2 -2
  43. diffusers/loaders/single_file_model.py +64 -15
  44. diffusers/loaders/single_file_utils.py +395 -7
  45. diffusers/loaders/textual_inversion.py +3 -2
  46. diffusers/loaders/transformer_flux.py +10 -11
  47. diffusers/loaders/transformer_sd3.py +8 -3
  48. diffusers/loaders/unet.py +24 -21
  49. diffusers/loaders/unet_loader_utils.py +6 -3
  50. diffusers/loaders/utils.py +1 -1
  51. diffusers/models/__init__.py +23 -1
  52. diffusers/models/activations.py +5 -5
  53. diffusers/models/adapter.py +2 -3
  54. diffusers/models/attention.py +488 -7
  55. diffusers/models/attention_dispatch.py +1218 -0
  56. diffusers/models/attention_flax.py +10 -10
  57. diffusers/models/attention_processor.py +113 -667
  58. diffusers/models/auto_model.py +49 -12
  59. diffusers/models/autoencoders/__init__.py +2 -0
  60. diffusers/models/autoencoders/autoencoder_asym_kl.py +4 -4
  61. diffusers/models/autoencoders/autoencoder_dc.py +17 -4
  62. diffusers/models/autoencoders/autoencoder_kl.py +5 -5
  63. diffusers/models/autoencoders/autoencoder_kl_allegro.py +4 -4
  64. diffusers/models/autoencoders/autoencoder_kl_cogvideox.py +6 -6
  65. diffusers/models/autoencoders/autoencoder_kl_cosmos.py +1110 -0
  66. diffusers/models/autoencoders/autoencoder_kl_hunyuan_video.py +2 -2
  67. diffusers/models/autoencoders/autoencoder_kl_ltx.py +3 -3
  68. diffusers/models/autoencoders/autoencoder_kl_magvit.py +4 -4
  69. diffusers/models/autoencoders/autoencoder_kl_mochi.py +3 -3
  70. diffusers/models/autoencoders/autoencoder_kl_qwenimage.py +1070 -0
  71. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +4 -4
  72. diffusers/models/autoencoders/autoencoder_kl_wan.py +626 -62
  73. diffusers/models/autoencoders/autoencoder_oobleck.py +1 -1
  74. diffusers/models/autoencoders/autoencoder_tiny.py +3 -3
  75. diffusers/models/autoencoders/consistency_decoder_vae.py +1 -1
  76. diffusers/models/autoencoders/vae.py +13 -2
  77. diffusers/models/autoencoders/vq_model.py +2 -2
  78. diffusers/models/cache_utils.py +32 -10
  79. diffusers/models/controlnet.py +1 -1
  80. diffusers/models/controlnet_flux.py +1 -1
  81. diffusers/models/controlnet_sd3.py +1 -1
  82. diffusers/models/controlnet_sparsectrl.py +1 -1
  83. diffusers/models/controlnets/__init__.py +1 -0
  84. diffusers/models/controlnets/controlnet.py +3 -3
  85. diffusers/models/controlnets/controlnet_flax.py +1 -1
  86. diffusers/models/controlnets/controlnet_flux.py +21 -20
  87. diffusers/models/controlnets/controlnet_hunyuan.py +2 -2
  88. diffusers/models/controlnets/controlnet_sana.py +290 -0
  89. diffusers/models/controlnets/controlnet_sd3.py +1 -1
  90. diffusers/models/controlnets/controlnet_sparsectrl.py +2 -2
  91. diffusers/models/controlnets/controlnet_union.py +5 -5
  92. diffusers/models/controlnets/controlnet_xs.py +7 -7
  93. diffusers/models/controlnets/multicontrolnet.py +4 -5
  94. diffusers/models/controlnets/multicontrolnet_union.py +5 -6
  95. diffusers/models/downsampling.py +2 -2
  96. diffusers/models/embeddings.py +36 -46
  97. diffusers/models/embeddings_flax.py +2 -2
  98. diffusers/models/lora.py +3 -3
  99. diffusers/models/model_loading_utils.py +233 -1
  100. diffusers/models/modeling_flax_utils.py +1 -2
  101. diffusers/models/modeling_utils.py +203 -108
  102. diffusers/models/normalization.py +4 -4
  103. diffusers/models/resnet.py +2 -2
  104. diffusers/models/resnet_flax.py +1 -1
  105. diffusers/models/transformers/__init__.py +7 -0
  106. diffusers/models/transformers/auraflow_transformer_2d.py +70 -24
  107. diffusers/models/transformers/cogvideox_transformer_3d.py +1 -1
  108. diffusers/models/transformers/consisid_transformer_3d.py +1 -1
  109. diffusers/models/transformers/dit_transformer_2d.py +2 -2
  110. diffusers/models/transformers/dual_transformer_2d.py +1 -1
  111. diffusers/models/transformers/hunyuan_transformer_2d.py +2 -2
  112. diffusers/models/transformers/latte_transformer_3d.py +4 -5
  113. diffusers/models/transformers/lumina_nextdit2d.py +2 -2
  114. diffusers/models/transformers/pixart_transformer_2d.py +3 -3
  115. diffusers/models/transformers/prior_transformer.py +1 -1
  116. diffusers/models/transformers/sana_transformer.py +8 -3
  117. diffusers/models/transformers/stable_audio_transformer.py +5 -9
  118. diffusers/models/transformers/t5_film_transformer.py +3 -3
  119. diffusers/models/transformers/transformer_2d.py +1 -1
  120. diffusers/models/transformers/transformer_allegro.py +1 -1
  121. diffusers/models/transformers/transformer_chroma.py +641 -0
  122. diffusers/models/transformers/transformer_cogview3plus.py +5 -10
  123. diffusers/models/transformers/transformer_cogview4.py +353 -27
  124. diffusers/models/transformers/transformer_cosmos.py +586 -0
  125. diffusers/models/transformers/transformer_flux.py +376 -138
  126. diffusers/models/transformers/transformer_hidream_image.py +942 -0
  127. diffusers/models/transformers/transformer_hunyuan_video.py +12 -8
  128. diffusers/models/transformers/transformer_hunyuan_video_framepack.py +416 -0
  129. diffusers/models/transformers/transformer_ltx.py +105 -24
  130. diffusers/models/transformers/transformer_lumina2.py +1 -1
  131. diffusers/models/transformers/transformer_mochi.py +1 -1
  132. diffusers/models/transformers/transformer_omnigen.py +2 -2
  133. diffusers/models/transformers/transformer_qwenimage.py +645 -0
  134. diffusers/models/transformers/transformer_sd3.py +7 -7
  135. diffusers/models/transformers/transformer_skyreels_v2.py +607 -0
  136. diffusers/models/transformers/transformer_temporal.py +1 -1
  137. diffusers/models/transformers/transformer_wan.py +316 -87
  138. diffusers/models/transformers/transformer_wan_vace.py +387 -0
  139. diffusers/models/unets/unet_1d.py +1 -1
  140. diffusers/models/unets/unet_1d_blocks.py +1 -1
  141. diffusers/models/unets/unet_2d.py +1 -1
  142. diffusers/models/unets/unet_2d_blocks.py +1 -1
  143. diffusers/models/unets/unet_2d_blocks_flax.py +8 -7
  144. diffusers/models/unets/unet_2d_condition.py +4 -3
  145. diffusers/models/unets/unet_2d_condition_flax.py +2 -2
  146. diffusers/models/unets/unet_3d_blocks.py +1 -1
  147. diffusers/models/unets/unet_3d_condition.py +3 -3
  148. diffusers/models/unets/unet_i2vgen_xl.py +3 -3
  149. diffusers/models/unets/unet_kandinsky3.py +1 -1
  150. diffusers/models/unets/unet_motion_model.py +2 -2
  151. diffusers/models/unets/unet_stable_cascade.py +1 -1
  152. diffusers/models/upsampling.py +2 -2
  153. diffusers/models/vae_flax.py +2 -2
  154. diffusers/models/vq_model.py +1 -1
  155. diffusers/modular_pipelines/__init__.py +83 -0
  156. diffusers/modular_pipelines/components_manager.py +1068 -0
  157. diffusers/modular_pipelines/flux/__init__.py +66 -0
  158. diffusers/modular_pipelines/flux/before_denoise.py +689 -0
  159. diffusers/modular_pipelines/flux/decoders.py +109 -0
  160. diffusers/modular_pipelines/flux/denoise.py +227 -0
  161. diffusers/modular_pipelines/flux/encoders.py +412 -0
  162. diffusers/modular_pipelines/flux/modular_blocks.py +181 -0
  163. diffusers/modular_pipelines/flux/modular_pipeline.py +59 -0
  164. diffusers/modular_pipelines/modular_pipeline.py +2446 -0
  165. diffusers/modular_pipelines/modular_pipeline_utils.py +672 -0
  166. diffusers/modular_pipelines/node_utils.py +665 -0
  167. diffusers/modular_pipelines/stable_diffusion_xl/__init__.py +77 -0
  168. diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py +1874 -0
  169. diffusers/modular_pipelines/stable_diffusion_xl/decoders.py +208 -0
  170. diffusers/modular_pipelines/stable_diffusion_xl/denoise.py +771 -0
  171. diffusers/modular_pipelines/stable_diffusion_xl/encoders.py +887 -0
  172. diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py +380 -0
  173. diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py +365 -0
  174. diffusers/modular_pipelines/wan/__init__.py +66 -0
  175. diffusers/modular_pipelines/wan/before_denoise.py +365 -0
  176. diffusers/modular_pipelines/wan/decoders.py +105 -0
  177. diffusers/modular_pipelines/wan/denoise.py +261 -0
  178. diffusers/modular_pipelines/wan/encoders.py +242 -0
  179. diffusers/modular_pipelines/wan/modular_blocks.py +144 -0
  180. diffusers/modular_pipelines/wan/modular_pipeline.py +90 -0
  181. diffusers/pipelines/__init__.py +68 -6
  182. diffusers/pipelines/allegro/pipeline_allegro.py +11 -11
  183. diffusers/pipelines/amused/pipeline_amused.py +7 -6
  184. diffusers/pipelines/amused/pipeline_amused_img2img.py +6 -5
  185. diffusers/pipelines/amused/pipeline_amused_inpaint.py +6 -5
  186. diffusers/pipelines/animatediff/pipeline_animatediff.py +6 -6
  187. diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py +6 -6
  188. diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +16 -15
  189. diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +6 -6
  190. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +5 -5
  191. diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py +5 -5
  192. diffusers/pipelines/audioldm/pipeline_audioldm.py +8 -7
  193. diffusers/pipelines/audioldm2/modeling_audioldm2.py +1 -1
  194. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +22 -13
  195. diffusers/pipelines/aura_flow/pipeline_aura_flow.py +48 -11
  196. diffusers/pipelines/auto_pipeline.py +23 -20
  197. diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
  198. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +2 -2
  199. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +11 -10
  200. diffusers/pipelines/chroma/__init__.py +49 -0
  201. diffusers/pipelines/chroma/pipeline_chroma.py +949 -0
  202. diffusers/pipelines/chroma/pipeline_chroma_img2img.py +1034 -0
  203. diffusers/pipelines/chroma/pipeline_output.py +21 -0
  204. diffusers/pipelines/cogvideo/pipeline_cogvideox.py +17 -16
  205. diffusers/pipelines/cogvideo/pipeline_cogvideox_fun_control.py +17 -16
  206. diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py +18 -17
  207. diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py +17 -16
  208. diffusers/pipelines/cogview3/pipeline_cogview3plus.py +9 -9
  209. diffusers/pipelines/cogview4/pipeline_cogview4.py +23 -22
  210. diffusers/pipelines/cogview4/pipeline_cogview4_control.py +7 -7
  211. diffusers/pipelines/consisid/consisid_utils.py +2 -2
  212. diffusers/pipelines/consisid/pipeline_consisid.py +8 -8
  213. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -1
  214. diffusers/pipelines/controlnet/pipeline_controlnet.py +7 -7
  215. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +11 -10
  216. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +7 -7
  217. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +7 -7
  218. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +14 -14
  219. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +10 -6
  220. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +13 -13
  221. diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py +226 -107
  222. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl.py +12 -8
  223. diffusers/pipelines/controlnet/pipeline_controlnet_union_sd_xl_img2img.py +207 -105
  224. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +1 -1
  225. diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py +8 -8
  226. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py +7 -7
  227. diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py +7 -7
  228. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +12 -10
  229. diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +9 -7
  230. diffusers/pipelines/cosmos/__init__.py +54 -0
  231. diffusers/pipelines/cosmos/pipeline_cosmos2_text2image.py +673 -0
  232. diffusers/pipelines/cosmos/pipeline_cosmos2_video2world.py +792 -0
  233. diffusers/pipelines/cosmos/pipeline_cosmos_text2world.py +664 -0
  234. diffusers/pipelines/cosmos/pipeline_cosmos_video2world.py +826 -0
  235. diffusers/pipelines/cosmos/pipeline_output.py +40 -0
  236. diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +5 -4
  237. diffusers/pipelines/ddim/pipeline_ddim.py +4 -4
  238. diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -1
  239. diffusers/pipelines/deepfloyd_if/pipeline_if.py +10 -10
  240. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +10 -10
  241. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +10 -10
  242. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +10 -10
  243. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +10 -10
  244. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +10 -10
  245. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +8 -8
  246. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +5 -5
  247. diffusers/pipelines/deprecated/audio_diffusion/mel.py +1 -1
  248. diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +3 -3
  249. diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +1 -1
  250. diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +2 -2
  251. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +4 -3
  252. diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +1 -1
  253. diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py +1 -1
  254. diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py +1 -1
  255. diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py +1 -1
  256. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +1 -1
  257. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +8 -8
  258. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +9 -9
  259. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +10 -10
  260. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +10 -8
  261. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +5 -5
  262. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +18 -18
  263. diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +1 -1
  264. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +2 -2
  265. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +6 -6
  266. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +5 -5
  267. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +5 -5
  268. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +5 -5
  269. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +1 -1
  270. diffusers/pipelines/dit/pipeline_dit.py +4 -2
  271. diffusers/pipelines/easyanimate/pipeline_easyanimate.py +4 -4
  272. diffusers/pipelines/easyanimate/pipeline_easyanimate_control.py +4 -4
  273. diffusers/pipelines/easyanimate/pipeline_easyanimate_inpaint.py +7 -6
  274. diffusers/pipelines/flux/__init__.py +4 -0
  275. diffusers/pipelines/flux/modeling_flux.py +1 -1
  276. diffusers/pipelines/flux/pipeline_flux.py +37 -36
  277. diffusers/pipelines/flux/pipeline_flux_control.py +9 -9
  278. diffusers/pipelines/flux/pipeline_flux_control_img2img.py +7 -7
  279. diffusers/pipelines/flux/pipeline_flux_control_inpaint.py +7 -7
  280. diffusers/pipelines/flux/pipeline_flux_controlnet.py +7 -7
  281. diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py +31 -23
  282. diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py +3 -2
  283. diffusers/pipelines/flux/pipeline_flux_fill.py +7 -7
  284. diffusers/pipelines/flux/pipeline_flux_img2img.py +40 -7
  285. diffusers/pipelines/flux/pipeline_flux_inpaint.py +12 -7
  286. diffusers/pipelines/flux/pipeline_flux_kontext.py +1134 -0
  287. diffusers/pipelines/flux/pipeline_flux_kontext_inpaint.py +1460 -0
  288. diffusers/pipelines/flux/pipeline_flux_prior_redux.py +2 -2
  289. diffusers/pipelines/flux/pipeline_output.py +6 -4
  290. diffusers/pipelines/free_init_utils.py +2 -2
  291. diffusers/pipelines/free_noise_utils.py +3 -3
  292. diffusers/pipelines/hidream_image/__init__.py +47 -0
  293. diffusers/pipelines/hidream_image/pipeline_hidream_image.py +1026 -0
  294. diffusers/pipelines/hidream_image/pipeline_output.py +35 -0
  295. diffusers/pipelines/hunyuan_video/__init__.py +2 -0
  296. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py +8 -8
  297. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video.py +26 -25
  298. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_framepack.py +1114 -0
  299. diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py +71 -15
  300. diffusers/pipelines/hunyuan_video/pipeline_output.py +19 -0
  301. diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py +8 -8
  302. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +10 -8
  303. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +6 -6
  304. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +34 -34
  305. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +19 -26
  306. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +7 -7
  307. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +11 -11
  308. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +6 -6
  309. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +35 -35
  310. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +6 -6
  311. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +17 -39
  312. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +17 -45
  313. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +7 -7
  314. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +10 -10
  315. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +10 -10
  316. diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py +7 -7
  317. diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py +17 -38
  318. diffusers/pipelines/kolors/pipeline_kolors.py +10 -10
  319. diffusers/pipelines/kolors/pipeline_kolors_img2img.py +12 -12
  320. diffusers/pipelines/kolors/text_encoder.py +3 -3
  321. diffusers/pipelines/kolors/tokenizer.py +1 -1
  322. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +2 -2
  323. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +2 -2
  324. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
  325. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py +3 -3
  326. diffusers/pipelines/latte/pipeline_latte.py +12 -12
  327. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +13 -13
  328. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +17 -16
  329. diffusers/pipelines/ltx/__init__.py +4 -0
  330. diffusers/pipelines/ltx/modeling_latent_upsampler.py +188 -0
  331. diffusers/pipelines/ltx/pipeline_ltx.py +64 -18
  332. diffusers/pipelines/ltx/pipeline_ltx_condition.py +117 -38
  333. diffusers/pipelines/ltx/pipeline_ltx_image2video.py +63 -18
  334. diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py +277 -0
  335. diffusers/pipelines/lumina/pipeline_lumina.py +13 -13
  336. diffusers/pipelines/lumina2/pipeline_lumina2.py +10 -10
  337. diffusers/pipelines/marigold/marigold_image_processing.py +2 -2
  338. diffusers/pipelines/mochi/pipeline_mochi.py +15 -14
  339. diffusers/pipelines/musicldm/pipeline_musicldm.py +16 -13
  340. diffusers/pipelines/omnigen/pipeline_omnigen.py +13 -11
  341. diffusers/pipelines/omnigen/processor_omnigen.py +8 -3
  342. diffusers/pipelines/onnx_utils.py +15 -2
  343. diffusers/pipelines/pag/pag_utils.py +2 -2
  344. diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py +12 -8
  345. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_inpaint.py +7 -7
  346. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py +10 -6
  347. diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py +14 -14
  348. diffusers/pipelines/pag/pipeline_pag_hunyuandit.py +8 -8
  349. diffusers/pipelines/pag/pipeline_pag_kolors.py +10 -10
  350. diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py +11 -11
  351. diffusers/pipelines/pag/pipeline_pag_sana.py +18 -12
  352. diffusers/pipelines/pag/pipeline_pag_sd.py +8 -8
  353. diffusers/pipelines/pag/pipeline_pag_sd_3.py +7 -7
  354. diffusers/pipelines/pag/pipeline_pag_sd_3_img2img.py +7 -7
  355. diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +6 -6
  356. diffusers/pipelines/pag/pipeline_pag_sd_img2img.py +5 -5
  357. diffusers/pipelines/pag/pipeline_pag_sd_inpaint.py +8 -8
  358. diffusers/pipelines/pag/pipeline_pag_sd_xl.py +16 -15
  359. diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py +18 -17
  360. diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py +12 -12
  361. diffusers/pipelines/paint_by_example/image_encoder.py +1 -1
  362. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +8 -7
  363. diffusers/pipelines/pia/pipeline_pia.py +8 -6
  364. diffusers/pipelines/pipeline_flax_utils.py +5 -6
  365. diffusers/pipelines/pipeline_loading_utils.py +113 -15
  366. diffusers/pipelines/pipeline_utils.py +127 -48
  367. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +14 -12
  368. diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py +31 -11
  369. diffusers/pipelines/qwenimage/__init__.py +55 -0
  370. diffusers/pipelines/qwenimage/pipeline_output.py +21 -0
  371. diffusers/pipelines/qwenimage/pipeline_qwenimage.py +726 -0
  372. diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +882 -0
  373. diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py +829 -0
  374. diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py +1015 -0
  375. diffusers/pipelines/sana/__init__.py +4 -0
  376. diffusers/pipelines/sana/pipeline_sana.py +23 -21
  377. diffusers/pipelines/sana/pipeline_sana_controlnet.py +1106 -0
  378. diffusers/pipelines/sana/pipeline_sana_sprint.py +23 -19
  379. diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py +981 -0
  380. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +7 -6
  381. diffusers/pipelines/shap_e/camera.py +1 -1
  382. diffusers/pipelines/shap_e/pipeline_shap_e.py +1 -1
  383. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +1 -1
  384. diffusers/pipelines/shap_e/renderer.py +3 -3
  385. diffusers/pipelines/skyreels_v2/__init__.py +59 -0
  386. diffusers/pipelines/skyreels_v2/pipeline_output.py +20 -0
  387. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py +610 -0
  388. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py +978 -0
  389. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_i2v.py +1059 -0
  390. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing_v2v.py +1063 -0
  391. diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_i2v.py +745 -0
  392. diffusers/pipelines/stable_audio/modeling_stable_audio.py +1 -1
  393. diffusers/pipelines/stable_audio/pipeline_stable_audio.py +5 -5
  394. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +8 -8
  395. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +13 -13
  396. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +9 -9
  397. diffusers/pipelines/stable_diffusion/__init__.py +0 -7
  398. diffusers/pipelines/stable_diffusion/clip_image_project_model.py +1 -1
  399. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +11 -4
  400. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  401. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +1 -1
  402. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py +1 -1
  403. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +12 -11
  404. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +10 -10
  405. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +11 -11
  406. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +10 -10
  407. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +10 -9
  408. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +5 -5
  409. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +5 -5
  410. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +5 -5
  411. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +5 -5
  412. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +5 -5
  413. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +4 -4
  414. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +5 -5
  415. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +7 -7
  416. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +5 -5
  417. diffusers/pipelines/stable_diffusion/safety_checker.py +1 -1
  418. diffusers/pipelines/stable_diffusion/safety_checker_flax.py +1 -1
  419. diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py +1 -1
  420. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +13 -12
  421. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +7 -7
  422. diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +7 -7
  423. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +12 -8
  424. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +15 -9
  425. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +11 -9
  426. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +11 -9
  427. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +18 -12
  428. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +11 -8
  429. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +11 -8
  430. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +15 -12
  431. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +8 -6
  432. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  433. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +15 -11
  434. diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py +1 -1
  435. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +16 -15
  436. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +18 -17
  437. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +12 -12
  438. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +16 -15
  439. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +3 -3
  440. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +12 -12
  441. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +18 -17
  442. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +12 -7
  443. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +12 -7
  444. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +15 -13
  445. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +24 -21
  446. diffusers/pipelines/unclip/pipeline_unclip.py +4 -3
  447. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +4 -3
  448. diffusers/pipelines/unclip/text_proj.py +2 -2
  449. diffusers/pipelines/unidiffuser/modeling_text_decoder.py +2 -2
  450. diffusers/pipelines/unidiffuser/modeling_uvit.py +1 -1
  451. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +8 -7
  452. diffusers/pipelines/visualcloze/__init__.py +52 -0
  453. diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py +444 -0
  454. diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py +952 -0
  455. diffusers/pipelines/visualcloze/visualcloze_utils.py +251 -0
  456. diffusers/pipelines/wan/__init__.py +2 -0
  457. diffusers/pipelines/wan/pipeline_wan.py +91 -30
  458. diffusers/pipelines/wan/pipeline_wan_i2v.py +145 -45
  459. diffusers/pipelines/wan/pipeline_wan_vace.py +975 -0
  460. diffusers/pipelines/wan/pipeline_wan_video2video.py +14 -16
  461. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
  462. diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py +1 -1
  463. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +1 -1
  464. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +8 -8
  465. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +16 -15
  466. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +6 -6
  467. diffusers/quantizers/__init__.py +3 -1
  468. diffusers/quantizers/base.py +17 -1
  469. diffusers/quantizers/bitsandbytes/bnb_quantizer.py +4 -0
  470. diffusers/quantizers/bitsandbytes/utils.py +10 -7
  471. diffusers/quantizers/gguf/gguf_quantizer.py +13 -4
  472. diffusers/quantizers/gguf/utils.py +108 -16
  473. diffusers/quantizers/pipe_quant_config.py +202 -0
  474. diffusers/quantizers/quantization_config.py +18 -16
  475. diffusers/quantizers/quanto/quanto_quantizer.py +4 -0
  476. diffusers/quantizers/torchao/torchao_quantizer.py +31 -1
  477. diffusers/schedulers/__init__.py +3 -1
  478. diffusers/schedulers/deprecated/scheduling_karras_ve.py +4 -3
  479. diffusers/schedulers/deprecated/scheduling_sde_vp.py +1 -1
  480. diffusers/schedulers/scheduling_consistency_models.py +1 -1
  481. diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py +10 -5
  482. diffusers/schedulers/scheduling_ddim.py +8 -8
  483. diffusers/schedulers/scheduling_ddim_cogvideox.py +5 -5
  484. diffusers/schedulers/scheduling_ddim_flax.py +6 -6
  485. diffusers/schedulers/scheduling_ddim_inverse.py +6 -6
  486. diffusers/schedulers/scheduling_ddim_parallel.py +22 -22
  487. diffusers/schedulers/scheduling_ddpm.py +9 -9
  488. diffusers/schedulers/scheduling_ddpm_flax.py +7 -7
  489. diffusers/schedulers/scheduling_ddpm_parallel.py +18 -18
  490. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +2 -2
  491. diffusers/schedulers/scheduling_deis_multistep.py +16 -9
  492. diffusers/schedulers/scheduling_dpm_cogvideox.py +5 -5
  493. diffusers/schedulers/scheduling_dpmsolver_multistep.py +18 -12
  494. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +22 -20
  495. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +11 -11
  496. diffusers/schedulers/scheduling_dpmsolver_sde.py +2 -2
  497. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +19 -13
  498. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +13 -8
  499. diffusers/schedulers/scheduling_edm_euler.py +20 -11
  500. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +3 -3
  501. diffusers/schedulers/scheduling_euler_discrete.py +3 -3
  502. diffusers/schedulers/scheduling_euler_discrete_flax.py +3 -3
  503. diffusers/schedulers/scheduling_flow_match_euler_discrete.py +20 -5
  504. diffusers/schedulers/scheduling_flow_match_heun_discrete.py +1 -1
  505. diffusers/schedulers/scheduling_flow_match_lcm.py +561 -0
  506. diffusers/schedulers/scheduling_heun_discrete.py +2 -2
  507. diffusers/schedulers/scheduling_ipndm.py +2 -2
  508. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +2 -2
  509. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +2 -2
  510. diffusers/schedulers/scheduling_karras_ve_flax.py +5 -5
  511. diffusers/schedulers/scheduling_lcm.py +3 -3
  512. diffusers/schedulers/scheduling_lms_discrete.py +2 -2
  513. diffusers/schedulers/scheduling_lms_discrete_flax.py +1 -1
  514. diffusers/schedulers/scheduling_pndm.py +4 -4
  515. diffusers/schedulers/scheduling_pndm_flax.py +4 -4
  516. diffusers/schedulers/scheduling_repaint.py +9 -9
  517. diffusers/schedulers/scheduling_sasolver.py +15 -15
  518. diffusers/schedulers/scheduling_scm.py +1 -2
  519. diffusers/schedulers/scheduling_sde_ve.py +1 -1
  520. diffusers/schedulers/scheduling_sde_ve_flax.py +2 -2
  521. diffusers/schedulers/scheduling_tcd.py +3 -3
  522. diffusers/schedulers/scheduling_unclip.py +5 -5
  523. diffusers/schedulers/scheduling_unipc_multistep.py +21 -12
  524. diffusers/schedulers/scheduling_utils.py +3 -3
  525. diffusers/schedulers/scheduling_utils_flax.py +2 -2
  526. diffusers/schedulers/scheduling_vq_diffusion.py +1 -1
  527. diffusers/training_utils.py +91 -5
  528. diffusers/utils/__init__.py +15 -0
  529. diffusers/utils/accelerate_utils.py +1 -1
  530. diffusers/utils/constants.py +4 -0
  531. diffusers/utils/doc_utils.py +1 -1
  532. diffusers/utils/dummy_pt_objects.py +432 -0
  533. diffusers/utils/dummy_torch_and_transformers_objects.py +480 -0
  534. diffusers/utils/dynamic_modules_utils.py +85 -8
  535. diffusers/utils/export_utils.py +1 -1
  536. diffusers/utils/hub_utils.py +33 -17
  537. diffusers/utils/import_utils.py +151 -18
  538. diffusers/utils/logging.py +1 -1
  539. diffusers/utils/outputs.py +2 -1
  540. diffusers/utils/peft_utils.py +96 -10
  541. diffusers/utils/state_dict_utils.py +20 -3
  542. diffusers/utils/testing_utils.py +195 -17
  543. diffusers/utils/torch_utils.py +43 -5
  544. diffusers/video_processor.py +2 -2
  545. {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/METADATA +72 -57
  546. diffusers-0.35.0.dist-info/RECORD +703 -0
  547. {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/WHEEL +1 -1
  548. diffusers-0.33.1.dist-info/RECORD +0 -608
  549. {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/LICENSE +0 -0
  550. {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/entry_points.txt +0 -0
  551. {diffusers-0.33.1.dist-info → diffusers-0.35.0.dist-info}/top_level.txt +0 -0
diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py (new file)
@@ -0,0 +1,882 @@
+ # Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import inspect
+ import math
+ from typing import Any, Callable, Dict, List, Optional, Union
+
+ import numpy as np
+ import torch
+ from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor
+
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
+ from ...loaders import QwenImageLoraLoaderMixin
+ from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel
+ from ...schedulers import FlowMatchEulerDiscreteScheduler
+ from ...utils import is_torch_xla_available, logging, replace_example_docstring
+ from ...utils.torch_utils import randn_tensor
+ from ..pipeline_utils import DiffusionPipeline
+ from .pipeline_output import QwenImagePipelineOutput
+
+
+ if is_torch_xla_available():
+     import torch_xla.core.xla_model as xm
+
+     XLA_AVAILABLE = True
+ else:
+     XLA_AVAILABLE = False
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+ EXAMPLE_DOC_STRING = """
+     Examples:
+         ```py
+         >>> import torch
+         >>> from PIL import Image
+         >>> from diffusers import QwenImageEditPipeline
+         >>> from diffusers.utils import load_image
+
+         >>> pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16)
+         >>> pipe.to("cuda")
+         >>> image = load_image(
+         ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png"
+         ... ).convert("RGB")
+         >>> prompt = (
+         ...     "Make Pikachu hold a sign that says 'Qwen Edit is awesome', yarn art style, detailed, vibrant colors"
+         ... )
+         >>> # Depending on the variant being used, the pipeline call will slightly vary.
+         >>> # Refer to the pipeline documentation for more details.
+         >>> image = pipe(image, prompt, num_inference_steps=50).images[0]
+         >>> image.save("qwenimage_edit.png")
+         ```
+ """
+ PREFERRED_QWENIMAGE_RESOLUTIONS = [
+     (672, 1568),
+     (688, 1504),
+     (720, 1456),
+     (752, 1392),
+     (800, 1328),
+     (832, 1248),
+     (880, 1184),
+     (944, 1104),
+     (1024, 1024),
+     (1104, 944),
+     (1184, 880),
+     (1248, 832),
+     (1328, 800),
+     (1392, 752),
+     (1456, 720),
+     (1504, 688),
+     (1568, 672),
+ ]
+
+
+ # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
+ def calculate_shift(
+     image_seq_len,
+     base_seq_len: int = 256,
+     max_seq_len: int = 4096,
+     base_shift: float = 0.5,
+     max_shift: float = 1.15,
+ ):
+     m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+     b = base_shift - m * base_seq_len
+     mu = image_seq_len * m + b
+     return mu
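
# Sanity check (illustrative annotation, not part of the released file; values follow
# from the defaults above): mu is linear in image_seq_len. A 1024x1024 image has a
# packed sequence length of (1024 / 8 / 2) ** 2 = 4096 tokens, so:
#   m  = (1.15 - 0.5) / (4096 - 256) ≈ 1.693e-4
#   b  = 0.5 - m * 256              ≈ 0.4567
#   mu = 4096 * m + b               = 1.15  (the max_shift endpoint)
# while image_seq_len = 256 (a 256x256 image) maps back to base_shift = 0.5.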
+
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+ def retrieve_timesteps(
+     scheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     sigmas: Optional[List[float]] = None,
+     **kwargs,
+ ):
+     r"""
+     Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+     custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+     Args:
+         scheduler (`SchedulerMixin`):
+             The scheduler to get timesteps from.
+         num_inference_steps (`int`):
+             The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+             must be `None`.
+         device (`str` or `torch.device`, *optional*):
+             The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+         timesteps (`List[int]`, *optional*):
+             Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is
+             passed, `num_inference_steps` and `sigmas` must be `None`.
+         sigmas (`List[float]`, *optional*):
+             Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+             `num_inference_steps` and `timesteps` must be `None`.
+
+     Returns:
+         `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
+         the second element is the number of inference steps.
+     """
+     if timesteps is not None and sigmas is not None:
+         raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+     if timesteps is not None:
+         accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accepts_timesteps:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" timestep schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     elif sigmas is not None:
+         accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accept_sigmas:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" sigmas schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+     return timesteps, num_inference_steps
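
# Illustrative usage sketch (annotation, not part of the file; `pipe` and the 50-step
# sigma grid are assumed). Custom sigmas route through scheduler.set_timesteps(sigmas=...),
# and extra kwargs such as the flow-match shift `mu` are forwarded as-is:
#
#   sigmas = np.linspace(1.0, 1 / 50, 50).tolist()
#   timesteps, num_inference_steps = retrieve_timesteps(
#       pipe.scheduler, sigmas=sigmas, device="cuda", mu=0.8
#   )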
+
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+ def retrieve_latents(
+     encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+ ):
+     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+         return encoder_output.latent_dist.sample(generator)
+     elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+         return encoder_output.latent_dist.mode()
+     elif hasattr(encoder_output, "latents"):
+         return encoder_output.latents
+     else:
+         raise AttributeError("Could not access latents of provided encoder_output")
+
+
+ def calculate_dimensions(target_area, ratio):
+     width = math.sqrt(target_area * ratio)
+     height = width / ratio
+
+     width = round(width / 32) * 32
+     height = round(height / 32) * 32
+
+     return width, height, None
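
# Worked example (illustrative annotation): calculate_dimensions snaps a target pixel
# area and aspect ratio to multiples of 32. For target_area = 1024 * 1024 and
# ratio = 16 / 9:
#   width  = sqrt(1048576 * 16 / 9) ≈ 1365.3  ->  round(1365.3 / 32) * 32 = 1376
#   height = 1365.3 / (16 / 9)      = 768.0   ->  768
# so the call returns (1376, 768, None); the trailing None is returned unconditionally.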
+
+
+ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
+     r"""
+     The Qwen-Image-Edit pipeline for image editing.
+
+     Args:
+         transformer ([`QwenImageTransformer2DModel`]):
+             Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
+         scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+             A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+         vae ([`AutoencoderKLQwenImage`]):
+             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+         text_encoder ([`Qwen2_5_VLForConditionalGeneration`]):
+             The [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) multimodal text
+             encoder, which conditions generation on both the prompt and the input image.
+         tokenizer (`Qwen2Tokenizer`):
+             Tokenizer of class
+             [Qwen2Tokenizer](https://huggingface.co/docs/transformers/en/model_doc/qwen2#transformers.Qwen2Tokenizer).
+         processor (`Qwen2VLProcessor`):
+             Multimodal processor that packs the text prompt and the input image into model inputs for `text_encoder`.
+     """
+
+     model_cpu_offload_seq = "text_encoder->transformer->vae"
+     _callback_tensor_inputs = ["latents", "prompt_embeds"]
+
+     def __init__(
+         self,
+         scheduler: FlowMatchEulerDiscreteScheduler,
+         vae: AutoencoderKLQwenImage,
+         text_encoder: Qwen2_5_VLForConditionalGeneration,
+         tokenizer: Qwen2Tokenizer,
+         processor: Qwen2VLProcessor,
+         transformer: QwenImageTransformer2DModel,
+     ):
+         super().__init__()
+
+         self.register_modules(
+             vae=vae,
+             text_encoder=text_encoder,
+             tokenizer=tokenizer,
+             processor=processor,
+             transformer=transformer,
+             scheduler=scheduler,
+         )
+         self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8
+         self.latent_channels = self.vae.config.z_dim if getattr(self, "vae", None) else 16
+         # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height have to
+         # be divisible by the patch size, so the vae scale factor is multiplied by the patch size to account for this.
+         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
+         self.vl_processor = processor
+         self.tokenizer_max_length = 1024
+
+         self.prompt_template_encode = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n"
+         self.prompt_template_encode_start_idx = 64
+         self.default_sample_size = 128
+
+     # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden
+     def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor):
+         bool_mask = mask.bool()
+         valid_lengths = bool_mask.sum(dim=1)
+         selected = hidden_states[bool_mask]
+         split_result = torch.split(selected, valid_lengths.tolist(), dim=0)
+
+         return split_result
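
    # Shape sketch (illustrative annotation): for hidden_states of shape (2, 5, D) and
    # mask [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], boolean indexing keeps 3 + 5 = 8 rows and
    # torch.split recovers per-sample tensors of shapes (3, D) and (5, D), i.e. each
    # prompt's hidden states trimmed to its true (unpadded) length.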
+
+     def _get_qwen_prompt_embeds(
+         self,
+         prompt: Union[str, List[str]] = None,
+         image: Optional[torch.Tensor] = None,
+         device: Optional[torch.device] = None,
+         dtype: Optional[torch.dtype] = None,
+     ):
+         device = device or self._execution_device
+         dtype = dtype or self.text_encoder.dtype
+
+         prompt = [prompt] if isinstance(prompt, str) else prompt
+
+         template = self.prompt_template_encode
+         drop_idx = self.prompt_template_encode_start_idx
+         txt = [template.format(e) for e in prompt]
+
+         model_inputs = self.processor(
+             text=txt,
+             images=image,
+             padding=True,
+             return_tensors="pt",
+         ).to(device)
+
+         outputs = self.text_encoder(
+             input_ids=model_inputs.input_ids,
+             attention_mask=model_inputs.attention_mask,
+             pixel_values=model_inputs.pixel_values,
+             image_grid_thw=model_inputs.image_grid_thw,
+             output_hidden_states=True,
+         )
+
+         hidden_states = outputs.hidden_states[-1]
+         split_hidden_states = self._extract_masked_hidden(hidden_states, model_inputs.attention_mask)
+         split_hidden_states = [e[drop_idx:] for e in split_hidden_states]
+         attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states]
+         max_seq_len = max([e.size(0) for e in split_hidden_states])
+         prompt_embeds = torch.stack(
+             [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states]
+         )
+         encoder_attention_mask = torch.stack(
+             [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list]
+         )
+
+         prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+
+         return prompt_embeds, encoder_attention_mask
+
+     def encode_prompt(
+         self,
+         prompt: Union[str, List[str]],
+         image: Optional[torch.Tensor] = None,
+         device: Optional[torch.device] = None,
+         num_images_per_prompt: int = 1,
+         prompt_embeds: Optional[torch.Tensor] = None,
+         prompt_embeds_mask: Optional[torch.Tensor] = None,
+         max_sequence_length: int = 1024,
+     ):
+         r"""
+         Args:
+             prompt (`str` or `List[str]`, *optional*):
+                 prompt to be encoded
+             image (`torch.Tensor`, *optional*):
+                 image to be encoded
+             device (`torch.device`, *optional*):
+                 torch device
+             num_images_per_prompt (`int`):
+                 number of images that should be generated per prompt
+             prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
+                 not provided, text embeddings will be generated from `prompt` input argument.
+         """
+         device = device or self._execution_device
+
+         prompt = [prompt] if isinstance(prompt, str) else prompt
+         batch_size = len(prompt) if prompt_embeds is None else prompt_embeds.shape[0]
+
+         if prompt_embeds is None:
+             prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, image, device)
+
+         _, seq_len, _ = prompt_embeds.shape
+         prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+         prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+         prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt, 1)
+         prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len)
+
+         return prompt_embeds, prompt_embeds_mask
+
+     def check_inputs(
+         self,
+         prompt,
+         height,
+         width,
+         negative_prompt=None,
+         prompt_embeds=None,
+         negative_prompt_embeds=None,
+         prompt_embeds_mask=None,
+         negative_prompt_embeds_mask=None,
+         callback_on_step_end_tensor_inputs=None,
+         max_sequence_length=None,
+     ):
+         if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
+             logger.warning(
+                 f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
+             )
+
+         if callback_on_step_end_tensor_inputs is not None and not all(
+             k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+         ):
+             raise ValueError(
+                 f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+             )
+
+         if prompt is not None and prompt_embeds is not None:
+             raise ValueError(
+                 f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                 " only forward one of the two."
+             )
+         elif prompt is None and prompt_embeds is None:
+             raise ValueError(
+                 "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+             )
+         elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+         if negative_prompt is not None and negative_prompt_embeds is not None:
+             raise ValueError(
+                 f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+                 f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+             )
+
+         if prompt_embeds is not None and prompt_embeds_mask is None:
+             raise ValueError(
+                 "If `prompt_embeds` are provided, `prompt_embeds_mask` also has to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`."
+             )
+         if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None:
+             raise ValueError(
+                 "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also has to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`."
+             )
+
+         if max_sequence_length is not None and max_sequence_length > 1024:
+             raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}")
+
+     @staticmethod
+     # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents
+     def _pack_latents(latents, batch_size, num_channels_latents, height, width):
+         latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
+         latents = latents.permute(0, 2, 4, 1, 3, 5)
+         latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
+
+         return latents
+
+     @staticmethod
+     # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents
+     def _unpack_latents(latents, height, width, vae_scale_factor):
+         batch_size, num_patches, channels = latents.shape
+
+         # VAE applies 8x compression on images but we must also account for packing which requires
+         # latent height and width to be divisible by 2.
+         height = 2 * (int(height) // (vae_scale_factor * 2))
+         width = 2 * (int(width) // (vae_scale_factor * 2))
+
+         latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
+         latents = latents.permute(0, 3, 1, 4, 2, 5)
+
+         latents = latents.reshape(batch_size, channels // (2 * 2), 1, height, width)
+
+         return latents
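
    # Shape sketch (illustrative annotation): for a 1024x1024 image with
    # vae_scale_factor = 8, the per-frame latent is (B, 16, 128, 128); _pack_latents
    # views it as (B, 16, 64, 2, 64, 2), permutes to (B, 64, 64, 16, 2, 2), and flattens
    # to (B, 4096, 64), i.e. one token per 2x2 latent patch. _unpack_latents inverts the
    # packing and re-inserts the singleton frame axis, giving (B, 16, 1, 128, 128).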
+
+     def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
+         if isinstance(generator, list):
+             image_latents = [
+                 retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode="argmax")
+                 for i in range(image.shape[0])
+             ]
+             image_latents = torch.cat(image_latents, dim=0)
+         else:
+             image_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode="argmax")
+         latents_mean = (
+             torch.tensor(self.vae.config.latents_mean)
+             .view(1, self.latent_channels, 1, 1, 1)
+             .to(image_latents.device, image_latents.dtype)
+         )
+         latents_std = (
+             torch.tensor(self.vae.config.latents_std)
+             .view(1, self.latent_channels, 1, 1, 1)
+             .to(image_latents.device, image_latents.dtype)
+         )
+         image_latents = (image_latents - latents_mean) / latents_std
+
+         return image_latents
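
    # Note (illustrative annotation): sample_mode="argmax" takes the mode of the VAE
    # posterior, so image encoding is deterministic even when a generator is supplied;
    # the per-channel latents_mean/latents_std, broadcast as (1, C, 1, 1, 1),
    # standardize the latents to the distribution the transformer was trained on.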
437
+
438
+    def enable_vae_slicing(self):
+        r"""
+        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor into slices to
+        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+        """
+        self.vae.enable_slicing()
+
+    def disable_vae_slicing(self):
+        r"""
+        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
+        computing decoding in one step.
+        """
+        self.vae.disable_slicing()
+
+    def enable_vae_tiling(self):
+        r"""
+        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
+        compute decoding and encoding in several steps. This is useful to save a large amount of memory and to
+        allow processing larger images.
+        """
+        self.vae.enable_tiling()
+
+    def disable_vae_tiling(self):
+        r"""
+        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
+        computing decoding in one step.
+        """
+        self.vae.disable_tiling()
+
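As a usage illustration (editorial sketch; the checkpoint id is the published Qwen-Image-Edit repo, shown here as an assumption), these memory savers are toggled on a loaded pipeline:

    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16)
    pipe.enable_vae_slicing()   # decode the batch one slice at a time
    pipe.enable_vae_tiling()    # decode/encode large images tile by tile
    # ... run the pipeline ...
    pipe.disable_vae_slicing()
    pipe.disable_vae_tiling()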
+    def prepare_latents(
+        self,
+        image,
+        batch_size,
+        num_channels_latents,
+        height,
+        width,
+        dtype,
+        device,
+        generator,
+        latents=None,
+    ):
+        # VAE applies 8x compression on images but we must also account for packing which requires
+        # latent height and width to be divisible by 2.
+        height = 2 * (int(height) // (self.vae_scale_factor * 2))
+        width = 2 * (int(width) // (self.vae_scale_factor * 2))
+
+        shape = (batch_size, 1, num_channels_latents, height, width)
+
+        image_latents = None
+        if image is not None:
+            image = image.to(device=device, dtype=dtype)
+            if image.shape[1] != self.latent_channels:
+                image_latents = self._encode_vae_image(image=image, generator=generator)
+            else:
+                image_latents = image
+            if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
+                # expand init_latents for batch_size
+                additional_image_per_prompt = batch_size // image_latents.shape[0]
+                image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
+            elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
+                raise ValueError(
+                    f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
+                )
+            else:
+                image_latents = torch.cat([image_latents], dim=0)
+
+            image_latent_height, image_latent_width = image_latents.shape[3:]
+            image_latents = self._pack_latents(
+                image_latents, batch_size, num_channels_latents, image_latent_height, image_latent_width
+            )
+
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+        if latents is None:
+            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+            latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
+        else:
+            latents = latents.to(device=device, dtype=dtype)
+
+        return latents, image_latents
+
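An editorial sketch of the dimension arithmetic above, with assumed values: an 8x-compressing VAE (`vae_scale_factor = 8`) and a 1024x1024 target image.

    vae_scale_factor = 8
    height_px, width_px = 1024, 1024

    # latent grid, rounded so both sides are divisible by 2 for patch packing
    lat_h = 2 * (height_px // (vae_scale_factor * 2))  # 128
    lat_w = 2 * (width_px // (vae_scale_factor * 2))   # 128

    # after _pack_latents, the sequence length seen by the transformer is
    image_seq_len = (lat_h // 2) * (lat_w // 2)        # 4096 tokens
    print(lat_h, lat_w, image_seq_len)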
+    @property
+    def guidance_scale(self):
+        return self._guidance_scale
+
+    @property
+    def attention_kwargs(self):
+        return self._attention_kwargs
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @property
+    def current_timestep(self):
+        return self._current_timestep
+
+    @property
+    def interrupt(self):
+        return self._interrupt
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        image: Optional[PipelineImageInput] = None,
+        prompt: Union[str, List[str]] = None,
+        negative_prompt: Union[str, List[str]] = None,
+        true_cfg_scale: float = 4.0,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        num_inference_steps: int = 50,
+        sigmas: Optional[List[float]] = None,
+        guidance_scale: float = 1.0,
+        num_images_per_prompt: int = 1,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        prompt_embeds_mask: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds_mask: Optional[torch.Tensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        attention_kwargs: Optional[Dict[str, Any]] = None,
+        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        max_sequence_length: int = 512,
+        _auto_resize: bool = True,
+    ):
+ r"""
571
+ Function invoked when calling the pipeline for generation.
572
+
573
+ Args:
574
+ prompt (`str` or `List[str]`, *optional*):
575
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
576
+ instead.
577
+ negative_prompt (`str` or `List[str]`, *optional*):
578
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
579
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
580
+ not greater than `1`).
581
+ true_cfg_scale (`float`, *optional*, defaults to 1.0):
582
+ When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance.
583
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
584
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
585
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
586
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
587
+ num_inference_steps (`int`, *optional*, defaults to 50):
588
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
589
+ expense of slower inference.
590
+ sigmas (`List[float]`, *optional*):
591
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
592
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
593
+ will be used.
594
+ guidance_scale (`float`, *optional*, defaults to 3.5):
595
+ Guidance scale as defined in [Classifier-Free Diffusion
596
+ Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
597
+ of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
598
+ `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
599
+ the text `prompt`, usually at the expense of lower image quality.
600
+
601
+ This parameter in the pipeline is there to support future guidance-distilled models when they come up.
602
+ Note that passing `guidance_scale` to the pipeline is ineffective. To enable classifier-free guidance,
603
+ please pass `true_cfg_scale` and `negative_prompt` (even an empty negative prompt like " ") should
604
+ enable classifier-free guidance computations.
605
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
606
+ The number of images to generate per prompt.
607
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
608
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
609
+ to make generation deterministic.
610
+ latents (`torch.Tensor`, *optional*):
611
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
612
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
613
+ tensor will be generated by sampling using the supplied random `generator`.
614
+ prompt_embeds (`torch.Tensor`, *optional*):
615
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
616
+ provided, text embeddings will be generated from `prompt` input argument.
617
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
618
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
619
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
620
+ argument.
621
+ output_type (`str`, *optional*, defaults to `"pil"`):
622
+ The output format of the generate image. Choose between
623
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
624
+ return_dict (`bool`, *optional*, defaults to `True`):
625
+ Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple.
626
+ attention_kwargs (`dict`, *optional*):
627
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
628
+ `self.processor` in
629
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
630
+ callback_on_step_end (`Callable`, *optional*):
631
+ A function that calls at the end of each denoising steps during the inference. The function is called
632
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
633
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
634
+ `callback_on_step_end_tensor_inputs`.
635
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
636
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
637
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
638
+ `._callback_tensor_inputs` attribute of your pipeline class.
639
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
640
+
641
+ Examples:
642
+
643
+ Returns:
644
+ [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`:
645
+ [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
646
+ returning a tuple, the first element is a list with the generated images.
647
+ """
648
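As a usage illustration of the callback contract documented above (editorial sketch; the function name is hypothetical):

    def log_latents(pipeline, step, timestep, callback_kwargs):
        latents = callback_kwargs["latents"]
        print(f"step {step}: t={timestep}, |latents|={latents.norm().item():.2f}")
        return callback_kwargs  # tensors returned here are written back into the loop

    # output = pipe(..., callback_on_step_end=log_latents,
    #               callback_on_step_end_tensor_inputs=["latents"])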
+        image_size = image[0].size if isinstance(image, list) else image.size
+        image_width, image_height = image_size
+        calculated_width, calculated_height, _ = calculate_dimensions(1024 * 1024, image_width / image_height)
+        # prefer user-provided height/width; otherwise fall back to the size derived from the input image
+        height = height or calculated_height
+        width = width or calculated_width
+
+        multiple_of = self.vae_scale_factor * 2
+        width = width // multiple_of * multiple_of
+        height = height // multiple_of * multiple_of
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            height,
+            width,
+            negative_prompt=negative_prompt,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            prompt_embeds_mask=prompt_embeds_mask,
+            negative_prompt_embeds_mask=negative_prompt_embeds_mask,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            max_sequence_length=max_sequence_length,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._attention_kwargs = attention_kwargs
+        self._current_timestep = None
+        self._interrupt = False
+
+        # 2. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self._execution_device
+        # 3. Preprocess image
+        if image is not None and not (isinstance(image, torch.Tensor) and image.size(1) == self.latent_channels):
+            img = image[0] if isinstance(image, list) else image
+            image_height, image_width = self.image_processor.get_default_height_width(img)
+            aspect_ratio = image_width / image_height
+            if _auto_resize:
+                _, image_width, image_height = min(
+                    (abs(aspect_ratio - w / h), w, h) for w, h in PREFERRED_QWENIMAGE_RESOLUTIONS
+                )
+            image_width = image_width // multiple_of * multiple_of
+            image_height = image_height // multiple_of * multiple_of
+            image = self.image_processor.resize(image, image_height, image_width)
+            prompt_image = image
+            image = self.image_processor.preprocess(image, image_height, image_width)
+            image = image.unsqueeze(2)
+
+        has_neg_prompt = negative_prompt is not None or (
+            negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None
+        )
+        do_true_cfg = true_cfg_scale > 1 and has_neg_prompt
+        prompt_embeds, prompt_embeds_mask = self.encode_prompt(
+            image=prompt_image,
+            prompt=prompt,
+            prompt_embeds=prompt_embeds,
+            prompt_embeds_mask=prompt_embeds_mask,
+            device=device,
+            num_images_per_prompt=num_images_per_prompt,
+            max_sequence_length=max_sequence_length,
+        )
+        if do_true_cfg:
+            # negative image is the same size as the original image, but all pixels are white
+            # negative_image = Image.new("RGB", (image.width, image.height), (255, 255, 255))
+
+            negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt(
+                image=prompt_image,
+                prompt=negative_prompt,
+                prompt_embeds=negative_prompt_embeds,
+                prompt_embeds_mask=negative_prompt_embeds_mask,
+                device=device,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+            )
+
+        # 4. Prepare latent variables
+        num_channels_latents = self.transformer.config.in_channels // 4
+        latents, image_latents = self.prepare_latents(
+            image,
+            batch_size * num_images_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+        img_shapes = [
+            [
+                (1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2),
+                (1, image_height // self.vae_scale_factor // 2, image_width // self.vae_scale_factor // 2),
+            ]
+        ] * batch_size
+
+        # 5. Prepare timesteps
+        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
+        image_seq_len = latents.shape[1]
+        mu = calculate_shift(
+            image_seq_len,
+            self.scheduler.config.get("base_image_seq_len", 256),
+            self.scheduler.config.get("max_image_seq_len", 4096),
+            self.scheduler.config.get("base_shift", 0.5),
+            self.scheduler.config.get("max_shift", 1.15),
+        )
+        timesteps, num_inference_steps = retrieve_timesteps(
+            self.scheduler,
+            num_inference_steps,
+            device,
+            sigmas=sigmas,
+            mu=mu,
+        )
+        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+        self._num_timesteps = len(timesteps)
+
+        # handle guidance
+        if self.transformer.config.guidance_embeds:
+            guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
+            guidance = guidance.expand(latents.shape[0])
+        else:
+            guidance = None
+
+        if self.attention_kwargs is None:
+            self._attention_kwargs = {}
+
+        txt_seq_lens = prompt_embeds_mask.sum(dim=1).tolist() if prompt_embeds_mask is not None else None
+        negative_txt_seq_lens = (
+            negative_prompt_embeds_mask.sum(dim=1).tolist() if negative_prompt_embeds_mask is not None else None
+        )
+
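An editorial sketch of the dynamic-shift interpolation performed by `calculate_shift` (assumed to follow the linear rule used elsewhere in diffusers; the constants mirror the scheduler defaults read above): `mu` grows linearly with the image token count between the base and max sequence lengths.

    def shift_for(seq_len, base_len=256, max_len=4096, base_shift=0.5, max_shift=1.15):
        m = (max_shift - base_shift) / (max_len - base_len)  # slope of the linear rule
        b = base_shift - m * base_len                        # intercept
        return seq_len * m + b

    print(shift_for(256), shift_for(4096))  # 0.5 at the base length, 1.15 at the max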
+        # 6. Denoising loop
+        self.scheduler.set_begin_index(0)
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                if self.interrupt:
+                    continue
+
+                self._current_timestep = t
+
+                latent_model_input = latents
+                if image_latents is not None:
+                    latent_model_input = torch.cat([latents, image_latents], dim=1)
+
+                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+                timestep = t.expand(latents.shape[0]).to(latents.dtype)
+                with self.transformer.cache_context("cond"):
+                    noise_pred = self.transformer(
+                        hidden_states=latent_model_input,
+                        timestep=timestep / 1000,
+                        guidance=guidance,
+                        encoder_hidden_states_mask=prompt_embeds_mask,
+                        encoder_hidden_states=prompt_embeds,
+                        img_shapes=img_shapes,
+                        txt_seq_lens=txt_seq_lens,
+                        attention_kwargs=self.attention_kwargs,
+                        return_dict=False,
+                    )[0]
+                    noise_pred = noise_pred[:, : latents.size(1)]
+
+                if do_true_cfg:
+                    with self.transformer.cache_context("uncond"):
+                        neg_noise_pred = self.transformer(
+                            hidden_states=latent_model_input,
+                            timestep=timestep / 1000,
+                            guidance=guidance,
+                            encoder_hidden_states_mask=negative_prompt_embeds_mask,
+                            encoder_hidden_states=negative_prompt_embeds,
+                            img_shapes=img_shapes,
+                            txt_seq_lens=negative_txt_seq_lens,
+                            attention_kwargs=self.attention_kwargs,
+                            return_dict=False,
+                        )[0]
+                        neg_noise_pred = neg_noise_pred[:, : latents.size(1)]
+                    comb_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)
+
+                    cond_norm = torch.norm(noise_pred, dim=-1, keepdim=True)
+                    noise_norm = torch.norm(comb_pred, dim=-1, keepdim=True)
+                    noise_pred = comb_pred * (cond_norm / noise_norm)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents_dtype = latents.dtype
+                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+                if latents.dtype != latents_dtype:
+                    if torch.backends.mps.is_available():
+                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                        latents = latents.to(latents_dtype)
+
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+
+                # update the progress bar at the end of each scheduler step
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+
+                if XLA_AVAILABLE:
+                    xm.mark_step()
+
+ if output_type == "latent":
860
+ image = latents
861
+ else:
862
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
863
+ latents = latents.to(self.vae.dtype)
864
+ latents_mean = (
865
+ torch.tensor(self.vae.config.latents_mean)
866
+ .view(1, self.vae.config.z_dim, 1, 1, 1)
867
+ .to(latents.device, latents.dtype)
868
+ )
869
+ latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
870
+ latents.device, latents.dtype
871
+ )
872
+ latents = latents / latents_std + latents_mean
873
+ image = self.vae.decode(latents, return_dict=False)[0][:, :, 0]
874
+ image = self.image_processor.postprocess(image, output_type=output_type)
875
+
876
+ # Offload all models
877
+ self.maybe_free_model_hooks()
878
+
879
+ if not return_dict:
880
+ return (image,)
881
+
882
+ return QwenImagePipelineOutput(images=image)
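Finally, a minimal end-to-end invocation of the pipeline added in this file (editorial sketch; the checkpoint id, file names, and prompt are illustrative):

    import torch
    from PIL import Image
    from diffusers import QwenImageEditPipeline

    pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16)
    pipe.to("cuda")

    source = Image.open("input.png").convert("RGB")
    result = pipe(
        image=source,
        prompt="replace the sky with a starry night",
        negative_prompt=" ",   # any negative prompt enables true CFG...
        true_cfg_scale=4.0,    # ...when combined with a scale > 1
        num_inference_steps=50,
        generator=torch.Generator("cuda").manual_seed(0),
    ).images[0]
    result.save("edited.png")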