diffusers 0.26.2__py3-none-any.whl → 0.27.0__py3-none-any.whl

Files changed (299)
  1. diffusers/__init__.py +20 -1
  2. diffusers/commands/__init__.py +1 -1
  3. diffusers/commands/diffusers_cli.py +1 -1
  4. diffusers/commands/env.py +1 -1
  5. diffusers/commands/fp16_safetensors.py +1 -1
  6. diffusers/configuration_utils.py +7 -3
  7. diffusers/dependency_versions_check.py +1 -1
  8. diffusers/dependency_versions_table.py +2 -2
  9. diffusers/experimental/rl/value_guided_sampling.py +1 -1
  10. diffusers/image_processor.py +110 -4
  11. diffusers/loaders/autoencoder.py +28 -8
  12. diffusers/loaders/controlnet.py +17 -8
  13. diffusers/loaders/ip_adapter.py +86 -23
  14. diffusers/loaders/lora.py +105 -310
  15. diffusers/loaders/lora_conversion_utils.py +1 -1
  16. diffusers/loaders/peft.py +1 -1
  17. diffusers/loaders/single_file.py +51 -12
  18. diffusers/loaders/single_file_utils.py +278 -49
  19. diffusers/loaders/textual_inversion.py +23 -4
  20. diffusers/loaders/unet.py +195 -41
  21. diffusers/loaders/utils.py +1 -1
  22. diffusers/models/__init__.py +3 -1
  23. diffusers/models/activations.py +9 -9
  24. diffusers/models/attention.py +26 -36
  25. diffusers/models/attention_flax.py +1 -1
  26. diffusers/models/attention_processor.py +171 -114
  27. diffusers/models/autoencoders/autoencoder_asym_kl.py +1 -1
  28. diffusers/models/autoencoders/autoencoder_kl.py +3 -1
  29. diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +1 -1
  30. diffusers/models/autoencoders/autoencoder_tiny.py +4 -2
  31. diffusers/models/autoencoders/consistency_decoder_vae.py +1 -1
  32. diffusers/models/autoencoders/vae.py +1 -1
  33. diffusers/models/controlnet.py +1 -1
  34. diffusers/models/controlnet_flax.py +1 -1
  35. diffusers/models/downsampling.py +8 -12
  36. diffusers/models/dual_transformer_2d.py +1 -1
  37. diffusers/models/embeddings.py +3 -4
  38. diffusers/models/embeddings_flax.py +1 -1
  39. diffusers/models/lora.py +33 -10
  40. diffusers/models/modeling_flax_pytorch_utils.py +1 -1
  41. diffusers/models/modeling_flax_utils.py +1 -1
  42. diffusers/models/modeling_pytorch_flax_utils.py +1 -1
  43. diffusers/models/modeling_utils.py +4 -6
  44. diffusers/models/normalization.py +1 -1
  45. diffusers/models/resnet.py +31 -58
  46. diffusers/models/resnet_flax.py +1 -1
  47. diffusers/models/t5_film_transformer.py +1 -1
  48. diffusers/models/transformer_2d.py +1 -1
  49. diffusers/models/transformer_temporal.py +1 -1
  50. diffusers/models/transformers/dual_transformer_2d.py +1 -1
  51. diffusers/models/transformers/t5_film_transformer.py +1 -1
  52. diffusers/models/transformers/transformer_2d.py +29 -31
  53. diffusers/models/transformers/transformer_temporal.py +1 -1
  54. diffusers/models/unet_1d.py +1 -1
  55. diffusers/models/unet_1d_blocks.py +1 -1
  56. diffusers/models/unet_2d.py +1 -1
  57. diffusers/models/unet_2d_blocks.py +1 -1
  58. diffusers/models/unet_2d_condition.py +1 -1
  59. diffusers/models/unets/__init__.py +1 -0
  60. diffusers/models/unets/unet_1d.py +1 -1
  61. diffusers/models/unets/unet_1d_blocks.py +1 -1
  62. diffusers/models/unets/unet_2d.py +4 -4
  63. diffusers/models/unets/unet_2d_blocks.py +238 -98
  64. diffusers/models/unets/unet_2d_blocks_flax.py +1 -1
  65. diffusers/models/unets/unet_2d_condition.py +420 -323
  66. diffusers/models/unets/unet_2d_condition_flax.py +21 -12
  67. diffusers/models/unets/unet_3d_blocks.py +50 -40
  68. diffusers/models/unets/unet_3d_condition.py +47 -8
  69. diffusers/models/unets/unet_i2vgen_xl.py +75 -30
  70. diffusers/models/unets/unet_kandinsky3.py +1 -1
  71. diffusers/models/unets/unet_motion_model.py +48 -8
  72. diffusers/models/unets/unet_spatio_temporal_condition.py +1 -1
  73. diffusers/models/unets/unet_stable_cascade.py +610 -0
  74. diffusers/models/unets/uvit_2d.py +1 -1
  75. diffusers/models/upsampling.py +10 -16
  76. diffusers/models/vae_flax.py +1 -1
  77. diffusers/models/vq_model.py +1 -1
  78. diffusers/optimization.py +1 -1
  79. diffusers/pipelines/__init__.py +26 -0
  80. diffusers/pipelines/amused/pipeline_amused.py +1 -1
  81. diffusers/pipelines/amused/pipeline_amused_img2img.py +1 -1
  82. diffusers/pipelines/amused/pipeline_amused_inpaint.py +1 -1
  83. diffusers/pipelines/animatediff/pipeline_animatediff.py +162 -417
  84. diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +165 -137
  85. diffusers/pipelines/animatediff/pipeline_output.py +7 -6
  86. diffusers/pipelines/audioldm/pipeline_audioldm.py +3 -19
  87. diffusers/pipelines/audioldm2/modeling_audioldm2.py +1 -1
  88. diffusers/pipelines/audioldm2/pipeline_audioldm2.py +3 -3
  89. diffusers/pipelines/auto_pipeline.py +7 -16
  90. diffusers/pipelines/blip_diffusion/blip_image_processing.py +1 -1
  91. diffusers/pipelines/blip_diffusion/modeling_blip2.py +1 -1
  92. diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py +2 -2
  93. diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +2 -2
  94. diffusers/pipelines/consistency_models/pipeline_consistency_models.py +1 -1
  95. diffusers/pipelines/controlnet/pipeline_controlnet.py +90 -90
  96. diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +2 -2
  97. diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +98 -90
  98. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py +92 -90
  99. diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py +145 -70
  100. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +126 -89
  101. diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +108 -96
  102. diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +2 -2
  103. diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +1 -1
  104. diffusers/pipelines/ddim/pipeline_ddim.py +1 -1
  105. diffusers/pipelines/ddpm/pipeline_ddpm.py +1 -1
  106. diffusers/pipelines/deepfloyd_if/pipeline_if.py +4 -4
  107. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py +4 -4
  108. diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +5 -5
  109. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py +4 -4
  110. diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py +5 -5
  111. diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py +5 -5
  112. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +10 -120
  113. diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +10 -91
  114. diffusers/pipelines/deprecated/audio_diffusion/mel.py +1 -1
  115. diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +1 -1
  116. diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +1 -1
  117. diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +1 -1
  118. diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +1 -1
  119. diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +1 -1
  120. diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py +1 -1
  121. diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py +1 -1
  122. diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py +1 -1
  123. diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +1 -1
  124. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +5 -4
  125. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +5 -4
  126. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +7 -22
  127. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +5 -39
  128. diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +5 -5
  129. diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +1 -1
  130. diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py +31 -22
  131. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +1 -1
  132. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +1 -1
  133. diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +1 -2
  134. diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +1 -1
  135. diffusers/pipelines/dit/pipeline_dit.py +1 -1
  136. diffusers/pipelines/free_init_utils.py +184 -0
  137. diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +22 -104
  138. diffusers/pipelines/kandinsky/pipeline_kandinsky.py +1 -1
  139. diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py +1 -1
  140. diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py +1 -1
  141. diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +2 -2
  142. diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +1 -1
  143. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +1 -1
  144. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +1 -1
  145. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +1 -1
  146. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +1 -1
  147. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +1 -1
  148. diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +2 -2
  149. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +104 -93
  150. diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py +112 -74
  151. diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +1 -1
  152. diffusers/pipelines/ledits_pp/__init__.py +55 -0
  153. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py +1505 -0
  154. diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py +1797 -0
  155. diffusers/pipelines/ledits_pp/pipeline_output.py +43 -0
  156. diffusers/pipelines/musicldm/pipeline_musicldm.py +3 -19
  157. diffusers/pipelines/onnx_utils.py +1 -1
  158. diffusers/pipelines/paint_by_example/image_encoder.py +1 -1
  159. diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +3 -3
  160. diffusers/pipelines/pia/pipeline_pia.py +168 -327
  161. diffusers/pipelines/pipeline_flax_utils.py +1 -1
  162. diffusers/pipelines/pipeline_loading_utils.py +508 -0
  163. diffusers/pipelines/pipeline_utils.py +188 -534
  164. diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +56 -10
  165. diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +3 -3
  166. diffusers/pipelines/shap_e/camera.py +1 -1
  167. diffusers/pipelines/shap_e/pipeline_shap_e.py +1 -1
  168. diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +1 -1
  169. diffusers/pipelines/shap_e/renderer.py +1 -1
  170. diffusers/pipelines/stable_cascade/__init__.py +50 -0
  171. diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +482 -0
  172. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +311 -0
  173. diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py +638 -0
  174. diffusers/pipelines/stable_diffusion/clip_image_project_model.py +1 -1
  175. diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +4 -1
  176. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +1 -1
  177. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +2 -2
  178. diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py +1 -1
  179. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +1 -1
  180. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +1 -1
  181. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +1 -1
  182. diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py +1 -1
  183. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +90 -146
  184. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +5 -4
  185. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +4 -32
  186. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +92 -119
  187. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +92 -119
  188. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +13 -59
  189. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +3 -31
  190. diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +5 -33
  191. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +5 -21
  192. diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py +7 -21
  193. diffusers/pipelines/stable_diffusion/safety_checker.py +1 -1
  194. diffusers/pipelines/stable_diffusion/safety_checker_flax.py +1 -1
  195. diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py +1 -1
  196. diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +5 -21
  197. diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +9 -38
  198. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +5 -34
  199. diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +6 -35
  200. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py +7 -6
  201. diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py +4 -124
  202. diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +282 -80
  203. diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +94 -46
  204. diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +3 -3
  205. diffusers/pipelines/stable_diffusion_safe/safety_checker.py +1 -1
  206. diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +6 -22
  207. diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py +1 -1
  208. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +96 -148
  209. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +98 -154
  210. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +98 -153
  211. diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py +25 -87
  212. diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +89 -80
  213. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +5 -49
  214. diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py +80 -88
  215. diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +8 -6
  216. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +15 -86
  217. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +20 -93
  218. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +5 -5
  219. diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +3 -19
  220. diffusers/pipelines/unclip/pipeline_unclip.py +1 -1
  221. diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +1 -1
  222. diffusers/pipelines/unclip/text_proj.py +1 -1
  223. diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +35 -35
  224. diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +1 -1
  225. diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +4 -21
  226. diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py +2 -2
  227. diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +4 -5
  228. diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +8 -8
  229. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +1 -1
  230. diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +2 -2
  231. diffusers/schedulers/__init__.py +7 -1
  232. diffusers/schedulers/deprecated/scheduling_karras_ve.py +1 -1
  233. diffusers/schedulers/deprecated/scheduling_sde_vp.py +1 -1
  234. diffusers/schedulers/scheduling_consistency_models.py +42 -19
  235. diffusers/schedulers/scheduling_ddim.py +2 -4
  236. diffusers/schedulers/scheduling_ddim_flax.py +13 -5
  237. diffusers/schedulers/scheduling_ddim_inverse.py +2 -4
  238. diffusers/schedulers/scheduling_ddim_parallel.py +2 -4
  239. diffusers/schedulers/scheduling_ddpm.py +2 -4
  240. diffusers/schedulers/scheduling_ddpm_flax.py +1 -1
  241. diffusers/schedulers/scheduling_ddpm_parallel.py +2 -4
  242. diffusers/schedulers/scheduling_ddpm_wuerstchen.py +1 -1
  243. diffusers/schedulers/scheduling_deis_multistep.py +46 -19
  244. diffusers/schedulers/scheduling_dpmsolver_multistep.py +107 -21
  245. diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +1 -1
  246. diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py +9 -7
  247. diffusers/schedulers/scheduling_dpmsolver_sde.py +35 -35
  248. diffusers/schedulers/scheduling_dpmsolver_singlestep.py +52 -21
  249. diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py +683 -0
  250. diffusers/schedulers/scheduling_edm_euler.py +381 -0
  251. diffusers/schedulers/scheduling_euler_ancestral_discrete.py +43 -15
  252. diffusers/schedulers/scheduling_euler_discrete.py +42 -17
  253. diffusers/schedulers/scheduling_euler_discrete_flax.py +1 -1
  254. diffusers/schedulers/scheduling_heun_discrete.py +35 -35
  255. diffusers/schedulers/scheduling_ipndm.py +37 -11
  256. diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +44 -44
  257. diffusers/schedulers/scheduling_k_dpm_2_discrete.py +44 -44
  258. diffusers/schedulers/scheduling_karras_ve_flax.py +1 -1
  259. diffusers/schedulers/scheduling_lcm.py +38 -14
  260. diffusers/schedulers/scheduling_lms_discrete.py +43 -15
  261. diffusers/schedulers/scheduling_lms_discrete_flax.py +1 -1
  262. diffusers/schedulers/scheduling_pndm.py +2 -4
  263. diffusers/schedulers/scheduling_pndm_flax.py +2 -4
  264. diffusers/schedulers/scheduling_repaint.py +1 -1
  265. diffusers/schedulers/scheduling_sasolver.py +41 -9
  266. diffusers/schedulers/scheduling_sde_ve.py +1 -1
  267. diffusers/schedulers/scheduling_sde_ve_flax.py +1 -1
  268. diffusers/schedulers/scheduling_tcd.py +686 -0
  269. diffusers/schedulers/scheduling_unclip.py +1 -1
  270. diffusers/schedulers/scheduling_unipc_multistep.py +46 -19
  271. diffusers/schedulers/scheduling_utils.py +2 -1
  272. diffusers/schedulers/scheduling_utils_flax.py +1 -1
  273. diffusers/schedulers/scheduling_vq_diffusion.py +1 -1
  274. diffusers/training_utils.py +9 -2
  275. diffusers/utils/__init__.py +2 -1
  276. diffusers/utils/accelerate_utils.py +1 -1
  277. diffusers/utils/constants.py +1 -1
  278. diffusers/utils/doc_utils.py +1 -1
  279. diffusers/utils/dummy_pt_objects.py +60 -0
  280. diffusers/utils/dummy_torch_and_transformers_objects.py +75 -0
  281. diffusers/utils/dynamic_modules_utils.py +1 -1
  282. diffusers/utils/export_utils.py +3 -3
  283. diffusers/utils/hub_utils.py +60 -16
  284. diffusers/utils/import_utils.py +15 -1
  285. diffusers/utils/loading_utils.py +2 -0
  286. diffusers/utils/logging.py +1 -1
  287. diffusers/utils/model_card_template.md +24 -0
  288. diffusers/utils/outputs.py +14 -7
  289. diffusers/utils/peft_utils.py +1 -1
  290. diffusers/utils/state_dict_utils.py +1 -1
  291. diffusers/utils/testing_utils.py +2 -0
  292. diffusers/utils/torch_utils.py +1 -1
  293. {diffusers-0.26.2.dist-info → diffusers-0.27.0.dist-info}/METADATA +5 -5
  294. diffusers-0.27.0.dist-info/RECORD +399 -0
  295. diffusers-0.26.2.dist-info/RECORD +0 -384
  296. {diffusers-0.26.2.dist-info → diffusers-0.27.0.dist-info}/LICENSE +0 -0
  297. {diffusers-0.26.2.dist-info → diffusers-0.27.0.dist-info}/WHEEL +0 -0
  298. {diffusers-0.26.2.dist-info → diffusers-0.27.0.dist-info}/entry_points.txt +0 -0
  299. {diffusers-0.26.2.dist-info → diffusers-0.27.0.dist-info}/top_level.txt +0 -0
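Several of the files above are brand new in 0.27.0: the Stable Cascade UNet and pipelines, the LEDITS++ pipelines, the FreeInit utilities, and the TCD/EDM schedulers. A quick, hedged way to check which of the corresponding classes your installed wheel actually exports; the class names in the tuple are inferred from the new file names above and should be treated as assumptions, with the loop reporting what is really there:

    import diffusers

    print(diffusers.__version__)  # expect "0.27.0" after upgrading

    # Candidate additions inferred from the new files listed above; let the
    # loop tell you which ones this wheel really exposes.
    for name in ("StableCascadePriorPipeline", "TCDScheduler", "EDMEulerScheduler"):
        print(f"{name}: {'available' if hasattr(diffusers, name) else 'not exported'}")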
@@ -1,4 +1,4 @@
-# Copyright 2023 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
+# Copyright 2024 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -292,7 +292,9 @@ class AutoencoderTiny(ModelMixin, ConfigMixin):
         self, x: torch.FloatTensor, return_dict: bool = True
     ) -> Union[AutoencoderTinyOutput, Tuple[torch.FloatTensor]]:
         if self.use_slicing and x.shape[0] > 1:
-            output = [self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x) for x_slice in x.split(1)]
+            output = [
+                self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x_slice) for x_slice in x.split(1)
+            ]
             output = torch.cat(output)
         else:
             output = self._tiled_encode(x) if self.use_tiling else self.encoder(x)
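The second hunk above is a genuine bug fix: with use_slicing enabled and use_tiling disabled, 0.26.2 ran the encoder on the full batch x inside the comprehension instead of on each x_slice, repeating the full-batch result once per slice and defeating the memory saving. A minimal sketch of the corrected per-slice pattern, with a stand-in encoder so it runs outside diffusers (the names here are illustrative, not the library's API):

    import torch
    import torch.nn as nn

    # Stand-in "encoder": any module mapping (1, C, H, W) -> (1, C', H', W').
    encoder = nn.Conv2d(3, 4, kernel_size=3, padding=1)

    def encode_sliced(x: torch.Tensor) -> torch.Tensor:
        # Encode one sample at a time to cap peak memory, then concatenate.
        # The fix is passing each x_slice to the encoder, not the whole batch.
        return torch.cat([encoder(x_slice) for x_slice in x.split(1)])

    batch = torch.randn(4, 3, 32, 32)
    assert torch.allclose(encode_sliced(batch), encoder(batch), atol=1e-6)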
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F

-from ..utils import USE_PEFT_BACKEND
-from .lora import LoRACompatibleConv
+from ..utils import deprecate
 from .normalization import RMSNorm
 from .upsampling import upfirdn2d_native

@@ -103,7 +102,7 @@ class Downsample2D(nn.Module):
         self.padding = padding
         stride = 2
         self.name = name
-        conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv
+        conv_cls = nn.Conv2d

         if norm_type == "ln_norm":
             self.norm = nn.LayerNorm(channels, eps, elementwise_affine)
@@ -131,7 +130,10 @@ class Downsample2D(nn.Module):
         else:
             self.conv = conv

-    def forward(self, hidden_states: torch.FloatTensor, scale: float = 1.0) -> torch.FloatTensor:
+    def forward(self, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
+        if len(args) > 0 or kwargs.get("scale", None) is not None:
+            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
+            deprecate("scale", "1.0.0", deprecation_message)
         assert hidden_states.shape[1] == self.channels

         if self.norm is not None:
@@ -143,13 +145,7 @@

         assert hidden_states.shape[1] == self.channels

-        if not USE_PEFT_BACKEND:
-            if isinstance(self.conv, LoRACompatibleConv):
-                hidden_states = self.conv(hidden_states, scale)
-            else:
-                hidden_states = self.conv(hidden_states)
-        else:
-            hidden_states = self.conv(hidden_states)
+        hidden_states = self.conv(hidden_states)

         return hidden_states
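Downsample2D (and, further below, the resnet blocks) now accepts and ignores a stray scale argument instead of consuming it, emitting a deprecation warning through the library's deprecate helper. A minimal sketch of that accept-and-warn pattern, using the standard warnings module as a stand-in so the snippet runs without diffusers; the class and parameter names are illustrative:

    import warnings

    import torch
    import torch.nn as nn

    class Downsample2DSketch(nn.Module):
        """Illustrative reduction of the 0.27.0 pattern, not the library class."""

        def __init__(self, channels: int):
            super().__init__()
            self.conv = nn.Conv2d(channels, channels, kernel_size=3, stride=2, padding=1)

        def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor:
            # Positional extras or an explicit `scale=` keyword only trigger a
            # warning; the convolution itself no longer sees them.
            if len(args) > 0 or kwargs.get("scale", None) is not None:
                warnings.warn("The `scale` argument is deprecated and ignored.", FutureWarning)
            return self.conv(hidden_states)

    out = Downsample2DSketch(8)(torch.randn(1, 8, 16, 16), scale=1.0)  # warns, still works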
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,10 +18,9 @@ import numpy as np
 import torch
 from torch import nn

-from ..utils import USE_PEFT_BACKEND, deprecate
+from ..utils import deprecate
 from .activations import get_activation
 from .attention_processor import Attention
-from .lora import LoRACompatibleLinear


 def get_timestep_embedding(
@@ -200,7 +199,7 @@ class TimestepEmbedding(nn.Module):
         sample_proj_bias=True,
     ):
         super().__init__()
-        linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear
+        linear_cls = nn.Linear

         self.linear_1 = linear_cls(in_channels, time_embed_dim, sample_proj_bias)

@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diffusers/models/lora.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@ import torch
 import torch.nn.functional as F
 from torch import nn

-from ..utils import logging
+from ..utils import deprecate, logging
 from ..utils.import_utils import is_transformers_available


@@ -82,6 +82,9 @@ def adjust_lora_scale_text_encoder(text_encoder, lora_scale: float = 1.0):

 class PatchedLoraProjection(torch.nn.Module):
     def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None):
+        deprecation_message = "Use of `PatchedLoraProjection` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
+        deprecate("PatchedLoraProjection", "1.0.0", deprecation_message)
+
         super().__init__()
         from ..models.lora import LoRALinearLayer

@@ -201,6 +204,9 @@ class LoRALinearLayer(nn.Module):
     ):
         super().__init__()

+        deprecation_message = "Use of `LoRALinearLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
+        deprecate("LoRALinearLayer", "1.0.0", deprecation_message)
+
         self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)
         self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)
         # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
@@ -261,6 +267,9 @@ class LoRAConv2dLayer(nn.Module):
     ):
         super().__init__()

+        deprecation_message = "Use of `LoRAConv2dLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
+        deprecate("LoRAConv2dLayer", "1.0.0", deprecation_message)
+
         self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
         # according to the official kohya_ss trainer kernel_size are always fixed for the up layer
         # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129
@@ -293,10 +302,16 @@ class LoRACompatibleConv(nn.Conv2d):
     """

     def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs):
+        deprecation_message = "Use of `LoRACompatibleConv` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
+        deprecate("LoRACompatibleConv", "1.0.0", deprecation_message)
+
         super().__init__(*args, **kwargs)
         self.lora_layer = lora_layer

     def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):
+        deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
+        deprecate("set_lora_layer", "1.0.0", deprecation_message)
+
         self.lora_layer = lora_layer

     def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
@@ -352,16 +367,19 @@
         self.w_down = None

     def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
+        if self.padding_mode != "zeros":
+            hidden_states = F.pad(hidden_states, self._reversed_padding_repeated_twice, mode=self.padding_mode)
+            padding = (0, 0)
+        else:
+            padding = self.padding
+
+        original_outputs = F.conv2d(
+            hidden_states, self.weight, self.bias, self.stride, padding, self.dilation, self.groups
+        )
+
         if self.lora_layer is None:
-            # make sure to the functional Conv2D function as otherwise torch.compile's graph will break
-            # see: https://github.com/huggingface/diffusers/pull/4315
-            return F.conv2d(
-                hidden_states, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups
-            )
+            return original_outputs
         else:
-            original_outputs = F.conv2d(
-                hidden_states, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups
-            )
             return original_outputs + (scale * self.lora_layer(hidden_states))
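Beyond the deprecation notices, the forward above now honours non-zero padding modes by padding manually before calling the functional conv, mirroring what torch.nn.Conv2d does internally. A minimal sketch of that padding dance on a plain nn.Conv2d subclass (assumption: _reversed_padding_repeated_twice is the reversed, duplicated padding tuple PyTorch precomputes on every conv module):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class PaddedConv2d(nn.Conv2d):
        """Sketch of the new forward: pad by hand when padding_mode != 'zeros'."""

        def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
            if self.padding_mode != "zeros":
                # Apply reflect/replicate/circular padding explicitly, then run
                # the convolution with no padding of its own.
                hidden_states = F.pad(
                    hidden_states, self._reversed_padding_repeated_twice, mode=self.padding_mode
                )
                padding = (0, 0)
            else:
                padding = self.padding
            return F.conv2d(
                hidden_states, self.weight, self.bias, self.stride, padding, self.dilation, self.groups
            )

    conv = PaddedConv2d(3, 3, kernel_size=3, padding=1, padding_mode="reflect")
    reference = nn.Conv2d(3, 3, kernel_size=3, padding=1, padding_mode="reflect")
    reference.load_state_dict(conv.state_dict())
    x = torch.randn(1, 3, 8, 8)
    assert torch.allclose(conv(x), reference(x), atol=1e-6)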
@@ -371,10 +389,15 @@ class LoRACompatibleLinear(nn.Linear):
     """

     def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):
+        deprecation_message = "Use of `LoRACompatibleLinear` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
+        deprecate("LoRACompatibleLinear", "1.0.0", deprecation_message)
+
         super().__init__(*args, **kwargs)
         self.lora_layer = lora_layer

     def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):
+        deprecation_message = "Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
+        deprecate("set_lora_layer", "1.0.0", deprecation_message)
         self.lora_layer = lora_layer

     def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
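Every deprecation message in this file points to the same migration: install PEFT and let diffusers inject LoRA weights as PEFT adapters instead of relying on these LoRACompatible* wrappers. A hedged usage sketch of that path (the checkpoint id and LoRA path below are placeholders, and the download needs network access):

    # pip install peft
    import torch
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",  # placeholder checkpoint id
        torch_dtype=torch.float16,
    )

    # With PEFT available, this injects adapters rather than routing through the
    # deprecated LoRACompatibleConv/LoRACompatibleLinear layers shown above.
    pipe.load_lora_weights("path/to/lora.safetensors")  # placeholder path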
@@ -1,5 +1,5 @@
 # coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
+# Copyright 2024 The HuggingFace Inc. team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 # coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
+# Copyright 2024 The HuggingFace Inc. team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 # coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
+# Copyright 2024 The HuggingFace Inc. team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 # coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
+# Copyright 2024 The HuggingFace Inc. team.
 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -124,9 +124,7 @@ def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[
             ) from e
         except (UnicodeDecodeError, ValueError):
             raise OSError(
-                f"Unable to load weights from checkpoint file for '{checkpoint_file}' "
-                f"at '{checkpoint_file}'. "
-                "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
+                f"Unable to load weights from checkpoint file for '{checkpoint_file}' " f"at '{checkpoint_file}'. "
             )


@@ -679,7 +677,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
                     unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

             if len(unexpected_keys) > 0:
-                logger.warn(
+                logger.warning(
                     f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
                 )

@@ -707,7 +705,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
                     # the weights so we don't have to do this again.

                     if "'Attention' object has no attribute" in str(e):
-                        logger.warn(
+                        logger.warning(
                             f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}"
                             " was saved with deprecated attention block weight names. We will load it with the deprecated attention block"
                             " names and convert them on the fly to the new attention block format. Please re-save the model after this conversion,"
@@ -1,5 +1,5 @@
 # coding=utf-8
-# Copyright 2023 HuggingFace Inc.
+# Copyright 2024 HuggingFace Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-# `TemporalConvLayer` Copyright 2023 Alibaba DAMO-VILAB, The ModelScope Team and The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+# `TemporalConvLayer` Copyright 2024 Alibaba DAMO-VILAB, The ModelScope Team and The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F

-from ..utils import USE_PEFT_BACKEND
+from ..utils import deprecate
 from .activations import get_activation
 from .attention_processor import SpatialNorm
 from .downsampling import ( # noqa
@@ -30,7 +30,6 @@ from .downsampling import ( # noqa
     KDownsample2D,
     downsample_2d,
 )
-from .lora import LoRACompatibleConv, LoRACompatibleLinear
 from .normalization import AdaGroupNorm
 from .upsampling import ( # noqa
     FirUpsample2D,
@@ -102,7 +101,7 @@ class ResnetBlockCondNorm2D(nn.Module):
         self.output_scale_factor = output_scale_factor
         self.time_embedding_norm = time_embedding_norm

-        conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv
+        conv_cls = nn.Conv2d

         if groups_out is None:
             groups_out = groups
@@ -149,12 +148,11 @@ class ResnetBlockCondNorm2D(nn.Module):
             bias=conv_shortcut_bias,
         )

-    def forward(
-        self,
-        input_tensor: torch.FloatTensor,
-        temb: torch.FloatTensor,
-        scale: float = 1.0,
-    ) -> torch.FloatTensor:
+    def forward(self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
+        if len(args) > 0 or kwargs.get("scale", None) is not None:
+            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
+            deprecate("scale", "1.0.0", deprecation_message)
+
         hidden_states = input_tensor

         hidden_states = self.norm1(hidden_states, temb)
@@ -166,26 +164,24 @@ class ResnetBlockCondNorm2D(nn.Module):
             if hidden_states.shape[0] >= 64:
                 input_tensor = input_tensor.contiguous()
                 hidden_states = hidden_states.contiguous()
-            input_tensor = self.upsample(input_tensor, scale=scale)
-            hidden_states = self.upsample(hidden_states, scale=scale)
+            input_tensor = self.upsample(input_tensor)
+            hidden_states = self.upsample(hidden_states)

         elif self.downsample is not None:
-            input_tensor = self.downsample(input_tensor, scale=scale)
-            hidden_states = self.downsample(hidden_states, scale=scale)
+            input_tensor = self.downsample(input_tensor)
+            hidden_states = self.downsample(hidden_states)

-        hidden_states = self.conv1(hidden_states, scale) if not USE_PEFT_BACKEND else self.conv1(hidden_states)
+        hidden_states = self.conv1(hidden_states)

         hidden_states = self.norm2(hidden_states, temb)

         hidden_states = self.nonlinearity(hidden_states)

         hidden_states = self.dropout(hidden_states)
-        hidden_states = self.conv2(hidden_states, scale) if not USE_PEFT_BACKEND else self.conv2(hidden_states)
+        hidden_states = self.conv2(hidden_states)

         if self.conv_shortcut is not None:
-            input_tensor = (
-                self.conv_shortcut(input_tensor, scale) if not USE_PEFT_BACKEND else self.conv_shortcut(input_tensor)
-            )
+            input_tensor = self.conv_shortcut(input_tensor)

         output_tensor = (input_tensor + hidden_states) / self.output_scale_factor

@@ -267,8 +263,8 @@ class ResnetBlock2D(nn.Module):
         self.time_embedding_norm = time_embedding_norm
         self.skip_time_act = skip_time_act

-        linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear
-        conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv
+        linear_cls = nn.Linear
+        conv_cls = nn.Conv2d

         if groups_out is None:
             groups_out = groups
@@ -326,12 +322,11 @@ class ResnetBlock2D(nn.Module):
             bias=conv_shortcut_bias,
         )

-    def forward(
-        self,
-        input_tensor: torch.FloatTensor,
-        temb: torch.FloatTensor,
-        scale: float = 1.0,
-    ) -> torch.FloatTensor:
+    def forward(self, input_tensor: torch.FloatTensor, temb: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
+        if len(args) > 0 or kwargs.get("scale", None) is not None:
+            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
+            deprecate("scale", "1.0.0", deprecation_message)
+
         hidden_states = input_tensor

         hidden_states = self.norm1(hidden_states)
@@ -342,38 +337,18 @@ class ResnetBlock2D(nn.Module):
             if hidden_states.shape[0] >= 64:
                 input_tensor = input_tensor.contiguous()
                 hidden_states = hidden_states.contiguous()
-            input_tensor = (
-                self.upsample(input_tensor, scale=scale)
-                if isinstance(self.upsample, Upsample2D)
-                else self.upsample(input_tensor)
-            )
-            hidden_states = (
-                self.upsample(hidden_states, scale=scale)
-                if isinstance(self.upsample, Upsample2D)
-                else self.upsample(hidden_states)
-            )
+            input_tensor = self.upsample(input_tensor)
+            hidden_states = self.upsample(hidden_states)
         elif self.downsample is not None:
-            input_tensor = (
-                self.downsample(input_tensor, scale=scale)
-                if isinstance(self.downsample, Downsample2D)
-                else self.downsample(input_tensor)
-            )
-            hidden_states = (
-                self.downsample(hidden_states, scale=scale)
-                if isinstance(self.downsample, Downsample2D)
-                else self.downsample(hidden_states)
-            )
+            input_tensor = self.downsample(input_tensor)
+            hidden_states = self.downsample(hidden_states)

-        hidden_states = self.conv1(hidden_states, scale) if not USE_PEFT_BACKEND else self.conv1(hidden_states)
+        hidden_states = self.conv1(hidden_states)

         if self.time_emb_proj is not None:
             if not self.skip_time_act:
                 temb = self.nonlinearity(temb)
-            temb = (
-                self.time_emb_proj(temb, scale)[:, :, None, None]
-                if not USE_PEFT_BACKEND
-                else self.time_emb_proj(temb)[:, :, None, None]
-            )
+            temb = self.time_emb_proj(temb)[:, :, None, None]

         if self.time_embedding_norm == "default":
             if temb is not None:
@@ -393,12 +368,10 @@ class ResnetBlock2D(nn.Module):
         hidden_states = self.nonlinearity(hidden_states)

         hidden_states = self.dropout(hidden_states)
-        hidden_states = self.conv2(hidden_states, scale) if not USE_PEFT_BACKEND else self.conv2(hidden_states)
+        hidden_states = self.conv2(hidden_states)

         if self.conv_shortcut is not None:
-            input_tensor = (
-                self.conv_shortcut(input_tensor, scale) if not USE_PEFT_BACKEND else self.conv_shortcut(input_tensor)
-            )
+            input_tensor = self.conv_shortcut(input_tensor)

         output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
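With the per-block scale arguments gone from ResnetBlock2D, Downsample2D, and the related blocks, LoRA strength is supplied at the pipeline call site, as the deprecation message suggests. A short, hedged usage sketch (placeholder model id and LoRA path again):

    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # placeholder id
    pipe.load_lora_weights("path/to/lora.safetensors")  # placeholder path

    # `scale` travels through cross_attention_kwargs rather than through the
    # block-level forward() arguments deprecated in this release.
    image = pipe("a watercolor fox", cross_attention_kwargs={"scale": 0.7}).images[0]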
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.