diffusers 0.35.1__py3-none-any.whl → 0.35.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
diffusers/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "0.35.1"
+ __version__ = "0.35.2"
 
  from typing import TYPE_CHECKING
 
diffusers/models/attention_dispatch.py CHANGED
@@ -110,6 +110,27 @@ if _CAN_USE_XFORMERS_ATTN:
  else:
      xops = None
 
+ # Version guard for PyTorch compatibility - custom_op was added in PyTorch 2.4
+ if torch.__version__ >= "2.4.0":
+     _custom_op = torch.library.custom_op
+     _register_fake = torch.library.register_fake
+ else:
+
+     def custom_op_no_op(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
+         def wrap(func):
+             return func
+
+         return wrap if fn is None else fn
+
+     def register_fake_no_op(op, fn=None, /, *, lib=None, _stacklevel=1):
+         def wrap(func):
+             return func
+
+         return wrap if fn is None else fn
+
+     _custom_op = custom_op_no_op
+     _register_fake = register_fake_no_op
+
 
  logger = get_logger(__name__)  # pylint: disable=invalid-name
 
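The fallback branch above only matters on PyTorch releases older than 2.4, where `torch.library.custom_op` and `torch.library.register_fake` do not exist. A minimal sketch of how the no-op fallback behaves (illustration only, not part of the wheel): the decorator hands the function back unchanged, so the flash-attention wrappers stay plain Python callables instead of registered library ops.

```python
# Illustration only: the no-op fallback returns the decorated function unchanged,
# so on PyTorch < 2.4 nothing is registered with torch.library.
def custom_op_no_op(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
    def wrap(func):
        return func

    return wrap if fn is None else fn


@custom_op_no_op("demo::identity", mutates_args=())
def identity(x):
    return x


assert identity(3) == 3  # behaves exactly like the undecorated function
```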
@@ -473,12 +494,11 @@ def _flex_attention_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
 
  # ===== torch op registrations =====
  # Registrations are required for fullgraph tracing compatibility
-
-
- # TODO: library.custom_op and register_fake probably need version guards?
  # TODO: this is only required because the beta release FA3 does not have it. There is a PR adding
  # this but it was never merged: https://github.com/Dao-AILab/flash-attention/pull/1590
- @torch.library.custom_op("flash_attn_3::_flash_attn_forward", mutates_args=(), device_types="cuda")
+
+
+ @_custom_op("flash_attn_3::_flash_attn_forward", mutates_args=(), device_types="cuda")
  def _wrapped_flash_attn_3_original(
      query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
  ) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -487,7 +507,7 @@ def _wrapped_flash_attn_3_original(
      return out, lse
 
 
- @torch.library.register_fake("flash_attn_3::_flash_attn_forward")
+ @_register_fake("flash_attn_3::_flash_attn_forward")
  def _(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
      batch_size, seq_len, num_heads, head_dim = query.shape
      lse_shape = (batch_size, seq_len, num_heads)
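For context, on PyTorch 2.4+ `_custom_op` and `_register_fake` resolve to the real `torch.library` APIs, so the two decorators above register an eager CUDA implementation plus a shape-only "fake" implementation that lets `torch.compile` trace the op without running the kernel. A self-contained sketch of the same pairing, using a hypothetical `demo::scale` op and assuming PyTorch >= 2.4:

```python
import torch

# Hypothetical op used only to illustrate the custom_op / register_fake pairing.
@torch.library.custom_op("demo::scale", mutates_args=())
def scale(x: torch.Tensor, factor: float) -> torch.Tensor:
    return x * factor


@torch.library.register_fake("demo::scale")
def _(x, factor):
    # Only shapes/dtypes matter during tracing; no real computation happens here.
    return torch.empty_like(x)


print(scale(torch.ones(3), 2.0))  # an eager call still uses the real implementation
```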
diffusers/models/transformers/transformer_ltx.py CHANGED
@@ -350,7 +350,9 @@ class LTXVideoTransformerBlock(nn.Module):
          norm_hidden_states = self.norm1(hidden_states)
 
          num_ada_params = self.scale_shift_table.shape[0]
-         ada_values = self.scale_shift_table[None, None] + temb.reshape(batch_size, temb.size(1), num_ada_params, -1)
+         ada_values = self.scale_shift_table[None, None].to(temb.device) + temb.reshape(
+             batch_size, temb.size(1), num_ada_params, -1
+         )
          shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_values.unbind(dim=2)
          norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
 
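The `.to(temb.device)` calls in this hunk and in the Wan / Wan-VACE hunks below all address the same failure mode: with CPU offloading or multi-GPU dispatch, `scale_shift_table` can sit on a different device than the incoming `temb`, and adding the two then raises a device-mismatch error. A rough standalone sketch of the pattern, with made-up shapes rather than the actual model code:

```python
import torch

# Made-up sizes: 6 modulation parameters of width 8, batch of 2, 4 tokens.
scale_shift_table = torch.nn.Parameter(torch.randn(6, 8))  # may still sit on another device
temb = torch.randn(2, 4, 6 * 8)                            # activation on the runtime device

# Moving the table to temb.device first keeps the addition on a single device.
ada_values = scale_shift_table[None, None].to(temb.device) + temb.reshape(2, 4, 6, -1)
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_values.unbind(dim=2)
```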
diffusers/models/transformers/transformer_wan.py CHANGED
@@ -665,12 +665,12 @@ class WanTransformer3DModel(
          # 5. Output norm, projection & unpatchify
          if temb.ndim == 3:
              # batch_size, seq_len, inner_dim (wan 2.2 ti2v)
-             shift, scale = (self.scale_shift_table.unsqueeze(0) + temb.unsqueeze(2)).chunk(2, dim=2)
+             shift, scale = (self.scale_shift_table.unsqueeze(0).to(temb.device) + temb.unsqueeze(2)).chunk(2, dim=2)
              shift = shift.squeeze(2)
              scale = scale.squeeze(2)
          else:
              # batch_size, inner_dim
-             shift, scale = (self.scale_shift_table + temb.unsqueeze(1)).chunk(2, dim=1)
+             shift, scale = (self.scale_shift_table.to(temb.device) + temb.unsqueeze(1)).chunk(2, dim=1)
 
          # Move the shift and scale tensors to the same device as hidden_states.
          # When using multi-GPU inference via accelerate these will be on the
diffusers/models/transformers/transformer_wan_vace.py CHANGED
@@ -103,7 +103,7 @@ class WanVACETransformerBlock(nn.Module):
          control_hidden_states = control_hidden_states + hidden_states
 
          shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = (
-             self.scale_shift_table + temb.float()
+             self.scale_shift_table.to(temb.device) + temb.float()
          ).chunk(6, dim=1)
 
          # 1. Self-attention
@@ -359,7 +359,7 @@ class WanVACETransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromO
          hidden_states = hidden_states + control_hint * scale
 
          # 6. Output norm, projection & unpatchify
-         shift, scale = (self.scale_shift_table + temb.unsqueeze(1)).chunk(2, dim=1)
+         shift, scale = (self.scale_shift_table.to(temb.device) + temb.unsqueeze(1)).chunk(2, dim=1)
 
          # Move the shift and scale tensors to the same device as hidden_states.
          # When using multi-GPU inference via accelerate these will be on the
diffusers/pipelines/pipeline_loading_utils.py CHANGED
@@ -48,10 +48,12 @@ from .transformers_loading_utils import _load_tokenizer_from_dduf, _load_transfo
  if is_transformers_available():
      import transformers
      from transformers import PreTrainedModel, PreTrainedTokenizerBase
-     from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME
      from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME
      from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME
 
+     if is_transformers_version("<=", "4.56.2"):
+         from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME
+
  if is_accelerate_available():
      import accelerate
      from accelerate import dispatch_model
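The `is_transformers_version("<=", "4.56.2")` guard, here and in the three weight-name hunks below, appears to account for newer transformers releases no longer shipping the Flax weight-name constant, so an unconditional import would fail at import time. Roughly equivalent logic written against `packaging` directly, as a sketch (diffusers itself uses its own `is_transformers_version` helper):

```python
import transformers
from packaging import version

# Only look for Flax weight files on transformers versions that still ship the constant.
if version.parse(transformers.__version__) <= version.parse("4.56.2"):
    from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME
else:
    TRANSFORMERS_FLAX_WEIGHTS_NAME = None  # Flax weights are simply not considered
```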
@@ -112,7 +114,9 @@ def is_safetensors_compatible(filenames, passed_components=None, folder_names=No
      ]
 
      if is_transformers_available():
-         weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]
+         weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME]
+         if is_transformers_version("<=", "4.56.2"):
+             weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME]
 
      # model_pytorch, diffusion_model_pytorch, ...
      weight_prefixes = [w.split(".")[0] for w in weight_names]
@@ -191,7 +195,9 @@ def filter_model_files(filenames):
      ]
 
      if is_transformers_available():
-         weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]
+         weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME]
+         if is_transformers_version("<=", "4.56.2"):
+             weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME]
 
      allowed_extensions = [wn.split(".")[-1] for wn in weight_names]
 
@@ -212,7 +218,9 @@ def variant_compatible_siblings(filenames, variant=None, ignore_patterns=None) -
      ]
 
      if is_transformers_available():
-         weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]
+         weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME]
+         if is_transformers_version("<=", "4.56.2"):
+             weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME]
 
      # model_pytorch, diffusion_model_pytorch, ...
      weight_prefixes = [w.split(".")[0] for w in weight_names]
@@ -830,6 +838,9 @@ def load_sub_model(
      else:
          loading_kwargs["low_cpu_mem_usage"] = False
 
+     if is_transformers_model and is_transformers_version(">=", "4.57.0"):
+         loading_kwargs.pop("offload_state_dict")
+
      if (
          quantization_config is not None
          and isinstance(quantization_config, PipelineQuantizationConfig)
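The last pipeline-loading change strips `offload_state_dict` from the kwargs forwarded to transformers models when transformers >= 4.57.0 is installed, presumably because `from_pretrained` no longer accepts that argument there. The compatibility pattern, reduced to a sketch with placeholder values:

```python
# Placeholder kwargs standing in for what load_sub_model assembles.
loading_kwargs = {"torch_dtype": "auto", "offload_state_dict": True}

is_transformers_model = True
transformers_is_4_57_plus = True  # stand-in for is_transformers_version(">=", "4.57.0")

if is_transformers_model and transformers_is_4_57_plus:
    # Mirrors the diff; .pop("offload_state_dict", None) would be the defensive variant.
    loading_kwargs.pop("offload_state_dict")
```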
{diffusers-0.35.1.dist-info → diffusers-0.35.2.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: diffusers
- Version: 0.35.1
+ Version: 0.35.2
  Summary: State-of-the-art diffusion in PyTorch and JAX.
  Home-page: https://github.com/huggingface/diffusers
  Author: The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)
{diffusers-0.35.1.dist-info → diffusers-0.35.2.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
- diffusers/__init__.py,sha256=ez_SaG7tS8y601bNDEVjOX-_7BXe7KdUrG9PCsQ6kY0,50085
+ diffusers/__init__.py,sha256=otVY6ZdoA1vJBd-2_zRA-_0NVYvhbKzj0vZKDMMIAEc,50085
  diffusers/callbacks.py,sha256=9NgQ7QkUncjVR8pbCHEd-PYvf5xKD-QRDmOoAjyzoXY,10282
  diffusers/configuration_utils.py,sha256=sf1KSxuGJSPo9beQvEXt8PolG0OVXdYF9WDRYssDGug,34942
  diffusers/dependency_versions_check.py,sha256=PcT_deWuvIKrNkjkCnQKi0ZTWCl77tHC02lhttbqQHM,1271
@@ -58,7 +58,7 @@ diffusers/models/__init__.py,sha256=EYlrNtK3-O_PuQqkxi1pJ2JmOPZ0eIpX6Q2HKBl_z-w,
  diffusers/models/activations.py,sha256=qxdn6OROfUvxyxgpm6M2VDKeJxH6mDsUI_xP4S3iw6s,6511
  diffusers/models/adapter.py,sha256=NDnqBqD53fg2fWWHt-LUHDyhuH6J-4R7PoStr2ggp-4,24507
  diffusers/models/attention.py,sha256=wxrklJ6tDweVrTFEJ2oCbcUIfdRha4irPQ602wNTEMo,73123
- diffusers/models/attention_dispatch.py,sha256=e7Bm5EWvvdPw0CT-NDvndHD1QahIOmGB9u2-g-rrahM,42265
+ diffusers/models/attention_dispatch.py,sha256=Bk_-N04IIB2jN6y5gFo4yBNxPjOBtxpoYkOm1hlZFME,42798
  diffusers/models/attention_flax.py,sha256=NJTCmsALDnRScOy2EG7r6fZGXaNrJGBVTHQxAluWZEs,20392
  diffusers/models/attention_processor.py,sha256=1WrbKRIhcciXVorgI126bFQJuaQXbeYZbVY8b1bJ35A,239691
  diffusers/models/auto_model.py,sha256=6H0dnsr9atn-kgsEjnjLVpS6f3kzFDXCLhKUusxXdQE,11147
@@ -139,7 +139,7 @@ diffusers/models/transformers/transformer_flux.py,sha256=TLEdwpt-9u-sDUpuo14T91C
  diffusers/models/transformers/transformer_hidream_image.py,sha256=ciuyPRprws2K77apFzuLHmDZADOJ3AzAwsVze3FmR_E,39322
  diffusers/models/transformers/transformer_hunyuan_video.py,sha256=DMVzgTdQv0jrSiJA_qnt4eOcbRfDC_K1W4O9vvrWphw,47084
  diffusers/models/transformers/transformer_hunyuan_video_framepack.py,sha256=p9RgBqN5SWWqQU_wvEmaPZ2W5sCsYZoiGblqFKg6GpM,18698
- diffusers/models/transformers/transformer_ltx.py,sha256=7znlVN1UPdXBR91OXN_zyiwHt895XF71gCocV-BWpVc,22162
+ diffusers/models/transformers/transformer_ltx.py,sha256=bFn8QbJG7O_VDCSIidQl-ovkl3jFEWQG4AT3m9By674,22200
  diffusers/models/transformers/transformer_lumina2.py,sha256=ACu9X7vatGMoiKSZKkEOcbEQjIvZwmqZ6nmHaSkD9Wo,22075
  diffusers/models/transformers/transformer_mochi.py,sha256=FbNpuQR3MrH7I7CU8tJ_8Nf00Q2VB5hNUpqwuYXS50Y,18521
  diffusers/models/transformers/transformer_omnigen.py,sha256=nxmuNqRXRG51Mw2G8BNu-DZrRKWMDc3G99DFgPS9yZA,20029
@@ -147,8 +147,8 @@ diffusers/models/transformers/transformer_qwenimage.py,sha256=Jzhx2Pf8odX8bRQLhP
  diffusers/models/transformers/transformer_sd3.py,sha256=2Aw1Di240iPngdlg_TZewlgi4WT2fGAOpB5XFa3RmO0,19232
  diffusers/models/transformers/transformer_skyreels_v2.py,sha256=PpHVausTXRFhm1gQJy_AFdzjtBb3NNThd2oqoPp4pAw,25733
  diffusers/models/transformers/transformer_temporal.py,sha256=GMn5WUbWWX7ZvyqVhO12g6bID7dnrMndYrDN-UZEI0Q,16812
- diffusers/models/transformers/transformer_wan.py,sha256=Y3e_j2CUZ0fce9yrkL7rYmDdAazigulkqeYEZEY1Mec,28852
- diffusers/models/transformers/transformer_wan_vace.py,sha256=KISLOLoYGRlPusyLOJEzjYdckFxFm_adv7b7Jg6jqYM,16477
+ diffusers/models/transformers/transformer_wan.py,sha256=4dvIriHCdVVBf3H6n9vS5Ue9BArR92ZZpFTVYAQWrsA,28884
+ diffusers/models/transformers/transformer_wan_vace.py,sha256=Bqaktjz0ETpwsta5jWvfP8fb1fsALRzW_sAGYcfY4qQ,16509
  diffusers/models/unets/__init__.py,sha256=srYFA7zEcDY7LxyUB2jz3TdRgsLz8elrWCpT6Y4YXuU,695
  diffusers/models/unets/unet_1d.py,sha256=tmSBsH3cPzNj9xCQt1zrV2lApufLEB2xiFTcXulE1Wo,10853
  diffusers/models/unets/unet_1d_blocks.py,sha256=LIuM8MwkcJ0n8wYwS6FBGYSMfl9wYv0-TbQ6FHO6A7k,26829
@@ -197,7 +197,7 @@ diffusers/pipelines/free_init_utils.py,sha256=SHrGV68cii9sYCKZLbIdBEOB5tANOVbN9v
  diffusers/pipelines/free_noise_utils.py,sha256=SlcvpUInyDDOOq7CkzXsjbcsC3Z7nY_JBzy6MJorHc4,29691
  diffusers/pipelines/onnx_utils.py,sha256=oTRc_iLHEKpf_IGFw_ka1bloAI-XUa6_ASMLW2LAH4w,8810
  diffusers/pipelines/pipeline_flax_utils.py,sha256=u8wFa0wdJ3CCCVpZdSLCAPDambSNGNLcyArEhiLwuAM,27026
- diffusers/pipelines/pipeline_loading_utils.py,sha256=RDbQqtx4F2Oog8b7jYC3vm-GfSZpa1UAUBQQVnfe_G4,48069
+ diffusers/pipelines/pipeline_loading_utils.py,sha256=B7A8P0XoX4aCRBv0n6GdivzoDrX0m29nd7gF3GMXHaA,48489
  diffusers/pipelines/pipeline_utils.py,sha256=u-KRiPcVriYKHvEL_dT3PvmlBDCQBiqmFUVnexXx8oE,105794
  diffusers/pipelines/transformers_loading_utils.py,sha256=98wKUHN89Q1nmmat046hgQxLDlnZNj9Ww4TLB5W52pQ,5281
  diffusers/pipelines/allegro/__init__.py,sha256=T1MLZgDf8Fhh6YunF8a4Ta6NNIqneWsJIvmBhiy1ABM,1290
@@ -695,9 +695,9 @@ diffusers/utils/testing_utils.py,sha256=LSFTPVApvPOK9EHJYmgrUSRXzyYuHXFmEWHzvgF-
  diffusers/utils/torch_utils.py,sha256=ogvNvRfOryc5riP-Ph_nopGEsuZtmIszMIDPrkSgnAY,7775
  diffusers/utils/typing_utils.py,sha256=yeuCJmb1t5n5rG1JRPJo33KO7tg_m9ZwSXQcPKiKyFA,3400
  diffusers/utils/versions.py,sha256=-e7XW1TzZ-tsRo9PMQHp-hNGYHuVDFzLtwg3uAJzqdI,4333
- diffusers-0.35.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- diffusers-0.35.1.dist-info/METADATA,sha256=2YroDyHqhHv9IcdMEoodiSq5KijgbvS9mrwCfNOJFB8,20120
- diffusers-0.35.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- diffusers-0.35.1.dist-info/entry_points.txt,sha256=_1bvshKV_6_b63_FAkcUs9W6tUKGeIoQ3SHEZsovEWs,72
- diffusers-0.35.1.dist-info/top_level.txt,sha256=axJl2884vMSvhzrFrSoht36QXA_6gZN9cKtg4xOO72o,10
- diffusers-0.35.1.dist-info/RECORD,,
+ diffusers-0.35.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ diffusers-0.35.2.dist-info/METADATA,sha256=RLXSlGdSKETBelaMUMUMPTXpjM3FaqRn3AiCqlBwvFc,20120
+ diffusers-0.35.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ diffusers-0.35.2.dist-info/entry_points.txt,sha256=_1bvshKV_6_b63_FAkcUs9W6tUKGeIoQ3SHEZsovEWs,72
+ diffusers-0.35.2.dist-info/top_level.txt,sha256=axJl2884vMSvhzrFrSoht36QXA_6gZN9cKtg4xOO72o,10
+ diffusers-0.35.2.dist-info/RECORD,,