diffusers 0.35.0__py3-none-any.whl → 0.35.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- diffusers/__init__.py +1 -1
- diffusers/models/attention_dispatch.py +25 -5
- diffusers/models/transformers/transformer_ltx.py +3 -1
- diffusers/models/transformers/transformer_wan.py +2 -2
- diffusers/models/transformers/transformer_wan_vace.py +2 -2
- diffusers/pipelines/pipeline_loading_utils.py +15 -4
- diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py +4 -37
- {diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/METADATA +1 -1
- {diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/RECORD +13 -13
- {diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/LICENSE +0 -0
- {diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/WHEEL +0 -0
- {diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/entry_points.txt +0 -0
- {diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/top_level.txt +0 -0
diffusers/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.35.0"
+__version__ = "0.35.2"
diffusers/models/attention_dispatch.py
CHANGED
@@ -110,6 +110,27 @@ if _CAN_USE_XFORMERS_ATTN:
 else:
     xops = None
 
+# Version guard for PyTorch compatibility - custom_op was added in PyTorch 2.4
+if torch.__version__ >= "2.4.0":
+    _custom_op = torch.library.custom_op
+    _register_fake = torch.library.register_fake
+else:
+
+    def custom_op_no_op(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
+        def wrap(func):
+            return func
+
+        return wrap if fn is None else fn
+
+    def register_fake_no_op(op, fn=None, /, *, lib=None, _stacklevel=1):
+        def wrap(func):
+            return func
+
+        return wrap if fn is None else fn
+
+    _custom_op = custom_op_no_op
+    _register_fake = register_fake_no_op
+
 
 logger = get_logger(__name__)  # pylint: disable=invalid-name
 
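The fallback branch above keeps modules importable on PyTorch < 2.4, where torch.library.custom_op and torch.library.register_fake do not exist: the no-op decorators preserve the decorator protocol and simply return the function without registering it as an op. A minimal standalone sketch (no torch required):

    # Minimal sketch: the no-op fallback mirrors the decorator protocol of
    # torch.library.custom_op, so decorated functions stay callable on older
    # PyTorch -- they are just never registered for fullgraph tracing.
    def custom_op_no_op(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
        def wrap(func):
            return func

        return wrap if fn is None else fn


    @custom_op_no_op("mylib::add_one", mutates_args=())
    def add_one(x):
        return x + 1


    print(add_one(41))  # 42 -- the function is returned unchanged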
@@ -473,12 +494,11 @@ def _flex_attention_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
 
 # ===== torch op registrations =====
 # Registrations are required for fullgraph tracing compatibility
-
-
-# TODO: library.custom_op and register_fake probably need version guards?
 # TODO: this is only required because the beta release FA3 does not have it. There is a PR adding
 # this but it was never merged: https://github.com/Dao-AILab/flash-attention/pull/1590
-@torch.library.custom_op("flash_attn_3::_flash_attn_forward", mutates_args=(), device_types="cuda")
+
+
+@_custom_op("flash_attn_3::_flash_attn_forward", mutates_args=(), device_types="cuda")
 def _wrapped_flash_attn_3_original(
     query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
 ) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -487,7 +507,7 @@ def _wrapped_flash_attn_3_original(
     return out, lse
 
 
-@torch.library.register_fake("flash_attn_3::_flash_attn_forward")
+@_register_fake("flash_attn_3::_flash_attn_forward")
 def _(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
     batch_size, seq_len, num_heads, head_dim = query.shape
     lse_shape = (batch_size, seq_len, num_heads)
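For reference, a hedged sketch of the registration pattern these aliases feed into (assumes PyTorch >= 2.4; the op namespace "mylib" is made up). The eager kernel does the real work, while the fake registration only reports output metadata so torch.compile fullgraph tracing can proceed without executing the kernel:

    import torch

    @torch.library.custom_op("mylib::scale", mutates_args=())
    def scale(x: torch.Tensor, factor: float) -> torch.Tensor:
        # real (eager) kernel
        return x * factor

    @torch.library.register_fake("mylib::scale")
    def _(x: torch.Tensor, factor: float) -> torch.Tensor:
        # shape/dtype-only "meta" implementation used during tracing
        return torch.empty_like(x)

    print(torch.ops.mylib.scale(torch.ones(3), 2.0))  # tensor([2., 2., 2.])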
diffusers/models/transformers/transformer_ltx.py
CHANGED
@@ -350,7 +350,9 @@ class LTXVideoTransformerBlock(nn.Module):
         norm_hidden_states = self.norm1(hidden_states)
 
         num_ada_params = self.scale_shift_table.shape[0]
-        ada_values = self.scale_shift_table[None, None] + temb.reshape(batch_size, temb.size(1), num_ada_params, -1)
+        ada_values = self.scale_shift_table[None, None].to(temb.device) + temb.reshape(
+            batch_size, temb.size(1), num_ada_params, -1
+        )
         shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_values.unbind(dim=2)
         norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
 
diffusers/models/transformers/transformer_wan.py
CHANGED
@@ -665,12 +665,12 @@ class WanTransformer3DModel(
         # 5. Output norm, projection & unpatchify
         if temb.ndim == 3:
             # batch_size, seq_len, inner_dim (wan 2.2 ti2v)
-            shift, scale = (self.scale_shift_table.unsqueeze(0) + temb.unsqueeze(2)).chunk(2, dim=2)
+            shift, scale = (self.scale_shift_table.unsqueeze(0).to(temb.device) + temb.unsqueeze(2)).chunk(2, dim=2)
             shift = shift.squeeze(2)
             scale = scale.squeeze(2)
         else:
             # batch_size, inner_dim
-            shift, scale = (self.scale_shift_table + temb.unsqueeze(1)).chunk(2, dim=1)
+            shift, scale = (self.scale_shift_table.to(temb.device) + temb.unsqueeze(1)).chunk(2, dim=1)
 
         # Move the shift and scale tensors to the same device as hidden_states.
         # When using multi-GPU inference via accelerate these will be on the
diffusers/models/transformers/transformer_wan_vace.py
CHANGED
@@ -103,7 +103,7 @@ class WanVACETransformerBlock(nn.Module):
             control_hidden_states = control_hidden_states + hidden_states
 
         shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = (
-            self.scale_shift_table + temb.float()
+            self.scale_shift_table.to(temb.device) + temb.float()
         ).chunk(6, dim=1)
 
         # 1. Self-attention
@@ -359,7 +359,7 @@ class WanVACETransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromO
         hidden_states = hidden_states + control_hint * scale
 
         # 6. Output norm, projection & unpatchify
-        shift, scale = (self.scale_shift_table + temb.unsqueeze(1)).chunk(2, dim=1)
+        shift, scale = (self.scale_shift_table.to(temb.device) + temb.unsqueeze(1)).chunk(2, dim=1)
 
         # Move the shift and scale tensors to the same device as hidden_states.
         # When using multi-GPU inference via accelerate these will be on the
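The scale_shift_table changes in transformer_ltx.py, transformer_wan.py, and transformer_wan_vace.py all apply the same fix: the table is a module parameter that can be left on a different device than temb (for example under CPU offloading or multi-GPU dispatch via accelerate), and adding tensors across devices raises a RuntimeError. A small illustrative sketch with made-up shapes:

    import torch

    scale_shift_table = torch.randn(1, 6, 8)   # may remain on CPU after offloading
    temb = torch.randn(2, 6, 8)                # follows the hidden states' device
    if torch.cuda.is_available():
        temb = temb.cuda()

    # `scale_shift_table + temb` fails when the devices differ; moving the
    # table to temb's device first is safe and a no-op on a single device.
    ada_values = scale_shift_table.to(temb.device) + temb
    print(ada_values.device)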
diffusers/pipelines/pipeline_loading_utils.py
CHANGED
@@ -48,10 +48,12 @@ from .transformers_loading_utils import _load_tokenizer_from_dduf, _load_transfo
 if is_transformers_available():
     import transformers
     from transformers import PreTrainedModel, PreTrainedTokenizerBase
-    from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME
     from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME
     from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME
 
+    if is_transformers_version("<=", "4.56.2"):
+        from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME
+
 if is_accelerate_available():
     import accelerate
     from accelerate import dispatch_model
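The Flax import is now gated because, per the guard, FLAX_WEIGHTS_NAME is assumed to be removed from transformers after 4.56.2. A generic sketch of the same version-gating pattern, using packaging in place of diffusers' internal is_transformers_version helper (the helper name below is hypothetical):

    import importlib.metadata

    from packaging import version

    def installed_at_most(package, ref):
        # hypothetical stand-in for is_transformers_version("<=", ref)
        return version.parse(importlib.metadata.version(package)) <= version.parse(ref)

    if installed_at_most("transformers", "4.56.2"):
        from transformers.utils import FLAX_WEIGHTS_NAME  # constant removed upstream later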
@@ -112,7 +114,9 @@ def is_safetensors_compatible(filenames, passed_components=None, folder_names=No
     ]
 
     if is_transformers_available():
-        weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]
+        weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME]
+        if is_transformers_version("<=", "4.56.2"):
+            weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME]
 
     # model_pytorch, diffusion_model_pytorch, ...
     weight_prefixes = [w.split(".")[0] for w in weight_names]
@@ -191,7 +195,9 @@ def filter_model_files(filenames):
     ]
 
     if is_transformers_available():
-        weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]
+        weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME]
+        if is_transformers_version("<=", "4.56.2"):
+            weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME]
 
     allowed_extensions = [wn.split(".")[-1] for wn in weight_names]
 
@@ -212,7 +218,9 @@ def variant_compatible_siblings(filenames, variant=None, ignore_patterns=None) -
     ]
 
     if is_transformers_available():
-        weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]
+        weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME]
+        if is_transformers_version("<=", "4.56.2"):
+            weight_names += [TRANSFORMERS_FLAX_WEIGHTS_NAME]
 
     # model_pytorch, diffusion_model_pytorch, ...
     weight_prefixes = [w.split(".")[0] for w in weight_names]
@@ -830,6 +838,9 @@ def load_sub_model(
     else:
         loading_kwargs["low_cpu_mem_usage"] = False
 
+    if is_transformers_model and is_transformers_version(">=", "4.57.0"):
+        loading_kwargs.pop("offload_state_dict")
+
     if (
         quantization_config is not None
         and isinstance(quantization_config, PipelineQuantizationConfig)
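The new block strips offload_state_dict before the kwargs reach transformers, presumably because transformers 4.57 no longer accepts that argument in from_pretrained. A minimal sketch of the kwarg-pruning idea (names here are illustrative, not diffusers' API):

    # Illustrative only: drop loader kwargs the installed dependency no longer
    # accepts before forwarding them to a from_pretrained-style loader.
    def prune_unsupported_kwargs(loading_kwargs, dependency_is_new_enough):
        if dependency_is_new_enough:
            loading_kwargs.pop("offload_state_dict", None)
        return loading_kwargs

    kwargs = {"low_cpu_mem_usage": True, "offload_state_dict": True}
    print(prune_unsupported_kwargs(kwargs, True))  # {'low_cpu_mem_usage': True}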
diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py
CHANGED
@@ -62,25 +62,6 @@ EXAMPLE_DOC_STRING = """
         >>> image.save("qwenimage_edit.png")
         ```
 """
-PREFERRED_QWENIMAGE_RESOLUTIONS = [
-    (672, 1568),
-    (688, 1504),
-    (720, 1456),
-    (752, 1392),
-    (800, 1328),
-    (832, 1248),
-    (880, 1184),
-    (944, 1104),
-    (1024, 1024),
-    (1104, 944),
-    (1184, 880),
-    (1248, 832),
-    (1328, 800),
-    (1392, 752),
-    (1456, 720),
-    (1504, 688),
-    (1568, 672),
-]
 
 
 # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
@@ -565,7 +546,6 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
         callback_on_step_end_tensor_inputs: List[str] = ["latents"],
         max_sequence_length: int = 512,
-        _auto_resize: bool = True,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -646,8 +626,7 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
             returning a tuple, the first element is a list with the generated images.
         """
         image_size = image[0].size if isinstance(image, list) else image.size
-
-        calculated_width, calculated_height, _ = calculate_dimensions(1024 * 1024, width / height)
+        calculated_width, calculated_height, _ = calculate_dimensions(1024 * 1024, image_size[0] / image_size[1])
         height = height or calculated_height
         width = width or calculated_width
 
@@ -685,18 +664,9 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
         device = self._execution_device
         # 3. Preprocess image
         if image is not None and not (isinstance(image, torch.Tensor) and image.size(1) == self.latent_channels):
-            img = image[0] if isinstance(image, list) else image
-            image_height, image_width = self.image_processor.get_default_height_width(img)
-            aspect_ratio = image_width / image_height
-            if _auto_resize:
-                _, image_width, image_height = min(
-                    (abs(aspect_ratio - w / h), w, h) for w, h in PREFERRED_QWENIMAGE_RESOLUTIONS
-                )
-            image_width = image_width // multiple_of * multiple_of
-            image_height = image_height // multiple_of * multiple_of
-            image = self.image_processor.resize(image, image_height, image_width)
+            image = self.image_processor.resize(image, calculated_height, calculated_width)
             prompt_image = image
-            image = self.image_processor.preprocess(image, image_height, image_width)
+            image = self.image_processor.preprocess(image, calculated_height, calculated_width)
             image = image.unsqueeze(2)
 
         has_neg_prompt = negative_prompt is not None or (
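With the preferred-resolution table gone, the edit pipeline now resizes every input to the dimensions returned by calculate_dimensions(1024 * 1024, aspect_ratio), i.e. roughly one megapixel at the image's own aspect ratio. A hedged sketch of what such a helper computes (the real implementation lives in pipeline_qwenimage.py and its rounding multiple may differ; 16 is used here purely for illustration):

    import math

    def calculate_dimensions_sketch(target_area, ratio, multiple_of=16):
        # pick width x height so that width * height ~= target_area and
        # width / height ~= ratio, rounded to a hardware-friendly multiple
        width = math.sqrt(target_area * ratio)
        height = width / ratio
        width = round(width / multiple_of) * multiple_of
        height = round(height / multiple_of) * multiple_of
        return int(width), int(height), ratio

    w, h, _ = calculate_dimensions_sketch(1024 * 1024, 1568 / 672)
    print(w, h)  # 1568 672 -- about one megapixel at the input aspect ratio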
@@ -713,9 +683,6 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
             max_sequence_length=max_sequence_length,
         )
         if do_true_cfg:
-            # negative image is the same size as the original image, but all pixels are white
-            # negative_image = Image.new("RGB", (image.width, image.height), (255, 255, 255))
-
             negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt(
                 image=prompt_image,
                 prompt=negative_prompt,
@@ -742,7 +709,7 @@ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
         img_shapes = [
             [
                 (1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2),
-                (1, image_height // self.vae_scale_factor // 2, image_width // self.vae_scale_factor // 2),
+                (1, calculated_height // self.vae_scale_factor // 2, calculated_width // self.vae_scale_factor // 2),
             ]
         ] * batch_size
 
{diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: diffusers
-Version: 0.35.0
+Version: 0.35.2
 Summary: State-of-the-art diffusion in PyTorch and JAX.
 Home-page: https://github.com/huggingface/diffusers
 Author: The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)
{diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-diffusers/__init__.py,sha256=
+diffusers/__init__.py,sha256=otVY6ZdoA1vJBd-2_zRA-_0NVYvhbKzj0vZKDMMIAEc,50085
 diffusers/callbacks.py,sha256=9NgQ7QkUncjVR8pbCHEd-PYvf5xKD-QRDmOoAjyzoXY,10282
 diffusers/configuration_utils.py,sha256=sf1KSxuGJSPo9beQvEXt8PolG0OVXdYF9WDRYssDGug,34942
 diffusers/dependency_versions_check.py,sha256=PcT_deWuvIKrNkjkCnQKi0ZTWCl77tHC02lhttbqQHM,1271
@@ -58,7 +58,7 @@ diffusers/models/__init__.py,sha256=EYlrNtK3-O_PuQqkxi1pJ2JmOPZ0eIpX6Q2HKBl_z-w,
 diffusers/models/activations.py,sha256=qxdn6OROfUvxyxgpm6M2VDKeJxH6mDsUI_xP4S3iw6s,6511
 diffusers/models/adapter.py,sha256=NDnqBqD53fg2fWWHt-LUHDyhuH6J-4R7PoStr2ggp-4,24507
 diffusers/models/attention.py,sha256=wxrklJ6tDweVrTFEJ2oCbcUIfdRha4irPQ602wNTEMo,73123
-diffusers/models/attention_dispatch.py,sha256=
+diffusers/models/attention_dispatch.py,sha256=Bk_-N04IIB2jN6y5gFo4yBNxPjOBtxpoYkOm1hlZFME,42798
 diffusers/models/attention_flax.py,sha256=NJTCmsALDnRScOy2EG7r6fZGXaNrJGBVTHQxAluWZEs,20392
 diffusers/models/attention_processor.py,sha256=1WrbKRIhcciXVorgI126bFQJuaQXbeYZbVY8b1bJ35A,239691
 diffusers/models/auto_model.py,sha256=6H0dnsr9atn-kgsEjnjLVpS6f3kzFDXCLhKUusxXdQE,11147
@@ -139,7 +139,7 @@ diffusers/models/transformers/transformer_flux.py,sha256=TLEdwpt-9u-sDUpuo14T91C
 diffusers/models/transformers/transformer_hidream_image.py,sha256=ciuyPRprws2K77apFzuLHmDZADOJ3AzAwsVze3FmR_E,39322
 diffusers/models/transformers/transformer_hunyuan_video.py,sha256=DMVzgTdQv0jrSiJA_qnt4eOcbRfDC_K1W4O9vvrWphw,47084
 diffusers/models/transformers/transformer_hunyuan_video_framepack.py,sha256=p9RgBqN5SWWqQU_wvEmaPZ2W5sCsYZoiGblqFKg6GpM,18698
-diffusers/models/transformers/transformer_ltx.py,sha256=
+diffusers/models/transformers/transformer_ltx.py,sha256=bFn8QbJG7O_VDCSIidQl-ovkl3jFEWQG4AT3m9By674,22200
 diffusers/models/transformers/transformer_lumina2.py,sha256=ACu9X7vatGMoiKSZKkEOcbEQjIvZwmqZ6nmHaSkD9Wo,22075
 diffusers/models/transformers/transformer_mochi.py,sha256=FbNpuQR3MrH7I7CU8tJ_8Nf00Q2VB5hNUpqwuYXS50Y,18521
 diffusers/models/transformers/transformer_omnigen.py,sha256=nxmuNqRXRG51Mw2G8BNu-DZrRKWMDc3G99DFgPS9yZA,20029
@@ -147,8 +147,8 @@ diffusers/models/transformers/transformer_qwenimage.py,sha256=Jzhx2Pf8odX8bRQLhP
 diffusers/models/transformers/transformer_sd3.py,sha256=2Aw1Di240iPngdlg_TZewlgi4WT2fGAOpB5XFa3RmO0,19232
 diffusers/models/transformers/transformer_skyreels_v2.py,sha256=PpHVausTXRFhm1gQJy_AFdzjtBb3NNThd2oqoPp4pAw,25733
 diffusers/models/transformers/transformer_temporal.py,sha256=GMn5WUbWWX7ZvyqVhO12g6bID7dnrMndYrDN-UZEI0Q,16812
-diffusers/models/transformers/transformer_wan.py,sha256=
-diffusers/models/transformers/transformer_wan_vace.py,sha256=
+diffusers/models/transformers/transformer_wan.py,sha256=4dvIriHCdVVBf3H6n9vS5Ue9BArR92ZZpFTVYAQWrsA,28884
+diffusers/models/transformers/transformer_wan_vace.py,sha256=Bqaktjz0ETpwsta5jWvfP8fb1fsALRzW_sAGYcfY4qQ,16509
 diffusers/models/unets/__init__.py,sha256=srYFA7zEcDY7LxyUB2jz3TdRgsLz8elrWCpT6Y4YXuU,695
 diffusers/models/unets/unet_1d.py,sha256=tmSBsH3cPzNj9xCQt1zrV2lApufLEB2xiFTcXulE1Wo,10853
 diffusers/models/unets/unet_1d_blocks.py,sha256=LIuM8MwkcJ0n8wYwS6FBGYSMfl9wYv0-TbQ6FHO6A7k,26829
@@ -197,7 +197,7 @@ diffusers/pipelines/free_init_utils.py,sha256=SHrGV68cii9sYCKZLbIdBEOB5tANOVbN9v
 diffusers/pipelines/free_noise_utils.py,sha256=SlcvpUInyDDOOq7CkzXsjbcsC3Z7nY_JBzy6MJorHc4,29691
 diffusers/pipelines/onnx_utils.py,sha256=oTRc_iLHEKpf_IGFw_ka1bloAI-XUa6_ASMLW2LAH4w,8810
 diffusers/pipelines/pipeline_flax_utils.py,sha256=u8wFa0wdJ3CCCVpZdSLCAPDambSNGNLcyArEhiLwuAM,27026
-diffusers/pipelines/pipeline_loading_utils.py,sha256=
+diffusers/pipelines/pipeline_loading_utils.py,sha256=B7A8P0XoX4aCRBv0n6GdivzoDrX0m29nd7gF3GMXHaA,48489
 diffusers/pipelines/pipeline_utils.py,sha256=u-KRiPcVriYKHvEL_dT3PvmlBDCQBiqmFUVnexXx8oE,105794
 diffusers/pipelines/transformers_loading_utils.py,sha256=98wKUHN89Q1nmmat046hgQxLDlnZNj9Ww4TLB5W52pQ,5281
 diffusers/pipelines/allegro/__init__.py,sha256=T1MLZgDf8Fhh6YunF8a4Ta6NNIqneWsJIvmBhiy1ABM,1290
@@ -461,7 +461,7 @@ diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py,sha256=V0zzovrP3DC-J7z
 diffusers/pipelines/qwenimage/__init__.py,sha256=0Bh0rr7S-9V3uA3fm0cZhpi1ASTuChOdTHlq2s2Rv-A,2044
 diffusers/pipelines/qwenimage/pipeline_output.py,sha256=TbhkYg7Uq10TNp_mIDvm7imSEh8LK0hFpQ018gCVaYk,603
 diffusers/pipelines/qwenimage/pipeline_qwenimage.py,sha256=Tqsf8Ax8SKRChYtr7wjL2936miNfp5oqfdUZA9K1xpE,34535
-diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py,sha256=
+diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py,sha256=qD5btDMHYoSMlCNVmHu7BMoscb8XNoDufxtVzUVxQm0,40451
 diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py,sha256=FMYy0J8vN_E7Hpcg0bXmvmotJuBiOU7TfwJPoN5185A,40812
 diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py,sha256=wGbXWHnRfmtU0SEPV34y9CTSEpus1OVP7UNbItiBuBs,49677
 diffusers/pipelines/sana/__init__.py,sha256=qkgbJxOAEH4gmyQ4FX4USnOd-PPEDkZGjZ3QO0ID0pA,1719
@@ -695,9 +695,9 @@ diffusers/utils/testing_utils.py,sha256=LSFTPVApvPOK9EHJYmgrUSRXzyYuHXFmEWHzvgF-
 diffusers/utils/torch_utils.py,sha256=ogvNvRfOryc5riP-Ph_nopGEsuZtmIszMIDPrkSgnAY,7775
 diffusers/utils/typing_utils.py,sha256=yeuCJmb1t5n5rG1JRPJo33KO7tg_m9ZwSXQcPKiKyFA,3400
 diffusers/utils/versions.py,sha256=-e7XW1TzZ-tsRo9PMQHp-hNGYHuVDFzLtwg3uAJzqdI,4333
-diffusers-0.35.
-diffusers-0.35.
-diffusers-0.35.
-diffusers-0.35.
-diffusers-0.35.
-diffusers-0.35.
+diffusers-0.35.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+diffusers-0.35.2.dist-info/METADATA,sha256=RLXSlGdSKETBelaMUMUMPTXpjM3FaqRn3AiCqlBwvFc,20120
+diffusers-0.35.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+diffusers-0.35.2.dist-info/entry_points.txt,sha256=_1bvshKV_6_b63_FAkcUs9W6tUKGeIoQ3SHEZsovEWs,72
+diffusers-0.35.2.dist-info/top_level.txt,sha256=axJl2884vMSvhzrFrSoht36QXA_6gZN9cKtg4xOO72o,10
+diffusers-0.35.2.dist-info/RECORD,,
{diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/LICENSE
RENAMED
File without changes
{diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/WHEEL
RENAMED
File without changes
{diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/entry_points.txt
RENAMED
File without changes
{diffusers-0.35.0.dist-info → diffusers-0.35.2.dist-info}/top_level.txt
RENAMED
File without changes