diffsynth-engine 0.6.1.dev23__py3-none-any.whl → 0.6.1.dev24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

diffsynth_engine/configs/pipeline.py
@@ -262,16 +262,11 @@ class QwenImagePipelineConfig(AttentionConfig, OptimizationConfig, ParallelConfi
     encoder_dtype: torch.dtype = torch.bfloat16
     vae_dtype: torch.dtype = torch.float32
 
+    load_encoder: bool = True
+
     # override OptimizationConfig
     fbcache_relative_l1_threshold = 0.009
 
-    # override BaseConfig
-    vae_tiled: bool = True
-    vae_tile_size: Tuple[int, int] = (34, 34)
-    vae_tile_stride: Tuple[int, int] = (18, 16)
-
-    load_encoder: bool = True
-
     @classmethod
     def basic_config(
         cls,

diffsynth_engine/models/basic/transformer_helper.py
@@ -1,5 +1,6 @@
 import torch
 import torch.nn as nn
+import torch.nn.functional as F
 import math
 
 
@@ -91,8 +92,8 @@ class NewGELUActivation(nn.Module):
     the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
     """
 
-    def forward(self, input: "torch.Tensor") -> "torch.Tensor":
-        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
 
 
 class ApproximateGELU(nn.Module):
@@ -115,3 +116,36 @@ class ApproximateGELU(nn.Module):
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         x = self.proj(x)
         return x * torch.sigmoid(1.702 * x)
+
+
+class GELU(nn.Module):
+    r"""
+    GELU activation function with tanh approximation support with `approximate="tanh"`.
+
+    Parameters:
+        dim_in (`int`): The number of channels in the input.
+        dim_out (`int`): The number of channels in the output.
+        approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
+        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
+    """
+
+    def __init__(
+        self,
+        dim_in: int,
+        dim_out: int,
+        approximate: str = "none",
+        bias: bool = True,
+        device: str = "cuda:0",
+        dtype: torch.dtype = torch.float16,
+    ):
+        super().__init__()
+        self.proj = nn.Linear(dim_in, dim_out, bias=bias, device=device, dtype=dtype)
+        self.approximate = approximate
+
+    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
+        return F.gelu(gate, approximate=self.approximate)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.proj(x)
+        x = self.gelu(x)
+        return x
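
Note: the GELU helper added above wraps a linear projection followed by torch.nn.functional.gelu, so with approximate="tanh" it reproduces the tanh-approximated GELU that QwenFeedForward switches to below. A minimal usage sketch (cpu/float32 are substituted for the class's cuda/float16 defaults so the snippet runs anywhere):

    import torch
    from diffsynth_engine.models.basic.transformer_helper import GELU

    # project 8 -> 32 channels, then apply the tanh-approximated GELU
    act = GELU(dim_in=8, dim_out=32, approximate="tanh", device="cpu", dtype=torch.float32)
    x = torch.randn(2, 16, 8)  # (batch, seq, dim_in)
    y = act(x)                 # (2, 16, 32)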

diffsynth_engine/models/qwen_image/qwen_image_dit.py
@@ -6,7 +6,7 @@ from einops import rearrange
 from diffsynth_engine.models.base import StateDictConverter, PreTrainedModel
 from diffsynth_engine.models.basic import attention as attention_ops
 from diffsynth_engine.models.basic.timestep import TimestepEmbeddings
-from diffsynth_engine.models.basic.transformer_helper import AdaLayerNorm, ApproximateGELU, RMSNorm
+from diffsynth_engine.models.basic.transformer_helper import AdaLayerNorm, GELU, RMSNorm
 from diffsynth_engine.utils.gguf import gguf_inference
 from diffsynth_engine.utils.fp8_linear import fp8_inference
 from diffsynth_engine.utils.parallel import (
@@ -144,7 +144,7 @@ class QwenFeedForward(nn.Module):
         super().__init__()
         inner_dim = int(dim * 4)
         self.net = nn.ModuleList([])
-        self.net.append(ApproximateGELU(dim, inner_dim, device=device, dtype=dtype))
+        self.net.append(GELU(dim, inner_dim, approximate="tanh", device=device, dtype=dtype))
         self.net.append(nn.Dropout(dropout))
         self.net.append(nn.Linear(inner_dim, dim_out, device=device, dtype=dtype))
 
@@ -155,8 +155,8 @@ class QwenFeedForward(nn.Module):
 
 
 def apply_rotary_emb_qwen(x: torch.Tensor, freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]]):
-    x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
-    x_out = torch.view_as_real(x_rotated * freqs_cis).flatten(3)
+    x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))  # (b, s, h, d) -> (b, s, h, d/2, 2)
+    x_out = torch.view_as_real(x_rotated * freqs_cis.unsqueeze(1)).flatten(3)  # (b, s, h, d/2, 2) -> (b, s, h, d)
     return x_out.type_as(x)
 
 
@@ -200,13 +200,13 @@ class QwenDoubleStreamAttention(nn.Module):
         img_q, img_k, img_v = self.to_q(image), self.to_k(image), self.to_v(image)
         txt_q, txt_k, txt_v = self.add_q_proj(text), self.add_k_proj(text), self.add_v_proj(text)
 
-        img_q = rearrange(img_q, "b s (h d) -> b h s d", h=self.num_heads)
-        img_k = rearrange(img_k, "b s (h d) -> b h s d", h=self.num_heads)
-        img_v = rearrange(img_v, "b s (h d) -> b h s d", h=self.num_heads)
+        img_q = rearrange(img_q, "b s (h d) -> b s h d", h=self.num_heads)
+        img_k = rearrange(img_k, "b s (h d) -> b s h d", h=self.num_heads)
+        img_v = rearrange(img_v, "b s (h d) -> b s h d", h=self.num_heads)
 
-        txt_q = rearrange(txt_q, "b s (h d) -> b h s d", h=self.num_heads)
-        txt_k = rearrange(txt_k, "b s (h d) -> b h s d", h=self.num_heads)
-        txt_v = rearrange(txt_v, "b s (h d) -> b h s d", h=self.num_heads)
+        txt_q = rearrange(txt_q, "b s (h d) -> b s h d", h=self.num_heads)
+        txt_k = rearrange(txt_k, "b s (h d) -> b s h d", h=self.num_heads)
+        txt_v = rearrange(txt_v, "b s (h d) -> b s h d", h=self.num_heads)
 
         img_q, img_k = self.norm_q(img_q), self.norm_k(img_k)
         txt_q, txt_k = self.norm_added_q(txt_q), self.norm_added_k(txt_k)
@@ -218,13 +218,9 @@ class QwenDoubleStreamAttention(nn.Module):
         txt_q = apply_rotary_emb_qwen(txt_q, txt_freqs)
         txt_k = apply_rotary_emb_qwen(txt_k, txt_freqs)
 
-        joint_q = torch.cat([txt_q, img_q], dim=2)
-        joint_k = torch.cat([txt_k, img_k], dim=2)
-        joint_v = torch.cat([txt_v, img_v], dim=2)
-
-        joint_q = joint_q.transpose(1, 2)
-        joint_k = joint_k.transpose(1, 2)
-        joint_v = joint_v.transpose(1, 2)
+        joint_q = torch.cat([txt_q, img_q], dim=1)
+        joint_k = torch.cat([txt_k, img_k], dim=1)
+        joint_v = torch.cat([txt_v, img_v], dim=1)
 
         attn_kwargs = attn_kwargs if attn_kwargs is not None else {}
         joint_attn_out = attention_ops.attention(joint_q, joint_k, joint_v, attn_mask=attn_mask, **attn_kwargs)
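
Note: the hunks above are one layout change: q/k/v now stay in (b, s, h, d) order all the way to the attention call, so the text/image concatenation moves from dim=2 to dim=1 (the sequence dimension), the transposes are dropped, and the rotary factors need an extra singleton axis to broadcast over the head dimension. A self-contained sketch of that broadcast, assuming freqs_cis holds one complex factor per (position, channel pair), i.e. shape (s, d/2):

    import torch

    b, s, h, d = 2, 16, 4, 8
    x = torch.randn(b, s, h, d)
    freqs_cis = torch.polar(torch.ones(s, d // 2), torch.randn(s, d // 2))  # complex, (s, d/2)

    x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))  # (b, s, h, d/2)
    # unsqueeze(1) gives (s, 1, d/2), which broadcasts over b and h against (b, s, h, d/2)
    x_out = torch.view_as_real(x_rotated * freqs_cis.unsqueeze(1)).flatten(3)   # (b, s, h, d)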

diffsynth_engine/pipelines/qwen_image.py
@@ -24,7 +24,7 @@ from diffsynth_engine.models.qwen_image import (
 from diffsynth_engine.models.qwen_image import QwenImageVAE
 from diffsynth_engine.tokenizers import Qwen2TokenizerFast, Qwen2VLProcessor
 from diffsynth_engine.pipelines import BasePipeline, LoRAStateDictConverter
-from diffsynth_engine.pipelines.utils import calculate_shift
+from diffsynth_engine.pipelines.utils import calculate_shift, pad_and_concat
 from diffsynth_engine.algorithm.noise_scheduler import RecifitedFlowScheduler
 from diffsynth_engine.algorithm.sampler import FlowMatchEulerSampler
 from diffsynth_engine.utils.constants import (
@@ -148,9 +148,17 @@ class QwenImagePipeline(BasePipeline):
         self.prompt_template_encode_start_idx = 34
         # qwen image edit
         self.edit_system_prompt = "Describe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate."
-        self.edit_prompt_template_encode = "<|im_start|>system\n" + self.edit_system_prompt + "<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n"
+        self.edit_prompt_template_encode = (
+            "<|im_start|>system\n"
+            + self.edit_system_prompt
+            + "<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n"
+        )
         # qwen image edit plus
-        self.edit_plus_prompt_template_encode = "<|im_start|>system\n" + self.edit_system_prompt + "<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
+        self.edit_plus_prompt_template_encode = (
+            "<|im_start|>system\n"
+            + self.edit_system_prompt
+            + "<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
+        )
 
         self.edit_prompt_template_encode_start_idx = 64
 
@@ -490,8 +498,8 @@ class QwenImagePipeline(BasePipeline):
         else:
             # cfg by predict noise in one batch
             bs, _, h, w = latents.shape
-            prompt_emb = torch.cat([prompt_emb, negative_prompt_emb], dim=0)
-            prompt_emb_mask = torch.cat([prompt_emb_mask, negative_prompt_emb_mask], dim=0)
+            prompt_emb = pad_and_concat(prompt_emb, negative_prompt_emb)
+            prompt_emb_mask = pad_and_concat(prompt_emb_mask, negative_prompt_emb_mask)
             if entity_prompt_embs is not None:
                 entity_prompt_embs = [
                     torch.cat([x, y], dim=0) for x, y in zip(entity_prompt_embs, negative_entity_prompt_embs)

diffsynth_engine/pipelines/utils.py
@@ -1,3 +1,7 @@
+import torch
+import torch.nn.functional as F
+
+
 def accumulate(result, new_item):
     if result is None:
         return new_item
@@ -17,3 +21,51 @@ def calculate_shift(
     b = base_shift - m * base_seq_len
     mu = image_seq_len * m + b
     return mu
+
+
+def pad_and_concat(
+    tensor1: torch.Tensor,
+    tensor2: torch.Tensor,
+    concat_dim: int = 0,
+    pad_dim: int = 1,
+) -> torch.Tensor:
+    """
+    Concatenate two tensors along a specified dimension after padding along another dimension.
+
+    Assumes input tensors have shape (b, s, d), where:
+    - b: batch dimension
+    - s: sequence dimension (may differ)
+    - d: feature dimension
+
+    Args:
+        tensor1: First tensor with shape (b1, s1, d)
+        tensor2: Second tensor with shape (b2, s2, d)
+        concat_dim: Dimension to concatenate along, default is 0 (batch dimension)
+        pad_dim: Dimension to pad along, default is 1 (sequence dimension)
+
+    Returns:
+        Concatenated tensor, shape depends on concat_dim and pad_dim choices
+    """
+    assert tensor1.dim() == tensor2.dim(), "Both tensors must have the same number of dimensions"
+    assert concat_dim != pad_dim, "concat_dim and pad_dim cannot be the same"
+
+    len1, len2 = tensor1.shape[pad_dim], tensor2.shape[pad_dim]
+    max_len = max(len1, len2)
+
+    # Calculate the position of pad_dim in the padding list
+    # Padding format: from the last dimension, each pair represents (dim_n_left, dim_n_right, ..., dim_0_left, dim_0_right)
+    ndim = tensor1.dim()
+    padding = [0] * (2 * ndim)
+    pad_right_idx = -2 * pad_dim - 1
+
+    if len1 < max_len:
+        pad_len = max_len - len1
+        padding[pad_right_idx] = pad_len
+        tensor1 = F.pad(tensor1, padding, mode="constant", value=0)
+    elif len2 < max_len:
+        pad_len = max_len - len2
+        padding[pad_right_idx] = pad_len
+        tensor2 = F.pad(tensor2, padding, mode="constant", value=0)
+
+    # Concatenate along the specified dimension
+    return torch.cat([tensor1, tensor2], dim=concat_dim)
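
Note: pad_and_concat is what the classifier-free-guidance hunk above now uses to batch positive and negative prompt embeddings, which can have different sequence lengths once prompts are padded only to the longest sequence in their own batch: the shorter tensor is zero-padded on the right of the sequence dimension, then the two are concatenated along the batch dimension. A minimal usage sketch:

    import torch
    from diffsynth_engine.pipelines.utils import pad_and_concat

    prompt_emb = torch.randn(1, 20, 64)          # (b, s1, d)
    negative_prompt_emb = torch.randn(1, 7, 64)  # (b, s2, d), shorter sequence
    joint = pad_and_concat(prompt_emb, negative_prompt_emb)
    print(joint.shape)                           # torch.Size([2, 20, 64])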

diffsynth_engine/tokenizers/base.py
@@ -1,10 +1,16 @@
 # Modified from transformers.tokenization_utils_base
 from typing import Dict, List, Union, overload
+from enum import Enum
 
 
 TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
 
 
+class PaddingStrategy(str, Enum):
+    LONGEST = "longest"
+    MAX_LENGTH = "max_length"
+
+
 class BaseTokenizer:
     SPECIAL_TOKENS_ATTRIBUTES = [
         "bos_token",

diffsynth_engine/tokenizers/qwen2.py
@@ -4,7 +4,7 @@ import torch
 from typing import Dict, List, Union, Optional
 from tokenizers import Tokenizer as TokenizerFast, AddedToken
 
-from diffsynth_engine.tokenizers.base import BaseTokenizer, TOKENIZER_CONFIG_FILE
+from diffsynth_engine.tokenizers.base import BaseTokenizer, PaddingStrategy, TOKENIZER_CONFIG_FILE
 
 
 VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
@@ -165,22 +165,28 @@ class Qwen2TokenizerFast(BaseTokenizer):
         texts: Union[str, List[str]],
         max_length: Optional[int] = None,
         padding_side: Optional[str] = None,
+        padding_strategy: Union[PaddingStrategy, str] = "longest",
         **kwargs,
     ) -> Dict[str, "torch.Tensor"]:
         """
         Tokenize text and prepare for model inputs.
 
         Args:
-            text (`str`, `List[str]`, *optional*):
+            texts (`str`, `List[str]`):
                 The sequence or batch of sequences to be encoded.
 
             max_length (`int`, *optional*):
-                Each encoded sequence will be truncated or padded to max_length.
+                Maximum length of the encoded sequences.
 
             padding_side (`str`, *optional*):
                 The side on which the padding should be applied. Should be selected between `"right"` and `"left"`.
                 Defaults to `"right"`.
 
+            padding_strategy (`PaddingStrategy`, `str`, *optional*):
+                If `"longest"`, will pad the sequences to the longest sequence in the batch.
+                If `"max_length"`, will pad the sequences to the `max_length` argument.
+                Defaults to `"longest"`.
+
         Returns:
             `Dict[str, "torch.Tensor"]`: tensor dict compatible with model_input_names.
         """
@@ -190,7 +196,9 @@ class Qwen2TokenizerFast(BaseTokenizer):
 
         batch_ids = self.batch_encode(texts)
         ids_lens = [len(ids_) for ids_ in batch_ids]
-        max_length = max_length if max_length is not None else min(max(ids_lens), self.model_max_length)
+        max_length = max_length if max_length is not None else self.model_max_length
+        if padding_strategy == PaddingStrategy.LONGEST:
+            max_length = min(max(ids_lens), max_length)
         padding_side = padding_side if padding_side is not None else self.padding_side
 
         encoded = torch.zeros(len(texts), max_length, dtype=torch.long)
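
Note: the new padding_strategy argument controls how far padding goes. With the default "longest", a batch is padded only to its longest sequence (still capped by max_length or model_max_length); with "max_length", every sequence is padded out to max_length. A hedged sketch, assuming the signature above belongs to the tokenizer's __call__ (the enclosing method name is not visible in this hunk) and that a Qwen2TokenizerFast instance has already been constructed:

    from diffsynth_engine.tokenizers import Qwen2TokenizerFast

    def encode_batch(tokenizer: Qwen2TokenizerFast, texts: list):
        # pad to the longest sequence in this batch (default behaviour)
        longest = tokenizer(texts, padding_strategy="longest")
        # pad every sequence to a fixed length of 512 tokens
        fixed = tokenizer(texts, max_length=512, padding_strategy="max_length")
        return longest, fixed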

diffsynth_engine-0.6.1.dev23.dist-info/METADATA → diffsynth_engine-0.6.1.dev24.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: diffsynth_engine
-Version: 0.6.1.dev23
+Version: 0.6.1.dev24
 Author: MuseAI x ModelScope
 Classifier: Programming Language :: Python :: 3
 Classifier: Operating System :: OS Independent

diffsynth_engine-0.6.1.dev23.dist-info/RECORD → diffsynth_engine-0.6.1.dev24.dist-info/RECORD
@@ -81,7 +81,7 @@ diffsynth_engine/conf/tokenizers/wan/umt5-xxl/tokenizer.json,sha256=bhl7TT29cdoU
 diffsynth_engine/conf/tokenizers/wan/umt5-xxl/tokenizer_config.json,sha256=7Zo6iw-qcacKMoR-BDX-A25uES1N9O23u0ipIeNE3AU,61728
 diffsynth_engine/configs/__init__.py,sha256=f6Y-j_ZQs7bM4Lr7Mh9CXFEBrSNLc9k5GJyJqjLAGiY,1187
 diffsynth_engine/configs/controlnet.py,sha256=f3vclyP3lcAjxDGD9C1vevhqqQ7W2LL_c6Wye0uxk3Q,1180
-diffsynth_engine/configs/pipeline.py,sha256=zNvkWXW4rvC8UUuGX3fU4JQ_YuxKfMT3ek3CHbF8mnI,14385
+diffsynth_engine/configs/pipeline.py,sha256=xpVp6ePFdn2zPoE4aG5GOqCvdZ89hcl3c7bSuvaDeZA,14237
 diffsynth_engine/kernels/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 diffsynth_engine/models/__init__.py,sha256=8Ze7cSE8InetgXWTNb0neVA2Q44K7WlE-h7O-02m2sY,119
 diffsynth_engine/models/base.py,sha256=BA5vgMqfy_cjuL2OtXbrFD-Qg5xQnaumHpj5TabwSy8,2559
@@ -90,7 +90,7 @@ diffsynth_engine/models/basic/attention.py,sha256=iFxpvXdaEJZHddTTRuKL1grKb6beU5
 diffsynth_engine/models/basic/lora.py,sha256=PT-A3pwIuUrW2w3TnNlBPb1KRj70QYiBaoCvLnkR5cs,10652
 diffsynth_engine/models/basic/relative_position_emb.py,sha256=rCXOweZMcayVnNUVvBcYXMdhHS257B_PC8PZSWxvhNQ,2540
 diffsynth_engine/models/basic/timestep.py,sha256=WJODYqkSXEM0wcS42YkkfrGwxWt0e60zMTkDdUBQqBw,2810
-diffsynth_engine/models/basic/transformer_helper.py,sha256=v86ECt1F9LRreWxxvx7ciqORnBDRHsFOGv2wepDYLyk,4208
+diffsynth_engine/models/basic/transformer_helper.py,sha256=6K7A5bVnN2bOoq6I0IQf7RJBhSZUP4jNf1n7NPGu8zA,5287
 diffsynth_engine/models/basic/unet_helper.py,sha256=4lN6F80Ubm6ip4dkLVmB-Og5-Y25Wduhs9Q8qjyzK6E,9044
 diffsynth_engine/models/basic/video_sparse_attention.py,sha256=1pZRNDv4A5dV3H_-LBHfUZaXpPiglHxZypcP8Pos7rU,7738
 diffsynth_engine/models/flux/__init__.py,sha256=x0JoxL0CdiiVrY0BjkIrGinud7mcXecLleGO0km91XQ,686
@@ -110,7 +110,7 @@ diffsynth_engine/models/hunyuan3d/surface_extractor.py,sha256=b15mb1N4PYwAvDk1Gu
 diffsynth_engine/models/hunyuan3d/volume_decoder.py,sha256=sgflj1a8sIerqGSalBAVQOlyiIihkLOLXYysNbulCoQ,2355
 diffsynth_engine/models/qwen_image/__init__.py,sha256=X5pig621WEsDZ6L7HVkmYspV53-GDfs_la1ncaq_NFw,417
 diffsynth_engine/models/qwen_image/qwen2_5_vl.py,sha256=Eu-r-c42t_q74Qpwz21ToCGHpvSi7VND4B1EI0e-ePA,57748
-diffsynth_engine/models/qwen_image/qwen_image_dit.py,sha256=WWbuF9OuPOEzOaasAlQeJCpjhfpvqSh6pu0lvykCpk0,22624
+diffsynth_engine/models/qwen_image/qwen_image_dit.py,sha256=iJ-FinDyXa982Uao1is37bxUttyPu0Eldyd7qPJO_XQ,22582
 diffsynth_engine/models/qwen_image/qwen_image_dit_fbcache.py,sha256=LIv9X_BohKk5rcEzyl3ATLwd8MSoFX43wjkArQ68nq8,4828
 diffsynth_engine/models/qwen_image/qwen_image_vae.py,sha256=eO7f4YqiYXfw7NncBNFTu-xEvdJ5uKY-SnfP15QY0tE,38443
 diffsynth_engine/models/sd/__init__.py,sha256=hjoKRnwoXOLD0wude-w7I6wK5ak7ACMbnbkPuBB2oU0,380
@@ -144,19 +144,19 @@ diffsynth_engine/pipelines/__init__.py,sha256=jh-4LSJ0vqlXiT8BgFgRIQxuAr2atEPyHr
 diffsynth_engine/pipelines/base.py,sha256=BWW7LW0E2qwu8G-6bP3nmeO7VCQxC8srOo8tE4aKA4o,14993
 diffsynth_engine/pipelines/flux_image.py,sha256=B2MfZrgFKcSEY-Js8i61aqGdf1Inu0SSKQEZT75ygNs,50747
 diffsynth_engine/pipelines/hunyuan3d_shape.py,sha256=TNV0Wr09Dj2bzzlpua9WioCClOj3YiLfE6utI9aWL8A,8164
-diffsynth_engine/pipelines/qwen_image.py,sha256=y5FIzzFQuMx8PJXFuJ375r9Kdk_PP8rWUN08SFXP8SY,32940
+diffsynth_engine/pipelines/qwen_image.py,sha256=jZE1-y7YIgfeyP2okZpMvOGfX71MmvmfYTvkAvYrckI,33044
 diffsynth_engine/pipelines/sd_image.py,sha256=nr-Nhsnomq8CsUqhTM3i2l2zG01YjwXdfRXgr_bC3F0,17891
 diffsynth_engine/pipelines/sdxl_image.py,sha256=v7ZACGPb6EcBunL6e5E9jynSQjE7GQx8etEV-ZLP91g,21704
-diffsynth_engine/pipelines/utils.py,sha256=lk7sFGEk-fGjgadLpwwppHKG-yZ0RC-4ZmHW7pRRe8A,473
+diffsynth_engine/pipelines/utils.py,sha256=HZbJHErNJS1DhlwJKvZ9dY7Kh8Zdlsw3zE2e88TYGRY,2277
 diffsynth_engine/pipelines/wan_s2v.py,sha256=dffhWy_H53Kp-6by_XYvDQhUo0WZmIuip1rNd2s2kZ8,29343
 diffsynth_engine/pipelines/wan_video.py,sha256=IGImgfBLrwAHhJTbulROPd0LmnY-p9Ol0TBQKdeO0S4,29099
 diffsynth_engine/processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 diffsynth_engine/processor/canny_processor.py,sha256=hV30NlblTkEFUAmF_O-LJrNlGVM2SFrqq6okfF8VpOo,602
 diffsynth_engine/processor/depth_processor.py,sha256=dQvs3JsnyMbz4dyI9QoR8oO-mMFBFAgNvgqeCoaU5jk,1532
 diffsynth_engine/tokenizers/__init__.py,sha256=KxTna7UrkptrBU1j3zBYOi_8mVEWlcSMGZwK2ahuHNw,456
-diffsynth_engine/tokenizers/base.py,sha256=JX4C8FX7Y-glpszk39pUlS0QvqRyiKBYr-GWrrpee78,5149
+diffsynth_engine/tokenizers/base.py,sha256=skDQZXEYElc51nDi3b0vhtgm4PZQpQOIDz7fPSAYTHI,5261
 diffsynth_engine/tokenizers/clip.py,sha256=6yggDSRGZc34CKflO1DwTIisggv53aITe_h-YnsERzc,10695
-diffsynth_engine/tokenizers/qwen2.py,sha256=NDuE0hs1c4WfHOYUZ9KC0L-1vqOQ0Pj7ugWMqp24rys,9263
+diffsynth_engine/tokenizers/qwen2.py,sha256=siel195SbXOD7XVnJVKMkOtW8Vl1vXOiyWVXXToVKW0,9696
 diffsynth_engine/tokenizers/qwen2_vl_image_processor.py,sha256=7IBOn2m4AbL-URVrSrFY0k88r4_gkK_nuTQRAxorBes,6239
 diffsynth_engine/tokenizers/qwen2_vl_processor.py,sha256=Zyu8_5ETCjACQ8BX6jvVRWj37nZqJgtI0hesSUGm4-g,4145
 diffsynth_engine/tokenizers/t5.py,sha256=brhRFkXaTzE29hl_wDdcjQ3MCoL0pQslwHIRbMX_bNo,7442
@@ -187,8 +187,8 @@ diffsynth_engine/utils/video.py,sha256=8FCaeqIdUsWMgWI_6SO9SPynsToGcLCQAVYFTc4CD
 diffsynth_engine/utils/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 diffsynth_engine/utils/memory/linear_regression.py,sha256=oW_EQEw13oPoyUrxiL8A7Ksa5AuJ2ynI2qhCbfAuZbg,3930
 diffsynth_engine/utils/memory/memory_predcit_model.py,sha256=EXprSl_zlVjgfMWNXP-iw83Ot3hyMcgYaRPv-dvyL84,3943
-diffsynth_engine-0.6.1.dev23.dist-info/licenses/LICENSE,sha256=x7aBqQuVI0IYnftgoTPI_A0I_rjdjPPQkjnU6N2nikM,11346
-diffsynth_engine-0.6.1.dev23.dist-info/METADATA,sha256=xt9H8AC1fRxfdVEL2Xvn9zY8hSOY103JeIfptaTP5h4,1164
-diffsynth_engine-0.6.1.dev23.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-diffsynth_engine-0.6.1.dev23.dist-info/top_level.txt,sha256=6zgbiIzEHLbhgDKRyX0uBJOV3F6VnGGBRIQvSiYYn6w,17
-diffsynth_engine-0.6.1.dev23.dist-info/RECORD,,
+diffsynth_engine-0.6.1.dev24.dist-info/licenses/LICENSE,sha256=x7aBqQuVI0IYnftgoTPI_A0I_rjdjPPQkjnU6N2nikM,11346
+diffsynth_engine-0.6.1.dev24.dist-info/METADATA,sha256=CGGAx5MpKsDGAEZ9gFgc8tq9lINfB3SrxngslYjCt8k,1164
+diffsynth_engine-0.6.1.dev24.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+diffsynth_engine-0.6.1.dev24.dist-info/top_level.txt,sha256=6zgbiIzEHLbhgDKRyX0uBJOV3F6VnGGBRIQvSiYYn6w,17
+diffsynth_engine-0.6.1.dev24.dist-info/RECORD,,