cache-dit 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cache-dit might be problematic. Click here for more details.

cache_dit/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
17
17
  __version_tuple__: VERSION_TUPLE
18
18
  version_tuple: VERSION_TUPLE
19
19
 
20
- __version__ = version = '0.2.4'
21
- __version_tuple__ = version_tuple = (0, 2, 4)
20
+ __version__ = version = '0.2.5'
21
+ __version_tuple__ = version_tuple = (0, 2, 5)
@@ -71,12 +71,19 @@ class DBCacheContext:
71
71
  taylorseer_kwargs: Dict[str, Any] = dataclasses.field(default_factory=dict)
72
72
  taylorseer: Optional[TaylorSeer] = None
73
73
  encoder_tarlorseer: Optional[TaylorSeer] = None
74
+
74
75
  # Support do_separate_classifier_free_guidance, such as Wan 2.1
75
76
  # For model that fused CFG and non-CFG into single forward step,
76
77
  # should set do_separate_classifier_free_guidance as False. For
77
- # example: CogVideoX
78
+ # example: CogVideoX, HunyuanVideo, Mochi.
78
79
  do_separate_classifier_free_guidance: bool = False
80
+ # Compute cfg forward first or not, default False, namely,
81
+ # 0, 2, 4, ..., -> non-CFG step; 1, 3, 5, ... -> CFG step.
79
82
  cfg_compute_first: bool = False
83
+ # Compute separate diff values for CFG and non-CFG step,
84
+ # default True. If False, we will use the computed diff from
85
+ # current non-CFG transformer step for current CFG step.
86
+ cfg_diff_compute_separate: bool = True
80
87
  cfg_taylorseer: Optional[TaylorSeer] = None
81
88
  cfg_encoder_taylorseer: Optional[TaylorSeer] = None
82
89
 
@@ -99,11 +106,17 @@ class DBCacheContext:
99
106
 
100
107
  @torch.compiler.disable
101
108
  def __post_init__(self):
109
+ # Some checks for settings
102
110
  if self.do_separate_classifier_free_guidance:
103
111
  assert self.enable_alter_cache is False, (
104
112
  "enable_alter_cache must set as False if "
105
113
  "do_separate_classifier_free_guidance is enabled."
106
114
  )
115
+ if self.cfg_diff_compute_separate:
116
+ assert self.cfg_compute_first is False, (
117
+ "cfg_compute_first must set as False if "
118
+ "cfg_diff_compute_separate is enabled."
119
+ )
107
120
 
108
121
  if "warmup_steps" not in self.taylorseer_kwargs:
109
122
  # If warmup_steps is not set in taylorseer_kwargs,
@@ -185,10 +198,17 @@ class DBCacheContext:
185
198
  # current step: incr step - 1
186
199
  self.transformer_executed_steps += 1
187
200
  if not self.do_separate_classifier_free_guidance:
188
- self.executed_steps = self.transformer_executed_steps
201
+ self.executed_steps += 1
189
202
  else:
190
- # 0,1 -> 0, 2,3 -> 1, ...
191
- self.executed_steps = self.transformer_executed_steps // 2
203
+ # 0,1 -> 0 + 1, 2,3 -> 1 + 1, ...
204
+ if not self.cfg_compute_first:
205
+ if not self.is_separate_classifier_free_guidance_step():
206
+ # transformer step: 0,2,4,...
207
+ self.executed_steps += 1
208
+ else:
209
+ if self.is_separate_classifier_free_guidance_step():
210
+ # transformer step: 0,2,4,...
211
+ self.executed_steps += 1
192
212
 
193
213
  if not self.enable_alter_cache:
194
214
  # 0 F 1 T 2 F 3 T 4 F 5 T ...
@@ -253,6 +273,7 @@ class DBCacheContext:
253
273
 
254
274
  @torch.compiler.disable
255
275
  def add_residual_diff(self, diff):
276
+ # step: executed_steps - 1, not transformer_steps - 1
256
277
  step = str(self.get_current_step())
257
278
  # Only add the diff if it is not already recorded for this step
258
279
  if not self.is_separate_classifier_free_guidance_step():
@@ -299,9 +320,9 @@ class DBCacheContext:
299
320
  return False
300
321
  if self.cfg_compute_first:
301
322
  # CFG steps: 0, 2, 4, 6, ...
302
- return self.get_current_transformer_step() % 2
323
+ return self.get_current_transformer_step() % 2 == 0
303
324
  # CFG steps: 1, 3, 5, 7, ...
304
- return not self.get_current_transformer_step() % 2
325
+ return self.get_current_transformer_step() % 2 != 0
305
326
 
306
327
  @torch.compiler.disable
307
328
  def is_in_warmup(self):
@@ -350,6 +371,28 @@ def get_current_step():
350
371
  return cache_context.get_current_step()
351
372
 
352
373
 
374
+ @torch.compiler.disable
375
+ def get_current_step_residual_diff():
376
+ cache_context = get_current_cache_context()
377
+ assert cache_context is not None, "cache_context must be set before"
378
+ step = str(get_current_step())
379
+ residual_diffs = get_residual_diffs()
380
+ if step in residual_diffs:
381
+ return residual_diffs[step]
382
+ return None
383
+
384
+
385
+ @torch.compiler.disable
386
+ def get_current_step_cfg_residual_diff():
387
+ cache_context = get_current_cache_context()
388
+ assert cache_context is not None, "cache_context must be set before"
389
+ step = str(get_current_step())
390
+ cfg_residual_diffs = get_cfg_residual_diffs()
391
+ if step in cfg_residual_diffs:
392
+ return cfg_residual_diffs[step]
393
+ return None
394
+
395
+
353
396
  @torch.compiler.disable
354
397
  def get_current_transformer_step():
355
398
  cache_context = get_current_cache_context()
@@ -586,6 +629,13 @@ def is_separate_classifier_free_guidance_step():
586
629
  return cache_context.is_separate_classifier_free_guidance_step()
587
630
 
588
631
 
632
+ @torch.compiler.disable
633
+ def cfg_diff_compute_separate():
634
+ cache_context = get_current_cache_context()
635
+ assert cache_context is not None, "cache_context must be set before"
636
+ return cache_context.cfg_diff_compute_separate
637
+
638
+
589
639
  _current_cache_context: DBCacheContext = None
590
640
 
591
641
 
@@ -686,38 +736,49 @@ def are_two_tensors_similar(
686
736
  add_residual_diff(-2.0)
687
737
  return False
688
738
 
689
- # Find the most significant token through t1 and t2, and
690
- # consider the diff of the significant token. The more significant,
691
- # the more important.
692
- condition_thresh = get_important_condition_threshold()
693
- if condition_thresh > 0.0:
694
- raw_diff = (t1 - t2).abs() # [B, seq_len, d]
695
- token_m_df = raw_diff.mean(dim=-1) # [B, seq_len]
696
- token_m_t1 = t1.abs().mean(dim=-1) # [B, seq_len]
697
- # D = (t1 - t2) / t1 = 1 - (t2 / t1), if D = 0, then t1 = t2.
698
- token_diff = token_m_df / token_m_t1 # [B, seq_len]
699
- condition = token_diff > condition_thresh # [B, seq_len]
700
- if condition.sum() > 0:
701
- condition = condition.unsqueeze(-1) # [B, seq_len, 1]
702
- condition = condition.expand_as(raw_diff) # [B, seq_len, d]
703
- mean_diff = raw_diff[condition].mean()
704
- mean_t1 = t1[condition].abs().mean()
739
+ if all(
740
+ (
741
+ do_separate_classifier_free_guidance(),
742
+ is_separate_classifier_free_guidance_step(),
743
+ not cfg_diff_compute_separate(),
744
+ get_current_step_residual_diff() is not None,
745
+ )
746
+ ):
747
+ # Reuse computed diff value from non-CFG step
748
+ diff = get_current_step_residual_diff()
749
+ else:
750
+ # Find the most significant token through t1 and t2, and
751
+ # consider the diff of the significant token. The more significant,
752
+ # the more important.
753
+ condition_thresh = get_important_condition_threshold()
754
+ if condition_thresh > 0.0:
755
+ raw_diff = (t1 - t2).abs() # [B, seq_len, d]
756
+ token_m_df = raw_diff.mean(dim=-1) # [B, seq_len]
757
+ token_m_t1 = t1.abs().mean(dim=-1) # [B, seq_len]
758
+ # D = (t1 - t2) / t1 = 1 - (t2 / t1), if D = 0, then t1 = t2.
759
+ token_diff = token_m_df / token_m_t1 # [B, seq_len]
760
+ condition = token_diff > condition_thresh # [B, seq_len]
761
+ if condition.sum() > 0:
762
+ condition = condition.unsqueeze(-1) # [B, seq_len, 1]
763
+ condition = condition.expand_as(raw_diff) # [B, seq_len, d]
764
+ mean_diff = raw_diff[condition].mean()
765
+ mean_t1 = t1[condition].abs().mean()
766
+ else:
767
+ mean_diff = (t1 - t2).abs().mean()
768
+ mean_t1 = t1.abs().mean()
705
769
  else:
770
+ # Use the mean of the absolute difference of the tensors
706
771
  mean_diff = (t1 - t2).abs().mean()
707
772
  mean_t1 = t1.abs().mean()
708
- else:
709
- # Use the mean of the absolute difference of the tensors
710
- mean_diff = (t1 - t2).abs().mean()
711
- mean_t1 = t1.abs().mean()
712
773
 
713
- if parallelized:
714
- mean_diff = DP.all_reduce_sync(mean_diff, "avg")
715
- mean_t1 = DP.all_reduce_sync(mean_t1, "avg")
774
+ if parallelized:
775
+ mean_diff = DP.all_reduce_sync(mean_diff, "avg")
776
+ mean_t1 = DP.all_reduce_sync(mean_t1, "avg")
716
777
 
717
- # D = (t1 - t2) / t1 = 1 - (t2 / t1), if D = 0, then t1 = t2.
718
- # Futher, if we assume that (H(t, 0) - H(t-1,0)) ~ 0, then,
719
- # H(t-1,n) ~ H(t ,n), which means the hidden states are similar.
720
- diff = (mean_diff / mean_t1).item()
778
+ # D = (t1 - t2) / t1 = 1 - (t2 / t1), if D = 0, then t1 = t2.
779
+ # Further, if we assume that (H(t, 0) - H(t-1,0)) ~ 0, then,
780
+ # H(t-1,n) ~ H(t ,n), which means the hidden states are similar.
781
+ diff = (mean_diff / mean_t1).item()
721
782
 
722
783
  if logger.isEnabledFor(logging.DEBUG):
723
784
  logger.debug(f"{prefix}, diff: {diff:.6f}, threshold: {threshold:.6f}")
@@ -727,6 +788,26 @@ def are_two_tensors_similar(
727
788
  return diff < threshold
728
789
 
729
790
 
791
+ @torch.compiler.disable
792
+ def _debugging_set_buffer(prefix):
793
+ if logger.isEnabledFor(logging.DEBUG):
794
+ logger.debug(
795
+ f"set {prefix}, "
796
+ f"transformer step: {get_current_transformer_step()}, "
797
+ f"executed step: {get_current_step()}"
798
+ )
799
+
800
+
801
+ @torch.compiler.disable
802
+ def _debugging_get_buffer(prefix):
803
+ if logger.isEnabledFor(logging.DEBUG):
804
+ logger.debug(
805
+ f"get {prefix}, "
806
+ f"transformer step: {get_current_transformer_step()}, "
807
+ f"executed step: {get_current_step()}"
808
+ )
809
+
810
+
730
811
  # Fn buffers
731
812
  @torch.compiler.disable
732
813
  def set_Fn_buffer(buffer: torch.Tensor, prefix: str = "Fn"):
@@ -737,30 +818,38 @@ def set_Fn_buffer(buffer: torch.Tensor, prefix: str = "Fn"):
737
818
  buffer = buffer[..., ::downsample_factor]
738
819
  buffer = buffer.contiguous()
739
820
  if is_separate_classifier_free_guidance_step():
821
+ _debugging_set_buffer(f"{prefix}_buffer_cfg")
740
822
  set_buffer(f"{prefix}_buffer_cfg", buffer)
741
823
  else:
824
+ _debugging_set_buffer(f"{prefix}_buffer")
742
825
  set_buffer(f"{prefix}_buffer", buffer)
743
826
 
744
827
 
745
828
  @torch.compiler.disable
746
829
  def get_Fn_buffer(prefix: str = "Fn"):
747
830
  if is_separate_classifier_free_guidance_step():
831
+ _debugging_get_buffer(f"{prefix}_buffer_cfg")
748
832
  return get_buffer(f"{prefix}_buffer_cfg")
833
+ _debugging_get_buffer(f"{prefix}_buffer")
749
834
  return get_buffer(f"{prefix}_buffer")
750
835
 
751
836
 
752
837
  @torch.compiler.disable
753
838
  def set_Fn_encoder_buffer(buffer: torch.Tensor, prefix: str = "Fn"):
754
839
  if is_separate_classifier_free_guidance_step():
840
+ _debugging_set_buffer(f"{prefix}_encoder_buffer_cfg")
755
841
  set_buffer(f"{prefix}_encoder_buffer_cfg", buffer)
756
842
  else:
843
+ _debugging_set_buffer(f"{prefix}_encoder_buffer")
757
844
  set_buffer(f"{prefix}_encoder_buffer", buffer)
758
845
 
759
846
 
760
847
  @torch.compiler.disable
761
848
  def get_Fn_encoder_buffer(prefix: str = "Fn"):
762
849
  if is_separate_classifier_free_guidance_step():
850
+ _debugging_get_buffer(f"{prefix}_encoder_buffer_cfg")
763
851
  return get_buffer(f"{prefix}_encoder_buffer_cfg")
852
+ _debugging_get_buffer(f"{prefix}_encoder_buffer")
764
853
  return get_buffer(f"{prefix}_encoder_buffer")
765
854
 
766
855
 
@@ -786,13 +875,17 @@ def set_Bn_buffer(buffer: torch.Tensor, prefix: str = "Bn"):
786
875
  "Falling back to default buffer retrieval."
787
876
  )
788
877
  if is_separate_classifier_free_guidance_step():
878
+ _debugging_set_buffer(f"{prefix}_buffer_cfg")
789
879
  set_buffer(f"{prefix}_buffer_cfg", buffer)
790
880
  else:
881
+ _debugging_set_buffer(f"{prefix}_buffer")
791
882
  set_buffer(f"{prefix}_buffer", buffer)
792
883
  else:
793
884
  if is_separate_classifier_free_guidance_step():
885
+ _debugging_set_buffer(f"{prefix}_buffer_cfg")
794
886
  set_buffer(f"{prefix}_buffer_cfg", buffer)
795
887
  else:
888
+ _debugging_set_buffer(f"{prefix}_buffer")
796
889
  set_buffer(f"{prefix}_buffer", buffer)
797
890
 
798
891
 
@@ -815,11 +908,15 @@ def get_Bn_buffer(prefix: str = "Bn"):
815
908
  )
816
909
  # Fallback to default buffer retrieval
817
910
  if is_separate_classifier_free_guidance_step():
911
+ _debugging_get_buffer(f"{prefix}_buffer_cfg")
818
912
  return get_buffer(f"{prefix}_buffer_cfg")
913
+ _debugging_get_buffer(f"{prefix}_buffer")
819
914
  return get_buffer(f"{prefix}_buffer")
820
915
  else:
821
916
  if is_separate_classifier_free_guidance_step():
917
+ _debugging_get_buffer(f"{prefix}_buffer_cfg")
822
918
  return get_buffer(f"{prefix}_buffer_cfg")
919
+ _debugging_get_buffer(f"{prefix}_buffer")
823
920
  return get_buffer(f"{prefix}_buffer")
824
921
 
825
922
 
@@ -843,13 +940,17 @@ def set_Bn_encoder_buffer(buffer: torch.Tensor, prefix: str = "Bn"):
843
940
  "Falling back to default buffer retrieval."
844
941
  )
845
942
  if is_separate_classifier_free_guidance_step():
943
+ _debugging_set_buffer(f"{prefix}_encoder_buffer_cfg")
846
944
  set_buffer(f"{prefix}_encoder_buffer_cfg", buffer)
847
945
  else:
946
+ _debugging_set_buffer(f"{prefix}_encoder_buffer")
848
947
  set_buffer(f"{prefix}_encoder_buffer", buffer)
849
948
  else:
850
949
  if is_separate_classifier_free_guidance_step():
950
+ _debugging_set_buffer(f"{prefix}_encoder_buffer_cfg")
851
951
  set_buffer(f"{prefix}_encoder_buffer_cfg", buffer)
852
952
  else:
953
+ _debugging_set_buffer(f"{prefix}_encoder_buffer")
853
954
  set_buffer(f"{prefix}_encoder_buffer", buffer)
854
955
 
855
956
 
@@ -872,11 +973,15 @@ def get_Bn_encoder_buffer(prefix: str = "Bn"):
872
973
  )
873
974
  # Fallback to default buffer retrieval
874
975
  if is_separate_classifier_free_guidance_step():
976
+ _debugging_get_buffer(f"{prefix}_encoder_buffer_cfg")
875
977
  return get_buffer(f"{prefix}_encoder_buffer_cfg")
978
+ _debugging_get_buffer(f"{prefix}_encoder_buffer")
876
979
  return get_buffer(f"{prefix}_encoder_buffer")
877
980
  else:
878
981
  if is_separate_classifier_free_guidance_step():
982
+ _debugging_get_buffer(f"{prefix}_encoder_buffer_cfg")
879
983
  return get_buffer(f"{prefix}_encoder_buffer_cfg")
984
+ _debugging_get_buffer(f"{prefix}_encoder_buffer")
880
985
  return get_buffer(f"{prefix}_encoder_buffer")
881
986
 
882
987
 
@@ -370,8 +370,8 @@ def apply_prev_hidden_states_residual(
370
370
  hidden_states = hidden_states_residual + hidden_states
371
371
 
372
372
  hidden_states = hidden_states.contiguous()
373
- # NOTE: We should also support taylorseer for
374
- # encoder_hidden_states approximation. Please
373
+ # NOTE: We should also support taylorseer for
374
+ # encoder_hidden_states approximation. Please
375
375
  # use DBCache instead.
376
376
  else:
377
377
  hidden_states_residual = get_hidden_states_residual()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cache_dit
3
- Version: 0.2.4
3
+ Version: 0.2.5
4
4
  Summary: 🤗 CacheDiT: A Training-free and Easy-to-use Cache Acceleration Toolbox for Diffusion Transformers
5
5
  Author: DefTruth, vipshop.com, etc.
6
6
  Maintainer: DefTruth, vipshop.com, etc
@@ -44,7 +44,7 @@ Dynamic: requires-python
44
44
  <img src=https://img.shields.io/badge/PyPI-pass-brightgreen.svg >
45
45
  <img src=https://static.pepy.tech/badge/cache-dit >
46
46
  <img src=https://img.shields.io/badge/Python-3.10|3.11|3.12-9cf.svg >
47
- <img src=https://img.shields.io/badge/Release-v0.2.2-brightgreen.svg >
47
+ <img src=https://img.shields.io/badge/Release-v0.2-brightgreen.svg >
48
48
  </div>
49
49
  <p align="center">
50
50
  DeepCache is for UNet not DiT. Most DiT cache speedups are complex and not training-free. CacheDiT offers <br>a set of training-free cache accelerators for DiT: <b>🔥<a href="#dbcache">DBCache</a>, <a href="#dbprune">DBPrune</a>, <a href="#taylorseer">TaylorSeer</a>, <a href="#fbcache">FBCache</a></b>, etc🔥
@@ -169,7 +169,7 @@ The **CacheDiT** codebase is adapted from [FBCache](https://github.com/chengzeyi
169
169
  You can install the stable release of `cache-dit` from PyPI:
170
170
 
171
171
  ```bash
172
- pip3 install cache-dit
172
+ pip3 install -U cache-dit
173
173
  ```
174
174
  Or you can install the latest develop version from GitHub:
175
175
 
@@ -181,11 +181,13 @@ pip3 install git+https://github.com/vipshop/cache-dit.git
181
181
 
182
182
  <div id="supported"></div>
183
183
 
184
- - [🚀FLUX.1](https://github.com/vipshop/cache-dit/raw/main/examples)
185
- - [🚀Mochi](https://github.com/vipshop/cache-dit/raw/main/examples)
184
+ - [🚀FLUX.1-dev](https://github.com/vipshop/cache-dit/raw/main/examples)
185
+ - [🚀FLUX.1-Fill-dev](https://github.com/vipshop/cache-dit/raw/main/examples)
186
+ - [🚀mochi-1-preview](https://github.com/vipshop/cache-dit/raw/main/examples)
186
187
  - [🚀CogVideoX](https://github.com/vipshop/cache-dit/raw/main/examples)
187
188
  - [🚀CogVideoX1.5](https://github.com/vipshop/cache-dit/raw/main/examples)
188
- - [🚀Wan2.1](https://github.com/vipshop/cache-dit/raw/main/examples)
189
+ - [🚀Wan2.1-T2V](https://github.com/vipshop/cache-dit/raw/main/examples)
190
+ - [🚀Wan2.1-FLF2V](https://github.com/vipshop/cache-dit/raw/main/examples)
189
191
  - [🚀HunyuanVideo](https://github.com/vipshop/cache-dit/raw/main/examples)
190
192
 
191
193
 
@@ -281,7 +283,7 @@ cache_options = {
281
283
  "taylorseer_kwargs": {
282
284
  "n_derivatives": 2, # default is 2.
283
285
  },
284
- "warmup_steps": 3, # n_derivatives + 1
286
+ "warmup_steps": 3, # prefer: >= n_derivatives + 1
285
287
  "residual_diff_threshold": 0.12,
286
288
  }
287
289
  ```
@@ -304,12 +306,23 @@ cache_options = {
304
306
 
305
307
  <div id="cfg"></div>
306
308
 
307
- CacheDiT supports caching for CFG (classifier-free guidance). For models that fuse CFG and non-CFG into a single forward step, or models that do not include CFG (classifier-free guidance) in the forward step, please set `do_separate_classifier_free_guidance` param to False. Otherwise, set it to True. Wan 2.1: True. FLUX.1, HunyunVideo, CogVideoX, Mochi: False.
309
+ CacheDiT supports caching for **CFG (classifier-free guidance)**. For models that fuse CFG and non-CFG into a single forward step, or models that do not include CFG (classifier-free guidance) in the forward step, please set `do_separate_classifier_free_guidance` param to **False (default)**. Otherwise, set it to True. For examples:
308
310
 
309
311
  ```python
310
312
  cache_options = {
313
+ # CFG: classifier free guidance or not
314
+ # For model that fused CFG and non-CFG into single forward step,
315
+ # should set do_separate_classifier_free_guidance as False.
316
+ # For example, set it as True for Wan 2.1 and set it as False
317
+ # for FLUX.1, HunyuanVideo, CogVideoX, Mochi.
311
318
  "do_separate_classifier_free_guidance": True, # Wan 2.1
319
+ # Compute cfg forward first or not, default False, namely,
320
+ # 0, 2, 4, ..., -> non-CFG step; 1, 3, 5, ... -> CFG step.
312
321
  "cfg_compute_first": False,
322
+ # Compute separate diff values for CFG and non-CFG step,
323
+ # default True. If False, we will use the computed diff from
324
+ # current non-CFG transformer step for current CFG step.
325
+ "cfg_diff_compute_separate": True,
313
326
  }
314
327
  ```
315
328
 
@@ -1,12 +1,12 @@
1
1
  cache_dit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- cache_dit/_version.py,sha256=1LUN_sRKOiFInoB6AlW6TYoQMCh1Z4KutwcHNvHcfB0,511
2
+ cache_dit/_version.py,sha256=N3oBwJUFmS-AwCjqOcSlRW4GvSq-uJJMaBvoGfv1-hM,511
3
3
  cache_dit/logger.py,sha256=0zsu42hN-3-rgGC_C29ms1IvVpV4_b4_SwJCKSenxBE,4304
4
4
  cache_dit/primitives.py,sha256=A2iG9YLot3gOsZSPp-_gyjqjLgJvWQRx8aitD4JQ23Y,3877
5
5
  cache_dit/cache_factory/__init__.py,sha256=5RNuhWakvvqrOV4vkqrEBA7d-V1LwcNSsjtW14mkqK8,5255
6
6
  cache_dit/cache_factory/taylorseer.py,sha256=LKSNo2ode69EVo9xrxjxAMEjz0yDGiGADeDYnEqddA8,3987
7
7
  cache_dit/cache_factory/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
8
8
  cache_dit/cache_factory/dual_block_cache/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
- cache_dit/cache_factory/dual_block_cache/cache_context.py,sha256=H7u5zIAdEjiYU0QvWYIMj3lKYI4D8cmDLy7eZ9tyoyU,66848
9
+ cache_dit/cache_factory/dual_block_cache/cache_context.py,sha256=wE_xYp7DRbgB-fD8dpr75o4Cvvl2s-jnT2fRyqWm_RM,71286
10
10
  cache_dit/cache_factory/dual_block_cache/diffusers_adapters/__init__.py,sha256=krNAICf-aS3JLmSG8vOB9tpLa04uYRcABsC8PMbVUKY,1870
11
11
  cache_dit/cache_factory/dual_block_cache/diffusers_adapters/cogvideox.py,sha256=fibkeU-FHa30BNT-uPV2Eqcd5IRli07EKb25tMDp23c,2270
12
12
  cache_dit/cache_factory/dual_block_cache/diffusers_adapters/flux.py,sha256=fddSpTHXU24COMGAY-Z21EmHHAEArZBv_-XLRFD6ADU,2625
@@ -22,7 +22,7 @@ cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/hunyuan_video.py,
22
22
  cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/mochi.py,sha256=zXgoRDDjus3a2WSjtNh4ERtQp20ceb6nzohHMDlo2zY,2265
23
23
  cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/wan.py,sha256=PA7nuLgfAelnaI8usQx0Kxi8XATzMapyR1WndEdFoZA,2604
24
24
  cache_dit/cache_factory/first_block_cache/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
25
- cache_dit/cache_factory/first_block_cache/cache_context.py,sha256=NeAfDJlJVVUAL4btax5_iOLTuue1x4qeXwk0pM-QH28,23219
25
+ cache_dit/cache_factory/first_block_cache/cache_context.py,sha256=tTPwhPLEA7LqGupps1Zy2MycCtLzs22wsW0yUhiiF-U,23217
26
26
  cache_dit/cache_factory/first_block_cache/diffusers_adapters/__init__.py,sha256=-FFgA2MoudEo7uDacg4aWgm1KwfLZFsEDTVxatgbq9M,2146
27
27
  cache_dit/cache_factory/first_block_cache/diffusers_adapters/cogvideox.py,sha256=qO5CWyurtwW30mvOe6cxeQPTSXLDlPJcezm72zEjDq8,2375
28
28
  cache_dit/cache_factory/first_block_cache/diffusers_adapters/flux.py,sha256=Dcd4OzABCtyQCZNX2KNnUTdVoO1E1ApM7P8gcVYzcK0,2733
@@ -33,8 +33,8 @@ cache_dit/compile/__init__.py,sha256=DfMdPleFFGADXLsr7zXui8BTz_y9futY6rNmNdh9y7k
33
33
  cache_dit/compile/utils.py,sha256=KU60xc474Anbj7Y_FLRFmNxEjVYLLXkhbtCLXO7o_Tc,3699
34
34
  cache_dit/custom_ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
35
35
  cache_dit/custom_ops/triton_taylorseer.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
36
- cache_dit-0.2.4.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
37
- cache_dit-0.2.4.dist-info/METADATA,sha256=1oDgkkUwGVfwX_jCyU0jHbQTVQDfL59OEbrUb_9SVF4,25442
38
- cache_dit-0.2.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
39
- cache_dit-0.2.4.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
40
- cache_dit-0.2.4.dist-info/RECORD,,
36
+ cache_dit-0.2.5.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
37
+ cache_dit-0.2.5.dist-info/METADATA,sha256=J37Waq-cMbuFfTrngXuxqouXpjHK9qhR_MZHlE2odmY,26249
38
+ cache_dit-0.2.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
39
+ cache_dit-0.2.5.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
40
+ cache_dit-0.2.5.dist-info/RECORD,,