cache-dit 0.2.27__py3-none-any.whl → 0.2.28__py3-none-any.whl

This diff shows the published contents of two package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
@@ -21,7 +21,7 @@ def enable_cache(
  max_continuous_cached_steps: int = -1,
  residual_diff_threshold: float = 0.08,
  # Cache CFG or not
- do_separate_cfg: bool = False,
+ enable_spearate_cfg: bool = False,
  cfg_compute_first: bool = False,
  cfg_diff_compute_separate: bool = True,
  # Hybird TaylorSeer
@@ -64,9 +64,9 @@ def enable_cache(
  residual_diff_threshold (`float`, *required*, defaults to 0.08):
  he value of residual diff threshold, a higher value leads to faster performance at the
  cost of lower precision.
- do_separate_cfg (`bool`, *required*, defaults to False):
+ enable_spearate_cfg (`bool`, *required*, defaults to False):
  Whether to do separate cfg or not, such as Wan 2.1, Qwen-Image. For model that fused CFG
- and non-CFG into single forward step, should set do_separate_cfg as False, for example:
+ and non-CFG into single forward step, should set enable_spearate_cfg as False, for example:
  CogVideoX, HunyuanVideo, Mochi, etc.
  cfg_compute_first (`bool`, *required*, defaults to False):
  Compute cfg forward first or not, default False, namely, 0, 2, 4, ..., -> non-CFG step;
@@ -89,7 +89,7 @@ def enable_cache(
  The order of taylorseer, higher values of n_derivatives will lead to longer computation time,
  but may improve precision significantly.
  other_cache_kwargs: (`dict`, *optional*, defaults to {})
- Other cache context kwargs, please check https://github.com/vipshop/cache-dit/blob/main/src/cache_dit/cache_factory/cache_context.py
+ Other cache context kwargs, please check https://github.com/vipshop/cache-dit/blob/main/src/cache_dit/cache_factory/cache_contexts/cache_context.py
  for more details.

  Examples:
@@ -104,6 +104,10 @@ def enable_cache(

  # Collect cache context kwargs
  cache_context_kwargs = other_cache_context_kwargs.copy()
+ if cache_type := cache_context_kwargs.get("cache_type", None):
+     if cache_type == CacheType.NONE:
+         return pipe_or_adapter
+
  cache_context_kwargs["cache_type"] = CacheType.DBCache
  cache_context_kwargs["Fn_compute_blocks"] = Fn_compute_blocks
  cache_context_kwargs["Bn_compute_blocks"] = Bn_compute_blocks
@@ -113,7 +117,7 @@ def enable_cache(
  max_continuous_cached_steps
  )
  cache_context_kwargs["residual_diff_threshold"] = residual_diff_threshold
- cache_context_kwargs["do_separate_cfg"] = do_separate_cfg
+ cache_context_kwargs["enable_spearate_cfg"] = enable_spearate_cfg
  cache_context_kwargs["cfg_compute_first"] = cfg_compute_first
  cache_context_kwargs["cfg_diff_compute_separate"] = (
  cfg_diff_compute_separate
@@ -123,14 +127,7 @@ def enable_cache(
  enable_encoder_taylorseer
  )
  cache_context_kwargs["taylorseer_cache_type"] = taylorseer_cache_type
- if "taylorseer_kwargs" in cache_context_kwargs:
-     cache_context_kwargs["taylorseer_kwargs"][
-         "n_derivatives"
-     ] = taylorseer_order
- else:
-     cache_context_kwargs["taylorseer_kwargs"] = {
-         "n_derivatives": taylorseer_order
-     }
+ cache_context_kwargs["taylorseer_order"] = taylorseer_order

  if isinstance(pipe_or_adapter, BlockAdapter):
  return CachedAdapter.apply(
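Taken together, the `enable_cache` hunks above make three caller-visible changes in 0.2.28: the CFG switch is renamed from `do_separate_cfg` to `enable_spearate_cfg` (spelled exactly as released), a `cache_type` of `CacheType.NONE` now returns the pipeline untouched, and `taylorseer_order` is forwarded directly instead of being packed into `taylorseer_kwargs["n_derivatives"]`. A minimal, hypothetical caller-side sketch of the new keywords; the checkpoint name and prompt are illustrative placeholders, not part of this diff:

```python
# Sketch of the 0.2.28 enable_cache() keywords shown in the hunks above.
# The pipeline checkpoint and prompt are illustrative placeholders.
import cache_dit
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image")

cache_dit.enable_cache(
    pipe,
    residual_diff_threshold=0.08,
    enable_spearate_cfg=True,  # renamed from do_separate_cfg in 0.2.27
    taylorseer_order=1,        # now forwarded as cache_context_kwargs["taylorseer_order"]
)

image = pipe("a cabin in the snow").images[0]
```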
cache_dit/utils.py CHANGED
@@ -7,7 +7,6 @@ from diffusers import DiffusionPipeline

  from typing import Dict, Any
  from cache_dit.logger import init_logger
- from cache_dit.cache_factory import CacheType


  logger = init_logger(__name__)
@@ -156,9 +155,15 @@ def strify(
  cache_options = stats.cache_options
  cached_steps = len(stats.cached_steps)
  elif isinstance(pipe_or_stats, dict):
+ from cache_dit.cache_factory import CacheType
+
  # Assume cache_context_kwargs
  cache_options = pipe_or_stats
  cached_steps = None
+ cache_type = cache_options.get("cache_type", CacheType.NONE)
+
+ if cache_type == CacheType.NONE:
+     return "NONE"
  else:
  raise ValueError(
  "Please set pipe_or_stats param as one of: "
@@ -168,17 +173,9 @@ def strify(
  if not cache_options:
  return "NONE"

- if cache_options.get("cache_type", None) != CacheType.DBCache:
-     return "NONE"
-
  def get_taylorseer_order():
  taylorseer_order = 0
- if "taylorseer_kwargs" in cache_options:
-     if "n_derivatives" in cache_options["taylorseer_kwargs"]:
-         taylorseer_order = cache_options["taylorseer_kwargs"][
-             "n_derivatives"
-         ]
- elif "taylorseer_order" in cache_options:
+ if "taylorseer_order" in cache_options:
  taylorseer_order = cache_options["taylorseer_order"]
  return taylorseer_order
 
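With the `strify` changes above, passing a `cache_context_kwargs` dict whose `cache_type` is `CacheType.NONE` now short-circuits to the string `"NONE"`, and the TaylorSeer order is read from the flat `taylorseer_order` key rather than from `taylorseer_kwargs["n_derivatives"]`. A rough usage sketch, assuming `strify` is exposed at the package top level and using illustrative values for the other keys:

```python
# Hypothetical strify() calls reflecting the 0.2.28 behavior diffed above;
# the key values are illustrative, only the key names come from this diff.
import cache_dit
from cache_dit.cache_factory import CacheType

print(cache_dit.strify({"cache_type": CacheType.NONE}))  # -> "NONE" (early return)

options = {
    "cache_type": CacheType.DBCache,
    "Fn_compute_blocks": 8,
    "Bn_compute_blocks": 0,
    "residual_diff_threshold": 0.08,
    "taylorseer_order": 1,  # read directly; the nested taylorseer_kwargs path was removed
}
print(cache_dit.strify(options))  # -> a short option tag built from these values
```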
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: cache_dit
- Version: 0.2.27
- Summary: 🤗 CacheDiT: An Unified and Training-free Cache Acceleration Toolbox for Diffusion Transformers
+ Version: 0.2.28
+ Summary: 🤗 A Unified and Training-free Cache Acceleration Toolbox for Diffusion Transformers
  Author: DefTruth, vipshop.com, etc.
  Maintainer: DefTruth, vipshop.com, etc
  Project-URL: Repository, https://github.com/vipshop/cache-dit.git
@@ -44,7 +44,7 @@ Dynamic: requires-python
  <img src=https://github.com/vipshop/cache-dit/raw/main/assets/cache-dit-logo.png height="120">

  <p align="center">
- An <b>Unified</b> and Training-free <b>Cache Acceleration</b> Toolbox for <b>Diffusion Transformers</b> <br>
+ A <b>Unified</b> and Training-free <b>Cache Acceleration</b> Toolbox for <b>Diffusion Transformers</b> <br>
  ♥️ <b>Cache Acceleration</b> with <b>One-line</b> Code ~ ♥️
  </p>
  <div align='center'>
@@ -59,32 +59,28 @@ Dynamic: requires-python
  🔥<b><a href="#unified">Unified Cache APIs</a> | <a href="#dbcache">DBCache</a> | <a href="#taylorseer">Hybrid TaylorSeer</a> | <a href="#cfg">Hybrid Cache CFG</a></b>🔥
  </p>
  <p align="center">
- 🎉Now, <b>cache-dit</b> covers <b>All</b> mainstream <b>DiT-based</b> Diffusers' Pipelines</b>🎉<br>
+ 🎉Now, <b>cache-dit</b> covers <b>100%</b> Diffusers' <b>DiT-based</b> Pipelines🎉<br>
  🔥<b><a href="#supported">Qwen-Image</a> | <a href="#supported">FLUX.1</a> | <a href="#supported">Wan 2.1/2.2</a> | <a href="#supported"> ... </a> | <a href="#supported">CogVideoX</a></b>🔥
  </p>
  </div>

  ## 🔥News

- - [2025-09-01] 📚[**Hybird Forward Pattern**](#unified) is supported! Please check [FLUX.1-dev](./examples/run_flux_adapter.py) as an example.
- - [2025-08-29] 🔥</b>Covers <b>All</b> Diffusers' <b>DiT-based</b> Pipelines via **[BlockAdapter](#unified) + [Pattern Matching](#unified).**
- - [2025-08-26] 🎉[**Wan2.2**](https://github.com/Wan-Video) **1.8x⚡️** speedup with `cache-dit + compile`! Please check the [example](./examples/run_wan_2.2.py).
- - [2025-08-19] 🔥[**Qwen-Image-Edit**](https://github.com/QwenLM/Qwen-Image) **2x⚡️** speedup! Check the example at [run_qwen_image_edit.py](./examples/run_qwen_image_edit.py).
+ - [2025-09-03] 🎉[**Wan2.2-MoE**](https://github.com/Wan-Video) **2.4x⚡️** speedup! Please refer to [run_wan_2.2.py](./examples/pipeline/run_wan_2.2.py) as an example.
+ - [2025-08-19] 🔥[**Qwen-Image-Edit**](https://github.com/QwenLM/Qwen-Image) **2x⚡️** speedup! Check the example: [run_qwen_image_edit.py](./examples/pipeline/run_qwen_image_edit.py).
  - [2025-08-12] 🎉First caching mechanism in [QwenLM/Qwen-Image](https://github.com/QwenLM/Qwen-Image) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check this [PR](https://github.com/QwenLM/Qwen-Image/pull/61).
- - [2025-08-11] 🔥[**Qwen-Image**](https://github.com/QwenLM/Qwen-Image) **1.8x⚡️** speedup! Please refer [run_qwen_image.py](./examples/run_qwen_image.py) as an example.
- - [2025-08-10] 🔥[**FLUX.1-Kontext-dev**](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) is supported! Please refer [run_flux_kontext.py](./examples/run_flux_kontext.py) as an example.
- - [2025-07-18] 🎉First caching mechanism in [🤗huggingface/flux-fast](https://github.com/huggingface/flux-fast) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check the [PR](https://github.com/huggingface/flux-fast/pull/13).
- - [2025-07-13] **[🤗flux-faster](https://github.com/xlite-dev/flux-faster)** is released! **3.3x** speedup for FLUX.1 on NVIDIA L20 with **[cache-dit](https://github.com/vipshop/cache-dit)**.
+ - [2025-08-11] 🔥[**Qwen-Image**](https://github.com/QwenLM/Qwen-Image) **1.8x⚡️** speedup! Please refer to [run_qwen_image.py](./examples/pipeline/run_qwen_image.py) as an example.

- <!--
  <details>
  <summary> Previous News </summary>

+ - [2025-09-01] 📚[**Hybird Forward Pattern**](#unified) is supported! Please check [FLUX.1-dev](./examples/run_flux_adapter.py) as an example.
+ - [2025-08-29] 🔥</b>Covers <b>100%</b> Diffusers' <b>DiT-based</b> Pipelines: **[BlockAdapter](#unified) + [Pattern Matching](#unified).**
+ - [2025-08-10] 🔥[**FLUX.1-Kontext-dev**](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) is supported! Please refer [run_flux_kontext.py](./examples/pipeline/run_flux_kontext.py) as an example.
  - [2025-07-18] 🎉First caching mechanism in [🤗huggingface/flux-fast](https://github.com/huggingface/flux-fast) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check the [PR](https://github.com/huggingface/flux-fast/pull/13).
  - [2025-07-13] **[🤗flux-faster](https://github.com/xlite-dev/flux-faster)** is released! **3.3x** speedup for FLUX.1 on NVIDIA L20 with **[cache-dit](https://github.com/vipshop/cache-dit)**.

  </details>
- -->

  ## 📖Contents
 
@@ -93,6 +89,11 @@ Dynamic: requires-python
  - [⚙️Installation](#️installation)
  - [🔥Supported Models](#supported)
  - [🎉Unified Cache APIs](#unified)
+ - [📚Forward Pattern Matching](#unified)
+ - [🎉Cache with One-line Code](#unified)
+ - [🔥Automatic Block Adapter](#unified)
+ - [📚Hybird Forward Pattern](#unified)
+ - [🤖Cache Acceleration Stats](#unified)
  - [⚡️Dual Block Cache](#dbcache)
  - [🔥Hybrid TaylorSeer](#taylorseer)
  - [⚡️Hybrid Cache CFG](#cfg)
@@ -185,7 +186,7 @@ Currently, for any **Diffusion** models with **Transformer Blocks** that match t

  ### ♥️Cache Acceleration with One-line Code

- In most cases, you only need to call **one-line** of code, that is `cache_dit.enable_cache(...)`. After this API is called, you just need to call the pipe as normal. The `pipe` param can be **any** Diffusion Pipeline. Please refer to [Qwen-Image](./examples/run_qwen_image.py) as an example.
+ In most cases, you only need to call **one-line** of code, that is `cache_dit.enable_cache(...)`. After this API is called, you just need to call the pipe as normal. The `pipe` param can be **any** Diffusion Pipeline. Please refer to [Qwen-Image](./examples/pipeline/run_qwen_image.py) as an example.

  ```python
  import cache_dit
@@ -201,9 +202,9 @@ cache_dit.enable_cache(pipe)
  output = pipe(...)
  ```

- ### 🔥Automatic Block Adapter + 📚Hybird Forward Pattern
+ ### 🔥Automatic Block Adapter

- But in some cases, you may have a **modified** Diffusion Pipeline or Transformer that is not located in the diffusers library or not officially supported by **cache-dit** at this time. The **BlockAdapter** can help you solve this problems. Please refer to [🔥Qwen-Image w/ BlockAdapter](./examples/run_qwen_image_adapter.py) and [📚FLUX.1 w/ Hybird Forward Pattern](./examples/run_flux_adapter.py) for more details.
+ But in some cases, you may have a **modified** Diffusion Pipeline or Transformer that is not located in the diffusers library or not officially supported by **cache-dit** at this time. The **BlockAdapter** can help you solve this problems. Please refer to [🔥Qwen-Image w/ BlockAdapter](./examples/adapter/run_qwen_image_adapter.py) as an example.

  ```python
  from cache_dit import ForwardPattern, BlockAdapter
@@ -229,10 +230,16 @@ cache_dit.enable_cache(
  forward_pattern=ForwardPattern.Pattern_1,
  ),
  )
+ ```
+ For such situations, **BlockAdapter** can help you quickly apply various cache acceleration features to your own Diffusion Pipelines and Transformers. Please check the [📚BlockAdapter.md](./docs/BlockAdapter.md) for more details.
+
+ ### 📚Hybird Forward Pattern
+
+ Sometimes, a Transformer class will contain more than one transformer `blocks`. For example, **FLUX.1** (HiDream, Chroma, etc) contains transformer_blocks and single_transformer_blocks (with different forward patterns). The **BlockAdapter** can also help you solve this problem. Please refer to [📚FLUX.1](./examples/adapter/run_flux_adapter.py) as an example.

- # cache-dit supported 📚Hybird Forward Pattern, for example:
+ ```python
  # For diffusers <= 0.34.0, FLUX.1 transformer_blocks and
- # single_transformer_blocks has different forward pattern.
+ # single_transformer_blocks have different forward patterns.
  cache_dit.enable_cache(
  BlockAdapter(
  pipe=pipe, # FLUX.1, etc.
@@ -252,7 +259,6 @@ cache_dit.enable_cache(
  ),
  )
  ```
- For such situations, **BlockAdapter** can help you quickly apply various cache acceleration features to your own Diffusion Pipelines and Transformers. Please check the [📚BlockAdapter.md](./docs/BlockAdapter.md) for more details.

  ### 🤖Cache Acceleration Stats Summary
 
@@ -377,7 +383,7 @@ cache_dit.enable_cache(

  <div id="cfg"></div>

- cache-dit supports caching for **CFG (classifier-free guidance)**. For models that fuse CFG and non-CFG into a single forward step, or models that do not include CFG (classifier-free guidance) in the forward step, please set `do_separate_cfg` param to **False (default)**. Otherwise, set it to True. For examples:
+ cache-dit supports caching for **CFG (classifier-free guidance)**. For models that fuse CFG and non-CFG into a single forward step, or models that do not include CFG (classifier-free guidance) in the forward step, please set `enable_spearate_cfg` param to **False (default)**. Otherwise, set it to True. For examples:

  ```python
  cache_dit.enable_cache(
@@ -385,10 +391,10 @@ cache_dit.enable_cache(
  ...,
  # CFG: classifier free guidance or not
  # For model that fused CFG and non-CFG into single forward step,
- # should set do_separate_cfg as False. For example, set it as True
+ # should set enable_spearate_cfg as False. For example, set it as True
  # for Wan 2.1/Qwen-Image and set it as False for FLUX.1, HunyuanVideo,
  # CogVideoX, Mochi, LTXVideo, Allegro, CogView3Plus, EasyAnimate, SD3, etc.
- do_separate_cfg=True, # Wan 2.1, Qwen-Image, CogView4, Cosmos, SkyReelsV2, etc.
+ enable_spearate_cfg=True, # Wan 2.1, Qwen-Image, CogView4, Cosmos, SkyReelsV2, etc.
  # Compute cfg forward first or not, default False, namely,
  # 0, 2, 4, ..., -> non-CFG step; 1, 3, 5, ... -> CFG step.
  cfg_compute_first=False,
@@ -463,7 +469,7 @@ The **cache-dit** codebase is adapted from FBCache. Special thanks to their exce

  ```BibTeX
  @misc{cache-dit@2025,
- title={cache-dit: An Unified and Training-free Cache Acceleration Toolbox for Diffusion Transformers},
+ title={cache-dit: A Unified and Training-free Cache Acceleration Toolbox for Diffusion Transformers},
  url={https://github.com/vipshop/cache-dit.git},
  note={Open-source software available at https://github.com/vipshop/cache-dit.git},
  author={vipshop.com},
@@ -1,25 +1,25 @@
- cache_dit/__init__.py,sha256=-ENQmZ9rQfIlyiyyL1M_i_7dmxdp7EIXk3Q0ijccweE,1140
- cache_dit/_version.py,sha256=sQt3vJQXfZVL852h4ZP8iWmYkhYM4zSob72-HinVTME,706
+ cache_dit/__init__.py,sha256=V4jCkTic4XvWojCUqYcjlvxiNM2DjGQbOLk6R-tAx2A,1191
+ cache_dit/_version.py,sha256=hCl1MKhh249NDbigjeJY-mrKYvjPFbJ7oklAepBQrto,706
  cache_dit/logger.py,sha256=0zsu42hN-3-rgGC_C29ms1IvVpV4_b4_SwJCKSenxBE,4304
- cache_dit/utils.py,sha256=d3aE6dybxu8zVTskfPvZIDXqtA8f7WZQuvhVrE9iUq8,7336
+ cache_dit/utils.py,sha256=pb5298XKmaZDoHwyteYRhixAG_0DGWrvvaObeShIhOM,7146
  cache_dit/cache_factory/.gitignore,sha256=5Cb-qT9wsTUoMJ7vACDF7ZcLpAXhi5v-xdcWSRit988,23
- cache_dit/cache_factory/__init__.py,sha256=8IqCdKEzqJGGlAcmgn4ySXmpyZE0BzUA7xBrZHpYGb4,899
- cache_dit/cache_factory/cache_adapters.py,sha256=wTLgJAxAJLn7iF6ylWvfaxWT9dhQ977AF5t1cdYHFOg,9857
- cache_dit/cache_factory/cache_interface.py,sha256=-PI7Oph2TDUCE6sviudV75jQ36p8WymLSSlyhHazL4k,8369
+ cache_dit/cache_factory/__init__.py,sha256=M8q9furJOq2AZcLHRuCXZCjR9fNSELoEYdsCofIjMAo,1037
+ cache_dit/cache_factory/cache_adapters.py,sha256=q7MxY44qw90h449Gr8W5iJjSwXPJR-YIyRmu_KJnQo0,13284
+ cache_dit/cache_factory/cache_interface.py,sha256=2jcuTZ4D_P0M5pSw0z3BMPalobYen3YO1yKvRjaQjdQ,8332
  cache_dit/cache_factory/cache_types.py,sha256=FIFa6ZBfvvSMMHyBBhvarvgg2Y2wbRgITcG_uGylGe0,991
  cache_dit/cache_factory/forward_pattern.py,sha256=FumlCuZ-TSmSYH0hGBHctSJ-oGLCftdZjLygqhsmdR4,2258
  cache_dit/cache_factory/utils.py,sha256=XkVM9AXcB9zYq8-S8QKAsGz80r3tA6U3lBNGDGeHOe4,1871
- cache_dit/cache_factory/block_adapters/__init__.py,sha256=wtbQLqTCyegAzDjBn3qVmg80Q6SH7WE8IxSYh-a6Chw,16935
- cache_dit/cache_factory/block_adapters/block_adapters.py,sha256=Q1BCk96ibWOuyCI_JO_qkQ-SCoCs0J4UtSFupaZ8jeU,11560
+ cache_dit/cache_factory/block_adapters/__init__.py,sha256=UFuHxNR7Y0RZoCl97wn0u2d_2rj8PzNsWfzgda5AoKM,17395
+ cache_dit/cache_factory/block_adapters/block_adapters.py,sha256=WsqGUDSDU_5-pIXwDqAK_k4a-4jgbFGoLCoF6kAjLt8,19198
  cache_dit/cache_factory/block_adapters/block_registers.py,sha256=99ouWioxldlZJYQWhcUkOu94f8vO9V9QGzVNhKWtyO4,2005
- cache_dit/cache_factory/cache_blocks/__init__.py,sha256=yqZZLNM9NN8SHv9bb1jPAaRAfb_jQ77Km8nMBJpfhpI,777
+ cache_dit/cache_factory/cache_blocks/__init__.py,sha256=OWjnpJxA8EJVoRzuyb5miuiRphUFj831-bbtWsTDjnM,2750
  cache_dit/cache_factory/cache_blocks/pattern_0_1_2.py,sha256=ElMps6_7uI74tSF9GDR_dEI0bZEhdzcepM29xFWnYo8,428
- cache_dit/cache_factory/cache_blocks/pattern_3_4_5.py,sha256=BD-bJdFleN7B2JLrNc90s9f-wQon9Ud5JUV7fBaaLmg,9902
- cache_dit/cache_factory/cache_blocks/pattern_base.py,sha256=2FPQBnX4yroH4i5-qb8yPyAN523hrGc-ozGsv7dENQU,19809
- cache_dit/cache_factory/cache_blocks/utils.py,sha256=DM4YEtbRVV-_NsbFWjI-WZPBbSDc4Gl366AgFz06L_s,770
- cache_dit/cache_factory/cache_contexts/__init__.py,sha256=KqD3XSAnUlaa2wzuOhG0q8A3Xf0m5rsOEihCNLjsdfU,153
- cache_dit/cache_factory/cache_contexts/cache_context.py,sha256=6rtLjAPX5zVrmBXYPHOGQuyIR0fuDxrqsRiWV9A1h8M,43135
- cache_dit/cache_factory/cache_contexts/cache_manager.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cache_dit/cache_factory/cache_blocks/pattern_3_4_5.py,sha256=CtBr6nvtAW8SAeEwPwiwWtPgrmwyb5ukb-j3IwFULJU,9953
+ cache_dit/cache_factory/cache_blocks/pattern_base.py,sha256=XSDy3hsaKbAZPGZY92YgGA0qLgjQyIX8irQkb2R5T2c,20331
+ cache_dit/cache_factory/cache_blocks/utils.py,sha256=wfreGvtK22hDnXuw0z0hUw-9ywu91FnExfPkP8ZzlkA,891
+ cache_dit/cache_factory/cache_contexts/__init__.py,sha256=rqnJ5__zqnpVHK5A1OqWILpNh5Ss-0ZDTGgtxZMKGGo,250
+ cache_dit/cache_factory/cache_contexts/cache_context.py,sha256=N88WLdd4KE9DuMWmpX8URcF55E2zWNwcKMxgVYkxMJY,13691
+ cache_dit/cache_factory/cache_contexts/cache_manager.py,sha256=wSghuX93KmCxE4pFEVKuyrO0Jt5STu_x4CxypS2EdxI,34276
  cache_dit/cache_factory/cache_contexts/taylorseer.py,sha256=etSUIZzDvqW3ScKCbccTPcFaSmxV1T-xAXdk-p3e3wk,3802
  cache_dit/cache_factory/patch_functors/__init__.py,sha256=yK05iONMGILsTZ83ynrUUJtiJKJ_FDjxmVIzRLy416s,252
  cache_dit/cache_factory/patch_functors/functor_base.py,sha256=Ahk0fTfrHgNdEl-9JSkACvfyyv9G-Ei5OSz7XBIlX5o,357
@@ -39,9 +39,9 @@ cache_dit/quantize/__init__.py,sha256=kWYoMAyZgBXu9BJlZjTQ0dRffW9GqeeY9_iTkXrb70
  cache_dit/quantize/quantize_ao.py,sha256=mGspqYgQtenl3QnKPtsSYsSD7LbVX93f1M940bhXKLU,6066
  cache_dit/quantize/quantize_interface.py,sha256=2s_R7xPSKuJeFpEGeLwRxnq_CqJcBG3a3lzyW5wh-UM,1241
  cache_dit/quantize/quantize_svdq.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- cache_dit-0.2.27.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
- cache_dit-0.2.27.dist-info/METADATA,sha256=DN_iaPFoqnieAUKirJp-A0eLdYiiABAO8TY1EyCh8ss,23150
- cache_dit-0.2.27.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- cache_dit-0.2.27.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
- cache_dit-0.2.27.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
- cache_dit-0.2.27.dist-info/RECORD,,
+ cache_dit-0.2.28.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
+ cache_dit-0.2.28.dist-info/METADATA,sha256=03FPh4nIDfjSFMfkDz-sWr2g3l30UsQek8VjQ6TPn8g,23204
+ cache_dit-0.2.28.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ cache_dit-0.2.28.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
+ cache_dit-0.2.28.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
+ cache_dit-0.2.28.dist-info/RECORD,,