cache-dit 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cache-dit might be problematic. Click here for more details.

cache_dit/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
28
28
  commit_id: COMMIT_ID
29
29
  __commit_id__: COMMIT_ID
30
30
 
31
- __version__ = version = '0.2.18'
32
- __version_tuple__ = version_tuple = (0, 2, 18)
31
+ __version__ = version = '0.2.20'
32
+ __version_tuple__ = version_tuple = (0, 2, 20)
33
33
 
34
34
  __commit_id__ = commit_id = None
@@ -631,7 +631,8 @@ class UnifiedCacheAdapter:
631
631
  pattern_id = list(unique_pattern_ids)[0]
632
632
  pattern = selected_patterns[pattern_id]
633
633
  logger.info(
634
- f"Match cache pattern: IN({pattern['IN']}, OUT({pattern['OUT']}))"
634
+ f"Match Block Forward Pattern: {transformer_blocks[0].__class__.__name__}"
635
+ f"\n IN({pattern['IN']}, \nOUT({pattern['OUT']}))"
635
636
  )
636
637
 
637
638
  return pattern_matched
@@ -230,7 +230,7 @@ def maybe_patch_flux_transformer(
230
230
  for block in blocks:
231
231
  if isinstance(block, FluxSingleTransformerBlock):
232
232
  forward_parameters = inspect.signature(
233
- blocks.forward
233
+ block.forward
234
234
  ).parameters.keys()
235
235
  if "encoder_hidden_states" not in forward_parameters:
236
236
  block.forward = __patch_single_forward__.__get__(block)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cache_dit
3
- Version: 0.2.18
3
+ Version: 0.2.20
4
4
  Summary: 🤗 CacheDiT: An Unified and Training-free Cache Acceleration Toolbox for Diffusion Transformers
5
5
  Author: DefTruth, vipshop.com, etc.
6
6
  Maintainer: DefTruth, vipshop.com, etc
@@ -40,10 +40,12 @@ Dynamic: requires-dist
40
40
  Dynamic: requires-python
41
41
 
42
42
  <div align="center">
43
+ <img src=https://github.com/vipshop/cache-dit/raw/main/assets/cache-dit-logo.png height="120">
44
+
43
45
  <p align="center">
44
- <h2>🤗 CacheDiT: An Unified and Training-free Cache Acceleration <br>Toolbox for Diffusion Transformers</h2>
46
+ A <b>Unified</b> and Training-free <b>Cache Acceleration</b> Toolbox for <b>Diffusion Transformers</b> <br>
47
+ ♥️ <b>Cache Acceleration</b> with <b>One-line</b> Code ~ ♥️
45
48
  </p>
46
- <img src=https://github.com/vipshop/cache-dit/raw/main/assets/cache-dit-v1.png >
47
49
  <div align='center'>
48
50
  <img src=https://img.shields.io/badge/Language-Python-brightgreen.svg >
49
51
  <img src=https://img.shields.io/badge/PRs-welcome-9cf.svg >
@@ -52,26 +54,36 @@ Dynamic: requires-python
52
54
  <img src=https://img.shields.io/badge/Python-3.10|3.11|3.12-9cf.svg >
53
55
  <img src=https://img.shields.io/badge/Release-v0.2-brightgreen.svg >
54
56
  </div>
57
+ <p align="center">
55
58
  🔥<b><a href="#unified">Unified Cache APIs</a> | <a href="#dbcache">DBCache</a> | <a href="#taylorseer">Hybrid TaylorSeer</a> | <a href="#cfg">Hybrid Cache CFG</a></b>🔥
56
- </div>
57
-
58
- <div align="center">
59
+ </p>
59
60
  <p align="center">
60
- ♥️ Cache <b>Acceleration</b> with <b>One-line</b> Code ~ ♥️
61
+ 🎉Now, <b>Diffusers'</b> Pipelines <b>Coverage Ratio: 60%~70%</b>🎉
61
62
  </p>
62
- </div>
63
+ </div>
63
64
 
65
+ <!--
66
+ <img src=https://github.com/vipshop/cache-dit/raw/main/assets/cache-dit-v1.png >
67
+ <img src=https://github.com/vipshop/cache-dit/raw/main/assets/dbcache-v1.png height="320px">
68
+ <img src=https://github.com/vipshop/cache-dit/raw/main/assets/dbcache-v1.png>
69
+ <img src=https://github.com/vipshop/cache-dit/raw/main/assets/patterns.png>
70
+ -->
64
71
 
65
72
  ## 🔥News
66
73
 
67
- - [2025-08-19] 🔥[**Qwen-Image-Edit**](https://github.com/QwenLM/Qwen-Image) **~2x⚡️** speedup! Check example [run_qwen_image_edit.py](./examples/run_qwen_image_edit.py).
68
- - [2025-08-18] 🎉Early **[Unified Cache APIs](#unified)** released! Check [Qwen-Image w/ UAPI](./examples/run_qwen_image_uapi.py) as an example.
74
+ - [2025-08-19] 🔥[**Qwen-Image-Edit**](https://github.com/QwenLM/Qwen-Image) **2x⚡️** speedup! Check example [run_qwen_image_edit.py](./examples/run_qwen_image_edit.py).
69
75
  - [2025-08-12] 🎉First caching mechanism in [QwenLM/Qwen-Image](https://github.com/QwenLM/Qwen-Image) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check the [PR](https://github.com/QwenLM/Qwen-Image/pull/61).
70
- - [2025-08-11] 🔥[**Qwen-Image**](https://github.com/QwenLM/Qwen-Image) **~1.8x⚡️** speedup! Please refer [run_qwen_image.py](./examples/run_qwen_image.py) as an example.
71
- - [2025-08-10] 🔥[FLUX.1-Kontext-dev](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) is supported! Please refer [run_flux_kontext.py](./examples/run_flux_kontext.py) as an example.
76
+ - [2025-08-11] 🔥[**Qwen-Image**](https://github.com/QwenLM/Qwen-Image) **1.8x⚡️** speedup! Please refer [run_qwen_image.py](./examples/run_qwen_image.py) as an example.
77
+
78
+ <details>
79
+ <summary> Previous News </summary>
80
+
81
+ - [2025-08-10] 🔥[**FLUX.1-Kontext-dev**](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) is supported! Please refer [run_flux_kontext.py](./examples/run_flux_kontext.py) as an example.
72
82
  - [2025-07-18] 🎉First caching mechanism in [🤗huggingface/flux-fast](https://github.com/huggingface/flux-fast) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check the [PR](https://github.com/huggingface/flux-fast/pull/13).
73
- - [2025-07-13] **[🤗flux-faster](https://github.com/xlite-dev/flux-faster)** is released! **3.3x** speedup for FLUX.1 on NVIDIA L20 with `cache-dit`.
83
+ - [2025-07-13] **[🤗flux-faster](https://github.com/xlite-dev/flux-faster)** is released! **3.3x** speedup for FLUX.1 on NVIDIA L20 with **[cache-dit](https://github.com/vipshop/cache-dit)**.
74
84
 
85
+ </details>
86
+
75
87
  ## 📖Contents
76
88
 
77
89
  <div id="contents"></div>
@@ -82,7 +94,7 @@ Dynamic: requires-python
82
94
  - [⚡️Dual Block Cache](#dbcache)
83
95
  - [🔥Hybrid TaylorSeer](#taylorseer)
84
96
  - [⚡️Hybrid Cache CFG](#cfg)
85
- - [🔥Torch Compile](#compile)
97
+ - [⚙️Torch Compile](#compile)
86
98
  - [🛠Metrics CLI](#metrics)
87
99
 
88
100
  ## ⚙️Installation
@@ -111,11 +123,15 @@ Currently, **cache-dit** library supports almost **Any** Diffusion Transformers
111
123
  - [🚀FLUX.1-dev](https://github.com/vipshop/cache-dit/raw/main/examples)
112
124
  - [🚀FLUX.1-Fill-dev](https://github.com/vipshop/cache-dit/raw/main/examples)
113
125
  - [🚀FLUX.1-Kontext-dev](https://github.com/vipshop/cache-dit/raw/main/examples)
114
- - [🚀mochi-1-preview](https://github.com/vipshop/cache-dit/raw/main/examples)
115
126
  - [🚀CogVideoX](https://github.com/vipshop/cache-dit/raw/main/examples)
116
127
  - [🚀CogVideoX1.5](https://github.com/vipshop/cache-dit/raw/main/examples)
117
128
  - [🚀Wan2.1-T2V](https://github.com/vipshop/cache-dit/raw/main/examples)
118
129
  - [🚀Wan2.1-FLF2V](https://github.com/vipshop/cache-dit/raw/main/examples)
130
+
131
+ <details>
132
+ <summary> More Pipelines </summary>
133
+
134
+ - [🚀mochi-1-preview](https://github.com/vipshop/cache-dit/raw/main/examples)
119
135
  - [🚀HunyuanVideo](https://github.com/vipshop/cache-dit/raw/main/examples)
120
136
  - [🚀LTXVideo](https://github.com/vipshop/cache-dit/raw/main/examples)
121
137
  - [🚀Allegro](https://github.com/vipshop/cache-dit/raw/main/examples)
@@ -125,33 +141,29 @@ Currently, **cache-dit** library supports almost **Any** Diffusion Transformers
125
141
  - [🚀EasyAnimate](https://github.com/vipshop/cache-dit/raw/main/examples)
126
142
  - [🚀SkyReelsV2](https://github.com/vipshop/cache-dit/raw/main/examples)
127
143
  - [🚀SD3](https://github.com/vipshop/cache-dit/raw/main/examples)
144
+
145
+ </details>
128
146
 
129
147
  ## 🎉Unified Cache APIs
130
148
 
131
149
  <div id="unified"></div>
132
150
 
151
+ Currently, for any **Diffusion** model with **Transformer Blocks** that match the specific **Input/Output patterns**, we can use the **Unified Cache APIs** from **cache-dit**, namely, the `cache_dit.enable_cache(...)` API. The **Unified Cache APIs** are currently in the experimental phase; please stay tuned for updates. The supported patterns are listed as follows:
133
152
 
134
- Currently, for any **Diffusion** models with **Transformer Blocks** that match the specific **Input/Output pattern**, we can use the **Unified Cache APIs** from **cache-dit**. The supported patterns are listed as follows:
135
-
136
- ```bash
137
- (IN: hidden_states, encoder_hidden_states, ...) -> (OUT: hidden_states, encoder_hidden_states)
138
- (IN: hidden_states, encoder_hidden_states, ...) -> (OUT: encoder_hidden_states, hidden_states)
139
- (IN: hidden_states, encoder_hidden_states, ...) -> (OUT: hidden_states)
140
- (IN: hidden_states, ...) -> (OUT: hidden_states) # TODO, DiT, Lumina2, etc.
141
- ```
142
-
143
- Please refer to [Qwen-Image w/ UAPI](./examples/run_qwen_image_uapi.py) as an example. The `pipe` parameter can be **Any** Diffusion Pipelines. The **Unified Cache APIs** are currently in the experimental phase, please stay tuned for updates.
153
+ ![](https://github.com/vipshop/cache-dit/raw/main/assets/patterns.png)
144
154
 
155
+ After the `cache_dit.enable_cache(...)` API is called, you just need to call the pipe as normal. The `pipe` param can be **any** Diffusion Pipeline. Please refer to [Qwen-Image](./examples/run_qwen_image_uapi.py) as an example.
145
156
  ```python
146
157
  import cache_dit
147
- from diffusers import DiffusionPipeline # Can be [Any] Diffusion Pipeline
158
+ from diffusers import DiffusionPipeline
148
159
 
160
+ # can be any diffusion pipeline
149
161
  pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image")
150
162
 
151
- # One line code with default cache options.
163
+ # one line code with default cache options.
152
164
  cache_dit.enable_cache(pipe)
153
165
 
154
- # Or, enable cache with custom setting.
166
+ # or, enable cache with custom settings.
155
167
  cache_dit.enable_cache(
156
168
  pipe, transformer=pipe.transformer,
157
169
  blocks=pipe.transformer.transformer_blocks,
@@ -159,75 +171,35 @@ cache_dit.enable_cache(
159
171
  **cache_dit.default_options(),
160
172
  )
161
173
 
162
- # summary cache stats.
163
- cache_dit.summary(pipe)
164
- ```
165
-
166
- After finishing each inference of `pipe(...)`, you can call the `cache_dict.summary` API on pipe to get the details of the cache stats for the current inference (markdown table format). You can set `details` param as `True` to show more details of cache stats.
174
+ # just call the pipe as normal.
175
+ output = pipe(...)
167
176
 
168
- ```bash
169
- 🤗Cache Options: QwenImagePipeline
177
+ # then, summary the cache stats.
178
+ stats = cache_dit.summary(pipe)
179
+ ```
170
180
 
171
- {'Fn_compute_blocks': 8, 'Bn_compute_blocks': 0, 'non_compute_blocks_diff_threshold': 0.08, 'max_Fn_compute_blocks': -1, 'max_Bn_compute_blocks': -1, 'residual_diff_threshold': 0.12, 'l1_hidden_states_diff_threshold': None, 'important_condition_threshold': 0.0, 'enable_alter_cache': False, 'is_alter_cache': True, 'alter_residual_diff_threshold': 1.0, 'downsample_factor': 1, 'num_inference_steps': -1, 'warmup_steps': 8, 'max_cached_steps': -1, 'executed_steps': 0, 'transformer_executed_steps': 0, 'enable_taylorseer': True, 'enable_encoder_taylorseer': True, 'taylorseer_cache_type': 'residual', 'taylorseer': None, 'encoder_tarlorseer': None, 'do_separate_classifier_free_guidance': True, 'cfg_compute_first': False, 'cfg_diff_compute_separate': True, 'cfg_taylorseer': None, 'cfg_encoder_taylorseer': None, 'Fn_compute_blocks_ids': [], 'Bn_compute_blocks_ids': [], 'taylorseer_kwargs': {'n_derivatives': 4, 'warmup_steps': 8}}
181
+ After finishing each inference of `pipe(...)`, you can call the `cache_dit.summary(...)` API on the pipe to get the details of the cache stats for the current inference (markdown table format). You can set the `details` param to `True` to show more details of the cache stats.
172
182
 
183
+ ```python
173
184
  ⚡️Cache Steps and Residual Diffs Statistics: QwenImagePipeline
174
185
 
175
186
  | Cache Steps | Diffs P00 | Diffs P25 | Diffs P50 | Diffs P75 | Diffs P95 |
176
187
  |-------------|-----------|-----------|-----------|-----------|-----------|
177
188
  | 23 | 0.04 | 0.082 | 0.115 | 0.152 | 0.245 |
178
-
179
- ⚡️CFG Cache Steps and Residual Diffs Statistics: QwenImagePipeline
180
-
181
- | CFG Cache Steps | Diffs P00 | Diffs P25 | Diffs P50 | Diffs P75 | Diffs P95 |
182
- |-----------------|-----------|-----------|-----------|-----------|-----------|
183
- | 22 | 0.045 | 0.077 | 0.112 | 0.148 | 0.245 |
189
+ ...
184
190
  ```
185
191
 
186
192
  ## ⚡️DBCache: Dual Block Cache
187
193
 
188
194
  <div id="dbcache"></div>
189
195
 
190
- ![](https://github.com/vipshop/cache-dit/raw/main/assets/dbcache-v1.png)
191
-
192
-
193
- **DBCache**: **Dual Block Caching** for Diffusion Transformers. We have enhanced `FBCache` into a more general and customizable cache algorithm, namely `DBCache`, enabling it to achieve fully `UNet-style` cache acceleration for DiT models. Different configurations of compute blocks (**F8B12**, etc.) can be customized in DBCache. Moreover, it can be entirely **training**-**free**. DBCache can strike a perfect **balance** between performance and precision!
194
-
195
- <div align="center">
196
- <p align="center">
197
- DBCache, <b> L20x1 </b>, Steps: 28, "A cat holding a sign that says hello world with complex background"
198
- </p>
199
- </div>
200
-
201
- |Baseline(L20x1)|F1B0 (0.08)|F1B0 (0.20)|F8B8 (0.15)|F12B12 (0.20)|F16B16 (0.20)|
202
- |:---:|:---:|:---:|:---:|:---:|:---:|
203
- |24.85s|15.59s|8.58s|15.41s|15.11s|17.74s|
204
- |<img src=https://github.com/vipshop/cache-dit/raw/main/assets/NONE_R0.08_S0.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F1B0S1_R0.08_S11.png width=105px> | <img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F1B0S1_R0.2_S19.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F8B8S1_R0.15_S15.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F12B12S4_R0.2_S16.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F16B16S4_R0.2_S13.png width=105px>|
205
- |**Baseline(L20x1)**|**F1B0 (0.08)**|**F8B8 (0.12)**|**F8B12 (0.12)**|**F8B16 (0.20)**|**F8B20 (0.20)**|
206
- |27.85s|6.04s|5.88s|5.77s|6.01s|6.20s|
207
- |<img src=https://github.com/vipshop/cache-dit/raw/main/assets/TEXTURE_NONE_R0.08.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/TEXTURE_DBCACHE_F1B0_R0.08.png width=105px> |<img src=https://github.com/vipshop/cache-dit/raw/main/assets/TEXTURE_DBCACHE_F8B8_R0.12.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/TEXTURE_DBCACHE_F8B12_R0.12.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/TEXTURE_DBCACHE_F8B16_R0.2.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/TEXTURE_DBCACHE_F8B20_R0.2.png width=105px>|
208
-
209
- <div align="center">
210
- <p align="center">
211
- DBCache, <b> L20x4 </b>, Steps: 20, case to show the texture recovery ability of DBCache
212
- </p>
213
- </div>
214
-
215
- These case studies demonstrate that even with relatively high thresholds (such as 0.12, 0.15, 0.2, etc.) under the DBCache **F12B12** or **F8B16** configuration, the detailed texture of the kitten's fur, colored cloth, and the clarity of text can still be preserved. This suggests that users can leverage DBCache to effectively balance performance and precision in their workflows!
216
-
196
+ ![](https://github.com/vipshop/cache-dit/raw/main/assets/dbcache-fnbn-v1.png)
217
197
 
218
- **DBCache** provides configurable parameters for custom optimization, enabling a balanced trade-off between performance and precision:
198
+ **DBCache**: **Dual Block Caching** for Diffusion Transformers. Different configurations of compute blocks (**F8B12**, etc.) can be customized in DBCache, enabling a balanced trade-off between performance and precision. Moreover, it can be entirely **training**-**free**. Please check the [DBCache.md](./docs/DBCache.md) docs for more design details.
219
199
 
220
200
  - **Fn**: Specifies that DBCache uses the **first n** Transformer blocks to fit the information at time step t, enabling the calculation of a more stable L1 diff and delivering more accurate information to subsequent blocks.
221
201
  - **Bn**: Further fuses approximate information in the **last n** Transformer blocks to enhance prediction accuracy. These blocks act as an auto-scaler for approximate hidden states that use residual cache.
222
202
 
223
- ![](https://github.com/vipshop/cache-dit/raw/main/assets/dbcache-fnbn-v1.png)
224
-
225
- - **warmup_steps**: (default: 0) DBCache does not apply the caching strategy when the number of running steps is less than or equal to this value, ensuring the model sufficiently learns basic features during warmup.
226
- - **max_cached_steps**: (default: -1) DBCache disables the caching strategy when the previous cached steps exceed this value to prevent precision degradation.
227
- - **residual_diff_threshold**: The value of residual diff threshold, a higher value leads to faster performance at the cost of lower precision.
228
-
229
- For a good balance between performance and precision, DBCache is configured by default with **F8B0**, 8 warmup steps, and unlimited cached steps.
230
-
231
203
  ```python
232
204
  import cache_dit
233
205
  from diffusers import FluxPipeline
@@ -237,7 +209,8 @@ pipe = FluxPipeline.from_pretrained(
237
209
  torch_dtype=torch.bfloat16,
238
210
  ).to("cuda")
239
211
 
240
- # Default options, F8B0, good balance between performance and precision
212
+ # Default options, F8B0, 8 warmup steps, and unlimited cached
213
+ # steps for good balance between performance and precision
241
214
  cache_options = cache_dit.default_options()
242
215
 
243
216
  # Custom options, F8B8, higher precision
@@ -266,6 +239,17 @@ cache_options = {
266
239
  }
267
240
  ```
268
241
 
242
+ <div align="center">
243
+ <p align="center">
244
+ DBCache, <b> L20x1 </b>, Steps: 28, "A cat holding a sign that says hello world with complex background"
245
+ </p>
246
+ </div>
247
+
248
+ |Baseline(L20x1)|F1B0 (0.08)|F1B0 (0.20)|F8B8 (0.15)|F12B12 (0.20)|F16B16 (0.20)|
249
+ |:---:|:---:|:---:|:---:|:---:|:---:|
250
+ |24.85s|15.59s|8.58s|15.41s|15.11s|17.74s|
251
+ |<img src=https://github.com/vipshop/cache-dit/raw/main/assets/NONE_R0.08_S0.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F1B0S1_R0.08_S11.png width=105px> | <img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F1B0S1_R0.2_S19.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F8B8S1_R0.15_S15.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F12B12S4_R0.2_S16.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F16B16S4_R0.2_S13.png width=105px>|
252
+
269
253
  ## 🔥Hybrid TaylorSeer
270
254
 
271
255
  <div id="taylorseer"></div>
@@ -333,16 +317,15 @@ cache_options = {
333
317
  }
334
318
  ```
335
319
 
336
- ## 🔥Torch Compile
320
+ ## ⚙️Torch Compile
337
321
 
338
322
  <div id="compile"></div>
339
323
 
340
324
  By the way, **cache-dit** is designed to work compatibly with **torch.compile**. You can easily use cache-dit with torch.compile to further improve performance. For example:
341
325
 
342
326
  ```python
343
- cache_dit.enable_cache(
344
- pipe, **cache_dit.default_options()
345
- )
327
+ cache_dit.enable_cache(pipe)
328
+
346
329
  # Compile the Transformer module
347
330
  pipe.transformer = torch.compile(pipe.transformer)
348
331
  ```
@@ -1,17 +1,17 @@
1
1
  cache_dit/__init__.py,sha256=TvZI861ipGnYaOEHJA0Og-ksRUGNCld-PGy_NgjcKZE,641
2
- cache_dit/_version.py,sha256=dCdYCOiddErhHj0N8E7eGD9CFxW2AjdU7N-JosKZW0o,706
2
+ cache_dit/_version.py,sha256=4N3ayuoZZJYPEGMvrxu7tnGigRTxbAdCyp5a8y7c6aw,706
3
3
  cache_dit/logger.py,sha256=0zsu42hN-3-rgGC_C29ms1IvVpV4_b4_SwJCKSenxBE,4304
4
4
  cache_dit/primitives.py,sha256=A2iG9YLot3gOsZSPp-_gyjqjLgJvWQRx8aitD4JQ23Y,3877
5
5
  cache_dit/utils.py,sha256=yybhUTGPfeCoIVZzpoefZ2ypvH8de-10UhPls81ceG4,4800
6
6
  cache_dit/cache_factory/.gitignore,sha256=5Cb-qT9wsTUoMJ7vACDF7ZcLpAXhi5v-xdcWSRit988,23
7
7
  cache_dit/cache_factory/__init__.py,sha256=f4IUOgEALTEBb9UOHtXoGwqKrDb2ZhI_dHkiIrni1Xc,1586
8
- cache_dit/cache_factory/cache_adapters.py,sha256=twzxe6VYB-9iRwU1Fct15mm5pdPNOyqBoLRBscnYZHA,23508
8
+ cache_dit/cache_factory/cache_adapters.py,sha256=-bFMOfIPdfWKTAfF533PuLYaDl1wq8RNUAXJn--Rm7I,23587
9
9
  cache_dit/cache_factory/cache_blocks.py,sha256=9jgK2IT0Y_AlbhJLnhgA47lOxQNwNizDgHve45818gg,18390
10
10
  cache_dit/cache_factory/cache_context.py,sha256=f-ihx14NXIZNakN2b_dduegRpJr5SwcPtc2PqnpDdUY,39818
11
11
  cache_dit/cache_factory/taylorseer.py,sha256=LKSNo2ode69EVo9xrxjxAMEjz0yDGiGADeDYnEqddA8,3987
12
12
  cache_dit/cache_factory/utils.py,sha256=iQg3dqBfQTGkvMdKeO5-YmzkQO5LBSoZ8sYKwQA_7_I,1805
13
13
  cache_dit/cache_factory/patch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
- cache_dit/cache_factory/patch/flux.py,sha256=eTdq-3limKHgwtVCILkZTwt9FwYUhH7_VlhKnfu55BU,8999
14
+ cache_dit/cache_factory/patch/flux.py,sha256=iNQ-1RlOgXupZ4uPiEvJ__Ro6vKT_fOKja9JrpMrO78,8998
15
15
  cache_dit/compile/__init__.py,sha256=FcTVzCeyypl-mxlc59_ehHL3lBNiDAFsXuRoJ-5Cfi0,56
16
16
  cache_dit/compile/utils.py,sha256=ugHrv3QRieG1xKwcg_pi3yVZF6EpSOEJjRmbnfa7VG0,3779
17
17
  cache_dit/custom_ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -22,9 +22,9 @@ cache_dit/metrics/fid.py,sha256=9Ivtazl6mW0Bon2VXa-Ia5Xj2ewxRD3V1Qkd69zYM3Y,1706
22
22
  cache_dit/metrics/inception.py,sha256=pBVe2X6ylLPIXTG4-GWDM9DWnCviMJbJ45R3ulhktR0,12759
23
23
  cache_dit/metrics/lpips.py,sha256=I2qCNi6qJh5TRsaIsdxO0WoRX1DN7U_H3zS0oCSahYM,1032
24
24
  cache_dit/metrics/metrics.py,sha256=8jvM1sF-nDxUuwCRy44QEoo4dYVLCQVh1QyAMs4eaQY,27840
25
- cache_dit-0.2.18.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
26
- cache_dit-0.2.18.dist-info/METADATA,sha256=8U_a77k97X2b_nV1X7VDjusKLAAnAbHA-6eYhJ4wiqA,21940
27
- cache_dit-0.2.18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
28
- cache_dit-0.2.18.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
29
- cache_dit-0.2.18.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
30
- cache_dit-0.2.18.dist-info/RECORD,,
25
+ cache_dit-0.2.20.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
26
+ cache_dit-0.2.20.dist-info/METADATA,sha256=rq7U5fZNeRMn6CvKRyuh1JcovMSwbDWGt1v6LzfaBUE,18752
27
+ cache_dit-0.2.20.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
28
+ cache_dit-0.2.20.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
29
+ cache_dit-0.2.20.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
30
+ cache_dit-0.2.20.dist-info/RECORD,,