cache-dit 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cache_dit/__init__.py +4 -0
- cache_dit/_version.py +2 -2
- cache_dit/cache_factory/__init__.py +6 -2
- cache_dit/cache_factory/block_adapters/block_adapters.py +8 -54
- cache_dit/cache_factory/cache_adapters/__init__.py +1 -0
- cache_dit/cache_factory/{cache_adapters.py → cache_adapters/cache_adapter.py} +53 -20
- cache_dit/cache_factory/cache_contexts/__init__.py +11 -2
- cache_dit/cache_factory/cache_contexts/cache_context.py +186 -117
- cache_dit/cache_factory/cache_contexts/cache_manager.py +63 -131
- cache_dit/cache_factory/cache_contexts/calibrators/__init__.py +132 -0
- cache_dit/cache_factory/cache_contexts/calibrators/base.py +27 -0
- cache_dit/cache_factory/cache_contexts/calibrators/foca.py +26 -0
- cache_dit/cache_factory/cache_contexts/{taylorseer.py → calibrators/taylorseer.py} +32 -24
- cache_dit/cache_factory/cache_interface.py +130 -86
- cache_dit/cache_factory/params_modifier.py +87 -0
- cache_dit/metrics/__init__.py +3 -1
- cache_dit/utils.py +18 -17
- {cache_dit-0.3.0.dist-info → cache_dit-0.3.2.dist-info}/METADATA +109 -86
- {cache_dit-0.3.0.dist-info → cache_dit-0.3.2.dist-info}/RECORD +23 -18
- {cache_dit-0.3.0.dist-info → cache_dit-0.3.2.dist-info}/WHEEL +0 -0
- {cache_dit-0.3.0.dist-info → cache_dit-0.3.2.dist-info}/entry_points.txt +0 -0
- {cache_dit-0.3.0.dist-info → cache_dit-0.3.2.dist-info}/licenses/LICENSE +0 -0
- {cache_dit-0.3.0.dist-info → cache_dit-0.3.2.dist-info}/top_level.txt +0 -0
{cache_dit-0.3.0.dist-info → cache_dit-0.3.2.dist-info}/METADATA

````diff
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: cache_dit
-Version: 0.3.0
-Summary:
+Version: 0.3.2
+Summary: A Unified, Flexible and Training-free Cache Acceleration Framework for 🤗Diffusers.
 Author: DefTruth, vipshop.com, etc.
 Maintainer: DefTruth, vipshop.com, etc
 Project-URL: Repository, https://github.com/vipshop/cache-dit.git
````
````diff
@@ -49,8 +49,8 @@ Dynamic: requires-python
 <img src=https://github.com/vipshop/cache-dit/raw/main/assets/cache-dit-logo.png height="120">
 
 <p align="center">
-A <b>Unified</b
-♥️
+A <b>Unified</b>, Flexible and Training-free <b>Cache Acceleration</b> Framework for <b>🤗Diffusers</b> <br>
+♥️ Cache Acceleration with <b>One-line</b> Code ~ ♥️
 </p>
 <div align='center'>
 <img src=https://img.shields.io/badge/Language-Python-brightgreen.svg >
````
````diff
@@ -62,11 +62,11 @@ Dynamic: requires-python
 </div>
 <p align="center">
 <b><a href="#unified">📚Unified Cache APIs</a></b> | <a href="#forward-pattern-matching">📚Forward Pattern Matching</a> | <a href="#automatic-block-adapter">📚Automatic Block Adapter</a><br>
-<a href="#hybird-forward-pattern">📚Hybrid Forward Pattern</a> | <a href="#dbcache">📚DBCache</a> | <a href="#taylorseer">📚
+<a href="#hybird-forward-pattern">📚Hybrid Forward Pattern</a> | <a href="#dbcache">📚DBCache</a> | <a href="#taylorseer">📚TaylorSeer Calibrator</a> | <a href="#cfg">📚Cache CFG</a><br>
 <a href="#benchmarks">📚Text2Image DrawBench</a> | <a href="#benchmarks">📚Text2Image Distillation DrawBench</a>
 </p>
 <p align="center">
-🎉Now, <b>cache-dit</b> covers <b>
+🎉Now, <b>cache-dit</b> covers almost <b>All</b> Diffusers' <b>DiT</b> Pipelines🎉<br>
 🔥<a href="#supported">Qwen-Image</a> | <a href="#supported">FLUX.1</a> | <a href="#supported">Qwen-Image-Lightning</a> | <a href="#supported"> Wan 2.1 </a> | <a href="#supported"> Wan 2.2 </a>🔥<br>
 🔥<a href="#supported">HunyuanImage-2.1</a> | <a href="#supported">HunyuanVideo</a> | <a href="#supported">HunyuanDiT</a> | <a href="#supported">HiDream</a> | <a href="#supported">AuraFlow</a>🔥<br>
 🔥<a href="#supported">CogView3Plus</a> | <a href="#supported">CogView4</a> | <a href="#supported">LTXVideo</a> | <a href="#supported">CogVideoX</a> | <a href="#supported">CogVideoX 1.5</a> | <a href="#supported">ConsisID</a>🔥<br>
````
````diff
@@ -163,11 +163,11 @@ Dynamic: requires-python
 
 ## 🔥News
 
-- [2025-09-10] 🎉Day 1 support [**HunyuanImage-2.1**](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1) with **1.7x↑🎉** speedup! Check this [example](
-- [2025-09-08] 🔥[**Qwen-Image-Lightning**](
-- [2025-09-03] 🎉[**Wan2.2-MoE**](https://github.com/Wan-Video) **2.4x↑🎉** speedup! Please refer to [run_wan_2.2.py](
-- [2025-08-19] 🔥[**Qwen-Image-Edit**](https://github.com/QwenLM/Qwen-Image) **2x↑🎉** speedup! Check the example: [run_qwen_image_edit.py](
-- [2025-08-11] 🔥[**Qwen-Image**](https://github.com/QwenLM/Qwen-Image) **1.8x↑🎉** speedup! Please refer to [run_qwen_image.py](
+- [2025-09-10] 🎉Day 1 support [**HunyuanImage-2.1**](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1) with **1.7x↑🎉** speedup! Check this [example](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline/run_hunyuan_image_2.1.py).
+- [2025-09-08] 🔥[**Qwen-Image-Lightning**](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline/run_qwen_image_lightning.py) **7.1/3.5 steps🎉** inference with **[DBCache: F16B16](https://github.com/vipshop/cache-dit)**.
+- [2025-09-03] 🎉[**Wan2.2-MoE**](https://github.com/Wan-Video) **2.4x↑🎉** speedup! Please refer to [run_wan_2.2.py](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline/run_wan_2.2.py) as an example.
+- [2025-08-19] 🔥[**Qwen-Image-Edit**](https://github.com/QwenLM/Qwen-Image) **2x↑🎉** speedup! Check the example: [run_qwen_image_edit.py](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline/run_qwen_image_edit.py).
+- [2025-08-11] 🔥[**Qwen-Image**](https://github.com/QwenLM/Qwen-Image) **1.8x↑🎉** speedup! Please refer to [run_qwen_image.py](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline/run_qwen_image.py) as an example.
 - [2025-07-13] 🎉[**FLUX.1-dev**](https://github.com/xlite-dev/flux-faster) **3.3x↑🎉** speedup! NVIDIA L20 with **[cache-dit](https://github.com/vipshop/cache-dit)** + **compile + FP8 DQ**.
 
 <details>
````
````diff
@@ -176,8 +176,8 @@ Dynamic: requires-python
 - [2025-09-08] 🎉First caching mechanism in [Qwen-Image-Lightning](https://github.com/ModelTC/Qwen-Image-Lightning) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check this [PR](https://github.com/ModelTC/Qwen-Image-Lightning/pull/35).
 - [2025-09-08] 🎉First caching mechanism in [Wan2.2](https://github.com/Wan-Video/Wan2.2) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check this [PR](https://github.com/Wan-Video/Wan2.2/pull/127) for more details.
 - [2025-08-12] 🎉First caching mechanism in [QwenLM/Qwen-Image](https://github.com/QwenLM/Qwen-Image) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check this [PR](https://github.com/QwenLM/Qwen-Image/pull/61).
-- [2025-09-01] 📚[**Hybrid Forward Pattern**](#unified) is supported! Please check [FLUX.1-dev](
-- [2025-08-10] 🔥[**FLUX.1-Kontext-dev**](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) is supported! Please refer to [run_flux_kontext.py](
+- [2025-09-01] 📚[**Hybrid Forward Pattern**](#unified) is supported! Please check [FLUX.1-dev](https://github.com/vipshop/cache-dit/raw/main/examples/run_flux_adapter.py) as an example.
+- [2025-08-10] 🔥[**FLUX.1-Kontext-dev**](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) is supported! Please refer to [run_flux_kontext.py](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline/run_flux_kontext.py) as an example.
 - [2025-07-18] 🎉First caching mechanism in [🤗huggingface/flux-fast](https://github.com/huggingface/flux-fast) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check the [PR](https://github.com/huggingface/flux-fast/pull/13).
 
 </details>
````
````diff
@@ -197,7 +197,7 @@ Dynamic: requires-python
 - [📚Implement Patch Functor](#implement-patch-functor)
 - [🤖Cache Acceleration Stats](#cache-acceleration-stats-summary)
 - [⚡️Dual Block Cache](#dbcache)
-- [🔥
+- [🔥TaylorSeer Calibrator](#taylorseer)
 - [⚡️Hybrid Cache CFG](#cfg)
 - [⚙️Torch Compile](#compile)
 - [🛠Metrics CLI](#metrics)
````
````diff
@@ -221,7 +221,7 @@ pip3 install git+https://github.com/vipshop/cache-dit.git
 
 <div id="supported"></div>
 
-Currently, **cache-dit** library supports almost **Any** Diffusion Transformers (with **Transformer Blocks** that match the specific Input and Output **patterns**). Please check [🎉Examples](
+Currently, **cache-dit** library supports almost **Any** Diffusion Transformers (with **Transformer Blocks** that match the specific Input and Output **patterns**). Please check [🎉Examples](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline) for more details. Here are just some of the tested models listed.
 
 ```python
 >>> import cache_dit
````
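The fenced snippet above opens an interpreter session; presumably it goes on to query which pipelines are covered. A minimal sketch of that kind of check — the `supported_pipelines()` helper name and return shape are assumptions, not something this diff confirms:

```python
import cache_dit

# Assumed helper: ask cache-dit which Diffusers pipeline families it
# can adapt out of the box (the exact name/return shape may differ).
num, names = cache_dit.supported_pipelines()
print(f"{num} supported pipeline families")
for name in names:
    print(name)
```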
````diff
@@ -276,7 +276,7 @@ Currently, **cache-dit** library supports almost **Any** Diffusion Transformers
 
 <div id="benchmarks"></div>
 
-cache-dit will support more mainstream Cache acceleration algorithms in the future. More benchmarks will be released, please stay tuned for updates. Here, only the results of some precision and performance benchmarks are presented. The test dataset is **DrawBench**. For a complete benchmark, please refer to [📚Benchmarks](
+cache-dit will support more mainstream Cache acceleration algorithms in the future. More benchmarks will be released, please stay tuned for updates. Here, only the results of some precision and performance benchmarks are presented. The test dataset is **DrawBench**. For a complete benchmark, please refer to [📚Benchmarks](https://github.com/vipshop/cache-dit/raw/main/bench/).
 
 ### 📚Text2Image DrawBench: FLUX.1-dev
 
````
````diff
@@ -292,7 +292,7 @@ Comparisons between different FnBn compute block configurations show that **more
 | F4B0_W4MC3_R0.12 | 32.8981 | 1.0130 | 31.8031 | 1507.83 | 2.47x |
 | F4B0_W4MC4_R0.12 | 32.8384 | 1.0065 | 31.5292 | 1400.08 | 2.66x |
 
-The comparison between **cache-dit: DBCache** and algorithms such as Δ-DiT, Chipmunk, FORA, DuCa, TaylorSeer and FoCa is as follows. Now, in the comparison with a speedup ratio less than **3x**, cache-dit achieved the best accuracy. Please check [📚How to Reproduce?](
+The comparison between **cache-dit: DBCache** and algorithms such as Δ-DiT, Chipmunk, FORA, DuCa, TaylorSeer and FoCa is as follows. Now, in the comparison with a speedup ratio less than **3x**, cache-dit achieved the best accuracy. Please check [📚How to Reproduce?](https://github.com/vipshop/cache-dit/raw/main/bench/) for more details.
 
 | Method | TFLOPs(↓) | SpeedUp(↑) | ImageReward(↑) | Clip Score(↑) |
 | --- | --- | --- | --- | --- |
````
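To decode the configuration names in the FnBn table above: `F`/`B` are the Fn/Bn compute-block counts, `W` the warmup steps, `MC` the cap on cached steps, and `R` the residual diff threshold. A sketch of the correspondence, using the `BasicCacheConfig` fields that appear later in this METADATA diff; the mapping is an inference from matching TFLOPs rows (1400.08 appears both as `F4B0_W4MC4_R0.12` and as `DBCache(F=4,B=0,W=4,MC=4)`), not something stated explicitly:

```python
from cache_dit import BasicCacheConfig

# "F4B0_W4MC4_R0.12" from the benchmark table, spelled out as a config.
config = BasicCacheConfig(
    Fn_compute_blocks=4,           # F4
    Bn_compute_blocks=0,           # B0
    max_warmup_steps=4,            # W4
    max_cached_steps=4,            # MC4
    residual_diff_threshold=0.12,  # R0.12
)
```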
````diff
@@ -302,12 +302,14 @@ The comparison between **cache-dit: DBCache** and algorithms such as Δ-DiT, Chi
 | Δ-DiT(N=3) | 1686.76 | 2.21× | 0.8721 | 32.102 |
 | [**FLUX.1**-dev]: 34% steps | 1264.63 | 3.13× | 0.9453 | 32.114 |
 | Chipmunk | 1505.87 | 2.47× | 0.9936 | 32.776 |
-| FORA
-| **[DBCache(F=4,B=0,W=4,MC=4)](https://github.com/vipshop/cache-dit)** |
+| FORA(N=3) | 1320.07 | 2.82× | 0.9776 | 32.266 |
+| **[DBCache(F=4,B=0,W=4,MC=4)](https://github.com/vipshop/cache-dit)** | 1400.08 | **2.66×** | **1.0065** | 32.838 |
+| **[DBCache+TaylorSeer(F=1,B=0,O=1)](https://github.com/vipshop/cache-dit)** | 1153.05 | **3.23×** | **1.0221** | 32.819 |
 | DuCa(N=5) | 978.76 | 3.80× | 0.9955 | 32.241 |
 | TaylorSeer(N=4,O=2) | 1042.27 | 3.57× | 0.9857 | 32.413 |
-| **[DBCache
-| **[
+| **[DBCache(F=1,B=0,W=4,MC=6)](https://github.com/vipshop/cache-dit)** | 944.75 | **3.94×** | 0.9997 | 32.849 |
+| **[DBCache+TaylorSeer(F=1,B=0,O=1)](https://github.com/vipshop/cache-dit)** | 944.75 | **3.94×** | **1.0107** | 32.865 |
+| **[FoCa(N=5): arxiv.2508.16211](https://arxiv.org/pdf/2508.16211)** | 893.54 | **4.16×** | **1.0029** | **32.948** |
 
 <details>
 <summary> Show all comparison </summary>
````
````diff
@@ -320,12 +322,14 @@ The comparison between **cache-dit: DBCache** and algorithms such as Δ-DiT, Chi
 | Δ-DiT(N=3) | 1686.76 | 2.21× | 0.8721 | 32.102 |
 | [**FLUX.1**-dev]: 34% steps | 1264.63 | 3.13× | 0.9453 | 32.114 |
 | Chipmunk | 1505.87 | 2.47× | 0.9936 | 32.776 |
-| FORA
-| **[DBCache(F=4,B=0,W=4,MC=4)](https://github.com/vipshop/cache-dit)** |
+| FORA(N=3) | 1320.07 | 2.82× | 0.9776 | 32.266 |
+| **[DBCache(F=4,B=0,W=4,MC=4)](https://github.com/vipshop/cache-dit)** | 1400.08 | **2.66×** | **1.0065** | 32.838 |
 | DuCa(N=5) | 978.76 | 3.80× | 0.9955 | 32.241 |
 | TaylorSeer(N=4,O=2) | 1042.27 | 3.57× | 0.9857 | 32.413 |
-| **[DBCache+TaylorSeer(F=1,B=0,O=1)](https://github.com/vipshop/cache-dit)** |
-| **[
+| **[DBCache+TaylorSeer(F=1,B=0,O=1)](https://github.com/vipshop/cache-dit)** | 1153.05 | **3.23×** | **1.0221** | 32.819 |
+| **[DBCache(F=1,B=0,W=4,MC=6)](https://github.com/vipshop/cache-dit)** | 944.75 | **3.94×** | 0.9997 | 32.849 |
+| **[DBCache+TaylorSeer(F=1,B=0,O=1)](https://github.com/vipshop/cache-dit)** | 944.75 | **3.94×** | **1.0107** | 32.865 |
+| **[FoCa(N=5): arxiv.2508.16211](https://arxiv.org/pdf/2508.16211)** | 893.54 | **4.16×** | **1.0029** | **32.948** |
 | [**FLUX.1**-dev]: 22% steps | 818.29 | 4.55× | 0.8183 | 31.772 |
 | FORA(N=4) | 967.91 | 3.84× | 0.9730 | 32.142 |
 | ToCa(N=8) | 784.54 | 4.74× | 0.9451 | 31.993 |
````
````diff
@@ -333,14 +337,14 @@ The comparison between **cache-dit: DBCache** and algorithms such as Δ-DiT, Chi
 | TeaCache(l=0.8) | 892.35 | 4.17× | 0.8683 | 31.704 |
 | **[DBCache(F=4,B=0,W=4,MC=10)](https://github.com/vipshop/cache-dit)** | 816.65 | 4.56x | 0.8245 | 32.191 |
 | TaylorSeer(N=5,O=2) | 893.54 | 4.16× | 0.9768 | 32.467 |
-| **[FoCa(N=7) arxiv.2508.16211](https://arxiv.org/pdf/2508.16211)** |
+| **[FoCa(N=7): arxiv.2508.16211](https://arxiv.org/pdf/2508.16211)** | 670.44 | **5.54×** | **0.9891** | **32.920** |
 | FORA(N=7) | 670.14 | 5.55× | 0.7418 | 31.519 |
 | ToCa(N=12) | 644.70 | 5.77× | 0.7155 | 31.808 |
 | DuCa(N=10) | 606.91 | 6.13× | 0.8382 | 31.759 |
 | TeaCache(l=1.2) | 669.27 | 5.56× | 0.7394 | 31.704 |
-| **[DBCache(F=1,B=0,W=4,MC=10)](https://github.com/vipshop/cache-dit)** |
+| **[DBCache(F=1,B=0,W=4,MC=10)](https://github.com/vipshop/cache-dit)** | 651.90 | **5.72x** | 0.8796 | **32.318** |
 | TaylorSeer(N=7,O=2) | 670.44 | 5.54× | 0.9128 | 32.128 |
-| **[FoCa(N=8) arxiv.2508.16211](https://arxiv.org/pdf/2508.16211)** |
+| **[FoCa(N=8): arxiv.2508.16211](https://arxiv.org/pdf/2508.16211)** | 596.07 | **6.24×** | **0.9502** | **32.706** |
 
 NOTE: Except for DBCache, other performance data are referenced from the paper [FoCa, arxiv.2508.16211](https://arxiv.org/pdf/2508.16211).
 
````
````diff
@@ -371,7 +375,7 @@ Currently, for any **Diffusion** models with **Transformer Blocks** that match t
 
 ### ♥️Cache Acceleration with One-line Code
 
-In most cases, you only need to call **one line** of code, that is `cache_dit.enable_cache(...)`. After this API is called, you just need to call the pipe as normal. The `pipe` param can be **any** Diffusion Pipeline. Please refer to [Qwen-Image](
+In most cases, you only need to call **one line** of code, that is `cache_dit.enable_cache(...)`. After this API is called, you just need to call the pipe as normal. The `pipe` param can be **any** Diffusion Pipeline. Please refer to [Qwen-Image](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline/run_qwen_image.py) as an example.
 
 ```python
 import cache_dit
````
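Putting the calls from this section together, the advertised one-line usage reads as follows. A minimal sketch: `enable_cache`/`disable_cache` are exactly as shown in this diff, while the Qwen-Image checkpoint is just an illustrative choice of pipeline:

```python
import cache_dit
from diffusers import DiffusionPipeline

# Any Diffusers pipeline works; this checkpoint is illustrative.
pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image")

# One line to enable cache acceleration ...
cache_dit.enable_cache(pipe)

# ... then call the pipeline as normal:
# image = pipe("a cat playing piano").images[0]

# Remove the cache hooks when you are done.
cache_dit.disable_cache(pipe)
```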
````diff
@@ -392,7 +396,7 @@ cache_dit.disable_cache(pipe)
 
 ### 🔥Automatic Block Adapter
 
-But in some cases, you may have a **modified** Diffusion Pipeline or Transformer that is not located in the diffusers library or not officially supported by **cache-dit** at this time. The **BlockAdapter** can help you solve this problem. Please refer to [🔥Qwen-Image w/ BlockAdapter](
+But in some cases, you may have a **modified** Diffusion Pipeline or Transformer that is not located in the diffusers library or not officially supported by **cache-dit** at this time. The **BlockAdapter** can help you solve this problem. Please refer to [🔥Qwen-Image w/ BlockAdapter](https://github.com/vipshop/cache-dit/raw/main/examples/adapter/run_qwen_image_adapter.py) as an example.
 
 ```python
 from cache_dit import ForwardPattern, BlockAdapter
````
````diff
@@ -418,11 +422,11 @@ cache_dit.enable_cache(
     ),
 )
 ```
-For such situations, **BlockAdapter** can help you quickly apply various cache acceleration features to your own Diffusion Pipelines and Transformers. Please check the [📚BlockAdapter.md](
+For such situations, **BlockAdapter** can help you quickly apply various cache acceleration features to your own Diffusion Pipelines and Transformers. Please check the [📚BlockAdapter.md](https://github.com/vipshop/cache-dit/raw/main/docs/BlockAdapter.md) for more details.
 
 ### 📚Hybird Forward Pattern
 
-Sometimes, a Transformer class will contain more than one transformer `blocks`. For example, **FLUX.1** (HiDream, Chroma, etc) contains transformer_blocks and single_transformer_blocks (with different forward patterns). The **BlockAdapter** can also help you solve this problem. Please refer to [📚FLUX.1](
+Sometimes, a Transformer class will contain more than one transformer `blocks`. For example, **FLUX.1** (HiDream, Chroma, etc) contains transformer_blocks and single_transformer_blocks (with different forward patterns). The **BlockAdapter** can also help you solve this problem. Please refer to [📚FLUX.1](https://github.com/vipshop/cache-dit/raw/main/examples/adapter/run_flux_adapter.py) as an example.
 
 ```python
 # For diffusers <= 0.34.0, FLUX.1 transformer_blocks and
````
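The elided example presumably wires both block lists into a single BlockAdapter. A sketch under the assumption that `blocks` and `forward_pattern` accept parallel lists; the specific `Pattern_*` choices here are guesses, not confirmed by this diff:

```python
import cache_dit
from cache_dit import ForwardPattern, BlockAdapter
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")

# Sketch only: register both FLUX.1 block lists, each with the forward
# pattern its blocks actually implement (patterns assumed here).
cache_dit.enable_cache(
    BlockAdapter(
        pipe=pipe,
        transformer=pipe.transformer,
        blocks=[
            pipe.transformer.transformer_blocks,
            pipe.transformer.single_transformer_blocks,
        ],
        forward_pattern=[
            ForwardPattern.Pattern_1,
            ForwardPattern.Pattern_3,
        ],
    ),
)
```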
````diff
@@ -443,10 +447,10 @@ cache_dit.enable_cache(
 )
 ```
 
-Sometimes you may have even more complex cases, such as **Wan 2.2 MoE**, which has more than one Transformer (namely `transformer` and `transformer_2`) in its structure. Fortunately, **cache-dit** can also handle this situation very well. Please refer to [📚Wan 2.2 MoE](
+Sometimes you may have even more complex cases, such as **Wan 2.2 MoE**, which has more than one Transformer (namely `transformer` and `transformer_2`) in its structure. Fortunately, **cache-dit** can also handle this situation very well. Please refer to [📚Wan 2.2 MoE](https://github.com/vipshop/cache-dit/raw/main/examples/pipeline/run_wan_2.2.py) as an example.
 
 ```python
-from cache_dit import ForwardPattern, BlockAdapter, ParamsModifier
+from cache_dit import ForwardPattern, BlockAdapter, ParamsModifier, BasicCacheConfig
 
 cache_dit.enable_cache(
     BlockAdapter(
````
````diff
@@ -468,12 +472,16 @@ cache_dit.enable_cache(
         # value will be overwritten by the new one.
         params_modifiers=[
             ParamsModifier(
-
-
+                cache_config=BasicCacheConfig(
+                    max_warmup_steps=4,
+                    max_cached_steps=8,
+                ),
             ),
             ParamsModifier(
-
-
+                cache_config=BasicCacheConfig(
+                    max_warmup_steps=2,
+                    max_cached_steps=20,
+                ),
             ),
         ],
         has_separate_cfg=True,
````
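Reassembled, the two-transformer case reads roughly as below. The `params_modifiers`, `cache_config` values and `has_separate_cfg` come straight from this hunk; the checkpoint id and the `pipe`/`transformer` fields are assumptions carried over from the earlier BlockAdapter example:

```python
import cache_dit
from cache_dit import BlockAdapter, ParamsModifier, BasicCacheConfig
from diffusers import WanPipeline

# Illustrative checkpoint id for a Wan 2.2 MoE pipeline.
pipe = WanPipeline.from_pretrained("Wan-AI/Wan2.2-T2V-A14B-Diffusers")

cache_dit.enable_cache(
    BlockAdapter(
        pipe=pipe,
        # Wan 2.2 MoE has two transformers; field name assumed here.
        transformer=[pipe.transformer, pipe.transformer_2],
        params_modifiers=[
            ParamsModifier(  # settings for pipe.transformer
                cache_config=BasicCacheConfig(max_warmup_steps=4, max_cached_steps=8),
            ),
            ParamsModifier(  # settings for pipe.transformer_2
                cache_config=BasicCacheConfig(max_warmup_steps=2, max_cached_steps=20),
            ),
        ],
        has_separate_cfg=True,
    ),
)
```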
````diff
@@ -482,11 +490,11 @@ cache_dit.enable_cache(
 ```
 ### 📚Implement Patch Functor
 
-For any PATTERN not {0...5}, we introduced the simple abstract concept of **Patch Functor**. Users can implement a subclass of Patch Functor to convert an unknown Pattern into a known PATTERN, and for some models, users may also need to fuse the operations within the blocks for loop into block forward.
+For any PATTERN not in {0...5}, we introduced the simple abstract concept of **Patch Functor**. Users can implement a subclass of Patch Functor to convert an unknown Pattern into a known PATTERN, and for some models, users may also need to fuse the operations within the blocks for loop into block forward.
 
 
 
-Some Patch functors have already been provided in cache-dit: [📚HiDreamPatchFunctor](
+Some Patch functors have already been provided in cache-dit: [📚HiDreamPatchFunctor](https://github.com/vipshop/cache-dit/raw/main/src/cache_dit/cache_factory/patch_functors/functor_hidream.py), [📚ChromaPatchFunctor](https://github.com/vipshop/cache-dit/raw/main/src/cache_dit/cache_factory/patch_functors/functor_chroma.py), etc. After implementing Patch Functor, users need to set the `patch_functor` property of **BlockAdapter**.
 
 ```python
 @BlockAdapterRegistry.register("HiDream")
````
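For orientation, a hypothetical skeleton of such a functor. Only the module layout (`cache_dit/cache_factory/patch_functors/`) is confirmed by the RECORD section below; the `PatchFunctor` export and its `apply` hook are assumptions:

```python
from cache_dit.cache_factory.patch_functors import PatchFunctor  # assumed export


class MyModelPatchFunctor(PatchFunctor):  # hypothetical subclass
    def apply(self, transformer, **kwargs):
        # Convert an unknown forward pattern into one of the known
        # PATTERNs {0...5}, e.g. by fusing the extra ops inside the
        # blocks for-loop into each block's forward, then return the
        # patched transformer.
        ...
        return transformer
```

As the paragraph above notes, the functor is then attached via the `patch_functor` property of **BlockAdapter**.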
````diff
@@ -535,7 +543,7 @@ You can set `details` param as `True` to show more details of cache stats. (mark
 
 
 
-**DBCache**: **Dual Block Caching** for Diffusion Transformers. Different configurations of compute blocks (**F8B12**, etc.) can be customized in DBCache, enabling a balanced trade-off between performance and precision. Moreover, it can be entirely **training**-**free**. Please check [DBCache.md](
+**DBCache**: **Dual Block Caching** for Diffusion Transformers. Different configurations of compute blocks (**F8B12**, etc.) can be customized in DBCache, enabling a balanced trade-off between performance and precision. Moreover, it can be entirely **training**-**free**. Please check [DBCache.md](https://github.com/vipshop/cache-dit/raw/main/docs/DBCache.md) docs for more design details.
 
 - **Fn**: Specifies that DBCache uses the **first n** Transformer blocks to fit the information at time step t, enabling the calculation of a more stable L1 diff and delivering more accurate information to subsequent blocks.
 - **Bn**: Further fuses approximate information in the **last n** Transformer blocks to enhance prediction accuracy. These blocks act as an auto-scaler for approximate hidden states that use residual cache.
````
````diff
@@ -544,23 +552,27 @@ You can set `details` param as `True` to show more details of cache stats. (mark
 import cache_dit
 from diffusers import FluxPipeline
 
-
+pipe_or_adapter = FluxPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-dev",
     torch_dtype=torch.bfloat16,
 ).to("cuda")
 
 # Default options, F8B0, 8 warmup steps, and unlimited cached
 # steps for good balance between performance and precision
-cache_dit.enable_cache(
+cache_dit.enable_cache(pipe_or_adapter)
 
 # Custom options, F8B8, higher precision
+from cache_dit import BasicCacheConfig
+
 cache_dit.enable_cache(
-
-
-
-
-
-
+    pipe_or_adapter,
+    cache_config=BasicCacheConfig(
+        max_warmup_steps=8,  # steps do not cache
+        max_cached_steps=-1,  # -1 means no limit
+        Fn_compute_blocks=8,  # Fn, F8, etc.
+        Bn_compute_blocks=8,  # Bn, B8, etc.
+        residual_diff_threshold=0.12,
+    ),
 )
 ```
 
````
````diff
@@ -575,7 +587,7 @@ cache_dit.enable_cache(
 |24.85s|15.59s|8.58s|15.41s|15.11s|17.74s|
 |<img src=https://github.com/vipshop/cache-dit/raw/main/assets/NONE_R0.08_S0.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F1B0S1_R0.08_S11.png width=105px> | <img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F1B0S1_R0.2_S19.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F8B8S1_R0.15_S15.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F12B12S4_R0.2_S16.png width=105px>|<img src=https://github.com/vipshop/cache-dit/raw/main/assets/DBCACHE_F16B16S4_R0.2_S13.png width=105px>|
 
-## 🔥
+## 🔥TaylorSeer Calibrator
 
 <div id="taylorseer"></div>
 
````
````diff
@@ -588,17 +600,24 @@
 **TaylorSeer** employs a differential method to approximate the higher-order derivatives of features and predict features in future timesteps with Taylor series expansion. The TaylorSeer implemented in cache-dit supports both hidden states and residual cache types. That is $\mathcal{F}\_{\text {pred }, m}\left(x_{t-k}^l\right)$ can be a residual cache or a hidden-state cache.
 
 ```python
+from cache_dit import BasicCacheConfig, TaylorSeerCalibratorConfig
+
 cache_dit.enable_cache(
-
-
-
-
-
-
-
-
-
-
+    pipe_or_adapter,
+    # Basic DBCache w/ FnBn configurations
+    cache_config=BasicCacheConfig(
+        max_warmup_steps=8,  # steps do not cache
+        max_cached_steps=-1,  # -1 means no limit
+        Fn_compute_blocks=8,  # Fn, F8, etc.
+        Bn_compute_blocks=8,  # Bn, B8, etc.
+        residual_diff_threshold=0.12,
+    ),
+    # Then, you can use the TaylorSeer Calibrator to approximate
+    # the values in cached steps, taylorseer_order default is 1.
+    calibrator_config=TaylorSeerCalibratorConfig(
+        taylorseer_order=1,
+    ),
+)
 ```
 
 > [!Important]
````
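As a concrete illustration of the expansion described above: with order 1 and a finite-difference derivative, the prediction reduces to linear extrapolation of the cached feature. This sketch mirrors the formula only; it is not the library's implementation (which lives in `calibrators/taylorseer.py` per the RECORD section below):

```python
def taylorseer_predict_order1(f_prev, f_curr, k):
    """Predict the feature k steps ahead from two cached snapshots.

    f_prev, f_curr: features (e.g. torch tensors) cached at consecutive
    cache-update steps; k: how many steps ahead to predict.
    """
    d1 = f_curr - f_prev   # finite difference ~ first derivative
    return f_curr + k * d1  # first-order Taylor expansion
```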
````diff
@@ -622,22 +641,26 @@ cache_dit.enable_cache(
 cache-dit supports caching for **CFG (classifier-free guidance)**. For models that fuse CFG and non-CFG into a single forward step, or models that do not include CFG (classifier-free guidance) in the forward step, please set `enable_separate_cfg` param to **False (default, None)**. Otherwise, set it to True. For examples:
 
 ```python
+from cache_dit import BasicCacheConfig
+
 cache_dit.enable_cache(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    pipe_or_adapter,
+    cache_config=BasicCacheConfig(
+        ...,
+        # CFG: classifier free guidance or not
+        # For model that fused CFG and non-CFG into single forward step,
+        # should set enable_separate_cfg as False. For example, set it as True
+        # for Wan 2.1/Qwen-Image and set it as False for FLUX.1, HunyuanVideo,
+        # CogVideoX, Mochi, LTXVideo, Allegro, CogView3Plus, EasyAnimate, SD3, etc.
+        enable_separate_cfg=True,  # Wan 2.1, Qwen-Image, CogView4, Cosmos, SkyReelsV2, etc.
+        # Compute cfg forward first or not, default False, namely,
+        # 0, 2, 4, ..., -> non-CFG step; 1, 3, 5, ... -> CFG step.
+        cfg_compute_first=False,
+        # Compute separate diff values for CFG and non-CFG step,
+        # default True. If False, we will use the computed diff from
+        # current non-CFG transformer step for current CFG step.
+        cfg_diff_compute_separate=True,
+    ),
 )
 ```
 
````
````diff
@@ -659,7 +682,7 @@ torch._dynamo.config.recompile_limit = 96 # default is 8
 torch._dynamo.config.accumulated_recompile_limit = 2048 # default is 256
 ```
 
-Please check [perf.py](
+Please check [perf.py](https://github.com/vipshop/cache-dit/raw/main/bench/perf.py) for more details.
 
 
 ## 🛠Metrics CLI
````
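A sketch of combining caching with `torch.compile`, assuming the common Diffusers practice of compiling the transformer module and reusing the recompile limits shown above; treat the linked perf.py as the authoritative recipe:

```python
import torch
import cache_dit

# pipe: any Diffusers pipeline, loaded as in the earlier examples.
# Raise the dynamo limits as above, since cached and non-cached steps
# can trigger extra recompilations.
torch._dynamo.config.recompile_limit = 96
torch._dynamo.config.accumulated_recompile_limit = 2048

cache_dit.enable_cache(pipe)  # enable caching first (one reasonable order)
pipe.transformer = torch.compile(pipe.transformer)  # then compile
```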
````diff
@@ -676,16 +699,16 @@ from cache_dit.metrics import compute_lpips
 from cache_dit.metrics import compute_clip_score
 from cache_dit.metrics import compute_image_reward
 
-psnr,
-psnr,
-ssim,
-fid,
-lpips,
-
-reward,
+psnr, n = compute_psnr("true.png", "test.png")  # Num: n
+psnr, n = compute_psnr("true_dir", "test_dir")
+ssim, n = compute_ssim("true_dir", "test_dir")
+fid, n = compute_fid("true_dir", "test_dir")
+lpips, n = compute_lpips("true_dir", "test_dir")
+clip, n = compute_clip_score("DrawBench200.txt", "test_dir")
+reward, n = compute_image_reward("DrawBench200.txt", "test_dir")
 ```
 
-
+Or, you can use the `cache-dit-metrics-cli` tool. For examples:
 
 ```bash
 cache-dit-metrics-cli -h # show usage
````
````diff
@@ -697,7 +720,7 @@ cache-dit-metrics-cli all -i1 true_dir -i2 test_dir # image dir
 ## 👋Contribute
 <div id="contribute"></div>
 
-How to contribute? Star ⭐️ this repo to support us or check [CONTRIBUTE.md](
+How to contribute? Star ⭐️ this repo to support us or check [CONTRIBUTE.md](https://github.com/vipshop/cache-dit/raw/main/CONTRIBUTE.md).
 
 <div align='center'>
 <a href="https://star-history.com/#vipshop/cache-dit&Date">
````
````diff
@@ -721,7 +744,7 @@ The **cache-dit** codebase is adapted from FBCache. Over time its codebase diver
 
 ```BibTeX
 @misc{cache-dit@2025,
-  title={cache-dit: A Unified and Training-free Cache Acceleration
+  title={cache-dit: A Unified, Flexible and Training-free Cache Acceleration Framework for 🤗Diffusers.},
   url={https://github.com/vipshop/cache-dit.git},
   note={Open-source software available at https://github.com/vipshop/cache-dit.git},
   author={vipshop.com},
````
{cache_dit-0.3.0.dist-info → cache_dit-0.3.2.dist-info}/RECORD

````diff
@@ -1,26 +1,31 @@
-cache_dit/__init__.py,sha256=
-cache_dit/_version.py,sha256=
+cache_dit/__init__.py,sha256=sHRg0swXZZiw6lvSQ53fcVtN9JRayx0az2lXAz5OOGI,1510
+cache_dit/_version.py,sha256=e8NqPtZ8fggRgk3GPrqZ_U_BDV8aSULw1u_Gn9NNbnk,704
 cache_dit/logger.py,sha256=0zsu42hN-3-rgGC_C29ms1IvVpV4_b4_SwJCKSenxBE,4304
-cache_dit/utils.py,sha256=
+cache_dit/utils.py,sha256=AyYRwi5XBxYBH4GaXxOxv9-X24Te_IYOYwh54t_1d3A,10674
 cache_dit/cache_factory/.gitignore,sha256=5Cb-qT9wsTUoMJ7vACDF7ZcLpAXhi5v-xdcWSRit988,23
-cache_dit/cache_factory/__init__.py,sha256=
-cache_dit/cache_factory/
-cache_dit/cache_factory/cache_interface.py,sha256=tHQv7i8Hp6nfbjZWHwDx3nEvCfxLeBw26aMYjyu6nMw,8541
+cache_dit/cache_factory/__init__.py,sha256=vy9I6Ofkj9jWeUoOvh-cY5a9QlDDKfj2FVPlVTf7BeA,1390
+cache_dit/cache_factory/cache_interface.py,sha256=A_8bBsLfGOE5wM3_rniQKPJ223_-fSpNIq65uv00sF0,10620
 cache_dit/cache_factory/cache_types.py,sha256=ooukxQRG55uTLmaZ0SKw6gIeY6SQHhMxkbv55uj2Sqk,991
 cache_dit/cache_factory/forward_pattern.py,sha256=FumlCuZ-TSmSYH0hGBHctSJ-oGLCftdZjLygqhsmdR4,2258
+cache_dit/cache_factory/params_modifier.py,sha256=zYJJsInTYCaYHBZ7mZJOP-PZnkSg3iN1WPewNOayXos,3628
 cache_dit/cache_factory/utils.py,sha256=XkVM9AXcB9zYq8-S8QKAsGz80r3tA6U3lBNGDGeHOe4,1871
 cache_dit/cache_factory/block_adapters/__init__.py,sha256=33geXMz56TxFWMp0c-H4__MY5SGRzKMKj3TXnUYOMlc,17512
-cache_dit/cache_factory/block_adapters/block_adapters.py,sha256=
+cache_dit/cache_factory/block_adapters/block_adapters.py,sha256=jAgzMPTaY4rBuq7DLK2VeEWuYLy7lvw7bZcPY4S93b4,21660
 cache_dit/cache_factory/block_adapters/block_registers.py,sha256=2L7QeM4ygnaKQpC9PoJod0QRYyxidUKU2AYpysDCUwE,2572
+cache_dit/cache_factory/cache_adapters/__init__.py,sha256=py71WGD3JztQ1uk6qdLVbzYcQ1rvqFidNNaQYo7tqTo,79
+cache_dit/cache_factory/cache_adapters/cache_adapter.py,sha256=GrkSz4was9gg_dYkfBobrOQ_eNqipQBqeuFfqcwkCXc,19650
 cache_dit/cache_factory/cache_blocks/__init__.py,sha256=08Ox7kD05lkRKCOsVTdEZeKAWBheqpxfrAT1Nz7eclI,2916
 cache_dit/cache_factory/cache_blocks/pattern_0_1_2.py,sha256=ElMps6_7uI74tSF9GDR_dEI0bZEhdzcepM29xFWnYo8,428
 cache_dit/cache_factory/cache_blocks/pattern_3_4_5.py,sha256=Bv56qETXhsREvCrNvnZpSqDIIHsi6Ze3FJW4Yk2x3uI,8597
 cache_dit/cache_factory/cache_blocks/pattern_base.py,sha256=d4H9kEB0AgnVMT8aF0Y54SUMUQUxw5HQ8gRkoCuTQ_A,14577
 cache_dit/cache_factory/cache_blocks/utils.py,sha256=dGOC1tMMOvcbvEgx44eTESKn_jsv-0RZ3tRHPa3wmQ4,1315
-cache_dit/cache_factory/cache_contexts/__init__.py,sha256=
-cache_dit/cache_factory/cache_contexts/cache_context.py,sha256=
-cache_dit/cache_factory/cache_contexts/cache_manager.py,sha256=
-cache_dit/cache_factory/cache_contexts/
+cache_dit/cache_factory/cache_contexts/__init__.py,sha256=T6Vak3x7Rs0Oy15Tou49p-rPQRA2jiuYtJBsbv1lBBU,388
+cache_dit/cache_factory/cache_contexts/cache_context.py,sha256=3EhaMCz3VUQ_NF81VgYwWoSEGIvhScPxPYhjL1OcgxE,15240
+cache_dit/cache_factory/cache_contexts/cache_manager.py,sha256=hSKAeP1CxmO3RFUxjFjAK1xdvVvTmeayh5jEHMaQXNE,30225
+cache_dit/cache_factory/cache_contexts/calibrators/__init__.py,sha256=mzYXO8tbytGpJJ9rpPu20kMoj1Iu_7Ym9tjfzV8rA98,5574
+cache_dit/cache_factory/cache_contexts/calibrators/base.py,sha256=mn6ZBkChGpGwN5csrHTUGMoX6BBPvqHXSLbIExiW-EU,748
+cache_dit/cache_factory/cache_contexts/calibrators/foca.py,sha256=nhHGs_hxwW1M942BQDMJb9-9IuHdnOxp774Jrna1bJI,891
+cache_dit/cache_factory/cache_contexts/calibrators/taylorseer.py,sha256=aGxr9SpytYznTepDWGPAxWDnuVMSuNyn6uNXnLh2acQ,4001
 cache_dit/cache_factory/patch_functors/__init__.py,sha256=oI6F3N9ezahRHaFUOZ1GfrAw1qFdKrxFXXmlwwehHj4,530
 cache_dit/cache_factory/patch_functors/functor_base.py,sha256=Ahk0fTfrHgNdEl-9JSkACvfyyv9G-Ei5OSz7XBIlX5o,357
 cache_dit/cache_factory/patch_functors/functor_chroma.py,sha256=xD0Q96VArp1vYBLQ0pcjRIyFB1i_Y7muZ2q07Hz8Oqs,13430
````
````diff
@@ -32,7 +37,7 @@ cache_dit/compile/__init__.py,sha256=FcTVzCeyypl-mxlc59_ehHL3lBNiDAFsXuRoJ-5Cfi0
 cache_dit/compile/utils.py,sha256=nN2OIrSdwRR5zGxJinKDqb07pXpvTNTF3g_OgLkeeBU,3858
 cache_dit/custom_ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 cache_dit/custom_ops/triton_taylorseer.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cache_dit/metrics/__init__.py,sha256=
+cache_dit/metrics/__init__.py,sha256=UjPJ69DyyjZDfERTpKAjZKOxOTx58aWnkze7VfH3en8,673
 cache_dit/metrics/clip_score.py,sha256=ERNCFQFJKzJdbIX9OAg-1LiSPuXUVHLOFxbf2gcENpc,3938
 cache_dit/metrics/config.py,sha256=ieOgD9ayz722RjVzk24bSIqS2D6o7TZjGk8KeXV-OLQ,551
 cache_dit/metrics/fid.py,sha256=ZM_FM0XERtpnkMUfphmw2aOdljrh1uba-pnYItu0q6M,18219
````
````diff
@@ -43,9 +48,9 @@ cache_dit/metrics/metrics.py,sha256=7UV-H2NRbhfr6dvrXEzU97Zy-BSQ5zEfm9CKtaK4ldg,
 cache_dit/quantize/__init__.py,sha256=kWYoMAyZgBXu9BJlZjTQ0dRffW9GqeeY9_iTkXrb70A,59
 cache_dit/quantize/quantize_ao.py,sha256=Fx1KW4l3gdEkdrcAYtPoDW7WKBJWrs3glOHiEwW_TgE,6160
 cache_dit/quantize/quantize_interface.py,sha256=2s_R7xPSKuJeFpEGeLwRxnq_CqJcBG3a3lzyW5wh-UM,1241
-cache_dit-0.3.
-cache_dit-0.3.
-cache_dit-0.3.
-cache_dit-0.3.
-cache_dit-0.3.
-cache_dit-0.3.
+cache_dit-0.3.2.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
+cache_dit-0.3.2.dist-info/METADATA,sha256=L8vWXW0w9Z4GXVXylKnqmhnfpKJ8YeL0LKIuwLL8HEo,47858
+cache_dit-0.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+cache_dit-0.3.2.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
+cache_dit-0.3.2.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
+cache_dit-0.3.2.dist-info/RECORD,,
````