cache-dit 1.0.4-py3-none-any.whl → 1.0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


cache_dit/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID

- __version__ = version = '1.0.4'
- __version_tuple__ = version_tuple = (1, 0, 4)
+ __version__ = version = '1.0.5'
+ __version_tuple__ = version_tuple = (1, 0, 5)

  __commit_id__ = commit_id = None
@@ -12,7 +12,10 @@ def flux_adapter(pipe, **kwargs) -> BlockAdapter:
  from cache_dit.utils import is_diffusers_at_least_0_3_5

  assert isinstance(pipe.transformer, FluxTransformer2DModel)
- if is_diffusers_at_least_0_3_5():
+ transformer_cls_name: str = pipe.transformer.__class__.__name__
+ if is_diffusers_at_least_0_3_5() and not transformer_cls_name.startswith(
+     "Nunchaku"
+ ):
      return BlockAdapter(
          pipe=pipe,
          transformer=pipe.transformer,
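The guard above keeps Nunchaku-quantized FLUX transformers on the legacy adapter path and only inspects the transformer's class name. A minimal, self-contained sketch of that behavior; the class "NunchakuFluxTransformer2DModel" below is a hypothetical stand-in, not an import from nunchaku:

    # Stand-in for a Nunchaku-quantized transformer; only the name prefix matters.
    class NunchakuFluxTransformer2DModel:
        pass

    transformer = NunchakuFluxTransformer2DModel()
    transformer_cls_name = transformer.__class__.__name__
    takes_legacy_path = transformer_cls_name.startswith("Nunchaku")
    print(takes_legacy_path)  # True -> BlockAdapter is built via the pre-0.35 path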
@@ -1,3 +1,4 @@
+ import copy
  import torch
  import unittest
  import functools
@@ -197,7 +198,6 @@ class CachedAdapter:
  flatten_contexts, contexts_kwargs = cls.modify_context_params(
      block_adapter, **context_kwargs
  )
-
  original_call = block_adapter.pipe.__class__.__call__

  @functools.wraps(original_call)
@@ -238,7 +238,7 @@ class CachedAdapter:
      block_adapter.unique_blocks_name
  )
  contexts_kwargs = [
-     context_kwargs.copy()
+     copy.deepcopy(context_kwargs)  # must deep copy
      for _ in range(
          len(flatten_contexts),
      )
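The switch from dict.copy() to copy.deepcopy() matters because context_kwargs carries mutable config objects: with a shallow copy, every per-block context shares and mutates the same instance. A small illustration of the difference, using a throwaway dataclass rather than the real configs:

    import copy
    import dataclasses

    @dataclasses.dataclass
    class DummyConfig:  # stand-in for a cache config object
        max_cached_steps: int = 8

    context_kwargs = {"cache_config": DummyConfig()}

    shallow = [context_kwargs.copy() for _ in range(2)]
    shallow[0]["cache_config"].max_cached_steps = 2
    print(shallow[1]["cache_config"].max_cached_steps)  # 2 -- both contexts were mutated

    deep = [copy.deepcopy(context_kwargs) for _ in range(2)]
    deep[0]["cache_config"].max_cached_steps = 2
    print(deep[1]["cache_config"].max_cached_steps)     # 8 -- contexts stay independent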
@@ -259,9 +259,41 @@ class CachedAdapter:
  for i in range(
      min(len(contexts_kwargs), len(flatten_modifiers)),
  ):
-     contexts_kwargs[i].update(
-         flatten_modifiers[i]._context_kwargs,
-     )
+     if "cache_config" in flatten_modifiers[i]._context_kwargs:
+         modifier_cache_config = flatten_modifiers[
+             i
+         ]._context_kwargs.get("cache_config", None)
+         modifier_calibrator_config = flatten_modifiers[
+             i
+         ]._context_kwargs.get("calibrator_config", None)
+         if modifier_cache_config is not None:
+             assert isinstance(
+                 modifier_cache_config, BasicCacheConfig
+             ), (
+                 f"cache_config must be BasicCacheConfig, but got "
+                 f"{type(modifier_cache_config)}."
+             )
+             contexts_kwargs[i]["cache_config"].update(
+                 **modifier_cache_config.as_dict()
+             )
+         if modifier_calibrator_config is not None:
+             assert isinstance(
+                 modifier_calibrator_config, CalibratorConfig
+             ), (
+                 f"calibrator_config must be CalibratorConfig, but got "
+                 f"{type(modifier_calibrator_config)}."
+             )
+             if (
+                 contexts_kwargs[i].get("calibrator_config", None)
+                 is None
+             ):
+                 contexts_kwargs[i][
+                     "calibrator_config"
+                 ] = modifier_calibrator_config
+             else:
+                 contexts_kwargs[i]["calibrator_config"].update(
+                     **modifier_calibrator_config.as_dict()
+                 )
      cls._config_messages(**contexts_kwargs[i])

  return flatten_contexts, contexts_kwargs
@@ -60,9 +60,25 @@ class BasicCacheConfig:
  def update(self, **kwargs) -> "BasicCacheConfig":
      for key, value in kwargs.items():
          if hasattr(self, key):
-             setattr(self, key, value)
+             if value is not None:
+                 setattr(self, key, value)
      return self

+ def empty(self, **kwargs) -> "BasicCacheConfig":
+     # Set all fields to None
+     for field in dataclasses.fields(self):
+         if hasattr(self, field.name):
+             setattr(self, field.name, None)
+     if kwargs:
+         self.update(**kwargs)
+     return self
+
+ def reset(self, **kwargs) -> "BasicCacheConfig":
+     return self.empty(**kwargs)
+
+ def as_dict(self) -> dict:
+     return dataclasses.asdict(self)
+
  def strify(self) -> str:
      return (
          f"{self.cache_type}_"
@@ -45,6 +45,28 @@ class CalibratorConfig:
  def to_kwargs(self) -> Dict:
      return self.calibrator_kwargs.copy()

+ def as_dict(self) -> dict:
+     return dataclasses.asdict(self)
+
+ def update(self, **kwargs) -> "CalibratorConfig":
+     for key, value in kwargs.items():
+         if hasattr(self, key):
+             if value is not None:
+                 setattr(self, key, value)
+     return self
+
+ def empty(self, **kwargs) -> "CalibratorConfig":
+     # Set all fields to None
+     for field in dataclasses.fields(self):
+         if hasattr(self, field.name):
+             setattr(self, field.name, None)
+     if kwargs:
+         self.update(**kwargs)
+     return self
+
+ def reset(self, **kwargs) -> "CalibratorConfig":
+     return self.empty(**kwargs)
+

  @dataclasses.dataclass
  class TaylorSeerCalibratorConfig(CalibratorConfig):
@@ -50,12 +50,6 @@ class DBPruneConfig(BasicCacheConfig):
  # to at least 2 to reduce the VRAM usage of the calibrator.
  force_reduce_calibrator_vram: bool = False

- def update(self, **kwargs) -> "DBPruneConfig":
-     for key, value in kwargs.items():
-         if hasattr(self, key):
-             setattr(self, key, value)
-     return self
-
  def strify(self) -> str:
      return (
          f"{self.cache_type}_"
@@ -83,11 +83,18 @@ def quantize_ao(
  def _quantization_fn():
      try:
          if quant_type == "fp8_w8a8_dq":
-             from torchao.quantization import (
-                 float8_dynamic_activation_float8_weight,
-                 PerTensor,
-                 PerRow,
-             )
+             try:
+                 from torchao.quantization import (
+                     float8_dynamic_activation_float8_weight,
+                     PerTensor,
+                     PerRow,
+                 )
+             except ImportError:
+                 from torchao.quantization import (
+                     Float8DynamicActivationFloat8WeightConfig as float8_dynamic_activation_float8_weight,
+                     PerTensor,
+                     PerRow,
+                 )

              if per_row:  # Ensure bfloat16
                  module.to(torch.bfloat16)
@@ -109,7 +116,12 @@ def quantize_ao(
      )

  elif quant_type == "fp8_w8a16_wo":
-     from torchao.quantization import float8_weight_only
+     try:
+         from torchao.quantization import float8_weight_only
+     except ImportError:
+         from torchao.quantization import (
+             Float8WeightOnlyConfig as float8_weight_only,
+         )

      quantization_fn = float8_weight_only(
          weight_dtype=kwargs.get(
@@ -119,14 +131,25 @@ def quantize_ao(
      )

  elif quant_type == "int8_w8a8_dq":
-     from torchao.quantization import (
-         int8_dynamic_activation_int8_weight,
-     )
+     try:
+         from torchao.quantization import (
+             int8_dynamic_activation_int8_weight,
+         )
+     except ImportError:
+         from torchao.quantization import (
+             Int8DynamicActivationInt8WeightConfig as int8_dynamic_activation_int8_weight,
+         )

      quantization_fn = int8_dynamic_activation_int8_weight()

  elif quant_type == "int8_w8a16_wo":
-     from torchao.quantization import int8_weight_only
+
+     try:
+         from torchao.quantization import int8_weight_only
+     except ImportError:
+         from torchao.quantization import (
+             Int8WeightOnlyConfig as int8_weight_only,
+         )

      quantization_fn = int8_weight_only(
          # group_size is None -> per_channel, else per group
@@ -134,23 +157,41 @@ def quantize_ao(
      )

  elif quant_type == "int4_w4a8_dq":
-     from torchao.quantization import (
-         int8_dynamic_activation_int4_weight,
-     )
+
+     try:
+         from torchao.quantization import (
+             int8_dynamic_activation_int4_weight,
+         )
+     except ImportError:
+         from torchao.quantization import (
+             Int8DynamicActivationInt4WeightConfig as int8_dynamic_activation_int4_weight,
+         )

      quantization_fn = int8_dynamic_activation_int4_weight(
          group_size=kwargs.get("group_size", 32),
      )

  elif quant_type == "int4_w4a4_dq":
-     from torchao.quantization import (
-         int4_dynamic_activation_int4_weight,
-     )
+
+     try:
+         from torchao.quantization import (
+             int4_dynamic_activation_int4_weight,
+         )
+     except ImportError:
+         from torchao.quantization import (
+             Int4DynamicActivationInt4WeightConfig as int4_dynamic_activation_int4_weight,
+         )

      quantization_fn = int4_dynamic_activation_int4_weight()

  elif quant_type == "int4_w4a16_wo":
-     from torchao.quantization import int4_weight_only
+
+     try:
+         from torchao.quantization import int4_weight_only
+     except ImportError:
+         from torchao.quantization import (
+             Int4WeightOnlyConfig as int4_weight_only,
+         )

      quantization_fn = int4_weight_only(
          group_size=kwargs.get("group_size", 32),
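All of these hunks apply the same compatibility pattern: older torchao releases export factory functions such as int8_weight_only, while newer releases expose *Config classes (Int8WeightOnlyConfig, etc.), so the old name is tried first and the new class is aliased on ImportError. A hedged sketch of the pattern outside quantize_ao, assuming torchao is installed; exact behavior depends on the torchao version:

    import torch

    try:
        # Older torchao API: factory functions.
        from torchao.quantization import quantize_, int8_weight_only
    except ImportError:
        # Newer torchao API: the factory was renamed to a Config class.
        from torchao.quantization import quantize_, Int8WeightOnlyConfig as int8_weight_only

    module = torch.nn.Linear(64, 64)
    # Either import path yields a callable that builds the quantization config:
    quantize_(module, int8_weight_only())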
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cache_dit
- Version: 1.0.4
+ Version: 1.0.5
  Summary: A Unified, Flexible and Training-free Cache Acceleration Framework for 🤗Diffusers.
  Author: DefTruth, vipshop.com, etc.
  Maintainer: DefTruth, vipshop.com, etc
@@ -194,7 +194,7 @@ You can install the stable release of cache-dit from PyPI, or the latest develop
  - **[🎉Easy New Model Integration](./docs/User_Guide.md#automatic-block-adapter)**: Features like **Unified Cache APIs**, **Forward Pattern Matching**, **Automatic Block Adapter**, **Hybrid Forward Pattern**, and **Patch Functor** make it highly functional and flexible. For example, we achieved 🎉 Day 1 support for [HunyuanImage-2.1](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1) with 1.7x speedup w/o precision loss—even before it was available in the Diffusers library.
  - **[🎉State-of-the-Art Performance](./bench/)**: Compared with algorithms including Δ-DiT, Chipmunk, FORA, DuCa, TaylorSeer and FoCa, cache-dit achieved the **SOTA** performance w/ **7.4x↑🎉** speedup on ClipScore!
  - **[🎉Support for 4/8-Steps Distilled Models](./bench/)**: Surprisingly, cache-dit's **DBCache** works for extremely few-step distilled models—something many other methods fail to do.
- - **[🎉Compatibility with Other Optimizations](./docs/User_Guide.md#️torch-compile)**: Designed to work seamlessly with torch.compile, model CPU offload, sequential CPU offload, group offloading, etc.
+ - **[🎉Compatibility with Other Optimizations](./docs/User_Guide.md#️torch-compile)**: Designed to work seamlessly with torch.compile, model CPU offload, sequential CPU offload, group offloading, Quantization(**[torchao](./examples/quantize/)**, **[🔥nunchaku](./examples/quantize/)**), etc.
  - **[🎉Hybrid Cache Acceleration](./docs/User_Guide.md#taylorseer-calibrator)**: Now supports hybrid **Block-wise Cache + Calibrator** schemes (e.g., DBCache or DBPrune + TaylorSeerCalibrator). DBCache or DBPrune acts as the **Indicator** to decide *when* to cache, while the Calibrator decides *how* to cache. More mainstream cache acceleration algorithms (e.g., FoCa) will be supported in the future, along with additional benchmarks—stay tuned for updates!
  - **[🤗Diffusers Ecosystem Integration](https://huggingface.co/docs/diffusers/main/en/optimization/cache_dit)**: 🔥**cache-dit** has joined the Diffusers community ecosystem as the **first** DiT-specific cache acceleration framework! Check out the documentation here: <a href="https://huggingface.co/docs/diffusers/main/en/optimization/cache_dit"><img src=https://img.shields.io/badge/🤗Diffusers-ecosystem-yellow.svg ></a>

@@ -202,6 +202,7 @@ You can install the stable release of cache-dit from PyPI, or the latest develop

  ## 🔥Important News

+ - 2025.10.15: 🎉cache-dit now supported [**🔥nunchaku**](https://github.com/nunchaku-tech/nunchaku): Qwen-Image/FLUX.1 [4-bits examples](./examples/quantize/)
  - 2025.10.13: 🎉cache-dit achieved the **SOTA** performance w/ **7.4x↑🎉** speedup on ClipScore!
  - 2025.10.10: 🔥[**Qwen-Image-ControlNet-Inpainting**](https://huggingface.co/InstantX/Qwen-Image-ControlNet-Inpainting) **2.3x↑🎉** speedup! Check the [example](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_qwen_image_controlnet_inpaint.py).
  - 2025.09.26: 🔥[**Qwen-Image-Edit-Plus(2509)**](https://github.com/QwenLM/Qwen-Image) **2.1x↑🎉** speedup! Please check the [example](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_qwen_image_edit_plus.py).
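As context for the compatibility and nunchaku items above, a hedged usage sketch of the one-line cache API on a FLUX pipeline. The enable_cache entry point comes from the project's README; the model id, sampler settings, and the torch.compile call are illustrative assumptions, not taken from the 1.0.5 docs:

    import torch
    import cache_dit
    from diffusers import FluxPipeline

    pipe = FluxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
    ).to("cuda")

    # One-line cache acceleration with the default block-wise cache settings.
    cache_dit.enable_cache(pipe)

    # Per the compatibility bullet, this composes with torch.compile and offloading.
    pipe.transformer = torch.compile(pipe.transformer)

    image = pipe("a cat playing a violin", num_inference_steps=28).images[0]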
@@ -1,5 +1,5 @@
  cache_dit/__init__.py,sha256=JQLxwr5aqoMFp-BNR58J0i6NutbRmNXKsaRJKCZQDCg,1638
- cache_dit/_version.py,sha256=jp1Oow7okdi1HqeKIp8SmyysmUf-oq2X9syICfrgATI,704
+ cache_dit/_version.py,sha256=uYAmg6HpJuEIdeeqRyfS9YeZSA-4pXIPoW7JKBe0Uvw,704
  cache_dit/logger.py,sha256=0zsu42hN-3-rgGC_C29ms1IvVpV4_b4_SwJCKSenxBE,4304
  cache_dit/utils.py,sha256=0YNFr84pxYoHOCZvnONKKXYN3PZY4kao9Tq2yEfHHR8,16986
  cache_dit/cache_factory/.gitignore,sha256=5Cb-qT9wsTUoMJ7vACDF7ZcLpAXhi5v-xdcWSRit988,23
@@ -9,11 +9,11 @@ cache_dit/cache_factory/cache_types.py,sha256=QnWfaS52UOXQtnoCUOwwz4ziY0dyBta6vQ
  cache_dit/cache_factory/forward_pattern.py,sha256=FumlCuZ-TSmSYH0hGBHctSJ-oGLCftdZjLygqhsmdR4,2258
  cache_dit/cache_factory/params_modifier.py,sha256=2T98IbepAolWW6GwQsqUDsRzu0k65vo7BOrN3V8mKog,3606
  cache_dit/cache_factory/utils.py,sha256=S3SD6Zhexzhkqnmfo830v6oNLm8stZe32nF4VdxD_bA,2497
- cache_dit/cache_factory/block_adapters/__init__.py,sha256=vM3aDMzPY79Tw4L0hlV2PdA3MFYomnf0eo0BGBo9P78,18087
+ cache_dit/cache_factory/block_adapters/__init__.py,sha256=zs-cYacRL_hWlhUXmKc0TZNDAKzzWuznvHeuDpAmuwc,18221
  cache_dit/cache_factory/block_adapters/block_adapters.py,sha256=2TVK_KqiYXC7AKZ2s07fzdOzUoeUBc9P1SzQtLVzhf4,22249
  cache_dit/cache_factory/block_adapters/block_registers.py,sha256=2L7QeM4ygnaKQpC9PoJod0QRYyxidUKU2AYpysDCUwE,2572
  cache_dit/cache_factory/cache_adapters/__init__.py,sha256=py71WGD3JztQ1uk6qdLVbzYcQ1rvqFidNNaQYo7tqTo,79
- cache_dit/cache_factory/cache_adapters/cache_adapter.py,sha256=Za9HixVkEKldYzyDA57xvF91fm9dao2S-Fz5QBIT02M,22123
+ cache_dit/cache_factory/cache_adapters/cache_adapter.py,sha256=-KTKukcNaVYk92np-QnGJ-EJbTdRQplqIHtqEORPiAo,23745
  cache_dit/cache_factory/cache_blocks/__init__.py,sha256=cpxzmDcUhbXcReHqaKSnWyEEbIg1H91Pz5hE3z9Xj3k,9984
  cache_dit/cache_factory/cache_blocks/offload_utils.py,sha256=wusgcqaCrwEjvv7Guy-6VXhNOgPPUrBV2sSVuRmGuvo,3513
  cache_dit/cache_factory/cache_blocks/pattern_0_1_2.py,sha256=j4bTafqU5DLQhzP_X5XwOk-QUVLWkGrX-Q6JZvBGHh0,666
@@ -21,14 +21,14 @@ cache_dit/cache_factory/cache_blocks/pattern_3_4_5.py,sha256=2qPnXVZwpQIm2oJ-Yrn
  cache_dit/cache_factory/cache_blocks/pattern_base.py,sha256=9H87qBRpa6UWRkUKXLVO0_9NJgxCVKkFSzaQxM9YPw8,25487
  cache_dit/cache_factory/cache_blocks/pattern_utils.py,sha256=qOxoVTlYPQzPMrR06-7_Ce_lwNg6n5pt1KQrvxzAJhE,3124
  cache_dit/cache_factory/cache_contexts/__init__.py,sha256=7uY8fX9uhpC71VNm1HH4aDIicYn-dD3kRpPQhvc9-EI,853
- cache_dit/cache_factory/cache_contexts/cache_config.py,sha256=WBHU2XVuYSFUSkrrJk8c4952LTeqvgetdkdtch_uSmg,5238
+ cache_dit/cache_factory/cache_contexts/cache_config.py,sha256=G0PVWgckDqeyARc72Ne_0lRtO_LftsOeMERRhbh2gCA,5739
  cache_dit/cache_factory/cache_contexts/cache_context.py,sha256=fjZMEHaT1DZvUKnzY41GP0Ep8tmPEZTOsCSvG-5it5k,11269
  cache_dit/cache_factory/cache_contexts/cache_manager.py,sha256=tKtP35GDwZDoxGrQ_Okg_enlh3L-t-iqpytx8TFO_fw,30519
  cache_dit/cache_factory/cache_contexts/context_manager.py,sha256=j5zP_kwZAKla3EXbfr6JKI1vIxZuUEbZVhAPrtC4COw,853
- cache_dit/cache_factory/cache_contexts/prune_config.py,sha256=efFO_tu6AFJxIDp0OxExWKPzOFj95-NSrLGXggimBMA,3407
+ cache_dit/cache_factory/cache_contexts/prune_config.py,sha256=WMTh6zb480a0oJiYMlgI0cwCsDSVvs6UjyeJLiXbjP8,3216
  cache_dit/cache_factory/cache_contexts/prune_context.py,sha256=ywiT9P0w_GjIFLowzUDa6jhTohNsSGfTbanZcs9wMic,6359
  cache_dit/cache_factory/cache_contexts/prune_manager.py,sha256=rZG7HD9ATqgH4VZdMq1XtP_h2pokaotFOVx1svB3J7E,5478
- cache_dit/cache_factory/cache_contexts/calibrators/__init__.py,sha256=mzYXO8tbytGpJJ9rpPu20kMoj1Iu_7Ym9tjfzV8rA98,5574
+ cache_dit/cache_factory/cache_contexts/calibrators/__init__.py,sha256=QTbyT8xcFEjfIp9xjbnsnlnVCNvMjUc20NjB0W-s95k,6269
  cache_dit/cache_factory/cache_contexts/calibrators/base.py,sha256=mn6ZBkChGpGwN5csrHTUGMoX6BBPvqHXSLbIExiW-EU,748
  cache_dit/cache_factory/cache_contexts/calibrators/foca.py,sha256=nhHGs_hxwW1M942BQDMJb9-9IuHdnOxp774Jrna1bJI,891
  cache_dit/cache_factory/cache_contexts/calibrators/taylorseer.py,sha256=l1QSNaBwtGtpZZFAgCE7Hu8Nf1oL4QAcYu7lShpFGyw,5850
@@ -53,11 +53,11 @@ cache_dit/metrics/inception.py,sha256=pBVe2X6ylLPIXTG4-GWDM9DWnCviMJbJ45R3ulhktR
  cache_dit/metrics/lpips.py,sha256=hrHrmdM-f2B4TKDs0xLqJO5JFaYcCjq2qNIR8oCrVkc,811
  cache_dit/metrics/metrics.py,sha256=AZbQyoavE-djvyRUZ_EfCIrWSQbiWQFo7n2dhn7XptE,40466
  cache_dit/quantize/__init__.py,sha256=kWYoMAyZgBXu9BJlZjTQ0dRffW9GqeeY9_iTkXrb70A,59
- cache_dit/quantize/quantize_ao.py,sha256=Pr3u3Qr6qLvFkd8k-_rfcz4Mkjlg36U9BHG2t6Bl-6M,6301
+ cache_dit/quantize/quantize_ao.py,sha256=LlkKh2uAtLihNRXHWIVggYR7ls12ak_VVLCY8trp2LY,7996
  cache_dit/quantize/quantize_interface.py,sha256=2s_R7xPSKuJeFpEGeLwRxnq_CqJcBG3a3lzyW5wh-UM,1241
- cache_dit-1.0.4.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
- cache_dit-1.0.4.dist-info/METADATA,sha256=f04uCgApjgfHTC7Ll9aPejXCFFXbFTpTy-rjd5I_iwM,28376
- cache_dit-1.0.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- cache_dit-1.0.4.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
- cache_dit-1.0.4.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
- cache_dit-1.0.4.dist-info/RECORD,,
+ cache_dit-1.0.5.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
+ cache_dit-1.0.5.dist-info/METADATA,sha256=qalYkFx9Y0cSmoBqHx-ZmI2RdVxcFhuNkWZVs7laExI,28632
+ cache_dit-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ cache_dit-1.0.5.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
+ cache_dit-1.0.5.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
+ cache_dit-1.0.5.dist-info/RECORD,,