cache-dit 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cache-dit might be problematic.

cache_dit/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
  __version_tuple__: VERSION_TUPLE
  version_tuple: VERSION_TUPLE
 
- __version__ = version = '0.2.0'
- __version_tuple__ = version_tuple = (0, 2, 0)
+ __version__ = version = '0.2.1'
+ __version_tuple__ = version_tuple = (0, 2, 1)

cache_dit/cache_factory/dual_block_cache/cache_context.py CHANGED
@@ -9,6 +9,7 @@ from typing import Any, DefaultDict, Dict, List, Optional, Union
  import torch
 
  import cache_dit.primitives as DP
+ from cache_dit.cache_factory.taylorseer import TaylorSeer
  from cache_dit.logger import init_logger
 
  logger = init_logger(__name__)
@@ -60,6 +61,18 @@ class DBCacheContext:
      residual_diffs: DefaultDict[str, float] = dataclasses.field(
          default_factory=lambda: defaultdict(float),
      )
+     # TODO: Support TaylorSeers and SLG in Dual Block Cache
+     # TaylorSeers:
+     # Title: From Reusing to Forecasting: Accelerating Diffusion Models with TaylorSeers
+     # Url: https://arxiv.org/pdf/2503.06923
+     taylorseer: Optional[TaylorSeer] = None
+     alter_taylorseer: Optional[TaylorSeer] = None
+
+     # Skip Layer Guidance, SLG
+     # https://github.com/huggingface/candle/issues/2588
+     slg_layers: Optional[List[int]] = None
+     slg_start: float = 0.0
+     slg_end: float = 0.1
 
      def get_incremental_name(self, name=None):
          if name is None:
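
The new `taylorseer` / `alter_taylorseer` fields are placeholders for the forecasting approach referenced in the TODO (arXiv:2503.06923), which extrapolates a block's output from previously cached values instead of simply reusing the last one. Below is a minimal first-order sketch of such a forecaster; the class and method names (`update`, `approximate`) and the finite-difference scheme are assumptions for illustration, not the actual `cache_dit.cache_factory.taylorseer` implementation.

```python
from typing import Optional

import torch


class TaylorSeerSketch:
    """Minimal first-order forecaster in the spirit of TaylorSeers (illustrative only).

    Stores the last computed feature and a finite-difference estimate of its
    rate of change across denoising steps, then extrapolates on steps where
    the real block is skipped.
    """

    def __init__(self) -> None:
        self.prev: Optional[torch.Tensor] = None
        self.deriv: Optional[torch.Tensor] = None
        self.prev_step: Optional[int] = None

    def update(self, value: torch.Tensor, step: int) -> None:
        # Called whenever the block is actually computed.
        if self.prev is not None and step != self.prev_step:
            self.deriv = (value - self.prev) / float(step - self.prev_step)
        self.prev, self.prev_step = value, step

    def approximate(self, step: int) -> torch.Tensor:
        # Called on cached steps: forecast instead of recompute.
        if self.deriv is None:
            return self.prev  # not enough history yet, fall back to plain reuse
        return self.prev + self.deriv * float(step - self.prev_step)
```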
@@ -700,7 +713,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
              encoder_hidden_states,
              hidden_states_residual,
              encoder_hidden_states_residual,
-         ) = self.call_MN2n_transformer_blocks( # middle
+         ) = self.call_Mn_transformer_blocks( # middle
              hidden_states,
              encoder_hidden_states,
              *args,
@@ -772,32 +785,32 @@ class DBCachedTransformerBlocks(torch.nn.Module):
          return selected_Fn_transformer_blocks
 
      @torch.compiler.disable
-     def _MN2n_single_transformer_blocks(self): # middle
+     def _Mn_single_transformer_blocks(self): # middle blocks
          # M(N-2n): transformer_blocks [n,...] + single_transformer_blocks [0,...,N-n]
-         selected_MN2n_single_transformer_blocks = []
+         selected_Mn_single_transformer_blocks = []
          if self.single_transformer_blocks is not None:
              if Bn_compute_blocks() == 0: # WARN: x[:-0] = []
-                 selected_MN2n_single_transformer_blocks = (
+                 selected_Mn_single_transformer_blocks = (
                      self.single_transformer_blocks
                  )
              else:
-                 selected_MN2n_single_transformer_blocks = (
+                 selected_Mn_single_transformer_blocks = (
                      self.single_transformer_blocks[: -Bn_compute_blocks()]
                  )
-         return selected_MN2n_single_transformer_blocks
+         return selected_Mn_single_transformer_blocks
 
      @torch.compiler.disable
-     def _MN2n_transformer_blocks(self):
+     def _Mn_transformer_blocks(self): # middle blocks
          # M(N-2n): only transformer_blocks [n,...,N-n], middle
          if Bn_compute_blocks() == 0: # WARN: x[:-0] = []
-             selected_MN2n_transformer_blocks = self.transformer_blocks[
+             selected_Mn_transformer_blocks = self.transformer_blocks[
                  Fn_compute_blocks() :
              ]
          else:
-             selected_MN2n_transformer_blocks = self.transformer_blocks[
+             selected_Mn_transformer_blocks = self.transformer_blocks[
                  Fn_compute_blocks() : -Bn_compute_blocks()
              ]
-         return selected_MN2n_transformer_blocks
+         return selected_Mn_transformer_blocks
 
      @torch.compiler.disable
      def _Bn_single_transformer_blocks(self):
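
These selectors split the block list into the first Fn blocks, the middle M(N-2n) blocks, and the last Bn blocks; the `WARN: x[:-0] = []` comment is why `Bn_compute_blocks() == 0` gets its own branch. A small standalone illustration of the same slicing, with arbitrary sizes that are not taken from any real model:

```python
# Illustrative only: the Fn / middle / Bn split used above, plus the Python
# pitfall the source warns about (xs[:-0] is the empty list, not the whole list).
blocks = list(range(19))  # pretend these are N = 19 transformer blocks
Fn, Bn = 8, 8             # first n and last n blocks

first = blocks[:Fn]                                  # Fn blocks
middle = blocks[Fn:] if Bn == 0 else blocks[Fn:-Bn]  # M(N-2n) middle blocks
last = [] if Bn == 0 else blocks[-Bn:]               # Bn blocks

assert first + middle + last == blocks
assert blocks[:-0] == []  # why Bn == 0 needs a separate branch
print(len(first), len(middle), len(last))  # 8 3 8
```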
@@ -845,7 +858,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
 
          return hidden_states, encoder_hidden_states
 
-     def call_MN2n_transformer_blocks(
+     def call_Mn_transformer_blocks(
          self,
          hidden_states: torch.Tensor,
          encoder_hidden_states: torch.Tensor,
@@ -873,7 +886,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
              hidden_states = torch.cat(
                  [encoder_hidden_states, hidden_states], dim=1
              )
-             for block in self._MN2n_single_transformer_blocks():
+             for block in self._Mn_single_transformer_blocks():
                  hidden_states = block(
                      hidden_states,
                      *args,
@@ -887,7 +900,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
                  dim=1,
              )
          else:
-             for block in self._MN2n_transformer_blocks():
+             for block in self._Mn_transformer_blocks():
                  hidden_states = block(
                      hidden_states,
                      encoder_hidden_states,
@@ -1016,7 +1029,9 @@ class DBCachedTransformerBlocks(torch.nn.Module):
 
      def _compute_and_cache_single_transformer_block(
          self,
-         i: int, # Block index in the transformer blocks
+         # Block index in the transformer blocks
+         # Bn: 8, block_id should be in [0, 8)
+         block_id: int,
          # Helper inputs for hidden states split and reshape
          original_hidden_states: torch.Tensor,
          original_encoder_hidden_states: torch.Tensor,
@@ -1042,7 +1057,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
              )
              # Cache residuals for the non-compute Bn blocks for
              # subsequent cache steps.
-             if i not in Bn_compute_blocks_ids():
+             if block_id not in Bn_compute_blocks_ids():
                  Bn_i_hidden_states = hidden_states
                  (
                      Bn_i_hidden_states_residual,
@@ -1057,16 +1072,16 @@ class DBCachedTransformerBlocks(torch.nn.Module):
                  # Save original_hidden_states for diff calculation.
                  set_Bn_buffer(
                      Bn_i_original_hidden_states,
-                     prefix=f"Bn_{i}_single_original",
+                     prefix=f"Bn_{block_id}_single_original",
                  )
 
                  set_Bn_buffer(
                      Bn_i_hidden_states_residual,
-                     prefix=f"Bn_{i}_single_residual",
+                     prefix=f"Bn_{block_id}_single_residual",
                  )
                  set_Bn_encoder_buffer(
                      Bn_i_encoder_hidden_states_residual,
-                     prefix=f"Bn_{i}_single_residual",
+                     prefix=f"Bn_{block_id}_single_residual",
                  )
                  del Bn_i_hidden_states
                  del Bn_i_hidden_states_residual
@@ -1077,7 +1092,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
          else:
              # Cache steps: Reuse the cached residuals.
              # Check if the block is in the Bn_compute_blocks_ids.
-             if i in Bn_compute_blocks_ids():
+             if block_id in Bn_compute_blocks_ids():
                  hidden_states = block(
                      hidden_states,
                      *args,
@@ -1091,7 +1106,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
                      hidden_states, # curr step
                      parallelized=self._is_parallelized(),
                      threshold=non_compute_blocks_diff_threshold(),
-                     prefix=f"Bn_{i}_single_original", # prev step
+                     prefix=f"Bn_{block_id}_single_original", # prev step
                  ):
                      Bn_i_original_hidden_states = hidden_states
                      (
@@ -1106,7 +1121,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
                          apply_hidden_states_residual(
                              Bn_i_original_hidden_states,
                              Bn_i_original_encoder_hidden_states,
-                             prefix=f"Bn_{i}_single_residual",
+                             prefix=f"Bn_{block_id}_single_residual",
                          )
                      )
                      hidden_states = torch.cat(
@@ -1125,7 +1140,9 @@ class DBCachedTransformerBlocks(torch.nn.Module):
 
      def _compute_and_cache_transformer_block(
          self,
-         i: int, # Block index in the transformer blocks
+         # Block index in the transformer blocks
+         # Bn: 8, block_id should be in [0, 8)
+         block_id: int,
          # Below are the inputs to the block
          block, # The transformer block to be executed
          hidden_states: torch.Tensor,
@@ -1158,7 +1175,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
              )
              # Cache residuals for the non-compute Bn blocks for
              # subsequent cache steps.
-             if i not in Bn_compute_blocks_ids():
+             if block_id not in Bn_compute_blocks_ids():
                  Bn_i_hidden_states_residual = (
                      hidden_states - Bn_i_original_hidden_states
                  )
@@ -1169,16 +1186,16 @@ class DBCachedTransformerBlocks(torch.nn.Module):
                  # Save original_hidden_states for diff calculation.
                  set_Bn_buffer(
                      Bn_i_original_hidden_states,
-                     prefix=f"Bn_{i}_original",
+                     prefix=f"Bn_{block_id}_original",
                  )
 
                  set_Bn_buffer(
                      Bn_i_hidden_states_residual,
-                     prefix=f"Bn_{i}_residual",
+                     prefix=f"Bn_{block_id}_residual",
                  )
                  set_Bn_encoder_buffer(
                      Bn_i_encoder_hidden_states_residual,
-                     prefix=f"Bn_{i}_residual",
+                     prefix=f"Bn_{block_id}_residual",
                  )
                  del Bn_i_hidden_states_residual
                  del Bn_i_encoder_hidden_states_residual
@@ -1189,7 +1206,7 @@ class DBCachedTransformerBlocks(torch.nn.Module):
          else:
              # Cache steps: Reuse the cached residuals.
              # Check if the block is in the Bn_compute_blocks_ids.
-             if i in Bn_compute_blocks_ids():
+             if block_id in Bn_compute_blocks_ids():
                  hidden_states = block(
                      hidden_states,
                      encoder_hidden_states,
@@ -1211,13 +1228,13 @@ class DBCachedTransformerBlocks(torch.nn.Module):
                      hidden_states, # curr step
                      parallelized=self._is_parallelized(),
                      threshold=non_compute_blocks_diff_threshold(),
-                     prefix=f"Bn_{i}_original", # prev step
+                     prefix=f"Bn_{block_id}_original", # prev step
                  ):
                      hidden_states, encoder_hidden_states = (
                          apply_hidden_states_residual(
                              hidden_states,
                              encoder_hidden_states,
-                             prefix=f"Bn_{i}_residual",
+                             prefix=f"Bn_{block_id}_residual",
                          )
                      )
                  else:
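
On cache steps, the renamed `block_id` parameter drives a per-block decision: blocks listed in `Bn_compute_blocks_ids()` are always recomputed, while the remaining Bn blocks either reuse their cached `Bn_{block_id}_residual` buffers or fall back to real computation once the stored hidden states drift beyond `non_compute_blocks_diff_threshold()`. A hypothetical condensation of that control flow; the helper names echo the diff above, but the `ctx` object and its methods are stand-ins, not the real cache-context API:

```python
def cached_step_block(block_id, block, hidden_states, encoder_hidden_states, ctx):
    # Hypothetical sketch of the cache-step branch in
    # _compute_and_cache_transformer_block (simplified, not the real code).
    if block_id in ctx.Bn_compute_blocks_ids():
        # Explicitly listed Bn blocks are always recomputed.
        return block(hidden_states, encoder_hidden_states)
    if ctx.is_similar_to_prev_step(
        hidden_states,
        threshold=ctx.non_compute_blocks_diff_threshold(),
        prefix=f"Bn_{block_id}_original",  # compared against the previous step
    ):
        # Close enough to the previous step: reuse the cached residuals.
        return ctx.apply_hidden_states_residual(
            hidden_states,
            encoder_hidden_states,
            prefix=f"Bn_{block_id}_residual",
        )
    # Otherwise the cached state has drifted too far; run the block for real.
    return block(hidden_states, encoder_hidden_states)
```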

cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/wan.py CHANGED
@@ -56,7 +56,7 @@ def apply_cache_on_pipe(
      shallow_patch: bool = False,
      residual_diff_threshold=0.03,
      downsample_factor=1,
-     # SLG is not supported in WAN with DBCache yet
+     # SLG is not supported in WAN with DBPrune yet
      # slg_layers=None,
      # slg_start: float = 0.0,
      # slg_end: float = 0.1,

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cache_dit
- Version: 0.2.0
+ Version: 0.2.1
  Summary: 🤗 CacheDiT: A Training-free and Easy-to-use Cache Acceleration Toolbox for Diffusion Transformers
  Author: DefTruth, vipshop.com, etc.
  Maintainer: DefTruth, vipshop.com, etc
@@ -44,20 +44,11 @@ Dynamic: requires-python
  <img src=https://img.shields.io/badge/PyPI-pass-brightgreen.svg >
  <img src=https://static.pepy.tech/badge/cache-dit >
  <img src=https://img.shields.io/badge/Python-3.10|3.11|3.12-9cf.svg >
- <img src=https://img.shields.io/badge/Release-v0.2.0-brightgreen.svg >
+ <img src=https://img.shields.io/badge/Release-v0.2.1-brightgreen.svg >
  </div>
  <p align="center">
  DeepCache is for UNet not DiT. Most DiT cache speedups are complex and not training-free. CacheDiT <br>offers a set of training-free cache accelerators for DiT: 🔥DBCache, DBPrune, FBCache, etc🔥
  </p>
- <p align="center">
- <h4> 🔥Supported Models🔥</h4>
- <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀FLUX.1</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
- <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀Mochi</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
- <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀CogVideoX</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
- <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀CogVideoX1.5</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
- <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀Wan2.1</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
- <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀HunyuanVideo</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
- </p>
  </div>
 
  ## 👋 Highlight
@@ -160,6 +151,7 @@ Moreover, **CacheDiT** are **plug-and-play** solutions that works hand-in-hand w
  <div id="contents"></div>
 
  - [⚙️Installation](#️installation)
+ - [🔥Supported Models](#supported)
  - [⚡️Dual Block Cache](#dbcache)
  - [🎉First Block Cache](#fbcache)
  - [⚡️Dynamic Block Prune](#dbprune)
@@ -183,6 +175,30 @@ Or you can install the latest develop version from GitHub:
  pip3 install git+https://github.com/vipshop/cache-dit.git
  ```
 
+ ## 🔥Supported Models
+
+ <div id="supported"></div>
+
+ - [🚀FLUX.1](https://github.com/vipshop/cache-dit/raw/main/examples)
+ - [🚀Mochi](https://github.com/vipshop/cache-dit/raw/main/examples)
+ - [🚀CogVideoX](https://github.com/vipshop/cache-dit/raw/main/examples)
+ - [🚀CogVideoX1.5](https://github.com/vipshop/cache-dit/raw/main/examples)
+ - [🚀Wan2.1](https://github.com/vipshop/cache-dit/raw/main/examples)
+ - [🚀HunyuanVideo](https://github.com/vipshop/cache-dit/raw/main/examples)
+
+
+ <!--
+ <p align="center">
+ <h4> 🔥Supported Models🔥</h4>
+ <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀FLUX.1</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
+ <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀Mochi</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
+ <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀CogVideoX</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
+ <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀CogVideoX1.5</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
+ <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀Wan2.1</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
+ <a href=https://github.com/vipshop/cache-dit/raw/main/examples> <b>🚀HunyuanVideo</b>: ✔️DBCache, ✔️DBPrune, ✔️FBCache🔥</a> <br>
+ </p>
+ -->
+
  ## ⚡️DBCache: Dual Block Cache
 
  <div id="dbcache"></div>
@@ -340,6 +356,9 @@ cache_options = {
  apply_cache_on_pipe(pipe, **cache_options)
  ```
 
+ > [!Important]
+ > Please note that for GPUs with lower VRAM, DBPrune may not be suitable for use on video DiTs, as it caches the hidden states and residuals of each block, leading to higher GPU memory requirements. In such cases, please use DBCache, which only caches the hidden states and residuals of 2 blocks.
+
  <div align="center">
  <p align="center">
  DBPrune, <b> L20x1 </b>, Steps: 28, "A cat holding a sign that says hello world with complex background"
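
Acting on the note above usually means switching the cache type rather than tuning DBPrune. A hedged sketch using the `apply_cache_on_pipe` entry point shown in this README; the `CacheType.default_options` helper and the FLUX.1 model id are assumptions based on the project's examples and may not match this exact release:

```python
import torch
from diffusers import FluxPipeline

# Assumed import path and helpers; verify against the cache-dit README
# for the installed version.
from cache_dit.cache_factory import CacheType, apply_cache_on_pipe

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# DBCache only keeps hidden states/residuals around the Fn/Bn boundary,
# so it needs less VRAM than DBPrune, which caches per-block state.
cache_options = CacheType.default_options(CacheType.DBCache)
apply_cache_on_pipe(pipe, **cache_options)

image = pipe(
    "A cat holding a sign that says hello world with complex background",
    num_inference_steps=28,
).images[0]
image.save("dbcache.png")
```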

@@ -1,12 +1,12 @@
  cache_dit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- cache_dit/_version.py,sha256=iB5DfB5V6YB5Wo4JmvS-txT42QtmGaWcWp3udRT7zCI,511
+ cache_dit/_version.py,sha256=UoNvMtd4wCG76RwoSpNCUtaFyTwakGcZolfjXzNVSMY,511
  cache_dit/logger.py,sha256=dKfNe_RRk9HJwfgHGeRR1f0LbskJpKdGmISCbL9roQs,3443
  cache_dit/primitives.py,sha256=A2iG9YLot3gOsZSPp-_gyjqjLgJvWQRx8aitD4JQ23Y,3877
  cache_dit/cache_factory/__init__.py,sha256=5RNuhWakvvqrOV4vkqrEBA7d-V1LwcNSsjtW14mkqK8,5255
  cache_dit/cache_factory/taylorseer.py,sha256=0W29ykJg3MnyLAB2KFicsl11Xe41cDYPgI60bquG_NY,2495
  cache_dit/cache_factory/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  cache_dit/cache_factory/dual_block_cache/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- cache_dit/cache_factory/dual_block_cache/cache_context.py,sha256=EJ-uhA2-sWMW1jNDhcBtjHDqSn8lUzfKbYoPfZDQhZU,49665
+ cache_dit/cache_factory/dual_block_cache/cache_context.py,sha256=huudVxz-SF3wAY_9vkViFCBhFKm5IzLvXR686u82pbM,50430
  cache_dit/cache_factory/dual_block_cache/diffusers_adapters/__init__.py,sha256=ySmO_0IuSm5VrYdi9ccGVHoFVkgtPZMJGq_OMoyl0Q8,2003
  cache_dit/cache_factory/dual_block_cache/diffusers_adapters/cogvideox.py,sha256=1_n-RFMiL3v2SjhSfFrPH5Mn5Dq9z4BesVK8GN_nh2g,2404
  cache_dit/cache_factory/dual_block_cache/diffusers_adapters/flux.py,sha256=UbE6nIF-EtA92QxIZVMzIssdZKQSPAVX1hchF9R8drU,2754
@@ -20,7 +20,7 @@ cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/cogvideox.py,sha2
  cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/flux.py,sha256=KbEkLSsHtS6xwLWNh3jlOlXRyGRdrI2pWV1zyQxMTj4,2757
  cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/hunyuan_video.py,sha256=v3SgLJQPbEqSVy2sYVGMhptJqCe-XPXL7LIV7GEacZg,10105
  cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/mochi.py,sha256=rgeXfww-7WX6URSDg7mF1HuxSmYmoJVjMVoNGuxjwxc,2395
- cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/wan.py,sha256=JCRIq7kt59nbv7t10FdfmFZd1GawUZu2tYQ_QBB8zXQ,2713
+ cache_dit/cache_factory/dynamic_block_prune/diffusers_adapters/wan.py,sha256=3-Bg9oPdLcIFZqqSpBGU3Ps1DJ9J8rslP5X7Ow1EHmc,2713
  cache_dit/cache_factory/first_block_cache/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  cache_dit/cache_factory/first_block_cache/cache_context.py,sha256=DpDhtK095PlrvACf7sbjOt2-QpVkV1arr1qGEKJqgaQ,23502
  cache_dit/cache_factory/first_block_cache/diffusers_adapters/__init__.py,sha256=-FFgA2MoudEo7uDacg4aWgm1KwfLZFsEDTVxatgbq9M,2146
@@ -29,8 +29,8 @@ cache_dit/cache_factory/first_block_cache/diffusers_adapters/flux.py,sha256=Dcd4
  cache_dit/cache_factory/first_block_cache/diffusers_adapters/hunyuan_video.py,sha256=OL7W4ukYlZz0IDmBR1zVV6XT3Mgciglj9Hqzv1wUAkQ,10092
  cache_dit/cache_factory/first_block_cache/diffusers_adapters/mochi.py,sha256=lQTClo52OwPbNEE4jiBZQhfC7hbtYqnYIABp_vbm_dk,2363
  cache_dit/cache_factory/first_block_cache/diffusers_adapters/wan.py,sha256=dBNzHBECAuTTA1a7kLdvZL20YzaKTAS3iciVLzKKEWA,2638
- cache_dit-0.2.0.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
- cache_dit-0.2.0.dist-info/METADATA,sha256=WK3Fu8euIwLlm3TXjJws9VzwNjEfcMNvkCSJRt7jEdo,21845
- cache_dit-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- cache_dit-0.2.0.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
- cache_dit-0.2.0.dist-info/RECORD,,
+ cache_dit-0.2.1.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
+ cache_dit-0.2.1.dist-info/METADATA,sha256=YbS-gmVFpGfTmaKNTUbXfWlfQa-RCoz0TyRqCtHEGJc,22700
+ cache_dit-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ cache_dit-0.2.1.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
+ cache_dit-0.2.1.dist-info/RECORD,,