cache-dit 1.0.5__py3-none-any.whl → 1.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


cache_dit/__init__.py CHANGED
@@ -4,8 +4,7 @@ except ImportError:
     __version__ = "unknown version"
     version_tuple = (0, 0, "unknown version")
 
-from cache_dit.utils import summary
-from cache_dit.utils import strify
+
 from cache_dit.utils import disable_print
 from cache_dit.logger import init_logger
 from cache_dit.cache_factory import load_options
@@ -28,7 +27,10 @@ from cache_dit.cache_factory import supported_pipelines
 from cache_dit.cache_factory import get_adapter
 from cache_dit.compile import set_compile_configs
 from cache_dit.quantize import quantize
-
+from cache_dit.parallelism import ParallelismBackend
+from cache_dit.parallelism import ParallelismConfig
+from cache_dit.utils import summary
+from cache_dit.utils import strify
 
 NONE = CacheType.NONE
 DBCache = CacheType.DBCache
cache_dit/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '1.0.5'
-__version_tuple__ = version_tuple = (1, 0, 5)
+__version__ = version = '1.0.7'
+__version_tuple__ = version_tuple = (1, 0, 7)
 
 __commit_id__ = commit_id = None
cache_dit/cache_factory/block_adapters/__init__.py CHANGED
@@ -577,3 +577,47 @@ def hunyuanditpag_adapter(pipe, **kwargs) -> BlockAdapter:
         patch_functor=HunyuanDiTPatchFunctor(),
         **kwargs,
     )
+
+
+@BlockAdapterRegistry.register("Kandinsky5")
+def kandinsky5_adapter(pipe, **kwargs) -> BlockAdapter:
+    try:
+        from diffusers import Kandinsky5Transformer3DModel
+
+        assert isinstance(pipe.transformer, Kandinsky5Transformer3DModel)
+        return BlockAdapter(
+            pipe=pipe,
+            transformer=pipe.transformer,
+            blocks=pipe.transformer.visual_transformer_blocks,
+            forward_pattern=ForwardPattern.Pattern_3,  # or Pattern_2
+            has_separate_cfg=True,
+            check_forward_pattern=False,
+            check_num_outputs=False,
+            **kwargs,
+        )
+    except ImportError:
+        raise ImportError(
+            "Kandinsky5Transformer3DModel is not available in the current diffusers version. "
+            "Please upgrade diffusers>=0.36.dev0 to use this adapter."
+        )
+
+
+@BlockAdapterRegistry.register("PRX")
+def prx_adapter(pipe, **kwargs) -> BlockAdapter:
+    try:
+        from diffusers import PRXTransformer2DModel
+
+        assert isinstance(pipe.transformer, PRXTransformer2DModel)
+        return BlockAdapter(
+            pipe=pipe,
+            transformer=pipe.transformer,
+            blocks=pipe.transformer.blocks,
+            forward_pattern=ForwardPattern.Pattern_3,
+            check_num_outputs=False,
+            **kwargs,
+        )
+    except ImportError:
+        raise ImportError(
+            "PRXTransformer2DModel is not available in the current diffusers version. "
+            "Please upgrade diffusers>=0.36.dev0 to use this adapter."
+        )
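Once registered, these adapters are resolved by name through `BlockAdapterRegistry` when a pipeline is handed to `cache_dit.enable_cache()`. A minimal usage sketch (the checkpoint id below is illustrative, not taken from this diff):

```python
import cache_dit
from diffusers import DiffusionPipeline

# Illustrative checkpoint id: any pipeline whose transformer is a
# Kandinsky5Transformer3DModel should resolve to the "Kandinsky5" adapter.
pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-5")
cache_dit.enable_cache(pipe)  # adapter lookup happens inside enable_cache()
```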
cache_dit/cache_factory/block_adapters/block_registers.py CHANGED
@@ -18,6 +18,7 @@ class BlockAdapterRegistry:
         "SkyReelsV2",
         "Chroma",
         "Lumina2",
+        "Kandinsky5",
     ]
 
     @classmethod
cache_dit/cache_factory/cache_adapters/cache_adapter.py CHANGED
@@ -614,6 +614,18 @@ class CachedAdapter:
             pipe_or_adapter, remove_stats, remove_stats, remove_stats
         )
 
+        # maybe release parallelism stats
+        from cache_dit.parallelism.parallel_interface import (
+            remove_parallelism_stats,
+        )
+
+        cls.release_hooks(
+            pipe_or_adapter,
+            remove_parallelism_stats,
+            remove_parallelism_stats,
+            remove_parallelism_stats,
+        )
+
     @classmethod
     def release_hooks(
         cls,
cache_dit/cache_factory/cache_blocks/pattern_base.py CHANGED
@@ -139,14 +139,9 @@ class CachedBlocks_Pattern_Base(torch.nn.Module):
                 *args,
                 **kwargs,
             )
-            if not isinstance(hidden_states, torch.Tensor):
-                hidden_states, encoder_hidden_states = hidden_states
-                if not self.forward_pattern.Return_H_First:
-                    hidden_states, encoder_hidden_states = (
-                        encoder_hidden_states,
-                        hidden_states,
-                    )
-
+            hidden_states, encoder_hidden_states = self._process_block_outputs(
+                hidden_states, encoder_hidden_states
+            )
         return hidden_states, encoder_hidden_states
 
     @torch.compiler.disable
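The tuple handling removed here moves into a `_process_block_outputs` helper whose body is not shown in this diff. A minimal sketch of what it presumably does, reconstructed from the deleted inline logic:

```python
import torch

def _process_block_outputs(self, hidden_states, encoder_hidden_states):
    # Some transformer blocks return a (tensor, tensor) tuple instead of a
    # single tensor; unpack it and normalize the ordering.
    if not isinstance(hidden_states, torch.Tensor):
        hidden_states, encoder_hidden_states = hidden_states
        if not self.forward_pattern.Return_H_First:
            hidden_states, encoder_hidden_states = (
                encoder_hidden_states,
                hidden_states,
            )
    return hidden_states, encoder_hidden_states
```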
cache_dit/cache_factory/cache_interface.py CHANGED
@@ -9,6 +9,8 @@ from cache_dit.cache_factory.cache_contexts import DBCacheConfig
 from cache_dit.cache_factory.cache_contexts import DBPruneConfig
 from cache_dit.cache_factory.cache_contexts import CalibratorConfig
 from cache_dit.cache_factory.params_modifier import ParamsModifier
+from cache_dit.parallelism import ParallelismConfig
+from cache_dit.parallelism import enable_parallelism
 
 from cache_dit.logger import init_logger
 
@@ -37,6 +39,8 @@ def enable_cache(
             List[List[ParamsModifier]],
         ]
     ] = None,
+    # Config for Parallelism
+    parallelism_config: Optional[ParallelismConfig] = None,
     # Other cache context kwargs: Deprecated cache kwargs
     **kwargs,
 ) -> Union[
@@ -127,6 +131,15 @@ def enable_cache(
     **kwargs: (`dict`, *optional*, defaults to {}):
         The same as 'kwargs' param in cache_dit.enable_cache() interface.
 
+    parallelism_config (`ParallelismConfig`, *optional*, defaults to None):
+        Config for parallelism. If parallelism_config is not None, parallelism will be
+        enabled for cache-dit. Please check https://github.com/vipshop/cache-dit/blob/main/src/cache_dit/parallelism/parallel_config.py
+        for more details of ParallelismConfig.
+        ulysses_size: (`int`, *optional*, defaults to None):
+            The size of the Ulysses cluster. If ulysses_size is not None, Ulysses-style parallelism is enabled.
+        ring_size: (`int`, *optional*, defaults to None):
+            The ring size for ring parallelism. If ring_size is not None, ring attention is enabled.
+
     kwargs (`dict`, *optional*, defaults to {})
         Other cache context kwargs, please check https://github.com/vipshop/cache-dit/blob/main/src/cache_dit/cache_factory/cache_contexts/cache_context.py
         for more details.
@@ -214,7 +227,7 @@ def enable_cache(
         context_kwargs["params_modifiers"] = params_modifiers
 
     if isinstance(pipe_or_adapter, (DiffusionPipeline, BlockAdapter)):
-        return CachedAdapter.apply(
+        pipe_or_adapter = CachedAdapter.apply(
             pipe_or_adapter,
             **context_kwargs,
         )
@@ -225,6 +238,27 @@ def enable_cache(
             "for the 1's position param: pipe_or_adapter"
         )
 
+    # NOTE: Users should always enable parallelism after applying
+    # cache to avoid hooks conflict.
+    if parallelism_config is not None:
+        assert isinstance(
+            parallelism_config, ParallelismConfig
+        ), "parallelism_config should be of type ParallelismConfig."
+        if isinstance(pipe_or_adapter, DiffusionPipeline):
+            transformer = pipe_or_adapter.transformer
+        else:
+            assert BlockAdapter.assert_normalized(pipe_or_adapter)
+            assert (
+                len(BlockAdapter.flatten(pipe_or_adapter.transformer)) == 1
+            ), (
+                "Only single transformer is supported to enable parallelism "
+                "currently for BlockAdapter."
+            )
+            transformer = BlockAdapter.flatten(pipe_or_adapter.transformer)[0]
+        # Enable parallelism for the transformer inplace
+        transformer = enable_parallelism(transformer, parallelism_config)
+    return pipe_or_adapter
+
 
 def disable_cache(
     pipe_or_adapter: Union[
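Taken together, a hedged end-to-end sketch of the new parameter (the checkpoint id is illustrative; context parallelism additionally requires a distributed launch such as `torchrun` and diffusers>=0.36.dev0):

```python
import cache_dit
from cache_dit import ParallelismConfig
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image")  # illustrative id
# Default cache setup, plus 2-way Ulysses context parallelism layered on top.
cache_dit.enable_cache(pipe, parallelism_config=ParallelismConfig(ulysses_size=2))
```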
cache_dit/parallelism/__init__.py ADDED
@@ -0,0 +1,3 @@
+from cache_dit.parallelism.parallel_backend import ParallelismBackend
+from cache_dit.parallelism.parallel_config import ParallelismConfig
+from cache_dit.parallelism.parallel_interface import enable_parallelism
cache_dit/parallelism/backends/parallel_difffusers.py ADDED
@@ -0,0 +1,73 @@
+import torch
+
+from typing import Optional
+from cache_dit.logger import init_logger
+
+logger = init_logger(__name__)
+
+
+try:
+    from diffusers import ContextParallelConfig
+
+    def native_diffusers_parallelism_available() -> bool:
+        return True
+
+except ImportError:
+    ContextParallelConfig = None
+
+    def native_diffusers_parallelism_available() -> bool:
+        return False
+
+
+from diffusers.models.modeling_utils import ModelMixin
+from cache_dit.parallelism.parallel_backend import ParallelismBackend
+from cache_dit.parallelism.parallel_config import ParallelismConfig
+
+
+def maybe_enable_parallelism(
+    transformer: torch.nn.Module,
+    parallelism_config: Optional[ParallelismConfig],
+) -> torch.nn.Module:
+    assert isinstance(transformer, ModelMixin), (
+        "transformer must be an instance of diffusers' ModelMixin, "
+        f"but got {type(transformer)}"
+    )
+    if parallelism_config is None:
+        return transformer
+
+    assert isinstance(parallelism_config, ParallelismConfig), (
+        "parallelism_config must be an instance of ParallelismConfig"
+        f" but got {type(parallelism_config)}"
+    )
+
+    if (
+        parallelism_config.backend == ParallelismBackend.NATIVE_DIFFUSER
+        and native_diffusers_parallelism_available()
+    ):
+        cp_config = None
+        if (
+            parallelism_config.ulysses_size is not None
+            or parallelism_config.ring_size is not None
+        ):
+            cp_config = ContextParallelConfig(
+                ulysses_degree=parallelism_config.ulysses_size,
+                ring_degree=parallelism_config.ring_size,
+            )
+        if cp_config is not None:
+            if hasattr(transformer, "enable_parallelism"):
+                if hasattr(transformer, "set_attention_backend"):
+                    # Now only _native_cudnn is supported for parallelism
+                    # issue: https://github.com/huggingface/diffusers/pull/12443
+                    transformer.set_attention_backend("_native_cudnn")
+                    logger.warning(
+                        "Set attention backend to _native_cudnn for parallelism because of "
+                        "the issue: https://github.com/huggingface/diffusers/pull/12443"
+                    )
+
+                transformer.enable_parallelism(config=cp_config)
+            else:
+                raise ValueError(
+                    f"{transformer.__class__.__name__} does not support context parallelism."
+                )
+
+    return transformer
cache_dit/parallelism/parallel_backend.py ADDED
@@ -0,0 +1,18 @@
+from enum import Enum
+
+
+class ParallelismBackend(Enum):
+    NATIVE_DIFFUSER = "Native_Diffuser"
+    NATIVE_PYTORCH = "Native_PyTorch"
+    NONE = "None"
+
+    @classmethod
+    def is_supported(cls, backend: "ParallelismBackend") -> bool:
+        # Now, only Native_Diffuser backend is supported
+        if backend in [cls.NATIVE_DIFFUSER]:
+            try:
+                import diffusers  # noqa: F401
+            except ImportError:
+                return False
+            return True
+        return False
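Implied behavior of `is_supported()`: only `NATIVE_DIFFUSER` can pass, and only when diffusers is importable. For example:

```python
from cache_dit.parallelism import ParallelismBackend

# True when diffusers is installed, False otherwise.
print(ParallelismBackend.is_supported(ParallelismBackend.NATIVE_DIFFUSER))
# Always False for now; the PyTorch-native backend is not wired up yet.
print(ParallelismBackend.is_supported(ParallelismBackend.NATIVE_PYTORCH))
```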
cache_dit/parallelism/parallel_config.py ADDED
@@ -0,0 +1,47 @@
+import dataclasses
+from cache_dit.parallelism.parallel_backend import ParallelismBackend
+from cache_dit.logger import init_logger
+
+logger = init_logger(__name__)
+
+
+@dataclasses.dataclass
+class ParallelismConfig:
+    # Parallelism backend, defaults to NATIVE_DIFFUSER
+    backend: ParallelismBackend = ParallelismBackend.NATIVE_DIFFUSER
+    # Context parallelism config
+    # ulysses_size (`int`, *optional*):
+    #   The degree of ulysses parallelism.
+    ulysses_size: int = None
+    # ring_size (`int`, *optional*):
+    #   The degree of ring parallelism.
+    ring_size: int = None
+    # Tensor parallelism config
+    # tp_size (`int`, *optional*):
+    #   The degree of tensor parallelism.
+    tp_size: int = None
+
+    def __post_init__(self):
+        assert ParallelismBackend.is_supported(self.backend), (
+            f"Parallel backend {self.backend} is not supported. "
+            f"Please make sure the required packages are installed."
+        )
+        assert self.tp_size is None, "Tensor parallelism is not supported yet."
+
+    def strify(self, details: bool = False) -> str:
+        if details:
+            return (
+                f"ParallelismConfig(backend={self.backend}, "
+                f"ulysses_size={self.ulysses_size}, "
+                f"ring_size={self.ring_size}, "
+                f"tp_size={self.tp_size})"
+            )
+        else:
+            parallel_str = ""
+            if self.ulysses_size is not None:
+                parallel_str += f"Ulysses{self.ulysses_size}"
+            if self.ring_size is not None:
+                parallel_str += f"Ring{self.ring_size}"
+            if self.tp_size is not None:
+                parallel_str += f"TP{self.tp_size}"
+            return parallel_str
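From the definitions above, `strify()` yields a compact tag and `strify(True)` a full repr, while `__post_init__` means a config can only be constructed when the default backend's dependency (diffusers) is installed:

```python
from cache_dit.parallelism import ParallelismConfig

cfg = ParallelismConfig(ulysses_size=4)
print(cfg.strify())      # "Ulysses4"
print(cfg.strify(True))  # "ParallelismConfig(backend=ParallelismBackend.NATIVE_DIFFUSER,
                         #  ulysses_size=4, ring_size=None, tp_size=None)"
```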
cache_dit/parallelism/parallel_interface.py ADDED
@@ -0,0 +1,62 @@
+import torch
+from cache_dit.parallelism.parallel_backend import ParallelismBackend
+from cache_dit.parallelism.parallel_config import ParallelismConfig
+from cache_dit.logger import init_logger
+
+logger = init_logger(__name__)
+
+
+def enable_parallelism(
+    transformer: torch.nn.Module,
+    parallelism_config: ParallelismConfig,
+) -> torch.nn.Module:
+    assert isinstance(transformer, torch.nn.Module), (
+        "transformer must be an instance of torch.nn.Module, "
+        f"but got {type(transformer)}"
+    )
+    if getattr(transformer, "_is_parallelized", False):
+        logger.warning(
+            "The transformer is already parallelized. "
+            "Skipping parallelism enabling."
+        )
+        return transformer
+
+    if parallelism_config.backend == ParallelismBackend.NATIVE_DIFFUSER:
+        from cache_dit.parallelism.backends.parallel_difffusers import (
+            maybe_enable_parallelism,
+            native_diffusers_parallelism_available,
+        )
+
+        assert (
+            native_diffusers_parallelism_available()
+        ), "Please install diffusers>=0.36.dev0 to use Native_Diffuser backend."
+        transformer = maybe_enable_parallelism(
+            transformer,
+            parallelism_config,
+        )
+    else:
+        raise ValueError(
+            f"Parallel backend {parallelism_config.backend} is not supported yet."
+        )
+
+    transformer._is_parallelized = True  # type: ignore[attr-defined]
+    transformer._parallelism_config = parallelism_config  # type: ignore[attr-defined]
+    logger.info(f"Enabled parallelism: {parallelism_config.strify(True)}")
+    return transformer
+
+
+def remove_parallelism_stats(
+    transformer: torch.nn.Module,
+) -> torch.nn.Module:
+    if not getattr(transformer, "_is_parallelized", False):
+        logger.warning(
+            "The transformer is not parallelized. "
+            "Skipping removing parallelism."
+        )
+        return transformer
+
+    if hasattr(transformer, "_is_parallelized"):
+        del transformer._is_parallelized  # type: ignore[attr-defined]
+    if hasattr(transformer, "_parallelism_config"):
+        del transformer._parallelism_config  # type: ignore[attr-defined]
+    return transformer
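A hedged sketch of calling this interface directly on a transformer, mirroring what `enable_cache()` does internally; it assumes an already-loaded diffusers pipeline `pipe` and a distributed launch (e.g. `torchrun --nproc_per_node=2 demo.py`):

```python
from cache_dit.parallelism import ParallelismConfig, enable_parallelism

transformer = pipe.transformer  # `pipe` assumed loaded elsewhere
transformer = enable_parallelism(transformer, ParallelismConfig(ulysses_size=2))
# The markers consumed by summary() and remove_parallelism_stats() are now set:
assert transformer._is_parallelized
```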
cache_dit/quantize/quantize_ao.py CHANGED
@@ -80,26 +80,19 @@ def quantize_ao(
 
         return False
 
-    def _quantization_fn():
+    def _quant_config():
         try:
             if quant_type == "fp8_w8a8_dq":
-                try:
-                    from torchao.quantization import (
-                        float8_dynamic_activation_float8_weight,
-                        PerTensor,
-                        PerRow,
-                    )
-                except ImportError:
-                    from torchao.quantization import (
-                        Float8DynamicActivationFloat8WeightConfig as float8_dynamic_activation_float8_weight,
-                        PerTensor,
-                        PerRow,
-                    )
+                from torchao.quantization import (
+                    Float8DynamicActivationFloat8WeightConfig,
+                    PerTensor,
+                    PerRow,
+                )
 
                 if per_row:  # Ensure bfloat16
                     module.to(torch.bfloat16)
 
-                quantization_fn = float8_dynamic_activation_float8_weight(
+                quant_config = Float8DynamicActivationFloat8WeightConfig(
                     weight_dtype=kwargs.get(
                         "weight_dtype",
                         torch.float8_e4m3fn,
@@ -116,14 +109,9 @@ def quantize_ao(
                 )
 
             elif quant_type == "fp8_w8a16_wo":
-                try:
-                    from torchao.quantization import float8_weight_only
-                except ImportError:
-                    from torchao.quantization import (
-                        Float8WeightOnlyConfig as float8_weight_only,
-                    )
+                from torchao.quantization import Float8WeightOnlyConfig
 
-                quantization_fn = float8_weight_only(
+                quant_config = Float8WeightOnlyConfig(
                     weight_dtype=kwargs.get(
                         "weight_dtype",
                         torch.float8_e4m3fn,
@@ -131,69 +119,44 @@ def quantize_ao(
                 )
 
             elif quant_type == "int8_w8a8_dq":
-                try:
-                    from torchao.quantization import (
-                        int8_dynamic_activation_int8_weight,
-                    )
-                except ImportError:
-                    from torchao.quantization import (
-                        Int8DynamicActivationInt8WeightConfig as int8_dynamic_activation_int8_weight,
-                    )
+                from torchao.quantization import (
+                    Int8DynamicActivationInt8WeightConfig,
+                )
 
-                quantization_fn = int8_dynamic_activation_int8_weight()
+                quant_config = Int8DynamicActivationInt8WeightConfig()
 
             elif quant_type == "int8_w8a16_wo":
 
-                try:
-                    from torchao.quantization import int8_weight_only
-                except ImportError:
-                    from torchao.quantization import (
-                        Int8WeightOnlyConfig as int8_weight_only,
-                    )
+                from torchao.quantization import Int8WeightOnlyConfig
 
-                quantization_fn = int8_weight_only(
+                quant_config = Int8WeightOnlyConfig(
                     # group_size is None -> per_channel, else per group
                     group_size=kwargs.get("group_size", None),
                 )
 
             elif quant_type == "int4_w4a8_dq":
 
-                try:
-                    from torchao.quantization import (
-                        int8_dynamic_activation_int4_weight,
-                    )
-                except ImportError:
-                    from torchao.quantization import (
-                        Int8DynamicActivationInt4WeightConfig as int8_dynamic_activation_int4_weight,
-                    )
+                from torchao.quantization import (
+                    Int8DynamicActivationInt4WeightConfig,
+                )
 
-                quantization_fn = int8_dynamic_activation_int4_weight(
+                quant_config = Int8DynamicActivationInt4WeightConfig(
                     group_size=kwargs.get("group_size", 32),
                 )
 
             elif quant_type == "int4_w4a4_dq":
 
-                try:
-                    from torchao.quantization import (
-                        int4_dynamic_activation_int4_weight,
-                    )
-                except ImportError:
-                    from torchao.quantization import (
-                        Int4DynamicActivationInt4WeightConfig as int4_dynamic_activation_int4_weight,
-                    )
+                from torchao.quantization import (
+                    Int4DynamicActivationInt4WeightConfig,
+                )
 
-                quantization_fn = int4_dynamic_activation_int4_weight()
+                quant_config = Int4DynamicActivationInt4WeightConfig()
 
             elif quant_type == "int4_w4a16_wo":
 
-                try:
-                    from torchao.quantization import int4_weight_only
-                except ImportError:
-                    from torchao.quantization import (
-                        Int4WeightOnlyConfig as int4_weight_only,
-                    )
+                from torchao.quantization import Int4WeightOnlyConfig
 
-                quantization_fn = int4_weight_only(
+                quant_config = Int4WeightOnlyConfig(
                     group_size=kwargs.get("group_size", 32),
                 )
 
@@ -209,13 +172,13 @@ def quantize_ao(
             )
             raise e
 
-        return quantization_fn
+        return quant_config
 
     from torchao.quantization import quantize_
 
     quantize_(
         module,
-        _quantization_fn(),
+        _quant_config(),
         filter_fn=_filter_fn if filter_fn is None else filter_fn,
         device=kwargs.get("device", None),
     )
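The refactor drops the try/except fallback to torchao's deprecated factory functions and uses the config-object API directly. A minimal standalone sketch of the same call pattern, assuming torchao is installed and `module` is a `torch.nn.Module` holding linear layers:

```python
import torch
from torchao.quantization import (
    Float8DynamicActivationFloat8WeightConfig,
    PerRow,
    quantize_,
)

# In-place FP8 W8A8 dynamic quantization with per-row scales; per-row
# granularity expects bfloat16 weights, matching module.to(torch.bfloat16) above.
quantize_(
    module,
    Float8DynamicActivationFloat8WeightConfig(
        weight_dtype=torch.float8_e4m3fn,
        granularity=PerRow(),
    ),
)
```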
cache_dit/utils.py CHANGED
@@ -13,6 +13,7 @@ from cache_dit.cache_factory import CacheType
 from cache_dit.cache_factory import BlockAdapter
 from cache_dit.cache_factory import BasicCacheConfig
 from cache_dit.cache_factory import CalibratorConfig
+from cache_dit.parallelism import ParallelismConfig
 from cache_dit.logger import init_logger
 
 
@@ -55,6 +56,8 @@ class CacheStats:
     cfg_pruned_blocks: list[int] = dataclasses.field(default_factory=list)
     cfg_actual_blocks: list[int] = dataclasses.field(default_factory=list)
     cfg_pruned_ratio: float = None
+    # Parallelism Stats
+    parallelism_config: ParallelismConfig = None
 
 
 def summary(
@@ -180,6 +183,8 @@ def strify(
     cached_steps = None
     cache_type = cache_options.get("cache_type", CacheType.NONE)
 
+    stats = None
+
     if cache_type == CacheType.NONE:
         return "NONE"
     else:
@@ -213,7 +218,15 @@ def strify(
             return calibrator_config.strify()
         return "T0O0"
 
-    cache_type_str = f"{cache_str()}_{calibrator_str()}"
+    def parallelism_str():
+        if stats is None:
+            return ""
+        parallelism_config: ParallelismConfig = stats.parallelism_config
+        if parallelism_config is not None:
+            return f"_{parallelism_config.strify()}"
+        return ""
+
+    cache_type_str = f"{cache_str()}_{calibrator_str()}{parallelism_str()}"
 
     if cached_steps:
         cache_type_str += f"_S{cached_steps}"
@@ -252,6 +265,17 @@ def _summary(
         if logging:
             logger.warning(f"Can't find Context Options for: {cls_name}")
 
+    if hasattr(module, "_parallelism_config"):
+        parallelism_config: ParallelismConfig = module._parallelism_config
+        cache_stats.parallelism_config = parallelism_config
+        if logging:
+            print(
+                f"\n🤖Parallelism Config: {cls_name}\n\n{parallelism_config.strify(True)}"
+            )
+    else:
+        if logging:
+            logger.warning(f"Can't find Parallelism Config for: {cls_name}")
+
     if hasattr(module, "_cached_steps"):
         cached_steps: list[int] = module._cached_steps
         residual_diffs: dict[str, list | float] = dict(module._residual_diffs)
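With the hooks above, a parallelized transformer's config now surfaces both in the stats object and in the strify tag. An illustrative sketch, assuming `pipe` was set up via `enable_cache(..., parallelism_config=...)`:

```python
import cache_dit

cache_dit.summary(pipe)        # also prints "🤖Parallelism Config: ..." per module
print(cache_dit.strify(pipe))  # tag now carries the parallel suffix, e.g. "..._Ulysses2"
```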
{cache_dit-1.0.5.dist-info → cache_dit-1.0.7.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cache_dit
-Version: 1.0.5
+Version: 1.0.7
 Summary: A Unified, Flexible and Training-free Cache Acceleration Framework for 🤗Diffusers.
 Author: DefTruth, vipshop.com, etc.
 Maintainer: DefTruth, vipshop.com, etc
@@ -45,13 +45,15 @@ Dynamic: provides-extra
 Dynamic: requires-dist
 Dynamic: requires-python
 
-<a href="./README.md">📚English</a> | <a href="./README_CN.md">📚中文阅读 </a>
+📚English | <a href="./README_CN.md">📚中文阅读 </a>
 
 <div align="center">
   <img src=https://github.com/vipshop/cache-dit/raw/main/assets/cache-dit-logo.png height="120">
   <p align="center">
     A <b>Unified</b>, Flexible and Training-free <b>Cache Acceleration</b> Framework for <b>🤗Diffusers</b> <br>
-    ♥️ Cache Acceleration with <b>One-line</b> Code ~ ♥️
+    ♥️ Cache Acceleration with <b>One-line</b> Code ~ ♥️ <br>
+    🔥<b><a href="./docs/User_Guide.md">DBCache</a> | <a href="./docs/User_Guide.md">DBPrune</a> | <a href="./docs/User_Guide.md">Hybrid TaylorSeer</a> | <a href="./docs/User_Guide.md">Hybrid Cache CFG</a></b>🔥 <br>
+    🔥<b><a href="./docs/User_Guide.md">Hybrid Context Parallelism</a> | <a href="./docs/User_Guide.md">Diffusers Native</a> | <a href="./docs/User_Guide.md">SOTA</a></b>🔥
   </p>
   <div align='center'>
     <img src=https://img.shields.io/badge/Language-Python-brightgreen.svg >
@@ -194,7 +196,7 @@ You can install the stable release of cache-dit from PyPI, or the latest develop
 - **[🎉Easy New Model Integration](./docs/User_Guide.md#automatic-block-adapter)**: Features like **Unified Cache APIs**, **Forward Pattern Matching**, **Automatic Block Adapter**, **Hybrid Forward Pattern**, and **Patch Functor** make it highly functional and flexible. For example, we achieved 🎉 Day 1 support for [HunyuanImage-2.1](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1) with 1.7x speedup w/o precision loss—even before it was available in the Diffusers library.
 - **[🎉State-of-the-Art Performance](./bench/)**: Compared with algorithms including Δ-DiT, Chipmunk, FORA, DuCa, TaylorSeer and FoCa, cache-dit achieved the **SOTA** performance w/ **7.4x↑🎉** speedup on ClipScore!
 - **[🎉Support for 4/8-Steps Distilled Models](./bench/)**: Surprisingly, cache-dit's **DBCache** works for extremely few-step distilled models—something many other methods fail to do.
-- **[🎉Compatibility with Other Optimizations](./docs/User_Guide.md#️torch-compile)**: Designed to work seamlessly with torch.compile, model CPU offload, sequential CPU offload, group offloading, Quantization(**[torchao](./examples/quantize/)**, **[🔥nunchaku](./examples/quantize/)**), etc.
+- **[🎉Compatibility with Other Optimizations](./docs/User_Guide.md#️torch-compile)**: Designed to work seamlessly with torch.compile, Quantization ([torchao](./examples/quantize/), [🔥nunchaku](./examples/quantize/)), CPU or Sequential Offloading, **[🔥Context Parallelism](./docs/User_Guide.md/#️hybrid-context-parallelism)**, Tensor Parallelism, etc.
 - **[🎉Hybrid Cache Acceleration](./docs/User_Guide.md#taylorseer-calibrator)**: Now supports hybrid **Block-wise Cache + Calibrator** schemes (e.g., DBCache or DBPrune + TaylorSeerCalibrator). DBCache or DBPrune acts as the **Indicator** to decide *when* to cache, while the Calibrator decides *how* to cache. More mainstream cache acceleration algorithms (e.g., FoCa) will be supported in the future, along with additional benchmarks—stay tuned for updates!
 - **[🤗Diffusers Ecosystem Integration](https://huggingface.co/docs/diffusers/main/en/optimization/cache_dit)**: 🔥**cache-dit** has joined the Diffusers community ecosystem as the **first** DiT-specific cache acceleration framework! Check out the documentation here: <a href="https://huggingface.co/docs/diffusers/main/en/optimization/cache_dit"><img src=https://img.shields.io/badge/🤗Diffusers-ecosystem-yellow.svg ></a>
 
@@ -202,12 +204,12 @@ You can install the stable release of cache-dit from PyPI, or the latest develop
 
 ## 🔥Important News
 
+- 2025.10.20: 🔥cache-dit now supports the **[Hybrid Cache + Context Parallelism](./docs/User_Guide.md/#️hybrid-context-parallelism)** scheme!🔥
+- 2025.10.16: 🎉cache-dit + [**🔥nunchaku 4-bits**](https://github.com/nunchaku-tech/nunchaku) supported: [Qwen-Image-Lightning 4/8 steps](./examples/quantize/).
 - 2025.10.15: 🎉cache-dit now supports [**🔥nunchaku**](https://github.com/nunchaku-tech/nunchaku): Qwen-Image/FLUX.1 [4-bits examples](./examples/quantize/)
 - 2025.10.13: 🎉cache-dit achieved the **SOTA** performance w/ **7.4x↑🎉** speedup on ClipScore!
 - 2025.10.10: 🔥[**Qwen-Image-ControlNet-Inpainting**](https://huggingface.co/InstantX/Qwen-Image-ControlNet-Inpainting) **2.3x↑🎉** speedup! Check the [example](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_qwen_image_controlnet_inpaint.py).
 - 2025.09.26: 🔥[**Qwen-Image-Edit-Plus(2509)**](https://github.com/QwenLM/Qwen-Image) **2.1x↑🎉** speedup! Please check the [example](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_qwen_image_edit_plus.py).
-- 2025.09.25: 🎉The **first API-stable version (v1.0.0)** of cache-dit has finally been released!
-- 2025.09.25: 🔥**cache-dit** has joined the Diffusers community ecosystem: <a href="https://huggingface.co/docs/diffusers/main/en/optimization/cache_dit"><img src=https://img.shields.io/badge/🤗Diffusers-ecosystem-yellow.svg ></a>
 - 2025.09.10: 🎉Day 1 support [**HunyuanImage-2.1**](https://github.com/Tencent-Hunyuan/HunyuanImage-2.1) with **1.7x↑🎉** speedup! Check this [example](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_hunyuan_image_2.1.py).
 - 2025.09.08: 🔥[**Qwen-Image-Lightning**](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_qwen_image_lightning.py) **7.1/3.5 steps🎉** inference with **[DBCache: F16B16](https://github.com/vipshop/cache-dit)**.
 - 2025.09.03: 🎉[**Wan2.2-MoE**](https://github.com/Wan-Video) **2.4x↑🎉** speedup! Please refer to [run_wan_2.2.py](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_wan_2.2.py) as an example.
@@ -217,6 +219,8 @@ You can install the stable release of cache-dit from PyPI, or the latest develop
 <details>
 <summary>Previous News</summary>
 
+- 2025.09.25: 🎉The **first API-stable version (v1.0.0)** of cache-dit has finally been released!
+- 2025.09.25: 🔥**cache-dit** has joined the Diffusers community ecosystem: <a href="https://huggingface.co/docs/diffusers/main/en/optimization/cache_dit"><img src=https://img.shields.io/badge/🤗Diffusers-ecosystem-yellow.svg ></a>
 - 2025.09.08: 🎉First caching mechanism in [Wan2.2](https://github.com/Wan-Video/Wan2.2) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check this [PR](https://github.com/Wan-Video/Wan2.2/pull/127) for more details.
 - 2025.09.08: 🎉First caching mechanism in [Qwen-Image-Lightning](https://github.com/ModelTC/Qwen-Image-Lightning) with **[cache-dit](https://github.com/vipshop/cache-dit)**, check this [PR](https://github.com/ModelTC/Qwen-Image-Lightning/pull/35).
 - 2025.08.10: 🔥[**FLUX.1-Kontext-dev**](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) is supported! Please refer [run_flux_kontext.py](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_flux_kontext.py) as an example.
@@ -239,14 +243,15 @@ For more advanced features such as **Unified Cache APIs**, **Forward Pattern Mat
 - [📚Forward Pattern Matching](./docs/User_Guide.md#forward-pattern-matching)
 - [📚Cache with One-line Code](./docs/User_Guide.md#%EF%B8%8Fcache-acceleration-with-one-line-code)
 - [🔥Automatic Block Adapter](./docs/User_Guide.md#automatic-block-adapter)
-- [📚Hybird Forward Pattern](./docs/User_Guide.md#hybird-forward-pattern)
+- [📚Hybrid Forward Pattern](./docs/User_Guide.md#hybrid-forward-pattern)
 - [📚Implement Patch Functor](./docs/User_Guide.md#implement-patch-functor)
 - [🤖Cache Acceleration Stats](./docs/User_Guide.md#cache-acceleration-stats-summary)
 - [⚡️DBCache: Dual Block Cache](./docs/User_Guide.md#️dbcache-dual-block-cache)
 - [⚡️DBPrune: Dynamic Block Prune](./docs/User_Guide.md#️dbprune-dynamic-block-prune)
-- [🔥TaylorSeer Calibrator](./docs/User_Guide.md#taylorseer-calibrator)
 - [⚡️Hybrid Cache CFG](./docs/User_Guide.md#️hybrid-cache-cfg)
-- [🛠Metrics CLI](./docs/User_Guide.md#metrics-cli)
+- [🔥Hybrid TaylorSeer Calibrator](./docs/User_Guide.md#taylorseer-calibrator)
+- [⚡️Hybrid Context Parallelism](./docs/User_Guide.md#context-paralleism)
+- [🛠Metrics Command Line](./docs/User_Guide.md#metrics-cli)
 - [⚙️Torch Compile](./docs/User_Guide.md#️torch-compile)
 - [📚API Documents](./docs/User_Guide.md#api-documentation)
 
@@ -268,7 +273,7 @@ How to contribute? Star ⭐️ this repo to support us or check [CONTRIBUTE.md](
 
 ## 🎉Projects Using CacheDiT
 
-Here is a curated list of open-source projects integrating **CacheDiT**, including popular repositories like [jetson-containers](https://github.com/dusty-nv/jetson-containers/blob/master/packages/diffusion/cache_edit/build.sh) ![](https://img.shields.io/github/stars/dusty-nv/jetson-containers.svg), [flux-fast](https://github.com/huggingface/flux-fast) ![](https://img.shields.io/github/stars/huggingface/flux-fast.svg), and [sdnext](https://github.com/vladmandic/sdnext/blob/dev/modules/cachedit.py) ![](https://img.shields.io/github/stars/vladmandic/sdnext.svg). **CacheDiT** has also been **recommended** by [Wan2.2](https://github.com/Wan-Video/Wan2.2) ![](https://img.shields.io/github/stars/Wan-Video/Wan2.2.svg), [Qwen-Image-Lightning](https://github.com/ModelTC/Qwen-Image-Lightning) ![](https://img.shields.io/github/stars/ModelTC/Qwen-Image-Lightning.svg), [Qwen-Image](https://github.com/QwenLM/Qwen-Image) ![](https://img.shields.io/github/stars/QwenLM/Qwen-Image.svg), and <a href="https://huggingface.co/docs/diffusers/main/en/optimization/cache_dit"><img src="https://img.shields.io/badge/🤗Diffusers-ecosystem-yellow.svg"></a> ![](https://img.shields.io/github/stars/huggingface/diffusers.svg), among others. We would be grateful if you could let us know if you have used CacheDiT.
+Here is a curated list of open-source projects integrating **CacheDiT**, including popular repositories like [jetson-containers](https://github.com/dusty-nv/jetson-containers/blob/master/packages/diffusion/cache_edit/build.sh) ![](https://img.shields.io/github/stars/dusty-nv/jetson-containers.svg), [flux-fast](https://github.com/huggingface/flux-fast) ![](https://img.shields.io/github/stars/huggingface/flux-fast.svg), and [sdnext](https://github.com/vladmandic/sdnext/discussions/4269) ![](https://img.shields.io/github/stars/vladmandic/sdnext.svg). **CacheDiT** has also been **recommended** by [Wan2.2](https://github.com/Wan-Video/Wan2.2) ![](https://img.shields.io/github/stars/Wan-Video/Wan2.2.svg), [Qwen-Image-Lightning](https://github.com/ModelTC/Qwen-Image-Lightning) ![](https://img.shields.io/github/stars/ModelTC/Qwen-Image-Lightning.svg), [Qwen-Image](https://github.com/QwenLM/Qwen-Image) ![](https://img.shields.io/github/stars/QwenLM/Qwen-Image.svg), and <a href="https://huggingface.co/docs/diffusers/main/en/optimization/cache_dit"><img src="https://img.shields.io/badge/🤗Diffusers-ecosystem-yellow.svg"></a> ![](https://img.shields.io/github/stars/huggingface/diffusers.svg), among others. We would be grateful if you could let us know if you have used CacheDiT.
 
 ## ©️Acknowledgements
 
{cache_dit-1.0.5.dist-info → cache_dit-1.0.7.dist-info}/RECORD RENAMED
@@ -1,24 +1,24 @@
-cache_dit/__init__.py,sha256=JQLxwr5aqoMFp-BNR58J0i6NutbRmNXKsaRJKCZQDCg,1638
-cache_dit/_version.py,sha256=uYAmg6HpJuEIdeeqRyfS9YeZSA-4pXIPoW7JKBe0Uvw,704
+cache_dit/__init__.py,sha256=HZb04M7AHCfk9DaEAGApGJ2lCM-rsP6pbsNQxsQudi0,1743
+cache_dit/_version.py,sha256=xUX1oSOk6hTPREy9SfhUBjaOBMJucMgoQViQ3e2Ce9A,704
 cache_dit/logger.py,sha256=0zsu42hN-3-rgGC_C29ms1IvVpV4_b4_SwJCKSenxBE,4304
-cache_dit/utils.py,sha256=0YNFr84pxYoHOCZvnONKKXYN3PZY4kao9Tq2yEfHHR8,16986
+cache_dit/utils.py,sha256=3NcEb324fNY0NYnrBTjsLURKQuckKeFe3V9Dfc_g4sc,17851
 cache_dit/cache_factory/.gitignore,sha256=5Cb-qT9wsTUoMJ7vACDF7ZcLpAXhi5v-xdcWSRit988,23
 cache_dit/cache_factory/__init__.py,sha256=5UjrpxLVlmjHttTL0O14fD5oU5uKI3FKYevL613ibFQ,1848
-cache_dit/cache_factory/cache_interface.py,sha256=spiE7pWF80G3Y06_TKVvrmKufbAvQmyvshZZVsmb-nM,12714
+cache_dit/cache_factory/cache_interface.py,sha256=244uTVx83hpCpbCDgEOydi5HqG7hKHHzEoz1ApJW6lI,14627
 cache_dit/cache_factory/cache_types.py,sha256=QnWfaS52UOXQtnoCUOwwz4ziY0dyBta6vQ6hvgtdV44,1404
 cache_dit/cache_factory/forward_pattern.py,sha256=FumlCuZ-TSmSYH0hGBHctSJ-oGLCftdZjLygqhsmdR4,2258
 cache_dit/cache_factory/params_modifier.py,sha256=2T98IbepAolWW6GwQsqUDsRzu0k65vo7BOrN3V8mKog,3606
 cache_dit/cache_factory/utils.py,sha256=S3SD6Zhexzhkqnmfo830v6oNLm8stZe32nF4VdxD_bA,2497
-cache_dit/cache_factory/block_adapters/__init__.py,sha256=zs-cYacRL_hWlhUXmKc0TZNDAKzzWuznvHeuDpAmuwc,18221
+cache_dit/cache_factory/block_adapters/__init__.py,sha256=eeBcWUMIvS-x3GcD1LNesW2SuB9V5mtwG9MoUBWHsL8,19765
 cache_dit/cache_factory/block_adapters/block_adapters.py,sha256=2TVK_KqiYXC7AKZ2s07fzdOzUoeUBc9P1SzQtLVzhf4,22249
-cache_dit/cache_factory/block_adapters/block_registers.py,sha256=2L7QeM4ygnaKQpC9PoJod0QRYyxidUKU2AYpysDCUwE,2572
+cache_dit/cache_factory/block_adapters/block_registers.py,sha256=KU0cqtLYRlij2WvuQ6erqZbxUWkb6DjvmY_sB3o_fQM,2594
 cache_dit/cache_factory/cache_adapters/__init__.py,sha256=py71WGD3JztQ1uk6qdLVbzYcQ1rvqFidNNaQYo7tqTo,79
-cache_dit/cache_factory/cache_adapters/cache_adapter.py,sha256=-KTKukcNaVYk92np-QnGJ-EJbTdRQplqIHtqEORPiAo,23745
+cache_dit/cache_factory/cache_adapters/cache_adapter.py,sha256=WYrgV3DKxOxttl-wEKymyKIB1Po0eW73Q2_vOlGEKdQ,24080
 cache_dit/cache_factory/cache_blocks/__init__.py,sha256=cpxzmDcUhbXcReHqaKSnWyEEbIg1H91Pz5hE3z9Xj3k,9984
 cache_dit/cache_factory/cache_blocks/offload_utils.py,sha256=wusgcqaCrwEjvv7Guy-6VXhNOgPPUrBV2sSVuRmGuvo,3513
 cache_dit/cache_factory/cache_blocks/pattern_0_1_2.py,sha256=j4bTafqU5DLQhzP_X5XwOk-QUVLWkGrX-Q6JZvBGHh0,666
 cache_dit/cache_factory/cache_blocks/pattern_3_4_5.py,sha256=2qPnXVZwpQIm2oJ-Yrn3Avqi3BcXtE2133jPIL_LhK8,19595
-cache_dit/cache_factory/cache_blocks/pattern_base.py,sha256=9H87qBRpa6UWRkUKXLVO0_9NJgxCVKkFSzaQxM9YPw8,25487
+cache_dit/cache_factory/cache_blocks/pattern_base.py,sha256=uNcPZU8b8i_-re_X1xBHkSDQSacQO7Fa69vjbfAYxOA,25275
 cache_dit/cache_factory/cache_blocks/pattern_utils.py,sha256=qOxoVTlYPQzPMrR06-7_Ce_lwNg6n5pt1KQrvxzAJhE,3124
 cache_dit/cache_factory/cache_contexts/__init__.py,sha256=7uY8fX9uhpC71VNm1HH4aDIicYn-dD3kRpPQhvc9-EI,853
 cache_dit/cache_factory/cache_contexts/cache_config.py,sha256=G0PVWgckDqeyARc72Ne_0lRtO_LftsOeMERRhbh2gCA,5739
@@ -52,12 +52,17 @@ cache_dit/metrics/image_reward.py,sha256=N8HalJo1T1js0dsNb2V1KRv4kIdcm3nhx7iOXJu
 cache_dit/metrics/inception.py,sha256=pBVe2X6ylLPIXTG4-GWDM9DWnCviMJbJ45R3ulhktR0,12759
 cache_dit/metrics/lpips.py,sha256=hrHrmdM-f2B4TKDs0xLqJO5JFaYcCjq2qNIR8oCrVkc,811
 cache_dit/metrics/metrics.py,sha256=AZbQyoavE-djvyRUZ_EfCIrWSQbiWQFo7n2dhn7XptE,40466
+cache_dit/parallelism/__init__.py,sha256=dheBG5_TZCuwctviMslpAEgB-B3N8F816bE51qsw_fU,210
+cache_dit/parallelism/parallel_backend.py,sha256=js1soTMenLeAyPMsBgdI3gWcdXoqjWgBD-PuFEywMr0,508
+cache_dit/parallelism/parallel_config.py,sha256=bu24sRSzJMmH7FZqzUPTcT6tAzQ20-FAqAEvGV3Q1Fw,1733
+cache_dit/parallelism/parallel_interface.py,sha256=tsiIdHosTmRbeRg0z9q0eMQlx-7vefmSIlc56OWnuMg,2205
+cache_dit/parallelism/backends/parallel_difffusers.py,sha256=YQkCJ1yq1OomZLyRLtGMaPSNWbDeAWGx9XuObVJ_85I,2499
 cache_dit/quantize/__init__.py,sha256=kWYoMAyZgBXu9BJlZjTQ0dRffW9GqeeY9_iTkXrb70A,59
-cache_dit/quantize/quantize_ao.py,sha256=LlkKh2uAtLihNRXHWIVggYR7ls12ak_VVLCY8trp2LY,7996
+cache_dit/quantize/quantize_ao.py,sha256=bbEUwsrMp3bMuRw8qJZREIvCHaJRQoZyfMjlu4ImRMI,6315
 cache_dit/quantize/quantize_interface.py,sha256=2s_R7xPSKuJeFpEGeLwRxnq_CqJcBG3a3lzyW5wh-UM,1241
-cache_dit-1.0.5.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
-cache_dit-1.0.5.dist-info/METADATA,sha256=qalYkFx9Y0cSmoBqHx-ZmI2RdVxcFhuNkWZVs7laExI,28632
-cache_dit-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-cache_dit-1.0.5.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
-cache_dit-1.0.5.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
-cache_dit-1.0.5.dist-info/RECORD,,
+cache_dit-1.0.7.dist-info/licenses/LICENSE,sha256=Dqb07Ik2dV41s9nIdMUbiRWEfDqo7-dQeRiY7kPO8PE,3769
+cache_dit-1.0.7.dist-info/METADATA,sha256=I0Vb-ZqUHblKOWwXyCyZVfcllq1lLm7ML2X7U6TJs4s,29475
+cache_dit-1.0.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+cache_dit-1.0.7.dist-info/entry_points.txt,sha256=FX2gysXaZx6NeK1iCLMcIdP8Q4_qikkIHtEmi3oWn8o,65
+cache_dit-1.0.7.dist-info/top_level.txt,sha256=ZJDydonLEhujzz0FOkVbO-BqfzO9d_VqRHmZU-3MOZo,10
+cache_dit-1.0.7.dist-info/RECORD,,