tico 0.1.0.dev250729__py3-none-any.whl → 0.1.0.dev250731__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tico/__init__.py CHANGED
@@ -29,7 +29,7 @@ __all__ = [
 ]
 
 # THIS LINE IS AUTOMATICALLY GENERATED BY setup.py
-__version__ = "0.1.0.dev250729"
+__version__ = "0.1.0.dev250731"
 
 MINIMUM_SUPPORTED_VERSION = "2.5.0"
 SECURE_TORCH_VERSION = "2.6.0"
tico/experimental/quantization/algorithm/gptq/quantizer.py CHANGED
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 import types
-from typing import Any, Dict, List, Optional
+from typing import Any, Callable, Dict, List, Optional
 
 import torch
 
@@ -28,16 +28,34 @@ from tico.experimental.quantization.config import BaseConfig, GPTQConfig
 from tico.experimental.quantization.quantizer import BaseQuantizer
 
 
+class StopForward(Exception):
+    """Custom exception used to stop the forward pass after the first layer."""
+
+    pass
+
+
 class GPTQQuantizer(BaseQuantizer):
     """
-    Quantizer for applying the GPTQ algorithm (typically for weight quantization)
+    Quantizer for applying the GPTQ algorithm (typically for weight quantization).
+    This implementation expects:
+        1) prepare(model, ...) to only attach hooks/Catchers and NOT run the model internally.
+        2) The user runs the model with an arbitrary number of batches to collect calibration data.
+        3) convert(model) to consume the collected data and apply GPTQ.
     """
 
     def __init__(self, config: BaseConfig):
         super().__init__(config)
 
-        self.cache_args: List[Any] = []
-        self.cache_kwargs: Dict[str, Any] = {"batch_num": 0}
+        # cache_args[i] -> list of the i-th positional argument for each batch
+        self.cache_args: List[List[Any]] = []
+        # cache_kwargs[k] -> list of the value for keyword k for each batch
+        self.cache_kwargs: Dict[str, List[Any]] = {}
+        self.num_batches: int = 0
+
+        # References to original forwards for restoration
+        self._orig_model_forward: Optional[Callable[..., Any]] = None
+        self._orig_layer_forward: Optional[Callable[..., Any]] = None
+        self._first_layer_ref: Optional[torch.nn.Module] = None
 
     @torch.no_grad()
     def prepare(
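
The new class docstring describes a three-step calibration contract. Below is a minimal usage sketch of that flow, assuming a LLaMA-style Hugging Face checkpoint; `load_model` and `calibration_batches` are hypothetical stand-ins, and constructing `GPTQConfig()` with no arguments is an assumption:

```python
# Sketch only: prepare() attaches the catcher, the user drives calibration,
# convert() consumes the cached inputs.
import torch
from tico.experimental.quantization.config import GPTQConfig
from tico.experimental.quantization.algorithm.gptq.quantizer import GPTQQuantizer

model = load_model()                      # hypothetical helper
quantizer = GPTQQuantizer(GPTQConfig())

# 1) Attach the catcher; the model is NOT run here.
model = quantizer.prepare(model)

# 2) Run calibration batches; each call is stopped after layer 0 by
#    StopForward, so only first-layer inputs are recorded (and None returned).
with torch.no_grad():
    for batch in calibration_batches:     # hypothetical iterable of input_ids
        model(batch)

# 3) Consume the cached inputs and quantize layer by layer.
model = quantizer.convert(model)
```
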
@@ -50,104 +68,125 @@ class GPTQQuantizer(BaseQuantizer):
         Overrides the forward method of the first LLaMA layer (layer 0) to capture the
         input required for calibration.
 
-        This method modifies the original forward pass of LLaMA layer 0 so that the
-        inputs used during inference are intercepted and recorded. These captured inputs
-        are then utilized to calibrate the quantization parameters for the GPTQ.
+        When the user calls `model(...)`, we intercept (and store) the inputs to that
+        layer, then raise an exception to stop the forward pass immediately. These
+        captured inputs are then utilized to calibrate the quantization parameters
+        for the GPTQ.
 
         Parameters:
-            model: The target PyTorch model.
-            args: Positional example inputs required for capturing graph.
-            kwargs: Keyword example inputs required for capturing graph.
+            model (torch.nn.Module): The target PyTorch model
+            args (Any, optional): Unused (kept for API compatibility)
+            kwargs (Dict[str, Any], optional): Unused (kept for API compatibility)
 
         Returns:
-            The model prepared for GPTQ quantization.
+            torch.nn.Module: The model with the catcher attached
         """
-        if args is None and kwargs is None:
-            raise RuntimeError(
-                "Either args or kwargs must be provided for captruing graph."
-            )
-        # Define a function to capture input activations and associated parameters.
+        # Define the catcher to store inputs/kwargs and stop the execution
         def forward(layer, *args, **kwargs):
-            self.cache_kwargs["batch_num"] += 1
+            """
+            Stores this batch's inputs and kwargs, then raises StopForward to stop computation.
+            """
+            # Store positional args
             for idx, item in enumerate(args):
                 if (idx + 1) > len(self.cache_args):
                     self.cache_args.append([])
                 self.cache_args[idx].append(item)
-            for arg in kwargs:
-                if self.cache_kwargs.get(arg, None) is None:
-                    self.cache_kwargs[arg] = []
-                self.cache_kwargs[arg].append(kwargs[arg])
-            # Raise an error to interrupt the forward pass after capturing data.
-            raise ValueError
+            # Store keyword args
+            for k, v in kwargs.items():
+                if self.cache_kwargs.get(k, None) is None:
+                    self.cache_kwargs[k] = []
+                self.cache_kwargs[k].append(v)
+
+            self.num_batches += 1
+            raise StopForward  # stop after the first layer
 
         # Replace the first layer with defined function to capture calibration data.
         if hasattr(model, "model"):
-            assert hasattr(model.model, "layers")
-            assert isinstance(model.model.layers, torch.nn.ModuleList)
-            layer_forward_cache = model.model.layers[0].forward
-            model.model.layers[0].forward = types.MethodType(
-                forward, model.model.layers[0]
-            )
+            if hasattr(model.model, "layers") and isinstance(
+                model.model.layers, torch.nn.ModuleList
+            ):
+                self._first_layer_ref = model.model.layers[0]
+            else:
+                raise RuntimeError(
+                    "GPTQ Quantizer assumes the model has a nested structure like `model.model.layers`, commonly found in LLaMA and other Hugging Face transformer models."
+                )
         else:
-            assert hasattr(model, "forward")
-            layer_forward_cache = model.forward
-            model.forward = types.MethodType(forward, model.forward)
-
-        model_forward_cache = model.forward
-        # Replace model's forward to avoid ValueError
-        def model_forward(model, *args, **kwargs):
-            nonlocal model_forward_cache
-            try:
-                model_forward_cache(*args, **kwargs)
-            except ValueError:
-                pass
+            # fallback if the model is not LLaMA-like; treat whole model as single layer
+            self._first_layer_ref = model
 
-        model.forward = types.MethodType(model_forward, model)
-        kwargs = kwargs or {}
-        model(*args, **kwargs)  # type: ignore[misc]
+        assert hasattr(self._first_layer_ref, "forward")
+        # Backup the original forward of the first layer
+        assert isinstance(self._first_layer_ref, torch.nn.Module)
+        self._orig_layer_forward = self._first_layer_ref.forward
+        self._first_layer_ref.forward = types.MethodType(forward, self._first_layer_ref)
 
-        # Recover original forward
-        model.forward = model_forward_cache
-        if hasattr(model, "model"):
-            assert hasattr(model.model, "layers")
-            assert isinstance(model.model.layers, torch.nn.ModuleList)
-            model.model.layers[0].forward = layer_forward_cache
-        else:
-            model.forward = layer_forward_cache
+        def model_forward_wrapper(_model, *m_args, **m_kwargs):
+            """
+            Wrapper to ignore StopForward exceptions so the user's training loop doesn't crash.
+            """
+            try:
+                assert self._orig_model_forward is not None
+                return self._orig_model_forward(*m_args, **m_kwargs)
+            except StopForward:
+                # We stopped after the first layer; return None or dummy output if needed.
+                return None
+
+        # Backup model.forward so we can suppress StopForward
+        self._orig_model_forward = model.forward
+        model.forward = types.MethodType(model_forward_wrapper, model)
 
         return model
 
     @torch.no_grad()
     def convert(self, model):
         """
-        Convert the prepared model to its GPTQ quantized version.
+        Perform GPTQ quantization using cached first-layer inputs.
 
-        Applies the GPTQ quantization on weights based on the collected statistics.
+        Steps:
+          1) Restore original forwards (no more catching).
+          2) Iterate through each Transformer layer sequentially:
+             a) For each layer, register forward hooks to collect (inp, out) stats for GPTQ.
+             b) Run the layer on cached inputs for all batches.
+             c) Apply GPTQ and update the weights.
+             d) Re-run the layer to produce outputs for the next layer; update cached inputs.
+          3) Restore model.config.use_cache if needed and clear internal caches.
 
         Parameters:
-            model: The prepared PyTorch model.
+            model (torch.nn.Module): The prepared model.
 
         Returns:
-            The quantized model.
+            torch.nn.Module: Quantized model.
         """
+        # Restore original forwards (we no longer want to stop after first layer)
+        assert self._orig_model_forward is not None
+        model.forward = self._orig_model_forward
+        assert (
+            self._first_layer_ref is not None and self._orig_layer_forward is not None
+        )
+        self._first_layer_ref.forward = self._orig_layer_forward
+
         gptq_conf = self.config
         assert isinstance(gptq_conf, GPTQConfig)
-
-        # Save the original cache setting and disable caching during calibration/inference.
-        if hasattr(model, "config"):
-            use_cache = model.config.use_cache
+        # Disable use_cache during calibration
+        if hasattr(model, "config") and hasattr(model.config, "use_cache"):
+            orig_use_cache = model.config.use_cache
             model.config.use_cache = False
+        else:
+            orig_use_cache = None
 
-        quantizers = {}
+        # Identify layers
         if hasattr(model, "model"):
             target_layers = model.model.layers
         else:
             target_layers = [model]
+
+        quantizers: Dict[str, Any] = {}
         for l_idx, layer in enumerate(target_layers):
-            # Identify quantizable submodules within the layer.
+            # 1) Identify quantizable submodules within the layer
             full = find_layers(layer)
-
             sequential = [list(full.keys())]
+
+            # 2) Set up GPTQ objects and gather stats
             for names in sequential:
                 subset = {n: full[n] for n in names}
 
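
The replacement `prepare()` rests on two PyTorch idioms: binding a new `forward` onto a single module instance with `types.MethodType`, and aborting the rest of the run with a sentinel exception. A self-contained sketch of that catcher pattern (an illustration, not the tico code itself):

```python
import types
import torch

class StopForward(Exception):
    pass

net = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 4))
captured = []
orig_forward = net[0].forward  # backup for later restoration

def catcher(module, *args, **kwargs):
    captured.append((args, kwargs))
    raise StopForward  # nothing past net[0] ever runs

# Instance-level monkeypatch: only net[0] is affected.
net[0].forward = types.MethodType(catcher, net[0])
try:
    net(torch.randn(2, 4))
except StopForward:
    pass  # in the quantizer, model_forward_wrapper swallows this

net[0].forward = orig_forward  # restore, as convert() does
print(len(captured))  # -> 1 recorded batch of first-layer inputs
```
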
@@ -155,20 +194,22 @@ class GPTQQuantizer(BaseQuantizer):
                 for name in subset:
                     gptq[name] = GPTQ(subset[name])
                     gptq[name].quantizer.configure(
-                        8, perchannel=True, sym=False, mse=False
+                        bits=8, perchannel=True, sym=False, mse=False
                     )
-                # Define a hook to collect input/output batches for quantizer calibration.
+
+                # Hook to collect (inp, out) for GPTQ
                 def add_batch(name):
-                    def tmp(_, inp, out):
+                    def _hook(_, inp, out):
                         gptq[name].add_batch(inp[0].data, out.data)
 
-                    return tmp
+                    return _hook
 
                 handles = []
                 for name in subset:
                     handles.append(subset[name].register_forward_hook(add_batch(name)))
-                # Run the current layer on the stored calibration inputs to capture activation stats.
-                batch_num = self.cache_kwargs.pop("batch_num")
+
+                # Run layer forward over all cached batches to build Hessian/statistics
+                batch_num = self.num_batches
                 for batch_idx in range(batch_num):
                     cache_args_batch = gather_single_batch_from_list(
                         self.cache_args, batch_idx
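
The `add_batch` closure above uses PyTorch's standard `register_forward_hook`, which fires after each submodule call with `(module, inputs, output)`; that is how GPTQ observes every (inp, out) pair without touching the layer code. A minimal illustration of the mechanism:

```python
import torch

linear = torch.nn.Linear(8, 8)
seen = []

def add_batch_hook(module, inputs, output):
    # GPTQ would accumulate inputs[0] into its Hessian estimate here.
    seen.append((inputs[0].shape, output.shape))

handle = linear.register_forward_hook(add_batch_hook)
linear(torch.randn(3, 8))
handle.remove()  # mirrors the `h.remove()` cleanup in convert()
print(seen)  # -> [(torch.Size([3, 8]), torch.Size([3, 8]))]
```
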
@@ -176,15 +217,16 @@ class GPTQQuantizer(BaseQuantizer):
                     cache_kwargs_batch = gather_single_batch_from_dict(
                         self.cache_kwargs, batch_idx
                     )
-                    layer(*cache_args_batch, **cache_kwargs_batch)[0]
-                self.cache_kwargs["batch_num"] = batch_num
+                    layer(*cache_args_batch, **cache_kwargs_batch)
+
+                # Remove handles
                 for h in handles:
                     h.remove()
-                # Quantize each submodule using the collected calibration data.
+
+                # 3) Quantize each submodule
                 for name in subset:
                     if gptq_conf.verbose:
-                        print(l_idx, name)
-                        print("Quantizing ...")
+                        print(f"[Layer {l_idx}] {name} -> Quantizing ...")
                     gptq[name].fasterquant(
                         percdamp=0.01,
                         groupsize=-1,
@@ -192,18 +234,10 @@ class GPTQQuantizer(BaseQuantizer):
                         static_groups=False,
                         verbose=gptq_conf.verbose,
                     )
-                    quantizers["model.layers.%d.%s" % (l_idx, name)] = gptq[
-                        name
-                    ].quantizer
+                    quantizers[f"model.layers.{l_idx}.{name}"] = gptq[name].quantizer
                     gptq[name].free()
-            """
-            Execute the quantized layer with the calibration inputs to obtain ouptuts
-            that will serve as inputs for the next layer.
 
-            This ensures that the quantization effects are correctly propagated to subsequent
-            layers.
-            """
-            batch_num = self.cache_kwargs.pop("batch_num")
+            # 4) After quantization, re-run the layer to produce outputs for the next layer
             for batch_idx in range(batch_num):
                 cache_args_batch = gather_single_batch_from_list(
                     self.cache_args, batch_idx
@@ -211,15 +245,26 @@ class GPTQQuantizer(BaseQuantizer):
                 cache_kwargs_batch = gather_single_batch_from_dict(
                     self.cache_kwargs, batch_idx
                 )
-                outs = layer(*cache_args_batch, **cache_kwargs_batch)[0]
+                outs = layer(*cache_args_batch, **cache_kwargs_batch)
+                # LLaMA's decoder layer return type differs across Transformers versions:
+                # some return a tuple (hidden_states, ...), others return just a tensor.
+                # This line ensures we always take the first element when it's a tuple.
+                outs = outs[0] if isinstance(outs, tuple) else outs
                 # Update inputs for next iteration.
                 self.cache_args[0][batch_idx] = outs
-            self.cache_kwargs["batch_num"] = batch_num
 
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
+
         # Restore the original cache configuration.
-        if hasattr(model, "config"):
-            model.config.use_cache = use_cache
+        if orig_use_cache is not None:
+            model.config.use_cache = orig_use_cache
+
+        # Clear caches to free memory
+        self.cache_args.clear()
+        self.cache_kwargs.clear()
+        self.num_batches = 0
+
+        model.quantizers = quantizers
 
         return model
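
Step 4's re-run is what makes the quantization sequential: layer i+1 must calibrate against the outputs of the already-quantized layer i, not the original activations. A sketch of that propagation loop, with `quantize_layer` as a hypothetical stand-in for the per-layer GPTQ step and keyword inputs omitted for brevity:

```python
import torch

def propagate(layers, inputs):
    """layers: iterable of modules; inputs: per-batch tensors cached for layer 0."""
    for layer in layers:
        quantize_layer(layer, inputs)  # hypothetical: run GPTQ on this layer
        with torch.no_grad():
            outs = [layer(x) for x in inputs]
            # Normalize tuple-vs-tensor returns, as the diff does.
            inputs = [o[0] if isinstance(o, tuple) else o for o in outs]
    return layers
```
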
tico/experimental/quantization/algorithm/gptq/utils.py CHANGED
@@ -58,7 +58,7 @@ def gather_single_batch_from_list(data_list, idx):
     Returns:
         list: single batch.
     """
-    # obtain a set of keyword input from cache
+    # obtain a set of positional input from cache
    single_batch = []
    for data_item in data_list:
        single_batch.append(data_item[idx])
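
For reference, the cache layout these helpers consume is documented in `__init__` above: `cache_args[i]` holds the i-th positional argument for every batch. A plausible reconstruction of the two gather helpers under that layout (the shipped versions live in `gptq/utils.py`):

```python
cache_args = [["h0_b0", "h0_b1"], ["mask_b0", "mask_b1"]]   # 2 args, 2 batches
cache_kwargs = {"position_ids": ["pos_b0", "pos_b1"]}

def gather_single_batch_from_list(data_list, idx):
    # take element `idx` of each per-argument list
    return [data_item[idx] for data_item in data_list]

def gather_single_batch_from_dict(data_dict, idx):
    # take element `idx` of each per-keyword list
    return {k: v[idx] for k, v in data_dict.items()}

assert gather_single_batch_from_list(cache_args, 1) == ["h0_b1", "mask_b1"]
assert gather_single_batch_from_dict(cache_kwargs, 0) == {"position_ids": "pos_b0"}
```
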
tico/experimental/quantization/public_interface.py CHANGED
@@ -59,6 +59,8 @@ def prepare(
     Returns:
         The model prepared for quantization.
     """
+    if hasattr(model, QUANTIZER_ATTRIBUTE_NAME):
+        raise RuntimeError("prepare() has already been called.")
     if quant_config.name == "pt2e" and inplace:
         raise RuntimeError(
             "In-place is not supported for PT2E quantization due to limitation in the underlying Torch APIs. Please set 'inplace=False' to proceed."
@@ -99,6 +101,12 @@ def convert(model, inplace: Optional[bool] = False):
         raise RuntimeError(
             "In-place is not supported for PT2E quantization due to limitation in the underlying Torch APIs. Please set 'inplace=False' to proceed."
         )
+    # deepcopy prevents the quantizer from restoring the catcher used for calibration.
+    # TODO Revisit `inplace` policy.
+    if isinstance(quantizer, GPTQQuantizer) and not inplace:
+        raise RuntimeError(
+            "GPTQ quantization only supports `inplace=True`. Please set 'inplace=True' to proceed."
+        )
 
     model = model if inplace else copy.deepcopy(model)
 
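
Taken together, the two new guards shape the public workflow: `prepare()` may only be called once per model, and GPTQ requires `inplace=True` so that `convert()` can still find and restore the catcher it installed (a deepcopy would orphan it). A sketch under the assumption that `prepare`/`convert` are importable from `tico.experimental.quantization` and that `model` and the calibration loop exist:

```python
from tico.experimental.quantization import prepare, convert
from tico.experimental.quantization.config import GPTQConfig

model = prepare(model, GPTQConfig(), inplace=True)  # a second call would raise
# ... user runs calibration batches here ...
model = convert(model, inplace=True)  # inplace=False now raises for GPTQ
```
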
tico/utils/record_input.py CHANGED
@@ -15,7 +15,6 @@
 import copy
 
 import inspect
-from contextlib import contextmanager
 from typing import Callable, List, Optional
 
 import torch.nn as nn
@@ -59,10 +58,21 @@ class RecordingInput:
         self.condition = condition
         self.input_to_remove = input_to_remove
         self.sig = inspect.signature(self.forward_org)
+
+        for param in self.sig.parameters.values():
+            if param.kind == inspect.Parameter.KEYWORD_ONLY:
+                raise ValueError(f"Keyword-only parameter not supported: {param.name}")
+            if param.kind == inspect.Parameter.VAR_POSITIONAL:
+                raise ValueError(
+                    f"Var positional parameter not supported: {param.name}"
+                )
+
+        # NOTE: the name `kwargs` is removed since `kwargs` is a dict, not an arg itself.
+        # args in kwargs are kept via sig.bind(*args, **kwargs) in capture_and_forward.
         self.args_names = [
             name
-            for name in self.sig.parameters.keys()
-            if name not in ("self", "kwargs")
+            for name, param in self.sig.parameters.items()
+            if param.kind != inspect.Parameter.VAR_KEYWORD and name != "self"
         ]
         self.captured_input = None
 
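
The new validation walks `inspect.signature(...).parameters` and branches on parameter kind. A short illustration of how the standard library classifies each kind:

```python
import inspect

def f(self, a, b=1, *args, c, **kwargs):
    pass

for name, p in inspect.signature(f).parameters.items():
    print(name, p.kind)
# self, a, b -> POSITIONAL_OR_KEYWORD
# args       -> VAR_POSITIONAL  (now rejected)
# c          -> KEYWORD_ONLY    (now rejected)
# kwargs     -> VAR_KEYWORD     (excluded from args_names)
```
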
{tico-0.1.0.dev250729.dist-info → tico-0.1.0.dev250731.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tico
-Version: 0.1.0.dev250729
+Version: 0.1.0.dev250731
 Summary: Convert exported Torch module to circle
 Home-page: UNKNOWN
 License: UNKNOWN
{tico-0.1.0.dev250729.dist-info → tico-0.1.0.dev250731.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
-tico/__init__.py,sha256=quSJ4KKyh76eIG3MrN15qViNKdudexAIk-h6x_L-hRc,1883
+tico/__init__.py,sha256=DnYmoCoxFZl3z2PPb_zOksdK5E8E2JAGnARIST2A4n0,1883
 tico/pt2_to_circle.py,sha256=gu3MD4Iqc0zMZcCZ2IT8oGbyj21CTSbT3Rgd9s2B_9A,2767
 tico/config/__init__.py,sha256=xZzCXjZ84qE-CsBi-dfaL05bqpQ3stKKfTXhnrJRyVs,142
 tico/config/base.py,sha256=q5xMqGxTUZs4mFqt5c7i_y9U00fYgdMGl9nUqIVMlCo,1248
@@ -7,14 +7,14 @@ tico/config/v1.py,sha256=O1jzpUBDwoWpLohEpI08pJNwVB-yz3ufPrQm2_XWq4Y,1108
 tico/experimental/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
 tico/experimental/quantization/__init__.py,sha256=IaJPZegVJp0P3luutBo907Kp5sOJensE1Mm-XBG_jBs,122
 tico/experimental/quantization/config.py,sha256=h01WpP8Y-dLj6yg12pMZm3PXJqUnU2sWip5jBRc5x9Q,1604
-tico/experimental/quantization/public_interface.py,sha256=OKW8UoBMjPwiTacrWgQY9ENCh8ucPnYMSrl2R-w0pJ0,3982
+tico/experimental/quantization/public_interface.py,sha256=4-v9VXsokRG2-UUYYHd_MlbHxChqdGI5iuySyYDY_Pw,4420
 tico/experimental/quantization/quantizer.py,sha256=_2pDtWFKDCuKfYF2bptOwIYsa0VFNFM1ZNgi8_OGvHM,2365
 tico/experimental/quantization/algorithm/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
 tico/experimental/quantization/algorithm/gptq/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
 tico/experimental/quantization/algorithm/gptq/gptq.py,sha256=Qn9b_2ki7B64DcVEY25NMkww3PdZ5EqYQQXfYhNDQ6I,5555
 tico/experimental/quantization/algorithm/gptq/quant.py,sha256=Rl4wAOCmlE0U09BtNCDbccaSNohRHCNLwFi3zCqZfNo,5127
-tico/experimental/quantization/algorithm/gptq/quantizer.py,sha256=icaFDXA1UibgRI0nBZ4N0Ij1ajVpShWUFw5pTDffOiE,8914
-tico/experimental/quantization/algorithm/gptq/utils.py,sha256=vDIW5ow5c1VSFpub7QumMWorHrV86c0kOtlBxMw2Y2Y,1808
+tico/experimental/quantization/algorithm/gptq/quantizer.py,sha256=KiaNcDkufbYPHdkkOGw9nAwLtk0yYwUDbyzFT3xRLOs,11066
+tico/experimental/quantization/algorithm/gptq/utils.py,sha256=leGKayf-xbSjVwwAGTA5RsxUKrhDiklOQdlsLifjdrs,1811
 tico/experimental/quantization/algorithm/pt2e/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
 tico/experimental/quantization/algorithm/pt2e/quantizer.py,sha256=mdTvsG87bo8fu0GaWqSM8iBCs-4f4EfUlVtk-Ko6M34,2546
 tico/experimental/quantization/algorithm/pt2e/utils.py,sha256=URjTGgsnDdhUC2Nr0-YJ9GWbVOKmjElfLr83Y8eCz-M,4806
@@ -195,7 +195,7 @@ tico/utils/model.py,sha256=pPOIjD0qjQirLibiRxxfjOR6efimOcDAd9R-74eus-k,1282
 tico/utils/padding.py,sha256=qKke-dJeeLHiRaePjDS66txrGyiYuipLVQeqLYad8uk,3349
 tico/utils/passes.py,sha256=kGmDe__5cPaO6i5EDAoXSVe6yXEoX9hAny4ROb3ZEmQ,2409
 tico/utils/pytree_utils.py,sha256=jrk3N6X6LiUnBCX_gM1K9nywbVAJBVnszlTAgeIeDUc,5219
-tico/utils/record_input.py,sha256=FBtV00WWcXMXmg-Ujgvci9HjOmRJC1cVzx_WRNIF4MI,3324
+tico/utils/record_input.py,sha256=QN-8D71G_WAX3QQQ5CIwbEfFJZTQ3CvL4wCMiVddua4,3894
 tico/utils/register_custom_op.py,sha256=3-Yl6iYmx1qQA2igNHt4hYhQhQMkdPb7gF50LIY8yvc,27350
 tico/utils/serialize.py,sha256=mEuusEzi82WFsz3AkowgWwxSLeo50JDxyOj6yYDQhEI,1914
 tico/utils/torch_compat.py,sha256=oc6PztVsXdHcQ3iaVR90wLLxrGaj6zFHWZ8K9rRS6q8,1795
@@ -206,9 +206,9 @@ tico/utils/mx/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
 tico/utils/mx/elemwise_ops.py,sha256=V6glyAHsVR1joqpsgnNytatCD_ew92xNWZ19UFDoMTA,10281
 tico/utils/mx/formats.py,sha256=uzNWyu-1onUlwQfX5cZ6fZSUfHMRqorper7_T1k3jfk,3404
 tico/utils/mx/mx_ops.py,sha256=RcfUTYVi-wilGB2sC35OeARdwDqnixv7dG5iyZ-fQT8,8555
-tico-0.1.0.dev250729.dist-info/LICENSE,sha256=kp4JLII7bzRhPb0CPD5XTDZMh22BQ7h3k3B7t8TiSbw,12644
-tico-0.1.0.dev250729.dist-info/METADATA,sha256=ZcnGD8K56o04Pt172XYRpuh-DHPoFWDhUhEpaCsy23k,8430
-tico-0.1.0.dev250729.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-tico-0.1.0.dev250729.dist-info/entry_points.txt,sha256=kBKYSS_IYrSXmUYevmmepqIVPScq5vF8ulQRu3I_Zf0,59
-tico-0.1.0.dev250729.dist-info/top_level.txt,sha256=oqs7UPoNSKZEwqsX8B-KAWdQwfAa7i60pbxW_Jk7P3w,5
-tico-0.1.0.dev250729.dist-info/RECORD,,
+tico-0.1.0.dev250731.dist-info/LICENSE,sha256=kp4JLII7bzRhPb0CPD5XTDZMh22BQ7h3k3B7t8TiSbw,12644
+tico-0.1.0.dev250731.dist-info/METADATA,sha256=oCmTMbqj-MxLLJHDtnZkmdHWT4C_g796A3bgpNEFxUA,8430
+tico-0.1.0.dev250731.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+tico-0.1.0.dev250731.dist-info/entry_points.txt,sha256=kBKYSS_IYrSXmUYevmmepqIVPScq5vF8ulQRu3I_Zf0,59
+tico-0.1.0.dev250731.dist-info/top_level.txt,sha256=oqs7UPoNSKZEwqsX8B-KAWdQwfAa7i60pbxW_Jk7P3w,5
+tico-0.1.0.dev250731.dist-info/RECORD,,