tico 0.1.0.dev250825__py3-none-any.whl → 0.1.0.dev250827__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tico/__init__.py CHANGED
@@ -29,7 +29,7 @@ __all__ = [
  ]

  # THIS LINE IS AUTOMATICALLY GENERATED BY setup.py
- __version__ = "0.1.0.dev250825"
+ __version__ = "0.1.0.dev250827"

  MINIMUM_SUPPORTED_VERSION = "2.5.0"
  SECURE_TORCH_VERSION = "2.6.0"
tico/experimental/quantization/ptq/examples/__init__.py ADDED
@@ -0,0 +1 @@
+ # DO NOT REMOVE THIS FILE
tico/experimental/quantization/ptq/examples/quantize_linear.py ADDED
@@ -0,0 +1,106 @@
+ # Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # =============================================================================
+ # POST-TRAINING QUANTIZATION EXAMPLE — Simple Linear Model
+ # -----------------------------------------------------------------------------
+ # This demo shows a minimal PTQ flow for a toy model:
+ # 1. Define a simple model with a single Linear layer.
+ # 2. Replace the FP32 Linear with a QuantLinear wrapper.
+ # 3. Run a short calibration pass to collect activation statistics.
+ # 4. Freeze scales / zero-points and switch to INT-simulation mode.
+ # 5. Compare INT vs FP32 outputs with a mean-absolute-diff check.
+ # 6. Export the quantized model to a Circle format.
+ # =============================================================================
+
+ import pathlib
+
+ import torch
+ import torch.nn as nn
+
+ from tico.experimental.quantization.evaluation.metric import compute_peir
+ from tico.experimental.quantization.evaluation.utils import plot_two_outputs
+
+ from tico.experimental.quantization.ptq.mode import Mode
+ from tico.experimental.quantization.ptq.wrappers.nn.quant_linear import QuantLinear
+ from tico.utils.utils import SuppressWarning
+
+ # -------------------------------------------------------------------------
+ # 0. Define a toy model (1 Linear layer only)
+ # -------------------------------------------------------------------------
+ class TinyLinearModel(nn.Module):
+     """A minimal model: single Linear layer."""
+
+     def __init__(self, in_features=16, out_features=8):
+         super().__init__()
+         self.fc = nn.Linear(in_features, out_features, bias=False)
+
+     def forward(self, x):
+         return self.fc(x)
+
+
+ # Instantiate FP32 model
+ model = TinyLinearModel()
+ model.eval()
+
+ # Keep FP32 reference for diff check
+ fp32_layer = model.fc
+
+ # -------------------------------------------------------------------------
+ # 1. Replace the Linear with QuantLinear wrapper
+ # -------------------------------------------------------------------------
+ model.fc = QuantLinear(fp32_layer)  # type: ignore[assignment]
+ # model.fc = PTQWrapper(fp32_layer) (Wrapping helper class)
+ qlayer = model.fc  # alias for brevity
+
+ # -------------------------------------------------------------------------
+ # 2. Single-pass calibration (collect activation ranges)
+ # -------------------------------------------------------------------------
+ assert isinstance(qlayer, QuantLinear)
+ with torch.no_grad():
+     qlayer.enable_calibration()
+     for _ in range(16):  # small toy batch
+         x = torch.randn(4, 16)  # (batch=4, features=16)
+         _ = model(x)
+     qlayer.freeze_qparams()  # lock scales & zero-points
+
+ assert qlayer._mode is Mode.QUANT, "Quantization mode should be active now."
+
+ # -------------------------------------------------------------------------
+ # 3. Quick INT-sim vs FP32 sanity check
+ # -------------------------------------------------------------------------
+ x = torch.randn(2, 16)
+ with torch.no_grad():
+     int8_out = model(x)
+     fp32_out = fp32_layer(x)
+
+ print("┌───────────── Quantization Error Summary ─────────────")
+ print(f"│ Mean |diff|: {(int8_out - fp32_out).abs().mean().item():.6f}")
+ print(f"│ PEIR : {compute_peir(fp32_out, int8_out) * 100:.6f} %")
+ print("└──────────────────────────────────────────────────────")
+ print(plot_two_outputs(fp32_out, int8_out))
+
+ # -------------------------------------------------------------------------
+ # 4. Export the calibrated model to Circle
+ # -------------------------------------------------------------------------
+ import tico
+
+ save_path = pathlib.Path("tiny_linear.q.circle")
+ example_input = torch.randn(1, 16)
+
+ with SuppressWarning(UserWarning, ".*"):
+     cm = tico.convert(model, (example_input,))  # forward(x) only
+     cm.save(save_path)
+
+ print(f"Quantized Circle model saved to {save_path.resolve()}")
tico/experimental/quantization/ptq/examples/quantize_llama_mlp.py ADDED
@@ -0,0 +1,87 @@
+ # Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import pathlib
+
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ import tico
+ from tico.experimental.quantization.evaluation.metric import compute_peir
+ from tico.experimental.quantization.evaluation.utils import plot_two_outputs
+ from tico.experimental.quantization.ptq.dtypes import INT16
+ from tico.experimental.quantization.ptq.mode import Mode
+ from tico.experimental.quantization.ptq.qscheme import QScheme
+ from tico.experimental.quantization.ptq.quant_config import QuantConfig
+ from tico.experimental.quantization.ptq.wrappers.llama.quant_mlp import QuantLlamaMLP
+ from tico.utils.utils import SuppressWarning
+
+ name = "Maykeye/TinyLLama-v0"
+ model = AutoModelForCausalLM.from_pretrained(name)
+ tokenizer = AutoTokenizer.from_pretrained(name)
+ model.eval()
+
+ # -------------------------------------------------------------------------
+ # 1. Replace layer-0’s MLP with QuantLlamaMLP
+ # -------------------------------------------------------------------------
+ fp32_mlp = model.model.layers[0].mlp
+ model.model.layers[0].mlp = QuantLlamaMLP(
+     fp32_mlp,
+     qcfg=QuantConfig(default_dtype=INT16, default_qscheme=QScheme.PER_TENSOR_SYMM),
+ )  # PTQWrapper(fp32_mlp) is also fine
+ model.eval()
+
+ mlp_q = model.model.layers[0].mlp
+
+ # -------------------------------------------------------------------------
+ # 2. Single-pass calibration
+ # -------------------------------------------------------------------------
+ with torch.no_grad():
+     mlp_q.enable_calibration()
+     for _ in range(16):
+         prompts = ["hello tinyllama "] * 8
+         enc = tokenizer(prompts, return_tensors="pt")
+         emb = model.model.embed_tokens(enc["input_ids"])
+         _ = mlp_q(emb)
+
+     mlp_q.freeze_qparams()
+
+ assert mlp_q._mode is Mode.QUANT, "Quantization mode should be active now."
+
+ # -------------------------------------------------------------------------
+ # 3. Quick diff check (INT-sim vs FP32)
+ # -------------------------------------------------------------------------
+ with torch.no_grad():
+     ids = tokenizer("quant all tensors!", return_tensors="pt")
+     emb = model.model.embed_tokens(ids["input_ids"])
+     int16 = mlp_q(emb)  # INT-sim
+     fp32 = fp32_mlp(emb)  # baseline reference
+
+ print("┌───────────── Quantization Error Summary ─────────────")
+ print(f"│ Mean |diff|: {(int16 - fp32).abs().mean().item():.6f}")
+ print(f"│ PEIR : {compute_peir(fp32, int16) * 100:.6f} %")
+ print("└──────────────────────────────────────────────────────")
+ print(plot_two_outputs(fp32, int16))
+
+ # -------------------------------------------------------------------------
+ # 4. Export the quantized block
+ # -------------------------------------------------------------------------
+ save_path = pathlib.Path("mlp.q.circle")
+ example_in = (torch.randn(1, 1, model.config.hidden_size),)
+
+ with SuppressWarning(UserWarning, ".*"):
+     cm = tico.convert(mlp_q, example_in)
+     cm.save(save_path)
+
+ print(f"Quantized Circle model saved to {save_path.resolve()}")
tico/experimental/quantization/ptq/wrappers/llama/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from tico.experimental.quantization.ptq.wrappers.llama.quant_mlp import QuantLlamaMLP
+
+ __all__ = ["QuantLlamaMLP"]
tico/experimental/quantization/ptq/wrappers/llama/quant_mlp.py ADDED
@@ -0,0 +1,98 @@
+ # Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Optional
+
+ import torch
+ import torch.nn as nn
+
+ from tico.experimental.quantization.ptq.quant_config import QuantConfig
+ from tico.experimental.quantization.ptq.wrappers.ptq_wrapper import PTQWrapper
+ from tico.experimental.quantization.ptq.wrappers.quant_module_base import (
+     QuantModuleBase,
+ )
+ from tico.experimental.quantization.ptq.wrappers.registry import try_register
+
+
+ @try_register("transformers.models.llama.modeling_llama.LlamaMLP")
+ class QuantLlamaMLP(QuantModuleBase):
+     def __init__(
+         self,
+         mlp_fp: nn.Module,
+         *,
+         qcfg: Optional[QuantConfig] = None,
+         fp_name: Optional[str] = None,
+     ):
+         super().__init__(qcfg, fp_name=fp_name)
+
+         # ----- child configs (hierarchical override) -------------------
+         gate_cfg = qcfg.child("gate_proj") if qcfg else None
+         up_cfg = qcfg.child("up_proj") if qcfg else None
+         down_cfg = qcfg.child("down_proj") if qcfg else None
+         act_cfg = qcfg.child("act_fn") if qcfg else None
+
+         # ----- wrap three Linear layers -------------------------------
+         assert hasattr(mlp_fp, "gate_proj") and isinstance(
+             mlp_fp.gate_proj, torch.nn.Module
+         )
+         assert hasattr(mlp_fp, "up_proj") and isinstance(
+             mlp_fp.up_proj, torch.nn.Module
+         )
+         assert hasattr(mlp_fp, "down_proj") and isinstance(
+             mlp_fp.down_proj, torch.nn.Module
+         )
+         self.gate_proj = PTQWrapper(
+             mlp_fp.gate_proj, qcfg=gate_cfg, fp_name=f"{fp_name}.gate_proj"
+         )
+         self.up_proj = PTQWrapper(
+             mlp_fp.up_proj, qcfg=up_cfg, fp_name=f"{fp_name}.up_proj"
+         )
+         self.down_proj = PTQWrapper(
+             mlp_fp.down_proj, qcfg=down_cfg, fp_name=f"{fp_name}.down_proj"
+         )
+
+         # ----- activation ---------------------------------------------
+         assert hasattr(mlp_fp, "act_fn") and isinstance(mlp_fp.act_fn, torch.nn.Module)
+         self.act_fn = PTQWrapper(
+             mlp_fp.act_fn, qcfg=act_cfg, fp_name=f"{fp_name}.act_fn"
+         )
+
+         # ----- local observers ----------------------------------------
+         self.act_in_obs = self._make_obs("act_in")
+         self.mul_obs = self._make_obs("mul")
+
+     def forward(self, x: torch.Tensor):
+         # 1) quantize input once
+         x_q = self._fq(x, self.act_in_obs)
+
+         # 2) parallel projections
+         g = self.gate_proj(x_q)
+         u = self.up_proj(x_q)
+
+         # 3) activation on gate
+         a = self.act_fn(g)
+
+         # 4) element-wise product
+         h = self._fq(a * u, self.mul_obs)
+
+         # 5) final projection
+         return self.down_proj(h)
+
+     def _all_observers(self):
+         # local first
+         yield self.act_in_obs
+         yield self.mul_obs
+         # recurse into children that are QuantModuleBase
+         for m in (self.gate_proj, self.up_proj, self.down_proj, self.act_fn):
+             yield from m._all_observers()
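
Note: because QuantLlamaMLP is decorated with try_register("transformers.models.llama.modeling_llama.LlamaMLP") and its module is added to the registry's core imports later in this diff, wrapping a LLaMA MLP through the generic PTQWrapper is expected to dispatch to this class (the quantize_llama_mlp.py example above remarks that "PTQWrapper(fp32_mlp) is also fine"). A minimal sketch under that assumption, reusing the TinyLLama model loaded in the example:

    # Sketch only: PTQWrapper is assumed to resolve LlamaMLP -> QuantLlamaMLP
    # through the registry; `model` is the TinyLLama model from the example above.
    from tico.experimental.quantization.ptq.wrappers.ptq_wrapper import PTQWrapper
    from tico.experimental.quantization.ptq.wrappers.llama.quant_mlp import QuantLlamaMLP

    fp32_mlp = model.model.layers[0].mlp
    model.model.layers[0].mlp = PTQWrapper(fp32_mlp)  # no explicit quant class needed
    assert isinstance(model.model.layers[0].mlp.wrapped, QuantLlamaMLP)
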
tico/experimental/quantization/ptq/wrappers/ptq_wrapper.py CHANGED
@@ -48,7 +48,24 @@ class PTQWrapper(QuantModuleBase):
          return self.wrapped(*args, **kwargs)

      def _all_observers(self):
-         yield from self.wrapped._all_observers()
+         """
+         PTQWrapper itself owns NO observers (transparent node).
+         Returning an empty iterator prevents double-processing when parents
+         traverse the tree and then recurse into `self.wrapped`.
+         """
+         return ()  # no local observers
+
+     def named_observers(self):
+         """
+         Proxy to the wrapped module so debugging tools can still enumerate observers.
+         """
+         yield from self.wrapped.named_observers()
+
+     def get_observer(self, name: str):
+         """
+         Proxy to the wrapped module for direct lookup by name.
+         """
+         return self.wrapped.get_observer(name)

      def extra_repr(self) -> str:
          return self.wrapped.extra_repr()
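
Note: the change above makes PTQWrapper a transparent node: `_all_observers()` reports nothing of its own, while the new `named_observers()` / `get_observer()` proxies keep the wrapped module's observers reachable. A minimal sketch of how the proxies might be used after calibration, assuming a Linear wrapped as in quantize_linear.py above; the observer name "act_in" is an assumed label that depends on what the wrapped module registers:

    # Hypothetical observer inspection through the PTQWrapper proxies.
    import torch.nn as nn
    from tico.experimental.quantization.ptq.wrappers.ptq_wrapper import PTQWrapper

    wrapper = PTQWrapper(nn.Linear(16, 8))
    for name, obs in wrapper.named_observers():  # proxied to the wrapped quant module
        print(name, obs)
    act_in = wrapper.get_observer("act_in")  # direct lookup; "act_in" is an assumed name
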
tico/experimental/quantization/ptq/wrappers/registry.py CHANGED
@@ -28,6 +28,7 @@ _CORE_MODULES = (
      "tico.experimental.quantization.ptq.wrappers.nn.quant_layernorm",
      "tico.experimental.quantization.ptq.wrappers.nn.quant_linear",
      "tico.experimental.quantization.ptq.wrappers.nn.quant_silu",
+     "tico.experimental.quantization.ptq.wrappers.llama.quant_mlp",
      # add future core wrappers here
  )

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: tico
- Version: 0.1.0.dev250825
+ Version: 0.1.0.dev250827
  Summary: Convert exported Torch module to circle
  Home-page: UNKNOWN
  License: UNKNOWN
@@ -1,4 +1,4 @@
- tico/__init__.py,sha256=qQ6AhQGEdqTIol9a3pZd8krL41W7mwvr0HoA1tPCeqU,1883
+ tico/__init__.py,sha256=zW-Qvhbt2u7nt1XfXlgpi6kJrCNuyIJ3PddICgrWpZI,1883
  tico/pt2_to_circle.py,sha256=gu3MD4Iqc0zMZcCZ2IT8oGbyj21CTSbT3Rgd9s2B_9A,2767
  tico/config/__init__.py,sha256=xZzCXjZ84qE-CsBi-dfaL05bqpQ3stKKfTXhnrJRyVs,142
  tico/config/base.py,sha256=q5xMqGxTUZs4mFqt5c7i_y9U00fYgdMGl9nUqIVMlCo,1248
@@ -61,6 +61,9 @@ tico/experimental/quantization/ptq/dtypes.py,sha256=xfCBtq6mQmUYRwsoFgII6gvRl1ra
  tico/experimental/quantization/ptq/mode.py,sha256=lT-T8vIv8YWcwrjT7xXVhOw1g7aoAdh_3PWB-ptPKaI,1052
  tico/experimental/quantization/ptq/qscheme.py,sha256=uwhv7bCxOOXB3I-IKlRyr_u4eXOq48uIqGy4TLDqGxY,1301
  tico/experimental/quantization/ptq/quant_config.py,sha256=nm7570Y1X2mOT_8s27ilWid04otor6cVTi9GwgAEaKc,4300
+ tico/experimental/quantization/ptq/examples/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
+ tico/experimental/quantization/ptq/examples/quantize_linear.py,sha256=8zq-ZJDYgam0xQ-PbC6Xb1I7W1mv0Wi-b--IP2wwXtw,4539
+ tico/experimental/quantization/ptq/examples/quantize_llama_mlp.py,sha256=MktXLIxz2fUAA7sDG_wN6B8eUi71jOgAaMfhPj9u9XM,3736
  tico/experimental/quantization/ptq/observers/__init__.py,sha256=WF2MvL9M_jl-B1FqcY9zic34NOCRp17HkRYv-TMxMr4,613
  tico/experimental/quantization/ptq/observers/affine_base.py,sha256=e2Eba64nrxKQyE4F_WJ7WTSsk3xe6bkdGUKaoLFWGFw,4638
  tico/experimental/quantization/ptq/observers/base.py,sha256=Wons1MzpqK1mfcy-ppl-B2Dum0edXg2dWW2Lw3V18tw,3280
@@ -71,10 +74,12 @@ tico/experimental/quantization/ptq/observers/mx.py,sha256=aP4qmBgeiRIYZJksShN5gs
  tico/experimental/quantization/ptq/utils/__init__.py,sha256=PL9IZgiWoMtsXVljeOy7KymmLVP238SXEFRLXYK72WQ,126
  tico/experimental/quantization/ptq/utils/reduce_utils.py,sha256=3kWawLB91EcvvHlCrNqqfZF7tpgr22htBSA049mKw_4,973
  tico/experimental/quantization/ptq/wrappers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- tico/experimental/quantization/ptq/wrappers/ptq_wrapper.py,sha256=KRw_VvFJYvd2OBj4K1sYEXxUwZk9QghMw3NsgjKIAGk,1857
+ tico/experimental/quantization/ptq/wrappers/ptq_wrapper.py,sha256=F9sK_DiRaXiGNHULcwIbs5EUtHz6ZJ7N4r5CWTTfhsM,2442
  tico/experimental/quantization/ptq/wrappers/quant_elementwise.py,sha256=LhEoobfvto6zKrBOKL4gmxfFFc31jHzyQV_zfps-iQM,3604
  tico/experimental/quantization/ptq/wrappers/quant_module_base.py,sha256=vkcDos_knGSS29rIZuEIWkAJLHrENbGz8nCH2-iara8,5969
- tico/experimental/quantization/ptq/wrappers/registry.py,sha256=562nKSlp9qF-w4-aQeJbx2V_wMGE2FRrjIKUfRwC4Mg,4571
+ tico/experimental/quantization/ptq/wrappers/registry.py,sha256=utZY381tuKYyUITptUfx4_0V-jprSjvEXpGtfa4cPnI,4638
+ tico/experimental/quantization/ptq/wrappers/llama/__init__.py,sha256=vVdVj7HMjxRPAYEO4DEg1l_7qztMmGCGCe7GbniCzrM,115
+ tico/experimental/quantization/ptq/wrappers/llama/quant_mlp.py,sha256=uZMnrX66oZwxhKhcNbLXXeri-WxxRBiZnr15aBXJMm0,3562
  tico/experimental/quantization/ptq/wrappers/nn/__init__.py,sha256=I9uTt5HfcRoMEDYHpAeATMv2TbCQiX0ZbfUFMzSJ4Qw,336
  tico/experimental/quantization/ptq/wrappers/nn/quant_layernorm.py,sha256=G5Sgt-tXnzh0Rxyk-2honmZIfEQOZlRfOsoDBdSGmA4,6887
  tico/experimental/quantization/ptq/wrappers/nn/quant_linear.py,sha256=xW-VEPB7RJoslS3xLVCdhIuMjppknvpkZleRGK4JFVQ,2240
@@ -233,9 +238,9 @@ tico/utils/mx/__init__.py,sha256=IO6FP_xYbGy0dW0HL26GXD3ouxARaxCK7bz9dn4blPQ,26
  tico/utils/mx/elemwise_ops.py,sha256=V6glyAHsVR1joqpsgnNytatCD_ew92xNWZ19UFDoMTA,10281
  tico/utils/mx/formats.py,sha256=uzNWyu-1onUlwQfX5cZ6fZSUfHMRqorper7_T1k3jfk,3404
  tico/utils/mx/mx_ops.py,sha256=RcfUTYVi-wilGB2sC35OeARdwDqnixv7dG5iyZ-fQT8,8555
- tico-0.1.0.dev250825.dist-info/LICENSE,sha256=kp4JLII7bzRhPb0CPD5XTDZMh22BQ7h3k3B7t8TiSbw,12644
- tico-0.1.0.dev250825.dist-info/METADATA,sha256=7wBkNIwJG_prscPdY7Rn_Muit4OuPN29Q8C_isHlEdI,8450
- tico-0.1.0.dev250825.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
- tico-0.1.0.dev250825.dist-info/entry_points.txt,sha256=kBKYSS_IYrSXmUYevmmepqIVPScq5vF8ulQRu3I_Zf0,59
- tico-0.1.0.dev250825.dist-info/top_level.txt,sha256=oqs7UPoNSKZEwqsX8B-KAWdQwfAa7i60pbxW_Jk7P3w,5
- tico-0.1.0.dev250825.dist-info/RECORD,,
+ tico-0.1.0.dev250827.dist-info/LICENSE,sha256=kp4JLII7bzRhPb0CPD5XTDZMh22BQ7h3k3B7t8TiSbw,12644
+ tico-0.1.0.dev250827.dist-info/METADATA,sha256=dkNactHh3C0pzY9_mblMYxzAdqWIRbFTre9sBiKfDAo,8450
+ tico-0.1.0.dev250827.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+ tico-0.1.0.dev250827.dist-info/entry_points.txt,sha256=kBKYSS_IYrSXmUYevmmepqIVPScq5vF8ulQRu3I_Zf0,59
+ tico-0.1.0.dev250827.dist-info/top_level.txt,sha256=oqs7UPoNSKZEwqsX8B-KAWdQwfAa7i60pbxW_Jk7P3w,5
+ tico-0.1.0.dev250827.dist-info/RECORD,,