ai-edge-quantizer-nightly 0.1.0.dev20250327__py3-none-any.whl → 0.1.0.dev20250329__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -170,6 +170,7 @@ register_config_check_policy_func(
 
 DEQUANTIZED_WEIGHT_RECOVERY_OP_NAME_MATERIALIZE_FUNC_DICT = {
     _TFLOpName.FULLY_CONNECTED: common_quantize.materialize_fc_conv,
+    _TFLOpName.CONV_2D: common_quantize.materialize_fc_conv,
     _TFLOpName.EMBEDDING_LOOKUP: common_quantize.materialize_embedding_lookup,
 }
 
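The practical effect of this one-line change is that CONV_2D weights now take the same dequantized-weight-recovery materialization path as FULLY_CONNECTED. A minimal, self-contained sketch of the dispatch pattern, using stand-in names (the real table lives in algorithm_manager.py and points at common_quantize.materialize_fc_conv):

```python
# Illustrative stand-in only; the real function comes from common_quantize.
def materialize_fc_conv(tensor_name: str) -> str:
  return f"recovered weights for {tensor_name}"

# Both op types now resolve to the same materialization function.
OP_NAME_MATERIALIZE_FUNC_DICT = {
    "FULLY_CONNECTED": materialize_fc_conv,
    "CONV_2D": materialize_fc_conv,  # newly registered in this release
}

print(OP_NAME_MATERIALIZE_FUNC_DICT["CONV_2D"]("conv/weights"))
```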
@@ -70,17 +70,17 @@ def _get_scale(arr: np.ndarray, min_scale: float) -> float:
   return min_scale
 
 
-def get_zp_scale_from_2d_dequantized_symmetric_weights(
+def get_zp_scale_from_dequantized_symmetric_weights(
     dequant_vals: np.ndarray,
     quantized_dimension: Optional[int] = None,
     min_scale: float = 1e-9,
 ) -> tuple[np.ndarray, np.ndarray]:
-  """Calculates scale and zero point from 2D dequantized, symmetric weights.
+  """Calculates scale and zero point from dequantized and symmetric weights.
 
   Handles both per-tensor and per-channel (axis) quantization.
 
   Args:
-    dequant_vals: The 2D dequantized weight values (numpy array).
+    dequant_vals: The dequantized weight values (numpy array).
     quantized_dimension: The dimension along which quantization was performed
       (0 or 1), or None for per-tensor quantization.
     min_scale: The minimum allowed scale value.
@@ -91,15 +91,9 @@ def get_zp_scale_from_2d_dequantized_symmetric_weights(
       - scales: Scales (scalar for per-tensor, array for per-channel).
 
   Raises:
-    ValueError: If `dequant_vals` is not 2D, or if
-      `quantized_dimension` is not 0, 1, or None.
+    ValueError: If `quantized_dimension` is not 0, 1, or None.
   """
 
-  if dequant_vals.ndim != 2:
-    raise ValueError(
-        f"Only 2D weights are supported. Got {dequant_vals.ndim} dimensions."
-    )
-
   if quantized_dimension not in (0, 1, None):
     raise ValueError(
         f"quantized_dimension must be 0, 1, or None. Got {quantized_dimension}"
@@ -112,23 +106,26 @@ def get_zp_scale_from_2d_dequantized_symmetric_weights(
     # Per-tensor quantization: One scale for the entire tensor.
     scales = _get_scale(dequant_vals.flatten(), min_scale)
     scales = np.array([[scales]])
-
   else:
     # Per-channel quantization: A scale for each slice along the dimension.
-    scales = []
-    for i in range(dequant_vals.shape[quantized_dimension]):
-      if quantized_dimension == 0:
-        vec = dequant_vals[i, :]
-      else:  # quantized_dimension == 1
-        vec = dequant_vals[:, i]
-      scales.append(_get_scale(vec, min_scale))
-
-    # Reshape for correct broadcasting.
-    scales = (
-        np.array(scales).reshape(-1, 1)
-        if quantized_dimension == 0
-        else np.array(scales).reshape(1, -1)
+    # Create a broadcasted array for per-channel scales. It should have the same
+    # number of dimensions as the input, with 1 in all dimensions except for the
+    # quantized dimension, which retains its original size.
+    scales = np.empty(
+        tuple(
+            [
+                1
+                if i != quantized_dimension
+                else dequant_vals.shape[quantized_dimension]
+                for i in range(dequant_vals.ndim)
+            ]
+        )
     )
+    for i in range(dequant_vals.shape[quantized_dimension]):
+      slices = [slice(None)] * dequant_vals.ndim
+      slices[quantized_dimension] = i
+      vec = dequant_vals[tuple(slices)]
+      scales[tuple(slices)] = _get_scale(vec, min_scale)
 
   zero_points = np.zeros_like(scales, dtype=np.int32)
   return zero_points, scales
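The shape construction above generalizes the old 2D-only reshape: the scales array keeps the input's rank, with size 1 everywhere except the quantized dimension, so it broadcasts cleanly against the weights. A standalone sketch of the same construction, with `_get_scale` stubbed as a hypothetical max-abs recovery since its real body is outside this diff:

```python
import numpy as np

def _get_scale(arr: np.ndarray, min_scale: float = 1e-9) -> float:
  # Hypothetical stand-in; the real _get_scale (not shown in this diff)
  # recovers the quantization step from the dequantized values.
  return max(float(np.abs(arr).max()) / 127.0, min_scale)

dequant_vals = np.arange(12, dtype=np.float32).reshape(3, 2, 2)
quantized_dimension = 1

# Same shape construction as the new code: rank is preserved, all dims are 1
# except the quantized dimension, which keeps its original size.
scales = np.empty(
    tuple(
        1 if i != quantized_dimension else dequant_vals.shape[i]
        for i in range(dequant_vals.ndim)
    )
)
for i in range(dequant_vals.shape[quantized_dimension]):
  slices = [slice(None)] * dequant_vals.ndim
  slices[quantized_dimension] = i
  scales[tuple(slices)] = _get_scale(dequant_vals[tuple(slices)])

print(scales.shape)  # (1, 2, 1): broadcastable against the (3, 2, 2) weights
```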
@@ -153,7 +150,7 @@ def get_tensor_quant_params(
 
   Raises:
     ValueError: If the quantization granularity is blockwise, or if the tensor
-      is not a 2D symmetric weight tensor.
+      is not a symmetric weight tensor.
   """
   # Fallback to naive_min_max_quantize.py for non-weight tensors.
   if tensor_content is None:
@@ -166,10 +163,9 @@ def get_tensor_quant_params(
         "Blockwise quantization is not supported for dequantized weight"
         " recovery."
     )
-  if tensor_content.ndim != 2 or not tensor_quant_config.symmetric:
+  if not tensor_quant_config.symmetric:
     raise ValueError(
-        "Only 2D symmetric weights are supported for dequantized weight"
-        " recovery."
+        "Only symmetric weights are supported for dequantized weight recovery."
     )
 
   quantized_dim = None
@@ -178,7 +174,7 @@ def get_tensor_quant_params(
       op_info, tensor_content
   )
 
-  zp, scale = get_zp_scale_from_2d_dequantized_symmetric_weights(
+  zp, scale = get_zp_scale_from_dequantized_symmetric_weights(
      dequant_vals=tensor_content,
      quantized_dimension=quantized_dim,
   )
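With the 2D restriction gone, the renamed helper accepts weight tensors of any rank, as long as the quantized dimension is 0, 1, or None. A hedged usage sketch mirroring the new 3D test case (assumes an installed ai_edge_quantizer; the synthetic weights are constructed so the original scales are recoverable):

```python
import numpy as np
from ai_edge_quantizer.algorithms.uniform_quantize import (
    dequantized_weight_recovery,
)

rng = np.random.default_rng(0)
q = rng.integers(-127, 128, size=(3, 2, 2))  # integer weights, 3 channels
scale = np.array([0.1875, 1e-4, 12.3]).reshape(3, 1, 1)
dequant_vals = scale * q  # simulate dequantized, symmetric weights

zp, recovered = (
    dequantized_weight_recovery.get_zp_scale_from_dequantized_symmetric_weights(
        dequant_vals, quantized_dimension=0
    )
)
print(zp.shape, recovered.shape)  # both (3, 1, 1); zp is all zeros
```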
@@ -62,7 +62,7 @@ class DequantizedWeightRecoveryTest(parameterized.TestCase):
   ):
     dequant_vals = scale * self._dummy_quantized_weights
     zp, recovered_scale = (
-        dequantized_weight_recovery.get_zp_scale_from_2d_dequantized_symmetric_weights(
+        dequantized_weight_recovery.get_zp_scale_from_dequantized_symmetric_weights(
            dequant_vals, quantized_dimension
        )
    )
@@ -72,17 +72,40 @@ class DequantizedWeightRecoveryTest(parameterized.TestCase):
     self.assertEqual(np.sum(zp), 0)
     self.assertEqual(zp.shape, scale.shape)
 
-  def test_tensor_zp_scale_from_2d_dequantized_symmetric_weights_raises_error_for_non_2d_weights(
-      self,
+  @parameterized.named_parameters(
+      dict(
+          testcase_name="per-tensor-recovery",
+          quantized_dimension=None,
+          scale=np.array([0.1875]).reshape(1, 1),
+      ),
+      dict(
+          testcase_name="channel0-recovery",
+          quantized_dimension=0,
+          scale=np.array([0.1875, 1e-4, 12.3]).reshape(3, 1, 1),
+      ),
+      dict(
+          testcase_name="channel1-recovery",
+          quantized_dimension=1,
+          scale=np.array([0.003, 1.234]).reshape(1, 2, 1),
+      ),
+  )
+  def test_tensor_zp_scale_from_3d_dequantized_symmetric_weights_success(
+      self, quantized_dimension, scale
   ):
-    weights_3d = self._dummy_quantized_weights.reshape(1, 3, 4)
-    weights_3d = weights_3d * 1.02
-    with self.assertRaisesRegex(
-        ValueError, "Only 2D weights are supported. Got 3 dimensions."
-    ):
-      dequantized_weight_recovery.get_zp_scale_from_2d_dequantized_symmetric_weights(
-          weights_3d, quantized_dimension=None
-      )
+    dequant_vals = scale * self._dummy_quantized_weights.reshape(3, 2, 2)
+    zp, recovered_scale = (
+        dequantized_weight_recovery.get_zp_scale_from_dequantized_symmetric_weights(
+            dequant_vals, quantized_dimension
+        )
+    )
+    with self.subTest("shapes_match"):
+      self.assertEqual(recovered_scale.shape, scale.shape)
+      self.assertEqual(zp.shape, scale.shape)
+    with self.subTest("scale_value_match"):
+      self.assertSequenceAlmostEqual(recovered_scale.flatten(), scale.flatten())
+    with self.subTest("zp_is_zero"):
+      # Zero point should be zero for symmetric quantization.
+      self.assertEqual(np.sum(zp), 0)
 
   @parameterized.named_parameters(
       dict(testcase_name="negative_dimension", quantized_dimension=-1),
@@ -95,7 +118,7 @@ class DequantizedWeightRecoveryTest(parameterized.TestCase):
     with self.assertRaisesRegex(
         ValueError, "quantized_dimension must be 0, 1, or None. Got"
     ):
-      dequantized_weight_recovery.get_zp_scale_from_2d_dequantized_symmetric_weights(
+      dequantized_weight_recovery.get_zp_scale_from_dequantized_symmetric_weights(
          dequant_vals, quantized_dimension
      )
 
@@ -394,14 +394,6 @@ def _compatible_tensor_params(
   ]
   if _same_tensor_params_except_id(params1, params2):
     return True
-  if (
-      params1.transformations[0] != _QuantTrans.NO_QUANTIZE
-      and params2.transformations[0] != _QuantTrans.NO_QUANTIZE
-  ):
-    # NO_QUANTIZE has no parameters. So only if both params aren't NO_QUANTIZE
-    # do we expect the parameters to be the same.
-    if params1.parameters != params2.parameters:
-      return False
   # We only need to check the first transformation because transformations are
   # applied in order, and as long as the one that's immediately after the tensor
   # is the same, it's compatible.
@@ -413,6 +405,7 @@ def _compatible_tensor_params(
   if (
       params1.transformations[0] in quantized_source_transformations
       and params2.transformations[0] in quantized_source_transformations
+      and params1.parameters == params2.parameters
   ):
     return True
   return False
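Net effect of the two hunks above: parameter equality is no longer demanded up front for any pair of non-NO_QUANTIZE transformations; it is folded into the quantized-source branch. That is why two consumers that merely ADD_QUANTIZE with different numerics (the new 'compatible_no_numeric_check' test below) now count as compatible. A simplified, hypothetical distillation of just the branch this diff touches (set membership and names are stand-ins, not the library's actual values):

```python
# Hypothetical stand-ins for names used in params_generator.py.
QUANTIZED_SOURCE_TRANSFORMATIONS = {"QUANTIZE_TENSOR"}  # assumed membership

def quantized_source_branch(trans1, params1, trans2, params2) -> bool:
  """Isolated sketch of the modified branch only.

  Parameters must now match *only* when both tensors are quantized at the
  source; the removed code rejected any parameter mismatch between two
  non-NO_QUANTIZE transformations.
  """
  return (
      trans1 in QUANTIZED_SOURCE_TRANSFORMATIONS
      and trans2 in QUANTIZED_SOURCE_TRANSFORMATIONS
      and params1 == params2
  )
```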
@@ -914,6 +914,58 @@ class ParamsGeneratorTest(parameterized.TestCase):
           ),
           expected=True,
       ),
+      dict(
+          testcase_name='compatible_no_numeric_check',
+          param1=qtyping.TensorTransformationParams(
+              tensor_name='tfl.quantize',
+              producer=None,
+              consumers=[
+                  qtyping.OpToTensorParams(
+                      subgraph_op_id=4,
+                      transformations=[
+                          qtyping.QuantTransformation.ADD_QUANTIZE,
+                      ],
+                      parameters=qtyping.UniformQuantParams(
+                          8, None, np.array([0.00028806]), np.array([0])
+                      ),
+                  ),
+                  qtyping.OpToTensorParams(
+                      subgraph_op_id=5,
+                      transformations=[
+                          qtyping.QuantTransformation.ADD_QUANTIZE,
+                      ],
+                      parameters=qtyping.UniformQuantParams(
+                          8, None, np.array([0.00027501]), np.array([0])
+                      ),
+                  ),
+              ],
+          ),
+          param2=qtyping.TensorTransformationParams(
+              tensor_name='tfl.quantize',
+              producer=None,
+              consumers=[
+                  qtyping.OpToTensorParams(
+                      subgraph_op_id=4,
+                      transformations=[
+                          qtyping.QuantTransformation.ADD_QUANTIZE,
+                      ],
+                      parameters=qtyping.UniformQuantParams(
+                          8, None, np.array([0.00028806]), np.array([0])
+                      ),
+                  ),
+                  qtyping.OpToTensorParams(
+                      subgraph_op_id=5,
+                      transformations=[
+                          qtyping.QuantTransformation.ADD_QUANTIZE,
+                      ],
+                      parameters=qtyping.UniformQuantParams(
+                          8, None, np.array([0.00027501]), np.array([0])
+                      ),
+                  ),
+              ],
+          ),
+          expected=True,
+      ),
   )
   def test_params_compatible(self, param1, param2, expected):
     # adding a test to make production coverage happy.
121
121
  return flatbuffer_quantization
122
122
 
123
123
 
124
+ def _downcast_and_truncate_scale(input_scale: np.ndarray) -> np.ndarray:
125
+ """Given a fp32 scale, downcast it to fp16 and truncate mantissa to 7 bits.
126
+
127
+ CPU kernel can only utilize 7 bits of mantissa for fp16, so we want to produce
128
+ scale this way to unify behaviours across different platforms.
129
+
130
+ Args:
131
+ input_scale: The input scale in fp32.
132
+
133
+ Returns:
134
+ The downcasted & truncated scale in fp16.
135
+ """
136
+
137
+ # A regular fp16 has 10 bits of mantissa, so we need to zero out the 3 least
138
+ # significant bits.
139
+ return (
140
+ input_scale.astype(np.float16).view(dtype=np.uint16) & np.uint16(0xFFF8)
141
+ ).view(dtype=np.float16)
142
+
143
+
124
144
  def _perform_blockwise_quantization(
125
145
  transformation_input: transformation_utils.TransformationInput,
126
146
  ) -> schema_py_generated.QuantizationParametersT():
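The masking arithmetic is worth sanity-checking: an IEEE fp16 value is 1 sign bit, 5 exponent bits, and 10 mantissa bits, so ANDing the raw bits with 0xFFF8 clears the 3 low mantissa bits and leaves 7. A standalone reproduction of the new helper (same logic, lifted out of the module so it can be run directly):

```python
import numpy as np

def downcast_and_truncate_scale(input_scale: np.ndarray) -> np.ndarray:
  # fp16 layout: 1 sign + 5 exponent + 10 mantissa bits. Masking with
  # 0xFFF8 zeroes the 3 least-significant mantissa bits, leaving 7.
  return (
      input_scale.astype(np.float16).view(np.uint16) & np.uint16(0xFFF8)
  ).view(np.float16)

scale = np.array([0.00028806], dtype=np.float32)
print(scale.astype(np.float16))            # plain fp16 downcast
print(downcast_and_truncate_scale(scale))  # coarser, 7-bit-mantissa value
```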
@@ -142,9 +162,13 @@ def _perform_blockwise_quantization(
     )
   tensor = transformation_input.subgraph.tensors[transformation_input.tensor_id]
   blockwise_details = schema_py_generated.BlockwiseQuantizationT()
+  # Downcast and truncate the scale to fp16.
+  downcasted_scale = _downcast_and_truncate_scale(
+      transformation_input.quant_params.scale
+  )
   scale_tensor_id = transformation_utils.add_new_constant_tensor(
       tensor.name + b"_scales",
-      transformation_input.quant_params.scale.astype(np.float16),
+      downcasted_scale,
       schema_py_generated.TensorType.FLOAT16,
       transformation_input.subgraph,
       transformation_input.buffers,
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-quantizer-nightly
-Version: 0.1.0.dev20250327
+Version: 0.1.0.dev20250329
 Summary: A quantizer for advanced developers to quantize converted AI Edge models.
 Home-page: https://github.com/google-ai-edge/ai-edge-quantizer
 Keywords: On-Device ML,AI,Google,TFLite,Quantization,LLMs,GenAI
@@ -1,5 +1,5 @@
 ai_edge_quantizer/__init__.py,sha256=4pFSkukSwahYyzwqia0yPRyz8TnFQfGRthVJhYpMWas,793
-ai_edge_quantizer/algorithm_manager.py,sha256=sOZ1T8n0YYi_ijDDuzryNJi2HUPggeo9uWNJri3elv0,10431
+ai_edge_quantizer/algorithm_manager.py,sha256=0uootLsVD6h9ph9TrnXZMI-ExkX8UvXSV0lbWxBLybU,10492
 ai_edge_quantizer/algorithm_manager_api.py,sha256=u903TG0s1uIDhJqfeJne3CFl8A93phZrwgV2-hwdcXU,9247
 ai_edge_quantizer/algorithm_manager_api_test.py,sha256=tL_ozYFTsOPX8qGcti0KTz37nVsCxf0SSG5C45SyT-g,7319
 ai_edge_quantizer/calibrator.py,sha256=n7AD9j7UScR-CieoI6DQRMeiG_fhLBfSLRiM4460xaM,11895
@@ -10,8 +10,8 @@ ai_edge_quantizer/model_modifier.py,sha256=SPt9X-xBzRvcd4xIS24zLHt3aUS2QwsNDqweF
 ai_edge_quantizer/model_modifier_test.py,sha256=cJd04SLOG-fQZZNZPcisoBLx3cLtWEwGqUBbLb-pif4,4751
 ai_edge_quantizer/model_validator.py,sha256=fRNz0jO54cthPTibsCuViUXUuFRHl_fbvEiCukIVy20,13030
 ai_edge_quantizer/model_validator_test.py,sha256=EeqOP_mrZsnZ3rug756s0ryDDqd2KgIDld5Lm_gDuWY,13020
-ai_edge_quantizer/params_generator.py,sha256=f-KhJMFdRv2oHxfM8tAANPOtfBMw8vD7Vjv0rYQbnF4,16062
-ai_edge_quantizer/params_generator_test.py,sha256=zmDS6jG5zKhHL_hzJw2wlMTx1LLcNCK6S5WlwogWF-A,41122
+ai_edge_quantizer/params_generator.py,sha256=46XDjnP4R3m4xsoXNp7brv0sNQPdQMg217_CbEl-Wgg,15780
+ai_edge_quantizer/params_generator_test.py,sha256=9WTUl87XqbM4NruX5ypLuVRtuhcw-CmxndsMOUzZ92Q,43171
 ai_edge_quantizer/qtyping.py,sha256=UBZ3HgO8IDLY6VJmO05rGtFv_idMD3Os3WWsnriA0NA,15235
 ai_edge_quantizer/quantizer.py,sha256=g3DMqFMrMpt9jQttCE0WcdNbMtk0JZnmN5MmCHrNdyM,13202
 ai_edge_quantizer/quantizer_test.py,sha256=K_HBA56JkFI3HL8VLWCqGEfC0ISh5ldMKoNyBdGRAJg,20368
@@ -30,8 +30,8 @@ ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting_test.py,sha256=s64
 ai_edge_quantizer/algorithms/uniform_quantize/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
 ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=SVu1RSX5xOWhuNEi9hHqgIDGe_ywyHBZAczp7KAcl3k,27220
 ai_edge_quantizer/algorithms/uniform_quantize/common_quantize_test.py,sha256=qMmKbWqxrCoVKbLKHn9WuCrGKPfHkEyU0Nmhokh8Qeo,2597
-ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py,sha256=OTXjEZ3Ctq3ffYzisX-6HwgK_DuA7uos_aap5PiIUPE,8686
-ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery_test.py,sha256=y7BK11fkF63Ex_Jzg3fbIdy0D_Ca6HuvChVZR7Uwggc,8073
+ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py,sha256=Fk3s9Qy2A_hjUepFOUmTwIZ_wKYVPbdDX4eoP-eoAQU,8726
+ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery_test.py,sha256=sT5eX5TLZEHTtPfnSkCPDlS0sQxlTFWbCsbvOuj--yY,8889
 ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize.py,sha256=cbyyYAoQnEraOYSV00wZ557ElBndHduVGeHikYUEFCE,7995
 ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize_test.py,sha256=B30SEISYZ9DPs3suKeG2elgXylR98pCEMWSEGgZo20o,7648
 ai_edge_quantizer/algorithms/uniform_quantize/octav.py,sha256=e5wYtki-vl739gSVAZHAKcs2hA87GvFUjVoSUPlnkyM,6433
@@ -50,7 +50,7 @@ ai_edge_quantizer/transformations/emulated_subchannel.py,sha256=HVaRxoC8PCAvy3xe
 ai_edge_quantizer/transformations/emulated_subchannel_test.py,sha256=gZP6u9NdPXl7s19qB_Un8evou9ZZV6I9Gy0E1rdobHM,7722
 ai_edge_quantizer/transformations/quant_insert.py,sha256=jn6HsJaV-sqBiFPY-Aqbd64t8zgcYVkEkZI375x_FWY,3958
 ai_edge_quantizer/transformations/quant_insert_test.py,sha256=X9ptPDvJCFkR5tejKnD1SlHFGPazQTW-wNNMV9MEAuw,10107
-ai_edge_quantizer/transformations/quantize_tensor.py,sha256=vzKtrXILqVsr1NGlribhdtKEIsXA93o37embLRe9TwQ,7493
+ai_edge_quantizer/transformations/quantize_tensor.py,sha256=y6As38mTzhva50YvNQ7p0SFpuWet3LPqFwE3qIO0gEQ,8231
 ai_edge_quantizer/transformations/quantize_tensor_test.py,sha256=mHLO3_MRt36A8-ZN8ADn5tBBJlqjTWa7ZUN8Mmu5Rcw,9116
 ai_edge_quantizer/transformations/transformation_utils.py,sha256=R42OIbzwQ7JYJ-Qt46jsqwb6u4MfDGiIPCRZCUGLVCw,4664
 ai_edge_quantizer/transformations/transformation_utils_test.py,sha256=xH64SF3UHDh84vYbt-WvmXNjM-Jg-mefES1ACO1tkqw,6269
@@ -64,8 +64,8 @@ ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=x2xA2CFPpe_2trcV8v5xGaBE
 ai_edge_quantizer/utils/tfl_interpreter_utils_test.py,sha256=Op3JxtOqlrjzmYF18jnnstL1k9xiY9kKJ8S2vklKGkc,11327
 ai_edge_quantizer/utils/validation_utils.py,sha256=oYw33Sg547AqtGw-choPUJmp9SAKkV46J_ddqSsum2Q,3950
 ai_edge_quantizer/utils/validation_utils_test.py,sha256=V_qNDikPD4OPB-siOLQCWNVWTAu87h2IgNYt7teFd-o,2934
-ai_edge_quantizer_nightly-0.1.0.dev20250327.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-ai_edge_quantizer_nightly-0.1.0.dev20250327.dist-info/METADATA,sha256=KES2W7tXAwTOpymOVvoQXovvZ2eaObKUUUT5L06a2gw,1527
-ai_edge_quantizer_nightly-0.1.0.dev20250327.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-ai_edge_quantizer_nightly-0.1.0.dev20250327.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
-ai_edge_quantizer_nightly-0.1.0.dev20250327.dist-info/RECORD,,
+ai_edge_quantizer_nightly-0.1.0.dev20250329.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ai_edge_quantizer_nightly-0.1.0.dev20250329.dist-info/METADATA,sha256=3GoKN9dRRW0IhcJA2xnqAhU3znEgEzkDec2STODYtLA,1527
+ai_edge_quantizer_nightly-0.1.0.dev20250329.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ai_edge_quantizer_nightly-0.1.0.dev20250329.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
+ai_edge_quantizer_nightly-0.1.0.dev20250329.dist-info/RECORD,,