ai-edge-quantizer-nightly 0.4.0.dev20251105__py3-none-any.whl → 0.4.0.dev20251107__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py +1 -1
- ai_edge_quantizer/algorithms/utils/common_utils.py +42 -26
- ai_edge_quantizer/calibrator.py +1 -50
- ai_edge_quantizer/calibrator_test.py +2 -67
- ai_edge_quantizer/quantizer_test.py +3 -4
- {ai_edge_quantizer_nightly-0.4.0.dev20251105.dist-info → ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info}/METADATA +1 -1
- {ai_edge_quantizer_nightly-0.4.0.dev20251105.dist-info → ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info}/RECORD +10 -10
- {ai_edge_quantizer_nightly-0.4.0.dev20251105.dist-info → ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info}/WHEEL +0 -0
- {ai_edge_quantizer_nightly-0.4.0.dev20251105.dist-info → ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info}/licenses/LICENSE +0 -0
- {ai_edge_quantizer_nightly-0.4.0.dev20251105.dist-info → ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info}/top_level.txt +0 -0
ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py
CHANGED
@@ -465,7 +465,7 @@ def tensor_zp_scale_from_min_max(
       signed=True,
   )
   qmin, qmax = get_quantized_range(qtype)
-  min_bound = 1e-
+  min_bound = 1e-9  # Avoid zero scale.
   pos_clipping_values = None if clipping_values is None else clipping_values
   neg_clipping_values = None if clipping_values is None else -clipping_values
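Note: the new 1e-9 lower bound keeps the scale strictly positive. A minimal standalone sketch (not the library implementation) of the failure it prevents: with min == max == 0, a symmetric scale computed as max(|min|, |max|) / qmax would be exactly zero, and a later quantize step would divide by zero.

def symmetric_scale(min_val, max_val, qmax=127, min_bound=1e-9):
  """Returns a strictly positive quantization scale."""
  bound = max(abs(min_val), abs(max_val), min_bound)  # clamp away from zero
  return bound / qmax

print(symmetric_scale(0.0, 0.0))   # ~7.9e-12 instead of 0.0
print(symmetric_scale(-1.0, 1.0))  # ~0.0079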
ai_edge_quantizer/algorithms/utils/common_utils.py
CHANGED
@@ -366,11 +366,28 @@ def _materialize_standard_op_with_same_as_input_scale(
 
   # Change output qsv to be the same as input qsv. This is safe since TFL
   # subgraph is acyclic.
-  input_tensor_qsv = tensor_name_to_qsv
-
-
-
+  input_tensor_qsv = tensor_name_to_qsv.get(
+      input_tensor_params.tensor_name, None
+  )
+  if input_tensor_qsv is None:
+    input_tensor_data = tfl_flatbuffer_utils.get_tensor_data(
+        input_tensors[0], graph_info.buffers
     )
+    # If the input tensor is a constant tensor without qsv, compute qsv from
+    # its quant params.
+    if input_tensor_data is None:
+      # If the only input to an op that needs to match input to
+      # output has no qsv and is not a constant tensor, then this is an error.
+      raise ValueError(
+          "Input tensor qsv is None for tensor"
+          f" {input_tensor_params.tensor_name}."
+      )
+    min_val, max_val = _get_min_max_from_quant_params(input_quant_params)
+    input_tensor_qsv = {"min": min_val, "max": max_val}
+  for output_tensor in output_tensors:
+    tensor_name_to_qsv[
+        tfl_flatbuffer_utils.get_tensor_name(output_tensor)
+    ] = input_tensor_qsv
 
   return op_tensor_params
 
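Note: a self-contained restatement of the fallback added above (toy helper with assumed arguments, not the library API). An input that already has a calibrated QSV uses it directly; a constant input (buffer data present) falls back to min/max derived from its quant params; a non-constant input with no QSV raises.

import numpy as np

def resolve_input_qsv(tensor_name, tensor_name_to_qsv, tensor_data,
                      quant_param_min_max):
  """Returns the QSV to copy onto same-as-input-scale outputs."""
  qsv = tensor_name_to_qsv.get(tensor_name)
  if qsv is not None:
    return qsv
  if tensor_data is None:  # not a constant tensor, so nothing to derive from
    raise ValueError(f"Input tensor qsv is None for tensor {tensor_name}.")
  min_val, max_val = quant_param_min_max
  return {"min": min_val, "max": max_val}

# Constant input with quant params but no calibrated QSV:
print(resolve_input_qsv(
    "const_input", {}, np.zeros((4,)), (np.array(-6.35), np.array(6.35))
))  # {'min': array(-6.35), 'max': array(6.35)}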
@@ -694,6 +711,26 @@ def _add_non_match_tensors_to_ignored_lists(
   return inputs_to_ignore, outputs_to_ignore
 
 
+def _get_min_max_from_quant_params(
+    quant_params: qtyping.UniformQuantParams,
+) -> tuple[np.ndarray, np.ndarray]:
+  """Recalculate min/max from tensor quantization params."""
+  q_min, q_max = uniform_quantize_tensor.get_quantized_range(
+      _IntType(quant_params.num_bits, True)
+  )
+  float_min = uniform_quantize_tensor.uniform_dequantize(
+      np.array(q_min), quant_params
+  )
+  float_max = uniform_quantize_tensor.uniform_dequantize(
+      np.array(q_max), quant_params
+  )
+  # We use qmax values to compute scale for symmetric quantization (see
+  # uniform_quantize_tensor.tensor_zp_scale_from_min_max).
+  if quant_params.symmetric:
+    float_min = -float_max
+  return float_min, float_max
+
+
 def materialize_standard_op(
     op_info: qtyping.OpInfo,
     graph_info: qtyping.GraphInfo,
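Note: a worked standalone example of the helper's arithmetic (assumed scalar params, not the library API). Dequantizing the integer endpoints with float = (q - zero_point) * scale recovers the float range; in the symmetric case the scale was derived from q_max alone, so the minimum mirrors q_max instead of coming from q_min.

def min_max_from_quant_params(num_bits, scale, zero_point, symmetric):
  q_min = -(2 ** (num_bits - 1))    # -128 for int8
  q_max = 2 ** (num_bits - 1) - 1   # 127 for int8
  float_min = (q_min - zero_point) * scale
  float_max = (q_max - zero_point) * scale
  if symmetric:
    # The scale came from q_max alone, so mirror q_max rather than use q_min.
    float_min = -float_max
  return float_min, float_max

print(min_max_from_quant_params(8, 0.1, -28, symmetric=False))  # (-10.0, 15.5)
print(min_max_from_quant_params(8, 0.05, 0, symmetric=True))    # (-6.35, 6.35)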
@@ -860,8 +897,6 @@ def materialize_op_with_output_activation_constraint(
   output_tensor_params.producer = op_tensor_params
   # Update the tensor_name_to_qsv map using the output activation constraints.
   min_val, max_val = _get_min_max_from_quant_params(
-      activation_num_bits,
-      activation_tensor_config.symmetric,
       fixed_quant_params,
   )
   tensor_name_to_qsv[output_tensor_params.tensor_name]["min"] = min_val
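Note: the two dropped arguments are redundant because a UniformQuantParams-style record already carries num_bits and symmetric, so the refactored helper reads them off the params object. A hedged sketch of the shape involved (field subset assumed, not the full qtyping class):

import dataclasses
import numpy as np

@dataclasses.dataclass(frozen=True)
class UniformQuantParamsSketch:  # assumed subset of qtyping.UniformQuantParams
  num_bits: int
  symmetric: bool
  scale: np.ndarray
  zero_point: np.ndarray

params = UniformQuantParamsSketch(8, True, np.array([0.05]), np.array([0]))
# Old call shape: _get_min_max_from_quant_params(8, True, params)
# New call shape: _get_min_max_from_quant_params(params)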
@@ -1024,23 +1059,4 @@ def get_bmm_weight_quantized_dim(
   return rank - 1
 
 
-def _get_min_max_from_quant_params(
-    num_bits: int,
-    symmetric: bool,
-    tensor_params: qtyping.UniformQuantParams,
-) -> tuple[float, float]:
-  """Recalculate min/max from tensor quantization params."""
-  q_min, q_max = uniform_quantize_tensor.get_quantized_range(
-      _IntType(num_bits, True)
-  )
-  float_min = uniform_quantize_tensor.uniform_dequantize(
-      np.array(q_min), tensor_params
-  )
-  float_max = uniform_quantize_tensor.uniform_dequantize(
-      np.array(q_max), tensor_params
-  )
-  # We use qmax values to compute scale for symmetric quantization (see
-  # uniform_quantize_tensor.tensor_zp_scale_from_min_max).
-  if symmetric:
-    float_min = -float_max
-  return (float_min, float_max)
+
ai_edge_quantizer/calibrator.py
CHANGED
@@ -98,9 +98,7 @@ class Calibrator:
       qsv_update_func: The function to update the QSVs.
     """
     op_codes = self._flatbuffer_model.operatorCodes
-    if not self._model_qsvs:
-      self._initialize_model_qsvs(model_recipe_manager)
-    else:
+    if self._model_qsvs:
       logging.warning(
           "Calibrator contains non-empty model qsvs, and the current"
           " calibration process will start on top of this state (i.e., update"
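Note: with the eager _initialize_model_qsvs call removed, calibrate() starts from whatever QSV state the Calibrator already holds, and the warning above fires only when that state is non-empty. A sketch of what updating "on top of this state" means for a min/max QSV (the merge rule here is an illustrative assumption, not the library's exact qsv_update_func):

import numpy as np

def update_qsv(existing, observed):
  if not existing:  # empty state: adopt the first observation
    return dict(observed)
  return {
      "min": np.minimum(existing["min"], observed["min"]),
      "max": np.maximum(existing["max"], observed["max"]),
  }

state = {}
state = update_qsv(state, {"min": np.array(-1.0), "max": np.array(2.0)})
state = update_qsv(state, {"min": np.array(-0.5), "max": np.array(3.0)})
print(state)  # {'min': array(-1.), 'max': array(3.)}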
@@ -263,50 +261,3 @@ class Calibrator:
       output_tensor = subgraph_tensors[output_tensor_idx]
       scope += tfl_flatbuffer_utils.get_tensor_name(output_tensor)
     return scope
-
-  # TODO: b/354224138 - Remove code duplication between calibrate and
-  # _initialize_model_qsvs.
-  def _initialize_model_qsvs(
-      self, model_recipe_manager: recipe_manager.RecipeManager
-  ) -> None:
-    """Initialize the model qsvs.
-
-    Args:
-      model_recipe_manager: A RecipeManager object that contains the
-        quantization recipe.
-    """
-    op_codes = self._flatbuffer_model.operatorCodes
-    for subgraph in self._flatbuffer_model.subgraphs:
-      graph_info = qtyping.GraphInfo(
-          subgraph.tensors, self._flatbuffer_model.buffers
-      )
-      for subgraph_op_id, op in enumerate(subgraph.operators):
-        op_code = op_codes[op.opcodeIndex].builtinCode
-        if op_code not in tfl_flatbuffer_utils.TFL_OP_CODE_TO_NAME:
-          continue
-        op_key = tfl_flatbuffer_utils.TFL_OP_CODE_TO_NAME[op_code]
-        # Step1: query the quantization_recipe to get op quantization
-        # settings.
-        op_scope = self._get_op_scope(op, subgraph.tensors)
-        algorithm_name, op_quant_config = (
-            model_recipe_manager.get_quantization_configs(op_key, op_scope)
-        )
-        if algorithm_name == algorithm_manager.AlgorithmName.NO_QUANTIZE:
-          continue
-        # Step2: query algorithm_manager to get/call the related qsv init
-        # function.
-        qsv_init_func = algorithm_manager.get_init_qsv_func(
-            algorithm_name, op_key
-        )
-        op_info = qtyping.OpInfo(op, op_key, subgraph_op_id, op_quant_config)
-        # Ignore the input tensors where any dimension of the shape is 0.
-        inputs_to_ignore = [
-            opr_idx
-            for opr_idx, tensor_idx in enumerate(op.inputs)
-            if not np.all(graph_info.subgraph_tensors[tensor_idx].shape)
-        ]
-        op_qsvs = qsv_init_func(op_info, graph_info, inputs_to_ignore)
-        # Step3: initialize tensor qsvs.
-        for tensor_name, qsv in op_qsvs.items():
-          if tensor_name not in self._model_qsvs:
-            self._model_qsvs[tensor_name] = qsv
ai_edge_quantizer/calibrator_test.py
CHANGED
@@ -103,58 +103,6 @@ class CalibratorTest(googletest.TestCase):
     model_tensor_qsvs = self._calibrator.get_model_qsvs()
     self.assertEmpty(model_tensor_qsvs)
 
-  def test_calibrator_initialize_qsv(self):
-    _add_default_int8xint8_integer_recipe(self._recipe_manager)
-    # Overwrite the single op to fc
-    self._recipe_manager.add_quantization_config(
-        regex=".*Stateful.*",
-        operation_name=qtyping.TFLOperationName.FULLY_CONNECTED,
-        algorithm_key=_AlgorithmName.MIN_MAX_UNIFORM_QUANT,
-        op_config=qtyping.OpQuantizationConfig(
-            weight_tensor_config=_TENSOR_QUANT_CONFIG(
-                num_bits=4,
-                granularity=qtyping.QuantGranularity.CHANNELWISE,
-            ),
-            compute_precision=_ComputePrecision.INTEGER,
-        ),
-    )
-    self._calibrator._initialize_model_qsvs(self._recipe_manager)
-    model_tensor_qsvs = self._calibrator.get_model_qsvs()
-
-    self.assertLen(model_tensor_qsvs, 4)
-    self.assertIn("serving_default_input_1:0", model_tensor_qsvs)  # input
-    input_qsv = model_tensor_qsvs["serving_default_input_1:0"]
-    self.assertEmpty(input_qsv)
-
-    self.assertIn("sequential/dense/MatMul", model_tensor_qsvs)  # weight
-    weight_tensor_qsv = model_tensor_qsvs["sequential/dense/MatMul"]
-    mins_maxs_shape = (16, 1)
-    self.assertTupleEqual(weight_tensor_qsv["min"].shape, mins_maxs_shape)
-    self.assertAlmostEqual(weight_tensor_qsv["min"][0][0], -0.40436327)
-    self.assertTupleEqual(weight_tensor_qsv["max"].shape, mins_maxs_shape)
-    self.assertAlmostEqual(weight_tensor_qsv["max"][0][0], 0.46138108)
-
-    self.assertIn(
-        "sequential/dense/BiasAdd/ReadVariableOp", model_tensor_qsvs
-    )  # bias
-    bias_tensor_qsv = model_tensor_qsvs[
-        "sequential/dense/BiasAdd/ReadVariableOp"
-    ]
-    mins_maxs_shape = (16,)
-    self.assertTupleEqual(bias_tensor_qsv["min"].shape, mins_maxs_shape)
-    self.assertAlmostEqual(bias_tensor_qsv["min"][0], -0.26978338)
-    self.assertTupleEqual(bias_tensor_qsv["max"].shape, mins_maxs_shape)
-    # Here bias min/max will be the same as each element is a scalar
-    # Bias will be quantized with input_scale * weight_scale.
-    self.assertSequenceEqual(
-        list(bias_tensor_qsv["max"].flatten()),
-        list(bias_tensor_qsv["min"].flatten()),
-    )
-
-    self.assertIn("StatefulPartitionedCall:0", model_tensor_qsvs)  # output
-    output_qsv = model_tensor_qsvs["StatefulPartitionedCall:0"]
-    self.assertEmpty(output_qsv)
-
   def test_calibrate_single_fc_success(self):
     _add_default_int8xint8_integer_recipe(self._recipe_manager)
     self._calibrator.calibrate(
@@ -162,7 +110,7 @@ class CalibratorTest(googletest.TestCase):
     )
     model_tensor_qsvs = self._calibrator.get_model_qsvs()
 
-    self.assertLen(model_tensor_qsvs,
+    self.assertLen(model_tensor_qsvs, 2)
     self.assertIn("serving_default_input_1:0", model_tensor_qsvs)  # input
     input_qsv = model_tensor_qsvs["serving_default_input_1:0"]
     self.assertSequenceAlmostEqual(
@@ -171,19 +119,6 @@ class CalibratorTest(googletest.TestCase):
     self.assertSequenceAlmostEqual(
         input_qsv["max"].flatten(), [TEST_MAX_VAL], delta=1e-5
     )
-
-    self.assertIn("sequential/dense/MatMul", model_tensor_qsvs)  # weight
-    weight_qsv = model_tensor_qsvs["sequential/dense/MatMul"]
-    self.assertSequenceAlmostEqual(weight_qsv["min"].flatten(), [-0.49114203])
-    self.assertSequenceAlmostEqual(weight_qsv["max"].flatten(), [0.4903704])
-
-    self.assertIn(
-        "sequential/dense/BiasAdd/ReadVariableOp", model_tensor_qsvs
-    )  # bias
-    bias_qsv = model_tensor_qsvs["sequential/dense/BiasAdd/ReadVariableOp"]
-    self.assertSequenceAlmostEqual(bias_qsv["min"].flatten(), [-0.38401994])
-    self.assertSequenceAlmostEqual(bias_qsv["max"].flatten(), [0.31727126])
-
     self.assertIn("StatefulPartitionedCall:0", model_tensor_qsvs)  # output
     output_qsv = model_tensor_qsvs["StatefulPartitionedCall:0"]
     # Relu, only check the min
@@ -302,7 +237,7 @@ class CalibratorToyGemma2Test(googletest.TestCase):
         self._toy_gemma2_calibration_dataset,
         model_recipe_manager=recipe_mngr,
     )
-    self.assertLen(calib.get_model_qsvs(),
+    self.assertLen(calib.get_model_qsvs(), 202)
 
 
 if __name__ == "__main__":
ai_edge_quantizer/quantizer_test.py
CHANGED
@@ -212,7 +212,7 @@ class QuantizerTest(parameterized.TestCase):
     # Calibrate with empty state.
     calib_data = _get_calibration_data()
     calibration_result = self._quantizer.calibrate(calib_data)
-    self.assertLen(calibration_result,
+    self.assertLen(calibration_result, 7)
 
   @parameterized.parameters(
       'recipes/default_a8w8_recipe.json',
@@ -227,7 +227,7 @@ class QuantizerTest(parameterized.TestCase):
     updated_calibration_result = self._quantizer.calibrate(
         calib_data, previous_calibration_result=calibration_result
     )
-    self.assertLen(updated_calibration_result,
+    self.assertLen(updated_calibration_result, 7)
     self.assertNotEqual(
         calibration_result['StatefulPartitionedCall:0'],
         updated_calibration_result['StatefulPartitionedCall:0'],
@@ -599,8 +599,7 @@ class QuantizerMultiSignatureModelTest(parameterized.TestCase):
 
     # Quantize and expect an error about missing signature in calibration data.
     error_message = (
-        '
-        " 'multiply'."
+        'MUL(index: 0) not found in tensor_name_to_qsv'
     )
     with self.assertRaisesWithPredicateMatch(
        ValueError, lambda err: error_message in str(err)
{ai_edge_quantizer_nightly-0.4.0.dev20251105.dist-info → ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ai-edge-quantizer-nightly
-Version: 0.4.0.dev20251105
+Version: 0.4.0.dev20251107
 Summary: A quantizer for advanced developers to quantize converted AI Edge models.
 Home-page: https://github.com/google-ai-edge/ai-edge-quantizer
 Keywords: On-Device ML,AI,Google,TFLite,Quantization,LLMs,GenAI
{ai_edge_quantizer_nightly-0.4.0.dev20251105.dist-info → ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info}/RECORD
CHANGED
@@ -2,8 +2,8 @@ ai_edge_quantizer/__init__.py,sha256=4pFSkukSwahYyzwqia0yPRyz8TnFQfGRthVJhYpMWas
 ai_edge_quantizer/algorithm_manager.py,sha256=0jSNITKl0Ge1XeYKueOUj9brlS4B5ZcdcVQ1kZS3JKg,16518
 ai_edge_quantizer/algorithm_manager_api.py,sha256=u903TG0s1uIDhJqfeJne3CFl8A93phZrwgV2-hwdcXU,9247
 ai_edge_quantizer/algorithm_manager_api_test.py,sha256=w6bSONvXkX6bzXAGc0-7b6gNDt9oz9ieq97KP8Sg_JU,7666
-ai_edge_quantizer/calibrator.py,sha256=
-ai_edge_quantizer/calibrator_test.py,sha256=
+ai_edge_quantizer/calibrator.py,sha256=brB6ENjZFQnIzlshr0zAFo0g-XjwvD-Wsy5VasJspRU,9986
+ai_edge_quantizer/calibrator_test.py,sha256=VKK6p9M3EwSq4D7Sna2v1EFeop2zfL-Af-YiusIuyb8,8957
 ai_edge_quantizer/conftest.py,sha256=SxCz-5LlRD_lQm4hQc4c6IGG7DS8d7IyEWY9gnscPN0,794
 ai_edge_quantizer/default_policy.py,sha256=YcwwtVzoWUhjYgMtJ7b9f647740lURKteDOeJvwe17o,11384
 ai_edge_quantizer/model_modifier.py,sha256=U70JByv6CItP8tg4bdyMfX-R3UlwylAGSviZkF_FSAM,10468
@@ -14,7 +14,7 @@ ai_edge_quantizer/params_generator.py,sha256=0w-sDGk84sVNkXoduon1wDqq30sGOHVgBVb
 ai_edge_quantizer/params_generator_test.py,sha256=RDYoRZDJfEZRtjlTAU2kZ_4t3JHOqEHxfJX9V4ETAhg,40597
 ai_edge_quantizer/qtyping.py,sha256=y9KretGzUGztyLdmto2XV6U0cxrSrfLWP1UOVcwR4dY,18011
 ai_edge_quantizer/quantizer.py,sha256=teYeONdIS31IAY6ubLujCRi1t6lYAd0LkC8dRPxQdbw,18919
-ai_edge_quantizer/quantizer_test.py,sha256=
+ai_edge_quantizer/quantizer_test.py,sha256=pavS0mezRdBAGD4lqBUhj53pXJDFH3U02ldxxh5RDYQ,28407
 ai_edge_quantizer/recipe.py,sha256=MEkfQ2Sg3KAE9LAORHWcbjYNPg06EUbwc1d-VspQA2U,6461
 ai_edge_quantizer/recipe_manager.py,sha256=6l2uq8KL23KLu9OQDmPGkxrFiwHrdDB9xnn-ni8WdEM,15036
 ai_edge_quantizer/recipe_manager_test.py,sha256=gYK3haUJ8-AISQvTI6tD-E-drJXQPSXPqBZdgpc5QTo,36595
@@ -40,10 +40,10 @@ ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize.py,sha256=3
 ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize_test.py,sha256=Eqa4OUqoCGywbHz-HxJ9dWRj9BKlVzJPuIhVzvrpdLM,8925
 ai_edge_quantizer/algorithms/uniform_quantize/octav.py,sha256=-n-QZyp9y8WCy5FPSpXZXHfOA-p-RLvfSaCzAfhHiHI,7040
 ai_edge_quantizer/algorithms/uniform_quantize/octav_test.py,sha256=6m2U-9JdNei0XzOORg2gt87TJdD0XHZ-z5h9c4g_TB4,9120
-ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py,sha256=
+ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py,sha256=qZxTj3B-tqNTLCViwuJj285YncvwjWeay2QKWd8nr6A,20420
 ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor_test.py,sha256=eTrrc8AGaSf1Ytp5gsRONAZ94PHFJUTd4dGi5ZnKZjU,16038
 ai_edge_quantizer/algorithms/utils/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
-ai_edge_quantizer/algorithms/utils/common_utils.py,sha256=
+ai_edge_quantizer/algorithms/utils/common_utils.py,sha256=M3VZsdLC4jCPfSI_aGAY4XjiHvoXtR-UyPZdZdz8GD0,38082
 ai_edge_quantizer/algorithms/utils/common_utils_test.py,sha256=zqapGEfYhjQWe9cNGPLmdbwtEUUYQRhlO_kNe0cXX6E,18104
 ai_edge_quantizer/transformations/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
 ai_edge_quantizer/transformations/dequant_insert.py,sha256=sL1LHFVzBDSd9jgrzlHz38LWU0bwmVX7iBkaNcui0ts,3566
@@ -74,8 +74,8 @@ ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=EoVjI_hplX_Rml3hfRsGmQOi
 ai_edge_quantizer/utils/tfl_interpreter_utils_test.py,sha256=6fjkM-rycZ95L4yfvlr0TN6RlrhfPzxNUYrZaYO_F0A,12013
 ai_edge_quantizer/utils/validation_utils.py,sha256=QTYyQ_HDVrFTGPIsrA240Lv8tUw1fwWp2fu9kTVISkE,6224
 ai_edge_quantizer/utils/validation_utils_test.py,sha256=lO51rGskhzpXePRdZMU87u_YO35_sDp9_eQ85CmupL4,4600
-ai_edge_quantizer_nightly-0.4.0.
-ai_edge_quantizer_nightly-0.4.0.
-ai_edge_quantizer_nightly-0.4.0.
-ai_edge_quantizer_nightly-0.4.0.
-ai_edge_quantizer_nightly-0.4.0.
+ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info/METADATA,sha256=VHxdnbf9zn0x3a3NiiCXSlBMLpMaEy_aDh_RwNiTats,1707
+ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
+ai_edge_quantizer_nightly-0.4.0.dev20251107.dist-info/RECORD,,
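Note: for readers comparing the RECORD entries above, each line is "path,sha256=<digest>,<size-in-bytes>", where the digest is the urlsafe-base64 SHA-256 of the file with trailing '=' padding stripped (per the wheel spec). A minimal sketch:

import base64
import hashlib

def record_entry(path, data):
  digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
  return f"{path},sha256={digest.rstrip(b'=').decode()},{len(data)}"

print(record_entry("ai_edge_quantizer/calibrator.py", b"example file bytes"))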