ai-edge-quantizer-nightly 0.0.1.dev20250304__py3-none-any.whl → 0.0.1.dev20250305__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_edge_quantizer/algorithm_manager.py +3 -0
- ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py +19 -0
- ai_edge_quantizer/calibrator_test.py +1 -1
- ai_edge_quantizer/default_policy.py +4 -2
- ai_edge_quantizer/model_validator.py +7 -2
- ai_edge_quantizer/model_validator_test.py +2 -1
- ai_edge_quantizer/qtyping.py +1 -0
- ai_edge_quantizer/quantizer.py +1 -2
- ai_edge_quantizer/utils/test_utils.py +89 -59
- ai_edge_quantizer/utils/tfl_flatbuffer_utils.py +3 -0
- ai_edge_quantizer/utils/tfl_interpreter_utils.py +59 -0
- {ai_edge_quantizer_nightly-0.0.1.dev20250304.dist-info → ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info}/METADATA +1 -1
- {ai_edge_quantizer_nightly-0.0.1.dev20250304.dist-info → ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info}/RECORD +16 -16
- {ai_edge_quantizer_nightly-0.0.1.dev20250304.dist-info → ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info}/LICENSE +0 -0
- {ai_edge_quantizer_nightly-0.0.1.dev20250304.dist-info → ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info}/WHEEL +0 -0
- {ai_edge_quantizer_nightly-0.0.1.dev20250304.dist-info → ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info}/top_level.txt +0 -0
ai_edge_quantizer/algorithm_manager.py
CHANGED
@@ -97,6 +97,9 @@ MIN_MAX_OP_NAME_MATERIALIZE_FUNC_DICT = {
     _TFLOpName.SLICE: common_quantize.materialize_slice,
     _TFLOpName.SUM: common_quantize.materialize_sum,
     _TFLOpName.SELECT_V2: common_quantize.materialize_select_v2,
+    _TFLOpName.DYNAMIC_UPDATE_SLICE: (
+        common_quantize.materialize_dynamic_update_slice
+    ),
 }
 for op_name, materialize_func in MIN_MAX_OP_NAME_MATERIALIZE_FUNC_DICT.items():
   register_quantized_op(
ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py
CHANGED
@@ -375,6 +375,25 @@ def materialize_select_v2(
   )
 
 
+def materialize_dynamic_update_slice(
+    get_tensor_quant_params_fn: qtyping.GetTensorQuantParamsFuncSignature,
+    op_info: qtyping.OpInfo,
+    graph_info: qtyping.GraphInfo,
+    tensor_name_to_qsv: dict[str, Any],
+) -> list[qtyping.TensorTransformationParams]:
+  """Materialize tensors in tfl.dynamic_update_slice."""
+  return common_utils.materialize_standard_op(
+      op_info,
+      graph_info,
+      tensor_name_to_qsv,
+      get_tensor_quant_params_fn,
+      constraint=_OpQuantConstraint.SAME_AS_OUTPUT_SCALE,
+      inputs_to_ignore=[
+          2,
+      ],  # start_indices do not need to be quantized.
+  )
+
+
 def materialize_sum(
     get_tensor_quant_params_fn: qtyping.GetTensorQuantParamsFuncSignature,
     op_info: qtyping.OpInfo,
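The SAME_AS_OUTPUT_SCALE constraint used above ties the quantization parameters of the op's quantized inputs to those of its output, while input 2 (start_indices) is skipped entirely. A toy numpy illustration of the effect, not library code, with the scale and values chosen arbitrarily:

import numpy as np

# Assumed shared (output) quantization parameters for operand, update and output.
scale, zero_point = 0.05, 0


def quantize(x: np.ndarray) -> np.ndarray:
  """Uniform int8 quantization with the shared scale and zero point."""
  return np.clip(np.round(x / scale) + zero_point, -128, 127).astype(np.int8)


operand = quantize(np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32))
update = quantize(np.array([1.0, -1.0], dtype=np.float32))
start = 1  # start_indices (input 2) stays a plain integer tensor in TFLite.
# Because operand, update, and output share one (scale, zero_point), the quantized
# update can be written straight into the quantized operand buffer without requantization.
operand[start:start + update.size] = update
print(operand)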
ai_edge_quantizer/calibrator_test.py
CHANGED
@@ -234,7 +234,7 @@ class CalibratorTest(googletest.TestCase):
     )
     test_calibrator = calibrator.Calibrator(test_model_path)
     _add_default_int8xint8_integer_recipe(self._recipe_manager)
-    calib_data =
+    calib_data = tfl_interpreter_utils.create_random_normal_input_data(
         test_model_path, num_samples=4
     )
     test_calibrator.calibrate(calib_data, self._recipe_manager)
ai_edge_quantizer/default_policy.py
CHANGED
@@ -167,7 +167,8 @@ DEFAULT_JSON_POLICY = """
       "SLICE",
       "EMBEDDING_LOOKUP",
       "SUM",
-      "SELECT_V2"
+      "SELECT_V2",
+      "DYNAMIC_UPDATE_SLICE"
     ],
     "static_wi8_ai8": [
       "ADD",
@@ -195,7 +196,8 @@ DEFAULT_JSON_POLICY = """
       "SLICE",
       "EMBEDDING_LOOKUP",
       "SUM",
-      "SELECT_V2"
+      "SELECT_V2",
+      "DYNAMIC_UPDATE_SLICE"
     ],
     "static_wi4_ai8": ["FULLY_CONNECTED", "CONV_2D", "INPUT", "OUTPUT", "EMBEDDING_LOOKUP"],
     "static_wi4_ai16": ["FULLY_CONNECTED", "CONV_2D", "INPUT", "OUTPUT", "EMBEDDING_LOOKUP"],
ai_edge_quantizer/model_validator.py
CHANGED
@@ -160,6 +160,12 @@ class ComparisonResult:
       result.update(signature_comparison_result.intermediate_tensors)
     return result
 
+  def get_model_size_reduction(self) -> tuple[int, float]:
+    """Get the model size reduction in bytes and percentage."""
+    reduced_model_size = len(self._reference_model) - len(self._target_model)
+    reduction_perc = reduced_model_size / len(self._reference_model) * 100
+    return reduced_model_size, reduction_perc
+
   def save(self, save_folder: str, model_name: str) -> None:
     """Saves the model comparison result.
 
@@ -170,8 +176,7 @@ class ComparisonResult:
     Raises:
       RuntimeError: If no quantized model is available.
     """
-    reduced_model_size =
-    reduction_ratio = reduced_model_size / len(self._reference_model) * 100
+    reduced_model_size, reduction_ratio = self.get_model_size_reduction()
     result = {
         'reduced_size_bytes': reduced_model_size,
         'reduced_size_percentage': reduction_ratio,
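The new ComparisonResult.get_model_size_reduction() helper makes the size saving reusable outside save(). A minimal sketch of calling it, assuming a ComparisonResult obtained from a validation run; the wrapper function below is illustrative, not part of the package:

from ai_edge_quantizer import model_validator


def report_size_reduction(result: model_validator.ComparisonResult) -> None:
  """Prints the size saving returned by the new helper (bytes and percent)."""
  saved_bytes, saved_pct = result.get_model_size_reduction()
  print(f'Quantization reduced the model by {saved_bytes} bytes ({saved_pct:.1f}%).')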
ai_edge_quantizer/model_validator_test.py
CHANGED
@@ -21,6 +21,7 @@ from tensorflow.python.platform import googletest
 from ai_edge_quantizer import model_validator
 from ai_edge_quantizer.utils import test_utils
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
+from ai_edge_quantizer.utils import tfl_interpreter_utils
 from ai_edge_quantizer.utils import validation_utils
 
 TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile('.')
@@ -194,7 +195,7 @@ class ModelValidatorCompareTest(googletest.TestCase):
         self.target_model_path
     )
     self.signature_key = 'serving_default'  # single signature.
-    self.test_data =
+    self.test_data = tfl_interpreter_utils.create_random_normal_input_data(
         self.reference_model_path
     )
     self.test_dir = self.create_tempdir()
ai_edge_quantizer/qtyping.py
CHANGED
ai_edge_quantizer/quantizer.py
CHANGED
@@ -28,7 +28,6 @@ from ai_edge_quantizer import model_validator
 from ai_edge_quantizer import params_generator
 from ai_edge_quantizer import qtyping
 from ai_edge_quantizer import recipe_manager
-from ai_edge_quantizer.utils import test_utils
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 from ai_edge_quantizer.utils import tfl_interpreter_utils
 from ai_edge_quantizer.utils import validation_utils
@@ -325,7 +324,7 @@ class Quantizer:
     """
     if test_data is None:
       # Create test data for all signatures in the model.
-      test_data =
+      test_data = tfl_interpreter_utils.create_random_normal_input_data(
           self.float_model, num_samples=1
       )
     return model_validator.compare_model(
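With this change Quantizer.validate() no longer depends on test_utils: when test_data is omitted, it generates one random-normal sample per signature through tfl_interpreter_utils. A hedged sketch of that default path; the model path is a placeholder and the recipe-loading step is elided:

from ai_edge_quantizer import quantizer

qt = quantizer.Quantizer('/tmp/float_model.tflite')  # hypothetical model path
# ... load or update a quantization recipe on `qt` here ...
quantization_result = qt.quantize()
validation_result = qt.validate()  # no test_data: random-normal inputs are generated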
ai_edge_quantizer/utils/test_utils.py
CHANGED
@@ -18,12 +18,20 @@
 import inspect as _inspect
 import os.path as _os_path
 import sys as _sys
-from typing import Any, Union
 
-
+from absl.testing import parameterized
 
+from ai_edge_quantizer import model_validator
+from ai_edge_quantizer import qtyping
+from ai_edge_quantizer import quantizer
 from ai_edge_quantizer.utils import tfl_interpreter_utils
 
+_ComputePrecision = qtyping.ComputePrecision
+_OpName = qtyping.TFLOperationName
+_TensorQuantConfig = qtyping.TensorQuantizationConfig
+_OpQuantConfig = qtyping.OpQuantizationConfig
+_AlgorithmName = quantizer.AlgorithmName
+
 
 def get_path_to_datafile(path):
   """Get the path to the specified file in the data dependencies.
@@ -46,62 +54,84 @@ def get_path_to_datafile(path):
   return path
 
 
-[25 removed lines not rendered in this diff view]
+class BaseOpTestCase(parameterized.TestCase):
+  """Base class for op-level tests."""
+
+  def quantize_and_validate(
+      self,
+      model_path: str,
+      algorithm_key: _AlgorithmName,
+      op_name: _OpName,
+      op_config: _OpQuantConfig,
+      num_validation_samples: int = 4,
+      error_metric: str = 'mse',
+  ) -> model_validator.ComparisonResult:
+    """Quantizes and validates the given model with the given configurations.
+
+    Args:
+      model_path: The path to the model to be quantized.
+      algorithm_key: The algorithm to be used for quantization.
+      op_name: The name of the operation to be quantized.
+      op_config: The configuration for the operation to be quantized.
+      num_validation_samples: The number of samples to use for validation.
+      error_metric: The error error_metric to use for validation.
+
+    Returns:
+      The comparison result of the validation.
+    """
+    quantizer_instance = quantizer.Quantizer(model_path)
+    quantizer_instance.update_quantization_recipe(
+        algorithm_key=algorithm_key,
+        regex='.*',
+        operation_name=op_name,
+        op_config=op_config,
+    )
+    if quantizer_instance.need_calibration:
+      calibration_data = tfl_interpreter_utils.create_random_normal_input_data(
+          quantizer_instance.float_model, num_samples=num_validation_samples * 8
       )
-[6 removed lines not rendered in this diff view]
-    tflite_model: Union[str, bytes],
-    num_samples: int = 4,
-    random_seed: int = 666,
-) -> dict[str, list[dict[str, Any]]]:
-  """create random dataset following random distribution for signature runner.
-
-  Args:
-    tflite_model: TFLite model path or bytearray
-    num_samples: number of input samples to be generated
-    random_seed: random seed to be used for function
-
-  Returns:
-    a list of inputs to the given interpreter, for a single interpreter we may
-      have multiple signatures so each set of inputs is also represented as
-      list
-  """
-  tfl_interpreter = tfl_interpreter_utils.create_tfl_interpreter(tflite_model)
-  signature_defs = tfl_interpreter.get_signature_list()
-  signature_keys = list(signature_defs.keys())
-  test_data = {}
-  for signature_key in signature_keys:
-    signature_runner = tfl_interpreter.get_signature_runner(signature_key)
-    input_details = signature_runner.get_input_details()
-    test_data[signature_key] = create_random_normal_dataset(
-        input_details, num_samples, random_seed
+      calibration_result = quantizer_instance.calibrate(calibration_data)
+      quantization_result = quantizer_instance.quantize(calibration_result)
+    else:
+      quantization_result = quantizer_instance.quantize()
+    test_data = tfl_interpreter_utils.create_random_normal_input_data(
+        quantization_result.quantized_model, num_samples=num_validation_samples
     )
-
+    return quantizer_instance.validate(test_data, error_metric)
+
+  def assert_model_size_reduction_above_min_pct(
+      self,
+      validation_result: model_validator.ComparisonResult,
+      min_pct: float,
+  ):
+    """Checks the model size reduction (percentage) against the given expectation."""
+    _, reduction_pct = validation_result.get_model_size_reduction()
+    self.assertGreater(reduction_pct, min_pct)
+
+  def assert_weights_errors_below_tolerance(
+      self,
+      validation_result: model_validator.ComparisonResult,
+      weight_tolerance: float,
+  ):
+    """Checks the weight tensors' numerical behavior against the given tolerance."""
+    self.assertNotEmpty(validation_result.available_signature_keys())
+    for signature_key in validation_result.available_signature_keys():
+      signature_result = validation_result.get_signature_comparison_result(
+          signature_key
+      )
+      for result in signature_result.constant_tensors.values():
+        self.assertLess(result, weight_tolerance)
+
+  def assert_output_errors_below_tolerance(
+      self,
+      validation_result: model_validator.ComparisonResult,
+      output_tolerance: float,
+  ):
+    """Checks the output tensor numerical behavior against the given tolerance."""
+    self.assertNotEmpty(validation_result.available_signature_keys())
+    for signature_key in validation_result.available_signature_keys():
+      signature_result = validation_result.get_signature_comparison_result(
+          signature_key
+      )
+      for result in signature_result.output_tensors.values():
+        self.assertLess(result, output_tolerance)
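A hedged sketch of an op-level test built on the new BaseOpTestCase. The model path, the algorithm key, and the OpQuantizationConfig/TensorQuantizationConfig field values are illustrative assumptions, not taken from this diff; the tolerance thresholds are arbitrary:

from tensorflow.python.platform import googletest

from ai_edge_quantizer import qtyping
from ai_edge_quantizer import quantizer
from ai_edge_quantizer.utils import test_utils


class DynamicUpdateSliceQuantizationTest(test_utils.BaseOpTestCase):

  def test_static_int8_quantization(self):
    result = self.quantize_and_validate(
        model_path='/tmp/model_with_dynamic_update_slice.tflite',  # placeholder
        algorithm_key=quantizer.AlgorithmName.MIN_MAX_UNIFORM_QUANT,  # assumed key
        op_name=qtyping.TFLOperationName.DYNAMIC_UPDATE_SLICE,
        op_config=qtyping.OpQuantizationConfig(  # field names are assumptions
            activation_tensor_config=qtyping.TensorQuantizationConfig(
                num_bits=8, symmetric=False
            ),
            weight_tensor_config=qtyping.TensorQuantizationConfig(
                num_bits=8, symmetric=True
            ),
            compute_precision=qtyping.ComputePrecision.INTEGER,
        ),
    )
    self.assert_model_size_reduction_above_min_pct(result, 50)
    self.assert_output_errors_below_tolerance(result, 1e-2)


if __name__ == '__main__':
  googletest.main()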
ai_edge_quantizer/utils/tfl_flatbuffer_utils.py
CHANGED
@@ -62,6 +62,9 @@ TFL_OP_NAME_TO_CODE = immutabledict.immutabledict({
     _TFLOpName.SLICE: schema_py_generated.BuiltinOperator.SLICE,
     _TFLOpName.SUM: schema_py_generated.BuiltinOperator.SUM,
     _TFLOpName.SELECT_V2: schema_py_generated.BuiltinOperator.SELECT_V2,
+    _TFLOpName.DYNAMIC_UPDATE_SLICE: (
+        schema_py_generated.BuiltinOperator.DYNAMIC_UPDATE_SLICE
+    ),
 })
 
 TFL_OP_CODE_TO_NAME = immutabledict.immutabledict(
ai_edge_quantizer/utils/tfl_interpreter_utils.py
CHANGED
@@ -315,3 +315,62 @@ def get_signature_main_subgraph_index(
   """
   signature_runner = tflite_interpreter.get_signature_runner(signature_key)
   return signature_runner._subgraph_index  # pylint:disable=protected-access
+
+
+def create_random_normal_dataset(
+    input_details: dict[str, Any],
+    num_samples: int,
+    random_seed: Union[int, np._typing.ArrayLike],
+) -> list[dict[str, Any]]:
+  """Creates a random normal dataset for given input details.
+
+  Args:
+    input_details: A dictionary of input details.
+    num_samples: The number of samples to generate.
+    random_seed: The random seed to use.
+
+  Returns:
+    A list of dictionaries, each containing a sample of input data (for all
+    signatures).
+  """
+  rng = np.random.default_rng(random_seed)
+  dataset = []
+  for _ in range(num_samples):
+    input_data = {}
+    for arg_name, input_tensor in input_details.items():
+      new_data = rng.normal(size=input_tensor["shape"]).astype(
+          input_tensor["dtype"]
+      )
+      input_data[arg_name] = new_data
+    dataset.append(input_data)
+  return dataset
+
+
+def create_random_normal_input_data(
+    tflite_model: Union[str, bytes],
+    num_samples: int = 4,
+    random_seed: int = 666,
+) -> dict[str, list[dict[str, Any]]]:
+  """create random dataset following random distribution for signature runner.
+
+  Args:
+    tflite_model: TFLite model path or bytearray
+    num_samples: number of input samples to be generated
+    random_seed: random seed to be used for function
+
+  Returns:
+    a list of inputs to the given interpreter, for a single interpreter we may
+      have multiple signatures so each set of inputs is also represented as
+      list
+  """
+  tfl_interpreter = create_tfl_interpreter(tflite_model)
+  signature_defs = tfl_interpreter.get_signature_list()
+  signature_keys = list(signature_defs.keys())
+  test_data = {}
+  for signature_key in signature_keys:
+    signature_runner = tfl_interpreter.get_signature_runner(signature_key)
+    input_details = signature_runner.get_input_details()
+    test_data[signature_key] = create_random_normal_dataset(
+        input_details, num_samples, random_seed
+    )
+  return test_data
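The relocated create_random_normal_input_data helper is now imported from tfl_interpreter_utils by the calibrator tests, the model_validator tests, the quantizer, and the new op-level test utilities. A small usage sketch; the model path is a placeholder:

from ai_edge_quantizer.utils import tfl_interpreter_utils

test_data = tfl_interpreter_utils.create_random_normal_input_data(
    '/tmp/my_model.tflite', num_samples=4, random_seed=0  # placeholder path
)
# The result maps signature key -> list of {input_name: np.ndarray} samples.
for signature_key, samples in test_data.items():
  print(signature_key, len(samples), sorted(samples[0].keys()))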
{ai_edge_quantizer_nightly-0.0.1.dev20250304.dist-info → ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-quantizer-nightly
-Version: 0.0.1.dev20250304
+Version: 0.0.1.dev20250305
 Summary: A quantizer for advanced developers to quantize converted AI Edge models.
 Home-page: https://github.com/google-ai-edge/ai-edge-quantizer
 Keywords: On-Device ML,AI,Google,TFLite,Quantization,LLMs,GenAI
{ai_edge_quantizer_nightly-0.0.1.dev20250304.dist-info → ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info}/RECORD
CHANGED
@@ -1,19 +1,19 @@
 ai_edge_quantizer/__init__.py,sha256=4pFSkukSwahYyzwqia0yPRyz8TnFQfGRthVJhYpMWas,793
-ai_edge_quantizer/algorithm_manager.py,sha256=
+ai_edge_quantizer/algorithm_manager.py,sha256=4PPqKQcOLDsrY_LhyPvTQsSfH_fpuBmKzz3Uins1UK4,7637
 ai_edge_quantizer/algorithm_manager_api.py,sha256=u903TG0s1uIDhJqfeJne3CFl8A93phZrwgV2-hwdcXU,9247
 ai_edge_quantizer/algorithm_manager_api_test.py,sha256=tL_ozYFTsOPX8qGcti0KTz37nVsCxf0SSG5C45SyT-g,7319
 ai_edge_quantizer/calibrator.py,sha256=2J-bX0k09A7vZSRnO3eP49YO2uBMUQh6-sk3JRz9fGQ,11363
-ai_edge_quantizer/calibrator_test.py,sha256=
+ai_edge_quantizer/calibrator_test.py,sha256=C_oWOaRugPKYX74jF-eRFH-k6nGOdA8I9_uPiocaOuE,11900
 ai_edge_quantizer/conftest.py,sha256=SxCz-5LlRD_lQm4hQc4c6IGG7DS8d7IyEWY9gnscPN0,794
-ai_edge_quantizer/default_policy.py,sha256=
+ai_edge_quantizer/default_policy.py,sha256=CgArJLygD3eJ2g9ur-i1M9DLb3Tb8yy3hiJfyeikixA,9179
 ai_edge_quantizer/model_modifier.py,sha256=Z8EYtrz4zhCFpzd1zVwl2AetVE3BGBf5OvB2DbVQuds,5850
 ai_edge_quantizer/model_modifier_test.py,sha256=cJd04SLOG-fQZZNZPcisoBLx3cLtWEwGqUBbLb-pif4,4751
-ai_edge_quantizer/model_validator.py,sha256=
-ai_edge_quantizer/model_validator_test.py,sha256=
+ai_edge_quantizer/model_validator.py,sha256=fRNz0jO54cthPTibsCuViUXUuFRHl_fbvEiCukIVy20,13030
+ai_edge_quantizer/model_validator_test.py,sha256=EeqOP_mrZsnZ3rug756s0ryDDqd2KgIDld5Lm_gDuWY,13020
 ai_edge_quantizer/params_generator.py,sha256=FvBub5yM2q98k7wNLgEyRerf8sVIETvGbrFcXFPUPdA,13523
 ai_edge_quantizer/params_generator_test.py,sha256=d9JwR-yxNJgg1SW-m8sFFPkIRdhgsDwMpVKsBQFL0gg,37658
-ai_edge_quantizer/qtyping.py,sha256=
-ai_edge_quantizer/quantizer.py,sha256=
+ai_edge_quantizer/qtyping.py,sha256=qeSxsbU0aEzBMgjvM4a4iYWx1xT7L5Sk5ZyON7xJWWU,15140
+ai_edge_quantizer/quantizer.py,sha256=g3DMqFMrMpt9jQttCE0WcdNbMtk0JZnmN5MmCHrNdyM,13202
 ai_edge_quantizer/quantizer_test.py,sha256=38oTMJwMmxwPDeqT3eaVbazjtuIUIzMQ3mJNKh_eNQY,20493
 ai_edge_quantizer/recipe.py,sha256=r5tJiUs-ihZFzeK_jP2sUIUgTqZsL5SWvbUokuIUPDo,2251
 ai_edge_quantizer/recipe_manager.py,sha256=qcGUD7e7BISKdsY9WH2rdaRR3acmzSA5qMezGNbzlpo,8931
@@ -28,7 +28,7 @@ ai_edge_quantizer/algorithms/nonlinear_quantize/__init__.py,sha256=lpq1g2ayg3lCP
 ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting.py,sha256=Bs9CK7wZAw6jNaZ8xEtbwO2vM34VYXNZSMVWvxJo9nw,9297
 ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting_test.py,sha256=s64eDDH9bmRWy6Bl1peHnhGewLnFJjvnhYOdjo1zYOA,22625
 ai_edge_quantizer/algorithms/uniform_quantize/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
-ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=
+ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=l3wJXoyXJXULeSEx3_BzbtIxLzotziT1mQembF9A1RE,21877
 ai_edge_quantizer/algorithms/uniform_quantize/common_quantize_test.py,sha256=qMmKbWqxrCoVKbLKHn9WuCrGKPfHkEyU0Nmhokh8Qeo,2597
 ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py,sha256=OTXjEZ3Ctq3ffYzisX-6HwgK_DuA7uos_aap5PiIUPE,8686
 ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery_test.py,sha256=y7BK11fkF63Ex_Jzg3fbIdy0D_Ca6HuvChVZR7Uwggc,8073
@@ -53,15 +53,15 @@ ai_edge_quantizer/transformations/transformation_utils_test.py,sha256=ks81nNvruO
 ai_edge_quantizer/utils/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
 ai_edge_quantizer/utils/calibration_utils.py,sha256=1Fj9MIO6aLZIRgyd4axvZN4S_O64nB_-Miu1WP664js,2536
 ai_edge_quantizer/utils/calibration_utils_test.py,sha256=Z-AcdTieesWFKyKBb08ZXm4Mgu6cvJ4bg2-MJ7hLD10,2856
-ai_edge_quantizer/utils/test_utils.py,sha256=
-ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=
+ai_edge_quantizer/utils/test_utils.py,sha256=gmq3QeyVuMAgYuGp_uZ2Ei0TOCDqW2jD2GcfgcCX9FA,5183
+ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=pI4JWq_rhp1tRWwNSFMzbWx-3YEQWQir-bM6S88WKIw,10237
 ai_edge_quantizer/utils/tfl_flatbuffer_utils_test.py,sha256=AbyDxoM62k4ojD8gPdkWo--xe5hlX3t0kobQSA80kuk,7740
-ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=
+ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=ZI16i25bAOpnJUBgRg38EH2CuZ55wxyHFScM12RsOwc,12487
 ai_edge_quantizer/utils/tfl_interpreter_utils_test.py,sha256=Op3JxtOqlrjzmYF18jnnstL1k9xiY9kKJ8S2vklKGkc,11327
 ai_edge_quantizer/utils/validation_utils.py,sha256=oYw33Sg547AqtGw-choPUJmp9SAKkV46J_ddqSsum2Q,3950
 ai_edge_quantizer/utils/validation_utils_test.py,sha256=V_qNDikPD4OPB-siOLQCWNVWTAu87h2IgNYt7teFd-o,2934
-ai_edge_quantizer_nightly-0.0.1.
-ai_edge_quantizer_nightly-0.0.1.
-ai_edge_quantizer_nightly-0.0.1.
-ai_edge_quantizer_nightly-0.0.1.
-ai_edge_quantizer_nightly-0.0.1.
+ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info/METADATA,sha256=YR7Hg6bZCgBYLBxr723WDNJ1JH9bPs1__4rsSFSD1-o,1528
+ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
+ai_edge_quantizer_nightly-0.0.1.dev20250305.dist-info/RECORD,,