ai-edge-quantizer-nightly 0.1.0.dev20250512__py3-none-any.whl → 0.1.0.dev20250514__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- ai_edge_quantizer/algorithm_manager.py +34 -0
- ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py +37 -12
- ai_edge_quantizer/algorithms/uniform_quantize/common_quantize_test.py +29 -2
- ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py +3 -5
- ai_edge_quantizer/algorithms/uniform_quantize/hadamard_rotation.py +357 -0
- ai_edge_quantizer/algorithms/uniform_quantize/hadamard_rotation_test.py +265 -0
- ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize.py +7 -31
- ai_edge_quantizer/algorithms/uniform_quantize/octav.py +27 -17
- ai_edge_quantizer/algorithms/uniform_quantize/octav_test.py +93 -38
- ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py +133 -3
- ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor_test.py +11 -2
- ai_edge_quantizer/algorithms/utils/common_utils.py +21 -8
- ai_edge_quantizer/default_policy.py +4 -2
- ai_edge_quantizer/params_generator.py +1 -0
- ai_edge_quantizer/qtyping.py +34 -1
- ai_edge_quantizer/transformation_performer.py +5 -0
- ai_edge_quantizer/transformations/insert_hadamard_rotation.py +209 -0
- ai_edge_quantizer/transformations/insert_hadamard_rotation_test.py +200 -0
- ai_edge_quantizer/utils/test_utils.py +33 -0
- ai_edge_quantizer/utils/tfl_flatbuffer_utils.py +1 -0
- {ai_edge_quantizer_nightly-0.1.0.dev20250512.dist-info → ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info}/METADATA +1 -1
- {ai_edge_quantizer_nightly-0.1.0.dev20250512.dist-info → ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info}/RECORD +25 -21
- {ai_edge_quantizer_nightly-0.1.0.dev20250512.dist-info → ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info}/LICENSE +0 -0
- {ai_edge_quantizer_nightly-0.1.0.dev20250512.dist-info → ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info}/WHEEL +0 -0
- {ai_edge_quantizer_nightly-0.1.0.dev20250512.dist-info → ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,200 @@
+# Copyright 2024 The AI Edge Quantizer Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Test insertion of the Hadamard rotation custom op."""
+
+import os
+import numpy as np
+from tensorflow.python.platform import googletest
+from ai_edge_quantizer import qtyping
+from ai_edge_quantizer.transformations import insert_hadamard_rotation
+from ai_edge_quantizer.transformations import transformation_utils
+from ai_edge_quantizer.utils import test_utils
+from ai_edge_quantizer.utils import tfl_flatbuffer_utils
+from ai_edge_litert import schema_py_generated  # pylint: disable=g-direct-tensorflow-import
+
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile('..')
+
+
+class InsertHadamardRotationFullyConnectedTest(googletest.TestCase):
+
+  def setUp(self):
+    super().setUp()
+    model_path = os.path.join(
+        _TEST_DATA_PREFIX_PATH, 'tests/models/single_fc_bias.tflite'
+    )
+    self.model = tfl_flatbuffer_utils.read_model(model_path)
+    self.params = qtyping.UniformQuantParams(
+        num_bits=8,
+        quantized_dimension=None,
+        scale=np.ones(1),
+        zero_point=np.zeros(1),
+        hadamard=qtyping.UniformQuantParams.HadamardRotationParams(
+            random_binary_vector=np.ones(1),
+            hadamard_size=2,
+        ),
+    )
+
+  def test_raise_unsupported_qparams(self):
+    with self.assertRaisesWithPredicateMatch(
+        ValueError, lambda err: 'uniform quantization' in str(err)
+    ):
+      insert_hadamard_rotation.insert_hadamard_rotation(
+          transformation_utils.TransformationInput(
+              tensor_id=0,
+              op_codes=self.model.operatorCodes,
+              buffers=self.model.buffers,
+              subgraph=self.model.subgraphs[0],
+              producer=-1,
+              consumers=[-1],
+              quant_params=qtyping.NonLinearQuantParams(
+                  num_bits=16, quantized_data=None
+              ),
+          )
+      )
+
+  def test_raise_missing_hadamard_data(self):
+    with self.assertRaisesWithPredicateMatch(
+        ValueError, lambda err: 'quantization params are not set' in str(err)
+    ):
+      insert_hadamard_rotation.insert_hadamard_rotation(
+          transformation_utils.TransformationInput(
+              tensor_id=0,
+              op_codes=self.model.operatorCodes,
+              buffers=self.model.buffers,
+              subgraph=self.model.subgraphs[0],
+              producer=-1,
+              consumers=[-1],
+              quant_params=qtyping.UniformQuantParams(
+                  num_bits=8,
+                  quantized_dimension=None,
+                  scale=np.ones(1),
+                  zero_point=np.zeros(1),
+              ),
+          )
+      )
+
+  def test_raise_non_float32_tensor(self):
+    self.model.subgraphs[0].tensors[
+        0
+    ].type = schema_py_generated.TensorType.INT32
+    with self.assertRaisesWithPredicateMatch(
+        ValueError, lambda err: 'float32 tensors' in str(err)
+    ):
+      insert_hadamard_rotation.insert_hadamard_rotation(
+          transformation_utils.TransformationInput(
+              tensor_id=0,
+              op_codes=self.model.operatorCodes,
+              buffers=self.model.buffers,
+              subgraph=self.model.subgraphs[0],
+              producer=-1,
+              consumers=[-1],
+              quant_params=self.params,
+          ),
+      )
+
+  def test_insert_single_custom_op(self):
+    # Insert aeq.hadamard_rotation before fully_connected
+    info = insert_hadamard_rotation.insert_hadamard_rotation(
+        transformation_utils.TransformationInput(
+            tensor_id=0,
+            op_codes=self.model.operatorCodes,
+            buffers=self.model.buffers,
+            subgraph=self.model.subgraphs[0],
+            producer=-1,
+            consumers=[-1],
+            quant_params=self.params,
+        )
+    )
+    subgraph = self.model.subgraphs[0]
+    self.assertEqual(info.op_id, 0)
+    self.assertEqual(info.num_ops_added, 1)
+    # Model had 4 tensors, added 1.
+    self.assertEqual(info.output_tensor_id, 4)
+    self.assertLen(subgraph.tensors, 5)
+    # Model had 1 op, added a new one.
+    self.assertLen(self.model.operatorCodes, 2)
+    self.assertEqual(
+        self.model.operatorCodes[1].builtinCode,
+        schema_py_generated.BuiltinOperator.CUSTOM,
+    )
+    # First op is now the custom op, precedes fully_connected.
+    self.assertEqual(
+        self.model.operatorCodes[subgraph.operators[0].opcodeIndex].builtinCode,
+        schema_py_generated.BuiltinOperator.CUSTOM,
+    )
+    # Input to the custom op is graph input
+    self.assertEqual(subgraph.operators[0].inputs[0], 0)
+    # Input to the FC is the custom op output
+    self.assertEqual(subgraph.operators[1].inputs[0], 4)
+
+
+class InsertHadamardRotationEmbeddingLookupTest(googletest.TestCase):
+
+  def setUp(self):
+    super().setUp()
+    model_path = os.path.join(
+        _TEST_DATA_PREFIX_PATH, 'tests/models/embedding_lookup.tflite'
+    )
+    self.model = tfl_flatbuffer_utils.read_model(model_path)
+    self.params = qtyping.UniformQuantParams(
+        num_bits=8,
+        quantized_dimension=None,
+        scale=np.ones(1),
+        zero_point=np.zeros(1),
+        hadamard=qtyping.UniformQuantParams.HadamardRotationParams(
+            random_binary_vector=np.ones(1),
+            hadamard_size=2,
+        ),
+    )
+
+  def test_insert_single_custom_op(self):
+    # Insert aeq.hadamard_rotation after embedding_lookup
+    info = insert_hadamard_rotation.insert_hadamard_rotation(
+        transformation_utils.TransformationInput(
+            tensor_id=2,
+            op_codes=self.model.operatorCodes,
+            buffers=self.model.buffers,
+            subgraph=self.model.subgraphs[0],
+            producer=0,
+            consumers=[-1],
+            quant_params=self.params,
+        )
+    )
+    subgraph = self.model.subgraphs[0]
+    self.assertEqual(info.op_id, 1)
+    self.assertEqual(info.num_ops_added, 1)
+    # Model had 3 tensors, added 1.
+    self.assertEqual(info.output_tensor_id, 3)
+    self.assertLen(subgraph.tensors, 4)
+    # Model had 1 op, added a new one.
+    self.assertLen(self.model.operatorCodes, 2)
+    self.assertEqual(
+        self.model.operatorCodes[1].builtinCode,
+        schema_py_generated.BuiltinOperator.CUSTOM,
+    )
+    # Second op is now the custom op, after embedding_lookup.
+    self.assertEqual(
+        self.model.operatorCodes[subgraph.operators[1].opcodeIndex].builtinCode,
+        schema_py_generated.BuiltinOperator.CUSTOM,
+    )
+    # Input to the custom op is embedding's output
+    self.assertEqual(subgraph.operators[1].inputs[0], 2)
+    # Custom op's output is the new tensor
+    self.assertEqual(subgraph.operators[1].outputs[0], 3)
+
+
+if __name__ == '__main__':
+  googletest.main()
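The new tests above only exercise the graph rewrite; they do not show what the aeq.hadamard_rotation op computes. The snippet below is a rough numpy sketch of the general idea of a block-wise Hadamard rotation, not the implementation in hadamard_rotation.py or the custom op kernel: the helper name and the reshape layout are illustrative assumptions, and only hadamard_size and random_binary_vector correspond to parameters visible in the test.

import numpy as np

def hadamard_rotate(x, hadamard_size=2, random_binary_vector=None):
  """Illustrative block-wise Hadamard rotation (hypothetical helper, not the library API)."""
  # Sylvester construction of a hadamard_size x hadamard_size Hadamard matrix
  # (hadamard_size is assumed to be a power of two).
  h = np.array([[1.0]])
  while h.shape[0] < hadamard_size:
    h = np.kron(h, np.array([[1.0, 1.0], [1.0, -1.0]]))
  h /= np.sqrt(hadamard_size)  # orthonormal, so the rotation is invertible
  if random_binary_vector is not None:
    x = x * random_binary_vector  # optional +/-1 sign flips before rotating
  # Rotate the last dimension in contiguous blocks of hadamard_size.
  blocks = x.reshape(-1, hadamard_size)
  return (blocks @ h.T).reshape(x.shape)

# E.g. a length-4 vector rotated in blocks of 2 spreads each pair's energy evenly,
# which is what makes the rotated tensor friendlier to uniform quantization.
print(hadamard_rotate(np.array([1.0, 0.0, 3.0, 1.0]), hadamard_size=2))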
@@ -33,6 +33,39 @@ _OpQuantConfig = qtyping.OpQuantizationConfig
 _AlgorithmName = quantizer.AlgorithmName
 
 
+DEFAULT_ACTIVATION_QUANT_SETTING = _TensorQuantConfig(
+    num_bits=8,
+    symmetric=False,
+    granularity=qtyping.QuantGranularity.TENSORWISE,
+)
+DEFAULT_WEIGHT_QUANT_SETTING = _TensorQuantConfig(
+    num_bits=8,
+    symmetric=True,
+    granularity=qtyping.QuantGranularity.CHANNELWISE,
+)
+
+
+def get_static_activation_quant_setting(
+    num_bits: int, symmetric: bool
+) -> _TensorQuantConfig:
+  return _TensorQuantConfig(
+      num_bits=num_bits,
+      symmetric=symmetric,
+      granularity=qtyping.QuantGranularity.TENSORWISE,
+  )
+
+
+def get_static_op_quant_config(
+    activation_config: _TensorQuantConfig = DEFAULT_ACTIVATION_QUANT_SETTING,
+    weight_config: _TensorQuantConfig = DEFAULT_WEIGHT_QUANT_SETTING,
+) -> _OpQuantConfig:
+  return qtyping.OpQuantizationConfig(
+      activation_tensor_config=activation_config,
+      weight_tensor_config=weight_config,
+      compute_precision=_ComputePrecision.INTEGER,
+  )
+
+
 def get_path_to_datafile(path):
   """Get the path to the specified file in the data dependencies.
 
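The block added above builds static-range op quantization configs; judging by the surrounding get_path_to_datafile context and the +33 line count in the file list, it lands in ai_edge_quantizer/utils/test_utils.py. A minimal usage sketch under that assumption, with the defaults read straight off the added code (int8 asymmetric per-tensor activations, int8 symmetric per-channel weights, integer compute):

# Sketch only; assumes the helpers are exposed from ai_edge_quantizer/utils/test_utils.py
# exactly as shown in the hunk above.
from ai_edge_quantizer.utils import test_utils

# Defaults from DEFAULT_ACTIVATION_QUANT_SETTING / DEFAULT_WEIGHT_QUANT_SETTING.
op_config = test_utils.get_static_op_quant_config()

# Override the activation side, e.g. 16-bit symmetric static activations.
a16 = test_utils.get_static_activation_quant_setting(num_bits=16, symmetric=True)
op_config_a16 = test_utils.get_static_op_quant_config(activation_config=a16)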
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-quantizer-nightly
-Version: 0.1.0.dev20250512
+Version: 0.1.0.dev20250514
 Summary: A quantizer for advanced developers to quantize converted AI Edge models.
 Home-page: https://github.com/google-ai-edge/ai-edge-quantizer
 Keywords: On-Device ML,AI,Google,TFLite,Quantization,LLMs,GenAI
@@ -1,18 +1,18 @@
 ai_edge_quantizer/__init__.py,sha256=4pFSkukSwahYyzwqia0yPRyz8TnFQfGRthVJhYpMWas,793
-ai_edge_quantizer/algorithm_manager.py,sha256=
+ai_edge_quantizer/algorithm_manager.py,sha256=p-wX2ksIV1hbWEQz-uUnbNMVgDJrsIiIOU2ZYX2ZrTM,11726
 ai_edge_quantizer/algorithm_manager_api.py,sha256=u903TG0s1uIDhJqfeJne3CFl8A93phZrwgV2-hwdcXU,9247
 ai_edge_quantizer/algorithm_manager_api_test.py,sha256=w6bSONvXkX6bzXAGc0-7b6gNDt9oz9ieq97KP8Sg_JU,7666
 ai_edge_quantizer/calibrator.py,sha256=n7AD9j7UScR-CieoI6DQRMeiG_fhLBfSLRiM4460xaM,11895
 ai_edge_quantizer/calibrator_test.py,sha256=C_oWOaRugPKYX74jF-eRFH-k6nGOdA8I9_uPiocaOuE,11900
 ai_edge_quantizer/conftest.py,sha256=SxCz-5LlRD_lQm4hQc4c6IGG7DS8d7IyEWY9gnscPN0,794
-ai_edge_quantizer/default_policy.py,sha256=
+ai_edge_quantizer/default_policy.py,sha256=zNTeiI_eP5-dLL3P_VWIQB3RzXBrb06peJKngLnSSFY,11125
 ai_edge_quantizer/model_modifier.py,sha256=teGa8I6kGvn6TQY6Xv53YFIc_pQEhNvM9Zb4bvhezyw,7110
 ai_edge_quantizer/model_modifier_test.py,sha256=cJd04SLOG-fQZZNZPcisoBLx3cLtWEwGqUBbLb-pif4,4751
 ai_edge_quantizer/model_validator.py,sha256=Hj0_5o-Oa3dSlJ3ryVjRhvsyelHNyek1GrtG9buMczg,13153
 ai_edge_quantizer/model_validator_test.py,sha256=EeqOP_mrZsnZ3rug756s0ryDDqd2KgIDld5Lm_gDuWY,13020
-ai_edge_quantizer/params_generator.py,sha256=
+ai_edge_quantizer/params_generator.py,sha256=j1BV2cGFLlQmUY6aoW5uglYqf77b9ytN8oZ1gh6o0mM,20096
 ai_edge_quantizer/params_generator_test.py,sha256=RDYoRZDJfEZRtjlTAU2kZ_4t3JHOqEHxfJX9V4ETAhg,40597
-ai_edge_quantizer/qtyping.py,sha256=
+ai_edge_quantizer/qtyping.py,sha256=LKn9w53wmw3gPO0E4DKOhj8gkx9efjXMoipGnsJyGiU,16453
 ai_edge_quantizer/quantizer.py,sha256=g3DMqFMrMpt9jQttCE0WcdNbMtk0JZnmN5MmCHrNdyM,13202
 ai_edge_quantizer/quantizer_test.py,sha256=K_HBA56JkFI3HL8VLWCqGEfC0ISh5ldMKoNyBdGRAJg,20368
 ai_edge_quantizer/recipe.py,sha256=FR0uJceumZrnle2VRSOQZ1uXup4S1cTYKRH-N53mWRo,2919
@@ -21,25 +21,27 @@ ai_edge_quantizer/recipe_manager_test.py,sha256=LulVxsYp6TBGFI2PLCUCd4VsFq8ELpC7
 ai_edge_quantizer/recipe_test.py,sha256=Fg_sfxovI2fRjk5qdu18ghOvXdUvhDR1TxbE0GHDczc,3381
 ai_edge_quantizer/transformation_instruction_generator.py,sha256=R7A90Qj6iQQROrznXmXLJd-5yXq0PRHbLOdNY51dEu4,27913
 ai_edge_quantizer/transformation_instruction_generator_test.py,sha256=E0QSDCav6N6izlJ-a1ZJOsb2VEUxuxBmTbt0-EgDdxY,49890
-ai_edge_quantizer/transformation_performer.py,sha256=
+ai_edge_quantizer/transformation_performer.py,sha256=nkkqbs81ITB5u2FoWeG9z5d8EtLtCiltOxcQ34okN8E,13091
 ai_edge_quantizer/transformation_performer_test.py,sha256=xk6A3LStCyPclN51--9uO7XjSxNfZmpdfvrzOL0maNM,20349
 ai_edge_quantizer/algorithms/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
 ai_edge_quantizer/algorithms/nonlinear_quantize/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
 ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting.py,sha256=Bs9CK7wZAw6jNaZ8xEtbwO2vM34VYXNZSMVWvxJo9nw,9297
 ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting_test.py,sha256=EqIHGEZ1LgUrTN7zf880RuAzEv3Qy7kgh5ivObJGHSo,22646
 ai_edge_quantizer/algorithms/uniform_quantize/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
-ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=
-ai_edge_quantizer/algorithms/uniform_quantize/common_quantize_test.py,sha256=
-ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py,sha256=
+ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=NpZ-JvZt2OhpTqH7Z81YYVjzOX_pHoDCt8rr3VIXJUY,28665
+ai_edge_quantizer/algorithms/uniform_quantize/common_quantize_test.py,sha256=GGf_n3wIeg3GB_eGsmyNJ0fTcxgpeMMbugTMRONK6TQ,3553
+ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py,sha256=BDdn_uBZakfHyzdMJPKadsOqxqyC-s6W2ZzFH99L4fE,8652
 ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery_test.py,sha256=sT5eX5TLZEHTtPfnSkCPDlS0sQxlTFWbCsbvOuj--yY,8889
-ai_edge_quantizer/algorithms/uniform_quantize/
+ai_edge_quantizer/algorithms/uniform_quantize/hadamard_rotation.py,sha256=pN4hwggrdI4eBdqvsdwnFagFxpd4D8LkWK0o4HG_xxk,12536
+ai_edge_quantizer/algorithms/uniform_quantize/hadamard_rotation_test.py,sha256=MajG6DqpP4HvVzcZwgiKojWL3RBxCpkU3u2mKyeB0hA,9191
+ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize.py,sha256=8_tNLTbOWTKId4DfHBjkOR9RvELUyIpxlGxKu7tv5Ko,7556
 ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize_test.py,sha256=zoF_EHjYqsKkuev8wfuutIITEmp_maa70IpJI_Df3ck,7431
-ai_edge_quantizer/algorithms/uniform_quantize/octav.py,sha256=
-ai_edge_quantizer/algorithms/uniform_quantize/octav_test.py,sha256=
-ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py,sha256=
-ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor_test.py,sha256=
+ai_edge_quantizer/algorithms/uniform_quantize/octav.py,sha256=Umxh4kJyeHddZf-Wd4aXE5MTI1XWFa5KRuM17uYU714,6922
+ai_edge_quantizer/algorithms/uniform_quantize/octav_test.py,sha256=sha1d99Xk87bI87tgz0g5LeDC-EeE4WMfM5rRC98-m4,9140
+ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py,sha256=W2QbXP96xeleAmA7qFwco1iq_bOtArGDK6Qj_g6kNl8,15986
+ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor_test.py,sha256=MgG7Qh2_z4I6InBqEEDSVlaR0q48aMz4xqAlxeG2EMk,12436
 ai_edge_quantizer/algorithms/utils/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
-ai_edge_quantizer/algorithms/utils/common_utils.py,sha256=
+ai_edge_quantizer/algorithms/utils/common_utils.py,sha256=UoZxeAQmZk3b3hK51KFwq6XfdbeduXVjdYIxAxlAzB8,34982
 ai_edge_quantizer/algorithms/utils/common_utils_test.py,sha256=zqapGEfYhjQWe9cNGPLmdbwtEUUYQRhlO_kNe0cXX6E,18104
 ai_edge_quantizer/transformations/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
 ai_edge_quantizer/transformations/dequant_insert.py,sha256=sL1LHFVzBDSd9jgrzlHz38LWU0bwmVX7iBkaNcui0ts,3566
@@ -50,6 +52,8 @@ ai_edge_quantizer/transformations/duplicate_tensor.py,sha256=HF1uuKFm5kFF6X0XUpd
 ai_edge_quantizer/transformations/duplicate_tensor_test.py,sha256=s-RqSxNBMfVJyCunXz2eb7-KA6UiBmbOmL7phLslENQ,5056
 ai_edge_quantizer/transformations/emulated_subchannel.py,sha256=HVaRxoC8PCAvy3xeMv3OIymukUy_yW1zK0xN8Ann6I4,13602
 ai_edge_quantizer/transformations/emulated_subchannel_test.py,sha256=gZP6u9NdPXl7s19qB_Un8evou9ZZV6I9Gy0E1rdobHM,7722
+ai_edge_quantizer/transformations/insert_hadamard_rotation.py,sha256=rBbKgcVKHie38NT2UQ7KQ1xCb2tRu_rVl0yFloOAW_A,7562
+ai_edge_quantizer/transformations/insert_hadamard_rotation_test.py,sha256=iV1p3nZfHUATV2YRoBOYurnu3pLy8n3aFppLWGQOPdA,7268
 ai_edge_quantizer/transformations/quant_insert.py,sha256=jn6HsJaV-sqBiFPY-Aqbd64t8zgcYVkEkZI375x_FWY,3958
 ai_edge_quantizer/transformations/quant_insert_test.py,sha256=X9ptPDvJCFkR5tejKnD1SlHFGPazQTW-wNNMV9MEAuw,10107
 ai_edge_quantizer/transformations/quantize_tensor.py,sha256=kjaNrw9mnrn0t8u0vey9S_uPz3iVUicwy4rluxVqV3E,7617
@@ -59,15 +63,15 @@ ai_edge_quantizer/transformations/transformation_utils_test.py,sha256=E90O4PYSjz
 ai_edge_quantizer/utils/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
 ai_edge_quantizer/utils/calibration_utils.py,sha256=1Fj9MIO6aLZIRgyd4axvZN4S_O64nB_-Miu1WP664js,2536
 ai_edge_quantizer/utils/calibration_utils_test.py,sha256=Z-AcdTieesWFKyKBb08ZXm4Mgu6cvJ4bg2-MJ7hLD10,2856
-ai_edge_quantizer/utils/test_utils.py,sha256=
-ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=
+ai_edge_quantizer/utils/test_utils.py,sha256=fXwQ353P7tSy7W4Hs6YskIbCLLaBYGA724hMMbcqCUk,7129
+ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=zNlR_SJAkDi-EX63O3pNpFLVqSktysScZKgKk1XT3c8,10616
 ai_edge_quantizer/utils/tfl_flatbuffer_utils_test.py,sha256=K1SbK8q92qYVtiVj0I0GtugsPTkpIpEKv9zakvFV_Sc,8555
 ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=WoewyiZpaua80oP0tpgyrw5Ws1v7f4vl88vdzS0UjDE,13490
 ai_edge_quantizer/utils/tfl_interpreter_utils_test.py,sha256=6fjkM-rycZ95L4yfvlr0TN6RlrhfPzxNUYrZaYO_F0A,12013
 ai_edge_quantizer/utils/validation_utils.py,sha256=oYw33Sg547AqtGw-choPUJmp9SAKkV46J_ddqSsum2Q,3950
 ai_edge_quantizer/utils/validation_utils_test.py,sha256=V_qNDikPD4OPB-siOLQCWNVWTAu87h2IgNYt7teFd-o,2934
-ai_edge_quantizer_nightly-0.1.0.
-ai_edge_quantizer_nightly-0.1.0.
-ai_edge_quantizer_nightly-0.1.0.
-ai_edge_quantizer_nightly-0.1.0.
-ai_edge_quantizer_nightly-0.1.0.
+ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info/METADATA,sha256=jGKo0MakT_DI_wLdclRVhNbLFRzJ2GDmQX7UGLDsr4I,1528
+ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
+ai_edge_quantizer_nightly-0.1.0.dev20250514.dist-info/RECORD,,
File without changes
File without changes