ai-edge-quantizer-nightly 0.0.1.dev20250302-py3-none-any.whl → 0.5.0.dev20260103-py3-none-any.whl
This diff shows the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- ai_edge_quantizer/algorithm_manager.py +224 -0
- ai_edge_quantizer/algorithm_manager_api_test.py +7 -0
- ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting_test.py +2 -2
- ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py +643 -20
- ai_edge_quantizer/algorithms/uniform_quantize/common_quantize_test.py +29 -2
- ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py +29 -35
- ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery_test.py +35 -12
- ai_edge_quantizer/algorithms/uniform_quantize/hadamard_rotation.py +414 -0
- ai_edge_quantizer/algorithms/uniform_quantize/hadamard_rotation_test.py +440 -0
- ai_edge_quantizer/algorithms/uniform_quantize/mse.py +127 -0
- ai_edge_quantizer/algorithms/uniform_quantize/mse_test.py +195 -0
- ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize.py +54 -168
- ai_edge_quantizer/algorithms/uniform_quantize/naive_min_max_quantize_test.py +54 -17
- ai_edge_quantizer/algorithms/uniform_quantize/octav.py +188 -0
- ai_edge_quantizer/algorithms/uniform_quantize/octav_test.py +240 -0
- ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor.py +260 -13
- ai_edge_quantizer/algorithms/uniform_quantize/uniform_quantize_tensor_test.py +152 -5
- ai_edge_quantizer/algorithms/utils/common_utils.py +142 -54
- ai_edge_quantizer/calibrator.py +58 -94
- ai_edge_quantizer/calibrator_test.py +5 -74
- ai_edge_quantizer/default_policy.py +108 -16
- ai_edge_quantizer/model_modifier.py +132 -8
- ai_edge_quantizer/model_modifier_test.py +81 -1
- ai_edge_quantizer/model_validator.py +38 -10
- ai_edge_quantizer/model_validator_test.py +2 -1
- ai_edge_quantizer/params_generator.py +230 -47
- ai_edge_quantizer/params_generator_test.py +366 -261
- ai_edge_quantizer/qtyping.py +92 -6
- ai_edge_quantizer/quantizer.py +167 -23
- ai_edge_quantizer/quantizer_test.py +288 -26
- ai_edge_quantizer/recipe.py +156 -21
- ai_edge_quantizer/recipe_manager.py +158 -1
- ai_edge_quantizer/recipe_manager_test.py +146 -32
- ai_edge_quantizer/recipe_test.py +93 -17
- ai_edge_quantizer/transformation_instruction_generator.py +313 -46
- ai_edge_quantizer/transformation_instruction_generator_test.py +449 -27
- ai_edge_quantizer/transformation_performer.py +112 -58
- ai_edge_quantizer/transformation_performer_test.py +176 -4
- ai_edge_quantizer/transformations/duplicate_buffer.py +46 -0
- ai_edge_quantizer/transformations/duplicate_buffer_test.py +106 -0
- ai_edge_quantizer/transformations/duplicate_tensor.py +62 -0
- ai_edge_quantizer/transformations/duplicate_tensor_test.py +131 -0
- ai_edge_quantizer/transformations/insert_decomposed_hadamard_rotation.py +299 -0
- ai_edge_quantizer/transformations/insert_decomposed_hadamard_rotation_test.py +244 -0
- ai_edge_quantizer/transformations/insert_hadamard_rotation.py +186 -0
- ai_edge_quantizer/transformations/insert_hadamard_rotation_test.py +200 -0
- ai_edge_quantizer/transformations/quantize_tensor.py +24 -44
- ai_edge_quantizer/transformations/quantize_tensor_test.py +3 -2
- ai_edge_quantizer/transformations/transformation_utils.py +157 -11
- ai_edge_quantizer/transformations/transformation_utils_test.py +96 -2
- ai_edge_quantizer/utils/calibration_utils.py +263 -1
- ai_edge_quantizer/utils/calibration_utils_test.py +173 -3
- ai_edge_quantizer/utils/constrained_ops_utils.py +111 -0
- ai_edge_quantizer/utils/constrained_ops_utils_test.py +50 -0
- ai_edge_quantizer/utils/test_utils.py +191 -58
- ai_edge_quantizer/utils/tfl_flatbuffer_utils.py +96 -50
- ai_edge_quantizer/utils/tfl_flatbuffer_utils_test.py +20 -0
- ai_edge_quantizer/utils/tfl_interpreter_utils.py +138 -5
- ai_edge_quantizer/utils/tfl_interpreter_utils_test.py +29 -2
- ai_edge_quantizer/utils/validation_utils.py +114 -4
- ai_edge_quantizer/utils/validation_utils_test.py +80 -0
- {ai_edge_quantizer_nightly-0.0.1.dev20250302.dist-info → ai_edge_quantizer_nightly-0.5.0.dev20260103.dist-info}/METADATA +13 -3
- ai_edge_quantizer_nightly-0.5.0.dev20260103.dist-info/RECORD +81 -0
- {ai_edge_quantizer_nightly-0.0.1.dev20250302.dist-info → ai_edge_quantizer_nightly-0.5.0.dev20260103.dist-info}/WHEEL +1 -1
- ai_edge_quantizer/transformations/emulated_subchannel.py +0 -363
- ai_edge_quantizer/transformations/emulated_subchannel_test.py +0 -212
- ai_edge_quantizer_nightly-0.0.1.dev20250302.dist-info/RECORD +0 -67
- {ai_edge_quantizer_nightly-0.0.1.dev20250302.dist-info → ai_edge_quantizer_nightly-0.5.0.dev20260103.dist-info/licenses}/LICENSE +0 -0
- {ai_edge_quantizer_nightly-0.0.1.dev20250302.dist-info → ai_edge_quantizer_nightly-0.5.0.dev20260103.dist-info}/top_level.txt +0 -0
ai_edge_quantizer/transformations/insert_decomposed_hadamard_rotation_test.py (new file, +244 lines)

```python
# Copyright 2024 The AI Edge Quantizer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Test insertion of the Decomposed Hadamard rotation ops."""

import os
import numpy as np
from tensorflow.python.platform import googletest
from ai_edge_quantizer import qtyping
from ai_edge_quantizer.transformations import insert_decomposed_hadamard_rotation
from ai_edge_quantizer.transformations import transformation_utils
from ai_edge_quantizer.utils import test_utils
from ai_edge_quantizer.utils import tfl_flatbuffer_utils
from ai_edge_litert import schema_py_generated  # pylint: disable=g-direct-tensorflow-import

_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile('..')


class InsertDecomposedHadamardRotationFullyConnectedTest(googletest.TestCase):

  def setUp(self):
    super().setUp()
    model_path = os.path.join(
        _TEST_DATA_PREFIX_PATH, 'tests/models/single_fc_bias.tflite'
    )
    self.model = tfl_flatbuffer_utils.read_model(model_path)
    self.params = qtyping.UniformQuantParams(
        num_bits=8,
        quantized_dimension=None,
        scale=np.ones(1),
        zero_point=np.zeros(1),
        hadamard=qtyping.UniformQuantParams.HadamardRotationParams(
            random_binary_vector=np.ones(1),
            hadamard_size=2,
        ),
    )

  def test_raise_unsupported_qparams(self):
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda err: 'uniform quantization' in str(err)
    ):
      insert_decomposed_hadamard_rotation.insert_decomposed_hadamard_rotation(
          transformation_utils.TransformationInput(
              tensor_id=0,
              op_codes=self.model.operatorCodes,
              buffers=self.model.buffers,
              subgraph=self.model.subgraphs[0],
              producer=-1,
              consumers=[-1],
              quant_params=qtyping.NonLinearQuantParams(
                  num_bits=16, quantized_data=None
              ),
          )
      )

  def test_raise_missing_hadamard_data(self):
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda err: 'quantization params are not set' in str(err)
    ):
      insert_decomposed_hadamard_rotation.insert_decomposed_hadamard_rotation(
          transformation_utils.TransformationInput(
              tensor_id=0,
              op_codes=self.model.operatorCodes,
              buffers=self.model.buffers,
              subgraph=self.model.subgraphs[0],
              producer=-1,
              consumers=[-1],
              quant_params=qtyping.UniformQuantParams(
                  num_bits=8,
                  quantized_dimension=None,
                  scale=np.ones(1),
                  zero_point=np.zeros(1),
              ),
          )
      )

  def test_raise_non_float32_tensor(self):
    self.model.subgraphs[0].tensors[
        0
    ].type = schema_py_generated.TensorType.INT32
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda err: 'float32 tensors' in str(err)
    ):
      insert_decomposed_hadamard_rotation.insert_decomposed_hadamard_rotation(
          transformation_utils.TransformationInput(
              tensor_id=0,
              op_codes=self.model.operatorCodes,
              buffers=self.model.buffers,
              subgraph=self.model.subgraphs[0],
              producer=-1,
              consumers=[-1],
              quant_params=self.params,
          ),
      )

  def test_insert_decomposed_ops(self):
    # Insert Decomposed Hadamard ops before fully_connected
    info = (
        insert_decomposed_hadamard_rotation.insert_decomposed_hadamard_rotation(
            transformation_utils.TransformationInput(
                tensor_id=0,
                op_codes=self.model.operatorCodes,
                buffers=self.model.buffers,
                subgraph=self.model.subgraphs[0],
                producer=-1,
                consumers=[0],  # Consumer is the FC op
                quant_params=self.params,
            )
        )
    )
    subgraph = self.model.subgraphs[0]
    self.assertEqual(info.op_id, 0)
    self.assertEqual(info.num_ops_added, 3)
    # Model had 4 tensors, added 6 tensors (3 activations 3 constants).
    self.assertEqual(info.output_tensor_id, 9)
    self.assertLen(subgraph.tensors, 10)
    # Model had 1 op code, added RESHAPE and FC.
    self.assertLen(self.model.operatorCodes, 3)
    self.assertEqual(
        self.model.operatorCodes[1].builtinCode,
        schema_py_generated.BuiltinOperator.RESHAPE,
    )
    self.assertEqual(
        self.model.operatorCodes[2].builtinCode,
        schema_py_generated.BuiltinOperator.FULLY_CONNECTED,
    )

    # Op 0: RESHAPE
    reshape_op = subgraph.operators[0]
    self.assertEqual(
        self.model.operatorCodes[reshape_op.opcodeIndex].builtinCode,
        schema_py_generated.BuiltinOperator.RESHAPE,
    )
    self.assertEqual(reshape_op.inputs[0], 0)  # Graph input
    self.assertEqual(reshape_op.outputs[0], 5)  # Reshape output

    # Op 1: FULLY_CONNECTED
    fc_op = subgraph.operators[1]
    self.assertEqual(
        self.model.operatorCodes[fc_op.opcodeIndex].builtinCode,
        schema_py_generated.BuiltinOperator.FULLY_CONNECTED,
    )
    self.assertEqual(fc_op.inputs[0], 5)  # Reshape output
    self.assertEqual(fc_op.inputs[1], 6)  # Hadamard matrix tensor
    self.assertEqual(fc_op.outputs[0], 7)  # FC output

    # Op 2: RESHAPE (post)
    post_reshape_op = subgraph.operators[2]
    self.assertEqual(
        self.model.operatorCodes[post_reshape_op.opcodeIndex].builtinCode,
        schema_py_generated.BuiltinOperator.RESHAPE,
    )
    self.assertEqual(post_reshape_op.inputs[0], 7)  # FC output
    self.assertEqual(post_reshape_op.outputs[0], 9)  # Post Reshape output

    # Op 3: Original FULLY_CONNECTED
    orig_fc_op = subgraph.operators[3]
    self.assertEqual(
        self.model.operatorCodes[orig_fc_op.opcodeIndex].builtinCode,
        schema_py_generated.BuiltinOperator.FULLY_CONNECTED,
    )
    # Input to the original FC is the post reshape output
    self.assertEqual(orig_fc_op.inputs[0], 9)


class InsertDecomposedHadamardRotationEmbeddingLookupTest(googletest.TestCase):

  def setUp(self):
    super().setUp()
    model_path = os.path.join(
        _TEST_DATA_PREFIX_PATH, 'tests/models/embedding_lookup.tflite'
    )
    self.model = tfl_flatbuffer_utils.read_model(model_path)
    self.params = qtyping.UniformQuantParams(
        num_bits=8,
        quantized_dimension=None,
        scale=np.ones(1),
        zero_point=np.zeros(1),
        hadamard=qtyping.UniformQuantParams.HadamardRotationParams(
            random_binary_vector=np.ones(1),
            hadamard_size=2,
        ),
    )

  def test_insert_decomposed_ops(self):
    # Insert Decomposed Hadamard ops after embedding_lookup
    info = (
        insert_decomposed_hadamard_rotation.insert_decomposed_hadamard_rotation(
            transformation_utils.TransformationInput(
                tensor_id=2,  # Output of embedding_lookup
                op_codes=self.model.operatorCodes,
                buffers=self.model.buffers,
                subgraph=self.model.subgraphs[0],
                producer=0,
                consumers=[-1],  # Output is a graph output
                quant_params=self.params,
            )
        )
    )
    subgraph = self.model.subgraphs[0]
    self.assertEqual(info.op_id, 1)
    self.assertEqual(info.num_ops_added, 3)
    # Model had 3 tensors, added 6 (3 activations 3 constants).
    self.assertEqual(info.output_tensor_id, 8)
    self.assertLen(subgraph.tensors, 9)
    # Model had 1 op code, added RESHAPE and FC.
    self.assertLen(self.model.operatorCodes, 3)

    # Op 0: EMBEDDING_LOOKUP (Original)
    # Op 1: RESHAPE
    reshape_op = subgraph.operators[1]
    self.assertEqual(reshape_op.inputs[0], 2)  # Embedding lookup output
    self.assertEqual(reshape_op.outputs[0], 4)

    # Op 2: FULLY_CONNECTED
    fc_op = subgraph.operators[2]
    self.assertEqual(fc_op.inputs[0], 4)
    self.assertEqual(fc_op.inputs[1], 5)  # Hadamard matrix
    self.assertEqual(fc_op.outputs[0], 6)

    # Op 3: RESHAPE (post)
    post_reshape_op = subgraph.operators[3]
    self.assertEqual(post_reshape_op.inputs[0], 6)
    self.assertEqual(post_reshape_op.outputs[0], 8)

    # Check graph output
    self.assertIn(8, subgraph.outputs)
    self.assertNotIn(2, subgraph.outputs)


if __name__ == '__main__':
  googletest.main()
```
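For orientation: the RESHAPE → FULLY_CONNECTED → RESHAPE triplet these tests assert is the decomposed form of a blockwise Hadamard rotation. The tensor's last dimension is split into blocks of `hadamard_size`, each block is multiplied by a Hadamard matrix materialized as the FC weights, and the result is reshaped back. A minimal numpy sketch of the equivalent arithmetic, assuming a Sylvester-constructed, 1/√n-normalized matrix and omitting the random sign vector; this is illustrative only, not the quantizer's actual kernel:

```python
# Illustrative sketch: arithmetic equivalent to the
# RESHAPE -> FULLY_CONNECTED -> RESHAPE pattern asserted above.
import numpy as np


def sylvester_hadamard(n: int) -> np.ndarray:
  """Builds an n x n Hadamard matrix (n a power of two), 1/sqrt(n)-normalized."""
  h = np.array([[1.0]])
  while h.shape[0] < n:
    h = np.block([[h, h], [h, -h]])
  return h / np.sqrt(n)


def decomposed_rotation(x: np.ndarray, hadamard_size: int) -> np.ndarray:
  """Rotates the last axis of x in blocks of hadamard_size."""
  h = sylvester_hadamard(hadamard_size)
  orig_shape = x.shape
  # RESHAPE: split the last dimension into blocks of hadamard_size.
  blocks = x.reshape(-1, hadamard_size)
  # FULLY_CONNECTED with the Hadamard matrix as weights.
  rotated = blocks @ h.T
  # RESHAPE back to the original shape.
  return rotated.reshape(orig_shape)


x = np.arange(8, dtype=np.float32).reshape(2, 4)
print(decomposed_rotation(x, hadamard_size=2))
```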
ai_edge_quantizer/transformations/insert_hadamard_rotation.py (new file, +186 lines)

```python
# Copyright 2024 The AI Edge Quantizer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Hadamard rotation pattern transformation."""

from flatbuffers import flexbuffers
import numpy as np
from ai_edge_quantizer import qtyping
from ai_edge_quantizer.transformations import transformation_utils
from ai_edge_litert import schema_py_generated  # pylint: disable=g-direct-tensorflow-import


def _to_flexbuffer(
    hadamard_size: int,
    random_binary_vector: list[np.int8],
) -> bytes:
  """Converts hadamard_size to flexbuffer."""
  fbb = flexbuffers.Builder()
  with fbb.Map():
    fbb.Int('hadamard_size', hadamard_size)
    fbb.VectorFromElements('random_binary_vector', random_binary_vector)
  return fbb.Finish()


def _update_embedding_lookup_consumers(
    transformation: transformation_utils.TransformationInput,
    new_tensor_id: int,
) -> bool:
  """Updates the consumers of the embedding lookup op to use the new tensor.

  Args:
    transformation: The transformation input to update the consumers of.
    new_tensor_id: The new tensor id to use as the input to the embedding lookup
      consumers.
  """
  for consumer in transformation.consumers:
    # If the consumer is a graph output and not an op, we can ignore it here
    # since the graph output will be updated later.
    if consumer == -1:
      continue
    consumer_op = transformation.subgraph.operators[consumer]
    # Find the input that was attached to the insertion point, and replace it
    # with the new tensor.
    for i in range(len(consumer_op.inputs)):
      if consumer_op.inputs[i] == transformation.tensor_id:
        consumer_op.inputs[i] = new_tensor_id


def _update_fully_connected_consumers(
    transformation: transformation_utils.TransformationInput,
    new_tensor_id: int,
) -> bool:
  """Updates the fully connected op(s) to use the new tensor.

  Since the new tensor is inserted to the fully_connected's input, we need to
  scan each consumer (in case of multiple fully_connected ops), and update
  the input tensor to the new tensor.

  Args:
    transformation: The transformation input to update the consumers of.
    new_tensor_id: The new tensor id to use as the input to the fully connected
      consumers.

  Returns:
    True if the fully connected op(s) were updated to use the new tensor.
  """
  updated = False
  for consumer in transformation.consumers:
    if (
        transformation_utils.get_schema_op_id(transformation, consumer)
        == schema_py_generated.BuiltinOperator.FULLY_CONNECTED
    ):
      transformation.subgraph.operators[consumer].inputs[0] = new_tensor_id
      updated = True
  return updated


def insert_hadamard_rotation(
    transformation_input: transformation_utils.TransformationInput,
) -> qtyping.TransformationInfo:
  """Inserts a custom aeq.hadamard_rotation op on this tensor.

  This function works for float32 tensors only.

  Args:
    transformation_input: The transformation input to insert the custom op on.

  Returns:
    The transformation info of the inserted custom op.

  Raises:
    ValueError: If the transformation input is not a uniform quantization
      transformation.
    ValueError: If the Hadamard quantization params are not set.
    ValueError: If the tensor is not a float32 tensor.
    ValueError: If no supported ops were found as the tensor's producer or
      consumers.
  """
  if not isinstance(
      transformation_input.quant_params, qtyping.UniformQuantParams
  ):
    raise ValueError('Hadamard rotation supports uniform quantization only')

  if transformation_input.quant_params.hadamard is None:
    raise ValueError(
        'Hadamard rotation quantization params are not set but op insertion is'
        ' requested.'
    )

  tensor = transformation_input.subgraph.tensors[transformation_input.tensor_id]
  if tensor.type != schema_py_generated.TensorType.FLOAT32:
    raise ValueError(
        'The Hadamard rotation op supports float32 tensors only. Got'
        f' {tensor.type} tensor.'
    )

  # Create new custom op with the current tensor as input and a new activation
  # tensor as output.
  custom_op_code_idx = transformation_utils.add_op_code(
      schema_py_generated.BuiltinOperator.CUSTOM,
      transformation_input.op_codes,
      'aeq.hadamard_rotation',
  )
  custom_op = schema_py_generated.OperatorT()
  custom_op.opcodeIndex = custom_op_code_idx
  custom_op.inputs = [transformation_input.tensor_id]
  custom_op.customOptions = _to_flexbuffer(
      transformation_input.quant_params.hadamard.hadamard_size,
      transformation_input.quant_params.hadamard.random_binary_vector.tolist(),
  )
  new_tensor_id = transformation_utils.add_new_activation_tensor(
      tensor.name + b'_rotated',
      tensor.shapeSignature
      if tensor.shapeSignature is not None
      else tensor.shape,
      schema_py_generated.TensorType.FLOAT32,
      transformation_input.subgraph,
  )
  custom_op.outputs = [new_tensor_id]

  # Update the users of this tensor to use the new tensor.
  if (
      transformation_utils.get_producer_schema_op_id(transformation_input)
      == schema_py_generated.BuiltinOperator.EMBEDDING_LOOKUP
  ):
    _update_embedding_lookup_consumers(transformation_input, new_tensor_id)
  elif not _update_fully_connected_consumers(
      transformation_input, new_tensor_id
  ):
    raise ValueError(
        'The Hadamard rotation op supports embedding lookup and fully connected'
        ' ops only, but no such ops were found.'
    )

  # If the tensor is a graph output, we need to replace the tensor with the
  # new tensor.
  for i, output in enumerate(transformation_input.subgraph.outputs):
    if output == transformation_input.tensor_id:
      transformation_input.subgraph.outputs[i] = new_tensor_id

  # Find the actual insertion point. The insertion point should be after the
  # producer op and before the first consumer op. The max() operation ensures
  # that we're not using -1 as the insertion point.
  first_consumer_id = min(transformation_input.consumers)
  op_id = max(transformation_input.producer + 1, first_consumer_id)

  # Insert the custom op.
  transformation_input.subgraph.operators.insert(op_id, custom_op)

  return qtyping.TransformationInfo(
      op_id=op_id,
      num_ops_added=1,
      output_tensor_id=new_tensor_id,
  )
```
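`_to_flexbuffer` above packs the custom op's options into a FlexBuffer map. A small round-trip sketch of that payload, building it with the same calls the source uses and decoding it with `flexbuffers.Loads` from the flatbuffers Python package (the decode step is illustrative, not the runtime kernel's actual parsing code; the key names and example values beyond `hadamard_size`/`random_binary_vector` are assumptions):

```python
from flatbuffers import flexbuffers

# Build the custom options exactly as _to_flexbuffer does, with sample values.
fbb = flexbuffers.Builder()
with fbb.Map():
  fbb.Int('hadamard_size', 4)
  fbb.VectorFromElements('random_binary_vector', [1, -1, 1, 1])
data = fbb.Finish()

# Decode back into native Python types to inspect what the op will see.
options = flexbuffers.Loads(bytes(data))
print(options['hadamard_size'])         # 4
print(options['random_binary_vector'])  # [1, -1, 1, 1]
```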
ai_edge_quantizer/transformations/insert_hadamard_rotation_test.py (new file, +200 lines)

```python
# Copyright 2024 The AI Edge Quantizer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Test insertion of the Hadamard rotation custom op."""

import os
import numpy as np
from tensorflow.python.platform import googletest
from ai_edge_quantizer import qtyping
from ai_edge_quantizer.transformations import insert_hadamard_rotation
from ai_edge_quantizer.transformations import transformation_utils
from ai_edge_quantizer.utils import test_utils
from ai_edge_quantizer.utils import tfl_flatbuffer_utils
from ai_edge_litert import schema_py_generated  # pylint: disable=g-direct-tensorflow-import

_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile('..')


class InsertHadamardRotationFullyConnectedTest(googletest.TestCase):

  def setUp(self):
    super().setUp()
    model_path = os.path.join(
        _TEST_DATA_PREFIX_PATH, 'tests/models/single_fc_bias.tflite'
    )
    self.model = tfl_flatbuffer_utils.read_model(model_path)
    self.params = qtyping.UniformQuantParams(
        num_bits=8,
        quantized_dimension=None,
        scale=np.ones(1),
        zero_point=np.zeros(1),
        hadamard=qtyping.UniformQuantParams.HadamardRotationParams(
            random_binary_vector=np.ones(1),
            hadamard_size=2,
        ),
    )

  def test_raise_unsupported_qparams(self):
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda err: 'uniform quantization' in str(err)
    ):
      insert_hadamard_rotation.insert_hadamard_rotation(
          transformation_utils.TransformationInput(
              tensor_id=0,
              op_codes=self.model.operatorCodes,
              buffers=self.model.buffers,
              subgraph=self.model.subgraphs[0],
              producer=-1,
              consumers=[-1],
              quant_params=qtyping.NonLinearQuantParams(
                  num_bits=16, quantized_data=None
              ),
          )
      )

  def test_raise_missing_hadamard_data(self):
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda err: 'quantization params are not set' in str(err)
    ):
      insert_hadamard_rotation.insert_hadamard_rotation(
          transformation_utils.TransformationInput(
              tensor_id=0,
              op_codes=self.model.operatorCodes,
              buffers=self.model.buffers,
              subgraph=self.model.subgraphs[0],
              producer=-1,
              consumers=[-1],
              quant_params=qtyping.UniformQuantParams(
                  num_bits=8,
                  quantized_dimension=None,
                  scale=np.ones(1),
                  zero_point=np.zeros(1),
              ),
          )
      )

  def test_raise_non_float32_tensor(self):
    self.model.subgraphs[0].tensors[
        0
    ].type = schema_py_generated.TensorType.INT32
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda err: 'float32 tensors' in str(err)
    ):
      insert_hadamard_rotation.insert_hadamard_rotation(
          transformation_utils.TransformationInput(
              tensor_id=0,
              op_codes=self.model.operatorCodes,
              buffers=self.model.buffers,
              subgraph=self.model.subgraphs[0],
              producer=-1,
              consumers=[-1],
              quant_params=self.params,
          ),
      )

  def test_insert_single_custom_op(self):
    # Insert aeq.hadamard_rotation before fully_connected
    info = insert_hadamard_rotation.insert_hadamard_rotation(
        transformation_utils.TransformationInput(
            tensor_id=0,
            op_codes=self.model.operatorCodes,
            buffers=self.model.buffers,
            subgraph=self.model.subgraphs[0],
            producer=-1,
            consumers=[-1],
            quant_params=self.params,
        )
    )
    subgraph = self.model.subgraphs[0]
    self.assertEqual(info.op_id, 0)
    self.assertEqual(info.num_ops_added, 1)
    # Model had 4 tensors, added 1.
    self.assertEqual(info.output_tensor_id, 4)
    self.assertLen(subgraph.tensors, 5)
    # Model had 1 op, added a new one.
    self.assertLen(self.model.operatorCodes, 2)
    self.assertEqual(
        self.model.operatorCodes[1].builtinCode,
        schema_py_generated.BuiltinOperator.CUSTOM,
    )
    # First op is now the custom op, precedes fully_connected.
    self.assertEqual(
        self.model.operatorCodes[subgraph.operators[0].opcodeIndex].builtinCode,
        schema_py_generated.BuiltinOperator.CUSTOM,
    )
    # Input to the custom op is graph input
    self.assertEqual(subgraph.operators[0].inputs[0], 0)
    # Input to the FC is the custom op output
    self.assertEqual(subgraph.operators[1].inputs[0], 4)


class InsertHadamardRotationEmbeddingLookupTest(googletest.TestCase):

  def setUp(self):
    super().setUp()
    model_path = os.path.join(
        _TEST_DATA_PREFIX_PATH, 'tests/models/embedding_lookup.tflite'
    )
    self.model = tfl_flatbuffer_utils.read_model(model_path)
    self.params = qtyping.UniformQuantParams(
        num_bits=8,
        quantized_dimension=None,
        scale=np.ones(1),
        zero_point=np.zeros(1),
        hadamard=qtyping.UniformQuantParams.HadamardRotationParams(
            random_binary_vector=np.ones(1),
            hadamard_size=2,
        ),
    )

  def test_insert_single_custom_op(self):
    # Insert aeq.hadamard_rotation after embedding_lookup
    info = insert_hadamard_rotation.insert_hadamard_rotation(
        transformation_utils.TransformationInput(
            tensor_id=2,
            op_codes=self.model.operatorCodes,
            buffers=self.model.buffers,
            subgraph=self.model.subgraphs[0],
            producer=0,
            consumers=[-1],
            quant_params=self.params,
        )
    )
    subgraph = self.model.subgraphs[0]
    self.assertEqual(info.op_id, 1)
    self.assertEqual(info.num_ops_added, 1)
    # Model had 3 tensors, added 1.
    self.assertEqual(info.output_tensor_id, 3)
    self.assertLen(subgraph.tensors, 4)
    # Model had 1 op, added a new one.
    self.assertLen(self.model.operatorCodes, 2)
    self.assertEqual(
        self.model.operatorCodes[1].builtinCode,
        schema_py_generated.BuiltinOperator.CUSTOM,
    )
    # Second op is now the custom op, after embedding_lookup.
    self.assertEqual(
        self.model.operatorCodes[subgraph.operators[1].opcodeIndex].builtinCode,
        schema_py_generated.BuiltinOperator.CUSTOM,
    )
    # Input to the custom op is embedding's output
    self.assertEqual(subgraph.operators[1].inputs[0], 2)
    # Custom op's output is the new tensor
    self.assertEqual(subgraph.operators[1].outputs[0], 3)


if __name__ == '__main__':
  googletest.main()
```
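The `info.op_id` values asserted in these tests follow directly from the insertion-point arithmetic in `insert_hadamard_rotation` above (`op_id = max(producer + 1, min(consumers))`). A worked check, with the producer/consumer values taken from the tests:

```python
def insertion_point(producer: int, consumers: list[int]) -> int:
  # Mirrors the op_id computation in insert_hadamard_rotation: place the new
  # op right after its producer; max() keeps a consumers list of [-1]
  # (graph output) from pulling the index negative.
  return max(producer + 1, min(consumers))

# single_fc_bias.tflite case: the tensor is a graph input (producer == -1),
# so the custom op lands at index 0, before the fully_connected op.
assert insertion_point(producer=-1, consumers=[-1]) == 0
# embedding_lookup.tflite case: the tensor is produced by op 0 and consumed
# as a graph output, so the custom op lands at index 1.
assert insertion_point(producer=0, consumers=[-1]) == 1
```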