ai-edge-quantizer-nightly 0.5.0.dev20251216__py3-none-any.whl → 0.5.0.dev20251218__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -133,6 +133,7 @@ MIN_MAX_OP_NAME_MATERIALIZE_FUNC_DICT = {
     _TFLOpName.NOT_EQUAL: common_quantize.materialize_not_equal,
     _TFLOpName.MIRROR_PAD: common_quantize.materialize_mirror_pad,
     _TFLOpName.SPACE_TO_DEPTH: common_quantize.materialize_space_to_depth,
+    _TFLOpName.RELU: common_quantize.materialize_relu,
 }
 for op_name, materialize_func in MIN_MAX_OP_NAME_MATERIALIZE_FUNC_DICT.items():
   register_quantized_op(
@@ -288,6 +289,7 @@ _OCTAV_OP_NAME_MATERIALIZE_FUNC_DICT = immutabledict({
     _TFLOpName.NOT_EQUAL: common_quantize.materialize_not_equal,
     _TFLOpName.MIRROR_PAD: common_quantize.materialize_mirror_pad,
     _TFLOpName.SPACE_TO_DEPTH: common_quantize.materialize_space_to_depth,
+    _TFLOpName.RELU: common_quantize.materialize_relu,
 })

 for op_name, materialize_func in _OCTAV_OP_NAME_MATERIALIZE_FUNC_DICT.items():
@@ -1070,6 +1070,21 @@ def materialize_not_equal(
   )


+def materialize_relu(
+    get_tensor_quant_params_fn: qtyping.GetTensorQuantParamsFuncSignature,
+    op_info: qtyping.OpInfo,
+    graph_info: qtyping.GraphInfo,
+    tensor_name_to_qsv: dict[str, Any],
+) -> list[qtyping.TensorTransformationParams]:
+  """Materialize tensors in tfl.relu."""
+  return common_utils.materialize_standard_op(
+      op_info,
+      graph_info,
+      tensor_name_to_qsv,
+      get_tensor_quant_params_fn,
+  )
+
+
 def _get_tensor_shape_for_blockwise(
     tensor_shape: Sequence[int], quantized_dim: int, block_size: int
 ) -> list[int]:
@@ -19,9 +19,9 @@ import collections
 import copy
 import json
 from typing import Any, Union
+from ai_edge_litert.tools import flatbuffer_utils
 from ai_edge_quantizer import qtyping
 from ai_edge_litert import schema_py_generated as schema  # pylint:disable=g-direct-tensorflow-import
-from tensorflow.lite.tools import flatbuffer_utils  # pylint: disable=g-direct-tensorflow-import

 _TFLOpName = qtyping.TFLOperationName
 _OpQuantizationConfig = qtyping.OpQuantizationConfig
@@ -198,7 +198,8 @@ DEFAULT_JSON_POLICY = """
       "REDUCE_MIN",
       "EQUAL",
       "NOT_EQUAL",
-      "MIRROR_PAD"
+      "MIRROR_PAD",
+      "RELU"
     ],
     "static_wi8_ai8": [
       "ADD",
@@ -248,7 +249,8 @@ DEFAULT_JSON_POLICY = """
       "EQUAL",
       "NOT_EQUAL",
       "MIRROR_PAD",
-      "SPACE_TO_DEPTH"
+      "SPACE_TO_DEPTH",
+      "RELU"
     ],
     "static_wi4_ai8": ["FULLY_CONNECTED", "CONV_2D", "INPUT", "OUTPUT"],
     "static_wi4_ai16": ["FULLY_CONNECTED", "CONV_2D", "INPUT", "OUTPUT"],
@@ -21,6 +21,7 @@ import logging

 import numpy as np

+from ai_edge_litert.tools import flatbuffer_utils
 from ai_edge_quantizer import qtyping
 from ai_edge_quantizer import transformation_instruction_generator
 from ai_edge_quantizer import transformation_performer
@@ -28,7 +29,6 @@ from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 from ai_edge_quantizer.utils import tfl_interpreter_utils
 from ai_edge_litert import interpreter as tfl  # pylint: disable=g-direct-tensorflow-import
 from ai_edge_litert import schema_py_generated  # pylint: disable=g-direct-tensorflow-import
-from tensorflow.lite.tools import flatbuffer_utils  # pylint: disable=g-direct-tensorflow-import


 _DEQUANT_SUFFIX = "_dequant"
@@ -19,13 +19,13 @@ import os
 import tracemalloc
 from tensorflow.python.platform import googletest
 from absl.testing import parameterized
+from ai_edge_litert.tools import flatbuffer_utils
 from ai_edge_quantizer import model_modifier
 from ai_edge_quantizer import params_generator
 from ai_edge_quantizer import qtyping
 from ai_edge_quantizer import recipe_manager
 from ai_edge_quantizer.utils import test_utils
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
-from tensorflow.lite.tools import flatbuffer_utils  # pylint: disable=g-direct-tensorflow-import

 TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile('.')

@@ -25,7 +25,7 @@ from typing import Any, Optional, Union
 import numpy as np

 from ai_edge_quantizer.utils import tfl_interpreter_utils as utils
-from tensorflow.python.platform import gfile  # pylint: disable=g-direct-tensorflow-import
+import os  # tensorflow.python.platform.gfile  # pylint: disable=g-direct-tensorflow-import


 _DEFAULT_SIGNATURE_KEY = utils.DEFAULT_SIGNATURE_KEY
@@ -194,7 +194,7 @@ class ComparisonResult:
     result_save_path = os.path.join(
         save_folder, model_name + '_comparison_result.json'
     )
-    with gfile.GFile(result_save_path, 'w') as output_file_handle:
+    with open(result_save_path, 'w') as output_file_handle:
       output_file_handle.write(json.dumps(result))

     # TODO: b/365578554 - Remove after ME is updated to use the new json format.
@@ -206,7 +206,7 @@ class ComparisonResult:
     json_save_path = os.path.join(
         save_folder, model_name + '_comparison_result_me_input.json'
     )
-    with gfile.GFile(json_save_path, 'w') as output_file_handle:
+    with open(json_save_path, 'w') as output_file_handle:
       output_file_handle.write(json_object)


@@ -82,6 +82,7 @@ class TFLOperationName(str, enum.Enum):
   NOT_EQUAL = 'NOT_EQUAL'
   MIRROR_PAD = 'MIRROR_PAD'
   SPACE_TO_DEPTH = 'SPACE_TO_DEPTH'
+  RELU = 'RELU'


 class QuantizeMode(enum.Enum):
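Because TFLOperationName mixes in str (class header above), the new member behaves both as an enum and as its literal name. A minimal sketch, not part of the diff:

from ai_edge_quantizer import qtyping

# str-valued enum: the new RELU member compares equal to its string form
# and can be looked up by value.
assert qtyping.TFLOperationName.RELU == "RELU"
assert qtyping.TFLOperationName("RELU") is qtyping.TFLOperationName.RELU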
@@ -33,7 +33,7 @@ from ai_edge_quantizer import recipe_manager
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 from ai_edge_quantizer.utils import tfl_interpreter_utils
 from ai_edge_quantizer.utils import validation_utils
-from tensorflow.python.platform import gfile  # pylint: disable=g-direct-tensorflow-import
+import os  # tensorflow.python.platform.gfile  # pylint: disable=g-direct-tensorflow-import


 # Expose algorithm names to users.
@@ -74,15 +74,15 @@ class QuantizationResult:
     Raises:
       RuntimeError: If no quantized model is available.
     """
-    if not gfile.Exists(save_folder):
-      gfile.MakeDirs(save_folder)
+    if not os.path.exists(save_folder):
+      os.makedirs(save_folder)

     model_save_path = os.path.join(save_folder, f'{model_name}.tflite')
     self.export_model(model_save_path, overwrite)

     recipe_save_path = os.path.join(save_folder, model_name + '_recipe.json')
     recipe = json.dumps(self.recipe)
-    with gfile.GFile(recipe_save_path, 'w') as output_file_handle:
+    with open(recipe_save_path, 'w') as output_file_handle:
       output_file_handle.write(recipe)

   def export_model(self, filepath: str, overwrite: bool = False) -> None:
@@ -102,7 +102,7 @@ class QuantizationResult:
       raise RuntimeError(
          'No quantized model to save. Make sure .quantize() is called.'
      )
-    if gfile.Exists(filepath):
+    if os.path.exists(filepath):
      if overwrite:
        logging.warning(
            'The model %s already exists in the folder. Overwriting the model'
@@ -115,7 +115,7 @@ class QuantizationResult:
           ' consider change the model name or specify overwrite=True to'
           ' overwrite the model if needed.'
       )
-    with gfile.GFile(filepath, 'wb') as output_file_handle:
+    with open(filepath, 'wb') as output_file_handle:
       output_file_handle.write(self.quantized_model)


@@ -179,7 +179,7 @@ class Quantizer:
       recipe: Quantization recipe in json format.
     """
     if isinstance(recipe, str):
-      with gfile.Open(recipe) as json_file:
+      with open(recipe) as json_file:
        recipe = json.load(json_file)
     self._recipe_manager.load_quantization_recipe(recipe)

@@ -191,7 +191,7 @@ class Quantizer:
     Args:
       filename: Config policy filename.
     """
-    with gfile.Open(filename, 'r') as f:
+    with open(filename, 'r') as f:
       policy = default_policy.update_default_config_policy(f.read())

     # Register the policy for MIN_MAX_UNIFORM_QUANT algorithm.
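With gfile gone, recipes, policy files, and exported models all go through Python built-ins, so ordinary filesystem paths work end to end. A hypothetical usage sketch: only export_model's signature is shown in this diff; the constructor argument, the recipe file contents, and the load_quantization_recipe/quantize call pattern are assumptions.

from ai_edge_quantizer import quantizer

qt = quantizer.Quantizer("model_with_relu.tflite")        # hypothetical float model path
qt.load_quantization_recipe("static_int8_recipe.json")    # read via built-in open()
result = qt.quantize()
result.export_model("/tmp/model_with_relu_int8.tflite", overwrite=True)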
@@ -20,12 +20,12 @@ from typing import Any, Union

 import numpy as np

+from ai_edge_litert.tools import flatbuffer_utils
 from ai_edge_quantizer import qtyping
 from ai_edge_quantizer.algorithms.utils import common_utils
 from ai_edge_quantizer.utils import constrained_ops_utils
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 from ai_edge_quantizer.utils import tfl_interpreter_utils
-from tensorflow.lite.tools import flatbuffer_utils  # pylint: disable=g-direct-tensorflow-import


 _SignatureInput = dict[str, Any]
@@ -15,7 +15,6 @@

 from absl.testing import parameterized
 import numpy as np
-import tensorflow as tf

 from tensorflow.python.platform import googletest
 from ai_edge_quantizer import quantizer
@@ -132,7 +131,7 @@ class CalibrationQsvAlignmentUtilsTest(parameterized.TestCase):
   def test_calibration_utils_init_fails(self):
     model_path = "non_existent_model.tflite"
     with self.assertRaisesWithPredicateMatch(
-        tf.errors.NotFoundError, lambda err: f"{model_path}" in str(err)
+        Exception, lambda err: f"{model_path}" in str(err)
     ):
       calibration_utils.CalibrationQsvAlignmentUtils(model_path)

@@ -38,7 +38,7 @@ class ConstrainedOpsUtilsTest(parameterized.TestCase):
       dict(
           testcase_name="no_constrain",
          constraint=_OpQuantConstraint.NO_CONSTRAIN,
-          expected_num_ops=24,
+          expected_num_ops=25,
       ),
   )
   def test_get_constrained_op_list(self, constraint, expected_num_ops):
@@ -20,10 +20,10 @@ from typing import Any, Optional, Union
 import immutabledict
 import numpy as np

+from ai_edge_litert.tools import flatbuffer_utils
 from ai_edge_quantizer import qtyping
 from ai_edge_litert import schema_py_generated as schema  # pylint:disable=g-direct-tensorflow-import
-from tensorflow.lite.tools import flatbuffer_utils  # pylint: disable=g-direct-tensorflow-import
-from tensorflow.python.platform import gfile  # pylint: disable=g-direct-tensorflow-import
+import os  # tensorflow.python.platform.gfile  # pylint: disable=g-direct-tensorflow-import

 _TFLOpName = qtyping.TFLOperationName

@@ -76,6 +76,7 @@ TFL_OP_NAME_TO_CODE = immutabledict.immutabledict({
     _TFLOpName.NOT_EQUAL: schema.BuiltinOperator.NOT_EQUAL,
     _TFLOpName.MIRROR_PAD: schema.BuiltinOperator.MIRROR_PAD,
     _TFLOpName.SPACE_TO_DEPTH: schema.BuiltinOperator.SPACE_TO_DEPTH,
+    _TFLOpName.RELU: schema.BuiltinOperator.RELU,
 })

 TFL_OP_CODE_TO_NAME = immutabledict.immutabledict(
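The name-to-code map above now covers RELU. A minimal lookup sketch, not part of the diff; it assumes TFL_OP_CODE_TO_NAME is the reverse of TFL_OP_NAME_TO_CODE, which this hunk names but does not spell out.

from ai_edge_quantizer import qtyping
from ai_edge_quantizer.utils import tfl_flatbuffer_utils

# Enum member -> flatbuffer builtin code, and back via the reversed map.
relu_code = tfl_flatbuffer_utils.TFL_OP_NAME_TO_CODE[qtyping.TFLOperationName.RELU]
assert tfl_flatbuffer_utils.TFL_OP_CODE_TO_NAME[relu_code] == qtyping.TFLOperationName.RELU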
@@ -106,7 +107,7 @@ TENSOR_TYPE_TO_CODE = immutabledict.immutabledict(
     (reversed(item) for item in TENSOR_CODE_TO_TYPE.items())
 )

-# Expose functions in tensorflow.lite.tools.flatbuffer_utils
+# Expose functions in litert.python.tools.flatbuffer_utils
 write_model = flatbuffer_utils.write_model


@@ -141,7 +142,7 @@ def get_model_content(tflite_path: str) -> bytes:
   Returns:
     The model bytes.
   """
-  with gfile.Open(tflite_path, "rb") as tflite_file:
+  with open(tflite_path, "rb") as tflite_file:
     return tflite_file.read()


@@ -154,7 +155,7 @@ def get_model_buffer(tflite_path: str) -> bytearray:
   Returns:
     model_buffer: the model buffer.
   """
-  with gfile.Open(tflite_path, "rb") as tflite_file:
+  with open(tflite_path, "rb") as tflite_file:
     return bytearray(tflite_file.read())


@@ -207,7 +208,7 @@ def parse_fc_bmm_conv_tensors(
   return input_tensor, weight_tensor, bias_tensor, output_tensor


-# flatbuffer_model has Any type since tensorflow.lite.tools.flatbuffer_utils
+# flatbuffer_model has Any type since litert.python.tools.flatbuffer_utils
 # is not type annotated.
 def buffer_to_tensors(flatbuffer_model: Any) -> dict[int, list[Any]]:
   """Returns a map from buffer id to tensors that use it."""
@@ -23,7 +23,7 @@ import numpy as np
 from ai_edge_quantizer import qtyping
 from ai_edge_quantizer.algorithms.uniform_quantize import uniform_quantize_tensor
 from ai_edge_litert import interpreter as tfl  # pylint: disable=g-direct-tensorflow-import
-from tensorflow.python.platform import gfile  # pylint: disable=g-direct-tensorflow-import
+import os  # tensorflow.python.platform.gfile  # pylint: disable=g-direct-tensorflow-import

 DEFAULT_SIGNATURE_KEY = "serving_default"

@@ -51,7 +51,7 @@ def create_tfl_interpreter(
     A TFLite interpreter.
   """
   if isinstance(tflite_model, str):
-    with gfile.GFile(tflite_model, "rb") as f:
+    with open(tflite_model, "rb") as f:
      tflite_model = f.read()

   if use_xnnpack:
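This hunk shows create_tfl_interpreter accepting either a filesystem path (now read with the built-in open) or raw model bytes. A hypothetical call sketch; the path is made up and it assumes the remaining parameters, such as use_xnnpack, have defaults.

from ai_edge_quantizer.utils import tfl_interpreter_utils

# Path input: the function reads the file itself via open().
interp = tfl_interpreter_utils.create_tfl_interpreter("model.tflite")

# Bytes input: the flatbuffer content is passed straight through.
with open("model.tflite", "rb") as f:
  interp = tfl_interpreter_utils.create_tfl_interpreter(f.read())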
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ai-edge-quantizer-nightly
-Version: 0.5.0.dev20251216
+Version: 0.5.0.dev20251218
 Summary: A quantizer for advanced developers to quantize converted AI Edge models.
 Home-page: https://github.com/google-ai-edge/ai-edge-quantizer
 Keywords: On-Device ML,AI,Google,TFLite,Quantization,LLMs,GenAI
@@ -24,10 +24,11 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: absl-py
 Requires-Dist: immutabledict
 Requires-Dist: numpy
-Requires-Dist: tf-nightly
-Requires-Dist: ai-edge-litert-nightly
+Requires-Dist: ml_dtypes
+Requires-Dist: ai-edge-litert
 Dynamic: classifier
 Dynamic: description
 Dynamic: description-content-type
@@ -1,19 +1,19 @@
 ai_edge_quantizer/__init__.py,sha256=4pFSkukSwahYyzwqia0yPRyz8TnFQfGRthVJhYpMWas,793
-ai_edge_quantizer/algorithm_manager.py,sha256=0jSNITKl0Ge1XeYKueOUj9brlS4B5ZcdcVQ1kZS3JKg,16518
+ai_edge_quantizer/algorithm_manager.py,sha256=GuFls-3z23dk6wKxRDvmg2WI0uvAzCwbDDC3q5OXeVs,16628
 ai_edge_quantizer/algorithm_manager_api.py,sha256=u903TG0s1uIDhJqfeJne3CFl8A93phZrwgV2-hwdcXU,9247
 ai_edge_quantizer/algorithm_manager_api_test.py,sha256=w6bSONvXkX6bzXAGc0-7b6gNDt9oz9ieq97KP8Sg_JU,7666
 ai_edge_quantizer/calibrator.py,sha256=nkHUmxdWy16Vw3EOD3B_7EkGiX8V-XJRXXFynweGfG8,9744
 ai_edge_quantizer/calibrator_test.py,sha256=c2ZCjl7PQYU9KtAovpDO9JX8sClgaLGO0P7oqoL6rP0,8830
 ai_edge_quantizer/conftest.py,sha256=SxCz-5LlRD_lQm4hQc4c6IGG7DS8d7IyEWY9gnscPN0,794
-ai_edge_quantizer/default_policy.py,sha256=YcwwtVzoWUhjYgMtJ7b9f647740lURKteDOeJvwe17o,11384
-ai_edge_quantizer/model_modifier.py,sha256=U70JByv6CItP8tg4bdyMfX-R3UlwylAGSviZkF_FSAM,10468
-ai_edge_quantizer/model_modifier_test.py,sha256=CV4pgMEQkBJr_qbYR720TO8HBCutbEYLHptDHgdQMUE,7274
-ai_edge_quantizer/model_validator.py,sha256=HCXl8lu8wRmLn6wUaEm3I7xDOul3s7VC6XzbKjGfkuU,13945
+ai_edge_quantizer/default_policy.py,sha256=POPuYzAXjNFZDGozZcvfxyUkiuT-8KMQ5SpGxAFB9xo,11365
+ai_edge_quantizer/model_modifier.py,sha256=RxzfB1UULxLZlFEtgvFu0WrdTo7SLofc52KZchV_2vQ,10421
+ai_edge_quantizer/model_modifier_test.py,sha256=5vUCodVNk9GPcecjGwovV0677vD0BUZjfq9PGOnMEmM,7227
+ai_edge_quantizer/model_validator.py,sha256=mU6MLMvNQK7fxEJmh11H44OGnkUof0CVP6kYjb_du2A,13931
 ai_edge_quantizer/model_validator_test.py,sha256=EeqOP_mrZsnZ3rug756s0ryDDqd2KgIDld5Lm_gDuWY,13020
 ai_edge_quantizer/params_generator.py,sha256=-tbXB6crutiFhmLFEMe_-sxGylsvgd_cRZQ2fB67bNE,20436
 ai_edge_quantizer/params_generator_test.py,sha256=gJlq_qCPC0dWkbkyCpQiqAsmCYoWYxtxM2xYMEkrr3g,40436
-ai_edge_quantizer/qtyping.py,sha256=y9KretGzUGztyLdmto2XV6U0cxrSrfLWP1UOVcwR4dY,18011
-ai_edge_quantizer/quantizer.py,sha256=_XRzj1UTXoPa0AeE1Ygz6XAelst2p2fGLqrhYB5MOCg,19150
+ai_edge_quantizer/qtyping.py,sha256=0AVvoCN0TPzbkSsE8bp3vb-b4hctZZ-q098cjnp46c8,18027
+ai_edge_quantizer/quantizer.py,sha256=dgBkHR1VXuXzwKKdv7D39OL2z0ASp30xbN0vwFUX31M,19125
 ai_edge_quantizer/quantizer_test.py,sha256=6gcOLsZO-XW9VoKmcf_9CalG-_2lSUAe_fcmH2zHcoU,30167
 ai_edge_quantizer/recipe.py,sha256=MEkfQ2Sg3KAE9LAORHWcbjYNPg06EUbwc1d-VspQA2U,6461
 ai_edge_quantizer/recipe_manager.py,sha256=OcnrY8Qj_kjDIXx71RX1MHw5qND89N-DKuMRajfGMEg,15205
@@ -28,7 +28,7 @@ ai_edge_quantizer/algorithms/nonlinear_quantize/__init__.py,sha256=lpq1g2ayg3lCP
 ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting.py,sha256=Bs9CK7wZAw6jNaZ8xEtbwO2vM34VYXNZSMVWvxJo9nw,9297
 ai_edge_quantizer/algorithms/nonlinear_quantize/float_casting_test.py,sha256=EqIHGEZ1LgUrTN7zf880RuAzEv3Qy7kgh5ivObJGHSo,22646
 ai_edge_quantizer/algorithms/uniform_quantize/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
-ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=s4JudZaYZlL5PwdfjKV-HcbaVSzVcXXueNFdBxZDv9I,41033
+ai_edge_quantizer/algorithms/uniform_quantize/common_quantize.py,sha256=XZ79vEVqUF7CGLC2M4PEbGFbdPKeMFmOpkyjdX2MCso,41468
 ai_edge_quantizer/algorithms/uniform_quantize/common_quantize_test.py,sha256=GGf_n3wIeg3GB_eGsmyNJ0fTcxgpeMMbugTMRONK6TQ,3553
 ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery.py,sha256=VjBDxGxjITHJc7xJABqBbZt6_qhobtZAl2gnVQrYJgc,8652
 ai_edge_quantizer/algorithms/uniform_quantize/dequantized_weight_recovery_test.py,sha256=sT5eX5TLZEHTtPfnSkCPDlS0sQxlTFWbCsbvOuj--yY,8889
@@ -63,19 +63,19 @@ ai_edge_quantizer/transformations/quantize_tensor_test.py,sha256=CD7OboBcIQxQY8O
 ai_edge_quantizer/transformations/transformation_utils.py,sha256=IKrtXJNH0msiTcI7KXkCYn2EkzmbZKWMMX_r5PMEx2U,8857
 ai_edge_quantizer/transformations/transformation_utils_test.py,sha256=MWgq29t7rvxRQIfi4ny9IoODFCTcbpjnIwoCL40zDKk,8698
 ai_edge_quantizer/utils/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
-ai_edge_quantizer/utils/calibration_utils.py,sha256=iMf_bSCf-O86MzDt5D9hLKqbTydqLwirluaC6BJ9yHo,11553
-ai_edge_quantizer/utils/calibration_utils_test.py,sha256=4BlksXl7b4yptL8xPR67hmJCnjhN9V10a2PunzfHrUE,9372
+ai_edge_quantizer/utils/calibration_utils.py,sha256=dFDsjc3CXaDFNbCMyoPrMVubd3EDtG0ZwIY3Tmbb0sw,11506
+ai_edge_quantizer/utils/calibration_utils_test.py,sha256=jod4iokZkG00y9JrYaFzVvg4JwiA6mX8_whAMkNyoEc,9334
 ai_edge_quantizer/utils/constrained_ops_utils.py,sha256=z0sm1R9anRRVgdgI23XQKwDRcdARdpTo_6UBDB_lHXE,4502
-ai_edge_quantizer/utils/constrained_ops_utils_test.py,sha256=i_uERo-KvMj0dvUSuI67kdOBHvRQETg8-qnejs_MgTE,1756
+ai_edge_quantizer/utils/constrained_ops_utils_test.py,sha256=O0ull_vJeFlYG9Yl6L0IIJlx0Kn882xc4yl1uqA4-bo,1756
 ai_edge_quantizer/utils/test_utils.py,sha256=a4Nk-wbeB09dFjTDZiA0K67d26j5DD0UDH_GIVmVG_4,8685
-ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=42OWzQsRTXq3XQYmoxlz177_dw2fJfq7mDSJaU--ArQ,12076
+ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=VeE8oYSnswXndKlvp8pCMvliKu_6g1liHV_kXg8TSlc,12063
 ai_edge_quantizer/utils/tfl_flatbuffer_utils_test.py,sha256=K1SbK8q92qYVtiVj0I0GtugsPTkpIpEKv9zakvFV_Sc,8555
-ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=zgXVSIoNU-M2V1Wcq06M0MPoA-dCXXEZd1Y9vvors_c,15100
+ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=ptdlC3WVUE9aBznT7kZQ0ZOk3EKgOBQdMDAaCdGedIM,15093
 ai_edge_quantizer/utils/tfl_interpreter_utils_test.py,sha256=EPOXbmXqbt3tAewo3BQQjh2mjuxrrFit5tkF0wUVYHU,12471
 ai_edge_quantizer/utils/validation_utils.py,sha256=Mr0D6X-pTDLODFAnCX3IlqdV1OL02tlq0ZjHbqx8nzg,7439
 ai_edge_quantizer/utils/validation_utils_test.py,sha256=T8K5mCWeMcihND2KS_dHvCJUU9lEdG2sD95EgPkaX3w,5584
-ai_edge_quantizer_nightly-0.5.0.dev20251216.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-ai_edge_quantizer_nightly-0.5.0.dev20251216.dist-info/METADATA,sha256=n1FgmxZ5QAv5zN84QzZ6CQHX14nRmXaPJK2H46Bf7T0,1707
-ai_edge_quantizer_nightly-0.5.0.dev20251216.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
-ai_edge_quantizer_nightly-0.5.0.dev20251216.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
-ai_edge_quantizer_nightly-0.5.0.dev20251216.dist-info/RECORD,,
+ai_edge_quantizer_nightly-0.5.0.dev20251218.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ai_edge_quantizer_nightly-0.5.0.dev20251218.dist-info/METADATA,sha256=1sLLAYQ3je02FJRMxC36xRz46bn6AFmpZ0UQ0oQLt4c,1721
+ai_edge_quantizer_nightly-0.5.0.dev20251218.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+ai_edge_quantizer_nightly-0.5.0.dev20251218.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
+ai_edge_quantizer_nightly-0.5.0.dev20251218.dist-info/RECORD,,