ai-edge-quantizer-nightly 0.3.0.dev20250619__py3-none-any.whl → 0.3.0.dev20250621__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -100,6 +100,7 @@ class BaseOpTestCase(parameterized.TestCase):
100
100
  num_validation_samples: int = 4,
101
101
  num_calibration_samples: Union[int, None] = None,
102
102
  error_metric: str = 'mse',
103
+ int_min_max: Union[tuple[int, int], None] = None,
103
104
  ) -> model_validator.ComparisonResult:
104
105
  """Quantizes and validates the given model with the given configurations.
105
106
 
@@ -112,6 +113,7 @@ class BaseOpTestCase(parameterized.TestCase):
112
113
  num_calibration_samples: The number of samples to use for calibration. If
113
114
  None then it will be set to num_validation_samples * 8.
114
115
  error_metric: The error metric to use for validation.
116
+ int_min_max: The min and max of the integer input range.
115
117
 
116
118
  Returns:
117
119
  The comparison result of the validation.
@@ -129,13 +131,16 @@ class BaseOpTestCase(parameterized.TestCase):
129
131
  calibration_data = tfl_interpreter_utils.create_random_normal_input_data(
130
132
  quantizer_instance.float_model,
131
133
  num_samples=num_calibration_samples,
134
+ int_min_max=int_min_max,
132
135
  )
133
136
  calibration_result = quantizer_instance.calibrate(calibration_data)
134
137
  quantization_result = quantizer_instance.quantize(calibration_result)
135
138
  else:
136
139
  quantization_result = quantizer_instance.quantize()
137
140
  test_data = tfl_interpreter_utils.create_random_normal_input_data(
138
- quantization_result.quantized_model, num_samples=num_validation_samples
141
+ quantization_result.quantized_model,
142
+ num_samples=num_validation_samples,
143
+ int_min_max=int_min_max,
139
144
  )
140
145
  return quantizer_instance.validate(test_data, error_metric)
141
146
 
@@ -185,6 +190,7 @@ class BaseOpTestCase(parameterized.TestCase):
185
190
  expected_model_size_reduction: float,
186
191
  weight_tolerance: float = 1e-4,
187
192
  output_tolerance: float = 1e-4,
193
+ int_min_max: Union[tuple[int, int], None] = None,
188
194
  ):
189
195
  """Check if the quantization is successful and the result is valid."""
190
196
  validation_result = self.quantize_and_validate(
@@ -192,6 +198,7 @@ class BaseOpTestCase(parameterized.TestCase):
192
198
  algorithm_key=algorithm_key,
193
199
  op_name=op_name,
194
200
  op_config=op_config,
201
+ int_min_max=int_min_max,
195
202
  )
196
203
  with self.subTest(name='ModelSizeReduction'):
197
204
  self.assert_model_size_reduction_above_min_pct(
@@ -215,8 +222,9 @@ class BaseOpTestCase(parameterized.TestCase):
215
222
  num_validation_samples: int = 4,
216
223
  num_calibration_samples: Union[int, None] = None,
217
224
  output_tolerance: float = 1e-4,
225
+ int_min_max: Union[tuple[int, int], None] = None,
218
226
  ):
219
- """Check if the output errors after quantization are within the tolerance."""
227
+ """Checks if the output errors after quantization are within the tolerance."""
220
228
  validation_result = self.quantize_and_validate(
221
229
  model_path=model_path,
222
230
  algorithm_key=algorithm_key,
@@ -224,6 +232,7 @@ class BaseOpTestCase(parameterized.TestCase):
224
232
  num_calibration_samples=num_calibration_samples,
225
233
  op_name=op_name,
226
234
  op_config=op_config,
235
+ int_min_max=int_min_max,
227
236
  )
228
237
  self.assert_output_errors_below_tolerance(
229
238
  validation_result, output_tolerance
@@ -353,6 +353,7 @@ def create_random_dataset(
353
353
  input_details: dict[str, Any],
354
354
  num_samples: int,
355
355
  random_seed: Union[int, np._typing.ArrayLike],
356
+ int_min_max: Union[tuple[int, int], None] = None,
356
357
  ) -> list[dict[str, Any]]:
357
358
  """Creates a random normal dataset for given input details.
358
359
 
@@ -360,6 +361,7 @@ def create_random_dataset(
360
361
  input_details: A dictionary of input details.
361
362
  num_samples: The number of samples to generate.
362
363
  random_seed: The random seed to use.
364
+ int_min_max: The min and max of the integer input range.
363
365
 
364
366
  Returns:
365
367
  A list of dictionaries, each containing a sample of input data (for all
@@ -373,7 +375,13 @@ def create_random_dataset(
373
375
  dtype = input_tensor["dtype"]
374
376
  shape = input_tensor["shape"]
375
377
  if dtype in (np.int32, np.int64):
376
- new_data = _create_random_integers(rng, shape, dtype)
378
+ if int_min_max is None:
379
+ new_data = _create_random_integers(rng, shape, dtype)
380
+ else:
381
+ min_value, max_value = int_min_max
382
+ new_data = _create_random_integers(
383
+ rng, shape, dtype, min_value, max_value
384
+ )
377
385
  elif dtype in (np.float32, ml_dtypes.bfloat16):
378
386
  new_data = _create_random_normal(rng, shape, dtype)
379
387
  elif dtype == np.bool:
@@ -389,18 +397,20 @@ def create_random_normal_input_data(
389
397
  tflite_model: Union[str, bytes],
390
398
  num_samples: int = 4,
391
399
  random_seed: int = 666,
400
+ int_min_max: Union[tuple[int, int], None] = None,
392
401
  ) -> dict[str, list[dict[str, Any]]]:
393
- """create random dataset following random distribution for signature runner.
402
+ """Creates a random normal dataset for a signature runner.
394
403
 
395
404
  Args:
396
- tflite_model: TFLite model path or bytearray
397
- num_samples: number of input samples to be generated
398
- random_seed: random seed to be used for function
405
+ tflite_model: TFLite model path or bytearray.
406
+ num_samples: Number of input samples to be generated.
407
+ random_seed: Random seed to be used for function.
408
+ int_min_max: The min and max of the integer input range.
399
409
 
400
410
  Returns:
401
- a list of inputs to the given interpreter, for a single interpreter we may
411
+ A list of inputs to the given interpreter, for a single interpreter we may
402
412
  have multiple signatures so each set of inputs is also represented as
403
- list
413
+ list.
404
414
  """
405
415
  tfl_interpreter = create_tfl_interpreter(tflite_model)
406
416
  signature_defs = tfl_interpreter.get_signature_list()
@@ -410,6 +420,6 @@ def create_random_normal_input_data(
410
420
  signature_runner = tfl_interpreter.get_signature_runner(signature_key)
411
421
  input_details = signature_runner.get_input_details()
412
422
  test_data[signature_key] = create_random_dataset(
413
- input_details, num_samples, random_seed
423
+ input_details, num_samples, random_seed, int_min_max
414
424
  )
415
425
  return test_data
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ai-edge-quantizer-nightly
3
- Version: 0.3.0.dev20250619
3
+ Version: 0.3.0.dev20250621
4
4
  Summary: A quantizer for advanced developers to quantize converted AI Edge models.
5
5
  Home-page: https://github.com/google-ai-edge/ai-edge-quantizer
6
6
  Keywords: On-Device ML,AI,Google,TFLite,Quantization,LLMs,GenAI
@@ -63,15 +63,15 @@ ai_edge_quantizer/transformations/transformation_utils_test.py,sha256=MWgq29t7rv
63
63
  ai_edge_quantizer/utils/__init__.py,sha256=lpq1g2ayg3lCPLy79t2VicYcnGKw64FfYIj1V7J-4m8,676
64
64
  ai_edge_quantizer/utils/calibration_utils.py,sha256=e3dG7Nm94Ix0hkTWTWPUhEG6a8QR_cAM3PSwblfJV5g,15106
65
65
  ai_edge_quantizer/utils/calibration_utils_test.py,sha256=4BlksXl7b4yptL8xPR67hmJCnjhN9V10a2PunzfHrUE,9372
66
- ai_edge_quantizer/utils/test_utils.py,sha256=Y2pdMvn1k4gmqDo3noJfzx3fJcDHX_1hcsP6oiIz65Y,8240
66
+ ai_edge_quantizer/utils/test_utils.py,sha256=spqUmSNciOKPQHCBkHE7Zo34eMFq_BfBCAnMT3jAulU,8615
67
67
  ai_edge_quantizer/utils/tfl_flatbuffer_utils.py,sha256=pZv8FMWyjBSLN5MGJ2K_dZ6oqkJGbp9RI4CfnlPuPII,10830
68
68
  ai_edge_quantizer/utils/tfl_flatbuffer_utils_test.py,sha256=K1SbK8q92qYVtiVj0I0GtugsPTkpIpEKv9zakvFV_Sc,8555
69
- ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=EtOv6cpKM_F0uv2bWuSXylYmTeXT6zUc182pw4sdYSI,13889
69
+ ai_edge_quantizer/utils/tfl_interpreter_utils.py,sha256=vTyy6-4PgfFPL3C8uTq_iPFBwdxCjhrWzUiec4DdFPw,14323
70
70
  ai_edge_quantizer/utils/tfl_interpreter_utils_test.py,sha256=6fjkM-rycZ95L4yfvlr0TN6RlrhfPzxNUYrZaYO_F0A,12013
71
71
  ai_edge_quantizer/utils/validation_utils.py,sha256=oYw33Sg547AqtGw-choPUJmp9SAKkV46J_ddqSsum2Q,3950
72
72
  ai_edge_quantizer/utils/validation_utils_test.py,sha256=V_qNDikPD4OPB-siOLQCWNVWTAu87h2IgNYt7teFd-o,2934
73
- ai_edge_quantizer_nightly-0.3.0.dev20250619.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
74
- ai_edge_quantizer_nightly-0.3.0.dev20250619.dist-info/METADATA,sha256=_1ICby0AKllEQdwCpKgxbguZWlBDy6vIyrNOJ7LhR7s,1528
75
- ai_edge_quantizer_nightly-0.3.0.dev20250619.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
76
- ai_edge_quantizer_nightly-0.3.0.dev20250619.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
77
- ai_edge_quantizer_nightly-0.3.0.dev20250619.dist-info/RECORD,,
73
+ ai_edge_quantizer_nightly-0.3.0.dev20250621.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
74
+ ai_edge_quantizer_nightly-0.3.0.dev20250621.dist-info/METADATA,sha256=681mJjVZmED7ft7VdtbDu1c43bz96uHnrnB--dlZhQA,1528
75
+ ai_edge_quantizer_nightly-0.3.0.dev20250621.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
76
+ ai_edge_quantizer_nightly-0.3.0.dev20250621.dist-info/top_level.txt,sha256=8QTfPnFXNVUhScFLaa-NWZMFWMn72M50DVPubpwWB1g,18
77
+ ai_edge_quantizer_nightly-0.3.0.dev20250621.dist-info/RECORD,,