onnx2tf 1.28.7__py3-none-any.whl → 1.28.8__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in their public registry.
onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.28.7'
+ __version__ = '1.28.8'
onnx2tf/onnx2tf.py CHANGED
@@ -74,6 +74,8 @@ def convert(
  copy_onnx_input_output_names_to_tflite: Optional[bool] = False,
  output_dynamic_range_quantized_tflite: Optional[bool] = False,
  output_integer_quantized_tflite: Optional[bool] = False,
+ quant_norm_mean: Optional[str] = '[[[[0.485, 0.456, 0.406]]]]',
+ quant_norm_std: Optional[str] = '[[[[0.229, 0.224, 0.225]]]]',
  quant_type: Optional[str] = 'per-channel',
  custom_input_op_name_np_data_path: Optional[List] = None,
  input_quant_dtype: Optional[str] = 'int8',
@@ -172,6 +174,16 @@ def convert(
  output_integer_quantized_tflite: Optional[bool]
      Output of integer quantized tflite.

+ quant_norm_mean: Optional[str]
+     Normalized average value during quantization.\n
+     Only valid when the "-cind" option is not used.\n
+     Default: "[[[[0.485, 0.456, 0.406]]]]"
+
+ quant_norm_std: Optional[str]
+     Normalized standard deviation during quantization.\n
+     Only valid when the "-cind" option is not used.\n
+     Default: "[[[[0.229, 0.224, 0.225]]]]"
+
  quant_type: Optional[str]
      Selects whether "per-channel" or "per-tensor" quantization is used.\n
      Default: "per-channel"
@@ -621,6 +633,19 @@ def convert(
  )
  sys.exit(1)

+ # Normalized average value during quantization
+ if quant_norm_mean:
+     quant_norm_mean_np = np.array(ast.literal_eval(quant_norm_mean), dtype=np.float32)
+ else:
+     quant_norm_mean_np = np.array(ast.literal_eval("[[[[0.000, 0.000, 0.000]]]]"), dtype=np.float32)
+
+ # Normalized standard deviation during quantization
+ if quant_norm_std:
+     quant_norm_std_np = np.array(ast.literal_eval(quant_norm_std), dtype=np.float32)
+ else:
+     quant_norm_std_np = np.array(ast.literal_eval("[[[[1.000, 1.000, 1.000]]]]"), dtype=np.float32)
+
+ # param replacement
  replacement_parameters = None
  if param_replacement_file:
      if not os.path.isfile(param_replacement_file):
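
A standalone sketch of the parsing step above: the string form is evaluated with ast.literal_eval and cast to float32, yielding a (1, 1, 1, 3) array that can broadcast over NHWC batches.

import ast
import numpy as np

# The documented default for quant_norm_mean, parsed the same way as above.
quant_norm_mean = '[[[[0.485, 0.456, 0.406]]]]'
quant_norm_mean_np = np.array(ast.literal_eval(quant_norm_mean), dtype=np.float32)

print(quant_norm_mean_np.shape)   # (1, 1, 1, 3)
print(quant_norm_mean_np.dtype)   # float32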
@@ -1683,6 +1708,8 @@ def convert(
  Color.BLUE(f'shape') + f': {output_shape} '+
  Color.BLUE(f'dtype') + f': {output_dtype}'
  )
+ info(Color.BLUE(f'quant_norm_mean') + f': {quant_norm_mean_np} ')
+ info(Color.BLUE(f'quant_norm_std') + f': {quant_norm_std_np} ')
  print('')

  # INT8 Converter
@@ -1720,18 +1747,20 @@ def convert(

  if model_input.shape[-1] == 3:
      # RGB
-     mean = np.asarray([[[[0.485, 0.456, 0.406]]]], dtype=np.float32)
-     std = np.asarray([[[[0.229, 0.224, 0.225]]]], dtype=np.float32)
+     mean = quant_norm_mean_np
+     std = quant_norm_std_np
  elif model_input.shape[-1] == 4:
      # RGBA
-     mean = np.asarray([[[[0.485, 0.456, 0.406, 0.000]]]], dtype=np.float32)
-     std = np.asarray([[[[0.229, 0.224, 0.225, 1.000]]]], dtype=np.float32)
+     zero = np.zeros((*quant_norm_mean_np.shape[:-1], 1), dtype=quant_norm_mean_np.dtype)
+     mean = np.concatenate([quant_norm_mean_np, zero], axis=-1)
+     one = np.ones((*quant_norm_std_np.shape[:-1], 1), dtype=quant_norm_std_np.dtype)
+     std = np.concatenate([quant_norm_std_np, zero], axis=-1)
      new_element_array = np.full((*calib_data.shape[:-1], 1), 0.500, dtype=np.float32)
      calib_data = np.concatenate((calib_data, new_element_array), axis=-1)
  else:
      # Others
-     mean = np.asarray([[[[0.485, 0.456, 0.406]]]], dtype=np.float32)
-     std = np.asarray([[[[0.229, 0.224, 0.225]]]], dtype=np.float32)
+     mean = quant_norm_mean_np
+     std = quant_norm_std_np

  calib_data_dict[model_input.name] = \
  [
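
The parsed (1, 1, 1, C) mean/std arrays broadcast against NHWC calibration batches; for 4-channel inputs the hunk above concatenates an extra entry onto the last axis so the shapes still line up. Below is a minimal, standalone sketch of that broadcasting; the (data - mean) / std convention and the random dummy batch are assumptions for illustration, not the exact expression used by the calibrator.

import numpy as np

# Dummy NHWC calibration batch (shape and values are illustrative only).
calib_data = np.random.rand(10, 224, 224, 3).astype(np.float32)
mean = np.array([[[[0.485, 0.456, 0.406]]]], dtype=np.float32)  # shape (1, 1, 1, 3)
std = np.array([[[[0.229, 0.224, 0.225]]]], dtype=np.float32)   # shape (1, 1, 1, 3)

# Assumed normalization convention: (data - mean) / std, broadcast over N, H, W.
normalized = (calib_data - mean) / std
print(normalized.shape)  # (10, 224, 224, 3)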
@@ -2475,6 +2504,24 @@ def main():
  'Selects whether "per-channel" or "per-tensor" quantization is used. \n' +
  'Default: "per-channel"'
  )
+ parser.add_argument(
+     '-qnm',
+     '--quant_norm_mean',
+     type=str,
+     default='[[[[0.485, 0.456, 0.406]]]]',
+     help=\
+         'Normalized average value during quantization. \n' +
+         'Default: "[[[[0.485, 0.456, 0.406]]]]"'
+ )
+ parser.add_argument(
+     '-qns',
+     '--quant_norm_std',
+     type=str,
+     default='[[[[0.229, 0.224, 0.225]]]]',
+     help=\
+         'Normalized standard deviation during quantization. \n' +
+         'Default: "[[[[0.229, 0.224, 0.225]]]]"'
+ )
  parser.add_argument(
      '-cind',
      '--custom_input_op_name_np_data_path',
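
A self-contained sketch of how the two new options behave under argparse (this stand-in parser reproduces only these flags, not the full onnx2tf CLI):

import argparse

# Stand-in parser with just the new options and their documented defaults.
parser = argparse.ArgumentParser()
parser.add_argument('-qnm', '--quant_norm_mean', type=str, default='[[[[0.485, 0.456, 0.406]]]]')
parser.add_argument('-qns', '--quant_norm_std', type=str, default='[[[[0.229, 0.224, 0.225]]]]')

args = parser.parse_args(['-qnm', '[[[[0.0, 0.0, 0.0]]]]'])
print(args.quant_norm_mean)  # [[[[0.0, 0.0, 0.0]]]]
print(args.quant_norm_std)   # [[[[0.229, 0.224, 0.225]]]] (default)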
@@ -3061,6 +3108,8 @@ def main():
  copy_onnx_input_output_names_to_tflite=args.copy_onnx_input_output_names_to_tflite,
  output_dynamic_range_quantized_tflite=args.output_dynamic_range_quantized_tflite,
  output_integer_quantized_tflite=args.output_integer_quantized_tflite,
+ quant_norm_mean=args.quant_norm_mean,
+ quant_norm_std=args.quant_norm_std,
  quant_type=args.quant_type,
  custom_input_op_name_np_data_path=custom_params,
  input_quant_dtype=args.input_quant_dtype,
onnx2tf-1.28.7.dist-info/METADATA → onnx2tf-1.28.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.28.7
+ Version: 1.28.8
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -334,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.28.7
+ ghcr.io/pinto0309/onnx2tf:1.28.8

  or

@@ -342,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.28.7
+ docker.io/pinto0309/onnx2tf:1.28.8

  or

@@ -1667,6 +1667,16 @@ optional arguments:
  Selects whether "per-channel" or "per-tensor" quantization is used.
  Default: "per-channel"

+ -qnm QUANT_NORM_MEAN, --quant_norm_mean QUANT_NORM_MEAN
+     Normalized average value during quantization.
+     Only valid when the "-cind" option is not used.
+     Default: "[[[[0.485, 0.456, 0.406]]]]"
+
+ -qns QUANT_NORM_STD, --quant_norm_std QUANT_NORM_STD
+     Normalized standard deviation during quantization.
+     Only valid when the "-cind" option is not used.
+     Default: "[[[[0.229, 0.224, 0.225]]]]"
+
  -cind INPUT_NAME NUMPY_FILE_PATH MEAN STD, \
      --custom_input_op_name_np_data_path INPUT_NAME NUMPY_FILE_PATH MEAN STD
      Input name of OP and path of data file (Numpy) for custom input for -cotof or -oiqt,
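
For example, a hypothetical invocation combining the new flags with the existing -i and -oiqt options could look like: onnx2tf -i model.onnx -oiqt -qnm "[[[[0.5, 0.5, 0.5]]]]" -qns "[[[[0.5, 0.5, 0.5]]]]". The path and values are illustrative only, and as the help text notes, the flags are ignored when -cind is used.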
@@ -2095,6 +2105,8 @@ convert(
  output_weights: Optional[bool] = False,
  copy_onnx_input_output_names_to_tflite: Optional[bool] = False,
  output_integer_quantized_tflite: Optional[bool] = False,
+ quant_norm_mean: Optional[str] = '[[[[0.485, 0.456, 0.406]]]]',
+ quant_norm_std: Optional[str] = '[[[[0.229, 0.224, 0.225]]]]',
  quant_type: Optional[str] = 'per-channel',
  custom_input_op_name_np_data_path: Optional[List] = None,
  input_quant_dtype: Optional[str] = 'int8',
@@ -2190,6 +2202,16 @@ convert(
  output_integer_quantized_tflite: Optional[bool]
      Output of integer quantized tflite.

+ quant_norm_mean: Optional[str]
+     Normalized average value during quantization.
+     Only valid when the "-cind" option is not used.
+     Default: "[[[[0.485, 0.456, 0.406]]]]"
+
+ quant_norm_std: Optional[str]
+     Normalized standard deviation during quantization.
+     Only valid when the "-cind" option is not used.
+     Default: "[[[[0.229, 0.224, 0.225]]]]"
+
  quant_type: Optional[str]
      Selects whether "per-channel" or "per-tensor" quantization is used.
      Default: "per-channel"
onnx2tf-1.28.7.dist-info/RECORD → onnx2tf-1.28.8.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
- onnx2tf/__init__.py,sha256=A8WjpXyCp-z5LE7ay1tL-CV8Xjh1Xu01oFLi0sqb1bw,66
+ onnx2tf/__init__.py,sha256=PpTfjKEZ7PWIDHIvtSVU9Epc3rkkmE02EOIWvlHFnWU,66
  onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
- onnx2tf/onnx2tf.py,sha256=iQwYuLj1f7gHWU9r_4L1pyRnMsJgHkqEPeavxejtY4Y,146964
+ onnx2tf/onnx2tf.py,sha256=v6dx8ZQrj2h4-xv3lae4wkdts1NO3tEINS_sK0dykvw,148911
  onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
  onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
  onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -190,10 +190,10 @@ onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
  onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
  onnx2tf/utils/json_auto_generator.py,sha256=C-4P8sii7B2_LRwW6AFG13tI3R5DHPhNirgxfmBU1F8,61944
  onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
- onnx2tf-1.28.7.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
- onnx2tf-1.28.7.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
- onnx2tf-1.28.7.dist-info/METADATA,sha256=OcuSCAQ-Taq_SZ120vzXJQTQ5PQzilWq8ojS-Fwjx7Y,151931
- onnx2tf-1.28.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- onnx2tf-1.28.7.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
- onnx2tf-1.28.7.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
- onnx2tf-1.28.7.dist-info/RECORD,,
+ onnx2tf-1.28.8.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
+ onnx2tf-1.28.8.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
+ onnx2tf-1.28.8.dist-info/METADATA,sha256=NKZj0tTAVIo80eNaKXJTxQDpRtl3-smFlcQCS3QpnMc,152862
+ onnx2tf-1.28.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ onnx2tf-1.28.8.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
+ onnx2tf-1.28.8.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
+ onnx2tf-1.28.8.dist-info/RECORD,,