onnx2tf 1.28.7__py3-none-any.whl → 1.29.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
1
1
  from onnx2tf.onnx2tf import convert, main
2
2
 
3
- __version__ = '1.28.7'
3
+ __version__ = '1.29.0'
onnx2tf/onnx2tf.py CHANGED
@@ -74,6 +74,8 @@ def convert(
74
74
  copy_onnx_input_output_names_to_tflite: Optional[bool] = False,
75
75
  output_dynamic_range_quantized_tflite: Optional[bool] = False,
76
76
  output_integer_quantized_tflite: Optional[bool] = False,
77
+ quant_norm_mean: Optional[str] = '[[[[0.485, 0.456, 0.406]]]]',
78
+ quant_norm_std: Optional[str] = '[[[[0.229, 0.224, 0.225]]]]',
77
79
  quant_type: Optional[str] = 'per-channel',
78
80
  custom_input_op_name_np_data_path: Optional[List] = None,
79
81
  input_quant_dtype: Optional[str] = 'int8',
@@ -97,6 +99,8 @@ def convert(
97
99
  enable_rnn_unroll: Optional[bool] = False,
98
100
  disable_suppression_flextranspose: Optional[bool] = False,
99
101
  disable_strict_mode: Optional[bool] = False,
102
+ onnxruntime_output_memmap: Optional[bool] = True,
103
+ onnxruntime_output_memmap_dir: Optional[str] = None,
100
104
  number_of_dimensions_after_flextranspose_compression: Optional[int] = 6,
101
105
  disable_suppression_flexstridedslice: Optional[bool] = False,
102
106
  number_of_dimensions_after_flexstridedslice_compression: Optional[int] = 5,
@@ -172,6 +176,16 @@ def convert(
172
176
  output_integer_quantized_tflite: Optional[bool]
173
177
  Output of integer quantized tflite.
174
178
 
179
+ quant_norm_mean: Optional[str]
180
+ Normalized average value during quantization.\n
181
+ Only valid when the "-cind" option is not used.\n
182
+ Default: "[[[[0.485, 0.456, 0.406]]]]"
183
+
184
+ quant_norm_std: Optional[str]
185
+ Normalized standard deviation during quantization.\n
186
+ Only valid when the "-cind" option is not used.\n
187
+ Default: "[[[[0.229, 0.224, 0.225]]]]"
188
+
175
189
  quant_type: Optional[str]
176
190
  Selects whether "per-channel" or "per-tensor" quantization is used.\n
177
191
  Default: "per-channel"
@@ -360,6 +374,15 @@ def convert(
360
374
  correction process is skipped, but the frequency of transposition errors increases\n
361
375
  and accuracy errors are more likely to occur. Strict mode is enabled by default.
362
376
 
377
+ onnxruntime_output_memmap: Optional[bool]
378
+ Use onnxruntime IOBinding with np.memmap for dummy inference outputs when\n
379
+ the estimated output tensor size exceeds available RAM. This avoids OOM\n
380
+ but increases disk I/O and may slow down validation.
381
+
382
+ onnxruntime_output_memmap_dir: Optional[str]
383
+ Directory for memmap files used by onnxruntime_output_memmap.\n
384
+ If omitted, a temporary directory is created and removed on exit.
385
+
363
386
  number_of_dimensions_after_flextranspose_compression: Optional[int]
364
387
  Number of Transpose OP dimensions generated after avoiding FlexTranspose generation.\n
365
388
  Also suppress the creation of the Transpose itself by specifying 2.\n
@@ -621,6 +644,19 @@ def convert(
621
644
  )
622
645
  sys.exit(1)
623
646
 
647
+ # Normalized average value during quantization
648
+ if quant_norm_mean:
649
+ quant_norm_mean_np = np.array(ast.literal_eval(quant_norm_mean), dtype=np.float32)
650
+ else:
651
+ quant_norm_mean_np = np.array(ast.literal_eval("[[[[0.000, 0.000, 0.000]]]]"), dtype=np.float32)
652
+
653
+ # Normalized standard deviation during quantization
654
+ if quant_norm_std:
655
+ quant_norm_std_np = np.array(ast.literal_eval(quant_norm_std), dtype=np.float32)
656
+ else:
657
+ quant_norm_std_np = np.array(ast.literal_eval("[[[[1.000, 1.000, 1.000]]]]"), dtype=np.float32)
658
+
659
+ # param replacement
624
660
  replacement_parameters = None
625
661
  if param_replacement_file:
626
662
  if not os.path.isfile(param_replacement_file):
@@ -1093,6 +1129,8 @@ def convert(
1093
1129
  tf_layers_dict=tf_layers_dict,
1094
1130
  use_cuda=use_cuda,
1095
1131
  disable_strict_mode=disable_strict_mode,
1132
+ enable_ort_output_memmap=onnxruntime_output_memmap,
1133
+ ort_output_memmap_dir=onnxruntime_output_memmap_dir,
1096
1134
  shape_hints=shape_hints if (check_onnx_tf_outputs_elementwise_close or check_onnx_tf_outputs_elementwise_close_full) else None,
1097
1135
  )
1098
1136
  """
@@ -1683,6 +1721,8 @@ def convert(
1683
1721
  Color.BLUE(f'shape') + f': {output_shape} '+
1684
1722
  Color.BLUE(f'dtype') + f': {output_dtype}'
1685
1723
  )
1724
+ info(Color.BLUE(f'quant_norm_mean') + f': {quant_norm_mean_np} ')
1725
+ info(Color.BLUE(f'quant_norm_std') + f': {quant_norm_std_np} ')
1686
1726
  print('')
1687
1727
 
1688
1728
  # INT8 Converter
@@ -1720,18 +1760,20 @@ def convert(
1720
1760
 
1721
1761
  if model_input.shape[-1] == 3:
1722
1762
  # RGB
1723
- mean = np.asarray([[[[0.485, 0.456, 0.406]]]], dtype=np.float32)
1724
- std = np.asarray([[[[0.229, 0.224, 0.225]]]], dtype=np.float32)
1763
+ mean = quant_norm_mean_np
1764
+ std = quant_norm_std_np
1725
1765
  elif model_input.shape[-1] == 4:
1726
1766
  # RGBA
1727
- mean = np.asarray([[[[0.485, 0.456, 0.406, 0.000]]]], dtype=np.float32)
1728
- std = np.asarray([[[[0.229, 0.224, 0.225, 1.000]]]], dtype=np.float32)
1767
+ zero = np.zeros((*quant_norm_mean_np.shape[:-1], 1), dtype=quant_norm_mean_np.dtype)
1768
+ mean = np.concatenate([quant_norm_mean_np, zero], axis=-1)
1769
+ one = np.ones((*quant_norm_std_np.shape[:-1], 1), dtype=quant_norm_std_np.dtype)
1770
+ std = np.concatenate([quant_norm_std_np, one], axis=-1)
1729
1771
  new_element_array = np.full((*calib_data.shape[:-1], 1), 0.500, dtype=np.float32)
1730
1772
  calib_data = np.concatenate((calib_data, new_element_array), axis=-1)
1731
1773
  else:
1732
1774
  # Others
1733
- mean = np.asarray([[[[0.485, 0.456, 0.406]]]], dtype=np.float32)
1734
- std = np.asarray([[[[0.229, 0.224, 0.225]]]], dtype=np.float32)
1775
+ mean = quant_norm_mean_np
1776
+ std = quant_norm_std_np
1735
1777
 
1736
1778
  calib_data_dict[model_input.name] = \
1737
1779
  [
@@ -2012,6 +2054,8 @@ def convert(
2012
2054
  custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
2013
2055
  tf_layers_dict=tf_layers_dict,
2014
2056
  use_cuda=use_cuda,
2057
+ enable_ort_output_memmap=onnxruntime_output_memmap,
2058
+ ort_output_memmap_dir=onnxruntime_output_memmap_dir,
2015
2059
  shape_hints=shape_hints,
2016
2060
  )
2017
2061
  except Exception as ex:
@@ -2275,6 +2319,8 @@ def convert(
2275
2319
  custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
2276
2320
  tf_layers_dict=tf_layers_dict,
2277
2321
  use_cuda=use_cuda,
2322
+ enable_ort_output_memmap=onnxruntime_output_memmap,
2323
+ ort_output_memmap_dir=onnxruntime_output_memmap_dir,
2278
2324
  shape_hints=shape_hints,
2279
2325
  )
2280
2326
 
@@ -2475,6 +2521,24 @@ def main():
2475
2521
  'Selects whether "per-channel" or "per-tensor" quantization is used. \n' +
2476
2522
  'Default: "per-channel"'
2477
2523
  )
2524
+ parser.add_argument(
2525
+ '-qnm',
2526
+ '--quant_norm_mean',
2527
+ type=str,
2528
+ default='[[[[0.485, 0.456, 0.406]]]]',
2529
+ help=\
2530
+ 'Normalized average value during quantization. \n' +
+ 'Only valid when the "-cind" option is not used. \n' +
+ 'Default: "[[[[0.485, 0.456, 0.406]]]]"'
2532
+ )
2533
+ parser.add_argument(
2534
+ '-qns',
2535
+ '--quant_norm_std',
2536
+ type=str,
2537
+ default='[[[[0.229, 0.224, 0.225]]]]',
2538
+ help=\
2539
+ 'Normalized standard deviation during quantization. \n' +
+ 'Only valid when the "-cind" option is not used. \n' +
+ 'Default: "[[[[0.229, 0.224, 0.225]]]]"'
2541
+ )
2478
2542
  parser.add_argument(
2479
2543
  '-cind',
2480
2544
  '--custom_input_op_name_np_data_path',
@@ -2789,6 +2853,28 @@ def main():
2789
2853
  'correction process is skipped, but the frequency of transposition errors increases \n' +
2790
2854
  'and accuracy errors are more likely to occur. Strict mode is enabled by default.'
2791
2855
  )
2856
+ parser.add_argument(
2857
+ '-doem',
2858
+ '--disable_onnxruntime_output_memmap',
2859
+ dest='disable_onnxruntime_output_memmap',
2860
+ action='store_true',
2861
+ help=\
2862
+ 'Disable onnxruntime output memmap. \n' +
2863
+ 'By default, onnx2tf uses onnxruntime IOBinding with np.memmap for dummy inference \n' +
2864
+ 'outputs only when the estimated output tensor size exceeds available RAM. \n' +
2865
+ 'Use this flag to force the standard in-memory output path instead. \n' +
2866
+ 'Default: disabled (memmap enabled when needed).'
2867
+ )
2868
+ parser.set_defaults(disable_onnxruntime_output_memmap=False)
2869
+ parser.add_argument(
2870
+ '-oemd',
2871
+ '--onnxruntime_output_memmap_dir',
2872
+ type=str,
2873
+ help=\
2874
+ 'Directory for memmap files used by onnxruntime output memmap. \n' +
2875
+ 'If omitted, a temporary directory is created and removed on exit. \n' +
2876
+ 'This setting is used only when memmap is actually enabled.'
2877
+ )
2792
2878
  parser.add_argument(
2793
2879
  '-nodafsc',
2794
2880
  '--number_of_dimensions_after_flexstridedslice_compression',
@@ -3061,6 +3147,8 @@ def main():
3061
3147
  copy_onnx_input_output_names_to_tflite=args.copy_onnx_input_output_names_to_tflite,
3062
3148
  output_dynamic_range_quantized_tflite=args.output_dynamic_range_quantized_tflite,
3063
3149
  output_integer_quantized_tflite=args.output_integer_quantized_tflite,
3150
+ quant_norm_mean=args.quant_norm_mean,
3151
+ quant_norm_std=args.quant_norm_std,
3064
3152
  quant_type=args.quant_type,
3065
3153
  custom_input_op_name_np_data_path=custom_params,
3066
3154
  input_quant_dtype=args.input_quant_dtype,
@@ -3084,6 +3172,8 @@ def main():
3084
3172
  enable_rnn_unroll=args.enable_rnn_unroll,
3085
3173
  disable_suppression_flextranspose=args.disable_suppression_flextranspose,
3086
3174
  disable_strict_mode=args.disable_strict_mode,
3175
+ onnxruntime_output_memmap=not args.disable_onnxruntime_output_memmap,
3176
+ onnxruntime_output_memmap_dir=args.onnxruntime_output_memmap_dir,
3087
3177
  number_of_dimensions_after_flextranspose_compression=args.number_of_dimensions_after_flextranspose_compression,
3088
3178
  disable_suppression_flexstridedslice=args.disable_suppression_flexstridedslice,
3089
3179
  number_of_dimensions_after_flexstridedslice_compression=args.number_of_dimensions_after_flexstridedslice_compression,
@@ -7,6 +7,9 @@ import copy
7
7
  import json
8
8
  import psutil
9
9
  import random
10
+ import atexit
11
+ import tempfile
12
+ import shutil
10
13
  random.seed(0)
11
14
  import requests
12
15
  import flatbuffers
@@ -3634,6 +3637,8 @@ def dummy_onnx_inference(
3634
3637
  tf_layers_dict: Optional[Dict] = None,
3635
3638
  use_cuda: bool = False,
3636
3639
  disable_strict_mode: bool = False,
3640
+ enable_ort_output_memmap: bool = False,
3641
+ ort_output_memmap_dir: Optional[str] = None,
3637
3642
  shape_hints: Optional[List[str]] = None,
3638
3643
  ) -> List[np.ndarray]:
3639
3644
  """Perform inference on ONNX subgraphs with an all-1 dummy tensor.
@@ -3663,6 +3668,14 @@ def dummy_onnx_inference(
3663
3668
  disable_strict_mode: Optional[bool]
3664
3669
  True to disable strict inference mode, False to enable it.
3665
3670
 
3671
+ enable_ort_output_memmap: bool
3672
+ True to use onnxruntime IOBinding with np.memmap for outputs when
3673
+ output tensors are too large for available RAM.
3674
+
3675
+ ort_output_memmap_dir: Optional[str]
3676
+ Directory to store memmap files. If not specified, a temporary
3677
+ directory is created and removed on exit.
3678
+
3666
3679
  Returns
3667
3680
  ----------
3668
3681
  outputs: List[np.ndarray]
@@ -3880,7 +3893,7 @@ def dummy_onnx_inference(
3880
3893
  op_output_size: int = 1
3881
3894
  if gs_graph_output.shape is not None:
3882
3895
  for s in gs_graph_output.shape:
3883
- if isinstance(s, int):
3896
+ if isinstance(s, (int, np.integer)):
3884
3897
  op_output_size *= s
3885
3898
  # Total bytes
3886
3899
  total_output_size += op_output_size * dtype_sizes.get(gs_graph_output.dtype, 4)
@@ -3888,7 +3901,8 @@ def dummy_onnx_inference(
3888
3901
  # When exact inference mode is enabled and the total size of the tensor of inference results exceeds approximately 80% of available RAM
3889
3902
  mem_available = psutil.virtual_memory().available * 0.80 // 1024 // 1024 //1024
3890
3903
  total_output_size_gb = (total_output_size // 1024 // 1024 //1024)
3891
- if (not disable_strict_mode and total_output_size_gb > mem_available):
3904
+ use_memmap_outputs = enable_ort_output_memmap and total_output_size_gb > mem_available
3905
+ if (not disable_strict_mode and total_output_size_gb > mem_available and not use_memmap_outputs):
3892
3906
  if tmp_onnx_path:
3893
3907
  os.remove(tmp_onnx_path)
3894
3908
  os.remove(tmp_onnx_external_weights_path)
@@ -3896,7 +3910,94 @@ def dummy_onnx_inference(
3896
3910
  f'The tool skipped dummy inference to avoid SWAP processing because the total size of the tensor of inference results exceeded about {mem_available} GB. (results: {total_output_size_gb} GB)'
3897
3911
  )
3898
3912
 
3899
- outputs = onnx_session.run(None, input_datas)
3913
+ if use_memmap_outputs:
3914
+ output_shapes = []
3915
+ output_names_order = [out.name for out in gs_graph.outputs]
3916
+ for out in gs_graph.outputs:
3917
+ shape = out.shape
3918
+ if shape is None or any(not isinstance(s, (int, np.integer)) for s in shape):
3919
+ if tmp_onnx_path:
3920
+ os.remove(tmp_onnx_path)
3921
+ os.remove(tmp_onnx_external_weights_path)
3922
+ raise Exception(
3923
+ 'onnxruntime output memmap requires static output shapes. ' +
3924
+ 'Provide --shape_hints or reduce validation outputs.'
3925
+ )
3926
+ output_shapes.append([int(s) for s in shape])
3927
+
3928
+ memmap_dir = ort_output_memmap_dir
3929
+ cleanup_memmap_dir = False
3930
+ if memmap_dir is None:
3931
+ memmap_dir = tempfile.mkdtemp(prefix='onnx2tf_ort_mm_')
3932
+ cleanup_memmap_dir = True
3933
+ os.makedirs(memmap_dir, exist_ok=True)
3934
+
3935
+ try:
3936
+ disk_free = psutil.disk_usage(memmap_dir).free
3937
+ if total_output_size > disk_free:
3938
+ raise Exception(
3939
+ f'Not enough disk space for memmap outputs. ' +
3940
+ f'Required: {total_output_size} bytes, Free: {disk_free} bytes.'
3941
+ )
3942
+ except Exception as ex:
3943
+ if 'Not enough disk space' in str(ex):
3944
+ if tmp_onnx_path:
3945
+ os.remove(tmp_onnx_path)
3946
+ os.remove(tmp_onnx_external_weights_path)
3947
+ raise
3948
+
3949
+ if cleanup_memmap_dir:
3950
+ atexit.register(shutil.rmtree, memmap_dir, ignore_errors=True)
3951
+
3952
+ info(
3953
+ f'onnxruntime output memmap enabled. ' +
3954
+ f'Outputs: {len(output_names_order)}, Path: {memmap_dir}'
3955
+ )
3956
+
3957
+ io_binding = onnx_session.io_binding()
3958
+
3959
+ for input_name, input_data in input_datas.items():
3960
+ if not input_data.flags['C_CONTIGUOUS']:
3961
+ input_data = np.ascontiguousarray(input_data)
3962
+ input_datas[input_name] = input_data
3963
+ io_binding.bind_input(
3964
+ input_name,
3965
+ 'cpu',
3966
+ 0,
3967
+ input_data.dtype,
3968
+ input_data.shape,
3969
+ input_data.__array_interface__['data'][0],
3970
+ )
3971
+
3972
+ memmap_outputs = {}
3973
+ for idx, (output_name, output_shape) in enumerate(zip(output_names_order, output_shapes)):
3974
+ safe_output_name = re.sub(r'[^0-9A-Za-z._-]+', '_', output_name)
3975
+ memmap_path = os.path.join(memmap_dir, f'ort_output_{idx}_{safe_output_name}.mmap')
3976
+ output_dtype = np.dtype(gs_graph.outputs[idx].dtype)
3977
+ memmap_array = np.memmap(
3978
+ memmap_path,
3979
+ dtype=output_dtype,
3980
+ mode='w+',
3981
+ shape=tuple(output_shape),
3982
+ )
3983
+ memmap_outputs[output_name] = memmap_array
3984
+ io_binding.bind_output(
3985
+ output_name,
3986
+ 'cpu',
3987
+ 0,
3988
+ output_dtype,
3989
+ output_shape,
3990
+ memmap_array.__array_interface__['data'][0],
3991
+ )
3992
+
3993
+ onnx_session.run_with_iobinding(io_binding)
3994
+ io_binding.synchronize_outputs()
3995
+ for memmap_array in memmap_outputs.values():
3996
+ memmap_array.flush()
3997
+
3998
+ outputs = [memmap_outputs[name] for name in output_names_order]
3999
+ else:
4000
+ outputs = onnx_session.run(None, input_datas)
3900
4001
  if tmp_onnx_path:
3901
4002
  os.remove(tmp_onnx_path)
3902
4003
  os.remove(tmp_onnx_external_weights_path)
@@ -1,27 +1,38 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: onnx2tf
3
- Version: 1.28.7
4
- Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
3
+ Version: 1.29.0
4
+ Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC).
5
5
  Home-page: https://github.com/PINTO0309/onnx2tf
6
6
  Author: Katsuya Hyodo
7
7
  Author-email: rmsdh122@yahoo.co.jp
8
- License: MIT License
9
8
  Platform: linux
10
9
  Platform: unix
11
10
  Requires-Python: >=3.10
12
11
  Description-Content-Type: text/markdown
13
12
  License-File: LICENSE
14
13
  License-File: LICENSE_onnx-tensorflow
14
+ Requires-Dist: requests==2.32.5
15
+ Requires-Dist: numpy==1.26.4
16
+ Requires-Dist: onnx==1.19.0
17
+ Requires-Dist: onnxruntime==1.23.0
18
+ Requires-Dist: opencv-python==4.11.0.86
19
+ Requires-Dist: onnxsim==0.4.30
20
+ Requires-Dist: ai-edge-litert==2.1.0
21
+ Requires-Dist: tensorflow==2.19.0
22
+ Requires-Dist: tf-keras==2.19.0
23
+ Requires-Dist: onnx-graphsurgeon==0.5.8
24
+ Requires-Dist: simple-onnx-processing-tools==1.1.32
25
+ Requires-Dist: psutil==5.9.5
26
+ Requires-Dist: protobuf==4.25.5
27
+ Requires-Dist: h5py==3.11.0
28
+ Requires-Dist: ml_dtypes==0.5.1
29
+ Requires-Dist: flatbuffers==25.12.19
15
30
  Dynamic: author
16
31
  Dynamic: author-email
17
- Dynamic: description
18
- Dynamic: description-content-type
19
32
  Dynamic: home-page
20
- Dynamic: license
21
33
  Dynamic: license-file
22
34
  Dynamic: platform
23
35
  Dynamic: requires-python
24
- Dynamic: summary
25
36
 
26
37
  # onnx2tf
27
38
  Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in [onnx-tensorflow](https://github.com/onnx/onnx-tensorflow) ([onnx-tf](https://pypi.org/project/onnx-tf/)). I don't need a Star, but give me a pull request. Since I am adding challenging model optimizations and fixing bugs almost daily, I frequently embed potential bugs that would otherwise break through CI's regression testing. Therefore, if you encounter new problems, I recommend that you try a package that is a few versions older, or try the latest package that will be released in a few days.
@@ -334,7 +345,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
334
345
  docker run --rm -it \
335
346
  -v `pwd`:/workdir \
336
347
  -w /workdir \
337
- ghcr.io/pinto0309/onnx2tf:1.28.7
348
+ ghcr.io/pinto0309/onnx2tf:1.29.0
338
349
 
339
350
  or
340
351
 
@@ -342,7 +353,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
342
353
  docker run --rm -it \
343
354
  -v `pwd`:/workdir \
344
355
  -w /workdir \
345
- docker.io/pinto0309/onnx2tf:1.28.7
356
+ docker.io/pinto0309/onnx2tf:1.29.0
346
357
 
347
358
  or
348
359
 
@@ -1667,6 +1678,16 @@ optional arguments:
1667
1678
  Selects whether "per-channel" or "per-tensor" quantization is used.
1668
1679
  Default: "per-channel"
1669
1680
 
1681
+ -qnm QUANT_NORM_MEAN, --quant_norm_mean QUANT_NORM_MEAN
1682
+ Normalized average value during quantization.
1683
+ Only valid when the "-cind" option is not used.
1684
+ Default: "[[[[0.485, 0.456, 0.406]]]]"
1685
+
1686
+ -qns QUANT_NORM_STD, --quant_norm_std QUANT_NORM_STD
1687
+ Normalized standard deviation during quantization.
1688
+ Only valid when the "-cind" option is not used.
1689
+ Default: "[[[[0.229, 0.224, 0.225]]]]"
1690
+
1670
1691
  -cind INPUT_NAME NUMPY_FILE_PATH MEAN STD, \
1671
1692
  --custom_input_op_name_np_data_path INPUT_NAME NUMPY_FILE_PATH MEAN STD
1672
1693
  Input name of OP and path of data file (Numpy) for custom input for -cotof or -oiqt,
@@ -2095,6 +2116,8 @@ convert(
2095
2116
  output_weights: Optional[bool] = False,
2096
2117
  copy_onnx_input_output_names_to_tflite: Optional[bool] = False,
2097
2118
  output_integer_quantized_tflite: Optional[bool] = False,
2119
+ quant_norm_mean: Optional[str] = '[[[[0.485, 0.456, 0.406]]]]',
2120
+ quant_norm_std: Optional[str] = '[[[[0.229, 0.224, 0.225]]]]',
2098
2121
  quant_type: Optional[str] = 'per-channel',
2099
2122
  custom_input_op_name_np_data_path: Optional[List] = None,
2100
2123
  input_quant_dtype: Optional[str] = 'int8',
@@ -2190,6 +2213,16 @@ convert(
2190
2213
  output_integer_quantized_tflite: Optional[bool]
2191
2214
  Output of integer quantized tflite.
2192
2215
 
2216
+ quant_norm_mean: Optional[str]
2217
+ Normalized average value during quantization.
2218
+ Only valid when the "-cind" option is not used.
2219
+ Default: "[[[[0.485, 0.456, 0.406]]]]"
2220
+
2221
+ quant_norm_std: Optional[str]
2222
+ Normalized standard deviation during quantization.
2223
+ Only valid when the "-cind" option is not used.
2224
+ Default: "[[[[0.229, 0.224, 0.225]]]]"
2225
+
2193
2226
  quant_type: Optional[str]
2194
2227
  Selects whether "per-channel" or "per-tensor" quantization is used.
2195
2228
  Default: "per-channel"
@@ -1,6 +1,6 @@
1
- onnx2tf/__init__.py,sha256=A8WjpXyCp-z5LE7ay1tL-CV8Xjh1Xu01oFLi0sqb1bw,66
1
+ onnx2tf/__init__.py,sha256=DObbckS1XgiAp4svAwPuHhkeAswQOzsYLXEyNPN7vwY,66
2
2
  onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
3
- onnx2tf/onnx2tf.py,sha256=iQwYuLj1f7gHWU9r_4L1pyRnMsJgHkqEPeavxejtY4Y,146964
3
+ onnx2tf/onnx2tf.py,sha256=XZBqVn1Q_qmPmrYw_Dz30vRWJ8uaJURkbUSo_8rZrjk,151116
4
4
  onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
5
5
  onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
6
6
  onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -185,15 +185,14 @@ onnx2tf/ops/_Loop.py,sha256=eo5sNfrfOnKV6_I737AWsM5LJTY9DVOxQEvhanxtP4g,11322
185
185
  onnx2tf/ops/__Loop.py,sha256=ClwMcbNS4hqUtW_pzwjMa9Cqg7ONvz9aplke55A0uJ0,19704
186
186
  onnx2tf/ops/__init__.py,sha256=jnmUWWa-3dHzBZV9bmPzXu6eoz2dumJTzO7i8JdcgSM,25
187
187
  onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
188
- onnx2tf/utils/common_functions.py,sha256=_GPrqIz4Ueg3hriOulErfoNGhoMw_5OKiWqLTYZh6YI,245622
188
+ onnx2tf/utils/common_functions.py,sha256=o9a4g56OdQKocODzBp2Uxesves_Tl-Iizh5r4Okmu6Q,249631
189
189
  onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
190
190
  onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
191
191
  onnx2tf/utils/json_auto_generator.py,sha256=C-4P8sii7B2_LRwW6AFG13tI3R5DHPhNirgxfmBU1F8,61944
192
192
  onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
193
- onnx2tf-1.28.7.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
194
- onnx2tf-1.28.7.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
195
- onnx2tf-1.28.7.dist-info/METADATA,sha256=OcuSCAQ-Taq_SZ120vzXJQTQ5PQzilWq8ojS-Fwjx7Y,151931
196
- onnx2tf-1.28.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
197
- onnx2tf-1.28.7.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
198
- onnx2tf-1.28.7.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
199
- onnx2tf-1.28.7.dist-info/RECORD,,
193
+ onnx2tf-1.29.0.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
194
+ onnx2tf-1.29.0.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
195
+ onnx2tf-1.29.0.dist-info/METADATA,sha256=bqMWcqDQNEcmcnraz4IoNm7XGT3mARr-WvMnVlOdIaM,153189
196
+ onnx2tf-1.29.0.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
197
+ onnx2tf-1.29.0.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
198
+ onnx2tf-1.29.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.1)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,2 +0,0 @@
1
- [console_scripts]
2
- onnx2tf = onnx2tf:main