onnx2tf 1.25.15-py3-none-any.whl → 1.26.1-py3-none-any.whl

onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
 from onnx2tf.onnx2tf import convert, main
 
-__version__ = '1.25.15'
+__version__ = '1.26.1'
onnx2tf/onnx2tf.py CHANGED
@@ -72,7 +72,8 @@ def convert(
     output_integer_quantized_tflite: Optional[bool] = False,
     quant_type: Optional[str] = 'per-channel',
     custom_input_op_name_np_data_path: Optional[List] = None,
-    input_output_quant_dtype: Optional[str] = 'int8',
+    input_quant_dtype: Optional[str] = 'int8',
+    output_quant_dtype: Optional[str] = 'int8',
     not_use_onnxsim: Optional[bool] = False,
     not_use_opname_auto_generate: Optional[bool] = False,
     batch_size: Optional[int] = None,
@@ -221,9 +222,13 @@ def convert(
         ["input2","input2.npy",[0.3],[0.07]],\n
         ]
 
-    input_output_quant_dtype: Optional[str]
-        Input and Output dtypes when doing Full INT8 Quantization.\n
-        "int8"(default) or "uint8"
+    input_quant_dtype: Optional[str]
+        Input dtypes when doing Full INT8 Quantization.\n
+        "int8"(default) or "uint8" or "float32"
+
+    output_quant_dtype: Optional[str]
+        Output dtypes when doing Full INT8 Quantization.\n
+        "int8"(default) or "uint8" or "float32"
 
     not_use_onnxsim: Optional[bool]
         No optimization by onnx-simplifier is performed.\n
@@ -1693,15 +1698,27 @@ def convert(
     converter._experimental_disable_per_channel = disable_per_channel
     converter.unfold_batchmatmul = enable_batchmatmul_unfold
     converter.representative_dataset = representative_dataset_gen
-    inf_type = None
-    if input_output_quant_dtype == 'int8':
-        inf_type = tf.int8
-    elif input_output_quant_dtype == 'uint8':
-        inf_type = tf.uint8
+    inf_type_input = None
+    inf_type_output = None
+    if input_quant_dtype == 'int8':
+        inf_type_input = tf.int8
+    elif input_quant_dtype == 'uint8':
+        inf_type_input = tf.uint8
+    elif input_quant_dtype == 'float32':
+        inf_type_input = tf.float32
+    else:
+        inf_type_input = tf.int8
+
+    if output_quant_dtype == 'int8':
+        inf_type_output = tf.int8
+    elif output_quant_dtype == 'uint8':
+        inf_type_output = tf.uint8
+    elif output_quant_dtype == 'float32':
+        inf_type_output = tf.float32
     else:
-        inf_type = tf.int8
-    converter.inference_input_type = inf_type
-    converter.inference_output_type = inf_type
+        inf_type_output = tf.int8
+    converter.inference_input_type = inf_type_input
+    converter.inference_output_type = inf_type_output
     tflite_model = converter.convert()
     with open(f'{output_folder_path}/{output_file_name}_full_integer_quant.tflite', 'wb') as w:
         w.write(tflite_model)
@@ -2128,14 +2145,24 @@ def main():
         'Otherwise, an error will occur during the -oiqt stage.'
     )
     parser.add_argument(
-        '-ioqd',
-        '--input_output_quant_dtype',
+        '-iqd',
+        '--input_quant_dtype',
+        type=str,
+        choices=['int8', 'uint8', 'float32'],
+        default='int8',
+        help=\
+        'Input dtypes when doing Full INT8 Quantization. \n' +
+        '"int8"(default) or "uint8" or "float32"'
+    )
+    parser.add_argument(
+        '-oqd',
+        '--output_quant_dtype',
         type=str,
-        choices=['int8', 'uint8'],
+        choices=['int8', 'uint8', 'float32'],
        default='int8',
         help=\
-        'Input and Output dtypes when doing Full INT8 Quantization. \n' +
-        '"int8"(default) or "uint8"'
+        'Output dtypes when doing Full INT8 Quantization. \n' +
+        '"int8"(default) or "uint8" or "float32"'
     )
     parser.add_argument(
         '-nuo',
@@ -2584,7 +2611,8 @@ def main():
         output_integer_quantized_tflite=args.output_integer_quantized_tflite,
         quant_type=args.quant_type,
         custom_input_op_name_np_data_path=custom_params,
-        input_output_quant_dtype=args.input_output_quant_dtype,
+        input_quant_dtype=args.input_quant_dtype,
+        output_quant_dtype=args.output_quant_dtype,
         not_use_onnxsim=args.not_use_onnxsim,
         not_use_opname_auto_generate=args.not_use_opname_auto_generate,
         batch_size=args.batch_size,
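
Taken together, these hunks split one keyword argument into two. A minimal sketch of how the new arguments might be called from the Python API, assuming only what the hunks above show; the model path and output folder are placeholders:

```python
# Hedged sketch: full-integer quantization with the split dtype controls
# added in this release. 'model.onnx' and 'saved_model' are placeholder paths.
from onnx2tf import convert

convert(
    input_onnx_file_path='model.onnx',       # placeholder input model
    output_folder_path='saved_model',        # placeholder output folder
    output_integer_quantized_tflite=True,    # also emit *_full_integer_quant.tflite
    input_quant_dtype='float32',             # keep float32 model inputs
    output_quant_dtype='int8',               # quantize outputs to int8 (the default)
)
```

On the command line, the same split is exposed as `-iqd/--input_quant_dtype` and `-oqd/--output_quant_dtype`, used together with `-oiqt`.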
onnx2tf/ops/Conv.py CHANGED
@@ -14,6 +14,7 @@ from tensorflow.python.keras.layers import (
 )
 import onnx_graphsurgeon as gs
 from onnx2tf.utils.common_functions import (
+    get_replacement_parameter,
     get_constant_or_variable,
     get_weights_constant_or_variable,
     get_padding_as_op,
@@ -24,6 +25,7 @@ from onnx2tf.utils.common_functions import (
     transpose_with_flexing_deterrence,
     get_tf_model_inputs,
     onnx_tf_tensor_validation,
+    post_process_transpose,
 )
 from typing import Any, Dict
 from onnx2tf.utils.logging import *
@@ -33,6 +35,7 @@ INF_INDEX_VALUE: int = 4294967296
 
 @print_node_info
 @inverted_operation_enable_disable
+@get_replacement_parameter
 def make_node(
     *,
     graph_node: gs.Node,
@@ -932,6 +935,15 @@ def make_node(
             dilations,
         )
 
+    # Post-process transpose
+    tf_layers_dict[graph_node_output.name]['tf_node'] = \
+        post_process_transpose(
+            value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+            param_target='outputs',
+            param_name=graph_node.outputs[0].name,
+            **kwargs,
+        )
+
     # Generation of Debug Info
     tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
         make_tf_node_info(
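
Because `make_node` for Conv is now wrapped with `get_replacement_parameter` and runs `post_process_transpose` on its output, a parameter-replacement file can request a transpose after a Conv. A minimal sketch, assuming the `format_version`/`operations` layout used by the repository's sample replacement JSONs; the op name, tensor name, and perm below are purely illustrative:

```python
# Hedged sketch: write a replacement file asking the Conv handler to transpose
# its output. All names and the perm are hypothetical examples.
import json

replacement = {
    "format_version": 1,
    "operations": [
        {
            "op_name": "/backbone/Conv",                  # hypothetical ONNX op name
            "param_target": "outputs",
            "param_name": "/backbone/Conv_output_0",      # hypothetical output tensor name
            "post_process_transpose_perm": [0, 3, 1, 2],  # example NHWC -> NCHW perm
        }
    ],
}

with open("replace_conv.json", "w") as f:
    json.dump(replacement, f, indent=2)
```

A file like this is typically passed to onnx2tf with `-prf/--param_replacement_file`.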
onnx2tf/utils/common_functions.py CHANGED
@@ -5857,11 +5857,18 @@ correction_process_for_accuracy_errors(
     onnx_output_same_shape_counts = collections.Counter(onnx_output_shape)
     if sum([1 if dim > 1 and cnt > 1 else 0 for dim, cnt in onnx_output_same_shape_counts.items()]) >= 1:
         # Generate dummy op
-        dummy_op = tf_func(
-            input_tensor_1,
-            input_tensor_2,
-        )
-        if dummy_op.shape != tf.TensorShape(None):
+        dummy_op = None
+        tensor_2_candidate_for_transpositions = list(itertools.permutations(range(len(input_tensor_2.shape))))
+        for tensor_2_candidate_for_transposition in tensor_2_candidate_for_transpositions:
+            try:
+                dummy_op = tf_func(
+                    input_tensor_1,
+                    tf.transpose(a=input_tensor_2, perm=tensor_2_candidate_for_transposition),
+                )
+                break
+            except Exception as ex:
+                pass
+        if dummy_op is not None and dummy_op.shape != tf.TensorShape(None):
             tf_output_shape = [dim if dim is not None else -1 for dim in dummy_op.shape]
             number_of_dim_other_than_1 = sum([1 if i != 1 else 0 for i in onnx_output_shape])
             # Processing continues only if there are two or more dimensions other than 1
@@ -5889,7 +5896,7 @@
     tensor_1_candidate_for_transpositions = \
         obtaining_an_inverted_pattern_for_brute_force_validation(tensor_shape=validation_data_1.shape)
     tensor_2_candidate_for_transpositions = \
-        obtaining_an_inverted_pattern_for_brute_force_validation(tensor_shape=validation_data_2.shape)
+        list(itertools.permutations(range(len(validation_data_2.shape))))
     for tensor_1_candidate_for_transposition in tensor_1_candidate_for_transpositions:
         for tensor_2_candidate_for_transposition in tensor_2_candidate_for_transpositions:
             try:
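
The change above stops calling `tf_func` on the raw operands and instead probes every axis permutation of the second operand until the op succeeds. A standalone sketch of that probing pattern, with `tf.math.add` and fixed shapes standing in for the real `tf_func` and operands:

```python
# Minimal, self-contained sketch of the brute-force probing added above:
# try every permutation of the second operand's axes until the elementwise
# op broadcasts. tf.math.add and the shapes below are illustrative stand-ins.
import itertools
import tensorflow as tf

def probe_dummy_op(input_tensor_1, input_tensor_2, tf_func=tf.math.add):
    dummy_op = None
    for perm in itertools.permutations(range(len(input_tensor_2.shape))):
        try:
            dummy_op = tf_func(
                input_tensor_1,
                tf.transpose(a=input_tensor_2, perm=perm),
            )
            break
        except Exception:
            # Shapes do not broadcast under this permutation; keep trying.
            continue
    return dummy_op

# A [1, 64, 48, 3] tensor and a [1, 3, 64, 48] tensor only add once the
# second operand is transposed back to channel-last order.
result = probe_dummy_op(tf.zeros([1, 64, 48, 3]), tf.zeros([1, 3, 64, 48]))
print(result.shape)  # (1, 64, 48, 3)
```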
onnx2tf-1.25.15.dist-info/METADATA → onnx2tf-1.26.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: onnx2tf
-Version: 1.25.15
+Version: 1.26.1
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Home-page: https://github.com/PINTO0309/onnx2tf
 Author: Katsuya Hyodo
@@ -314,7 +314,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.25.15
+ghcr.io/pinto0309/onnx2tf:1.26.1
 
 or
 
@@ -322,7 +322,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.25.15
+docker.io/pinto0309/onnx2tf:1.26.1
 
 or
 
@@ -1529,7 +1529,8 @@ usage: onnx2tf
 [-oiqt]
 [-qt {per-channel,per-tensor}]
 [-cind INPUT_NAME NUMPY_FILE_PATH MEAN STD]
-[-ioqd {int8,uint8}]
+[-iqd {int8,uint8,float32}]
+[-oqd {int8,uint8,float32}]
 [-nuo]
 [-nuonag]
 [-b BATCH_SIZE]
@@ -1686,9 +1687,13 @@ optional arguments:
     and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
     Otherwise, an error will occur during the -oiqt stage.
 
-  -ioqd {int8,uint8}, --input_output_quant_dtype {int8,uint8}
-    Input and Output dtypes when doing Full INT8 Quantization.
-    "int8"(default) or "uint8"
+  -iqd {int8,uint8,float32}, --input_quant_dtype {int8,uint8,float32}
+    Input dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
+
+  -oqd {int8,uint8,float32}, --output_quant_dtype {int8,uint8,float32}
+    Output dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
 
   -nuo, --not_use_onnxsim
     No optimization by onnx-simplifier is performed.
@@ -2008,7 +2013,8 @@ convert(
     output_integer_quantized_tflite: Optional[bool] = False,
     quant_type: Optional[str] = 'per-channel',
     custom_input_op_name_np_data_path: Optional[List] = None,
-    input_output_quant_dtype: Optional[str] = 'int8',
+    input_quant_dtype: Optional[str] = 'int8',
+    output_quant_dtype: Optional[str] = 'int8',
     not_use_onnxsim: Optional[bool] = False,
     not_use_opname_auto_generate: Optional[bool] = False,
     batch_size: Union[int, NoneType] = None,
@@ -2172,9 +2178,13 @@ convert(
     and {input_op_name}, {numpy_file_path}, {mean}, and {std} must all be entered.
     Otherwise, an error will occur during the -oiqt stage.
 
-  input_output_quant_dtype: Optional[str]
-    Input and Output dtypes when doing Full INT8 Quantization.
-    "int8"(default) or "uint8"
+  input_quant_dtype: Optional[str]
+    Input dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
+
+  output_quant_dtype: Optional[str]
+    Output dtypes when doing Full INT8 Quantization.
+    "int8"(default) or "uint8" or "float32"
 
   not_use_onnxsim: Optional[bool]
     No optimization by onnx-simplifier is performed.
@@ -2600,7 +2610,7 @@ Do not submit an issue that only contains an amount of information that cannot b
 |14|Unsqueeze|1. "param_target": "inputs"<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Unsqueeze operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Unsqueeze operation with the perm specified as post-processing.<br>3. "param_target": "op"<br>`new_shape`: Specifies directly the shape after Unsqueeze processing.<br>{<br>&nbsp;&nbsp;"op_name": "/backbone/backbone.1/Unsqueeze_1",<br>&nbsp;&nbsp;"param_target": "op",<br>&nbsp;&nbsp;"new_shape": [1,15,15,1]<br>}|
 |15|Reshape|1. "param_target": "inputs"<br>`values`: Value of `shape`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Reshape operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Reshape operation with the perm specified as post-processing.|
 |16|Resize|1. "param_target": "attributes"<br>`coordinate_transformation_mode`: Value of `coordinate_transformation_mode`<br>`extrapolation_value`: Value of `extrapolation_value`<br>`mode`: Value of `mode`<br>2. "param_target": "inputs"<br>`values`: Value of `roi` or `scales` or `sizes`. `scales`=`[scale_h,scale_w]`,`sizes`=`[h,w]`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Resize operation with the perm specified as pre-processing.<br>3. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Resize operation with the perm specified as post-processing.|
-|17|Slice|`Slice` implements special replacements separately ignore all automatic conversions and generate `tf.strided_slice` directly by specifying all parameters of `tf.strided_slice` directly.<br>https://www.tensorflow.org/api_docs/python/tf/strided_slice<br>See [replace_slice.json](https://github.com/PINTO0309/onnx2tf/blob/main/replace_slice.json) for a sample description.<br>![20221221222956](https://user-images.githubusercontent.com/33194443/208916732-9987a69a-83a7-4a29-8b77-d97b1812d59c.png)<br>1. "param_target": "op"<br>`begin`: Value of `begin`<br>`end`: Value of `end`<br>`strides`: Value of `strides`<br>`begin_mask`: Value of `begin_mask`<br>`end_mask`: Value of `end_mask`<br>`ellipsis_mask`: Value of `ellipsis_mask`<br>`new_axis_mask`: Value of `new_axis_mask`<br>`shrink_axis_mask`: Value of `shrink_axis_mask`<br>{<br>&nbsp;&nbsp;"op_name": "/Slice",<br>&nbsp;&nbsp;"param_target": "op",<br>&nbsp;&nbsp;"begin": [0,0,1,0],<br>&nbsp;&nbsp;"end": [0,0,0,0],<br>&nbsp;&nbsp;"end_mask": 15<br>}|
+|17|Slice|`Slice` implements special replacements separately ignore all automatic conversions and generate `tf.strided_slice` directly by specifying all parameters of `tf.strided_slice` directly.<br>https://www.tensorflow.org/api_docs/python/tf/strided_slice<br>See [json_samples/replace_slice.json](https://github.com/PINTO0309/onnx2tf/blob/main/json_samples/replace_slice.json) for a sample description.<br>![20221221222956](https://user-images.githubusercontent.com/33194443/208916732-9987a69a-83a7-4a29-8b77-d97b1812d59c.png)<br>1. "param_target": "op"<br>`begin`: Value of `begin`<br>`end`: Value of `end`<br>`strides`: Value of `strides`<br>`begin_mask`: Value of `begin_mask`<br>`end_mask`: Value of `end_mask`<br>`ellipsis_mask`: Value of `ellipsis_mask`<br>`new_axis_mask`: Value of `new_axis_mask`<br>`shrink_axis_mask`: Value of `shrink_axis_mask`<br>{<br>&nbsp;&nbsp;"op_name": "/Slice",<br>&nbsp;&nbsp;"param_target": "op",<br>&nbsp;&nbsp;"begin": [0,0,1,0],<br>&nbsp;&nbsp;"end": [0,0,0,0],<br>&nbsp;&nbsp;"end_mask": 15<br>}|
 |18|Softmax|1. "param_target": "attributes"<br>`axis`: Value of `axis`. The transpositions corresponding to the specified axis are extrapolated before and after `Softmax`.<br>2. "param_target": "inputs"<br>`values`: Value of `tensor`|
 |19|Split|1. "param_target": "inputs"<br>`values`: Value of `split`<br>2. "param_target": "attributes"<br>`axis`: Value of `axis`.<br>`num_outputs`: Value of `num_outputs`.|
 |20|Sub|1. "param_target": "inputs"<br>`values`: Value of `input`<br>`pre_process_transpose_perm`: Transpose is applied to the tensor before the Sub operation with the perm specified as pre-processing.<br>2. "param_target": "outputs"<br>`post_process_transpose_perm`: Transpose is applied to the tensor after the Sub operation with the perm specified as post-processing.|
onnx2tf-1.25.15.dist-info/RECORD → onnx2tf-1.26.1.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
-onnx2tf/__init__.py,sha256=opVL1OZ6Gid56hPg_xSsIoFbi2YhdQYGiyMz8JpmmUI,67
+onnx2tf/__init__.py,sha256=VdUE2DxKBM0W8yDa6byr11bxjYw_QKVdSvq071Ap8Rs,66
 onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
-onnx2tf/onnx2tf.py,sha256=v7juKPrl_0h9SQyck-Ol4B59QXWSSePVC3AUu3fmvII,122750
+onnx2tf/onnx2tf.py,sha256=vUMz_U8PQ0MQE6a8BBmOmsiFKAE-TMZPE1H_fnn0UCo,123824
 onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
 onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
 onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -26,7 +26,7 @@ onnx2tf/ops/Concat.py,sha256=CKfJbiAwP7h9sFFVyueHJCbwMkUo3NXqkTuRc8v7Tw8,31215
 onnx2tf/ops/ConcatFromSequence.py,sha256=z8pNmGQRGq9cxWORW330NZS_0zsmhFudLswMyPn8AXU,3086
 onnx2tf/ops/Constant.py,sha256=BNZLzNI4rK9kXgVWwD-2RFsDsH7mMy7AY2JSgTNXIWk,10696
 onnx2tf/ops/ConstantOfShape.py,sha256=6eYm-niow-6fHVEyNyi81BdrVe3IbcdazCp2nySWExA,2331
-onnx2tf/ops/Conv.py,sha256=VU6BeEIOyr2DebGU0QWcBwyqzIrje5vur_GdwZEvxjs,38380
+onnx2tf/ops/Conv.py,sha256=2BPHrEFWSWONDFp4Mrs8V426ybKANxUmjFscM8PXXD4,38791
 onnx2tf/ops/ConvInteger.py,sha256=Njw7nJhfZcbo4ofC3UMry5EWxs2lRq1X370vSHIZpXw,27095
 onnx2tf/ops/ConvTranspose.py,sha256=C7CR6m3kz0MtUBdtWrrKWZbZL7tJpGXl7Nkn3DRiEaA,15410
 onnx2tf/ops/Cos.py,sha256=0v5ZJZRzrswVEObyxf4f0RvnWMWZA4uCEdoeq_VE31s,3608
@@ -185,13 +185,13 @@ onnx2tf/ops/_Loop.py,sha256=eo5sNfrfOnKV6_I737AWsM5LJTY9DVOxQEvhanxtP4g,11322
 onnx2tf/ops/__Loop.py,sha256=ClwMcbNS4hqUtW_pzwjMa9Cqg7ONvz9aplke55A0uJ0,19704
 onnx2tf/ops/__init__.py,sha256=jnmUWWa-3dHzBZV9bmPzXu6eoz2dumJTzO7i8JdcgSM,25
 onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
-onnx2tf/utils/common_functions.py,sha256=gCpAe11EcX-gYOMu68saIpIj20pDmLld-6jybnc_0aY,240691
+onnx2tf/utils/common_functions.py,sha256=35vTJfectN2lPwsVGaka_wzpZpCLJeQDmn327oVj4FA,241128
 onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
 onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
-onnx2tf-1.25.15.dist-info/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
-onnx2tf-1.25.15.dist-info/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
-onnx2tf-1.25.15.dist-info/METADATA,sha256=a3NRQOOxqxrY48M3FAnq6UiitZ3iXaX_zyLHhd02buQ,146125
-onnx2tf-1.25.15.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-onnx2tf-1.25.15.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
-onnx2tf-1.25.15.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
-onnx2tf-1.25.15.dist-info/RECORD,,
+onnx2tf-1.26.1.dist-info/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
+onnx2tf-1.26.1.dist-info/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
+onnx2tf-1.26.1.dist-info/METADATA,sha256=h7Qt9w1jz2cYvjyl_nmXcrzhsPmbovld2ZBpxEeu_J4,146536
+onnx2tf-1.26.1.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+onnx2tf-1.26.1.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
+onnx2tf-1.26.1.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
+onnx2tf-1.26.1.dist-info/RECORD,,