onnx2tf 1.27.0__py3-none-any.whl → 1.27.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main
 
- __version__ = '1.27.0'
+ __version__ = '1.27.2'
onnx2tf/ops/Add.py CHANGED
@@ -130,9 +130,9 @@ def make_node(
  )
 
  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)
 
  # Disable unnecessary Transpose
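The same float16 promotion is applied in Div, Mod, Mul, and Sub below. As a rough standalone sketch of what the added branch does (illustrative only, not code from the package; `add_with_promotion` is a hypothetical helper), a float16 operand paired with a float32 operand is cast up to float32 before the elementwise op, so TensorFlow is not asked to combine tensors of mismatched dtypes:

```python
# Minimal sketch (not part of onnx2tf): promote a float16 (or int32/int64)
# operand to float32 before a binary op so both inputs share a dtype,
# mirroring the cast added in Add/Div/Mod/Mul/Sub above.
import tensorflow as tf

def add_with_promotion(a: tf.Tensor, b: tf.Tensor) -> tf.Tensor:
    # tf.add requires both operands to have the same dtype, so cast the
    # non-float32 side up to float32 when the other side is float32.
    if a.dtype == tf.float32 and b.dtype in (tf.int32, tf.int64, tf.float16):
        b = tf.cast(b, tf.float32)
    elif a.dtype in (tf.int32, tf.int64, tf.float16) and b.dtype == tf.float32:
        a = tf.cast(a, tf.float32)
    return tf.add(a, b)

x = tf.constant([1.0, 2.0], dtype=tf.float32)
y = tf.constant([0.5, 0.5], dtype=tf.float16)
print(add_with_promotion(x, y))  # float32 result, no mixed-dtype error
```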
onnx2tf/ops/Cast.py CHANGED
@@ -96,10 +96,16 @@ def make_node(
  tf_layers_dict[graph_node_output.name].pop('nhwc')
 
  # Suppression of FlexCast generation
- # Float32 -> Float64
+ # Float64 -> Float32
+ # Float16 -> Float32
  if input_tensor.dtype == tf.float32 \
  and to == tf.float64:
  to = tf.float32
+ elif isinstance(graph_node.inputs[0], gs.Variable) \
+ and hasattr(graph_node.inputs[0], "dtype") \
+ and graph_node.inputs[0].dtype == np.float32 \
+ and to == tf.float16:
+ to = tf.float32
 
  # Generation of TF OP
  tf_layers_dict[graph_node_output.name]['tf_node'] = \
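For context on the Cast change (a minimal sketch, not the package's actual helper): when the input is a float32 graph variable and the requested target type is float16, the target is rewritten to float32, so no Cast that TFLite can only express as FlexCast is emitted. The real branch additionally checks that the input is a `gs.Variable`; the sketch below models only the dtype rewrite, and `resolve_cast_dtype` is an illustrative name:

```python
# Hypothetical sketch of the Cast dtype rewrite above (not an onnx2tf API).
import tensorflow as tf

def resolve_cast_dtype(src_dtype: tf.DType, to: tf.DType) -> tf.DType:
    # Float32 -> Float64 and Float32 -> Float16 casts are both suppressed by
    # keeping the tensor in float32.
    if src_dtype == tf.float32 and to in (tf.float64, tf.float16):
        return tf.float32
    return to

print(resolve_cast_dtype(tf.float32, tf.float16))  # tf.float32
print(resolve_cast_dtype(tf.int64, tf.float16))    # tf.float16 (unchanged)
```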
onnx2tf/ops/Div.py CHANGED
@@ -116,9 +116,9 @@ def make_node(
  )
 
  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)
 
  # Disable unnecessary Transpose
onnx2tf/ops/MatMul.py CHANGED
@@ -128,6 +128,12 @@ def make_node(
  output_dtype = NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
  if isinstance(dtype, np.dtype) else dtype
 
+ # Workaround for Float16
+ if input_tensor_1.dtype == tf.float32 and output_dtype in [tf.int32, tf.int64, tf.float16]:
+ output_dtype = tf.float32
+ elif output_dtype and input_tensor_2.dtype == tf.float32:
+ output_dtype = tf.float32
+
  # Shape Unmatch Error Mitigation Measures
  # Search for and transpose shapes that do not cause shape unmatch errors
  min_abs_err = sys.maxsize
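The MatMul change adjusts the dtype the converted output will be cast to, rather than the operands themselves. A loose sketch of the intent follows (names hypothetical; note the second branch in the actual diff only checks that `output_dtype` is truthy, which is broader than this sketch): if a float32 operand is involved, a float16 or integer output dtype is forced back to float32 so the result of the matrix multiply stays in a TFLite-friendly type.

```python
# Illustrative sketch only: keep a MatMul's recorded output dtype at float32
# when a float32 operand is involved, then cast the result only if needed.
import tensorflow as tf

def matmul_with_output_dtype(a: tf.Tensor, b: tf.Tensor, output_dtype: tf.DType) -> tf.Tensor:
    if a.dtype == tf.float32 and output_dtype in (tf.int32, tf.int64, tf.float16):
        output_dtype = tf.float32
    elif output_dtype in (tf.int32, tf.int64, tf.float16) and b.dtype == tf.float32:
        output_dtype = tf.float32
    result = tf.matmul(a, b)
    return tf.cast(result, output_dtype) if result.dtype != output_dtype else result

a = tf.random.normal([2, 3])  # float32
b = tf.random.normal([3, 4])  # float32
print(matmul_with_output_dtype(a, b, tf.float16).dtype)  # float32
```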
onnx2tf/ops/Mod.py CHANGED
@@ -115,9 +115,9 @@ def make_node(
  )
 
  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)
 
  # Disable unnecessary Transpose
onnx2tf/ops/Mul.py CHANGED
@@ -120,9 +120,9 @@ def make_node(
  )
 
  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)
 
  # Disable unnecessary Transpose
onnx2tf/ops/Split.py CHANGED
@@ -158,15 +158,56 @@ def make_node(
  if idx == axis:
  end_.append(split_idx + 1)
  elif input_tensor_shape[idx] is None:
- end_.append(-1)
+ end_.append(0)
  else:
  end_.append(input_tensor_shape[idx])
 
+ begin_mask_ = np.sum([2**idx if idx != axis else 0 for idx in range(input_tensor_rank)])
+ end_mask_ = np.sum([2**idx if idx != axis else 0 for idx in range(input_tensor_rank)])
+
  splited_tensors.append(
  tf.strided_slice(
  input_=input_tensor,
  begin=begin_,
  end=end_,
+ begin_mask=begin_mask_,
+ end_mask=end_mask_,
+ )
+ )
+ elif isinstance(split, np.ndarray) \
+ and len(list(split)) > 1 \
+ and np.prod(split) != 1 \
+ and np.all(split == split[0]) \
+ and isinstance(input_tensor_shape[axis], int) \
+ and input_tensor_shape[axis] == np.sum(split):
+ # strided_slice - Slice everything in same size
+ # Suppression of FlexSplitV generation
+ # https://github.com/PINTO0309/onnx2tf/issues/751
+ splited_tensors = []
+ split_size = split[0]
+ for split_idx in range(len(list(split))):
+ begin_ = [
+ split_size * split_idx if idx == axis else 0 for idx in range(input_tensor_rank)
+ ]
+ end_ = []
+ for idx in range(input_tensor_rank):
+ if idx == axis:
+ end_.append(split_size * split_idx + split_size)
+ elif input_tensor_shape[idx] is None:
+ end_.append(0)
+ else:
+ end_.append(input_tensor_shape[idx])
+
+ begin_mask_ = np.sum([2**idx if idx != axis else 0 for idx in range(input_tensor_rank)])
+ end_mask_ = np.sum([2**idx if idx != axis else 0 for idx in range(input_tensor_rank)])
+
+ splited_tensors.append(
+ tf.strided_slice(
+ input_=input_tensor,
+ begin=begin_,
+ end=end_,
+ begin_mask=begin_mask_,
+ end_mask=end_mask_,
  )
  )
  elif isinstance(split, np.ndarray) \
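The new Split branch fires only when all requested chunks have the same size and the length along the split axis is statically known; it then emits one `tf.strided_slice` per chunk instead of a SplitV that TFLite would have to run as FlexSplitV. Below is a self-contained sketch of the slicing pattern (a hypothetical helper, not onnx2tf code): every axis other than the split axis sets its bit in `begin_mask`/`end_mask`, so its placeholder 0 bounds are ignored and the full extent is taken.

```python
# Sketch: emulate an equal-size Split with strided_slice, mirroring the new
# branch above. split_with_strided_slice is an illustrative name only.
import numpy as np
import tensorflow as tf

def split_with_strided_slice(x: tf.Tensor, split_sizes, axis: int):
    rank = len(x.shape)
    size = int(split_sizes[0])
    # Bitmask covering every axis except `axis`: those begin/end entries are
    # ignored and the whole axis is kept.
    mask = int(np.sum([2**idx if idx != axis else 0 for idx in range(rank)]))
    outputs = []
    for n in range(len(split_sizes)):
        begin = [size * n if idx == axis else 0 for idx in range(rank)]
        end = [size * n + size if idx == axis else 0 for idx in range(rank)]
        outputs.append(
            tf.strided_slice(x, begin=begin, end=end, begin_mask=mask, end_mask=mask)
        )
    return outputs

x = tf.reshape(tf.range(24, dtype=tf.float32), [2, 12])
parts = split_with_strided_slice(x, [4, 4, 4], axis=1)
print([p.shape.as_list() for p in parts])  # [[2, 4], [2, 4], [2, 4]]
```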
onnx2tf/ops/Sub.py CHANGED
@@ -114,9 +114,9 @@ def make_node(
  )
 
  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
  input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
  input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)
 
  # Disable unnecessary Transpose
onnx2tf-1.27.0.dist-info/METADATA → onnx2tf-1.27.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.27.0
+ Version: 1.27.2
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -18,6 +18,7 @@ Dynamic: description
  Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: license
+ Dynamic: license-file
  Dynamic: platform
  Dynamic: requires-python
  Dynamic: summary
@@ -280,7 +281,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 
  ## Environment
  - Linux / Windows
- - onnx==1.16.1
+ - onnx==1.17.0
  - onnxruntime==1.18.1
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
@@ -333,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.27.0
+ ghcr.io/pinto0309/onnx2tf:1.27.2
 
  or
 
@@ -341,7 +342,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.27.0
+ docker.io/pinto0309/onnx2tf:1.27.2
 
  or
 
@@ -1512,7 +1513,7 @@ See: https://github.com/tensorflow/tfjs/tree/master/tfjs-converter
  When converting to CoreML, process as follows. The `-k` option is for conversion while maintaining the input channel order in ONNX's NCHW format.
 
  ```bash
- pip install coremltools
+ pip install coremltools==8.2
 
  onnx2tf -i mobilenetv2-12.onnx -k input -ois input:1,3,224,224 -osd
  ```
@@ -1525,7 +1526,7 @@ model = ct.convert(
  model=FOLDER_PATH,
  source='tensorflow',
  )
- model.save(f'{FOLDER_PATH}/model.mlmodel')
+ model.save(f'{FOLDER_PATH}/model.mlpackage')
  ```
 
  See: https://github.com/apple/coremltools
onnx2tf-1.27.0.dist-info/RECORD → onnx2tf-1.27.2.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
- onnx2tf/__init__.py,sha256=lHhAWe8iqi6znmPVuWY05CVVp6042_wHUvkziLiOvA8,66
+ onnx2tf/__init__.py,sha256=hu7TZkPbwUCwf8zZcrp1NGR2aMUiIbLW_oqR1se7KzI,66
  onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
  onnx2tf/onnx2tf.py,sha256=IEnfIs3Dy8Y5F3iJ4HY7bWkn3QuB6lq_gHa1q5E3tMI,124745
  onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
  onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
  onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
- onnx2tf/ops/Add.py,sha256=JnoEYKLFI2Uh7G1MTOvMxGPLIgIJ5V1VAZGKkVa_XzU,12271
+ onnx2tf/ops/Add.py,sha256=pgJTnV1wZZk3mRaVxxezVkArfmlqlk74DCMZDm6VRJc,12295
  onnx2tf/ops/And.py,sha256=_ubtWa0r8-60x__pS7MEMil1DfBqxiUsk66yRCYS4KY,4591
  onnx2tf/ops/ArgMax.py,sha256=F3PV4EchYQgH1GATJybVGnmY9sGvZkgxCHbNCue9Qns,7278
  onnx2tf/ops/ArgMin.py,sha256=32r7I8AYLQOKTPOOPX1AZwiPnQfkrFB0Le16vdJ1yBs,4225
@@ -16,7 +16,7 @@ onnx2tf/ops/AveragePool.py,sha256=p9R4k87FO1yKZMQ699FIftXGUNKxb5yu0vYfzPlpsMA,14
  onnx2tf/ops/BatchNormalization.py,sha256=_hlf2-5-j3MCJHEoE2oMNQ8YhCm7ad9h2fwPpTo3i7g,26624
  onnx2tf/ops/Bernoulli.py,sha256=PM0xS0n1q4bnT_9PnbcKW8_Qj8dJYYBQR8kb2X-wIp4,3670
  onnx2tf/ops/BitShift.py,sha256=a28_E9hwA8yfjvtsrSKCZCeeMPB5RBQbjB3cmaNGN6k,3861
- onnx2tf/ops/Cast.py,sha256=iVSqSm1l_MXHtxUBRdQPJlzOTNRHcqMAPKi_LWaPYuc,4357
+ onnx2tf/ops/Cast.py,sha256=M0LRClHPgZ_8NubwME6ipKrAqcY9aKC5ihQXCkTkNkM,4601
  onnx2tf/ops/Ceil.py,sha256=0-jaueltpQSwpOIDUmy9DdTy98qN-XimYu5cHVPnUIs,3586
  onnx2tf/ops/Celu.py,sha256=9g7WNKo4G_jMtUXcoOfpNdLYqEsuyXLPkkyQZxDuL4U,3853
  onnx2tf/ops/Clip.py,sha256=K3Pgt9BXl5_rzg6s-kPFmwElL5COsvolRY1BUTo7UWw,8753
@@ -35,7 +35,7 @@ onnx2tf/ops/CumSum.py,sha256=SYKmD5r9Cm9gsCkJPNFoHigvvBO1PmRYRrVmn1HE78o,3954
  onnx2tf/ops/DepthToSpace.py,sha256=BiyBZ88dmXQAkZ5Jc-Ddo-5Kn8dRYCnoik_XnOFzqXc,14449
  onnx2tf/ops/DequantizeLinear.py,sha256=cNbGw4ITg_BsrXYkSb7fD05XEkQgz7v__-StQtvIvB4,5220
  onnx2tf/ops/Det.py,sha256=kxuHkpv_KNHkof0uBv2RLtr3G1uA76MFHyCiCYCBXkw,3590
- onnx2tf/ops/Div.py,sha256=fIN90mp7ByeKWEVDVENvCMja5Qc83jstayNw9hoKP4Y,16224
+ onnx2tf/ops/Div.py,sha256=NyAsvCxI41hyBX_kiCEILHY6QQkas_o4wRY8zkDUiwk,16248
  onnx2tf/ops/Dropout.py,sha256=KZKVqlnbq875awsNvJaQRvkO3XgqxeAmjbikXymRCtA,5860
  onnx2tf/ops/DynamicQuantizeLinear.py,sha256=UGmN2nXBBQHXcNlorEQfnKDnnoOadt4TNzXox-Xki2U,4759
  onnx2tf/ops/Einsum.py,sha256=YBw0JmSglOVVje80RqmqIjgsc7V5SnYS6s1Ysa2NUPA,12369
@@ -82,7 +82,7 @@ onnx2tf/ops/LessOrEqual.py,sha256=9Lc8qaYUPVC6yZoQluNqcdHnvpUbfWBOI4Ow38RRAJo,45
  onnx2tf/ops/Log.py,sha256=UZebF3SGq85BnoPgYyN2j-zzFRp67fJnYPNyu33W55o,3582
  onnx2tf/ops/LogSoftmax.py,sha256=j2nhYY7__8ViLFJVLA5tS98QEvGS1gTIW0QCdnZWUPQ,3923
  onnx2tf/ops/LpNormalization.py,sha256=Uu15HgxFNXb6gNMgdTJyf0SLPaLbcbkOYqY_4hMBxNA,3153
- onnx2tf/ops/MatMul.py,sha256=oH-VvMn-RTozk3E8zcFE2-T78csDIygtMksVX30o4MY,18804
+ onnx2tf/ops/MatMul.py,sha256=95HrWr3Dt6BLqx_zqm3WXBw_WzrWLObYVgz4K1yrhqE,19060
  onnx2tf/ops/MatMulInteger.py,sha256=qHqzdJNI9SeJDbW8pR90baYCdGN6FdOez4hi9EzwXoc,6538
  onnx2tf/ops/Max.py,sha256=w5nMciO_6ApYUobHuwMGuS3xhuza7eSvKDRhvMPgAuo,3256
  onnx2tf/ops/MaxPool.py,sha256=_JC4eqBTh-qLkZCMG8RZhthRZ8D2d821zaFMWeGMEWc,15775
@@ -92,8 +92,8 @@ onnx2tf/ops/MeanVarianceNormalization.py,sha256=Ne53jlDgAJZ9yhzKOWR-0LnjDdM-fg7D
  onnx2tf/ops/MelWeightMatrix.py,sha256=MyYFUTxz2wFVqNx3Dhlro0ktg9kxtEq8sGFmHICDZsI,5453
  onnx2tf/ops/Min.py,sha256=dK3i115xYh6NusQtGfswEGYBg9MBc_g-edafLgvq4TQ,3356
  onnx2tf/ops/Mish.py,sha256=LEg5MXBLLIzwxmsudC1zTA_yq7drVY_DMCB8lHBCA-8,3546
- onnx2tf/ops/Mod.py,sha256=K6oH5Q4I5JWh8DFp8T1CSdL4WUJCexYdfqTy5iceJxo,9999
- onnx2tf/ops/Mul.py,sha256=p75MHWbJSo6jLarFzmfK6oQREar4ntlFGqn-U7MzY8s,15962
+ onnx2tf/ops/Mod.py,sha256=Y7kqCEOLqof4zVszJslQayt6COyU-MS5qKLHAYOyxmc,10023
+ onnx2tf/ops/Mul.py,sha256=0hOf2O8ktRpIi4eOMfLGdwKl-yACFyGO3nU_s_XXUIE,15986
  onnx2tf/ops/Multinomial.py,sha256=0HQC76IA3AvRsUx9RS0S__nIfEmPuvIaDfSt8bns4FU,3158
  onnx2tf/ops/Neg.py,sha256=vu2ExVXyGpggAM_DNPeZj9QFeUyqhn5XmJnDlPJFsQU,4219
  onnx2tf/ops/NonMaxSuppression.py,sha256=nHeiX5eMGQAq_51KoljNZGlZddJ89Oe7Yfe33xLhl6M,15763
@@ -162,12 +162,12 @@ onnx2tf/ops/Softmax.py,sha256=CEnHcSm25v1QC4QVDg4fz1NooYY1v-Uq4GORd8dnnr8,14773
  onnx2tf/ops/Softplus.py,sha256=R44YMo8G2Ig15jBO6T2VOI6RhpUmjD70qvSCXFylU-Q,3605
  onnx2tf/ops/Softsign.py,sha256=2ZdKH3KVHZXDzyO7S8f-O_aqRugurbRxd1i2g_fwCos,3600
  onnx2tf/ops/SpaceToDepth.py,sha256=rWtPQNm2rErYs20gQyz-tFYsImAIUBGtdvfMVkJg5bo,2809
- onnx2tf/ops/Split.py,sha256=ueBG52Z_wWD2xZLXgCPB11VbYz6pQDvhATqylomEMY8,8946
+ onnx2tf/ops/Split.py,sha256=ukm7QZmSwYwUwGLbVGsOiCEB3YfrFMl0cozn1kwgCv0,10728
  onnx2tf/ops/SplitToSequence.py,sha256=BS_JEd7DC7vuPfs5oRRW774mtlK--kqf9DJUalv-Agk,5062
  onnx2tf/ops/Sqrt.py,sha256=-xE8Tk_6unSR56k9g3R46lML4Nht5kQwqJT0JYkn5ko,3585
  onnx2tf/ops/Squeeze.py,sha256=FLIt2qjWh1IJyti1c4YHuepH2Fkxt40rnEKszzmwsnE,7980
  onnx2tf/ops/StringNormalizer.py,sha256=lyjUfhvZiIUZhLptI0rW_xwpFBJ6XuhDCyvCKNh-ogA,5214
- onnx2tf/ops/Sub.py,sha256=_fSx-wrpoQAzqShFEU3QMZb-E3FdfPdfc9k-2Xt0cgc,10988
+ onnx2tf/ops/Sub.py,sha256=JCUWNmRLrwJEB8_0MPRTzmZ4KAV_HLXNivUd_jNqPQI,11012
  onnx2tf/ops/Sum.py,sha256=wtI0SbGuNFxkLskBk68ZhOAg3XyrIx-9xGYy1GZCVSo,3073
  onnx2tf/ops/Tan.py,sha256=Ncig8clGvY7GWshqxRDRdcxjcbf_HTKGdpDw5ValrKI,3582
  onnx2tf/ops/Tanh.py,sha256=PIQUvxS_AIDufblC2vc573nse2UCRA9z5yWd7kB-51s,3585
@@ -188,10 +188,10 @@ onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
  onnx2tf/utils/common_functions.py,sha256=HTDca3DGXB3xvc1S50RNscgB57TCiq4yC5Nrafs6ka4,241430
  onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
  onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
- onnx2tf-1.27.0.dist-info/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
- onnx2tf-1.27.0.dist-info/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
- onnx2tf-1.27.0.dist-info/METADATA,sha256=Qjvfs5wxntQGejt30wlRjfCY1PA2gtg8d2UOLJ_8GEk,147683
- onnx2tf-1.27.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
- onnx2tf-1.27.0.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
- onnx2tf-1.27.0.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
- onnx2tf-1.27.0.dist-info/RECORD,,
+ onnx2tf-1.27.2.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
+ onnx2tf-1.27.2.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
+ onnx2tf-1.27.2.dist-info/METADATA,sha256=Vhy11gd-Dvr86ENsHvxyHUhVQGHzwEY6PHvY8KYmVqM,147712
+ onnx2tf-1.27.2.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ onnx2tf-1.27.2.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
+ onnx2tf-1.27.2.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
+ onnx2tf-1.27.2.dist-info/RECORD,,
onnx2tf-1.27.0.dist-info/WHEEL → onnx2tf-1.27.2.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (76.0.0)
+ Generator: setuptools (78.1.0)
  Root-Is-Purelib: true
  Tag: py3-none-any