onnx2tf 1.26.9-py3-none-any.whl → 1.27.1-py3-none-any.whl

This diff shows the content of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.26.9'
+ __version__ = '1.27.1'
onnx2tf/ops/Add.py CHANGED
@@ -130,9 +130,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
      input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
      input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose
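The same float16 widening is applied to Div, Mod, Mul, and Sub below. A minimal sketch of why the cast is needed, assuming an element-wise add like the one this op lowers to (the tensors are illustrative): TensorFlow's binary math kernels reject mixed operand dtypes.

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0], dtype=tf.float32)
y = tf.constant([0.5, 0.25], dtype=tf.float16)

# tf.math.add(x, y) raises InvalidArgumentError: AddV2 expects both
# operands to share one dtype, so the float16 side is widened first.
z = tf.math.add(x, tf.cast(y, dtype=tf.float32))
print(z.dtype)  # <dtype: 'float32'>
```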
onnx2tf/ops/Cast.py CHANGED
@@ -96,10 +96,16 @@ def make_node(
  tf_layers_dict[graph_node_output.name].pop('nhwc')

  # Suppression of FlexCast generation
- # Float32 -> Float64
+ # Float64 -> Float32
+ # Float16 -> Float32
  if input_tensor.dtype == tf.float32 \
      and to == tf.float64:
      to = tf.float32
+ elif isinstance(graph_node.inputs[0], gs.Variable) \
+     and hasattr(graph_node.inputs[0], "dtype") \
+     and graph_node.inputs[0].dtype == np.float32 \
+     and to == tf.float16:
+     to = tf.float32

  # Generation of TF OP
  tf_layers_dict[graph_node_output.name]['tf_node'] = \
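A minimal sketch of the redirect rule above. The helper name is illustrative, and the real branch additionally requires the ONNX input to be a `gs.Variable` whose declared dtype is `np.float32`:

```python
import tensorflow as tf

def redirect_cast_target(input_dtype, to):
    # Sketch: pin cast targets that TFLite would otherwise lower to a
    # Flex-delegate Cast op (float64, and float16 from a float32 input).
    if input_dtype == tf.float32 and to == tf.float64:
        return tf.float32  # Float64 -> Float32
    if input_dtype == tf.float32 and to == tf.float16:
        return tf.float32  # Float16 -> Float32
    return to

assert redirect_cast_target(tf.float32, tf.float64) == tf.float32
assert redirect_cast_target(tf.float32, tf.float16) == tf.float32
assert redirect_cast_target(tf.int64, tf.int32) == tf.int32  # left as-is
```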
onnx2tf/ops/Div.py CHANGED
@@ -116,9 +116,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
      input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
      input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose
onnx2tf/ops/MatMul.py CHANGED
@@ -128,6 +128,12 @@ def make_node(
  output_dtype = NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
      if isinstance(dtype, np.dtype) else dtype

+ # Workaround for Float16
+ if input_tensor_1.dtype == tf.float32 and output_dtype in [tf.int32, tf.int64, tf.float16]:
+     output_dtype = tf.float32
+ elif output_dtype and input_tensor_2.dtype == tf.float32:
+     output_dtype = tf.float32
+
  # Shape Unmatch Error Mitigation Measures
  # Search for and transpose shapes that do not cause shape unmatch errors
  min_abs_err = sys.maxsize
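A minimal sketch of the mismatch this guards against, with illustrative shapes: `tf.matmul` rejects mixed-precision operands, so the converter pins the output dtype to float32 whenever one side is already float32.

```python
import tensorflow as tf

a = tf.ones([2, 3], dtype=tf.float32)
b = tf.ones([3, 4], dtype=tf.float16)

# tf.matmul(a, b) raises InvalidArgumentError: MatMul requires matching
# dtypes, so everything stays in float32 instead of the float16 target.
c = tf.matmul(a, tf.cast(b, dtype=tf.float32))
print(c.dtype)  # <dtype: 'float32'>
```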
onnx2tf/ops/Mod.py CHANGED
@@ -115,9 +115,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
      input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
      input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose
onnx2tf/ops/Mul.py CHANGED
@@ -120,9 +120,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
      input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
      input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose
onnx2tf/ops/Sub.py CHANGED
@@ -114,9 +114,9 @@ def make_node(
  )

  # Workaround for ConvInteger
- if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64]:
+ if input_tensor_1.dtype == tf.float32 and input_tensor_2.dtype in [tf.int32, tf.int64, tf.float16]:
      input_tensor_2 = tf.cast(input_tensor_2, dtype=tf.float32)
- elif input_tensor_1.dtype in [tf.int32, tf.int64] and input_tensor_2.dtype == tf.float32:
+ elif input_tensor_1.dtype in [tf.int32, tf.int64, tf.float16] and input_tensor_2.dtype == tf.float32:
      input_tensor_1 = tf.cast(input_tensor_1, dtype=tf.float32)

  # Disable unnecessary Transpose
onnx2tf/utils/common_functions.py CHANGED
@@ -16,6 +16,7 @@ import subprocess
  import numpy as np
  np.random.seed(0)
  import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import tf_keras
  from tensorflow.python.keras.layers import Lambda
  from tensorflow.python.keras.utils import conv_utils
@@ -4143,8 +4144,7 @@ def weights_export(
      Path to file in hdf5 format to save the extracted weights
      """
      import h5py
-     from tensorflow.lite.python import interpreter as interpreter_wrapper
-     interpreter = interpreter_wrapper.Interpreter(
+     interpreter = Interpreter(
          model_path=extract_target_tflite_file_path,
      )
      interpreter.allocate_tensors()
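The README hunks below make the same swap. Recent TensorFlow releases hand the TFLite interpreter off to the separate `ai-edge-litert` package; the constructor and methods used in this codebase are identical across the two, so migration is a one-line import change (the model path here is illustrative):

```python
# Before: interpreter = tf.lite.Interpreter(model_path=...)
from ai_edge_litert.interpreter import Interpreter

interpreter = Interpreter(model_path="model_float32.tflite")  # illustrative path
interpreter.allocate_tensors()
runner = interpreter.get_signature_runner()  # same API surface as tf.lite.Interpreter
```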
onnx2tf-1.27.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.26.9
+ Version: 1.27.1
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -18,6 +18,7 @@ Dynamic: description
  Dynamic: description-content-type
  Dynamic: home-page
  Dynamic: license
+ Dynamic: license-file
  Dynamic: platform
  Dynamic: requires-python
  Dynamic: summary
@@ -280,14 +281,16 @@ Video speed is adjusted approximately 50 times slower than actual speed.

  ## Environment
  - Linux / Windows
- - onnx==1.16.1
+ - onnx==1.17.0
  - onnxruntime==1.18.1
  - onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
  - onnx_graphsurgeon
  - simple_onnx_processing_tools
- - tensorflow==2.17.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tensorflow==2.19.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
+ - tf-keras==2.19.0
+ - ai-edge-litert==1.2.0
  - psutil==5.9.5
- - ml_dtypes==0.3.2
+ - ml_dtypes==0.5.1
  - flatbuffers-compiler (Optional, Only when using the `-coion` option. Executable file named `flatc`.)
  - flatbuffers>=23.1.21
  ```bash
@@ -331,7 +334,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.26.9
+ ghcr.io/pinto0309/onnx2tf:1.27.1

  or

@@ -339,11 +342,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.26.9
+ docker.io/pinto0309/onnx2tf:1.27.1

  or

- pip install -U onnx==1.16.1 \
+ pip install -U onnx==1.17.0 \
  && pip install -U nvidia-pyindex \
  && pip install -U onnx-graphsurgeon \
  && pip install -U onnxruntime==1.18.1 \
@@ -351,13 +354,14 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  && pip install -U simple_onnx_processing_tools \
  && pip install -U sne4onnx>=1.0.13 \
  && pip install -U sng4onnx>=1.0.4 \
- && pip install -U tensorflow==2.17.0 \
+ && pip install -U ai_edge_litert==1.2.0 \
+ && pip install -U tensorflow==2.19.0 \
  && pip install -U protobuf==3.20.3 \
  && pip install -U onnx2tf \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26

  or
@@ -381,8 +385,9 @@ or
  && sudo chmod +x flatc \
  && sudo mv flatc /usr/bin/
  !pip install -U pip \
- && pip install tensorflow==2.17.0 \
- && pip install -U onnx==1.16.1 \
+ && pip install tensorflow==2.19.0 \
+ && pip install ai_edge_litert==1.2.0 \
+ && pip install -U onnx==1.17.0 \
  && python -m pip install onnx_graphsurgeon \
  --index-url https://pypi.ngc.nvidia.com \
  && pip install -U onnxruntime==1.18.1 \
@@ -392,8 +397,8 @@ or
  && pip install -U protobuf==3.20.3 \
  && pip install -U h5py==3.11.0 \
  && pip install -U psutil==5.9.5 \
- && pip install -U ml_dtypes==0.3.2 \
- && pip install -U tf-keras~=2.16 \
+ && pip install -U ml_dtypes==0.5.1 \
+ && pip install -U tf-keras==2.19.0 \
  && pip install flatbuffers>=23.5.26
  ```

@@ -608,7 +613,7 @@ import onnxruntime
  import numpy as np
  import onnx2tf
  import tensorflow as tf
- from tensorflow.lite.python import interpreter as tflite_interpreter
+ from ai_edge_litert.interpreter import Interpreter

  class Model(torch.nn.Module):
      def forward(self, x, y):
@@ -647,7 +652,7 @@ onnx2tf.convert(
  )

  # Now, test the newer TFLite model
- interpreter = tf.lite.Interpreter(model_path="model.tf/model_float32.tflite")
+ interpreter = Interpreter(model_path="model.tf/model_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'x': np.asarray([10], dtype=np.int64),
@@ -1061,10 +1066,10 @@ Now, let's try inference with the TFLite runtime instead of the TensorFlow runti
  import time
  import numpy as np
  np.random.seed(0)
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter

  # Load TFLite model
- interpreter = tf.lite.Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
+ interpreter = Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
  interpreter.allocate_tensors()
  tensor_shape = (256, 20)
  input_data = {'waveform': np.random.randn(*tensor_shape).astype(np.float32)}
@@ -1232,10 +1237,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `5`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'images': np.ones([5,256,128,3], dtype=np.float32),
@@ -1263,10 +1268,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
  - `test.py` - Batch size: `3`
  ```python
  import numpy as np
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
  tf_lite_model = interpreter.get_signature_runner()
  inputs = {
      'images': np.ones([3,256,128,3], dtype=np.float32),
@@ -1350,11 +1355,11 @@ The relationship between the ONNX before conversion and the TFLite file after co
  Use the generated TFLite file to inference and ensure that it always contains fixed value output.

  ```python
- import tensorflow as tf
+ from ai_edge_litert.interpreter import Interpreter
  import numpy as np
  from pprint import pprint

- interpreter = tf.lite.Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
+ interpreter = Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
@@ -1508,7 +1513,7 @@ See: https://github.com/tensorflow/tfjs/tree/master/tfjs-converter
  When converting to CoreML, process as follows. The `-k` option is for conversion while maintaining the input channel order in ONNX's NCHW format.

  ```bash
- pip install coremltools
+ pip install coremltools==8.2

  onnx2tf -i mobilenetv2-12.onnx -k input -ois input:1,3,224,224 -osd
  ```
@@ -1521,7 +1526,7 @@ model = ct.convert(
      model=FOLDER_PATH,
      source='tensorflow',
  )
- model.save(f'{FOLDER_PATH}/model.mlmodel')
+ model.save(f'{FOLDER_PATH}/model.mlpackage')
  ```

  See: https://github.com/apple/coremltools
onnx2tf-1.27.1.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
- onnx2tf/__init__.py,sha256=14sed6LJ9qrrV90jfdiUVlID3DheS0YBDgOQlRIbt_U,66
+ onnx2tf/__init__.py,sha256=1DKX0NLn80YH-Q7CqMU_p0krBqmhasp8q5rJQdssxTw,66
  onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
  onnx2tf/onnx2tf.py,sha256=IEnfIs3Dy8Y5F3iJ4HY7bWkn3QuB6lq_gHa1q5E3tMI,124745
  onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
  onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
  onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
- onnx2tf/ops/Add.py,sha256=JnoEYKLFI2Uh7G1MTOvMxGPLIgIJ5V1VAZGKkVa_XzU,12271
+ onnx2tf/ops/Add.py,sha256=pgJTnV1wZZk3mRaVxxezVkArfmlqlk74DCMZDm6VRJc,12295
  onnx2tf/ops/And.py,sha256=_ubtWa0r8-60x__pS7MEMil1DfBqxiUsk66yRCYS4KY,4591
  onnx2tf/ops/ArgMax.py,sha256=F3PV4EchYQgH1GATJybVGnmY9sGvZkgxCHbNCue9Qns,7278
  onnx2tf/ops/ArgMin.py,sha256=32r7I8AYLQOKTPOOPX1AZwiPnQfkrFB0Le16vdJ1yBs,4225
@@ -16,7 +16,7 @@ onnx2tf/ops/AveragePool.py,sha256=p9R4k87FO1yKZMQ699FIftXGUNKxb5yu0vYfzPlpsMA,14
  onnx2tf/ops/BatchNormalization.py,sha256=_hlf2-5-j3MCJHEoE2oMNQ8YhCm7ad9h2fwPpTo3i7g,26624
  onnx2tf/ops/Bernoulli.py,sha256=PM0xS0n1q4bnT_9PnbcKW8_Qj8dJYYBQR8kb2X-wIp4,3670
  onnx2tf/ops/BitShift.py,sha256=a28_E9hwA8yfjvtsrSKCZCeeMPB5RBQbjB3cmaNGN6k,3861
- onnx2tf/ops/Cast.py,sha256=iVSqSm1l_MXHtxUBRdQPJlzOTNRHcqMAPKi_LWaPYuc,4357
+ onnx2tf/ops/Cast.py,sha256=M0LRClHPgZ_8NubwME6ipKrAqcY9aKC5ihQXCkTkNkM,4601
  onnx2tf/ops/Ceil.py,sha256=0-jaueltpQSwpOIDUmy9DdTy98qN-XimYu5cHVPnUIs,3586
  onnx2tf/ops/Celu.py,sha256=9g7WNKo4G_jMtUXcoOfpNdLYqEsuyXLPkkyQZxDuL4U,3853
  onnx2tf/ops/Clip.py,sha256=K3Pgt9BXl5_rzg6s-kPFmwElL5COsvolRY1BUTo7UWw,8753
@@ -35,7 +35,7 @@ onnx2tf/ops/CumSum.py,sha256=SYKmD5r9Cm9gsCkJPNFoHigvvBO1PmRYRrVmn1HE78o,3954
  onnx2tf/ops/DepthToSpace.py,sha256=BiyBZ88dmXQAkZ5Jc-Ddo-5Kn8dRYCnoik_XnOFzqXc,14449
  onnx2tf/ops/DequantizeLinear.py,sha256=cNbGw4ITg_BsrXYkSb7fD05XEkQgz7v__-StQtvIvB4,5220
  onnx2tf/ops/Det.py,sha256=kxuHkpv_KNHkof0uBv2RLtr3G1uA76MFHyCiCYCBXkw,3590
- onnx2tf/ops/Div.py,sha256=fIN90mp7ByeKWEVDVENvCMja5Qc83jstayNw9hoKP4Y,16224
+ onnx2tf/ops/Div.py,sha256=NyAsvCxI41hyBX_kiCEILHY6QQkas_o4wRY8zkDUiwk,16248
  onnx2tf/ops/Dropout.py,sha256=KZKVqlnbq875awsNvJaQRvkO3XgqxeAmjbikXymRCtA,5860
  onnx2tf/ops/DynamicQuantizeLinear.py,sha256=UGmN2nXBBQHXcNlorEQfnKDnnoOadt4TNzXox-Xki2U,4759
  onnx2tf/ops/Einsum.py,sha256=YBw0JmSglOVVje80RqmqIjgsc7V5SnYS6s1Ysa2NUPA,12369
@@ -82,7 +82,7 @@ onnx2tf/ops/LessOrEqual.py,sha256=9Lc8qaYUPVC6yZoQluNqcdHnvpUbfWBOI4Ow38RRAJo,45
  onnx2tf/ops/Log.py,sha256=UZebF3SGq85BnoPgYyN2j-zzFRp67fJnYPNyu33W55o,3582
  onnx2tf/ops/LogSoftmax.py,sha256=j2nhYY7__8ViLFJVLA5tS98QEvGS1gTIW0QCdnZWUPQ,3923
  onnx2tf/ops/LpNormalization.py,sha256=Uu15HgxFNXb6gNMgdTJyf0SLPaLbcbkOYqY_4hMBxNA,3153
- onnx2tf/ops/MatMul.py,sha256=oH-VvMn-RTozk3E8zcFE2-T78csDIygtMksVX30o4MY,18804
+ onnx2tf/ops/MatMul.py,sha256=95HrWr3Dt6BLqx_zqm3WXBw_WzrWLObYVgz4K1yrhqE,19060
  onnx2tf/ops/MatMulInteger.py,sha256=qHqzdJNI9SeJDbW8pR90baYCdGN6FdOez4hi9EzwXoc,6538
  onnx2tf/ops/Max.py,sha256=w5nMciO_6ApYUobHuwMGuS3xhuza7eSvKDRhvMPgAuo,3256
  onnx2tf/ops/MaxPool.py,sha256=_JC4eqBTh-qLkZCMG8RZhthRZ8D2d821zaFMWeGMEWc,15775
@@ -92,8 +92,8 @@ onnx2tf/ops/MeanVarianceNormalization.py,sha256=Ne53jlDgAJZ9yhzKOWR-0LnjDdM-fg7D
  onnx2tf/ops/MelWeightMatrix.py,sha256=MyYFUTxz2wFVqNx3Dhlro0ktg9kxtEq8sGFmHICDZsI,5453
  onnx2tf/ops/Min.py,sha256=dK3i115xYh6NusQtGfswEGYBg9MBc_g-edafLgvq4TQ,3356
  onnx2tf/ops/Mish.py,sha256=LEg5MXBLLIzwxmsudC1zTA_yq7drVY_DMCB8lHBCA-8,3546
- onnx2tf/ops/Mod.py,sha256=K6oH5Q4I5JWh8DFp8T1CSdL4WUJCexYdfqTy5iceJxo,9999
- onnx2tf/ops/Mul.py,sha256=p75MHWbJSo6jLarFzmfK6oQREar4ntlFGqn-U7MzY8s,15962
+ onnx2tf/ops/Mod.py,sha256=Y7kqCEOLqof4zVszJslQayt6COyU-MS5qKLHAYOyxmc,10023
+ onnx2tf/ops/Mul.py,sha256=0hOf2O8ktRpIi4eOMfLGdwKl-yACFyGO3nU_s_XXUIE,15986
  onnx2tf/ops/Multinomial.py,sha256=0HQC76IA3AvRsUx9RS0S__nIfEmPuvIaDfSt8bns4FU,3158
  onnx2tf/ops/Neg.py,sha256=vu2ExVXyGpggAM_DNPeZj9QFeUyqhn5XmJnDlPJFsQU,4219
  onnx2tf/ops/NonMaxSuppression.py,sha256=nHeiX5eMGQAq_51KoljNZGlZddJ89Oe7Yfe33xLhl6M,15763
@@ -167,7 +167,7 @@ onnx2tf/ops/SplitToSequence.py,sha256=BS_JEd7DC7vuPfs5oRRW774mtlK--kqf9DJUalv-Ag
  onnx2tf/ops/Sqrt.py,sha256=-xE8Tk_6unSR56k9g3R46lML4Nht5kQwqJT0JYkn5ko,3585
  onnx2tf/ops/Squeeze.py,sha256=FLIt2qjWh1IJyti1c4YHuepH2Fkxt40rnEKszzmwsnE,7980
  onnx2tf/ops/StringNormalizer.py,sha256=lyjUfhvZiIUZhLptI0rW_xwpFBJ6XuhDCyvCKNh-ogA,5214
- onnx2tf/ops/Sub.py,sha256=_fSx-wrpoQAzqShFEU3QMZb-E3FdfPdfc9k-2Xt0cgc,10988
+ onnx2tf/ops/Sub.py,sha256=JCUWNmRLrwJEB8_0MPRTzmZ4KAV_HLXNivUd_jNqPQI,11012
  onnx2tf/ops/Sum.py,sha256=wtI0SbGuNFxkLskBk68ZhOAg3XyrIx-9xGYy1GZCVSo,3073
  onnx2tf/ops/Tan.py,sha256=Ncig8clGvY7GWshqxRDRdcxjcbf_HTKGdpDw5ValrKI,3582
  onnx2tf/ops/Tanh.py,sha256=PIQUvxS_AIDufblC2vc573nse2UCRA9z5yWd7kB-51s,3585
@@ -185,13 +185,13 @@ onnx2tf/ops/_Loop.py,sha256=eo5sNfrfOnKV6_I737AWsM5LJTY9DVOxQEvhanxtP4g,11322
  onnx2tf/ops/__Loop.py,sha256=ClwMcbNS4hqUtW_pzwjMa9Cqg7ONvz9aplke55A0uJ0,19704
  onnx2tf/ops/__init__.py,sha256=jnmUWWa-3dHzBZV9bmPzXu6eoz2dumJTzO7i8JdcgSM,25
  onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
- onnx2tf/utils/common_functions.py,sha256=lhhWuNVDwM_mVFc9DD2kYcYEyyT46CDlJMvZzi9KWD4,241473
+ onnx2tf/utils/common_functions.py,sha256=HTDca3DGXB3xvc1S50RNscgB57TCiq4yC5Nrafs6ka4,241430
  onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
  onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
- onnx2tf-1.26.9.dist-info/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
- onnx2tf-1.26.9.dist-info/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
- onnx2tf-1.26.9.dist-info/METADATA,sha256=YlHKAl6m5rKRbF5pN8-jvIFUDvVTX9Mg-eWdkJvUkKM,147499
- onnx2tf-1.26.9.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
- onnx2tf-1.26.9.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
- onnx2tf-1.26.9.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
- onnx2tf-1.26.9.dist-info/RECORD,,
+ onnx2tf-1.27.1.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
+ onnx2tf-1.27.1.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
+ onnx2tf-1.27.1.dist-info/METADATA,sha256=_cqkJJkplUxrDL2K4Kk4lsXZ5BLefqYuNLh7hvVA8Nc,147712
+ onnx2tf-1.27.1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ onnx2tf-1.27.1.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
+ onnx2tf-1.27.1.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
+ onnx2tf-1.27.1.dist-info/RECORD,,
onnx2tf-1.27.1.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (76.0.0)
+ Generator: setuptools (78.1.0)
  Root-Is-Purelib: true
  Tag: py3-none-any