onnx2tf 1.26.9__py3-none-any.whl → 1.27.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnx2tf/__init__.py +1 -1
- onnx2tf/utils/common_functions.py +2 -2
- {onnx2tf-1.26.9.dist-info → onnx2tf-1.27.0.dist-info}/METADATA +27 -23
- {onnx2tf-1.26.9.dist-info → onnx2tf-1.27.0.dist-info}/RECORD +9 -9
- {onnx2tf-1.26.9.dist-info → onnx2tf-1.27.0.dist-info}/LICENSE +0 -0
- {onnx2tf-1.26.9.dist-info → onnx2tf-1.27.0.dist-info}/LICENSE_onnx-tensorflow +0 -0
- {onnx2tf-1.26.9.dist-info → onnx2tf-1.27.0.dist-info}/WHEEL +0 -0
- {onnx2tf-1.26.9.dist-info → onnx2tf-1.27.0.dist-info}/entry_points.txt +0 -0
- {onnx2tf-1.26.9.dist-info → onnx2tf-1.27.0.dist-info}/top_level.txt +0 -0
onnx2tf/__init__.py
CHANGED
|
@@ -16,6 +16,7 @@ import subprocess
|
|
|
16
16
|
import numpy as np
|
|
17
17
|
np.random.seed(0)
|
|
18
18
|
import tensorflow as tf
|
|
19
|
+
from ai_edge_litert.interpreter import Interpreter
|
|
19
20
|
import tf_keras
|
|
20
21
|
from tensorflow.python.keras.layers import Lambda
|
|
21
22
|
from tensorflow.python.keras.utils import conv_utils
|
|
@@ -4143,8 +4144,7 @@ def weights_export(
|
|
|
4143
4144
|
Path to file in hdf5 format to save the extracted weights
|
|
4144
4145
|
"""
|
|
4145
4146
|
import h5py
|
|
4146
|
-
|
|
4147
|
-
interpreter = interpreter_wrapper.Interpreter(
|
|
4147
|
+
interpreter = Interpreter(
|
|
4148
4148
|
model_path=extract_target_tflite_file_path,
|
|
4149
4149
|
)
|
|
4150
4150
|
interpreter.allocate_tensors()
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.2
|
|
2
2
|
Name: onnx2tf
|
|
3
|
-
Version: 1.26.9
|
|
3
|
+
Version: 1.27.0
|
|
4
4
|
Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
|
|
5
5
|
Home-page: https://github.com/PINTO0309/onnx2tf
|
|
6
6
|
Author: Katsuya Hyodo
|
|
@@ -285,9 +285,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
285
285
|
- onnx-simplifier==0.4.33 or 0.4.30 `(onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] (op_type:Slice, node name: /xxxx/Slice): [ShapeInferenceError] Inferred shape and existing shape differ in rank: (x) vs (y))`
|
|
286
286
|
- onnx_graphsurgeon
|
|
287
287
|
- simple_onnx_processing_tools
|
|
288
|
-
- tensorflow==2.
|
|
288
|
+
- tensorflow==2.19.0, Special bugs: [#436](https://github.com/PINTO0309/onnx2tf/issues/436)
|
|
289
|
+
- tf-keras==2.19.0
|
|
290
|
+
- ai-edge-litert==1.2.0
|
|
289
291
|
- psutil==5.9.5
|
|
290
|
-
- ml_dtypes==0.
|
|
292
|
+
- ml_dtypes==0.5.1
|
|
291
293
|
- flatbuffers-compiler (Optional, Only when using the `-coion` option. Executable file named `flatc`.)
|
|
292
294
|
- flatbuffers>=23.1.21
|
|
293
295
|
```bash
|
|
@@ -331,7 +333,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
331
333
|
docker run --rm -it \
|
|
332
334
|
-v `pwd`:/workdir \
|
|
333
335
|
-w /workdir \
|
|
334
|
-
ghcr.io/pinto0309/onnx2tf:1.26.9
|
|
336
|
+
ghcr.io/pinto0309/onnx2tf:1.27.0
|
|
335
337
|
|
|
336
338
|
or
|
|
337
339
|
|
|
@@ -339,11 +341,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
339
341
|
docker run --rm -it \
|
|
340
342
|
-v `pwd`:/workdir \
|
|
341
343
|
-w /workdir \
|
|
342
|
-
docker.io/pinto0309/onnx2tf:1.26.9
|
|
344
|
+
docker.io/pinto0309/onnx2tf:1.27.0
|
|
343
345
|
|
|
344
346
|
or
|
|
345
347
|
|
|
346
|
-
pip install -U onnx==1.
|
|
348
|
+
pip install -U onnx==1.17.0 \
|
|
347
349
|
&& pip install -U nvidia-pyindex \
|
|
348
350
|
&& pip install -U onnx-graphsurgeon \
|
|
349
351
|
&& pip install -U onnxruntime==1.18.1 \
|
|
@@ -351,13 +353,14 @@ Video speed is adjusted approximately 50 times slower than actual speed.
|
|
|
351
353
|
&& pip install -U simple_onnx_processing_tools \
|
|
352
354
|
&& pip install -U sne4onnx>=1.0.13 \
|
|
353
355
|
&& pip install -U sng4onnx>=1.0.4 \
|
|
354
|
-
&& pip install -U
|
|
356
|
+
&& pip install -U ai_edge_litert==1.2.0 \
|
|
357
|
+
&& pip install -U tensorflow==2.19.0 \
|
|
355
358
|
&& pip install -U protobuf==3.20.3 \
|
|
356
359
|
&& pip install -U onnx2tf \
|
|
357
360
|
&& pip install -U h5py==3.11.0 \
|
|
358
361
|
&& pip install -U psutil==5.9.5 \
|
|
359
|
-
&& pip install -U ml_dtypes==0.
|
|
360
|
-
&& pip install -U tf-keras
|
|
362
|
+
&& pip install -U ml_dtypes==0.5.1 \
|
|
363
|
+
&& pip install -U tf-keras==2.19.0 \
|
|
361
364
|
&& pip install flatbuffers>=23.5.26
|
|
362
365
|
|
|
363
366
|
or
|
|
@@ -381,8 +384,9 @@ or
|
|
|
381
384
|
&& sudo chmod +x flatc \
|
|
382
385
|
&& sudo mv flatc /usr/bin/
|
|
383
386
|
!pip install -U pip \
|
|
384
|
-
&& pip install tensorflow==2.
|
|
385
|
-
&& pip install
|
|
387
|
+
&& pip install tensorflow==2.19.0 \
|
|
388
|
+
&& pip install ai_edge_litert==1.2.0 \
|
|
389
|
+
&& pip install -U onnx==1.17.0 \
|
|
386
390
|
&& python -m pip install onnx_graphsurgeon \
|
|
387
391
|
--index-url https://pypi.ngc.nvidia.com \
|
|
388
392
|
&& pip install -U onnxruntime==1.18.1 \
|
|
@@ -392,8 +396,8 @@ or
|
|
|
392
396
|
&& pip install -U protobuf==3.20.3 \
|
|
393
397
|
&& pip install -U h5py==3.11.0 \
|
|
394
398
|
&& pip install -U psutil==5.9.5 \
|
|
395
|
-
&& pip install -U ml_dtypes==0.
|
|
396
|
-
&& pip install -U tf-keras
|
|
399
|
+
&& pip install -U ml_dtypes==0.5.1 \
|
|
400
|
+
&& pip install -U tf-keras==2.19.0 \
|
|
397
401
|
&& pip install flatbuffers>=23.5.26
|
|
398
402
|
```
|
|
399
403
|
|
|
@@ -608,7 +612,7 @@ import onnxruntime
|
|
|
608
612
|
import numpy as np
|
|
609
613
|
import onnx2tf
|
|
610
614
|
import tensorflow as tf
|
|
611
|
-
from
|
|
615
|
+
from ai_edge_litert.interpreter import Interpreter
|
|
612
616
|
|
|
613
617
|
class Model(torch.nn.Module):
|
|
614
618
|
def forward(self, x, y):
|
|
@@ -647,7 +651,7 @@ onnx2tf.convert(
|
|
|
647
651
|
)
|
|
648
652
|
|
|
649
653
|
# Now, test the newer TFLite model
|
|
650
|
-
interpreter =
|
|
654
|
+
interpreter = Interpreter(model_path="model.tf/model_float32.tflite")
|
|
651
655
|
tf_lite_model = interpreter.get_signature_runner()
|
|
652
656
|
inputs = {
|
|
653
657
|
'x': np.asarray([10], dtype=np.int64),
|
|
@@ -1061,10 +1065,10 @@ Now, let's try inference with the TFLite runtime instead of the TensorFlow runti
|
|
|
1061
1065
|
import time
|
|
1062
1066
|
import numpy as np
|
|
1063
1067
|
np.random.seed(0)
|
|
1064
|
-
|
|
1068
|
+
from ai_edge_litert.interpreter import Interpreter
|
|
1065
1069
|
|
|
1066
1070
|
# Load TFLite model
|
|
1067
|
-
interpreter =
|
|
1071
|
+
interpreter = Interpreter(model_path="./saved_model/prelu_check_float32.tflite")
|
|
1068
1072
|
interpreter.allocate_tensors()
|
|
1069
1073
|
tensor_shape = (256, 20)
|
|
1070
1074
|
input_data = {'waveform': np.random.randn(*tensor_shape).astype(np.float32)}
|
|
@@ -1232,10 +1236,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
|
|
|
1232
1236
|
- `test.py` - Batch size: `5`
|
|
1233
1237
|
```python
|
|
1234
1238
|
import numpy as np
|
|
1235
|
-
|
|
1239
|
+
from ai_edge_litert.interpreter import Interpreter
|
|
1236
1240
|
from pprint import pprint
|
|
1237
1241
|
|
|
1238
|
-
interpreter =
|
|
1242
|
+
interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
|
|
1239
1243
|
tf_lite_model = interpreter.get_signature_runner()
|
|
1240
1244
|
inputs = {
|
|
1241
1245
|
'images': np.ones([5,256,128,3], dtype=np.float32),
|
|
@@ -1263,10 +1267,10 @@ You can use `signature_runner` to handle dynamic input tensors by performing inf
|
|
|
1263
1267
|
- `test.py` - Batch size: `3`
|
|
1264
1268
|
```python
|
|
1265
1269
|
import numpy as np
|
|
1266
|
-
|
|
1270
|
+
from ai_edge_litert.interpreter import Interpreter
|
|
1267
1271
|
from pprint import pprint
|
|
1268
1272
|
|
|
1269
|
-
interpreter =
|
|
1273
|
+
interpreter = Interpreter(model_path="saved_model/osnet_x0_25_msmt17_float32.tflite")
|
|
1270
1274
|
tf_lite_model = interpreter.get_signature_runner()
|
|
1271
1275
|
inputs = {
|
|
1272
1276
|
'images': np.ones([3,256,128,3], dtype=np.float32),
|
|
@@ -1350,11 +1354,11 @@ The relationship between the ONNX before conversion and the TFLite file after co
|
|
|
1350
1354
|
Use the generated TFLite file to inference and ensure that it always contains fixed value output.
|
|
1351
1355
|
|
|
1352
1356
|
```python
|
|
1353
|
-
|
|
1357
|
+
from ai_edge_litert.interpreter import Interpreter
|
|
1354
1358
|
import numpy as np
|
|
1355
1359
|
from pprint import pprint
|
|
1356
1360
|
|
|
1357
|
-
interpreter =
|
|
1361
|
+
interpreter = Interpreter(model_path="saved_model/toy_with_constant_float32.tflite")
|
|
1358
1362
|
interpreter.allocate_tensors()
|
|
1359
1363
|
|
|
1360
1364
|
input_details = interpreter.get_input_details()
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
onnx2tf/__init__.py,sha256=
|
|
1
|
+
onnx2tf/__init__.py,sha256=lHhAWe8iqi6znmPVuWY05CVVp6042_wHUvkziLiOvA8,66
|
|
2
2
|
onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
|
|
3
3
|
onnx2tf/onnx2tf.py,sha256=IEnfIs3Dy8Y5F3iJ4HY7bWkn3QuB6lq_gHa1q5E3tMI,124745
|
|
4
4
|
onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
|
|
@@ -185,13 +185,13 @@ onnx2tf/ops/_Loop.py,sha256=eo5sNfrfOnKV6_I737AWsM5LJTY9DVOxQEvhanxtP4g,11322
|
|
|
185
185
|
onnx2tf/ops/__Loop.py,sha256=ClwMcbNS4hqUtW_pzwjMa9Cqg7ONvz9aplke55A0uJ0,19704
|
|
186
186
|
onnx2tf/ops/__init__.py,sha256=jnmUWWa-3dHzBZV9bmPzXu6eoz2dumJTzO7i8JdcgSM,25
|
|
187
187
|
onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
|
|
188
|
-
onnx2tf/utils/common_functions.py,sha256=
|
|
188
|
+
onnx2tf/utils/common_functions.py,sha256=HTDca3DGXB3xvc1S50RNscgB57TCiq4yC5Nrafs6ka4,241430
|
|
189
189
|
onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
|
|
190
190
|
onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
|
|
191
|
-
onnx2tf-1.26.9.dist-info/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
|
|
192
|
-
onnx2tf-1.26.9.dist-info/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
|
|
193
|
-
onnx2tf-1.
|
|
194
|
-
onnx2tf-1.26.9.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
|
|
195
|
-
onnx2tf-1.26.9.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
|
|
196
|
-
onnx2tf-1.26.9.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
|
|
197
|
-
onnx2tf-1.26.9.dist-info/RECORD,,
|
|
191
|
+
onnx2tf-1.27.0.dist-info/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
|
|
192
|
+
onnx2tf-1.27.0.dist-info/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
|
|
193
|
+
onnx2tf-1.27.0.dist-info/METADATA,sha256=Qjvfs5wxntQGejt30wlRjfCY1PA2gtg8d2UOLJ_8GEk,147683
|
|
194
|
+
onnx2tf-1.27.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
|
|
195
|
+
onnx2tf-1.27.0.dist-info/entry_points.txt,sha256=gDPK8ToCFPKMvm8jr9xrGOkXtORJJVh4736fBEKO5k0,41
|
|
196
|
+
onnx2tf-1.27.0.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
|
|
197
|
+
onnx2tf-1.27.0.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|