onnx2tf 1.29.18__py3-none-any.whl → 1.29.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnx2tf/__init__.py +1 -1
- onnx2tf/onnx2tf.py +967 -27
- onnx2tf/ops/Col2Im.py +108 -64
- onnx2tf/ops/DFT.py +245 -0
- onnx2tf/ops/DeformConv.py +399 -0
- onnx2tf/ops/GatherElements.py +25 -7
- onnx2tf/ops/GatherND.py +28 -1
- onnx2tf/ops/ScatterElements.py +25 -7
- onnx2tf/ops/ScatterND.py +45 -6
- onnx2tf/ops/TensorScatter.py +20 -6
- onnx2tf/utils/common_functions.py +99 -2
- {onnx2tf-1.29.18.dist-info → onnx2tf-1.29.20.dist-info}/METADATA +27 -5
- {onnx2tf-1.29.18.dist-info → onnx2tf-1.29.20.dist-info}/RECORD +15 -13
- {onnx2tf-1.29.18.dist-info → onnx2tf-1.29.20.dist-info}/WHEEL +0 -0
- {onnx2tf-1.29.18.dist-info → onnx2tf-1.29.20.dist-info}/entry_points.txt +0 -0
onnx2tf/utils/common_functions.py

@@ -26,6 +26,7 @@ from tensorflow.python.keras.layers import Lambda
 from tensorflow.python.keras.utils import conv_utils
 import onnx
 from onnx.serialization import ProtoSerializer
+from onnx.external_data_helper import uses_external_data
 import onnx_graphsurgeon as gs
 try:
     import onnxruntime as ort

@@ -45,6 +46,8 @@ INF_INDEX_VALUE: int = 4294967296
 ONNX_INF_INDEX_VALUE = sys.maxsize # 9223372036854775807
 
 
+
+
 def get_replacement_parameter(func):
     @wraps(func)
     def get_replacement_parameter_wrapper_func(*args, **kwargs):

@@ -4046,6 +4049,9 @@ def dummy_onnx_inference(
             input_sizes[i] = updated_shape
 
     input_dtypes: List[Any] = [inp.dtype for inp in onnx_inputs]
+    input_size_map = {
+        name: tuple(size) for name, size in zip(input_names, input_sizes)
+    }
     input_datas = {}
 
     # -cid

@@ -4059,7 +4065,16 @@ def dummy_onnx_inference(
         if input_op_info is not None:
             ncw_nchw_ncdhw_perm: List = input_op_info.get('ncw_nchw_ncdhw_perm', None)
             if ncw_nchw_ncdhw_perm is not None:
-                custom_input_data = custom_input_data.transpose(ncw_nchw_ncdhw_perm)
+                expected_shape = input_size_map.get(
+                    input_op_name,
+                    tuple(custom_input_data.shape),
+                )
+                if tuple(custom_input_data.shape) != expected_shape:
+                    permuted_shape = tuple(
+                        custom_input_data.shape[i] for i in ncw_nchw_ncdhw_perm
+                    )
+                    if permuted_shape == expected_shape:
+                        custom_input_data = custom_input_data.transpose(ncw_nchw_ncdhw_perm)
             onnx_batch_size = input_op_info['shape'][0]
             cdata_batch_size = custom_input_data.shape[0]
             if isinstance(onnx_batch_size, int) and onnx_batch_size != cdata_batch_size and cdata_batch_size > 1:

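The guard above keeps custom input files usable whether they were saved in the ONNX (NCHW) or TensorFlow (NHWC) layout: the recorded perm is applied only when it actually reproduces the shape the ONNX model expects. A minimal numpy sketch of the rule, with hypothetical shapes:

    import numpy as np

    # NHWC-ordered file for a model expecting NCHW (hypothetical shapes).
    custom_input_data = np.zeros((1, 224, 224, 3), dtype=np.float32)
    ncw_nchw_ncdhw_perm = [0, 3, 1, 2]
    expected_shape = (1, 3, 224, 224)

    if tuple(custom_input_data.shape) != expected_shape:
        permuted = tuple(custom_input_data.shape[i] for i in ncw_nchw_ncdhw_perm)
        # Transpose only when the perm reproduces the expected shape, so data
        # already in the ONNX layout is not transposed a second time.
        if permuted == expected_shape:
            custom_input_data = custom_input_data.transpose(ncw_nchw_ncdhw_perm)

    assert custom_input_data.shape == expected_shape
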
@@ -4231,6 +4246,7 @@ def dummy_tf_inference(
     custom_input_op_name_np_data_path: Optional[str] = None,
     shape_hints: Optional[List[str]] = None,
     input_datas_for_validation: Optional[Dict[str, np.ndarray]] = None,
+    prefilled_input_datas: Optional[Dict[str, np.ndarray]] = None,
     keep_shape_absolutely_input_names: Optional[List[str]] = None,
     keep_ncw_or_nchw_or_ncdhw_input_names: Optional[List[str]] = None,
     keep_nwc_or_nhwc_or_ndhwc_input_names: Optional[List[str]] = None,

@@ -4264,6 +4280,8 @@ def dummy_tf_inference(
     """
     input_names: List[str] = [inp.name for inp in inputs]
     input_sizes: List[int] = [inp.shape for inp in inputs]
+    input_size_map = {name: size for name, size in zip(input_names, input_sizes)}
+    input_index_map = {name: i for i, name in enumerate(input_names)}
 
     if shape_hints is None:
         new_input_sizes = []

@@ -4338,10 +4356,20 @@ def dummy_tf_inference(
 
     # -cid
     if custom_input_op_name_np_data_path:
-        for
+        for param in custom_input_op_name_np_data_path:
+            if len(param) < 2:
+                continue
+            input_name = str(param[0])
             numpy_file_path = str(param[1])
+            if input_name not in input_index_map:
+                continue
+            idx = input_index_map[input_name]
+            tf_input_name = input_names[idx]
+            if prefilled_input_datas and tf_input_name in prefilled_input_datas:
+                continue
             custom_input_data = np.load(numpy_file_path)
             input_size = input_sizes[idx]
+            input_dtype = input_dtypes[idx] if idx < len(input_dtypes) else np.float32
 
             tf_batch_size = input_size[0]
             cdata_batch_size = custom_input_data.shape[0]

@@ -4351,6 +4379,24 @@ def dummy_tf_inference(
                 custom_input_data = custom_input_data[0:1, ...]
 
             if list(custom_input_data.shape) != input_size:
+                auto_split_input = (
+                    'onnx2tf_split_' in numpy_file_path
+                    or os.path.basename(numpy_file_path).startswith('part_')
+                )
+                if auto_split_input:
+                    warn(
+                        'Auto-split custom input shape does not match TF input shape. '
+                        f'input_name={input_name} '
+                        f'tf_shape={input_size} '
+                        f'numpy_shape={list(custom_input_data.shape)} '
+                        f'path={numpy_file_path} '
+                        'Fallback to dummy input for this tensor.'
+                    )
+                    input_datas[input_names[idx]] = np.ones(
+                        input_size,
+                        dtype=TF_DTYPES_TO_NUMPY_DTYPES[input_dtype],
+                    )
+                    continue
                 error_msg = f'' + \
                     Color.RED(f'ERROR:') + ' ' + \
                     f"The format of custom input data is different from Tensorflow's format. " + \

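Instead of raising the shape-mismatch error that follows in the hunk, an auto-split part file that no longer matches the TF input signature is now replaced by a dummy tensor. A standalone sketch of the detection rule, with a hypothetical path and shapes:

    import os
    import numpy as np

    # Hypothetical auto-split part file whose saved shape no longer matches
    # the TF input signature.
    numpy_file_path = 'part_0_images.npy'
    input_size = [1, 224, 224, 3]
    custom_input_data = np.zeros((1, 3, 224, 224), dtype=np.float32)

    auto_split_input = (
        'onnx2tf_split_' in numpy_file_path
        or os.path.basename(numpy_file_path).startswith('part_')
    )
    if auto_split_input and list(custom_input_data.shape) != input_size:
        # Fall back to a dummy tensor of the expected shape.
        custom_input_data = np.ones(input_size, dtype=np.float32)
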
@@ -4397,6 +4443,33 @@ def dummy_tf_inference(
     if input_datas_for_validation is not None:
         input_datas_for_validation.update(input_datas)
 
+    if prefilled_input_datas:
+        for input_name, input_data in prefilled_input_datas.items():
+            expected = None
+            if input_name in input_datas:
+                expected = input_datas[input_name].shape
+            elif input_name in input_size_map:
+                expected = input_size_map[input_name]
+            else:
+                continue
+            data = input_data
+            try:
+                if expected is not None and tuple(data.shape) != tuple(expected):
+                    if data.size == np.prod(expected):
+                        data = data.reshape(expected)
+                    else:
+                        continue
+                target_dtype = None
+                if input_name in input_datas:
+                    target_dtype = input_datas[input_name].dtype
+                elif input_name in input_index_map:
+                    target_dtype = input_dtypes[input_index_map[input_name]]
+                if target_dtype is not None and data.dtype != target_dtype:
+                    data = data.astype(target_dtype)
+                input_datas[input_name] = data
+            except Exception:
+                continue
+
     outputs = model(
         inputs={
             input.name: input_datas[input.name] for input in inputs

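The prefilled_input_datas pass above lets a caller inject precomputed tensors while still guaranteeing that whatever reaches the model matches the expected shape and dtype. The normalization it applies, as a standalone numpy sketch with hypothetical values:

    import numpy as np

    # Cached tensor with the right element count but wrong shape and dtype.
    expected = (1, 3, 8, 8)
    target_dtype = np.float32
    cached = np.arange(192, dtype=np.int64)  # 1*3*8*8 == 192 elements

    if cached.size == np.prod(expected):
        # Reshape to the expected layout, then cast to the model's dtype.
        data = cached.reshape(expected).astype(target_dtype)
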
@@ -6057,6 +6130,8 @@ def acquisition_of_validation_data(
         kwargs['test_data_nhwc']
     custom_input_op_name_np_data_path: str = \
         kwargs['custom_input_op_name_np_data_path']
+    tf_input_cache: Optional[Dict[str, np.ndarray]] = \
+        kwargs.get('tf_input_cache', None)
 
     # Get the output tensor of one previous OP of TensorFlow only once
     tf_model_inputs = get_tf_model_inputs(

@@ -6108,6 +6183,7 @@ def acquisition_of_validation_data(
                 inputs=tf_model_inputs,
                 test_data_nhwc=test_data_nhwc,
                 custom_input_op_name_np_data_path=custom_input_op_name_np_data_path,
+                prefilled_input_datas=tf_input_cache,
             )
         except Exception as ex:
             pass

@@ -6526,3 +6602,24 @@ def define_reduceXXX(
         keepdims=target_keepdims,
     )
     return reduced_tensor
+
+def check_has_external_data(input_onnx_file_path: str) -> bool:
+    model = onnx.load(input_onnx_file_path, load_external_data=False)
+    def iter_tensors_in_graph(g):
+        for t in g.initializer:
+            yield t
+        for t in g.sparse_initializer:
+            yield t
+        for n in g.node:
+            for a in n.attribute:
+                if a.type == onnx.AttributeProto.TENSOR:
+                    yield a.t
+                elif a.type == onnx.AttributeProto.TENSORS:
+                    for t in a.tensors:
+                        yield t
+                elif a.type == onnx.AttributeProto.GRAPH:
+                    yield from iter_tensors_in_graph(a.g)
+                elif a.type == onnx.AttributeProto.GRAPHS:
+                    for sg in a.graphs:
+                        yield from iter_tensors_in_graph(sg)
+    return any(uses_external_data(t) for t in iter_tensors_in_graph(model.graph))

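A possible call site for the new helper, as a sketch ('model.onnx' is a placeholder path). Because the model is loaded with load_external_data=False, the check works even when the weight side files are absent:

    # Assumes check_has_external_data from common_functions is in scope.
    if check_has_external_data('model.onnx'):
        print('Model references external data; keep its side files alongside it.')
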
{onnx2tf-1.29.18.dist-info → onnx2tf-1.29.20.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.18
+Version: 1.29.20
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).
 Keywords: onnx,tensorflow,tflite,keras,deep-learning,machine-learning
 Author: Katsuya Hyodo

@@ -137,11 +137,11 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
 |Cos|:heavy_check_mark:|
 |CumProd|:heavy_check_mark:|
 |CumSum|:heavy_check_mark:|
-|DeformConv
+|DeformConv|:white_check_mark:|
 |DepthToSpace|:heavy_check_mark:|
 |Det|:heavy_check_mark:|
 |DequantizeLinear|:heavy_check_mark:|
-|DFT
+|DFT|:white_check_mark:|
 |Div|:heavy_check_mark:|
 |Dropout|:heavy_check_mark:|
 |DynamicQuantizeLinear|:heavy_check_mark:|

@@ -365,7 +365,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.18
+ghcr.io/pinto0309/onnx2tf:1.29.20
 
 or
 

@@ -373,7 +373,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.18
+docker.io/pinto0309/onnx2tf:1.29.20
 
 or
 

@@ -1887,6 +1887,15 @@ optional arguments:
         model partitioned into subgraphs.
         e.g. --output_names_to_interrupt_model_conversion "output0" "output1" "output2"
 
+  -easm, --enable_auto_split_model
+        Force auto split regardless of the ONNX file size.
+        Uses --auto_split_max_size_mb as the target partition size.
+
+  -asmsm AUTO_SPLIT_MAX_SIZE_MB, --auto_split_max_size_mb AUTO_SPLIT_MAX_SIZE_MB
+        Target maximum size per partition in MB based on ONNX initializer sizes.
+        Used when auto-split is triggered or forced.
+        Default: 1024
+
   -dgc, --disable_group_convolution
         Disable GroupConvolution and replace it with SeparableConvolution for
         output to saved_model format.

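For example, assuming onnx2tf's usual -i/--input_onnx_file_path flag, forcing a split into roughly 512 MB partitions might look like:

    onnx2tf -i model.onnx -easm -asmsm 512
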
@@ -2156,6 +2165,8 @@ convert(
     keep_shape_absolutely_input_names: Optional[List[str]] = None,
     input_names_to_interrupt_model_conversion: Union[List[str], NoneType] = None,
     output_names_to_interrupt_model_conversion: Union[List[str], NoneType] = None,
+    enable_auto_split_model: Optional[bool] = False,
+    auto_split_max_size_mb: Optional[int] = 1024,
     disable_group_convolution: Union[bool, NoneType] = False,
     enable_batchmatmul_unfold: Optional[bool] = False,
     enable_rnn_unroll: Optional[bool] = False,

@@ -2424,6 +2435,17 @@ convert(
     e.g.
     output_names_to_interrupt_model_conversion=['output0','output1','output2']
 
+    enable_auto_split_model: Optional[bool]
+        Force auto split regardless of the ONNX file size.
+        Uses auto_split_max_size_mb as the target partition size.
+        Short option: -easm
+        Default: False
+
+    auto_split_max_size_mb: Optional[int]
+        Target maximum size per partition in MB based on ONNX initializer sizes.
+        Used when auto-split is triggered or forced.
+        Default: 1024
+
     disable_group_convolution: Optional[bool]
         Disable GroupConvolution and replace it with SeparableConvolution for
         output to saved_model format.

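The same pair of options through the Python API, as a hedged sketch (the model path is a placeholder, and input_onnx_file_path is assumed to be the usual keyword argument of convert()):

    from onnx2tf import convert

    convert(
        input_onnx_file_path='model.onnx',  # placeholder path
        enable_auto_split_model=True,       # force auto split (-easm)
        auto_split_max_size_mb=512,         # target partition size (-asmsm)
    )
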
{onnx2tf-1.29.18.dist-info → onnx2tf-1.29.20.dist-info}/RECORD

@@ -1,6 +1,6 @@
-onnx2tf/__init__.py,sha256=
+onnx2tf/__init__.py,sha256=eeSEsd3Wo8LJdK15-iysWnXuxIbvnuVUXyPA9PbSr1o,67
 onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
-onnx2tf/onnx2tf.py,sha256=
+onnx2tf/onnx2tf.py,sha256=pBrhrEzDqFKOgvXEr75vMJ61P_m3_f5ogUegD_vDEnc,207077
 onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
 onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
 onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588

@@ -27,7 +27,7 @@ onnx2tf/ops/Cast.py,sha256=M0LRClHPgZ_8NubwME6ipKrAqcY9aKC5ihQXCkTkNkM,4601
 onnx2tf/ops/Ceil.py,sha256=0-jaueltpQSwpOIDUmy9DdTy98qN-XimYu5cHVPnUIs,3586
 onnx2tf/ops/Celu.py,sha256=9g7WNKo4G_jMtUXcoOfpNdLYqEsuyXLPkkyQZxDuL4U,3853
 onnx2tf/ops/Clip.py,sha256=K3Pgt9BXl5_rzg6s-kPFmwElL5COsvolRY1BUTo7UWw,8753
-onnx2tf/ops/Col2Im.py,sha256=
+onnx2tf/ops/Col2Im.py,sha256=8n66z3O59VJvJRlcrj93a5TLJ_qh-aSdR_-8SAQIlRo,7658
 onnx2tf/ops/Compress.py,sha256=NvDGr9gCNl-8YG41xDBfe3UvhRP03K-ktdtY_MoytBc,3667
 onnx2tf/ops/Concat.py,sha256=kDk7LN01nHuQyYnEPUEikSQ-M17jguAc-qFtCb9tg9I,38537
 onnx2tf/ops/ConcatFromSequence.py,sha256=z8pNmGQRGq9cxWORW330NZS_0zsmhFudLswMyPn8AXU,3086

@@ -40,6 +40,8 @@ onnx2tf/ops/Cos.py,sha256=0v5ZJZRzrswVEObyxf4f0RvnWMWZA4uCEdoeq_VE31s,3608
 onnx2tf/ops/Cosh.py,sha256=-L3QkQtiVBJIv1sSxbXtetVIwgI_2T4WC1O4t2aJ8Gc,3585
 onnx2tf/ops/CumProd.py,sha256=k4hTEQrkwS7vk7pEy2Btvy2y0o70NlWj1MgsNomfOPg,3957
 onnx2tf/ops/CumSum.py,sha256=SYKmD5r9Cm9gsCkJPNFoHigvvBO1PmRYRrVmn1HE78o,3954
+onnx2tf/ops/DFT.py,sha256=rL_w1z16N9Kkf0TyMbywOawld0qZ_g1QOD9npGYD_zY,8086
+onnx2tf/ops/DeformConv.py,sha256=tlhZDzNYAT093PkBDP4s6-EO-vnSdW_0KZTxtddjajM,13541
 onnx2tf/ops/DepthToSpace.py,sha256=BiyBZ88dmXQAkZ5Jc-Ddo-5Kn8dRYCnoik_XnOFzqXc,14449
 onnx2tf/ops/DequantizeLinear.py,sha256=1v43E1hUqO3g7N-PL1fy_cGj4oUgbphh7vXIGhUAyGc,6463
 onnx2tf/ops/Det.py,sha256=kxuHkpv_KNHkof0uBv2RLtr3G1uA76MFHyCiCYCBXkw,3590

@@ -58,8 +60,8 @@ onnx2tf/ops/Floor.py,sha256=8izJrNmw8wNmjF_YabIpLs4jm82J-gKcyAQbwV7Yqpc,3589
 onnx2tf/ops/FusedConv.py,sha256=gslI50V3yvt4l0mmodnyHFAu0cORx1J_ZL5cE0rZ8qs,4523
 onnx2tf/ops/GRU.py,sha256=kBHiZlhlPIV2DQCoFYFHxCTwOATeguJy1MSfj2kxqDM,30732
 onnx2tf/ops/Gather.py,sha256=ezsUTN8nWau4-kB696xjonlVWU6XQ6BjtyjSebt1EXg,15216
-onnx2tf/ops/GatherElements.py,sha256=
-onnx2tf/ops/GatherND.py,sha256=
+onnx2tf/ops/GatherElements.py,sha256=qF8milhgXOOc_G3W80U2rK7Q2SsHlPNHrs4VX20ddDY,16002
+onnx2tf/ops/GatherND.py,sha256=2PwSyXHwPP9_xADPasjxj-IXAvLNqKuGm3P5K3GOiwE,9239
 onnx2tf/ops/Gelu.py,sha256=ms9oHnESOuiIPxl_8YU2WEnQo_BVKRPKo5UJsvsWyEA,4321
 onnx2tf/ops/Gemm.py,sha256=8vGtXwx_V59JIDh3EBPuFVQSbIVql45zEHUlVGV3coU,7587
 onnx2tf/ops/GlobalAveragePool.py,sha256=GrDDOywtO6peW79mBPmBJX9MrEU2PXso94xazAzx_xk,5704

@@ -158,8 +160,8 @@ onnx2tf/ops/STFT.py,sha256=LDKN309_dBu4v9AYpz70uMJbNjRFiOte9O3wUL4bIJw,4463
 onnx2tf/ops/ScaleAndTranslate.py,sha256=VQDDhSs9TyMLQy0mF7n8pZ2TuvoKY-Lhlzd7Inf4UdI,11989
 onnx2tf/ops/Scan.py,sha256=hfN-DX6Gp-dG5158WMoHRrDWZAra3VSbsjsiphNqRIQ,16293
 onnx2tf/ops/Scatter.py,sha256=5_rTM60FPCq8unyNPDO-BZXcuz6w9Uyl2Xqx-zJTpgg,746
-onnx2tf/ops/ScatterElements.py,sha256=
-onnx2tf/ops/ScatterND.py,sha256
+onnx2tf/ops/ScatterElements.py,sha256=mp-TmswDTA9Nv0B3G3b-khOCPCKHnhCI97jDRofoEM0,8561
+onnx2tf/ops/ScatterND.py,sha256=-mVbxXjQor2T6HVHSJy5e0FHQmEfaHknaKPuSc3Oz4o,11005
 onnx2tf/ops/Selu.py,sha256=CD0SqQlTTe0chO7lebkrdfDFSk6Cg9zLhvrKomsSH4Y,3799
 onnx2tf/ops/SequenceAt.py,sha256=jpjl9gVJFagtg223YY26I0pUUEgEFjJGvSZWwbo2-mQ,3278
 onnx2tf/ops/SequenceConstruct.py,sha256=KKbnpnitdAky23WF_DS49ot7ZxVoqBEU2ChgYEcXshY,2639

@@ -191,7 +193,7 @@ onnx2tf/ops/Sub.py,sha256=JCUWNmRLrwJEB8_0MPRTzmZ4KAV_HLXNivUd_jNqPQI,11012
 onnx2tf/ops/Sum.py,sha256=wtI0SbGuNFxkLskBk68ZhOAg3XyrIx-9xGYy1GZCVSo,3073
 onnx2tf/ops/Tan.py,sha256=Ncig8clGvY7GWshqxRDRdcxjcbf_HTKGdpDw5ValrKI,3582
 onnx2tf/ops/Tanh.py,sha256=PIQUvxS_AIDufblC2vc573nse2UCRA9z5yWd7kB-51s,3585
-onnx2tf/ops/TensorScatter.py,sha256=
+onnx2tf/ops/TensorScatter.py,sha256=9M1L8ys2FodscRZXdjme5NQYrCFX_nZH7wm8vx-PXcc,8176
 onnx2tf/ops/ThresholdedRelu.py,sha256=ArF3uRH7jN8kdYYDNcivJgv9UTFl5aqqSH2Qu79j4sY,3769
 onnx2tf/ops/Tile.py,sha256=xkprg6yTaykivcHFJ644opzVPctaeplu-Ed-OpS98Gg,12720
 onnx2tf/ops/TopK.py,sha256=f6OG-DcMWneXwSjIkmY935SPyOMD5tMteHnlQHoJwQo,6348

@@ -204,12 +206,12 @@ onnx2tf/ops/Where.py,sha256=MaCcY9g4mKZQqCgh4xtoylicP-xVu9f4boKiu_q9Ow8,7711
 onnx2tf/ops/Xor.py,sha256=2ceqxHSI1Wtez_CIh8gFfvcu45Xboqfyp1iy3v2vuIs,4590
 onnx2tf/ops/__init__.py,sha256=jnmUWWa-3dHzBZV9bmPzXu6eoz2dumJTzO7i8JdcgSM,25
 onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
-onnx2tf/utils/common_functions.py,sha256=
+onnx2tf/utils/common_functions.py,sha256=NPsgAhZTgLZABrVK1x_mWs5_nXDC9tyWFAfvW9qqNLI,265023
 onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
 onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
 onnx2tf/utils/json_auto_generator.py,sha256=OC-SfKtUg7zUxaXTAg6kT0ShzIc3ByjDa3FNp173DtA,60302
 onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
-onnx2tf-1.29.
-onnx2tf-1.29.
-onnx2tf-1.29.
-onnx2tf-1.29.
+onnx2tf-1.29.20.dist-info/WHEEL,sha256=fAguSjoiATBe7TNBkJwOjyL1Tt4wwiaQGtNtjRPNMQA,80
+onnx2tf-1.29.20.dist-info/entry_points.txt,sha256=GuhvLu7ZlYECumbmoiFlKX0mFPtFi_Ti9L-E5yuQqKs,42
+onnx2tf-1.29.20.dist-info/METADATA,sha256=4DMYV8XRObEn9G6cqsBYyWC-Vcj42dbDs8IAxPKeg0Q,155198
+onnx2tf-1.29.20.dist-info/RECORD,,

{onnx2tf-1.29.18.dist-info → onnx2tf-1.29.20.dist-info}/WHEEL: file without changes
{onnx2tf-1.29.18.dist-info → onnx2tf-1.29.20.dist-info}/entry_points.txt: file without changes