onnx2tf 1.29.9__py3-none-any.whl → 1.29.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
onnx2tf/ops/LpPool.py ADDED
@@ -0,0 +1,296 @@
+import random
+random.seed(0)
+import numpy as np
+np.random.seed(0)
+import tensorflow as tf
+import onnx_graphsurgeon as gs
+from onnx2tf.utils.common_functions import (
+    get_constant_or_variable,
+    remove_dilations,
+    print_node_info,
+    inverted_operation_enable_disable,
+    make_tf_node_info,
+    get_replacement_parameter,
+    pre_process_transpose,
+    post_process_transpose,
+    calc_tf_pooling_pads,
+    calc_extra_padding_with_ceil,
+    transpose_with_flexing_deterrence,
+)
+from onnx2tf.utils.logging import *
+
+INF_INDEX_VALUE: int = 4294967296
+
+
+def _kernel_size_const(kernel_shape, dtype):
+    if isinstance(kernel_shape, (list, tuple, np.ndarray)):
+        size = 1
+        for k in kernel_shape:
+            size *= int(k)
+        return tf.cast(size, dtype)
+    if tf.is_tensor(kernel_shape):
+        return tf.cast(tf.reduce_prod(kernel_shape), dtype)
+    return tf.cast(int(kernel_shape), dtype)
+
+
+@print_node_info
+@inverted_operation_enable_disable
+@get_replacement_parameter
+def make_node(
+    *,
+    graph_node: gs.Node,
+    tf_layers_dict: dict,
+    **kwargs: dict,
+):
+    """LpPool
+
+    Parameters
+    ----------
+    graph_node: gs.Node
+        graph_surgeon Node
+
+    tf_layers_dict: dict
+        optype, shape, dtype, tensorflow graph
+    """
+    before_op_output_shape_trans_1 = \
+        tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
+    before_op_output_shape_trans = \
+        before_op_output_shape_trans_1
+
+    graph_node_input = get_constant_or_variable(
+        graph_node.inputs[0],
+        before_op_output_shape_trans,
+    )
+    graph_node_output: gs.Variable = graph_node.outputs[0]
+    shape = graph_node_output.shape
+    dtype = graph_node_output.dtype
+
+    input_tensor = tf_layers_dict[graph_node_input.name]['tf_node'] \
+        if isinstance(graph_node_input, gs.Variable) else graph_node_input
+    input_tensor_shape = input_tensor.shape
+    input_tensor_rank = len(input_tensor_shape)
+
+    # Pre-process transpose
+    input_tensor = pre_process_transpose(
+        value_before_transpose=input_tensor,
+        param_target='inputs',
+        param_name=graph_node.inputs[0].name,
+        **kwargs,
+    )
+
+    # Workaround to avoid as many conversion failures as possible
+    # for models with useless Transpose immediately before them.
+    # If the input geometry of the ONNX and the input geometry of the TF model match,
+    # the input geometry on the TF model side is forcibly transposed to the NWC or NHWC or NDHWC format.
+    # However, if all dimensions of CW or CHW or CDHW have the same value,
+    # the forced transposition process is skipped because it may destroy the structure of the model.
+    onnx_input_shape = [
+        dim if isinstance(dim, int) else None for dim in graph_node.inputs[0].shape
+    ] if graph_node.inputs[0].shape is not None else None
+    tf_input_shape = [
+        dim if isinstance(dim, int) else None for dim in input_tensor_shape
+    ]
+    if onnx_input_shape is not None \
+        and len(onnx_input_shape) > 1 and len(tf_input_shape) > 1 \
+        and onnx_input_shape == tf_input_shape:
+
+        shape_for_judging_skip = [
+            dim if dim is not None else INF_INDEX_VALUE for dim in onnx_input_shape[1:]
+        ]
+        if shape_for_judging_skip.count(shape_for_judging_skip[0]) != len(shape_for_judging_skip):
+            if len(onnx_input_shape) == 3:
+                # 1D
+                input_tensor = transpose_with_flexing_deterrence(
+                    input_tensor=input_tensor,
+                    perm=[0,2,1],
+                    **kwargs,
+                )
+            elif len(onnx_input_shape) == 4:
+                # 2D
+                input_tensor = transpose_with_flexing_deterrence(
+                    input_tensor=input_tensor,
+                    perm=[0,2,3,1],
+                    **kwargs,
+                )
+            elif len(onnx_input_shape) == 5:
+                # 3D
+                input_tensor = transpose_with_flexing_deterrence(
+                    input_tensor=input_tensor,
+                    perm=[0,2,3,4,1],
+                    **kwargs,
+                )
+
+    auto_pad = graph_node.attrs.get('auto_pad', 'NOTSET')
+    ceil_mode = bool(graph_node.attrs.get('ceil_mode', 0))
+    kernel_shape = graph_node.attrs['kernel_shape']
+    spatial_size = len(kernel_shape)
+    dilations = graph_node.attrs.get('dilations', [1] * spatial_size)
+    pads = graph_node.attrs.get('pads', [0] * spatial_size * 2)
+    strides = graph_node.attrs.get('strides', [1] * spatial_size)
+    p = float(graph_node.attrs.get('p', 2))
+
+    input_tensor_shape = input_tensor.shape.as_list()
+    is_known_shape = None not in input_tensor_shape[1:]
+
+    # default tensorflow action is 'SAME_UPPER' mode (extra padding in the end for odd numbers)
+    # explicit pad layer is added for tensorflow incompatible cases
+    tf_pad_mode = 'VALID'
+    is_explicit_padding = False
+    dilated_kernel_shape = kernel_shape
+    if dilations != [1] * spatial_size:
+        dilated_kernel_shape = [(k - 1) * d for k, d in zip(kernel_shape, dilations)]
+
+    tf_pads = calc_tf_pooling_pads(
+        input_shape=input_tensor_shape,
+        kernel=dilated_kernel_shape,
+        strides=strides,
+        input_tensor=input_tensor,
+    )
+
+    # onnx padding value is ignored if auto_pad is not 'NOTSET'
+    if auto_pad == 'NOTSET':
+
+        # check if onnx padding is same with tensorflow padding mode 'SAME'
+        # this is to avoid flex operations since tflite has no builtin pooling with manual padding value
+        if is_known_shape and pads != [0] * spatial_size * 2 and tf_pads == pads:
+            auto_pad = 'SAME_UPPER'
+            tf_pad_mode = 'SAME'
+
+        else:
+            auto_pad = 'VALID'
+            is_explicit_padding = True
+
+            # extra padding may be needed for ceiling
+            # this padding is added to end side (right, bottom) only
+            if ceil_mode:
+                extra_pads = \
+                    calc_extra_padding_with_ceil(
+                        input_shape=input_tensor_shape[1:-1],
+                        kernel=kernel_shape,
+                        pads=pads,
+                        dilations=dilations,
+                        strides=strides,
+                    )
+                pads = pads[:len(pads) // 2] + [p + e for p, e in zip(pads[len(pads) // 2:], extra_pads)]
+
+            tf_pads = pads
+
+    elif auto_pad == 'SAME_UPPER':
+        tf_pad_mode = 'SAME'
+
+    elif auto_pad == 'SAME_LOWER':
+        is_explicit_padding = True
+
+    elif auto_pad == 'VALID':
+        tf_pads = [0] * spatial_size * 2
+
+    else:
+        error_msg = Color.RED(f'ERROR:') + ' ' + \
+            f'Wrong auto_pad parameter in LpPool: {auto_pad}.'
+        raise ValueError(error_msg)
+
+    # add extra pad layer if needed
+    if is_explicit_padding and tf_pads != [0] * spatial_size * 2:
+        warn(
+            f'Tensorflow incompatible padding detected. ' \
+            f'Extra pad layer is inserted automatically. '
+        )
+
+        if auto_pad == 'SAME_LOWER':
+            # switch the order of pads
+            tf_pads = [i for tup in zip(tf_pads[len(tf_pads) // 2:], tf_pads[:len(tf_pads) // 2]) for i in tup]
+
+        # convert to tensorflow padding format
+        tf_pads = \
+            [[0, 0]] + \
+            [list(i) for i in zip(tf_pads[:len(tf_pads) // 2], tf_pads[len(tf_pads) // 2:])] + \
+            [[0, 0]]
+
+        padded_tensor = tf.pad(
+            tensor=input_tensor,
+            paddings=tf_pads,
+            mode='CONSTANT',
+            constant_values=0.0,
+        )
+
+    else:
+        padded_tensor = input_tensor
+
+    # Preserving Graph Structure (Dict)
+    tf_layers_dict[graph_node_output.name] = {
+        'optype': graph_node.op,
+        'shape': shape,
+        'dtype': dtype,
+        'nhwc': True,
+    }
+
+    # Generation of TF OP
+    tf_op_type = None
+    abs_p_tensor = tf.pow(tf.abs(padded_tensor), p) if p != 1.0 else tf.abs(padded_tensor)
+    kernel_size_const = _kernel_size_const(kernel_shape, abs_p_tensor.dtype)
+
+    if spatial_size < 4 and (strides == [1] * spatial_size or dilations == [1] * spatial_size):
+        pooled_tensor = tf.nn.pool(
+            input=abs_p_tensor,
+            window_shape=kernel_shape,
+            dilations=dilations,
+            strides=strides,
+            padding=tf_pad_mode.upper(),
+            pooling_type='AVG',
+        )
+        tf_op_type = tf.nn.pool
+    else:
+        # TODO: Dilated LpPool with strides is broken for 3D and above, need to be fixed
+        if spatial_size >= 3:
+            error_msg = Color.RED(f'ERROR:') + ' ' \
+                f'Dilated LpPool with strides is not supported for 3D and above for now. '
+            print(error_msg)
+            raise NotImplementedError(error_msg)
+
+        abs_p_tensor = remove_dilations(
+            input_tensor=abs_p_tensor,
+            kernel_shape=kernel_shape,
+            spatial_size=spatial_size,
+            strides=strides,
+            dilations=dilations,
+        )
+        pooled_tensor = tf.nn.pool(
+            input=abs_p_tensor,
+            window_shape=kernel_shape,
+            strides=kernel_shape,
+            padding='VALID',
+            pooling_type='AVG',
+        )
+        tf_op_type = tf.nn.pool
+
+    pooled_tensor = pooled_tensor * kernel_size_const
+    tf_layers_dict[graph_node_output.name]['tf_node'] = \
+        tf.pow(pooled_tensor, 1.0 / p) if p != 1.0 else pooled_tensor
+
+    # Post-process transpose
+    tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
+        value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+        param_target='outputs',
+        param_name=graph_node.outputs[0].name,
+        **kwargs,
+    )
+
+    # Generation of Debug Info
+    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+        make_tf_node_info(
+            node_info={
+                'tf_op_type': tf_op_type,
+                'tf_inputs': {
+                    'input': input_tensor,
+                    'kernel_shape': kernel_shape,
+                    'strides': strides,
+                    'dilations': dilations,
+                    'padding': tf_pads if tf_pad_mode != 'same' else tf_pad_mode,
+                    'ceil_mode': ceil_mode,
+                    'p': p,
+                },
+                'tf_outputs': {
+                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+                },
+            }
+        )
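
The conversion above boils down to a fixed algebraic recipe: ONNX LpPool is average pooling applied to |x|^p, rescaled by the kernel element count to turn the mean back into a sum, followed by the p-th root. A minimal standalone sketch of that decomposition (the sample tensor, p, and kernel values are assumed for illustration and are not taken from the package):

```python
import numpy as np
import tensorflow as tf

# Sketch of the LpPool decomposition used in LpPool.py:
# (sum |x|^p)^(1/p) computed as (avg_pool(|x|^p) * kernel_element_count)^(1/p).
x = tf.constant(np.random.rand(1, 4, 4, 3), dtype=tf.float32)  # assumed NHWC sample input
p = 2.0
kernel_shape = [2, 2]

abs_p = tf.pow(tf.abs(x), p)
avg = tf.nn.pool(
    input=abs_p,
    window_shape=kernel_shape,
    strides=kernel_shape,
    padding='VALID',
    pooling_type='AVG',
)
lp = tf.pow(avg * float(np.prod(kernel_shape)), 1.0 / p)
print(lp.shape)  # (1, 2, 2, 3)
```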
onnx2tf/ops/MaxRoiPool.py ADDED
@@ -0,0 +1,236 @@
+import random
+random.seed(0)
+import numpy as np
+np.random.seed(0)
+import tensorflow as tf
+import onnx_graphsurgeon as gs
+from onnx2tf.utils.common_functions import (
+    get_constant_or_variable,
+    print_node_info,
+    inverted_operation_enable_disable,
+    make_tf_node_info,
+    get_replacement_parameter,
+    pre_process_transpose,
+    post_process_transpose,
+    transpose_with_flexing_deterrence,
+)
+from onnx2tf.utils.logging import *
+
+INF_INDEX_VALUE: int = 4294967296
+
+
+@print_node_info
+@inverted_operation_enable_disable
+@get_replacement_parameter
+def make_node(
+    *,
+    graph_node: gs.Node,
+    tf_layers_dict: dict,
+    **kwargs: dict,
+):
+    """MaxRoiPool
+
+    Parameters
+    ----------
+    graph_node: gs.Node
+        graph_surgeon Node
+
+    tf_layers_dict: dict
+        optype, shape, dtype, tensorflow graph
+    """
+    before_op_output_shape_trans_1 = \
+        tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
+    before_op_output_shape_trans_2 = \
+        tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
+    before_op_output_shape_trans = \
+        before_op_output_shape_trans_1 and before_op_output_shape_trans_2
+
+    graph_node_input_1 = get_constant_or_variable(
+        graph_node.inputs[0],
+        before_op_output_shape_trans,
+    )
+    graph_node_input_2 = get_constant_or_variable(
+        graph_node.inputs[1],
+        before_op_output_shape_trans,
+    )
+    graph_node_output: gs.Variable = graph_node.outputs[0]
+    shape = graph_node_output.shape
+    dtype = graph_node_output.dtype
+
+    input_tensor = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
+        if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
+    rois = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
+        if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
+
+    # Pre-process transpose
+    input_tensor = pre_process_transpose(
+        value_before_transpose=input_tensor,
+        param_target='inputs',
+        param_name=graph_node.inputs[0].name,
+        **kwargs,
+    )
+
+    # Workaround to avoid as many conversion failures as possible
+    # for models with useless Transpose immediately before them.
+    # If the input geometry of the ONNX and the input geometry of the TF model match,
+    # the input geometry on the TF model side is forcibly transposed to the NHWC format.
+    # However, if all dimensions of CHW have the same value,
+    # the forced transposition process is skipped because it may destroy the structure of the model.
+    onnx_input_shape = [
+        dim if isinstance(dim, int) else None for dim in graph_node.inputs[0].shape
+    ] if graph_node.inputs[0].shape is not None else None
+    tf_input_shape = [
+        dim if isinstance(dim, int) else None for dim in input_tensor.shape
+    ]
+    if onnx_input_shape is not None \
+        and len(onnx_input_shape) > 1 and len(tf_input_shape) > 1 \
+        and onnx_input_shape == tf_input_shape:
+
+        shape_for_judging_skip = [
+            dim if dim is not None else INF_INDEX_VALUE for dim in onnx_input_shape[1:]
+        ]
+        if shape_for_judging_skip.count(shape_for_judging_skip[0]) != len(shape_for_judging_skip):
+            if len(onnx_input_shape) == 4:
+                # 2D
+                input_tensor = transpose_with_flexing_deterrence(
+                    input_tensor=input_tensor,
+                    perm=[0,2,3,1],
+                    **kwargs,
+                )
+
+    pooled_shape = graph_node.attrs.get('pooled_shape', None)
+    if pooled_shape is None or len(pooled_shape) != 2:
+        error_msg = \
+            Color.RED(f'ERROR:') + ' ' + \
+            f'pooled_shape is required for MaxRoiPool. ' \
+            f'graph_node.name: {graph_node.name}, pooled_shape: {pooled_shape}'
+        print(error_msg)
+        raise ValueError(error_msg)
+
+    pooled_h = int(pooled_shape[0])
+    pooled_w = int(pooled_shape[1])
+    spatial_scale = float(graph_node.attrs.get('spatial_scale', 1.0))
+
+    # Preserving Graph Structure (Dict)
+    tf_layers_dict[graph_node_output.name] = {
+        'optype': graph_node.op,
+        'shape': shape,
+        'dtype': dtype,
+        'nhwc': True,
+    }
+
+    # Generation of TF OP
+    rois = tf.cast(rois, tf.float32)
+    if rois.shape.rank == 1:
+        rois = tf.expand_dims(rois, axis=0)
+
+    channels_static = input_tensor.shape[-1]
+    channel_spec = tf.TensorSpec(
+        shape=(channels_static,) if channels_static is not None else (None,),
+        dtype=input_tensor.dtype,
+    )
+    row_spec = tf.TensorSpec(
+        shape=(pooled_w, channels_static) if channels_static is not None else (pooled_w, None),
+        dtype=input_tensor.dtype,
+    )
+    roi_spec = tf.TensorSpec(
+        shape=(pooled_h, pooled_w, channels_static) if channels_static is not None else (pooled_h, pooled_w, None),
+        dtype=input_tensor.dtype,
+    )
+
+    def roi_pool_single(roi):
+        batch_idx = tf.cast(roi[0], tf.int32)
+        x1, y1, x2, y2 = tf.unstack(roi[1:5])
+        x1 = x1 * spatial_scale
+        y1 = y1 * spatial_scale
+        x2 = x2 * spatial_scale
+        y2 = y2 * spatial_scale
+
+        roi_start_w = tf.cast(tf.round(x1), tf.int32)
+        roi_start_h = tf.cast(tf.round(y1), tf.int32)
+        roi_end_w = tf.cast(tf.round(x2), tf.int32)
+        roi_end_h = tf.cast(tf.round(y2), tf.int32)
+
+        height = tf.shape(input_tensor)[1]
+        width = tf.shape(input_tensor)[2]
+
+        roi_start_w = tf.clip_by_value(roi_start_w, 0, width)
+        roi_start_h = tf.clip_by_value(roi_start_h, 0, height)
+        roi_end_w = tf.clip_by_value(roi_end_w, 0, width)
+        roi_end_h = tf.clip_by_value(roi_end_h, 0, height)
+
+        roi_width = tf.maximum(roi_end_w - roi_start_w + 1, 1)
+        roi_height = tf.maximum(roi_end_h - roi_start_h + 1, 1)
+
+        bin_size_h = tf.cast(roi_height, tf.float32) / tf.cast(pooled_h, tf.float32)
+        bin_size_w = tf.cast(roi_width, tf.float32) / tf.cast(pooled_w, tf.float32)
+
+        channels_dynamic = tf.shape(input_tensor)[-1]
+        zero = tf.zeros([channels_dynamic], dtype=input_tensor.dtype)
+
+        def pool_bin(ph, pw):
+            ph_f = tf.cast(ph, tf.float32)
+            pw_f = tf.cast(pw, tf.float32)
+            hstart = tf.cast(tf.floor(ph_f * bin_size_h), tf.int32) + roi_start_h
+            hend = tf.cast(tf.ceil((ph_f + 1.0) * bin_size_h), tf.int32) + roi_start_h
+            wstart = tf.cast(tf.floor(pw_f * bin_size_w), tf.int32) + roi_start_w
+            wend = tf.cast(tf.ceil((pw_f + 1.0) * bin_size_w), tf.int32) + roi_start_w
+
+            hstart = tf.clip_by_value(hstart, 0, height)
+            hend = tf.clip_by_value(hend, 0, height)
+            wstart = tf.clip_by_value(wstart, 0, width)
+            wend = tf.clip_by_value(wend, 0, width)
+
+            is_empty = tf.logical_or(hend <= hstart, wend <= wstart)
+
+            def do_max():
+                region = input_tensor[batch_idx, hstart:hend, wstart:wend, :]
+                return tf.reduce_max(region, axis=[0,1])
+
+            return tf.cond(is_empty, lambda: zero, do_max)
+
+        def pool_row(ph):
+            return tf.map_fn(
+                lambda pw: pool_bin(ph, pw),
+                tf.range(pooled_w),
+                fn_output_signature=channel_spec,
+            )
+
+        return tf.map_fn(
+            pool_row,
+            tf.range(pooled_h),
+            fn_output_signature=row_spec,
+        )
+
+    pooled_tensor = tf.map_fn(
+        roi_pool_single,
+        rois,
+        fn_output_signature=roi_spec,
+    )
+
+    tf_layers_dict[graph_node_output.name]['tf_node'] = pooled_tensor
+
+    # Post-process transpose
+    tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
+        value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+        param_target='outputs',
+        param_name=graph_node.outputs[0].name,
+        **kwargs,
+    )
+
+    # Generation of Debug Info
+    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+        make_tf_node_info(
+            node_info={
+                'tf_op_type': 'MaxRoiPool',
+                'tf_inputs': {
+                    'input': input_tensor,
+                    'rois': rois,
+                    'pooled_shape': pooled_shape,
+                    'spatial_scale': spatial_scale,
+                },
+                'tf_outputs': {
+                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+                },
+            }
+        )
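
The ROI pooling above is built from nested tf.map_fn calls over ROIs, rows, and bins; the essential arithmetic is how each ROI is quantized into pooled_h × pooled_w bins. A simplified NumPy sketch of that bin computation (illustrative only, not package code; it omits the clipping to the feature-map height and width that the converter also performs):

```python
import numpy as np

# One ROI is [batch_idx, x1, y1, x2, y2]: scale it, round to feature-map
# coordinates, then split the region into pooled_h x pooled_w bins via
# floor/ceil; each bin is later max-reduced over its pixels.
def roi_bin_bounds(roi, pooled_h, pooled_w, spatial_scale=1.0):
    _, x1, y1, x2, y2 = roi
    x1, y1, x2, y2 = (int(round(v * spatial_scale)) for v in (x1, y1, x2, y2))
    roi_h = max(y2 - y1 + 1, 1)
    roi_w = max(x2 - x1 + 1, 1)
    bins = []
    for ph in range(pooled_h):
        for pw in range(pooled_w):
            hstart = int(np.floor(ph * roi_h / pooled_h)) + y1
            hend = int(np.ceil((ph + 1) * roi_h / pooled_h)) + y1
            wstart = int(np.floor(pw * roi_w / pooled_w)) + x1
            wend = int(np.ceil((pw + 1) * roi_w / pooled_w)) + x1
            bins.append((hstart, hend, wstart, wend))
    return bins

print(roi_bin_bounds([0, 0, 0, 5, 5], pooled_h=2, pooled_w=2))
# [(0, 3, 0, 3), (0, 3, 3, 6), (3, 6, 0, 3), (3, 6, 3, 6)]
```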
onnx2tf/utils/common_functions.py CHANGED
@@ -276,6 +276,8 @@ def make_tf_node_info(**kwargs):
 def print_node_info(func):
     @wraps(func)
     def print_wrapper_func(*args, **kwargs):
+        if kwargs.get('suppress_log', False):
+            return func(*args, **kwargs)
         input_onnx_file_path: str = kwargs.get('input_onnx_file_path', None)
         graph_input: gs.Variable = kwargs.get('graph_input', None)
         graph_node: gs.Variable = kwargs.get('graph_node', None)
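
The two added lines give every decorated make_node a quiet path: a caller that passes suppress_log=True skips the conversion logging but still runs the wrapped function. A minimal self-contained sketch of the pattern (the log message and node_name argument are illustrative stand-ins, not the package's actual logging):

```python
from functools import wraps

def print_node_info(func):
    @wraps(func)
    def print_wrapper_func(*args, **kwargs):
        # Early exit: run the wrapped conversion without emitting any log.
        if kwargs.get('suppress_log', False):
            return func(*args, **kwargs)
        print(f'converting: {kwargs.get("node_name", "?")}')  # stand-in for the real logging
        return func(*args, **kwargs)
    return print_wrapper_func

@print_node_info
def make_node(**kwargs):
    return 'converted'

make_node(node_name='LpPool_0')                     # logs, then converts
make_node(node_name='LpPool_0', suppress_log=True)  # converts silently
```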
@@ -4051,6 +4053,7 @@ def dummy_tf_inference(
     for idx, dim in enumerate(input_size):
         if idx == 0 and input_sizes[0][0] is not None \
             and len(input_sizes[0]) == len(input_size) \
+            and len(input_size) >= 2 \
             and dim is None:
             # Batch size assignment for input OPs
             new_input_size.append(input_sizes[0][0])
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.9
+Version: 1.29.10
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC).
 Home-page: https://github.com/PINTO0309/onnx2tf
 Author: Katsuya Hyodo
@@ -182,16 +182,16 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
 |Less|:heavy_check_mark:|
 |Log|:heavy_check_mark:|
 |LogSoftmax|:heavy_check_mark:|
-|Loop|**Help wanted**|
+|Loop|:heavy_check_mark:|
 |LpNormalization|:heavy_check_mark:|
-|LpPool|**Help wanted**|
+|LpPool|:heavy_check_mark:|
 |LRN|:heavy_check_mark:|
 |LSTM|:heavy_check_mark:|
 |MatMul|:heavy_check_mark:|
 |MatMulInteger|:heavy_check_mark:|
 |MaxPool|:heavy_check_mark:|
 |Max|:heavy_check_mark:|
-|MaxRoiPool|**Help wanted**|
+|MaxRoiPool|:heavy_check_mark:|
 |MaxUnpool|:heavy_check_mark:|
 |Mean|:heavy_check_mark:|
 |MeanVarianceNormalization|:heavy_check_mark:|
@@ -359,7 +359,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.9
+ghcr.io/pinto0309/onnx2tf:1.29.10
 
 or
 
@@ -367,7 +367,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.9
+docker.io/pinto0309/onnx2tf:1.29.10
 
 or
 
@@ -1,6 +1,6 @@
-onnx2tf/__init__.py,sha256=0iOZ-E1Mv5pU4CHm_29h1gNBG_KtKmxjV5sHtV-0DKI,66
+onnx2tf/__init__.py,sha256=E1LYdyUQ9pnnydTNI6NtTfnt7AZXaoxY42NlhsS5Jr0,67
 onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
-onnx2tf/onnx2tf.py,sha256=wdBA-lgCEu-ZfUAKIUQgLe8hSP8ifE7rS6nWAq6iF6o,151519
+onnx2tf/onnx2tf.py,sha256=yR8aKaEn01Q8dEeYDqHIsuZuG6l5TGQniHDlPiUROx4,152238
 onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
 onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
 onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -75,7 +75,7 @@ onnx2tf/ops/HardSigmoid.py,sha256=KDP_t-Z70sDsHMOYxyJ7ZNH31zqkrViOKYCcRG5NJHc,36
 onnx2tf/ops/HardSwish.py,sha256=nEng3LCDQYMZ4XhFZ7pXKGyRsM2_waowi8PlZt_f6Ck,3994
 onnx2tf/ops/Hardmax.py,sha256=tiMch3Tuc8Rvy52hgGSfqfOVyXaEsnxYplRMy7vtpyA,4398
 onnx2tf/ops/Identity.py,sha256=egudADqdhe4BiunYHUTh-AlDAkPpRESRT2eG0Q4rBts,2425
-onnx2tf/ops/If.py,sha256=d1CD5R3-UuhO35XwLsVCtWhUvJJg6LKh5OKTmYrGBh8,6966
+onnx2tf/ops/If.py,sha256=Z3VEMm1mOKomYl1Mw58shc83kNPZsYs-wvhse7PlfTY,7062
 onnx2tf/ops/Input.py,sha256=aRZQ4uLWmMS3q317wZO68qqks8p3QDOINhTEObAhvvY,16225
 onnx2tf/ops/InstanceNormalization.py,sha256=gUixsJ1105tt8UGwoLLdZ4V95GiZwzHm_jJMugqQ1yQ,11997
 onnx2tf/ops/Inverse.py,sha256=YsRs0mpZg6dXWXnM1-UU5PcaUvrUqLmDDCNFpirXqp4,4595
@@ -89,11 +89,14 @@ onnx2tf/ops/Less.py,sha256=YZp5u3cUMU9Gcv_JVqPSIeuaIzVlU0hKy0PnvE6BXFo,4576
 onnx2tf/ops/LessOrEqual.py,sha256=9Lc8qaYUPVC6yZoQluNqcdHnvpUbfWBOI4Ow38RRAJo,4595
 onnx2tf/ops/Log.py,sha256=UZebF3SGq85BnoPgYyN2j-zzFRp67fJnYPNyu33W55o,3582
 onnx2tf/ops/LogSoftmax.py,sha256=j2nhYY7__8ViLFJVLA5tS98QEvGS1gTIW0QCdnZWUPQ,3923
+onnx2tf/ops/Loop.py,sha256=I32CWoex8FMXm9KE2aomADB4jK5BzaMoAKvtPnBJy6A,14593
 onnx2tf/ops/LpNormalization.py,sha256=Uu15HgxFNXb6gNMgdTJyf0SLPaLbcbkOYqY_4hMBxNA,3153
+onnx2tf/ops/LpPool.py,sha256=96eI1FaDgW0M_USWBCHFedvtojHTLL28_lb3mcEV55A,10470
 onnx2tf/ops/MatMul.py,sha256=KHhRyQCyxe6845f-AOI1UJzA3rGTssG6eyKmDw0oegs,21466
 onnx2tf/ops/MatMulInteger.py,sha256=qHqzdJNI9SeJDbW8pR90baYCdGN6FdOez4hi9EzwXoc,6538
 onnx2tf/ops/Max.py,sha256=w5nMciO_6ApYUobHuwMGuS3xhuza7eSvKDRhvMPgAuo,3256
 onnx2tf/ops/MaxPool.py,sha256=_JC4eqBTh-qLkZCMG8RZhthRZ8D2d821zaFMWeGMEWc,15775
+onnx2tf/ops/MaxRoiPool.py,sha256=RYZyjnINqJd6k7KLFJ-D9iHjA2vR-m7WvhrumD9cmDk,8490
 onnx2tf/ops/MaxUnpool.py,sha256=dGIEvC45rFuWoeG1S9j4sjEdEUqiWs_xdY5DZH6X7b4,5743
 onnx2tf/ops/Mean.py,sha256=xfTjKpQntJB8uXAkgDLS77oLXy2yQh1Hlz0K2GltMh0,3132
 onnx2tf/ops/MeanVarianceNormalization.py,sha256=Ne53jlDgAJZ9yhzKOWR-0LnjDdM-fg7DYmRytoP-4IA,3743
@@ -189,18 +192,16 @@ onnx2tf/ops/Unsqueeze.py,sha256=UJun_DYfg7aQaHoeAvWlB85oRtDWq2lP7kvb0njcaC0,1221
 onnx2tf/ops/Upsample.py,sha256=SX3N_wZHD8G5Z0PLcPgX1ZCzOdct-uTzxKeMhhzeBOw,5304
 onnx2tf/ops/Where.py,sha256=MaCcY9g4mKZQqCgh4xtoylicP-xVu9f4boKiu_q9Ow8,7711
 onnx2tf/ops/Xor.py,sha256=2ceqxHSI1Wtez_CIh8gFfvcu45Xboqfyp1iy3v2vuIs,4590
-onnx2tf/ops/_Loop.py,sha256=eo5sNfrfOnKV6_I737AWsM5LJTY9DVOxQEvhanxtP4g,11322
-onnx2tf/ops/__Loop.py,sha256=ClwMcbNS4hqUtW_pzwjMa9Cqg7ONvz9aplke55A0uJ0,19704
 onnx2tf/ops/__init__.py,sha256=jnmUWWa-3dHzBZV9bmPzXu6eoz2dumJTzO7i8JdcgSM,25
 onnx2tf/utils/__init__.py,sha256=E9FM9He68VIASDnYp-OrxvHFVn55GzWqw2OEkCqn1zg,27
-onnx2tf/utils/common_functions.py,sha256=o9a4g56OdQKocODzBp2Uxesves_Tl-Iizh5r4Okmu6Q,249631
+onnx2tf/utils/common_functions.py,sha256=TWb_e6i2MjB7C4eh1FWHTIDVlr6-7NgSNcCKwKGhGg8,249765
 onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
 onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
 onnx2tf/utils/json_auto_generator.py,sha256=OC-SfKtUg7zUxaXTAg6kT0ShzIc3ByjDa3FNp173DtA,60302
 onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
-onnx2tf-1.29.9.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
-onnx2tf-1.29.9.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
-onnx2tf-1.29.9.dist-info/METADATA,sha256=_Rhg96G54IqDYqigqKQjYlFIzp3SRB2wpRrt03eeZ1w,153504
-onnx2tf-1.29.9.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-onnx2tf-1.29.9.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
-onnx2tf-1.29.9.dist-info/RECORD,,
+onnx2tf-1.29.10.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
+onnx2tf-1.29.10.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
+onnx2tf-1.29.10.dist-info/METADATA,sha256=LMFqdTpJPlqtKrJ0jmOeQh5PKMyUy_XDbSPDcARueyE,153516
+onnx2tf-1.29.10.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+onnx2tf-1.29.10.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
+onnx2tf-1.29.10.dist-info/RECORD,,