onnx2tf-1.29.2-py3-none-any.whl → onnx2tf-1.29.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
  from onnx2tf.onnx2tf import convert, main

- __version__ = '1.29.2'
+ __version__ = '1.29.4'
onnx2tf/onnx2tf.py CHANGED
@@ -1246,8 +1246,8 @@ def convert(
  # Attach it to the exception for later use
  ex.onnx_op_name = error_onnx_op_name

- # If no replacement file was provided, try to generate one automatically
- if not param_replacement_file and input_onnx_file_path:
+ # If no replacement file was provided, optionally try to generate one automatically
+ if not param_replacement_file and input_onnx_file_path and auto_generate_json_on_error:
  info('')
  info(Color.REVERSE(f'Attempting automatic JSON generation due to conversion error'), '=' * 30)
  if error_onnx_op_name:
@@ -1313,6 +1313,11 @@ def convert(
  warn(
  f'Conversion failed and automatic JSON generation could not find a solution after {attempt} attempts.'
  )
+ elif not param_replacement_file and input_onnx_file_path and not auto_generate_json_on_error:
+ warn(
+ 'Conversion failed. Automatic JSON generation on error is disabled by default.\n' +
+ 'Re-run with --auto_generate_json_on_error or provide a parameter replacement JSON file.'
+ )
  # Re-raise the original error
  raise ex

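The two hunks above make automatic JSON generation on conversion failure opt-in via auto_generate_json_on_error. A minimal sketch of the resulting error path, with a hypothetical handle_conversion_error wrapper standing in for the actual onnx2tf code:

```python
# Simplified sketch of the new gating logic (not the actual onnx2tf code).
def handle_conversion_error(
    ex: Exception,
    param_replacement_file: str,
    input_onnx_file_path: str,
    auto_generate_json_on_error: bool,
):
    if not param_replacement_file and input_onnx_file_path and auto_generate_json_on_error:
        # Opt-in path: attempt to auto-generate a parameter replacement JSON here.
        print('Attempting automatic JSON generation due to conversion error')
    elif not param_replacement_file and input_onnx_file_path:
        print(
            'Conversion failed. Automatic JSON generation on error is disabled by default.\n'
            'Re-run with --auto_generate_json_on_error or provide a parameter replacement JSON file.'
        )
    raise ex  # the original error is always re-raised


try:
    handle_conversion_error(RuntimeError('conversion failed'), '', 'model.onnx', False)
except RuntimeError as e:
    print(e)
```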
onnx2tf/ops/AveragePool.py CHANGED
@@ -168,6 +168,46 @@ def make_node(
  )

  if not is_known_shape:
+ def build_tf_pads_from_begin_end(pads_begin, pads_end):
+ pads_begin = tf.cast(pads_begin, tf.int32)
+ pads_end = tf.cast(pads_end, tf.int32)
+ spatial_pads = tf.stack([pads_begin, pads_end], axis=1)
+ return tf.concat(
+ [
+ tf.zeros((1, 2), dtype=tf.int32),
+ spatial_pads,
+ tf.zeros((1, 2), dtype=tf.int32),
+ ],
+ axis=0,
+ )
+
+ def calc_extra_padding_with_ceil_dynamic(input_tensor, pads, kernel_shape, dilations, strides):
+ input_shape = tf.shape(input_tensor)
+ input_spatial = input_shape[1:-1]
+ pads_begin = tf.constant(pads[:len(pads) // 2], dtype=tf.int32)
+ pads_end = tf.constant(pads[len(pads) // 2:], dtype=tf.int32)
+ pads_along_axis = pads_begin + pads_end
+ k = tf.constant(kernel_shape, dtype=tf.int32)
+ d = tf.constant(dilations, dtype=tf.int32)
+ s = tf.constant(strides, dtype=tf.int32)
+
+ numerator = input_spatial + pads_along_axis - d * (k - 1) - 1
+ output_spatial = tf.cast(
+ tf.math.ceil(
+ tf.cast(numerator, tf.float32) / tf.cast(s, tf.float32) + 1.0
+ ),
+ tf.int32,
+ )
+ last_stride_starts = (output_spatial - 1) * s
+ last_stride_validity = last_stride_starts < (input_spatial + pads_begin)
+
+ extra_pads = tf.where(
+ last_stride_validity,
+ last_stride_starts + (k - 1) * d + 1 - (input_spatial + pads_along_axis),
+ tf.zeros_like(input_spatial),
+ )
+ return extra_pads
+
  def compute_output_spatial_shape_from_tensor(input_tensor, pads, kernel_shape, dilations, strides, ceil_mode=False):
  input_shape = tf.shape(input_tensor) # Get dynamic shape
  input_spatial = input_shape[1:-1] # Extract spatial dimensions only (NHWC format)
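The new calc_extra_padding_with_ceil_dynamic helper computes, per spatial axis, how many extra cells the end side needs so that the last pooling window still fits when ceil_mode is enabled. A small worked example of the same arithmetic in plain Python (values chosen for illustration only):

```python
import math

# One spatial axis: input length 6, kernel 3, stride 2, dilation 1, no explicit pads.
inp, k, s, d = 6, 3, 2, 1
pad_begin, pad_end = 0, 0
pads = pad_begin + pad_end

numerator = inp + pads - d * (k - 1) - 1          # 3
out = math.ceil(numerator / s + 1.0)              # ceil(2.5) = 3 output positions
last_start = (out - 1) * s                        # 4, start index of the last window
valid = last_start < inp + pad_begin              # True: the last window begins inside the input
extra = last_start + (k - 1) * d + 1 - (inp + pads) if valid else 0
print(out, extra)                                 # 3 1 -> one extra pad cell on the end side
```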
@@ -223,18 +263,32 @@ def make_node(

  # extra padding to end side (right, bottom) may be needed when ceil_mode is True
  # this extra padding should not be counted as padding when count_include_pad is True
- if ceil_mode:
- extra_pads = \
- calc_extra_padding_with_ceil(
- input_shape=input_tensor_shape[1:-1],
- kernel=kernel_shape,
+ if not is_known_shape:
+ pads_begin = tf.constant(pads[:len(pads) // 2], dtype=tf.int32)
+ pads_end = tf.constant(pads[len(pads) // 2:], dtype=tf.int32)
+ if ceil_mode:
+ extra_pads = calc_extra_padding_with_ceil_dynamic(
+ input_tensor=input_tensor,
  pads=pads,
+ kernel_shape=kernel_shape,
  dilations=dilations,
  strides=strides,
  )
- pads = pads[:len(pads) // 2] + [p + e for p, e in zip(pads[len(pads) // 2:], extra_pads)]
-
- tf_pads = pads
+ pads_end = pads_end + extra_pads
+ tf_pads = build_tf_pads_from_begin_end(pads_begin, pads_end)
+ else:
+ if ceil_mode:
+ extra_pads = \
+ calc_extra_padding_with_ceil(
+ input_shape=input_tensor_shape[1:-1],
+ kernel=kernel_shape,
+ pads=pads,
+ dilations=dilations,
+ strides=strides,
+ )
+ pads = pads[:len(pads) // 2] + [p + e for p, e in zip(pads[len(pads) // 2:], extra_pads)]
+
+ tf_pads = pads

  elif auto_pad == 'SAME_UPPER':
  tf_pad_mode = 'SAME'
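build_tf_pads_from_begin_end assembles the paddings argument that tf.pad expects for NHWC tensors: one [begin, end] row per axis, with zero rows for batch and channels. A standalone illustration of that layout (shapes chosen arbitrarily):

```python
import tensorflow as tf

# NHWC paddings matrix: one [begin, end] pair per dimension.
pads_begin = tf.constant([1, 2], dtype=tf.int32)   # top, left
pads_end = tf.constant([1, 3], dtype=tf.int32)     # bottom, right
paddings = tf.concat(
    [
        tf.zeros((1, 2), dtype=tf.int32),           # batch axis: no padding
        tf.stack([pads_begin, pads_end], axis=1),   # spatial axes
        tf.zeros((1, 2), dtype=tf.int32),           # channel axis: no padding
    ],
    axis=0,
)
x = tf.zeros([1, 4, 4, 3])
y = tf.pad(x, paddings, mode='CONSTANT')
print(y.shape)  # (1, 6, 9, 3)
```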
@@ -305,36 +359,42 @@ def make_node(
  # 1. when extra padding layer is added and count_include_pad is False
  # 2. when extra padding layer is not added and count_include_pad is True
  # 3. when last stride has extra padding due to ceil_mode and count_include_pad is True
- if is_explicit_padding and tf_pads != [0] * spatial_size * 2:
+ if is_explicit_padding:
  warn(
  f'Tensorflow incompatible padding detected. ' \
  f'Extra pad layer is inserted automatically. '
  )
-
- if auto_pad == 'SAME_LOWER':
- # switch the order of pads
- tf_pads = [i for tup in zip(tf_pads[len(tf_pads) // 2:], tf_pads[:len(tf_pads) // 2]) for i in tup]
-
- if not count_include_pad and need_multiplier:
- average_multiplier = []
- for k, non_zero_count in zip(kernel_shape, non_zero_counts):
- multiplier = [k / n if n != 0 else 1 for n in non_zero_count]
- average_multiplier.append(multiplier)
-
- # convert to tensorflow padding format
- tf_pads = \
- [[0, 0]] + \
- [list(i) for i in zip(tf_pads[:len(tf_pads) // 2], tf_pads[len(tf_pads) // 2:])] + \
- [[0, 0]]
-
- if spatial_size == 1 and kernel_shape[0] > input_tensor_shape[1]:
- padded_tensor = input_tensor
- else:
+ if not is_known_shape:
  padded_tensor = tf.pad(
  tensor=input_tensor,
  paddings=tf_pads,
  mode='CONSTANT',
  )
+ else:
+ if auto_pad == 'SAME_LOWER':
+ # switch the order of pads
+ tf_pads = [i for tup in zip(tf_pads[len(tf_pads) // 2:], tf_pads[:len(tf_pads) // 2]) for i in tup]
+
+ if not count_include_pad and need_multiplier:
+ average_multiplier = []
+ for k, non_zero_count in zip(kernel_shape, non_zero_counts):
+ multiplier = [k / n if n != 0 else 1 for n in non_zero_count]
+ average_multiplier.append(multiplier)
+
+ # convert to tensorflow padding format
+ tf_pads = \
+ [[0, 0]] + \
+ [list(i) for i in zip(tf_pads[:len(tf_pads) // 2], tf_pads[len(tf_pads) // 2:])] + \
+ [[0, 0]]
+
+ if spatial_size == 1 and kernel_shape[0] > input_tensor_shape[1]:
+ padded_tensor = input_tensor
+ else:
+ padded_tensor = tf.pad(
+ tensor=input_tensor,
+ paddings=tf_pads,
+ mode='CONSTANT',
+ )

  else:
  padded_tensor = input_tensor
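The average_multiplier logic compensates for TensorFlow always dividing by the full kernel size: when an explicit zero-pad layer is inserted but count_include_pad is 0, each output is rescaled by k / n, where n is the number of real (non-padded) elements under the window. A toy 1-D illustration of the idea (not the handler's actual code):

```python
# Sketch of the count_include_pad correction on a 1-D example.
values = [2.0, 4.0]          # real input
padded = values + [0.0]      # one zero pad appended on the end side
k = 2                        # kernel size, stride 1

tf_style_avg = [(padded[i] + padded[i + 1]) / k for i in range(len(padded) - 1)]
real_counts = [2, 1]         # non-padded elements under each window
corrected = [a * (k / n) for a, n in zip(tf_style_avg, real_counts)]
print(tf_style_avg)  # [3.0, 2.0]
print(corrected)     # [3.0, 4.0]  -> matches ONNX AveragePool with count_include_pad=0
```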
@@ -345,7 +405,7 @@ def make_node(
  multiplier = [n / k for n in non_zero_count]
  average_multiplier.append(multiplier)

- if count_include_pad and extra_pads != [0] * spatial_size:
+ if count_include_pad and is_known_shape and extra_pads != [0] * spatial_size:
  # extra padding in last stride should not be included in averaging
  if average_multiplier is None:
  average_multiplier = []
@@ -370,7 +430,7 @@ def make_node(
  # Generation of TF OP
  tf_op_type = None
  if len(kernel_shape) == 1:
- if kernel_shape[0] > padded_tensor.shape[1]:
+ if padded_tensor.shape[1] is not None and kernel_shape[0] > padded_tensor.shape[1]:
  pooled_tensor = AveragePooling1D(
  pool_size=[padded_tensor.shape[1]],
  strides=[padded_tensor.shape[1]],
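The added None check matters because padded_tensor.shape[1] is a static dimension and is None when the sequence length is dynamic, so the kernel-larger-than-input comparison cannot be evaluated at graph-build time. A quick illustration:

```python
import tensorflow as tf

# With a dynamic sequence length the static dim is None, so the
# "kernel larger than input" special case above is skipped.
x = tf.keras.Input(shape=(None, 8))   # (batch, time, channels), time unknown
print(x.shape[1])                     # None
```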
onnx2tf/ops/BitwiseAnd.py ADDED
@@ -0,0 +1,115 @@
+ import random
+ random.seed(0)
+ import numpy as np
+ np.random.seed(0)
+ import tensorflow as tf
+ import onnx_graphsurgeon as gs
+ from onnx2tf.utils.common_functions import (
+ get_constant_or_variable,
+ print_node_info,
+ inverted_operation_enable_disable,
+ make_tf_node_info,
+ get_replacement_parameter,
+ pre_process_transpose,
+ post_process_transpose,
+ )
+
+
+ @print_node_info
+ @inverted_operation_enable_disable
+ @get_replacement_parameter
+ def make_node(
+ *,
+ graph_node: gs.Node,
+ tf_layers_dict: dict,
+ **kwargs: dict,
+ ):
+ """BitwiseAnd
+
+ Parameters
+ ----------
+ graph_node: gs.Node
+ graph_surgeon Node
+
+ tf_layers_dict: dict
+ optype, shape, dtype, tensorflow graph
+ """
+ before_op_output_shape_trans_1 = \
+ tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
+ before_op_output_shape_trans_2 = \
+ tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
+ before_op_output_shape_trans = \
+ before_op_output_shape_trans_1 \
+ and before_op_output_shape_trans_2
+
+ graph_node_input_1 = get_constant_or_variable(
+ graph_node.inputs[0],
+ before_op_output_shape_trans,
+ )
+ graph_node_input_2 = get_constant_or_variable(
+ graph_node.inputs[1],
+ before_op_output_shape_trans,
+ )
+ graph_node_output: gs.Variable = graph_node.outputs[0]
+
+ shape = graph_node_output.shape
+ dtype = graph_node_output.dtype
+
+ # Preserving Graph Structure (Dict)
+ tf_layers_dict[graph_node_output.name] = {
+ 'optype': graph_node.op,
+ 'shape': shape,
+ 'dtype': dtype,
+ }
+
+ # Generation of TF OP
+ input_tensor_1 = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
+ if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
+ input_tensor_2 = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
+ if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
+
+ # Pre-process transpose
+ input_tensor_1 = pre_process_transpose(
+ value_before_transpose=input_tensor_1,
+ param_target='inputs',
+ param_name=graph_node.inputs[0].name,
+ **kwargs,
+ )
+ input_tensor_2 = pre_process_transpose(
+ value_before_transpose=input_tensor_2,
+ param_target='inputs',
+ param_name=graph_node.inputs[1].name,
+ **kwargs,
+ )
+
+ tf_op_type = None
+ tf_layers_dict[graph_node_output.name]['tf_node'] = \
+ tf.bitwise.bitwise_and(
+ x=input_tensor_1,
+ y=input_tensor_2,
+ name=graph_node.name,
+ )
+ tf_op_type = tf.bitwise.bitwise_and
+
+ # Post-process transpose
+ tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
+ value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+ param_target='outputs',
+ param_name=graph_node.outputs[0].name,
+ **kwargs,
+ )
+
+ # Generation of Debug Info
+ tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+ make_tf_node_info(
+ node_info={
+ 'tf_op_type': tf_op_type,
+ 'tf_inputs': {
+ 'x': input_tensor_1,
+ 'y': input_tensor_2,
+ },
+ 'tf_outputs': {
+ 'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+ },
+ }
+ )
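The BitwiseAnd handler above, and the BitwiseNot, BitwiseOr, and BitwiseXor handlers added below, map the ONNX ops onto tf.bitwise, which operates on integer tensors. A quick standalone check of those TF ops:

```python
import tensorflow as tf

# ONNX Bitwise* ops are defined on integer tensors; tf.bitwise expects integer dtypes too.
a = tf.constant([0b1100, 0b1010], dtype=tf.int32)
b = tf.constant([0b1010, 0b0110], dtype=tf.int32)
print(tf.bitwise.bitwise_and(a, b).numpy())  # [ 8  2]
print(tf.bitwise.bitwise_or(a, b).numpy())   # [14 14]
print(tf.bitwise.bitwise_xor(a, b).numpy())  # [ 6 12]
print(tf.bitwise.invert(a).numpy())          # [-13 -11] (two's complement on int32)
```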
onnx2tf/ops/BitwiseNot.py ADDED
@@ -0,0 +1,98 @@
+ import random
+ random.seed(0)
+ import numpy as np
+ np.random.seed(0)
+ import tensorflow as tf
+ import onnx_graphsurgeon as gs
+ from onnx2tf.utils.common_functions import (
+ get_constant_or_variable,
+ print_node_info,
+ inverted_operation_enable_disable,
+ make_tf_node_info,
+ get_replacement_parameter,
+ pre_process_transpose,
+ post_process_transpose,
+ )
+
+
+ @print_node_info
+ @inverted_operation_enable_disable
+ @get_replacement_parameter
+ def make_node(
+ *,
+ graph_node: gs.Node,
+ tf_layers_dict: dict,
+ **kwargs: dict,
+ ):
+ """BitwiseNot
+
+ Parameters
+ ----------
+ graph_node: gs.Node
+ graph_surgeon Node
+
+ tf_layers_dict: dict
+ optype, shape, dtype, tensorflow graph
+ """
+ before_op_output_shape_trans_1 = \
+ tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
+ before_op_output_shape_trans = \
+ before_op_output_shape_trans_1
+
+ graph_node_input_1 = get_constant_or_variable(
+ graph_node.inputs[0],
+ before_op_output_shape_trans,
+ )
+ graph_node_output: gs.Variable = graph_node.outputs[0]
+
+ shape = graph_node_output.shape
+ dtype = graph_node_output.dtype
+
+ # Preserving Graph Structure (Dict)
+ tf_layers_dict[graph_node_output.name] = {
+ 'optype': graph_node.op,
+ 'shape': shape,
+ 'dtype': dtype,
+ }
+
+ # Generation of TF OP
+ input_tensor_1 = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
+ if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
+
+ # Pre-process transpose
+ input_tensor_1 = pre_process_transpose(
+ value_before_transpose=input_tensor_1,
+ param_target='inputs',
+ param_name=graph_node.inputs[0].name,
+ **kwargs,
+ )
+
+ tf_op_type = None
+ tf_layers_dict[graph_node_output.name]['tf_node'] = \
+ tf.bitwise.invert(
+ x=input_tensor_1,
+ name=graph_node.name,
+ )
+ tf_op_type = tf.bitwise.invert
+
+ # Post-process transpose
+ tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
+ value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+ param_target='outputs',
+ param_name=graph_node.outputs[0].name,
+ **kwargs,
+ )
+
+ # Generation of Debug Info
+ tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+ make_tf_node_info(
+ node_info={
+ 'tf_op_type': tf_op_type,
+ 'tf_inputs': {
+ 'x': input_tensor_1,
+ },
+ 'tf_outputs': {
+ 'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+ },
+ }
+ )
onnx2tf/ops/BitwiseOr.py ADDED
@@ -0,0 +1,115 @@
+ import random
+ random.seed(0)
+ import numpy as np
+ np.random.seed(0)
+ import tensorflow as tf
+ import onnx_graphsurgeon as gs
+ from onnx2tf.utils.common_functions import (
+ get_constant_or_variable,
+ print_node_info,
+ inverted_operation_enable_disable,
+ make_tf_node_info,
+ get_replacement_parameter,
+ pre_process_transpose,
+ post_process_transpose,
+ )
+
+
+ @print_node_info
+ @inverted_operation_enable_disable
+ @get_replacement_parameter
+ def make_node(
+ *,
+ graph_node: gs.Node,
+ tf_layers_dict: dict,
+ **kwargs: dict,
+ ):
+ """BitwiseOr
+
+ Parameters
+ ----------
+ graph_node: gs.Node
+ graph_surgeon Node
+
+ tf_layers_dict: dict
+ optype, shape, dtype, tensorflow graph
+ """
+ before_op_output_shape_trans_1 = \
+ tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
+ before_op_output_shape_trans_2 = \
+ tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
+ before_op_output_shape_trans = \
+ before_op_output_shape_trans_1 \
+ and before_op_output_shape_trans_2
+
+ graph_node_input_1 = get_constant_or_variable(
+ graph_node.inputs[0],
+ before_op_output_shape_trans,
+ )
+ graph_node_input_2 = get_constant_or_variable(
+ graph_node.inputs[1],
+ before_op_output_shape_trans,
+ )
+ graph_node_output: gs.Variable = graph_node.outputs[0]
+
+ shape = graph_node_output.shape
+ dtype = graph_node_output.dtype
+
+ # Preserving Graph Structure (Dict)
+ tf_layers_dict[graph_node_output.name] = {
+ 'optype': graph_node.op,
+ 'shape': shape,
+ 'dtype': dtype,
+ }
+
+ # Generation of TF OP
+ input_tensor_1 = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
+ if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
+ input_tensor_2 = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
+ if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
+
+ # Pre-process transpose
+ input_tensor_1 = pre_process_transpose(
+ value_before_transpose=input_tensor_1,
+ param_target='inputs',
+ param_name=graph_node.inputs[0].name,
+ **kwargs,
+ )
+ input_tensor_2 = pre_process_transpose(
+ value_before_transpose=input_tensor_2,
+ param_target='inputs',
+ param_name=graph_node.inputs[1].name,
+ **kwargs,
+ )
+
+ tf_op_type = None
+ tf_layers_dict[graph_node_output.name]['tf_node'] = \
+ tf.bitwise.bitwise_or(
+ x=input_tensor_1,
+ y=input_tensor_2,
+ name=graph_node.name,
+ )
+ tf_op_type = tf.bitwise.bitwise_or
+
+ # Post-process transpose
+ tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
+ value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+ param_target='outputs',
+ param_name=graph_node.outputs[0].name,
+ **kwargs,
+ )
+
+ # Generation of Debug Info
+ tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+ make_tf_node_info(
+ node_info={
+ 'tf_op_type': tf_op_type,
+ 'tf_inputs': {
+ 'x': input_tensor_1,
+ 'y': input_tensor_2,
+ },
+ 'tf_outputs': {
+ 'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+ },
+ }
+ )
onnx2tf/ops/BitwiseXor.py ADDED
@@ -0,0 +1,115 @@
+ import random
+ random.seed(0)
+ import numpy as np
+ np.random.seed(0)
+ import tensorflow as tf
+ import onnx_graphsurgeon as gs
+ from onnx2tf.utils.common_functions import (
+ get_constant_or_variable,
+ print_node_info,
+ inverted_operation_enable_disable,
+ make_tf_node_info,
+ get_replacement_parameter,
+ pre_process_transpose,
+ post_process_transpose,
+ )
+
+
+ @print_node_info
+ @inverted_operation_enable_disable
+ @get_replacement_parameter
+ def make_node(
+ *,
+ graph_node: gs.Node,
+ tf_layers_dict: dict,
+ **kwargs: dict,
+ ):
+ """BitwiseXor
+
+ Parameters
+ ----------
+ graph_node: gs.Node
+ graph_surgeon Node
+
+ tf_layers_dict: dict
+ optype, shape, dtype, tensorflow graph
+ """
+ before_op_output_shape_trans_1 = \
+ tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
+ before_op_output_shape_trans_2 = \
+ tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
+ before_op_output_shape_trans = \
+ before_op_output_shape_trans_1 \
+ and before_op_output_shape_trans_2
+
+ graph_node_input_1 = get_constant_or_variable(
+ graph_node.inputs[0],
+ before_op_output_shape_trans,
+ )
+ graph_node_input_2 = get_constant_or_variable(
+ graph_node.inputs[1],
+ before_op_output_shape_trans,
+ )
+ graph_node_output: gs.Variable = graph_node.outputs[0]
+
+ shape = graph_node_output.shape
+ dtype = graph_node_output.dtype
+
+ # Preserving Graph Structure (Dict)
+ tf_layers_dict[graph_node_output.name] = {
+ 'optype': graph_node.op,
+ 'shape': shape,
+ 'dtype': dtype,
+ }
+
+ # Generation of TF OP
+ input_tensor_1 = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
+ if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
+ input_tensor_2 = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
+ if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
+
+ # Pre-process transpose
+ input_tensor_1 = pre_process_transpose(
+ value_before_transpose=input_tensor_1,
+ param_target='inputs',
+ param_name=graph_node.inputs[0].name,
+ **kwargs,
+ )
+ input_tensor_2 = pre_process_transpose(
+ value_before_transpose=input_tensor_2,
+ param_target='inputs',
+ param_name=graph_node.inputs[1].name,
+ **kwargs,
+ )
+
+ tf_op_type = None
+ tf_layers_dict[graph_node_output.name]['tf_node'] = \
+ tf.bitwise.bitwise_xor(
+ x=input_tensor_1,
+ y=input_tensor_2,
+ name=graph_node.name,
+ )
+ tf_op_type = tf.bitwise.bitwise_xor
+
+ # Post-process transpose
+ tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
+ value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
+ param_target='outputs',
+ param_name=graph_node.outputs[0].name,
+ **kwargs,
+ )
+
+ # Generation of Debug Info
+ tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
+ make_tf_node_info(
+ node_info={
+ 'tf_op_type': tf_op_type,
+ 'tf_inputs': {
+ 'x': input_tensor_1,
+ 'y': input_tensor_2,
+ },
+ 'tf_outputs': {
+ 'output': tf_layers_dict[graph_node_output.name]['tf_node'],
+ },
+ }
+ )
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onnx2tf
- Version: 1.29.2
+ Version: 1.29.4
  Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC).
  Home-page: https://github.com/PINTO0309/onnx2tf
  Author: Katsuya Hyodo
@@ -106,10 +106,10 @@ https://github.com/PINTO0309/onnx2tf/wiki/model_status
  |BatchNormalization|:heavy_check_mark:|
  |Bernoulli|:heavy_check_mark:|
  |BitShift|:heavy_check_mark:|
- |BitwiseAnd|**Help wanted**|
- |BitwiseNot|**Help wanted**|
- |BitwiseOr|**Help wanted**|
- |BitwiseXor|**Help wanted**|
+ |BitwiseAnd|:heavy_check_mark:|
+ |BitwiseNot|:heavy_check_mark:|
+ |BitwiseOr|:heavy_check_mark:|
+ |BitwiseXor|:heavy_check_mark:|
  |Cast|:heavy_check_mark:|
  |Ceil|:heavy_check_mark:|
  |Celu|:heavy_check_mark:|
@@ -345,7 +345,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- ghcr.io/pinto0309/onnx2tf:1.29.2
+ ghcr.io/pinto0309/onnx2tf:1.29.4

  or

@@ -353,7 +353,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
  docker run --rm -it \
  -v `pwd`:/workdir \
  -w /workdir \
- docker.io/pinto0309/onnx2tf:1.29.2
+ docker.io/pinto0309/onnx2tf:1.29.4

  or

@@ -2077,9 +2077,9 @@ optional arguments:
  which can take a very long time depending on the model complexity.

  -agje, --auto_generate_json_on_error
- Attempts to generate a parameter replacement JSON when accuracy validation finds errors
- greater than 1e-2. Useful for quickly capturing fixes during -cotof runs.
- Disabled by default to avoid unexpected file generation.
+ Attempts to generate a parameter replacement JSON when conversion fails or when accuracy
+ validation finds errors greater than 1e-2. Useful for quickly capturing fixes during
+ -cotof runs. Disabled by default to avoid unexpected file generation.

  -dms, --disable_model_save
  Does not save the converted model. For CIs RAM savings.
@@ -2518,8 +2518,8 @@ convert(
  Default: False

  auto_generate_json_on_error: Optional[bool]
- When accuracy validation detects errors greater than 1e-2, attempts to generate
- a parameter replacement JSON as a best-effort fix.
+ When conversion fails or accuracy validation detects errors greater than 1e-2,
+ attempts to generate a parameter replacement JSON as a best-effort fix.
  Default: False

  check_gpu_delegate_compatibility: Optional[bool]
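For completeness, a minimal sketch of enabling this option from the Python API (model.onnx is a placeholder path; all other arguments are omitted):

```python
from onnx2tf import convert

# Opt in to automatic parameter replacement JSON generation when conversion fails.
convert(
    input_onnx_file_path='model.onnx',
    auto_generate_json_on_error=True,
)
```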
@@ -1,6 +1,6 @@
- onnx2tf/__init__.py,sha256=XiJqjkSHI6QAj_uhuuHUb9C9a1UT1jlqkKPHin_kgWU,66
+ onnx2tf/__init__.py,sha256=pluvSdvaC9nePqUVOCSAuPcchjfddyLkq1cJnhohl5w,66
  onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
- onnx2tf/onnx2tf.py,sha256=XZBqVn1Q_qmPmrYw_Dz30vRWJ8uaJURkbUSo_8rZrjk,151116
+ onnx2tf/onnx2tf.py,sha256=wdBA-lgCEu-ZfUAKIUQgLe8hSP8ifE7rS6nWAq6iF6o,151519
  onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
  onnx2tf/ops/Acos.py,sha256=Fo8YkFKuWq8Fi2xUrBdKcAH1yJ8r5pjSD0wgLttTNdk,4003
  onnx2tf/ops/Acosh.py,sha256=ATQj2cT5JS_mTfXi0kXqJ1yzSZu5J0zHA5VjV3j7uKY,3588
@@ -12,10 +12,14 @@ onnx2tf/ops/Asin.py,sha256=2djUjTaOzXM6t4Qb-EEMZY-pm1rJl24cgcrep2i_6aQ,4003
  onnx2tf/ops/Asinh.py,sha256=74ZzTEkpxZY4CGfJT2JJU-SHXYL5KZeUkWY2v7hsMMw,3588
  onnx2tf/ops/Atan.py,sha256=D24XDMxEwXFtJheQAr3V3IWOUOc6Q5M0-b_83bmGGMM,3981
  onnx2tf/ops/Atanh.py,sha256=VsUYopBWWPoo4gta1_aqvUL6NrVXuVkGid4SqDqYJ9Q,3588
- onnx2tf/ops/AveragePool.py,sha256=X2uMuo1CnPBpeqXbCkG-hMBmmUFGry-Xu4EXLg2aoIw,17297
+ onnx2tf/ops/AveragePool.py,sha256=kifQJZplqC2Px209BotbjXCPpRBQQsB8DlJYJTvJD78,20065
  onnx2tf/ops/BatchNormalization.py,sha256=_hlf2-5-j3MCJHEoE2oMNQ8YhCm7ad9h2fwPpTo3i7g,26624
  onnx2tf/ops/Bernoulli.py,sha256=PM0xS0n1q4bnT_9PnbcKW8_Qj8dJYYBQR8kb2X-wIp4,3670
  onnx2tf/ops/BitShift.py,sha256=a28_E9hwA8yfjvtsrSKCZCeeMPB5RBQbjB3cmaNGN6k,3861
+ onnx2tf/ops/BitwiseAnd.py,sha256=snmmVzVwLxhWh0aKyaskScBvefncGyW7ZPVrmbugazk,3456
+ onnx2tf/ops/BitwiseNot.py,sha256=QuFUyK24JGrEOKYu-6lRi9uZLz4MKVtBwUqzDdqtBKA,2721
+ onnx2tf/ops/BitwiseOr.py,sha256=WSswhA3qmp3OJ4iIibl_2ps-tZEyfKI7B19GiFH7Uik,3453
+ onnx2tf/ops/BitwiseXor.py,sha256=d1WoshWdfcoQnYrdaxafRleipy1d0AKleTgh0G7lZlw,3456
  onnx2tf/ops/Cast.py,sha256=M0LRClHPgZ_8NubwME6ipKrAqcY9aKC5ihQXCkTkNkM,4601
  onnx2tf/ops/Ceil.py,sha256=0-jaueltpQSwpOIDUmy9DdTy98qN-XimYu5cHVPnUIs,3586
  onnx2tf/ops/Celu.py,sha256=9g7WNKo4G_jMtUXcoOfpNdLYqEsuyXLPkkyQZxDuL4U,3853
@@ -190,9 +194,9 @@ onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
  onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
  onnx2tf/utils/json_auto_generator.py,sha256=OC-SfKtUg7zUxaXTAg6kT0ShzIc3ByjDa3FNp173DtA,60302
  onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
- onnx2tf-1.29.2.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
- onnx2tf-1.29.2.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
- onnx2tf-1.29.2.dist-info/METADATA,sha256=x_nPx4ypPex9jfMHIPDvjT7LS8-bWke3CWugzi7n404,153189
- onnx2tf-1.29.2.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
- onnx2tf-1.29.2.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
- onnx2tf-1.29.2.dist-info/RECORD,,
+ onnx2tf-1.29.4.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
+ onnx2tf-1.29.4.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
+ onnx2tf-1.29.4.dist-info/METADATA,sha256=HYGQZOLfX2Hvk0xSg3t8Dfd376S7WlGyAc16CEfQztM,153246
+ onnx2tf-1.29.4.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+ onnx2tf-1.29.4.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
+ onnx2tf-1.29.4.dist-info/RECORD,,