onnx2tf 1.29.15__py3-none-any.whl → 1.29.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnx2tf/__init__.py +1 -1
- onnx2tf/onnx2tf.py +141 -0
- onnx2tf/ops/Add.py +112 -0
- onnx2tf/ops/Concat.py +236 -64
- onnx2tf/ops/DequantizeLinear.py +76 -34
- onnx2tf/ops/DynamicQuantizeLinear.py +18 -17
- onnx2tf/ops/QLinearConcat.py +245 -26
- onnx2tf/ops/QLinearConv.py +70 -75
- onnx2tf/ops/QLinearMatMul.py +77 -20
- onnx2tf/ops/QuantizeLinear.py +117 -44
- onnx2tf/ops/Split.py +33 -8
- {onnx2tf-1.29.15.dist-info → onnx2tf-1.29.17.dist-info}/METADATA +3 -3
- {onnx2tf-1.29.15.dist-info → onnx2tf-1.29.17.dist-info}/RECORD +15 -15
- {onnx2tf-1.29.15.dist-info → onnx2tf-1.29.17.dist-info}/WHEEL +0 -0
- {onnx2tf-1.29.15.dist-info → onnx2tf-1.29.17.dist-info}/entry_points.txt +0 -0
onnx2tf/ops/DequantizeLinear.py
CHANGED
@@ -15,6 +15,43 @@ from onnx2tf.utils.common_functions import (
     post_process_transpose,
 )
 
+def _expand_scale_or_zero_point(
+    *,
+    value,
+    input_tensor,
+    axis: int,
+    block_size: int,
+):
+    value_rank = len(value.shape)
+    input_rank = len(input_tensor.shape)
+
+    if value_rank == 0:
+        return value
+
+    if input_rank <= 0:
+        return value
+
+    if axis < 0 or axis >= input_rank:
+        axis = 0
+
+    # Blocked quantization: expand along axis then slice to input shape
+    if block_size > 0 and value_rank == input_rank:
+        if value.shape[axis] is None \
+                or input_tensor.shape[axis] is None \
+                or value.shape[axis] != input_tensor.shape[axis]:
+            expanded = tf.repeat(value, repeats=block_size, axis=axis)
+            expanded = tf.slice(expanded, [0] * input_rank, tf.shape(input_tensor))
+            return expanded
+        return value
+
+    # Per-axis quantization: reshape 1-D to broadcast
+    if value_rank == 1 and input_rank is not None:
+        shape = [1] * input_rank
+        shape[axis] = -1
+        return tf.reshape(value, shape)
+
+    return value
+
 
 @print_node_info
 @inverted_operation_enable_disable
@@ -63,6 +100,11 @@ def make_node(
 
     input_tensor = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
         if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
+    input_is_dequantized = False
+    input_nhwc = False
+    if isinstance(graph_node_input_1, gs.Variable):
+        input_is_dequantized = tf_layers_dict.get(graph_node_input_1.name, {}).get('is_dequantized', False)
+        input_nhwc = tf_layers_dict.get(graph_node_input_1.name, {}).get('nhwc', False)
 
     # Pre-process transpose
     input_tensor = pre_process_transpose(
@@ -72,12 +114,10 @@ def make_node(
         **kwargs,
    )
 
-
-
+    input_tensor_rank = len(input_tensor.shape)
+    input_tensor_dtype = input_tensor.dtype
     x_scale = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
         if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
-    x_scale_shape = x_scale.shape
-    x_scale_rank = len(x_scale_shape)
     x_zero_point = tf_layers_dict[graph_node_input_3.name]['tf_node'] \
         if isinstance(graph_node_input_3, gs.Variable) else graph_node_input_3
 
@@ -87,48 +127,50 @@ def make_node(
         tensor_rank=input_tensor_rank,
         before_op_output_shape_trans=before_op_output_shape_trans,
     )
+    if input_tensor_rank == 1:
+        axis = 0
 
     # Preserving Graph Structure (Dict)
     tf_layers_dict[graph_node_output.name] = {
         'optype': graph_node.op,
         'shape': shape,
         'dtype': dtype,
+        'is_dequantized': True,
+        'nhwc': input_nhwc,
     }
 
     # Generation of TF OP
 
     input_tensor = tf.cast(input_tensor, tf.float32)
+    x_scale = tf.cast(x_scale, tf.float32)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        x=subed_tensor,
-        y=x_scale,
-    )
+    block_size = int(graph_node.attrs.get('block_size', 0))
+    x_scale = _expand_scale_or_zero_point(
+        value=x_scale,
+        input_tensor=input_tensor,
+        axis=axis,
+        block_size=block_size,
+    )
+
+    if input_is_dequantized:
+        tf_layers_dict[graph_node_output.name]['tf_node'] = input_tensor
+    else:
+        if x_zero_point is None or input_tensor_dtype == tf.int32:
+            x_zero_point = tf.zeros_like(x_scale)
+        else:
+            x_zero_point = tf.cast(x_zero_point, tf.float32)
+        x_zero_point = _expand_scale_or_zero_point(
+            value=x_zero_point,
+            input_tensor=input_tensor,
+            axis=axis,
+            block_size=block_size,
+        )
+
+        tf_layers_dict[graph_node_output.name]['tf_node'] = \
+            tf.multiply(
+                x=tf.subtract(input_tensor, x_zero_point),
+                y=x_scale,
+            )
 
     if hasattr(tf_layers_dict[graph_node_output.name]['tf_node'], 'numpy'):
         tf_layers_dict[graph_node_output.name]['tf_node'] = \
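
Note: the `_expand_scale_or_zero_point` helper added above implements the broadcast rules for ONNX per-axis and blocked quantization (the `block_size` attribute): a per-block scale is repeated `block_size` times along the quantization axis, then sliced back to the input shape. Below is a minimal NumPy sketch of that expansion; the shapes and values are made up for illustration and are not taken from the package.

import numpy as np

# Hypothetical example: x is (4, 6), quantized along axis=1 in blocks of 3,
# so the scale carries one value per block and has shape (4, 2).
x = np.arange(24, dtype=np.int8).reshape(4, 6)
scale = np.array([[0.1, 0.2]] * 4, dtype=np.float32)
block_size, axis = 3, 1

# Mirrors tf.repeat + tf.slice in the helper: repeat each block's scale
# block_size times along the axis, then trim to x's shape.
expanded = np.repeat(scale, block_size, axis=axis)[:x.shape[0], :x.shape[1]]
dequantized = (x.astype(np.float32) - 0.0) * expanded  # zero point assumed 0
print(dequantized.shape)  # (4, 6)
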
onnx2tf/ops/DynamicQuantizeLinear.py
CHANGED
@@ -43,6 +43,9 @@ def make_node(
         graph_node.inputs[0],
         before_op_output_shape_trans,
     )
+    input_nhwc = False
+    if isinstance(graph_node_input_1, gs.Variable):
+        input_nhwc = tf_layers_dict.get(graph_node_input_1.name, {}).get('nhwc', False)
     graph_node_output_1: gs.Variable = graph_node.outputs[0]
     o1_shape = graph_node_output_1.shape
     o1_dtype = graph_node_output_1.dtype
@@ -58,6 +61,8 @@ def make_node(
         'optype': graph_node.op,
         'shape': o1_shape,
         'dtype': o1_dtype,
+        'is_dequantized': True,
+        'nhwc': input_nhwc,
     }
     tf_layers_dict[graph_node_output_2.name] = {
         'optype': graph_node.op,
@@ -82,35 +87,31 @@ def make_node(
     )
 
     # Generation of TF OP
-
-
-
-
-    max_x = tf.math.maximum(0., tf.math.reduce_max(input_tensor_1))
+    qmin = 0.0
+    qmax = 255.0
+    min_x = tf.math.minimum(0.0, tf.math.reduce_min(input_tensor_1))
+    max_x = tf.math.maximum(0.0, tf.math.reduce_max(input_tensor_1))
     y_scale = (max_x - min_x) / (qmax - qmin)
     intermediate_zero_point = qmin - (min_x / y_scale)
-
-
-        x=intermediate_zero_point
-    ),
+    clipped_zero_point = tf.clip_by_value(
+        intermediate_zero_point,
         clip_value_min=qmin,
         clip_value_max=qmax,
     )
-
-
-
-
-
-    ),
-    dtype=dtype,
+    y_zero_point = tf.round(clipped_zero_point)
+    y_quant = tf.clip_by_value(
+        tf.round(input_tensor_1 / y_scale) + y_zero_point,
+        clip_value_min=qmin,
+        clip_value_max=qmax,
     )
+    y = (y_quant - y_zero_point) * y_scale
 
     tf_layers_dict[graph_node_output_1.name]['tf_node'] = y
     tf_layers_dict[graph_node_output_2.name]['tf_node'] = y_scale
     tf_layers_dict[graph_node_output_3.name]['tf_node'] = \
         tf.cast(
             x=y_zero_point,
-            dtype=
+            dtype=tf.uint8,
         )
 
     # Post-process transpose
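
Note: the rewrite above makes DynamicQuantizeLinear follow the ONNX uint8 definition (qmin=0, qmax=255) end to end, emitting a fake-quantized float output alongside the scale and a uint8 zero point. A worked NumPy sketch of the same arithmetic on made-up inputs:

import numpy as np

x = np.array([-1.0, 0.0, 2.0, 3.0], dtype=np.float32)  # illustrative values
qmin, qmax = 0.0, 255.0
min_x = min(0.0, float(x.min()))               # adjusted range must include 0
max_x = max(0.0, float(x.max()))
y_scale = (max_x - min_x) / (qmax - qmin)      # 4/255
y_zero_point = np.round(np.clip(qmin - min_x / y_scale, qmin, qmax))  # 64.0
y_quant = np.clip(np.round(x / y_scale) + y_zero_point, qmin, qmax)
y = (y_quant - y_zero_point) * y_scale         # fake-quantized float output
print(y_zero_point.astype(np.uint8), y)
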
onnx2tf/ops/QLinearConcat.py
CHANGED
@@ -10,6 +10,10 @@ from onnx2tf.utils.common_functions import (
     print_node_info,
     inverted_operation_enable_disable,
     make_tf_node_info,
+    pre_process_transpose,
+    replace_parameter,
+    shape_is_equal_ignore_order,
+    transpose_with_flexing_deterrence,
 )
 
 
@@ -35,8 +39,7 @@ def make_node(
     y_zero_point_list = [i for i in graph_node.inputs[1::3]]
     input_list = [i for i in graph_node.inputs[2::3]]
 
-
-    input_tensor_rank = len(input_tensor_shape)
+    input_tensor_rank = len(input_list[0].shape)
 
     before_op_output_shape_trans = True
     for graph_node_input in input_list:
@@ -46,6 +49,9 @@ def make_node(
         before_op_output_shape_trans and before_op_output_shape_trans_n
 
     got_values = []
+    nhwc_flags = []
+    same_input_shape_as_onnxs = []
+    input_is_dequantized_list = []
     got_y_scale_list = []
     got_y_zero_point_list = []
     for input, y_scale, y_zero_point in zip(input_list, y_scale_list, y_zero_point_list):
@@ -55,8 +61,24 @@ def make_node(
         )
         if isinstance(const_or_var, gs.Variable):
             got_values.append(tf_layers_dict[const_or_var.name]['tf_node'])
+            nhwc_flags.append(
+                tf_layers_dict[const_or_var.name].get('nhwc', False)
+            )
+            same_input_shape_as_onnxs.append(
+                True if input.shape is not None and len(input.shape) > 0 \
+                    and input.shape == tf_layers_dict[const_or_var.name]['tf_node'].shape else False
+            )
+            input_is_dequantized_list.append(
+                tf_layers_dict[const_or_var.name].get('is_dequantized', False)
+            )
         else:
             got_values.append(const_or_var)
+            nhwc_flags.append(False)
+            same_input_shape_as_onnxs.append(
+                True if input.shape is not None and len(input.shape) > 0 \
+                    and input.shape == const_or_var.shape else False
+            )
+            input_is_dequantized_list.append(False)
 
     const_or_var = get_constant_or_variable(
         y_scale,
@@ -82,50 +104,247 @@ def make_node(
     dtype = graph_node_output.dtype
 
     axis = graph_node.attrs.get('axis', 0)
-
-
-
-
-
-
+
+    # Shape Unmatched Special Avoidance Workaround
+    if True in same_input_shape_as_onnxs and True in nhwc_flags:
+        before_op_output_shape_trans = True
+        new_values = []
+        for same_input_shape_as_onnx, nhwc_flag, value in zip(same_input_shape_as_onnxs, nhwc_flags, got_values):
+            if same_input_shape_as_onnx and not nhwc_flag:
+                if len(value.shape) == 3:
+                    new_values.append(
+                        transpose_with_flexing_deterrence(
+                            input_tensor=value,
+                            perm=[0, 2, 1],
+                            **kwargs,
+                        )
+                    )
+                elif len(value.shape) == 4:
+                    new_values.append(
+                        transpose_with_flexing_deterrence(
+                            input_tensor=value,
+                            perm=[0, 2, 3, 1],
+                            **kwargs,
+                        )
+                    )
+                elif len(value.shape) == 5:
+                    new_values.append(
+                        transpose_with_flexing_deterrence(
+                            input_tensor=value,
+                            perm=[0, 2, 3, 4, 1],
+                            **kwargs,
+                        )
+                    )
+                else:
+                    new_values.append(value)
+            else:
+                new_values.append(value)
+        got_values = new_values
 
     # Preserving Graph Structure (Dict)
+    nhwc_judge = True
+    for graph_node_input in input_list:
+        if isinstance(graph_node_input, gs.Variable) \
+            and tf_layers_dict.get(graph_node_input.name, {}).get('nhwc', False):
+            nhwc_judge = nhwc_judge and True
+        elif isinstance(graph_node_input, gs.Constant) \
+            and hasattr(graph_node_input, 'values') \
+            and isinstance(graph_node_input.values, np.ndarray):
+            nhwc_judge = nhwc_judge or False
+        else:
+            nhwc_judge = nhwc_judge and False
+
     tf_layers_dict[graph_node_output.name] = {
         'optype': graph_node.op,
         'shape': shape,
         'dtype': dtype,
+        'is_dequantized': True,
     }
+    if nhwc_judge:
+        tf_layers_dict[graph_node_output.name]['nhwc'] = True
 
     # Generation of TF OP
 
+    # NCHW->NHWC, NCDHW->NDHWC
+    axis = convert_axis(
+        axis=axis,
+        tensor_rank=len(shape) if shape is not None else input_tensor_rank,
+        before_op_output_shape_trans=before_op_output_shape_trans,
+    )
+
+    # Param replacement
+    before_axis = axis
+    axis = replace_parameter(
+        value_before_replacement=axis,
+        param_target='attributes',
+        param_name='axis',
+        **kwargs,
+    )
+
     # TensorFlow does not support Concat for scalar values, so convert to tensor
-    values = [
-
-
+    values = []
+    for graph_node_input, value in zip(input_list, got_values):
+        value = pre_process_transpose(
+            value_before_transpose=value,
+            param_target='inputs',
+            param_name=graph_node_input.name,
+            **kwargs,
+        )
+        values.append(value if len(value.shape) > 0 else tf.reshape(value, [1]))
+
+    def _infer_concat_axis(values, output_shape):
+        if not values:
+            return None
+        ranks = []
+        shapes = []
+        for val in values:
+            if val.shape is None or val.shape == tf.TensorShape(None):
+                return None
+            shape_list = list(val.shape)
+            ranks.append(len(shape_list))
+            shapes.append(shape_list)
+        if len(set(ranks)) != 1:
+            return None
+        rank = ranks[0]
+        candidates = []
+        for ax in range(rank):
+            ok = True
+            for dim in range(rank):
+                if dim == ax:
+                    continue
+                base = shapes[0][dim]
+                for s in shapes[1:]:
+                    if base is None or s[dim] is None:
+                        continue
+                    if base != s[dim]:
+                        ok = False
+                        break
+                if not ok:
+                    break
+            if not ok:
+                continue
+            if output_shape is not None and len(output_shape) == rank:
+                out_dim = output_shape[ax]
+                if out_dim is not None:
+                    sum_dim = 0
+                    for s in shapes:
+                        if s[ax] is None:
+                            sum_dim = None
+                            break
+                        sum_dim += s[ax]
+                    if sum_dim is None or sum_dim != out_dim:
+                        continue
+            candidates.append(ax)
+        if len(candidates) == 1:
+            return candidates[0]
+        return None
+
+    inferred_axis = _infer_concat_axis(values, shape if shape is not None else None)
+    if inferred_axis is not None:
+        axis = inferred_axis
     # cast all inputs to float32
     casted_x_list = []
     casted_y_zero_point_list = []
-
+    casted_y_scale_list = []
+    for x, y_scale, y_zero_point in zip(values, got_y_scale_list, got_y_zero_point_list):
         casted_x_list.append(tf.cast(x, tf.float32))
+        casted_y_scale_list.append(tf.cast(y_scale, tf.float32))
         casted_y_zero_point_list.append(tf.cast(y_zero_point, tf.float32))
     # dequantize x with y_scale, y_zero_point
     dequantized_x_list = []
-    for x, y_scale, y_zero_point in zip(
-
-
-
+    for x, y_scale, y_zero_point, is_dequantized in zip(
+        casted_x_list,
+        casted_y_scale_list,
+        casted_y_zero_point_list,
+        input_is_dequantized_list,
+    ):
+        if is_dequantized:
+            dequantized_x_list.append(x)
+        else:
+            dequantized_value = tf.multiply(
+                x=tf.subtract(x, y_zero_point),
                 y=y_scale,
-        )
-
-        )
-        dequantized_x_list.append(dequantized_value)
+            )
+            dequantized_x_list.append(dequantized_value)
 
-
-
-
-
-
-
+    try:
+        tf_layers_dict[graph_node_output.name]['tf_node'] = \
+            tf.concat(
+                values=dequantized_x_list,
+                axis=axis,
+                name=graph_node.name,
+            )
+    except:
+        try:
+            onnx_axis = int(graph_node.attrs.get('axis', 0))
+            tf_layers_dict[graph_node_output.name]['tf_node'] = \
+                tf.concat(
+                    values=dequantized_x_list,
+                    axis=onnx_axis,
+                    name=graph_node.name,
+                )
+            axis = onnx_axis
+        except:
+            value_rank = len(dequantized_x_list[0].shape)
+            succeed = False
+            for idx in reversed(range(value_rank)):
+                try:
+                    tf_layers_dict[graph_node_output.name]['tf_node'] = \
+                        tf.concat(
+                            values=dequantized_x_list,
+                            axis=idx,
+                            name=graph_node.name,
+                        )
+                    axis = idx
+                    succeed = True
+                    break
+                except:
+                    pass
+            if not succeed:
+                raise
+
+    output_tensor_shape = tf_layers_dict[graph_node_output.name]['tf_node'].shape
+    if output_tensor_shape != tf.TensorShape(None):
+        output_tensor_rank = len(output_tensor_shape)
+        if graph_node.outputs[0].shape is not None \
+            and axis != 0 \
+            and output_tensor_rank >= 2 \
+            and before_axis == axis:
+            if not shape_is_equal_ignore_order(list(graph_node.outputs[0].shape), list(output_tensor_shape)):
+                matched_axes = []
+                for dummy_axis in range(1, output_tensor_rank):
+                    try:
+                        dummy_concat_tensor = \
+                            tf.concat(
+                                values=dequantized_x_list,
+                                axis=dummy_axis,
+                                name=graph_node.name,
+                            )
+                        dummy_output_shape = dummy_concat_tensor.shape
+                        if shape_is_equal_ignore_order(list(graph_node.outputs[0].shape), list(dummy_output_shape)):
+                            matched_axes.append(dummy_axis)
+                    except:
+                        pass
+                if len(matched_axes) == 1:
+                    tf_layers_dict[graph_node_output.name]['tf_node'] = \
+                        tf.concat(
+                            values=dequantized_x_list,
+                            axis=matched_axes[0],
+                            name=graph_node.name,
                        )
+                    axis = matched_axes[0]
+                elif not nhwc_judge:
+                    onnx_axis = int(graph_node.attrs.get('axis', 0))
+                    onnx_axis = output_tensor_rank - 1 if onnx_axis == -1 else onnx_axis
+                    if onnx_axis == output_tensor_rank - 1 \
+                        and onnx_axis in matched_axes:
+                        tf_layers_dict[graph_node_output.name]['tf_node'] = \
+                            tf.concat(
+                                values=dequantized_x_list,
+                                axis=onnx_axis,
+                                name=graph_node.name,
                            )
+                        axis = onnx_axis
 
     # Generation of Debug Info
     tf_inputs = {f"input{idx}": dequantized_x for idx, dequantized_x in enumerate(dequantized_x_list)}