onnx2tf 1.29.17__py3-none-any.whl → 1.29.19__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registry.
onnx2tf/__init__.py CHANGED
@@ -1,3 +1,3 @@
1
1
  from onnx2tf.onnx2tf import convert, main
2
2
 
3
- __version__ = '1.29.17'
3
+ __version__ = '1.29.19'
onnx2tf/ops/Col2Im.py CHANGED
@@ -1,9 +1,9 @@
1
+ import sys
1
2
  import random
2
3
  random.seed(0)
3
4
  import numpy as np
4
5
  np.random.seed(0)
5
6
  import tensorflow as tf
6
- import tf_keras
7
7
  import onnx_graphsurgeon as gs
8
8
  from onnx2tf.utils.common_functions import (
9
9
  get_constant_or_variable,
@@ -14,57 +14,32 @@ from onnx2tf.utils.common_functions import (
14
14
  pre_process_transpose,
15
15
  post_process_transpose,
16
16
  )
17
+ from onnx2tf.utils.logging import *
17
18
 
18
19
 
19
- class Col2ImLayer(tf_keras.layers.Layer):
20
- def __init__(self):
21
- super(Col2ImLayer, self).__init__()
20
def _build_col2im_kernel(
    *,
    k_h,
    k_w,
    dilation_h,
    dilation_w,
    dtype,
):
    """Build a fixed one-hot kernel for implementing Col2Im via conv2d_transpose.

    Each of the k_h*k_w flattened column entries is routed to exactly one
    cell of the dilated kernel window, so a transposed convolution with this
    kernel scatter-adds column patches back onto the image plane.

    Returns
    -------
    (kernel, eff_k_h, eff_k_w):
        kernel has shape [eff_k_h, eff_k_w, 1, k_h*k_w]; eff_k_h/eff_k_w are
        the dilated kernel extents.
    """
    k_h = tf.cast(k_h, tf.int32)
    k_w = tf.cast(k_w, tf.int32)
    # Effective (dilated) kernel extent: (k - 1) * dilation + 1.
    eff_k_h = (k_h - 1) * dilation_h + 1
    eff_k_w = (k_w - 1) * dilation_w + 1

    # Row/column index of each kernel tap, laid out as a [k_h, k_w] grid.
    ky = tf.reshape(tf.repeat(tf.range(k_h), k_w), tf.stack([k_h, k_w]))
    kx = tf.reshape(tf.tile(tf.range(k_w), [k_h]), tf.stack([k_h, k_w]))

    # Flat position of each tap inside the dilated eff_k_h x eff_k_w window.
    positions = ky * dilation_h * eff_k_w + kx * dilation_w
    positions = tf.reshape(positions, [-1])
    # One-hot over the dilated window: each tap writes to exactly one cell.
    one_hot = tf.one_hot(positions, depth=eff_k_h * eff_k_w, dtype=dtype)
    kernel = tf.reshape(one_hot, tf.stack([k_h * k_w, eff_k_h, eff_k_w]))
    # Rearrange to conv2d_transpose filter layout [h, w, out_ch=1, in_ch=k_h*k_w].
    kernel = tf.transpose(kernel, [1, 2, 0])
    kernel = tf.expand_dims(kernel, axis=2)
    return kernel, eff_k_h, eff_k_w
68
43
 
69
44
 
70
45
  @print_node_info
@@ -114,10 +89,9 @@ def make_node(
114
89
  input_block_shape = tf_layers_dict[graph_node_input_3.name]['tf_node'] \
115
90
  if isinstance(graph_node_input_3, gs.Variable) else graph_node_input_3
116
91
 
117
- spatial_size = len(input_image_shape)
118
- dilations = graph_node.attrs.get('dilations', [1] * spatial_size)
119
- pads = graph_node.attrs.get('pads', [0, 0] * spatial_size)
120
- strides = graph_node.attrs.get('strides', [1] * spatial_size)
92
+ dilations = graph_node.attrs.get('dilations', [1, 1])
93
+ pads = graph_node.attrs.get('pads', [0, 0, 0, 0])
94
+ strides = graph_node.attrs.get('strides', [1, 1])
121
95
 
122
96
  # Preserving Graph Structure (Dict)
123
97
  tf_layers_dict[graph_node_output.name] = {
@@ -142,17 +116,87 @@ def make_node(
142
116
  tf_layers_dict[graph_node_output.name].pop('nhwc')
143
117
 
144
118
  # Generation of TF OP
145
- col2im = Col2ImLayer()
146
- tf_layers_dict[graph_node_output.name]['tf_node'] = \
147
- col2im(
148
- input_tensor=input_tensor,
149
- input_block_shape=input_block_shape,
150
- strides=strides,
151
- input_image_shape=input_image_shape,
152
- pads=pads,
153
- dilations=dilations,
154
- output_shape=shape,
155
- )
119
+ original_dtype = input_tensor.dtype
120
+ compute_dtype = original_dtype
121
+ if original_dtype not in (tf.float16, tf.float32, tf.float64, tf.bfloat16):
122
+ if original_dtype in (tf.complex64, tf.complex128):
123
+ error('Col2Im does not support complex types in non-Flex implementation.')
124
+ sys.exit(1)
125
+ compute_dtype = tf.float32
126
+ input_tensor = tf.cast(input_tensor, compute_dtype)
127
+
128
+ input_image_shape = tf.cast(input_image_shape, tf.int32)
129
+ input_block_shape = tf.cast(input_block_shape, tf.int32)
130
+
131
+ if input_image_shape.shape is not None \
132
+ and input_image_shape.shape.rank is not None \
133
+ and input_image_shape.shape.rank != 1:
134
+ error('Col2Im supports only 2D image_shape input.')
135
+ sys.exit(1)
136
+
137
+ if input_block_shape.shape is not None \
138
+ and input_block_shape.shape.rank is not None \
139
+ and input_block_shape.shape.rank != 1:
140
+ error('Col2Im supports only 2D block_shape input.')
141
+ sys.exit(1)
142
+
143
+ k_h = input_block_shape[0]
144
+ k_w = input_block_shape[1]
145
+ h_img = input_image_shape[0]
146
+ w_img = input_image_shape[1]
147
+
148
+ stride_h, stride_w = strides
149
+ dilation_h, dilation_w = dilations
150
+ pad_top, pad_left, pad_bottom, pad_right = pads
151
+
152
+ kernel, eff_k_h, eff_k_w = _build_col2im_kernel(
153
+ k_h=k_h,
154
+ k_w=k_w,
155
+ dilation_h=dilation_h,
156
+ dilation_w=dilation_w,
157
+ dtype=compute_dtype,
158
+ )
159
+
160
+ h_pad = h_img + pad_top + pad_bottom
161
+ w_pad = w_img + pad_left + pad_right
162
+
163
+ out_h = tf.math.floordiv(h_pad - eff_k_h, stride_h) + 1
164
+ out_w = tf.math.floordiv(w_pad - eff_k_w, stride_w) + 1
165
+
166
+ input_shape = tf.shape(input_tensor)
167
+ n = input_shape[0]
168
+ ck = input_shape[1]
169
+ c = tf.math.floordiv(ck, k_h * k_w)
170
+
171
+ cols = tf.reshape(
172
+ input_tensor,
173
+ tf.stack([n, c, k_h * k_w, out_h, out_w]),
174
+ )
175
+ cols = tf.transpose(cols, [0, 1, 3, 4, 2])
176
+ cols = tf.reshape(cols, tf.stack([n * c, out_h, out_w, k_h * k_w]))
177
+
178
+ output_shape = tf.stack([n * c, h_pad, w_pad, 1])
179
+ output = tf.nn.conv2d_transpose(
180
+ cols,
181
+ kernel,
182
+ output_shape=output_shape,
183
+ strides=[1, stride_h, stride_w, 1],
184
+ padding='VALID',
185
+ )
186
+
187
+ output = tf.reshape(output, tf.stack([n, c, h_pad, w_pad]))
188
+ output = tf.transpose(output, [0, 2, 3, 1])
189
+
190
+ output = tf.slice(
191
+ output,
192
+ tf.stack([0, pad_top, pad_left, 0]),
193
+ tf.stack([-1, h_img, w_img, -1]),
194
+ )
195
+
196
+ if output.dtype != original_dtype:
197
+ output = tf.cast(output, original_dtype)
198
+
199
+ tf_layers_dict[graph_node_output.name]['tf_node'] = output
156
200
 
157
201
  # Post-process transpose
158
202
  before_trans_shape = tf_layers_dict[graph_node_output.name]['tf_node'].shape
onnx2tf/ops/DFT.py ADDED
@@ -0,0 +1,245 @@
1
+ import sys
2
+ import random
3
+ random.seed(0)
4
+ import numpy as np
5
+ np.random.seed(0)
6
+ import tensorflow as tf
7
+ import onnx_graphsurgeon as gs
8
+ from onnx2tf.utils.common_functions import (
9
+ get_constant_or_variable,
10
+ print_node_info,
11
+ inverted_operation_enable_disable,
12
+ make_tf_node_info,
13
+ get_replacement_parameter,
14
+ pre_process_transpose,
15
+ post_process_transpose,
16
+ )
17
+ from onnx2tf.utils.logging import *
18
+
19
+
20
def _normalize_axis(axis, rank):
    """Map a possibly-negative axis index to its non-negative equivalent."""
    axis_i32 = tf.cast(axis, tf.int32)
    rank_i32 = tf.cast(rank, tf.int32)
    is_negative = axis_i32 < 0
    return tf.where(is_negative, axis_i32 + rank_i32, axis_i32)
24
+
25
+
26
def _move_axis_to_last(x, axis):
    """Transpose `x` so that `axis` becomes the trailing dimension.

    Returns the transposed tensor together with the inverse permutation
    that restores the original axis order.
    """
    ndims = tf.rank(x)
    pos = _normalize_axis(axis, ndims)
    before = tf.range(pos)
    after = tf.range(pos + 1, ndims)
    perm = tf.concat([before, after, [pos]], axis=0)
    moved = tf.transpose(x, perm)
    restore_perm = tf.argsort(perm)
    return moved, restore_perm
35
+
36
+
37
def _pad_or_slice_last(x, length):
    """Zero-pad or truncate the last dimension of `x` to exactly `length`."""
    target = tf.cast(length, tf.int32)
    actual = tf.shape(x)[-1]

    def _zero_pad():
        # Pad only at the end of the last axis; all other axes untouched.
        missing = target - actual
        paddings = tf.concat(
            [
                tf.zeros([tf.rank(x) - 1, 2], dtype=tf.int32),
                tf.stack([[0, missing]]),
            ],
            axis=0,
        )
        return tf.pad(x, paddings)

    def _truncate():
        return x[..., :target]

    # At most one of the two conds rewrites x; both no-op when lengths match.
    x = tf.cond(actual < target, _zero_pad, lambda: x)
    x = tf.cond(actual > target, _truncate, lambda: x)
    return x
58
+
59
+
60
@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
    *,
    graph_node: gs.Node,
    tf_layers_dict: dict,
    **kwargs: dict,
):
    """DFT

    Convert ONNX DFT into tf.signal FFT ops. The ONNX complex layout
    (trailing dimension of size 1 for a real signal, 2 for real/imag
    pairs) is unpacked into a tf complex tensor, transformed along the
    resolved axis, and repacked into the trailing-pair layout.

    Parameters
    ----------
    graph_node: gs.Node
        graph_surgeon Node

    tf_layers_dict: dict
        optype, shape, dtype, tensorflow graph
    """
    before_op_output_shape_trans_1 = \
        tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)

    # Input 1: signal; input 2: dft_length (optional); input 3: axis (optional).
    graph_node_input_1 = get_constant_or_variable(
        graph_node.inputs[0],
        before_op_output_shape_trans_1,
    )
    graph_node_input_2 = get_constant_or_variable(
        graph_node.inputs[1],
        before_op_output_shape_trans=False,
    ) if len(graph_node.inputs) >= 2 else None
    graph_node_input_3 = get_constant_or_variable(
        graph_node.inputs[2],
        before_op_output_shape_trans=False,
    ) if len(graph_node.inputs) >= 3 else None

    graph_node_output: gs.Variable = graph_node.outputs[0]
    shape = graph_node_output.shape
    dtype = graph_node_output.dtype

    input_tensor = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
        if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
    dft_length = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
        if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
    axis = tf_layers_dict[graph_node_input_3.name]['tf_node'] \
        if isinstance(graph_node_input_3, gs.Variable) else graph_node_input_3

    onesided = bool(graph_node.attrs.get('onesided', 0))
    inverse = bool(graph_node.attrs.get('inverse', 0))

    # Preserving Graph Structure (Dict)
    tf_layers_dict[graph_node_output.name] = {
        'optype': graph_node.op,
        'shape': shape,
        'dtype': dtype,
        'nhwc': tf_layers_dict[graph_node_input_1.name]['nhwc'] \
            if isinstance(graph_node_input_1, gs.Variable) \
            and 'nhwc' in tf_layers_dict[graph_node_input_1.name].keys() else False,
    }

    # Pre-process transpose
    input_tensor = pre_process_transpose(
        value_before_transpose=input_tensor,
        param_target='inputs',
        param_name=graph_node.inputs[0].name,
        **kwargs,
    )

    # Resolve the transform axis: the axis input tensor wins, then the
    # `axis` attribute, then rank-2 (the dimension just before the
    # trailing real/imag pair dimension).
    # NOTE(review): ONNX opset-17 documents the attribute default as 1,
    # which equals rank-2 only for rank-3 inputs — confirm for higher ranks.
    input_rank = tf.rank(input_tensor)
    axis_attr = graph_node.attrs.get('axis', None)
    if axis is None:
        if axis_attr is not None:
            axis = tf.constant(axis_attr, dtype=tf.int32)
        else:
            axis = tf.cast(input_rank - 2, tf.int32)
    else:
        axis = tf.cast(tf.reshape(axis, []), tf.int32)
    axis = _normalize_axis(axis, input_rank)

    # Optional explicit transform length (scalarized).
    dft_length_value = None
    if dft_length is not None:
        dft_length_value = tf.cast(tf.reshape(dft_length, []), tf.int32)

    # Select compute dtypes; float16/bfloat16 are computed in float32 and
    # cast back to the input dtype at the end.
    input_dtype = input_tensor.dtype
    if input_dtype in (tf.float64,):
        float_dtype = tf.float64
        complex_dtype = tf.complex128  # NOTE(review): assigned but never used below
    elif input_dtype in (tf.float32,):
        float_dtype = tf.float32
        complex_dtype = tf.complex64
    elif input_dtype in (tf.float16, tf.bfloat16):
        float_dtype = tf.float32
        complex_dtype = tf.complex64
    else:
        error('DFT supports float/bfloat16 types only.')
        sys.exit(1)

    # Convert to complex tensor (drop last dim): trailing size 1 means a
    # real-only signal, trailing size 2 means interleaved real/imag parts.
    last_dim_static = input_tensor.shape[-1]
    if last_dim_static is not None:
        if last_dim_static == 1:
            real = tf.squeeze(input_tensor, axis=-1)
            imag = tf.zeros_like(real)
        elif last_dim_static == 2:
            real, imag = tf.unstack(input_tensor, axis=-1)
        else:
            error('DFT input last dimension must be 1 or 2.')
            sys.exit(1)
    else:
        # Last dim unknown at graph-build time: branch at run time.
        # NOTE(review): here last_dim_static is None, so the static
        # onesided real/complex validation further below can never fire
        # for dynamic shapes — confirm whether that should be rejected.
        last_dim = tf.shape(input_tensor)[-1]
        def _real_case():
            real = tf.squeeze(input_tensor, axis=-1)
            imag = tf.zeros_like(real)
            return real, imag
        def _complex_case():
            real, imag = tf.unstack(input_tensor, axis=-1)
            return real, imag
        real, imag = tf.cond(
            tf.equal(last_dim, 1),
            _real_case,
            _complex_case,
        )

    real = tf.cast(real, float_dtype)
    imag = tf.cast(imag, float_dtype)
    signal = tf.complex(real, imag)

    # tf.signal FFT ops transform the innermost dimension, so move the
    # target axis to the end; inv_perm restores the layout afterwards.
    signal_t, inv_perm = _move_axis_to_last(signal, axis)
    signal_len = tf.shape(signal_t)[-1]

    if onesided and inverse:
        # One-sided inverse: complex half-spectrum -> real signal (irfft).
        if last_dim_static == 1:
            error('DFT onesided inverse supports only complex input.')
            sys.exit(1)
        if dft_length_value is None:
            # Default irfft output length for an n-bin half-spectrum: 2*(n-1).
            dft_length_value = tf.cast(signal_len * 2 - 2, tf.int32)
        out_real = tf.signal.irfft(signal_t, fft_length=[dft_length_value])
        out_real = tf.transpose(out_real, inv_perm)
        # Repack as ONNX layout with a trailing size-1 (real) dimension.
        output = tf.expand_dims(out_real, axis=-1)
    elif onesided and not inverse:
        # One-sided forward: real signal -> complex half-spectrum (rfft).
        if last_dim_static == 2:
            error('DFT onesided forward supports only real input.')
            sys.exit(1)
        if dft_length_value is None:
            dft_length_value = tf.cast(signal_len, tf.int32)
        real_signal = tf.math.real(signal_t)
        out_complex = tf.signal.rfft(real_signal, fft_length=[dft_length_value])
        out_complex = tf.transpose(out_complex, inv_perm)
        # Repack as ONNX layout with a trailing size-2 (real, imag) dimension.
        output = tf.stack([tf.math.real(out_complex), tf.math.imag(out_complex)], axis=-1)
    else:
        # Two-sided transform: pad/trim to dft_length, then fft/ifft.
        if dft_length_value is not None:
            signal_t = _pad_or_slice_last(signal_t, dft_length_value)
        if inverse:
            out_complex = tf.signal.ifft(signal_t)
        else:
            out_complex = tf.signal.fft(signal_t)
        out_complex = tf.transpose(out_complex, inv_perm)
        output = tf.stack([tf.math.real(out_complex), tf.math.imag(out_complex)], axis=-1)

    # Cast back for float16/bfloat16 inputs that were computed in float32.
    if output.dtype != input_dtype:
        output = tf.cast(output, input_dtype)

    # Post-process transpose
    tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
        value_before_transpose=output,
        param_target='outputs',
        param_name=graph_node.outputs[0].name,
        **kwargs,
    )

    # Generation of Debug Info
    tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
        make_tf_node_info(
            node_info={
                'tf_op_type': 'DFT',
                'tf_inputs': {
                    'input': input_tensor,
                    'axis': axis,
                    'dft_length': dft_length_value,
                    'onesided': onesided,
                    'inverse': inverse,
                },
                'tf_outputs': {
                    'output': tf_layers_dict[graph_node_output.name]['tf_node'],
                },
            }
        )