onnx2tf 1.29.6__py3-none-any.whl → 1.29.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnx2tf/__init__.py +1 -1
- onnx2tf/ops/GridSample.py +466 -369
- {onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/METADATA +10 -7
- {onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/RECORD +8 -8
- {onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/WHEEL +0 -0
- {onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/licenses/LICENSE +0 -0
- {onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/licenses/LICENSE_onnx-tensorflow +0 -0
- {onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/top_level.txt +0 -0
onnx2tf/__init__.py
CHANGED
onnx2tf/ops/GridSample.py
CHANGED
@@ -101,10 +101,12 @@ def make_node(
     )

     align_corners = bool(graph_node.attrs.get('align_corners', 0))
-    mode = graph_node.attrs.get('mode', '
+    mode = graph_node.attrs.get('mode', 'linear')
     padding_mode = graph_node.attrs.get('padding_mode', 'zeros')

-    …
+    if mode == 'bilinear':
+        mode = 'linear'
+    ENABLE_MODES = ['linear', 'nearest', 'cubic']
     if mode not in ENABLE_MODES:
         error(
             f'The current implementation of GridSample supports only mode={ENABLE_MODES}. '+
@@ -113,7 +115,7 @@ def make_node(
         )
         sys.exit(1)

-    ENABLE_PADDING_MODES = ['zeros']
+    ENABLE_PADDING_MODES = ['zeros', 'border', 'reflection']
     if padding_mode not in ENABLE_PADDING_MODES:
         error(
             f'The current implementation of GridSample supports only mode={ENABLE_PADDING_MODES}. '+
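With both validation hunks applied, GridSample nodes that declare mode in {linear, nearest, cubic} (or the legacy spelling bilinear, which the converter now aliases to linear) and padding_mode in {zeros, border, reflection} pass the attribute check. A minimal sketch of building such nodes with the standard onnx.helper API; the input/output names are illustrative and not taken from the package:

from onnx import helper

# Opset-20 spelling of the interpolation mode, with one of the newly accepted padding modes.
node_a = helper.make_node(
    'GridSample', inputs=['X', 'grid'], outputs=['Y'],
    mode='linear', padding_mode='border', align_corners=1,
)
# Legacy spelling; onnx2tf 1.29.8 rewrites 'bilinear' to 'linear' before validating.
node_b = helper.make_node(
    'GridSample', inputs=['X', 'grid'], outputs=['Y'],
    mode='bilinear', padding_mode='reflection', align_corners=0,
)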
@@ -166,390 +168,485 @@ def make_node(
     )

     # Generation of TF OP
+    use_linear_gather_2d = padding_mode in ['zeros', 'border', 'reflection'] \
+        and mode in ['linear', 'nearest', 'cubic']
+    use_linear_gather_3d = padding_mode in ['zeros', 'border', 'reflection'] \
+        and mode in ['linear', 'nearest', 'cubic']
     """
     image
         [N, H, W, C]
     grid
         [N, grid_H, grid_W, 2]
     """
-    def
-    …
+    def _reflect_coord(coord, size, align_corners):
+        size = tf.cast(size, coord.dtype)
         if align_corners:
-    …
+            size_minus_one = tf.maximum(size - 1.0, 1.0)
+            coord = (size - 1.0) - tf.abs(
+                tf.math.floormod(coord, 2.0 * size_minus_one) - (size - 1.0)
+            )
         else:
-    …
-        w_y1_x1 = tf.math.multiply(dy, dx)
-        w_y0_x1 = tf.math.multiply(1.0 - dy, dx)
-
-        # input - [N, H_in, W_in, C]
-        # grid - [N, H_out, W_out, 2]
-        # output - [N, H_out, W_out, C]
-        v_y0_x0 = tf.gather_nd(params=image, indices=tf.cast(tf.concat([y0, x0], axis=-1), dtype=tf.int64), batch_dims=1)
-        v_y1_x0 = tf.gather_nd(params=image, indices=tf.cast(tf.concat([y1, x0], axis=-1), dtype=tf.int64), batch_dims=1)
-        v_y1_x1 = tf.gather_nd(params=image, indices=tf.cast(tf.concat([y1, x1], axis=-1), dtype=tf.int64), batch_dims=1)
-        v_y0_x1 = tf.gather_nd(params=image, indices=tf.cast(tf.concat([y0, x1], axis=-1), dtype=tf.int64), batch_dims=1)
-
-        output = w_y0_x0 * v_y0_x0 + w_y1_x0 * v_y1_x0 + w_y1_x1 * v_y1_x1 + w_y0_x1 * v_y0_x1
-
-        x_invalid = tf.math.logical_or(
-            tf.math.less(x, tf.convert_to_tensor(0.0, dtype=tf.float32)),
-            tf.math.greater(x, tf.convert_to_tensor(w_in - 1.0, dtype=tf.float32))
-        )
-        y_invalid = tf.math.logical_or(
-            tf.math.less(y, tf.convert_to_tensor(0.0, dtype=tf.float32)),
-            tf.math.greater(y, tf.convert_to_tensor(h_in - 1.0, dtype=tf.float32))
-        )
-        invalid = tf.math.logical_or(x_invalid, y_invalid)
-
-        output = tf.where(
-            condition=invalid,
-            x=tf.convert_to_tensor(0.0, dtype=tf.float32),
-            y=output,
-            name=target_name,
+            coord = size - tf.abs(
+                tf.math.floormod(coord + 0.5, 2.0 * size) - size
+            ) - 0.5
+        coord = tf.clip_by_value(coord, 0.0, size - 1.0)
+        return coord
+
+    def _normalize_grid(grid_coord, size, align_corners):
+        size = tf.cast(size, grid_coord.dtype)
+        if align_corners:
+            return (grid_coord + 1.0) * (size - 1.0) * 0.5
+        return (grid_coord + 1.0) * size * 0.5 - 0.5
+
+    def _cubic_kernel(x, a=-0.75):
+        absx = tf.abs(x)
+        absx2 = absx * absx
+        absx3 = absx2 * absx
+        f1 = (a + 2.0) * absx3 - (a + 3.0) * absx2 + 1.0
+        f2 = a * absx3 - 5.0 * a * absx2 + 8.0 * a * absx - 4.0 * a
+        return tf.where(
+            absx <= 1.0,
+            f1,
+            tf.where(absx < 2.0, f2, tf.zeros_like(x)),
         )
-        return output
-
-    def define_accurate_gridsample(image, grid, align_corners, target_name):
-        split11, split12 = tf.split(grid, num_or_size_splits=2, axis=3) # x, y

-    …
+    def _prepare_linear_gather_2d(input_tensor):
+        shape = tf.shape(input_tensor)
+        h = shape[1]
+        w = shape[2]
+        c = shape[3]
+        input_flat = tf.reshape(input_tensor, tf.stack([shape[0], h * w, c]))
+        return input_flat, h, w
+
+    def _prepare_linear_gather_3d(input_tensor):
+        shape = tf.shape(input_tensor)
+        d = shape[1]
+        h = shape[2]
+        w = shape[3]
+        c = shape[4]
+        input_flat = tf.reshape(input_tensor, tf.stack([shape[0], d * h * w, c]))
+        return input_flat, d, h, w
+
+    def _gather_1d(input_tensor, x_idx):
+        idx = tf.cast(x_idx, tf.int64)
+        return tf.gather_nd(input_tensor, idx, batch_dims=1)
+
+    def _gather_2d(input_tensor, y_idx, x_idx, linear_cache=None):
+        if use_linear_gather_2d:
+            if linear_cache is None:
+                input_flat, _, w = _prepare_linear_gather_2d(input_tensor)
+            else:
+                input_flat, _, w = linear_cache
+            w_f = tf.cast(w, y_idx.dtype)
+            linear = tf.cast(y_idx * w_f + x_idx, tf.int32)
+            linear = tf.squeeze(linear, axis=-1)
+            return tf.gather(params=input_flat, indices=linear, batch_dims=1)
+        idx = tf.cast(tf.concat([y_idx, x_idx], axis=-1), tf.int64)
+        return tf.gather_nd(input_tensor, idx, batch_dims=1)
+
+    def _gather_3d(input_tensor, z_idx, y_idx, x_idx, linear_cache=None):
+        if use_linear_gather_3d:
+            if linear_cache is None:
+                input_flat, d, h, w = _prepare_linear_gather_3d(input_tensor)
+            else:
+                input_flat, d, h, w = linear_cache
+            w_f = tf.cast(w, z_idx.dtype)
+            h_f = tf.cast(h, z_idx.dtype)
+            linear = z_idx * (h_f * w_f) + y_idx * w_f + x_idx
+            linear = tf.cast(linear, tf.int32)
+            linear = tf.squeeze(linear, axis=-1)
+            return tf.gather(params=input_flat, indices=linear, batch_dims=1)
+        idx = tf.cast(tf.concat([z_idx, y_idx, x_idx], axis=-1), tf.int64)
+        return tf.gather_nd(input_tensor, idx, batch_dims=1)
+
+    def _grid_sample_1d(image, grid, target_name):
+        w_in = tf.shape(image)[1]
+        w_in_f = tf.cast(w_in, grid.dtype)
+        x = _normalize_grid(grid, w_in_f, align_corners)
+
+        if padding_mode == 'border':
+            x = tf.clip_by_value(x, 0.0, w_in_f - 1.0)
+            max_x = w_in_f - 1.0
+        elif padding_mode == 'reflection':
+            x = _reflect_coord(x, w_in_f, align_corners)
+            max_x = w_in_f - 1.0
         else:
-    …
+            pad = 2 if mode == 'cubic' else 1
+            x = tf.clip_by_value(x, -float(pad), w_in_f - 1.0 + float(pad)) + float(pad)
+            image = tf.pad(image, paddings=[[0,0],[pad,pad],[0,0]])
+            max_x = tf.cast(w_in + 2 * pad - 1, grid.dtype)
+
+        if mode == 'nearest':
+            x0 = tf.round(x)
+            if padding_mode == 'reflection':
+                x0 = _reflect_coord(x0, w_in_f, align_corners)
+            x0 = tf.clip_by_value(x0, 0.0, max_x)
+            output = _gather_1d(image, x0)
+            return tf.identity(output, name=target_name)
+
+        if mode == 'cubic':
+            x1 = tf.floor(x)
+            dx = x - x1
+            x0 = x1 - 1.0
+            x2 = x1 + 1.0
+            x3 = x1 + 2.0
+
+            if padding_mode == 'reflection':
+                x0 = _reflect_coord(x0, w_in_f, align_corners)
+                x1 = _reflect_coord(x1, w_in_f, align_corners)
+                x2 = _reflect_coord(x2, w_in_f, align_corners)
+                x3 = _reflect_coord(x3, w_in_f, align_corners)
+
+            x0 = tf.clip_by_value(x0, 0.0, max_x)
+            x1 = tf.clip_by_value(x1, 0.0, max_x)
+            x2 = tf.clip_by_value(x2, 0.0, max_x)
+            x3 = tf.clip_by_value(x3, 0.0, max_x)
+
+            w0 = _cubic_kernel(dx + 1.0)
+            w1 = _cubic_kernel(dx)
+            w2 = _cubic_kernel(dx - 1.0)
+            w3 = _cubic_kernel(dx - 2.0)
+
+            v0 = _gather_1d(image, x0)
+            v1 = _gather_1d(image, x1)
+            v2 = _gather_1d(image, x2)
+            v3 = _gather_1d(image, x3)
+            output = w0 * v0 + w1 * v1 + w2 * v2 + w3 * v3
+            return tf.identity(output, name=target_name)
+
+        x0 = tf.floor(x)
+        x1 = x0 + 1.0
+        dx = x - x0
+
+        if padding_mode == 'reflection':
+            x0 = _reflect_coord(x0, w_in_f, align_corners)
+            x1 = _reflect_coord(x1, w_in_f, align_corners)
+
+        x0 = tf.clip_by_value(x0, 0.0, max_x)
+        x1 = tf.clip_by_value(x1, 0.0, max_x)
+
+        w0 = 1.0 - dx
+        w1 = dx
+        v0 = _gather_1d(image, x0)
+        v1 = _gather_1d(image, x1)
+        output = w0 * v0 + w1 * v1
+        return tf.identity(output, name=target_name)
+
+    def _grid_sample_2d(image, grid, target_name):
+        h_in = tf.shape(image)[1]
+        w_in = tf.shape(image)[2]
+        h_in_f = tf.cast(h_in, grid.dtype)
+        w_in_f = tf.cast(w_in, grid.dtype)
+        grid_x, grid_y = tf.split(grid, num_or_size_splits=2, axis=-1)
+        x = _normalize_grid(grid_x, w_in_f, align_corners)
+        y = _normalize_grid(grid_y, h_in_f, align_corners)
+
+        if padding_mode == 'border':
+            x = tf.clip_by_value(x, 0.0, w_in_f - 1.0)
+            y = tf.clip_by_value(y, 0.0, h_in_f - 1.0)
+            max_x = w_in_f - 1.0
+            max_y = h_in_f - 1.0
+        elif padding_mode == 'reflection':
+            x = _reflect_coord(x, w_in_f, align_corners)
+            y = _reflect_coord(y, h_in_f, align_corners)
+            max_x = w_in_f - 1.0
+            max_y = h_in_f - 1.0
         else:
-    …
-        tf.
-    …
-        )
-    …
-        )
-    …
-        tf.
-    …
-        )
-    …
-        # Where_6_output_0 Constant_32_output_0 -> Greater_3_output_0
-        greter33 = tf.greater(where331, tf.convert_to_tensor(image.shape[1]+1, dtype=tf.int64)) # Greater_3_output_0
-        # Greater_3_output_0 Constant_32_output_0 Where_6_output_0 -> Where_7_output_0
-        where332 = \
-            tf.where(
-                condition=greter33,
-                x=tf.convert_to_tensor(image.shape[1]+1, dtype=tf.int64),
-                y=where331,
-            )
-        # Where_7_output_0 Constant_37_output_0 -> Mul_8_output_0
-        mul33 = tf.math.multiply(where332, tf.convert_to_tensor(image.shape[2]+2, dtype=tf.int64)) # Mul_8_output_0
-
-
-        # Add_3_output_0 -> Cast_3_output_0
-        cast34 = tf.cast(add22, dtype=tf.int64) # Cast_3_output_0
-        # Cast_3_output_0 Constant_26_output_0 -> Less_2_output_0
-        less34 = tf.less(cast34, tf.convert_to_tensor(0, dtype=tf.int64)) # Less_2_output_0
-        # Less_2_output_0 Constant_26_output_0 Cast_3_output_0 -> Where_4_output_0
-        where341 = \
-            tf.where(
-                condition=less34,
-                x=tf.convert_to_tensor(0, dtype=tf.int64),
-                y=cast34,
-            )
-        # Where_4_output_0 Constant_32_output_0 -> Greater_2_output_0
-        greter34 = tf.greater(where341, tf.convert_to_tensor(image.shape[1]+1, dtype=tf.int64)) # Greater_2_output_0
-        # Greater_2_output_0 Constant_32_output_0 Where_4_output_0 -> Where_5_output_0
-        where342 = \
-            tf.where(
-                condition=greter34,
-                x=tf.convert_to_tensor(image.shape[1]+1, dtype=tf.int64),
-                y=where341,
-            )
-        # Where_5_output_0 Constant_37_output_0 -> Mul_6_output_0
-        mul34 = tf.math.multiply(where342, tf.convert_to_tensor(image.shape[2]+2, dtype=tf.int64)) # Mul_6_output_0
-
-
-        # Where_3_output_0 Mul_6_output_0 -> Add_8_output_0
-        add41 = tf.math.add(where312, mul34) # Add_8_output_0
-        # Add_8_output_0 Constant_11_output_0 -> Unsqueeze_6_output_0
-        unsqueeze41 = tf.expand_dims(add41, axis=1) # Unsqueeze_6_output_0
-        # Unsqueeze_6_output_0 Where_8_output_0 -> Expand_2_output_0
-        expand41_ones = tf.ones([1] + [image.shape[3]] + [1], dtype=tf.int64)
-        expand41 = tf.math.multiply(unsqueeze41, expand41_ones) # Expand_2_output_0
-
-        # Where_1_output_0 Mul_6_output_0 -> Add_6_output_0
-        add42 = tf.math.add(where322, mul34) # Add_6_output_0
-        # Add_6_output_0 Constant_11_output_0 -> Unsqueeze_4_output_0
-        unsqueeze42 = tf.expand_dims(add42, axis=1) # Unsqueeze_4_output_0
-        # Unsqueeze_4_output_0 Where_8_output_0 -> Expand_output_0
-        expand42_ones = tf.ones([1] + [image.shape[3]] + [1], dtype=tf.int64)
-        expand42 = tf.math.multiply(unsqueeze42, expand42_ones) # Expand_output_0
-
-        # Where_3_output_0 Mul_8_output_0 -> Add_9_output_0
-        add43 = tf.math.add(where312, mul33) # Add_9_output_0
-        # Add_9_output_0 Constant_11_output_0 -> Unsqueeze_7_output_0
-        unsqueeze43 = tf.expand_dims(add43, axis=1) # Unsqueeze_7_output_0
-        # Unsqueeze_7_output_0 Where_8_output_0 -> Expand_3_output_0
-        expand43_ones = tf.ones([1] + [image.shape[3]] + [1], dtype=tf.int64)
-        expand43 = tf.math.multiply(unsqueeze43, expand43_ones) # Expand_3_output_0
-
-        # Where_1_output_0 Mul_8_output_0 -> Add_7_output_0
-        add44 = tf.math.add(where322, mul33) # Add_7_output_0
-        # Add_7_output_0 Constant_11_output_0 -> Unsqueeze_5_output_0
-        unsqueeze44 = tf.expand_dims(add44, axis=1) # Unsqueeze_5_output_0
-        # Unsqueeze_5_output_0 Where_8_output_0 -> Expand_1_output_0
-        expand44_ones = tf.ones([1] + [image.shape[3]] + [1], dtype=tf.int64)
-        expand44 = tf.math.multiply(unsqueeze44, expand44_ones) # Expand_1_output_0
-
-
-        ################################################## image
-        image_padded = tf.pad(image, paddings=[[0,0],[1,1],[1,1],[0,0]]) # Pad_output_0
-        # Pad_output_0 Constant_36_output_0 -> Reshape_4_output_0
-        image_reshape = tf.reshape(image_padded, shape=[image_padded.shape[0]] + [np.prod(image_padded.shape[1:3])] + [image_padded.shape[3]])
-        image_reshape_transpose = tf.transpose(image_reshape, perm=[0,2,1])
-
-
-        # Reshape_4_output_0 Expand_3_output_0 -> GatherElements_3_output_0
-        axis_perm1 = tf.tensor_scatter_nd_update(
-            tf.range(tf.rank(image_reshape_transpose)),
-            tf.constant([[0], [2]]),
-            tf.constant([2, 0])
-        )
-        data_swaped1 = tf.transpose(image_reshape_transpose, perm=axis_perm1)
-        index_swaped1 = tf.transpose(expand43, perm=axis_perm1)
-        idx_tensors_per_axis1 = [
-            tf.range(tf.shape(index_swaped1, index_swaped1.dtype)[i]) \
-            for i in range(index_swaped1.shape.rank)
-        ]
-        idx_tensors_per_axis1 = tf.meshgrid(
-            *idx_tensors_per_axis1,
-            indexing='ij',
-        )
-        idx_tensors_per_axis1[0] = index_swaped1
-        dim_expanded_idx_tensors_per_axis1 = [
-            tf.expand_dims(idx_tensor, axis=-1)
-            for idx_tensor in idx_tensors_per_axis1
-        ]
-        index_expanded1 = tf.concat(dim_expanded_idx_tensors_per_axis1, axis=-1)
-        gathernd1 = tf.gather_nd(data_swaped1, index_expanded1)
-        gatherelements1 = tf.transpose(gathernd1, perm=[2,1,0]) # GatherElements_3_output_0
-
+            pad = 2 if mode == 'cubic' else 1
+            x = tf.clip_by_value(x, -float(pad), w_in_f - 1.0 + float(pad)) + float(pad)
+            y = tf.clip_by_value(y, -float(pad), h_in_f - 1.0 + float(pad)) + float(pad)
+            image = tf.pad(image, paddings=[[0,0],[pad,pad],[pad,pad],[0,0]])
+            max_x = tf.cast(w_in + 2 * pad - 1, grid.dtype)
+            max_y = tf.cast(h_in + 2 * pad - 1, grid.dtype)
+
+        linear_cache = _prepare_linear_gather_2d(image) if use_linear_gather_2d else None
+
+        if mode == 'nearest':
+            x0 = tf.round(x)
+            y0 = tf.round(y)
+            if padding_mode == 'reflection':
+                x0 = _reflect_coord(x0, w_in_f, align_corners)
+                y0 = _reflect_coord(y0, h_in_f, align_corners)
+            x0 = tf.clip_by_value(x0, 0.0, max_x)
+            y0 = tf.clip_by_value(y0, 0.0, max_y)
+            output = _gather_2d(image, y0, x0, linear_cache)
+            return tf.identity(output, name=target_name)
+
+        if mode == 'cubic':
+            x1 = tf.floor(x)
+            y1 = tf.floor(y)
+            dx = x - x1
+            dy = y - y1
+            x0 = x1 - 1.0
+            x2 = x1 + 1.0
+            x3 = x1 + 2.0
+            y0 = y1 - 1.0
+            y2 = y1 + 1.0
+            y3 = y1 + 2.0
+
+            if padding_mode == 'reflection':
+                x0 = _reflect_coord(x0, w_in_f, align_corners)
+                x1 = _reflect_coord(x1, w_in_f, align_corners)
+                x2 = _reflect_coord(x2, w_in_f, align_corners)
+                x3 = _reflect_coord(x3, w_in_f, align_corners)
+                y0 = _reflect_coord(y0, h_in_f, align_corners)
+                y1 = _reflect_coord(y1, h_in_f, align_corners)
+                y2 = _reflect_coord(y2, h_in_f, align_corners)
+                y3 = _reflect_coord(y3, h_in_f, align_corners)
+
+            x0 = tf.clip_by_value(x0, 0.0, max_x)
+            x1 = tf.clip_by_value(x1, 0.0, max_x)
+            x2 = tf.clip_by_value(x2, 0.0, max_x)
+            x3 = tf.clip_by_value(x3, 0.0, max_x)
+            y0 = tf.clip_by_value(y0, 0.0, max_y)
+            y1 = tf.clip_by_value(y1, 0.0, max_y)
+            y2 = tf.clip_by_value(y2, 0.0, max_y)
+            y3 = tf.clip_by_value(y3, 0.0, max_y)
+
+            wx0 = _cubic_kernel(dx + 1.0)
+            wx1 = _cubic_kernel(dx)
+            wx2 = _cubic_kernel(dx - 1.0)
+            wx3 = _cubic_kernel(dx - 2.0)
+            wy0 = _cubic_kernel(dy + 1.0)
+            wy1 = _cubic_kernel(dy)
+            wy2 = _cubic_kernel(dy - 1.0)
+            wy3 = _cubic_kernel(dy - 2.0)
+
+            out = 0.0
+            for x_idx, wx in [(x0, wx0), (x1, wx1), (x2, wx2), (x3, wx3)]:
+                for y_idx, wy in [(y0, wy0), (y1, wy1), (y2, wy2), (y3, wy3)]:
+                    out = out + _gather_2d(image, y_idx, x_idx, linear_cache) * wx * wy
+            return tf.identity(out, name=target_name)
+
+        x0 = tf.floor(x)
+        y0 = tf.floor(y)
+        x1 = x0 + 1.0
+        y1 = y0 + 1.0
+        dx = x - x0
+        dy = y - y0
+
+        if padding_mode == 'reflection':
+            x0 = _reflect_coord(x0, w_in_f, align_corners)
+            x1 = _reflect_coord(x1, w_in_f, align_corners)
+            y0 = _reflect_coord(y0, h_in_f, align_corners)
+            y1 = _reflect_coord(y1, h_in_f, align_corners)
+
+        x0 = tf.clip_by_value(x0, 0.0, max_x)
+        x1 = tf.clip_by_value(x1, 0.0, max_x)
+        y0 = tf.clip_by_value(y0, 0.0, max_y)
+        y1 = tf.clip_by_value(y1, 0.0, max_y)
+
+        w_y0_x0 = (1.0 - dy) * (1.0 - dx)
+        w_y1_x0 = dy * (1.0 - dx)
+        w_y1_x1 = dy * dx
+        w_y0_x1 = (1.0 - dy) * dx
+
+        v_y0_x0 = _gather_2d(image, y0, x0, linear_cache)
+        v_y1_x0 = _gather_2d(image, y1, x0, linear_cache)
+        v_y1_x1 = _gather_2d(image, y1, x1, linear_cache)
+        v_y0_x1 = _gather_2d(image, y0, x1, linear_cache)

-    …
-        )
-    …
+        output = w_y0_x0 * v_y0_x0 + w_y1_x0 * v_y1_x0 + w_y1_x1 * v_y1_x1 + w_y0_x1 * v_y0_x1
+        return tf.identity(output, name=target_name)
+
+    def _grid_sample_3d(image, grid, target_name):
+        d_in = tf.shape(image)[1]
+        h_in = tf.shape(image)[2]
+        w_in = tf.shape(image)[3]
+        d_in_f = tf.cast(d_in, grid.dtype)
+        h_in_f = tf.cast(h_in, grid.dtype)
+        w_in_f = tf.cast(w_in, grid.dtype)
+        grid_x, grid_y, grid_z = tf.split(grid, num_or_size_splits=3, axis=-1)
+        x = _normalize_grid(grid_x, w_in_f, align_corners)
+        y = _normalize_grid(grid_y, h_in_f, align_corners)
+        z = _normalize_grid(grid_z, d_in_f, align_corners)
+
+        if padding_mode == 'border':
+            x = tf.clip_by_value(x, 0.0, w_in_f - 1.0)
+            y = tf.clip_by_value(y, 0.0, h_in_f - 1.0)
+            z = tf.clip_by_value(z, 0.0, d_in_f - 1.0)
+            max_x = w_in_f - 1.0
+            max_y = h_in_f - 1.0
+            max_z = d_in_f - 1.0
+        elif padding_mode == 'reflection':
+            x = _reflect_coord(x, w_in_f, align_corners)
+            y = _reflect_coord(y, h_in_f, align_corners)
+            z = _reflect_coord(z, d_in_f, align_corners)
+            max_x = w_in_f - 1.0
+            max_y = h_in_f - 1.0
+            max_z = d_in_f - 1.0
+        else:
+            pad = 2 if mode == 'cubic' else 1
+            x = tf.clip_by_value(x, -float(pad), w_in_f - 1.0 + float(pad)) + float(pad)
+            y = tf.clip_by_value(y, -float(pad), h_in_f - 1.0 + float(pad)) + float(pad)
+            z = tf.clip_by_value(z, -float(pad), d_in_f - 1.0 + float(pad)) + float(pad)
+            image = tf.pad(image, paddings=[[0,0],[pad,pad],[pad,pad],[pad,pad],[0,0]])
+            max_x = tf.cast(w_in + 2 * pad - 1, grid.dtype)
+            max_y = tf.cast(h_in + 2 * pad - 1, grid.dtype)
+            max_z = tf.cast(d_in + 2 * pad - 1, grid.dtype)
+
+        linear_cache = _prepare_linear_gather_3d(image) if use_linear_gather_3d else None
+
+        if mode == 'nearest':
+            x0 = tf.round(x)
+            y0 = tf.round(y)
+            z0 = tf.round(z)
+            if padding_mode == 'reflection':
+                x0 = _reflect_coord(x0, w_in_f, align_corners)
+                y0 = _reflect_coord(y0, h_in_f, align_corners)
+                z0 = _reflect_coord(z0, d_in_f, align_corners)
+            x0 = tf.clip_by_value(x0, 0.0, max_x)
+            y0 = tf.clip_by_value(y0, 0.0, max_y)
+            z0 = tf.clip_by_value(z0, 0.0, max_z)
+            output = _gather_3d(image, z0, y0, x0, linear_cache)
+            return tf.identity(output, name=target_name)
+
+        if mode == 'cubic':
+            x1 = tf.floor(x)
+            y1 = tf.floor(y)
+            z1 = tf.floor(z)
+            dx = x - x1
+            dy = y - y1
+            dz = z - z1
+            x0 = x1 - 1.0
+            x2 = x1 + 1.0
+            x3 = x1 + 2.0
+            y0 = y1 - 1.0
+            y2 = y1 + 1.0
+            y3 = y1 + 2.0
+            z0 = z1 - 1.0
+            z2 = z1 + 1.0
+            z3 = z1 + 2.0
+
+            if padding_mode == 'reflection':
+                x0 = _reflect_coord(x0, w_in_f, align_corners)
+                x1 = _reflect_coord(x1, w_in_f, align_corners)
+                x2 = _reflect_coord(x2, w_in_f, align_corners)
+                x3 = _reflect_coord(x3, w_in_f, align_corners)
+                y0 = _reflect_coord(y0, h_in_f, align_corners)
+                y1 = _reflect_coord(y1, h_in_f, align_corners)
+                y2 = _reflect_coord(y2, h_in_f, align_corners)
+                y3 = _reflect_coord(y3, h_in_f, align_corners)
+                z0 = _reflect_coord(z0, d_in_f, align_corners)
+                z1 = _reflect_coord(z1, d_in_f, align_corners)
+                z2 = _reflect_coord(z2, d_in_f, align_corners)
+                z3 = _reflect_coord(z3, d_in_f, align_corners)
+
+            x0 = tf.clip_by_value(x0, 0.0, max_x)
+            x1 = tf.clip_by_value(x1, 0.0, max_x)
+            x2 = tf.clip_by_value(x2, 0.0, max_x)
+            x3 = tf.clip_by_value(x3, 0.0, max_x)
+            y0 = tf.clip_by_value(y0, 0.0, max_y)
+            y1 = tf.clip_by_value(y1, 0.0, max_y)
+            y2 = tf.clip_by_value(y2, 0.0, max_y)
+            y3 = tf.clip_by_value(y3, 0.0, max_y)
+            z0 = tf.clip_by_value(z0, 0.0, max_z)
+            z1 = tf.clip_by_value(z1, 0.0, max_z)
+            z2 = tf.clip_by_value(z2, 0.0, max_z)
+            z3 = tf.clip_by_value(z3, 0.0, max_z)
+
+            wx0 = _cubic_kernel(dx + 1.0)
+            wx1 = _cubic_kernel(dx)
+            wx2 = _cubic_kernel(dx - 1.0)
+            wx3 = _cubic_kernel(dx - 2.0)
+            wy0 = _cubic_kernel(dy + 1.0)
+            wy1 = _cubic_kernel(dy)
+            wy2 = _cubic_kernel(dy - 1.0)
+            wy3 = _cubic_kernel(dy - 2.0)
+            wz0 = _cubic_kernel(dz + 1.0)
+            wz1 = _cubic_kernel(dz)
+            wz2 = _cubic_kernel(dz - 1.0)
+            wz3 = _cubic_kernel(dz - 2.0)
+
+            out = 0.0
+            for x_idx, wx in [(x0, wx0), (x1, wx1), (x2, wx2), (x3, wx3)]:
+                for y_idx, wy in [(y0, wy0), (y1, wy1), (y2, wy2), (y3, wy3)]:
+                    for z_idx, wz in [(z0, wz0), (z1, wz1), (z2, wz2), (z3, wz3)]:
+                        out = out + _gather_3d(image, z_idx, y_idx, x_idx, linear_cache) * wx * wy * wz
+            return tf.identity(out, name=target_name)
+
+        x0 = tf.floor(x)
+        y0 = tf.floor(y)
+        z0 = tf.floor(z)
+        x1 = x0 + 1.0
+        y1 = y0 + 1.0
+        z1 = z0 + 1.0
+        dx = x - x0
+        dy = y - y0
+        dz = z - z0
+
+        if padding_mode == 'reflection':
+            x0 = _reflect_coord(x0, w_in_f, align_corners)
+            x1 = _reflect_coord(x1, w_in_f, align_corners)
+            y0 = _reflect_coord(y0, h_in_f, align_corners)
+            y1 = _reflect_coord(y1, h_in_f, align_corners)
+            z0 = _reflect_coord(z0, d_in_f, align_corners)
+            z1 = _reflect_coord(z1, d_in_f, align_corners)
+
+        x0 = tf.clip_by_value(x0, 0.0, max_x)
+        x1 = tf.clip_by_value(x1, 0.0, max_x)
+        y0 = tf.clip_by_value(y0, 0.0, max_y)
+        y1 = tf.clip_by_value(y1, 0.0, max_y)
+        z0 = tf.clip_by_value(z0, 0.0, max_z)
+        z1 = tf.clip_by_value(z1, 0.0, max_z)
+
+        w000 = (1.0 - dz) * (1.0 - dy) * (1.0 - dx)
+        w001 = (1.0 - dz) * (1.0 - dy) * dx
+        w010 = (1.0 - dz) * dy * (1.0 - dx)
+        w011 = (1.0 - dz) * dy * dx
+        w100 = dz * (1.0 - dy) * (1.0 - dx)
+        w101 = dz * (1.0 - dy) * dx
+        w110 = dz * dy * (1.0 - dx)
+        w111 = dz * dy * dx
+
+        v000 = _gather_3d(image, z0, y0, x0, linear_cache)
+        v001 = _gather_3d(image, z0, y0, x1, linear_cache)
+        v010 = _gather_3d(image, z0, y1, x0, linear_cache)
+        v011 = _gather_3d(image, z0, y1, x1, linear_cache)
+        v100 = _gather_3d(image, z1, y0, x0, linear_cache)
+        v101 = _gather_3d(image, z1, y0, x1, linear_cache)
+        v110 = _gather_3d(image, z1, y1, x0, linear_cache)
+        v111 = _gather_3d(image, z1, y1, x1, linear_cache)
+
+        output = (
+            w000 * v000 + w001 * v001 + w010 * v010 + w011 * v011 +
+            w100 * v100 + w101 * v101 + w110 * v110 + w111 * v111
         )
-
-        dim_expanded_idx_tensors_per_axis2 = [
-            tf.expand_dims(idx_tensor, axis=-1)
-            for idx_tensor in idx_tensors_per_axis2
-        ]
-        index_expanded2 = tf.concat(dim_expanded_idx_tensors_per_axis2, axis=-1)
-        gathernd2 = tf.gather_nd(data_swaped2, index_expanded2)
-        gatherelements2 = tf.transpose(gathernd2, perm=[2,1,0]) # GatherElements_2_output_0
-
+        return tf.identity(output, name=target_name)

-    …
-        index_expanded3 = tf.concat(dim_expanded_idx_tensors_per_axis3, axis=-1)
-        gathernd3 = tf.gather_nd(data_swaped3, index_expanded3)
-        gatherelements3 = tf.transpose(gathernd3, perm=[2,1,0]) # GatherElements_1_output_0
+    def define_fast_gridsample(image, grid, align_corners, target_name):
+        is_string = image.dtype == tf.string
+        if is_string and mode != 'nearest':
+            error('GridSample supports string input types only with nearest mode.')
+            sys.exit(1)
+
+        compute_dtype = tf.float64 \
+            if image.dtype == tf.float64 or grid.dtype == tf.float64 else tf.float32
+        image_compute = image if (is_string or image.dtype.is_complex) else tf.cast(image, compute_dtype)
+        grid_compute = tf.cast(grid, compute_dtype)
+
+        image_rank = len(image_compute.shape)
+        if image_rank == 3:
+            output = _grid_sample_1d(image_compute, grid_compute, target_name)
+        elif image_rank == 4:
+            output = _grid_sample_2d(image_compute, grid_compute, target_name)
+        elif image_rank == 5:
+            output = _grid_sample_3d(image_compute, grid_compute, target_name)
+        else:
+            error(f'GridSample supports only 1D/2D/3D inputs. Got rank={image_rank}.')
+            sys.exit(1)

+        if output.dtype != image.dtype:
+            output = tf.cast(output, image.dtype)
+        return output

-    …
-        data_swaped4 = tf.transpose(image_reshape_transpose, perm=axis_perm4)
-        index_swaped4 = tf.transpose(expand42, perm=axis_perm4)
-        idx_tensors_per_axis4 = [
-            tf.range(tf.shape(index_swaped4, index_swaped4.dtype)[i]) \
-            for i in range(index_swaped4.shape.rank)
-        ]
-        idx_tensors_per_axis4 = tf.meshgrid(
-            *idx_tensors_per_axis4,
-            indexing='ij',
+    def define_accurate_gridsample(image, grid, align_corners, target_name):
+        return define_fast_gridsample(
+            image=image,
+            grid=grid,
+            align_corners=align_corners,
+            target_name=target_name,
         )
-        idx_tensors_per_axis4[0] = index_swaped4
-        dim_expanded_idx_tensors_per_axis4 = [
-            tf.expand_dims(idx_tensor, axis=-1)
-            for idx_tensor in idx_tensors_per_axis4
-        ]
-        index_expanded4 = tf.concat(dim_expanded_idx_tensors_per_axis4, axis=-1)
-        gathernd4 = tf.gather_nd(data_swaped4, index_expanded4)
-        gatherelements4 = tf.transpose(gathernd4, perm=[2,1,0]) # GatherElements_output_0
-
-        # GatherElements_3_output_0 Unsqueeze_3_output_0 -> Mul_15_output_0
-        mul51 = tf.math.multiply(gatherelements1, unsqueeze21)
-        # GatherElements_2_output_0 Unsqueeze_2_output_0 -> Mul_14_output_0
-        mul52 = tf.math.multiply(gatherelements2, unsqueeze22)
-        # GatherElements_1_output_0 Unsqueeze_1_output_0 -> Mul_13_output_0
-        mul53 = tf.math.multiply(gatherelements3, unsqueeze12)
-        # GatherElements_output_0 Unsqueeze_output_0 -> Mul_12_output_0
-        mul54 = tf.math.multiply(gatherelements4, unsqueeze11)
-
-        # Mul_12_output_0 Mul_13_output_0 -> Add_10_output_0
-        add61 = tf.math.add(mul54, mul53)
-        # Add_10_output_0 Mul_14_output_0 -> Add_11_output_0
-        add62 = tf.math.add(add61, mul52)
-        # Add_11_output_0 Mul_15_output_0 -> Add_12_output_0
-        add63 = tf.math.add(add62, mul51)
-
-        # Add_12_output_0 Constant_55_output_0 -> output_tensor
-        output_shape = [
-            image.shape[0],
-            image.shape[3],
-            grid.shape[1],
-            grid.shape[2],
-        ]
-        final_reshape = tf.reshape(add63, shape=output_shape)
-        # NCHW -> NHWC
-        output = tf.transpose(final_reshape, perm=[0,2,3,1], name=target_name)
-        return output

     disable_strict_mode: bool = kwargs['disable_strict_mode']
     enable_fast_gridsample = True
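The rewritten lowering above normalizes the ONNX grid from [-1, 1] into pixel coordinates, applies zeros/border/reflection handling, and fetches samples either with tf.gather_nd or, on the fast path, with a single tf.gather over a flattened [N, H*W, C] view. A small self-contained sketch of the two pieces that are easiest to verify in isolation, the normalization formula and the flattened-gather equivalence; the helper name and the tensor shapes are illustrative, not taken from the package:

import numpy as np
import tensorflow as tf

def normalize_grid(coord, size, align_corners):
    # Same formula as _normalize_grid in the diff: map [-1, 1] to pixel coordinates.
    if align_corners:
        return (coord + 1.0) * (size - 1.0) * 0.5
    return (coord + 1.0) * size * 0.5 - 0.5

# align_corners=True maps -1 -> 0 and +1 -> W-1; False uses the half-pixel convention.
assert normalize_grid(-1.0, 8, True) == 0.0 and normalize_grid(1.0, 8, True) == 7.0
assert normalize_grid(-1.0, 8, False) == -0.5 and normalize_grid(1.0, 8, False) == 7.5

# Fast-path check: gathering (y, x) pairs with tf.gather_nd(batch_dims=1) matches
# gathering the flattened index y * W + x from a [N, H*W, C] view with tf.gather.
image = tf.random.uniform([2, 5, 7, 3])                        # [N, H, W, C]
y = tf.random.uniform([2, 4, 6, 1], maxval=5, dtype=tf.int64)  # [N, h, w, 1]
x = tf.random.uniform([2, 4, 6, 1], maxval=7, dtype=tf.int64)
ref = tf.gather_nd(image, tf.concat([y, x], axis=-1), batch_dims=1)   # [N, h, w, C]
flat = tf.reshape(image, [2, 5 * 7, 3])
fast = tf.gather(flat, tf.squeeze(y * 7 + x, axis=-1), batch_dims=1)
np.testing.assert_allclose(ref.numpy(), fast.numpy())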
{onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnx2tf
-Version: 1.29.
+Version: 1.29.8
 Summary: Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC).
 Home-page: https://github.com/PINTO0309/onnx2tf
 Author: Katsuya Hyodo
@@ -16,7 +16,8 @@ Requires-Dist: numpy==1.26.4
 Requires-Dist: onnx==1.19.0
 Requires-Dist: onnxruntime==1.23.0
 Requires-Dist: opencv-python==4.11.0.86
-Requires-Dist: onnxsim==0.4.
+Requires-Dist: onnxsim==0.4.36
+Requires-Dist: onnxoptimizer==0.4.2
 Requires-Dist: ai-edge-litert==2.1.0
 Requires-Dist: tensorflow==2.19.0
 Requires-Dist: tf-keras==2.19.0
@@ -306,10 +307,11 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 - Linux / Windows
 - onnx==1.19.0
 - onnxruntime==1.23.0
-- …
+- onnxsim==0.4.36
+- onnxoptimizer==0.4.2
 - onnx_graphsurgeon==0.5.8
 - simple_onnx_processing_tools==1.1.32
-- tensorflow==2.19.0
+- tensorflow==2.19.0
 - tf-keras==2.19.0
 - ai-edge-litert==1.2.0
 - psutil==5.9.5
@@ -357,7 +359,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.29.
+ghcr.io/pinto0309/onnx2tf:1.29.8

 or

@@ -365,14 +367,15 @@ Video speed is adjusted approximately 50 times slower than actual speed.
 docker run --rm -it \
 -v `pwd`:/workdir \
 -w /workdir \
-docker.io/pinto0309/onnx2tf:1.29.
+docker.io/pinto0309/onnx2tf:1.29.8

 or

 pip install -U onnx==1.19.0 \
 && pip install -U onnx-graphsurgeon==0.5.8 \
 && pip install -U onnxruntime==1.23.0 \
-&& pip install -U onnxsim==0.4.
+&& pip install -U onnxsim==0.4.36 \
+&& pip install -U onnxoptimizer==0.4.2 \
 && pip install -U simple_onnx_processing_tools==1.1.32 \
 && pip install -U sne4onnx>=1.0.13 \
 && pip install -U sng4onnx>=1.0.4 \
{onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-onnx2tf/__init__.py,sha256=
+onnx2tf/__init__.py,sha256=hMWH2boz8JPzsuaYsF1xqqELYgBVGg6tIlkAtcbQowQ,66
 onnx2tf/__main__.py,sha256=2RSCQ7d4lc6CwD-rlGn9UicPFg-P5du7ZD_yh-kuBEU,57
 onnx2tf/onnx2tf.py,sha256=wdBA-lgCEu-ZfUAKIUQgLe8hSP8ifE7rS6nWAq6iF6o,151519
 onnx2tf/ops/Abs.py,sha256=V7btmCG_ZvK_qJovUsguq0ZMJ349mhNQ4FHSgzP_Yuo,4029
@@ -67,7 +67,7 @@ onnx2tf/ops/GlobalLpPool.py,sha256=bzsQBuqKo5CI7HB-HlFC3m96swAarCsEnOlPMrmZVFM,5
 onnx2tf/ops/GlobalMaxPool.py,sha256=UAlMRlnJlgq9keEYYpqvmTruGJ4jeKIECArESbMlNOg,4985
 onnx2tf/ops/Greater.py,sha256=fhMFF0fGt2c1W_rHCy0yKAXUYThLgBVnoFmCYLPD12Q,4585
 onnx2tf/ops/GreaterOrEqual.py,sha256=sfNBveEyoU2oIlFILKlZ3jopeCnnPH2ij4J08QtIX8I,4604
-onnx2tf/ops/GridSample.py,sha256=
+onnx2tf/ops/GridSample.py,sha256=3THBiJcB9J5eFoobiwwqqQ-BJ0pr7xK9JyTCxXixLs0,31360
 onnx2tf/ops/GroupNorm.py,sha256=zMjgkTDhb8OySDa4ZBg-45rWQQ5dy3wmqAY-Aj7izac,12026
 onnx2tf/ops/HammingWindow.py,sha256=PY6NVvzutmFKB8UyJYl2LcwqzZGhRMg0jot96m0isCc,2891
 onnx2tf/ops/HannWindow.py,sha256=vMvtn3JwjxUqPXTXdNzk3QjH87JFAEStwwEnIl_5jKY,2882
@@ -198,9 +198,9 @@ onnx2tf/utils/enums.py,sha256=7c5TqetqB07VjyHoxJHfLgtqBqk9ZRyUF33fPOJR1IM,1649
 onnx2tf/utils/iterative_json_optimizer.py,sha256=qqeIxWGxrhcCYk8-ebWnblnOkzDCwi-nseipHzHR_bk,10436
 onnx2tf/utils/json_auto_generator.py,sha256=OC-SfKtUg7zUxaXTAg6kT0ShzIc3ByjDa3FNp173DtA,60302
 onnx2tf/utils/logging.py,sha256=yUCmPuJ_XiUItM3sZMcaMO24JErkQy7zZwVTYWAuiKg,1982
-onnx2tf-1.29.
-onnx2tf-1.29.
-onnx2tf-1.29.
-onnx2tf-1.29.
-onnx2tf-1.29.
-onnx2tf-1.29.
+onnx2tf-1.29.8.dist-info/licenses/LICENSE,sha256=5v_Kxihy8i6mzHVl349ikSREaIdsl9YeUnX1KBDLD2w,1070
+onnx2tf-1.29.8.dist-info/licenses/LICENSE_onnx-tensorflow,sha256=gK4GtS9S5YcyINu6uuNNWdo-kBClyEM4MFLFGiNTeRM,11231
+onnx2tf-1.29.8.dist-info/METADATA,sha256=tL52Iorw3kjy7YJ7IPdqiAXcNGxhQJNq-8BGf6FXAZM,153504
+onnx2tf-1.29.8.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+onnx2tf-1.29.8.dist-info/top_level.txt,sha256=WgfPiEy3f6vZ_FOpAIEA2CF3TCx1eYrhGw93Ih6b9Fw,8
+onnx2tf-1.29.8.dist-info/RECORD,,
{onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/WHEEL
File without changes
{onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/licenses/LICENSE
File without changes
{onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/licenses/LICENSE_onnx-tensorflow
File without changes
{onnx2tf-1.29.6.dist-info → onnx2tf-1.29.8.dist-info}/top_level.txt
File without changes