onnx2tf 1.29.9__py3-none-any.whl → 1.29.10__py3-none-any.whl

onnx2tf/ops/_Loop.py DELETED
@@ -1,306 +0,0 @@
- import re
- import sys
- import random
- random.seed(0)
- import numpy as np
- np.random.seed(0)
- import tensorflow as tf
- import tf_keras
- import onnx_graphsurgeon as gs
- from onnx2tf.utils.common_functions import (
-     get_constant_or_variable,
-     print_node_info,
-     inverted_operation_enable_disable,
-     make_tf_node_info,
- )
- from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES
- import importlib
- from onnx2tf.utils.logging import *
-
-
- class While_Loop_CustomLayer(tf_keras.layers.Layer):
-     def __init__(self):
-         super(While_Loop_CustomLayer, self).__init__()
-
-     def call(self, cond, body, loop_vars, shape_invariants, maximum_iterations):
-         return tf.while_loop(
-             cond=cond,
-             body=body,
-             loop_vars=loop_vars,
-             shape_invariants=shape_invariants,
-             maximum_iterations=maximum_iterations,
-         )
-
-
- @print_node_info
- @inverted_operation_enable_disable
- def make_node(
-     *,
-     graph_node: gs.Node,
-     tf_layers_dict: dict,
-     **kwargs: dict,
- ):
-     """Loop
-
-     Parameters
-     ----------
-     graph_node: gs.Node
-         graph_surgeon Node
-
-     tf_layers_dict: dict
-         optype, shape, dtype, tensorflow graph
-     """
-     before_op_output_shape_trans_1 = \
-         tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
-     before_op_output_shape_trans_2 = \
-         tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)
-     before_op_output_shape_trans = \
-         before_op_output_shape_trans_1 \
-         and before_op_output_shape_trans_2
-
-     graph_node_input_1 = get_constant_or_variable(
-         graph_node.inputs[0],
-         before_op_output_shape_trans,
-     )
-     graph_node_input_2 = get_constant_or_variable(
-         graph_node.inputs[1],
-         before_op_output_shape_trans,
-     )
-     graph_node_input_n_list = []
-     for graph_node_input in graph_node.inputs[2:]:
-         graph_node_input_n = get_constant_or_variable(
-             graph_node_input,
-             before_op_output_shape_trans,
-         )
-         graph_node_input_n_list.append(graph_node_input_n)
-
-     M = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
-         if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
-     M = None if isinstance(M, str) and M == "" else M
-     M = tf.where(
-         tf.greater(M, tf.int32.max),
-         tf.constant(tf.int32.max, tf.int32),
-         tf.cast(M, tf.int32)
-     ) if M is not None else M
-     cond = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
-         if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
-     cond_init = None if isinstance(cond, str) and cond == "" else tf.cast(cond, tf.bool)
-
-     v_init = [
-         tf_layers_dict[graph_node_input_n.name]['tf_node'] \
-             if isinstance(graph_node_input_n, gs.Variable) else graph_node_input_n \
-                 for graph_node_input_n in graph_node_input_n_list
-     ]
-     v_shapes = [
-         tf.TensorShape([None for i in range(len(v.shape))]) for v in v_init
-     ]
-
-     body: gs.Graph = graph_node.attrs["body"]
-
-     iter_cnt_init = np.int64(0)
-
-     scan_outputs_start_index = 1 + len(v_init)
-     scan_outputs_init = [
-         tf.TensorArray(
-             dtype=body.outputs[i].dtype,
-             size=0,
-             dynamic_size=True
-         ) for i in range(scan_outputs_start_index, len(body.outputs))
-     ]
-     scan_outputs_shapes = [tf.TensorShape(None) for o in scan_outputs_init]
-
-     graph_node_output: gs.Variable = graph_node.outputs[0]
-     shape = graph_node_output.shape
-     dtype = graph_node_output.dtype
-
-     # Preserving Graph Structure (Dict)
-     tf_layers_dict[graph_node_output.name] = {
-         'optype': graph_node.op,
-         'shape': shape,
-         'dtype': dtype,
-     }
-
-     # Generation of TF OP
-     def run_subgraph(iter_cnt, cond, v, scan_outputs):
-         for body_input in body.inputs:
-             try:
-                 op = importlib.import_module(f'onnx2tf.ops.Input')
-             except ModuleNotFoundError as ex:
-                 error(
-                     f'{optype} OP is not yet implemented.'
-                 )
-                 sys.exit(1)
-             # substitution because saved_model does not allow colons
-             body_input.name = body_input.name.replace(':','__')
-             # Substitution because saved_model does not allow leading slashes in op names
-             if kwargs['output_signaturedefs']:
-                 body_input.name = re.sub('^/', 'wa/', body_input.name)
-             op.make_node(
-                 graph_input=body_input,
-                 tf_layers_dict=tf_layers_dict,
-                 keep_ncw_or_nchw_or_ncdhw_input_names=[],
-                 keep_nwc_or_nhwc_or_ndhwc_input_names=[],
-                 keep_shape_absolutely_input_names=[],
-                 **kwargs,
-             )
-         for body_node in body.nodes:
-             optype = body_node.op
-             try:
-                 op = importlib.import_module(f'onnx2tf.ops.{optype}')
-             except ModuleNotFoundError as ex:
-                 error(
-                     f'{optype} OP is not yet implemented.'
-                 )
-                 sys.exit(1)
-             # substitution because saved_model does not allow colons
-             body_node.name = body_node.name.replace(':','__')
-             # Substitution because saved_model does not allow leading slashes in op names
-             if kwargs['output_signaturedefs']:
-                 body_node.name = re.sub('^/', 'wa/', body_node.name)
-             op.make_node(
-                 graph_node=body_node,
-                 tf_layers_dict=tf_layers_dict,
-                 **kwargs,
-             )
-         # Resister constant
-         for output in body.outputs:
-             if output.name not in tf_layers_dict and isinstance(output, gs.Constant):
-                 tf_layers_dict[output.name] = {
-                     'optype': 'Constant',
-                     'shape': output.values.shape,
-                     'dtype': output.values.dtype,
-                 }
-                 tf_layers_dict[output.name]['tf_node'] = \
-                     tf.constant(
-                         output.values,
-                         dtype=NUMPY_DTYPES_TO_TF_DTYPES[output.values.dtype],
-                     )
-         outputs = [tf_layers_dict[output.name]['tf_node'] for output in body.outputs]
-         for i in range(scan_outputs_start_index, len(outputs)):
-             s_index = i - scan_outputs_start_index
-             insert_index = scan_outputs[s_index].size()
-             scan_outputs[s_index] = scan_outputs[s_index].write(insert_index, outputs[i])
-         iter_cnt += 1
-         return iter_cnt, outputs[0], outputs[1:scan_outputs_start_index], scan_outputs
-
-     # for loop
-     # https://stackoverflow.com/questions/71635459/how-to-use-keras-symbolic-inputs-with-tf-while-loop
-     if M is not None and cond_init is None:
-         condition = lambda iter_cnt, cond, v, scan_outputs: True
-         while_loop_layer = While_Loop_CustomLayer()
-         iter_cnt_final, _, v_final, scan_outputs_final = while_loop_layer(
-             cond=condition,
-             body=run_subgraph,
-             loop_vars=[
-                 iter_cnt_init,
-                 "",
-                 v_init,
-                 scan_outputs_init,
-             ],
-             shape_invariants=[
-                 tf.TensorShape([]),
-                 tf.TensorShape(None),
-                 v_shapes,
-                 scan_outputs_shapes,
-             ],
-             maximum_iterations=M,
-         )
-     # while and do-while loop
-     # https://stackoverflow.com/questions/71635459/how-to-use-keras-symbolic-inputs-with-tf-while-loop
-     elif M is None and cond_init is not None:
-         condition = lambda iter_cnt, cond, v, scan_outputs: tf.reduce_all(tf.equal(cond, True))
-         while_loop_layer = While_Loop_CustomLayer()
-         iter_cnt_final, cond_final, v_final, scan_outputs_final = while_loop_layer(
-             cond=condition,
-             body=run_subgraph,
-             loop_vars=[
-                 iter_cnt_init,
-                 cond_init,
-                 v_init,
-                 scan_outputs_init,
-             ],
-             shape_invariants=[
-                 tf.TensorShape([]),
-                 tf.TensorShape(None),
-                 v_shapes,
-                 scan_outputs_shapes,
-             ],
-         )
-     # combine for loop and while loop together
-     # https://stackoverflow.com/questions/71635459/how-to-use-keras-symbolic-inputs-with-tf-while-loop
-     elif M is not None and cond_init is not None:
-         condition = lambda iter_cnt, cond, v, scan_outputs: tf.reduce_all(tf.equal(cond, True))
-         while_loop_layer = While_Loop_CustomLayer()
-         iter_cnt_final, cond_final, v_final, scan_outputs_final = while_loop_layer(
-             cond=condition,
-             body=run_subgraph,
-             loop_vars=[
-                 tf.constant(iter_cnt_init, dtype=iter_cnt_init.dtype),
-                 cond_init,
-                 v_init,
-                 scan_outputs_init,
-             ],
-             shape_invariants=[
-                 tf.TensorShape([]),
-                 tf.TensorShape(None),
-                 v_shapes,
-                 scan_outputs_shapes,
-             ],
-             maximum_iterations=M,
-         )
-     # M is None and cond is None
-     else:
-         error(
-             f'Both M and cond in Loop are not set at the same time ' +
-             f'Tensorflow.(PS. if you want to create a do-while loop ' +
-             f'then please set cond to True or 1)\n' +
-             f'graph_node.name: {graph_node.name}'
-         )
-         sys.exit(1)
-
-
-     if scan_outputs_start_index == len(body.outputs):
-         # there is no scan_output in the body graph
-         tf_layers_dict[graph_node_output.name]['tf_node'] = v_final
-
-     else:
-         def true_fn():
-             return scan_outputs_final
-
-         def false_fn():
-             new_scan_outputs = []
-             for i in range(scan_outputs_start_index, len(body.outputs)):
-                 exp_elem_shape = scan_outputs_init[i-scan_outputs_start_index].element_shape
-                 elem_shape = []
-                 for j in range(exp_elem_shape.rank):
-                     shape_j = 0 if exp_elem_shape[j] is None else exp_elem_shape[j]
-                     elem_shape.append(shape_j)
-                 new_scan_outputs.append(
-                     tf.TensorArray(
-                         dtype=body.outputs[i].dtype,
-                         size=0,
-                         element_shape=tf.TensorShape(elem_shape)
-                     )
-                 )
-             return new_scan_outputs
-
-         scan_out_final = tf.cond(tf.greater(iter_cnt_final, 0), true_fn, false_fn)
-         scan_outputs_tensors = [o.stack() for o in scan_out_final]
-         tf_layers_dict[graph_node_output.name]['tf_node'] = v_final + scan_outputs_tensors
-
-     # Generation of Debug Info
-     tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
-         make_tf_node_info(
-             node_info={
-                 'tf_op_type': tf.while_loop,
-                 'tf_inputs': {
-                     'condition': condition,
-                     'M': M,
-                     'cond': cond_init,
-                     'v_initial': v_init,
-                 },
-                 'tf_outputs': {
-                     'output': tf_layers_dict[graph_node_output.name]['tf_node'],
-                 },
-             }
-         )