onnxruntime_extensions-0.14.0-cp313-cp313-macosx_11_0_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. onnxruntime_extensions/__init__.py +82 -0
  2. onnxruntime_extensions/_cuops.py +564 -0
  3. onnxruntime_extensions/_extensions_pydll.cpython-313-darwin.so +0 -0
  4. onnxruntime_extensions/_extensions_pydll.pyi +45 -0
  5. onnxruntime_extensions/_hf_cvt.py +331 -0
  6. onnxruntime_extensions/_ocos.py +133 -0
  7. onnxruntime_extensions/_ortapi2.py +274 -0
  8. onnxruntime_extensions/_torch_cvt.py +231 -0
  9. onnxruntime_extensions/_version.py +2 -0
  10. onnxruntime_extensions/cmd.py +66 -0
  11. onnxruntime_extensions/cvt.py +306 -0
  12. onnxruntime_extensions/onnxprocess/__init__.py +12 -0
  13. onnxruntime_extensions/onnxprocess/_builder.py +53 -0
  14. onnxruntime_extensions/onnxprocess/_onnx_ops.py +1507 -0
  15. onnxruntime_extensions/onnxprocess/_session.py +355 -0
  16. onnxruntime_extensions/onnxprocess/_tensor.py +628 -0
  17. onnxruntime_extensions/onnxprocess/torch_wrapper.py +31 -0
  18. onnxruntime_extensions/pnp/__init__.py +13 -0
  19. onnxruntime_extensions/pnp/_base.py +124 -0
  20. onnxruntime_extensions/pnp/_imagenet.py +65 -0
  21. onnxruntime_extensions/pnp/_nlp.py +148 -0
  22. onnxruntime_extensions/pnp/_onnx_ops.py +1544 -0
  23. onnxruntime_extensions/pnp/_torchext.py +310 -0
  24. onnxruntime_extensions/pnp/_unifier.py +45 -0
  25. onnxruntime_extensions/pnp/_utils.py +302 -0
  26. onnxruntime_extensions/pp_api.py +83 -0
  27. onnxruntime_extensions/tools/__init__.py +0 -0
  28. onnxruntime_extensions/tools/add_HuggingFace_CLIPImageProcessor_to_model.py +171 -0
  29. onnxruntime_extensions/tools/add_pre_post_processing_to_model.py +535 -0
  30. onnxruntime_extensions/tools/pre_post_processing/__init__.py +4 -0
  31. onnxruntime_extensions/tools/pre_post_processing/pre_post_processor.py +395 -0
  32. onnxruntime_extensions/tools/pre_post_processing/step.py +227 -0
  33. onnxruntime_extensions/tools/pre_post_processing/steps/__init__.py +6 -0
  34. onnxruntime_extensions/tools/pre_post_processing/steps/general.py +366 -0
  35. onnxruntime_extensions/tools/pre_post_processing/steps/nlp.py +344 -0
  36. onnxruntime_extensions/tools/pre_post_processing/steps/vision.py +1157 -0
  37. onnxruntime_extensions/tools/pre_post_processing/utils.py +139 -0
  38. onnxruntime_extensions/util.py +186 -0
  39. onnxruntime_extensions-0.14.0.dist-info/LICENSE +21 -0
  40. onnxruntime_extensions-0.14.0.dist-info/METADATA +102 -0
  41. onnxruntime_extensions-0.14.0.dist-info/RECORD +43 -0
  42. onnxruntime_extensions-0.14.0.dist-info/WHEEL +6 -0
  43. onnxruntime_extensions-0.14.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1507 @@
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # Licensed under the MIT License. See License.txt in the project root for
+ # license information.
+ ###############################################################################
+ import warnings
+ import numpy as np
+ from onnx import helper, defs as onnx_defs, onnx_pb as onnx_proto
+ from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
+
+
+ DEFAULT_OPSET_NUMBER = 13  # The maximum opset supported by the converter in this code branch.
+ # From https://github.com/onnx/onnx/blob/master/docs/Versioning.md
+ OPSET_TO_IR_VERSION = {
+     1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3,
+     7: 3, 8: 3, 9: 4, 10: 5, 11: 6, 12: 7,
+     13: 7, 14: 7, 15: 8
+ }
+
+
+ def _get_main_opset_version(model):
+     """
+     Returns the main opset version.
+     """
+     for op in model.opset_import:
+         if op.domain == '' or op.domain == 'ai.onnx':
+             return op.version
+     return None
+
+
+ def onnx_builtin_opset_version():
+     return onnx_defs.onnx_opset_version()
+
+
+ def get_maximum_opset_supported():
+     return min(DEFAULT_OPSET_NUMBER, onnx_builtin_opset_version())
+
+
+ def make_model_ex(graph, imported_opset_pairs, target_default_opset, **kwargs):
+     onnx_model = helper.make_model(graph, **kwargs)
+
+     # Merge operator sets for the same domain; the largest version number is kept.
+     purified_operator_set = dict()
+     for op_domain, op_version in imported_opset_pairs:
+         if op_domain not in purified_operator_set:
+             if op_domain == '' or op_domain == 'ai.onnx':
+                 # Initializers are a subset of graph inputs for IR_VERSION <= 3 (target opset < 8).
+                 # The opset version needs upgrading, since initializers are separate from graph
+                 # inputs for IR_VERSION >= 4, to pass onnx.checker.
+                 if op_version < 8 and target_default_opset is not None and target_default_opset >= 8:
+                     op_version = 8
+             purified_operator_set[op_domain] = op_version
+         else:
+             purified_operator_set[op_domain] = max(purified_operator_set[op_domain], op_version)
+
+     # Fill operator sets
+     i = 0
+     for op_domain, op_version in purified_operator_set.items():
+         if i == 0 and len(onnx_model.opset_import) == 1:
+             # Overwrite the default operator set created by helper.make_model(...)
+             op_set = onnx_model.opset_import[0]
+         else:
+             # Just create one ONNX element in opset_import
+             op_set = onnx_model.opset_import.add()
+         op_set.domain = op_domain
+         op_set.version = op_version
+         i += 1
+         if op_domain == '' or op_domain == 'ai.onnx':
+             if target_default_opset < op_version:
+                 raise RuntimeError(('The specified opset %d is too low to convert this model, ' +
+                                     'which requires at least opset %d.') % (target_default_opset, op_version))
+             elif target_default_opset > op_version:
+                 warnings.warn('The maximum opset needed by this model is only %d.' % op_version)
+
+     opv = _get_main_opset_version(onnx_model) or target_default_opset
+     irv = OPSET_TO_IR_VERSION.get(opv, onnx_proto.IR_VERSION)
+     onnx_model.ir_version = irv
+     return onnx_model
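+
+ # A minimal usage sketch for make_model_ex (illustrative only; the one-node
+ # graph and names below are hypothetical, not part of this module):
+ #
+ #     from onnx import helper, TensorProto, checker
+ #     node = helper.make_node('Identity', ['X'], ['Y'])
+ #     x = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1])
+ #     y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1])
+ #     graph = helper.make_graph([node], 'demo', [x], [y])
+ #     # Declare the default domain at opset 13; the ir_version is then derived
+ #     # from OPSET_TO_IR_VERSION above.
+ #     model = make_model_ex(graph, [('', 13)], 13)
+ #     checker.check_model(model)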
+
+
+ class _ONNXModelOperator:
+     def __init__(self, name, model, input, output):
+         self.name = name
+         self.model = model
+         self.input = input
+         self.output = output
+
+     def __repr__(self):
+         """
+         Without this method, debugging is too slow.
+         :return: a compact description of the operator.
+         """
+         return "name: {}, input: {}, output: {}".format(self.name, self.input, self.output)
+
+     @property
+     def op_type(self):
+         return 'ModelOp'
+
+
+ class ONNXElementContainer:
+
+     opdict_counter = {}
+
+     def __init__(self, target_opset, parent=None):
+         """
+         :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
+         """
+         self.inputs = []
+         self.outputs = []
+         self.initializers = []
+         self.value_info = []
+         self.nodes = []
+         self.node_domain_version_pair_sets = set()
+         self.target_opset = target_opset
+         self.enable_optimizer = True
+         self.parent = parent
+
+     # The following properties make this container compatible with onnx.GraphProto.
+     @property
+     def initializer(self):
+         return self.initializers
+
+     @property
+     def input(self):
+         return self.inputs
+
+     @property
+     def output(self):
+         return self.outputs
+
+     @staticmethod
+     def _make_value_info(variable):
+         value_info = helper.ValueInfoProto()
+         value_info.name = variable.full_name
+         value_info.type.CopyFrom(variable.type.to_onnx_type())
+         if variable.type.doc_string:
+             value_info.doc_string = variable.type.doc_string
+         return value_info
+
+     def add_input(self, variable):
+         """
+         Add our Variable object defined in _parser.py into the input list of the final ONNX model.
+
+         :param variable: The Variable object to be added
+         """
+         self.inputs.append(self._make_value_info(variable))
+
+     def add_output(self, variable):
+         """
+         Add our Variable object defined in _parser.py into the output list of the final ONNX model.
+
+         :param variable: The Variable object to be added
+         """
+         self.outputs.append(self._make_value_info(variable))
+
+     def add_initializer(self, name, onnx_type, shape, content):
+         """
+         Add a TensorProto into the initializer list of the final ONNX model.
+
+         :param name: Variable name in the produced ONNX model.
+         :param onnx_type: Element type allowed in an ONNX tensor, e.g., TensorProto.FLOAT and TensorProto.STRING.
+         :param shape: Tensor shape, a list of integers.
+         :param content: Flattened tensor values (i.e., a float list or a float array).
+         """
+         if any(d is None for d in shape):
+             raise ValueError('Shape of initializer cannot contain None')
+         tensor = helper.make_tensor(name, onnx_type, shape, content)
+         self.initializers.append(tensor)
+
+     def add_value_info(self, variable):
+         self.value_info.append(self._make_value_info(variable))
+
+     def add_node(self, op_type, inputs, outputs, op_domain='', op_version=1, **attrs):
+         """
+         Add a NodeProto into the node list of the final ONNX model. If the input operator's domain-version
+         information cannot be found in our domain-version pool (a Python set), we may add it.
+
+         :param op_type: A string (e.g., Pool and Conv) indicating the type of the NodeProto.
+         :param inputs: A list of strings; the input variable names of the considered NodeProto.
+         :param outputs: A list of strings; the output variable names of the considered NodeProto.
+         :param op_domain: The domain name (e.g., ai.onnx.ml) of the operator being added.
+         :param op_version: The version number (e.g., 0 and 1) of the operator being added.
+         :param attrs: A Python dictionary mapping attribute names to attribute values.
+         """
+         if isinstance(inputs, str):
+             inputs = [inputs]
+         if isinstance(outputs, str):
+             outputs = [outputs]
+         if not isinstance(inputs, (list, tuple)) or not all(isinstance(s, str) for s in inputs):
+             type_list = ','.join(str(type(s)) for s in inputs)
+             raise ValueError('Inputs must be a list of strings but got [%s]' % type_list)
+         if not isinstance(outputs, (list, tuple)) or not all(isinstance(s, str) for s in outputs):
+             type_list = ','.join(str(type(s)) for s in outputs)
+             raise ValueError('Outputs must be a list of strings but got [%s]' % type_list)
+         for k, v in attrs.items():
+             if v is None:
+                 raise ValueError('Failed to create ONNX node. Undefined attribute pair (%s, %s) found' % (k, v))
+
+         node = helper.make_node(op_type, inputs, outputs, **attrs)
+         node.domain = op_domain
+
+         self.node_domain_version_pair_sets.add((op_domain, op_version))
+         self.nodes.append(node)
+
+     def add_model_node(self, inputs, outputs, name, model):
+         self.nodes.append(_ONNXModelOperator(name=name, model=model, input=inputs, output=outputs))
+
+     def get_unique_operator_name(self, op_type: str):
+         name = op_type.lower()
+         nn = self.opdict_counter.get(name, 0)
+         self.opdict_counter[name] = nn + 1
+         return name if nn == 0 else "{}_{}".format(name, nn + 1)
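+
+ # A usage sketch for ONNXElementContainer (illustrative; the tensor names are
+ # hypothetical, and a real container is normally driven by the surrounding
+ # model builder rather than used standalone):
+ #
+ #     c = ONNXElementContainer(target_opset=13)
+ #     c.add_initializer('W', onnx_proto.TensorProto.FLOAT, [1], [2.0])
+ #     c.add_node('Mul', ['X', 'W'], 'Y', op_version=7,
+ #                name=c.get_unique_operator_name('Mul'))
+ #     # c.nodes, c.initializers and c.node_domain_version_pair_sets now hold
+ #     # everything needed to assemble a GraphProto.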
+
+
+ def _create_name_or_use_existing_one(container, op_type, name):
+     return name or container.get_unique_operator_name(op_type)
+
+
+ class _ONNXOperatorAPI:
+     def get_unique_tensor_name(self, base):
+         pass  # implemented by the model builder
+
+     def _apply_unary_operation(self, op_type, input_name, output_name, container, operator_name, **attrs):
+         name = _create_name_or_use_existing_one(container, op_type, operator_name)
+
+         attrs['name'] = name
+         if container.target_opset < 6:
+             attrs['consumed_inputs'] = [0]
+             op_version = 1
+         else:
+             op_version = 6
+
+         container.add_node(op_type, input_name, output_name, op_version=op_version, **attrs)
+
+     def _apply_basic_numerical_operation(self, op_type, input_names, output_name, container, operator_name,
+                                          axis, broadcast):
+         name = _create_name_or_use_existing_one(container, op_type, operator_name)
+
+         attrs = {}
+         if container.target_opset < 7:
+             # Before ONNX-1.2 (opset 7), broadcasting behavior is Caffe2-like.
+             if axis is not None:
+                 attrs['axis'] = axis
+             if broadcast is not None:
+                 attrs['broadcast'] = broadcast
+
+             if container.target_opset < 6:
+                 attrs['consumed_inputs'] = [0, 0]
+                 op_version = 1
+             else:
+                 op_version = 6
+         else:
+             # Since ONNX-1.2 (opset 7), broadcasting behavior is Numpy-like, so we don't need to
+             # specify any attributes.
+             op_version = 7
+
+         container.add_node(op_type, input_names, output_name, op_version=op_version, name=name, **attrs)
+
+     def _apply_pointwise_operation(self, op_type, input_names, output_name, container, operator_name):
+         name = _create_name_or_use_existing_one(container, op_type, operator_name)
+         attrs = {}
+
+         if container.target_opset < 6:
+             attrs['consumed_inputs'] = [0] * len(input_names)
+             op_version = 1
+         elif container.target_opset < 8:
+             op_version = 6
+         else:
+             if container.target_opset < 12 or op_type == 'Mean':
+                 op_version = 8
+             else:
+                 op_version = 12
+
+         container.add_node(op_type, input_names, output_name, op_version=op_version, name=name, **attrs)
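+
+     # For example (illustrative): with container.target_opset = 13, a call like
+     # self._apply_basic_numerical_operation('Add', ['A', 'B'], 'C', container,
+     # None, None, None) records an Add node at op_version=7 and relies on
+     # Numpy-style broadcasting instead of the legacy axis/broadcast attributes.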
+
+     def abs(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Abs', input_name, output_name, container, operator_name=operator_name)
+         return output_name
+
+     def add(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
+         self._apply_basic_numerical_operation('Add', input_names, output_name, container,
+                                               operator_name=operator_name, axis=axis, broadcast=broadcast)
+         return output_name
+
+     def argmax(self, input_name, output_name, container, operator_name=None, axis=0, keepdims=1,
+                select_last_index=0):
+         name = _create_name_or_use_existing_one(container, 'ArgMax', operator_name)
+         attrs = {'axis': axis, 'keepdims': keepdims}
+         if container.target_opset < 11:
+             op_version = 1
+         elif container.target_opset < 12:
+             op_version = 11
+         else:
+             op_version = 12
+             attrs['select_last_index'] = select_last_index
+         container.add_node('ArgMax', input_name, output_name, op_version=op_version, name=name, **attrs)
+         return output_name
+
+     def argmin(self, input_name, output_name, container, operator_name=None, axis=0, keepdims=1,
+                select_last_index=0):
+         name = _create_name_or_use_existing_one(container, 'ArgMin', operator_name)
+         attrs = {'axis': axis, 'keepdims': keepdims}
+         if container.target_opset < 11:
+             op_version = 1
+         elif container.target_opset < 12:
+             op_version = 11
+         else:
+             op_version = 12
+             attrs['select_last_index'] = select_last_index
+         container.add_node('ArgMin', input_name, output_name, op_version=op_version, name=name, **attrs)
+         return output_name
+
+     def affine(self, input_name, output_name, container, operator_name=None, alpha=1., beta=0.):
+         if container.target_opset < 9:
+             op_type = 'Affine'
+             name = _create_name_or_use_existing_one(container, 'Affine', operator_name)
+             attrs = {'name': name, 'alpha': alpha, 'beta': beta}
+             container.add_node(op_type, input_name, output_name, **attrs)
+         else:
+             name = _create_name_or_use_existing_one(container, 'Affine', operator_name)
+             # Define a and b.
+             aName = self.get_unique_tensor_name(name + '_alpha')
+             container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, [1], [alpha])
+             bName = self.get_unique_tensor_name(name + '_beta')
+             container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, [1], [beta])
+
+             # Compute Z = a * X, where X is the original input.
+             zName = self.get_unique_tensor_name(name + '_scaled')
+             self.mul([aName, input_name], zName, container)
+
+             # Compute Y = Z + b, where Y is the final output.
+             self.add([zName, bName], output_name, container)
+         return output_name
+
+     def batch_norm(self, input_names, output_names, container, operator_name=None,
+                    epsilon=None, is_test=None, momentum=None, spatial=None):
+         name = _create_name_or_use_existing_one(container, 'BatchNormalization', operator_name)
+         attrs = {'name': name, 'epsilon': epsilon, 'momentum': momentum}
+
+         if container.target_opset < 9:
+             attrs['spatial'] = spatial
+         if container.target_opset < 7:
+             attrs['is_test'] = is_test
+
+         if container.target_opset < 6:
+             attrs['consumed_inputs'] = [0] * len(input_names)
+             if len(input_names) > 3:
+                 attrs['consumed_inputs'][3] = 1
+             if len(input_names) > 4:
+                 attrs['consumed_inputs'][4] = 2
+             op_version = 1
+         elif container.target_opset < 7:
+             op_version = 6
+         elif container.target_opset < 9:
+             op_version = 7
+         else:
+             op_version = 9
+
+         container.add_node('BatchNormalization', input_names, output_names, op_version=op_version, **attrs)
+         return output_names
+
+     def cast(self, input_name, output_name, container, operator_name=None, to=None):
+         """
+         :param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64.
+         """
+         name = _create_name_or_use_existing_one(container, 'Cast', operator_name)
+         attrs = {'name': name}
+
+         d = onnx_proto.TensorProto.DataType.DESCRIPTOR
+         allowed_type_name_and_type_enum_pairs = {v.number: k for k, v in d.values_by_name.items()}
+         if to not in allowed_type_name_and_type_enum_pairs:
+             raise ValueError('Attribute "to" must be one of %s' % allowed_type_name_and_type_enum_pairs.keys())
+
+         if container.target_opset < 9:
+             if to in [onnx_proto.TensorProto.STRING, onnx_proto.TensorProto.COMPLEX64,
+                       onnx_proto.TensorProto.COMPLEX128]:
+                 raise ValueError('Attribute "to" cannot correspond to a String or Complex TensorProto type.')
+
+             if container.target_opset < 6:
+                 # Convert enum to string, for example, TensorProto.INT64 to 'INT64'
+                 attrs['to'] = allowed_type_name_and_type_enum_pairs[to]
+                 op_version = 1
+             else:
+                 # Enum, for example, TensorProto.INT64
+                 attrs['to'] = to
+                 op_version = 6
+         else:
+             # Enum value, for example, TensorProto.INT64
+             # String casting is supported in opset 9
+             if to in [onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:
+                 raise ValueError('Attribute "to" cannot correspond to a Complex TensorProto type.')
+             attrs['to'] = to
+             op_version = 9
+
+         container.add_node('Cast', input_name, output_name, op_version=op_version, **attrs)
+         return output_name
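+
+     # For example (illustrative): with target_opset >= 9,
+     # self.cast('ids', 'ids_float', container, to=onnx_proto.TensorProto.FLOAT)
+     # emits a Cast-9 node; a target of TensorProto.STRING would be rejected
+     # above for target_opset < 9, since string casting arrived in opset 9.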
+
+     def clip(self, input_name, output_name, container, operator_name=None, max=None, min=None):
+         name = _create_name_or_use_existing_one(container, 'Clip', operator_name)
+         attrs = {'name': name}
+
+         if container.target_opset < 11:
+             if max is not None:
+                 attrs['max'] = float(max)
+             if min is not None:
+                 attrs['min'] = float(min)
+
+             if container.target_opset < 6:
+                 attrs['consumed_inputs'] = [0]
+                 op_version = 1
+             else:
+                 op_version = 6
+
+             container.add_node('Clip', input_name, output_name, op_version=op_version, **attrs)
+         else:
+             if container.target_opset < 12:
+                 op_version = 11
+             else:
+                 op_version = 12
+             if min is None and max is not None:
+                 raise RuntimeError("Operator 'Clip': min must be specified if max is.")
+             inputs = [input_name]
+
+             if min is not None:
+                 if isinstance(min, (np.ndarray, float, int)):
+                     # add initializer
+                     if isinstance(min, np.ndarray):
+                         if len(min.shape) == 0:
+                             min = [min]
+                         elif min.shape == (1,):
+                             min = list(min[0]) if hasattr(min[0], '__iter__') else list(min)
+                         else:
+                             raise RuntimeError("min must be an array of one element.")
+                     else:
+                         min = [min]
+
+                     # The container in sklearn-onnx stores the computation type in
+                     # container.dtype.
+                     min_name = self.get_unique_tensor_name('clip_min')
+                     if op_version < 12:
+                         min = np.array(min, dtype=getattr(container, 'dtype', np.float32))
+                         container.add_initializer(min_name, getattr(container, 'proto_dtype',
+                                                                     onnx_proto.TensorProto.FLOAT), [], [min[0]])
+                     else:
+                         min = np.array(min)
+                         container.add_initializer(min_name, NP_TYPE_TO_TENSOR_TYPE[min.dtype], [], [min[0]])
+                     min = min_name
+                 if isinstance(min, str):
+                     inputs.append(min)
+                 else:
+                     raise RuntimeError("Parameter 'min' must be a string or a float.")
+
+             if max is not None:
+                 if min is None:
+                     raise RuntimeError("Parameter 'min' must be specified if 'max' is.")
+                 if isinstance(max, (np.ndarray, float, int)):
+                     # add initializer
+                     if isinstance(max, np.ndarray):
+                         if len(max.shape) == 0:
+                             max = [max]
+                         elif max.shape == (1,):
+                             max = list(max[0]) if hasattr(max[0], '__iter__') else list(max)
+                         else:
+                             raise RuntimeError("max must be an array of one element.")
+                     else:
+                         max = [max]
+
+                     max_name = self.get_unique_tensor_name('clip_max')
+                     if op_version < 12:
+                         max = np.array(max, dtype=getattr(container, 'dtype', np.float32))
+                         container.add_initializer(max_name, getattr(container, 'proto_dtype',
+                                                                     onnx_proto.TensorProto.FLOAT), [], [max[0]])
+                     else:
+                         max = np.array(max)
+                         container.add_initializer(max_name, NP_TYPE_TO_TENSOR_TYPE[max.dtype], [], [max[0]])
+                     max = max_name
+                 if isinstance(max, str):
+                     inputs.append(max)
+                 else:
+                     raise RuntimeError("Parameter 'max' must be a string or a float.")
+
+             container.add_node('Clip', inputs, output_name, op_version=op_version, **attrs)
+         return output_name
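+
+     # For example (illustrative): with target_opset = 13,
+     # self.clip('x', 'y', container, min=0.0, max=6.0) registers 'clip_min' and
+     # 'clip_max' initializers and wires them in as the optional second and third
+     # inputs of a Clip-12 node, instead of the pre-opset-11 min/max attributes.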
+
+     def concat(self, input_names, output_name, container, operator_name=None, axis=0):
+         name = _create_name_or_use_existing_one(container, 'Concat', operator_name)
+
+         if container.target_opset < 4:
+             op_version = 1
+         elif container.target_opset < 11:
+             op_version = 4
+         else:
+             op_version = 11
+
+         container.add_node('Concat', input_names, output_name, op_version=op_version, name=name, axis=axis)
+         return output_name
+
+     def concat_from_sequence(self, input_names, output_name, container, operator_name=None, axis=0, new_axis=None):
+         name = _create_name_or_use_existing_one(container, 'Concat', operator_name)
+         attrs = {'axis': axis}
+         if new_axis is not None:
+             attrs['new_axis'] = new_axis
+         container.add_node('ConcatFromSequence', input_names, output_name, op_version=11, name=name, **attrs)
+         return output_name
+
+     def constant(self, input_names, output_name, container, operator_name=None, value=None):
+         assert len(input_names) == 0  # only a placeholder to standardize the argument list.
+         name = _create_name_or_use_existing_one(container, 'Constant', operator_name)
+
+         if value is None:
+             raise ValueError('Attribute "value" is a required argument.')
+
+         if container.target_opset < 9:
+             op_version = 1
+         elif container.target_opset < 11:
+             op_version = 9
+         elif container.target_opset < 12:
+             op_version = 11
+         else:
+             op_version = 12
+
+         if op_version < 12:
+             attrs = {'name': name, 'value': value}
+         else:
+             if isinstance(value, float):
+                 attrs = {'name': name, 'value_float': value}
+             elif isinstance(value, int):
+                 attrs = {'name': name, 'value_int': value}
+             elif isinstance(value, str):
+                 attrs = {'name': name, 'value_string': value}
+             else:
+                 attrs = {'name': name, 'value': value}
+
+         container.add_node('Constant', [], output_name, op_version=op_version, **attrs)
+         return output_name
+
+     def constant_of_shape(self, input_names, output_name, container, operator_name=None, value=None):
+         attrs = {}
+         if value is not None:
+             attrs['value'] = value
+         name = _create_name_or_use_existing_one(container, 'ConstantOfShape', operator_name)
+         container.add_node('ConstantOfShape', input_names, output_name, name=name, op_version=9, **attrs)
+         return output_name
+
+     def conv(self, input_names, output_name, container, operator_name=None, **attrs):
+         name = _create_name_or_use_existing_one(container, 'Conv', operator_name)
+
+         if container.target_opset < 11:
+             op_version = 1
+         else:
+             op_version = 11
+
+         container.add_node('Conv', input_names, output_name, name=name, op_version=op_version, **attrs)
+         return output_name
+
+     def crop_height_width(self, input_name, output_name, container, operator_name=None,
+                           top_border=0, bottom_border=0, left_border=0, right_border=0):
+         name = container.get_unique_operator_name('CropHeightWidth')
+         if container.target_opset < 9:
+             # If the operator set is < 9, we can use the experimental Crop in ONNX.
+             attrs = {'name': name, 'border': [left_border, top_border, right_border, bottom_border]}
+             container.add_node('Crop', input_name, output_name, **attrs)
+         else:
+             # The experimental Crop in ONNX was removed after operator set 9, so we
+             # switch to the ONNX DynamicSlice operator.
+
+             # CoreML only crops the H- and W-axes.
+             axes = [2, 3]
+             axes_name = self.get_unique_tensor_name(name + '_axes')
+             container.add_initializer(axes_name, onnx_proto.TensorProto.INT64,
+                                       [len(axes)], axes)
+
+             # The number of cropped pixels is the starting index of the remaining region.
+             starts = [top_border, left_border]
+             starts_name = self.get_unique_tensor_name(name + '_starts')
+             container.add_initializer(starts_name, onnx_proto.TensorProto.INT64,
+                                       [len(starts)], starts)
+
+             # First we assume no cropping is needed at the end of those axes.
+             # We will change this right below depending on Crop's configuration.
+             ends = [np.iinfo(np.int64).max] * 2
+
+             # Cropping n pixels means the end index (exclusive) is -n. Note that the
+             # indexing system is zero-based.
+             if bottom_border > 0:
+                 ends[0] = -bottom_border
+             if right_border > 0:
+                 ends[1] = -right_border
+
+             # Add the adjusted ends.
+             ends_name = self.get_unique_tensor_name(name + '_ends')
+             container.add_initializer(ends_name, onnx_proto.TensorProto.INT64,
+                                       [len(ends)], ends)
+
+             # Collect all input names as a list because DynamicSlice has multiple inputs.
+             input_list = [input_name, starts_name, ends_name, axes_name]
+             container.add_node('DynamicSlice', input_list, output_name, op_version=9)
+         return output_name
+
+     def cumsum(self, input_names, output_names, container, operator_name=None, axis=None):
+         name = _create_name_or_use_existing_one(container, 'cumsum', operator_name)
+         assert axis is not None, "Axis in Op CumSum must be provided."
+         axis_name = self.get_unique_tensor_name(name + '_dim')
+         container.add_initializer(axis_name,
+                                   onnx_proto.TensorProto.INT64,
+                                   [1], [axis])
+         container.add_node('CumSum', input_names + [axis_name], output_names, op_version=11, name=name)
+         return output_names
+
+     def div(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
+         self._apply_basic_numerical_operation('Div', input_names, output_name,
+                                               container, operator_name,
+                                               axis, broadcast)
+         return output_name
+
+     def elu(self, input_name, output_name, container, operator_name=None, alpha=1.0):
+         self._apply_unary_operation('Elu', input_name, output_name, container, operator_name, alpha=alpha)
+         return output_name
+
+     def equal(self, input_names, output_name, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'equal', operator_name)
+         if container.target_opset < 7:
+             op_version = 1
+         elif container.target_opset < 9:
+             op_version = 7
+         else:
+             op_version = 9
+         container.add_node('Equal', input_names, output_name, name=name, op_version=op_version)
+         return output_name
+
+     def exp(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Exp', input_name, output_name, container, operator_name=operator_name)
+         return output_name
+
+     def floor(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Floor', input_name, output_name, container, operator_name=operator_name)
+         return output_name
+
+     def flatten(self, input_name, output_name, container, operator_name=None, axis=1):
+         name = _create_name_or_use_existing_one(container, 'Flatten', operator_name)
+         if container.target_opset < 9:
+             op_version = 1
+         elif container.target_opset < 11:
+             op_version = 9
+         else:
+             op_version = 11
+         container.add_node('Flatten', input_name, output_name, name=name, op_version=op_version, axis=axis)
+         return output_name
+
+     def gather(self, input_names, output_name, container, operator_name=None, axis=0):
+         name = _create_name_or_use_existing_one(container, 'Gather', operator_name)
+         if container.target_opset < 11:
+             op_version = 1
+         else:
+             op_version = 11
+
+         container.add_node('Gather', input_names, output_name, name=name, op_version=op_version, axis=axis)
+         return output_name
+
+     def gemm(self, input_name, output_name, container, operator_name=None, alpha=1.0, beta=1.0,
+              transA=0, transB=0):
+         """
+         Applies operator `gemm <https://github.com/onnx/onnx/blob/master/docs/Operators.md#gemm>`.
+         """
+         name = _create_name_or_use_existing_one(container, 'Gemm', operator_name)
+         attrs = {'alpha': alpha, 'beta': beta, 'transA': transA, 'transB': transB}
+         if container.target_opset < 5:
+             attrs['op_version'] = 1
+             attrs['broadcast'] = 1
+         elif container.target_opset < 7:
+             attrs['op_version'] = 6
+             attrs['broadcast'] = 1
+         elif container.target_opset < 11:
+             attrs['op_version'] = 7
+         else:
+             attrs['op_version'] = 11
+
+         container.add_node('Gemm', input_name, output_name, name=name, **attrs)
+         return output_name
+
+     def greater(self, input_names, output_name, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'Greater', operator_name)
+         if container.target_opset < 7:
+             op_version = 1
+         elif container.target_opset < 9:
+             op_version = 7
+         else:
+             op_version = 9
+
+         container.add_node('Greater', input_names, output_name, name=name, op_version=op_version)
+         return output_name
+
+     def _apply_convert_compare_equal(self, input_names, output_name, container, operator_name,
+                                      tf_op_string, onnx_op_string_rev, onnx_op_string):
+         if container.target_opset < 7:
+             raise ValueError(tf_op_string + " op is not supported for opset < 7")
+         elif container.target_opset < 9:
+             op_version = 7
+         elif container.target_opset < 12:
+             op_version = 9
+         else:
+             op_version = 12
+         name = _create_name_or_use_existing_one(container, tf_op_string, operator_name)
+         if op_version < 9:
+             compare_input_0 = self.get_unique_tensor_name(name + '_input_0_cast')
+             container.add_node('Cast', [input_names[0]], compare_input_0, name=name + '_input_0_cast', to=1)
+             compare_input_1 = self.get_unique_tensor_name(name + '_input_1_cast')
+             container.add_node('Cast', [input_names[1]], compare_input_1, name=name + '_input_1_cast', to=1)
+             less_out = self.get_unique_tensor_name(name + '_less_out')
+             container.add_node(onnx_op_string_rev, [compare_input_0, compare_input_1], less_out,
+                                name=name + '_' + onnx_op_string_rev.lower(),
+                                op_version=op_version)
+             container.add_node('Not', less_out, output_name, name=name + '_not')
+         elif op_version < 12:
+             compare_node = self.get_unique_tensor_name(name + '_compare_node')
+             container.add_node(onnx_op_string_rev, input_names, compare_node,
+                                name=name + '_' + onnx_op_string_rev.lower(),
+                                op_version=op_version)
+             container.add_node('Not', [compare_node], output_name, name=name)
+         else:
+             container.add_node(onnx_op_string, input_names, output_name,
+                                name=name + '_' + onnx_op_string_rev.lower(), op_version=op_version)
+
+     def greater_or_equal(self, input_names, output_name, container, operator_name=None):
+         self._apply_convert_compare_equal(input_names, output_name, container, operator_name,
+                                           'GreaterEqual', 'Less', 'GreaterOrEqual')
+         return output_name
+
+     def less_or_equal(self, input_names, output_name, container, operator_name=None):
+         self._apply_convert_compare_equal(input_names, output_name, container,
+                                           operator_name, 'LessEqual', 'Greater', 'LessOrEqual')
+         return output_name
+
+     def gru(self, input_names, output_names, container, operator_name=None, output_seq=0, reset_after=0, **attrs):
+         name = _create_name_or_use_existing_one(container, 'GRU', operator_name)
+         if container.target_opset < 3:
+             op_version = 1
+             attrs['output_sequence'] = 1 if output_seq else 0
+         else:
+             attrs['linear_before_reset'] = 1 if reset_after else 0
+             if container.target_opset <= 5:
+                 attrs['output_sequence'] = 1 if output_seq else 0
+                 op_version = 3
+             else:
+                 op_version = 7
+
+         container.add_node('GRU', input_names, output_names, name=name, op_version=op_version, **attrs)
+         return output_names
+
+     def hard_sigmoid(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
+         self._apply_unary_operation('HardSigmoid', input_name, output_name, container, operator_name,
+                                     alpha=alpha, beta=beta)
+         return output_name
+
+     def identity(self, input_name, output_name, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'Identity', operator_name)
+         container.add_node('Identity', input_name, output_name, name=name)
+         return output_name
+
+     def instance_norm(self, input_names, output_name, container, operator_name=None, epsilon=1e-5):
+         name = _create_name_or_use_existing_one(container, 'InstanceNormalization', operator_name)
+         attrs = {'name': name, 'epsilon': epsilon}
+
+         if container.target_opset < 2:
+             attrs['consumed_inputs'] = [0] * len(input_names)
+             op_version = 1
+         else:
+             op_version = 6
+
+         container.add_node('InstanceNormalization', input_names, output_name, op_version=op_version, **attrs)
+         return output_name
+
+     def inverse(self, input_name, output_name, container, operator_name=None):
+         if container.target_opset < 12:
+             raise ValueError("tf op MatrixInverse is not supported for opset < 12")
+         else:
+             op_version = 12
+         name = _create_name_or_use_existing_one(container, 'Inverse', operator_name)
+         container.add_node('Inverse', input_name, output_name, name=name, op_version=op_version)
+         return output_name
+
+     def leaky_relu(self, input_name, output_name, container, operator_name=None, alpha=0.01):
+         self._apply_unary_operation('LeakyRelu', input_name, output_name, container, operator_name, alpha=alpha)
+         return output_name
+
+     def less(self, input_names, output_name, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'Less', operator_name)
+         if container.target_opset < 7:
+             op_version = 1
+         elif container.target_opset < 9:
+             op_version = 7
+         else:
+             op_version = 9
+
+         container.add_node('Less', input_names, output_name, name=name, op_version=op_version)
+         return output_name
+
+     def log(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Log', input_name, output_name, container, operator_name=operator_name)
+         return output_name
+
+     def lstm(self, input_names, output_names, container, operator_name=None, output_seq=0, **attrs):
+         name = _create_name_or_use_existing_one(container, 'LSTM', operator_name)
+         if container.target_opset <= 6:
+             attrs['output_sequence'] = 1 if output_seq else 0
+             op_version = 1
+         else:
+             op_version = 7
+         container.add_node('LSTM', input_names, output_names, name=name, op_version=op_version, **attrs)
+         return output_names
+
+     def matmul(self, input_names, output_name, container, operator_name=None):
+         op_type = 'MatMul'
+         name = _create_name_or_use_existing_one(container, op_type, operator_name)
+         if container.target_opset <= 9:
+             op_version = 1
+         else:
+             op_version = 9
+         container.add_node(op_type, input_names, output_name, op_version=op_version, name=name)
+         return output_name
+
+     def max(self, input_names, output_name, container, operator_name=None):
+         self._apply_pointwise_operation('Max', input_names, output_name, container, operator_name)
+         return output_name
+
+     def mean(self, input_names, output_name, container, operator_name=None):
+         self._apply_pointwise_operation('Mean', input_names, output_name, container, operator_name)
+         return output_name
+
+     def min(self, input_names, output_name, container, operator_name=None):
+         self._apply_pointwise_operation('Min', input_names, output_name, container, operator_name)
+         return output_name
+
+     def mul(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
+         self._apply_basic_numerical_operation('Mul', input_names, output_name,
+                                               container, operator_name=operator_name,
+                                               axis=axis, broadcast=broadcast)
+         return output_name
+
+     def neg(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Neg', input_name, output_name, container, operator_name)
+         return output_name
+
+     def lpnormalization(self, input_name, output_name, container, operator_name=None, axis=1, p=2):
+         name = _create_name_or_use_existing_one(container, 'LpNormalization', operator_name)
+         container.add_node('LpNormalization', input_name, output_name, name=name, p=p, axis=axis)
+         return output_name
+
+     def not_op(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Not', input_name, output_name, container, operator_name)
+         return output_name
+
+     def or_op(self, input_names, output_names, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'or', operator_name)
+         container.add_node('Or', input_names, output_names, op_version=7, name=name)
+         return output_names
+
+     def pad(self, input_name, output_name, container, operator_name=None, mode=None, pads=None, value=None,
+             onnx_type=onnx_proto.TensorProto.FLOAT):
+         name = _create_name_or_use_existing_one(container, 'Pad', operator_name)
+         attrs = {'name': name}
+         inputs = input_name if isinstance(input_name, list) else [input_name]
+
+         if mode is not None:
+             attrs['mode'] = mode
+
+         if container.target_opset < 11:
+             if isinstance(pads, str):
+                 raise ValueError("Dynamic pad is not supported for opset < 11.")
+             if value is not None:
+                 attrs['value'] = value
+             if container.target_opset < 2:
+                 attrs['paddings'] = pads
+                 op_version = 1
+             else:
+                 attrs['pads'] = pads
+                 op_version = 2
+         else:
+             op_version = 11
+             if isinstance(pads, str):
+                 inputs.append(pads)
+             else:
+                 pads_name = self.get_unique_tensor_name(name + '_pads')
+                 container.add_initializer(pads_name, onnx_proto.TensorProto.INT64, [len(pads)], pads)
+                 inputs.append(pads_name)
+             if value is not None:
+                 value_name = self.get_unique_tensor_name(name + '_value')
+                 container.add_initializer(value_name, onnx_type, [], [value])
+                 inputs.append(value_name)
+
+         container.add_node('Pad', inputs, output_name, op_version=op_version, **attrs)
+         return output_name
+
+     def parametric_softplus(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
+         if alpha is None:
+             alpha = [1.0]
+         if beta is None:
+             beta = [0.]
+
+         name = _create_name_or_use_existing_one(container, 'ParametricSoftplus', operator_name)
+         if container.target_opset < 9:
+             if len(alpha) != 1 or len(beta) != 1:
+                 raise ValueError('alpha and beta must be 1-element lists')
+             op_type = 'ParametricSoftplus'
+             attrs = {'name': name, 'alpha': alpha[0], 'beta': beta[0]}
+             container.add_node(op_type, input_name, output_name, **attrs)
+         else:
+             # Define three scalars: a, b, 1.
+             aName = self.get_unique_tensor_name(name + '_alpha')
+             aShape = [len(alpha)] if len(alpha) == 1 else [len(alpha), 1, 1]
+             container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, aShape, alpha)
+             bShape = [len(beta)] if len(beta) == 1 else [len(beta), 1, 1]
+             bName = self.get_unique_tensor_name(name + '_beta')
+             container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, bShape, beta)
+             oneName = self.get_unique_tensor_name(name + '_one')
+             container.add_initializer(oneName, onnx_proto.TensorProto.FLOAT, [1], [1.])
+
+             # c = b * x
+             cName = self.get_unique_tensor_name(name + '_c')
+             self.mul([input_name, bName], cName, container)
+
+             # d = exp(c)
+             dName = self.get_unique_tensor_name(name + '_d')
+             self.exp(cName, dName, container)
+
+             # e = 1 + d
+             eName = self.get_unique_tensor_name(name + '_e')
+             self.add([dName, oneName], eName, container)
+
+             # f = log(e)
+             fName = self.get_unique_tensor_name(name + '_f')
+             self.log(eName, fName, container)
+
+             # g = a * f
+             self.mul([fName, aName], output_name, container)
+         return output_name
+
+     def pow(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
+         name = _create_name_or_use_existing_one(container, 'Pow', operator_name)
+
+         attrs = {'name': name}
+         if container.target_opset < 7:
+             # Before ONNX-1.2, broadcasting behavior is Caffe2-like.
+             if axis is not None:
+                 attrs['axis'] = axis
+             if broadcast is not None:
+                 attrs['broadcast'] = broadcast
+             op_version = 1
+         elif container.target_opset < 12:
+             # Since ONNX-1.2, broadcasting behavior is Numpy-like, so we don't need to specify any attributes.
+             op_version = 7
+         else:
+             op_version = 12
+
+         container.add_node('Pow', input_names, output_name, op_version=op_version, **attrs)
+         return output_name
+
+     def prelu(self, input_name, output_name, container, operator_name=None, slp_rate=None):
+         name = _create_name_or_use_existing_one(container, 'PRelu', operator_name)
+         slp_rate_tensor_name = self.get_unique_tensor_name('slp_rate')
+         s_shape = slp_rate.shape
+         if container.target_opset < 7:
+             s_shape = [len(slp_rate.flatten())]
+         container.add_initializer(slp_rate_tensor_name, onnx_proto.TensorProto.FLOAT, s_shape, slp_rate.flatten())
+
+         if container.target_opset < 6:
+             container.add_node('PRelu', [input_name, slp_rate_tensor_name], output_name, op_version=1, name=name,
+                                consumed_inputs=[0, 0])
+         else:
+             if container.target_opset < 7:
+                 op_version = 6
+             elif container.target_opset < 9:
+                 op_version = 7
+             else:
+                 # opset 9 supports unidirectional broadcasting
+                 op_version = 9
+
+             container.add_node('PRelu', [input_name, slp_rate_tensor_name], output_name, op_version=op_version,
+                                name=name)
+         return output_name
+
+     def range(self, input_name, output_name, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'Range', operator_name)
+         container.add_node('Range', input_name, output_name, op_version=11, name=name)
+         return output_name
+
+     def reciprocal(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Reciprocal', input_name, output_name, container, operator_name=operator_name)
+         return output_name
+
+     # Some old ORT versions support the axis < 0 case, so rank=0 is the default.
+     def reducesum(self, input_name, output_name, container, operator_name=None, axes=None, keepdims=1, rank=0):
+         name = _create_name_or_use_existing_one(container, 'ReduceSum', operator_name)
+         if axes is None:
+             axes = []
+         if container.target_opset < 13:
+             if container.target_opset < 11:
+                 op_version = 1
+                 axes = [axis if axis >= 0 else axis + rank for axis in axes]
+             else:
+                 op_version = 11
+             container.add_node('ReduceSum', input_name, output_name, name=name,
+                                op_version=op_version, axes=axes, keepdims=keepdims)
+         else:
+             if not isinstance(input_name, list):
+                 input_name = [input_name]
+             op_version = 13
+             if isinstance(axes, str):
+                 container.add_node('ReduceSum', input_name + [axes], output_name,
+                                    op_version=op_version, name=name, keepdims=keepdims)
+             elif axes is None or len(axes) == 0:
+                 container.add_node('ReduceSum', input_name, output_name,
+                                    op_version=op_version, name=name, keepdims=keepdims)
+             else:
+                 axes_name = self.get_unique_tensor_name(name + '_reducesum')
+                 container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
+                 container.add_node('ReduceSum', input_name + [axes_name], output_name,
+                                    op_version=op_version, name=name, keepdims=keepdims)
+         return output_name
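+
+     # For example (illustrative): with target_opset = 13,
+     # self.reducesum('x', 'y', container, axes=[-1], rank=2) registers an INT64
+     # axes initializer and passes it as the second input of ReduceSum-13;
+     # before opset 13 the same axes would be emitted as a node attribute.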
+
+     def reducemin(self, input_name, output_name, container, operator_name=None, axes=None, keepdims=1, rank=0):
+         name = _create_name_or_use_existing_one(container, 'ReduceMin', operator_name)
+         if axes is None:
+             axes = []
+         if container.target_opset < 13:
+             if container.target_opset < 11:
+                 op_version = 1
+                 axes = [axis if axis >= 0 else axis + rank for axis in axes]
+             else:
+                 op_version = 11
+             container.add_node('ReduceMin', input_name, output_name, name=name,
+                                op_version=op_version, axes=axes, keepdims=keepdims)
+         else:
+             if not isinstance(input_name, list):
+                 input_name = [input_name]
+             op_version = 13
+             if isinstance(axes, str):
+                 container.add_node('ReduceMin', input_name + [axes], output_name,
+                                    op_version=op_version, name=name, keepdims=keepdims)
+             elif axes is None or len(axes) == 0:
+                 container.add_node('ReduceMin', input_name, output_name,
+                                    op_version=op_version, name=name, keepdims=keepdims)
+             else:
+                 axes_name = self.get_unique_tensor_name(name + '_reducemin')
+                 container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
+                 container.add_node('ReduceMin', input_name + [axes_name], output_name,
+                                    op_version=op_version, name=name, keepdims=keepdims)
+         return output_name
+
+     def relu(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Relu', input_name, output_name, container, operator_name)
+         return output_name
+
+     def relu_6(self, input_name, output_name, container, operator_name=None, zero_value=0.0):
+         name_relu = _create_name_or_use_existing_one(container, 'relu', operator_name)
+         name_relu_op = _create_name_or_use_existing_one(container, 'relu6', operator_name)
+         self.relu(input_name, name_relu, container, name_relu_op + '_relu')
+         self.clip(name_relu, output_name, container, name_relu_op + '_clip', zero_value + 6, zero_value)
+         return output_name
+
+     def reshape(self, input_name, output_name, container, operator_name=None, desired_shape=None):
+         if not isinstance(desired_shape, str) and len(list(i for i in desired_shape if i is not None and i < 0)) > 1:
+             raise ValueError('There can only be one -1 in the targeted shape of a Reshape but got %s' % desired_shape)
+
+         name = _create_name_or_use_existing_one(container, 'Reshape', operator_name)
+
+         if container.target_opset < 5:
+             container.add_node('Reshape', input_name, output_name, op_version=1, name=name, shape=desired_shape,
+                                consumed_inputs=[0])
+         else:
+             if isinstance(desired_shape, str):
+                 desired_shape_name = desired_shape
+             else:
+                 desired_shape_name = self.get_unique_tensor_name('shape_tensor')
+                 container.add_initializer(desired_shape_name, onnx_proto.TensorProto.INT64, [len(desired_shape)],
+                                           desired_shape)
+
+             # Create the ONNX Reshape operator
+             if isinstance(input_name, list):
+                 input_name.append(desired_shape_name)
+             else:
+                 input_name = [input_name, desired_shape_name]
+             container.add_node('Reshape', input_name, output_name, op_version=5, name=name)
+         return output_name
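+
+     # For example (illustrative): self.reshape('x', 'y', container,
+     # desired_shape=[1, -1]) stores [1, -1] in a 'shape_tensor' initializer and
+     # feeds it as the second input of Reshape-5; with opset < 5 the shape would
+     # instead be the 'shape' attribute of Reshape-1.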
+
+     def resize(self, input_name, output_name, container, operator_name=None, mode='nearest',
+                coordinate_transformation_mode='asymmetric', scales=None):
+         """
+         :param mode: "nearest" or "linear"
+         :param scales: a float tensor for scaling (upsampling or downsampling) all input dimensions
+         """
+         name = _create_name_or_use_existing_one(container, 'Resize', operator_name)
+         attrs = {'name': name}
+         attrs['mode'] = mode.lower()
+
+         inputs = [input_name]
+
+         if container.target_opset < 11:
+             op_version = 10
+         else:
+             op_version = 11
+             roi_tensor_name = self.get_unique_tensor_name(name + '_roi')
+             roi = [0.0] * len(scales) + [1.0] * len(scales)
+             container.add_initializer(roi_tensor_name, onnx_proto.TensorProto.FLOAT, [2 * len(scales)], roi)
+             inputs.append(roi_tensor_name)
+             attrs['coordinate_transformation_mode'] = coordinate_transformation_mode
+             if attrs['mode'] == 'nearest':
+                 attrs['nearest_mode'] = 'floor'
+
+         scales_tensor_name = self.get_unique_tensor_name(name + '_scales')
+         container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)
+         inputs.append(scales_tensor_name)
+         container.add_node('Resize', inputs, output_name, op_version=op_version, **attrs)
+         return output_name
+
+     def rnn(self, input_names, output_names, container, operator_name=None, output_seq=0, **attrs):
+         name = _create_name_or_use_existing_one(container, 'RNN', operator_name)
+         if container.target_opset <= 6:
+             attrs['output_sequence'] = 1 if output_seq else 0
+             op_version = 1
+         else:
+             op_version = 7
+         container.add_node('RNN', input_names, output_names, name=name, op_version=op_version, **attrs)
+         return output_names
+
+     def shape(self, input_name, output_name, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'Shape', operator_name)
+         container.add_node('Shape', input_name, output_name, name=name, op_version=1)
+         return output_name
+
+     def sigmoid(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Sigmoid', input_name, output_name, container, operator_name)
+         return output_name
+
+     def softsign(self, input_name, output_name, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'Softsign', operator_name)
+         container.add_node('Softsign', input_name, output_name, name=name, op_version=1)
+         return output_name
+
+     # See alpha and gamma at https://github.com/keras-team/keras/blob/master/keras/activations.py#L80-L81
+     def selu(self, input_name, output_name, container, operator_name=None, alpha=1.673263, gamma=1.050701):
+         self._apply_unary_operation('Selu', input_name, output_name, container, operator_name, alpha=alpha,
+                                     gamma=gamma)
+         return output_name
+
+     def softmax(self, input_name, output_name, container, operator_name=None, axis=None):
+         name = _create_name_or_use_existing_one(container, 'Softmax', operator_name)
+         if axis is None:
+             axis = 1 if container.target_opset < 13 else -1
+         container.add_node('Softmax', input_name, output_name, name=name, axis=axis)
+         return output_name
+
+     def scaled_tanh(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
+         if alpha is None:
+             alpha = [1.0]
+         if beta is None:
+             beta = [1.0]
+         if len(alpha) != 1 or len(beta) != 1:
+             raise ValueError('alpha and beta must be 1-element lists')
+
+         name = _create_name_or_use_existing_one(container, 'ScaledTanh', operator_name)
+         if container.target_opset < 9:
+             attrs = {'name': name, 'alpha': alpha[0], 'beta': beta[0]}
+             container.add_node('ScaledTanh', input_name, output_name, **attrs)
+         else:
+             # Define scalar a, initialized with parameter alpha.
+             aName = self.get_unique_tensor_name(name + '_alpha')
+             aShape = [len(alpha)] if len(alpha) == 1 else [len(alpha), 1, 1]
+             container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, aShape, alpha)
+
+             # Define scalar b, initialized with parameter beta.
+             bShape = [len(beta)] if len(beta) == 1 else [len(beta), 1, 1]
+             bName = self.get_unique_tensor_name(name + '_beta')
+             container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, bShape, beta)
+
+             # c = b * x
+             cName = self.get_unique_tensor_name(name + '_c')
+             self.mul([input_name, bName], cName, container)
+
+             # d = tanh(c)
+             dName = self.get_unique_tensor_name(name + '_d')
+             self.tanh(cName, dName, container)
+
+             # output = a * d
+             self.mul([aName, dName], output_name, container)
+         return output_name
+
+     def slice(self, input_name, output_name, container,
+               operator_name=None, starts=None, ends=None, axes=None, steps=None):
+         assert starts is not None, 'the starts in slice op cannot be None'
+         assert ends is not None, 'the ends in slice op cannot be None'
+         name = _create_name_or_use_existing_one(container, 'Slice', operator_name)
+
+         if container.target_opset < 10:
+             if axes is None:
+                 container.add_node('Slice', input_name, output_name, name=name,
+                                    starts=starts, ends=ends, op_version=1)
+             else:
+                 container.add_node('Slice', input_name, output_name, name=name,
+                                    starts=starts, ends=ends, axes=axes, op_version=1)
+         else:
+             if container.target_opset == 10:
+                 op_version = 10
+             else:
+                 op_version = 11
+             inputs = input_name if isinstance(input_name, list) else [input_name]
+             if isinstance(starts, str):
+                 starts_name = starts
+             else:
+                 starts_name = self.get_unique_tensor_name('starts')
+                 container.add_initializer(starts_name, onnx_proto.TensorProto.INT64,
+                                           [len(starts)], starts)
+
+             if isinstance(ends, str):
+                 ends_name = ends
+             else:
+                 ends_name = self.get_unique_tensor_name('ends')
+                 container.add_initializer(ends_name, onnx_proto.TensorProto.INT64,
+                                           [len(ends)], ends)
+
+             inputs.append(starts_name)
+             inputs.append(ends_name)
+             if axes:
+                 if isinstance(axes, str):
+                     axes_name = axes
+                 else:
+                     axes_name = self.get_unique_tensor_name('axes')
+                     container.add_initializer(axes_name, onnx_proto.TensorProto.INT64,
+                                               [len(axes)], axes)
+                 inputs.append(axes_name)
+             if steps:
+                 if not axes:
+                     inputs.append('')
+                 if isinstance(steps, str):
+                     steps_name = steps
+                 else:
+                     steps_name = self.get_unique_tensor_name('steps')
+                     container.add_initializer(steps_name, onnx_proto.TensorProto.INT64,
+                                               [len(steps)], steps)
+                 inputs.append(steps_name)
+             container.add_node('Slice', inputs, output_name, name=name,
+                                op_version=op_version)
+         return output_name
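+
+     # For example (illustrative): with target_opset = 11,
+     # self.slice('x', 'y', container, starts=[0], ends=[2], axes=[1], steps=[1])
+     # creates INT64 initializers for starts/ends/axes/steps and passes them as
+     # inputs of Slice-11; with opset < 10 they would be node attributes instead.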
+
+     def split(self, input_name, output_names, container, operator_name=None, split=None, axis=0):
+         name = _create_name_or_use_existing_one(container, 'Split', operator_name)
+         if container.target_opset <= 1:
+             op_version = 1
+         elif container.target_opset < 11:
+             op_version = 2
+         elif container.target_opset < 13:
+             op_version = 11
+         else:
+             op_version = 13
+
+         attrs = {'name': name}
+         if split is not None:
+             if container.target_opset < 13:
+                 attrs['split'] = split
+             else:
+                 if not isinstance(input_name, list):
+                     input_name = [input_name]
+                 if isinstance(split, str):
+                     split_name = split
+                 else:
+                     split_name = self.get_unique_tensor_name(name + '_split')
+                     container.add_initializer(split_name, onnx_proto.TensorProto.INT64, [len(split)], split)
+                 input_name = input_name + [split_name]
+
+         if axis is not None:
+             attrs['axis'] = axis
+
+         container.add_node('Split', input_name, output_names, op_version=op_version, **attrs)
+         return output_names
1271
+
1272
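+     # Usage sketch (illustrative): splitting a tensor into two uneven pieces
+     # along axis 1; with target_opset >= 13 the sizes travel as an initializer
+     # input, otherwise as the `split` attribute:
+     #
+     #   ox.split('X', ['X_a', 'X_b'], container, split=[2, 3], axis=1)
+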
+     def sqrt(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Sqrt', input_name, output_name, container, operator_name=operator_name)
+         return output_name
+
+     def _apply_squeeze_unsqueeze(self, input_name, output_name, container, squeeze_str, operator_name=None,
+                                  axes=None, rank=0):
+         name = _create_name_or_use_existing_one(container, squeeze_str, operator_name)
+         if container.target_opset < 13:
+             if container.target_opset < 11:
+                 op_version = 1
+                 # Negative axes are not supported before opset 11; normalize them with the rank.
+                 axes = [axis if axis >= 0 else axis + rank for axis in axes]
+             else:
+                 op_version = 11
+             container.add_node(squeeze_str, input_name, output_name, name=name, op_version=op_version, axes=axes)
+         else:
+             # Since opset 13, the axes are an input rather than an attribute.
+             op_version = 13
+             if not isinstance(input_name, list):
+                 input_name = [input_name]
+             if isinstance(axes, str):
+                 container.add_node(squeeze_str, input_name + [axes], output_name, op_version=op_version, name=name)
+             elif len(axes) == 0:
+                 container.add_node(squeeze_str, input_name, output_name, op_version=op_version, name=name)
+             else:
+                 axes_name = self.get_unique_tensor_name(name + '_axes')
+                 container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
+                 container.add_node(squeeze_str, input_name + [axes_name], output_name, op_version=op_version,
+                                    name=name)
+         return output_name
+
+     def squeeze(self, input_name, output_name, container, operator_name=None, axes=None, rank=0):
+         if axes is None:
+             axes = []
+         self._apply_squeeze_unsqueeze(input_name, output_name, container, 'Squeeze', operator_name, axes, rank)
+         return output_name
+
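+     # Usage sketch (illustrative): dropping a leading singleton axis; `rank`
+     # is only consulted before opset 11, to normalize negative axes:
+     #
+     #   ox.squeeze('X', 'X_squeezed', container, axes=[0])
+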
+     def sub(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=0):
+         self._apply_basic_numerical_operation('Sub', input_names, output_name, container,
+                                               operator_name=operator_name, axis=axis, broadcast=broadcast)
+         return output_name
+
+     def sum(self, input_names, output_name, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'Sum', operator_name)
+         if container.target_opset < 6:
+             op_version = 1
+         else:
+             op_version = 6
+         container.add_node('Sum', input_names, output_name, op_version=op_version, name=name)
+         return output_name
+
+     def tanh(self, input_name, output_name, container, operator_name=None):
+         self._apply_unary_operation('Tanh', input_name, output_name, container, operator_name)
+         return output_name
+
+     def thresholded_relu(self, input_name, output_name, container, operator_name=None, alpha=None):
+         if alpha is None:
+             alpha = [1.0]
+
+         name = _create_name_or_use_existing_one(container, 'ThresholdedRelu', operator_name)
+         attrs = {'name': name, 'alpha': alpha[0]}
+         if container.target_opset < 10:
+             # ThresholdedRelu graduated from an experimental op to a full op in opset 10;
+             # onnxruntime maintains support for it in the ONNX domain as a contrib op.
+             attrs['op_domain'] = "ai.onnx"
+             op_version = 1
+         else:
+             op_version = 10
+         container.add_node('ThresholdedRelu', input_name, output_name, op_version=op_version, **attrs)
+         return output_name
+
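+     # ThresholdedRelu computes y = x if x > alpha else 0. Usage sketch
+     # (illustrative):
+     #
+     #   ox.thresholded_relu('X', 'Y', container, alpha=[0.1])
+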
+     def tile(self, input_name, output_name, container, operator_name=None, repeats=None):
+         name = _create_name_or_use_existing_one(container, 'Tile', operator_name)
+
+         if repeats is None or (not isinstance(repeats, str) and all(repeat_count == 1 for repeat_count in repeats)):
+             container.add_node('Identity', input_name, output_name, name=name)
+             return output_name
+
+         if container.target_opset < 6:
+             intermediate_input_name = input_name
+             intermediate_output_name = None
+             if isinstance(repeats, str):
+                 raise ValueError('repeats cannot be a string before opset 6')
+
+             for axis, repeat_count in enumerate(repeats):
+                 if repeat_count == 1:
+                     continue
+
+                 # Create the 2nd input of Tile
+                 tile_tensor_name = self.get_unique_tensor_name(name + '_tile')
+                 container.add_initializer(tile_tensor_name, onnx_proto.TensorProto.FLOAT, [1],
+                                           [float(repeat_count)])
+
+                 # Create the 3rd input of Tile
+                 axis_tensor_name = self.get_unique_tensor_name(name + '_axis')
+                 container.add_initializer(axis_tensor_name, onnx_proto.TensorProto.FLOAT, [1], [float(axis)])
+
+                 # Create a Tile node that duplicates along one axis. Since ONNX 1.2, Tile can duplicate
+                 # along multiple axes at once, so this per-axis iteration is only needed for old opsets.
+                 intermediate_output_name = self.get_unique_tensor_name(name + '_input')
+                 container.add_node('Tile', [intermediate_input_name, tile_tensor_name, axis_tensor_name],
+                                    intermediate_output_name, name=name)
+
+                 # Use the output produced by this round as the input in the next iteration
+                 intermediate_input_name = intermediate_output_name
+
+                 # Create a new name for the next Tile
+                 name = container.get_unique_operator_name('Tile')
+
+             # Use the last Tile name for the name of an Identity
+             container.add_node('Identity', intermediate_output_name, output_name, op_version=1, name=name)
+         else:
+             # ONNX 1.2 introduced the new Tile signature, which is used here. Normalize the inputs
+             # to a list so the repeats tensor can be appended uniformly.
+             inputs = input_name if isinstance(input_name, list) else [input_name]
+             if isinstance(repeats, str):
+                 container.add_node('Tile', inputs + [repeats], output_name, op_version=6, name=name)
+             else:
+                 repeat_tensor_name = self.get_unique_tensor_name(name + '_repeats')
+                 container.add_initializer(repeat_tensor_name, onnx_proto.TensorProto.INT64, [len(repeats)],
+                                           repeats)
+                 container.add_node('Tile', inputs + [repeat_tensor_name], output_name, op_version=6, name=name)
+         return output_name
+
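+     # Usage sketch (illustrative): repeats=[1, 2] doubles the second axis.
+     # Before opset 6 this unrolls into one Tile node per axis whose repeat
+     # count is not 1; from opset 6 on a single Tile with an INT64 repeats
+     # input is emitted:
+     #
+     #   ox.tile('X', 'X_tiled', container, repeats=[1, 2])
+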
+     def topk(self, input_name, output_names, container, k, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'TopK', operator_name)
+
+         if container.target_opset < 10:
+             if isinstance(k, str):
+                 raise ValueError('topk k cannot be a string before opset 10')
+             container.add_node('TopK', input_name, output_names, name=name, k=k, op_version=1)
+         else:
+             if container.target_opset == 10:
+                 op_version = 10
+             else:
+                 op_version = 11
+
+             # Since opset 10, k is a 1-element INT64 tensor input rather than an attribute.
+             inputs = input_name if isinstance(input_name, list) else [input_name]
+             if isinstance(k, str):
+                 k_value_name = k
+             else:
+                 k_value_name = self.get_unique_tensor_name('k_value')
+                 container.add_initializer(k_value_name, onnx_proto.TensorProto.INT64, [1], [k])
+             container.add_node('TopK', inputs + [k_value_name], output_names, name=name, op_version=op_version)
+         return output_names
+
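+     # Usage sketch (illustrative): TopK produces two outputs, the values and
+     # their indices:
+     #
+     #   ox.topk('scores', ['top_values', 'top_indices'], container, k=5)
+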
+     def transpose(self, input_name, output_name, container, operator_name=None, perm=None):
+         name = _create_name_or_use_existing_one(container, 'Transpose', operator_name)
+         container.add_node('Transpose', input_name, output_name, name=name, perm=perm)
+         return output_name
+
+     def upsample(self, input_name, output_name, container, operator_name=None, mode='nearest',
+                  coordinate_transformation_mode='asymmetric', scales=None):
+         """
+         :param input_name: the name of the input tensor
+         :param output_name: the name of the output tensor
+         :param container: the container the node is added to
+         :param operator_name: an optional name for the node
+         :param mode: 'nearest' or 'linear'
+         :param coordinate_transformation_mode: only used by the Resize op for opset >= 10
+         :param scales: a list of the scaling-up rates of all input dimensions
+         :return: the output name
+         """
+         if container.target_opset < 10:
+             name = _create_name_or_use_existing_one(container, 'Upsample', operator_name)
+             inputs = [input_name]
+             attrs = {'name': name}
+             if container.target_opset < 7:
+                 if len(scales) != 4:
+                     raise ValueError('Need to specify a 4-element list for the scales of the N-, C-, H- and W-axes')
+                 attrs['height_scale'] = float(scales[2])
+                 attrs['width_scale'] = float(scales[3])
+                 attrs['mode'] = mode.upper()
+                 op_version = 1
+             else:
+                 attrs['mode'] = mode.lower()
+                 if container.target_opset < 9:
+                     attrs['scales'] = list(map(float, scales))
+                     op_version = 7
+                 else:
+                     # scales moved from attribute to input in opset 9
+                     scales_tensor_name = self.get_unique_tensor_name(name + '_scales')
+                     container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)
+                     inputs = [input_name, scales_tensor_name]
+                     op_version = 9
+
+             container.add_node('Upsample', inputs, output_name, op_version=op_version, **attrs)
+         else:
+             # The Upsample op is deprecated as of ONNX opset 10,
+             # so Upsample is implemented through Resize instead.
+             self.resize(input_name, output_name, container, operator_name, mode, coordinate_transformation_mode,
+                         scales)
+         return output_name
+
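+     # Usage sketch (illustrative): doubling the spatial dimensions of an NCHW
+     # tensor; with target_opset >= 10 this is lowered to a Resize node:
+     #
+     #   ox.upsample('X', 'X_up', container, mode='nearest', scales=[1, 1, 2, 2])
+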
+     def unsqueeze(self, input_name, output_name, container, operator_name=None, axes=None, rank=0):
+         if axes is None:
+             axes = [0]
+         self._apply_squeeze_unsqueeze(input_name, output_name, container, 'Unsqueeze', operator_name, axes, rank)
+         return output_name
+
+     def where(self, input_names, output_names, container, operator_name=None):
+         name = _create_name_or_use_existing_one(container, 'where', operator_name)
+         container.add_node('Where', input_names, output_names, op_version=9, name=name)
+         return output_names
+
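+     # Usage sketch (illustrative): Where takes three inputs (condition, X, Y)
+     # and selects elementwise from X or Y based on the boolean condition:
+     #
+     #   ox.where(['cond', 'X', 'Y'], ['Z'], container)
+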
+     def loop(self, input_names, output_names, container, operator_name=None, body=None):
+         name = _create_name_or_use_existing_one(container, 'loop', operator_name)
+         # The first two inputs (trip count and loop condition) are optional;
+         # they are passed as empty names when omitted.
+         trip_count, cond, *states = tuple(input_names)
+         trip_count = '' if trip_count is None else trip_count
+         cond = '' if cond is None else cond
+         container.add_node(
+             'Loop', [trip_count, cond] + states, output_names, op_version=11, name=name, body=body)
+         return output_names
+
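+     # Usage sketch (illustrative; `loop_body_graph` stands for an ONNX
+     # GraphProto built elsewhere). A fixed-trip-count loop carrying one state
+     # variable could be emitted as:
+     #
+     #   ox.loop(['max_iter', None, 'state_in'], ['state_out'], container,
+     #           body=loop_body_graph)
+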
+     def model_call(self, input_name, output_name, container, operator_name=None, oxml=None):
+         name = operator_name
+         if name is None:
+             name = container.get_unique_operator_name('og')
+
+         # The tensor name replacement happens on unfolding the ONNX model:
+         # the sub-model's graph inputs/outputs are bridged to the outer graph
+         # through Identity nodes whose tensor names carry the "{name}_" prefix.
+         for idx, nm_ in enumerate(input_name):
+             nvi = oxml.graph.input[idx]
+             self.identity([nm_], ["{}_{}".format(name, nvi.name)], container)
+             container.value_info.append(nvi)
+         for idx, nm_ in enumerate(output_name):
+             self.identity(["{}_{}".format(name, oxml.graph.output[idx].name)], [nm_], container)
+         container.value_info.extend(oxml.graph.output)
+         container.add_model_node(input_name, output_name, name=name, model=oxml)
+         return output_name
+
+
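+ # Usage sketch (illustrative): embedding a previously loaded ONNX model as a
+ # sub-model call; `container` must additionally support the add_model_node
+ # interface used above:
+ #
+ #   import onnx
+ #   sub_model = onnx.load('preprocess.onnx')
+ #   ox.model_call(['raw_input'], ['processed'], container, oxml=sub_model)
+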
+ class _ONNXModelBuilder(_ONNXOperatorAPI):
+     def __init__(self):
+         self._id_count = 0
+
+     def get_unique_tensor_name(self, hint):
+         self._id_count += 1
+         return "v{}_{}".format(hint, str(self._id_count))
+
+     def make_tensor(self, dtype, dims, vals):
+         return helper.make_tensor(self.get_unique_tensor_name('ts'), dtype, dims, vals)
+
+
+ ox = _ONNXModelBuilder()
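+ # Usage sketch (illustrative): `ox` is the module-level builder instance.
+ # Unique tensor names follow the "v<hint>_<counter>" pattern, so on a fresh
+ # builder the first call below names its tensor 'vts_1':
+ #
+ #   t = ox.make_tensor(onnx_proto.TensorProto.FLOAT, [2], [0.5, 1.5])
+ #   nm = ox.get_unique_tensor_name('bias')   # -> 'vbias_2'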