onnxruntime_extensions 0.14.0__cp313-cp313-macosx_11_0_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnxruntime_extensions/__init__.py +82 -0
- onnxruntime_extensions/_cuops.py +564 -0
- onnxruntime_extensions/_extensions_pydll.cpython-313-darwin.so +0 -0
- onnxruntime_extensions/_extensions_pydll.pyi +45 -0
- onnxruntime_extensions/_hf_cvt.py +331 -0
- onnxruntime_extensions/_ocos.py +133 -0
- onnxruntime_extensions/_ortapi2.py +274 -0
- onnxruntime_extensions/_torch_cvt.py +231 -0
- onnxruntime_extensions/_version.py +2 -0
- onnxruntime_extensions/cmd.py +66 -0
- onnxruntime_extensions/cvt.py +306 -0
- onnxruntime_extensions/onnxprocess/__init__.py +12 -0
- onnxruntime_extensions/onnxprocess/_builder.py +53 -0
- onnxruntime_extensions/onnxprocess/_onnx_ops.py +1507 -0
- onnxruntime_extensions/onnxprocess/_session.py +355 -0
- onnxruntime_extensions/onnxprocess/_tensor.py +628 -0
- onnxruntime_extensions/onnxprocess/torch_wrapper.py +31 -0
- onnxruntime_extensions/pnp/__init__.py +13 -0
- onnxruntime_extensions/pnp/_base.py +124 -0
- onnxruntime_extensions/pnp/_imagenet.py +65 -0
- onnxruntime_extensions/pnp/_nlp.py +148 -0
- onnxruntime_extensions/pnp/_onnx_ops.py +1544 -0
- onnxruntime_extensions/pnp/_torchext.py +310 -0
- onnxruntime_extensions/pnp/_unifier.py +45 -0
- onnxruntime_extensions/pnp/_utils.py +302 -0
- onnxruntime_extensions/pp_api.py +83 -0
- onnxruntime_extensions/tools/__init__.py +0 -0
- onnxruntime_extensions/tools/add_HuggingFace_CLIPImageProcessor_to_model.py +171 -0
- onnxruntime_extensions/tools/add_pre_post_processing_to_model.py +535 -0
- onnxruntime_extensions/tools/pre_post_processing/__init__.py +4 -0
- onnxruntime_extensions/tools/pre_post_processing/pre_post_processor.py +395 -0
- onnxruntime_extensions/tools/pre_post_processing/step.py +227 -0
- onnxruntime_extensions/tools/pre_post_processing/steps/__init__.py +6 -0
- onnxruntime_extensions/tools/pre_post_processing/steps/general.py +366 -0
- onnxruntime_extensions/tools/pre_post_processing/steps/nlp.py +344 -0
- onnxruntime_extensions/tools/pre_post_processing/steps/vision.py +1157 -0
- onnxruntime_extensions/tools/pre_post_processing/utils.py +139 -0
- onnxruntime_extensions/util.py +186 -0
- onnxruntime_extensions-0.14.0.dist-info/LICENSE +21 -0
- onnxruntime_extensions-0.14.0.dist-info/METADATA +102 -0
- onnxruntime_extensions-0.14.0.dist-info/RECORD +43 -0
- onnxruntime_extensions-0.14.0.dist-info/WHEEL +6 -0
- onnxruntime_extensions-0.14.0.dist-info/top_level.txt +1 -0
onnxruntime_extensions/pnp/_onnx_ops.py
@@ -0,0 +1,1544 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import warnings
import numpy as np
from onnx import helper, defs as onnx_defs, onnx_pb as onnx_proto
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE


DEFAULT_OPSET_NUMBER = 13  # The maximum opset supported by the converter in the code branch.
# From https://github.com/onnx/onnx/blob/master/docs/Versioning.md
OPSET_TO_IR_VERSION = {
    1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3,
    7: 3, 8: 3, 9: 4, 10: 5, 11: 6, 12: 7,
    13: 7, 14: 7, 15: 8, 16: 8, 17: 8
}
if hasattr(helper, 'VERSION_TABLE'):
    OPSET_TO_IR_VERSION = {row[2]: row[1] for row in helper.VERSION_TABLE}


def _get_main_opset_version(model):
    """
    Returns the main opset version.
    """
    for op in model.opset_import:
        if op.domain == '' or op.domain == 'ai.onnx':
            return op.version
    return None


def onnx_builtin_opset_version():
    return onnx_defs.onnx_opset_version()


def get_maximum_opset_supported():
    return min(DEFAULT_OPSET_NUMBER, onnx_builtin_opset_version())
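

# Illustrative sketch: pairing an opset with the model IR version via the table
# above, mirroring what make_model_ex does further down; e.g. opset 13 graphs
# are written as IR version 7.
def _example_ir_version_lookup():
    opset = get_maximum_opset_supported()
    return opset, OPSET_TO_IR_VERSION.get(opset, onnx_proto.IR_VERSION)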


def make_model_ex(graph, imported_opset_pairs, target_default_opset, **kwargs):
    onnx_model = helper.make_model(graph, **kwargs)

    # Merge operator sets for the same domain; the largest version number is kept.
    purified_operator_set = dict()
    for op_domain, op_version in imported_opset_pairs:
        if op_domain not in purified_operator_set:
            if op_domain == '' or op_domain == 'ai.onnx':
                # Initializers are a subset of graph inputs for IR_VERSION <= 3 (target opset < 8).
                # The opset version needs upgrading, since initializers are separate from graph
                # inputs for IR_VERSION >= 4, to pass onnx.checker.
                if op_version < 8 and target_default_opset is not None and target_default_opset >= 8:
                    op_version = 8
            purified_operator_set[op_domain] = op_version
        else:
            purified_operator_set[op_domain] = max(purified_operator_set[op_domain], op_version)

    # Fill operator sets
    i = 0
    for op_domain, op_version in purified_operator_set.items():
        if i == 0 and len(onnx_model.opset_import) == 1:
            # Overwrite the default operator set created by helper.make_model(...)
            op_set = onnx_model.opset_import[0]
        else:
            # Just create one ONNX element in opset_import
            op_set = onnx_model.opset_import.add()
        op_set.domain = op_domain
        op_set.version = op_version
        i += 1
        if op_domain == '' or op_domain == 'ai.onnx':
            if target_default_opset < op_version:
                raise RuntimeError(('The specified opset %d is too low to convert this model, ' +
                                    'which requires at least opset %d.') % (target_default_opset, op_version))
            elif target_default_opset > op_version:
                warnings.warn('The maximum opset needed by this model is only %d.' % op_version)
            else:
                pass

    opv = _get_main_opset_version(onnx_model) or target_default_opset
    irv = OPSET_TO_IR_VERSION.get(opv, onnx_proto.IR_VERSION)
    onnx_model.ir_version = irv
    return onnx_model
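

# Illustrative sketch: a minimal call to make_model_ex, assuming a trivial
# Identity graph; the graph and tensor names below are made up for the example.
def _example_make_model_ex():
    node = helper.make_node('Identity', ['x'], ['y'])
    x = helper.make_tensor_value_info('x', onnx_proto.TensorProto.FLOAT, [1])
    y = helper.make_tensor_value_info('y', onnx_proto.TensorProto.FLOAT, [1])
    graph = helper.make_graph([node], 'g', [x], [y])
    # ir_version is derived from the opset via OPSET_TO_IR_VERSION.
    return make_model_ex(graph, [('', 13)], 13)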


class _ONNXModelOperator:
    def __init__(self, name, model, input, output):
        self.name = name
        self.model = model
        self.input = input
        self.output = output

    def __repr__(self):
        """
        Without this method, debugging is too slow.
        :return:
        """
        return "name: {}, input: {}, output: {}".format(self.name, self.input, self.output)

    @property
    def op_type(self):
        return 'ModelOp'


class ONNXElementContainer:

    opdict_counter = {}

    def __init__(self, target_opset, parent=None):
        """
        :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
        """
        self.inputs = []
        self.outputs = []
        self.initializers = []
        self.value_info = []
        self.nodes = []
        self.node_domain_version_pair_sets = set()
        self.target_opset = target_opset
        self.enable_optimizer = True
        self.parent = parent

    # The following properties make this container compatible with onnx.GraphProto.
    @property
    def initializer(self):
        return self.initializers

    @property
    def input(self):
        return self.inputs

    @property
    def output(self):
        return self.outputs

    @staticmethod
    def _make_value_info(variable):
        value_info = helper.ValueInfoProto()
        value_info.name = variable.full_name
        value_info.type.CopyFrom(variable.type.to_onnx_type())
        if variable.type.doc_string:
            value_info.doc_string = variable.type.doc_string
        return value_info

    def add_input(self, variable):
        """
        Add our Variable object, defined in _parser.py, into the input list of the final ONNX model.

        :param variable: The Variable object to be added
        """
        self.inputs.append(self._make_value_info(variable))

    def add_output(self, variable):
        """
        Add our Variable object, defined in _parser.py, into the output list of the final ONNX model.

        :param variable: The Variable object to be added
        """
        self.outputs.append(self._make_value_info(variable))

    def add_initializer(self, name, onnx_type, shape, content):
        """
        Add a TensorProto into the initializer list of the final ONNX model.

        :param name: Variable name in the produced ONNX model.
        :param onnx_type: Element types allowed in ONNX tensor, e.g., TensorProto.FLOAT and TensorProto.STRING.
        :param shape: Tensor shape, a list of integers.
        :param content: Flattened tensor values (i.e., a float list or a float array).
        """
        if any(d is None for d in shape):
            raise ValueError('Shape of initializer cannot contain None')
        tensor = helper.make_tensor(name, onnx_type, shape, content)
        self.initializers.append(tensor)

    def add_value_info(self, variable):
        self.value_info.append(self._make_value_info(variable))

    def add_node(self, op_type, inputs, outputs, op_domain='', op_version=1, **attrs):
        """
        Add a NodeProto into the node list of the final ONNX model. If the input operator's domain-version
        information cannot be found in our domain-version pool (a Python set), we may add it.

        :param op_type: A string (e.g., Pool and Conv) indicating the type of the NodeProto
        :param inputs: A list of strings. They are the input variables' names of the considered NodeProto
        :param outputs: A list of strings. They are the output variables' names of the considered NodeProto
        :param op_domain: The domain name (e.g., ai.onnx.ml) of the operator we are trying to add.
        :param op_version: The version number (e.g., 0 and 1) of the operator we are trying to add.
        :param attrs: A Python dictionary. Keys and values are attributes' names and attributes' values, respectively.
        """

        if isinstance(inputs, str):
            inputs = [inputs]
        if isinstance(outputs, str):
            outputs = [outputs]
        if not isinstance(inputs, (list, tuple)) or not all(isinstance(s, str) for s in inputs):
            type_list = ','.join(list(str(type(s)) for s in inputs))
            raise ValueError('Inputs must be a list of strings but got [%s]' % type_list)
        if not isinstance(outputs, (list, tuple)) or not all(isinstance(s, str) for s in outputs):
            type_list = ','.join(list(str(type(s)) for s in outputs))
            raise ValueError('Outputs must be a list of strings but got [%s]' % type_list)
        for k, v in attrs.items():
            if v is None:
                raise ValueError('Failed to create ONNX node. Undefined attribute pair (%s, %s) found' % (k, v))

        node = helper.make_node(op_type, inputs, outputs, **attrs)
        node.domain = op_domain

        self.node_domain_version_pair_sets.add((op_domain, op_version))
        self.nodes.append(node)

    def add_model_node(self, inputs, outputs, name, model):
        self.nodes.append(_ONNXModelOperator(name=name, model=model, input=inputs, output=outputs))

    @classmethod
    def get_unique_operator_name(cls, op_type: str):
        name = op_type.lower()
        nn = cls.opdict_counter.get(name, 0)
        cls.opdict_counter[name] = nn + 1
        return name if nn == 0 else "{}_{}".format(name, nn + 1)
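

# Illustrative sketch: how the container accumulates pieces of the final graph;
# the tensor names here are made up for the example.
def _example_container_usage():
    container = ONNXElementContainer(target_opset=13)
    container.add_initializer('w', onnx_proto.TensorProto.FLOAT, [1], [2.0])
    container.add_node('Mul', ['x', 'w'], ['y'], op_version=7,
                       name=ONNXElementContainer.get_unique_operator_name('Mul'))
    return container.nodes, container.initializers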


def _create_name_or_use_existing_one(container, op_type, name):
    return name or container.get_unique_operator_name(op_type)


class _OpSchema:
    _ox = None  # will be assigned by ONNXModelBuilder.

    def __init__(self, *args, **kwargs):
        # self.op_builder = None
        self.apply_fn = args[0]
        self.inputs = kwargs['inputs'] if 'inputs' in kwargs else []
        self.outputs = kwargs['outputs'] if 'outputs' in kwargs else []

    def __call__(self, *args, **kwargs):
        assert self._ox is not None, 'no builder instance was created'
        return self.apply_fn(self._ox, *args, **kwargs)

    # def __get__(self, instance, owner):
    #     if owner.__name__ == '_ONNXModelBuilder':
    #         self.op_builder = instance
    #     return self


def schema(apply_fn=None, *args, **kwargs):
    if apply_fn is None:
        def wrapper(fn):
            return _OpSchema(fn, *args, **kwargs)
        return wrapper
    else:
        # used as a function.
        return _OpSchema(apply_fn, *args, **kwargs)
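

# Illustrative sketch: declaring an op with the schema decorator, in the same
# style as the decorated methods below; the wrapped function only runs once a
# builder instance has been assigned to _OpSchema._ox.
def _example_schema_usage():
    @schema(outputs=((onnx_proto.TensorProto.BOOL, []),))
    def my_greater(ox, input_names, output_name, container, operator_name=None):
        return ox.greater(input_names, output_name, container, operator_name)
    return my_greater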


class _ONNXOperatorAPI:
    _dt = onnx_proto.TensorProto

    def get_unique_tensor_name(self, base): pass  # implemented by the model builder

    def _apply_unary_operation(self, op_type, input_name, output_name, container, operator_name, **attrs):
        name = _create_name_or_use_existing_one(container, op_type, operator_name)

        attrs['name'] = name
        if container.target_opset < 6:
            attrs['consumed_inputs'] = [0]
            op_version = 1
        else:
            op_version = 6

        container.add_node(op_type, input_name, output_name, op_version=op_version, **attrs)

    def _apply_basic_numerical_operation(self, op_type, input_names, output_name, container, operator_name,
                                         axis, broadcast):
        name = _create_name_or_use_existing_one(container, op_type, operator_name)

        attrs = {}
        if container.target_opset < 7:
            # Before ONNX-1.2 (opset 7), broadcasting behavior is Caffe2-like.
            if axis is not None:
                attrs['axis'] = axis
            if broadcast is not None:
                attrs['broadcast'] = broadcast

            if container.target_opset < 6:
                attrs['consumed_inputs'] = [0, 0]
                op_version = 1
            else:
                op_version = 6
        else:
            # Since ONNX-1.2 (opset 7), broadcasting behavior is Numpy-like, so we don't need to specify any attributes
            op_version = 7

        container.add_node(op_type, input_names, output_name, op_version=op_version, name=name, **attrs)

    def _apply_pointwise_operation(self, op_type, input_names, output_name, container, operator_name):
        name = _create_name_or_use_existing_one(container, op_type, operator_name)
        attrs = {}

        if container.target_opset < 6:
            attrs['consumed_inputs'] = [0] * len(input_names)
            op_version = 1
        elif container.target_opset < 8:
            op_version = 6
        else:
            if container.target_opset < 12 or op_type == 'Mean':
                op_version = 8
            else:
                op_version = 12

        container.add_node(op_type, input_names, output_name, op_version=op_version, name=name, **attrs)
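
    # Illustrative sketch: the Numpy-style broadcasting that opset >= 7 relies
    # on, which is why the helper above emits no axis/broadcast attributes there.
    @staticmethod
    def _example_numpy_broadcast():
        a = np.ones((2, 3), dtype=np.float32)
        b = np.ones((3,), dtype=np.float32)
        return (a + b).shape  # (2, 3), matching what an opset >= 7 Add produces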

    def abs(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Abs', input_name, output_name, container, operator_name=operator_name)
        return output_name

    def add(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
        self._apply_basic_numerical_operation('Add', input_names, output_name, container, operator_name=operator_name,
                                              axis=axis, broadcast=broadcast)
        return output_name

    def argmax(self, input_name, output_name, container, operator_name=None, axis=0, keepdims=1,
               select_last_index=0):
        name = _create_name_or_use_existing_one(container, 'ArgMax', operator_name)
        attrs = {'axis': axis, 'keepdims': keepdims}
        if container.target_opset < 11:
            op_version = 1
        elif container.target_opset < 12:
            op_version = 11
        else:
            op_version = 12
            attrs['select_last_index'] = select_last_index
        container.add_node('ArgMax', input_name, output_name, op_version=op_version, name=name, **attrs)
        return output_name

    def argmin(self, input_name, output_name, container, operator_name=None, axis=0, keepdims=1,
               select_last_index=0):
        name = _create_name_or_use_existing_one(container, 'ArgMin', operator_name)
        attrs = {'axis': axis, 'keepdims': keepdims}
        if container.target_opset < 11:
            op_version = 1
        elif container.target_opset < 12:
            op_version = 11
        else:
            op_version = 12
            attrs['select_last_index'] = select_last_index
        container.add_node('ArgMin', input_name, output_name, op_version=op_version, name=name, **attrs)
        return output_name

    def affine(self, input_name, output_name, container, operator_name=None, alpha=1., beta=0.):
        if container.target_opset < 9:
            op_type = 'Affine'
            name = _create_name_or_use_existing_one(container, 'Affine', operator_name)
            attrs = {'name': name, 'alpha': alpha, 'beta': beta}
            container.add_node(op_type, input_name, output_name, **attrs)
        else:
            name = _create_name_or_use_existing_one(container, 'Affine', operator_name)
            # Define a and b.
            aName = self.get_unique_tensor_name(name + '_alpha')
            container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, [1], [alpha])
            bName = self.get_unique_tensor_name(name + '_beta')
            container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, [1], [beta])

            # Compute Z = a * X, where X is the original input.
            zName = self.get_unique_tensor_name(name + '_scaled')
            self.mul([aName, input_name], zName, container)

            # Compute Y = Z + b, where Y is the final output.
            self.add([zName, bName], output_name, container)
        return output_name

    def batch_norm(self, input_names, output_names, container, operator_name=None,
                   epsilon=None, is_test=None, momentum=None, spatial=None):
        name = _create_name_or_use_existing_one(container, 'BatchNormalization', operator_name)
        attrs = {'name': name, 'epsilon': epsilon, 'momentum': momentum}

        if container.target_opset < 9:
            attrs['spatial'] = spatial
        if container.target_opset < 7:
            attrs['is_test'] = is_test

        if container.target_opset < 6:
            attrs['consumed_inputs'] = [0] * len(input_names)
            if len(input_names) > 3:
                attrs['consumed_inputs'][3] = 1
            if len(input_names) > 4:
                attrs['consumed_inputs'][4] = 2
            op_version = 1
        elif container.target_opset < 7:
            op_version = 6
        elif container.target_opset < 9:
            op_version = 7
        else:
            op_version = 9

        container.add_node('BatchNormalization', input_names, output_names, op_version=op_version, **attrs)
        return output_names

    def cast(self, input_name, output_name, container, operator_name=None, to=None):
        """
        :param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64.
        """
        name = _create_name_or_use_existing_one(container, 'Cast', operator_name)
        attrs = {'name': name}

        d = onnx_proto.TensorProto.DataType.DESCRIPTOR
        allowed_type_name_and_type_enum_pairs = {v.number: k for k, v in d.values_by_name.items()}
        if to not in allowed_type_name_and_type_enum_pairs:
            raise ValueError('Attribute "to" must be one of %s' % allowed_type_name_and_type_enum_pairs.keys())

        if container.target_opset < 9:
            if to in [onnx_proto.TensorProto.STRING, onnx_proto.TensorProto.COMPLEX64,
                      onnx_proto.TensorProto.COMPLEX128]:
                raise ValueError('Attribute "to" cannot correspond to a String or Complex TensorProto type.')

            if container.target_opset < 6:
                # Convert enum to string, for example, TensorProto.INT64 to 'INT64'
                attrs['to'] = allowed_type_name_and_type_enum_pairs[to]
                op_version = 1
            else:
                # Enum, for example, TensorProto.INT64
                attrs['to'] = to
                op_version = 6
        else:
            # Enum value, for example, TensorProto.INT64
            # String casting is supported in opset 9
            if to in [onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:
                raise ValueError('Attribute "to" cannot correspond to a Complex TensorProto type.')
            attrs['to'] = to
            op_version = 9

        container.add_node('Cast', input_name, output_name, op_version=op_version, **attrs)
        return output_name
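
    # Illustrative sketch of the opset-dependent 'to' attribute handled above:
    # a numeric enum from opset 6 on, and the enum's string name before that.
    @staticmethod
    def _example_cast_to_attr():
        to = onnx_proto.TensorProto.INT64
        d = onnx_proto.TensorProto.DataType.DESCRIPTOR
        names = {v.number: k for k, v in d.values_by_name.items()}
        return to, names[to]  # (7, 'INT64')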

    def clip(self, input_name, output_name, container, operator_name=None, max=None, min=None):
        name = _create_name_or_use_existing_one(container, 'Clip', operator_name)
        attrs = {'name': name}

        if container.target_opset < 11:
            if max is not None:
                attrs['max'] = float(max)
            if min is not None:
                attrs['min'] = float(min)

            if container.target_opset < 6:
                attrs['consumed_inputs'] = [0]
                op_version = 1
            else:
                op_version = 6

            container.add_node('Clip', input_name, output_name, op_version=op_version, **attrs)
        else:
            if container.target_opset < 12:
                op_version = 11
            else:
                op_version = 12
            if min is None and max is not None:
                raise RuntimeError("Operator 'Clip': min must be specified if max is.")
            inputs = [input_name]

            if min is not None:
                if isinstance(min, (np.ndarray, float, int)):
                    # add initializer
                    if isinstance(min, np.ndarray):
                        if len(min.shape) == 0:
                            min = [min]
                        elif min.shape == (1,):
                            min = list(min[0]) if hasattr(min[0], '__iter__') else list(min)
                        else:
                            raise RuntimeError("min must be an array of one element.")
                    else:
                        min = [min]

                    # container in sklearn-onnx stores the computation type in
                    # container.dtype.
                    min_name = self.get_unique_tensor_name('clip_min')
                    if op_version < 12:
                        min = np.array(min, dtype=getattr(container, 'dtype', np.float32))
                        container.add_initializer(min_name, getattr(container, 'proto_dtype',
                                                                    onnx_proto.TensorProto.FLOAT), [], [min[0]])
                    else:
                        min = np.array(min)
                        container.add_initializer(min_name, NP_TYPE_TO_TENSOR_TYPE[min.dtype], [], [min[0]])
                    min = min_name
                if isinstance(min, str):
                    inputs.append(min)
                else:
                    raise RuntimeError("Parameter 'min' must be a string or a float.")

            if max is not None:
                if min is None:
                    raise RuntimeError("Parameter 'min' must be specified if 'max' is.")
                if isinstance(max, (np.ndarray, float, int)):
                    # add initializer
                    if isinstance(max, np.ndarray):
                        if len(max.shape) == 0:
                            max = [max]
                        elif max.shape == (1,):
                            max = list(max[0]) if hasattr(max[0], '__iter__') else list(max)
                        else:
                            raise RuntimeError("max must be an array of one element.")
                    else:
                        max = [max]

                    max_name = self.get_unique_tensor_name('clip_max')
                    if op_version < 12:
                        max = np.array(max, dtype=getattr(container, 'dtype', np.float32))
                        container.add_initializer(max_name, getattr(container, 'proto_dtype',
                                                                    onnx_proto.TensorProto.FLOAT), [], [max[0]])
                    else:
                        max = np.array(max)
                        container.add_initializer(max_name, NP_TYPE_TO_TENSOR_TYPE[max.dtype], [], [max[0]])
                    max = max_name
                if isinstance(max, str):
                    inputs.append(max)
                else:
                    raise RuntimeError("Parameter 'max' must be a string or a float.")

            container.add_node('Clip', inputs, output_name, op_version=op_version,
                               **attrs)
        return output_name
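
    # Illustrative sketch: at opset >= 11 the bounds above travel as inputs,
    # so scalar min/max become initializers. get_unique_tensor_name is normally
    # supplied by the model builder, so a trivial stand-in is assumed here.
    @staticmethod
    def _example_clip_opset11():
        class _Ox(_ONNXOperatorAPI):
            def get_unique_tensor_name(self, base):
                return base
        container = ONNXElementContainer(target_opset=13)
        _Ox().clip('x', 'y', container, min=0.0, max=6.0)
        return list(container.nodes[-1].input)  # ['x', 'clip_min', 'clip_max']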

    def concat(self, input_names, output_name, container, operator_name=None, axis=0):
        name = _create_name_or_use_existing_one(container, 'Concat', operator_name)

        if container.target_opset < 4:
            op_version = 1
        elif container.target_opset < 11:
            op_version = 4
        else:
            op_version = 11

        container.add_node('Concat', input_names, output_name, op_version=op_version, name=name, axis=axis)
        return output_name

    def concat_from_sequence(self, input_names, output_name, container, operator_name=None, axis=0, new_axis=None):
        name = _create_name_or_use_existing_one(container, 'Concat', operator_name)
        attrs = {'axis': axis}
        if new_axis is not None:
            attrs['new_axis'] = new_axis
        container.add_node('ConcatFromSequence', input_names, output_name, op_version=11, name=name, **attrs)
        return output_name

    def constant(self, input_names, output_name, container, operator_name=None, value=None):
        assert len(input_names) == 0  # only a placeholder to standardize the argument list.
        name = _create_name_or_use_existing_one(container, 'Constant', operator_name)

        if value is None:
            raise ValueError('Attribute "value" is a required argument.')

        if container.target_opset < 9:
            op_version = 1
        elif container.target_opset < 11:
            op_version = 9
        elif container.target_opset < 12:
            op_version = 11
        else:
            op_version = 12

        if op_version < 12:
            attrs = {'name': name, 'value': value}
        else:
            if isinstance(value, float):
                attrs = {'name': name, 'value_float': value}
            elif isinstance(value, int):
                attrs = {'name': name, 'value_int': value}
            elif isinstance(value, str):
                attrs = {'name': name, 'value_string': value}
            else:
                attrs = {'name': name, 'value': value}

        container.add_node('Constant', [], output_name, op_version=op_version, **attrs)
        return output_name
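
    # Illustrative sketch: at opset >= 12 a plain Python scalar picks the typed
    # attribute above (value_float here); the output name is made up.
    @staticmethod
    def _example_constant():
        container = ONNXElementContainer(target_opset=13)
        _ONNXOperatorAPI().constant([], 'c', container, value=3.14)
        return container.nodes[-1].attribute[0].name  # 'value_float'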

    def constant_of_shape(self, input_names, output_name, container, operator_name=None, value=None):
        attrs = {}
        if value is not None:
            attrs['value'] = value
        name = _create_name_or_use_existing_one(container, 'ConstantOfShape', operator_name)
        container.add_node('ConstantOfShape', input_names, output_name, name=name, op_version=9, **attrs)
        return output_name

    def conv(self, input_names, output_name, container, operator_name=None, **attrs):
        name = _create_name_or_use_existing_one(container, 'Conv', operator_name)

        if container.target_opset < 11:
            op_version = 1
        else:
            op_version = 11

        container.add_node('Conv', input_names, output_name, name=name, op_version=op_version, **attrs)
        return output_name

    def crop_height_width(self, input_name, output_name, container, operator_name=None,
                          top_border=0, bottom_border=0, left_border=0, right_border=0):
        name = container.get_unique_operator_name('CropHeightWidth')
        if container.target_opset < 9:
            # If operator set < 9, we can use the experimental Crop in ONNX.
            attrs = {'name': name, 'border': [left_border, top_border, right_border, bottom_border]}
            container.add_node('Crop', input_name, output_name, **attrs)
        else:
            # The experimental Crop in ONNX was removed after operator set 9, so we
            # switch to the ONNX DynamicSlice operator.

            # CoreML only crops H- and W-axes.
            axes = [2, 3]
            axes_name = self.get_unique_tensor_name(name + '_axes')
            container.add_initializer(axes_name, onnx_proto.TensorProto.INT64,
                                      [len(axes)], axes)

            # The number of cropped pixels is the starting index of the remaining region.
            starts = [top_border, left_border]
            starts_name = self.get_unique_tensor_name(name + '_starts')
            container.add_initializer(starts_name, onnx_proto.TensorProto.INT64,
                                      [len(starts)], starts)

            # First we assume no cropping is needed at the end of those axes.
            # We will change this right below depending on Crop's configuration.
            ends = [np.iinfo(np.int64).max] * 2

            # Cropping n pixels means the end index (exclusive) is -n. Note that
            # the indexing system is zero-based.
            if bottom_border > 0:
                ends[0] = -bottom_border
            if right_border > 0:
                ends[1] = -right_border

            # Add the adjusted ends.
            ends_name = self.get_unique_tensor_name(name + '_ends')
            container.add_initializer(ends_name, onnx_proto.TensorProto.INT64,
                                      [len(ends)], ends)

            # Collect all input names as a list because DynamicSlice has multiple inputs.
            input_list = [input_name, starts_name, ends_name, axes_name]
            container.add_node('DynamicSlice', input_list, output_name, op_version=9)
        return output_name
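
    # Illustrative sketch of the border-to-slice arithmetic above: cropping 2
    # rows at the top and 3 at the bottom of a 6x6 image keeps rows [2, -3).
    @staticmethod
    def _example_crop_indices():
        img = np.arange(36).reshape(6, 6)
        cropped = img[2:-3, 1:]  # top=2, bottom=3, left=1, right=0
        return cropped.shape  # (1, 5)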

    def cumsum(self, input_names, output_names, container, operator_name=None, axis=None):
        name = _create_name_or_use_existing_one(container, 'cumsum', operator_name)
        assert axis is not None, "Axis in Op CumSum must be provided."
        axis_name = self.get_unique_tensor_name(name + '_dim')
        container.add_initializer(axis_name,
                                  onnx_proto.TensorProto.INT64,
                                  [1], [axis])
        container.add_node('CumSum', input_names + [axis_name], output_names, op_version=11, name=name)
        return output_names

    def div(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
        self._apply_basic_numerical_operation('Div', input_names, output_name,
                                              container, operator_name,
                                              axis, broadcast)
        return output_name

    def elu(self, input_name, output_name, container, operator_name=None, alpha=1.0):
        self._apply_unary_operation('Elu', input_name, output_name, container, operator_name, alpha=alpha)
        return output_name

    def equal(self, input_names, output_name, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'equal', operator_name)
        if container.target_opset < 7:
            op_version = 1
        elif container.target_opset < 9:
            op_version = 7
        else:
            op_version = 9
        container.add_node('Equal', input_names, output_name, name=name, op_version=op_version)
        return output_name

    def exp(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Exp', input_name, output_name, container, operator_name=operator_name)
        return output_name

    def floor(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Floor', input_name, output_name, container, operator_name=operator_name)
        return output_name

    def flatten(self, input_name, output_name, container, operator_name=None, axis=1):
        name = _create_name_or_use_existing_one(container, 'Flatten', operator_name)
        if container.target_opset < 9:
            op_version = 1
        elif container.target_opset < 11:
            op_version = 9
        else:
            op_version = 11
        container.add_node('Flatten', input_name, output_name, name=name, op_version=op_version, axis=axis)
        return output_name

    def gather(self, input_names, output_name, container, operator_name=None, axis=0):
        name = _create_name_or_use_existing_one(container, 'Gather', operator_name)
        if container.target_opset < 11:
            op_version = 1
        else:
            op_version = 11

        container.add_node('Gather', input_names, output_name, name=name, op_version=op_version, axis=axis)
        return output_name

    def gemm(self, input_name, output_name, container, operator_name=None, alpha=1.0, beta=1.0,
             transA=0, transB=0):
        """
        Applies operator `gemm <https://github.com/onnx/onnx/blob/master/docs/Operators.md#gemm>`.
        """
        name = _create_name_or_use_existing_one(container, 'Gemm', operator_name)
        attrs = {'alpha': alpha, 'beta': beta, 'transA': transA, 'transB': transB}
        if container.target_opset < 5:
            attrs['op_version'] = 1
            attrs['broadcast'] = 1
        elif container.target_opset < 7:
            attrs['op_version'] = 6
            attrs['broadcast'] = 1
        elif container.target_opset < 11:
            attrs['op_version'] = 7
        else:
            attrs['op_version'] = 11

        container.add_node('Gemm', input_name, output_name, name=name, **attrs)
        return output_name

    @schema(outputs=((_dt.BOOL, []),),)
    def greater(self, input_names, output_name, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'Greater', operator_name)
        if container.target_opset < 7:
            op_version = 1
        elif container.target_opset < 9:
            op_version = 7
        else:
            op_version = 9

        container.add_node('Greater', input_names, output_name, name=name, op_version=op_version)
        return output_name

    def _apply_convert_compare_equal(self, input_names, output_name, container, operator_name,
                                     tf_op_string, onnx_op_string_rev, onnx_op_string):
        if container.target_opset < 7:
            raise ValueError(tf_op_string + " op is not supported for opset < 7")
        elif container.target_opset < 9:
            op_version = 7
        elif container.target_opset < 12:
            op_version = 9
        else:
            op_version = 12
        name = _create_name_or_use_existing_one(container, tf_op_string, operator_name)
        if op_version < 9:
            compare_input_0 = self.get_unique_tensor_name(name + '_input_0_cast')
            container.add_node('Cast', [input_names[0]], compare_input_0, name=name + '_input_0_cast', to=1)
            compare_input_1 = self.get_unique_tensor_name(name + '_input_1_cast')
            container.add_node('Cast', [input_names[1]], compare_input_1, name=name + '_input_1_cast', to=1)
            less_out = self.get_unique_tensor_name(name + '_less_out')
            container.add_node(onnx_op_string_rev, [compare_input_0, compare_input_1], less_out,
                               name=name + '_' + onnx_op_string_rev.lower(),
                               op_version=op_version)
            container.add_node('Not', less_out, output_name, name=name + '_not')
        elif op_version < 12:
            compare_node = self.get_unique_tensor_name(name + '_compare_node')
            container.add_node(onnx_op_string_rev, input_names, compare_node,
                               name=name + '_' + onnx_op_string_rev.lower(),
                               op_version=op_version)
            container.add_node('Not', [compare_node], output_name, name=name)
        else:
            container.add_node(onnx_op_string, input_names, output_name,
                               name=name + '_' + onnx_op_string_rev.lower(), op_version=op_version)

    def greater_or_equal(self, input_names, output_name, container, operator_name=None):
        self._apply_convert_compare_equal(input_names, output_name, container, operator_name,
                                          'GreaterEqual', 'Less', 'GreaterOrEqual')
        return output_name

    def less_or_equal(self, input_names, output_name, container, operator_name=None):
        self._apply_convert_compare_equal(input_names, output_name, container,
                                          operator_name, 'LessEqual', 'Greater', 'LessOrEqual')
        return output_name
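
    # Illustrative sketch: before opset 12 GreaterOrEqual does not exist, so
    # the helper above emits Not(Less(a, b)); the identity it relies on,
    # checked in numpy.
    @staticmethod
    def _example_greater_or_equal_identity():
        a = np.array([1.0, 2.0, 3.0])
        b = np.array([2.0, 2.0, 2.0])
        assert np.array_equal(a >= b, ~(a < b))
        return ~(a < b)  # [False, True, True]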

    def gru(self, input_names, output_names, container, operator_name=None, output_seq=0, reset_after=0, **attrs):
        name = _create_name_or_use_existing_one(container, 'GRU', operator_name)
        if container.target_opset < 3:
            op_version = 1
            attrs['output_sequence'] = 1 if output_seq else 0
        else:
            attrs['linear_before_reset'] = 1 if reset_after else 0
            if container.target_opset <= 5:
                attrs['output_sequence'] = 1 if output_seq else 0
                op_version = 3
            else:
                op_version = 7

        container.add_node('GRU', input_names, output_names, name=name, op_version=op_version, **attrs)
        return output_names

    def hard_sigmoid(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
        self._apply_unary_operation('HardSigmoid', input_name, output_name, container, operator_name,
                                    alpha=alpha, beta=beta)
        return output_name

    def identity(self, input_name, output_name, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'Identity', operator_name)
        container.add_node('Identity', input_name, output_name, name=name)
        return output_name

    def instance_norm(self, input_names, output_name, container, operator_name=None, epsilon=1e-5):
        name = _create_name_or_use_existing_one(container, 'InstanceNormalization', operator_name)
        attrs = {'name': name, 'epsilon': epsilon}

        if container.target_opset < 2:
            attrs['consumed_inputs'] = [0] * len(input_names)
            op_version = 1
        else:
            op_version = 6

        container.add_node('InstanceNormalization', input_names, output_name, op_version=op_version, **attrs)
        return output_name

    def leaky_relu(self, input_name, output_name, container, operator_name=None, alpha=0.01):
        self._apply_unary_operation('LeakyRelu', input_name, output_name, container, operator_name, alpha=alpha)
        return output_name

    def less(self, input_names, output_name, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'Less', operator_name)
        if container.target_opset < 7:
            op_version = 1
        elif container.target_opset < 9:
            op_version = 7
        else:
            op_version = 9

        container.add_node('Less', input_names, output_name, name=name, op_version=op_version)
        return output_name

    def log(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Log', input_name, output_name, container, operator_name=operator_name)
        return output_name

    def lstm(self, input_names, output_names, container, operator_name=None, output_seq=0, **attrs):
        name = _create_name_or_use_existing_one(container, 'LSTM', operator_name)
        if container.target_opset <= 6:
            attrs['output_sequence'] = 1 if output_seq else 0
            op_version = 1
        else:
            op_version = 7
        container.add_node('LSTM', input_names, output_names, name=name, op_version=op_version, **attrs)
        return output_names

    def matmul(self, input_names, output_name, container, operator_name=None):
        op_type = 'MatMul'
        name = _create_name_or_use_existing_one(container, op_type, operator_name)
        if container.target_opset <= 9:
            op_version = 1
        else:
            op_version = 9
        container.add_node(op_type, input_names, output_name, op_version=op_version, name=name)
        return output_name

    def max(self, input_names, output_name, container, operator_name=None):
        self._apply_pointwise_operation('Max', input_names, output_name, container, operator_name)
        return output_name

    def mean(self, input_names, output_name, container, operator_name=None):
        self._apply_pointwise_operation('Mean', input_names, output_name, container, operator_name)
        return output_name

    def min(self, input_names, output_name, container, operator_name=None):
        self._apply_pointwise_operation('Min', input_names, output_name, container, operator_name)
        return output_name

    def mul(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
        self._apply_basic_numerical_operation('Mul', input_names, output_name,
                                              container, operator_name=operator_name,
                                              axis=axis, broadcast=broadcast)
        return output_name

    def neg(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Neg', input_name, output_name, container, operator_name)
        return output_name

    def lpnormalization(self, input_name, output_name, container, operator_name=None, axis=1, p=2):
        name = _create_name_or_use_existing_one(container, 'LpNormalization', operator_name)
        container.add_node('LpNormalization', input_name, output_name, name=name, p=p, axis=axis)
        return output_name

    def not_op(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Not', input_name, output_name, container, operator_name)
        return output_name

    def or_op(self, input_names, output_names, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'or', operator_name)
        container.add_node('Or', input_names, output_names, op_version=7, name=name)
        return output_names

    def pad(self, input_name, output_name, container, operator_name=None, mode=None, pads=None, value=None,
            onnx_type=onnx_proto.TensorProto.FLOAT):
        name = _create_name_or_use_existing_one(container, 'Pad', operator_name)
        attrs = {'name': name}
        inputs = input_name if isinstance(input_name, list) else [input_name]

        if mode is not None:
            attrs['mode'] = mode

        if container.target_opset < 11:
            if isinstance(pads, str):
                raise ValueError("Dynamic pad is not supported for opset < 11.")
            if value is not None:
                attrs['value'] = value
            if container.target_opset < 2:
                attrs['paddings'] = pads
                op_version = 1
            else:
                attrs['pads'] = pads
                op_version = 2
        else:
            op_version = 11
            if isinstance(pads, str):
                inputs.append(pads)
            else:
                pads_name = self.get_unique_tensor_name(name + '_pads')
                container.add_initializer(pads_name, onnx_proto.TensorProto.INT64, [len(pads)], pads)
                inputs.append(pads_name)
            if value is not None:
                value_name = self.get_unique_tensor_name(name + '_value')
                container.add_initializer(value_name, onnx_type, [], [value])
                inputs.append(value_name)

        container.add_node('Pad', inputs, output_name, op_version=op_version, **attrs)
        return output_name
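
    # Illustrative sketch: at opset >= 11 the pads (and the fill value) above
    # are inputs, stored as initializers. A stand-in get_unique_tensor_name is
    # assumed.
    @staticmethod
    def _example_pad_opset11():
        class _Ox(_ONNXOperatorAPI):
            def get_unique_tensor_name(self, base):
                return base
        container = ONNXElementContainer(target_opset=13)
        _Ox().pad('x', 'y', container, mode='constant', pads=[0, 1, 0, 1], value=0.0)
        return list(container.nodes[-1].input)  # ['x', <pads name>, <value name>]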

    def parametric_softplus(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
        if alpha is None:
            alpha = [1.0]
        if beta is None:
            beta = [0.]

        name = _create_name_or_use_existing_one(container, 'ParametricSoftplus', operator_name)
        if container.target_opset < 9:
            if len(alpha) != 1 or len(beta) != 1:
                raise ValueError('alpha and beta must be 1-element lists')
            op_type = 'ParametricSoftplus'
            attrs = {'name': name, 'alpha': alpha[0], 'beta': beta[0]}
            container.add_node(op_type, input_name, output_name, **attrs)
        else:
            # Define three scalars: a, b, 1.
            aName = self.get_unique_tensor_name(name + '_alpha')
            aShape = [len(alpha)] if len(alpha) == 1 else [len(alpha), 1, 1]
            container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, aShape, alpha)
            bShape = [len(beta)] if len(beta) == 1 else [len(beta), 1, 1]
            bName = self.get_unique_tensor_name(name + '_beta')
            container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, bShape, beta)
            oneName = self.get_unique_tensor_name(name + '_one')
            container.add_initializer(oneName, onnx_proto.TensorProto.FLOAT, [1], [1.])

            # c = b * x
            cName = self.get_unique_tensor_name(name + '_c')
            self.mul([input_name, bName], cName, container)

            # d = exp(c)
            dName = self.get_unique_tensor_name(name + '_d')
            self.exp(cName, dName, container)

            # e = 1 + d
            eName = self.get_unique_tensor_name(name + '_e')
            self.add([dName, oneName], eName, container)

            # f = log(e)
            fName = self.get_unique_tensor_name(name + '_f')
            self.log(eName, fName, container)

            # g = a * f
            self.mul([fName, aName], output_name, container)
        return output_name
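
    # Illustrative sketch: the Mul/Exp/Add/Log chain above computes
    # y = alpha * log(1 + exp(beta * x)); the same formula, checked in numpy.
    @staticmethod
    def _example_parametric_softplus():
        x, alpha, beta = np.linspace(-2.0, 2.0, 5), 1.5, 0.5
        return alpha * np.log(1.0 + np.exp(beta * x))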

    def pow(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
        name = _create_name_or_use_existing_one(container, 'Pow', operator_name)

        attrs = {'name': name}
        if container.target_opset < 7:
            # Before ONNX-1.2, broadcasting behavior is Caffe2-like.
            if axis is not None:
                attrs['axis'] = axis
            if broadcast is not None:
                attrs['broadcast'] = broadcast
            op_version = 1
        elif container.target_opset < 12:
            # Since ONNX-1.2, broadcasting behavior is Numpy-like, so we don't need to specify any attributes
            op_version = 7
        else:
            op_version = 12

        container.add_node('Pow', input_names, output_name, op_version=op_version, **attrs)
        return output_name

    def prelu(self, input_name, output_name, container, operator_name=None, slp_rate=None):
        name = _create_name_or_use_existing_one(container, 'PRelu', operator_name)
        slp_rate_tensor_name = self.get_unique_tensor_name('slp_rate')
        s_shape = slp_rate.shape
        if container.target_opset < 7:
            s_shape = [len(slp_rate.flatten())]
        container.add_initializer(slp_rate_tensor_name, onnx_proto.TensorProto.FLOAT, s_shape, slp_rate.flatten())

        if container.target_opset < 6:
            container.add_node('PRelu', [input_name, slp_rate_tensor_name], output_name, op_version=1, name=name,
                               consumed_inputs=[0, 0])
        else:
            if container.target_opset < 7:
                op_version = 6
            elif container.target_opset < 9:
                op_version = 7
            else:
                # opset 9 supports unidirectional broadcasting
                op_version = 9

            container.add_node('PRelu', [input_name, slp_rate_tensor_name], output_name, op_version=op_version,
                               name=name)
        return output_name

    def range(self, input_name, output_name, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'Range', operator_name)
        container.add_node('Range', input_name, output_name, op_version=11, name=name)
        return output_name

    def reciprocal(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Reciprocal', input_name, output_name, container, operator_name=operator_name)
        return output_name

    # Some older ORT versions support the axis < 0 case, so rank=0 is kept as the default.
    def reducesum(self, input_name, output_name, container, operator_name=None, axes=None, keepdims=1, rank=0):
        name = _create_name_or_use_existing_one(container, 'ReduceSum', operator_name)
        if axes is None:
            axes = []
        if container.target_opset < 13:
            if container.target_opset < 11:
                op_version = 1
                axes = [axis if axis >= 0 else axis + rank for axis in axes]
            else:
                op_version = 11
            container.add_node('ReduceSum', input_name, output_name, name=name,
                               op_version=op_version, axes=axes, keepdims=keepdims)
        else:
            if not isinstance(input_name, list):
                input_name = [input_name]
            op_version = 13
            if isinstance(axes, str):
                container.add_node('ReduceSum', input_name + [axes], output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
            elif axes is None or len(axes) == 0:
                container.add_node('ReduceSum', input_name, output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
            else:
                axes_name = self.get_unique_tensor_name(name + '_reducesum')
                container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
                container.add_node('ReduceSum', input_name + [axes_name], output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
        return output_name

    def reducemin(self, input_name, output_name, container, operator_name=None, axes=None, keepdims=1, rank=0):
        name = _create_name_or_use_existing_one(container, 'ReduceMin', operator_name)
        if axes is None:
            axes = []
        if container.target_opset < 13:
            if container.target_opset < 11:
                op_version = 1
                axes = [axis if axis >= 0 else axis + rank for axis in axes]
            else:
                op_version = 11
            container.add_node('ReduceMin', input_name, output_name, name=name,
                               op_version=op_version, axes=axes, keepdims=keepdims)
        else:
            if not isinstance(input_name, list):
                input_name = [input_name]
            op_version = 13
            if isinstance(axes, str):
                container.add_node('ReduceMin', input_name + [axes], output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
            elif axes is None or len(axes) == 0:
                container.add_node('ReduceMin', input_name, output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
            else:
                axes_name = self.get_unique_tensor_name(name + '_reducemin')
                container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
                container.add_node('ReduceMin', input_name + [axes_name], output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
        return output_name
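
    # Illustrative sketch: from opset 13 the ReduceSum axes above are an input
    # tensor, materialized as an INT64 initializer. A stand-in
    # get_unique_tensor_name is assumed.
    @staticmethod
    def _example_reducesum_opset13():
        class _Ox(_ONNXOperatorAPI):
            def get_unique_tensor_name(self, base):
                return base
        container = ONNXElementContainer(target_opset=13)
        _Ox().reducesum('x', 'y', container, axes=[1], keepdims=0)
        return list(container.nodes[-1].input)  # ['x', <axes name>]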

    def relu(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Relu', input_name, output_name, container, operator_name)
        return output_name

    def relu_6(self, input_name, output_name, container, operator_name=None, zero_value=0.0):
        name_relu = _create_name_or_use_existing_one(container, 'relu', operator_name)
        name_relu_op = _create_name_or_use_existing_one(container, 'relu6', operator_name)
        self.relu(input_name, name_relu, container, name_relu_op + '_relu')
        self.clip(name_relu, output_name, container, name_relu_op + '_clip', zero_value + 6, zero_value)

    def reshape(self, input_name, output_name, container, operator_name=None, desired_shape=None):
        if not isinstance(desired_shape, str) and len(list(i for i in desired_shape if i is not None and i < 0)) > 1:
            raise ValueError('There can only be one -1 in the target shape of a Reshape, but got %s' % desired_shape)

        name = _create_name_or_use_existing_one(container, 'Reshape', operator_name)

        if container.target_opset < 5:
            container.add_node('Reshape', input_name, output_name, op_version=1, name=name, shape=desired_shape,
                               consumed_inputs=[0])
        else:
            if isinstance(desired_shape, str):
                desired_shape_name = desired_shape
            else:
                desired_shape_name = self.get_unique_tensor_name('shape_tensor')
                container.add_initializer(desired_shape_name, onnx_proto.TensorProto.INT64, [len(desired_shape)],
                                          desired_shape)

            # Create the ONNX Reshape operator.
            if isinstance(input_name, list):
                input_name.append(desired_shape_name)
            else:
                input_name = [input_name, desired_shape_name]
            container.add_node('Reshape', input_name, output_name, op_version=5, name=name)
        return output_name

    def resize(self, input_name, output_name, container, operator_name=None, mode='nearest',
               coordinate_transformation_mode='asymmetric', scales=None):
        """
        :param mode: "nearest" or "linear"
        :param scales: a float tensor for scaling (upsampling or downsampling) all input dimensions
        """
        name = _create_name_or_use_existing_one(container, 'Resize', operator_name)
        attrs = {'name': name}
        attrs['mode'] = mode.lower()

        inputs = [input_name]

        if container.target_opset < 11:
            op_version = 10
        else:
            op_version = 11
            roi_tensor_name = self.get_unique_tensor_name(name + '_roi')
            roi = [0.0] * len(scales) + [1.0] * len(scales)
            container.add_initializer(roi_tensor_name, onnx_proto.TensorProto.FLOAT, [2 * len(scales)], roi)
            inputs.append(roi_tensor_name)
            attrs['coordinate_transformation_mode'] = coordinate_transformation_mode
            if attrs['mode'] == 'nearest':
                attrs['nearest_mode'] = 'floor'

        scales_tensor_name = self.get_unique_tensor_name(name + '_scales')
        container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)
        inputs.append(scales_tensor_name)
        container.add_node('Resize', inputs, output_name, op_version=op_version, **attrs)
        return output_name
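    # Editor's note: an illustrative aside, not part of the original source.
    # Resize-11 takes (X, roi, scales) as positional inputs; the roi tensor is
    # only consulted when coordinate_transformation_mode is
    # 'tf_crop_and_resize', which is why a no-op [0, ..., 0, 1, ..., 1] roi
    # initializer is always emitted above to fill the slot:
    #
    #     ox.resize('image', 'image_2x', container, mode='nearest', scales=[1.0, 1.0, 2.0, 2.0])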

    def rnn(self, input_names, output_names, container, operator_name=None, output_seq=0, **attrs):
        name = _create_name_or_use_existing_one(container, 'RNN', operator_name)
        if container.target_opset <= 6:
            attrs['output_sequence'] = 1 if output_seq else 0
            op_version = 1
        else:
            op_version = 7
        container.add_node('RNN', input_names, output_names, name=name, op_version=op_version, **attrs)
        return output_names

    def shape(self, input_name, output_name, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'Shape', operator_name)
        container.add_node('Shape', input_name, output_name, name=name, op_version=1)
        return output_name

    def sigmoid(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Sigmoid', input_name, output_name, container, operator_name)
        return output_name

    def softsign(self, input_name, output_name, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'Softsign', operator_name)
        container.add_node('Softsign', input_name, output_name, name=name, op_version=1)
        return output_name

    # See alpha and gamma at https://github.com/keras-team/keras/blob/master/keras/activations.py#L80-L81
    def selu(self, input_name, output_name, container, operator_name=None, alpha=1.673263, gamma=1.050701):
        self._apply_unary_operation('Selu', input_name, output_name, container, operator_name, alpha=alpha, gamma=gamma)
        return output_name

    def softmax(self, input_name, output_name, container, operator_name=None, axis=None):
        name = _create_name_or_use_existing_one(container, 'Softmax', operator_name)
        if axis is None:
            axis = 1 if container.target_opset < 13 else -1
        container.add_node('Softmax', input_name, output_name, name=name, axis=axis)
        return output_name
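    # Editor's note: an illustrative aside, not part of the original source.
    # ONNX changed the default Softmax axis in opset 13: earlier opsets coerce
    # the input to 2-D and default to axis=1, while Softmax-13 normalizes
    # along the last axis. The None default above selects whichever matches
    # the container's opset:
    #
    #     ox.softmax('logits', 'probs', container)   # axis=-1 on opset >= 13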

    def scaled_tanh(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
        if alpha is None:
            alpha = [1.0]
        if beta is None:
            beta = [1.0]
        if len(alpha) != 1 or len(beta) != 1:
            raise ValueError('alpha and beta must be 1-element lists')

        name = _create_name_or_use_existing_one(container, 'ScaledTanh', operator_name)
        if container.target_opset < 9:
            attrs = {'name': name, 'alpha': alpha[0], 'beta': beta[0]}
            container.add_node('ScaledTanh', input_name, output_name, **attrs)
        else:
            # ScaledTanh is unavailable as a standard op from opset 9 onward,
            # so decompose it into output = alpha * tanh(beta * x).
            # Define scalar a, initialize with parameter alpha.
            aName = self.get_unique_tensor_name(name + '_alpha')
            aShape = [len(alpha)] if len(alpha) == 1 else [len(alpha), 1, 1]
            container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, aShape, alpha)

            # Define scalar b, initialize with parameter beta.
            bShape = [len(beta)] if len(beta) == 1 else [len(beta), 1, 1]
            bName = self.get_unique_tensor_name(name + '_beta')
            container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, bShape, beta)

            # c = b * x
            cName = self.get_unique_tensor_name(name + '_c')
            self.mul([input_name, bName], cName, container)

            # d = tanh(c)
            dName = self.get_unique_tensor_name(name + '_d')
            self.tanh(cName, dName, container)

            # output = a * d
            self.mul([aName, dName], output_name, container)
        return output_name

    def slice(self, input_name, output_name, container,
              operator_name=None, starts=None, ends=None, axes=None, steps=None):
        assert starts is not None, 'the starts in slice op cannot be None'
        assert ends is not None, 'the ends in slice op cannot be None'
        name = _create_name_or_use_existing_one(container, 'Slice', operator_name)

        if container.target_opset < 10:
            if axes is None:
                container.add_node('Slice', input_name, output_name, name=name,
                                   starts=starts, ends=ends, op_version=1)
            else:
                container.add_node('Slice', input_name, output_name, name=name,
                                   starts=starts, ends=ends, axes=axes, op_version=1)
        else:
            if container.target_opset == 10:
                op_version = 10
            else:
                op_version = 11
            inputs = input_name if isinstance(input_name, list) else [input_name]
            if isinstance(starts, str):
                starts_name = starts
            else:
                starts_name = self.get_unique_tensor_name('starts')
                container.add_initializer(starts_name, onnx_proto.TensorProto.INT64,
                                          [len(starts)], starts)

            if isinstance(ends, str):
                ends_name = ends
            else:
                ends_name = self.get_unique_tensor_name('ends')
                container.add_initializer(ends_name, onnx_proto.TensorProto.INT64,
                                          [len(ends)], ends)

            inputs.append(starts_name)
            inputs.append(ends_name)
            if axes:
                if isinstance(axes, str):
                    axes_name = axes
                else:
                    axes_name = self.get_unique_tensor_name('axes')
                    container.add_initializer(axes_name, onnx_proto.TensorProto.INT64,
                                              [len(axes)], axes)
                inputs.append(axes_name)
            if steps:
                if not axes:
                    # the 'axes' input is optional; an empty tensor name skips it
                    inputs.append('')
                if isinstance(steps, str):
                    steps_name = steps
                else:
                    steps_name = self.get_unique_tensor_name('steps')
                    container.add_initializer(steps_name, onnx_proto.TensorProto.INT64,
                                              [len(steps)], steps)
                inputs.append(steps_name)
            container.add_node('Slice', inputs, output_name, name=name,
                               op_version=op_version)
        return output_name
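    # Editor's note: an illustrative aside, not part of the original source.
    # Slice-10 moved starts/ends/axes/steps from attributes to inputs, and
    # optional inputs are skipped positionally by passing an empty tensor
    # name, which is why '' is appended when steps are given without axes:
    #
    #     ox.slice('tokens', 'tokens_head', container, starts=[0], ends=[128], axes=[1])
    #     ox.slice('x', 'x_even', container, starts=[0], ends=[1024], steps=[2])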

    def split(self, input_name, output_names, container, operator_name=None, split=None, axis=0):
        name = _create_name_or_use_existing_one(container, 'Split', operator_name)
        if container.target_opset <= 1:
            op_version = 1
        elif container.target_opset < 11:
            op_version = 2
        elif container.target_opset < 13:
            op_version = 11
        else:
            op_version = 13

        attrs = {'name': name}
        if split is not None:
            if container.target_opset < 13:
                attrs['split'] = split
            else:
                if not isinstance(input_name, list):
                    input_name = [input_name]
                if isinstance(split, str):
                    split_name = split
                else:
                    split_name = self.get_unique_tensor_name(name + '_split')
                    container.add_initializer(split_name, onnx_proto.TensorProto.INT64, [len(split)], split)
                input_name = input_name + [split_name]

        if axis is not None:
            attrs['axis'] = axis

        container.add_node('Split', input_name, output_names, op_version=op_version, **attrs)
        return output_names
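    # Editor's note: an illustrative aside, not part of the original source.
    # Split-13 moved the per-output sizes from a 'split' attribute to an
    # optional second input, handled above by emitting an INT64 initializer:
    #
    #     ox.split('qkv', ['q', 'k', 'v'], container, split=[64, 64, 64], axis=-1)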

    def sqrt(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Sqrt', input_name, output_name, container, operator_name=operator_name)
        return output_name

    def _apply_squeeze_unsqueeze(self, input_name, output_name, container, squeeze_str, operator_name=None, axes=None,
                                 rank=0):
        name = _create_name_or_use_existing_one(container, squeeze_str, operator_name)
        if container.target_opset < 13:
            if container.target_opset < 11:
                op_version = 1
                axes = [axis if axis >= 0 else axis + rank for axis in axes]
            else:
                op_version = 11
            container.add_node(squeeze_str, input_name, output_name, name=name, op_version=op_version, axes=axes)
        else:
            op_version = 13
            if not isinstance(input_name, list):
                input_name = [input_name]
            if isinstance(axes, str):
                container.add_node(squeeze_str, input_name + [axes], output_name, op_version=op_version, name=name)
            elif len(axes) == 0:
                container.add_node(squeeze_str, input_name, output_name, op_version=op_version, name=name)
            else:
                axes_name = self.get_unique_tensor_name(name + '_axes')
                container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
                container.add_node(squeeze_str, input_name + [axes_name], output_name, op_version=op_version, name=name)
        return output_name
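    # Editor's note: an illustrative aside, not part of the original source.
    # Squeeze and Unsqueeze share this helper because they evolved in
    # lockstep: negative axes only became legal in opset 11, so before that
    # they are normalized against the supplied rank, and opset 13 moved
    # 'axes' from an attribute to a second INT64 input.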

    def squeeze(self, input_name, output_name, container, operator_name=None, axes=None, rank=0):
        if axes is None:
            axes = []
        self._apply_squeeze_unsqueeze(input_name, output_name, container, 'Squeeze', operator_name, axes, rank)
        return output_name

    def sub(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=0):
        self._apply_basic_numerical_operation('Sub', input_names, output_name, container, operator_name=operator_name,
                                              axis=axis, broadcast=broadcast)
        return output_name

    def sum(self, input_names, output_name, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'Sum', operator_name)
        if container.target_opset < 6:
            op_version = 1
        else:
            op_version = 6
        container.add_node('Sum', input_names, output_name, op_version=op_version, name=name)
        return output_name

    def tanh(self, input_name, output_name, container, operator_name=None):
        self._apply_unary_operation('Tanh', input_name, output_name, container, operator_name)
        return output_name

    def thresholded_relu(self, input_name, output_name, container, operator_name=None, alpha=None):
        if alpha is None:
            alpha = [1.0]

        name = _create_name_or_use_existing_one(container, 'ThresholdedRelu', operator_name)
        attrs = {'name': name, 'alpha': alpha[0]}
        if container.target_opset < 10:
            # ThresholdedRelu graduated from an experimental op to a full op in opset 10;
            # onnxruntime maintains support in the ONNX domain for ThresholdedRelu as a contrib op
            attrs['op_domain'] = "ai.onnx"
            op_version = 1
        else:
            op_version = 10
        container.add_node('ThresholdedRelu', input_name, output_name, op_version=op_version, **attrs)
        return output_name

    def tile(self, input_name, output_name, container, operator_name=None, repeats=None):
        name = _create_name_or_use_existing_one(container, 'Tile', operator_name)

        if repeats is None or (not isinstance(repeats, str) and all(repeat_count == 1 for repeat_count in repeats)):
            container.add_node('Identity', input_name, output_name, name=name)
            return output_name

        if container.target_opset < 6:
            intermediate_input_name = input_name
            intermediate_output_name = None
            if isinstance(repeats, str):
                raise ValueError('repeats cannot be string type before opset 6')

            for axis, repeat_count in enumerate(repeats):
                if repeat_count == 1:
                    continue

                # Create the 2nd input of Tile
                tile_tensor_name = self.get_unique_tensor_name(name + '_tile')
                container.add_initializer(tile_tensor_name, onnx_proto.TensorProto.FLOAT, [1], [float(repeat_count)])

                # Create the 3rd input of Tile
                axis_tensor_name = self.get_unique_tensor_name(name + '_axis')
                container.add_initializer(axis_tensor_name, onnx_proto.TensorProto.FLOAT, [1], [float(axis)])

                # Create tile for duplicating along one axis. After ONNX-1.2, we can duplicate along multiple axes,
                # so we don't have to iterate through all axes.
                intermediate_output_name = self.get_unique_tensor_name(name + '_input')
                container.add_node('Tile', [intermediate_input_name, tile_tensor_name, axis_tensor_name],
                                   intermediate_output_name, name=name)

                # Use the output produced by this round as the input in the next iteration
                intermediate_input_name = intermediate_output_name

                # Create a new name for next Tile
                name = container.get_unique_operator_name('Tile')

            # Use the last Tile name for the name of an Identity
            container.add_node('Identity', intermediate_output_name, output_name, op_version=1, name=name)
        else:
            # ONNX-1.2 has a new Tile and we use it here
            if isinstance(repeats, str):
                container.add_node('Tile', input_name + [repeats], output_name, op_version=6, name=name)
            else:
                repeat_tensor_name = self.get_unique_tensor_name(name + '_repeats')
                container.add_initializer(repeat_tensor_name, onnx_proto.TensorProto.INT64, [len(repeats)], repeats)
                container.add_node('Tile', [input_name, repeat_tensor_name], output_name, op_version=6, name=name)
        return output_name
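    # Editor's note: an illustrative aside, not part of the original source.
    # Tile-1 could only replicate along a single axis (input, tiles, axis),
    # so the pre-opset-6 path above chains one Tile per axis whose repeat
    # count differs from 1; Tile-6 accepts the whole INT64 'repeats' vector
    # in a single node:
    #
    #     ox.tile('row', 'grid', container, repeats=[4, 1])  # stack 4 copies along axis 0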

    def topk(self, input_name, output_names, container, k, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'TopK', operator_name)

        if container.target_opset < 10:
            if isinstance(k, str):
                raise ValueError('topk k cannot be string type before opset 10')
            container.add_node('TopK', input_name, output_names, name=name, k=k, op_version=1)
        else:
            if container.target_opset == 10:
                op_version = 10
            else:
                op_version = 11

            if isinstance(k, str):
                k_value_name = k
            else:
                k_value_name = self.get_unique_tensor_name('k_value')
                container.add_initializer(k_value_name, onnx_proto.TensorProto.INT64, [1], [k])
            container.add_node('TopK', input_name + [k_value_name], output_names, name=name, op_version=op_version)
        return output_names
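    # Editor's note: an illustrative aside, not part of the original source.
    # TopK-10 moved k from an attribute to a [1]-shaped INT64 input, so from
    # that opset on k may also be the name of a tensor computed at runtime:
    #
    #     ox.topk(['scores'], ['top_vals', 'top_idx'], container, k=5)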

    def transpose(self, input_name, output_name, container, operator_name=None, perm=None):
        name = _create_name_or_use_existing_one(container, 'Transpose', operator_name)
        container.add_node('Transpose', input_name, output_name, name=name, perm=perm)
        return output_name

    def upsample(self, input_name, output_name, container, operator_name=None, mode='nearest',
                 coordinate_transformation_mode='asymmetric', scales=None):
        """
        :param input_name:
        :param output_name:
        :param container:
        :param operator_name:
        :param mode: nearest or linear
        :param coordinate_transformation_mode:
        :param scales: an integer list of scaling-up rates, one per input dimension
        :return:
        """
        if container.target_opset < 10:
            name = _create_name_or_use_existing_one(container, 'Upsample', operator_name)
            inputs = [input_name]
            attrs = {'name': name}
            if container.target_opset < 7:
                if len(scales) != 4:
                    raise ValueError('Need to specify a 4-element list for the scales of N-, C-, H-, and W-axes')
                attrs['height_scale'] = float(scales[2])
                attrs['width_scale'] = float(scales[3])
                attrs['mode'] = mode.upper()
                op_version = 1
            else:
                attrs['mode'] = mode.lower()
                if container.target_opset < 9:
                    attrs['scales'] = list(map(float, scales))
                    op_version = 7
                else:
                    # scales moved from attribute to input in opset 9
                    scales_tensor_name = self.get_unique_tensor_name(name + '_scales')
                    container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)
                    inputs = [input_name, scales_tensor_name]
                    op_version = 9

            container.add_node('Upsample', inputs, output_name, op_version=op_version, **attrs)
        else:
            # Upsample op is deprecated in ONNX opset 10
            # We implement Upsample through Resize instead
            self.resize(input_name, output_name, container, operator_name, mode, coordinate_transformation_mode,
                        scales)
        return output_name
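    # Editor's note: an illustrative aside, not part of the original source.
    # Upsample was deprecated in opset 10 in favour of Resize, so on newer
    # opsets this method is a thin wrapper over resize() with the same
    # scales semantics:
    #
    #     ox.upsample('feat', 'feat_2x', container, mode='linear', scales=[1, 1, 2, 2])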

    def unsqueeze(self, input_name, output_name, container, operator_name=None, axes=None, rank=0):
        if axes is None:
            axes = [0]
        self._apply_squeeze_unsqueeze(input_name, output_name, container, 'Unsqueeze', operator_name, axes, rank)
        return output_name

    def where(self, input_names, output_names, container, operator_name=None):
        name = _create_name_or_use_existing_one(container, 'where', operator_name)
        container.add_node('Where', input_names, output_names, op_version=9, name=name)
        return output_names

    def loop(self, input_names, output_names, container, operator_name=None, body=None):
        name = _create_name_or_use_existing_one(container, 'loop', operator_name)
        trip_count, cond, *states = tuple(input_names)
        trip_count = '' if trip_count is None else trip_count
        cond_name = '' if cond is None else cond
        container.add_node(
            'Loop', [trip_count, cond_name] + states, output_names, op_version=11, name=name, body=body)
        return output_names
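    # Editor's note: an illustrative aside, not part of the original source.
    # Loop's trip-count and termination-condition inputs are optional; ONNX
    # marks an omitted optional input with an empty tensor name, which is
    # what the None -> '' mapping above produces. With both omitted the loop
    # runs until the condition computed inside the body graph turns false:
    #
    #     ox.loop([None, None, 'state_in'], ['state_out'], container, body=body_graph)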

    def model_call(self, input_name, output_name, container, operator_name=None, oxml=None):
        name = operator_name
        if name is None:
            name = container.get_unique_operator_name('og')

        # The tensor name replacement happens on unfolding the ONNX model.
        for idx, nm_ in enumerate(input_name):
            nvi = oxml.graph.input[idx]
            self.identity([nm_], ["{}_{}".format(name, nvi.name)], container)
            container.value_info.append(nvi)
        for idx, nm_ in enumerate(output_name):
            self.identity(["{}_{}".format(name, oxml.graph.output[idx].name)], [nm_], container)
        container.value_info.extend(oxml.graph.output)
        container.add_model_node(input_name, output_name, name=name, model=oxml)
        return output_name


class _ONNXModelBuilder(_ONNXOperatorAPI):
    def __init__(self):
        _OpSchema._ox = self
        self._id_count = 0
        self.opdict_counter = {}

    def get_unique_tensor_name(self, hint):
        self._id_count += 1
        return "v{}_{}".format(hint, str(self._id_count))

    def make_tensor(self, dtype, dims, vals):
        return helper.make_tensor(self.get_unique_tensor_name('ts'), dtype, dims, vals)

    def get_unique_operator_type_name(self, op_type):
        nn = self.opdict_counter.get(op_type, 0)
        self.opdict_counter[op_type] = nn + 1
        return "_Op{}".format(op_type) if nn == 0 else "_Op{}_{}".format(op_type, nn + 1)

    @classmethod
    def is_raw(cls, func):  # without any schema decorator
        return not isinstance(func, _OpSchema)


# Singleton
ox = _ONNXModelBuilder()
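
# Editor's note: an illustrative aside, not part of the original source.
# A rough sketch of how the singleton is used: every op shares the
# (inputs, outputs, container) calling convention and returns its output
# name(s) so calls can be chained. The `container` below stands in for the
# model container created elsewhere in this package (it must provide
# add_node, add_initializer, and target_opset) and is an assumption of this
# sketch, not an API defined in this file:
#
#     t_name = ox.transpose('img', 'img_t', container, perm=[0, 3, 1, 2])
#     ox.softmax(t_name, 'probs', container)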