onnx_ir-0.1.15-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. onnx_ir/__init__.py +176 -0
  2. onnx_ir/_cloner.py +229 -0
  3. onnx_ir/_convenience/__init__.py +558 -0
  4. onnx_ir/_convenience/_constructors.py +291 -0
  5. onnx_ir/_convenience/_extractor.py +191 -0
  6. onnx_ir/_core.py +4435 -0
  7. onnx_ir/_display.py +54 -0
  8. onnx_ir/_enums.py +474 -0
  9. onnx_ir/_graph_comparison.py +23 -0
  10. onnx_ir/_graph_containers.py +373 -0
  11. onnx_ir/_io.py +133 -0
  12. onnx_ir/_linked_list.py +284 -0
  13. onnx_ir/_metadata.py +45 -0
  14. onnx_ir/_name_authority.py +72 -0
  15. onnx_ir/_polyfill.py +26 -0
  16. onnx_ir/_protocols.py +627 -0
  17. onnx_ir/_safetensors/__init__.py +510 -0
  18. onnx_ir/_tape.py +242 -0
  19. onnx_ir/_thirdparty/asciichartpy.py +310 -0
  20. onnx_ir/_type_casting.py +89 -0
  21. onnx_ir/_version_utils.py +48 -0
  22. onnx_ir/analysis/__init__.py +21 -0
  23. onnx_ir/analysis/_implicit_usage.py +74 -0
  24. onnx_ir/convenience.py +38 -0
  25. onnx_ir/external_data.py +459 -0
  26. onnx_ir/passes/__init__.py +41 -0
  27. onnx_ir/passes/_pass_infra.py +351 -0
  28. onnx_ir/passes/common/__init__.py +54 -0
  29. onnx_ir/passes/common/_c_api_utils.py +76 -0
  30. onnx_ir/passes/common/clear_metadata_and_docstring.py +60 -0
  31. onnx_ir/passes/common/common_subexpression_elimination.py +207 -0
  32. onnx_ir/passes/common/constant_manipulation.py +230 -0
  33. onnx_ir/passes/common/default_attributes.py +99 -0
  34. onnx_ir/passes/common/identity_elimination.py +120 -0
  35. onnx_ir/passes/common/initializer_deduplication.py +179 -0
  36. onnx_ir/passes/common/inliner.py +223 -0
  37. onnx_ir/passes/common/naming.py +280 -0
  38. onnx_ir/passes/common/onnx_checker.py +57 -0
  39. onnx_ir/passes/common/output_fix.py +141 -0
  40. onnx_ir/passes/common/shape_inference.py +112 -0
  41. onnx_ir/passes/common/topological_sort.py +37 -0
  42. onnx_ir/passes/common/unused_removal.py +215 -0
  43. onnx_ir/py.typed +1 -0
  44. onnx_ir/serde.py +2043 -0
  45. onnx_ir/tape.py +15 -0
  46. onnx_ir/tensor_adapters.py +210 -0
  47. onnx_ir/testing.py +197 -0
  48. onnx_ir/traversal.py +118 -0
  49. onnx_ir-0.1.15.dist-info/METADATA +68 -0
  50. onnx_ir-0.1.15.dist-info/RECORD +53 -0
  51. onnx_ir-0.1.15.dist-info/WHEEL +5 -0
  52. onnx_ir-0.1.15.dist-info/licenses/LICENSE +202 -0
  53. onnx_ir-0.1.15.dist-info/top_level.txt +1 -0
onnx_ir/_protocols.py ADDED
@@ -0,0 +1,627 @@
+ # Copyright (c) ONNX Project Contributors
+ # SPDX-License-Identifier: Apache-2.0
+ """Protocols for the ONNX IR.
+
+ This file defines the interfaces for tools to interact with the IR. The interfaces
+ are designed such that tools leveraging the IR can be decoupled from the IR
+ implementation. This allows for the implementation to evolve independently of the
+ tools.
+ """
+
+ # 👀
+ # NOTE: Why are we using protocols, instead of abstract base classes?
+ #
+ # Protocols are more flexible than abstract base classes. Users can define their
+ # own classes that implement the protocols without having to inherit from a
+ # specific base class. For example, a user can define a custom tensor class that
+ # implements the TensorProtocol without explicitly inheriting, and the IR can
+ # work with that class without any changes.
+ #
+ # `isinstance` checks can be slower with protocols. Avoid using `isinstance`
+ # checks when you can. Always check for concrete classes first.
+ #
+ # NOTE: Why are we using protocols, instead of using concrete classes directly?
+ #
+ # Protocols define the interface that is typically more stable. If you find yourself
+ # updating the protocols, pause 🛑, and carefully make sure it is absolutely needed
+ # and will improve the design. If you are adding new methods, consider if the method
+ # should be part of the protocol or if it should be a higher level convenience function
+ # defined outside the protocol.
+
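For illustration only (this is not part of the package): the note above is the central design decision of this module. A minimal sketch, assuming numpy and onnx-ir are installed, of a user class that satisfies the ArrayCompatible protocol declared further down in this file purely by structure, with no inheritance; importing from the private onnx_ir._protocols module is done here only for demonstration.

import numpy as np

from onnx_ir._protocols import ArrayCompatible


class MyTensor:
    """A user-defined class that never inherits from ArrayCompatible."""

    def __init__(self, data):
        self._data = np.asarray(data)

    def __array__(self, dtype=None) -> np.ndarray:
        # Defining __array__ is all the protocol requires.
        return self._data if dtype is None else self._data.astype(dtype)


tensor = MyTensor([1, 2, 3])
# Structural check through the runtime_checkable protocol: no inheritance needed.
# As the note says, such checks only verify member presence and can be slow,
# so prefer checking concrete classes first in hot paths.
print(isinstance(tensor, ArrayCompatible))  # True
print(np.asarray(tensor))                   # [1 2 3]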
+ from __future__ import annotations
+
+ import typing
+ from collections import OrderedDict
+ from collections.abc import (
+     Collection,
+     Iterable,
+     Iterator,
+     Mapping,
+     MutableMapping,
+     MutableSequence,
+     Sequence,
+ )
+ from typing import (
+     Any,
+     Literal,
+     Protocol,
+ )
+
+ from onnx_ir import _enums
+
+ if typing.TYPE_CHECKING:
+     import numpy as np
+     from typing_extensions import TypeAlias
+
+     # An identifier that will uniquely identify an operator. E.g. (domain, op_type, overload)
+     OperatorIdentifier: TypeAlias = tuple[str, str, str]
+
+
+ @typing.runtime_checkable
+ class ArrayCompatible(Protocol):
+     """Protocol for array-like objects.
+
+     An example of an array-like object is a numpy ndarray or a PyTorch Tensor.
+     Read more at https://numpy.org/devdocs/user/basics.interoperability.html
+     """
+
+     def __array__(self, dtype: Any) -> np.ndarray: ...
+
+
+ @typing.runtime_checkable
+ class DLPackCompatible(Protocol):
+     """Protocol for objects that can support dlpack.
+
+     Computation backends can call __dlpack__ to obtain the underlying data in a
+     tensor without copying the data. This allows us to use tensorflow tensors etc.
+     without copying the data.
+     """
+
+     def __dlpack__(self, *, stream: Any = ...) -> Any:
+         """Return PyCapsule."""
+         ...
+
+     def __dlpack_device__(self) -> Any:
+         """Return the device."""
+         ...
+
+
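For illustration (not package code), a sketch of what the DLPack contract above enables, assuming numpy >= 1.22 so that np.from_dlpack is available: the consumer receives the producer's buffer without a copy.

import numpy as np

from onnx_ir._protocols import DLPackCompatible


def to_numpy_zero_copy(obj: DLPackCompatible) -> np.ndarray:
    """Hand the object's buffer to numpy through DLPack, without copying."""
    return np.from_dlpack(obj)


# Any DLPack-capable object works; a plain numpy array stands in here.
source = np.arange(6, dtype=np.float32)
shared = to_numpy_zero_copy(source)
print(shared.shape, shared.dtype)  # (6,) float32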
+ @typing.runtime_checkable
+ class TensorProtocol(ArrayCompatible, DLPackCompatible, Protocol):
+     """Concrete tensor backed by data.
+
+     The protocol does not specify how the data is stored. That data is exposed
+     through the :attr:`raw` attribute for examination, but accessing :attr:`raw`
+     is typically not needed.
+
+     To use the tensor as a numpy array, call :meth:`numpy`. To convert the tensor
+     to a byte string for serialization, call :meth:`tobytes`.
+
+     It is recommended to check the size of the tensor before accessing the
+     underlying data, because accessing the data may be expensive and incur IO
+     overhead.
+
+     Attributes:
+         name: The name of the tensor.
+         shape: The shape of the tensor.
+         dtype: The data type of the elements of the tensor. It is an :class:`ir.DataType` enum.
+         doc_string: Documentation string.
+         raw: The raw data behind this tensor. It can be anything.
+         size: The number of elements in the tensor.
+         nbytes: The number of bytes in the tensor.
+         metadata_props: Metadata that will be serialized to the ONNX file.
+         meta: Metadata store for graph transform passes.
+     """
+
+     name: str | None
+     shape: ShapeProtocol
+     dtype: _enums.DataType
+     doc_string: str | None
+     raw: Any
+     metadata_props: MutableMapping[str, str]
+     meta: MutableMapping[str, Any]
+
+     @property
+     def size(self) -> int: ...
+
+     @property
+     def nbytes(self) -> int: ...
+
+     def numpy(self) -> np.ndarray:
+         """Return the tensor as a numpy array."""
+         ...
+
+     def __array__(self, dtype: Any = None) -> np.ndarray:
+         """Return the tensor as a numpy array, compatible with np.array."""
+         ...
+
+     def __dlpack__(self, *, stream: Any = ...) -> Any:
+         """Return PyCapsule."""
+         ...
+
+     def __dlpack_device__(self) -> Any:
+         """Return the device."""
+         ...
+
+     def tobytes(self) -> bytes:
+         """Return the tensor as a byte string conforming to the ONNX specification, in little endian."""
+         ...
+
+
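To make the docstring above concrete, an illustrative consumer written against TensorProtocol only (a sketch, not package code): it follows the advice to look at nbytes before materializing the data, then uses numpy() and tobytes() as described. The max_bytes threshold is an arbitrary choice for the example.

from __future__ import annotations

import numpy as np

from onnx_ir._protocols import TensorProtocol


def summarize_tensor(tensor: TensorProtocol, max_bytes: int = 1 << 20) -> str:
    """Describe a tensor, materializing its data only when it is small enough."""
    header = f"{tensor.name or '<anonymous>'}: dtype={tensor.dtype}, shape={tensor.shape}"
    if tensor.nbytes > max_bytes:
        # Reading the data may be expensive (e.g. externally stored tensors).
        return f"{header} ({tensor.nbytes} bytes, data not loaded)"
    array: np.ndarray = tensor.numpy()  # dense numpy view of the data
    raw: bytes = tensor.tobytes()  # little-endian bytes as serialized to ONNX
    return f"{header}, mean={array.mean():.4f}, serialized={len(raw)} bytes"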
+ @typing.runtime_checkable
+ class ValueProtocol(Protocol):
+     """Protocol for values.
+
+     A value is a named entity that can be used to represent an input or output of a graph,
+     a function, or a node. The information it stores generalizes over ``ValueInfoProto``
+     in the ONNX specification.
+
+     A :class:`Value` is either not owned by any node or owned by exactly one node.
+     When the value is not owned, it must be an input of a graph or a function, and
+     ``producer`` and ``index`` are ``None``.
+
+     When the value is owned by a node, it is an output of the node.
+     The node that produces the value can be accessed with :meth:`producer`.
+     The index of the output of the node that produces the value can be accessed with
+     :meth:`index`.
+
+     To find all the nodes that use this value as an input, call :meth:`uses`.
+
+     To check if the value is an output of a graph, call :meth:`is_graph_output`.
+
+     Attributes:
+         name: The name of the value. A value is always named when it is part of a graph.
+         shape: The shape of the value.
+         type: The type of the value.
+         metadata_props: Metadata that will be serialized to the ONNX file.
+         meta: Metadata store for graph transform passes.
+         doc_string: Documentation string.
+         const_value: The constant tensor if the value is a constant.
+     """
+
+     name: str
+     shape: ShapeProtocol | None
+     type: TypeProtocol | None
+     metadata_props: MutableMapping[str, str]
+     meta: MutableMapping[str, Any]
+     doc_string: str | None
+     const_value: TensorProtocol | None
+
+     def producer(self) -> NodeProtocol | None:
+         """The node that produces this value."""
+         ...
+
+     def index(self) -> int | None:
+         """The index of the output of the node that produces this value."""
+         ...
+
+     def uses(self) -> Collection[tuple[NodeProtocol, int]]:
+         """The set of (node, input_index) with node being those that use this value as an input."""
+         ...
+
+     def is_graph_output(self) -> bool:
+         """Whether this value is an output of a graph."""
+         ...
+
+     def replace_all_uses_with(
+         self, new_value: ValueProtocol | None, replace_graph_outputs: bool = False
+     ) -> None:
+         """Replace all uses of this value with the given new value.
+
+         Args:
+             new_value: The new value to replace this value with.
+             replace_graph_outputs: Whether to replace graph outputs that use this value.
+         """
+         ...
+
+
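A small illustrative helper set (not part of the package) showing how the producer/uses/replace_all_uses_with surface described above is typically combined when inspecting or rewiring a graph.

from __future__ import annotations

from onnx_ir._protocols import ValueProtocol


def describe_value(value: ValueProtocol) -> str:
    """Show where a value comes from and where it flows to."""
    producer = value.producer()
    origin = (
        "graph/function input"
        if producer is None
        else f"output {value.index()} of {producer.op_type}"
    )
    consumers = [f"{node.op_type}[input {index}]" for node, index in value.uses()]
    return f"{value.name}: {origin} -> {consumers}"


def bypass(old: ValueProtocol, new: ValueProtocol) -> None:
    """Rewire every consumer of ``old`` (including graph outputs) to ``new``."""
    old.replace_all_uses_with(new, replace_graph_outputs=True)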
+ @typing.runtime_checkable
+ class NodeProtocol(Protocol):
+     """Protocol for nodes.
+
+     A node represents an invocation of an operation on the :class:`Value` s in
+     the computational graph.
+
+     A node can be optionally named. A name should typically be assigned when the
+     node is added to a graph.
+
+     :attr:`domain`, :attr:`op_type`, and :attr:`overload` together uniquely identify
+     the operator, and are always strings. For ONNX operators, :attr:`domain` and :attr:`overload`
+     are both empty strings.
+
+     :attr:`inputs` and :attr:`outputs` are the input and output values of the node.
+
+     :attr:`attributes` are the attributes of the node. The attributes are stored in an
+     ordered dictionary to preserve the order of the attributes. This is a deviation from
+     the current ONNX spec where attributes are unordered, but it is helpful for tools
+     that rely on the order of the attributes, e.g. those converting to and from Python
+     function keyword arguments.
+
+     :attr:`version` is unique to the IR and is not specified in the ONNX spec. This
+     allows the IR to represent a graph with mixed opset versions. Deserializers
+     should decide how to reconcile the different versions within the graph. A typical
+     graph will have a single version, declared in the :class:`Graph` object and
+     the nodes will have ``None`` as the version.
+
+     Attributes:
+         domain: The domain of the operator. E.g. ``""`` for ONNX operators.
+         op_type: The operator name.
+         overload: The overload name when the node is invoking a function.
+         inputs: Input values.
+         outputs: Output values.
+         attributes: The attributes of the operator.
+         version: The version of the operator.
+         doc_string: Documentation string.
+         metadata_props: Metadata that will be serialized to the ONNX file.
+         meta: Metadata store for graph transform passes.
+     """
+
+     name: str | None
+     domain: str
+     op_type: str
+     overload: str
+     inputs: Sequence[ValueProtocol]
+     outputs: Sequence[ValueProtocol]
+     attributes: OrderedDict[str, AttributeProtocol | ReferenceAttributeProtocol]
+     version: int | None
+     doc_string: str | None
+     metadata_props: MutableMapping[str, str]
+     meta: MutableMapping[str, Any]
+
+     def replace_input_with(self, index: int, value: ValueProtocol | None) -> None:
+         """Set the input at the given index to the given value, replacing the original value."""
+         ...
+
+
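An illustrative pair of helpers (not package code) built on the (domain, op_type, overload) identity and the replace_input_with method described above.

from __future__ import annotations

from onnx_ir._protocols import NodeProtocol, ValueProtocol


def node_identifier(node: NodeProtocol) -> tuple[str, str, str]:
    """The operator identity of a node; ("", op_type, "") for standard ONNX operators."""
    return (node.domain, node.op_type, node.overload)


def swap_input(node: NodeProtocol, old: ValueProtocol, new: ValueProtocol) -> None:
    """Replace every occurrence of ``old`` among the node's inputs with ``new``."""
    for index, value in enumerate(node.inputs):
        if value is old:
            node.replace_input_with(index, new)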
+ @typing.runtime_checkable
+ class GraphProtocol(Protocol):
+     """Protocol for graphs.
+
+     Graph represents a computation graph. In addition to the fields specified
+     in the ONNX specification, it also contains a mapping of :attr:`opset_imports`. This
+     allows different subgraphs to import different opsets. It is the responsibility
+     of the deserializer to reconcile the different opsets.
+
+     The nodes are not guaranteed to be topologically sorted, but the
+     iteration order should be deterministic across different runs. It is the
+     responsibility of the user to maintain a topological order of the nodes.
+
+     Note that there is not a ``node`` attribute in the Graph. The Graph can be
+     seen as a Sequence of nodes and should be used as such. For example, to obtain
+     all nodes as a list, call ``list(graph)``.
+
+     .. note::
+         ``quantization_annotation`` is deserialized into the Value's ``meta`` field
+         under the ``quant_parameter_tensor_names`` key. Values that are stored
+         under this key will be serialized as quantization annotations.
+
+     Attributes:
+         name: The name of the graph.
+         inputs: The input values of the graph.
+         outputs: The output values of the graph.
+         initializers: The initializers in the graph.
+         doc_string: Documentation string.
+         opset_imports: Opsets imported by the graph.
+         metadata_props: Metadata that will be serialized to the ONNX file.
+         meta: Metadata store for graph transform passes.
+     """
+
+     name: str | None
+     inputs: MutableSequence[ValueProtocol]
+     outputs: MutableSequence[ValueProtocol]
+     initializers: MutableMapping[str, ValueProtocol]
+     doc_string: str
+     opset_imports: MutableMapping[str, int]
+     metadata_props: MutableMapping[str, str]
+     meta: MutableMapping[str, Any]
+
+     def __getitem__(self, index: int) -> NodeProtocol: ...
+     def __len__(self) -> int: ...
+     def __iter__(self) -> Iterator[NodeProtocol]: ...
+     def __reversed__(self) -> Iterator[NodeProtocol]: ...
+
+     # Mutation methods
+     def append(self, node: NodeProtocol, /) -> None:
+         """Append a node to the graph."""
+         ...
+
+     def extend(self, nodes: Iterable[NodeProtocol], /) -> None:
+         """Extend the graph with the given nodes."""
+         ...
+
+     def remove(self, node: NodeProtocol, /) -> None:
+         """Remove a node from the graph."""
+         ...
+
+     def insert_after(
+         self, node: NodeProtocol, new_nodes: Iterator[NodeProtocol] | NodeProtocol, /
+     ) -> None:
+         """Insert new nodes after the given node."""
+         ...
+
+     def insert_before(
+         self, node: NodeProtocol, new_nodes: Iterator[NodeProtocol] | NodeProtocol, /
+     ) -> None:
+         """Insert new nodes before the given node."""
+         ...
+
+     def sort(self) -> None:
+         """Topologically sort the nodes in the graph."""
+         ...
+
+
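An illustrative walk over the Graph interface above (a sketch, not package code): the graph is treated as the sequence of its nodes, so iteration and list(graph) work directly, while mutation goes through remove() and sort(). The dead-node criterion used here is a simplification for the example.

from __future__ import annotations

from collections import Counter

from onnx_ir._protocols import GraphProtocol


def op_histogram(graph: GraphProtocol) -> Counter[str]:
    """Count nodes per op_type; the graph itself is the sequence of nodes."""
    return Counter(node.op_type for node in graph)


def remove_dead_nodes(graph: GraphProtocol) -> None:
    """Drop nodes whose outputs are all unused and not graph outputs, then re-sort."""
    for node in list(graph):  # copy first: the graph is mutated while iterating
        alive = any(output.uses() or output.is_graph_output() for output in node.outputs)
        if not alive:
            graph.remove(node)
    graph.sort()  # restore a topological order after mutation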
+ @typing.runtime_checkable
+ class GraphViewProtocol(Protocol):
+     """Protocol for a read-only view on a graph.
+
+     The GraphView is useful for analysis of a subgraph. It can be initialized
+     with a subset of nodes from a :class:`Graph`. Creating GraphView does not
+     change the ownership of the nodes, and so it is possible to create multiple
+     GraphViews that contain the same nodes.
+
+     Attributes:
+         name: The name of the graph.
+         inputs: The input values of the graph.
+         outputs: The output values of the graph.
+         initializers: The initializers in the graph.
+         doc_string: Documentation string.
+         opset_imports: Opsets imported by the graph.
+         metadata_props: Metadata that will be serialized to the ONNX file.
+         meta: Metadata store for graph transform passes.
+     """
+
+     name: str | None
+     inputs: Sequence[ValueProtocol]
+     outputs: Sequence[ValueProtocol]
+     initializers: Mapping[str, ValueProtocol]
+     doc_string: str
+     opset_imports: Mapping[str, int]
+     metadata_props: MutableMapping[str, str]
+     meta: MutableMapping[str, Any]
+
+     def __getitem__(self, index: int) -> NodeProtocol: ...
+     def __len__(self) -> int: ...
+     def __iter__(self) -> Iterator[NodeProtocol]: ...
+     def __reversed__(self) -> Iterator[NodeProtocol]: ...
+
+
+ @typing.runtime_checkable
+ class ModelProtocol(Protocol):
+     """Protocol for models.
+
+     A model is a container for a graph and metadata. It is the top-level object
+     that represents an ONNX model.
+
+     Attributes:
+         graph: The graph of the model.
+         ir_version: The version of the IR.
+         producer_name: The name of the producer.
+         producer_version: The version of the producer.
+         domain: The domain of the model.
+         model_version: The version of the model.
+         doc_string: Documentation string.
+         functions: The functions defined in the model.
+         metadata_props: Metadata that will be serialized to the ONNX file.
+         meta: Metadata store for graph transform passes.
+     """
+
+     graph: GraphProtocol
+     ir_version: int
+     producer_name: str | None
+     producer_version: str | None
+     domain: str | None
+     model_version: int | None
+     doc_string: str | None
+     functions: MutableMapping[str, FunctionProtocol]
+     # TODO(justinchuby): Add training_info
+     opset_imports: MutableMapping[str, int]
+     metadata_props: MutableMapping[str, str]
+     meta: MutableMapping[str, Any]
+
+
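For illustration (not package code), a reader that touches only the attributes the ModelProtocol above guarantees; the empty string domain is rendered as ai.onnx, the default ONNX operator set.

from __future__ import annotations

from onnx_ir._protocols import ModelProtocol


def describe_model(model: ModelProtocol) -> str:
    """One-line summary of a model using only protocol attributes."""
    opsets = ", ".join(
        f"{domain or 'ai.onnx'}={version}" for domain, version in model.opset_imports.items()
    )
    return (
        f"ir_version={model.ir_version}, "
        f"producer={model.producer_name} {model.producer_version}, "
        f"graph={model.graph.name!r} with {len(model.graph)} nodes, "
        f"{len(model.functions)} functions, opsets: {opsets}"
    )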
+ @typing.runtime_checkable
+ class AttributeProtocol(Protocol):
+     """Protocol for ONNX attributes.
+
+     Attributes:
+         name: The name of the attribute.
+         type: The type of the attribute.
+         value: The value of the attribute.
+         doc_string: Documentation string.
+     """
+
+     name: str
+     type: _enums.AttributeType
+     value: Any
+     doc_string: str | None
+
+     def is_ref(self) -> Literal[False]: ...
+
+
+ @typing.runtime_checkable
+ class ReferenceAttributeProtocol(Protocol):
+     """Protocol for a reference attribute.
+
+     A reference attribute can only appear inside the definition body of a function.
+
+     Attributes:
+         name: The name of the attribute.
+         ref_attr_name: The name of the attribute definition this attribute refers to.
+         type: The type of the attribute.
+         doc_string: Documentation string.
+     """
+
+     name: str
+     ref_attr_name: str
+     type: _enums.AttributeType
+     doc_string: str | None
+
+     def is_ref(self) -> Literal[True]: ...
+
+
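The paired Literal[False]/Literal[True] returns of is_ref above let callers tell the two attribute kinds apart without isinstance checks; whether a static type checker narrows the union on this call is checker-dependent. A hedged illustration (not package code):

from __future__ import annotations

from onnx_ir._protocols import AttributeProtocol, ReferenceAttributeProtocol


def render_attribute(attr: AttributeProtocol | ReferenceAttributeProtocol) -> str:
    """Render an attribute, distinguishing concrete values from references."""
    if attr.is_ref():
        # Reference attribute: only valid inside a function body; the actual
        # value is supplied by the calling node's attribute of that name.
        return f"{attr.name} -> @{attr.ref_attr_name} ({attr.type.name})"
    # Concrete attribute: the value is stored directly on the attribute.
    return f"{attr.name} = {attr.value!r} ({attr.type.name})"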
+ @typing.runtime_checkable
+ class SparseTensorProtocol(Protocol):
+     values: TensorProtocol
+     indices: TensorProtocol
+     dims: Sequence[int]
+
+
+ @typing.runtime_checkable
+ class SymbolicDimProtocol(Protocol):
+     """Value of a single symbolic/dynamic dimension in a shape.
+
+     Attributes:
+         value: The value of the dimension.
+     """
+
+     value: str | None  # TODO(justinchuby): Maybe support sympy
+
+
+ @typing.runtime_checkable
+ class ShapeProtocol(Protocol):
+     """Protocol for ONNX shapes.
+
+     A shape is a sequence of dimensions.
+
+     Attributes:
+         dims: The dimensions of the shape.
+     """
+
+     dims: Sequence[int | SymbolicDimProtocol]
+
+     def __len__(self) -> int: ...
+     def __iter__(self) -> Iterator[int | SymbolicDimProtocol]: ...
+     @typing.overload
+     def __getitem__(self, index: int) -> int | SymbolicDimProtocol: ...
+     @typing.overload
+     def __getitem__(self, index: slice) -> tuple[int | SymbolicDimProtocol, ...]: ...
+     def __setitem__(
+         self, index: int, value: int | SymbolicDimProtocol | str | None
+     ) -> None: ...
+     def __eq__(self, other: object) -> bool: ...
+     def __ne__(self, value: object) -> bool: ...
+     def get_denotation(self, index: int) -> str | None: ...
+     def set_denotation(self, index: int, denotation: str | None) -> None: ...
+     def numpy(self) -> Sequence[int]: ...
+     def rank(self) -> int: ...
+
+
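An illustrative helper pair (not package code) consuming the Shape interface above: int dimensions are static, while SymbolicDimProtocol dimensions are dynamic and carry an optional name in their value attribute.

from __future__ import annotations

from onnx_ir._protocols import ShapeProtocol, SymbolicDimProtocol


def is_static(shape: ShapeProtocol) -> bool:
    """True when every dimension is a concrete integer."""
    return all(isinstance(dim, int) for dim in shape)


def format_shape(shape: ShapeProtocol) -> str:
    """Render e.g. "[batch, 3, 224, 224]", keeping symbolic dimension names."""

    def render(dim: int | SymbolicDimProtocol) -> str:
        if isinstance(dim, int):
            return str(dim)
        # A symbolic/dynamic dimension; value may be None for anonymous dims.
        return dim.value if dim.value is not None else "?"

    return "[" + ", ".join(render(dim) for dim in shape) + "]"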
+ @typing.runtime_checkable
+ class TypeProtocol(Protocol):
+     """Protocol for ONNX tensors, Sequence tensors, Optional tensors and Sparse tensors.
+
+     These types of tensors share the same attribute "elem_type", so they are
+     merged into the same interface. Unlike the ONNX TensorProto, shapes are not included
+     in the type and should be stored in the :class:`Value`.
+
+     Attributes:
+         denotation: An optional denotation can be used to denote the whole
+             type with a standard semantic description as to what is
+             stored inside.
+             Refer to https://github.com/onnx/onnx/blob/main/docs/TypeDenotation.md#type-denotation-definition
+             for pre-defined type denotations.
+         elem_type: The type of its elements for nested types like Sequence[Optional] tensors.
+             Or the DataType if the type is not nested.
+         dtype: The data type of the tensor or the nested tensor.
+     """
+
+     denotation: str | None
+     elem_type: TypeProtocol | _enums.DataType
+     dtype: _enums.DataType
+
+     def __eq__(self, value: object, /) -> bool: ...
+
+
+ @typing.runtime_checkable
+ class MapTypeProtocol(Protocol):
+     """Protocol for ONNX map types.
+
+     TODO: This protocol is not yet implemented in the ONNX IR.
+     """
+
+     key_type: typing.Literal[
+         _enums.DataType.STRING,
+         _enums.DataType.INT64,
+         _enums.DataType.INT32,
+         _enums.DataType.INT16,
+         _enums.DataType.INT8,
+         _enums.DataType.UINT64,
+         _enums.DataType.UINT32,
+         _enums.DataType.UINT16,
+         _enums.DataType.UINT8,
+     ]
+     value_type: _enums.DataType
+
+
+ @typing.runtime_checkable
+ class FunctionProtocol(Protocol):
+     """Protocol for ONNX functions.
+
+     Like a graph, a function can have nodes that are not topologically sorted. It is
+     the responsibility of the user to maintain a topological order of the nodes.
+
+     Note that there is not a ``node`` attribute in the Function. The Function can be
+     seen as a Sequence of nodes and should be used as such. For example, to obtain
+     all nodes as a list, call ``list(function)``.
+
+     Attributes:
+         name: The function name.
+         domain: The domain this function is defined in.
+         overload: The overload name when the function is overloaded.
+         inputs: The input values of the function.
+         attributes: The attributes this function defines.
+         outputs: The output values of the function.
+         opset_imports: Opsets imported by the function.
+         doc_string: Documentation string.
+         metadata_props: Metadata that will be serialized to the ONNX file.
+         meta: Metadata store for graph transform passes.
+     """
+
+     name: str
+     domain: str
+     overload: str
+     inputs: Sequence[ValueProtocol]
+     attributes: OrderedDict[str, AttributeProtocol]
+     outputs: Sequence[ValueProtocol]
+     doc_string: str
+     opset_imports: MutableMapping[str, int]
+     metadata_props: MutableMapping[str, str]
+     meta: MutableMapping[str, Any]
+
+     def __getitem__(self, index: int) -> NodeProtocol: ...
+     def __len__(self) -> int: ...
+     def __iter__(self) -> Iterator[NodeProtocol]: ...
+     def __reversed__(self) -> Iterator[NodeProtocol]: ...
+     def identifier(self) -> OperatorIdentifier:
+         """Return the unique identifier of the function."""
+         ...
+
+     # Mutation methods
+     def append(self, node: NodeProtocol, /) -> None:
+         """Append a node to the function."""
+         ...
+
+     def extend(self, nodes: Iterable[NodeProtocol], /) -> None:
+         """Extend the function with the given nodes."""
+         ...
+
+     def remove(self, node: NodeProtocol, /) -> None:
+         """Remove a node from the function."""
+         ...
+
+     def insert_after(
+         self, node: NodeProtocol, new_nodes: Iterator[NodeProtocol] | NodeProtocol, /
+     ) -> None:
+         """Insert new nodes after the given node."""
+         ...
+
+     def insert_before(
+         self, node: NodeProtocol, new_nodes: Iterator[NodeProtocol] | NodeProtocol, /
+     ) -> None:
+         """Insert new nodes before the given node."""
+         ...
+
+     def sort(self) -> None:
+         """Topologically sort the nodes in the function."""
+         ...
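To close out the protocol file, an illustrative lookup (not package code): a function's identifier() is the same (domain, name, overload) triple that a calling node carries as (domain, op_type, overload), so a node can be resolved to the function it invokes by scanning model.functions. No assumption is made here about how that mapping is keyed.

from __future__ import annotations

from onnx_ir._protocols import FunctionProtocol, ModelProtocol, NodeProtocol


def find_called_function(model: ModelProtocol, node: NodeProtocol) -> FunctionProtocol | None:
    """Return the function definition the node invokes, if the model has one."""
    target = (node.domain, node.op_type, node.overload)
    for function in model.functions.values():
        if function.identifier() == target:
            return function
    return None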