onnx-ir 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnx_ir/__init__.py +176 -0
- onnx_ir/_cloner.py +229 -0
- onnx_ir/_convenience/__init__.py +558 -0
- onnx_ir/_convenience/_constructors.py +291 -0
- onnx_ir/_convenience/_extractor.py +191 -0
- onnx_ir/_core.py +4435 -0
- onnx_ir/_display.py +54 -0
- onnx_ir/_enums.py +474 -0
- onnx_ir/_graph_comparison.py +23 -0
- onnx_ir/_graph_containers.py +373 -0
- onnx_ir/_io.py +133 -0
- onnx_ir/_linked_list.py +284 -0
- onnx_ir/_metadata.py +45 -0
- onnx_ir/_name_authority.py +72 -0
- onnx_ir/_polyfill.py +26 -0
- onnx_ir/_protocols.py +627 -0
- onnx_ir/_safetensors/__init__.py +510 -0
- onnx_ir/_tape.py +242 -0
- onnx_ir/_thirdparty/asciichartpy.py +310 -0
- onnx_ir/_type_casting.py +89 -0
- onnx_ir/_version_utils.py +48 -0
- onnx_ir/analysis/__init__.py +21 -0
- onnx_ir/analysis/_implicit_usage.py +74 -0
- onnx_ir/convenience.py +38 -0
- onnx_ir/external_data.py +459 -0
- onnx_ir/passes/__init__.py +41 -0
- onnx_ir/passes/_pass_infra.py +351 -0
- onnx_ir/passes/common/__init__.py +54 -0
- onnx_ir/passes/common/_c_api_utils.py +76 -0
- onnx_ir/passes/common/clear_metadata_and_docstring.py +60 -0
- onnx_ir/passes/common/common_subexpression_elimination.py +207 -0
- onnx_ir/passes/common/constant_manipulation.py +230 -0
- onnx_ir/passes/common/default_attributes.py +99 -0
- onnx_ir/passes/common/identity_elimination.py +120 -0
- onnx_ir/passes/common/initializer_deduplication.py +179 -0
- onnx_ir/passes/common/inliner.py +223 -0
- onnx_ir/passes/common/naming.py +280 -0
- onnx_ir/passes/common/onnx_checker.py +57 -0
- onnx_ir/passes/common/output_fix.py +141 -0
- onnx_ir/passes/common/shape_inference.py +112 -0
- onnx_ir/passes/common/topological_sort.py +37 -0
- onnx_ir/passes/common/unused_removal.py +215 -0
- onnx_ir/py.typed +1 -0
- onnx_ir/serde.py +2043 -0
- onnx_ir/tape.py +15 -0
- onnx_ir/tensor_adapters.py +210 -0
- onnx_ir/testing.py +197 -0
- onnx_ir/traversal.py +118 -0
- onnx_ir-0.1.15.dist-info/METADATA +68 -0
- onnx_ir-0.1.15.dist-info/RECORD +53 -0
- onnx_ir-0.1.15.dist-info/WHEEL +5 -0
- onnx_ir-0.1.15.dist-info/licenses/LICENSE +202 -0
- onnx_ir-0.1.15.dist-info/top_level.txt +1 -0
onnx_ir/tape.py
ADDED
@@ -0,0 +1,15 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
"""Taping module to facilitate building IR graphs."""

# NOTE: Be *selective* about what this module exports because it is part of the public API.

from __future__ import annotations

__all__ = [
    "Tape",
]

from onnx_ir._tape import Tape

Tape.__module__ = __name__
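The module above only re-exports `Tape` from the private `onnx_ir._tape` module (whose body is not shown in this diff). As a rough, hedged sketch of how a tape is typically used to record nodes while building a graph; the `op()` and `nodes` members below are assumptions about the `_tape.Tape` API, not something this diff confirms:

```python
import onnx_ir as ir
from onnx_ir.tape import Tape

# Placeholder graph inputs (ir.Value and ir.TensorType are part of the public IR API).
a = ir.Value(name="a", type=ir.TensorType(ir.DataType.FLOAT))
b = ir.Value(name="b", type=ir.TensorType(ir.DataType.FLOAT))

tape = Tape()
# Assumed API: each op() call records a Node on the tape and returns its output Value.
c = tape.op("Add", inputs=[a, b])
d = tape.op("Relu", inputs=[c])

# Assumed API: the recorded nodes can then be assembled into an ir.Graph.
print(len(tape.nodes))  # 2
```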
onnx_ir/tensor_adapters.py
ADDED
@@ -0,0 +1,210 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
"""Compatible adapters implementing the TensorProtocol interface for various framework tensor types.

This module provides public classes that implement the :class:`onnx_ir.TensorProtocol`
interface for various tensor types from popular deep learning frameworks.

You can use these classes to create tensors and use them in the IR graph like any other tensor.

Example::
    import torch
    import onnx_ir as ir

    # Create a PyTorch tensor
    torch_tensor = torch.tensor([1, 2, 3])

    # Wrap the PyTorch tensor in a TorchTensor object
    ir_tensor = ir.tensor_adapters.TorchTensor(torch_tensor)

    # Use the IR tensor in the graph
    attr = ir.AttrTensor("x", ir_tensor)
    print(attr)
"""

# pylint: disable=import-outside-toplevel

# NOTE: DO NOT import any framework-specific modules here in the global namespace.

from __future__ import annotations

__all__ = [
    "from_torch_dtype",
    "to_torch_dtype",
    "TorchTensor",
]

import ctypes
from typing import TYPE_CHECKING, Any

import numpy.typing as npt

import onnx_ir as ir
from onnx_ir import _core

if TYPE_CHECKING:
    import torch


_TORCH_DTYPE_TO_ONNX: dict[torch.dtype, ir.DataType] | None = None
_ONNX_DTYPE_TO_TORCH: dict[ir.DataType, torch.dtype] | None = None


def from_torch_dtype(dtype: torch.dtype) -> ir.DataType:
    """Convert a PyTorch dtype to an ONNX IR DataType."""
    global _TORCH_DTYPE_TO_ONNX
    if _TORCH_DTYPE_TO_ONNX is None:
        import torch

        _TORCH_DTYPE_TO_ONNX = {
            torch.bfloat16: ir.DataType.BFLOAT16,
            torch.bool: ir.DataType.BOOL,
            torch.complex128: ir.DataType.COMPLEX128,
            torch.complex64: ir.DataType.COMPLEX64,
            torch.float16: ir.DataType.FLOAT16,
            torch.float32: ir.DataType.FLOAT,
            torch.float64: ir.DataType.DOUBLE,
            torch.float8_e4m3fn: ir.DataType.FLOAT8E4M3FN,
            torch.float8_e4m3fnuz: ir.DataType.FLOAT8E4M3FNUZ,
            torch.float8_e5m2: ir.DataType.FLOAT8E5M2,
            torch.float8_e5m2fnuz: ir.DataType.FLOAT8E5M2FNUZ,
            torch.int16: ir.DataType.INT16,
            torch.int32: ir.DataType.INT32,
            torch.int64: ir.DataType.INT64,
            torch.int8: ir.DataType.INT8,
            torch.uint8: ir.DataType.UINT8,
            torch.uint16: ir.DataType.UINT16,
            torch.uint32: ir.DataType.UINT32,
            torch.uint64: ir.DataType.UINT64,
        }
        if hasattr(torch, "float8_e8m0fnu"):
            # torch.float8_e8m0fnu is available in PyTorch 2.7+
            _TORCH_DTYPE_TO_ONNX[torch.float8_e8m0fnu] = ir.DataType.FLOAT8E8M0
        if hasattr(torch, "int2"):
            _TORCH_DTYPE_TO_ONNX[torch.int2] = ir.DataType.INT2
        if hasattr(torch, "uint2"):
            _TORCH_DTYPE_TO_ONNX[torch.uint2] = ir.DataType.UINT2

    if dtype not in _TORCH_DTYPE_TO_ONNX:
        raise TypeError(
            f"Unsupported PyTorch dtype '{dtype}'. "
            "Please use a supported dtype from the list: "
            f"{list(_TORCH_DTYPE_TO_ONNX.keys())}"
        )
    return _TORCH_DTYPE_TO_ONNX[dtype]


def to_torch_dtype(dtype: ir.DataType) -> torch.dtype:
    """Convert an ONNX IR DataType to a PyTorch dtype."""
    global _ONNX_DTYPE_TO_TORCH
    if _ONNX_DTYPE_TO_TORCH is None:
        import torch

        _ONNX_DTYPE_TO_TORCH = {
            ir.DataType.BFLOAT16: torch.bfloat16,
            ir.DataType.BOOL: torch.bool,
            ir.DataType.COMPLEX128: torch.complex128,
            ir.DataType.COMPLEX64: torch.complex64,
            ir.DataType.FLOAT16: torch.float16,
            ir.DataType.FLOAT: torch.float32,
            ir.DataType.DOUBLE: torch.float64,
            ir.DataType.FLOAT8E4M3FN: torch.float8_e4m3fn,
            ir.DataType.FLOAT8E4M3FNUZ: torch.float8_e4m3fnuz,
            ir.DataType.FLOAT8E5M2: torch.float8_e5m2,
            ir.DataType.FLOAT8E5M2FNUZ: torch.float8_e5m2fnuz,
            ir.DataType.INT16: torch.int16,
            ir.DataType.INT32: torch.int32,
            ir.DataType.INT64: torch.int64,
            ir.DataType.INT8: torch.int8,
            ir.DataType.UINT8: torch.uint8,
            ir.DataType.UINT16: torch.uint16,
            ir.DataType.UINT32: torch.uint32,
            ir.DataType.UINT64: torch.uint64,
        }

        if hasattr(torch, "float8_e8m0fnu"):
            # torch.float8_e8m0fnu is available in PyTorch 2.7+
            _ONNX_DTYPE_TO_TORCH[ir.DataType.FLOAT8E8M0] = torch.float8_e8m0fnu
        if hasattr(torch, "int2"):
            _ONNX_DTYPE_TO_TORCH[ir.DataType.INT2] = torch.int2
        if hasattr(torch, "uint2"):
            _ONNX_DTYPE_TO_TORCH[ir.DataType.UINT2] = torch.uint2

    if dtype not in _ONNX_DTYPE_TO_TORCH:
        if dtype == ir.DataType.FLOAT8E8M0:
            raise ValueError(
                "The requested DataType 'FLOAT8E8M0' is only supported in PyTorch 2.7+. "
                "Please upgrade your PyTorch version to use this dtype."
            )
        raise TypeError(
            f"Unsupported conversion from ONNX dtype '{dtype}' to torch. "
            "Please use a supported dtype from the list: "
            f"{list(_ONNX_DTYPE_TO_TORCH.keys())}"
        )
    return _ONNX_DTYPE_TO_TORCH[dtype]


class TorchTensor(_core.Tensor):
    def __init__(
        self, tensor: torch.Tensor, name: str | None = None, doc_string: str | None = None
    ):
        # Pass the tensor as the raw data to ir.Tensor's constructor
        super().__init__(
            tensor, dtype=from_torch_dtype(tensor.dtype), name=name, doc_string=doc_string
        )

    def numpy(self) -> npt.NDArray:
        import torch

        self.raw: torch.Tensor
        if self.dtype == ir.DataType.BFLOAT16:
            return self.raw.view(torch.uint16).numpy(force=True).view(self.dtype.numpy())
        if self.dtype in {
            ir.DataType.FLOAT8E4M3FN,
            ir.DataType.FLOAT8E4M3FNUZ,
            ir.DataType.FLOAT8E5M2,
            ir.DataType.FLOAT8E5M2FNUZ,
            ir.DataType.FLOAT8E8M0,
        }:
            return self.raw.view(torch.uint8).numpy(force=True).view(self.dtype.numpy())
        if self.dtype in {ir.DataType.INT2, ir.DataType.UINT2}:
            return self.raw.view(torch.uint8).numpy(force=True).view(self.dtype.numpy())

        return self.raw.numpy(force=True)

    def __array__(self, dtype: Any = None, copy: bool | None = None) -> npt.NDArray:
        del copy  # Unused, but needed for the signature
        if dtype is None:
            return self.numpy()
        return self.numpy().__array__(dtype)

    def _get_cbytes(self):
        """Get a ctypes byte array pointing to the tensor data."""
        import torch._subclasses.fake_tensor

        with torch._subclasses.fake_tensor.unset_fake_temporarily():  # pylint: disable=protected-access
            # Disable any fake mode so calling detach() etc. will return a real tensor
            tensor = self.raw.detach().cpu().contiguous()

        if isinstance(tensor, torch._subclasses.fake_tensor.FakeTensor):  # pylint: disable=protected-access
            raise TypeError(
                f"Cannot take content out from the FakeTensor ('{self.name}'). Please replace the tensor "
                "with a tensor backed by real data using ONNXProgram.apply_weights() "
                "or save the model without initializers by setting include_initializers=False."
            )

        # Return the tensor to ensure it is not garbage collected while the ctypes array is in use
        return tensor, (ctypes.c_ubyte * tensor.element_size() * tensor.numel()).from_address(
            tensor.data_ptr()
        )

    def tobytes(self) -> bytes:
        # Implement tobytes to support native PyTorch types so we can use types like bfloat16
        # Reading from memory directly is also more efficient because
        # it avoids copying to a NumPy array
        _, data = self._get_cbytes()
        return bytes(data)

    def tofile(self, file) -> None:
        _, data = self._get_cbytes()
        return file.write(data)
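For reference, a small usage sketch of the adapters above, mirroring the module docstring (it assumes PyTorch and `ml_dtypes` are installed):

```python
import torch
import onnx_ir as ir
from onnx_ir import tensor_adapters

# dtype conversion helpers defined above
assert tensor_adapters.from_torch_dtype(torch.float32) == ir.DataType.FLOAT
assert tensor_adapters.to_torch_dtype(ir.DataType.BFLOAT16) == torch.bfloat16

# Wrap a bfloat16 torch tensor; tobytes() reads the raw memory via ctypes,
# and numpy() returns an ml_dtypes-backed view, as implemented above.
t = tensor_adapters.TorchTensor(torch.tensor([1.0, 2.0], dtype=torch.bfloat16), name="w")
raw = t.tobytes()  # 4 bytes: 2 elements x 2 bytes per bfloat16
arr = t.numpy()
```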
onnx_ir/testing.py
ADDED
@@ -0,0 +1,197 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
"""Utilities for testing."""

from __future__ import annotations

__all__ = [
    "assert_onnx_proto_equal",
]

import difflib
import math
from collections.abc import Collection, Sequence
from typing import Any

import google.protobuf.message
import onnx


def _opset_import_key(opset_import: onnx.OperatorSetIdProto) -> tuple[str, int]:
    return (opset_import.domain, opset_import.version)


def _value_info_key(value_info: onnx.ValueInfoProto) -> str:
    return value_info.name


def _function_key(function: onnx.FunctionProto) -> tuple[str, str, str]:
    return (function.domain, function.name, getattr(function, "overload", ""))


def _find_duplicates(with_duplicates: Collection[Any]) -> list[Any]:
    """Return a list of duplicated elements in a collection."""
    seen = set()
    duplicates = []
    for x in with_duplicates:
        if x in seen:
            duplicates.append(x)
        seen.add(x)
    return duplicates


def assert_onnx_proto_equal(
    actual: google.protobuf.message.Message | Any,
    expected: google.protobuf.message.Message | Any,
    ignore_initializer_value_proto: bool = False,
) -> None:
    """Assert that two ONNX protos are equal.

    Equality is defined as having the same fields with the same values. When
    a field takes the default value, it is considered equal to the field
    not being set.

    Sequential fields with name `opset_import`, `value_info`, and `functions` are
    compared disregarding the order of their elements.

    Args:
        actual: The first ONNX proto.
        expected: The second ONNX proto.
        ignore_initializer_value_proto: Ignore value protos for initializers if there
            are extra ones in the actual proto.
    """
    assert type(actual) is type(expected), (
        f"Type not equal: {type(actual)} != {type(expected)}"
    )

    a_fields = {field.name: value for field, value in actual.ListFields()}
    b_fields = {field.name: value for field, value in expected.ListFields()}
    all_fields = sorted(set(a_fields.keys()) | set(b_fields.keys()))
    if isinstance(actual, onnx.GraphProto) and isinstance(expected, onnx.GraphProto):
        actual_initializer_names = {i.name for i in actual.initializer}
        expected_initializer_names = {i.name for i in expected.initializer}
    else:
        actual_initializer_names = set()
        expected_initializer_names = set()

    # Record and report all errors
    errors = []
    for field in all_fields:  # pylint: disable=too-many-nested-blocks
        # Obtain the default value if the field is not set. This way we can compare the two fields.
        a_value = getattr(actual, field)
        b_value = getattr(expected, field)
        if (
            isinstance(a_value, Sequence)
            and isinstance(b_value, Sequence)
            and not isinstance(a_value, (str, bytes))
            and not isinstance(b_value, (str, bytes))
        ):
            # Check length first
            a_keys: list[Any] = []
            b_keys: list[Any] = []
            if field == "opset_import":
                a_value = sorted(a_value, key=_opset_import_key)
                b_value = sorted(b_value, key=_opset_import_key)
                a_keys = [_opset_import_key(opset_import) for opset_import in a_value]
                b_keys = [_opset_import_key(opset_import) for opset_import in b_value]
            elif field == "value_info":
                if (
                    ignore_initializer_value_proto
                    and isinstance(actual, onnx.GraphProto)
                    and isinstance(expected, onnx.GraphProto)
                ):
                    # Filter out initializers from the value_info list
                    a_value = [
                        value_info
                        for value_info in a_value
                        if value_info.name not in actual_initializer_names
                    ]
                    b_value = [
                        value_info
                        for value_info in b_value
                        if value_info.name not in expected_initializer_names
                    ]
                a_value = sorted(a_value, key=_value_info_key)
                b_value = sorted(b_value, key=_value_info_key)
                a_keys = [_value_info_key(value_info) for value_info in a_value]
                b_keys = [_value_info_key(value_info) for value_info in b_value]
            elif field == "functions":
                a_value = sorted(a_value, key=_function_key)
                b_value = sorted(b_value, key=_function_key)
                a_keys = [_function_key(functions) for functions in a_value]
                b_keys = [_function_key(functions) for functions in b_value]

            if a_keys != b_keys:
                keys_only_in_actual = set(a_keys) - set(b_keys)
                keys_only_in_expected = set(b_keys) - set(a_keys)
                error_message = (
                    f"Field {field} not equal: keys_only_in_actual={keys_only_in_actual}, keys_only_in_expected={keys_only_in_expected}. "
                    f"Field type: {type(a_value)}. "
                    f"Duplicated a_keys: {_find_duplicates(a_keys)}, duplicated b_keys: {_find_duplicates(b_keys)}"
                )
                errors.append(error_message)
            elif len(a_value) != len(b_value):
                error_message = (
                    f"Field {field} not equal: len(a)={len(a_value)}, len(b)={len(b_value)} "
                    f"Field type: {type(a_value)}"
                )
                errors.append(error_message)
            else:
                # Check every element
                for i in range(len(a_value)):  # pylint: disable=consider-using-enumerate
                    actual_value_i = a_value[i]
                    expected_value_i = b_value[i]
                    if isinstance(
                        actual_value_i, google.protobuf.message.Message
                    ) and isinstance(expected_value_i, google.protobuf.message.Message):
                        try:
                            assert_onnx_proto_equal(
                                actual_value_i,
                                expected_value_i,
                                ignore_initializer_value_proto=ignore_initializer_value_proto,
                            )
                        except AssertionError as e:
                            error_message = f"Field {field} index {i} in sequence not equal. type(actual_value_i): {type(actual_value_i)}, type(expected_value_i): {type(expected_value_i)}, actual_value_i: {actual_value_i}, expected_value_i: {expected_value_i}"
                            error_message = (
                                str(e) + "\n\nCaused by the above error\n\n" + error_message
                            )
                            errors.append(error_message)
                    elif actual_value_i != expected_value_i:
                        if (
                            isinstance(actual_value_i, float)
                            and isinstance(expected_value_i, float)
                            and math.isnan(actual_value_i)
                            and math.isnan(expected_value_i)
                        ):
                            # Consider NaNs equal
                            continue
                        error_message = f"Field {field} index {i} in sequence not equal. type(actual_value_i): {type(actual_value_i)}, type(expected_value_i): {type(expected_value_i)}"
                        for line in difflib.ndiff(
                            str(actual_value_i).splitlines(),
                            str(expected_value_i).splitlines(),
                        ):
                            error_message += "\n" + line
                        errors.append(error_message)
        elif isinstance(a_value, google.protobuf.message.Message) and isinstance(
            b_value, google.protobuf.message.Message
        ):
            assert_onnx_proto_equal(
                a_value, b_value, ignore_initializer_value_proto=ignore_initializer_value_proto
            )
        elif a_value != b_value:
            if (
                isinstance(a_value, float)
                and isinstance(b_value, float)
                and math.isnan(a_value)
                and math.isnan(b_value)
            ):
                # Consider NaNs equal
                continue
            error_message = (
                f"Field {field} not equal. field_actual: {a_value}, field_expected: {b_value}"
            )
            errors.append(error_message)
    if errors:
        raise AssertionError(
            f"Protos not equal: {type(actual)} != {type(expected)}\n" + "\n".join(errors)
        )
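As a self-contained illustration of the assertion helper above (a hedged sketch built with `onnx.helper`; the two models differ only in the order of their `opset_import` entries, which the function deliberately ignores):

```python
import onnx
import onnx.helper
from onnx_ir.testing import assert_onnx_proto_equal

node = onnx.helper.make_node("Relu", ["x"], ["y"])
x = onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1])
y = onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1])
graph = onnx.helper.make_graph([node], "g", [x], [y])

# Same graph, opset_import listed in a different order in each model.
model_a = onnx.helper.make_model(
    graph,
    opset_imports=[onnx.helper.make_opsetid("", 21), onnx.helper.make_opsetid("custom", 1)],
)
model_b = onnx.helper.make_model(
    graph,
    opset_imports=[onnx.helper.make_opsetid("custom", 1), onnx.helper.make_opsetid("", 21)],
)

assert_onnx_proto_equal(model_a, model_b)  # passes; raises AssertionError on any mismatch
```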
onnx_ir/traversal.py
ADDED
@@ -0,0 +1,118 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
"""Utilities for traversing the IR graph."""

from __future__ import annotations

__all__ = [
    "RecursiveGraphIterator",
]

from collections.abc import Iterator, Reversible
from typing import Callable, Union

from typing_extensions import Self

from onnx_ir import _core, _enums

GraphLike = Union[_core.Graph, _core.Function, _core.GraphView]


class RecursiveGraphIterator(Iterator[_core.Node], Reversible[_core.Node]):
    def __init__(
        self,
        graph_like: GraphLike,
        *,
        recursive: Callable[[_core.Node], bool] | None = None,
        reverse: bool = False,
        enter_graph: Callable[[GraphLike], None] | None = None,
        exit_graph: Callable[[GraphLike], None] | None = None,
    ):
        """Iterate over the nodes in the graph, recursively visiting subgraphs.

        This iterator allows for traversing the nodes of a graph and its subgraphs
        in a depth-first manner. It supports optional callbacks for entering and exiting
        subgraphs, as well as a callback `recursive` to determine whether to visit subgraphs
        contained within nodes.

        .. versionadded:: 0.1.6
            Added the `enter_graph` and `exit_graph` callbacks.

        Args:
            graph_like: The graph to traverse.
            recursive: A callback that determines whether to recursively visit the subgraphs
                contained in a node. If not provided, all nodes in subgraphs are visited.
            reverse: Whether to iterate in reverse order.
            enter_graph: An optional callback that is called when entering a subgraph.
            exit_graph: An optional callback that is called when exiting a subgraph.
        """
        self._graph = graph_like
        self._recursive = recursive
        self._reverse = reverse
        self._iterator = self._recursive_node_iter(graph_like)
        self._enter_graph = enter_graph
        self._exit_graph = exit_graph

    def __iter__(self) -> Self:
        self._iterator = self._recursive_node_iter(self._graph)
        return self

    def __next__(self) -> _core.Node:
        return next(self._iterator)

    def _recursive_node_iter(
        self, graph: _core.Graph | _core.Function | _core.GraphView
    ) -> Iterator[_core.Node]:
        iterable = reversed(graph) if self._reverse else graph

        if self._enter_graph is not None:
            self._enter_graph(graph)

        for node in iterable:  # type: ignore[union-attr]
            yield node
            if self._recursive is not None and not self._recursive(node):
                continue
            yield from self._iterate_subgraphs(node)

        if self._exit_graph is not None:
            self._exit_graph(graph)

    def _iterate_subgraphs(self, node: _core.Node):
        for attr in node.attributes.values():
            if not isinstance(attr, _core.Attr):
                continue
            if attr.type == _enums.AttributeType.GRAPH:
                if self._enter_graph is not None:
                    self._enter_graph(attr.value)
                yield from RecursiveGraphIterator(
                    attr.value,
                    recursive=self._recursive,
                    reverse=self._reverse,
                    enter_graph=self._enter_graph,
                    exit_graph=self._exit_graph,
                )
                if self._exit_graph is not None:
                    self._exit_graph(attr.value)
            elif attr.type == _enums.AttributeType.GRAPHS:
                graphs = reversed(attr.value) if self._reverse else attr.value
                for graph in graphs:
                    if self._enter_graph is not None:
                        self._enter_graph(graph)
                    yield from RecursiveGraphIterator(
                        graph,
                        recursive=self._recursive,
                        reverse=self._reverse,
                        enter_graph=self._enter_graph,
                        exit_graph=self._exit_graph,
                    )
                    if self._exit_graph is not None:
                        self._exit_graph(graph)

    def __reversed__(self) -> Iterator[_core.Node]:
        return RecursiveGraphIterator(
            self._graph,
            recursive=self._recursive,
            reverse=not self._reverse,
            enter_graph=self._enter_graph,
            exit_graph=self._exit_graph,
        )
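A brief usage sketch for the iterator above (hedged: it assumes `ir.load` from `_io.py` in the file listing, and a model file on disk named `model.onnx`):

```python
import onnx_ir as ir
from onnx_ir.traversal import RecursiveGraphIterator

model = ir.load("model.onnx")  # assumption: ir.load reads a .onnx file into an ir.Model

# Visit every node, including nodes nested in If/Loop/Scan subgraphs.
for node in RecursiveGraphIterator(model.graph):
    print(node.op_type)

# Skip Loop bodies and log when subgraphs are entered and exited.
for node in RecursiveGraphIterator(
    model.graph,
    recursive=lambda n: n.op_type != "Loop",
    enter_graph=lambda g: print("entering", g.name),
    exit_graph=lambda g: print("exiting", g.name),
):
    pass
```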
onnx_ir-0.1.15.dist-info/METADATA
ADDED
@@ -0,0 +1,68 @@
Metadata-Version: 2.4
Name: onnx-ir
Version: 0.1.15
Summary: Efficient in-memory representation for ONNX
Author-email: ONNX Contributors <onnx-technical-discuss@lists.lfaidata.foundation>
License-Expression: Apache-2.0
Project-URL: Homepage, https://onnx.ai/ir-py
Project-URL: Issues, https://github.com/onnx/ir-py/issues
Project-URL: Repository, https://github.com/onnx/ir-py
Classifier: Development Status :: 4 - Beta
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: numpy
Requires-Dist: onnx>=1.16
Requires-Dist: typing_extensions>=4.10
Requires-Dist: ml_dtypes>=0.5.0
Dynamic: license-file

# <img src="docs/_static/logo-light.png" alt="ONNX IR" width="250"/>

[](https://pypi.org/project/onnx-ir)
[](https://github.com/astral-sh/ruff)
[](https://codecov.io/gh/onnx/ir-py)
[](https://pepy.tech/projects/onnx-ir)

An in-memory IR that supports the full ONNX spec, designed for graph construction, analysis and transformation.

## Getting Started

[onnx-ir documentation](https://onnx.ai/ir-py/)

### Installation

Via pip:

```
pip install onnx-ir
```

Or from source:

```
pip install git+https://github.com/onnx/ir-py.git
```

## Features ✨

- Full ONNX spec support: all valid models representable by ONNX protobuf, and a subset of invalid models (so you can load and fix them).
- Low memory footprint: mmap'ed external tensors; unified interface for ONNX TensorProto, NumPy arrays, PyTorch tensors, etc. No tensor size limitation. Zero copies.
- Straightforward access patterns: Access value information and traverse the graph topology with ease.
- Robust mutation: Create as many iterators as you like on the graph while mutating it.
- Speed: Performant graph manipulation, serialization/deserialization to Protobuf.
- Pythonic and familiar APIs: Classes define Pythonic APIs and still map to ONNX protobuf concepts in an intuitive way.
- No protobuf dependency: The IR does not require protobuf once the model is converted to the IR representation, decoupling it from the serialization format.

## Concept Diagram

## Code Organization 🗺️

- [`_protocols.py`](src/onnx_ir/_protocols.py): Interfaces defined for all entities in the IR.
- [`_core.py`](src/onnx_ir/_core.py): Implementation of the core entities in the IR, including `Model`, `Graph`, `Node`, `Value`, and others.
- [`_enums.py`](src/onnx_ir/_enums.py): Definition of the type enums that correspond to the `DataType` and `AttributeType` in `onnx.proto`.
- [`_name_authority.py`](src/onnx_ir/_name_authority.py): The authority for giving names to entities in the graph; used internally.
- [`_linked_list.py`](src/onnx_ir/_linked_list.py): The data structure used as the node container in the graph, supporting robust iteration and mutation. Internal.
- [`_metadata.py`](src/onnx_ir/_metadata.py): Metadata store for all entities in the IR.
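To make the README's load/inspect/save workflow concrete, a minimal round trip might look like the following (a hedged sketch, not part of the package; `ir.load` and `ir.save` come from `_io.py` in the listing, and the attribute names used here are assumptions about the public API):

```python
import onnx_ir as ir

model = ir.load("model.onnx")   # external tensors are mmap'ed rather than copied
print(model.opset_imports)      # e.g. {'': 21}

# Iterate over the nodes of the main graph and inspect their outputs.
for node in model.graph:
    print(node.op_type, [v.name for v in node.outputs])

ir.save(model, "model_out.onnx")  # serialized back to protobuf only at save time
```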
onnx_ir-0.1.15.dist-info/RECORD
ADDED
@@ -0,0 +1,53 @@
onnx_ir/__init__.py,sha256=KRCf_XITzIbeZi5MvpUkjEA6QtMbEzFgw145-WbhRAM,3588
onnx_ir/_cloner.py,sha256=b1tUeg3K3yfAk7LE2IIAR0TlKU-mZw3vE9hpfumGKXg,9718
onnx_ir/_core.py,sha256=a19zUiqEFhc355RroJiiyw4u2J8vclkfb3mn7DjcL6g,164263
onnx_ir/_display.py,sha256=eVnikQJ2xAi5JjM6JPaHlYCMSOyna5-2FYgZWvsusNI,1420
onnx_ir/_enums.py,sha256=Pv7jaLzSjIX95MjCRUjo1_SHbOkUVEkm-rubacrEU7E,14372
onnx_ir/_graph_comparison.py,sha256=8_D1gu547eCDotEUqxfIJhUGU_Ufhfji7sfsSraOj3g,727
onnx_ir/_graph_containers.py,sha256=PRKrshRZ5rzWCgRs1TefzJq9n8wyo7OqeKy3XxMhyys,14265
onnx_ir/_io.py,sha256=dA2_mJCxvBBS_0cK6YW_vuuaCuxXnsZdqKNmYx2q_gM,5205
onnx_ir/_linked_list.py,sha256=PXVcbHLMXHLZ6DxZnElnJLWfhBPvYcXUxM8Y3K4J7lM,10592
onnx_ir/_metadata.py,sha256=lzmCaYy4kAJrPW-PSGKF4a78LisxF0hiofySNQ3Mwhg,1544
onnx_ir/_name_authority.py,sha256=PnoV9TRgMLussZNufWavJXosDWx5avPfldVjMWEEz18,3036
onnx_ir/_polyfill.py,sha256=LzAGBKQbVDlURC0tgQgaxgkYU4rESgCYnqVs-u-Vsx8,887
onnx_ir/_protocols.py,sha256=PHJtdhATDNwnfocIpvQUNM2dPn1sxfGbU1JPhFDvoIA,21667
onnx_ir/_tape.py,sha256=kVTejszu7ljRm3qACbgVNJL4YXMSpiq1Vu49VBRggYU,8148
onnx_ir/_type_casting.py,sha256=GgGcAV6f2mzwHFcHCI9Xd-TBYwxAhcKt3U5PCP9VXOU,3195
onnx_ir/_version_utils.py,sha256=bZThuE7meVHFOY1DLsmss9WshVIp9iig7udGfDbVaK4,1333
onnx_ir/convenience.py,sha256=ZK-m9LNcDWRvBl9ebHfL5C1bpmTu9z61MGYJ4brbews,984
onnx_ir/external_data.py,sha256=uIrcz1iqrUADx1MJFhw4zh-QHgF31rC4kz5i_4AfG94,19013
onnx_ir/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
onnx_ir/serde.py,sha256=PVefkYfyNjgF2z8rRE1SfO8645aAHUHDDKiIRheTTVk,81423
onnx_ir/tape.py,sha256=4FyfAHmVhQoMsfHMYnBwP2azi6UF6b6pj--ercObqZs,350
onnx_ir/tensor_adapters.py,sha256=KPZba59Jd17lKsypptUpu-Q8SzhoxDy4eOMKJBDtZUc,8123
onnx_ir/testing.py,sha256=WTrjf2joWizDWaYMJlV1KjZMQw7YmZ8NvuBTVn1uY6s,8803
onnx_ir/traversal.py,sha256=Wy4XphwuapAvm94-5iaz6G8LjIoMFpY7qfPfXzYViEE,4488
onnx_ir/_convenience/__init__.py,sha256=h6U_F20fM6YjBsWfdj00tvWnzhmKlIuJ5jORQww-RR0,21268
onnx_ir/_convenience/_constructors.py,sha256=HqCGtNPMzFFEerC7I5VEyMdBuIdOJDucn9UEdwuymcg,11519
onnx_ir/_convenience/_extractor.py,sha256=QsaSqnhFT1nSwLMKEXI7iPJN5sI_S7rwazuYwoqP0P8,7452
onnx_ir/_safetensors/__init__.py,sha256=f6d0xtgTIK9v9JKPuovQ4eR14kowFbHJT-ItGJu2FyU,18808
onnx_ir/_thirdparty/asciichartpy.py,sha256=afQ0fsqko2uYRPAR4TZBrQxvCb4eN8lxZ2yDFbVQq_s,10533
onnx_ir/analysis/__init__.py,sha256=Wk21u0f9rYCFTh-WBIbCUXRPZtK_Y_BGh7nmjOygTJk,503
onnx_ir/analysis/_implicit_usage.py,sha256=03nEZieao_xAhlfR5Y6qlYd61cfumV8MoteLX34MOfM,2575
onnx_ir/passes/__init__.py,sha256=IHMsZowNOmu18OyMt-fc-2tCGMZBAJq_BRLKW3sauaw,804
onnx_ir/passes/_pass_infra.py,sha256=6x8ku-LbKKjZ4v_SXy2PkSvC5zT-rDE-KqOr1iD_cU0,11399
onnx_ir/passes/common/__init__.py,sha256=c2Jswbg-bgMav-abEhdAS2YAmqJXQKtwB2MFawbT_IQ,1919
onnx_ir/passes/common/_c_api_utils.py,sha256=g6riA6xNGVWaO5YjVHZ0krrfslWHmRlryRkwB8X56cg,2907
onnx_ir/passes/common/clear_metadata_and_docstring.py,sha256=YwouLfsNFSaTuGd7uMOGjdvVwG9yHQTkSphUgDlM0ME,2365
onnx_ir/passes/common/common_subexpression_elimination.py,sha256=p5hZjWyswn8qm91M6BqlqMUBu78ohxaw72Ky12zrPZ0,7949
onnx_ir/passes/common/constant_manipulation.py,sha256=qKC--Mr8IzrXPmr6xKJKOVO54M9wh8Ruho423LQ1mfU,9375
onnx_ir/passes/common/default_attributes.py,sha256=LI-cZagYoW5n32ywz8LpMQv2gcDiRUnpQ2V5kBzwPfc,3470
onnx_ir/passes/common/identity_elimination.py,sha256=90HYaq4QYUilHMyRoN_qhwWt0bQpqm5Z69KNU_7z_As,4376
onnx_ir/passes/common/initializer_deduplication.py,sha256=2OK6h6cLp2VmfT5VUxsknsRXr1fgbY2w5npn0hV1cdE,7221
onnx_ir/passes/common/inliner.py,sha256=z7Yq1yGr_KmsrpPGkJCqjh-mXBgcXE_0YrH04XPNqNM,9373
onnx_ir/passes/common/naming.py,sha256=0SgYItShUIFlZLmaMw0aYfGMEthd1PaQMMrqFYNK26s,10769
onnx_ir/passes/common/onnx_checker.py,sha256=_sPmJ2ff9pDB1g9q7082BL6fyubomRaj6svE0cCyDew,1691
onnx_ir/passes/common/output_fix.py,sha256=B5jQjJhtvPR5SSAcNLAg9IaNu_sXRdshRWl7ds_R-44,5304
onnx_ir/passes/common/shape_inference.py,sha256=LVdvxjeKtcIEbPcb6mKisxoPJOOawzsm3tzk5j9xqeM,3992
onnx_ir/passes/common/topological_sort.py,sha256=LqZ2ELD7RoXRGSrvo5kgbgQMdatjt9hHGBek9XIEfy4,1151
onnx_ir/passes/common/unused_removal.py,sha256=BarXGpukPF4qlWdm_K7KfK9gQD9p5cFG5XNeo1wl5W8,8208
onnx_ir-0.1.15.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
onnx_ir-0.1.15.dist-info/METADATA,sha256=6w6ljZycHmEH1sM5T-uLKGZNMSN0e98eW4jXGaqXwak,3238
onnx_ir-0.1.15.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
onnx_ir-0.1.15.dist-info/top_level.txt,sha256=W5tROO93YjO0XRxIdjMy4wocp-5st5GiI2ukvW7UhDo,8
onnx_ir-0.1.15.dist-info/RECORD,,