onnxslim 0.1.77__tar.gz → 0.1.79__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {onnxslim-0.1.77/onnxslim.egg-info → onnxslim-0.1.79}/PKG-INFO +76 -15
- {onnxslim-0.1.77 → onnxslim-0.1.79}/README.md +75 -14
- onnxslim-0.1.79/VERSION +1 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/exporters/onnx_exporter.py +13 -5
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/ir/graph.py +117 -25
- onnxslim-0.1.79/onnxslim/version.py +1 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79/onnxslim.egg-info}/PKG-INFO +76 -15
- onnxslim-0.1.77/VERSION +0 -1
- onnxslim-0.1.77/onnxslim/version.py +0 -1
- {onnxslim-0.1.77 → onnxslim-0.1.79}/LICENSE +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/MANIFEST.in +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/__main__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/argparser.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/cli/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/cli/_main.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/optimization/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/optimization/dead_node_elimination.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/optimization/subexpression_elimination.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/optimization/weight_tying.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/elimination/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/elimination/concat.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/elimination/reshape.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/elimination/reshape_as.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/elimination/slice.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/elimination/unsqueeze.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/concat_reshape.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/convadd.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/convbn.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/convmul.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/gelu.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/gemm.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/padconv.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/fusion/reduce.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/core/pattern/registry.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/misc/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/misc/tabulate.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/_sympy/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/_sympy/functions.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/_sympy/numbers.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/_sympy/printers.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/_sympy/solve.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/_sympy/symbol.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/exporters/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/exporters/base_exporter.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/graph_pattern/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/graph_pattern/graph_pattern.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/importers/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/importers/base_importer.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/importers/onnx_importer.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/ir/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/ir/function.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/ir/node.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/ir/tensor.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/logger/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/logger/logger.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/util/__init__.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/util/exception.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/util/misc.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/symbolic_shape_infer.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/utils.py +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim.egg-info/SOURCES.txt +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim.egg-info/dependency_links.txt +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim.egg-info/entry_points.txt +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim.egg-info/requires.txt +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim.egg-info/top_level.txt +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim.egg-info/zip-safe +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/pyproject.toml +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/setup.cfg +0 -0
- {onnxslim-0.1.77 → onnxslim-0.1.79}/setup.py +0 -0
{onnxslim-0.1.77/onnxslim.egg-info → onnxslim-0.1.79}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnxslim
-Version: 0.1.77
+Version: 0.1.79
 Summary: OnnxSlim: A Toolkit to Help Optimize Onnx Model
 Home-page: https://github.com/inisis/OnnxSlim
 Author: inisis

@@ -61,7 +61,9 @@ Dynamic: summary
 
 OnnxSlim can help you slim your onnx model, with less operators, but same accuracy, better inference speed.
 
-- 🚀 2025/
+- 🚀 2025/11/29: Top 1% on PyPI
+- 🚀 2025/11/27: OnnxSlim is merged into [NVIDIA TensorRT-Model-Optimizer](https://github.com/NVIDIA/TensorRT-Model-Optimizer) 🤗🤗🤗
+- 🚀 2025/05/17: OnnxSlim is merged into [HuggingFace optimum](https://github.com/huggingface/optimum) 🤗🤗🤗
 - 🚀 2025/04/30: Rank 1st in the [AICAS 2025 LLM inference optimization challenge](https://tianchi.aliyun.com/competition/entrance/532289/customize588)
 - 🚀 2025/01/28: Achieved 1M downloads
 - 🚀 2024/06/23: OnnxSlim is merged into [transformers.js](https://github.com/huggingface/transformers.js) 🤗🤗🤗
@@ -119,19 +121,78 @@ For more usage, see onnxslim -h or refer to our [examples](./examples)
 
 # Projects using OnnxSlim
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+<table style="width:100%; border-collapse:separate; border-spacing:10px;">
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1728152?s=200&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/NVIDIA/TensorRT-Model-Optimizer" target="_blank">NVIDIA/TensorRT-Model-Optimizer</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1961952?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/alibaba/MNN" target="_blank">alibaba/MNN</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/26833451?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/ultralytics/ultralytics" target="_blank">ultralytics/ultralytics</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/131524?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/mozilla/smart_autofill" target="_blank">Mozilla/smart_autofill</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1961952?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/wangzhaode/mnn-llm" target="_blank">alibaba/MNN-LLM</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/25720743?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/huggingface/transformers.js" target="_blank">huggingface/transformers.js</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/25720743?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/huggingface/optimum" target="_blank">huggingface/optimum</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/23534030?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/PaddlePaddle/PaddleOCR" target="_blank">PaddlePaddle/PaddleOCR</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/109945100?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/modelscope/FunASR" target="_blank">ModelScope/FunASR</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/111754012?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/CVCUDA/CV-CUDA" target="_blank">CVCUDA/CV-CUDA</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/86091366?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/THU-MIG/yolov10" target="_blank">THU-MIG/yolov10</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/48153283?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/sunsmarterjie/yolov12" target="_blank">sunsmarterjie/yolov12</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/147458884?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/nndeploy/nndeploy" target="_blank">nndeploy/nndeploy</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/126587470?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/deepghs/imgutils" target="_blank">deepghs/imgutils</a>
+    </td>
+  </tr>
+</table>
 
 # References
 
{onnxslim-0.1.77 → onnxslim-0.1.79}/README.md

@@ -27,7 +27,9 @@
 
 OnnxSlim can help you slim your onnx model, with less operators, but same accuracy, better inference speed.
 
-- 🚀 2025/
+- 🚀 2025/11/29: Top 1% on PyPI
+- 🚀 2025/11/27: OnnxSlim is merged into [NVIDIA TensorRT-Model-Optimizer](https://github.com/NVIDIA/TensorRT-Model-Optimizer) 🤗🤗🤗
+- 🚀 2025/05/17: OnnxSlim is merged into [HuggingFace optimum](https://github.com/huggingface/optimum) 🤗🤗🤗
 - 🚀 2025/04/30: Rank 1st in the [AICAS 2025 LLM inference optimization challenge](https://tianchi.aliyun.com/competition/entrance/532289/customize588)
 - 🚀 2025/01/28: Achieved 1M downloads
 - 🚀 2024/06/23: OnnxSlim is merged into [transformers.js](https://github.com/huggingface/transformers.js) 🤗🤗🤗
@@ -85,19 +87,78 @@ For more usage, see onnxslim -h or refer to our [examples](./examples)
 
 # Projects using OnnxSlim
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+<table style="width:100%; border-collapse:separate; border-spacing:10px;">
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1728152?s=200&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/NVIDIA/TensorRT-Model-Optimizer" target="_blank">NVIDIA/TensorRT-Model-Optimizer</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1961952?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/alibaba/MNN" target="_blank">alibaba/MNN</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/26833451?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/ultralytics/ultralytics" target="_blank">ultralytics/ultralytics</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/131524?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/mozilla/smart_autofill" target="_blank">Mozilla/smart_autofill</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1961952?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/wangzhaode/mnn-llm" target="_blank">alibaba/MNN-LLM</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/25720743?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/huggingface/transformers.js" target="_blank">huggingface/transformers.js</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/25720743?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/huggingface/optimum" target="_blank">huggingface/optimum</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/23534030?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/PaddlePaddle/PaddleOCR" target="_blank">PaddlePaddle/PaddleOCR</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/109945100?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/modelscope/FunASR" target="_blank">ModelScope/FunASR</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/111754012?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/CVCUDA/CV-CUDA" target="_blank">CVCUDA/CV-CUDA</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/86091366?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/THU-MIG/yolov10" target="_blank">THU-MIG/yolov10</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/48153283?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/sunsmarterjie/yolov12" target="_blank">sunsmarterjie/yolov12</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/147458884?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/nndeploy/nndeploy" target="_blank">nndeploy/nndeploy</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/126587470?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/deepghs/imgutils" target="_blank">deepghs/imgutils</a>
+    </td>
+  </tr>
+</table>
 
 # References
 
onnxslim-0.1.79/VERSION (ADDED)

@@ -0,0 +1 @@
+0.1.79
{onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/exporters/onnx_exporter.py

@@ -21,7 +21,6 @@ from collections.abc import Sequence
 
 import numpy as np
 import onnx
-import onnx.numpy_helper
 from onnx import IR_VERSION, ModelProto, defs
 
 from onnxslim.third_party.onnx_graphsurgeon.exporters.base_exporter import BaseExporter

@@ -35,9 +34,11 @@ from onnxslim.third_party.onnx_graphsurgeon.ir.tensor import (
     Tensor,
     Variable,
 )
+
 from onnxslim.third_party.onnx_graphsurgeon.logger import G_LOGGER
 from onnxslim.third_party.onnx_graphsurgeon.util import misc
 
+from ml_dtypes import bfloat16, float8_e4m3fn
 
 def dtype_to_onnx(dtype: np.dtype | onnx.TensorProto.DataType) -> int:
     """Converts a numpy dtype or ONNX data type to its integer representation."""

@@ -86,6 +87,15 @@ def update_import_domains(graph):
     return graph.import_domains
 
 
+def float32_to_bfloat16_uint16(x):
+    """Convert a float32 value to bfloat16 represented as uint16."""
+    return bfloat16(x).view(np.uint16)
+
+
+def float32_to_float8e4m3(x):
+    """Convert a float32 value to float8e4m3 represented as uint8."""
+    return float8_e4m3fn(x).view(np.uint8)
+
+
 class NumpyArrayConverter:
     def __init__(self, container, scalar_converter):
         self.func = np.vectorize(scalar_converter, otypes=[container])

@@ -95,12 +105,10 @@
 
 
 _NUMPY_ARRAY_CONVERTERS = {
-    onnx.TensorProto.BFLOAT16: NumpyArrayConverter(np.uint16,
+    onnx.TensorProto.BFLOAT16: NumpyArrayConverter(np.uint16, float32_to_bfloat16_uint16),
     # FP8 in TensorRT supports negative zeros, no infinities
     # See https://onnx.ai/onnx/technical/float8.html#papers
-    onnx.TensorProto.FLOAT8E4M3FN: NumpyArrayConverter(
-        np.uint8, lambda x: onnx.helper.float32_to_float8e4m3(x, fn=True, uz=False)
-    ),
+    onnx.TensorProto.FLOAT8E4M3FN: NumpyArrayConverter(np.uint8, float32_to_float8e4m3),
 }
 
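The exporter change above swaps the onnx.helper-based casts for ml_dtypes: a float32 value is rounded to bfloat16 or float8e4m3fn and the resulting bits are reinterpreted as uint16/uint8 before NumpyArrayConverter vectorizes the call over whole arrays. A minimal standalone sketch of that conversion path, assuming only that numpy and ml_dtypes are installed (the expected bit patterns in the comments follow from the standard encodings, they are not taken from the diff):

```python
import numpy as np
from ml_dtypes import bfloat16, float8_e4m3fn


def float32_to_bfloat16_uint16(x):
    # Round the float32 value to bfloat16, then reinterpret the 16 bits
    # as uint16 (the raw-data representation ONNX stores for BFLOAT16).
    return bfloat16(x).view(np.uint16)


def float32_to_float8e4m3(x):
    # Same idea for FP8 E4M3 (fn variant): cast, then view the byte as uint8.
    return float8_e4m3fn(x).view(np.uint8)


# 1.0 encodes as 0x3f80 in bfloat16 and 0x38 in float8e4m3fn.
print(hex(int(float32_to_bfloat16_uint16(np.float32(1.0)))))  # 0x3f80
print(hex(int(float32_to_float8e4m3(np.float32(1.0)))))       # 0x38
```

In the exporter these scalar converters are wrapped with np.vectorize inside NumpyArrayConverter, so the same rounding is applied elementwise when a whole float32 weight tensor is serialized.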
{onnxslim-0.1.77 → onnxslim-0.1.79}/onnxslim/third_party/onnx_graphsurgeon/ir/graph.py

@@ -978,17 +978,46 @@ class Graph:
             return next(iter(tensor.values)) if tensor.shape else tensor.values
 
         def fold_shape(tensor):
-            """Returns the input tensor shape if available, otherwise returns None.
-
+            """Returns the input tensor shape if available, otherwise returns None.
+            Handles Shape node with optional 'start' and 'end' attributes (opset 15+).
+            """
+            shape_node = get_producer(tensor, "Shape")
+            inp = get_input(shape_node)
             if inp is None:
                 return None
 
             if inp.shape is None or misc.is_dynamic_shape(inp.shape):
                 return None
-
+
+            full_shape = inp.shape
+            num_dims = len(full_shape)
+
+            # Get start and end attributes (default: start=0, end=None means full shape)
+            start = shape_node.attrs.get("start", 0)
+            end = shape_node.attrs.get("end", None)
+
+            # Handle negative indices
+            if start < 0:
+                start = num_dims + start
+            if end is None:
+                end = num_dims
+            elif end < 0:
+                end = num_dims + end
+
+            # Clamp to valid range
+            start = max(0, min(start, num_dims))
+            end = max(0, min(end, num_dims))
+
+            if start > end:
+                return None
+
+            target_shape = full_shape[start:end]
+            return np.array(target_shape, dtype=np.int64)
 
         def fold_shape_gather(tensor):
-            """Retrieves and returns the shape of the input tensor as a NumPy array, otherwise returns None.
+            """Retrieves and returns the shape of the input tensor as a NumPy array, otherwise returns None.
+            Handles Shape node with optional 'start' and 'end' attributes (opset 15+).
+            """
             gather = get_producer(tensor, "Gather")
             if gather is None:
                 return None
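The reworked fold_shape above folds Shape nodes that carry the start/end attributes introduced in opset 15: negative values are normalised against the rank, both bounds are clamped to [0, rank], and folding is skipped when the range is empty. A small self-contained sketch of that normalisation, using a hypothetical helper name rather than the nested closure from the diff:

```python
import numpy as np


def fold_static_shape(full_shape, start=0, end=None):
    """Mimic folding Shape(start, end) on a fully static input shape (illustrative only)."""
    num_dims = len(full_shape)
    # Normalise negative indices against the rank, as the diff does.
    if start < 0:
        start = num_dims + start
    if end is None:
        end = num_dims
    elif end < 0:
        end = num_dims + end
    # Clamp both ends into [0, num_dims]; an inverted range is not folded.
    start = max(0, min(start, num_dims))
    end = max(0, min(end, num_dims))
    if start > end:
        return None
    return np.array(full_shape[start:end], dtype=np.int64)


# Shape(start=1) of a (2, 3, 224, 224) tensor folds to [3, 224, 224];
# Shape(start=-1) folds to [224].
print(fold_static_shape((2, 3, 224, 224), start=1))   # -> [  3 224 224]
print(fold_static_shape((2, 3, 224, 224), start=-1))  # -> [224]
```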
@@ -996,69 +1025,132 @@
             data = gather.inputs[0]
             indices_tensor = gather.inputs[1]
 
-
+            shape_node = get_producer(data, "Shape")
+            inp = get_input(shape_node)
             if inp is None or inp.shape is None:
                 return None
 
             if not isinstance(indices_tensor, Constant):
                 return None
 
+            # Get the shape slice from Shape node (considering start/end attributes)
+            full_shape = inp.shape
+            num_dims = len(full_shape)
+
+            start = shape_node.attrs.get("start", 0)
+            end = shape_node.attrs.get("end", None)
+
+            if start < 0:
+                start = num_dims + start
+            if end is None:
+                end = num_dims
+            elif end < 0:
+                end = num_dims + end
+
+            start = max(0, min(start, num_dims))
+            end = max(0, min(end, num_dims))
+
+            if start > end:
+                return None
+
+            shape_slice = full_shape[start:end]
+
             indices = indices_tensor.values
             if not indices.shape:  # Scalar-case
-
+                idx = int(indices)
+                # Handle negative indices relative to shape_slice
+                if idx < 0:
+                    idx = len(shape_slice) + idx
+                if idx < 0 or idx >= len(shape_slice):
+                    return None
+                shape = shape_slice[idx]
                 if misc.is_dynamic_dimension(shape):
                     return None
             else:
-                shape = [
+                shape = []
+                for index in indices:
+                    idx = int(index)
+                    # Handle negative indices relative to shape_slice
+                    if idx < 0:
+                        idx = len(shape_slice) + idx
+                    if idx < 0 or idx >= len(shape_slice):
+                        return None
+                    shape.append(shape_slice[idx])
                 if misc.is_dynamic_shape(shape):
                     return None
 
             return np.array(shape, dtype=np.int64)
 
         def fold_shape_slice(tensor):
-            """Fold tensor shape slice information into a NumPy array of int64 type.
-
-
+            """Fold tensor shape slice information into a NumPy array of int64 type.
+            Handles Shape node with optional 'start' and 'end' attributes (opset 15+).
+            """
+            slice_node = get_producer(tensor, "Slice")
+            if slice_node is None:
                 return None
 
-            data =
+            data = slice_node.inputs[0]
 
-            if len(
-            starts, ends =
+            if len(slice_node.inputs) >= 3:
+                starts, ends = slice_node.inputs[1:3]
                 if any(not isinstance(t, Constant) for t in [starts, ends]):
                     return None
                 starts, ends = get_scalar_value(starts), get_scalar_value(ends)
-            elif "starts" in
-            starts, ends =
+            elif "starts" in slice_node.attrs and "ends" in slice_node.attrs:
+                starts, ends = slice_node.attrs["starts"][0], slice_node.attrs["ends"][0]
             else:
                 return None
 
-
+            shape_node = get_producer(data, "Shape")
+            inp = get_input(shape_node)
             if inp is None or inp.shape is None:
                 return None
 
             # For shape tensors, we can only slice on the 0th dimension.
-            if len(
-            axes =
+            if len(slice_node.inputs) > 3:
+                axes = slice_node.inputs[3]
                 if not isinstance(axes, Constant):
                     return None
 
                 if get_scalar_value(axes) != 0:
                     return None
-            elif "axes" in
-            if
+            elif "axes" in slice_node.attrs:
+                if slice_node.attrs["axes"][0] != 0:
                     return None
 
             steps = 1
-            if len(
-            steps =
+            if len(slice_node.inputs) > 4:
+                steps = slice_node.inputs[4]
                 if not isinstance(steps, Constant):
                     return None
                 steps = get_scalar_value(steps)
-            elif "steps" in
-            steps =
+            elif "steps" in slice_node.attrs:
+                steps = slice_node.attrs["steps"][0]
+
+            # Get the shape slice from Shape node (considering start/end attributes)
+            full_shape = inp.shape
+            num_dims = len(full_shape)
+
+            shape_start = shape_node.attrs.get("start", 0)
+            shape_end = shape_node.attrs.get("end", None)
+
+            if shape_start < 0:
+                shape_start = num_dims + shape_start
+            if shape_end is None:
+                shape_end = num_dims
+            elif shape_end < 0:
+                shape_end = num_dims + shape_end
+
+            shape_start = max(0, min(shape_start, num_dims))
+            shape_end = max(0, min(shape_end, num_dims))
+
+            if shape_start > shape_end:
+                return None
+
+            shape_slice = full_shape[shape_start:shape_end]
 
-
+            # Apply the Slice operation on the shape_slice
+            shape = shape_slice[starts:ends:steps]
             if misc.is_dynamic_shape(shape):
                 return None
 
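With this change, fold_shape_gather and fold_shape_slice resolve Gather indices and Slice ranges against the start/end-restricted shape slice rather than the full static shape, and give up on folding when an index cannot be normalised into range. A rough illustration of the Gather half, again with a hypothetical helper name and plain Python containers instead of graphsurgeon tensors:

```python
import numpy as np


def gather_from_shape_slice(shape_slice, indices):
    """Resolve Gather indices against an already-sliced static shape (illustrative only)."""
    dims = []
    for index in np.atleast_1d(np.asarray(indices)):
        idx = int(index)
        # Negative indices are interpreted relative to the shape slice, as in the diff.
        if idx < 0:
            idx += len(shape_slice)
        if idx < 0 or idx >= len(shape_slice):
            return None  # out of range: do not fold
        dims.append(shape_slice[idx])
    return np.array(dims, dtype=np.int64)


# With Shape(start=1) of a (2, 3, 224, 224) tensor the slice is (3, 224, 224),
# so Gather(indices=-1) folds to 224 relative to that slice, not the full shape.
print(gather_from_shape_slice((3, 224, 224), -1))      # -> [224]
print(gather_from_shape_slice((3, 224, 224), [0, 2]))  # -> [  3 224]
```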
onnxslim-0.1.79/onnxslim/version.py (ADDED)

@@ -0,0 +1 @@
+__version__ = "0.1.79"
{onnxslim-0.1.77 → onnxslim-0.1.79/onnxslim.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: onnxslim
-Version: 0.1.77
+Version: 0.1.79
 Summary: OnnxSlim: A Toolkit to Help Optimize Onnx Model
 Home-page: https://github.com/inisis/OnnxSlim
 Author: inisis

@@ -61,7 +61,9 @@ Dynamic: summary
 
 OnnxSlim can help you slim your onnx model, with less operators, but same accuracy, better inference speed.
 
-- 🚀 2025/
+- 🚀 2025/11/29: Top 1% on PyPI
+- 🚀 2025/11/27: OnnxSlim is merged into [NVIDIA TensorRT-Model-Optimizer](https://github.com/NVIDIA/TensorRT-Model-Optimizer) 🤗🤗🤗
+- 🚀 2025/05/17: OnnxSlim is merged into [HuggingFace optimum](https://github.com/huggingface/optimum) 🤗🤗🤗
 - 🚀 2025/04/30: Rank 1st in the [AICAS 2025 LLM inference optimization challenge](https://tianchi.aliyun.com/competition/entrance/532289/customize588)
 - 🚀 2025/01/28: Achieved 1M downloads
 - 🚀 2024/06/23: OnnxSlim is merged into [transformers.js](https://github.com/huggingface/transformers.js) 🤗🤗🤗

@@ -119,19 +121,78 @@ For more usage, see onnxslim -h or refer to our [examples](./examples)
 
 # Projects using OnnxSlim
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+<table style="width:100%; border-collapse:separate; border-spacing:10px;">
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1728152?s=200&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/NVIDIA/TensorRT-Model-Optimizer" target="_blank">NVIDIA/TensorRT-Model-Optimizer</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1961952?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/alibaba/MNN" target="_blank">alibaba/MNN</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/26833451?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/ultralytics/ultralytics" target="_blank">ultralytics/ultralytics</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/131524?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/mozilla/smart_autofill" target="_blank">Mozilla/smart_autofill</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/1961952?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/wangzhaode/mnn-llm" target="_blank">alibaba/MNN-LLM</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/25720743?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/huggingface/transformers.js" target="_blank">huggingface/transformers.js</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/25720743?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/huggingface/optimum" target="_blank">huggingface/optimum</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/23534030?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/PaddlePaddle/PaddleOCR" target="_blank">PaddlePaddle/PaddleOCR</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/109945100?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/modelscope/FunASR" target="_blank">ModelScope/FunASR</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/111754012?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/CVCUDA/CV-CUDA" target="_blank">CVCUDA/CV-CUDA</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/86091366?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/THU-MIG/yolov10" target="_blank">THU-MIG/yolov10</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/48153283?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/sunsmarterjie/yolov12" target="_blank">sunsmarterjie/yolov12</a>
+    </td>
+  </tr>
+  <tr>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/147458884?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/nndeploy/nndeploy" target="_blank">nndeploy/nndeploy</a>
+    </td>
+    <td style="vertical-align:middle;">
+      <img src="https://avatars.githubusercontent.com/u/126587470?s=48&v=4" width="22" height="22" style="vertical-align:middle; margin-right:8px;"/>
+      <a href="https://github.com/deepghs/imgutils" target="_blank">deepghs/imgutils</a>
+    </td>
+  </tr>
+</table>
 
 # References
 
onnxslim-0.1.77/VERSION (DELETED)

@@ -1 +0,0 @@
-0.1.77

onnxslim-0.1.77/onnxslim/version.py (DELETED)

@@ -1 +0,0 @@
-__version__ = "0.1.77"
All remaining files listed above are unchanged between 0.1.77 and 0.1.79; they are only renamed from the onnxslim-0.1.77/ prefix to onnxslim-0.1.79/.