py2edg 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- py2edg-0.1.0/LICENSE +21 -0
- py2edg-0.1.0/PKG-INFO +286 -0
- py2edg-0.1.0/README.md +237 -0
- py2edg-0.1.0/py2edg/__init__.py +80 -0
- py2edg-0.1.0/py2edg/_imports.py +60 -0
- py2edg-0.1.0/py2edg/api.py +593 -0
- py2edg-0.1.0/py2edg/benchmark.py +307 -0
- py2edg-0.1.0/py2edg/cli.py +142 -0
- py2edg-0.1.0/py2edg/converter.py +349 -0
- py2edg-0.1.0/py2edg/devices.py +329 -0
- py2edg-0.1.0/py2edg/optimizer.py +256 -0
- py2edg-0.1.0/py2edg/quantizer.py +259 -0
- py2edg-0.1.0/py2edg/recipe.py +180 -0
- py2edg-0.1.0/py2edg/report.py +166 -0
- py2edg-0.1.0/py2edg.egg-info/PKG-INFO +286 -0
- py2edg-0.1.0/py2edg.egg-info/SOURCES.txt +21 -0
- py2edg-0.1.0/py2edg.egg-info/dependency_links.txt +1 -0
- py2edg-0.1.0/py2edg.egg-info/entry_points.txt +2 -0
- py2edg-0.1.0/py2edg.egg-info/requires.txt +27 -0
- py2edg-0.1.0/py2edg.egg-info/top_level.txt +1 -0
- py2edg-0.1.0/pyproject.toml +87 -0
- py2edg-0.1.0/setup.cfg +4 -0
- py2edg-0.1.0/tests/test_py2edg.py +207 -0
py2edg-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Mouissat Rabah Abderrahmane
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
py2edg-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: py2edg
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: One-line edge deployment for CV models — convert, quantize, benchmark, and ship to any device.
|
|
5
|
+
Author-email: Mouissat Rabah Abderrahmane <mouissat.rabah@example.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/Rahimdzx/py2edg
|
|
8
|
+
Project-URL: Documentation, https://github.com/Rahimdzx/py2edg#readme
|
|
9
|
+
Project-URL: Repository, https://github.com/Rahimdzx/py2edg
|
|
10
|
+
Project-URL: Issues, https://github.com/Rahimdzx/py2edg/issues
|
|
11
|
+
Keywords: edge-deployment,onnx,tflite,openvino,quantization,model-optimization,computer-vision,edge-ai,raspberry-pi,jetson,orange-pi,benchmarking,model-compression
|
|
12
|
+
Classifier: Development Status :: 3 - Alpha
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Intended Audience :: Science/Research
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
22
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
23
|
+
Requires-Python: >=3.9
|
|
24
|
+
Description-Content-Type: text/markdown
|
|
25
|
+
License-File: LICENSE
|
|
26
|
+
Requires-Dist: numpy>=1.21.0
|
|
27
|
+
Requires-Dist: onnx>=1.14.0
|
|
28
|
+
Requires-Dist: onnxruntime>=1.15.0
|
|
29
|
+
Requires-Dist: rich>=13.0.0
|
|
30
|
+
Requires-Dist: pyyaml>=6.0
|
|
31
|
+
Provides-Extra: torch
|
|
32
|
+
Requires-Dist: torch>=2.0.0; extra == "torch"
|
|
33
|
+
Requires-Dist: torchvision>=0.15.0; extra == "torch"
|
|
34
|
+
Provides-Extra: tflite
|
|
35
|
+
Requires-Dist: tensorflow>=2.13.0; extra == "tflite"
|
|
36
|
+
Requires-Dist: flatbuffers>=23.0; extra == "tflite"
|
|
37
|
+
Provides-Extra: openvino
|
|
38
|
+
Requires-Dist: openvino>=2023.1; extra == "openvino"
|
|
39
|
+
Provides-Extra: full
|
|
40
|
+
Requires-Dist: py2edg[openvino,tflite,torch]; extra == "full"
|
|
41
|
+
Requires-Dist: onnxsim>=0.4.0; extra == "full"
|
|
42
|
+
Requires-Dist: onnxoptimizer>=0.3.0; extra == "full"
|
|
43
|
+
Provides-Extra: dev
|
|
44
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
45
|
+
Requires-Dist: pytest-cov>=4.0; extra == "dev"
|
|
46
|
+
Requires-Dist: black>=23.0; extra == "dev"
|
|
47
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
48
|
+
Dynamic: license-file
|
|
49
|
+
|
|
50
|
+
<p align="center">
|
|
51
|
+
<h1 align="center">🚀 Py2Edg</h1>
|
|
52
|
+
<p align="center"><strong>One-line edge deployment for Computer Vision models</strong></p>
|
|
53
|
+
<p align="center">
|
|
54
|
+
Convert, quantize, optimize, and benchmark CV models for any edge device — in a single function call.
|
|
55
|
+
</p>
|
|
56
|
+
<p align="center">
|
|
57
|
+
<em>Built by <a href="https://github.com/Rahimdzx">Mouissat Rabah Abderrahmane</a></em>
|
|
58
|
+
</p>
|
|
59
|
+
</p>
|
|
60
|
+
|
|
61
|
+
<p align="center">
|
|
62
|
+
<a href="https://pypi.org/project/py2edg/"><img src="https://img.shields.io/pypi/v/py2edg?color=blue&label=PyPI" alt="PyPI"></a>
|
|
63
|
+
<a href="https://pypi.org/project/py2edg/"><img src="https://img.shields.io/pypi/pyversions/py2edg" alt="Python"></a>
|
|
64
|
+
<a href="https://github.com/Rahimdzx/py2edg/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-green" alt="License"></a>
|
|
65
|
+
</p>
|
|
66
|
+
|
|
67
|
+
---
|
|
68
|
+
|
|
69
|
+
## The Problem
|
|
70
|
+
|
|
71
|
+
Deploying a CV model to edge devices is painful. You need to:
|
|
72
|
+
1. Export PyTorch → ONNX (handle dynamic axes, opsets, tracing issues)
|
|
73
|
+
2. Optimize the graph (constant folding, operator fusion, dead code elimination)
|
|
74
|
+
3. Quantize (FP16? INT8? Dynamic? Static with calibration?)
|
|
75
|
+
4. Convert to target format (TFLite? OpenVINO? CoreML?)
|
|
76
|
+
5. Benchmark (latency, throughput, memory)
|
|
77
|
+
6. Repeat for each device...
|
|
78
|
+
|
|
79
|
+
**Py2Edg does all of this in one line.**
|
|
80
|
+
|
|
81
|
+
## Quick Start
|
|
82
|
+
|
|
83
|
+
```bash
|
|
84
|
+
pip install py2edg
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
```python
|
|
88
|
+
import py2edg as rcv
|
|
89
|
+
|
|
90
|
+
# Deploy to Raspberry Pi 4 — converts, optimizes, quantizes, benchmarks
|
|
91
|
+
report = rcv.deploy("yolov8n.pt", device="rpi4")
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
That's it. Py2Edg automatically:
|
|
95
|
+
- Converts your PyTorch model to ONNX
|
|
96
|
+
- Optimizes the computation graph
|
|
97
|
+
- Quantizes to INT8 (optimal for RPi4)
|
|
98
|
+
- Converts to TFLite (best format for RPi4)
|
|
99
|
+
- Benchmarks and generates a deployment report
|
|
100
|
+
|
|
101
|
+
## Core API
|
|
102
|
+
|
|
103
|
+
### `rcv.deploy()` — Full Pipeline
|
|
104
|
+
```python
|
|
105
|
+
# Raspberry Pi 4
|
|
106
|
+
report = rcv.deploy("model.pt", device="rpi4")
|
|
107
|
+
|
|
108
|
+
# NVIDIA Jetson Nano
|
|
109
|
+
report = rcv.deploy("model.pt", device="jetson_nano")
|
|
110
|
+
|
|
111
|
+
# Orange Pi 5
|
|
112
|
+
report = rcv.deploy("model.pt", device="orange_pi")
|
|
113
|
+
|
|
114
|
+
# Custom configuration
|
|
115
|
+
report = rcv.deploy(
|
|
116
|
+
"model.pt",
|
|
117
|
+
device="rpi4",
|
|
118
|
+
quantize="int8",
|
|
119
|
+
input_shape=(1, 3, 320, 320),
|
|
120
|
+
benchmark_runs=200,
|
|
121
|
+
)
|
|
122
|
+
report.print()
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
### `rcv.convert()` — Format Conversion
|
|
126
|
+
```python
|
|
127
|
+
# PyTorch → ONNX
|
|
128
|
+
rcv.convert("model.pt", target="onnx")
|
|
129
|
+
|
|
130
|
+
# ONNX with FP16 quantization
|
|
131
|
+
rcv.convert("model.pt", target="onnx", quantize="fp16")
|
|
132
|
+
|
|
133
|
+
# Auto-configure for device
|
|
134
|
+
rcv.convert("model.pt", device="jetson_nano")
|
|
135
|
+
|
|
136
|
+
# To TFLite with INT8
|
|
137
|
+
rcv.convert("model.onnx", target="tflite", quantize="int8")
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
### `rcv.benchmark()` — Performance Measurement
|
|
141
|
+
```python
|
|
142
|
+
stats = rcv.benchmark("model.onnx", input_shape=(1, 3, 640, 640))
|
|
143
|
+
print(f"Latency: {stats.latency_mean_ms:.1f} ms")
|
|
144
|
+
print(f"Throughput: {stats.throughput_fps:.0f} FPS")
|
|
145
|
+
print(f"P95: {stats.latency_p95_ms:.1f} ms")
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
### `rcv.compare()` — Side-by-Side Comparison
|
|
149
|
+
```python
|
|
150
|
+
rcv.compare(
|
|
151
|
+
"model.onnx",
|
|
152
|
+
"model_fp16.onnx",
|
|
153
|
+
"model_int8.onnx",
|
|
154
|
+
input_shape=(1, 3, 640, 640),
|
|
155
|
+
)
|
|
156
|
+
# ┌───────────────────┬──────────┬───────────┬─────────┬───────────┐
|
|
157
|
+
# │ Model │ Size(MB) │ Mean(ms) │ FPS │ vs Base │
|
|
158
|
+
# ├───────────────────┼──────────┼───────────┼─────────┼───────────┤
|
|
159
|
+
# │ model │ 25.30 │ 18.42 │ 54.3 │ baseline │
|
|
160
|
+
# │ model_fp16 │ 12.70 │ 12.15 │ 82.3 │ 1.52x │
|
|
161
|
+
# │ model_int8 │ 6.80 │ 8.91 │ 112.2 │ 2.07x │
|
|
162
|
+
# └───────────────────┴──────────┴───────────┴─────────┴───────────┘
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
### `rcv.validate()` — Output Accuracy Check
|
|
166
|
+
```python
|
|
167
|
+
result = rcv.validate(pytorch_model, "model_fp16.onnx")
|
|
168
|
+
print(f"Max diff: {result['max_diff']:.6f}")
|
|
169
|
+
print(f"Passed: {result['passed']}")
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
## Built-in Device Profiles
|
|
173
|
+
|
|
174
|
+
| Device | Format | Quantize | Notes |
|
|
175
|
+
|--------|--------|----------|-------|
|
|
176
|
+
| `rpi4` | TFLite | INT8 | Best for real-time on RPi4 |
|
|
177
|
+
| `rpi5` | TFLite | INT8 | 2x faster than RPi4 |
|
|
178
|
+
| `jetson_nano` | ONNX | FP16 | CUDA + TensorRT |
|
|
179
|
+
| `jetson_orin` | ONNX | FP16 | Extremely powerful |
|
|
180
|
+
| `orange_pi` | ONNX | INT8 | RK3588 NPU support |
|
|
181
|
+
| `coral_tpu` | TFLite | UINT8 | Edge TPU compiler |
|
|
182
|
+
| `android_cpu` | TFLite | INT8 | XNNPACK delegate |
|
|
183
|
+
| `android_gpu` | TFLite | FP16 | GPU delegate |
|
|
184
|
+
| `ios_coreml` | CoreML | FP16 | Apple Neural Engine |
|
|
185
|
+
| `server_gpu` | ONNX | FP16 | TensorRT provider |
|
|
186
|
+
|
|
187
|
+
### Custom Devices
|
|
188
|
+
```python
|
|
189
|
+
rcv.register_device(rcv.DeviceProfile(
|
|
190
|
+
name="my_fpga",
|
|
191
|
+
display_name="Custom FPGA Board",
|
|
192
|
+
compute="npu",
|
|
193
|
+
memory_mb=256,
|
|
194
|
+
preferred_format="onnx",
|
|
195
|
+
quantize="int8",
|
|
196
|
+
))
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
## Deployment Recipes
|
|
200
|
+
|
|
201
|
+
Save and share reproducible deployment configs:
|
|
202
|
+
|
|
203
|
+
```python
|
|
204
|
+
recipe = rcv.DeployRecipe(
|
|
205
|
+
name="yolo-rpi4",
|
|
206
|
+
model="yolov8n.pt",
|
|
207
|
+
device="rpi4",
|
|
208
|
+
)
|
|
209
|
+
recipe.apply_device_defaults()
|
|
210
|
+
rcv.save_recipe(recipe, "deploy.yaml")
|
|
211
|
+
|
|
212
|
+
# Later, or on another machine:
|
|
213
|
+
recipe = rcv.load_recipe("deploy.yaml")
|
|
214
|
+
report = rcv.deploy(recipe=recipe)
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
## CLI
|
|
218
|
+
|
|
219
|
+
```bash
|
|
220
|
+
# Full deployment
|
|
221
|
+
py2edg deploy model.pt --device rpi4
|
|
222
|
+
|
|
223
|
+
# Convert only
|
|
224
|
+
py2edg convert model.pt --target onnx --quantize fp16
|
|
225
|
+
|
|
226
|
+
# Benchmark
|
|
227
|
+
py2edg benchmark model.onnx --runs 200
|
|
228
|
+
|
|
229
|
+
# Inspect model
|
|
230
|
+
py2edg inspect model.onnx
|
|
231
|
+
|
|
232
|
+
# List devices
|
|
233
|
+
py2edg devices
|
|
234
|
+
|
|
235
|
+
# Create recipe
|
|
236
|
+
py2edg recipe create --device jetson_nano --model model.pt -o deploy.yaml
|
|
237
|
+
```
|
|
238
|
+
|
|
239
|
+
## Architecture
|
|
240
|
+
|
|
241
|
+
```
|
|
242
|
+
py2edg/
|
|
243
|
+
├── api.py # High-level one-liner API (convert, deploy, benchmark, ...)
|
|
244
|
+
├── converter.py # Format conversion engine (PyTorch→ONNX→TFLite/OpenVINO)
|
|
245
|
+
├── quantizer.py # Quantization (FP16, INT8 static/dynamic)
|
|
246
|
+
├── optimizer.py # Graph optimization (fusion, pruning, simplification)
|
|
247
|
+
├── benchmark.py # Speed/memory benchmarking with statistics
|
|
248
|
+
├── devices.py # Edge device profiles (RPi, Jetson, Coral, mobile, ...)
|
|
249
|
+
├── recipe.py # YAML-based deployment recipes
|
|
250
|
+
├── report.py # Deployment report generation
|
|
251
|
+
├── cli.py # Command-line interface
|
|
252
|
+
└── _imports.py # Lazy optional dependency management
|
|
253
|
+
```
|
|
254
|
+
|
|
255
|
+
## Installation Options
|
|
256
|
+
|
|
257
|
+
```bash
|
|
258
|
+
pip install py2edg # Core (ONNX Runtime)
|
|
259
|
+
pip install py2edg[torch] # + PyTorch support
|
|
260
|
+
pip install py2edg[tflite] # + TFLite conversion
|
|
261
|
+
pip install py2edg[openvino] # + OpenVINO support
|
|
262
|
+
pip install py2edg[full] # Everything
|
|
263
|
+
```
|
|
264
|
+
|
|
265
|
+
## Contributing
|
|
266
|
+
|
|
267
|
+
```bash
|
|
268
|
+
git clone https://github.com/Rahimdzx/py2edg.git
|
|
269
|
+
cd py2edg
|
|
270
|
+
pip install -e ".[dev]"
|
|
271
|
+
pytest
|
|
272
|
+
```
|
|
273
|
+
|
|
274
|
+
## License
|
|
275
|
+
|
|
276
|
+
MIT License — see [LICENSE](LICENSE) for details.
|
|
277
|
+
|
|
278
|
+
## Author
|
|
279
|
+
|
|
280
|
+
**Mouissat Rabah Abderrahmane**
|
|
281
|
+
- AI & Robotics Engineer | MSc from Saint Petersburg State University
|
|
282
|
+
- GitHub: [@Rahimdzx](https://github.com/Rahimdzx)
|
|
283
|
+
|
|
284
|
+
---
|
|
285
|
+
|
|
286
|
+
<p align="center">Made with ❤️ in Algeria 🇩🇿</p>
|
py2edg-0.1.0/README.md
ADDED
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
<p align="center">
|
|
2
|
+
<h1 align="center">🚀 Py2Edg</h1>
|
|
3
|
+
<p align="center"><strong>One-line edge deployment for Computer Vision models</strong></p>
|
|
4
|
+
<p align="center">
|
|
5
|
+
Convert, quantize, optimize, and benchmark CV models for any edge device — in a single function call.
|
|
6
|
+
</p>
|
|
7
|
+
<p align="center">
|
|
8
|
+
<em>Built by <a href="https://github.com/Rahimdzx">Mouissat Rabah Abderrahmane</a></em>
|
|
9
|
+
</p>
|
|
10
|
+
</p>
|
|
11
|
+
|
|
12
|
+
<p align="center">
|
|
13
|
+
<a href="https://pypi.org/project/py2edg/"><img src="https://img.shields.io/pypi/v/py2edg?color=blue&label=PyPI" alt="PyPI"></a>
|
|
14
|
+
<a href="https://pypi.org/project/py2edg/"><img src="https://img.shields.io/pypi/pyversions/py2edg" alt="Python"></a>
|
|
15
|
+
<a href="https://github.com/Rahimdzx/py2edg/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-green" alt="License"></a>
|
|
16
|
+
</p>
|
|
17
|
+
|
|
18
|
+
---
|
|
19
|
+
|
|
20
|
+
## The Problem
|
|
21
|
+
|
|
22
|
+
Deploying a CV model to edge devices is painful. You need to:
|
|
23
|
+
1. Export PyTorch → ONNX (handle dynamic axes, opsets, tracing issues)
|
|
24
|
+
2. Optimize the graph (constant folding, operator fusion, dead code elimination)
|
|
25
|
+
3. Quantize (FP16? INT8? Dynamic? Static with calibration?)
|
|
26
|
+
4. Convert to target format (TFLite? OpenVINO? CoreML?)
|
|
27
|
+
5. Benchmark (latency, throughput, memory)
|
|
28
|
+
6. Repeat for each device...
|
|
29
|
+
|
|
30
|
+
**Py2Edg does all of this in one line.**
|
|
31
|
+
|
|
32
|
+
## Quick Start
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
pip install py2edg
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
```python
|
|
39
|
+
import py2edg as rcv
|
|
40
|
+
|
|
41
|
+
# Deploy to Raspberry Pi 4 — converts, optimizes, quantizes, benchmarks
|
|
42
|
+
report = rcv.deploy("yolov8n.pt", device="rpi4")
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
That's it. Py2Edg automatically:
|
|
46
|
+
- Converts your PyTorch model to ONNX
|
|
47
|
+
- Optimizes the computation graph
|
|
48
|
+
- Quantizes to INT8 (optimal for RPi4)
|
|
49
|
+
- Converts to TFLite (best format for RPi4)
|
|
50
|
+
- Benchmarks and generates a deployment report
|
|
51
|
+
|
|
52
|
+
## Core API
|
|
53
|
+
|
|
54
|
+
### `rcv.deploy()` — Full Pipeline
|
|
55
|
+
```python
|
|
56
|
+
# Raspberry Pi 4
|
|
57
|
+
report = rcv.deploy("model.pt", device="rpi4")
|
|
58
|
+
|
|
59
|
+
# NVIDIA Jetson Nano
|
|
60
|
+
report = rcv.deploy("model.pt", device="jetson_nano")
|
|
61
|
+
|
|
62
|
+
# Orange Pi 5
|
|
63
|
+
report = rcv.deploy("model.pt", device="orange_pi")
|
|
64
|
+
|
|
65
|
+
# Custom configuration
|
|
66
|
+
report = rcv.deploy(
|
|
67
|
+
"model.pt",
|
|
68
|
+
device="rpi4",
|
|
69
|
+
quantize="int8",
|
|
70
|
+
input_shape=(1, 3, 320, 320),
|
|
71
|
+
benchmark_runs=200,
|
|
72
|
+
)
|
|
73
|
+
report.print()
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
### `rcv.convert()` — Format Conversion
|
|
77
|
+
```python
|
|
78
|
+
# PyTorch → ONNX
|
|
79
|
+
rcv.convert("model.pt", target="onnx")
|
|
80
|
+
|
|
81
|
+
# ONNX with FP16 quantization
|
|
82
|
+
rcv.convert("model.pt", target="onnx", quantize="fp16")
|
|
83
|
+
|
|
84
|
+
# Auto-configure for device
|
|
85
|
+
rcv.convert("model.pt", device="jetson_nano")
|
|
86
|
+
|
|
87
|
+
# To TFLite with INT8
|
|
88
|
+
rcv.convert("model.onnx", target="tflite", quantize="int8")
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
### `rcv.benchmark()` — Performance Measurement
|
|
92
|
+
```python
|
|
93
|
+
stats = rcv.benchmark("model.onnx", input_shape=(1, 3, 640, 640))
|
|
94
|
+
print(f"Latency: {stats.latency_mean_ms:.1f} ms")
|
|
95
|
+
print(f"Throughput: {stats.throughput_fps:.0f} FPS")
|
|
96
|
+
print(f"P95: {stats.latency_p95_ms:.1f} ms")
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
### `rcv.compare()` — Side-by-Side Comparison
|
|
100
|
+
```python
|
|
101
|
+
rcv.compare(
|
|
102
|
+
"model.onnx",
|
|
103
|
+
"model_fp16.onnx",
|
|
104
|
+
"model_int8.onnx",
|
|
105
|
+
input_shape=(1, 3, 640, 640),
|
|
106
|
+
)
|
|
107
|
+
# ┌───────────────────┬──────────┬───────────┬─────────┬───────────┐
|
|
108
|
+
# │ Model │ Size(MB) │ Mean(ms) │ FPS │ vs Base │
|
|
109
|
+
# ├───────────────────┼──────────┼───────────┼─────────┼───────────┤
|
|
110
|
+
# │ model │ 25.30 │ 18.42 │ 54.3 │ baseline │
|
|
111
|
+
# │ model_fp16 │ 12.70 │ 12.15 │ 82.3 │ 1.52x │
|
|
112
|
+
# │ model_int8 │ 6.80 │ 8.91 │ 112.2 │ 2.07x │
|
|
113
|
+
# └───────────────────┴──────────┴───────────┴─────────┴───────────┘
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
### `rcv.validate()` — Output Accuracy Check
|
|
117
|
+
```python
|
|
118
|
+
result = rcv.validate(pytorch_model, "model_fp16.onnx")
|
|
119
|
+
print(f"Max diff: {result['max_diff']:.6f}")
|
|
120
|
+
print(f"Passed: {result['passed']}")
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
## Built-in Device Profiles
|
|
124
|
+
|
|
125
|
+
| Device | Format | Quantize | Notes |
|
|
126
|
+
|--------|--------|----------|-------|
|
|
127
|
+
| `rpi4` | TFLite | INT8 | Best for real-time on RPi4 |
|
|
128
|
+
| `rpi5` | TFLite | INT8 | 2x faster than RPi4 |
|
|
129
|
+
| `jetson_nano` | ONNX | FP16 | CUDA + TensorRT |
|
|
130
|
+
| `jetson_orin` | ONNX | FP16 | Extremely powerful |
|
|
131
|
+
| `orange_pi` | ONNX | INT8 | RK3588 NPU support |
|
|
132
|
+
| `coral_tpu` | TFLite | UINT8 | Edge TPU compiler |
|
|
133
|
+
| `android_cpu` | TFLite | INT8 | XNNPACK delegate |
|
|
134
|
+
| `android_gpu` | TFLite | FP16 | GPU delegate |
|
|
135
|
+
| `ios_coreml` | CoreML | FP16 | Apple Neural Engine |
|
|
136
|
+
| `server_gpu` | ONNX | FP16 | TensorRT provider |
|
|
137
|
+
|
|
138
|
+
### Custom Devices
|
|
139
|
+
```python
|
|
140
|
+
rcv.register_device(rcv.DeviceProfile(
|
|
141
|
+
name="my_fpga",
|
|
142
|
+
display_name="Custom FPGA Board",
|
|
143
|
+
compute="npu",
|
|
144
|
+
memory_mb=256,
|
|
145
|
+
preferred_format="onnx",
|
|
146
|
+
quantize="int8",
|
|
147
|
+
))
|
|
148
|
+
```
|
|
149
|
+
|
|
150
|
+
## Deployment Recipes
|
|
151
|
+
|
|
152
|
+
Save and share reproducible deployment configs:
|
|
153
|
+
|
|
154
|
+
```python
|
|
155
|
+
recipe = rcv.DeployRecipe(
|
|
156
|
+
name="yolo-rpi4",
|
|
157
|
+
model="yolov8n.pt",
|
|
158
|
+
device="rpi4",
|
|
159
|
+
)
|
|
160
|
+
recipe.apply_device_defaults()
|
|
161
|
+
rcv.save_recipe(recipe, "deploy.yaml")
|
|
162
|
+
|
|
163
|
+
# Later, or on another machine:
|
|
164
|
+
recipe = rcv.load_recipe("deploy.yaml")
|
|
165
|
+
report = rcv.deploy(recipe=recipe)
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
## CLI
|
|
169
|
+
|
|
170
|
+
```bash
|
|
171
|
+
# Full deployment
|
|
172
|
+
py2edg deploy model.pt --device rpi4
|
|
173
|
+
|
|
174
|
+
# Convert only
|
|
175
|
+
py2edg convert model.pt --target onnx --quantize fp16
|
|
176
|
+
|
|
177
|
+
# Benchmark
|
|
178
|
+
py2edg benchmark model.onnx --runs 200
|
|
179
|
+
|
|
180
|
+
# Inspect model
|
|
181
|
+
py2edg inspect model.onnx
|
|
182
|
+
|
|
183
|
+
# List devices
|
|
184
|
+
py2edg devices
|
|
185
|
+
|
|
186
|
+
# Create recipe
|
|
187
|
+
py2edg recipe create --device jetson_nano --model model.pt -o deploy.yaml
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
## Architecture
|
|
191
|
+
|
|
192
|
+
```
|
|
193
|
+
py2edg/
|
|
194
|
+
├── api.py # High-level one-liner API (convert, deploy, benchmark, ...)
|
|
195
|
+
├── converter.py # Format conversion engine (PyTorch→ONNX→TFLite/OpenVINO)
|
|
196
|
+
├── quantizer.py # Quantization (FP16, INT8 static/dynamic)
|
|
197
|
+
├── optimizer.py # Graph optimization (fusion, pruning, simplification)
|
|
198
|
+
├── benchmark.py # Speed/memory benchmarking with statistics
|
|
199
|
+
├── devices.py # Edge device profiles (RPi, Jetson, Coral, mobile, ...)
|
|
200
|
+
├── recipe.py # YAML-based deployment recipes
|
|
201
|
+
├── report.py # Deployment report generation
|
|
202
|
+
├── cli.py # Command-line interface
|
|
203
|
+
└── _imports.py # Lazy optional dependency management
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
## Installation Options
|
|
207
|
+
|
|
208
|
+
```bash
|
|
209
|
+
pip install py2edg # Core (ONNX Runtime)
|
|
210
|
+
pip install py2edg[torch] # + PyTorch support
|
|
211
|
+
pip install py2edg[tflite] # + TFLite conversion
|
|
212
|
+
pip install py2edg[openvino] # + OpenVINO support
|
|
213
|
+
pip install py2edg[full] # Everything
|
|
214
|
+
```
|
|
215
|
+
|
|
216
|
+
## Contributing
|
|
217
|
+
|
|
218
|
+
```bash
|
|
219
|
+
git clone https://github.com/Rahimdzx/py2edg.git
|
|
220
|
+
cd py2edg
|
|
221
|
+
pip install -e ".[dev]"
|
|
222
|
+
pytest
|
|
223
|
+
```
|
|
224
|
+
|
|
225
|
+
## License
|
|
226
|
+
|
|
227
|
+
MIT License — see [LICENSE](LICENSE) for details.
|
|
228
|
+
|
|
229
|
+
## Author
|
|
230
|
+
|
|
231
|
+
**Mouissat Rabah Abderrahmane**
|
|
232
|
+
- AI & Robotics Engineer | MSc from Saint Petersburg State University
|
|
233
|
+
- GitHub: [@Rahimdzx](https://github.com/Rahimdzx)
|
|
234
|
+
|
|
235
|
+
---
|
|
236
|
+
|
|
237
|
+
<p align="center">Made with ❤️ in Algeria 🇩🇿</p>
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"""
|
|
2
|
+
╔══════════════════════════════════════════════════════════════════════╗
|
|
3
|
+
║ Py2Edg v0.1.0 ║
|
|
4
|
+
║ One-line edge deployment for Computer Vision models ║
|
|
5
|
+
║ by Mouissat Rabah Abderrahmane ║
|
|
6
|
+
╚══════════════════════════════════════════════════════════════════════╝
|
|
7
|
+
|
|
8
|
+
Convert, quantize, optimize, and benchmark CV models for edge devices
|
|
9
|
+
with a single function call.
|
|
10
|
+
|
|
11
|
+
Quick Start:
|
|
12
|
+
>>> import py2edg as rcv
|
|
13
|
+
>>>
|
|
14
|
+
>>> # Convert PyTorch model to optimized ONNX
|
|
15
|
+
>>> result = rcv.convert("model.pt", target="onnx", quantize="fp16")
|
|
16
|
+
>>>
|
|
17
|
+
>>> # Full deployment pipeline: convert + optimize + benchmark
|
|
18
|
+
>>> report = rcv.deploy("model.pt", device="rpi4", quantize="int8")
|
|
19
|
+
>>>
|
|
20
|
+
>>> # Benchmark any model
|
|
21
|
+
>>> stats = rcv.benchmark("model.onnx", input_shape=(1, 3, 640, 640))
|
|
22
|
+
>>>
|
|
23
|
+
>>> # Compare original vs optimized
|
|
24
|
+
>>> rcv.compare("model.pt", "model_opt.onnx", input_shape=(1, 3, 640, 640))
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
# Package metadata. NOTE(review): __version__ appears to be duplicated in
# pyproject.toml — keep the two in sync when releasing.
__version__ = "0.1.0"
__author__ = "Mouissat Rabah Abderrahmane"
__license__ = "MIT"
|
|
30
|
+
|
|
31
|
+
# ── High-level API (the magic) ──
|
|
32
|
+
from py2edg.api import (
|
|
33
|
+
convert,
|
|
34
|
+
deploy,
|
|
35
|
+
benchmark,
|
|
36
|
+
compare,
|
|
37
|
+
profile,
|
|
38
|
+
validate,
|
|
39
|
+
inspect_model,
|
|
40
|
+
)
|
|
41
|
+
|
|
42
|
+
# ── Device Profiles ──
|
|
43
|
+
from py2edg.devices import (
|
|
44
|
+
DeviceProfile,
|
|
45
|
+
get_device,
|
|
46
|
+
list_devices,
|
|
47
|
+
register_device,
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
# ── Deployment Recipe ──
|
|
51
|
+
from py2edg.recipe import (
|
|
52
|
+
DeployRecipe,
|
|
53
|
+
load_recipe,
|
|
54
|
+
save_recipe,
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
# ── Report ──
|
|
58
|
+
from py2edg.report import DeployReport
|
|
59
|
+
|
|
60
|
+
# Explicit public API surface: what `from py2edg import *` exposes and what
# the package promises to keep stable at top level.
__all__ = [
    # API
    "convert",
    "deploy",
    "benchmark",
    "compare",
    "profile",
    "validate",
    "inspect_model",
    # Devices
    "DeviceProfile",
    "get_device",
    "list_devices",
    "register_device",
    # Recipe
    "DeployRecipe",
    "load_recipe",
    "save_recipe",
    # Report
    "DeployReport",
]
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
"""Lazy import helpers for optional dependencies."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import importlib
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class _LazyModule:
    """Proxy that delays importing (and any ImportError) until first use.

    Attribute access on the proxy triggers the real import; if the module
    is missing, an ImportError with a ``pip install`` hint is raised at the
    point of use rather than at package import time.
    """

    def __init__(self, name: str, install_hint: str):
        # name: dotted module path to import lazily.
        # install_hint: pip requirement spec shown in the error message.
        self._name = name
        self._hint = install_hint
        self._mod: Any = None  # cached module after the first successful load

    def _load(self) -> Any:
        """Import and cache the wrapped module, raising a helpful error on failure."""
        if self._mod is None:
            try:
                self._mod = importlib.import_module(self._name)
            except ImportError as err:
                # Chain the original error so a failing *transitive* import
                # (module present, dependency broken) is not masked.
                raise ImportError(
                    f"'{self._name}' is required for this operation.\n"
                    f"Install it with: pip install {self._hint}"
                ) from err
        return self._mod

    def __getattr__(self, item: str) -> Any:
        # Any attribute access forces the real import.
        return getattr(self._load(), item)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def require(module_name: str, install_hint: str | None = None):
    """Import *module_name* eagerly or raise an ImportError with a pip hint.

    Args:
        module_name: Dotted module path to import.
        install_hint: Pip requirement spec to show in the error message;
            defaults to ``module_name`` itself.

    Returns:
        The imported module object.

    Raises:
        ImportError: If the module cannot be imported; the original error is
            chained (``from err``) so the real cause stays visible.
    """
    hint = install_hint or module_name
    try:
        return importlib.import_module(module_name)
    except ImportError as err:
        raise ImportError(
            f"'{module_name}' is required for this operation.\n"
            f"Install it with: pip install {hint}"
        ) from err
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def is_available(module_name: str) -> bool:
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        importlib.import_module(module_name)
    except ImportError:
        return False
    return True
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
# ── Pre-built lazy imports ──
# Each proxy resolves to the real module on first attribute access and raises
# an ImportError with a "pip install <hint>" message if the package is absent.
torch = _LazyModule("torch", "torch")
torchvision = _LazyModule("torchvision", "torchvision")
onnx = _LazyModule("onnx", "onnx")
onnxruntime = _LazyModule("onnxruntime", "onnxruntime")
tensorflow = _LazyModule("tensorflow", "tensorflow")
openvino = _LazyModule("openvino", "openvino")
onnxsim = _LazyModule("onnxsim", "onnxsim")
|