emx-onnx-cgen 0.2.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of emx-onnx-cgen might be problematic.
- emx_onnx_cgen/__init__.py +6 -0
- emx_onnx_cgen/__main__.py +9 -0
- emx_onnx_cgen/_build_info.py +3 -0
- emx_onnx_cgen/cli.py +328 -0
- emx_onnx_cgen/codegen/__init__.py +25 -0
- emx_onnx_cgen/codegen/c_emitter.py +9044 -0
- emx_onnx_cgen/compiler.py +601 -0
- emx_onnx_cgen/dtypes.py +40 -0
- emx_onnx_cgen/errors.py +14 -0
- emx_onnx_cgen/ir/__init__.py +3 -0
- emx_onnx_cgen/ir/model.py +55 -0
- emx_onnx_cgen/lowering/__init__.py +3 -0
- emx_onnx_cgen/lowering/arg_reduce.py +99 -0
- emx_onnx_cgen/lowering/attention.py +421 -0
- emx_onnx_cgen/lowering/average_pool.py +229 -0
- emx_onnx_cgen/lowering/batch_normalization.py +116 -0
- emx_onnx_cgen/lowering/cast.py +70 -0
- emx_onnx_cgen/lowering/common.py +72 -0
- emx_onnx_cgen/lowering/concat.py +31 -0
- emx_onnx_cgen/lowering/constant_of_shape.py +85 -0
- emx_onnx_cgen/lowering/conv.py +192 -0
- emx_onnx_cgen/lowering/cumsum.py +118 -0
- emx_onnx_cgen/lowering/depth_space.py +114 -0
- emx_onnx_cgen/lowering/dropout.py +46 -0
- emx_onnx_cgen/lowering/elementwise.py +164 -0
- emx_onnx_cgen/lowering/expand.py +151 -0
- emx_onnx_cgen/lowering/eye_like.py +43 -0
- emx_onnx_cgen/lowering/flatten.py +60 -0
- emx_onnx_cgen/lowering/gather.py +48 -0
- emx_onnx_cgen/lowering/gather_elements.py +60 -0
- emx_onnx_cgen/lowering/gemm.py +139 -0
- emx_onnx_cgen/lowering/grid_sample.py +149 -0
- emx_onnx_cgen/lowering/group_normalization.py +68 -0
- emx_onnx_cgen/lowering/identity.py +43 -0
- emx_onnx_cgen/lowering/instance_normalization.py +50 -0
- emx_onnx_cgen/lowering/layer_normalization.py +110 -0
- emx_onnx_cgen/lowering/logsoftmax.py +47 -0
- emx_onnx_cgen/lowering/lp_normalization.py +45 -0
- emx_onnx_cgen/lowering/lrn.py +104 -0
- emx_onnx_cgen/lowering/lstm.py +355 -0
- emx_onnx_cgen/lowering/matmul.py +120 -0
- emx_onnx_cgen/lowering/maxpool.py +195 -0
- emx_onnx_cgen/lowering/mean_variance_normalization.py +49 -0
- emx_onnx_cgen/lowering/negative_log_likelihood_loss.py +250 -0
- emx_onnx_cgen/lowering/pad.py +287 -0
- emx_onnx_cgen/lowering/range.py +104 -0
- emx_onnx_cgen/lowering/reduce.py +544 -0
- emx_onnx_cgen/lowering/registry.py +51 -0
- emx_onnx_cgen/lowering/reshape.py +188 -0
- emx_onnx_cgen/lowering/resize.py +445 -0
- emx_onnx_cgen/lowering/rms_normalization.py +67 -0
- emx_onnx_cgen/lowering/shape.py +78 -0
- emx_onnx_cgen/lowering/size.py +33 -0
- emx_onnx_cgen/lowering/slice.py +425 -0
- emx_onnx_cgen/lowering/softmax.py +47 -0
- emx_onnx_cgen/lowering/softmax_cross_entropy_loss.py +129 -0
- emx_onnx_cgen/lowering/split.py +150 -0
- emx_onnx_cgen/lowering/squeeze.py +161 -0
- emx_onnx_cgen/lowering/tile.py +81 -0
- emx_onnx_cgen/lowering/transpose.py +46 -0
- emx_onnx_cgen/lowering/unsqueeze.py +157 -0
- emx_onnx_cgen/lowering/variadic.py +95 -0
- emx_onnx_cgen/lowering/where.py +73 -0
- emx_onnx_cgen/onnx_import.py +261 -0
- emx_onnx_cgen/ops.py +565 -0
- emx_onnx_cgen/runtime/__init__.py +1 -0
- emx_onnx_cgen/runtime/evaluator.py +2206 -0
- emx_onnx_cgen/validation.py +76 -0
- emx_onnx_cgen-0.2.0.dist-info/METADATA +128 -0
- emx_onnx_cgen-0.2.0.dist-info/RECORD +76 -0
- emx_onnx_cgen-0.2.0.dist-info/WHEEL +5 -0
- emx_onnx_cgen-0.2.0.dist-info/entry_points.txt +2 -0
- emx_onnx_cgen-0.2.0.dist-info/top_level.txt +2 -0
- shared/__init__.py +2 -0
- shared/scalar_functions.py +2405 -0
- shared/scalar_types.py +243 -0
emx_onnx_cgen/validation.py

@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+from .errors import ShapeInferenceError
+from .ir.model import Node
+
+
+def normalize_axis(axis: int, shape: tuple[int, ...], node: Node) -> int:
+    if not shape:
+        raise ShapeInferenceError(f"{node.op_type} does not support scalar inputs")
+    rank = len(shape)
+    if axis < 0:
+        axis += rank
+    if axis < 0 or axis >= rank:
+        raise ShapeInferenceError(
+            f"{node.op_type} axis {axis} is out of range for rank {rank}"
+        )
+    return axis
+
+
+def normalize_concat_axis(axis: int, rank: int) -> int:
+    if axis < 0:
+        axis += rank
+    if axis < 0 or axis >= rank:
+        raise ShapeInferenceError(
+            f"Concat axis out of range for rank {rank}: {axis}"
+        )
+    return axis
+
+
+def ensure_output_shape_matches_input(
+    node: Node,
+    input_shape: tuple[int, ...],
+    output_shape: tuple[int, ...],
+) -> None:
+    if input_shape != output_shape:
+        raise ShapeInferenceError(
+            f"{node.op_type} output shape must be {input_shape}, got {output_shape}"
+        )
+
+
+def validate_concat_shapes(
+    input_shapes: tuple[tuple[int, ...], ...],
+    output_shape: tuple[int, ...],
+    axis: int,
+) -> int:
+    ranks = {len(shape) for shape in input_shapes}
+    if len(ranks) != 1:
+        raise ShapeInferenceError(
+            f"Concat inputs must have matching ranks, got {input_shapes}"
+        )
+    rank = ranks.pop()
+    axis = normalize_concat_axis(axis, rank)
+    base_shape = list(input_shapes[0])
+    axis_dim = 0
+    for shape in input_shapes:
+        if len(shape) != rank:
+            raise ShapeInferenceError(
+                f"Concat inputs must have matching ranks, got {input_shapes}"
+            )
+        for dim_index, dim in enumerate(shape):
+            if dim_index == axis:
+                continue
+            if dim != base_shape[dim_index]:
+                raise ShapeInferenceError(
+                    "Concat inputs must match on non-axis dimensions, "
+                    f"got {input_shapes}"
+                )
+        axis_dim += shape[axis]
+    base_shape[axis] = axis_dim
+    expected_output_shape = tuple(base_shape)
+    if output_shape != expected_output_shape:
+        raise ShapeInferenceError(
+            "Concat output shape must be "
+            f"{expected_output_shape}, got {output_shape}"
+        )
+    return axis
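The helpers above are pure functions over plain tuples, so their behavior can be checked directly. A minimal sketch (not part of the wheel), assuming the package is installed under the module path shown in the file list; the Node-based helpers are omitted because the Node class itself is not shown in this diff:

```python
# Illustrative only: exercising the concat-shape helpers from
# emx_onnx_cgen/validation.py as shown above.
from emx_onnx_cgen.validation import normalize_concat_axis, validate_concat_shapes

# Negative axes wrap into [0, rank).
assert normalize_concat_axis(-1, 3) == 2

# Non-axis dimensions must match; the axis dimension is summed and compared
# against the declared output shape, and the normalized axis is returned.
assert validate_concat_shapes(
    input_shapes=((2, 3), (2, 4)),
    output_shape=(2, 7),
    axis=-1,
) == 1
```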
emx_onnx_cgen-0.2.0.dist-info/METADATA

@@ -0,0 +1,128 @@
+Metadata-Version: 2.4
+Name: emx-onnx-cgen
+Version: 0.2.0
+Summary: emmtrix ONNX-to-C Code Generator
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+
+# emmtrix ONNX-to-C Code Generator (emx-onnx-cgen)
+
+[](https://pypi.org/project/emx-onnx-cgen)
+
+`emx-onnx-cgen` compiles ONNX models to portable, deterministic C code for deeply embedded systems. The generated code is designed to run without dynamic memory allocation, operating systems, or external runtimes, making it suitable for safety-critical and resource-constrained targets.
+
+Key characteristics:
+
+- **No dynamic memory allocation** (`malloc`, `free`, heap usage)
+- **Static, compile-time known memory layout** for parameters, activations, and temporaries
+- **Deterministic control flow** (explicit loops, no hidden dispatch or callbacks)
+- **No OS or libc dependencies** beyond basic C
+- **Single-threaded execution model**
+- **Bitwise-stable code generation** for reproducible builds
+- **Readable, auditable C code** suitable for certification and code reviews
+- Designed for **bare-metal and RTOS-based systems**
+
+## Goals
+
+- Correctness-first compilation with outputs comparable to ONNX Runtime.
+- Deterministic and reproducible C code generation.
+- Clean, pass-based compiler architecture (import → normalize → optimize → lower → emit).
+- Minimal C runtime with explicit, predictable data movement.
+
+## Non-goals
+
+- Aggressive performance optimizations in generated C.
+- Implicit runtime dependencies or dynamic loading.
+- Training/backpropagation support.
+
+## Features
+
+- CLI for ONNX-to-C compilation and verification.
+- Deterministic codegen with explicit tensor shapes and loop nests.
+- Minimal C runtime templates in `templates/`.
+- ONNX Runtime comparison for end-to-end validation.
+- Official ONNX operator coverage tracking.
+- Support for a wide range of ONNX operators (see `OFFICIAL_ONNX_FILE_SUPPORT.md`).
+- Supported data types:
+  - `float`, `double`, `float16`
+  - `int8_t`, `uint8_t`, `int16_t`, `uint16_t`, `int32_t`, `uint32_t`, `int64_t`, `uint64_t`
+  - `bool`
+- Supporting dynamic dimensions by utilizing C99 variable-length arrays (VLAs).
+
+## Installation
+
+Install the package directly from PyPI (recommended):
+
+```bash
+pip install emx-onnx-cgen
+```
+
+Optional for verification and tests:
+
+- `onnxruntime`
+- `numpy`
+- A C compiler (`cc`, `gcc`, `clang` or via `--cc`)
+
+## Quickstart
+
+Compile an ONNX model into a C source file:
+
+```bash
+emx-onnx-cgen compile path/to/model.onnx build/model.c
+```
+
+Verify an ONNX model end-to-end against ONNX Runtime:
+
+```bash
+emx-onnx-cgen verify path/to/model.onnx
+```
+
+## CLI Reference
+
+`emx-onnx-cgen` provides two subcommands: `compile` and `verify`.
+
+### `compile`
+
+```bash
+emx-onnx-cgen compile <model.onnx> <output.c> [options]
+```
+
+Options:
+
+- `--template-dir`: Directory containing the C templates (default: `templates`).
+- `--model-name`: Override the generated model name (default: output file stem).
+- `--emit-testbench`: Emit a JSON-producing `main()` testbench for validation.
+- `--emit-data-file`: Emit constant data arrays into a companion `_data` C file.
+- `--no-restrict-arrays`: Disable `restrict` qualifiers on generated array parameters.
+
+### `verify`
+
+```bash
+emx-onnx-cgen verify <model.onnx> [options]
+```
+
+Options:
+
+- `--template-dir`: Directory containing the C templates (default: `templates`).
+- `--model-name`: Override the generated model name (default: model file stem).
+- `--cc`: Explicit C compiler command for building the testbench binary.
+
+## Output
+
+By default, the compiler emits a single C source file that includes:
+
+- A generated entry point that mirrors the ONNX graph inputs/outputs.
+- Tensor buffers for constants and temporaries.
+- A lightweight runtime implemented via templates in `templates/`.
+
+When `--emit-data-file` is enabled, the main C source declares constant arrays
+as `extern`, and a second file named like the output with a `_data` suffix
+contains the constant definitions.
+
+## Official ONNX test coverage
+
+See [`OFFICIAL_ONNX_FILE_SUPPORT.md`](OFFICIAL_ONNX_FILE_SUPPORT.md) for the generated support matrix.
+
+## Maintained by
+
+This project is maintained by [emmtrix](https://www.emmtrix.com).
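For completeness, a minimal sketch (not part of the wheel) of driving the documented CLI from a Python script; the model and output paths are placeholders:

```python
# Illustrative only: invoke the documented `compile` and `verify` subcommands
# via subprocess. Assumes `emx-onnx-cgen` is on PATH and `model.onnx` exists.
import subprocess

subprocess.run(
    [
        "emx-onnx-cgen", "compile", "model.onnx", "build/model.c",
        "--emit-testbench",   # emit a JSON-producing main() testbench
        "--emit-data-file",   # put constant arrays into a companion _data file
    ],
    check=True,
)
subprocess.run(["emx-onnx-cgen", "verify", "model.onnx"], check=True)
```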
emx_onnx_cgen-0.2.0.dist-info/RECORD

@@ -0,0 +1,76 @@
+emx_onnx_cgen/__init__.py,sha256=jUSbu1kJ0krzVTYEcph3jCprBhD7tWNtiSdL6r29KrM,221
+emx_onnx_cgen/__main__.py,sha256=iC1lLVtR6-TmpL6OxXcy3oIntExUtajn9-q627R1XyI,140
+emx_onnx_cgen/_build_info.py,sha256=tp1Kzo4PNvj3ydRSCdr84YUd2wEUVUopl7AoaJfOaHk,112
+emx_onnx_cgen/cli.py,sha256=2eQ6JxfdamiLDqo-ZfZNnnb1a1I7NTEvecfUJaAA_3M,11024
+emx_onnx_cgen/compiler.py,sha256=foB9JK1Z2NRcRg_Fn-kL9K7L7tKGWxI9-kaMbYg3dnM,20877
+emx_onnx_cgen/dtypes.py,sha256=jRx3BBvk0qFW14bngoL1B7L_IRasyNJ4jqhpM5YhcOM,1335
+emx_onnx_cgen/errors.py,sha256=HpOv95mTgr9ZX2gYe1RtwVMbPskh7zkqjU_FgAD-uIM,363
+emx_onnx_cgen/onnx_import.py,sha256=aMvSxT3ycg4UmnutWYvsQpzGt2m_KpNnDIiddlg-vDA,9028
+emx_onnx_cgen/ops.py,sha256=pW3ks2EJITiJxvThfU58KjQZE7AFUiPmMwKZRCNb1mY,16586
+emx_onnx_cgen/validation.py,sha256=KFdUdGjQbzTj1szCJcjxnTi8f5l6ywNgCB9abbBpTbM,2360
+emx_onnx_cgen/codegen/__init__.py,sha256=-_sxL87uyAIunaetjUvIUo2bc46ugVlaNtSsidegMRM,362
+emx_onnx_cgen/codegen/c_emitter.py,sha256=gCgbqTgDWGAmp7H8TCFPBKp7DCNb_nskkMCAzKwDG0Y,337438
+emx_onnx_cgen/ir/__init__.py,sha256=fD2D8qxlGoCFJb0m9v6u3XTgzSxDOhB4cfLBiCLovzg,102
+emx_onnx_cgen/ir/model.py,sha256=e8vRA0RNDU8Ioz3TXQKpdUhDtUK6Hm71KouUPMhCcpg,1213
+emx_onnx_cgen/lowering/__init__.py,sha256=wrxLMWcPUH1RbPJOs0Tsdb12FhXjAAeZVDYwKqcIuzw,103
+emx_onnx_cgen/lowering/arg_reduce.py,sha256=2AowDRCJRkIvrVBphbA0rM18oCWEpCDEV5Y4K9wSDII,3388
+emx_onnx_cgen/lowering/attention.py,sha256=19Jq_k0DXwH71a3pmLTWCNMttmw5uuiNK6Jhln5HC4A,16488
+emx_onnx_cgen/lowering/average_pool.py,sha256=9kg3pYHG7QLid_M2dbleC1VoNlVlGsKdOrsWp3pt7sc,8085
+emx_onnx_cgen/lowering/batch_normalization.py,sha256=_aFCm4QaC5jH-JNEvqDFYOyAMdzgUFS_3Gmo1vdPyKE,3987
+emx_onnx_cgen/lowering/cast.py,sha256=zKiE4wI7oWP_TjxBV4fY3-FXvZxK2zy58O6tWJ2dODQ,2852
+emx_onnx_cgen/lowering/common.py,sha256=4w9kjKW3_LilOGgmXYcUGg5lohbYsaLudoL4ALoDUkk,2356
+emx_onnx_cgen/lowering/concat.py,sha256=TefckPfuaIHVHxGExJiO9wlkjyRO1TGg-QAMeoW8hW0,1097
+emx_onnx_cgen/lowering/constant_of_shape.py,sha256=btQflQFMP_y22sK7RrhkbGdaeSSLPC_DWhLjxY7CAgk,3208
+emx_onnx_cgen/lowering/conv.py,sha256=I1_tssw_ySf4beKV0sCVe8DRhNxL58PqC0wxtWjD79s,7309
+emx_onnx_cgen/lowering/cumsum.py,sha256=eX0bDtwY-qevz0KXNHtJaDiKUUHIOhDX0uDiSxcC0ZU,4125
+emx_onnx_cgen/lowering/depth_space.py,sha256=M4md379jiumGWmg7EgR-CinoPzwof2RdfOiNqOzxd9o,4217
+emx_onnx_cgen/lowering/dropout.py,sha256=oBKPMN-J9Gnw8dRXvf-bN15L1-5W7-qKhR72Z6AgLXQ,1775
+emx_onnx_cgen/lowering/elementwise.py,sha256=HN6vEW58lceYECp-7QWLCWOBo1ImyY66aZIg06nA5g8,6231
+emx_onnx_cgen/lowering/expand.py,sha256=4msnYM-6RnzMplQqde2ovOLsjmWQ4bnXEoUiEM6CT6k,5529
+emx_onnx_cgen/lowering/eye_like.py,sha256=76HEdT-EofDCCy7DewjIpILJdIJyJ-YVCbLXO54SX5E,1734
+emx_onnx_cgen/lowering/flatten.py,sha256=sGol05FDN0xoNgSl_DlVbjYvBHCHWjQC2KB15ytYfPs,2142
+emx_onnx_cgen/lowering/gather.py,sha256=9zMB9fcdJi1fkTmDs_-L6FvQi1fnhdk0h7RmeN5MP2M,1814
+emx_onnx_cgen/lowering/gather_elements.py,sha256=0E-WAge15HhGeWPRC_ZE94fb9C6LIoef8p5D1usWvBQ,2358
+emx_onnx_cgen/lowering/gemm.py,sha256=Ps2T4tZgXr5FObz5figwbLZq-Njzg44iBQ9cFmvH78k,4590
+emx_onnx_cgen/lowering/grid_sample.py,sha256=Ne-97ljxSdqfjBJtVHp2AQnEeXGQ5HE-HegCoxcNCm0,5228
+emx_onnx_cgen/lowering/group_normalization.py,sha256=RqXud5_xNxMu8DP5EsPr4L2b6CZghQWCcG6Bh1x2gMA,2664
+emx_onnx_cgen/lowering/identity.py,sha256=fn1Tg56xACwAjhesy0wyr9TJjCmmddnd8QrQ4-uCdO0,1843
+emx_onnx_cgen/lowering/instance_normalization.py,sha256=1Yx2KPKq_BHberCBTrGQXQswAS0FfDle9NpyeD41ypU,1950
+emx_onnx_cgen/lowering/layer_normalization.py,sha256=ZvqGZOhuoYh8ZPyzb-PV0kIc2bbunWTYj12wmrGu9YY,4529
+emx_onnx_cgen/lowering/logsoftmax.py,sha256=1FEaX45GdDr6jIdS_sOwXOy_DdVDruZem4yZ9XA4a38,1669
+emx_onnx_cgen/lowering/lp_normalization.py,sha256=61CGS-2yN0bf5dby5b7Ug1PH3CStZN1xZmYWa5TysTI,1712
+emx_onnx_cgen/lowering/lrn.py,sha256=zGw1Jk7iBk1jHdjdDqfAREsV5VcSdOG3LcAmEllIB08,3370
+emx_onnx_cgen/lowering/lstm.py,sha256=JhGxiF3bTSY3flkw_u9mil2esRxvIjr5Tc4vSPULDr4,12305
+emx_onnx_cgen/lowering/matmul.py,sha256=NEfBa140ofpgm9xnqUBulMSA-yQlb29F2NqhCJpmKSY,4262
+emx_onnx_cgen/lowering/maxpool.py,sha256=MRLeoCEdIwO8JNWOi7iKoeIsJvukqpx_w6GCHaDaYHU,7494
+emx_onnx_cgen/lowering/mean_variance_normalization.py,sha256=L_6ECH9wPEnNX2mL6yroZRexZM8JV5ZnJvoPQS6IAuc,1875
+emx_onnx_cgen/lowering/negative_log_likelihood_loss.py,sha256=wMWvJ9ymkA-ptFgqx0VmHAJHS5NVvDeo2GAlrECztJ8,9307
+emx_onnx_cgen/lowering/pad.py,sha256=cNlh-rA3CRPfO-u8gvJ1MeF1j-vdBMXLuJBpp2DkFTc,10416
+emx_onnx_cgen/lowering/range.py,sha256=xLbG3SGvQiboPqSIh5qZyw3Krbxjk0nd3YvRlUI_q64,3463
+emx_onnx_cgen/lowering/reduce.py,sha256=XRxPopJCU9FGd1XmdxCZickmSTEvmkpAgLUPDFfjRm0,18431
+emx_onnx_cgen/lowering/registry.py,sha256=rKyWnLDBFHJbHw-iyOtXv2Qc5LEBCwgopUXewvQpEpg,1392
+emx_onnx_cgen/lowering/reshape.py,sha256=SF46eP95Z19PT67ayJDKW1fwllBWEZmGfXAL5q9wy-I,6881
+emx_onnx_cgen/lowering/resize.py,sha256=J_x53hVHlfJemLwEhq5n_11Pe1TlF9nRMEpkw6IpzN8,14644
+emx_onnx_cgen/lowering/rms_normalization.py,sha256=_H56Pf9T80FYbmy1m3oc7_D5TbNxRrVeJScD5VmLZRo,2536
+emx_onnx_cgen/lowering/shape.py,sha256=Vvd2zQB06wZcEe4mW5WBRrQuVF8f_tXSM9fpGxe9PEo,2913
+emx_onnx_cgen/lowering/size.py,sha256=Z_DTevdpx2W_3k0GoyQ2uWE3ms_PN1d_Ti7hh6HhB1Q,1261
+emx_onnx_cgen/lowering/slice.py,sha256=yHm_mXeHcLufDmVNvj_kv08zMdbvI39ViHcE-tVPKa0,14816
+emx_onnx_cgen/lowering/softmax.py,sha256=ZaOZf00f5PNHRjSki08Fv-iod6UgqL7cmblfpE_OQRU,1648
+emx_onnx_cgen/lowering/softmax_cross_entropy_loss.py,sha256=I0pbWyJdnf-9vAuX8-xsnovDKDGxynlBhfj5k_IVIa4,5230
+emx_onnx_cgen/lowering/split.py,sha256=ImGnsqrl7IdWbPTPazfXYjcsoRoziqsqtJBum12xTXY,5894
+emx_onnx_cgen/lowering/squeeze.py,sha256=rgICFprcWhC03h-GXZNaIQsdFjsqyxybJYtPklTfaYM,6086
+emx_onnx_cgen/lowering/tile.py,sha256=fT-ybiBZfb4bqBAPrCORZCNm3KWeu4rRW2BJ_UVIVZU,3041
+emx_onnx_cgen/lowering/transpose.py,sha256=TrRXUt-4UFNHZWaOpS3N5zEz5-OCK6-twZdlrnw7Pqg,1762
+emx_onnx_cgen/lowering/unsqueeze.py,sha256=sE3vribz8EyHqDG8lEcreKII7rQDElnHf1OpoM5HiAo,5987
+emx_onnx_cgen/lowering/variadic.py,sha256=hmPzRIj0kcZriGRTR2ma1YMH9g21K_4f-3FXw6qO3jE,3298
+emx_onnx_cgen/lowering/where.py,sha256=uiaWU9RM6o-n38N0AEINIkXS33yVK3-ohkfKIApJOoA,2655
+emx_onnx_cgen/runtime/__init__.py,sha256=88xGpAs1IEBlzlWL_e9tnKUlaSRdc7pQUeVCu5LC4DY,50
+emx_onnx_cgen/runtime/evaluator.py,sha256=GFxrBXcKuQkZ0HY46twOTrNc955UqW3cRKAu5AYVJzQ,84910
+shared/__init__.py,sha256=bmP79AVZdY_1aNULJap9pm76Q41Rabrza6X-0A8lDzw,45
+shared/scalar_functions.py,sha256=OAFO6kT6Gtcv5jp7UBLRhifhGmAbWhDKAmapTvqQruc,89911
+shared/scalar_types.py,sha256=kEpsl5T-NVFxCcTzXqPJbtpvDiCgKHfz91dphLLZxZA,4912
+emx_onnx_cgen-0.2.0.dist-info/METADATA,sha256=xTbPSAdUMfyXwvkxGGVYhsGhKjpxgPSzV1rimuB8zn0,4256
+emx_onnx_cgen-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+emx_onnx_cgen-0.2.0.dist-info/entry_points.txt,sha256=b7Rvmz_Bi9kWyn7QayQC_FEXiRpt4cS1RnluKh49yoo,57
+emx_onnx_cgen-0.2.0.dist-info/top_level.txt,sha256=g39fo-blEbgiVcC_GRqAnBzN234w3LXbcVdLUoItSLk,21
+emx_onnx_cgen-0.2.0.dist-info/RECORD,,
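Each RECORD entry uses the standard wheel format `path,sha256=<urlsafe-base64 digest without padding>,size`, so an installed file can be checked against its line above. A small sketch (not part of the wheel); the path in the comment is a placeholder:

```python
# Illustrative only: recompute a RECORD-style digest for an installed file
# and compare it with the corresponding RECORD line.
import base64
import hashlib
from pathlib import Path

def record_digest(path: Path) -> str:
    """Return the urlsafe-base64 SHA-256 digest (padding stripped) used in RECORD."""
    digest = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Example usage against an installed copy:
# record_digest(Path("site-packages/emx_onnx_cgen/errors.py"))
```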