potnn 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- potnn/__init__.py +86 -0
- potnn/codegen/__init__.py +20 -0
- potnn/codegen/bit2.py +263 -0
- potnn/codegen/fp130.py +269 -0
- potnn/codegen/header.py +460 -0
- potnn/codegen/level5.py +393 -0
- potnn/codegen/scale.py +184 -0
- potnn/codegen/ternary.py +354 -0
- potnn/codegen/unroll.py +616 -0
- potnn/config.py +112 -0
- potnn/export.py +2196 -0
- potnn/fuse.py +167 -0
- potnn/modules/__init__.py +11 -0
- potnn/modules/add.py +114 -0
- potnn/modules/avgpool.py +173 -0
- potnn/modules/base.py +225 -0
- potnn/modules/conv.py +203 -0
- potnn/modules/conv1d.py +317 -0
- potnn/modules/depthwise.py +216 -0
- potnn/modules/linear.py +199 -0
- potnn/quantize/__init__.py +35 -0
- potnn/quantize/calibration.py +233 -0
- potnn/quantize/integer_ops.py +207 -0
- potnn/quantize/integer_sim.py +225 -0
- potnn/quantize/pot.py +455 -0
- potnn/quantize/qat.py +356 -0
- potnn/utils/__init__.py +13 -0
- potnn/utils/allocation.py +240 -0
- potnn/utils/memory.py +158 -0
- potnn/wrapper.py +304 -0
- potnn-1.0.0.dist-info/METADATA +260 -0
- potnn-1.0.0.dist-info/RECORD +35 -0
- potnn-1.0.0.dist-info/WHEEL +5 -0
- potnn-1.0.0.dist-info/licenses/LICENSE +72 -0
- potnn-1.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,260 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: potnn
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: Multiplication-free neural networks for ultra-low-power MCUs
|
|
5
|
+
Home-page: https://github.com/scienthoon/potnn
|
|
6
|
+
Author: Scienthoon
|
|
7
|
+
Author-email: scienthoon@gmail.com
|
|
8
|
+
License: GPL-3.0
|
|
9
|
+
Classifier: Development Status :: 4 - Beta
|
|
10
|
+
Classifier: Intended Audience :: Developers
|
|
11
|
+
Classifier: Intended Audience :: Science/Research
|
|
12
|
+
Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
18
|
+
Classifier: Topic :: Software Development :: Embedded Systems
|
|
19
|
+
Requires-Python: >=3.8
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
License-File: LICENSE
|
|
22
|
+
Requires-Dist: torch>=2.0.0
|
|
23
|
+
Requires-Dist: numpy
|
|
24
|
+
Dynamic: author
|
|
25
|
+
Dynamic: author-email
|
|
26
|
+
Dynamic: classifier
|
|
27
|
+
Dynamic: description
|
|
28
|
+
Dynamic: description-content-type
|
|
29
|
+
Dynamic: home-page
|
|
30
|
+
Dynamic: license
|
|
31
|
+
Dynamic: license-file
|
|
32
|
+
Dynamic: requires-dist
|
|
33
|
+
Dynamic: requires-python
|
|
34
|
+
Dynamic: summary
|
|
35
|
+
|
|
36
|
+
# PoT-NN: Multiplication-Free Neural Networks for Ultra-Low-Power MCUs
|
|
37
|
+
|
|
38
|
+
[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)
|
|
39
|
+
[![Python 3.8+](https://img.shields.io/badge/python-3.8%2B-blue.svg)](https://www.python.org/downloads/)
|
|
40
|
+
[![PyTorch](https://img.shields.io/badge/PyTorch-2.0%2B-ee4c2c.svg)](https://pytorch.org/)
|
|
41
|
+
|
|
42
|
+
**PoT-NN** is a quantization framework that enables **deep learning inference without multiplication**.
|
|
43
|
+
Run neural networks on ultra-low-cost MCUs without hardware multipliers (CH32V003, PY32F003, etc.).
|
|
44
|
+
|
|
45
|
+
> 🇰🇷 [한국어 문서](README_ko.md)
|
|
46
|
+
|
|
47
|
+
## 🎯 Key Features
|
|
48
|
+
|
|
49
|
+
| Feature | Description |
|
|
50
|
+
|---------|-------------|
|
|
51
|
+
| **Multiplication-Free** | All weights quantized to powers-of-two, using only `<<`, `>>`, `+` operations |
|
|
52
|
+
| **Integer-Only Inference** | No floating-point operations, only `int8`/`int32` arithmetic |
|
|
53
|
+
| **5 Encoding Modes** | Choose between accuracy vs. memory tradeoff |
|
|
54
|
+
| **Auto C Export** | Generates standalone C header files with zero dependencies |
|
|
55
|
+
| **Bit-Exact Matching** | Guaranteed 100% match between Python simulation and C code |
|
|
56
|
+
|
|
57
|
+
## 📦 Installation
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
git clone https://github.com/scienthoon/potnn.git
|
|
61
|
+
cd potnn
|
|
62
|
+
pip install -e .
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
## 🚀 Quick Start
|
|
66
|
+
|
|
67
|
+
### Method 1: One-Line Training (Recommended)
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
import torch
|
|
71
|
+
import torch.nn as nn
|
|
72
|
+
import potnn
|
|
73
|
+
from potnn import PoTConv2d, PoTLinear
|
|
74
|
+
|
|
75
|
+
# 1. Define model using PoT layers
|
|
76
|
+
class TinyNet(nn.Module):
|
|
77
|
+
def __init__(self):
|
|
78
|
+
super().__init__()
|
|
79
|
+
self.conv1 = PoTConv2d(1, 8, kernel_size=3, padding=1)
|
|
80
|
+
self.conv2 = PoTConv2d(8, 16, kernel_size=3, padding=1)
|
|
81
|
+
self.pool = nn.AdaptiveAvgPool2d(1) # Auto-replaced with PoTGlobalAvgPool
|
|
82
|
+
self.fc = PoTLinear(16, 10)
|
|
83
|
+
|
|
84
|
+
def forward(self, x):
|
|
85
|
+
x = torch.relu(self.conv1(x))
|
|
86
|
+
x = nn.functional.max_pool2d(x, 2)
|
|
87
|
+
x = torch.relu(self.conv2(x))
|
|
88
|
+
x = self.pool(x).view(x.size(0), -1)
|
|
89
|
+
return self.fc(x)
|
|
90
|
+
|
|
91
|
+
model = TinyNet()
|
|
92
|
+
|
|
93
|
+
# 2. Configure
|
|
94
|
+
config = potnn.Config(
|
|
95
|
+
flash=16384, # Target MCU Flash (bytes)
|
|
96
|
+
ram=2048, # Target MCU RAM (bytes)
|
|
97
|
+
mean=0.1307, # Dataset mean
|
|
98
|
+
std=0.3081, # Dataset std
|
|
99
|
+
input_h=16, input_w=16, input_channels=1,
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
# 3. Train (Float → Calibrate → QAT → Integer Sim)
|
|
103
|
+
model = potnn.train(model, train_loader, test_loader, config,
|
|
104
|
+
float_epochs=15, qat_epochs=50)
|
|
105
|
+
|
|
106
|
+
# 4. Export to C
|
|
107
|
+
potnn.export(model, "model.h", config)
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
### Method 2: Manual Pipeline
|
|
111
|
+
|
|
112
|
+
```python
|
|
113
|
+
import potnn
|
|
114
|
+
|
|
115
|
+
# Step 1: Train float model (standard PyTorch training)
|
|
116
|
+
train_float(model, train_loader, epochs=15)
|
|
117
|
+
|
|
118
|
+
# Step 2: Fuse BatchNorm into Conv (if any)
|
|
119
|
+
potnn.fuse_batchnorm(model)
|
|
120
|
+
|
|
121
|
+
# Step 3: Calibrate activation scales
|
|
122
|
+
potnn.calibrate(model, train_loader, config)
|
|
123
|
+
|
|
124
|
+
# Step 4: Prepare for QAT
|
|
125
|
+
potnn.prepare_qat(model, config)
|
|
126
|
+
|
|
127
|
+
# Step 5: QAT training
|
|
128
|
+
train_qat(model, train_loader, epochs=50)
|
|
129
|
+
|
|
130
|
+
# Step 6: Enable integer simulation (C-compatible)
|
|
131
|
+
potnn.enable_integer_sim(model, input_std=config.std, input_mean=config.mean)
|
|
132
|
+
|
|
133
|
+
# Step 7: Export
|
|
134
|
+
potnn.export(model, "model.h", config)
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
## 📊 Encoding Modes
|
|
138
|
+
|
|
139
|
+
Choose encoding based on accuracy vs. memory tradeoff:
|
|
140
|
+
|
|
141
|
+
| Encoding | Levels | Values | Bits/Weight | Best For |
|
|
142
|
+
|----------|--------|--------|-------------|----------|
|
|
143
|
+
| `unroll` | 17 | 0, ±1, ±2, ±4, ..., ±128 | Code-unrolled | Highest accuracy |
|
|
144
|
+
| `fp130` | 16 | ±1, ±2, ±4, ..., ±128 | 4-bit | Dense layers |
|
|
145
|
+
| `5level` | 5 | -8, -1, 0, +1, +8 | 4-bit (skip) | Balanced |
|
|
146
|
+
| `2bit` | 4 | -2, -1, +1, +2 | 2-bit | Smallest memory |
|
|
147
|
+
| `ternary` | 3 | -1, 0, +1 | 2-bit (RLE) | Sparse models |
|
|
148
|
+
|
|
149
|
+
### Per-Layer Encoding
|
|
150
|
+
|
|
151
|
+
```python
|
|
152
|
+
config = potnn.Config(
|
|
153
|
+
flash=16384, ram=2048,
|
|
154
|
+
layer_encodings={
|
|
155
|
+
'conv1': 'unroll', # First layer: max accuracy
|
|
156
|
+
'conv2': '5level', # Middle layer
|
|
157
|
+
'fc': 'unroll', # Last layer: max accuracy
|
|
158
|
+
},
|
|
159
|
+
default_encoding='5level'
|
|
160
|
+
)
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
### Encoding Details
|
|
164
|
+
|
|
165
|
+
#### `unroll` (Default)
|
|
166
|
+
- Weights embedded directly as shift-add operations
|
|
167
|
+
- Zero weights omitted entirely (sparse-friendly)
|
|
168
|
+
- Largest code size, highest accuracy
|
|
169
|
+
|
|
170
|
+
#### `fp130` (FP1.3.0 Format)
|
|
171
|
+
- 4-bit packing: `[sign(1)][exp(3)]`
|
|
172
|
+
- No zero (zeros replaced with ±1)
|
|
173
|
+
- Good for dense layers
|
|
174
|
+
|
|
175
|
+
#### `5level` (Skip Encoding)
|
|
176
|
+
- 4-bit packing: `[skip(2)][sign(1)][mag(1)]`
|
|
177
|
+
- Skip field compresses consecutive zeros (0-3)
|
|
178
|
+
- **Constraint**: Max 3 consecutive zeros (4th+ replaced with +1)
|
|
179
|
+
|
|
180
|
+
#### `2bit`
|
|
181
|
+
- 2-bit packing: `[sign(1)][shift(1)]`
|
|
182
|
+
- Smallest memory (16 weights per uint32)
|
|
183
|
+
- No zero (zeros replaced with ±1)
|
|
184
|
+
|
|
185
|
+
#### `ternary` (Triple-Run)
|
|
186
|
+
- 2-bit codes with run-length encoding
|
|
187
|
+
- `11` code = repeat previous value 2 more times
|
|
188
|
+
- Best for very sparse models
|
|
189
|
+
|
|
190
|
+
## 🔌 Supported Layers
|
|
191
|
+
|
|
192
|
+
| Layer | Class | Notes |
|
|
193
|
+
|-------|-------|-------|
|
|
194
|
+
| Conv2D | `PoTConv2d` | All standard parameters supported |
|
|
195
|
+
| Conv1D | `PoTConv1d` | For time series |
|
|
196
|
+
| Depthwise | `PoTDepthwiseConv2d` | MobileNet-style |
|
|
197
|
+
| Linear | `PoTLinear` | Fully connected |
|
|
198
|
+
| GAP | Auto-replaced | `nn.AdaptiveAvgPool2d(1)` → `PoTGlobalAvgPool` |
|
|
199
|
+
| Add | `PoTAdd` | For residual connections |
|
|
200
|
+
| BatchNorm | Auto-fused | Merged into preceding Conv/Linear |
|
|
201
|
+
|
|
202
|
+
## ⚙️ API Reference
|
|
203
|
+
|
|
204
|
+
### `potnn.Config`
|
|
205
|
+
|
|
206
|
+
| Parameter | Type | Required | Description |
|
|
207
|
+
|-----------|------|----------|-------------|
|
|
208
|
+
| `flash` | int | ✅ | Flash memory budget (bytes) |
|
|
209
|
+
| `ram` | int | ✅ | RAM budget (bytes) |
|
|
210
|
+
| `mean` | float/list | ❌ | Dataset mean (single or per-channel) |
|
|
211
|
+
| `std` | float/list | ❌ | Dataset std |
|
|
212
|
+
| `input_h`, `input_w` | int | ❌ | Input dimensions (default: 16×16) |
|
|
213
|
+
| `input_channels` | int | ❌ | Input channels (default: 1) |
|
|
214
|
+
| `layer_encodings` | dict | ❌ | Per-layer encoding override |
|
|
215
|
+
| `default_encoding` | str | ❌ | Default encoding (default: 'unroll') |
|
|
216
|
+
|
|
217
|
+
### Key Functions
|
|
218
|
+
|
|
219
|
+
```python
|
|
220
|
+
potnn.train(model, train_loader, test_loader, config, ...) # Full pipeline
|
|
221
|
+
potnn.calibrate(model, data_loader, config) # Calibrate scales
|
|
222
|
+
potnn.prepare_qat(model, config) # Enable QAT mode
|
|
223
|
+
potnn.enable_integer_sim(model, input_std, input_mean) # C-compatible mode
|
|
224
|
+
potnn.export(model, output_path, config) # Generate C code
|
|
225
|
+
potnn.fuse_batchnorm(model) # Fuse BN layers
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
## 🧪 Verified Results
|
|
229
|
+
|
|
230
|
+
- **Bit-Exact Matching**: Python integer simulation matches C output 100%
|
|
231
|
+
- **MNIST**: 97%+ accuracy with 12KB binary
|
|
232
|
+
- **100-Model Stress Test**: Verified across random architectures
|
|
233
|
+
|
|
234
|
+
## 📄 License
|
|
235
|
+
|
|
236
|
+
**Dual License**: GPL-3.0 + Commercial
|
|
237
|
+
|
|
238
|
+
| Use Case | License |
|
|
239
|
+
|----------|---------|
|
|
240
|
+
| Open Source Projects | GPL-3.0 (Free) |
|
|
241
|
+
| Proprietary/Commercial | Commercial License (Contact us) |
|
|
242
|
+
|
|
243
|
+
See [LICENSE](LICENSE) for details.
|
|
244
|
+
|
|
245
|
+
## 🤝 Contributing
|
|
246
|
+
|
|
247
|
+
This project was created by a solo developer without formal CS education.
|
|
248
|
+
There may be bugs, inefficiencies, or areas for improvement.
|
|
249
|
+
|
|
250
|
+
**Any contributions are greatly appreciated!**
|
|
251
|
+
- 🐛 Bug reports
|
|
252
|
+
- 💡 Feature suggestions
|
|
253
|
+
- 🔧 Pull requests
|
|
254
|
+
- 📝 Documentation improvements
|
|
255
|
+
|
|
256
|
+
If you find issues or have ideas, please open an issue or PR. Thank you!
|
|
257
|
+
|
|
258
|
+
---
|
|
259
|
+
|
|
260
|
+
**Made with ❤️ for ultra-low-power AI**
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
potnn/__init__.py,sha256=RmM9ryV2CxB692tNaaQXwnKJ2GgfTUzbf0ubISCpciw,2561
|
|
2
|
+
potnn/config.py,sha256=mR9NbbGN9q5G6wEm3vx0ODeWSHbVliyW7RHIj8uS1os,4464
|
|
3
|
+
potnn/export.py,sha256=wHOWeOLd9YlN3-GFL0lIiLdZud8yPvdqtGkvy7A78L8,92313
|
|
4
|
+
potnn/fuse.py,sha256=_ReCS9sk6ygQbZA7G2jS11ApAg8l6xWGxieJJ-K7L7Q,5631
|
|
5
|
+
potnn/wrapper.py,sha256=zgETMT7IlkhvMo1MZBSxmG21AJ7uzgmrK1eyB8pKIlY,11889
|
|
6
|
+
potnn/codegen/__init__.py,sha256=htno5WMgQlvjsDSi1fM3mSyc6N8TZCI5eXb2jQujorg,599
|
|
7
|
+
potnn/codegen/bit2.py,sha256=5lTgaHsuD8J1ppqBihpXm3dUNT3ZUQ4ecKNDVel57t0,9786
|
|
8
|
+
potnn/codegen/fp130.py,sha256=E3ufBvru-ZadooXfc_-2jtj7JWlmOGwGgRUDUiuuBSg,10363
|
|
9
|
+
potnn/codegen/header.py,sha256=fY3s8bm47eIWtMydf2fo8V8O3eyNdMjp3P4OAbGZ4aY,18586
|
|
10
|
+
potnn/codegen/level5.py,sha256=-tLPvymYQFTENuuwdl3E5f0bkKqy9XEUbcqVu7wEeFY,14733
|
|
11
|
+
potnn/codegen/scale.py,sha256=N_5olynrFs8E66GWCr33iHtlH5WL9P26e2A1DDt0szE,5974
|
|
12
|
+
potnn/codegen/ternary.py,sha256=-PSWa334SxVcTy23hEI_PJqK86KaQ8-Uh_TqUPLhMqI,13117
|
|
13
|
+
potnn/codegen/unroll.py,sha256=-RttYPp7GMu8qFq81v0pqM4xJp09cap_2GRL8g0po2A,23251
|
|
14
|
+
potnn/modules/__init__.py,sha256=TEhviXjRra7PIGTZpDQKX0tEJb8uajIjcE_fiZiHWyk,382
|
|
15
|
+
potnn/modules/add.py,sha256=M_ccOyRjnLqtoYVU5bnUKMEaCLQquP6i3zNe1_nF5ks,4352
|
|
16
|
+
potnn/modules/avgpool.py,sha256=eX6IgWIDlCh-wvop01aEWZN64aWg2gudhwqaaXhVOCs,6878
|
|
17
|
+
potnn/modules/base.py,sha256=oWHWXV-O7NQ0N4G0GosGC8H3vaHRpljaPVcxgPEv9v0,8069
|
|
18
|
+
potnn/modules/conv.py,sha256=2n_b-WCqPOriZRM-KQmgwoTpCQKmLnxzVG94AJ9EdIg,7869
|
|
19
|
+
potnn/modules/conv1d.py,sha256=x1zz6sZ7qm4wW0vFYxGI2iUKbFLi81kyu2e3JnGWcMo,13257
|
|
20
|
+
potnn/modules/depthwise.py,sha256=D4u1gmWESRyaqp5idWqMUJioxuWHga2FjLvgSOOzyGE,8725
|
|
21
|
+
potnn/modules/linear.py,sha256=ZipAI0rbwiPEVxDAcY-AVkuf1RoxbSLk9jut6nZ573w,7910
|
|
22
|
+
potnn/quantize/__init__.py,sha256=rBLPaOLE3Lcv4gvmw_CjN7AJwbz2j3CtZb5dC-K6DWA,967
|
|
23
|
+
potnn/quantize/calibration.py,sha256=xBUjQwhNRie9xcG8XTyU8m5xXnD_UU1QMlre1wMBtRw,9570
|
|
24
|
+
potnn/quantize/integer_ops.py,sha256=8alMYkmqOQLrwu2hx0uXU4Gaz1HnRpw_fgrL86a4TNI,7534
|
|
25
|
+
potnn/quantize/integer_sim.py,sha256=pKpJ-dY_AlNW6HpyUWZ3JEKgWIJSCdA_E9DgHuNntc8,6140
|
|
26
|
+
potnn/quantize/pot.py,sha256=_x86v3BXpIe17jZjvTy3Ozd3Mlumw4hH9yUXazd0YCI,14705
|
|
27
|
+
potnn/quantize/qat.py,sha256=6ozzrH1IiSH5Oj1qDpVTJ2-LG4Lf7daKF4t4DM0XRK4,13629
|
|
28
|
+
potnn/utils/__init__.py,sha256=OTwUq1x-nRfC-ZnR1ZGvJX-gb_aaAXLwYFBdO9xnHvs,352
|
|
29
|
+
potnn/utils/allocation.py,sha256=swIfoOEl2pbt0Cf-W_5Za1qAva8OTBS9wfCwKGCCZX8,7731
|
|
30
|
+
potnn/utils/memory.py,sha256=xuLYnOfh7WcfOkEikZb1pSMBdXwD_mUwuQG0Gc9dEBs,5110
|
|
31
|
+
potnn-1.0.0.dist-info/licenses/LICENSE,sha256=cZZ65pYx8hdCtK3VRVbUiI5NaFsNHrq195NuctTSmtU,2731
|
|
32
|
+
potnn-1.0.0.dist-info/METADATA,sha256=bS24-cfZo2yw8T05-O8SRLAty5ky78fyDFC2XW_JRm0,8393
|
|
33
|
+
potnn-1.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
34
|
+
potnn-1.0.0.dist-info/top_level.txt,sha256=fc3uRsXRimIAyDiapfFVH2jm9bL14fZziC7Y5isAsJU,6
|
|
35
|
+
potnn-1.0.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
PoT-NN Dual License
|
|
2
|
+
====================
|
|
3
|
+
|
|
4
|
+
This software is available under a dual license model:
|
|
5
|
+
|
|
6
|
+
1. GNU General Public License v3.0 (GPL-3.0) - For open source use
|
|
7
|
+
2. Commercial License - For proprietary/closed-source use
|
|
8
|
+
|
|
9
|
+
--------------------------------------------------------------------------------
|
|
10
|
+
|
|
11
|
+
OPTION 1: GPL-3.0 (Open Source)
|
|
12
|
+
--------------------------------
|
|
13
|
+
|
|
14
|
+
Copyright (c) 2026 PoT-NN Developers
|
|
15
|
+
|
|
16
|
+
This program is free software: you can redistribute it and/or modify
|
|
17
|
+
it under the terms of the GNU General Public License as published by
|
|
18
|
+
the Free Software Foundation, either version 3 of the License, or
|
|
19
|
+
(at your option) any later version.
|
|
20
|
+
|
|
21
|
+
This program is distributed in the hope that it will be useful,
|
|
22
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
23
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
24
|
+
GNU General Public License for more details.
|
|
25
|
+
|
|
26
|
+
You should have received a copy of the GNU General Public License
|
|
27
|
+
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
28
|
+
|
|
29
|
+
**Summary of GPL-3.0 Requirements:**
|
|
30
|
+
- You must disclose your source code if you distribute the software
|
|
31
|
+
- Derivative works must also be licensed under GPL-3.0
|
|
32
|
+
- You must include the original copyright notice
|
|
33
|
+
- Changes to the code must be documented
|
|
34
|
+
|
|
35
|
+
--------------------------------------------------------------------------------
|
|
36
|
+
|
|
37
|
+
OPTION 2: Commercial License
|
|
38
|
+
-----------------------------
|
|
39
|
+
|
|
40
|
+
For companies and individuals who wish to use PoT-NN in proprietary products
|
|
41
|
+
without the obligations of the GPL license, a commercial license is available.
|
|
42
|
+
|
|
43
|
+
**Commercial License Benefits:**
|
|
44
|
+
- Use in closed-source products
|
|
45
|
+
- No requirement to disclose your source code
|
|
46
|
+
- No copyleft obligations
|
|
47
|
+
- Priority support available
|
|
48
|
+
|
|
49
|
+
**To obtain a commercial license, please contact:**
|
|
50
|
+
- Email: scienthoon@gmail.com
|
|
51
|
+
- Website: https://github.com/scienthoon/potnn
|
|
52
|
+
|
|
53
|
+
--------------------------------------------------------------------------------
|
|
54
|
+
|
|
55
|
+
Which License Applies to You?
|
|
56
|
+
------------------------------
|
|
57
|
+
|
|
58
|
+
- **Open Source Projects**: If your project is open source and will be
|
|
59
|
+
distributed under GPL-3.0 or a compatible license, you can use PoT-NN
|
|
60
|
+
for free under the GPL-3.0 license.
|
|
61
|
+
|
|
62
|
+
- **Commercial/Proprietary Use**: If you want to use PoT-NN in a proprietary
|
|
63
|
+
product without releasing your source code, you need a commercial license.
|
|
64
|
+
|
|
65
|
+
- **Internal Use**: If you're only using PoT-NN internally and not distributing
|
|
66
|
+
it, GPL-3.0 does not require you to release your source code. However, if
|
|
67
|
+
you want dedicated support or prefer clear legal terms, consider a
|
|
68
|
+
commercial license.
|
|
69
|
+
|
|
70
|
+
--------------------------------------------------------------------------------
|
|
71
|
+
|
|
72
|
+
Full GPL-3.0 License Text: https://www.gnu.org/licenses/gpl-3.0.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
potnn
|