LetsANN 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letsann-0.1.0/LICENSE +21 -0
- letsann-0.1.0/LetsANN.egg-info/PKG-INFO +118 -0
- letsann-0.1.0/LetsANN.egg-info/SOURCES.txt +18 -0
- letsann-0.1.0/LetsANN.egg-info/dependency_links.txt +1 -0
- letsann-0.1.0/LetsANN.egg-info/entry_points.txt +2 -0
- letsann-0.1.0/LetsANN.egg-info/requires.txt +9 -0
- letsann-0.1.0/LetsANN.egg-info/top_level.txt +1 -0
- letsann-0.1.0/MANIFEST.in +2 -0
- letsann-0.1.0/PKG-INFO +118 -0
- letsann-0.1.0/README.md +84 -0
- letsann-0.1.0/letsann/__init__.py +44 -0
- letsann-0.1.0/letsann/_version.py +7 -0
- letsann-0.1.0/letsann/cli.py +35 -0
- letsann-0.1.0/letsann/data.py +197 -0
- letsann-0.1.0/letsann/layers.py +217 -0
- letsann-0.1.0/letsann/model.py +132 -0
- letsann-0.1.0/letsann/trainer.py +215 -0
- letsann-0.1.0/pyproject.toml +52 -0
- letsann-0.1.0/setup.cfg +4 -0
- letsann-0.1.0/tests/test_model.py +71 -0
letsann-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 LetsANN Contributors
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: LetsANN
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: 基于 TensorFlow 的零基础 ANN 库:用简单的 Python 字典就能描述网络。
|
|
5
|
+
Author: LetsANN Contributors
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/letsann/letsann
|
|
8
|
+
Project-URL: Documentation, https://github.com/letsann/letsann#readme
|
|
9
|
+
Project-URL: Issues, https://github.com/letsann/letsann/issues
|
|
10
|
+
Keywords: tensorflow,keras,neural network,ann,deep learning,education
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Education
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Operating System :: OS Independent
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
22
|
+
Requires-Python: >=3.8
|
|
23
|
+
Description-Content-Type: text/markdown
|
|
24
|
+
License-File: LICENSE
|
|
25
|
+
Requires-Dist: tensorflow>=2.8
|
|
26
|
+
Requires-Dist: numpy>=1.19
|
|
27
|
+
Requires-Dist: pandas>=1.2
|
|
28
|
+
Requires-Dist: scikit-learn>=1.0
|
|
29
|
+
Provides-Extra: dev
|
|
30
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
31
|
+
Requires-Dist: build; extra == "dev"
|
|
32
|
+
Requires-Dist: twine; extra == "dev"
|
|
33
|
+
Dynamic: license-file
|
|
34
|
+
|
|
35
|
+
# LetsANN
|
|
36
|
+
|
|
37
|
+
**LetsANN** 是一个基于 TensorFlow / Keras 的零基础 ANN 库。
|
|
38
|
+
用最简单的 Python 列表描述网络,像搭积木一样训练模型。
|
|
39
|
+
|
|
40
|
+
> 需要可视化拖拽界面?请看配套的独立项目 [`letsann-web`](https://github.com/letsann/letsann-web)。
|
|
41
|
+
|
|
42
|
+
## 安装
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install LetsANN
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
要求 Python **3.8 及以上**。
|
|
49
|
+
|
|
50
|
+
## 最小示例
|
|
51
|
+
|
|
52
|
+
```python
|
|
53
|
+
from letsann import Model, load_dataset
|
|
54
|
+
|
|
55
|
+
# 用 DataFrame 或 CSV 路径都行,最后一列默认为标签
|
|
56
|
+
ds = load_dataset("iris.csv", target="species")
|
|
57
|
+
|
|
58
|
+
# 用列表描述网络
|
|
59
|
+
model = Model([
|
|
60
|
+
{"type": "Input", "params": {"shape": "4"}},
|
|
61
|
+
{"type": "Dense", "params": {"units": 16, "activation": "relu"}},
|
|
62
|
+
{"type": "Dense", "params": {"units": 3, "activation": "softmax"}},
|
|
63
|
+
])
|
|
64
|
+
|
|
65
|
+
# 和 Keras 一样编译、训练
|
|
66
|
+
model.compile(optimizer="adam",
|
|
67
|
+
loss="sparse_categorical_crossentropy",
|
|
68
|
+
metrics=["accuracy"])
|
|
69
|
+
model.fit(ds.X_train, ds.y_train,
|
|
70
|
+
validation_data=(ds.X_val, ds.y_val),
|
|
71
|
+
epochs=20, batch_size=16)
|
|
72
|
+
|
|
73
|
+
print(model.summary())
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
更多示例见 `examples/quickstart.py`。
|
|
77
|
+
|
|
78
|
+
## 支持的层
|
|
79
|
+
|
|
80
|
+
`Input`、`Dense`、`Dropout`、`BatchNormalization`、`Flatten`、`Activation`、
|
|
81
|
+
`Conv2D`、`MaxPooling2D`。全部在 `letsann/layers.py` 中注册,想扩展就
|
|
82
|
+
往 `LAYER_REGISTRY` 里加一条即可。
|
|
83
|
+
|
|
84
|
+
## 数据集格式
|
|
85
|
+
|
|
86
|
+
- **CSV / TSV**:默认最后一列为标签;用 `target="col"` 指定其它列。
|
|
87
|
+
- **NPZ**:需要包含 `X` 和 `y` 两个数组。
|
|
88
|
+
|
|
89
|
+
## 发布到 PyPI
|
|
90
|
+
|
|
91
|
+
```bash
|
|
92
|
+
# 1. 安装打包工具
|
|
93
|
+
pip install build twine
|
|
94
|
+
|
|
95
|
+
# 2. 打包(在本目录运行)
|
|
96
|
+
python -m build # 会生成 dist/LetsANN-0.1.0.tar.gz 和 .whl
|
|
97
|
+
|
|
98
|
+
# 3. 先上传到 TestPyPI 验证
|
|
99
|
+
twine upload --repository testpypi dist/*
|
|
100
|
+
|
|
101
|
+
# 4. 确认没问题后,正式上传 PyPI
|
|
102
|
+
twine upload dist/*
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
上传需要在 <https://pypi.org> 先创建账号并生成 API Token,放进
|
|
106
|
+
`~/.pypirc` 或设置环境变量 `TWINE_USERNAME=__token__`、
|
|
107
|
+
`TWINE_PASSWORD=<你的 token>`。
|
|
108
|
+
|
|
109
|
+
## 开发
|
|
110
|
+
|
|
111
|
+
```bash
|
|
112
|
+
pip install -e ".[dev]"
|
|
113
|
+
pytest
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
## License
|
|
117
|
+
|
|
118
|
+
MIT
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
LICENSE
|
|
2
|
+
MANIFEST.in
|
|
3
|
+
README.md
|
|
4
|
+
pyproject.toml
|
|
5
|
+
LetsANN.egg-info/PKG-INFO
|
|
6
|
+
LetsANN.egg-info/SOURCES.txt
|
|
7
|
+
LetsANN.egg-info/dependency_links.txt
|
|
8
|
+
LetsANN.egg-info/entry_points.txt
|
|
9
|
+
LetsANN.egg-info/requires.txt
|
|
10
|
+
LetsANN.egg-info/top_level.txt
|
|
11
|
+
letsann/__init__.py
|
|
12
|
+
letsann/_version.py
|
|
13
|
+
letsann/cli.py
|
|
14
|
+
letsann/data.py
|
|
15
|
+
letsann/layers.py
|
|
16
|
+
letsann/model.py
|
|
17
|
+
letsann/trainer.py
|
|
18
|
+
tests/test_model.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
letsann
|
letsann-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: LetsANN
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: 基于 TensorFlow 的零基础 ANN 库:用简单的 Python 字典就能描述网络。
|
|
5
|
+
Author: LetsANN Contributors
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/letsann/letsann
|
|
8
|
+
Project-URL: Documentation, https://github.com/letsann/letsann#readme
|
|
9
|
+
Project-URL: Issues, https://github.com/letsann/letsann/issues
|
|
10
|
+
Keywords: tensorflow,keras,neural network,ann,deep learning,education
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Education
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Operating System :: OS Independent
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
22
|
+
Requires-Python: >=3.8
|
|
23
|
+
Description-Content-Type: text/markdown
|
|
24
|
+
License-File: LICENSE
|
|
25
|
+
Requires-Dist: tensorflow>=2.8
|
|
26
|
+
Requires-Dist: numpy>=1.19
|
|
27
|
+
Requires-Dist: pandas>=1.2
|
|
28
|
+
Requires-Dist: scikit-learn>=1.0
|
|
29
|
+
Provides-Extra: dev
|
|
30
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
31
|
+
Requires-Dist: build; extra == "dev"
|
|
32
|
+
Requires-Dist: twine; extra == "dev"
|
|
33
|
+
Dynamic: license-file
|
|
34
|
+
|
|
35
|
+
# LetsANN
|
|
36
|
+
|
|
37
|
+
**LetsANN** 是一个基于 TensorFlow / Keras 的零基础 ANN 库。
|
|
38
|
+
用最简单的 Python 列表描述网络,像搭积木一样训练模型。
|
|
39
|
+
|
|
40
|
+
> 需要可视化拖拽界面?请看配套的独立项目 [`letsann-web`](https://github.com/letsann/letsann-web)。
|
|
41
|
+
|
|
42
|
+
## 安装
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install LetsANN
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
要求 Python **3.8 及以上**。
|
|
49
|
+
|
|
50
|
+
## 最小示例
|
|
51
|
+
|
|
52
|
+
```python
|
|
53
|
+
from letsann import Model, load_dataset
|
|
54
|
+
|
|
55
|
+
# 用 DataFrame 或 CSV 路径都行,最后一列默认为标签
|
|
56
|
+
ds = load_dataset("iris.csv", target="species")
|
|
57
|
+
|
|
58
|
+
# 用列表描述网络
|
|
59
|
+
model = Model([
|
|
60
|
+
{"type": "Input", "params": {"shape": "4"}},
|
|
61
|
+
{"type": "Dense", "params": {"units": 16, "activation": "relu"}},
|
|
62
|
+
{"type": "Dense", "params": {"units": 3, "activation": "softmax"}},
|
|
63
|
+
])
|
|
64
|
+
|
|
65
|
+
# 和 Keras 一样编译、训练
|
|
66
|
+
model.compile(optimizer="adam",
|
|
67
|
+
loss="sparse_categorical_crossentropy",
|
|
68
|
+
metrics=["accuracy"])
|
|
69
|
+
model.fit(ds.X_train, ds.y_train,
|
|
70
|
+
validation_data=(ds.X_val, ds.y_val),
|
|
71
|
+
epochs=20, batch_size=16)
|
|
72
|
+
|
|
73
|
+
print(model.summary())
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
更多示例见 `examples/quickstart.py`。
|
|
77
|
+
|
|
78
|
+
## 支持的层
|
|
79
|
+
|
|
80
|
+
`Input`、`Dense`、`Dropout`、`BatchNormalization`、`Flatten`、`Activation`、
|
|
81
|
+
`Conv2D`、`MaxPooling2D`。全部在 `letsann/layers.py` 中注册,想扩展就
|
|
82
|
+
往 `LAYER_REGISTRY` 里加一条即可。
|
|
83
|
+
|
|
84
|
+
## 数据集格式
|
|
85
|
+
|
|
86
|
+
- **CSV / TSV**:默认最后一列为标签;用 `target="col"` 指定其它列。
|
|
87
|
+
- **NPZ**:需要包含 `X` 和 `y` 两个数组。
|
|
88
|
+
|
|
89
|
+
## 发布到 PyPI
|
|
90
|
+
|
|
91
|
+
```bash
|
|
92
|
+
# 1. 安装打包工具
|
|
93
|
+
pip install build twine
|
|
94
|
+
|
|
95
|
+
# 2. 打包(在本目录运行)
|
|
96
|
+
python -m build # 会生成 dist/LetsANN-0.1.0.tar.gz 和 .whl
|
|
97
|
+
|
|
98
|
+
# 3. 先上传到 TestPyPI 验证
|
|
99
|
+
twine upload --repository testpypi dist/*
|
|
100
|
+
|
|
101
|
+
# 4. 确认没问题后,正式上传 PyPI
|
|
102
|
+
twine upload dist/*
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
上传需要在 <https://pypi.org> 先创建账号并生成 API Token,放进
|
|
106
|
+
`~/.pypirc` 或设置环境变量 `TWINE_USERNAME=__token__`、
|
|
107
|
+
`TWINE_PASSWORD=<你的 token>`。
|
|
108
|
+
|
|
109
|
+
## 开发
|
|
110
|
+
|
|
111
|
+
```bash
|
|
112
|
+
pip install -e ".[dev]"
|
|
113
|
+
pytest
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
## License
|
|
117
|
+
|
|
118
|
+
MIT
|
letsann-0.1.0/README.md
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
# LetsANN
|
|
2
|
+
|
|
3
|
+
**LetsANN** 是一个基于 TensorFlow / Keras 的零基础 ANN 库。
|
|
4
|
+
用最简单的 Python 列表描述网络,像搭积木一样训练模型。
|
|
5
|
+
|
|
6
|
+
> 需要可视化拖拽界面?请看配套的独立项目 [`letsann-web`](https://github.com/letsann/letsann-web)。
|
|
7
|
+
|
|
8
|
+
## 安装
|
|
9
|
+
|
|
10
|
+
```bash
|
|
11
|
+
pip install LetsANN
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
要求 Python **3.8 及以上**。
|
|
15
|
+
|
|
16
|
+
## 最小示例
|
|
17
|
+
|
|
18
|
+
```python
|
|
19
|
+
from letsann import Model, load_dataset
|
|
20
|
+
|
|
21
|
+
# 用 DataFrame 或 CSV 路径都行,最后一列默认为标签
|
|
22
|
+
ds = load_dataset("iris.csv", target="species")
|
|
23
|
+
|
|
24
|
+
# 用列表描述网络
|
|
25
|
+
model = Model([
|
|
26
|
+
{"type": "Input", "params": {"shape": "4"}},
|
|
27
|
+
{"type": "Dense", "params": {"units": 16, "activation": "relu"}},
|
|
28
|
+
{"type": "Dense", "params": {"units": 3, "activation": "softmax"}},
|
|
29
|
+
])
|
|
30
|
+
|
|
31
|
+
# 和 Keras 一样编译、训练
|
|
32
|
+
model.compile(optimizer="adam",
|
|
33
|
+
loss="sparse_categorical_crossentropy",
|
|
34
|
+
metrics=["accuracy"])
|
|
35
|
+
model.fit(ds.X_train, ds.y_train,
|
|
36
|
+
validation_data=(ds.X_val, ds.y_val),
|
|
37
|
+
epochs=20, batch_size=16)
|
|
38
|
+
|
|
39
|
+
print(model.summary())
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
更多示例见 `examples/quickstart.py`。
|
|
43
|
+
|
|
44
|
+
## 支持的层
|
|
45
|
+
|
|
46
|
+
`Input`、`Dense`、`Dropout`、`BatchNormalization`、`Flatten`、`Activation`、
|
|
47
|
+
`Conv2D`、`MaxPooling2D`。全部在 `letsann/layers.py` 中注册,想扩展就
|
|
48
|
+
往 `LAYER_REGISTRY` 里加一条即可。
|
|
49
|
+
|
|
50
|
+
## 数据集格式
|
|
51
|
+
|
|
52
|
+
- **CSV / TSV**:默认最后一列为标签;用 `target="col"` 指定其它列。
|
|
53
|
+
- **NPZ**:需要包含 `X` 和 `y` 两个数组。
|
|
54
|
+
|
|
55
|
+
## 发布到 PyPI
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
# 1. 安装打包工具
|
|
59
|
+
pip install build twine
|
|
60
|
+
|
|
61
|
+
# 2. 打包(在本目录运行)
|
|
62
|
+
python -m build # 会生成 dist/LetsANN-0.1.0.tar.gz 和 .whl
|
|
63
|
+
|
|
64
|
+
# 3. 先上传到 TestPyPI 验证
|
|
65
|
+
twine upload --repository testpypi dist/*
|
|
66
|
+
|
|
67
|
+
# 4. 确认没问题后,正式上传 PyPI
|
|
68
|
+
twine upload dist/*
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
上传需要在 <https://pypi.org> 先创建账号并生成 API Token,放进
|
|
72
|
+
`~/.pypirc` 或设置环境变量 `TWINE_USERNAME=__token__`、
|
|
73
|
+
`TWINE_PASSWORD=<你的 token>`。
|
|
74
|
+
|
|
75
|
+
## 开发
|
|
76
|
+
|
|
77
|
+
```bash
|
|
78
|
+
pip install -e ".[dev]"
|
|
79
|
+
pytest
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
## License
|
|
83
|
+
|
|
84
|
+
MIT
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
"""LetsANN — A beginner-friendly ANN library on top of TensorFlow.
|
|
2
|
+
|
|
3
|
+
Public API:
|
|
4
|
+
- Model: thin wrapper around ``tf.keras.Sequential`` that builds a network
|
|
5
|
+
from a simple JSON-like list of layer specs.
|
|
6
|
+
- build_model: convenience factory for spec-based model construction.
|
|
7
|
+
- LAYER_REGISTRY: mapping of supported layer types to Keras classes
|
|
8
|
+
together with the metadata used by the web UI.
|
|
9
|
+
|
|
10
|
+
Importing ``letsann`` does *not* start the web server. The web UI has its own
|
|
11
|
+
console script (``letsann-web``) and must be started explicitly.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from ._version import __version__
|
|
15
|
+
|
|
16
|
+
# Public API surface. ``Model``/``build_model``/``load_dataset``/
# ``LAYER_REGISTRY``/``layer_catalog`` are heavy (TensorFlow-backed) and are
# resolved lazily by the module-level ``__getattr__`` in this file; only
# ``__version__`` is imported eagerly.
__all__ = [
    "Model",
    "build_model",
    "load_dataset",
    "LAYER_REGISTRY",
    "layer_catalog",
    "__version__",
]
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def __getattr__(name):
|
|
27
|
+
"""Lazily import the heavy (TensorFlow-backed) public API.
|
|
28
|
+
|
|
29
|
+
This keeps ``python -m letsann.cli version`` and similar tooling usable
|
|
30
|
+
even when TensorFlow has not been imported yet (or is not installed).
|
|
31
|
+
"""
|
|
32
|
+
if name in {"Model", "build_model"}:
|
|
33
|
+
from .model import Model, build_model
|
|
34
|
+
|
|
35
|
+
return {"Model": Model, "build_model": build_model}[name]
|
|
36
|
+
if name in {"LAYER_REGISTRY", "layer_catalog"}:
|
|
37
|
+
from .layers import LAYER_REGISTRY, layer_catalog
|
|
38
|
+
|
|
39
|
+
return {"LAYER_REGISTRY": LAYER_REGISTRY, "layer_catalog": layer_catalog}[name]
|
|
40
|
+
if name == "load_dataset":
|
|
41
|
+
from .data import load_dataset
|
|
42
|
+
|
|
43
|
+
return load_dataset
|
|
44
|
+
raise AttributeError(f"module 'letsann' has no attribute {name!r}")
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""LetsANN 命令行工具。
|
|
2
|
+
|
|
3
|
+
只做一件事:查版本号。
|
|
4
|
+
Web 界面已拆到独立的 ``letsann-web`` 项目,本包不再附带。
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import argparse
|
|
10
|
+
|
|
11
|
+
from ._version import __version__
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def build_parser() -> argparse.ArgumentParser:
    """Create the ``letsann`` argument parser with its subcommands."""
    parser = argparse.ArgumentParser(
        prog="letsann",
        description="LetsANN —— 基于 TensorFlow 的极简 ANN 库。",
    )
    subcommands = parser.add_subparsers(dest="command")
    subcommands.required = True

    def _print_version(_args) -> int:
        # Matches ``main()``'s ``func(args) or 0`` contract: returns 0.
        print(f"LetsANN {__version__}")
        return 0

    version_cmd = subcommands.add_parser("version", help="打印 LetsANN 版本。")
    version_cmd.set_defaults(func=_print_version)

    return parser
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def main(argv=None) -> int:
    """CLI entry point.

    Parses *argv* (``sys.argv[1:]`` when ``None``), runs the selected
    subcommand, and returns its exit code (``0`` when the handler returns
    a falsy value).
    """
    args = build_parser().parse_args(argv)
    exit_code = args.func(args)
    return exit_code or 0
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
if __name__ == "__main__": # pragma: no cover
|
|
35
|
+
raise SystemExit(main())
|
|
@@ -0,0 +1,197 @@
|
|
|
1
|
+
"""Dataset helpers for LetsANN.
|
|
2
|
+
|
|
3
|
+
Users can:
|
|
4
|
+
* point LetsANN at a local CSV/NPZ file via :func:`load_dataset`;
|
|
5
|
+
* load the same way from uploaded files in the web UI.
|
|
6
|
+
|
|
7
|
+
Datasets are kept intentionally simple: tabular data in CSV (with a target
|
|
8
|
+
column) or NPZ archives that contain ``X`` / ``y`` arrays.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import io
|
|
14
|
+
import os
|
|
15
|
+
from dataclasses import dataclass
|
|
16
|
+
from typing import Any, Dict, Optional, Tuple, Union
|
|
17
|
+
|
|
18
|
+
import numpy as np
|
|
19
|
+
import pandas as pd
|
|
20
|
+
from sklearn.model_selection import train_test_split
|
|
21
|
+
from sklearn.preprocessing import StandardScaler
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class Dataset:
    """A train/validation split plus the metadata needed to build a model."""

    X_train: np.ndarray
    X_val: np.ndarray
    y_train: np.ndarray
    y_val: np.ndarray
    feature_names: Optional[list] = None
    target_name: Optional[str] = None
    n_classes: Optional[int] = None

    @property
    def input_shape(self) -> Tuple[int, ...]:
        """Per-sample feature shape (everything after the batch axis)."""
        return self.X_train.shape[1:]

    @property
    def task_type(self) -> str:
        """Heuristic label: 'classification' when a class count (> 1) is
        known, 'regression' otherwise."""
        is_classification = bool(self.n_classes) and self.n_classes > 1
        return "classification" if is_classification else "regression"

    def summary(self) -> Dict[str, Any]:
        """Return a JSON-friendly description of the dataset."""
        info: Dict[str, Any] = {
            "n_train": int(self.X_train.shape[0]),
            "n_val": int(self.X_val.shape[0]),
            "input_shape": list(self.input_shape),
            "task_type": self.task_type,
        }
        info["n_classes"] = self.n_classes
        info["feature_names"] = self.feature_names
        info["target_name"] = self.target_name
        return info
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _infer_classes(y: np.ndarray) -> Optional[int]:
|
|
58
|
+
if y.ndim > 1 and y.shape[-1] > 1:
|
|
59
|
+
return int(y.shape[-1])
|
|
60
|
+
if np.issubdtype(y.dtype, np.integer):
|
|
61
|
+
uniq = np.unique(y)
|
|
62
|
+
if uniq.size <= max(50, int(np.sqrt(y.size))):
|
|
63
|
+
return int(uniq.size)
|
|
64
|
+
return None
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _from_dataframe(
    df: pd.DataFrame,
    target: Optional[str],
    test_size: float,
    normalize: bool,
    random_state: int,
) -> Dataset:
    """Build a :class:`Dataset` from a tabular DataFrame.

    Parameters
    ----------
    df:
        Input table; every column except *target* becomes a feature.
    target:
        Target column name; when ``None`` the last column is used.
    test_size:
        Fraction of rows held out for validation.
    normalize:
        Whether to standard-scale the features. The scaler is fit on the
        training split only, so validation statistics never leak in.
    random_state:
        Seed forwarded to ``train_test_split``.

    Raises
    ------
    ValueError
        If *target* is not a column of *df*.
    """
    if target is None:
        target = df.columns[-1]
    if target not in df.columns:
        raise ValueError(f"Target column {target!r} not in dataset. Columns: {list(df.columns)}")

    features = [c for c in df.columns if c != target]
    X = df[features].to_numpy(dtype=np.float32)
    y_raw = df[target].to_numpy()

    # If target is non-numeric, label-encode it.
    if y_raw.dtype.kind in {"O", "U", "S"}:
        classes, y = np.unique(y_raw, return_inverse=True)
        y = y.astype(np.int64)
        n_classes = int(classes.size)
    else:
        y = y_raw
        n_classes = _infer_classes(y)
        if n_classes is None:
            y = y.astype(np.float32)

    # Split BEFORE scaling: fitting the scaler on the full data would leak
    # validation statistics into the training features. The split itself
    # depends only on random_state and the row count, not on the values.
    X_train, X_val, y_train, y_val = train_test_split(
        X, y, test_size=test_size, random_state=random_state,
        stratify=y if n_classes else None,
    )

    if normalize:
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train).astype(np.float32)
        X_val = scaler.transform(X_val).astype(np.float32)

    return Dataset(
        X_train=X_train,
        X_val=X_val,
        y_train=y_train,
        y_val=y_val,
        feature_names=features,
        target_name=target,
        n_classes=n_classes,
    )
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def load_dataset(
    source: Union[str, bytes, io.BytesIO, pd.DataFrame],
    *,
    target: Optional[str] = None,
    test_size: float = 0.2,
    normalize: bool = True,
    random_state: int = 42,
    file_name: Optional[str] = None,
) -> Dataset:
    """Load a dataset from a path, a bytes buffer, or a DataFrame.

    Parameters
    ----------
    source:
        Path to a CSV/NPZ file, an in-memory buffer / raw bytes (e.g. an
        uploaded file), or a pandas DataFrame.
    target:
        Target column name; the last column is used when ``None``.
    test_size:
        Fraction of samples held out for the validation split.
    normalize:
        Whether to StandardScaler-normalise the features.
    random_state:
        Seed for the train/validation split.
    file_name:
        Optional hint used when ``source`` is raw bytes (or a buffer
        without a ``name``) and its extension is otherwise unknown.
    """

    # Already-parsed table: go straight to the DataFrame path.
    if isinstance(source, pd.DataFrame):
        return _from_dataframe(source, target, test_size, normalize, random_state)

    # Raw bytes (e.g. a web upload): wrap in a buffer, dispatch on the
    # extension hint if one was supplied.
    if isinstance(source, (bytes, bytearray)):
        hint = file_name or ""
        return _load_from_buffer(
            io.BytesIO(source),
            os.path.splitext(hint)[1].lower(),
            target, test_size, normalize, random_state,
        )

    # Open file-like object: prefer the explicit hint, fall back to the
    # stream's own name when it has one.
    if isinstance(source, io.IOBase):
        hint = file_name or getattr(source, "name", "")
        return _load_from_buffer(
            source,
            os.path.splitext(hint)[1].lower(),
            target, test_size, normalize, random_state,
        )

    # Anything else is treated as a filesystem path.
    path = str(source)
    ext = os.path.splitext(path)[1].lower()
    if ext in {".csv", ".tsv", ".txt"}:
        frame = pd.read_csv(path, sep="\t" if ext == ".tsv" else ",")
        return _from_dataframe(frame, target, test_size, normalize, random_state)
    if ext == ".npz":
        return _load_npz(np.load(path), test_size, normalize, random_state)
    raise ValueError(f"Unsupported file extension: {ext!r}")
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def _load_from_buffer(buf, ext, target, test_size, normalize, random_state) -> Dataset:
    """Dispatch an in-memory buffer to the matching loader by extension.

    An empty extension is assumed to be CSV — the common case of an upload
    without a usable file name.
    """
    if ext == ".npz":
        return _load_npz(np.load(buf), test_size, normalize, random_state)
    if ext in {".csv", ".tsv", ".txt", ""}:
        frame = pd.read_csv(buf, sep="\t" if ext == ".tsv" else ",")
        return _from_dataframe(frame, target, test_size, normalize, random_state)
    raise ValueError(f"Unsupported upload extension: {ext!r}")
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def _load_npz(npz, test_size: float, normalize: bool, random_state: int) -> Dataset:
    """Build a :class:`Dataset` from an NPZ archive containing ``X``/``y``.

    Parameters
    ----------
    npz:
        An opened NPZ archive (``np.load`` result) with ``X`` and ``y``.
    test_size:
        Fraction of samples held out for the validation split.
    normalize:
        Whether to standard-scale 2-D tabular features; higher-rank data
        (e.g. images) is left untouched.
    random_state:
        Seed forwarded to ``train_test_split``.

    Raises
    ------
    ValueError
        If the archive is missing the ``X`` or ``y`` array.
    """
    if "X" not in npz or "y" not in npz:
        raise ValueError("NPZ file must contain 'X' and 'y' arrays.")
    X = np.asarray(npz["X"], dtype=np.float32)
    y = np.asarray(npz["y"])
    n_classes = _infer_classes(y)
    if n_classes is None:
        y = y.astype(np.float32)

    # Split BEFORE scaling so validation statistics never leak into the
    # scaler fitted for the training features. The split indices depend
    # only on random_state and the sample count, not on the values.
    X_train, X_val, y_train, y_val = train_test_split(
        X, y, test_size=test_size, random_state=random_state,
        stratify=y if n_classes else None,
    )

    if normalize and X_train.ndim == 2:
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train).astype(np.float32)
        X_val = scaler.transform(X_val).astype(np.float32)

    return Dataset(
        X_train=X_train,
        X_val=X_val,
        y_train=y_train,
        y_val=y_val,
        n_classes=n_classes,
    )
|