deep00 0.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deep00-0.0.0/PKG-INFO ADDED
@@ -0,0 +1,26 @@
1
+ Metadata-Version: 2.1
2
+ Name: deep00
3
+ Version: 0.0.0
4
+ Summary: Ultra simple deep learning tool
5
+ Home-page: https://github.co.jp/
6
+ Author: bib_inf
7
+ Author-email: contact.bibinf@gmail.com
8
+ License: CC0 v1.0
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Topic :: Software Development :: Libraries
11
+ Classifier: License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
12
+ Description-Content-Type: text/markdown
13
+
14
+ English description follows Japanese.
15
+
16
+ ---
17
+
18
+ ### 概要
19
+ 超簡単に深層学習が使えるツールです。
20
+
21
+ 説明は執筆中です
22
+
23
+ ---
24
+
25
+ ### Overview
26
+ Description is under construction.
deep00-0.0.0/README.md ADDED
@@ -0,0 +1,13 @@
1
+ English description follows Japanese.
2
+
3
+ ---
4
+
5
+ ### 概要
6
+ 超簡単に深層学習が使えるツールです。
7
+
8
+ 説明は執筆中です
9
+
10
+ ---
11
+
12
+ ### Overview
13
+ Description is under construction.
File without changes
@@ -0,0 +1,307 @@
1
+ """
2
+ deep00 - シンプルな深層学習回帰ツール (PyTorch)
3
+
4
+ Usage:
5
+ model = deep00("reg", neurons=[7, 7], activation="relu")
6
+ model.fit(train_x, train_y)
7
+ pred_y = model(test_x)
8
+ """
9
+
10
+ import numpy as np
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.optim as optim
14
+ from torch.utils.data import TensorDataset, DataLoader
15
+
16
+
17
+ class _RegressionNet(nn.Module):
18
+ """全結合ニューラルネットワーク(回帰用)"""
19
+
20
+ _ACTIVATIONS = {
21
+ "relu": nn.ReLU,
22
+ "sigmoid": nn.Sigmoid,
23
+ "tanh": nn.Tanh,
24
+ "leaky_relu": nn.LeakyReLU,
25
+ "elu": nn.ELU,
26
+ "selu": nn.SELU,
27
+ "gelu": nn.GELU,
28
+ }
29
+
30
+ def __init__(self, in_dim: int, out_dim: int, neurons: list[int], activation: str):
31
+ super().__init__()
32
+ act_cls = self._ACTIVATIONS.get(activation)
33
+ if act_cls is None:
34
+ raise ValueError(
35
+ f"未対応の活性化関数: '{activation}' "
36
+ f"対応一覧: {list(self._ACTIVATIONS.keys())}"
37
+ )
38
+
39
+ layers: list[nn.Module] = []
40
+ prev = in_dim
41
+ for n in neurons:
42
+ layers.append(nn.Linear(prev, n))
43
+ layers.append(act_cls())
44
+ prev = n
45
+ layers.append(nn.Linear(prev, out_dim)) # 出力層(活性化なし)
46
+
47
+ self.net = nn.Sequential(*layers)
48
+
49
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
50
+ return self.net(x)
51
+
52
+
53
class deep00:
    """
    Simple deep-learning regression model (PyTorch back end).

    Usage::

        model = deep00("reg", neurons=[7, 7], activation="relu")
        model.fit(train_x, train_y)
        pred_y = model(test_x)

    Parameters
    ----------
    task : str
        Task type. Only "reg" (regression) is currently supported.
    neurons : list[int]
        Hidden-layer widths, e.g. [7, 7]. Defaults to [32, 32].
    activation : str
        Activation name. Default "relu".
        Supported: relu, sigmoid, tanh, leaky_relu, elu, selu, gelu.
    epochs : int
        Number of training epochs. Default 100.
    batch_size : int
        Mini-batch size. Default 32.
    lr : float
        Learning rate. Default 1e-3.
    optimizer : str
        Optimizer name, "adam" or "sgd". Default "adam".
    weight_decay : float
        L2 regularization strength. Default 0.
    early_stopping : int or None
        Stop after this many epochs without validation-loss improvement.
        None disables early stopping. Default None.
    val_ratio : float
        Fraction (0-1) of training data held out for validation when
        early stopping is enabled. Default 0.1.
    verbose : bool
        Print training progress. Default True.
    seed : int or None
        Random seed. Default None.
    device : str or None
        "cpu" / "cuda" / None (auto-select).
    """

    def __init__(
        self,
        task: str = "reg",
        *,
        neurons: list[int] | None = None,
        activation: str = "relu",
        epochs: int = 100,
        batch_size: int = 32,
        lr: float = 1e-3,
        optimizer: str = "adam",
        weight_decay: float = 0.0,
        early_stopping: int | None = None,
        val_ratio: float = 0.1,
        verbose: bool = True,
        seed: int | None = None,
        device: str | None = None,
    ):
        if task != "reg":
            raise ValueError(f"未対応のタスク: '{task}'(現在は 'reg' のみ)")

        self.task = task
        self.neurons = neurons if neurons is not None else [32, 32]
        self.activation = activation
        self.epochs = epochs
        self.batch_size = batch_size
        self.lr = lr
        self.optimizer_name = optimizer
        self.weight_decay = weight_decay
        self.early_stopping = early_stopping
        self.val_ratio = val_ratio
        self.verbose = verbose
        self.seed = seed

        if device is None:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device(device)

        self._model: _RegressionNet | None = None
        self._train_loss_history: list[float] = []
        self._val_loss_history: list[float] = []
        # True when fit() received a 1-D target vector; predict() then
        # squeezes its (N, 1) output back to (N,) for a consistent round trip.
        self._squeeze_output: bool = False

    # ------------------------------------------------------------------
    # Internal utilities
    # ------------------------------------------------------------------
    def _to_tensor(self, arr: np.ndarray) -> torch.Tensor:
        """Convert an array to a float32 tensor on the configured device."""
        return torch.as_tensor(arr, dtype=torch.float32, device=self.device)

    def _build_model(self, in_dim: int, out_dim: int) -> _RegressionNet:
        """Instantiate the network and move it to the configured device."""
        model = _RegressionNet(in_dim, out_dim, self.neurons, self.activation)
        return model.to(self.device)

    def _make_optimizer(self, model: _RegressionNet) -> optim.Optimizer:
        """Create the optimizer selected at construction time."""
        if self.optimizer_name == "adam":
            return optim.Adam(model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        elif self.optimizer_name == "sgd":
            return optim.SGD(model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        else:
            raise ValueError(
                f"未対応のoptimizer: '{self.optimizer_name}' 対応: adam, sgd"
            )

    # ------------------------------------------------------------------
    # fit
    # ------------------------------------------------------------------
    def fit(self, train_x: np.ndarray, train_y: np.ndarray) -> "deep00":
        """
        Train the model.

        Parameters
        ----------
        train_x : array-like, shape (N, in_dim)
        train_y : array-like, shape (N, out_dim) or (N,)
            A 1-D target vector is accepted and treated as (N, 1);
            predictions are then returned as 1-D as well.

        Returns
        -------
        self

        Raises
        ------
        ValueError
            If the inputs are empty, not 2-D / 1-D as expected, or have
            mismatched row counts.
        """
        if self.seed is not None:
            # torch.manual_seed also seeds all CUDA devices.
            torch.manual_seed(self.seed)
            np.random.seed(self.seed)

        train_x = np.asarray(train_x)
        train_y = np.asarray(train_y)
        if train_x.ndim != 2:
            raise ValueError(f"train_x must be 2-D (N, in_dim), got shape {train_x.shape}")
        if train_x.shape[0] == 0:
            raise ValueError("train_x is empty")
        # Accept sklearn-style 1-D targets by promoting to a column vector.
        self._squeeze_output = train_y.ndim == 1
        if self._squeeze_output:
            train_y = train_y.reshape(-1, 1)
        if train_y.ndim != 2 or train_y.shape[0] != train_x.shape[0]:
            raise ValueError(
                f"train_y must have shape (N, out_dim) or (N,) with N={train_x.shape[0]}, "
                f"got shape {train_y.shape}"
            )

        x = self._to_tensor(train_x)
        y = self._to_tensor(train_y)
        in_dim = x.shape[1]
        out_dim = y.shape[1]

        self._model = self._build_model(in_dim, out_dim)
        opt = self._make_optimizer(self._model)
        criterion = nn.MSELoss()

        # --- validation split (only when early stopping is enabled) ---
        n = x.shape[0]
        # With n < 2 there is no way to hold out a validation sample and
        # still train; silently fall back to plain training.
        use_val = self.early_stopping is not None and self.val_ratio > 0 and n >= 2
        if use_val:
            idx = np.random.permutation(n)
            # At least 1 validation row, but always keep >= 1 training row.
            val_n = min(max(1, int(n * self.val_ratio)), n - 1)
            val_idx, trn_idx = idx[:val_n], idx[val_n:]
            val_x, val_y = x[val_idx], y[val_idx]
            x, y = x[trn_idx], y[trn_idx]

        dataset = TensorDataset(x, y)
        loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=True)

        best_val_loss = float("inf")
        patience_counter = 0
        best_state = None
        self._train_loss_history = []
        self._val_loss_history = []

        for epoch in range(1, self.epochs + 1):
            # --- training pass ---
            self._model.train()
            epoch_loss = 0.0
            count = 0
            for bx, by in loader:
                opt.zero_grad()
                pred = self._model(bx)
                loss = criterion(pred, by)
                loss.backward()
                opt.step()
                # Weight by batch size so the epoch average is exact even
                # when the last batch is smaller.
                epoch_loss += loss.item() * bx.shape[0]
                count += bx.shape[0]
            epoch_loss /= count
            self._train_loss_history.append(epoch_loss)

            # --- validation pass ---
            if use_val:
                self._model.eval()
                with torch.no_grad():
                    val_pred = self._model(val_x)
                    val_loss = criterion(val_pred, val_y).item()
                self._val_loss_history.append(val_loss)

                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    patience_counter = 0
                    # Snapshot weights so the best epoch can be restored.
                    best_state = {k: v.clone() for k, v in self._model.state_dict().items()}
                else:
                    patience_counter += 1
            else:
                val_loss = None

            # --- progress log (first epoch plus ~10 evenly spaced epochs) ---
            if self.verbose and (epoch % max(1, self.epochs // 10) == 0 or epoch == 1):
                msg = f"Epoch {epoch:>5d}/{self.epochs} train_loss={epoch_loss:.6f}"
                if val_loss is not None:
                    msg += f" val_loss={val_loss:.6f}"
                print(msg)

            # --- early stopping ---
            if use_val and patience_counter >= self.early_stopping:
                if self.verbose:
                    print(f"Early stopping at epoch {epoch} (patience={self.early_stopping})")
                break

        # Restore the best snapshot (no-op when early stopping was unused).
        if best_state is not None:
            self._model.load_state_dict(best_state)

        return self

    # ------------------------------------------------------------------
    # Inference (also callable via __call__)
    # ------------------------------------------------------------------
    def __call__(self, x: np.ndarray) -> np.ndarray:
        """
        Predict on new data.

        Parameters
        ----------
        x : array-like, shape (N, in_dim)

        Returns
        -------
        np.ndarray, shape (N, out_dim), or (N,) if fit() received 1-D targets
        """
        return self.predict(x)

    def predict(self, x: np.ndarray) -> np.ndarray:
        """Predict on new data; see __call__ for shapes."""
        if self._model is None:
            raise RuntimeError("先に fit() を実行してください。")
        self._model.eval()
        with torch.no_grad():
            pred = self._model(self._to_tensor(np.asarray(x)))
        out = pred.cpu().numpy()
        # Mirror the target shape seen at fit time (1-D in -> 1-D out).
        if self._squeeze_output and out.ndim == 2 and out.shape[1] == 1:
            out = out[:, 0]
        return out

    # ------------------------------------------------------------------
    # Convenience methods
    # ------------------------------------------------------------------
    @property
    def loss_history(self) -> dict[str, list[float]]:
        """Return the recorded loss history ("train" and, if any, "val")."""
        h = {"train": self._train_loss_history}
        if self._val_loss_history:
            h["val"] = self._val_loss_history
        return h

    def save(self, path: str) -> None:
        """Save model weights to *path*."""
        if self._model is None:
            raise RuntimeError("モデルがまだ構築されていません。")
        torch.save(self._model.state_dict(), path)

    def load(self, path: str, in_dim: int, out_dim: int) -> None:
        """Load previously saved weights; the architecture is rebuilt from
        this instance's current ``neurons``/``activation`` settings."""
        self._model = self._build_model(in_dim, out_dim)
        self._model.load_state_dict(torch.load(path, map_location=self.device))

    def summary(self) -> str:
        """Return a human-readable model structure summary with parameter counts."""
        if self._model is None:
            return "モデル未構築(fit() 実行前)"
        lines = [str(self._model)]
        total = sum(p.numel() for p in self._model.parameters())
        trainable = sum(p.numel() for p in self._model.parameters() if p.requires_grad)
        lines.append(f"Total params: {total:,}")
        lines.append(f"Trainable params: {trainable:,}")
        return "\n".join(lines)
@@ -0,0 +1,26 @@
1
+ Metadata-Version: 2.1
2
+ Name: deep00
3
+ Version: 0.0.0
4
+ Summary: Ultra simple deep learning tool
5
+ Home-page: https://github.co.jp/
6
+ Author: bib_inf
7
+ Author-email: contact.bibinf@gmail.com
8
+ License: CC0 v1.0
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Topic :: Software Development :: Libraries
11
+ Classifier: License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
12
+ Description-Content-Type: text/markdown
13
+
14
+ English description follows Japanese.
15
+
16
+ ---
17
+
18
+ ### 概要
19
+ 超簡単に深層学習が使えるツールです。
20
+
21
+ 説明は執筆中です
22
+
23
+ ---
24
+
25
+ ### Overview
26
+ Description is under construction.
@@ -0,0 +1,9 @@
1
+ README.md
2
+ setup.py
3
+ deep00/__init__.py
4
+ deep00/test.py
5
+ deep00.egg-info/PKG-INFO
6
+ deep00.egg-info/SOURCES.txt
7
+ deep00.egg-info/dependency_links.txt
8
+ deep00.egg-info/requires.txt
9
+ deep00.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ ezpip
@@ -0,0 +1 @@
1
+ deep00
deep00-0.0.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
deep00-0.0.0/setup.py ADDED
@@ -0,0 +1,29 @@
1
+
2
from setuptools import setup
# Build the distributable package with ezpip (third-party packaging helper).
import ezpip

# ezpip.packager stages the contents of ./_develop_deep00/ and exposes the
# discovered package list and README-derived long description for setup().
with ezpip.packager(develop_dir = "./_develop_deep00/") as p:
    setup(
        name = "deep00",
        version = "0.0.0",
        description = "Ultra simple deep learning tool",
        author = "bib_inf",
        author_email = "contact.bibinf@gmail.com",
        url = "https://github.co.jp/",
        packages = p.packages,
        install_requires = ["ezpip"],
        long_description = p.long_description,
        long_description_content_type = "text/markdown",
        license = "CC0 v1.0",
        classifiers = [
            "Programming Language :: Python :: 3",
            "Topic :: Software Development :: Libraries",
            "License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication"
        ],
        # entry_points = """
        # [console_scripts]
        # py6 = py6:console_command
        # """
    )