morphlog-vp 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- morphlog_vp-1.0.0/PKG-INFO +12 -0
- morphlog_vp-1.0.0/morphlog/__init__.py +9 -0
- morphlog_vp-1.0.0/morphlog/formatters.py +84 -0
- morphlog_vp-1.0.0/morphlog/py.typed +0 -0
- morphlog_vp-1.0.0/morphlog/train_logger.py +296 -0
- morphlog_vp-1.0.0/morphlog_vp.egg-info/PKG-INFO +12 -0
- morphlog_vp-1.0.0/morphlog_vp.egg-info/SOURCES.txt +10 -0
- morphlog_vp-1.0.0/morphlog_vp.egg-info/dependency_links.txt +1 -0
- morphlog_vp-1.0.0/morphlog_vp.egg-info/requires.txt +1 -0
- morphlog_vp-1.0.0/morphlog_vp.egg-info/top_level.txt +1 -0
- morphlog_vp-1.0.0/pyproject.toml +33 -0
- morphlog_vp-1.0.0/setup.cfg +4 -0
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: morphlog-vp
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: Structured training logger with per-expert loss display.
|
|
5
|
+
Author: F000NK, Voluntas Progressus
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
11
|
+
Requires-Python: >=3.14
|
|
12
|
+
Requires-Dist: vpterm-vp>=1.0.0
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""Structured training logger with per-expert loss display.
|
|
2
|
+
|
|
3
|
+
Uses vpterm for beautiful terminal rendering.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from morphlog.train_logger import TrainLogger
|
|
7
|
+
from morphlog.formatters import format_loss_breakdown, format_expert_losses
|
|
8
|
+
|
|
9
|
+
__version__ = "1.0.0"
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
"""Formatting helpers for training metrics and per-expert losses."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from vpterm.style import Style
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def format_loss_value(value: float, precision: int = 4) -> str:
    """Render *value* as a fixed-precision string, colored by magnitude.

    Hotter colors indicate larger losses; values at or below 1.0 render
    bright green.
    """
    rendered = f"{value:.{precision}f}"
    # Ordered (threshold, colorizer) pairs, highest severity first.
    bands = (
        (10.0, Style.bright_red),
        (5.0, Style.yellow),
        (1.0, Style.bright_yellow),
    )
    for threshold, colorize in bands:
        if value > threshold:
            return colorize(rendered)
    return Style.bright_green(rendered)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def format_loss_breakdown(losses: dict[str, float], precision: int = 4) -> str:
    """Render a flat loss dict as a grouped, multi-line display string.

    Recognized keys: 'total', 'final', 'universal', plus prefixed
    per-expert entries such as 'family/slavic' and 'language/rus'.

    Returns one line per group, joined with newlines.
    """

    def colored(value: float) -> str:
        return format_loss_value(value, precision)

    def expert_pair(key: str, value: float) -> str:
        # 'family/slavic' -> label 'slavic' plus its colored loss.
        return f"{Style.label(key.split('/', 1)[1])}{Style.dim('=')}{colored(value)}"

    rendered: list[str] = []

    total = losses.get("total")
    if total is not None:
        # 'final' is only shown alongside 'total' on the headline row.
        head = [f" {Style.metric_name('total')}{Style.dim('=')}{colored(total)}"]
        final = losses.get("final")
        if final is not None:
            head.append(f"{Style.metric_name('final')}{Style.dim('=')}{colored(final)}")
        rendered.append(" ".join(head))

    universal = losses.get("universal")
    if universal is not None:
        rendered.append(f" {Style.metric_name('universal')}{Style.dim('=')}{colored(universal)}")

    # Per-expert groups; the trailing space after 'family' keeps its column
    # aligned with the wider 'language' header.
    groups = (
        ("family/", f" {Style.cyan('family')} "),
        ("language/", f" {Style.cyan('language')}"),
    )
    for prefix, header in groups:
        entries = sorted((k, v) for k, v in losses.items() if k.startswith(prefix))
        if entries:
            rendered.append(" ".join([header] + [expert_pair(k, v) for k, v in entries]))

    return "\n".join(rendered)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def format_expert_losses(expert_losses: dict[str, float], precision: int = 4) -> str:
    """Render per-expert losses on a single compact line, sorted by name.

    Args:
        expert_losses: Mapping of expert name to loss, e.g.
            {"slavic": 5.12, "romance": 5.83}.
        precision: Decimal places for each loss value.
    """
    return " ".join(
        f"{Style.label(name)}{Style.dim('=')}{format_loss_value(value, precision)}"
        for name, value in sorted(expert_losses.items())
    )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def format_lr(learning_rate: float) -> str:
    """Render the learning rate in 2-digit scientific notation (e.g. '1.00e-04')."""
    rendered = f"{learning_rate:.2e}"
    return Style.metric_value(rendered)
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def format_improvement(current: float, best: float, precision: int = 4) -> str:
    """Format *current* relative to *best*, marking improvement with a star.

    A value at or below *best* counts as an improvement (ties included)
    and is rendered with the 'improved' style plus a star; otherwise it
    falls back to magnitude-colored formatting.

    Args:
        current: The value being reported.
        best: The best value seen so far.
        precision: Decimal places (added for consistency with the other
            formatters in this module; default preserves prior behavior).
    """
    formatted = f"{current:.{precision}f}"
    if current <= best:
        return Style.improved(f"{formatted} ★")
    return format_loss_value(current, precision)
|
|
File without changes
|
|
@@ -0,0 +1,296 @@
|
|
|
1
|
+
"""Training logger — structured output for CELMoE training loops.
|
|
2
|
+
|
|
3
|
+
Provides:
|
|
4
|
+
- Training header with model/data summary
|
|
5
|
+
- Per-batch progress with per-expert loss breakdown
|
|
6
|
+
- Epoch summaries with dev evaluation
|
|
7
|
+
- Beautiful formatting via vpterm
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import math
|
|
13
|
+
|
|
14
|
+
from vpterm.style import Style
|
|
15
|
+
from vpterm.panel import Panel
|
|
16
|
+
from vpterm.progress import ProgressBar, format_duration, format_number
|
|
17
|
+
from vpterm.terminal import Terminal, get_terminal
|
|
18
|
+
from vpterm.kv import KeyValue
|
|
19
|
+
|
|
20
|
+
from morphlog.formatters import format_loss_value, format_lr, format_improvement
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class TrainLogger:
    """Structured logger for CELMoE training sessions.

    Renders a configuration header panel, per-batch progress lines with
    per-expert loss breakdowns, and epoch/interrupt/completion summaries
    through a vpterm Terminal.

    Usage:
        logger = TrainLogger()
        logger.training_header(...)
        logger.batch_update(...)
        logger.epoch_end(...)
    """

    def __init__(self, terminal: Terminal | None = None) -> None:
        # Fall back to the process-wide terminal when none is injected.
        self.terminal = terminal or get_terminal()

    def training_header(
        self,
        *,  # keyword-only: the list is long; positional calls would be unreadable
        version: str,
        model_family: str,  # NOTE(review): accepted but never referenced below — confirm intentionally unused
        model_series: str,
        stage: str,
        device: str,
        amp: bool,
        train_rows: int,
        dev_rows: int,
        num_languages: int,
        num_families: int,
        families: list[str],
        languages: list[str],
        max_len: int,
        max_features: int,
        d_model: int,
        dim_ff: int,
        num_heads: int,
        num_kv_heads: int,
        universal_layers: int,
        family_layers: int,
        language_layers: int,
        total_params: int,
        trainable_params: int,
        epochs: int,
        batch_size: int,
        train_batches: int,
        dev_batches: int,
        total_steps: int,
        warmup_steps: int,
        lr: float,
        weight_decay: float,
        betas: tuple[float, float],
        output_dir: str,
        loss_weights: dict[str, float],
    ) -> None:
        """Print the full training configuration as a sectioned panel.

        Sections: Run, Data, Model, Training, Optimizer, Loss Weights.
        Writes the rendered panel followed by a blank line.
        """
        panel = Panel(title=f"MorphFormer {model_series} — CELMoE Training")

        # Run: identity and environment of this training run.
        panel.add_section("Run")
        panel.add_kv("version", version)
        panel.add_kv("stage", Style.bold(stage))
        panel.add_kv("device", f"{device} {'AMP' if amp else 'FP32'}")
        panel.add_kv("output", output_dir)

        # Data: corpus sizes and the language/family inventory.
        panel.add_blank()
        panel.add_section("Data")
        panel.add_kv("train", f"{format_number(train_rows)} rows")
        panel.add_kv("dev", f"{format_number(dev_rows)} rows")
        panel.add_kv("languages", f"{num_languages} {Style.dim(' '.join(languages))}")
        panel.add_kv("families", f"{num_families} {Style.dim(' '.join(families))}")
        panel.add_kv("max length", str(max_len))
        panel.add_kv("max features", str(max_features))

        # Model: architecture hyperparameters and parameter counts.
        panel.add_blank()
        panel.add_section("Model")
        panel.add_kv("d_model", str(d_model))
        panel.add_kv("dim_ff", str(dim_ff))
        panel.add_kv("heads", f"{num_heads} query {num_kv_heads} kv")
        panel.add_kv("layers", f"{universal_layers} universal {family_layers} family {language_layers} language")
        panel.add_kv("parameters", f"{format_number(total_params)} total {format_number(trainable_params)} trainable")

        # Training: schedule and batch geometry.
        panel.add_blank()
        panel.add_section("Training")
        panel.add_kv("epochs", str(epochs))
        panel.add_kv("batch size", str(batch_size))
        panel.add_kv("batches", f"{format_number(train_batches)} train {format_number(dev_batches)} dev")
        panel.add_kv("steps", f"{format_number(total_steps)} total {format_number(warmup_steps)} warmup")

        # Optimizer: hyperparameters shown verbatim (no formatting applied).
        panel.add_blank()
        panel.add_section("Optimizer")
        panel.add_kv("lr", str(lr))
        panel.add_kv("weight decay", str(weight_decay))
        panel.add_kv("betas", str(list(betas)))

        # Loss Weights: one row per component, sorted for stable output.
        panel.add_blank()
        panel.add_section("Loss Weights")
        for name, weight in sorted(loss_weights.items()):
            panel.add_kv(name, f"{weight:.2f}")

        self.terminal.writeln(panel.render())
        self.terminal.blank_line()

    def epoch_start(self, epoch: int, total_epochs: int, train_batches: int, dev_batches: int) -> None:
        """Log an epoch header with train/dev batch counts."""
        self.terminal.header(f"Epoch {epoch}/{total_epochs}")
        self.terminal.writeln(
            f" {Style.dim('train')} {format_number(train_batches)} batches "
            f"{Style.dim('dev')} {format_number(dev_batches)} batches"
        )

    def batch_update(
        self,
        *,
        epoch: int,
        total_epochs: int,
        batch: int,
        total_batches: int,
        step: int,
        total_steps: int,
        learning_rate: float,
        losses: dict[str, float],
        elapsed_seconds: float,
        total_completed_batches: int,
        total_train_batches: int,
        best_dev_loss: float,
    ) -> None:
        """Log a training batch update with per-expert losses.

        Writes a multi-line record: position/lr line, headline losses,
        progress bar, optional family/language expert lines, and timing.

        Args:
            losses: Dict with keys like 'total', 'final', 'universal',
                'family/slavic', 'language/rus', etc.
            elapsed_seconds: Wall time spent so far across all epochs.
            total_completed_batches: Batches finished so far (ETA basis).
            total_train_batches: Total batches across the whole run.
            best_dev_loss: Best dev loss so far; +inf means "none yet".
        """
        # ETA from the observed average seconds per batch; max(1, ...)
        # guards against division by zero on the very first batch.
        seconds_per_batch = elapsed_seconds / max(1, total_completed_batches)
        remaining_batches = max(0, total_train_batches - total_completed_batches)
        remaining_seconds = seconds_per_batch * remaining_batches

        # Progress bar tracks position within the current epoch only.
        progress = ProgressBar(total=total_batches, width=20)
        progress_line = f" {progress.render_with_label(batch, 'progress')}"

        # Headline position line: epoch/batch/step counters plus lr.
        line = KeyValue(separator=" ")
        line.add("epoch", f"{epoch}/{total_epochs}")
        line.add("batch", f"{batch}/{total_batches}")
        line.add("step", f"{step}/{total_steps}")
        line.add_raw(f"{Style.metric_name('lr')}{Style.dim('=')}{format_lr(learning_rate)}")

        main_line = line.render()

        # Scalar losses, shown only when present in the dict.
        loss_parts: list[str] = []
        total_loss = losses.get("total")
        final_loss = losses.get("final")
        universal_loss = losses.get("universal")
        if total_loss is not None:
            loss_parts.append(f"{Style.metric_name('loss')}{Style.dim('=')}{format_loss_value(total_loss)}")
        if final_loss is not None:
            loss_parts.append(f"{Style.metric_name('final')}{Style.dim('=')}{format_loss_value(final_loss)}")
        if universal_loss is not None:
            loss_parts.append(f"{Style.metric_name('universal')}{Style.dim('=')}{format_loss_value(universal_loss)}")
        loss_line = " ".join(loss_parts)

        # Per-expert losses, keyed by 'family/<name>' and 'language/<name>';
        # sorted() gives deterministic display order.
        family_losses = {key: val for key, val in sorted(losses.items()) if key.startswith("family/")}
        language_losses = {key: val for key, val in sorted(losses.items()) if key.startswith("language/")}

        family_line = ""
        if family_losses:
            # Trailing space after 'family' aligns it with the wider 'language' header.
            family_parts = [f" {Style.cyan('family')} "]
            for key, val in family_losses.items():
                expert_name = key.split("/", 1)[1]
                family_parts.append(f"{Style.label(expert_name)}{Style.dim('=')}{format_loss_value(val)}")
            family_line = " ".join(family_parts)

        language_line = ""
        if language_losses:
            lang_parts = [f" {Style.cyan('language')}"]
            for key, val in language_losses.items():
                expert_name = key.split("/", 1)[1]
                lang_parts.append(f"{Style.label(expert_name)}{Style.dim('=')}{format_loss_value(val)}")
            language_line = " ".join(lang_parts)

        # best_dev_loss is +inf before the first dev evaluation.
        best_display = "n/a" if math.isinf(best_dev_loss) else f"{best_dev_loss:.4f}"
        time_line = (
            f" {Style.dim('elapsed')}{Style.dim('=')}{Style.timestamp(format_duration(elapsed_seconds))}"
            f" {Style.dim('remaining')}{Style.dim('=')}{Style.timestamp(format_duration(remaining_seconds))}"
            f" {Style.dim('best dev')}{Style.dim('=')}{Style.metric_value(best_display)}"
        )

        # Assemble the record; expert lines are omitted when empty.
        output = f" {main_line}\n {loss_line}"
        output += f"\n{progress_line}"
        if family_line:
            output += f"\n{family_line}"
        if language_line:
            output += f"\n{language_line}"
        output += f"\n{time_line}"

        self.terminal.writeln(output)

    def epoch_end(
        self,
        *,
        epoch: int,
        total_epochs: int,
        dev_loss: float,
        best_dev_loss: float,
        improved: bool,
        epoch_elapsed: float,
        total_elapsed: float,
        remaining_seconds: float,
        checkpoint_path: str | None,
        dev_expert_losses: dict[str, float] | None = None,
    ) -> None:
        """Log epoch completion with dev evaluation results.

        Args:
            improved: Whether this epoch set a new best dev loss; chooses
                the checkmark icon and star-marked dev display.
            checkpoint_path: Saved checkpoint path, or None if not saved.
            dev_expert_losses: Optional per-expert dev losses with
                'family/...' and 'language/...' keys.
        """
        self.terminal.blank_line()

        status_icon = Style.success("✓") if improved else Style.dim("·")
        dev_display = format_improvement(dev_loss, best_dev_loss) if improved else format_loss_value(dev_loss)
        best_display = format_loss_value(best_dev_loss)

        self.terminal.writeln(
            f" {status_icon} {Style.bold(f'Epoch {epoch}/{total_epochs}')} "
            f" {Style.dim('dev loss')}{Style.dim('=')}{dev_display}"
            f" {Style.dim('best')}{Style.dim('=')}{best_display}"
            f" {Style.dim('epoch time')}{Style.dim('=')}{Style.timestamp(format_duration(epoch_elapsed))}"
            f" {Style.dim('elapsed')}{Style.dim('=')}{Style.timestamp(format_duration(total_elapsed))}"
            f" {Style.dim('remaining')}{Style.dim('=')}{Style.timestamp(format_duration(remaining_seconds))}"
        )

        if dev_expert_losses:
            # Same family/language grouping as batch_update, prefixed 'dev'.
            family_losses = {k: v for k, v in dev_expert_losses.items() if k.startswith("family/")}
            language_losses = {k: v for k, v in dev_expert_losses.items() if k.startswith("language/")}
            if family_losses:
                parts = [f" {Style.cyan('dev family')} "]
                for key, val in sorted(family_losses.items()):
                    name = key.split("/", 1)[1]
                    parts.append(f"{Style.label(name)}{Style.dim('=')}{format_loss_value(val)}")
                self.terminal.writeln(" ".join(parts))
            if language_losses:
                parts = [f" {Style.cyan('dev language')}"]
                for key, val in sorted(language_losses.items()):
                    name = key.split("/", 1)[1]
                    parts.append(f"{Style.label(name)}{Style.dim('=')}{format_loss_value(val)}")
                self.terminal.writeln(" ".join(parts))

        if checkpoint_path:
            self.terminal.writeln(f" {Style.dim('checkpoint')} {Style.green(checkpoint_path)}")

        self.terminal.blank_line()

    def training_interrupted(
        self,
        epoch: int,
        total_epochs: int,
        batch: int,
        total_batches: int,
        step: int,
        total_steps: int,
        elapsed_seconds: float,
        checkpoint_path: str,
    ) -> None:
        """Log training interruption with current position and the rescue checkpoint path."""
        self.terminal.blank_line()
        self.terminal.writeln(
            f" {Style.warning('■')} {Style.bold('Training interrupted')}"
            f" epoch={epoch}/{total_epochs} batch={batch}/{total_batches}"
            f" step={step}/{total_steps}"
            f" elapsed={Style.timestamp(format_duration(elapsed_seconds))}"
        )
        self.terminal.writeln(f" {Style.dim('checkpoint')} {Style.yellow(checkpoint_path)}")
        self.terminal.blank_line()

    def training_complete(self, best_path: str, best_dev_loss: float, total_elapsed: float) -> None:
        """Log training completion: best dev loss, total time, and best checkpoint path."""
        self.terminal.blank_line()
        self.terminal.writeln(
            f" {Style.success('✓')} {Style.bold('Training complete')}"
            f" best dev={Style.improved(f'{best_dev_loss:.4f}')}"
            f" time={Style.timestamp(format_duration(total_elapsed))}"
        )
        self.terminal.writeln(f" {Style.dim('best checkpoint')} {Style.green(best_path)}")
        self.terminal.blank_line()
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: morphlog-vp
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: Structured training logger with per-expert loss display.
|
|
5
|
+
Author: F000NK, Voluntas Progressus
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
11
|
+
Requires-Python: >=3.14
|
|
12
|
+
Requires-Dist: vpterm-vp>=1.0.0
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
pyproject.toml
|
|
2
|
+
morphlog/__init__.py
|
|
3
|
+
morphlog/formatters.py
|
|
4
|
+
morphlog/py.typed
|
|
5
|
+
morphlog/train_logger.py
|
|
6
|
+
morphlog_vp.egg-info/PKG-INFO
|
|
7
|
+
morphlog_vp.egg-info/SOURCES.txt
|
|
8
|
+
morphlog_vp.egg-info/dependency_links.txt
|
|
9
|
+
morphlog_vp.egg-info/requires.txt
|
|
10
|
+
morphlog_vp.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
vpterm-vp>=1.0.0
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
morphlog
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=69.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "morphlog-vp"
|
|
7
|
+
version = "1.0.0"
|
|
8
|
+
authors = [
|
|
9
|
+
{ name = "F000NK" },
|
|
10
|
+
{ name = "Voluntas Progressus" },
|
|
11
|
+
]
|
|
12
|
+
description = "Structured training logger with per-expert loss display."
|
|
13
|
+
license = "MIT"
|
|
14
|
+
requires-python = ">=3.14"
|
|
15
|
+
classifiers = [
|
|
16
|
+
"Programming Language :: Python :: 3",
|
|
17
|
+
"Programming Language :: Python :: 3.14",
|
|
18
|
+
"Operating System :: OS Independent",
|
|
19
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
20
|
+
]
|
|
21
|
+
dependencies = [
|
|
22
|
+
"vpterm-vp>=1.0.0",
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
[tool.setuptools.packages.find]
|
|
26
|
+
include = ["morphlog*"]
|
|
27
|
+
|
|
28
|
+
[tool.setuptools.package-data]
|
|
29
|
+
morphlog = ["py.typed"]
|
|
30
|
+
|
|
31
|
+
[tool.ruff]
|
|
32
|
+
line-length = 120
|
|
33
|
+
target-version = "py314"
|