mlx_tracker-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlx/__init__.py +8 -0
- mlx/cli.py +46 -0
- mlx/commands/__init__.py +1 -0
- mlx/commands/compare.py +346 -0
- mlx/commands/export.py +264 -0
- mlx/commands/init.py +179 -0
- mlx/commands/log.py +174 -0
- mlx/commands/ls.py +218 -0
- mlx/commands/run.py +306 -0
- mlx/commands/status.py +124 -0
- mlx/core/__init__.py +7 -0
- mlx/core/experiment.py +43 -0
- mlx/core/metrics.py +149 -0
- mlx/core/params.py +150 -0
- mlx/core/run.py +162 -0
- mlx/storage/__init__.py +1 -0
- mlx/storage/db.py +121 -0
- mlx/storage/filesystem.py +232 -0
- mlx/utils/__init__.py +1 -0
- mlx/utils/config.py +0 -0
- mlx/utils/display.py +133 -0
- mlx_tracker-0.1.0.dist-info/METADATA +50 -0
- mlx_tracker-0.1.0.dist-info/RECORD +26 -0
- mlx_tracker-0.1.0.dist-info/WHEEL +4 -0
- mlx_tracker-0.1.0.dist-info/entry_points.txt +2 -0
- mlx_tracker-0.1.0.dist-info/licenses/LICENSE +21 -0
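Note: entry_points.txt (+2 lines) is listed but not expanded below. Given that the wheel installs an `mlx` command which calls `app` in mlx/cli.py, those two lines are most likely the standard console-script wiring — an assumption from the file layout, not confirmed by this diff:

    [console_scripts]
    mlx = mlx.cli:app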
mlx/__init__.py
ADDED
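The eight added lines of mlx/__init__.py are not expanded in this diff. Since mlx/cli.py imports `__version__` from this module, it must define at least a version string — a minimal hypothetical sketch, not the actual file:

    # Hypothetical — the real 8 lines are not shown in this diff.
    __version__ = "0.1.0"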
mlx/cli.py
ADDED
@@ -0,0 +1,46 @@
+"""
+mlx/cli.py — The main entry point for the mlx CLI tool.
+
+When the user types `mlx` in their terminal, Python runs this file
+and calls `app`. Everything starts here.
+"""
+
+
+import typer
+from rich.console import Console
+
+from mlx import __version__
+
+app = typer.Typer(
+    help="[bold cyan]MLX[/bold cyan] — Local ML Experiment Manager.\n\n"
+    "Track experiments, runs, params and metrics. 100% local. No server needed.",
+    no_args_is_help=True,  # Show help when user types just `mlx`
+    rich_markup_mode="rich",  # Allow [bold], [cyan] etc. in help text
+    add_completion=True,
+)
+
+console = Console()
+
+from mlx.commands.init import init
+from mlx.commands import run as run_cmd
+from mlx.commands import log as log_cmd
+from mlx.commands import ls as ls_cmd
+from mlx.commands import status as status_cmd
+from mlx.commands import compare as compare_cmd
+from mlx.commands import export as export_cmd
+
+app.command("init", help="Initialize a new mlx project")(init)
+app.add_typer(run_cmd.app, name="run", help="Manage experiment runs")
+app.add_typer(log_cmd.app, name="log", help="Log metrics, params and notes")
+app.add_typer(ls_cmd.app, name="ls", help="List all runs")
+app.add_typer(status_cmd.app, name="status", help="Show the active run")
+app.add_typer(compare_cmd.app, name="compare", help="Compare runs side by side")
+app.add_typer(export_cmd.app, name="export", help="Export runs to CSV or JSON")
+
+@app.command("version")
+def version():
+    """Print the installed mlx version."""
+    console.print(f"mlx [bold cyan]v{__version__}[/bold cyan]")
+
+if __name__ == "__main__":
+    app()
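A quick way to sanity-check this wiring without installing the package system-wide is Typer's bundled test runner. This is a sketch that assumes only what cli.py itself defines (`app` and the `version` command):

    # Minimal smoke test for the CLI wiring — illustrative, not part of the wheel.
    from typer.testing import CliRunner

    from mlx.cli import app

    runner = CliRunner()
    result = runner.invoke(app, ["version"])
    assert result.exit_code == 0
    print(result.output)  # expected: something like "mlx v0.1.0"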
mlx/commands/__init__.py
ADDED
@@ -0,0 +1 @@
+# CLI commands — one file per command group
mlx/commands/compare.py
ADDED
@@ -0,0 +1,346 @@
+# `mlx compare` — the most important command: see exactly which model won, and why.
+# Usage: mlx compare <run-id-1> <run-id-2> [--params-only]
+
+
+import typer
+from rich.console import Console
+from rich.table import Table
+from rich.text import Text
+from rich import box
+
+from mlx.core.run import RunManager
+from mlx.core.metrics import MetricManager
+from mlx.core.params import ParamManager
+from mlx.utils.display import error
+
+app = typer.Typer(help="Compare two or more runs side by side")
+
+console = Console()
+
+def _print_header(runs: list):
+    console.print(
+        f"[bold white]Comparing {len(runs)} runs[/bold white]\n"
+    )
+
+    for i, run in enumerate(runs):
+        # Last item gets └──, others get ├──
+        prefix = "└──" if i == len(runs) - 1 else "├──"
+
+        status_color = {
+            "done": "green",
+            "running": "yellow",
+            "failed": "red",
+        }.get(run.status, "white")
+
+        duration = ""
+        if run.duration_sec:
+            m, s = divmod(int(run.duration_sec), 60)
+            duration = f"{m}m {s}s" if m > 0 else f"{s}s"
+            duration = f" [dim]{duration}[/dim]"
+
+        console.print(
+            f"  {prefix} [bold white]{run.name}[/bold white]"
+            f" [{status_color}]{run.status}[/{status_color}]"
+            f"{duration}"
+            f" [dim]{run.run_id}[/dim]"
+        )
+
+    console.print()
+
+def _print_params_table(
+    runs: list,
+    params_by_run: dict,
+    show_all: bool = False,
+):
+
+    # Collect all unique param keys across all runs
+    all_keys = set()
+    for params in params_by_run.values():
+        all_keys.update(params.keys())
+    all_keys = sorted(all_keys)
+
+    if not all_keys:
+        console.print("[dim]  No params logged for these runs.[/dim]\n")
+        return
+
+    # Figure out which params actually changed
+    changed_keys = set()
+    for key in all_keys:
+        values = [
+            params_by_run[run.run_id].get(key, "—")
+            for run in runs
+        ]
+        # If not all values are the same → it changed
+        if len(set(values)) > 1:
+            changed_keys.add(key)
+
+    # Filter to only changed params unless show_all
+    display_keys = all_keys if show_all else [
+        k for k in all_keys if k in changed_keys
+    ]
+
+    if not display_keys:
+        console.print(
+            "[dim]  All params are identical across runs.[/dim]"
+        )
+        if not show_all:
+            console.print(
+                "[dim]  Use [cyan]--all-params[/cyan] "
+                "to see them anyway.[/dim]"
+            )
+        console.print()
+        return
+
+    # ── Build the table ────────────────────────
+    table = Table(
+        box=box.SIMPLE_HEAD,
+        border_style="dim",
+        header_style="bold cyan",
+        show_edge=True,
+        pad_edge=True,
+    )
+
+    # First column: param name
+    table.add_column(
+        "Param",
+        style="dim",
+        no_wrap=True,
+        min_width=18,
+    )
+
+    # One column per run
+    for run in runs:
+        table.add_column(
+            run.name,
+            justify="right",
+            style="white",
+            min_width=12,
+        )
+
+    # Last column: changed indicator
+    table.add_column("", justify="center", min_width=6)
+
+    # ── Add rows ───────────────────────────────
+    for key in display_keys:
+        values = [
+            params_by_run[run.run_id].get(key, "—")
+            for run in runs
+        ]
+
+        is_changed = key in changed_keys
+
+        # Style each cell
+        styled_values = []
+        for v in values:
+            if v == "—":
+                # Not logged for this run
+                styled_values.append(Text("—", style="dim"))
+            elif is_changed:
+                # Changed — highlight yellow
+                styled_values.append(Text(str(v), style="bold yellow"))
+            else:
+                styled_values.append(Text(str(v), style="white"))
+
+        # Changed indicator
+        indicator = Text("← diff", style="dim yellow") if is_changed else Text("")
+
+        table.add_row(
+            key,
+            *styled_values,
+            indicator,
+        )
+
+    console.print("[bold]Params[/bold]")
+    console.print(table)
+
+    # Note about hidden unchanged params
+    hidden = len(all_keys) - len(display_keys)
+    if hidden > 0 and not show_all:
+        console.print(
+            f"  [dim]{hidden} unchanged param(s) hidden — "
+            f"use [cyan]--all-params[/cyan] to show[/dim]"
+        )
+    console.print()
+
+def _print_metrics_table(runs: list, metrics_by_run: dict):
+
+    # Collect all unique metric keys
+    all_keys = set()
+    for metrics in metrics_by_run.values():
+        all_keys.update(metrics.keys())
+    all_keys = sorted(all_keys)
+
+    if not all_keys:
+        console.print("[dim]  No metrics logged for these runs.[/dim]\n")
+        return
+
+    # ── Decide which direction is "better" ────
+    # Lower is better for these metric name patterns
+    lower_is_better_patterns = [
+        "loss", "error", "mse", "mae", "rmse",
+        "mape", "logloss", "cross_entropy",
+    ]
+
+    def lower_is_better(key: str) -> bool:
+        key_lower = key.lower()
+        return any(p in key_lower for p in lower_is_better_patterns)
+
+    # ── Build the table ────────────────────────
+    table = Table(
+        box=box.SIMPLE_HEAD,
+        border_style="dim",
+        header_style="bold cyan",
+        show_edge=True,
+        pad_edge=True,
+    )
+
+    # First column: metric name
+    table.add_column(
+        "Metric",
+        style="dim",
+        no_wrap=True,
+        min_width=18,
+    )
+
+    # One column per run
+    for run in runs:
+        table.add_column(
+            run.name,
+            justify="right",
+            style="white",
+            min_width=12,
+        )
+
+    # Last column: difference
+    table.add_column(
+        "diff",
+        justify="right",
+        style="dim",
+        min_width=10,
+    )
+
+    # ── Add rows ───────────────────────────────
+    for key in all_keys:
+
+        # Get values for all runs — None if not logged
+        values = [
+            metrics_by_run[run.run_id].get(key)
+            for run in runs
+        ]
+
+        # Filter out None for calculations
+        real_values = [v for v in values if v is not None]
+
+        if not real_values:
+            continue
+
+        # Find best value
+        if lower_is_better(key):
+            best_val = min(real_values)
+            worst_val = max(real_values)
+        else:
+            best_val = max(real_values)
+            worst_val = min(real_values)
+
+        # Calculate difference
+        if len(real_values) >= 2:
+            diff = best_val - worst_val
+            # Format diff with sign (best − worst, so negative when lower is better)
+            diff_str = f"{diff:+.4f}"
+            diff_color = "green" if diff > 0 else "red" if diff < 0 else "dim"
+            diff_display = Text(diff_str, style=diff_color)
+        else:
+            diff_display = Text("—", style="dim")
+
+        # Style each cell
+        styled_values = []
+        for v in values:
+            if v is None:
+                styled_values.append(Text("—", style="dim"))
+            elif v == best_val and len(real_values) > 1:
+                # Best value — green and bold
+                styled_values.append(
+                    Text(f"{v:.4f}", style="bold green")
+                )
+            else:
+                styled_values.append(Text(f"{v:.4f}", style="white"))
+
+        table.add_row(
+            key,
+            *styled_values,
+            diff_display,
+        )
+
+    console.print("[bold]Metrics[/bold]")
+    console.print(table)
+    console.print(
+        "  [dim][bold green]green[/bold green] = best value "
+        "· diff = best − worst[/dim]"
+    )
+    console.print()
+
+@app.callback(invoke_without_command=True)
+def compare(
+    run_ids: list[str] = typer.Argument(
+        ...,
+        help="Two or more run IDs to compare"
+    ),
+    params_only: bool = typer.Option(
+        False,
+        "--params-only", "-p",
+        help="Show only params, no metrics"
+    ),
+    metrics_only: bool = typer.Option(
+        False,
+        "--metrics-only", "-m",
+        help="Show only metrics, no params"
+    ),
+    all_params: bool = typer.Option(
+        False,
+        "--all-params",
+        help="Show all params including unchanged ones"
+    ),
+):
+    # Validate that we were given at least two run IDs
+    if len(run_ids) < 2:
+        error("Please provide at least 2 run IDs to compare.")
+        console.print()
+        console.print(
+            "  Usage: [cyan]mlx compare run-id-1 run-id-2[/cyan]"
+        )
+        console.print(
+            "  Get run IDs from: [cyan]mlx ls[/cyan]"
+        )
+        raise typer.Exit(1)
+
+    # Fetch all runs
+    runs = []
+    for rid in run_ids:
+        run = RunManager.get(rid)
+        if not run:
+            error(f"Run not found: [bold]{rid}[/bold]")
+            console.print(
+                "  Check your run IDs with: [cyan]mlx ls[/cyan]"
+            )
+            raise typer.Exit(1)
+
+        runs.append(run)
+
+    # Fetch params and metrics for each run
+    params_by_run = {r.run_id: ParamManager.as_dict(r.run_id) for r in runs}
+    metrics_by_run = {
+        r.run_id: {m.key: m.value for m in MetricManager.get_latest(r.run_id)}
+        for r in runs
+    }
+
+    console.print()
+    _print_header(runs)
+
+    if not metrics_only:
+        _print_params_table(runs, params_by_run, show_all=all_params)
+
+    if not params_only:
+        _print_metrics_table(runs, metrics_by_run)
+
+    console.print()
+
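The green/best highlighting above hinges entirely on the substring heuristic in `lower_is_better`. Restated as a standalone snippet (same patterns as in the file; the metric names are illustrative):

    patterns = [
        "loss", "error", "mse", "mae", "rmse",
        "mape", "logloss", "cross_entropy",
    ]

    def lower_is_better(key: str) -> bool:
        # Substring match, so "val_loss" and "rmse_test" both count.
        return any(p in key.lower() for p in patterns)

    print(lower_is_better("val_loss"))  # True  → min() is treated as best
    print(lower_is_better("accuracy"))  # False → max() is treated as best

One consequence of this default: a lower-is-better metric whose name matches none of the patterns (e.g. "perplexity") would have its maximum highlighted as best.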
mlx/commands/export.py
ADDED
@@ -0,0 +1,264 @@
+"""
+Export runs to CSV or JSON.
+Usage:
+    mlx export                          → CSV to stdout
+    mlx export --format json            → JSON to stdout
+    mlx export --out runs.csv           → save to file
+    mlx export --out runs.json --format json
+    mlx export --experiment fraud      → filter by experiment
+    mlx export --status done            → filter by status
+"""
+
+import json
+import csv
+import io
+import typer
+from pathlib import Path
+from rich.console import Console
+
+from mlx.core.run import RunManager
+from mlx.core.metrics import MetricManager
+from mlx.core.params import ParamManager
+from mlx.utils.display import success, error, warn
+
+app = typer.Typer(help="Export runs to CSV or JSON.")
+console = Console()
+
+
+@app.callback(invoke_without_command=True)
+def export(
+    format: str = typer.Option(
+        "csv",
+        "--format", "-f",
+        help="Export format: csv or json"
+    ),
+    out: str = typer.Option(
+        None,
+        "--out", "-o",
+        help="Output file path, e.g. runs.csv"
+    ),
+    experiment: str = typer.Option(
+        None,
+        "--experiment", "-e",
+        help="Filter by experiment name"
+    ),
+    status: str = typer.Option(
+        None,
+        "--status", "-s",
+        help="Filter by status: done, running, failed"
+    ),
+    limit: int = typer.Option(
+        None,
+        "--limit", "-l",
+        help="Max number of runs to export"
+    ),
+    latest_metrics: bool = typer.Option(
+        True,
+        "--latest-metrics/--all-metrics",
+        help="Export only the latest metric per key (default) or all steps"
+    ),
+):
+
+    # ── Validate format
+    if format not in ("csv", "json"):
+        error(f"Unknown format: '{format}'")
+        console.print("  Supported formats: [cyan]csv[/cyan], [cyan]json[/cyan]")
+        raise typer.Exit(1)
+
+    # ── Fetch runs
+    runs = RunManager.get_all(
+        experiment=experiment,
+        status=status,
+        limit=limit or 999999,
+    )
+
+    if not runs:
+        warn("No runs found to export.")
+        if experiment or status:
+            console.print("  Try removing filters.")
+        raise typer.Exit()
+
+    # ── Build export data
+    # Each item = one run with all its params and metrics
+    export_data = _build_export_data(runs, latest_metrics)
+
+    # ── Generate output
+    if format == "csv":
+        output = _to_csv(export_data)
+    else:
+        output = _to_json(export_data)
+
+    # ── Write to file or stdout
+    if out:
+        _save_to_file(output, out, format, len(runs))
+    else:
+        # Print to terminal
+        # Use print() not console.print() to avoid Rich markup
+        print(output)
+
+
+# DATA BUILDER
+
+def _build_export_data(runs: list, latest_only: bool) -> list[dict]:
+    data = []
+
+    for run in runs:
+
+        # Base run fields
+        row = {
+            "run_id": run.run_id,
+            "name": run.name,
+            "experiment": run.experiment,
+            "status": run.status,
+            "tags": run.tags,
+            "created_at": run.created_at[:19].replace("T", " "),
+            "finished_at": run.finished_at[:19].replace("T", " ") if run.finished_at else "",
+            "duration_sec": run.duration_sec or "",
+        }
+
+        # Add params — prefix with "param_" to avoid name collisions
+        params = ParamManager.as_dict(run.run_id)
+        for key, value in sorted(params.items()):
+            row[f"param_{key}"] = value
+
+        # Add metrics — prefix with "metric_"
+        if latest_only:
+            # One value per metric key — the final/best one
+            metrics = {
+                m.key: m.value
+                for m in MetricManager.get_latest(run.run_id)
+            }
+            for key, value in sorted(metrics.items()):
+                row[f"metric_{key}"] = value
+        else:
+            # All steps — creates columns like metric_accuracy_step_100
+            all_metrics = MetricManager.get_for_run(run.run_id)
+            for m in all_metrics:
+                row[f"metric_{m.key}_step_{m.step}"] = m.value
+
+        data.append(row)
+
+    return data
+
+
+# CSV FORMATTER
+
+def _to_csv(data: list[dict]) -> str:
+
+    if not data:
+        return ""
+
+    # Collect ALL unique column names across all runs
+    # Preserve order: base fields first, then params, then metrics
+    all_columns = []
+    seen = set()
+
+    for row in data:
+        for key in row.keys():
+            if key not in seen:
+                all_columns.append(key)
+                seen.add(key)
+
+    # Write CSV to a string buffer
+    output = io.StringIO()
+    writer = csv.DictWriter(
+        output,
+        fieldnames=all_columns,
+        extrasaction="ignore",  # ignore extra keys
+        restval="",  # empty string for missing values
+        lineterminator="\n",
+    )
+
+    writer.writeheader()
+    writer.writerows(data)
+
+    return output.getvalue()
+
+
+
+# JSON FORMATTER
+
+
+def _to_json(data: list[dict]) -> str:
+
+    # Rebuild as nested structure for JSON
+    nested = []
+
+    for row in data:
+        # Separate base fields, params, metrics
+        base = {}
+        params = {}
+        metrics = {}
+
+        for key, value in row.items():
+            if key.startswith("param_"):
+                params[key[6:]] = value  # strip "param_" prefix
+            elif key.startswith("metric_"):
+                metrics[key[7:]] = value  # strip "metric_" prefix
+            else:
+                base[key] = value
+
+        nested.append({
+            **base,
+            "params": params,
+            "metrics": metrics,
+        })
+
+    return json.dumps(nested, indent=2)
+
+
+
+# FILE SAVER
+
+
+def _save_to_file(
+    content: str,
+    path: str,
+    format: str,
+    run_count: int,
+):
+    """
+    Save exported content to a file.
+    Creates parent directories if needed.
+    """
+    out_path = Path(path)
+
+    # Auto-add extension if missing
+    if not out_path.suffix:
+        out_path = out_path.with_suffix(f".{format}")
+
+    # Create parent dirs if needed
+    out_path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Write the file
+    out_path.write_text(content)
+
+    success(
+        f"Exported [bold]{run_count}[/bold] run(s) "
+        f"to [bold cyan]{out_path}[/bold cyan]"
+    )
+    console.print(
+        f"  [dim]Format :[/dim] {format.upper()}"
+    )
+    console.print(
+        f"  [dim]Size   :[/dim] "
+        f"{out_path.stat().st_size / 1024:.1f} KB"
+    )
+    console.print()
+
+    # Show a helpful next step
+    if format == "csv":
+        console.print("[dim]Open in pandas:[/dim]")
+        console.print(
+            f"  [cyan]import pandas as pd[/cyan]\n"
+            f"  [cyan]df = pd.read_csv('{out_path}')[/cyan]\n"
+            f"  [cyan]print(df.head())[/cyan]"
+        )
+    else:
+        console.print("[dim]Load in Python:[/dim]")
+        console.print(
+            f"  [cyan]import json[/cyan]\n"
+            f"  [cyan]data = json.load(open('{out_path}'))[/cyan]\n"
+            f"  [cyan]print(data[0]['metrics'])[/cyan]"
+        )
+    console.print()
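For reference, `_to_json` folds the flat `param_*`/`metric_*` columns back into nested objects, so one exported run comes out roughly like this (all field values here are illustrative, not taken from a real run):

    [
      {
        "run_id": "a1b2c3",
        "name": "baseline",
        "experiment": "fraud",
        "status": "done",
        "tags": "",
        "created_at": "2024-01-01 12:00:00",
        "finished_at": "2024-01-01 12:05:00",
        "duration_sec": 300,
        "params": {"lr": "0.001", "epochs": "10"},
        "metrics": {"accuracy": 0.91, "loss": 0.23}
      }
    ]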