yanex 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yanex/__init__.py +74 -0
- yanex/api.py +507 -0
- yanex/cli/__init__.py +3 -0
- yanex/cli/_utils.py +114 -0
- yanex/cli/commands/__init__.py +3 -0
- yanex/cli/commands/archive.py +177 -0
- yanex/cli/commands/compare.py +320 -0
- yanex/cli/commands/confirm.py +198 -0
- yanex/cli/commands/delete.py +203 -0
- yanex/cli/commands/list.py +243 -0
- yanex/cli/commands/run.py +625 -0
- yanex/cli/commands/show.py +560 -0
- yanex/cli/commands/unarchive.py +177 -0
- yanex/cli/commands/update.py +282 -0
- yanex/cli/filters/__init__.py +8 -0
- yanex/cli/filters/base.py +286 -0
- yanex/cli/filters/time_utils.py +178 -0
- yanex/cli/formatters/__init__.py +7 -0
- yanex/cli/formatters/console.py +325 -0
- yanex/cli/main.py +45 -0
- yanex/core/__init__.py +3 -0
- yanex/core/comparison.py +549 -0
- yanex/core/config.py +587 -0
- yanex/core/constants.py +16 -0
- yanex/core/environment.py +146 -0
- yanex/core/git_utils.py +153 -0
- yanex/core/manager.py +555 -0
- yanex/core/storage.py +682 -0
- yanex/ui/__init__.py +1 -0
- yanex/ui/compare_table.py +524 -0
- yanex/utils/__init__.py +3 -0
- yanex/utils/exceptions.py +70 -0
- yanex/utils/validation.py +165 -0
- yanex-0.1.0.dist-info/METADATA +251 -0
- yanex-0.1.0.dist-info/RECORD +39 -0
- yanex-0.1.0.dist-info/WHEEL +5 -0
- yanex-0.1.0.dist-info/entry_points.txt +2 -0
- yanex-0.1.0.dist-info/licenses/LICENSE +21 -0
- yanex-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,560 @@
|
|
1
|
+
"""
|
2
|
+
Show detailed information about a specific experiment.
|
3
|
+
"""
|
4
|
+
|
5
|
+
from typing import Any, Dict, List, Optional
|
6
|
+
|
7
|
+
import click
|
8
|
+
|
9
|
+
from yanex.cli.filters import ExperimentFilter
|
10
|
+
from yanex.cli.formatters.console import ExperimentTableFormatter
|
11
|
+
from yanex.core.manager import ExperimentManager
|
12
|
+
|
13
|
+
|
14
|
+
@click.command("show")
@click.argument("experiment_identifier", required=True)
@click.option(
    "--show-metric",
    "show_metrics",
    help="Comma-separated list of specific metrics to show in results table (e.g., 'accuracy,loss,f1_score')",
)
@click.option("--archived", is_flag=True, help="Include archived experiments in search")
@click.pass_context
def show_experiment(
    ctx, experiment_identifier: str, show_metrics: Optional[str], archived: bool
):
    """
    Show detailed information about an experiment.

    EXPERIMENT_IDENTIFIER can be either:
    - An experiment ID (8-character string)
    - An experiment name

    If multiple experiments have the same name, a list will be shown
    and you'll need to use the unique experiment ID instead.
    """
    try:
        # Create filter and formatter (filter creates default manager)
        filter_obj = ExperimentFilter()
        formatter = ExperimentTableFormatter()

        # Try to find the experiment
        experiment = find_experiment(filter_obj, experiment_identifier, archived)

        if experiment is None:
            click.echo(
                f"Error: No experiment found with ID or name '{experiment_identifier}'",
                err=True,
            )
            ctx.exit(1)

        # Check if we got multiple experiments (name collision)
        if isinstance(experiment, list):
            click.echo(
                f"Multiple experiments found with name '{experiment_identifier}':"
            )
            click.echo()

            # Show a filtered list using the existing list formatter
            formatter.print_experiments_table(experiment)
            click.echo()
            click.echo(
                "Please use the specific experiment ID with 'yanex show <id>' to view details."
            )
            ctx.exit(1)

        # Parse show_metrics if provided (comma-separated, blanks dropped)
        requested_metrics = None
        if show_metrics:
            requested_metrics = [
                metric.strip() for metric in show_metrics.split(",") if metric.strip()
            ]

        # Display detailed experiment information
        display_experiment_details(
            filter_obj.manager, experiment, formatter, requested_metrics, archived
        )

    except click.exceptions.Exit:
        # BUGFIX: ctx.exit() raises click.exceptions.Exit, a RuntimeError
        # subclass, so the broad handler below used to catch it and print a
        # spurious "Error: " line before exiting.  Re-raise so intentional
        # exits propagate untouched.
        raise
    except Exception as e:
        click.echo(f"Error: {e}", err=True)
        ctx.exit(1)
|
81
|
+
|
82
|
+
|
83
|
+
def find_experiment(
    filter_obj: ExperimentFilter, identifier: str, include_archived: bool = False
) -> Optional[Dict[str, Any] | List[Dict[str, Any]]]:
    """
    Resolve an experiment by ID or by name.

    Args:
        filter_obj: ExperimentFilter used to enumerate experiments
        identifier: Experiment ID (8 characters) or experiment name
        include_archived: Also search archived experiments when True

    Returns:
        - A single experiment dict for an ID hit or a unique name hit
        - A list of dicts when several experiments share the name
        - None when nothing matches (or loading fails)
    """
    try:
        candidates = filter_obj._load_all_experiments(include_archived)

        # IDs are exactly 8 characters; only then attempt an exact ID match.
        if len(identifier) == 8:
            id_hit = next(
                (exp for exp in candidates if exp.get("id") == identifier), None
            )
            if id_hit is not None:
                return id_hit

        # Fall back to matching by (non-empty) experiment name.
        by_name = [
            exp
            for exp in candidates
            if exp.get("name") and exp.get("name") == identifier
        ]

        if not by_name:
            return None
        if len(by_name) == 1:
            return by_name[0]
        return by_name  # ambiguous: let the caller disambiguate

    except Exception:
        # Best-effort lookup: any storage/filter failure reads as "not found".
        return None
|
127
|
+
|
128
|
+
|
129
|
+
def display_experiment_details(
    manager: ExperimentManager,
    experiment: Dict[str, Any],
    formatter: ExperimentTableFormatter,
    requested_metrics: Optional[List[str]] = None,
    include_archived: bool = False,
):
    """
    Print a full, panel-based report for a single experiment.

    Sections, each rendered only when its data is available: header with
    status/directory/timing, tags & description, configuration, logged
    results (at most 10 most recent steps, at most 8 metric columns unless
    specific metrics are requested), artifacts, environment/git info, and
    error details for failed or cancelled experiments.

    Args:
        manager: ExperimentManager providing storage access
        experiment: Experiment metadata dict (must contain "id")
        formatter: Formatter supplying status colors/symbols and time helpers
        requested_metrics: Optional subset of metric names to display
        include_archived: Look up data in the archive when True
    """
    # rich is imported lazily so other CLI commands don't pay for it
    from rich import box
    from rich.console import Console
    from rich.panel import Panel
    from rich.table import Table
    from rich.text import Text

    console = Console()
    experiment_id = experiment["id"]
    status = experiment.get("status", "unknown")
    status_color = formatter.STATUS_COLORS.get(status, "white")

    def format_value(value: Any) -> str:
        """Render one metric value for a table cell (floats to 4 decimals)."""
        if value is None:
            return "-"
        if isinstance(value, float):
            return f"{value:.4f}"
        return str(value)

    def new_results_table() -> "Table":
        """Create a results table pre-populated with Step/Timestamp columns."""
        table = Table(show_header=True, header_style="bold magenta", box=box.SIMPLE)
        table.add_column("Step", justify="right", style="cyan")
        table.add_column("Timestamp", style="dim")
        return table

    def add_result_rows(table, results, metrics, extra: bool) -> None:
        """Fill `table` with the last 10 result rows; `extra` appends a '...' cell."""
        for result in results[-10:]:
            row = [
                str(result.get("step", "-")),
                formatter._format_time(result.get("timestamp", "")),
            ]
            row.extend(format_value(result.get(metric)) for metric in metrics)
            if extra:
                row.append("...")
            table.add_row(*row)

    # --- Header: name, status, directory, timing -------------------------
    status_emoji = formatter.STATUS_SYMBOLS.get(status, "○")

    header_text = Text()
    header_text.append("Experiment: ", style="bold")
    header_text.append(f"{experiment.get('name', '[unnamed]')} ", style="bold cyan")
    header_text.append(f"({experiment_id})", style="dim")
    header_text.append(f"\nStatus: {status_emoji} ", style="")
    header_text.append(f"{status}", style=f"bold {status_color}")

    try:
        exp_dir = manager.storage.get_experiment_dir(experiment_id, include_archived)
        header_text.append(f"\nDirectory: {exp_dir}", style="dim cyan")
    except Exception:
        pass  # Skip directory path if not available

    created_at = experiment.get("created_at")
    started_at = experiment.get("started_at")
    completed_at = experiment.get("completed_at")
    failed_at = experiment.get("failed_at")
    cancelled_at = experiment.get("cancelled_at")

    if created_at:
        header_text.append(f"\nCreated: {formatter._format_time(created_at)}")
    if started_at:
        header_text.append(f"\nStarted: {formatter._format_time(started_at)}")

    # Show end time based on status
    end_time = completed_at or failed_at or cancelled_at
    if end_time:
        end_label = (
            "Completed" if completed_at else ("Failed" if failed_at else "Cancelled")
        )
        header_text.append(f"\n{end_label}: {formatter._format_time(end_time)}")

    # Duration: _calculate_duration handles both finished runs (end_time set)
    # and still-running ones (end_time None).  The original had an unreachable
    # `elif started_at:` twin branch; folded into a single call.
    if started_at:
        duration = formatter._calculate_duration(started_at, end_time)
        header_text.append(f"\nDuration: {duration}")

    console.print(Panel(header_text, box=box.ROUNDED, border_style=status_color))
    console.print()

    # --- Tags and description --------------------------------------------
    tags = experiment.get("tags", [])
    description = experiment.get("description")

    if tags or description:
        info_table = Table.grid(padding=(0, 2))
        info_table.add_column("Field", style="bold")
        info_table.add_column("Value")

        if tags:
            info_table.add_row("Tags:", ", ".join(tags))

        if description:
            info_table.add_row("Description:", description)

        console.print(
            Panel(info_table, title="[bold]Experiment Info[/bold]", box=box.ROUNDED)
        )
        console.print()

    # --- Configuration ----------------------------------------------------
    try:
        config = manager.storage.load_config(experiment_id, include_archived)
        if config:
            config_table = Table(
                show_header=True, header_style="bold magenta", box=box.SIMPLE
            )
            config_table.add_column("Parameter", style="cyan")
            config_table.add_column("Value", style="green")

            for key, value in config.items():
                value_str = str(value)
                # Truncate long container reprs so the table stays readable
                if isinstance(value, (dict, list)) and len(value_str) > 50:
                    value_str = value_str[:47] + "..."
                config_table.add_row(key, value_str)

            console.print(
                Panel(config_table, title="[bold]Configuration[/bold]", box=box.ROUNDED)
            )
            console.print()
    except Exception:
        pass  # Skip config if not available

    # --- Results ----------------------------------------------------------
    try:
        results = manager.storage.load_results(experiment_id, include_archived)
        if results:
            # All unique metric names, excluding bookkeeping keys
            all_metrics = sorted(
                {
                    key
                    for result in results
                    for key in result
                    if key not in ["step", "timestamp"]
                }
            )

            if requested_metrics:
                # User asked for specific metrics; warn about unknown ones.
                shown_metrics = [m for m in requested_metrics if m in all_metrics]
                missing_metrics = [m for m in requested_metrics if m not in all_metrics]

                if missing_metrics:
                    warning_text = Text("Warning: ", style="bold yellow")
                    warning_text.append(
                        f"Requested metrics not found: {', '.join(missing_metrics)}",
                        style="yellow",
                    )
                    console.print(warning_text)

                if not shown_metrics:
                    console.print(
                        Text(
                            "No requested metrics found in experiment results.",
                            style="red",
                        )
                    )
                    # NOTE(review): preserves original behavior — bail out
                    # entirely, skipping artifacts/environment/error sections.
                    return

                results_table = new_results_table()
                for metric in shown_metrics:
                    results_table.add_column(metric, justify="right", style="green")
                add_result_rows(results_table, results, shown_metrics, extra=False)

                title = f"[bold]Results[/bold] (showing {len(shown_metrics)} of {len(all_metrics)} metrics)"
                if len(results) > 10:
                    title += f" (last 10 of {len(results)} steps)"

                console.print(Panel(results_table, title=title, box=box.ROUNDED))
                console.print()

            elif len(all_metrics) > 8:
                # Too many metrics: show well-known ones first, cap at 8
                # columns, and summarize the rest in a "(+N more)" column.
                key_metrics = [
                    "accuracy",
                    "loss",
                    "epoch",
                    "learning_rate",
                    "f1_score",
                    "precision",
                    "recall",
                ]
                shown_metrics = [m for m in key_metrics if m in all_metrics][:8]

                # Fill remaining slots with other metrics (alphabetically,
                # since all_metrics is sorted)
                for metric in all_metrics:
                    if len(shown_metrics) >= 8:
                        break
                    if metric not in shown_metrics:
                        shown_metrics.append(metric)

                results_table = new_results_table()
                for metric in shown_metrics:
                    results_table.add_column(metric, justify="right", style="green")

                other_count = len(all_metrics) - len(shown_metrics)
                if other_count > 0:
                    results_table.add_column(
                        f"(+{other_count} more)", justify="center", style="dim"
                    )
                add_result_rows(
                    results_table, results, shown_metrics, extra=other_count > 0
                )

                title = f"[bold]Results[/bold] ({len(all_metrics)} metrics total)"
                if len(results) > 10:
                    title += f" (showing last 10 of {len(results)} steps)"

                console.print(Panel(results_table, title=title, box=box.ROUNDED))

                # Full metric list for reference below the truncated table
                metrics_text = Text("All metrics: ", style="bold")
                metrics_text.append(", ".join(all_metrics), style="dim")
                console.print(metrics_text)
                console.print()

            else:
                # Few metrics: one column per metric
                results_table = new_results_table()
                for metric in all_metrics:
                    results_table.add_column(metric, justify="right", style="green")
                add_result_rows(results_table, results, all_metrics, extra=False)

                title = "[bold]Results[/bold]"
                if len(results) > 10:
                    title += f" (showing last 10 of {len(results)})"

                console.print(Panel(results_table, title=title, box=box.ROUNDED))
                console.print()
    except Exception:
        pass  # Skip results if not available

    # --- Artifacts ---------------------------------------------------------
    try:
        experiment_dir = manager.storage.get_experiment_dir(
            experiment_id, include_archived
        )
        artifacts_dir = experiment_dir / "artifacts"

        if artifacts_dir.exists():
            artifacts = list(artifacts_dir.iterdir())
            if artifacts:
                artifacts_table = Table(
                    show_header=True, header_style="bold magenta", box=box.SIMPLE
                )
                artifacts_table.add_column("Artifact", style="cyan")
                artifacts_table.add_column("Size", justify="right", style="green")
                artifacts_table.add_column("Modified", style="dim")

                for artifact_path in sorted(artifacts):
                    if artifact_path.is_file():
                        stat = artifact_path.stat()  # single stat() per file
                        artifacts_table.add_row(
                            artifact_path.name,
                            formatter._format_file_size(stat.st_size),
                            formatter._format_timestamp(stat.st_mtime),
                        )

                console.print(
                    Panel(
                        artifacts_table, title="[bold]Artifacts[/bold]", box=box.ROUNDED
                    )
                )
                console.print()
    except Exception:
        pass  # Skip artifacts if not available

    # --- Environment and execution info -------------------------------------
    try:
        metadata = manager.storage.load_metadata(experiment_id, include_archived)
        env_info = metadata.get("environment", {})
        git_info = metadata.get("git", {})

        if env_info or git_info:
            env_table = Table.grid(padding=(0, 2))
            env_table.add_column("Field", style="bold")
            env_table.add_column("Value")

            # Git information
            if git_info:
                branch = git_info.get("branch", "unknown")
                commit_hash = git_info.get(
                    "commit_hash_short", git_info.get("commit_hash", "unknown")
                )
                if commit_hash != "unknown" and len(commit_hash) > 12:
                    commit_hash = commit_hash[:12]  # keep display compact

                env_table.add_row("Git Branch:", branch)
                env_table.add_row("Git Commit:", commit_hash)

                # Dirty-workdir flag is recorded under environment.git
                env_git_info = env_info.get("git", {})
                if env_git_info.get("has_uncommitted_changes"):
                    env_table.add_row("", "[yellow]⚠ Uncommitted changes[/yellow]")

            # Python version information
            python_info = env_info.get("python", {})
            if python_info:
                python_version = python_info.get("python_version", "unknown")
                # e.g. "3.11.9 (main, ...)" -> "3.11.9"
                if python_version != "unknown" and "(" in python_version:
                    python_version = python_version.split(" (")[0]
                env_table.add_row("Python:", python_version)

                # Platform from python info (more readable than system platform)
                python_platform = python_info.get("platform", "unknown")
                if python_platform != "unknown":
                    env_table.add_row("Platform:", python_platform)

            # System information (fallback if python platform not available)
            if python_info.get("platform") == "unknown" or not python_info:
                system_info = env_info.get("system", {})
                platform_info = system_info.get("platform", {})
                if platform_info:
                    system_name = platform_info.get("system", "unknown")
                    machine = platform_info.get("machine", "")
                    if system_name != "unknown":
                        platform_display = system_name
                        if machine:
                            platform_display += f" ({machine})"
                        env_table.add_row("Platform:", platform_display)

            # Script information
            script_path = metadata.get("script_path")
            if script_path:
                env_table.add_row("Script:", script_path)

            console.print(
                Panel(env_table, title="[bold]Environment[/bold]", box=box.ROUNDED)
            )
            console.print()
    except Exception:
        pass  # Skip environment if not available

    # --- Error information if failed/cancelled -------------------------------
    if status in ["failed", "cancelled"]:
        error_msg = experiment.get("error_message")
        cancel_reason = experiment.get("cancellation_reason")

        if error_msg or cancel_reason:
            error_text = error_msg or cancel_reason
            console.print(
                Panel(
                    Text(error_text, style="red"),
                    title=f"[bold red]{'Error' if error_msg else 'Cancellation Reason'}[/bold red]",
                    box=box.ROUNDED,
                    border_style="red",
                )
            )