yanex 0.1.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- yanex/__init__.py +74 -0
- yanex/api.py +507 -0
- yanex/cli/__init__.py +3 -0
- yanex/cli/_utils.py +114 -0
- yanex/cli/commands/__init__.py +3 -0
- yanex/cli/commands/archive.py +177 -0
- yanex/cli/commands/compare.py +320 -0
- yanex/cli/commands/confirm.py +198 -0
- yanex/cli/commands/delete.py +203 -0
- yanex/cli/commands/list.py +243 -0
- yanex/cli/commands/run.py +625 -0
- yanex/cli/commands/show.py +560 -0
- yanex/cli/commands/unarchive.py +177 -0
- yanex/cli/commands/update.py +282 -0
- yanex/cli/filters/__init__.py +8 -0
- yanex/cli/filters/base.py +286 -0
- yanex/cli/filters/time_utils.py +178 -0
- yanex/cli/formatters/__init__.py +7 -0
- yanex/cli/formatters/console.py +325 -0
- yanex/cli/main.py +45 -0
- yanex/core/__init__.py +3 -0
- yanex/core/comparison.py +549 -0
- yanex/core/config.py +587 -0
- yanex/core/constants.py +16 -0
- yanex/core/environment.py +146 -0
- yanex/core/git_utils.py +153 -0
- yanex/core/manager.py +555 -0
- yanex/core/storage.py +682 -0
- yanex/ui/__init__.py +1 -0
- yanex/ui/compare_table.py +524 -0
- yanex/utils/__init__.py +3 -0
- yanex/utils/exceptions.py +70 -0
- yanex/utils/validation.py +165 -0
- yanex-0.1.0.dist-info/METADATA +251 -0
- yanex-0.1.0.dist-info/RECORD +39 -0
- yanex-0.1.0.dist-info/WHEEL +5 -0
- yanex-0.1.0.dist-info/entry_points.txt +2 -0
- yanex-0.1.0.dist-info/licenses/LICENSE +21 -0
- yanex-0.1.0.dist-info/top_level.txt +1 -0
yanex/core/comparison.py
ADDED
@@ -0,0 +1,549 @@
"""
Experiment comparison data extraction and processing.

This module provides functionality to extract, process, and organize experiment data
for comparison views, including parameter and metric analysis.
"""

import json
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

from ..utils.exceptions import StorageError
from .manager import ExperimentManager


class ExperimentComparisonData:
    """Handles data extraction and processing for experiment comparison."""

    def __init__(self, manager: Optional[ExperimentManager] = None):
        """
        Initialize comparison data processor.

        Args:
            manager: ExperimentManager instance, creates default if None
        """
        self.manager = manager or ExperimentManager()
        self.storage = self.manager.storage

    def extract_experiment_data(
        self, experiment_ids: List[str], include_archived: bool = False
    ) -> List[Dict[str, Any]]:
        """
        Extract complete data for a list of experiments.

        Args:
            experiment_ids: List of experiment IDs to extract data for
            include_archived: Whether to include archived experiments

        Returns:
            List of experiment data dictionaries

        Raises:
            StorageError: If experiment data cannot be loaded
        """
        experiments_data = []

        for exp_id in experiment_ids:
            try:
                exp_data = self._extract_single_experiment(exp_id, include_archived)
                if exp_data:
                    experiments_data.append(exp_data)
            except Exception as e:
                # Log warning but continue with other experiments
                print(f"Warning: Failed to load experiment {exp_id}: {e}")
                continue

        return experiments_data

    def _extract_single_experiment(
        self, experiment_id: str, include_archived: bool = False
    ) -> Optional[Dict[str, Any]]:
        """
        Extract data from a single experiment.

        Args:
            experiment_id: Experiment ID to extract
            include_archived: Whether to search archived experiments

        Returns:
            Experiment data dictionary or None if failed
        """
        try:
            # Load metadata (required)
            metadata = self.storage.load_metadata(experiment_id, include_archived)

            # Load config (optional)
            try:
                config = self.storage.load_config(experiment_id, include_archived)
            except StorageError:
                config = {}

            # Load results (optional)
            try:
                results = self._load_results(experiment_id, include_archived)
            except StorageError:
                results = {}

            # Combine all data
            exp_data = {
                "id": experiment_id,
                "metadata": metadata,
                "config": config,
                "results": results,
                # Extract commonly used fields for easy access
                "name": metadata.get("name"),
                "description": metadata.get("description"),
                "status": metadata.get("status", "unknown"),
                "tags": metadata.get("tags", []),
                "started_at": metadata.get("started_at"),
                "ended_at": metadata.get("completed_at"),
                "script_path": metadata.get("script_path", ""),
                "archived": metadata.get("archived", False),
            }

            return exp_data

        except Exception as e:
            raise StorageError(
                f"Failed to extract experiment {experiment_id}: {e}"
            ) from e

    def _load_results(
        self, experiment_id: str, include_archived: bool = False
    ) -> Dict[str, Any]:
        """
        Load results from experiment directory.

        Args:
            experiment_id: Experiment ID
            include_archived: Whether to search archived experiments

        Returns:
            Results dictionary
        """
        exp_dir = self.storage.get_experiment_directory(experiment_id, include_archived)
        results_path = exp_dir / "results.json"

        if not results_path.exists():
            return {}

        try:
            with results_path.open("r", encoding="utf-8") as f:
                return json.load(f)
        except Exception as e:
            raise StorageError(f"Failed to load results: {e}") from e

    def discover_columns(
        self,
        experiments_data: List[Dict[str, Any]],
        params: Optional[List[str]] = None,
        metrics: Optional[List[str]] = None,
    ) -> Tuple[List[str], List[str]]:
        """
        Discover available parameter and metric columns.

        Args:
            experiments_data: List of experiment data dictionaries
            params: Specific parameters to include (None for auto-discovery)
            metrics: Specific metrics to include (None for auto-discovery)

        Returns:
            Tuple of (parameter_columns, metric_columns)
        """
        if params is not None and metrics is not None:
            # Both specified - use as-is
            return params, metrics

        # Auto-discover columns
        all_params = set()
        all_metrics = set()

        for exp_data in experiments_data:
            # Collect parameter keys
            config = exp_data.get("config", {})
            all_params.update(config.keys())

            # Collect metric keys
            results = exp_data.get("results", {})
            if isinstance(results, dict):
                all_metrics.update(results.keys())
            elif isinstance(results, list):
                # Handle list of result dictionaries
                for result_entry in results:
                    if isinstance(result_entry, dict):
                        all_metrics.update(result_entry.keys())

        # Use specified or discovered columns
        final_params = params if params is not None else sorted(all_params)
        final_metrics = metrics if metrics is not None else sorted(all_metrics)

        return final_params, final_metrics

    def build_comparison_matrix(
        self,
        experiments_data: List[Dict[str, Any]],
        param_columns: List[str],
        metric_columns: List[str],
    ) -> List[Dict[str, Any]]:
        """
        Build comparison data matrix with unified columns.

        Args:
            experiments_data: List of experiment data dictionaries
            param_columns: Parameter column names
            metric_columns: Metric column names

        Returns:
            List of row dictionaries for table display
        """
        comparison_rows = []

        for exp_data in experiments_data:
            row = self._build_experiment_row(exp_data, param_columns, metric_columns)
            comparison_rows.append(row)

        return comparison_rows

    def _build_experiment_row(
        self,
        exp_data: Dict[str, Any],
        param_columns: List[str],
        metric_columns: List[str],
    ) -> Dict[str, Any]:
        """
        Build a single experiment row for the comparison table.

        Args:
            exp_data: Experiment data dictionary
            param_columns: Parameter column names
            metric_columns: Metric column names

        Returns:
            Row dictionary with all columns
        """
        config = exp_data.get("config", {})
        results = exp_data.get("results", {})

        # Fixed columns
        row = {
            "id": exp_data["id"],
            "name": exp_data.get("name") or "[unnamed]",
            "started": self._format_datetime(exp_data.get("started_at")),
            "duration": self._calculate_duration(
                exp_data.get("started_at"),
                exp_data.get("ended_at"),
                exp_data.get("metadata", {}),
            ),
            "status": exp_data["status"],
            "tags": self._format_tags(exp_data.get("tags", [])),
        }

        # Parameter columns
        for param in param_columns:
            value = config.get(param)
            row[f"param:{param}"] = self._format_value(value)

        # Metric columns
        for metric in metric_columns:
            value = self._extract_metric_value(results, metric)
            row[f"metric:{metric}"] = self._format_value(value)

        return row

    def _extract_metric_value(self, results: Any, metric_name: str) -> Any:
        """Extract a metric value from results (dict or list)."""
        if isinstance(results, dict):
            return results.get(metric_name)
        elif isinstance(results, list):
            # For list of results, try to find the latest/last value
            # or aggregate if appropriate
            for result_entry in reversed(results):  # Start from most recent
                if isinstance(result_entry, dict) and metric_name in result_entry:
                    return result_entry[metric_name]
        return None

    def _format_datetime(self, dt_str: Optional[str]) -> str:
        """Format datetime string for display."""
        if not dt_str:
            return "-"

        try:
            dt = datetime.fromisoformat(dt_str.replace("Z", "+00:00"))
            return dt.strftime("%Y-%m-%d %H:%M:%S")
        except (ValueError, AttributeError):
            return str(dt_str) if dt_str else "-"

    def _format_tags(self, tags: list) -> str:
        """Format tags list for display."""
        if not tags:
            return "-"
        return ", ".join(tags)

    def _calculate_duration(
        self, start_str: Optional[str], end_str: Optional[str], metadata: dict = None
    ) -> str:
        """Calculate and format duration."""
        # Try to use duration from metadata first
        if metadata and "duration" in metadata:
            try:
                duration_seconds = float(metadata["duration"])
                hours = int(duration_seconds // 3600)
                minutes = int((duration_seconds % 3600) // 60)
                seconds = int(duration_seconds % 60)
                return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
            except (ValueError, TypeError):
                pass

        # Fall back to calculating from start/end times
        if not start_str:
            return "-"

        if not end_str:
            return "[running]"

        try:
            start_dt = datetime.fromisoformat(start_str.replace("Z", "+00:00"))
            end_dt = datetime.fromisoformat(end_str.replace("Z", "+00:00"))
            duration = end_dt - start_dt

            # Format as HH:MM:SS
            total_seconds = int(duration.total_seconds())
            hours = total_seconds // 3600
            minutes = (total_seconds % 3600) // 60
            seconds = total_seconds % 60

            return f"{hours:02d}:{minutes:02d}:{seconds:02d}"

        except (ValueError, AttributeError):
            return "-"

    def _format_value(self, value: Any) -> str:
        """Format a value for table display."""
        if value is None:
            return "-"

        # Handle different types appropriately
        if isinstance(value, bool):
            return str(value).lower()
        elif isinstance(value, (int, float)):
            if isinstance(value, float):
                # Format floats with reasonable precision
                if abs(value) >= 10000:
                    return f"{value:.2e}"
                elif abs(value) >= 1:
                    return f"{value:.4f}".rstrip("0").rstrip(".")
                else:
                    return f"{value:.6f}".rstrip("0").rstrip(".")
            return str(value)
        elif isinstance(value, (list, tuple)):
            # Format lists/tuples as comma-separated strings
            return ", ".join(str(item) for item in value)
        elif isinstance(value, dict):
            # Format dicts as key=value pairs
            return ", ".join(f"{k}={v}" for k, v in value.items())
        else:
            return str(value)

    def filter_different_columns(
        self,
        comparison_rows: List[Dict[str, Any]],
        param_columns: List[str],
        metric_columns: List[str],
    ) -> Tuple[List[str], List[str]]:
        """
        Filter out columns where all values are identical.

        Args:
            comparison_rows: Comparison matrix rows
            param_columns: Parameter column names
            metric_columns: Metric column names

        Returns:
            Tuple of (filtered_param_columns, filtered_metric_columns)
        """
        if not comparison_rows:
            return param_columns, metric_columns

        def has_different_values(column_key: str) -> bool:
            """Check if a column has different values across experiments."""
            values = set()

            for row in comparison_rows:
                value = row.get(column_key, "-")
                # Skip missing values for difference analysis
                if value != "-":
                    values.add(value)

            # Column is "different" if it has more than one unique non-missing value
            return len(values) > 1

        # Filter parameter columns
        filtered_params = []
        for param in param_columns:
            column_key = f"param:{param}"
            if has_different_values(column_key):
                filtered_params.append(param)

        # Filter metric columns
        filtered_metrics = []
        for metric in metric_columns:
            column_key = f"metric:{metric}"
            if has_different_values(column_key):
                filtered_metrics.append(metric)

        return filtered_params, filtered_metrics

    def infer_column_types(
        self,
        comparison_rows: List[Dict[str, Any]],
        param_columns: List[str],
        metric_columns: List[str],
    ) -> Dict[str, str]:
        """
        Infer data types for columns to enable proper sorting.

        Args:
            comparison_rows: Comparison matrix rows
            param_columns: Parameter column names
            metric_columns: Metric column names

        Returns:
            Dictionary mapping column keys to data types ('numeric', 'datetime', 'string')
        """
        column_types = {}

        # Fixed columns - we know their types
        column_types.update(
            {
                "id": "string",
                "name": "string",
                "started": "datetime",
                "duration": "string",  # Duration format
                "status": "string",
                "tags": "string",
            }
        )

        # Infer types for parameter and metric columns
        all_columns = [f"param:{param}" for param in param_columns] + [
            f"metric:{metric}" for metric in metric_columns
        ]

        for column_key in all_columns:
            column_type = self._infer_single_column_type(comparison_rows, column_key)
            column_types[column_key] = column_type

        return column_types

    def _infer_single_column_type(
        self, comparison_rows: List[Dict[str, Any]], column_key: str
    ) -> str:
        """Infer the data type of a single column."""
        # Collect non-missing values
        values = []
        for row in comparison_rows:
            value = row.get(column_key, "-")
            if value != "-":
                values.append(value)

        if not values:
            return "string"

        # Try to infer type from values
        numeric_count = 0
        datetime_count = 0

        for value in values:
            # Check if numeric
            try:
                float(value)
                numeric_count += 1
                continue
            except (ValueError, TypeError):
                pass

            # Check if datetime
            try:
                datetime.fromisoformat(str(value).replace("Z", "+00:00"))
                datetime_count += 1
                continue
            except (ValueError, TypeError):
                pass

        # Determine type based on majority
        total_values = len(values)
        if numeric_count > total_values * 0.8:
            return "numeric"
        elif datetime_count > total_values * 0.8:
            return "datetime"
        else:
            return "string"

    def get_comparison_data(
        self,
        experiment_ids: List[str],
        params: Optional[List[str]] = None,
        metrics: Optional[List[str]] = None,
        only_different: bool = False,
        include_archived: bool = False,
    ) -> Dict[str, Any]:
        """
        Get complete comparison data for experiments.

        Args:
            experiment_ids: List of experiment IDs
            params: Specific parameters to include (None for auto-discovery)
            metrics: Specific metrics to include (None for auto-discovery)
            only_different: Whether to show only columns with different values
            include_archived: Whether to include archived experiments

        Returns:
            Dictionary containing comparison data and metadata
        """
        # Extract experiment data
        experiments_data = self.extract_experiment_data(
            experiment_ids, include_archived
        )

        if not experiments_data:
            return {
                "rows": [],
                "param_columns": [],
                "metric_columns": [],
                "column_types": {},
                "total_experiments": 0,
            }

        # Discover columns
        param_columns, metric_columns = self.discover_columns(
            experiments_data, params, metrics
        )

        # Build comparison matrix
        comparison_rows = self.build_comparison_matrix(
            experiments_data, param_columns, metric_columns
        )

        # Filter for different columns if requested
        if only_different:
            param_columns, metric_columns = self.filter_different_columns(
                comparison_rows, param_columns, metric_columns
            )
            # Rebuild matrix with filtered columns
            comparison_rows = self.build_comparison_matrix(
                experiments_data, param_columns, metric_columns
            )

        # Infer column types
        column_types = self.infer_column_types(
            comparison_rows, param_columns, metric_columns
        )

        return {
            "rows": comparison_rows,
            "param_columns": param_columns,
            "metric_columns": metric_columns,
            "column_types": column_types,
            "total_experiments": len(experiments_data),
        }
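
For orientation, a minimal usage sketch of the class added above. It assumes the default ExperimentManager can resolve a local experiments directory; the experiment IDs are hypothetical placeholders, and only the calls and dictionary keys shown in the file itself (get_comparison_data, "rows", "id", "status", "metric:" / "param:" column prefixes) are relied on.

    # Hypothetical usage sketch; not part of the packaged code.
    from yanex.core.comparison import ExperimentComparisonData

    comparison = ExperimentComparisonData()  # builds a default ExperimentManager

    data = comparison.get_comparison_data(
        experiment_ids=["a1b2c3d4", "e5f6a7b8"],  # hypothetical experiment IDs
        only_different=True,  # keep only columns whose values differ across experiments
    )

    print(f"{data['total_experiments']} experiments; metrics: {data['metric_columns']}")
    for row in data["rows"]:
        # Metric columns are keyed with the "metric:" prefix, parameters with "param:".
        metrics = {k: v for k, v in row.items() if k.startswith("metric:")}
        print(row["id"], row["status"], metrics)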