claude-turing 3.1.0 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,364 @@
1
+ #!/usr/bin/env python3
2
+ """Probability calibration for the autoresearch pipeline.
3
+
4
+ Measures whether model probabilities are well-calibrated, computes ECE/MCE,
5
+ generates reliability diagrams, and applies post-hoc calibration (Platt
6
+ scaling, isotonic regression, temperature scaling).
7
+
8
+ Usage:
9
+ python scripts/calibration.py exp-042
10
+ python scripts/calibration.py exp-042 --method platt
11
+ python scripts/calibration.py exp-042 --method auto
12
+ python scripts/calibration.py --json
13
+ """
14
+
15
+ from __future__ import annotations
16
+
17
+ import argparse
18
+ import json
19
+ import sys
20
+ from datetime import datetime, timezone
21
+ from pathlib import Path
22
+
23
+ import numpy as np
24
+ import yaml
25
+
26
+ from scripts.turing_io import load_config, load_experiments
27
+
28
+ DEFAULT_LOG_PATH = "experiments/log.jsonl"
29
+ DEFAULT_N_BINS = 10
30
+ CALIBRATION_METHODS = ["platt", "isotonic", "temperature"]
31
+
32
+
33
+ # --- Calibration Metrics ---
34
+
35
+
36
+ def compute_ece(
37
+ probabilities: np.ndarray,
38
+ labels: np.ndarray,
39
+ n_bins: int = DEFAULT_N_BINS,
40
+ ) -> float:
41
+ """Compute Expected Calibration Error.
42
+
43
+ ECE = sum(|bin_accuracy - bin_confidence| * bin_size / total)
44
+ """
45
+ if len(probabilities) == 0:
46
+ return 0.0
47
+
48
+ bin_boundaries = np.linspace(0, 1, n_bins + 1)
49
+ ece = 0.0
50
+
51
+ for i in range(n_bins):
52
+ mask = (probabilities >= bin_boundaries[i]) & (probabilities < bin_boundaries[i + 1])
53
+ if i == n_bins - 1:
54
+ mask = (probabilities >= bin_boundaries[i]) & (probabilities <= bin_boundaries[i + 1])
55
+
56
+ bin_size = np.sum(mask)
57
+ if bin_size == 0:
58
+ continue
59
+
60
+ bin_accuracy = np.mean(labels[mask])
61
+ bin_confidence = np.mean(probabilities[mask])
62
+ ece += abs(bin_accuracy - bin_confidence) * bin_size / len(probabilities)
63
+
64
+ return round(float(ece), 6)
65
+
66
+
67
+ def compute_mce(
68
+ probabilities: np.ndarray,
69
+ labels: np.ndarray,
70
+ n_bins: int = DEFAULT_N_BINS,
71
+ ) -> float:
72
+ """Compute Maximum Calibration Error."""
73
+ if len(probabilities) == 0:
74
+ return 0.0
75
+
76
+ bin_boundaries = np.linspace(0, 1, n_bins + 1)
77
+ max_gap = 0.0
78
+
79
+ for i in range(n_bins):
80
+ mask = (probabilities >= bin_boundaries[i]) & (probabilities < bin_boundaries[i + 1])
81
+ if i == n_bins - 1:
82
+ mask = (probabilities >= bin_boundaries[i]) & (probabilities <= bin_boundaries[i + 1])
83
+
84
+ if np.sum(mask) == 0:
85
+ continue
86
+
87
+ bin_accuracy = np.mean(labels[mask])
88
+ bin_confidence = np.mean(probabilities[mask])
89
+ max_gap = max(max_gap, abs(bin_accuracy - bin_confidence))
90
+
91
+ return round(float(max_gap), 6)
92
+
93
+
94
+ def compute_reliability_diagram(
95
+ probabilities: np.ndarray,
96
+ labels: np.ndarray,
97
+ n_bins: int = DEFAULT_N_BINS,
98
+ ) -> list[dict]:
99
+ """Compute reliability diagram data."""
100
+ if len(probabilities) == 0:
101
+ return []
102
+
103
+ bin_boundaries = np.linspace(0, 1, n_bins + 1)
104
+ bins = []
105
+
106
+ for i in range(n_bins):
107
+ lo = bin_boundaries[i]
108
+ hi = bin_boundaries[i + 1]
109
+ mask = (probabilities >= lo) & (probabilities < hi)
110
+ if i == n_bins - 1:
111
+ mask = (probabilities >= lo) & (probabilities <= hi)
112
+
113
+ bin_size = int(np.sum(mask))
114
+ if bin_size == 0:
115
+ bins.append({"bin": f"[{lo:.1f}-{hi:.1f}]", "predicted": None,
116
+ "actual": None, "gap": None, "n": 0})
117
+ continue
118
+
119
+ predicted = float(np.mean(probabilities[mask]))
120
+ actual = float(np.mean(labels[mask]))
121
+ gap = actual - predicted
122
+
123
+ bins.append({
124
+ "bin": f"[{lo:.1f}-{hi:.1f}]",
125
+ "predicted": round(predicted, 4),
126
+ "actual": round(actual, 4),
127
+ "gap": round(gap, 4),
128
+ "n": bin_size,
129
+ })
130
+
131
+ return bins
132
+
133
+
134
+ # --- Calibration Methods ---
135
+
136
+
137
+ def platt_scaling(
138
+ logits: np.ndarray,
139
+ labels: np.ndarray,
140
+ ) -> dict:
141
+ """Apply Platt scaling (logistic regression on logits)."""
142
+ from scipy.special import expit
143
+
144
+ # Fit logistic regression: P(y=1|f) = sigmoid(a*f + b)
145
+ # Simple gradient descent for a, b
146
+ a, b = 1.0, 0.0
147
+ lr = 0.01
148
+ for _ in range(1000):
149
+ pred = expit(a * logits + b)
150
+ pred = np.clip(pred, 1e-7, 1 - 1e-7)
151
+ grad_a = np.mean((pred - labels) * logits)
152
+ grad_b = np.mean(pred - labels)
153
+ a -= lr * grad_a
154
+ b -= lr * grad_b
155
+
156
+ calibrated = expit(a * logits + b)
157
+ return {"method": "platt", "params": {"a": round(float(a), 6), "b": round(float(b), 6)},
158
+ "calibrated_probabilities": calibrated}
159
+
160
+
161
+ def isotonic_calibration(
162
+ probabilities: np.ndarray,
163
+ labels: np.ndarray,
164
+ ) -> dict:
165
+ """Apply isotonic regression calibration."""
166
+ from sklearn.isotonic import IsotonicRegression
167
+
168
+ iso = IsotonicRegression(out_of_bounds="clip")
169
+ calibrated = iso.fit_transform(probabilities, labels)
170
+ return {"method": "isotonic", "params": {},
171
+ "calibrated_probabilities": np.clip(calibrated, 0, 1)}
172
+
173
+
174
+ def temperature_scaling(
175
+ logits: np.ndarray,
176
+ labels: np.ndarray,
177
+ ) -> dict:
178
+ """Apply temperature scaling (single parameter T)."""
179
+ from scipy.special import expit
180
+
181
+ best_t = 1.0
182
+ best_ece = float("inf")
183
+
184
+ for t in np.arange(0.5, 5.0, 0.1):
185
+ scaled = expit(logits / t)
186
+ ece = compute_ece(scaled, labels)
187
+ if ece < best_ece:
188
+ best_ece = ece
189
+ best_t = t
190
+
191
+ calibrated = expit(logits / best_t)
192
+ return {"method": "temperature", "params": {"T": round(float(best_t), 2)},
193
+ "calibrated_probabilities": calibrated}
194
+
195
+
196
+ # --- Full Pipeline ---
197
+
198
+
199
+ def calibrate_model(
200
+ probabilities: np.ndarray | None = None,
201
+ logits: np.ndarray | None = None,
202
+ labels: np.ndarray | None = None,
203
+ method: str = "auto",
204
+ exp_id: str | None = None,
205
+ config_path: str = "config.yaml",
206
+ ) -> dict:
207
+ """Run calibration analysis and optionally apply post-hoc calibration."""
208
+ if (probabilities is None and logits is None) or labels is None:
209
+ return {"error": "Provide probabilities (or logits) and labels for calibration"}
210
+
211
+ if probabilities is None and logits is not None:
212
+ from scipy.special import expit
213
+ probabilities = expit(logits)
214
+
215
+ # Before calibration
216
+ ece_before = compute_ece(probabilities, labels)
217
+ mce_before = compute_mce(probabilities, labels)
218
+ reliability = compute_reliability_diagram(probabilities, labels)
219
+
220
+ # Determine overconfidence
221
+ overconfident_bins = [b for b in reliability if b.get("gap") is not None and b["gap"] < -0.05 and b["n"] > 0]
222
+
223
+ report = {
224
+ "generated_at": datetime.now(timezone.utc).isoformat(),
225
+ "experiment_id": exp_id,
226
+ "before": {"ece": ece_before, "mce": mce_before},
227
+ "reliability_diagram": reliability,
228
+ "overconfident_bins": len(overconfident_bins),
229
+ }
230
+
231
+ # Apply calibration
232
+ methods_to_try = CALIBRATION_METHODS if method == "auto" else [method]
233
+ results = []
234
+
235
+ for m in methods_to_try:
236
+ try:
237
+ if m == "platt" and logits is not None:
238
+ cal = platt_scaling(logits, labels)
239
+ elif m == "isotonic":
240
+ cal = isotonic_calibration(probabilities, labels)
241
+ elif m == "temperature" and logits is not None:
242
+ cal = temperature_scaling(logits, labels)
243
+ else:
244
+ continue
245
+
246
+ ece_after = compute_ece(cal["calibrated_probabilities"], labels)
247
+ results.append({
248
+ "method": m,
249
+ "ece_after": ece_after,
250
+ "improvement": round(ece_before - ece_after, 6),
251
+ "params": cal.get("params", {}),
252
+ })
253
+ except Exception:
254
+ continue
255
+
256
+ # Find best method
257
+ best = None
258
+ if results:
259
+ best = min(results, key=lambda r: r["ece_after"])
260
+
261
+ report["calibration_results"] = results
262
+ report["best_method"] = best
263
+
264
+ # Verdict
265
+ if ece_before < 0.02:
266
+ report["verdict"] = "already_calibrated"
267
+ report["reason"] = f"ECE {ece_before:.4f} is already low — calibration not needed"
268
+ elif best and best["improvement"] > 0.01:
269
+ report["verdict"] = "improved"
270
+ report["reason"] = f"{best['method']} reduces ECE from {ece_before:.4f} to {best['ece_after']:.4f}"
271
+ elif best:
272
+ report["verdict"] = "marginal_improvement"
273
+ report["reason"] = f"Best method ({best['method']}) improves ECE by only {best['improvement']:.4f}"
274
+ else:
275
+ report["verdict"] = "no_improvement"
276
+ report["reason"] = "No calibration method improved ECE"
277
+
278
+ return report
279
+
280
+
281
+ # --- Report Formatting ---
282
+
283
+
284
+ def save_calibration_report(report: dict, output_dir: str = "experiments/calibration") -> Path:
285
+ out_path = Path(output_dir)
286
+ out_path.mkdir(parents=True, exist_ok=True)
287
+ exp_id = report.get("experiment_id", "unknown")
288
+ filepath = out_path / f"{exp_id}-calibration.yaml"
289
+ clean = json.loads(json.dumps(report, default=str))
290
+ with open(filepath, "w") as f:
291
+ yaml.dump(clean, f, default_flow_style=False, sort_keys=False)
292
+ return filepath
293
+
294
+
295
+ def format_calibration_report(report: dict) -> str:
296
+ if "error" in report:
297
+ return f"ERROR: {report['error']}"
298
+
299
+ exp_id = report.get("experiment_id", "?")
300
+ before = report.get("before", {})
301
+
302
+ lines = [f"# Calibration: {exp_id}", "",
303
+ f"*Generated {report.get('generated_at', 'N/A')[:19]}*", "",
304
+ f"**ECE before:** {before.get('ece', '?')}",
305
+ f"**MCE before:** {before.get('mce', '?')}", ""]
306
+
307
+ # Reliability diagram
308
+ diagram = report.get("reliability_diagram", [])
309
+ if diagram:
310
+ lines.extend(["## Reliability Diagram", "",
311
+ "| Bin | Predicted | Actual | Gap |",
312
+ "|-----|-----------|--------|-----|"])
313
+ for b in diagram:
314
+ if b["predicted"] is not None:
315
+ gap_marker = " overconfident" if b["gap"] is not None and b["gap"] < -0.05 else ""
316
+ lines.append(f"| {b['bin']} | {b['predicted']:.4f} | {b['actual']:.4f} | {b['gap']:+.4f}{gap_marker} |")
317
+ lines.append("")
318
+
319
+ # Calibration results
320
+ results = report.get("calibration_results", [])
321
+ if results:
322
+ lines.extend(["## Calibration Methods", "",
323
+ "| Method | ECE After | Improvement |",
324
+ "|--------|-----------|-------------|"])
325
+ best = report.get("best_method", {})
326
+ for r in results:
327
+ marker = " BEST" if r["method"] == best.get("method") else ""
328
+ lines.append(f"| {r['method']} | {r['ece_after']:.4f} | {r['improvement']:+.4f} |{marker}")
329
+ lines.append("")
330
+
331
+ # Verdict
332
+ verdict = report.get("verdict", "?")
333
+ labels = {"already_calibrated": "ALREADY CALIBRATED", "improved": "IMPROVED",
334
+ "marginal_improvement": "MARGINAL IMPROVEMENT", "no_improvement": "NO IMPROVEMENT"}
335
+ lines.extend(["## Verdict", "", f"**{labels.get(verdict, verdict.upper())}**", "",
336
+ report.get("reason", "")])
337
+
338
+ return "\n".join(lines)
339
+
340
+
341
+ def main() -> None:
342
+ parser = argparse.ArgumentParser(description="Probability calibration")
343
+ parser.add_argument("exp_id", nargs="?", help="Experiment ID")
344
+ parser.add_argument("--method", choices=CALIBRATION_METHODS + ["auto"], default="auto")
345
+ parser.add_argument("--config", default="config.yaml")
346
+ parser.add_argument("--log", default=DEFAULT_LOG_PATH)
347
+ parser.add_argument("--json", action="store_true")
348
+ args = parser.parse_args()
349
+
350
+ # No data source is wired up here; calibrate_model returns its usage error, which is printed below
351
+ report = calibrate_model(exp_id=args.exp_id, method=args.method, config_path=args.config)
352
+
353
+ if "error" not in report:
354
+ filepath = save_calibration_report(report)
355
+ print(f"Saved to {filepath}", file=sys.stderr)
356
+
357
+ if args.json:
358
+ print(json.dumps(report, indent=2, default=str))
359
+ else:
360
+ print(format_calibration_report(report))
361
+
362
+
363
+ if __name__ == "__main__":
364
+ main()
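
The hunk above adds the calibration script (scripts/calibration.py, per its usage string). For orientation, its metric and post-hoc calibration helpers can be exercised standalone with synthetic arrays. The sketch below is illustrative only, not part of the package, and assumes the scripts directory is importable as a package (as the file's own "from scripts.turing_io import ..." suggests):

    import numpy as np
    from scripts.calibration import compute_ece, compute_mce, calibrate_model

    rng = np.random.default_rng(0)
    logits = rng.normal(0.0, 2.0, size=1000)      # hypothetical model logits
    # True label probability uses a flatter slope than the model's, so the
    # raw probabilities below are deliberately overconfident.
    labels = (rng.random(1000) < 1 / (1 + np.exp(-0.5 * logits))).astype(float)
    probs = 1 / (1 + np.exp(-logits))

    print("ECE:", compute_ece(probs, labels), "MCE:", compute_mce(probs, labels))

    report = calibrate_model(probabilities=probs, logits=logits, labels=labels, method="auto")
    print(report["verdict"], report["best_method"])

With method="auto" the script tries Platt, isotonic, and temperature scaling and reports the one with the lowest post-calibration ECE.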
@@ -0,0 +1,337 @@
1
+ #!/usr/bin/env python3
2
+ """Training curriculum optimization for the autoresearch pipeline.
3
+
4
+ Orders training data by difficulty and measures whether curriculum
5
+ learning improves convergence speed or final performance. Tests
6
+ easy-to-hard, hard-to-easy, self-paced, and random strategies.
7
+
8
+ Usage:
9
+ python scripts/curriculum_optimizer.py exp-042
10
+ python scripts/curriculum_optimizer.py --strategies easy_to_hard,random
11
+ python scripts/curriculum_optimizer.py --json
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import argparse
17
+ import json
18
+ import sys
19
+ from datetime import datetime, timezone
20
+ from pathlib import Path
21
+
22
+ import numpy as np
23
+ import yaml
24
+
25
+ from scripts.turing_io import load_config
26
+
27
+ DEFAULT_STRATEGIES = ["random", "easy_to_hard", "hard_to_easy", "self_paced"]
28
+ IMPOSSIBLE_THRESHOLD = 0.9  # Samples with difficulty above this are flagged as likely mislabeled
29
+
30
+
31
+ # --- Difficulty Scoring ---
32
+
33
+
34
+ def score_difficulty_by_loss(
35
+ losses: np.ndarray,
36
+ ) -> np.ndarray:
37
+ """Score sample difficulty by loss value (higher loss = harder).
38
+
39
+ Normalizes to [0, 1].
40
+ """
41
+ if len(losses) == 0:
42
+ return np.array([])
43
+
44
+ min_loss = np.min(losses)
45
+ max_loss = np.max(losses)
46
+ if max_loss == min_loss:
47
+ return np.zeros(len(losses))
48
+
49
+ return (losses - min_loss) / (max_loss - min_loss)
50
+
51
+
52
+ def score_difficulty_by_margin(
53
+ margins: np.ndarray,
54
+ ) -> np.ndarray:
55
+ """Score sample difficulty by margin (smaller margin = harder).
56
+
57
+ Margins = distance from decision boundary. Normalizes to [0, 1].
58
+ """
59
+ if len(margins) == 0:
60
+ return np.array([])
61
+
62
+ min_m = np.min(margins)
63
+ max_m = np.max(margins)
64
+ if max_m == min_m:
65
+ return np.full(len(margins), 0.5)
66
+
67
+ # Invert: small margin = high difficulty
68
+ return 1.0 - (margins - min_m) / (max_m - min_m)
69
+
70
+
71
+ def score_difficulty_by_disagreement(
72
+ multi_seed_predictions: list[np.ndarray],
73
+ labels: np.ndarray,
74
+ ) -> np.ndarray:
75
+ """Score difficulty by prediction disagreement across seeds.
76
+
77
+ Samples where different seeds disagree are "hard" (and possibly mislabeled).
78
+ """
79
+ if not multi_seed_predictions or len(labels) == 0:
80
+ return np.array([])
81
+
82
+ n_samples = len(labels)
83
+ n_seeds = len(multi_seed_predictions)
84
+
85
+ agreement = np.zeros(n_samples)
86
+ for preds in multi_seed_predictions:
87
+ if len(preds) == n_samples:
88
+ agreement += (preds == labels).astype(float)
89
+
90
+ agreement /= n_seeds # Fraction of seeds that got it right
91
+
92
+ # Disagreement = difficulty
93
+ return 1.0 - agreement
94
+
95
+
96
+ # --- Curriculum Strategies ---
97
+
98
+
99
+ def apply_curriculum(
100
+ indices: np.ndarray,
101
+ difficulties: np.ndarray,
102
+ strategy: str,
103
+ ) -> np.ndarray:
104
+ """Reorder sample indices according to curriculum strategy.
105
+
106
+ Args:
107
+ indices: Original sample indices.
108
+ difficulties: Difficulty scores [0, 1] per sample.
109
+ strategy: Curriculum strategy name.
110
+
111
+ Returns:
112
+ Reordered indices.
113
+ """
114
+ if len(indices) == 0:
115
+ return indices
116
+
117
+ if strategy == "random":
118
+ indices = np.random.permutation(indices)  # shuffled copy; avoids mutating the caller's array
119
+ return indices
120
+
121
+ elif strategy == "easy_to_hard":
122
+ order = np.argsort(difficulties)
123
+ return indices[order]
124
+
125
+ elif strategy == "hard_to_easy":
126
+ order = np.argsort(difficulties)[::-1]
127
+ return indices[order]
128
+
129
+ elif strategy == "self_paced":
130
+ # Start with easiest 20%, then gradually include harder
131
+ order = np.argsort(difficulties)
132
+ n = len(order)
133
+ # Shuffle within difficulty bands
134
+ bands = [order[:n // 5], order[n // 5:2 * n // 5],
135
+ order[2 * n // 5:3 * n // 5], order[3 * n // 5:4 * n // 5],
136
+ order[4 * n // 5:]]
137
+ result = []
138
+ for band in bands:
139
+ np.random.shuffle(band)
140
+ result.extend(band)
141
+ return indices[np.array(result)]  # map positions back through indices, as in the other strategies
142
+
143
+ return indices
144
+
145
+
146
+ def detect_impossible_samples(
147
+ difficulties: np.ndarray,
148
+ threshold: float = IMPOSSIBLE_THRESHOLD,
149
+ ) -> list[int]:
150
+ """Find samples that are consistently difficult (likely mislabeled).
151
+
152
+ Returns list of sample indices.
153
+ """
154
+ return [int(i) for i in range(len(difficulties)) if difficulties[i] > threshold]
155
+
156
+
157
+ # --- Strategy Comparison ---
158
+
159
+
160
+ def compare_strategies(
161
+ strategy_results: dict[str, dict],
162
+ primary_metric: str = "accuracy",
163
+ ) -> dict:
164
+ """Compare curriculum strategy results.
165
+
166
+ Args:
167
+ strategy_results: {strategy_name: {metric_value, convergence_epoch, ...}}
168
+
169
+ Returns:
170
+ Comparison report with best strategy and verdict.
171
+ """
172
+ if not strategy_results:
173
+ return {"best_strategy": None, "verdict": "no_data"}
174
+
175
+ # Find baseline (random)
176
+ baseline = strategy_results.get("random", {})
177
+ baseline_metric = baseline.get("metric_value", 0)
178
+ baseline_epochs = baseline.get("convergence_epoch")
179
+
180
+ results = []
181
+ for name, data in strategy_results.items():
182
+ metric = data.get("metric_value", 0)
183
+ epochs = data.get("convergence_epoch")
184
+ speedup = None
185
+ if epochs and baseline_epochs and baseline_epochs > 0:
186
+ speedup = round(1 - epochs / baseline_epochs, 4)
187
+
188
+ results.append({
189
+ "strategy": name,
190
+ "metric_value": round(metric, 6) if metric else None,
191
+ "convergence_epoch": epochs,
192
+ "delta_vs_random": round(metric - baseline_metric, 6) if metric and baseline_metric else None,
193
+ "speedup": speedup,
194
+ })
195
+
196
+ # Find best by metric
197
+ with_metric = [r for r in results if r["metric_value"] is not None]
198
+ best = max(with_metric, key=lambda r: r["metric_value"]) if with_metric else None
199
+
200
+ verdict = "no_improvement"
201
+ if best and best.get("delta_vs_random") and best["delta_vs_random"] > 0.005:
202
+ verdict = "curriculum_helps"
203
+ elif best and best.get("speedup") and best["speedup"] > 0.1:
204
+ verdict = "faster_convergence"
205
+
206
+ return {
207
+ "results": results,
208
+ "best_strategy": best.get("strategy") if best else None,
209
+ "verdict": verdict,
210
+ }
211
+
212
+
213
+ # --- Full Pipeline ---
214
+
215
+
216
+ def curriculum_analysis(
217
+ difficulties: np.ndarray | None = None,
218
+ strategy_results: dict[str, dict] | None = None,
219
+ exp_id: str | None = None,
220
+ config_path: str = "config.yaml",
221
+ ) -> dict:
222
+ """Run curriculum analysis."""
223
+ config = load_config(config_path)
224
+ primary_metric = config.get("evaluation", {}).get("primary_metric", "accuracy")
225
+
226
+ report = {
227
+ "generated_at": datetime.now(timezone.utc).isoformat(),
228
+ "experiment_id": exp_id,
229
+ "primary_metric": primary_metric,
230
+ }
231
+
232
+ if difficulties is not None:
233
+ impossible = detect_impossible_samples(difficulties)
234
+ report["difficulty_stats"] = {
235
+ "n_samples": len(difficulties),
236
+ "mean_difficulty": round(float(np.mean(difficulties)), 4),
237
+ "n_impossible": len(impossible),
238
+ "impossible_indices": impossible[:20],
239
+ }
240
+
241
+ if strategy_results:
242
+ comparison = compare_strategies(strategy_results, primary_metric)
243
+ report["comparison"] = comparison
244
+ else:
245
+ report["note"] = "Provide strategy_results for comparison. Use /turing:curriculum to run strategies."
246
+ report["available_strategies"] = DEFAULT_STRATEGIES
247
+
248
+ return report
249
+
250
+
251
+ # --- Report Formatting ---
252
+
253
+
254
+ def save_curriculum_report(report: dict, output_dir: str = "experiments/curriculum") -> Path:
255
+ out_path = Path(output_dir)
256
+ out_path.mkdir(parents=True, exist_ok=True)
257
+ exp_id = report.get("experiment_id", "unknown")
258
+ filepath = out_path / f"{exp_id}-curriculum.yaml"
259
+ clean = json.loads(json.dumps(report, default=str))
260
+ with open(filepath, "w") as f:
261
+ yaml.dump(clean, f, default_flow_style=False, sort_keys=False)
262
+ return filepath
263
+
264
+
265
+ def format_curriculum_report(report: dict) -> str:
266
+ if "error" in report:
267
+ return f"ERROR: {report['error']}"
268
+
269
+ exp_id = report.get("experiment_id", "?")
270
+ metric = report.get("primary_metric", "metric")
271
+
272
+ lines = [f"# Curriculum Analysis: {exp_id}", "",
273
+ f"*Generated {report.get('generated_at', 'N/A')[:19]}*", ""]
274
+
275
+ # Difficulty stats
276
+ diff_stats = report.get("difficulty_stats")
277
+ if diff_stats:
278
+ lines.extend([
279
+ "## Difficulty Distribution",
280
+ f"- **Samples:** {diff_stats['n_samples']}",
281
+ f"- **Mean difficulty:** {diff_stats['mean_difficulty']:.4f}",
282
+ f"- **Impossible samples:** {diff_stats['n_impossible']} (likely mislabeled)",
283
+ "",
284
+ ])
285
+
286
+ # Strategy comparison
287
+ comparison = report.get("comparison")
288
+ if comparison:
289
+ results = comparison.get("results", [])
290
+ if results:
291
+ lines.extend(["## Strategy Comparison", "",
292
+ f"| Strategy | {metric} | Δ vs Random | Speedup |",
293
+ "|----------|--------|-------------|---------|"])
294
+ best_name = comparison.get("best_strategy")
295
+ for r in results:
296
+ val = f"{r['metric_value']:.4f}" if r.get("metric_value") is not None else "N/A"
297
+ delta = f"{r['delta_vs_random']:+.4f}" if r.get("delta_vs_random") is not None else "—"
298
+ speedup = f"{r['speedup']:+.0%}" if r.get("speedup") is not None else "—"
299
+ marker = " ← BEST" if r["strategy"] == best_name else ""
300
+ lines.append(f"| {r['strategy']} | {val} | {delta} | {speedup} |{marker}")
301
+ lines.append("")
302
+
303
+ verdict_labels = {
304
+ "curriculum_helps": "Curriculum learning improves final performance",
305
+ "faster_convergence": "Curriculum learning converges faster (similar final performance)",
306
+ "no_improvement": "No significant improvement from curriculum ordering",
307
+ }
308
+ verdict = comparison.get("verdict", "?")
309
+ lines.extend(["## Verdict", "", f"**{verdict_labels.get(verdict, verdict.upper())}**"])
310
+ elif report.get("note"):
311
+ lines.append(f"*{report['note']}*")
312
+
313
+ return "\n".join(lines)
314
+
315
+
316
+ def main() -> None:
317
+ parser = argparse.ArgumentParser(description="Training curriculum optimization")
318
+ parser.add_argument("exp_id", nargs="?", help="Experiment ID")
319
+ parser.add_argument("--strategies", help="Comma-separated strategies")
320
+ parser.add_argument("--config", default="config.yaml")
321
+ parser.add_argument("--json", action="store_true")
322
+ args = parser.parse_args()
323
+
324
+ report = curriculum_analysis(exp_id=args.exp_id, config_path=args.config)
325
+
326
+ if "error" not in report:
327
+ filepath = save_curriculum_report(report)
328
+ print(f"Saved to {filepath}", file=sys.stderr)
329
+
330
+ if args.json:
331
+ print(json.dumps(report, indent=2, default=str))
332
+ else:
333
+ print(format_curriculum_report(report))
334
+
335
+
336
+ if __name__ == "__main__":
337
+ main()
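
The second hunk adds the curriculum optimizer (scripts/curriculum_optimizer.py, per its usage string). A minimal sketch of how its pieces fit together, again illustrative rather than part of the package, using made-up loss values and hypothetical per-strategy results:

    import numpy as np
    from scripts.curriculum_optimizer import (
        score_difficulty_by_loss, apply_curriculum, compare_strategies,
    )

    losses = np.array([0.1, 2.3, 0.4, 5.0, 0.9])   # toy per-sample losses
    difficulty = score_difficulty_by_loss(losses)   # normalized to [0, 1]
    order = apply_curriculum(np.arange(len(losses)), difficulty, "easy_to_hard")
    print(order)                                    # easiest sample first

    # Hypothetical per-strategy outcomes, e.g. gathered from separate training runs
    comparison = compare_strategies({
        "random":       {"metric_value": 0.81, "convergence_epoch": 12},
        "easy_to_hard": {"metric_value": 0.83, "convergence_epoch": 9},
    })
    print(comparison["best_strategy"], comparison["verdict"])

compare_strategies treats the "random" entry as the baseline, so a metric gain above 0.005 over it yields the "curriculum_helps" verdict, and a convergence-epoch reduction above 10% yields "faster_convergence".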