claude-turing 3.2.0 → 3.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,277 @@
+ #!/usr/bin/env python3
+ """Model merging for the autoresearch pipeline.
+
+ Average or merge weights from multiple fine-tuned checkpoints into a
+ single model (model soups, TIES, DARE, greedy soup). Often beats any
+ individual model with zero additional training cost and no latency overhead.
+
+ Usage:
+     python scripts/model_merger.py exp-042 exp-053 exp-067
+     python scripts/model_merger.py exp-042 exp-053 --method greedy
+     python scripts/model_merger.py --json
+ """
+
+ from __future__ import annotations
+
+ import argparse
+ import json
+ import sys
+ from datetime import datetime, timezone
+ from pathlib import Path
+
+ import numpy as np
+ import yaml
+
+ from scripts.turing_io import load_config, load_experiments
+
+ DEFAULT_LOG_PATH = "experiments/log.jsonl"
+ MERGE_METHODS = ["uniform", "greedy", "ties", "dare"]
+
+
+ def check_compatibility(experiments: list[dict]) -> dict:
+     """Check that all models share the same architecture."""
+     model_types = {e.get("config", {}).get("model_type", "?") for e in experiments}
+     compatible = len(model_types) == 1
+     return {
+         "compatible": compatible,
+         "model_types": list(model_types),
+         "n_models": len(experiments),
+         "reason": "All models share same architecture" if compatible else f"Mixed architectures: {model_types}",
+     }
+
+
+ def plan_uniform_merge(
+     experiments: list[dict],
+     primary_metric: str,
+ ) -> dict:
+     """Plan uniform weight averaging (model soup)."""
+     metrics = [e.get("metrics", {}).get(primary_metric, 0) for e in experiments]
+     return {
+         "method": "uniform",
+         "description": "Simple average of all model weights",
+         "n_models": len(experiments),
+         "individual_metrics": [{"exp_id": e.get("experiment_id"), primary_metric: m} for e, m in zip(experiments, metrics)],
+         "weights": [round(1.0 / len(experiments), 4)] * len(experiments),
+     }
+
+
+ def plan_greedy_merge(
+     experiments: list[dict],
+     primary_metric: str,
+     merge_results: list[dict] | None = None,
+ ) -> dict:
+     """Plan greedy soup — iteratively add models only if they improve the merge."""
+     sorted_exps = sorted(experiments, key=lambda e: e.get("metrics", {}).get(primary_metric, 0), reverse=True)
+     included = [sorted_exps[0].get("experiment_id")]
+     excluded = []
+
+     if merge_results:
+         # Use actual results to determine inclusion
+         for r in merge_results[1:]:
+             if r.get("improved", True):
+                 included.append(r.get("exp_id"))
+             else:
+                 excluded.append(r.get("exp_id"))
+     else:
+         # Plan: include all by default, actual filtering done at execution
+         included = [e.get("experiment_id") for e in sorted_exps]
+
+     return {
+         "method": "greedy",
+         "description": "Iteratively add models only if they improve the merged result",
+         "included": included,
+         "excluded": excluded,
+         "n_included": len(included),
+         "n_excluded": len(excluded),
+     }
+
+
+ def plan_ties_merge(experiments: list[dict]) -> dict:
+     """Plan TIES merging (Trim, Elect sign, disjoint Merge)."""
+     return {
+         "method": "ties",
+         "description": "Trim redundant params, elect sign consensus, disjoint merge",
+         "n_models": len(experiments),
+         "steps": [
+             "1. Compute task vectors (delta from base) for each model",
+             "2. Trim: zero out smallest magnitude deltas",
+             "3. Elect: resolve sign conflicts by majority vote",
+             "4. Merge: average the surviving, sign-consistent deltas",
+         ],
+     }
+
+
+ def plan_dare_merge(experiments: list[dict]) -> dict:
+     """Plan DARE merging (Drop And REscale)."""
+     return {
+         "method": "dare",
+         "description": "Randomly drop parameters and rescale survivors to reduce interference",
+         "n_models": len(experiments),
+         "drop_rate": 0.5,
+         "steps": [
+             "1. Compute task vectors for each model",
+             "2. Randomly drop 50% of parameters per model",
+             "3. Rescale surviving parameters by 1/(1-drop_rate)",
+             "4. Average the rescaled task vectors",
+         ],
+     }
+
+
+ def compare_merge_methods(
+     method_results: dict[str, dict] | None = None,
+     experiments: list[dict] | None = None,
+     primary_metric: str = "accuracy",
+ ) -> dict:
+     """Compare merge method results."""
+     if not experiments:
+         return {"error": "No experiments provided"}
+
+     # Best single model
+     best_single = max(experiments, key=lambda e: e.get("metrics", {}).get(primary_metric, 0))
+     best_metric = best_single.get("metrics", {}).get(primary_metric, 0)
+
+     results = [{
+         "method": "best_single",
+         "metric_value": best_metric,
+         "delta": 0.0,
+         "experiment_id": best_single.get("experiment_id"),
+     }]
+
+     if method_results:
+         for method_name, data in method_results.items():
+             metric = data.get("metric_value", data.get(primary_metric, 0))
+             results.append({
+                 "method": method_name,
+                 "metric_value": metric,
+                 "delta": round(metric - best_metric, 6),
+             })
+
+     best_merge = max(results, key=lambda r: r.get("metric_value", 0))
+
+     return {
+         "results": results,
+         "best_method": best_merge.get("method"),
+         "best_metric": best_merge.get("metric_value"),
+         "improvement": best_merge.get("delta", 0),
+     }
+
+
+ def merge_analysis(
+     exp_ids: list[str] | None = None,
+     method_results: dict[str, dict] | None = None,
+     config_path: str = "config.yaml",
+     log_path: str = DEFAULT_LOG_PATH,
+ ) -> dict:
+     """Run merge analysis."""
+     config = load_config(config_path)
+     primary_metric = config.get("evaluation", {}).get("primary_metric", "accuracy")
+     experiments = load_experiments(log_path)
+
+     if exp_ids:
+         selected = [e for e in experiments if e.get("experiment_id") in exp_ids]
+     else:
+         # Default: top 3 kept experiments
+         kept = sorted(
+             [e for e in experiments if e.get("status") == "kept"],
+             key=lambda e: e.get("metrics", {}).get(primary_metric, 0), reverse=True,
+         )
+         selected = kept[:3]
+
+     if len(selected) < 2:
+         return {"error": "Need at least 2 experiments for model merging"}
+
+     compat = check_compatibility(selected)
+
+     plans = {
+         "uniform": plan_uniform_merge(selected, primary_metric),
+         "greedy": plan_greedy_merge(selected, primary_metric),
+         "ties": plan_ties_merge(selected),
+         "dare": plan_dare_merge(selected),
+     }
+
+     comparison = compare_merge_methods(method_results, selected, primary_metric) if method_results else None
+
+     return {
+         "generated_at": datetime.now(timezone.utc).isoformat(),
+         "primary_metric": primary_metric,
+         "compatibility": compat,
+         "base_models": [{"exp_id": e.get("experiment_id"),
+                          "model_type": e.get("config", {}).get("model_type"),
+                          primary_metric: e.get("metrics", {}).get(primary_metric)}
+                         for e in selected],
+         "plans": plans,
+         "comparison": comparison,
+     }
+
+
+ def save_merge_report(report: dict, output_dir: str = "experiments/merges") -> Path:
+     out = Path(output_dir); out.mkdir(parents=True, exist_ok=True)
+     ts = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
+     fp = out / f"merge-{ts}.yaml"
+     with open(fp, "w") as f: yaml.dump(json.loads(json.dumps(report, default=str)), f, default_flow_style=False, sort_keys=False)
+     return fp
+
+
+ def format_merge_report(report: dict) -> str:
+     if "error" in report: return f"ERROR: {report['error']}"
+
+     metric = report.get("primary_metric", "metric")
+     lines = ["# Model Merge Analysis", "",
+              f"*Generated {report.get('generated_at', 'N/A')[:19]}*", ""]
+
+     # Compatibility
+     compat = report.get("compatibility", {})
+     lines.append(f"**Compatibility:** {'✓' if compat.get('compatible') else '✗'} {compat.get('reason', '')}")
+     lines.append("")
+
+     # Base models
+     lines.extend(["## Base Models", "",
+                   f"| Experiment | Model Type | {metric} |",
+                   "|------------|------------|--------|"])
+     for m in report.get("base_models", []):
+         val = m.get(metric, "N/A")
+         val_str = f"{val:.4f}" if isinstance(val, float) else str(val)
+         lines.append(f"| {m.get('exp_id', '?')} | {m.get('model_type', '?')} | {val_str} |")
+     lines.append("")
+
+     # Methods
+     plans = report.get("plans", {})
+     if plans:
+         lines.extend(["## Available Methods", ""])
+         for name, plan in plans.items():
+             lines.append(f"- **{name}:** {plan.get('description', '')}")
+         lines.append("")
+
+     # Comparison (if results available)
+     comparison = report.get("comparison")
+     if comparison:
+         lines.extend(["## Results", "",
+                       f"| Method | {metric} | Δ vs Best Single |",
+                       "|--------|--------|------------------|"])
+         for r in comparison.get("results", []):
+             val = f"{r.get('metric_value', 0):.4f}"
+             delta = f"{r.get('delta', 0):+.4f}" if r.get("delta") is not None else "—"
+             marker = " ← BEST" if r["method"] == comparison.get("best_method") and r["method"] != "best_single" else ""
+             lines.append(f"| {r['method']} | {val} | {delta} |{marker}")
+         lines.append("")
+         imp = comparison.get("improvement", 0)
+         if imp > 0:
+             lines.append(f"**{comparison['best_method']} improves by {imp:+.4f} over best single model — zero latency cost.**")
+
+     return "\n".join(lines)
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser(description="Model merging")
+     parser.add_argument("exp_ids", nargs="*", help="Experiment IDs to merge")
+     parser.add_argument("--method", choices=MERGE_METHODS, help="Specific merge method")
+     parser.add_argument("--config", default="config.yaml")
+     parser.add_argument("--log", default=DEFAULT_LOG_PATH)
+     parser.add_argument("--json", action="store_true")
+     args = parser.parse_args()
+     report = merge_analysis(exp_ids=args.exp_ids or None, config_path=args.config, log_path=args.log)
+     if "error" not in report:
+         fp = save_merge_report(report); print(f"Saved to {fp}", file=sys.stderr)
+     print(json.dumps(report, indent=2, default=str) if args.json else format_merge_report(report))
+
+ if __name__ == "__main__": main()
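
Note: model_merger.py plans and compares merges but does not itself average any weights. As a minimal sketch of what the "uniform" plan (a model soup) does, assuming checkpoints are already loaded as dicts of numpy arrays; the uniform_soup helper below is illustrative and not part of this package:

```python
# Illustrative sketch; not part of claude-turing. Assumes each checkpoint is a
# dict mapping parameter names to numpy arrays with matching shapes.
import numpy as np


def uniform_soup(checkpoints: list[dict[str, np.ndarray]]) -> dict[str, np.ndarray]:
    """Average every parameter across checkpoints (the "uniform" merge plan)."""
    return {
        name: np.mean([ckpt[name] for ckpt in checkpoints], axis=0)
        for name in checkpoints[0]
    }


# Toy example: three "checkpoints" holding one 2x2 weight matrix each.
ckpts = [{"w": np.full((2, 2), v)} for v in (1.0, 2.0, 3.0)]
print(uniform_soup(ckpts)["w"])  # every entry is 2.0
```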
@@ -0,0 +1,182 @@
+ #!/usr/bin/env python3
+ """Weight pruning for the autoresearch pipeline.
+
+ Structured and unstructured weight pruning. Measures accuracy at different
+ sparsity levels, finds the knee point, and plans pruned model production.
+
+ Usage:
+     python scripts/model_pruning.py exp-042
+     python scripts/model_pruning.py exp-042 --sparsity 0.5,0.75,0.9
+     python scripts/model_pruning.py exp-042 --method magnitude
+     python scripts/model_pruning.py --json
+ """
+
+ from __future__ import annotations
+
+ import argparse
+ import json
+ import math
+ import sys
+ from datetime import datetime, timezone
+ from pathlib import Path
+
+ import numpy as np
+ import yaml
+
+ from scripts.turing_io import load_config, load_experiments
+
+ DEFAULT_LOG_PATH = "experiments/log.jsonl"
+ DEFAULT_SPARSITY_LEVELS = [0.0, 0.50, 0.75, 0.90, 0.95]
+ PRUNING_METHODS = ["magnitude", "structured", "lottery"]
+
+
+ def plan_sparsity_sweep(
+     sparsity_levels: list[float] | None = None,
+ ) -> list[dict]:
+     if sparsity_levels is None:
+         sparsity_levels = DEFAULT_SPARSITY_LEVELS
+     return [{"sparsity": s, "description": f"{s*100:.0f}% weights removed"} for s in sparsity_levels]
+
+
+ def compute_pruning_plan(
+     model_type: str,
+     hyperparams: dict,
+     method: str,
+     sparsity: float,
+ ) -> dict:
+     plan = {"method": method, "sparsity": sparsity, "config_changes": {}}
+     if "xgboost" in model_type.lower() or "lightgbm" in model_type.lower() or "forest" in model_type.lower():
+         n_est = hyperparams.get("n_estimators", 100)
+         plan["config_changes"]["n_estimators"] = max(1, int(n_est * (1 - sparsity)))
+         plan["strategy"] = "reduce_estimators"
+     elif method == "magnitude":
+         plan["strategy"] = "zero_small_weights"
+         plan["description"] = f"Zero out smallest {sparsity*100:.0f}% of weights by absolute value"
+     elif method == "structured":
+         plan["strategy"] = "remove_neurons"
+         plan["description"] = f"Remove {sparsity*100:.0f}% of neurons/filters by importance"
+     elif method == "lottery":
+         plan["strategy"] = "iterative_magnitude_with_rewind"
+         plan["description"] = f"Iterative pruning to {sparsity*100:.0f}% with weight rewinding"
+     return plan
+
+
+ def find_knee_point(sweep_results: list[dict], metric_key: str = "accuracy") -> dict | None:
+     if len(sweep_results) < 3:
+         return None
+     sparsities = [r["sparsity"] for r in sweep_results]
+     metrics = [r.get(metric_key, 0) for r in sweep_results]
+     max_drop = 0
+     knee_idx = None
+     for i in range(1, len(metrics)):
+         drop = metrics[i - 1] - metrics[i]
+         if drop > max_drop:
+             max_drop = drop
+             knee_idx = i
+     if knee_idx and knee_idx > 0:
+         return {"sparsity": sparsities[knee_idx - 1],
+                 "metric_before_knee": round(metrics[knee_idx - 1], 6),
+                 "metric_after_knee": round(metrics[knee_idx], 6),
+                 "drop_at_knee": round(max_drop, 6)}
+     return None
+
+
+ def estimate_speedup(sparsity: float) -> float:
+     if sparsity <= 0:
+         return 1.0
+     return round(1.0 / (1.0 - sparsity * 0.7), 2)
+
+
+ def estimate_size_reduction(sparsity: float) -> float:
+     return round(sparsity * 100, 1)
+
+
+ def analyze_pruning(
+     sweep_results: list[dict] | None = None,
+     exp_id: str | None = None,
+     method: str = "magnitude",
+     config_path: str = "config.yaml",
+     log_path: str = DEFAULT_LOG_PATH,
+ ) -> dict:
+     config = load_config(config_path)
+     primary_metric = config.get("evaluation", {}).get("primary_metric", "accuracy")
+
+     if sweep_results:
+         knee = find_knee_point(sweep_results, primary_metric)
+         for r in sweep_results:
+             r["speedup"] = estimate_speedup(r["sparsity"])
+             r["size_reduction_pct"] = estimate_size_reduction(r["sparsity"])
+         recommended = None
+         for r in sweep_results:
+             delta = abs(r.get(primary_metric, 0) - sweep_results[0].get(primary_metric, 0))
+             if delta < 0.005 and r["sparsity"] > 0:
+                 recommended = r
+         return {
+             "generated_at": datetime.now(timezone.utc).isoformat(),
+             "experiment_id": exp_id, "method": method, "primary_metric": primary_metric,
+             "sweep_results": sweep_results, "knee_point": knee,
+             "recommended": recommended,
+         }
+
+     experiments = load_experiments(log_path)
+     exp = next((e for e in experiments if e.get("experiment_id") == exp_id), None) if exp_id else None
+     model_type = exp.get("config", {}).get("model_type", "unknown") if exp else "unknown"
+     hyperparams = exp.get("config", {}).get("hyperparams", {}) if exp else {}
+
+     levels = plan_sparsity_sweep()
+     plans = [compute_pruning_plan(model_type, hyperparams, method, s["sparsity"]) for s in levels]
+     return {
+         "action": "plan", "generated_at": datetime.now(timezone.utc).isoformat(),
+         "experiment_id": exp_id, "model_type": model_type, "method": method,
+         "sparsity_levels": levels, "plans": plans,
+         "message": f"Run {len(levels)} experiments at sparsity levels: {', '.join(s['description'] for s in levels)}",
+     }
+
+
+ def save_pruning_report(report: dict, output_dir: str = "experiments/pruning") -> Path:
+     out = Path(output_dir); out.mkdir(parents=True, exist_ok=True)
+     exp_id = report.get("experiment_id", "unknown")
+     fp = out / f"{exp_id}-pruning.yaml"
+     with open(fp, "w") as f: yaml.dump(json.loads(json.dumps(report, default=str)), f, default_flow_style=False, sort_keys=False)
+     return fp
+
+
+ def format_pruning_report(report: dict) -> str:
+     if "error" in report: return f"ERROR: {report['error']}"
+     if report.get("action") == "plan":
+         lines = ["# Pruning Plan", "", f"**Model:** {report.get('model_type', '?')}", f"**Method:** {report.get('method', '?')}", ""]
+         for p in report.get("plans", []):
+             lines.append(f"- {p.get('sparsity', 0)*100:.0f}%: {p.get('strategy', '?')}")
+         return "\n".join(lines)
+
+     metric = report.get("primary_metric", "metric")
+     lines = [f"# Pruning Results: {report.get('experiment_id', '?')}", "",
+              f"| Sparsity | {metric} | Speedup | Size Reduction |",
+              "|----------|--------|---------|----------------|"]
+     for r in report.get("sweep_results", []):
+         val = f"{r.get(metric, 0):.4f}" if isinstance(r.get(metric), (int, float)) else "N/A"
+         lines.append(f"| {r['sparsity']*100:.0f}% | {val} | {r.get('speedup', '?')}x | {r.get('size_reduction_pct', '?')}% |")
+     knee = report.get("knee_point")
+     if knee:
+         lines.extend(["", f"**Knee point:** {knee['sparsity']*100:.0f}% sparsity (accuracy drops {knee['drop_at_knee']:.4f})"])
+     rec = report.get("recommended")
+     if rec:
+         lines.extend(["", f"**Recommended:** {rec['sparsity']*100:.0f}% sparsity ({rec.get('speedup', '?')}x speedup, <0.5% accuracy loss)"])
+     return "\n".join(lines)
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser(description="Weight pruning")
+     parser.add_argument("exp_id", nargs="?")
+     parser.add_argument("--sparsity", help="Comma-separated sparsity levels")
+     parser.add_argument("--method", choices=PRUNING_METHODS, default="magnitude")
+     parser.add_argument("--config", default="config.yaml")
+     parser.add_argument("--log", default=DEFAULT_LOG_PATH)
+     parser.add_argument("--json", action="store_true")
+     args = parser.parse_args()
+     report = analyze_pruning(exp_id=args.exp_id, method=args.method, config_path=args.config, log_path=args.log)
+     if "error" not in report:
+         fp = save_pruning_report(report); print(f"Saved to {fp}", file=sys.stderr)
+     print(json.dumps(report, indent=2, default=str) if args.json else format_pruning_report(report))
+
+ if __name__ == "__main__": main()
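
Note: the "zero_small_weights" strategy is only planned, not executed, by model_pruning.py. A minimal sketch of magnitude pruning on a single weight array, assuming numpy arrays; the magnitude_prune helper below is illustrative and not part of this package:

```python
# Illustrative sketch; not part of claude-turing.
import numpy as np


def magnitude_prune(weights: np.ndarray, sparsity: float) -> np.ndarray:
    """Zero out the smallest `sparsity` fraction of weights by absolute value."""
    if sparsity <= 0:
        return weights.copy()
    # Everything below the sparsity-quantile of |w| is set to zero.
    threshold = np.quantile(np.abs(weights), sparsity)
    return np.where(np.abs(weights) < threshold, 0.0, weights)


rng = np.random.default_rng(0)
w = rng.normal(size=(4, 4))
pruned = magnitude_prune(w, 0.75)
print(f"achieved sparsity: {np.mean(pruned == 0.0):.2f}")  # roughly 0.75
```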
@@ -0,0 +1,177 @@
+ #!/usr/bin/env python3
+ """Post-training quantization for the autoresearch pipeline.
+
+ Quantize model weights from FP32 to INT8/FP16, measure accuracy loss
+ per precision level, and plan quantization-aware training if needed.
+
+ Usage:
+     python scripts/model_quantization.py exp-042
+     python scripts/model_quantization.py exp-042 --precision int8
+     python scripts/model_quantization.py --json
+ """
+
+ from __future__ import annotations
+
+ import argparse
+ import json
+ import math
+ import sys
+ from datetime import datetime, timezone
+ from pathlib import Path
+
+ import numpy as np
+ import yaml
+
+ from scripts.turing_io import load_config, load_experiments
+
+ DEFAULT_LOG_PATH = "experiments/log.jsonl"
+ PRECISION_LEVELS = ["fp32", "fp16", "int8_dynamic", "int8_static"]
+ QAT_THRESHOLD = 0.01  # If PTQ accuracy loss > 1%, suggest QAT
+
+
+ def compute_quantization_plan(
+     precision: str,
+     model_size_bytes: int | None = None,
+     latency_ms: float | None = None,
+ ) -> dict:
+     size_factors = {"fp32": 1.0, "fp16": 0.5, "int8_dynamic": 0.25, "int8_static": 0.25}
+     latency_factors = {"fp32": 1.0, "fp16": 0.58, "int8_dynamic": 0.39, "int8_static": 0.37}
+
+     factor_s = size_factors.get(precision, 1.0)
+     factor_l = latency_factors.get(precision, 1.0)
+
+     plan = {
+         "precision": precision,
+         "size_factor": factor_s,
+         "latency_factor": factor_l,
+         "estimated_size_bytes": int(model_size_bytes * factor_s) if model_size_bytes else None,
+         "estimated_latency_ms": round(latency_ms * factor_l, 2) if latency_ms else None,
+         "size_reduction_pct": round((1 - factor_s) * 100, 1),
+         "speedup": round(1 / factor_l, 2) if factor_l > 0 else None,
+     }
+
+     if precision == "fp16":
+         plan["description"] = "Half-precision floating point — GPU inference"
+         plan["method"] = "cast_to_fp16"
+     elif precision == "int8_dynamic":
+         plan["description"] = "Dynamic INT8 — weights quantized, activations at runtime"
+         plan["method"] = "dynamic_quantization"
+     elif precision == "int8_static":
+         plan["description"] = "Static INT8 — calibrated activation ranges, best accuracy"
+         plan["method"] = "static_quantization"
+         plan["requires_calibration"] = True
+     else:
+         plan["description"] = "Full precision (baseline)"
+         plan["method"] = "none"
+
+     return plan
+
+
+ def compare_precision_levels(
+     sweep_results: list[dict] | None = None,
+     model_size_bytes: int | None = None,
+     latency_ms: float | None = None,
+     primary_metric: str = "accuracy",
+ ) -> dict:
+     """Compare quantization results across precision levels."""
+     if sweep_results:
+         baseline = next((r for r in sweep_results if r.get("precision") == "fp32"), sweep_results[0])
+         baseline_metric = baseline.get(primary_metric, 0)
+
+         for r in sweep_results:
+             r["delta"] = round(r.get(primary_metric, 0) - baseline_metric, 6)
+             plan = compute_quantization_plan(r["precision"], model_size_bytes, latency_ms)
+             r.update({k: v for k, v in plan.items() if k not in r})
+
+         best = min(
+             [r for r in sweep_results if r["precision"] != "fp32"],
+             key=lambda r: abs(r.get("delta", 0)) + (1 - r.get("speedup", 1)) * 0.1,
+             default=None,
+         )
+
+         needs_qat = any(abs(r.get("delta", 0)) > QAT_THRESHOLD for r in sweep_results if "int8" in r.get("precision", ""))
+
+         return {
+             "sweep_results": sweep_results,
+             "recommended": best,
+             "needs_qat": needs_qat,
+         }
+
+     # Plan mode
+     plans = [compute_quantization_plan(p, model_size_bytes, latency_ms) for p in PRECISION_LEVELS]
+     return {"action": "plan", "plans": plans}
+
+
+ def analyze_quantization(
+     sweep_results: list[dict] | None = None,
+     exp_id: str | None = None,
+     config_path: str = "config.yaml",
+     log_path: str = DEFAULT_LOG_PATH,
+ ) -> dict:
+     config = load_config(config_path)
+     primary_metric = config.get("evaluation", {}).get("primary_metric", "accuracy")
+
+     experiments = load_experiments(log_path)
+     exp = next((e for e in experiments if e.get("experiment_id") == exp_id), None) if exp_id else None
+
+     model_size = exp.get("metrics", {}).get("model_size_bytes") if exp else None
+     latency = exp.get("metrics", {}).get("latency_ms", exp.get("metrics", {}).get("inference_ms")) if exp else None
+
+     comparison = compare_precision_levels(sweep_results, model_size, latency, primary_metric)
+
+     return {
+         "generated_at": datetime.now(timezone.utc).isoformat(),
+         "experiment_id": exp_id,
+         "primary_metric": primary_metric,
+         **comparison,
+     }
+
+
+ def save_quantization_report(report: dict, output_dir: str = "experiments/quantization") -> Path:
+     out = Path(output_dir); out.mkdir(parents=True, exist_ok=True)
+     exp_id = report.get("experiment_id", "unknown")
+     fp = out / f"{exp_id}-quantization.yaml"
+     with open(fp, "w") as f: yaml.dump(json.loads(json.dumps(report, default=str)), f, default_flow_style=False, sort_keys=False)
+     return fp
+
+
+ def format_quantization_report(report: dict) -> str:
+     if "error" in report: return f"ERROR: {report['error']}"
+
+     if report.get("action") == "plan":
+         lines = ["# Quantization Plan", ""]
+         for p in report.get("plans", []):
+             lines.append(f"- **{p['precision']}**: {p['description']} (size: {p['size_reduction_pct']}% reduction, speedup: {p.get('speedup', '?')}x)")
+         return "\n".join(lines)
+
+     metric = report.get("primary_metric", "metric")
+     lines = [f"# Quantization Results: {report.get('experiment_id', '?')}", "",
+              f"| Precision | {metric} | Delta | Speedup | Size Reduction |",
+              "|-----------|--------|-------|---------|----------------|"]
+     for r in report.get("sweep_results", []):
+         val = f"{r.get(metric, 0):.4f}" if isinstance(r.get(metric), (int, float)) else "N/A"
+         delta = f"{r.get('delta', 0):+.4f}" if r.get("delta") is not None else "—"
+         lines.append(f"| {r['precision']} | {val} | {delta} | {r.get('speedup', '?')}x | {r.get('size_reduction_pct', '?')}% |")
+
+     rec = report.get("recommended")
+     if rec:
+         lines.extend(["", f"**Recommended:** {rec['precision']} ({rec.get('delta', 0):+.4f} accuracy, {rec.get('speedup', '?')}x speedup)"])
+     if report.get("needs_qat"):
+         lines.extend(["", "**Note:** INT8 accuracy loss > 1% — consider quantization-aware training (QAT)"])
+     return "\n".join(lines)
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser(description="Post-training quantization")
+     parser.add_argument("exp_id", nargs="?")
+     parser.add_argument("--precision", help="Specific precision level")
+     parser.add_argument("--config", default="config.yaml")
+     parser.add_argument("--log", default=DEFAULT_LOG_PATH)
+     parser.add_argument("--json", action="store_true")
+     args = parser.parse_args()
+     report = analyze_quantization(exp_id=args.exp_id, config_path=args.config, log_path=args.log)
+     if "error" not in report:
+         fp = save_quantization_report(report); print(f"Saved to {fp}", file=sys.stderr)
+     print(json.dumps(report, indent=2, default=str) if args.json else format_quantization_report(report))
+
+ if __name__ == "__main__": main()
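
Note: model_quantization.py estimates size and latency factors but performs no quantization itself. A minimal sketch of symmetric per-tensor INT8 post-training quantization and its round-trip error, assuming numpy arrays; quantize_int8 and dequantize below are illustrative and not part of this package:

```python
# Illustrative sketch; not part of claude-turing.
import numpy as np


def quantize_int8(w: np.ndarray) -> tuple[np.ndarray, float]:
    """Symmetric per-tensor INT8 quantization: w is approximated by scale * q."""
    scale = float(np.abs(w).max()) / 127.0
    q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
    return q, scale


def dequantize(q: np.ndarray, scale: float) -> np.ndarray:
    return q.astype(np.float32) * scale


rng = np.random.default_rng(0)
w = rng.normal(scale=0.1, size=(256, 256)).astype(np.float32)
q, scale = quantize_int8(w)
err = float(np.abs(w - dequantize(q, scale)).mean())
print(f"mean absolute round-trip error: {err:.6f}")  # small but nonzero
```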
@@ -124,6 +124,12 @@ TEMPLATE_DIRS = {
          "model_xray.py",
          "sensitivity_analysis.py",
          "calibration.py",
+         "feature_intelligence.py",
+         "curriculum_optimizer.py",
+         "model_pruning.py",
+         "model_quantization.py",
+         "model_merger.py",
+         "architecture_surgery.py",
      ],
      "tests": ["__init__.py", "conftest.py"],
  }
@@ -160,6 +166,12 @@ DIRECTORIES_TO_CREATE = [
      "experiments/xrays",
      "experiments/sensitivity",
      "experiments/calibration",
+     "experiments/features",
+     "experiments/curriculum",
+     "experiments/pruning",
+     "experiments/quantization",
+     "experiments/merges",
+     "experiments/surgery",
      "experiments/logs",
      "models/best",
      "models/archive",