claude-turing 2.2.1 → 2.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/plugin.json +2 -2
- package/README.md +8 -2
- package/commands/diff.md +48 -0
- package/commands/ensemble.md +54 -0
- package/commands/regress.md +53 -0
- package/commands/stitch.md +49 -0
- package/commands/turing.md +12 -0
- package/commands/warm.md +53 -0
- package/commands/watch.md +60 -0
- package/config/watch_alerts.yaml +36 -0
- package/package.json +1 -1
- package/src/install.js +3 -0
- package/src/verify.js +7 -0
- package/templates/scripts/__pycache__/build_ensemble.cpython-314.pyc +0 -0
- package/templates/scripts/__pycache__/experiment_diff.cpython-314.pyc +0 -0
- package/templates/scripts/__pycache__/generate_brief.cpython-314.pyc +0 -0
- package/templates/scripts/__pycache__/pipeline_manager.cpython-314.pyc +0 -0
- package/templates/scripts/__pycache__/regression_gate.cpython-314.pyc +0 -0
- package/templates/scripts/__pycache__/scaffold.cpython-314.pyc +0 -0
- package/templates/scripts/__pycache__/training_monitor.cpython-314.pyc +0 -0
- package/templates/scripts/__pycache__/warm_start.cpython-314.pyc +0 -0
- package/templates/scripts/build_ensemble.py +696 -0
- package/templates/scripts/experiment_diff.py +703 -0
- package/templates/scripts/generate_brief.py +79 -0
- package/templates/scripts/pipeline_manager.py +457 -0
- package/templates/scripts/regression_gate.py +536 -0
- package/templates/scripts/scaffold.py +12 -0
- package/templates/scripts/training_monitor.py +611 -0
- package/templates/scripts/warm_start.py +493 -0
package/templates/scripts/warm_start.py
@@ -0,0 +1,493 @@
#!/usr/bin/env python3
"""Warm-start from prior model for the autoresearch pipeline.

Takes a trained checkpoint and uses it as initialization for a different
configuration. Automates the "start from here but change X" pattern for
tree models (continue boosting), neural networks (load weights, freeze
layers), and scikit-learn (warm_start=True).

Usage:
    python scripts/warm_start.py exp-042
    python scripts/warm_start.py exp-042 --freeze-layers encoder
    python scripts/warm_start.py exp-042 --unfreeze-after 5
    python scripts/warm_start.py exp-042 --lr-factor 0.1
"""

from __future__ import annotations

import argparse
import json
import sys
from datetime import datetime, timezone
from pathlib import Path

import yaml

from scripts.turing_io import load_config, load_experiments

DEFAULT_LOG_PATH = "experiments/log.jsonl"
DEFAULT_CHECKPOINT_DIR = "experiments/checkpoints"
DEFAULT_LR_FACTOR = 0.1  # Reduce LR by 10x for fine-tuning


# --- Model Type Detection ---


TREE_MODELS = {"xgboost", "lightgbm", "catboost", "gradient_boosting", "gbm"}
NEURAL_MODELS = {"mlp", "neural_network", "nn", "pytorch", "tensorflow", "keras", "transformer"}
SKLEARN_WARM_STARTABLE = {
    "random_forest", "gradient_boosting", "mlp",
    "sgd", "passive_aggressive", "perceptron",
    "bagging", "adaboost",
}


def detect_model_type(experiment: dict) -> str:
    """Detect the model type category from an experiment.

    Returns one of: 'tree', 'neural', 'sklearn', 'unknown'.
    """
    config = experiment.get("config", {})
    model_type = config.get("model_type", "").lower()

    if any(t in model_type for t in TREE_MODELS):
        return "tree"
    if any(t in model_type for t in NEURAL_MODELS):
        return "neural"
    if any(t in model_type for t in SKLEARN_WARM_STARTABLE):
        return "sklearn"

    # Check hyperparams for hints
    hyperparams = config.get("hyperparams", {})
    if "n_estimators" in hyperparams and ("max_depth" in hyperparams or "num_leaves" in hyperparams):
        return "tree"
    if "hidden_size" in hyperparams or "layers" in hyperparams:
        return "neural"

    return "unknown"
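
# Illustrative only (not part of the published script): how the heuristics
# above classify two hypothetical experiment records.
#
#     >>> detect_model_type({"config": {"model_type": "lightgbm"}})
#     'tree'
#     >>> detect_model_type({"config": {"hyperparams": {"hidden_size": 128}}})
#     'neural'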


# --- Warm-Start Strategy ---


def plan_warm_start(
    experiment: dict,
    freeze_layers: list[str] | None = None,
    unfreeze_after: int | None = None,
    lr_factor: float = DEFAULT_LR_FACTOR,
) -> dict:
    """Plan the warm-start strategy for an experiment.

    Args:
        experiment: Source experiment to warm-start from.
        freeze_layers: Layer names to freeze (neural only).
        unfreeze_after: Unfreeze all layers after N epochs (neural only).
        lr_factor: Learning rate reduction factor for fine-tuning.

    Returns:
        Warm-start plan dict with strategy, config changes, and instructions.
    """
    model_category = detect_model_type(experiment)
    exp_id = experiment.get("experiment_id", "unknown")
    config = experiment.get("config", {})
    hyperparams = config.get("hyperparams", {})

    plan = {
        "source_experiment": exp_id,
        "model_category": model_category,
        "model_type": config.get("model_type", "unknown"),
        "strategy": None,
        "config_changes": {},
        "instructions": [],
        "warnings": [],
    }

    if model_category == "tree":
        plan.update(_plan_tree_warm_start(config, hyperparams))
    elif model_category == "neural":
        plan.update(_plan_neural_warm_start(
            config, hyperparams, freeze_layers, unfreeze_after, lr_factor,
        ))
    elif model_category == "sklearn":
        plan.update(_plan_sklearn_warm_start(config, hyperparams))
    else:
        plan["strategy"] = "unsupported"
        plan["warnings"].append(
            f"Model type '{config.get('model_type', '?')}' does not support warm-starting. "
            "Consider manually loading the checkpoint."
        )

    return plan
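
# Illustrative only: for a hypothetical random-forest experiment the planner
# dispatches to the sklearn branch defined below.
#
#     >>> plan = plan_warm_start({
#     ...     "experiment_id": "exp-007",
#     ...     "config": {"model_type": "random_forest",
#     ...                "hyperparams": {"n_estimators": 200}},
#     ... })
#     >>> plan["strategy"], plan["config_changes"]["n_estimators"]
#     ('warm_start_param', 250)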


def _plan_tree_warm_start(config: dict, hyperparams: dict) -> dict:
    """Plan warm-start for tree-based models."""
    model_type = config.get("model_type", "").lower()
    current_estimators = hyperparams.get("n_estimators", 100)

    changes = {}
    instructions = []

    if "xgboost" in model_type:
        changes["xgb_model"] = "checkpoint_path"  # Load existing model
        changes["n_estimators"] = current_estimators + 100  # Continue boosting
        instructions.append("Load XGBoost model from checkpoint")
        instructions.append(f"Continue boosting: {current_estimators} → {current_estimators + 100} estimators")
        strategy = "continue_boosting"

    elif "lightgbm" in model_type:
        changes["init_model"] = "checkpoint_path"
        changes["n_estimators"] = current_estimators + 100
        instructions.append("Load LightGBM model as init_model")
        instructions.append("Continue training with additional estimators")
        strategy = "continue_boosting"

    else:
        changes["warm_start"] = True
        changes["n_estimators"] = current_estimators + 100
        instructions.append("Set warm_start=True for incremental learning")
        strategy = "warm_start_param"

    return {
        "strategy": strategy,
        "config_changes": changes,
        "instructions": instructions,
    }
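
# Illustrative sketch (not part of the published script) of executing the
# "continue_boosting" plan with xgboost's scikit-learn wrapper. The checkpoint
# path and training arrays are hypothetical, and this assumes your xgboost
# version's fit() accepts a saved-model path via xgb_model.
#
#     import xgboost as xgb
#     model = xgb.XGBRegressor(n_estimators=200)  # per the plan: 100 -> 200
#     model.fit(X_train, y_train,
#               xgb_model="experiments/checkpoints/exp-042.xgb")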


def _plan_neural_warm_start(
    config: dict,
    hyperparams: dict,
    freeze_layers: list[str] | None,
    unfreeze_after: int | None,
    lr_factor: float,
) -> dict:
    """Plan warm-start for neural network models."""
    changes = {}
    instructions = []
    warnings = []

    # Load weights
    changes["load_checkpoint"] = True
    changes["checkpoint_source"] = config.get("model_type", "?")
    instructions.append("Load weights from source experiment checkpoint")

    # Layer freezing
    if freeze_layers:
        changes["freeze_layers"] = freeze_layers
        instructions.append(f"Freeze layers: {', '.join(freeze_layers)}")

    if unfreeze_after:
        changes["unfreeze_after_epochs"] = unfreeze_after
        instructions.append(f"Gradual unfreezing: unfreeze all after epoch {unfreeze_after}")

    # Learning rate adjustment
    current_lr = hyperparams.get("learning_rate", hyperparams.get("lr", 0.001))
    new_lr = current_lr * lr_factor
    changes["learning_rate"] = new_lr
    instructions.append(f"Reduce learning rate: {current_lr} → {new_lr} ({lr_factor}x)")

    # Reset optimizer
    changes["reset_optimizer"] = True
    instructions.append("Reset optimizer state (fresh momentum/adaptive learning rates)")

    if not freeze_layers:
        warnings.append(
            "No layers frozen — all weights will be updated. "
            "Consider freezing early layers for more stable fine-tuning."
        )

    return {
        "strategy": "load_weights",
        "config_changes": changes,
        "instructions": instructions,
        "warnings": warnings,
    }
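
# Illustrative sketch (not part of the published script) of applying this
# plan in PyTorch; `model.encoder` and the surrounding training loop are
# hypothetical.
#
#     import torch
#     for p in model.encoder.parameters():  # freeze_layers=["encoder"]
#         p.requires_grad = False
#     optimizer = torch.optim.Adam(         # reset_optimizer=True
#         (p for p in model.parameters() if p.requires_grad),
#         lr=0.001 * 0.1,                   # prior lr scaled by lr_factor
#     )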


def _plan_sklearn_warm_start(config: dict, hyperparams: dict) -> dict:
    """Plan warm-start for scikit-learn models."""
    return {
        "strategy": "warm_start_param",
        "config_changes": {
            "warm_start": True,
            "n_estimators": hyperparams.get("n_estimators", 100) + 50,
        },
        "instructions": [
            "Set warm_start=True on the estimator",
            "Increase n_estimators for additional rounds",
            "Call fit() with the original training data — model continues from prior state",
        ],
    }
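
# Illustrative sketch (not part of the published script) of the sklearn
# warm_start pattern these instructions describe; the estimator choice and
# training arrays are hypothetical.
#
#     from sklearn.ensemble import RandomForestClassifier
#     clf = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
#     clf.set_params(warm_start=True, n_estimators=150)
#     clf.fit(X_train, y_train)  # grows 50 new trees; the first 100 are kept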


# --- Checkpoint Discovery ---


def find_checkpoint(
    exp_id: str,
    checkpoint_dir: str = DEFAULT_CHECKPOINT_DIR,
) -> dict | None:
    """Find the checkpoint for a given experiment.

    Returns dict with path, format, and size if found.
    """
    ckpt_path = Path(checkpoint_dir)

    # Check for experiment-specific checkpoint directory
    exp_dir = ckpt_path / exp_id
    if exp_dir.exists() and exp_dir.is_dir():
        files = list(exp_dir.rglob("*"))
        model_files = [f for f in files if f.is_file()]
        if model_files:
            total_size = sum(f.stat().st_size for f in model_files)
            return {
                "path": str(exp_dir),
                "format": _detect_checkpoint_format(model_files),
                "n_files": len(model_files),
                "size_bytes": total_size,
                "size_mb": round(total_size / (1024 * 1024), 2),
            }

    # Check for single file checkpoints
    for ext in (".joblib", ".pkl", ".pt", ".pth", ".h5", ".xgb", ".lgb", ".cbm"):
        candidate = ckpt_path / f"{exp_id}{ext}"
        if candidate.exists():
            return {
                "path": str(candidate),
                "format": ext.lstrip("."),
                "n_files": 1,
                "size_bytes": candidate.stat().st_size,
                "size_mb": round(candidate.stat().st_size / (1024 * 1024), 2),
            }

    return None
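
# Illustrative only: the shape of a successful lookup for a hypothetical
# single-file PyTorch checkpoint (note that for single files the format is
# the bare extension, not the framework name).
#
#     >>> find_checkpoint("exp-042")
#     {'path': 'experiments/checkpoints/exp-042.pt', 'format': 'pt', 'n_files': 1, 'size_bytes': 52428800, 'size_mb': 50.0}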


def _detect_checkpoint_format(files: list[Path]) -> str:
    """Detect the format of checkpoint files."""
    extensions = {f.suffix.lower() for f in files}
    if ".pt" in extensions or ".pth" in extensions:
        return "pytorch"
    if ".h5" in extensions:
        return "keras"
    if ".joblib" in extensions:
        return "joblib"
    if ".pkl" in extensions:
        return "pickle"
    if ".xgb" in extensions:
        return "xgboost"
    if ".lgb" in extensions:
        return "lightgbm"
    return "unknown"


# --- Full Warm-Start Pipeline ---


def warm_start(
    exp_id: str,
    freeze_layers: list[str] | None = None,
    unfreeze_after: int | None = None,
    lr_factor: float = DEFAULT_LR_FACTOR,
    config_path: str = "config.yaml",
    log_path: str = DEFAULT_LOG_PATH,
    checkpoint_dir: str = DEFAULT_CHECKPOINT_DIR,
) -> dict:
    """Plan and prepare a warm-start from a prior experiment.

    Args:
        exp_id: Source experiment ID.
        freeze_layers: Layers to freeze (neural only).
        unfreeze_after: Unfreeze after N epochs (neural only).
        lr_factor: Learning rate reduction factor.
        config_path: Path to config.yaml.
        log_path: Path to experiment log.
        checkpoint_dir: Checkpoint directory.

    Returns:
        Complete warm-start report.
    """
    experiments = load_experiments(log_path)

    source = None
    for exp in experiments:
        if exp.get("experiment_id") == exp_id:
            source = exp
            break

    if not source:
        return {"error": f"Experiment {exp_id} not found in {log_path}"}

    # Find checkpoint
    checkpoint = find_checkpoint(exp_id, checkpoint_dir)

    # Plan warm-start
    plan = plan_warm_start(source, freeze_layers, unfreeze_after, lr_factor)

    # Source experiment info
    source_metrics = source.get("metrics", {})

    report = {
        "source_experiment": exp_id,
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "source_metrics": source_metrics,
        "checkpoint": checkpoint,
        "plan": plan,
    }

    if not checkpoint:
        report["warning"] = (
            f"No checkpoint found for {exp_id} in {checkpoint_dir}. "
            "The warm-start plan is ready but requires a saved checkpoint to execute."
        )

    return report
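
# Illustrative only: calling the pipeline programmatically rather than via
# the CLI (the experiment ID is hypothetical).
#
#     report = warm_start("exp-042", freeze_layers=["encoder"], lr_factor=0.1)
#     print(format_warm_start_report(report))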


# --- Report Formatting ---


def save_warm_start_report(report: dict, output_dir: str = "experiments/warm_starts") -> Path:
    """Save warm-start report to YAML."""
    out_path = Path(output_dir)
    out_path.mkdir(parents=True, exist_ok=True)

    exp_id = report.get("source_experiment", "unknown")
    filepath = out_path / f"warm-{exp_id}.yaml"

    with open(filepath, "w") as f:
        yaml.dump(report, f, default_flow_style=False, sort_keys=False)

    return filepath


def format_warm_start_report(report: dict) -> str:
    """Format warm-start report as markdown."""
    if "error" in report:
        return f"ERROR: {report['error']}"

    plan = report.get("plan", {})
    exp_id = report.get("source_experiment", "?")

    lines = [
        f"# Warm-Start Plan: {exp_id}",
        "",
        f"*Generated {report.get('generated_at', 'N/A')[:19]}*",
        "",
        f"**Model:** {plan.get('model_type', '?')} ({plan.get('model_category', '?')})",
        f"**Strategy:** {plan.get('strategy', '?')}",
        "",
    ]

    # Source metrics
    metrics = report.get("source_metrics", {})
    if metrics:
        lines.extend(["## Source Experiment Metrics", ""])
        for k, v in metrics.items():
            v_str = f"{v:.4f}" if isinstance(v, float) else str(v)
            lines.append(f"- **{k}:** {v_str}")
        lines.append("")

    # Checkpoint info
    checkpoint = report.get("checkpoint")
    if checkpoint:
        lines.extend([
            "## Checkpoint",
            "",
            f"- **Path:** {checkpoint['path']}",
            f"- **Format:** {checkpoint['format']}",
            f"- **Size:** {checkpoint.get('size_mb', 0):.1f} MB",
            "",
        ])
    elif report.get("warning"):
        lines.extend(["## Checkpoint", "", f"WARNING: {report['warning']}", ""])

    # Instructions
    instructions = plan.get("instructions", [])
    if instructions:
        lines.extend(["## Steps", ""])
        for i, inst in enumerate(instructions, 1):
            lines.append(f"{i}. {inst}")
        lines.append("")

    # Config changes
    changes = plan.get("config_changes", {})
    if changes:
        lines.extend(["## Config Changes", ""])
        for k, v in changes.items():
            lines.append(f"- `{k}`: {v}")
        lines.append("")

    # Warnings
    warnings = plan.get("warnings", [])
    if warnings:
        lines.extend(["## Warnings", ""])
        for w in warnings:
            lines.append(f"- {w}")
        lines.append("")

    return "\n".join(lines)


def main() -> None:
    """CLI entry point."""
    parser = argparse.ArgumentParser(
        description="Warm-start from prior model checkpoint",
    )
    parser.add_argument(
        "exp_id",
        help="Source experiment ID (e.g., exp-042)",
    )
    parser.add_argument(
        "--freeze-layers", nargs="+",
        help="Layer names to freeze (neural networks only)",
    )
    parser.add_argument(
        "--unfreeze-after", type=int,
        help="Unfreeze all layers after N epochs (gradual unfreezing)",
    )
    parser.add_argument(
        "--lr-factor", type=float, default=DEFAULT_LR_FACTOR,
        help=f"Learning rate reduction factor (default: {DEFAULT_LR_FACTOR})",
    )
    parser.add_argument(
        "--config", default="config.yaml",
        help="Path to config.yaml",
    )
    parser.add_argument(
        "--log", default=DEFAULT_LOG_PATH,
        help="Path to experiment log",
    )
    parser.add_argument(
        "--checkpoint-dir", default=DEFAULT_CHECKPOINT_DIR,
        help=f"Checkpoint directory (default: {DEFAULT_CHECKPOINT_DIR})",
    )
    parser.add_argument(
        "--json", action="store_true",
        help="Output raw JSON instead of formatted report",
    )
    args = parser.parse_args()

    report = warm_start(
        exp_id=args.exp_id,
        freeze_layers=args.freeze_layers,
        unfreeze_after=args.unfreeze_after,
        lr_factor=args.lr_factor,
        config_path=args.config,
        log_path=args.log,
        checkpoint_dir=args.checkpoint_dir,
    )

    if "error" not in report:
        filepath = save_warm_start_report(report)
        print(f"Saved to {filepath}", file=sys.stderr)

    if args.json:
        print(json.dumps(report, indent=2, default=str))
    else:
        print(format_warm_start_report(report))


if __name__ == "__main__":
    main()