claude-turing 4.6.0 → 4.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (255)
  1. package/.claude-plugin/plugin.json +2 -2
  2. package/README.md +1 -1
  3. package/package.json +9 -3
  4. package/skills/turing/SKILL.md +180 -0
  5. package/skills/turing/ablate/SKILL.md +47 -0
  6. package/skills/turing/annotate/SKILL.md +23 -0
  7. package/skills/turing/archive/SKILL.md +23 -0
  8. package/skills/turing/audit/SKILL.md +56 -0
  9. package/skills/turing/baseline/SKILL.md +45 -0
  10. package/skills/turing/brief/SKILL.md +95 -0
  11. package/skills/turing/budget/SKILL.md +52 -0
  12. package/skills/turing/calibrate/SKILL.md +47 -0
  13. package/skills/turing/card/SKILL.md +36 -0
  14. package/skills/turing/changelog/SKILL.md +22 -0
  15. package/skills/turing/checkpoint/SKILL.md +47 -0
  16. package/skills/turing/cite/SKILL.md +23 -0
  17. package/skills/turing/compare/SKILL.md +24 -0
  18. package/skills/turing/counterfactual/SKILL.md +27 -0
  19. package/skills/turing/curriculum/SKILL.md +43 -0
  20. package/skills/turing/design/SKILL.md +97 -0
  21. package/skills/turing/diagnose/SKILL.md +52 -0
  22. package/skills/turing/diff/SKILL.md +48 -0
  23. package/skills/turing/distill/SKILL.md +56 -0
  24. package/skills/turing/doctor/SKILL.md +31 -0
  25. package/skills/turing/ensemble/SKILL.md +54 -0
  26. package/skills/turing/explore/SKILL.md +107 -0
  27. package/skills/turing/export/SKILL.md +48 -0
  28. package/skills/turing/feature/SKILL.md +42 -0
  29. package/skills/turing/flashback/SKILL.md +22 -0
  30. package/skills/turing/fork/SKILL.md +40 -0
  31. package/skills/turing/frontier/SKILL.md +45 -0
  32. package/skills/turing/init/SKILL.md +154 -0
  33. package/skills/turing/leak/SKILL.md +47 -0
  34. package/skills/turing/lit/SKILL.md +47 -0
  35. package/skills/turing/logbook/SKILL.md +51 -0
  36. package/skills/turing/merge/SKILL.md +24 -0
  37. package/skills/turing/mode/SKILL.md +43 -0
  38. package/skills/turing/onboard/SKILL.md +20 -0
  39. package/skills/turing/paper/SKILL.md +44 -0
  40. package/skills/turing/plan/SKILL.md +27 -0
  41. package/skills/turing/poster/SKILL.md +89 -0
  42. package/skills/turing/postmortem/SKILL.md +28 -0
  43. package/skills/turing/preflight/SKILL.md +75 -0
  44. package/skills/turing/present/SKILL.md +23 -0
  45. package/skills/turing/profile/SKILL.md +43 -0
  46. package/skills/turing/prune/SKILL.md +26 -0
  47. package/skills/turing/quantize/SKILL.md +24 -0
  48. package/skills/turing/queue/SKILL.md +48 -0
  49. package/skills/turing/registry/SKILL.md +31 -0
  50. package/skills/turing/regress/SKILL.md +53 -0
  51. package/skills/turing/replay/SKILL.md +23 -0
  52. package/skills/turing/report/SKILL.md +97 -0
  53. package/skills/turing/reproduce/SKILL.md +48 -0
  54. package/skills/turing/retry/SKILL.md +41 -0
  55. package/skills/turing/review/SKILL.md +20 -0
  56. package/skills/turing/rules/loop-protocol.md +91 -0
  57. package/skills/turing/sanity/SKILL.md +48 -0
  58. package/skills/turing/scale/SKILL.md +55 -0
  59. package/skills/turing/search/SKILL.md +22 -0
  60. package/skills/turing/seed/SKILL.md +47 -0
  61. package/skills/turing/sensitivity/SKILL.md +41 -0
  62. package/skills/turing/share/SKILL.md +20 -0
  63. package/skills/turing/simulate/SKILL.md +28 -0
  64. package/skills/turing/status/SKILL.md +24 -0
  65. package/skills/turing/stitch/SKILL.md +49 -0
  66. package/skills/turing/suggest/SKILL.md +159 -0
  67. package/skills/turing/surgery/SKILL.md +27 -0
  68. package/skills/turing/sweep/SKILL.md +45 -0
  69. package/skills/turing/template/SKILL.md +22 -0
  70. package/skills/turing/train/SKILL.md +75 -0
  71. package/skills/turing/transfer/SKILL.md +54 -0
  72. package/skills/turing/trend/SKILL.md +21 -0
  73. package/skills/turing/try/SKILL.md +63 -0
  74. package/skills/turing/update/SKILL.md +27 -0
  75. package/skills/turing/validate/SKILL.md +34 -0
  76. package/skills/turing/warm/SKILL.md +53 -0
  77. package/skills/turing/watch/SKILL.md +60 -0
  78. package/skills/turing/whatif/SKILL.md +31 -0
  79. package/skills/turing/xray/SKILL.md +43 -0
  80. package/src/command-registry.js +9 -0
  81. package/src/sync-skills-layout.js +149 -0
  82. package/templates/__pycache__/evaluate.cpython-312.pyc +0 -0
  83. package/templates/__pycache__/evaluate.cpython-314.pyc +0 -0
  84. package/templates/__pycache__/prepare.cpython-312.pyc +0 -0
  85. package/templates/__pycache__/prepare.cpython-314.pyc +0 -0
  86. package/templates/features/__pycache__/__init__.cpython-312.pyc +0 -0
  87. package/templates/features/__pycache__/__init__.cpython-314.pyc +0 -0
  88. package/templates/features/__pycache__/featurizers.cpython-312.pyc +0 -0
  89. package/templates/features/__pycache__/featurizers.cpython-314.pyc +0 -0
  90. package/templates/scripts/__pycache__/__init__.cpython-312.pyc +0 -0
  91. package/templates/scripts/__pycache__/__init__.cpython-314.pyc +0 -0
  92. package/templates/scripts/__pycache__/ablation_study.cpython-312.pyc +0 -0
  93. package/templates/scripts/__pycache__/ablation_study.cpython-314.pyc +0 -0
  94. package/templates/scripts/__pycache__/architecture_surgery.cpython-312.pyc +0 -0
  95. package/templates/scripts/__pycache__/architecture_surgery.cpython-314.pyc +0 -0
  96. package/templates/scripts/__pycache__/budget_manager.cpython-312.pyc +0 -0
  97. package/templates/scripts/__pycache__/budget_manager.cpython-314.pyc +0 -0
  98. package/templates/scripts/__pycache__/build_ensemble.cpython-312.pyc +0 -0
  99. package/templates/scripts/__pycache__/build_ensemble.cpython-314.pyc +0 -0
  100. package/templates/scripts/__pycache__/calibration.cpython-312.pyc +0 -0
  101. package/templates/scripts/__pycache__/calibration.cpython-314.pyc +0 -0
  102. package/templates/scripts/__pycache__/check_convergence.cpython-312.pyc +0 -0
  103. package/templates/scripts/__pycache__/check_convergence.cpython-314.pyc +0 -0
  104. package/templates/scripts/__pycache__/checkpoint_manager.cpython-312.pyc +0 -0
  105. package/templates/scripts/__pycache__/checkpoint_manager.cpython-314.pyc +0 -0
  106. package/templates/scripts/__pycache__/citation_manager.cpython-312.pyc +0 -0
  107. package/templates/scripts/__pycache__/citation_manager.cpython-314.pyc +0 -0
  108. package/templates/scripts/__pycache__/cost_frontier.cpython-312.pyc +0 -0
  109. package/templates/scripts/__pycache__/cost_frontier.cpython-314.pyc +0 -0
  110. package/templates/scripts/__pycache__/counterfactual_explanation.cpython-312.pyc +0 -0
  111. package/templates/scripts/__pycache__/counterfactual_explanation.cpython-314.pyc +0 -0
  112. package/templates/scripts/__pycache__/critique_hypothesis.cpython-312.pyc +0 -0
  113. package/templates/scripts/__pycache__/critique_hypothesis.cpython-314.pyc +0 -0
  114. package/templates/scripts/__pycache__/curriculum_optimizer.cpython-312.pyc +0 -0
  115. package/templates/scripts/__pycache__/curriculum_optimizer.cpython-314.pyc +0 -0
  116. package/templates/scripts/__pycache__/diagnose_errors.cpython-312.pyc +0 -0
  117. package/templates/scripts/__pycache__/diagnose_errors.cpython-314.pyc +0 -0
  118. package/templates/scripts/__pycache__/draft_paper_sections.cpython-312.pyc +0 -0
  119. package/templates/scripts/__pycache__/draft_paper_sections.cpython-314.pyc +0 -0
  120. package/templates/scripts/__pycache__/equivalence_checker.cpython-312.pyc +0 -0
  121. package/templates/scripts/__pycache__/equivalence_checker.cpython-314.pyc +0 -0
  122. package/templates/scripts/__pycache__/experiment_annotations.cpython-312.pyc +0 -0
  123. package/templates/scripts/__pycache__/experiment_annotations.cpython-314.pyc +0 -0
  124. package/templates/scripts/__pycache__/experiment_archive.cpython-312.pyc +0 -0
  125. package/templates/scripts/__pycache__/experiment_archive.cpython-314.pyc +0 -0
  126. package/templates/scripts/__pycache__/experiment_diff.cpython-312.pyc +0 -0
  127. package/templates/scripts/__pycache__/experiment_diff.cpython-314.pyc +0 -0
  128. package/templates/scripts/__pycache__/experiment_index.cpython-312.pyc +0 -0
  129. package/templates/scripts/__pycache__/experiment_index.cpython-314.pyc +0 -0
  130. package/templates/scripts/__pycache__/experiment_queue.cpython-312.pyc +0 -0
  131. package/templates/scripts/__pycache__/experiment_queue.cpython-314.pyc +0 -0
  132. package/templates/scripts/__pycache__/experiment_replay.cpython-312.pyc +0 -0
  133. package/templates/scripts/__pycache__/experiment_replay.cpython-314.pyc +0 -0
  134. package/templates/scripts/__pycache__/experiment_search.cpython-312.pyc +0 -0
  135. package/templates/scripts/__pycache__/experiment_search.cpython-314.pyc +0 -0
  136. package/templates/scripts/__pycache__/experiment_simulator.cpython-312.pyc +0 -0
  137. package/templates/scripts/__pycache__/experiment_simulator.cpython-314.pyc +0 -0
  138. package/templates/scripts/__pycache__/experiment_templates.cpython-312.pyc +0 -0
  139. package/templates/scripts/__pycache__/experiment_templates.cpython-314.pyc +0 -0
  140. package/templates/scripts/__pycache__/export_card.cpython-312.pyc +0 -0
  141. package/templates/scripts/__pycache__/export_card.cpython-314.pyc +0 -0
  142. package/templates/scripts/__pycache__/export_formats.cpython-312.pyc +0 -0
  143. package/templates/scripts/__pycache__/export_formats.cpython-314.pyc +0 -0
  144. package/templates/scripts/__pycache__/failure_postmortem.cpython-312.pyc +0 -0
  145. package/templates/scripts/__pycache__/failure_postmortem.cpython-314.pyc +0 -0
  146. package/templates/scripts/__pycache__/feature_intelligence.cpython-312.pyc +0 -0
  147. package/templates/scripts/__pycache__/feature_intelligence.cpython-314.pyc +0 -0
  148. package/templates/scripts/__pycache__/fork_experiment.cpython-312.pyc +0 -0
  149. package/templates/scripts/__pycache__/fork_experiment.cpython-314.pyc +0 -0
  150. package/templates/scripts/__pycache__/generate_baselines.cpython-312.pyc +0 -0
  151. package/templates/scripts/__pycache__/generate_baselines.cpython-314.pyc +0 -0
  152. package/templates/scripts/__pycache__/generate_brief.cpython-312.pyc +0 -0
  153. package/templates/scripts/__pycache__/generate_brief.cpython-314.pyc +0 -0
  154. package/templates/scripts/__pycache__/generate_changelog.cpython-312.pyc +0 -0
  155. package/templates/scripts/__pycache__/generate_changelog.cpython-314.pyc +0 -0
  156. package/templates/scripts/__pycache__/generate_figures.cpython-312.pyc +0 -0
  157. package/templates/scripts/__pycache__/generate_figures.cpython-314.pyc +0 -0
  158. package/templates/scripts/__pycache__/generate_logbook.cpython-312.pyc +0 -0
  159. package/templates/scripts/__pycache__/generate_logbook.cpython-314.pyc +0 -0
  160. package/templates/scripts/__pycache__/generate_model_card.cpython-312.pyc +0 -0
  161. package/templates/scripts/__pycache__/generate_model_card.cpython-314.pyc +0 -0
  162. package/templates/scripts/__pycache__/generate_onboarding.cpython-312.pyc +0 -0
  163. package/templates/scripts/__pycache__/generate_onboarding.cpython-314.pyc +0 -0
  164. package/templates/scripts/__pycache__/harness_doctor.cpython-312.pyc +0 -0
  165. package/templates/scripts/__pycache__/harness_doctor.cpython-314.pyc +0 -0
  166. package/templates/scripts/__pycache__/incremental_update.cpython-312.pyc +0 -0
  167. package/templates/scripts/__pycache__/incremental_update.cpython-314.pyc +0 -0
  168. package/templates/scripts/__pycache__/knowledge_transfer.cpython-312.pyc +0 -0
  169. package/templates/scripts/__pycache__/knowledge_transfer.cpython-314.pyc +0 -0
  170. package/templates/scripts/__pycache__/latency_benchmark.cpython-312.pyc +0 -0
  171. package/templates/scripts/__pycache__/latency_benchmark.cpython-314.pyc +0 -0
  172. package/templates/scripts/__pycache__/leakage_detector.cpython-312.pyc +0 -0
  173. package/templates/scripts/__pycache__/leakage_detector.cpython-314.pyc +0 -0
  174. package/templates/scripts/__pycache__/literature_search.cpython-312.pyc +0 -0
  175. package/templates/scripts/__pycache__/literature_search.cpython-314.pyc +0 -0
  176. package/templates/scripts/__pycache__/log_experiment.cpython-312.pyc +0 -0
  177. package/templates/scripts/__pycache__/log_experiment.cpython-314.pyc +0 -0
  178. package/templates/scripts/__pycache__/manage_hypotheses.cpython-312.pyc +0 -0
  179. package/templates/scripts/__pycache__/manage_hypotheses.cpython-314.pyc +0 -0
  180. package/templates/scripts/__pycache__/methodology_audit.cpython-312.pyc +0 -0
  181. package/templates/scripts/__pycache__/methodology_audit.cpython-314.pyc +0 -0
  182. package/templates/scripts/__pycache__/model_distiller.cpython-312.pyc +0 -0
  183. package/templates/scripts/__pycache__/model_distiller.cpython-314.pyc +0 -0
  184. package/templates/scripts/__pycache__/model_lifecycle.cpython-312.pyc +0 -0
  185. package/templates/scripts/__pycache__/model_lifecycle.cpython-314.pyc +0 -0
  186. package/templates/scripts/__pycache__/model_merger.cpython-312.pyc +0 -0
  187. package/templates/scripts/__pycache__/model_merger.cpython-314.pyc +0 -0
  188. package/templates/scripts/__pycache__/model_pruning.cpython-312.pyc +0 -0
  189. package/templates/scripts/__pycache__/model_pruning.cpython-314.pyc +0 -0
  190. package/templates/scripts/__pycache__/model_quantization.cpython-312.pyc +0 -0
  191. package/templates/scripts/__pycache__/model_quantization.cpython-314.pyc +0 -0
  192. package/templates/scripts/__pycache__/model_xray.cpython-312.pyc +0 -0
  193. package/templates/scripts/__pycache__/model_xray.cpython-314.pyc +0 -0
  194. package/templates/scripts/__pycache__/novelty_guard.cpython-312.pyc +0 -0
  195. package/templates/scripts/__pycache__/novelty_guard.cpython-314.pyc +0 -0
  196. package/templates/scripts/__pycache__/package_experiments.cpython-312.pyc +0 -0
  197. package/templates/scripts/__pycache__/package_experiments.cpython-314.pyc +0 -0
  198. package/templates/scripts/__pycache__/pareto_frontier.cpython-312.pyc +0 -0
  199. package/templates/scripts/__pycache__/pareto_frontier.cpython-314.pyc +0 -0
  200. package/templates/scripts/__pycache__/parse_metrics.cpython-312.pyc +0 -0
  201. package/templates/scripts/__pycache__/parse_metrics.cpython-314.pyc +0 -0
  202. package/templates/scripts/__pycache__/pipeline_manager.cpython-312.pyc +0 -0
  203. package/templates/scripts/__pycache__/pipeline_manager.cpython-314.pyc +0 -0
  204. package/templates/scripts/__pycache__/profile_training.cpython-312.pyc +0 -0
  205. package/templates/scripts/__pycache__/profile_training.cpython-314.pyc +0 -0
  206. package/templates/scripts/__pycache__/regression_gate.cpython-312.pyc +0 -0
  207. package/templates/scripts/__pycache__/regression_gate.cpython-314.pyc +0 -0
  208. package/templates/scripts/__pycache__/reproduce_experiment.cpython-312.pyc +0 -0
  209. package/templates/scripts/__pycache__/reproduce_experiment.cpython-314.pyc +0 -0
  210. package/templates/scripts/__pycache__/research_planner.cpython-312.pyc +0 -0
  211. package/templates/scripts/__pycache__/research_planner.cpython-314.pyc +0 -0
  212. package/templates/scripts/__pycache__/sanity_checks.cpython-312.pyc +0 -0
  213. package/templates/scripts/__pycache__/sanity_checks.cpython-314.pyc +0 -0
  214. package/templates/scripts/__pycache__/scaffold.cpython-312.pyc +0 -0
  215. package/templates/scripts/__pycache__/scaffold.cpython-314.pyc +0 -0
  216. package/templates/scripts/__pycache__/scaling_estimator.cpython-312.pyc +0 -0
  217. package/templates/scripts/__pycache__/scaling_estimator.cpython-314.pyc +0 -0
  218. package/templates/scripts/__pycache__/seed_runner.cpython-312.pyc +0 -0
  219. package/templates/scripts/__pycache__/seed_runner.cpython-314.pyc +0 -0
  220. package/templates/scripts/__pycache__/sensitivity_analysis.cpython-312.pyc +0 -0
  221. package/templates/scripts/__pycache__/sensitivity_analysis.cpython-314.pyc +0 -0
  222. package/templates/scripts/__pycache__/session_flashback.cpython-312.pyc +0 -0
  223. package/templates/scripts/__pycache__/session_flashback.cpython-314.pyc +0 -0
  224. package/templates/scripts/__pycache__/show_experiment_tree.cpython-312.pyc +0 -0
  225. package/templates/scripts/__pycache__/show_experiment_tree.cpython-314.pyc +0 -0
  226. package/templates/scripts/__pycache__/show_families.cpython-312.pyc +0 -0
  227. package/templates/scripts/__pycache__/show_families.cpython-314.pyc +0 -0
  228. package/templates/scripts/__pycache__/simulate_review.cpython-312.pyc +0 -0
  229. package/templates/scripts/__pycache__/simulate_review.cpython-314.pyc +0 -0
  230. package/templates/scripts/__pycache__/smart_retry.cpython-312.pyc +0 -0
  231. package/templates/scripts/__pycache__/smart_retry.cpython-314.pyc +0 -0
  232. package/templates/scripts/__pycache__/statistical_compare.cpython-312.pyc +0 -0
  233. package/templates/scripts/__pycache__/statistical_compare.cpython-314.pyc +0 -0
  234. package/templates/scripts/__pycache__/suggest_next.cpython-312.pyc +0 -0
  235. package/templates/scripts/__pycache__/suggest_next.cpython-314.pyc +0 -0
  236. package/templates/scripts/__pycache__/sweep.cpython-312.pyc +0 -0
  237. package/templates/scripts/__pycache__/sweep.cpython-314.pyc +0 -0
  238. package/templates/scripts/__pycache__/synthesize_decision.cpython-312.pyc +0 -0
  239. package/templates/scripts/__pycache__/synthesize_decision.cpython-314.pyc +0 -0
  240. package/templates/scripts/__pycache__/training_monitor.cpython-312.pyc +0 -0
  241. package/templates/scripts/__pycache__/training_monitor.cpython-314.pyc +0 -0
  242. package/templates/scripts/__pycache__/treequest_suggest.cpython-312.pyc +0 -0
  243. package/templates/scripts/__pycache__/treequest_suggest.cpython-314.pyc +0 -0
  244. package/templates/scripts/__pycache__/trend_analysis.cpython-312.pyc +0 -0
  245. package/templates/scripts/__pycache__/trend_analysis.cpython-314.pyc +0 -0
  246. package/templates/scripts/__pycache__/turing_io.cpython-312.pyc +0 -0
  247. package/templates/scripts/__pycache__/turing_io.cpython-314.pyc +0 -0
  248. package/templates/scripts/__pycache__/update_state.cpython-312.pyc +0 -0
  249. package/templates/scripts/__pycache__/update_state.cpython-314.pyc +0 -0
  250. package/templates/scripts/__pycache__/verify_placeholders.cpython-312.pyc +0 -0
  251. package/templates/scripts/__pycache__/verify_placeholders.cpython-314.pyc +0 -0
  252. package/templates/scripts/__pycache__/warm_start.cpython-312.pyc +0 -0
  253. package/templates/scripts/__pycache__/warm_start.cpython-314.pyc +0 -0
  254. package/templates/scripts/__pycache__/whatif_engine.cpython-312.pyc +0 -0
  255. package/templates/scripts/__pycache__/whatif_engine.cpython-314.pyc +0 -0
@@ -0,0 +1,91 @@
1
+ # Autoresearch Loop Protocol Rules
2
+
3
+ These rules govern the autonomous ML experiment loop. They are non-negotiable safety constraints that preserve the integrity of the experimental process.
4
+
5
+ ## The Fundamental Separation
6
+
7
+ The autoresearch harness enforces a strict separation between the **hypothesis space** (what the agent can change) and the **measurement apparatus** (how results are evaluated). This separation is the architectural invariant that makes autonomous experimentation trustworthy.
8
+
9
+ | Layer | Files | Agent Access | Rationale |
10
+ |-------|-------|-------------|-----------|
11
+ | Hidden | `evaluate.py` | NONE — do not read, write, or reference | Reading evaluation code enables seed exploitation and metric gaming |
12
+ | Measurement | `prepare.py` | READ-ONLY | Data loading is visible but immutable |
13
+ | Hypothesis | `train.py` | READ-WRITE | All experimental changes go here |
14
+ | Configuration | `config.yaml` | READ-WRITE | Hyperparameter changes without code changes |
15
+ | Features | `features/featurizers.py` | READ-ONLY | Modify how `train.py` *uses* featurizers instead |
16
+
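To make the tiers concrete, here is a minimal, purely illustrative sketch of the same policy as a lookup table. In the harness itself the tiers are enforced through tool permissions (see Tool Restrictions below), not through code like this.

```python
# Hypothetical illustration of the access tiers above as a simple lookup.
# Real enforcement happens at the tool level, not in Python.
ACCESS_TIERS = {
    "evaluate.py": "hidden",               # never read, write, or reference
    "prepare.py": "read-only",
    "features/featurizers.py": "read-only",
    "train.py": "read-write",
    "config.yaml": "read-write",
}

def allowed(path: str, mode: str) -> bool:
    """Return True if the agent may access `path` in the given mode ('read' or 'write')."""
    tier = ACCESS_TIERS.get(path, "read-write")
    if tier == "hidden":
        return False
    if tier == "read-only":
        return mode == "read"
    return True

print(allowed("evaluate.py", "read"))   # False — hidden from the agent entirely
print(allowed("prepare.py", "write"))   # False — measurement layer is immutable
print(allowed("train.py", "write"))     # True  — hypothesis layer is read-write
```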
17
+ ## Execution Rules
18
+
19
+ - **ALWAYS redirect training output:** `python train.py > run.log 2>&1`
20
+ - **ALWAYS parse metrics with grep** between `---` delimiters: `grep -A 10 "^---" run.log | head -10`
21
+ - **ALWAYS activate the venv first:** `source .venv/bin/activate`
22
+ - **NEVER install new packages** without human approval
23
+
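The metric-parsing rule assumes `train.py` prints its final metrics between `---` delimiter lines. A minimal Python sketch of a parser for that block is below; the `key: value` format is an assumption, and the actual `scripts/parse_metrics.py` may differ.

```python
# Hypothetical sketch of parsing metrics printed between "---" delimiters in run.log.
# The real scripts/parse_metrics.py may expect a different format; this only shows the idea.
from pathlib import Path


def parse_metrics(log_path: str = "run.log") -> dict[str, float]:
    """Extract key: value pairs from the block between the first two '---' lines."""
    metrics: dict[str, float] = {}
    inside = False
    for line in Path(log_path).read_text().splitlines():
        if line.strip().startswith("---"):
            if inside:          # closing delimiter — stop
                break
            inside = True       # opening delimiter — start collecting
            continue
        if inside and ":" in line:
            key, _, value = line.partition(":")
            try:
                metrics[key.strip()] = float(value.strip())
            except ValueError:
                pass            # ignore non-numeric lines inside the block
    return metrics


if __name__ == "__main__":
    print(parse_metrics())
```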
24
+ ## Git Discipline
25
+
26
+ ### Per-Experiment Branches (preferred)
27
+
28
+ - **Create branch before each experiment:** `git checkout -b exp/{NNN}-{short-description}`
29
+ - **Commit changes on the branch:** `git commit -am "exp: {description}"`
30
+ - **Run the experiment on the branch**
31
+ - **If improved:** `git checkout main && git merge exp/{NNN}-{short-description}`. Copy model to `models/best/`.
32
+ - **If NOT improved:** `git checkout main`. Branch preserved for comparison.
33
+ - **Keep all experiment branches** — they preserve code variants for later analysis.
34
+
35
+ ### Fallback: Commit/Revert (mid-sweep)
36
+
37
+ - **ALWAYS commit before running:** `git commit -am "exp: {description}"`
38
+ - **If improved:** keep commit, copy model to `models/best/`
39
+ - **If NOT improved:** `git reset --hard HEAD~1`
40
+
41
+ ## Sweep Workflow
42
+
43
+ 1. Generate queue: `python scripts/sweep.py`
44
+ 2. Check status: `python scripts/sweep.py --status`
45
+ 3. Get next: `python scripts/sweep.py --next`
46
+ 4. Apply overrides, create branch, run training
47
+ 5. Mark: `python scripts/sweep.py --mark <name> complete|failed`
48
+ 6. Repeat until queue is empty
49
+
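Conceptually, the queue generated in step 1 is the cartesian product of the configured parameter ranges. A toy sketch of that expansion follows; the parameter names and the grid structure are illustrative, not the actual `sweep_config.yaml` schema.

```python
# Illustration only: build a sweep queue as the cartesian product of parameter lists.
# The real scripts/sweep.py and sweep_config.yaml schema may differ.
from itertools import product

param_grid = {                      # assumed structure, for illustration
    "model.learning_rate": [0.01, 0.05, 0.1],
    "model.max_depth": [4, 6, 8],
}

keys = list(param_grid)
queue = [dict(zip(keys, combo)) for combo in product(*(param_grid[k] for k in keys))]

for i, overrides in enumerate(queue, start=1):
    print(f"exp-{i:03d}: {overrides}")   # 3 x 3 = 9 queued configurations
```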
50
+ ## Logging Rules
51
+
52
+ - **Log every experiment** to `experiments/log.jsonl` via `python scripts/log_experiment.py` — kept and discarded alike.
53
+ - **Include all metrics, config, and description** of the hypothesis and its outcome.
54
+
55
+ ## Convergence Rules
56
+
57
+ - **N consecutive non-improvements** (N = `convergence.patience` in `config.yaml`), each with less than the threshold relative gain, means STOP.
58
+ - **max_iterations** (if provided) overrides convergence.
59
+ - **Always report** final best model, metrics, and recommended next steps when stopping.
60
+
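A minimal sketch of this stopping rule, assuming relative gain is measured against the best metric seen so far; the actual `scripts/check_convergence.py` may compute it differently.

```python
# Sketch of the stopping rule: stop after `patience` consecutive runs whose relative
# gain over the best metric so far is below `threshold`. Not the actual check_convergence.py.
def should_stop(history: list[float], patience: int = 3, threshold: float = 0.001) -> bool:
    best = float("-inf")
    streak = 0
    for metric in history:
        gain = (metric - best) / abs(best) if best > 0 else float("inf")
        if gain < threshold:
            streak += 1
            if streak >= patience:
                return True
        else:
            streak = 0
        best = max(best, metric)
    return False


print(should_stop([0.80, 0.84, 0.8401, 0.8402, 0.8402]))  # True — three consecutive sub-threshold gains
```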
61
+ ## Tool Restrictions
62
+
63
+ The researcher agent's Bash access is restricted to a whitelist of necessary commands:
64
+
65
+ | Allowed Pattern | Purpose |
66
+ |-----------------|---------|
67
+ | `python train.py:*` | Execute training |
68
+ | `python scripts/*:*` | Run utility scripts (logging, metrics, sweep) |
69
+ | `git:*` | Branch, commit, merge, reset operations |
70
+ | `source .venv/bin/activate:*` | Virtual environment activation |
71
+ | `pip:*` | Package installation (requires human approval) |
72
+
73
+ **Blocked by omission:** `cat`, `head`, `tail`, `less` (prevents reading hidden files via shell), `curl`, `wget` (prevents data exfiltration), arbitrary command execution.
74
+
75
+ The agent's Read tool is separately governed by the file access tiers above — hidden files are denied at the tool level.
76
+
77
+ ## Reproducibility Rules
78
+
79
+ Every experiment must be fully reproducible. The training template handles this automatically, but the agent must not subvert it:
80
+
81
+ - **NEVER use unseeded randomness.** All random state flows from `config.yaml → data.random_state`. The `pin_all_seeds()` function in `train.py` sets stdlib `random`, `numpy`, `PYTHONHASHSEED`, and `torch`/`cuda` seeds from this single source.
82
+ - **NEVER modify seeds mid-experiment.** If you need a different seed, use `--seed` flag for multi-run comparison (Phase 2.1). Do not hardcode seeds in `train.py`.
83
+ - **Environment is captured automatically.** `train_metadata.json` records python version, package versions, platform, GPU info, and a config hash. Do not modify this recording — it's used by behavioral probes.
84
+ - **Config snapshot:** The config at training time is stored inside the model artifact (`model.joblib` contains the full config dict). For any saved model, the exact configuration can be recovered.
85
+ - **If adding new dependencies** (requires human approval), note that the environment capture in `train_metadata.json` will automatically record the new package version.
86
+
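For reference, a sketch in the spirit of `pin_all_seeds()` — an illustration of single-source seeding as described above, not a copy of the actual `train.py` implementation.

```python
# Illustrative sketch of a seed-pinning helper; the real pin_all_seeds() in train.py may differ.
import os
import random

import numpy as np


def pin_all_seeds(seed: int) -> None:
    """Pin every source of randomness to the single seed from config.yaml -> data.random_state."""
    # Recorded for subprocesses; the current interpreter's hash randomization is fixed at startup.
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    try:
        import torch
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)   # no-op if CUDA is unavailable
    except ImportError:
        pass                               # torch is optional for tabular projects


pin_all_seeds(42)
```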
87
+ ## Safety
88
+
89
+ - Do not modify files outside the ML project directory.
90
+ - Do not delete experiment logs or model archives.
91
+ - If something breaks unexpectedly, stop and report — do not auto-fix evaluation infrastructure.
@@ -0,0 +1,48 @@
1
+ ---
2
+ name: sanity
3
+ description: Pre-training sanity checks — catch broken data loaders, misconfigured losses, and dead gradients in 30 seconds before wasting hours.
4
+ disable-model-invocation: true
5
+ argument-hint: "[--quick] [--verbose]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Run a battery of fast checks before committing to a full training run. Catches wiring bugs in seconds.
10
+
11
+ ## Steps
12
+
13
+ 1. **Activate environment:**
14
+ ```bash
15
+ source .venv/bin/activate
16
+ ```
17
+
18
+ 2. **Parse arguments from `$ARGUMENTS`:**
19
+ - `--quick` — skip single-batch overfit test (fastest, ~5 seconds)
20
+ - `--verbose` — show detailed check output
21
+ - `--json` — raw JSON output
22
+
23
+ 3. **Run sanity checks:**
24
+ ```bash
25
+ python scripts/sanity_checks.py $ARGUMENTS
26
+ ```
27
+
28
+ 4. **Checks performed:**
29
+ - **Data pipeline** (critical): first batch loads, shapes match, no NaN/Inf
30
+ - **Initial loss** (high): loss at initialization matches theory (e.g., -log(1/C) for cross-entropy)
31
+ - **Gradient flow** (high): all parameters have non-zero, non-exploding gradients
32
+ - **Single-batch overfit** (critical): model can memorize 1 batch in 50 steps — if not, something is broken
33
+ - **Output validation** (high): predictions are non-NaN, non-constant, reasonable range
34
+ - **Config consistency** (medium): learning rate, batch size in reasonable ranges
35
+
36
+ 5. **Verdicts:**
37
+ - **PASS** — safe to proceed
38
+ - **PASS (with warnings)** — review before training
39
+ - **FAIL** — do not proceed, fix issues first
40
+
41
+ 6. **Saved output:** report in `experiments/sanity/sanity-*.yaml`
42
+
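As a concrete illustration of the initial-loss check above: for a C-class problem with roughly balanced classes, an untrained classifier that predicts uniformly should sit near `-log(1/C) = log(C)` cross-entropy. A small numpy sketch follows (illustrative only; the real `scripts/sanity_checks.py` runs against the actual model and data).

```python
# Illustration of the initial-loss check: uniform predictions over C classes should give
# cross-entropy close to -log(1/C) = log(C). Numbers here are synthetic.
import numpy as np

C = 10                                           # number of classes
y = np.random.randint(0, C, size=1000)           # dummy labels
probs = np.full((1000, C), 1.0 / C)              # uniform "untrained" predictions

observed = -np.mean(np.log(probs[np.arange(1000), y]))
expected = np.log(C)

print(f"observed={observed:.3f} expected={expected:.3f}")
ratio = observed / expected
print("PASS" if 0.8 < ratio < 1.2 else "FAIL — loss at init far from theory")
```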
43
+ ## Examples
44
+
45
+ ```
46
+ /turing:sanity # Full check (~30 seconds)
47
+ /turing:sanity --quick # Skip overfit test (~5 seconds)
48
+ ```
@@ -0,0 +1,55 @@
1
+ ---
2
+ name: scale
3
+ description: Scaling law estimator — run small experiments at different sizes, fit a power law, and predict full-scale performance before committing compute.
4
+ disable-model-invocation: true
5
+ argument-hint: "[--axis data|compute|params] [--points 4] [--analyze results.yaml]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Predict full-scale performance from a handful of small experiments. Answers "is it worth training on the full dataset?" in 30 minutes instead of 3 days.
10
+
11
+ ## Steps
12
+
13
+ 1. **Activate environment:**
14
+ ```bash
15
+ source .venv/bin/activate
16
+ ```
17
+
18
+ 2. **Parse arguments from `$ARGUMENTS`:**
19
+ - `--axis data|compute|params` — scaling axis (default: data)
20
+ - `--points 4` — number of scale points (default: 4)
21
+ - `--analyze results.yaml` — analyze existing results instead of planning
22
+ - `--plot` — include ASCII scaling plot
23
+ - `--json` — raw JSON output
24
+
25
+ 3. **Plan or analyze:**
26
+ - **Plan mode (default):** generates scale point configs to run
27
+ ```bash
28
+ python scripts/scaling_estimator.py --axis data --points 4
29
+ ```
30
+ - **Analyze mode:** fits power law to completed results
31
+ ```bash
32
+ python scripts/scaling_estimator.py --analyze experiments/scaling/results.yaml
33
+ ```
34
+
35
+ 4. **Scaling axes:**
36
+ - **data:** train on 10%, 25%, 50%, 75% of dataset
37
+ - **compute:** train for 10%, 25%, 50%, 75% of max epochs
38
+ - **params:** scale model size (fewer estimators, shallower depth)
39
+
40
+ 5. **After planning:** run each scale point experiment, record results in YAML, then use `--analyze` to fit the curve
41
+
42
+ 6. **Report includes:**
43
+ - Power law fit: `metric = a × n^b` with R²
44
+ - Predictions for 100%, 150%, 200% scale
45
+ - Verdict: DIMINISHING RETURNS / MARGINAL GAINS / WORTH SCALING
46
+
47
+ 7. **Saved output:** report written to `experiments/scaling/scale-YYYY-MM-DD.yaml`
48
+
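A minimal sketch of the power-law fit and extrapolation from step 6, done by least squares in log-log space. The data points and layout below are invented for illustration; the actual `scripts/scaling_estimator.py` reads them from the results YAML.

```python
# Sketch of fitting metric = a * n^b via least squares in log-log space. Data is made up.
import numpy as np

fractions = np.array([0.10, 0.25, 0.50, 0.75])        # scale points (share of dataset)
error = np.array([0.220, 0.180, 0.155, 0.142])        # e.g. validation error at each point

b, log_a = np.polyfit(np.log(fractions), np.log(error), 1)
a = np.exp(log_a)

pred = a * np.array([1.0, 1.5, 2.0]) ** b             # predictions at 100%, 150%, 200% scale
residuals = np.log(error) - (log_a + b * np.log(fractions))
r2 = 1 - residuals.var() / np.log(error).var()

print(f"fit: error = {a:.3f} * n^{b:.3f}  (R^2 = {r2:.3f})")
print(f"predicted error at 100%/150%/200%: {pred.round(3)}")
```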
49
+ ## Examples
50
+
51
+ ```
52
+ /turing:scale # Plan: data axis, 4 points
53
+ /turing:scale --axis compute --points 3 # Plan: compute axis, 3 points
54
+ /turing:scale --analyze results.yaml --plot # Analyze with ASCII plot
55
+ ```
@@ -0,0 +1,22 @@
1
+ ---
2
+ name: search
3
+ description: Natural language experiment search — query with text + structured filters over 200+ experiments.
4
+ disable-model-invocation: true
5
+ argument-hint: "<query> [--filter \"accuracy>0.85\"] [--limit 10]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Find specific experiments in a large history with natural language and structured filters.
10
+
11
+ ## Steps
12
+ 1. **Activate environment:** `source .venv/bin/activate`
13
+ 2. **Run:** `python scripts/experiment_search.py $ARGUMENTS`
14
+ 3. **Filters:** `accuracy>0.85`, `status:kept`, `family:baseline`, `date:last-week`
15
+ 4. **Report:** ranked table of matching experiments
16
+
17
+ ## Examples
18
+ ```
19
+ /turing:search "LightGBM high accuracy" --filter "accuracy>0.85"
20
+ /turing:search "failed neural net" --filter "status:discarded"
21
+ /turing:search "last week" --limit 5
22
+ ```
@@ -0,0 +1,47 @@
1
+ ---
2
+ name: seed
3
+ description: Run multi-seed study on an experiment to compute mean/std/CI and flag seed-sensitive results. Prevents publishing lucky seeds.
4
+ disable-model-invocation: true
5
+ argument-hint: "[N] [--quick] [--exp-id <id>]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Run a multi-seed study to verify that experiment results are robust across random seeds.
10
+
11
+ ## Steps
12
+
13
+ 1. **Activate environment:**
14
+ ```bash
15
+ source .venv/bin/activate
16
+ ```
17
+
18
+ 2. **Parse arguments from `$ARGUMENTS`:**
19
+ - A bare number (e.g., `5`) sets the seed count
20
+ - `--quick` runs 3 seeds instead of 5
21
+ - `--exp-id exp-042` targets a specific experiment (defaults to best)
22
+ - `--seed-list 42,123,456` uses specific seed values
23
+
24
+ 3. **Run seed study:**
25
+ ```bash
26
+ python scripts/seed_runner.py $ARGUMENTS
27
+ ```
28
+
29
+ 4. **Report results:**
30
+ - Show the per-seed results table
31
+ - Show mean +/- std with 95% CI
32
+ - **STABLE (CV < 5%):** result is robust, safe to report
33
+ - **SEED-SENSITIVE (CV >= 5%):** result varies too much across seeds — do not report single-seed numbers
34
+ - If seed-sensitive, recommend reporting as mean +/- std over N seeds
35
+
36
+ 5. **Saved output:** results are written to `experiments/seed_studies/exp-NNN-seeds.yaml`
37
+
38
+ 6. **If no training pipeline exists:** suggest `/turing:init` first.
39
+
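For illustration, the summary statistics and the CV-based verdict reduce to a few lines of numpy. The per-seed scores below are made up; `scripts/seed_runner.py` is what actually produces them by retraining per seed.

```python
# Sketch of the seed-study summary: mean, sample std, normal-approximation 95% CI, CV verdict.
import numpy as np

scores = np.array([0.871, 0.866, 0.874, 0.869, 0.862])   # one metric value per seed (synthetic)
mean, std = scores.mean(), scores.std(ddof=1)
ci95 = 1.96 * std / np.sqrt(len(scores))                  # normal-approximation 95% CI
cv = std / mean

print(f"{mean:.4f} +/- {std:.4f} (95% CI +/- {ci95:.4f})")
print("STABLE" if cv < 0.05 else "SEED-SENSITIVE")
```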
40
+ ## Examples
41
+
42
+ ```
43
+ /turing:seed # 5 seeds on best experiment
44
+ /turing:seed --quick # 3 seeds for fast check
45
+ /turing:seed 10 # 10 seeds for thorough study
46
+ /turing:seed --exp-id exp-042 # Specific experiment
47
+ ```
@@ -0,0 +1,41 @@
1
+ ---
2
+ name: sensitivity
3
+ description: Hyperparameter sensitivity analysis — rank parameters by impact, identify which matter and which are noise.
4
+ disable-model-invocation: true
5
+ argument-hint: "[exp-id] [--params learning_rate,max_depth]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Which hyperparameters actually matter? Stop wasting time on the ones that don't.
10
+
11
+ ## Steps
12
+
13
+ 1. **Activate environment:**
14
+ ```bash
15
+ source .venv/bin/activate
16
+ ```
17
+
18
+ 2. **Parse arguments from `$ARGUMENTS`:**
19
+ - Optional experiment ID
20
+ - `--params "learning_rate,max_depth"` — specific parameters to analyze
21
+ - `--json` — raw JSON output
22
+
23
+ 3. **Run sensitivity analysis:**
24
+ ```bash
25
+ python scripts/sensitivity_analysis.py $ARGUMENTS
26
+ ```
27
+
28
+ 4. **Report includes:**
29
+ - Per-parameter sensitivity ranking: HIGH / MED / LOW / NONE
30
+ - Metric range for each parameter sweep
31
+ - Monotonicity detection (is there a sweet spot?)
32
+ - Recommendations: focus tuning on X, stop tuning Y
33
+
34
+ 5. **Saved output:** report in `experiments/sensitivity/<exp-id>-sensitivity.yaml`
35
+
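One simple way to produce the ranking in step 4 is to compare the metric range observed while sweeping each parameter. The sketch below assumes per-parameter sweep results are already in hand and uses made-up numbers and thresholds; the actual `scripts/sensitivity_analysis.py` works from `experiments/log.jsonl` and may rank differently.

```python
# Illustrative ranking of hyperparameters by the metric range seen when sweeping each one.
sweeps = {                                    # assumed: metric values observed per swept parameter
    "learning_rate": [0.81, 0.84, 0.87, 0.85],
    "max_depth": [0.858, 0.861, 0.860],
    "subsample": [0.8595, 0.8601],
}

def bucket(spread: float) -> str:
    return "HIGH" if spread > 0.02 else "MED" if spread > 0.005 else "LOW" if spread > 0.001 else "NONE"

for name, values in sorted(sweeps.items(), key=lambda kv: -(max(kv[1]) - min(kv[1]))):
    spread = max(values) - min(values)
    print(f"{name:<15} range={spread:.4f}  {bucket(spread)}")
```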
36
+ ## Examples
37
+
38
+ ```
39
+ /turing:sensitivity exp-042 # All tunable params
40
+ /turing:sensitivity --params "learning_rate,max_depth" # Specific params
41
+ ```
@@ -0,0 +1,20 @@
1
+ ---
2
+ name: share
3
+ description: Experiment packaging — portable archive with config, metrics, seed study, annotations, reproduction instructions.
4
+ disable-model-invocation: true
5
+ argument-hint: "<exp-ids...> [--include model,figures,code]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Package experiments for collaborator handoff or paper supplementary material.
10
+
11
+ ## Steps
12
+ 1. `source .venv/bin/activate`
13
+ 2. `python scripts/package_experiments.py $ARGUMENTS`
14
+ 3. **Saved:** `exports/packages/<name>/`
15
+
16
+ ## Examples
17
+ ```
18
+ /turing:share exp-089
19
+ /turing:share exp-042 exp-089 --include model,figures
20
+ ```
@@ -0,0 +1,28 @@
1
+ ---
2
+ name: simulate
3
+ description: Experiment outcome prediction — predict which configs will beat the current best before running them.
4
+ disable-model-invocation: true
5
+ argument-hint: "[--configs configs.yaml] [--top-k 5] [--threshold 0.001]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Predict outcomes before spending compute. Ranks proposed configs and recommends which to run vs skip.
10
+
11
+ ## Steps
12
+ 1. `source .venv/bin/activate`
13
+ 2. `python scripts/experiment_simulator.py $ARGUMENTS`
14
+ 3. **Saved:** `experiments/simulations/`
15
+
16
+ ## How it works
17
+ - Builds a surrogate model from experiment history (weighted k-NN)
18
+ - Predicts metric for each proposed config
19
+ - Applies novelty penalty for configs far from training distribution
20
+ - Ranks and filters: only recommend configs predicted to improve
21
+
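A toy sketch of the weighted k-NN surrogate idea: predict a candidate's metric as the inverse-distance-weighted mean over its nearest logged configs. Everything here (features, history, k) is illustrative; the real `scripts/experiment_simulator.py` also applies the novelty penalty and ranking described above.

```python
# Toy weighted k-NN surrogate over past experiments; data is synthetic.
import numpy as np

history_X = np.array([[0.05, 6], [0.10, 6], [0.05, 8], [0.10, 8]])   # (learning_rate, max_depth)
history_y = np.array([0.855, 0.861, 0.858, 0.864])                   # logged metric per config

def predict(candidate: np.ndarray, k: int = 3) -> float:
    d = np.linalg.norm(history_X - candidate, axis=1)   # (a real surrogate would normalize features)
    nearest = np.argsort(d)[:k]
    w = 1.0 / (d[nearest] + 1e-9)                       # closer runs weigh more
    return float(np.average(history_y[nearest], weights=w))

print(f"predicted metric: {predict(np.array([0.08, 7])):.4f}")
```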
22
+ ## Examples
23
+ ```
24
+ /turing:simulate --configs sweep_configs.yaml
25
+ /turing:simulate --configs candidates.yaml --top-k 3
26
+ /turing:simulate --configs proposals.yaml --threshold 0.005
27
+ /turing:simulate --configs sweep.yaml --json
28
+ ```
@@ -0,0 +1,24 @@
1
+ ---
2
+ name: status
3
+ description: Show current ML experiment status — best model, recent experiments, convergence state, and trend analysis. Delegates to @ml-evaluator for read-only safety.
4
+ disable-model-invocation: true
5
+ allowed-tools: Read, Bash(*), Grep, Glob
6
+ ---
7
+
8
+ Show the current state of the ML training pipeline. This is an observation-only operation — no code is modified.
9
+
10
+ ## Steps
11
+
12
+ 1. **Run metrics display:**
13
+ ```bash
14
+ source .venv/bin/activate && python scripts/show_metrics.py --last 10
15
+ ```
16
+
17
+ 2. **Summarize for the user:**
18
+ - **Best model:** type, key metrics, experiment ID
19
+ - **Total experiments:** count from the log
20
+ - **Convergence state:** consecutive non-improvements vs patience threshold
21
+ - **Trend:** improving, plateauing, or regressing?
22
+ - **Recommendation:** continue training, try a different approach, or declare convergence
23
+
24
+ 3. **If no experiments exist:** report that the pipeline is ready but untrained. Suggest `/turing:train`.
@@ -0,0 +1,49 @@
1
+ ---
2
+ name: stitch
3
+ description: Pipeline composition — decompose ML pipelines into swappable stages. Show, swap, cache, and run stages independently.
4
+ disable-model-invocation: true
5
+ argument-hint: "<show|swap|cache|run> [stage] [--from exp-id]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Decompose your ML pipeline into stages that can be independently varied, cached, and reused across experiments.
10
+
11
+ ## Steps
12
+
13
+ 1. **Activate environment:**
14
+ ```bash
15
+ source .venv/bin/activate
16
+ ```
17
+
18
+ 2. **Parse arguments from `$ARGUMENTS`:**
19
+ - First argument is the action: `show`, `swap`, `cache`, `run`
20
+ - `show` — display pipeline stages with hash and cache status
21
+ - `swap <stage> --from <exp-id>` — replace a stage with one from another experiment
22
+ - `cache` — save intermediate stage outputs to disk
23
+ - `run` — execute pipeline, skipping cached stages
24
+
25
+ 3. **Run pipeline manager:**
26
+ ```bash
27
+ python scripts/pipeline_manager.py $ARGUMENTS
28
+ ```
29
+
30
+ 4. **Report results:**
31
+ - **show:** numbered stage list with description, content hash, and cache status
32
+ - **swap:** what changed, old vs new stage config, updated pipeline
33
+ - **cache:** per-stage cache paths and status
34
+ - **run:** which stages will be skipped (cached) vs re-run
35
+
36
+ 5. **Stage types:** preprocess, features, model, postprocess (configurable in `config.yaml` under `pipeline.stages`)
37
+
38
+ 6. **Cache benefit:** when only the model stage changes, preprocessing and feature engineering are skipped — experiments run faster
39
+
40
+ 7. **If no pipeline config:** falls back to default 4-stage pipeline
41
+
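The skip/re-run decision in `run` hinges on per-stage content hashes. A toy illustration of that idea follows, with an invented schema; it is not the actual `scripts/pipeline_manager.py` logic.

```python
# Toy content-hash stage caching: a stage is skipped when the hash of its current config
# matches the hash recorded with the cached output.
import hashlib
import json

def stage_hash(stage_config: dict) -> str:
    blob = json.dumps(stage_config, sort_keys=True).encode()
    return hashlib.sha256(blob).hexdigest()[:12]

cached = {"features": stage_hash({"featurizer": "poly", "degree": 2})}   # recorded at cache time

current = {"featurizer": "poly", "degree": 2}
action = "skip (cached)" if cached.get("features") == stage_hash(current) else "re-run"
print(f"features stage: {action}")
```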
42
+ ## Examples
43
+
44
+ ```
45
+ /turing:stitch show # Display pipeline stages
46
+ /turing:stitch swap model --from exp-031 # Keep features, swap model
47
+ /turing:stitch cache # Cache intermediate outputs
48
+ /turing:stitch run # Run with cached stages
49
+ ```
@@ -0,0 +1,159 @@
1
+ ---
2
+ name: suggest
3
+ description: Literature-grounded model selection. Reads the ML task context, searches recent literature, and suggests model architectures worth trying — with citations. Suggestions are auto-queued as hypotheses.
4
+ disable-model-invocation: true
5
+ argument-hint: "[task description override]"
6
+ allowed-tools: Read, Write, Bash(python scripts/*:*, source .venv/bin/activate:*), Grep, Glob, WebSearch, WebFetch
7
+ ---
8
+
9
+ Suggest model architectures for the current ML task. Supports two strategies:
10
+
11
+ - **literature** (default): Web search for recent papers, synthesize grounded suggestions with citations.
12
+ - **treequest**: Tree-search-guided hypothesis exploration using AB-MCTS over the critique scoring function. Explores refinement chains that literature search cannot find.
13
+
14
+ ## Strategy Detection
15
+
16
+ If `$ARGUMENTS` contains `--strategy treequest` or `treequest`, use the TreeQuest strategy below. Otherwise use the default literature strategy.
17
+
18
+ ## Steps (Literature Strategy — default)
19
+
20
+ ### 1. Understand the Task
21
+
22
+ Read the project config and recent experiment history to understand the task:
23
+
24
+ ```bash
25
+ cat config.yaml
26
+ ```
27
+
28
+ ```bash
29
+ source .venv/bin/activate && python scripts/show_metrics.py --last 10 2>/dev/null || echo "No experiments yet"
30
+ ```
31
+
32
+ If `$ARGUMENTS` is provided, use that as the task description. Otherwise, infer from `config.yaml` (model type, primary metric, data source, target column).
33
+
34
+ From the config and any task description, identify the key task properties:
35
+ - Data type (tabular, time series, image, text, etc.)
36
+ - Objective (classification, regression, generation, etc.)
37
+ - Special constraints (imbalanced classes, small dataset, real-time, interpretability, etc.)
38
+ - Current model family and what's been tried
39
+
40
+ ### 2. Search Literature
41
+
42
+ Use `WebSearch` to find recent papers and benchmark results. Run 3-5 searches targeting:
43
+
44
+ 1. **Model comparison for this task type:** e.g., "best models for tabular classification benchmark 2024"
45
+ 2. **Current model alternatives:** e.g., "LightGBM vs XGBoost vs CatBoost tabular data"
46
+ 3. **Task-specific techniques:** e.g., "handling class imbalance gradient boosting"
47
+
48
+ For each search, use `WebFetch` on the top 1-2 results to extract specific model recommendations, benchmark numbers, and methodology.
49
+
50
+ Focus on:
51
+ - Recent work (2023-2026) with empirical comparisons
52
+ - Benchmark studies and surveys
53
+ - arXiv papers or reputable ML blogs with concrete results
54
+
55
+ ### 3. Synthesize Suggestions
56
+
57
+ From the literature, synthesize **3-5 concrete model architecture suggestions**. Each must include:
58
+
59
+ - **Model architecture:** specific (e.g., "LightGBM with GOSS sampling", not "try a different model")
60
+ - **Why:** one-sentence rationale grounded in what the literature says
61
+ - **Citation:** paper or source that supports this
62
+ - **Expected impact:** high/medium/low based on how well it fits this task
63
+ - **Implementation hint:** what to change in `train.py` (one concrete line)
64
+
65
+ ### 4. Queue as Hypotheses
66
+
67
+ For each suggestion, add to the hypothesis queue:
68
+
69
+ ```bash
70
+ source .venv/bin/activate && python scripts/manage_hypotheses.py add "<model>: <rationale> (source: <citation>)" --priority medium --source literature
71
+ ```
72
+
73
+ ### 5. Show Results
74
+
75
+ ```
76
+ Literature-Grounded Model Suggestions
77
+ ======================================
78
+
79
+ Task: <task description>
80
+ Current: <current model> (<current metric>=<value>)
81
+ Sources consulted: <N papers/articles>
82
+
83
+ 1. [HIGH] <technique>
84
+ Why: <one-sentence rationale with citation>
85
+ Source: <URL>
86
+ Change: <specific train.py change>
87
+ → Queued as hyp-NNN
88
+
89
+ 2. [MEDIUM] ...
90
+
91
+ Queued N hypotheses. Run /turing:train to test them.
92
+ ```
93
+
94
+ ## Fallback (Literature Strategy)
95
+
96
+ If web search returns insufficient results, suggest model families from `config/taxonomy.toml` based on what hasn't been tried yet. Note that suggestions are taxonomy-based, not literature-backed, and queue with `--source taxonomy`.
97
+
98
+ ## Steps (TreeQuest Strategy)
99
+
100
+ When using `--strategy treequest`:
101
+
102
+ ### 1. Detect Project Directory
103
+
104
+ Same detection logic as the literature strategy — find `config.yaml` + `train.py`.
105
+
106
+ ### 2. Run Tree Search
107
+
108
+ ```bash
109
+ source .venv/bin/activate && python scripts/treequest_suggest.py \
110
+ --log experiments/log.jsonl \
111
+ --config config.yaml \
112
+ --top 5 \
113
+ --iterations 30 \
114
+ --strategy abmcts-a
115
+ ```
116
+
117
+ If TreeQuest is not installed, the script automatically falls back to greedy best-first search.
118
+
119
+ ### 3. Queue Results
120
+
121
+ For each result from the tree search, queue as a hypothesis:
122
+
123
+ ```bash
124
+ source .venv/bin/activate && python scripts/manage_hypotheses.py add "<description>" --priority medium --source treequest
125
+ ```
126
+
127
+ ### 4. Show Results
128
+
129
+ Display the tree search output and confirm hypotheses were queued:
130
+
131
+ ```
132
+ TreeQuest Hypothesis Exploration (AB-MCTS-A)
133
+ ============================================
134
+ Nodes explored: 35
135
+ Top 5 hypotheses by critique score:
136
+
137
+ 1. [PROCEED] (score: 7.8/10)
138
+ Switch to LightGBM with dart boosting; additionally add polynomial features
139
+ Novelty: 8 Feasibility: 9 Impact: 7
140
+
141
+ ...
142
+
143
+ Queued N hypotheses. Run /turing:train to test them.
144
+ ```
145
+
146
+ ### TreeQuest Options
147
+
148
+ Pass additional flags via `$ARGUMENTS`:
149
+ - `--iterations N` — search depth (default: 30)
150
+ - `--top N` — number of results (default: 5)
151
+ - `--strategy abmcts-m` — use Bayesian mixed model variant (requires PyMC)
152
+ - `--greedy` — force greedy fallback without TreeQuest
153
+
154
+ ## Integration
155
+
156
+ - Suggestions feed into `hypotheses.yaml` — the next `/turing:train` picks them up
157
+ - `/turing:brief` shows queued literature-sourced and treequest-sourced hypotheses
158
+ - `/turing:explore` runs the TreeQuest search as a standalone command
159
+ - Human can override priority: `/turing:try` always takes precedence
@@ -0,0 +1,27 @@
1
+ ---
2
+ name: surgery
3
+ description: Architecture modification — add/remove layers, widen/narrow, swap activations, inject skip connections. Specify what to change, system handles how.
4
+ disable-model-invocation: true
5
+ argument-hint: "<exp-id> --op <operation> [args...]"
6
+ allowed-tools: Read, Bash(*), Grep, Glob
7
+ ---
8
+
9
+ Programmatic architecture changes with auto warm-start from existing weights.
10
+
11
+ ## Steps
12
+
13
+ 1. **Activate environment:** `source .venv/bin/activate`
14
+ 2. **Run:** `python scripts/architecture_surgery.py $ARGUMENTS`
15
+ 3. **Operations:** add-layer, remove-layer, widen, narrow, swap-activation, add-skip, add-norm, deepen, swap-objective
16
+ 4. **For tree models:** deepen (increase max_depth), widen (more estimators), swap-objective
17
+ 5. **Report:** operation details, config changes, parameter count delta, warm-start source
18
+ 6. **Saved output:** `experiments/surgery/<exp-id>-<op>.yaml`
19
+
20
+ ## Examples
21
+
22
+ ```
23
+ /turing:surgery exp-042 --op widen 2 # 2x wider hidden layers
24
+ /turing:surgery exp-042 --op add-layer # Insert a layer
25
+ /turing:surgery exp-042 --op swap-activation relu gelu # ReLU → GELU
26
+ /turing:surgery exp-042 --op deepen # Deeper trees
27
+ ```
@@ -0,0 +1,45 @@
1
+ ---
2
+ name: sweep
3
+ description: Generate and run a systematic hyperparameter sweep. Computes the cartesian product of configured parameter ranges and processes the queue sequentially with full experiment logging.
4
+ disable-model-invocation: true
5
+ argument-hint: "[sweep_config.yaml]"
6
+ allowed-tools: Read, Write, Edit, Bash(python train.py:*, python scripts/*:*, git:*, source .venv/bin/activate:*, pip:*), Grep, Glob
7
+ ---
8
+
9
+ Run a systematic hyperparameter sweep using the sweep configuration.
10
+
11
+ ## Steps
12
+
13
+ 1. **Activate environment:**
14
+ ```bash
15
+ source .venv/bin/activate
16
+ ```
17
+
18
+ 2. **Resolve config:** Use `$ARGUMENTS` as sweep config path, or default to `sweep_config.yaml`.
19
+
20
+ 3. **Generate queue** (if not already generated):
21
+ ```bash
22
+ python scripts/sweep.py [sweep_config.yaml]
23
+ ```
24
+
25
+ 4. **Check queue status:**
26
+ ```bash
27
+ python scripts/sweep.py --status
28
+ ```
29
+
30
+ 5. **Process queue sequentially:**
31
+ - Get next: `python scripts/sweep.py --next`
32
+ - Apply config overrides to `config.yaml`
33
+ - Create experiment branch: `git checkout -b exp/NNN-description`
34
+ - Run training: `python train.py > run.log 2>&1`
35
+ - Parse metrics: `grep -A 10 "^---" run.log | head -10`
36
+ - Log the experiment
37
+ - Mark complete: `python scripts/sweep.py --mark <name> complete`
38
+ - If improved, merge to main. If not, return to main.
39
+ - Repeat until queue is empty
40
+
41
+ 6. **Report** final results with best configuration found.
42
+
43
+ ## Rules
44
+
45
+ Follow the same safety constraints as `/turing:train` — see `rules/loop-protocol.md`.