@ia-ccun/code-agent-cli 0.0.15 → 0.0.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/bin/cli.js +153 -84
  2. package/config/agent/extensions/working-msg.ts +33 -8
  3. package/config/agent/models.json +41 -11
  4. package/config/agent/prompts/code-simplifier.md +52 -0
  5. package/config/agent/skills/brainstorming/SKILL.md +165 -0
  6. package/config/agent/skills/brainstorming/scripts/frame-template.html +214 -0
  7. package/config/agent/skills/brainstorming/scripts/helper.js +88 -0
  8. package/config/agent/skills/brainstorming/scripts/server.cjs +338 -0
  9. package/config/agent/skills/brainstorming/scripts/start-server.sh +153 -0
  10. package/config/agent/skills/brainstorming/scripts/stop-server.sh +55 -0
  11. package/config/agent/skills/brainstorming/spec-document-reviewer-prompt.md +49 -0
  12. package/config/agent/skills/brainstorming/visual-companion.md +286 -0
  13. package/config/agent/skills/dispatching-parallel-agents/SKILL.md +183 -0
  14. package/config/agent/skills/executing-plans/SKILL.md +71 -0
  15. package/config/agent/skills/finishing-a-development-branch/SKILL.md +201 -0
  16. package/config/agent/skills/owasp-security/SKILL.md +537 -0
  17. package/config/agent/skills/receiving-code-review/SKILL.md +214 -0
  18. package/config/agent/skills/requesting-code-review/SKILL.md +106 -0
  19. package/config/agent/skills/requesting-code-review/code-reviewer.md +146 -0
  20. package/config/agent/skills/skill-creator/SKILL.md +337 -213
  21. package/config/agent/skills/skill-creator/agents/analyzer.md +274 -0
  22. package/config/agent/skills/skill-creator/agents/comparator.md +202 -0
  23. package/config/agent/skills/skill-creator/agents/grader.md +223 -0
  24. package/config/agent/skills/skill-creator/assets/eval_review.html +146 -0
  25. package/config/agent/skills/skill-creator/eval-viewer/generate_review.py +471 -0
  26. package/config/agent/skills/skill-creator/eval-viewer/viewer.html +1325 -0
  27. package/config/agent/skills/skill-creator/references/schemas.md +430 -0
  28. package/config/agent/skills/skill-creator/scripts/__init__.py +0 -0
  29. package/config/agent/skills/skill-creator/scripts/aggregate_benchmark.py +401 -0
  30. package/config/agent/skills/skill-creator/scripts/generate_report.py +326 -0
  31. package/config/agent/skills/skill-creator/scripts/improve_description.py +248 -0
  32. package/config/agent/skills/skill-creator/scripts/package_skill.py +33 -7
  33. package/config/agent/skills/skill-creator/scripts/quick_validate.py +11 -3
  34. package/config/agent/skills/skill-creator/scripts/run_eval.py +310 -0
  35. package/config/agent/skills/skill-creator/scripts/run_loop.py +332 -0
  36. package/config/agent/skills/skill-creator/scripts/utils.py +47 -0
  37. package/config/agent/skills/subagent-driven-development/SKILL.md +278 -0
  38. package/config/agent/skills/subagent-driven-development/code-quality-reviewer-prompt.md +26 -0
  39. package/config/agent/skills/subagent-driven-development/implementer-prompt.md +113 -0
  40. package/config/agent/skills/subagent-driven-development/spec-reviewer-prompt.md +61 -0
  41. package/config/agent/skills/systematic-debugging/CREATION-LOG.md +119 -0
  42. package/config/agent/skills/systematic-debugging/SKILL.md +297 -0
  43. package/config/agent/skills/systematic-debugging/condition-based-waiting-example.ts +158 -0
  44. package/config/agent/skills/systematic-debugging/condition-based-waiting.md +115 -0
  45. package/config/agent/skills/systematic-debugging/defense-in-depth.md +122 -0
  46. package/config/agent/skills/systematic-debugging/find-polluter.sh +63 -0
  47. package/config/agent/skills/systematic-debugging/root-cause-tracing.md +169 -0
  48. package/config/agent/skills/systematic-debugging/test-academic.md +14 -0
  49. package/config/agent/skills/systematic-debugging/test-pressure-1.md +58 -0
  50. package/config/agent/skills/systematic-debugging/test-pressure-2.md +68 -0
  51. package/config/agent/skills/systematic-debugging/test-pressure-3.md +69 -0
  52. package/config/agent/skills/test-driven-development/SKILL.md +372 -0
  53. package/config/agent/skills/test-driven-development/testing-anti-patterns.md +299 -0
  54. package/config/agent/skills/using-git-worktrees/SKILL.md +219 -0
  55. package/config/agent/skills/using-superpowers/SKILL.md +116 -0
  56. package/config/agent/skills/using-superpowers/references/codex-tools.md +25 -0
  57. package/config/agent/skills/using-superpowers/references/gemini-tools.md +33 -0
  58. package/config/agent/skills/verification-before-completion/SKILL.md +140 -0
  59. package/config/agent/skills/writing-plans/SKILL.md +146 -0
  60. package/config/agent/skills/writing-plans/plan-document-reviewer-prompt.md +49 -0
  61. package/config/agent/skills/writing-skills/SKILL.md +667 -0
  62. package/config/agent/skills/writing-skills/anthropic-best-practices.md +1150 -0
  63. package/config/agent/skills/writing-skills/examples/CLAUDE_MD_TESTING.md +189 -0
  64. package/config/agent/skills/writing-skills/graphviz-conventions.dot +172 -0
  65. package/config/agent/skills/writing-skills/persuasion-principles.md +187 -0
  66. package/config/agent/skills/writing-skills/render-graphs.js +168 -0
  67. package/config/agent/skills/writing-skills/testing-skills-with-subagents.md +384 -0
  68. package/package.json +14 -7
  69. package/scripts/postinstall.js +81 -19
  70. package/config/agent/skills/github/SKILL.md +0 -47
  71. package/config/agent/skills/owasp/SKILL.md +0 -169
  72. package/config/agent/skills/pua/SKILL.md +0 -364
  73. package/config/agent/skills/skill-creator/references/output-patterns.md +0 -82
  74. package/config/agent/skills/skill-creator/references/workflows.md +0 -28
  75. package/config/agent/skills/skill-creator/scripts/init_skill.py +0 -303
package/config/agent/skills/skill-creator/scripts/run_eval.py
@@ -0,0 +1,310 @@
+ #!/usr/bin/env python3
+ """Run trigger evaluation for a skill description.
+
+ Tests whether a skill's description causes Claude to trigger (read the skill)
+ for a set of queries. Outputs results as JSON.
+ """
+
+ import argparse
+ import json
+ import os
+ import select
+ import subprocess
+ import sys
+ import time
+ import uuid
+ from concurrent.futures import ProcessPoolExecutor, as_completed
+ from pathlib import Path
+
+ from scripts.utils import parse_skill_md
+
+
+ def find_project_root() -> Path:
+     """Find the project root by walking up from cwd looking for .claude/.
+
+     Mimics how Claude Code discovers its project root, so the command file
+     we create ends up where claude -p will look for it.
+     """
+     current = Path.cwd()
+     for parent in [current, *current.parents]:
+         if (parent / ".claude").is_dir():
+             return parent
+     return current
+
+
+ def run_single_query(
+     query: str,
+     skill_name: str,
+     skill_description: str,
+     timeout: int,
+     project_root: str,
+     model: str | None = None,
+ ) -> bool:
+     """Run a single query and return whether the skill was triggered.
+
+     Creates a command file in .claude/commands/ so it appears in Claude's
+     available_skills list, then runs `claude -p` with the raw query.
+     Uses --include-partial-messages to detect triggering early from
+     stream events (content_block_start) rather than waiting for the
+     full assistant message, which only arrives after tool execution.
+     """
+     unique_id = uuid.uuid4().hex[:8]
+     clean_name = f"{skill_name}-skill-{unique_id}"
+     project_commands_dir = Path(project_root) / ".claude" / "commands"
+     command_file = project_commands_dir / f"{clean_name}.md"
+
+     try:
+         project_commands_dir.mkdir(parents=True, exist_ok=True)
+         # Use YAML block scalar to avoid breaking on quotes in description
+         indented_desc = "\n ".join(skill_description.split("\n"))
+         command_content = (
+             f"---\n"
+             f"description: |\n"
+             f" {indented_desc}\n"
+             f"---\n\n"
+             f"# {skill_name}\n\n"
+             f"This skill handles: {skill_description}\n"
+         )
+         command_file.write_text(command_content)
+
+         cmd = [
+             "claude",
+             "-p", query,
+             "--output-format", "stream-json",
+             "--verbose",
+             "--include-partial-messages",
+         ]
+         if model:
+             cmd.extend(["--model", model])
+
+         # Remove CLAUDECODE env var to allow nesting claude -p inside a
+         # Claude Code session. The guard is for interactive terminal conflicts;
+         # programmatic subprocess usage is safe.
+         env = {k: v for k, v in os.environ.items() if k != "CLAUDECODE"}
+
+         process = subprocess.Popen(
+             cmd,
+             stdout=subprocess.PIPE,
+             stderr=subprocess.DEVNULL,
+             cwd=project_root,
+             env=env,
+         )
+
+         triggered = False
+         start_time = time.time()
+         buffer = ""
+         # Track state for stream event detection
+         pending_tool_name = None
+         accumulated_json = ""
+
+         try:
+             while time.time() - start_time < timeout:
+                 exited = process.poll() is not None
+                 if exited:
+                     # Drain anything written before exit so the final
+                     # buffered events are still parsed below.
+                     remaining = process.stdout.read()
+                     if remaining:
+                         buffer += remaining.decode("utf-8", errors="replace")
+                 else:
+                     ready, _, _ = select.select([process.stdout], [], [], 1.0)
+                     if not ready:
+                         continue
+                     chunk = os.read(process.stdout.fileno(), 8192)
+                     if not chunk:
+                         exited = True
+                     else:
+                         buffer += chunk.decode("utf-8", errors="replace")
+
+                 while "\n" in buffer:
+                     line, buffer = buffer.split("\n", 1)
+                     line = line.strip()
+                     if not line:
+                         continue
+
+                     try:
+                         event = json.loads(line)
+                     except json.JSONDecodeError:
+                         continue
+
+                     # Early detection via stream events
+                     if event.get("type") == "stream_event":
+                         se = event.get("event", {})
+                         se_type = se.get("type", "")
+
+                         if se_type == "content_block_start":
+                             cb = se.get("content_block", {})
+                             if cb.get("type") == "tool_use":
+                                 tool_name = cb.get("name", "")
+                                 if tool_name in ("Skill", "Read"):
+                                     pending_tool_name = tool_name
+                                     accumulated_json = ""
+                                 else:
+                                     return False
+
+                         elif se_type == "content_block_delta" and pending_tool_name:
+                             delta = se.get("delta", {})
+                             if delta.get("type") == "input_json_delta":
+                                 accumulated_json += delta.get("partial_json", "")
+                                 if clean_name in accumulated_json:
+                                     return True
+
+                         elif se_type in ("content_block_stop", "message_stop"):
+                             if pending_tool_name:
+                                 return clean_name in accumulated_json
+                             if se_type == "message_stop":
+                                 return False
+
+                     # Fallback: full assistant message
+                     elif event.get("type") == "assistant":
+                         message = event.get("message", {})
+                         for content_item in message.get("content", []):
+                             if content_item.get("type") != "tool_use":
+                                 continue
+                             tool_name = content_item.get("name", "")
+                             tool_input = content_item.get("input", {})
+                             if tool_name == "Skill" and clean_name in tool_input.get("skill", ""):
+                                 triggered = True
+                             elif tool_name == "Read" and clean_name in tool_input.get("file_path", ""):
+                                 triggered = True
+                         return triggered
+
+                     elif event.get("type") == "result":
+                         return triggered
+
+                 if exited:
+                     break
+         finally:
+             # Clean up process on any exit path (return, exception, timeout)
+             if process.poll() is None:
+                 process.kill()
+             process.wait()
+
+         return triggered
+     finally:
+         if command_file.exists():
+             command_file.unlink()
+
+
+ def run_eval(
+     eval_set: list[dict],
+     skill_name: str,
+     description: str,
+     num_workers: int,
+     timeout: int,
+     project_root: Path,
+     runs_per_query: int = 1,
+     trigger_threshold: float = 0.5,
+     model: str | None = None,
+ ) -> dict:
+     """Run the full eval set and return results."""
+     results = []
+
+     with ProcessPoolExecutor(max_workers=num_workers) as executor:
+         future_to_info = {}
+         for item in eval_set:
+             for run_idx in range(runs_per_query):
+                 future = executor.submit(
+                     run_single_query,
+                     item["query"],
+                     skill_name,
+                     description,
+                     timeout,
+                     str(project_root),
+                     model,
+                 )
+                 future_to_info[future] = (item, run_idx)
+
+         query_triggers: dict[str, list[bool]] = {}
+         query_items: dict[str, dict] = {}
+         for future in as_completed(future_to_info):
+             item, _ = future_to_info[future]
+             query = item["query"]
+             query_items[query] = item
+             if query not in query_triggers:
+                 query_triggers[query] = []
+             try:
+                 query_triggers[query].append(future.result())
+             except Exception as e:
+                 print(f"Warning: query failed: {e}", file=sys.stderr)
+                 query_triggers[query].append(False)
+
+     for query, triggers in query_triggers.items():
+         item = query_items[query]
+         trigger_rate = sum(triggers) / len(triggers)
+         should_trigger = item["should_trigger"]
+         if should_trigger:
+             did_pass = trigger_rate >= trigger_threshold
+         else:
+             did_pass = trigger_rate < trigger_threshold
+         results.append({
+             "query": query,
+             "should_trigger": should_trigger,
+             "trigger_rate": trigger_rate,
+             "triggers": sum(triggers),
+             "runs": len(triggers),
+             "pass": did_pass,
+         })
+
+     passed = sum(1 for r in results if r["pass"])
+     total = len(results)
+
+     return {
+         "skill_name": skill_name,
+         "description": description,
+         "results": results,
+         "summary": {
+             "total": total,
+             "passed": passed,
+             "failed": total - passed,
+         },
+     }
+
+
+ def main():
+     parser = argparse.ArgumentParser(description="Run trigger evaluation for a skill description")
+     parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file")
+     parser.add_argument("--skill-path", required=True, help="Path to skill directory")
+     parser.add_argument("--description", default=None, help="Override description to test")
+     parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers")
+     parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds")
+     parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query")
+     parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold")
+     parser.add_argument("--model", default=None, help="Model to use for claude -p (default: user's configured model)")
+     parser.add_argument("--verbose", action="store_true", help="Print progress to stderr")
+     args = parser.parse_args()
+
+     eval_set = json.loads(Path(args.eval_set).read_text())
+     skill_path = Path(args.skill_path)
+
+     if not (skill_path / "SKILL.md").exists():
+         print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr)
+         sys.exit(1)
+
+     name, original_description, content = parse_skill_md(skill_path)
+     description = args.description or original_description
+     project_root = find_project_root()
+
+     if args.verbose:
+         print(f"Evaluating: {description}", file=sys.stderr)
+
+     output = run_eval(
+         eval_set=eval_set,
+         skill_name=name,
+         description=description,
+         num_workers=args.num_workers,
+         timeout=args.timeout,
+         project_root=project_root,
+         runs_per_query=args.runs_per_query,
+         trigger_threshold=args.trigger_threshold,
+         model=args.model,
+     )
+
+     if args.verbose:
+         summary = output["summary"]
+         print(f"Results: {summary['passed']}/{summary['total']} passed", file=sys.stderr)
+         for r in output["results"]:
+             status = "PASS" if r["pass"] else "FAIL"
+             rate_str = f"{r['triggers']}/{r['runs']}"
+             print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:70]}", file=sys.stderr)
+
+     print(json.dumps(output, indent=2))
+
+
+ if __name__ == "__main__":
+     main()
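
run_eval.py assumes only two keys per eval-set entry, "query" and "should_trigger", and is driven entirely by its CLI flags. A minimal sketch of building an eval set and invoking the script (file names, queries, and the skill path below are illustrative, not part of the package):

# build_eval_set.py -- illustrative; the only required keys are the two
# that run_eval.py reads: "query" and "should_trigger".
import json
from pathlib import Path

eval_set = [
    {"query": "Help me brainstorm a name for my CLI tool", "should_trigger": True},
    {"query": "What's the weather in Paris today?", "should_trigger": False},
]
Path("eval_set.json").write_text(json.dumps(eval_set, indent=2))

# Run from the skill-creator directory as a module so the `scripts.` imports
# resolve, for example:
#   python -m scripts.run_eval --eval-set eval_set.json \
#       --skill-path ../brainstorming --runs-per-query 3 --verbose
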
package/config/agent/skills/skill-creator/scripts/run_loop.py
@@ -0,0 +1,332 @@
+ #!/usr/bin/env python3
+ """Run the eval + improve loop until all pass or max iterations reached.
+
+ Combines run_eval.py and improve_description.py in a loop, tracking history
+ and returning the best description found. Supports train/test split to prevent
+ overfitting.
+ """
+
+ import argparse
+ import json
+ import random
+ import sys
+ import tempfile
+ import time
+ import webbrowser
+ from pathlib import Path
+
+ import anthropic
+
+ from scripts.generate_report import generate_html
+ from scripts.improve_description import improve_description
+ from scripts.run_eval import find_project_root, run_eval
+ from scripts.utils import parse_skill_md
+
+
+ def split_eval_set(eval_set: list[dict], holdout: float, seed: int = 42) -> tuple[list[dict], list[dict]]:
+     """Split eval set into train and test sets, stratified by should_trigger."""
+     random.seed(seed)
+
+     # Separate by should_trigger
+     trigger = [e for e in eval_set if e["should_trigger"]]
+     no_trigger = [e for e in eval_set if not e["should_trigger"]]
+
+     # Shuffle each group
+     random.shuffle(trigger)
+     random.shuffle(no_trigger)
+
+     # Calculate split points
+     n_trigger_test = max(1, int(len(trigger) * holdout))
+     n_no_trigger_test = max(1, int(len(no_trigger) * holdout))
+
+     # Split
+     test_set = trigger[:n_trigger_test] + no_trigger[:n_no_trigger_test]
+     train_set = trigger[n_trigger_test:] + no_trigger[n_no_trigger_test:]
+
+     return train_set, test_set
+
+
+ def run_loop(
+     eval_set: list[dict],
+     skill_path: Path,
+     description_override: str | None,
+     num_workers: int,
+     timeout: int,
+     max_iterations: int,
+     runs_per_query: int,
+     trigger_threshold: float,
+     holdout: float,
+     model: str,
+     verbose: bool,
+     live_report_path: Path | None = None,
+     log_dir: Path | None = None,
+ ) -> dict:
+     """Run the eval + improvement loop."""
+     project_root = find_project_root()
+     name, original_description, content = parse_skill_md(skill_path)
+     current_description = description_override or original_description
+
+     # Split into train/test if holdout > 0
+     if holdout > 0:
+         train_set, test_set = split_eval_set(eval_set, holdout)
+         if verbose:
+             print(f"Split: {len(train_set)} train, {len(test_set)} test (holdout={holdout})", file=sys.stderr)
+     else:
+         train_set = eval_set
+         test_set = []
+
+     client = anthropic.Anthropic()
+     history = []
+     exit_reason = "unknown"
+
+     for iteration in range(1, max_iterations + 1):
+         if verbose:
+             print(f"\n{'='*60}", file=sys.stderr)
+             print(f"Iteration {iteration}/{max_iterations}", file=sys.stderr)
+             print(f"Description: {current_description}", file=sys.stderr)
+             print(f"{'='*60}", file=sys.stderr)
+
+         # Evaluate train + test together in one batch for parallelism
+         all_queries = train_set + test_set
+         t0 = time.time()
+         all_results = run_eval(
+             eval_set=all_queries,
+             skill_name=name,
+             description=current_description,
+             num_workers=num_workers,
+             timeout=timeout,
+             project_root=project_root,
+             runs_per_query=runs_per_query,
+             trigger_threshold=trigger_threshold,
+             model=model,
+         )
+         eval_elapsed = time.time() - t0
+
+         # Split results back into train/test by matching queries
+         train_queries_set = {q["query"] for q in train_set}
+         train_result_list = [r for r in all_results["results"] if r["query"] in train_queries_set]
+         test_result_list = [r for r in all_results["results"] if r["query"] not in train_queries_set]
+
+         train_passed = sum(1 for r in train_result_list if r["pass"])
+         train_total = len(train_result_list)
+         train_summary = {"passed": train_passed, "failed": train_total - train_passed, "total": train_total}
+         train_results = {"results": train_result_list, "summary": train_summary}
+
+         if test_set:
+             test_passed = sum(1 for r in test_result_list if r["pass"])
+             test_total = len(test_result_list)
+             test_summary = {"passed": test_passed, "failed": test_total - test_passed, "total": test_total}
+             test_results = {"results": test_result_list, "summary": test_summary}
+         else:
+             test_results = None
+             test_summary = None
+
+         history.append({
+             "iteration": iteration,
+             "description": current_description,
+             "train_passed": train_summary["passed"],
+             "train_failed": train_summary["failed"],
+             "train_total": train_summary["total"],
+             "train_results": train_results["results"],
+             "test_passed": test_summary["passed"] if test_summary else None,
+             "test_failed": test_summary["failed"] if test_summary else None,
+             "test_total": test_summary["total"] if test_summary else None,
+             "test_results": test_results["results"] if test_results else None,
+             # For backward compat with report generator
+             "passed": train_summary["passed"],
+             "failed": train_summary["failed"],
+             "total": train_summary["total"],
+             "results": train_results["results"],
+         })
+
+         # Write live report if path provided
+         if live_report_path:
+             partial_output = {
+                 "original_description": original_description,
+                 "best_description": current_description,
+                 "best_score": "in progress",
+                 "iterations_run": len(history),
+                 "holdout": holdout,
+                 "train_size": len(train_set),
+                 "test_size": len(test_set),
+                 "history": history,
+             }
+             live_report_path.write_text(generate_html(partial_output, auto_refresh=True, skill_name=name))
+
+         if verbose:
+             def print_eval_stats(label, results, elapsed):
+                 pos = [r for r in results if r["should_trigger"]]
+                 neg = [r for r in results if not r["should_trigger"]]
+                 tp = sum(r["triggers"] for r in pos)
+                 pos_runs = sum(r["runs"] for r in pos)
+                 fn = pos_runs - tp
+                 fp = sum(r["triggers"] for r in neg)
+                 neg_runs = sum(r["runs"] for r in neg)
+                 tn = neg_runs - fp
+                 total = tp + tn + fp + fn
+                 precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0
+                 recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0
+                 accuracy = (tp + tn) / total if total > 0 else 0.0
+                 print(f"{label}: {tp+tn}/{total} correct, precision={precision:.0%} recall={recall:.0%} accuracy={accuracy:.0%} ({elapsed:.1f}s)", file=sys.stderr)
+                 for r in results:
+                     status = "PASS" if r["pass"] else "FAIL"
+                     rate_str = f"{r['triggers']}/{r['runs']}"
+                     print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:60]}", file=sys.stderr)
+
+             print_eval_stats("Train", train_results["results"], eval_elapsed)
+             if test_summary:
+                 print_eval_stats("Test ", test_results["results"], 0)
+
+         if train_summary["failed"] == 0:
+             exit_reason = f"all_passed (iteration {iteration})"
+             if verbose:
+                 print(f"\nAll train queries passed on iteration {iteration}!", file=sys.stderr)
+             break
+
+         if iteration == max_iterations:
+             exit_reason = f"max_iterations ({max_iterations})"
+             if verbose:
+                 print(f"\nMax iterations reached ({max_iterations}).", file=sys.stderr)
+             break
+
+         # Improve the description based on train results
+         if verbose:
+             print("\nImproving description...", file=sys.stderr)
+
+         t0 = time.time()
+         # Strip test scores from history so improvement model can't see them
+         blinded_history = [
+             {k: v for k, v in h.items() if not k.startswith("test_")}
+             for h in history
+         ]
+         new_description = improve_description(
+             client=client,
+             skill_name=name,
+             skill_content=content,
+             current_description=current_description,
+             eval_results=train_results,
+             history=blinded_history,
+             model=model,
+             log_dir=log_dir,
+             iteration=iteration,
+         )
+         improve_elapsed = time.time() - t0
+
+         if verbose:
+             print(f"Proposed ({improve_elapsed:.1f}s): {new_description}", file=sys.stderr)
+
+         current_description = new_description
+
+     # Find the best iteration by TEST score (or train if no test set)
+     if test_set:
+         best = max(history, key=lambda h: h["test_passed"] or 0)
+         best_score = f"{best['test_passed']}/{best['test_total']}"
+     else:
+         best = max(history, key=lambda h: h["train_passed"])
+         best_score = f"{best['train_passed']}/{best['train_total']}"
+
+     if verbose:
+         print(f"\nExit reason: {exit_reason}", file=sys.stderr)
+         print(f"Best score: {best_score} (iteration {best['iteration']})", file=sys.stderr)
+
+     return {
+         "exit_reason": exit_reason,
+         "original_description": original_description,
+         "best_description": best["description"],
+         "best_score": best_score,
+         "best_train_score": f"{best['train_passed']}/{best['train_total']}",
+         "best_test_score": f"{best['test_passed']}/{best['test_total']}" if test_set else None,
+         "final_description": current_description,
+         "iterations_run": len(history),
+         "holdout": holdout,
+         "train_size": len(train_set),
+         "test_size": len(test_set),
+         "history": history,
+     }
+
+
+ def main():
+     parser = argparse.ArgumentParser(description="Run eval + improve loop")
+     parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file")
+     parser.add_argument("--skill-path", required=True, help="Path to skill directory")
+     parser.add_argument("--description", default=None, help="Override starting description")
+     parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers")
+     parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds")
+     parser.add_argument("--max-iterations", type=int, default=5, help="Max improvement iterations")
+     parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query")
+     parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold")
+     parser.add_argument("--holdout", type=float, default=0.4, help="Fraction of eval set to hold out for testing (0 to disable)")
+     parser.add_argument("--model", required=True, help="Model for improvement")
+     parser.add_argument("--verbose", action="store_true", help="Print progress to stderr")
+     parser.add_argument("--report", default="auto", help="Generate HTML report at this path (default: 'auto' for temp file, 'none' to disable)")
+     parser.add_argument("--results-dir", default=None, help="Save all outputs (results.json, report.html, log.txt) to a timestamped subdirectory here")
+     args = parser.parse_args()
+
+     eval_set = json.loads(Path(args.eval_set).read_text())
+     skill_path = Path(args.skill_path)
+
+     if not (skill_path / "SKILL.md").exists():
+         print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr)
+         sys.exit(1)
+
+     name, _, _ = parse_skill_md(skill_path)
+
+     # Set up live report path
+     if args.report != "none":
+         if args.report == "auto":
+             timestamp = time.strftime("%Y%m%d_%H%M%S")
+             live_report_path = Path(tempfile.gettempdir()) / f"skill_description_report_{skill_path.name}_{timestamp}.html"
+         else:
+             live_report_path = Path(args.report)
+         # Open the report immediately so the user can watch
+         live_report_path.write_text("<html><head><meta http-equiv='refresh' content='5'></head><body><h1>Starting optimization loop...</h1></body></html>")
+         # Pass a file:// URI; bare paths are not reliably opened on all platforms
+         webbrowser.open(live_report_path.resolve().as_uri())
+     else:
+         live_report_path = None
+
+     # Determine output directory (create before run_loop so logs can be written)
+     if args.results_dir:
+         timestamp = time.strftime("%Y-%m-%d_%H%M%S")
+         results_dir = Path(args.results_dir) / timestamp
+         results_dir.mkdir(parents=True, exist_ok=True)
+     else:
+         results_dir = None
+
+     log_dir = results_dir / "logs" if results_dir else None
+
+     output = run_loop(
+         eval_set=eval_set,
+         skill_path=skill_path,
+         description_override=args.description,
+         num_workers=args.num_workers,
+         timeout=args.timeout,
+         max_iterations=args.max_iterations,
+         runs_per_query=args.runs_per_query,
+         trigger_threshold=args.trigger_threshold,
+         holdout=args.holdout,
+         model=args.model,
+         verbose=args.verbose,
+         live_report_path=live_report_path,
+         log_dir=log_dir,
+     )
+
+     # Save JSON output
+     json_output = json.dumps(output, indent=2)
+     print(json_output)
+     if results_dir:
+         (results_dir / "results.json").write_text(json_output)
+
+     # Write final HTML report (without auto-refresh)
+     if live_report_path:
+         live_report_path.write_text(generate_html(output, auto_refresh=False, skill_name=name))
+         print(f"\nReport: {live_report_path}", file=sys.stderr)
+
+     if results_dir and live_report_path:
+         (results_dir / "report.html").write_text(generate_html(output, auto_refresh=False, skill_name=name))
+
+     if results_dir:
+         print(f"Results saved to: {results_dir}", file=sys.stderr)
+
+
+ if __name__ == "__main__":
+     main()
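
The holdout split in run_loop.py is stratified and seeded, so repeated runs hold out the same queries, and the improvement model only ever sees blinded train results. A small sketch of the split arithmetic and pass criterion (query text is invented for the example):

# With 10 positive and 10 negative queries and --holdout 0.4, four of each
# are held out: a 12-query train set and an 8-query test set.
from scripts.run_loop import split_eval_set

eval_set = (
    [{"query": f"on-topic case {i}", "should_trigger": True} for i in range(10)]
    + [{"query": f"off-topic case {i}", "should_trigger": False} for i in range(10)]
)
train, test = split_eval_set(eval_set, holdout=0.4)
print(len(train), len(test))  # 12 8

# Pass criterion, per run_eval: with runs_per_query=3 and
# trigger_threshold=0.5, a should-trigger query passes at 2/3 or 3/3
# triggers (rate >= 0.5); a should-not-trigger query passes at 0/3 or 1/3.
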
package/config/agent/skills/skill-creator/scripts/utils.py
@@ -0,0 +1,47 @@
+ """Shared utilities for skill-creator scripts."""
+
+ from pathlib import Path
+
+
+ def parse_skill_md(skill_path: Path) -> tuple[str, str, str]:
+     """Parse a SKILL.md file, returning (name, description, full_content)."""
+     content = (skill_path / "SKILL.md").read_text()
+     lines = content.split("\n")
+
+     if lines[0].strip() != "---":
+         raise ValueError("SKILL.md missing frontmatter (no opening ---)")
+
+     end_idx = None
+     for i, line in enumerate(lines[1:], start=1):
+         if line.strip() == "---":
+             end_idx = i
+             break
+
+     if end_idx is None:
+         raise ValueError("SKILL.md missing frontmatter (no closing ---)")
+
+     name = ""
+     description = ""
+     frontmatter_lines = lines[1:end_idx]
+     i = 0
+     while i < len(frontmatter_lines):
+         line = frontmatter_lines[i]
+         if line.startswith("name:"):
+             name = line[len("name:"):].strip().strip('"').strip("'")
+         elif line.startswith("description:"):
+             value = line[len("description:"):].strip()
+             # Handle YAML multiline indicators (>, |, >-, |-)
+             if value in (">", "|", ">-", "|-"):
+                 continuation_lines: list[str] = []
+                 i += 1
+                 while i < len(frontmatter_lines) and (frontmatter_lines[i].startswith(" ") or frontmatter_lines[i].startswith("\t")):
+                     continuation_lines.append(frontmatter_lines[i].strip())
+                     i += 1
+                 description = " ".join(continuation_lines)
+                 continue
+             else:
+                 description = value.strip('"').strip("'")
+         i += 1
+
+     return name, description, content
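
parse_skill_md accepts both single-line descriptions and YAML block scalars, folding continuation lines with spaces. A quick sketch against an invented SKILL.md:

# Exercises the block-scalar branch; the skill name and content are made up.
from pathlib import Path
from scripts.utils import parse_skill_md

skill_dir = Path("demo-skill")
skill_dir.mkdir(exist_ok=True)
(skill_dir / "SKILL.md").write_text(
    "---\n"
    "name: demo-skill\n"
    "description: >\n"
    "  Use when checking how the parser folds\n"
    "  a multi-line description.\n"
    "---\n"
    "\n"
    "# demo-skill\n"
)
name, description, _ = parse_skill_md(skill_dir)
print(name)         # demo-skill
print(description)  # Use when checking how the parser folds a multi-line description.
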