evizi-kit 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (201) hide show
  1. package/README.md +506 -0
  2. package/kits/agent/.agent/skills/claude-code-subagent-creator/SKILL.md +292 -0
  3. package/kits/agent/.agent/skills/claude-code-subagent-creator/references/claude-code-subagent-configuration.md +158 -0
  4. package/kits/agent/.agent/skills/claude-code-subagent-creator/templates/subagent-profile.template.md +26 -0
  5. package/kits/agent/.agent/skills/skill-creator/LICENSE.txt +202 -0
  6. package/kits/agent/.agent/skills/skill-creator/SKILL.md +485 -0
  7. package/kits/agent/.agent/skills/skill-creator/agents/analyzer.md +274 -0
  8. package/kits/agent/.agent/skills/skill-creator/agents/comparator.md +202 -0
  9. package/kits/agent/.agent/skills/skill-creator/agents/grader.md +223 -0
  10. package/kits/agent/.agent/skills/skill-creator/assets/eval_review.html +146 -0
  11. package/kits/agent/.agent/skills/skill-creator/eval-viewer/generate_review.py +471 -0
  12. package/kits/agent/.agent/skills/skill-creator/eval-viewer/viewer.html +1325 -0
  13. package/kits/agent/.agent/skills/skill-creator/references/schemas.md +430 -0
  14. package/kits/agent/.agent/skills/skill-creator/scripts/__init__.py +0 -0
  15. package/kits/agent/.agent/skills/skill-creator/scripts/aggregate_benchmark.py +401 -0
  16. package/kits/agent/.agent/skills/skill-creator/scripts/generate_report.py +326 -0
  17. package/kits/agent/.agent/skills/skill-creator/scripts/improve_description.py +247 -0
  18. package/kits/agent/.agent/skills/skill-creator/scripts/package_skill.py +136 -0
  19. package/kits/agent/.agent/skills/skill-creator/scripts/quick_validate.py +103 -0
  20. package/kits/agent/.agent/skills/skill-creator/scripts/run_eval.py +310 -0
  21. package/kits/agent/.agent/skills/skill-creator/scripts/run_loop.py +328 -0
  22. package/kits/agent/.agent/skills/skill-creator/scripts/utils.py +47 -0
  23. package/kits/agent/manifest.json +10 -0
  24. package/kits/claude/.claude/agents/code-pusher.md +46 -0
  25. package/kits/claude/.claude/agents/feature-document-updater.md +37 -0
  26. package/kits/claude/.claude/agents/self-reviewer.md +32 -0
  27. package/kits/claude/.claude/agents/web-auto-agentic-workflow-initializer.md +42 -0
  28. package/kits/claude/.claude/agents/web-auto-assisted-fix-and-runner.md +36 -0
  29. package/kits/claude/.claude/agents/web-auto-chrome-devtools-selector-extractor.md +36 -0
  30. package/kits/claude/.claude/agents/web-auto-coder.md +33 -0
  31. package/kits/claude/.claude/agents/web-auto-fe-selector-extractor.md +31 -0
  32. package/kits/claude/.claude/agents/web-auto-fix-and-runner.md +35 -0
  33. package/kits/claude/.claude/agents/web-auto-lessons-learned-extractor.md +34 -0
  34. package/kits/claude/.claude/agents/web-auto-playwright-mcp-selector-extractor.md +37 -0
  35. package/kits/claude/.claude/agents/web-auto-source-instructions-updater.md +43 -0
  36. package/kits/claude/.claude/agents/web-auto-test-cases-generator.md +29 -0
  37. package/kits/claude/.claude/agents/web-auto-ticket-designer.md +35 -0
  38. package/kits/claude/.claude/agents/web-auto-ticket-playbook-planner.md +36 -0
  39. package/kits/claude/.claude/agents/web-auto.md +382 -0
  40. package/kits/claude/.claude/skills/claude-code-subagent-creator/SKILL.md +292 -0
  41. package/kits/claude/.claude/skills/claude-code-subagent-creator/references/claude-code-subagent-configuration.md +158 -0
  42. package/kits/claude/.claude/skills/claude-code-subagent-creator/templates/subagent-profile.template.md +26 -0
  43. package/kits/claude/.claude/skills/skill-creator/LICENSE.txt +202 -0
  44. package/kits/claude/.claude/skills/skill-creator/SKILL.md +485 -0
  45. package/kits/claude/.claude/skills/skill-creator/agents/analyzer.md +274 -0
  46. package/kits/claude/.claude/skills/skill-creator/agents/comparator.md +202 -0
  47. package/kits/claude/.claude/skills/skill-creator/agents/grader.md +223 -0
  48. package/kits/claude/.claude/skills/skill-creator/assets/eval_review.html +146 -0
  49. package/kits/claude/.claude/skills/skill-creator/eval-viewer/generate_review.py +471 -0
  50. package/kits/claude/.claude/skills/skill-creator/eval-viewer/viewer.html +1325 -0
  51. package/kits/claude/.claude/skills/skill-creator/references/schemas.md +430 -0
  52. package/kits/claude/.claude/skills/skill-creator/scripts/__init__.py +0 -0
  53. package/kits/claude/.claude/skills/skill-creator/scripts/aggregate_benchmark.py +401 -0
  54. package/kits/claude/.claude/skills/skill-creator/scripts/generate_report.py +326 -0
  55. package/kits/claude/.claude/skills/skill-creator/scripts/improve_description.py +247 -0
  56. package/kits/claude/.claude/skills/skill-creator/scripts/package_skill.py +136 -0
  57. package/kits/claude/.claude/skills/skill-creator/scripts/quick_validate.py +103 -0
  58. package/kits/claude/.claude/skills/skill-creator/scripts/run_eval.py +310 -0
  59. package/kits/claude/.claude/skills/skill-creator/scripts/run_loop.py +328 -0
  60. package/kits/claude/.claude/skills/skill-creator/scripts/utils.py +47 -0
  61. package/kits/claude/manifest.json +10 -0
  62. package/kits/cursor/.cursor/agents/code-pusher.agent.md +43 -0
  63. package/kits/cursor/.cursor/agents/feature-document-updater.agent.md +34 -0
  64. package/kits/cursor/.cursor/agents/self-reviewer.agent.md +29 -0
  65. package/kits/cursor/.cursor/agents/web-auto-agentic-workflow-initializer.agent.md +37 -0
  66. package/kits/cursor/.cursor/agents/web-auto-assisted-fix-and-runner.agent.md +33 -0
  67. package/kits/cursor/.cursor/agents/web-auto-chrome-devtools-selector-extractor.agent.md +31 -0
  68. package/kits/cursor/.cursor/agents/web-auto-coder.agent.md +30 -0
  69. package/kits/cursor/.cursor/agents/web-auto-fe-selector-extractor.agent.md +28 -0
  70. package/kits/cursor/.cursor/agents/web-auto-fix-and-runner.agent.md +32 -0
  71. package/kits/cursor/.cursor/agents/web-auto-lessons-learned-extractor.agent.md +31 -0
  72. package/kits/cursor/.cursor/agents/web-auto-playwright-mcp-selector-extractor.agent.md +32 -0
  73. package/kits/cursor/.cursor/agents/web-auto-source-instructions-updater.agent.md +40 -0
  74. package/kits/cursor/.cursor/agents/web-auto-test-cases-generator.agent.md +26 -0
  75. package/kits/cursor/.cursor/agents/web-auto-ticket-designer.agent.md +32 -0
  76. package/kits/cursor/.cursor/agents/web-auto-ticket-playbook-planner.agent.md +33 -0
  77. package/kits/cursor/.cursor/agents/web-auto.agent.md +379 -0
  78. package/kits/cursor/.cursor/skills/claude-code-subagent-creator/SKILL.md +292 -0
  79. package/kits/cursor/.cursor/skills/claude-code-subagent-creator/references/claude-code-subagent-configuration.md +158 -0
  80. package/kits/cursor/.cursor/skills/claude-code-subagent-creator/templates/subagent-profile.template.md +26 -0
  81. package/kits/cursor/.cursor/skills/skill-creator/LICENSE.txt +202 -0
  82. package/kits/cursor/.cursor/skills/skill-creator/SKILL.md +485 -0
  83. package/kits/cursor/.cursor/skills/skill-creator/agents/analyzer.md +274 -0
  84. package/kits/cursor/.cursor/skills/skill-creator/agents/comparator.md +202 -0
  85. package/kits/cursor/.cursor/skills/skill-creator/agents/grader.md +223 -0
  86. package/kits/cursor/.cursor/skills/skill-creator/assets/eval_review.html +146 -0
  87. package/kits/cursor/.cursor/skills/skill-creator/eval-viewer/generate_review.py +471 -0
  88. package/kits/cursor/.cursor/skills/skill-creator/eval-viewer/viewer.html +1325 -0
  89. package/kits/cursor/.cursor/skills/skill-creator/references/schemas.md +430 -0
  90. package/kits/cursor/.cursor/skills/skill-creator/scripts/__init__.py +0 -0
  91. package/kits/cursor/.cursor/skills/skill-creator/scripts/aggregate_benchmark.py +401 -0
  92. package/kits/cursor/.cursor/skills/skill-creator/scripts/generate_report.py +326 -0
  93. package/kits/cursor/.cursor/skills/skill-creator/scripts/improve_description.py +247 -0
  94. package/kits/cursor/.cursor/skills/skill-creator/scripts/package_skill.py +136 -0
  95. package/kits/cursor/.cursor/skills/skill-creator/scripts/quick_validate.py +103 -0
  96. package/kits/cursor/.cursor/skills/skill-creator/scripts/run_eval.py +310 -0
  97. package/kits/cursor/.cursor/skills/skill-creator/scripts/run_loop.py +328 -0
  98. package/kits/cursor/.cursor/skills/skill-creator/scripts/utils.py +47 -0
  99. package/kits/cursor/manifest.json +10 -0
  100. package/kits/github/.github/agents/code-pusher.agent.md +45 -0
  101. package/kits/github/.github/agents/feature-document-updater.agent.md +36 -0
  102. package/kits/github/.github/agents/self-reviewer.agent.md +31 -0
  103. package/kits/github/.github/agents/web-auto-agentic-workflow-initializer.agent.md +39 -0
  104. package/kits/github/.github/agents/web-auto-assisted-fix-and-runner.agent.md +35 -0
  105. package/kits/github/.github/agents/web-auto-chrome-devtools-selector-extractor.agent.md +33 -0
  106. package/kits/github/.github/agents/web-auto-coder.agent.md +32 -0
  107. package/kits/github/.github/agents/web-auto-fe-selector-extractor.agent.md +30 -0
  108. package/kits/github/.github/agents/web-auto-fix-and-runner.agent.md +34 -0
  109. package/kits/github/.github/agents/web-auto-lessons-learned-extractor.agent.md +33 -0
  110. package/kits/github/.github/agents/web-auto-playwright-mcp-selector-extractor.agent.md +34 -0
  111. package/kits/github/.github/agents/web-auto-source-instructions-updater.agent.md +42 -0
  112. package/kits/github/.github/agents/web-auto-test-cases-generator.agent.md +28 -0
  113. package/kits/github/.github/agents/web-auto-ticket-designer.agent.md +34 -0
  114. package/kits/github/.github/agents/web-auto-ticket-playbook-creator.agent.md +35 -0
  115. package/kits/github/.github/agents/web-auto.agent.md +382 -0
  116. package/kits/github/.github/skills/claude-code-subagent-creator/SKILL.md +310 -0
  117. package/kits/github/.github/skills/claude-code-subagent-creator/references/claude-code-subagent-configuration.md +158 -0
  118. package/kits/github/.github/skills/claude-code-subagent-creator/templates/subagent-profile.template.md +37 -0
  119. package/kits/github/.github/skills/skill-creator/LICENSE.txt +202 -0
  120. package/kits/github/.github/skills/skill-creator/SKILL.md +485 -0
  121. package/kits/github/.github/skills/skill-creator/agents/analyzer.md +274 -0
  122. package/kits/github/.github/skills/skill-creator/agents/comparator.md +202 -0
  123. package/kits/github/.github/skills/skill-creator/agents/grader.md +223 -0
  124. package/kits/github/.github/skills/skill-creator/assets/eval_review.html +146 -0
  125. package/kits/github/.github/skills/skill-creator/eval-viewer/generate_review.py +471 -0
  126. package/kits/github/.github/skills/skill-creator/eval-viewer/viewer.html +1325 -0
  127. package/kits/github/.github/skills/skill-creator/references/schemas.md +430 -0
  128. package/kits/github/.github/skills/skill-creator/scripts/__init__.py +0 -0
  129. package/kits/github/.github/skills/skill-creator/scripts/aggregate_benchmark.py +401 -0
  130. package/kits/github/.github/skills/skill-creator/scripts/generate_report.py +326 -0
  131. package/kits/github/.github/skills/skill-creator/scripts/improve_description.py +247 -0
  132. package/kits/github/.github/skills/skill-creator/scripts/package_skill.py +136 -0
  133. package/kits/github/.github/skills/skill-creator/scripts/quick_validate.py +103 -0
  134. package/kits/github/.github/skills/skill-creator/scripts/run_eval.py +310 -0
  135. package/kits/github/.github/skills/skill-creator/scripts/run_loop.py +328 -0
  136. package/kits/github/.github/skills/skill-creator/scripts/utils.py +47 -0
  137. package/kits/github/manifest.json +10 -0
  138. package/kits/shared/docs/ai-code-review.md +440 -0
  139. package/kits/shared/docs/increase-unit-test-coverage.md +77 -0
  140. package/kits/shared/docs/pr-review-agent.md +501 -0
  141. package/kits/shared/docs/self-review-agent.md +246 -0
  142. package/kits/shared/docs/web-auto-agentic-workflow.md +506 -0
  143. package/kits/shared/manifest.json +11 -0
  144. package/kits/shared/skills/fix-automation-tests/SKILL.md +280 -0
  145. package/kits/shared/skills/fix-automation-tests/scripts/fetch_pr_changes.py +300 -0
  146. package/kits/shared/skills/fix-automation-tests/templates/impact-report.template.md +42 -0
  147. package/kits/shared/skills/increase-unit-test-coverage/SKILL.md +117 -0
  148. package/kits/shared/skills/increase-unit-test-coverage/scripts/filter_low_coverage.py +447 -0
  149. package/kits/shared/skills/pr-review/SKILL.md +200 -0
  150. package/kits/shared/skills/pr-review/references/automation.md +62 -0
  151. package/kits/shared/skills/pr-review/references/backend.md +95 -0
  152. package/kits/shared/skills/pr-review/references/frontend.md +103 -0
  153. package/kits/shared/skills/pr-review/references/mobile.md +108 -0
  154. package/kits/shared/skills/pr-review/references/output-schema.md +130 -0
  155. package/kits/shared/skills/pr-review/scripts/post-review.py +1395 -0
  156. package/kits/shared/skills/push-code/SKILL.md +176 -0
  157. package/kits/shared/skills/self-review/SKILL.md +234 -0
  158. package/kits/shared/skills/self-review/evals/evals.json +23 -0
  159. package/kits/shared/skills/self-review/references/automation.md +62 -0
  160. package/kits/shared/skills/self-review/references/backend.md +95 -0
  161. package/kits/shared/skills/self-review/references/frontend.md +103 -0
  162. package/kits/shared/skills/self-review/references/mobile.md +108 -0
  163. package/kits/shared/skills/self-review/templates/issues.template.md +72 -0
  164. package/kits/shared/skills/update-feature-document/SKILL.md +156 -0
  165. package/kits/shared/skills/update-feature-document/templates/delta.template.yaml +58 -0
  166. package/kits/shared/skills/update-feature-document/templates/feature.template.md +25 -0
  167. package/kits/shared/skills/web-auto-assisted-fix-and-run/SKILL.md +130 -0
  168. package/kits/shared/skills/web-auto-assisted-fix-and-run/references/resolve-api-error.md +108 -0
  169. package/kits/shared/skills/web-auto-assisted-fix-and-run/references/resolve-selector.md +60 -0
  170. package/kits/shared/skills/web-auto-assisted-fix-and-run/templates/issues-resolution-report-append.template.md +54 -0
  171. package/kits/shared/skills/web-auto-chrome-devtools-mcp-extract-selectors/SKILL.md +284 -0
  172. package/kits/shared/skills/web-auto-coding/SKILL.md +152 -0
  173. package/kits/shared/skills/web-auto-extract-lessons-learned/SKILL.md +168 -0
  174. package/kits/shared/skills/web-auto-extract-lessons-learned/templates/lessons-learned.template.md +115 -0
  175. package/kits/shared/skills/web-auto-fe-extract-selectors/SKILL.md +282 -0
  176. package/kits/shared/skills/web-auto-fe-extract-selectors/evals/evals.json +23 -0
  177. package/kits/shared/skills/web-auto-fix-and-run-test/SKILL.md +183 -0
  178. package/kits/shared/skills/web-auto-fix-and-run-test/templates/issues-resolution-report.template.md +77 -0
  179. package/kits/shared/skills/web-auto-generate-best-practices/SKILL.md +123 -0
  180. package/kits/shared/skills/web-auto-generate-instructions/SKILL.md +200 -0
  181. package/kits/shared/skills/web-auto-generate-instructions/evals/evals.json +23 -0
  182. package/kits/shared/skills/web-auto-generate-instructions/references/analysis-guide.md +145 -0
  183. package/kits/shared/skills/web-auto-generate-instructions/templates/web-auto-instructions.template.md +184 -0
  184. package/kits/shared/skills/web-auto-generate-project-blueprint/SKILL.md +181 -0
  185. package/kits/shared/skills/web-auto-generate-project-blueprint/evals/evals.json +57 -0
  186. package/kits/shared/skills/web-auto-generate-project-blueprint/templates/web-auto-project-blueprint.template.md +161 -0
  187. package/kits/shared/skills/web-auto-playwright-mcp-extract-selectors/SKILL.md +293 -0
  188. package/kits/shared/skills/web-auto-test-cases/SKILL.md +138 -0
  189. package/kits/shared/skills/web-auto-test-cases/evals/evals.json +129 -0
  190. package/kits/shared/skills/web-auto-test-cases/templates/test-cases.template.md +53 -0
  191. package/kits/shared/skills/web-auto-ticket-design/SKILL.md +199 -0
  192. package/kits/shared/skills/web-auto-ticket-design/templates/ticket-design.template.md +138 -0
  193. package/kits/shared/skills/web-auto-ticket-playbook/SKILL.md +218 -0
  194. package/kits/shared/skills/web-auto-ticket-playbook/evals/evals.json +23 -0
  195. package/kits/shared/skills/web-auto-ticket-playbook/templates/ticket-playbook.template.md +148 -0
  196. package/kits/shared/skills/web-auto-update-source-instructions/SKILL.md +156 -0
  197. package/kits/shared/skills/web-auto-update-source-instructions/evals/evals.json +22 -0
  198. package/kits/shared/skills/workspace-ai-nav-creator/SKILL.md +168 -0
  199. package/kits/shared/skills/workspace-ai-nav-creator/templates/agents-md.template.md +112 -0
  200. package/kits/shared/skills/workspace-ai-nav-creator/templates/claude-md.template.md +86 -0
  201. package/package.json +16 -0
@@ -0,0 +1,326 @@
1
+ #!/usr/bin/env python3
2
+ """Generate an HTML report from run_loop.py output.
3
+
4
+ Takes the JSON output from run_loop.py and generates a visual HTML report
5
+ showing each description attempt with check/x for each test case.
6
+ Distinguishes between train and test queries.
7
+ """
8
+
9
+ import argparse
10
+ import html
11
+ import json
12
+ import sys
13
+ from pathlib import Path
14
+
15
+
16
def _aggregate_runs(results: list[dict]) -> tuple[int, int]:
    """Sum (correct, total) run counts across all retries in *results*.

    A run counts as correct when the skill triggered and it should have,
    or did NOT trigger and it shouldn't have.
    """
    correct = 0
    total = 0
    for r in results:
        runs = r.get("runs", 0)
        triggers = r.get("triggers", 0)
        total += runs
        if r.get("should_trigger", True):
            correct += triggers
        else:
            correct += runs - triggers
    return correct, total


def _score_class(correct: int, total: int) -> str:
    """Map a correct/total ratio to a CSS badge class.

    >= 80% -> "score-good", >= 50% -> "score-ok", otherwise (including
    an empty result set, total == 0) -> "score-bad".
    """
    if total > 0:
        ratio = correct / total
        if ratio >= 0.8:
            return "score-good"
        if ratio >= 0.5:
            return "score-ok"
    return "score-bad"


def generate_html(data: dict, auto_refresh: bool = False, skill_name: str = "") -> str:
    """Generate an HTML report from loop output data.

    Args:
        data: Parsed JSON produced by run_loop.py. Expected keys include
            "history" (list of per-iteration dicts), "original_description",
            "best_description", "best_score", "best_test_score",
            "iterations_run", "train_size", "test_size" — all optional;
            missing keys degrade to placeholders rather than raising.
        auto_refresh: If True, add a <meta refresh> tag so the page reloads
            every 5 seconds while the optimization loop is still running.
        skill_name: Optional skill name prefixed to the page title.

    Returns:
        The complete HTML document as a single string.
    """
    history = data.get("history", [])
    title_prefix = html.escape(skill_name + " \u2014 ") if skill_name else ""

    # Collect the unique train/test queries (with should_trigger polarity)
    # from the first iteration; all iterations share the same query sets.
    # "results"/"passed"/"total" are legacy key names kept as fallbacks.
    train_queries: list[dict] = []
    test_queries: list[dict] = []
    if history:
        for r in history[0].get("train_results", history[0].get("results", [])):
            train_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)})
        if history[0].get("test_results"):
            for r in history[0].get("test_results", []):
                test_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)})

    refresh_tag = ' <meta http-equiv="refresh" content="5">\n' if auto_refresh else ""

    html_parts = ["""<!DOCTYPE html>
<html>
<head>
 <meta charset="utf-8">
""" + refresh_tag + """ <title>""" + title_prefix + """Skill Description Optimization</title>
 <link rel="preconnect" href="https://fonts.googleapis.com">
 <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
 <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@500;600&family=Lora:wght@400;500&display=swap" rel="stylesheet">
 <style>
 body {
 font-family: 'Lora', Georgia, serif;
 max-width: 100%;
 margin: 0 auto;
 padding: 20px;
 background: #faf9f5;
 color: #141413;
 }
 h1 { font-family: 'Poppins', sans-serif; color: #141413; }
 .explainer {
 background: white;
 padding: 15px;
 border-radius: 6px;
 margin-bottom: 20px;
 border: 1px solid #e8e6dc;
 color: #b0aea5;
 font-size: 0.875rem;
 line-height: 1.6;
 }
 .summary {
 background: white;
 padding: 15px;
 border-radius: 6px;
 margin-bottom: 20px;
 border: 1px solid #e8e6dc;
 }
 .summary p { margin: 5px 0; }
 .best { color: #788c5d; font-weight: bold; }
 .table-container {
 overflow-x: auto;
 width: 100%;
 }
 table {
 border-collapse: collapse;
 background: white;
 border: 1px solid #e8e6dc;
 border-radius: 6px;
 font-size: 12px;
 min-width: 100%;
 }
 th, td {
 padding: 8px;
 text-align: left;
 border: 1px solid #e8e6dc;
 white-space: normal;
 word-wrap: break-word;
 }
 th {
 font-family: 'Poppins', sans-serif;
 background: #141413;
 color: #faf9f5;
 font-weight: 500;
 }
 th.test-col {
 background: #6a9bcc;
 }
 th.query-col { min-width: 200px; }
 td.description {
 font-family: monospace;
 font-size: 11px;
 word-wrap: break-word;
 max-width: 400px;
 }
 td.result {
 text-align: center;
 font-size: 16px;
 min-width: 40px;
 }
 td.test-result {
 background: #f0f6fc;
 }
 .pass { color: #788c5d; }
 .fail { color: #c44; }
 .rate {
 font-size: 9px;
 color: #b0aea5;
 display: block;
 }
 tr:hover { background: #faf9f5; }
 .score {
 display: inline-block;
 padding: 2px 6px;
 border-radius: 4px;
 font-weight: bold;
 font-size: 11px;
 }
 .score-good { background: #eef2e8; color: #788c5d; }
 .score-ok { background: #fef3c7; color: #d97706; }
 .score-bad { background: #fceaea; color: #c44; }
 .train-label { color: #b0aea5; font-size: 10px; }
 .test-label { color: #6a9bcc; font-size: 10px; font-weight: bold; }
 .best-row { background: #f5f8f2; }
 th.positive-col { border-bottom: 3px solid #788c5d; }
 th.negative-col { border-bottom: 3px solid #c44; }
 th.test-col.positive-col { border-bottom: 3px solid #788c5d; }
 th.test-col.negative-col { border-bottom: 3px solid #c44; }
 .legend { font-family: 'Poppins', sans-serif; display: flex; gap: 20px; margin-bottom: 10px; font-size: 13px; align-items: center; }
 .legend-item { display: flex; align-items: center; gap: 6px; }
 .legend-swatch { width: 16px; height: 16px; border-radius: 3px; display: inline-block; }
 .swatch-positive { background: #141413; border-bottom: 3px solid #788c5d; }
 .swatch-negative { background: #141413; border-bottom: 3px solid #c44; }
 .swatch-test { background: #6a9bcc; }
 .swatch-train { background: #141413; }
 </style>
</head>
<body>
 <h1>""" + title_prefix + """Skill Description Optimization</h1>
 <div class="explainer">
 <strong>Optimizing your skill's description.</strong> This page updates automatically as Claude tests different versions of your skill's description. Each row is an iteration — a new description attempt. The columns show test queries: green checkmarks mean the skill triggered correctly (or correctly didn't trigger), red crosses mean it got it wrong. The "Train" score shows performance on queries used to improve the description; the "Test" score shows performance on held-out queries the optimizer hasn't seen. When it's done, Claude will apply the best-performing description to your skill.
 </div>
"""]

    # Summary section. Label is "(test)" whenever a held-out test score
    # exists; compare against None (not truthiness) so a legitimate test
    # score of 0 is not mislabeled as a train score.
    best_test_score = data.get('best_test_score')
    html_parts.append(f"""
 <div class="summary">
 <p><strong>Original:</strong> {html.escape(data.get('original_description', 'N/A'))}</p>
 <p class="best"><strong>Best:</strong> {html.escape(data.get('best_description', 'N/A'))}</p>
 <p><strong>Best Score:</strong> {data.get('best_score', 'N/A')} {'(test)' if best_test_score is not None else '(train)'}</p>
 <p><strong>Iterations:</strong> {data.get('iterations_run', 0)} | <strong>Train:</strong> {data.get('train_size', '?')} | <strong>Test:</strong> {data.get('test_size', '?')}</p>
 </div>
 """)

    # Legend
    html_parts.append("""
 <div class="legend">
 <span style="font-weight:600">Query columns:</span>
 <span class="legend-item"><span class="legend-swatch swatch-positive"></span> Should trigger</span>
 <span class="legend-item"><span class="legend-swatch swatch-negative"></span> Should NOT trigger</span>
 <span class="legend-item"><span class="legend-swatch swatch-train"></span> Train</span>
 <span class="legend-item"><span class="legend-swatch swatch-test"></span> Test</span>
 </div>
 """)

    # Table header
    html_parts.append("""
 <div class="table-container">
 <table>
 <thead>
 <tr>
 <th>Iter</th>
 <th>Train</th>
 <th>Test</th>
 <th class="query-col">Description</th>
 """)

    # Add column headers for train queries
    for qinfo in train_queries:
        polarity = "positive-col" if qinfo["should_trigger"] else "negative-col"
        html_parts.append(f' <th class="{polarity}">{html.escape(qinfo["query"])}</th>\n')

    # Add column headers for test queries (different color)
    for qinfo in test_queries:
        polarity = "positive-col" if qinfo["should_trigger"] else "negative-col"
        html_parts.append(f' <th class="test-col {polarity}">{html.escape(qinfo["query"])}</th>\n')

    html_parts.append(""" </tr>
 </thead>
 <tbody>
 """)

    # Find the best iteration for row highlighting. Guard against an empty
    # history (e.g. the report is generated before the first iteration has
    # finished): max() on an empty sequence raises ValueError.
    best_iter = None
    if history:
        if test_queries:
            best_iter = max(history, key=lambda h: h.get("test_passed") or 0).get("iteration")
        else:
            best_iter = max(history, key=lambda h: h.get("train_passed", h.get("passed", 0))).get("iteration")

    # Add rows for each iteration
    for h in history:
        iteration = h.get("iteration", "?")
        description = h.get("description", "")
        train_results = h.get("train_results", h.get("results", []))
        test_results = h.get("test_results", [])

        # Per-query lookups so cells line up with the header columns even
        # if the result order varies between iterations.
        train_by_query = {r["query"]: r for r in train_results}
        test_by_query = {r["query"]: r for r in test_results} if test_results else {}

        train_correct, train_runs = _aggregate_runs(train_results)
        test_correct, test_runs = _aggregate_runs(test_results)

        train_class = _score_class(train_correct, train_runs)
        test_class = _score_class(test_correct, test_runs)

        row_class = "best-row" if iteration == best_iter else ""

        html_parts.append(f""" <tr class="{row_class}">
 <td>{iteration}</td>
 <td><span class="score {train_class}">{train_correct}/{train_runs}</span></td>
 <td><span class="score {test_class}">{test_correct}/{test_runs}</span></td>
 <td class="description">{html.escape(description)}</td>
 """)

        # Add result for each train query
        for qinfo in train_queries:
            r = train_by_query.get(qinfo["query"], {})
            did_pass = r.get("pass", False)
            triggers = r.get("triggers", 0)
            runs = r.get("runs", 0)

            icon = "✓" if did_pass else "✗"
            css_class = "pass" if did_pass else "fail"

            html_parts.append(f' <td class="result {css_class}">{icon}<span class="rate">{triggers}/{runs}</span></td>\n')

        # Add result for each test query (with different background)
        for qinfo in test_queries:
            r = test_by_query.get(qinfo["query"], {})
            did_pass = r.get("pass", False)
            triggers = r.get("triggers", 0)
            runs = r.get("runs", 0)

            icon = "✓" if did_pass else "✗"
            css_class = "pass" if did_pass else "fail"

            html_parts.append(f' <td class="result test-result {css_class}">{icon}<span class="rate">{triggers}/{runs}</span></td>\n')

        html_parts.append(" </tr>\n")

    html_parts.append(""" </tbody>
 </table>
 </div>
 """)

    html_parts.append("""
</body>
</html>
""")

    return "".join(html_parts)
302
+
303
+
304
def main():
    """CLI entry point: read run_loop.py JSON output and emit the HTML report.

    Reads JSON from a file path (or stdin when the path is "-"), renders
    the report, then writes it to --output if given, otherwise to stdout.
    """
    arg_parser = argparse.ArgumentParser(description="Generate HTML report from run_loop output")
    arg_parser.add_argument("input", help="Path to JSON output from run_loop.py (or - for stdin)")
    arg_parser.add_argument("-o", "--output", default=None, help="Output HTML file (default: stdout)")
    arg_parser.add_argument("--skill-name", default="", help="Skill name to include in the report title")
    opts = arg_parser.parse_args()

    # "-" selects stdin; anything else is treated as a file path.
    data = json.load(sys.stdin) if opts.input == "-" else json.loads(Path(opts.input).read_text())

    report = generate_html(data, skill_name=opts.skill_name)

    if opts.output is None:
        print(report)
    else:
        Path(opts.output).write_text(report)
        # Status note goes to stderr so stdout stays clean for piping.
        print(f"Report written to {opts.output}", file=sys.stderr)


if __name__ == "__main__":
    main()
@@ -0,0 +1,247 @@
1
+ #!/usr/bin/env python3
2
+ """Improve a skill description based on eval results.
3
+
4
+ Takes eval results (from run_eval.py) and generates an improved description
5
+ by calling `claude -p` as a subprocess (same auth pattern as run_eval.py —
6
+ uses the session's Claude Code auth, no separate ANTHROPIC_API_KEY needed).
7
+ """
8
+
9
+ import argparse
10
+ import json
11
+ import os
12
+ import re
13
+ import subprocess
14
+ import sys
15
+ from pathlib import Path
16
+
17
+ from scripts.utils import parse_skill_md
18
+
19
+
20
def _call_claude(prompt: str, model: str | None, timeout: int = 300) -> str:
    """Run `claude -p` with the prompt on stdin and return the text response.

    The prompt travels over stdin (not argv) because it embeds the full
    SKILL.md body and can easily exceed comfortable argv length.

    Raises:
        RuntimeError: if the `claude` process exits non-zero.
    """
    command = ["claude", "-p", "--output-format", "text"]
    if model:
        command += ["--model", model]

    # Drop CLAUDECODE so `claude -p` can be nested inside a Claude Code
    # session. That guard exists for interactive terminal conflicts;
    # programmatic subprocess usage is safe. Same pattern as run_eval.py.
    child_env = dict(os.environ)
    child_env.pop("CLAUDECODE", None)

    proc = subprocess.run(
        command,
        input=prompt,
        capture_output=True,
        text=True,
        env=child_env,
        timeout=timeout,
    )
    if proc.returncode != 0:
        raise RuntimeError(
            f"claude -p exited {proc.returncode}\nstderr: {proc.stderr}"
        )
    return proc.stdout
48
+
49
+
50
def improve_description(
    skill_name: str,
    skill_content: str,
    current_description: str,
    eval_results: dict,
    history: list[dict],
    model: str,
    test_results: dict | None = None,
    log_dir: Path | None = None,
    iteration: int | None = None,
) -> str:
    """Call Claude to improve the description based on eval results.

    Builds one large single-turn prompt containing the current description,
    per-query failure breakdowns, prior attempts, and the full skill body,
    then runs it through `claude -p` via _call_claude.

    Args:
        skill_name: Name of the skill being optimized (used in the prompt).
        skill_content: Full SKILL.md body, embedded for context.
        current_description: The description that produced eval_results.
        eval_results: Train-set results dict; must contain "results" (list of
            per-query dicts with "should_trigger", "pass", "query",
            "triggers", "runs") and "summary" ("passed"/"total").
        history: Prior attempts; each entry may carry score fields,
            "description", optional "results" and "note".
        model: Model name forwarded to `claude --model`.
        test_results: Optional held-out results, reported in the score line
            only (never shown per-query, to limit overfitting).
        log_dir: If given, a JSON transcript of the exchange is written here.
        iteration: Loop counter used in the transcript and log filename.

    Returns:
        The new description text (post-parsing, possibly shortened).
    """
    # Partition train failures by direction: misses vs. false positives.
    failed_triggers = [
        r for r in eval_results["results"]
        if r["should_trigger"] and not r["pass"]
    ]
    false_triggers = [
        r for r in eval_results["results"]
        if not r["should_trigger"] and not r["pass"]
    ]

    # Build scores summary
    train_score = f"{eval_results['summary']['passed']}/{eval_results['summary']['total']}"
    if test_results:
        test_score = f"{test_results['summary']['passed']}/{test_results['summary']['total']}"
        scores_summary = f"Train: {train_score}, Test: {test_score}"
    else:
        scores_summary = f"Train: {train_score}"

    # Fixed preamble: explains the skill-description mechanism and opens the
    # <scores_summary> section that the conditional blocks below fill in.
    prompt = f"""You are optimizing a skill description for a Claude Code skill called "{skill_name}". A "skill" is sort of like a prompt, but with progressive disclosure -- there's a title and description that Claude sees when deciding whether to use the skill, and then if it does use the skill, it reads the .md file which has lots more details and potentially links to other resources in the skill folder like helper files and scripts and additional documentation or examples.

The description appears in Claude's "available_skills" list. When a user sends a query, Claude decides whether to invoke the skill based solely on the title and on this description. Your goal is to write a description that triggers for relevant queries, and doesn't trigger for irrelevant ones.

Here's the current description:
<current_description>
"{current_description}"
</current_description>

Current scores ({scores_summary}):
<scores_summary>
"""
    # Misses: queries that should have triggered the skill but did not.
    if failed_triggers:
        prompt += "FAILED TO TRIGGER (should have triggered but didn't):\n"
        for r in failed_triggers:
            prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n'
        prompt += "\n"

    # False positives: queries that triggered the skill but should not have.
    if false_triggers:
        prompt += "FALSE TRIGGERS (triggered but shouldn't have):\n"
        for r in false_triggers:
            prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n'
        prompt += "\n"

    # Replay earlier attempts so the model avoids repeating failed wordings.
    # .get() fallbacks keep compatibility with older history entries that
    # used plain "passed"/"total" instead of the train_/test_ split.
    if history:
        prompt += "PREVIOUS ATTEMPTS (do NOT repeat these — try something structurally different):\n\n"
        for h in history:
            train_s = f"{h.get('train_passed', h.get('passed', 0))}/{h.get('train_total', h.get('total', 0))}"
            test_s = f"{h.get('test_passed', '?')}/{h.get('test_total', '?')}" if h.get('test_passed') is not None else None
            score_str = f"train={train_s}" + (f", test={test_s}" if test_s else "")
            prompt += f'<attempt {score_str}>\n'
            prompt += f'Description: "{h["description"]}"\n'
            if "results" in h:
                prompt += "Train results:\n"
                for r in h["results"]:
                    status = "PASS" if r["pass"] else "FAIL"
                    # Queries are truncated to 80 chars to bound prompt size.
                    prompt += f' [{status}] "{r["query"][:80]}" (triggered {r["triggers"]}/{r["runs"]})\n'
            if h.get("note"):
                prompt += f'Note: {h["note"]}\n'
            prompt += "</attempt>\n\n"

    # Closing section: skill body for context plus the writing guidance and
    # the instruction to answer inside <new_description> tags.
    prompt += f"""</scores_summary>

Skill content (for context on what the skill does):
<skill_content>
{skill_content}
</skill_content>

Based on the failures, write a new and improved description that is more likely to trigger correctly. When I say "based on the failures", it's a bit of a tricky line to walk because we don't want to overfit to the specific cases you're seeing. So what I DON'T want you to do is produce an ever-expanding list of specific queries that this skill should or shouldn't trigger for. Instead, try to generalize from the failures to broader categories of user intent and situations where this skill would be useful or not useful. The reason for this is twofold:

1. Avoid overfitting
2. The list might get loooong and it's injected into ALL queries and there might be a lot of skills, so we don't want to blow too much space on any given description.

Concretely, your description should not be more than about 100-200 words, even if that comes at the cost of accuracy. There is a hard limit of 1024 characters — descriptions over that will be truncated, so stay comfortably under it.

Here are some tips that we've found to work well in writing these descriptions:
- The skill should be phrased in the imperative -- "Use this skill for" rather than "this skill does"
- The skill description should focus on the user's intent, what they are trying to achieve, vs. the implementation details of how the skill works.
- The description competes with other skills for Claude's attention — make it distinctive and immediately recognizable.
- If you're getting lots of failures after repeated attempts, change things up. Try different sentence structures or wordings.

I'd encourage you to be creative and mix up the style in different iterations since you'll have multiple opportunities to try different approaches and we'll just grab the highest-scoring one at the end.

Please respond with only the new description text in <new_description> tags, nothing else."""

    text = _call_claude(prompt, model)

    # Prefer the tagged answer; fall back to the whole response. Surrounding
    # quotes are stripped because models often quote the description.
    match = re.search(r"<new_description>(.*?)</new_description>", text, re.DOTALL)
    description = match.group(1).strip().strip('"') if match else text.strip().strip('"')

    # Transcript of the exchange, written to log_dir at the end (if set).
    transcript: dict = {
        "iteration": iteration,
        "prompt": prompt,
        "response": text,
        "parsed_description": description,
        "char_count": len(description),
        "over_limit": len(description) > 1024,
    }

    # Safety net: the prompt already states the 1024-char hard limit, but if
    # the model blew past it anyway, make one fresh single-turn call that
    # quotes the too-long version and asks for a shorter rewrite. (The old
    # SDK path did this as a true multi-turn; `claude -p` is one-shot, so we
    # inline the prior output into the new prompt instead.)
    if len(description) > 1024:
        shorten_prompt = (
            f"{prompt}\n\n"
            f"---\n\n"
            f"A previous attempt produced this description, which at "
            f"{len(description)} characters is over the 1024-character hard limit:\n\n"
            f'"{description}"\n\n'
            f"Rewrite it to be under 1024 characters while keeping the most "
            f"important trigger words and intent coverage. Respond with only "
            f"the new description in <new_description> tags."
        )
        shorten_text = _call_claude(shorten_prompt, model)
        match = re.search(r"<new_description>(.*?)</new_description>", shorten_text, re.DOTALL)
        shortened = match.group(1).strip().strip('"') if match else shorten_text.strip().strip('"')

        transcript["rewrite_prompt"] = shorten_prompt
        transcript["rewrite_response"] = shorten_text
        transcript["rewrite_description"] = shortened
        transcript["rewrite_char_count"] = len(shortened)
        # NOTE(review): the rewrite is accepted unconditionally — if the
        # second call is still over 1024 chars it is returned anyway.
        description = shortened

    transcript["final_description"] = description

    if log_dir:
        log_dir.mkdir(parents=True, exist_ok=True)
        # iteration=0 also falls back to 'unknown' (0 is falsy) — presumably
        # iterations are 1-based; confirm against the caller.
        log_file = log_dir / f"improve_iter_{iteration or 'unknown'}.json"
        log_file.write_text(json.dumps(transcript, indent=2))

    return description
192
+
193
+
194
def main():
    """CLI entry point: improve a skill description from eval results.

    Loads eval results and optional history from disk, runs one improvement
    round via improve_description, and prints a JSON object with the new
    description plus the history extended by the attempt just evaluated.
    """
    parser = argparse.ArgumentParser(description="Improve a skill description based on eval results")
    parser.add_argument("--eval-results", required=True, help="Path to eval results JSON (from run_eval.py)")
    parser.add_argument("--skill-path", required=True, help="Path to skill directory")
    parser.add_argument("--history", default=None, help="Path to history JSON (previous attempts)")
    parser.add_argument("--model", required=True, help="Model for improvement")
    parser.add_argument("--verbose", action="store_true", help="Print thinking to stderr")
    opts = parser.parse_args()

    # Bail out early if the target directory doesn't look like a skill.
    skill_dir = Path(opts.skill_path)
    if not (skill_dir / "SKILL.md").exists():
        print(f"Error: No SKILL.md found at {skill_dir}", file=sys.stderr)
        sys.exit(1)

    eval_results = json.loads(Path(opts.eval_results).read_text())
    # History is optional; default to a fresh empty list.
    history = json.loads(Path(opts.history).read_text()) if opts.history else []

    name, _, content = parse_skill_md(skill_dir)
    # The description under test is the one recorded in the eval results.
    current_description = eval_results["description"]

    if opts.verbose:
        print(f"Current: {current_description}", file=sys.stderr)
        print(f"Score: {eval_results['summary']['passed']}/{eval_results['summary']['total']}", file=sys.stderr)

    new_description = improve_description(
        skill_name=name,
        skill_content=content,
        current_description=current_description,
        eval_results=eval_results,
        history=history,
        model=opts.model,
    )

    if opts.verbose:
        print(f"Improved: {new_description}", file=sys.stderr)

    # Emit JSON on stdout: the new description, plus history extended with
    # the attempt we just scored (so the next round can see it).
    attempt_record = {
        "description": current_description,
        "passed": eval_results["summary"]["passed"],
        "failed": eval_results["summary"]["failed"],
        "total": eval_results["summary"]["total"],
        "results": eval_results["results"],
    }
    output = {
        "description": new_description,
        "history": history + [attempt_record],
    }
    print(json.dumps(output, indent=2))


if __name__ == "__main__":
    main()