@cleocode/skills 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (171) hide show
  1. package/dispatch-config.json +404 -0
  2. package/index.d.ts +178 -0
  3. package/index.js +405 -0
  4. package/package.json +14 -0
  5. package/profiles/core.json +7 -0
  6. package/profiles/full.json +10 -0
  7. package/profiles/minimal.json +7 -0
  8. package/profiles/recommended.json +7 -0
  9. package/provider-skills-map.json +97 -0
  10. package/skills/_shared/cleo-style-guide.md +84 -0
  11. package/skills/_shared/manifest-operations.md +810 -0
  12. package/skills/_shared/placeholders.json +433 -0
  13. package/skills/_shared/skill-chaining-patterns.md +237 -0
  14. package/skills/_shared/subagent-protocol-base.md +223 -0
  15. package/skills/_shared/task-system-integration.md +232 -0
  16. package/skills/_shared/testing-framework-config.md +110 -0
  17. package/skills/ct-cleo/SKILL.md +490 -0
  18. package/skills/ct-cleo/references/anti-patterns.md +19 -0
  19. package/skills/ct-cleo/references/loom-lifecycle.md +136 -0
  20. package/skills/ct-cleo/references/orchestrator-constraints.md +55 -0
  21. package/skills/ct-cleo/references/session-protocol.md +162 -0
  22. package/skills/ct-codebase-mapper/SKILL.md +82 -0
  23. package/skills/ct-contribution/SKILL.md +521 -0
  24. package/skills/ct-contribution/templates/contribution-init.json +21 -0
  25. package/skills/ct-dev-workflow/SKILL.md +423 -0
  26. package/skills/ct-docs-lookup/SKILL.md +66 -0
  27. package/skills/ct-docs-review/SKILL.md +175 -0
  28. package/skills/ct-docs-write/SKILL.md +108 -0
  29. package/skills/ct-documentor/SKILL.md +231 -0
  30. package/skills/ct-epic-architect/SKILL.md +305 -0
  31. package/skills/ct-epic-architect/references/bug-epic-example.md +172 -0
  32. package/skills/ct-epic-architect/references/commands.md +201 -0
  33. package/skills/ct-epic-architect/references/feature-epic-example.md +210 -0
  34. package/skills/ct-epic-architect/references/migration-epic-example.md +244 -0
  35. package/skills/ct-epic-architect/references/output-format.md +92 -0
  36. package/skills/ct-epic-architect/references/patterns.md +284 -0
  37. package/skills/ct-epic-architect/references/refactor-epic-example.md +412 -0
  38. package/skills/ct-epic-architect/references/research-epic-example.md +226 -0
  39. package/skills/ct-epic-architect/references/shell-escaping.md +86 -0
  40. package/skills/ct-epic-architect/references/skill-aware-execution.md +195 -0
  41. package/skills/ct-grade/SKILL.md +230 -0
  42. package/skills/ct-grade/agents/analysis-reporter.md +203 -0
  43. package/skills/ct-grade/agents/blind-comparator.md +157 -0
  44. package/skills/ct-grade/agents/scenario-runner.md +134 -0
  45. package/skills/ct-grade/eval-viewer/__pycache__/generate_grade_review.cpython-314.pyc +0 -0
  46. package/skills/ct-grade/eval-viewer/generate_grade_review.py +1138 -0
  47. package/skills/ct-grade/eval-viewer/generate_grade_viewer.py +544 -0
  48. package/skills/ct-grade/eval-viewer/generate_review.py +283 -0
  49. package/skills/ct-grade/eval-viewer/grade-review.html +1574 -0
  50. package/skills/ct-grade/eval-viewer/viewer.html +219 -0
  51. package/skills/ct-grade/evals/evals.json +94 -0
  52. package/skills/ct-grade/references/ab-test-methodology.md +150 -0
  53. package/skills/ct-grade/references/domains.md +137 -0
  54. package/skills/ct-grade/references/grade-spec.md +236 -0
  55. package/skills/ct-grade/references/scenario-playbook.md +234 -0
  56. package/skills/ct-grade/references/token-tracking.md +120 -0
  57. package/skills/ct-grade/scripts/__pycache__/audit_analyzer.cpython-314.pyc +0 -0
  58. package/skills/ct-grade/scripts/__pycache__/run_ab_test.cpython-314.pyc +0 -0
  59. package/skills/ct-grade/scripts/__pycache__/run_all.cpython-314.pyc +0 -0
  60. package/skills/ct-grade/scripts/__pycache__/token_tracker.cpython-314.pyc +0 -0
  61. package/skills/ct-grade/scripts/audit_analyzer.py +279 -0
  62. package/skills/ct-grade/scripts/generate_report.py +283 -0
  63. package/skills/ct-grade/scripts/run_ab_test.py +504 -0
  64. package/skills/ct-grade/scripts/run_all.py +287 -0
  65. package/skills/ct-grade/scripts/setup_run.py +183 -0
  66. package/skills/ct-grade/scripts/token_tracker.py +630 -0
  67. package/skills/ct-grade-v2-1/SKILL.md +237 -0
  68. package/skills/ct-grade-v2-1/agents/analysis-reporter.md +203 -0
  69. package/skills/ct-grade-v2-1/agents/blind-comparator.md +157 -0
  70. package/skills/ct-grade-v2-1/agents/scenario-runner.md +179 -0
  71. package/skills/ct-grade-v2-1/evals/evals.json +74 -0
  72. package/skills/ct-grade-v2-1/grade-viewer/__pycache__/build_op_stats.cpython-314.pyc +0 -0
  73. package/skills/ct-grade-v2-1/grade-viewer/__pycache__/generate_grade_review.cpython-314.pyc +0 -0
  74. package/skills/ct-grade-v2-1/grade-viewer/build_op_stats.py +174 -0
  75. package/skills/ct-grade-v2-1/grade-viewer/eval-analysis.json +41 -0
  76. package/skills/ct-grade-v2-1/grade-viewer/eval-report.md +34 -0
  77. package/skills/ct-grade-v2-1/grade-viewer/generate_grade_review.py +1023 -0
  78. package/skills/ct-grade-v2-1/grade-viewer/generate_grade_viewer.py +548 -0
  79. package/skills/ct-grade-v2-1/grade-viewer/grade-review-eval.html +613 -0
  80. package/skills/ct-grade-v2-1/grade-viewer/grade-review.html +1532 -0
  81. package/skills/ct-grade-v2-1/grade-viewer/viewer.html +620 -0
  82. package/skills/ct-grade-v2-1/manifest-entry.json +31 -0
  83. package/skills/ct-grade-v2-1/references/ab-testing.md +233 -0
  84. package/skills/ct-grade-v2-1/references/domains-ssot.md +156 -0
  85. package/skills/ct-grade-v2-1/references/grade-spec-v2.md +167 -0
  86. package/skills/ct-grade-v2-1/references/playbook-v2.md +393 -0
  87. package/skills/ct-grade-v2-1/references/token-tracking.md +202 -0
  88. package/skills/ct-grade-v2-1/scripts/generate_report.py +419 -0
  89. package/skills/ct-grade-v2-1/scripts/run_ab_test.py +493 -0
  90. package/skills/ct-grade-v2-1/scripts/run_scenario.py +396 -0
  91. package/skills/ct-grade-v2-1/scripts/setup_run.py +207 -0
  92. package/skills/ct-grade-v2-1/scripts/token_tracker.py +175 -0
  93. package/skills/ct-memory/SKILL.md +84 -0
  94. package/skills/ct-orchestrator/INSTALL.md +61 -0
  95. package/skills/ct-orchestrator/README.md +69 -0
  96. package/skills/ct-orchestrator/SKILL.md +380 -0
  97. package/skills/ct-orchestrator/manifest-entry.json +19 -0
  98. package/skills/ct-orchestrator/orchestrator-prompt.txt +17 -0
  99. package/skills/ct-orchestrator/references/SUBAGENT-PROTOCOL-BLOCK.md +66 -0
  100. package/skills/ct-orchestrator/references/autonomous-operation.md +167 -0
  101. package/skills/ct-orchestrator/references/lifecycle-gates.md +98 -0
  102. package/skills/ct-orchestrator/references/orchestrator-compliance.md +271 -0
  103. package/skills/ct-orchestrator/references/orchestrator-handoffs.md +85 -0
  104. package/skills/ct-orchestrator/references/orchestrator-patterns.md +164 -0
  105. package/skills/ct-orchestrator/references/orchestrator-recovery.md +113 -0
  106. package/skills/ct-orchestrator/references/orchestrator-spawning.md +271 -0
  107. package/skills/ct-orchestrator/references/orchestrator-tokens.md +180 -0
  108. package/skills/ct-research-agent/SKILL.md +226 -0
  109. package/skills/ct-skill-creator/.cleo/.context-state.json +13 -0
  110. package/skills/ct-skill-creator/.cleo/logs/cleo.2026-03-07.1.log +24 -0
  111. package/skills/ct-skill-creator/.cleo/tasks.db +0 -0
  112. package/skills/ct-skill-creator/SKILL.md +356 -0
  113. package/skills/ct-skill-creator/agents/analyzer.md +276 -0
  114. package/skills/ct-skill-creator/agents/comparator.md +204 -0
  115. package/skills/ct-skill-creator/agents/grader.md +225 -0
  116. package/skills/ct-skill-creator/assets/eval_review.html +146 -0
  117. package/skills/ct-skill-creator/eval-viewer/__pycache__/generate_review.cpython-314.pyc +0 -0
  118. package/skills/ct-skill-creator/eval-viewer/generate_review.py +471 -0
  119. package/skills/ct-skill-creator/eval-viewer/viewer.html +1325 -0
  120. package/skills/ct-skill-creator/manifest-entry.json +17 -0
  121. package/skills/ct-skill-creator/references/dynamic-context.md +228 -0
  122. package/skills/ct-skill-creator/references/frontmatter.md +83 -0
  123. package/skills/ct-skill-creator/references/invocation-control.md +165 -0
  124. package/skills/ct-skill-creator/references/output-patterns.md +86 -0
  125. package/skills/ct-skill-creator/references/provider-deployment.md +175 -0
  126. package/skills/ct-skill-creator/references/schemas.md +430 -0
  127. package/skills/ct-skill-creator/references/workflows.md +28 -0
  128. package/skills/ct-skill-creator/scripts/__init__.py +1 -0
  129. package/skills/ct-skill-creator/scripts/__pycache__/__init__.cpython-314.pyc +0 -0
  130. package/skills/ct-skill-creator/scripts/__pycache__/aggregate_benchmark.cpython-314.pyc +0 -0
  131. package/skills/ct-skill-creator/scripts/__pycache__/generate_report.cpython-314.pyc +0 -0
  132. package/skills/ct-skill-creator/scripts/__pycache__/improve_description.cpython-314.pyc +0 -0
  133. package/skills/ct-skill-creator/scripts/__pycache__/init_skill.cpython-314.pyc +0 -0
  134. package/skills/ct-skill-creator/scripts/__pycache__/quick_validate.cpython-314.pyc +0 -0
  135. package/skills/ct-skill-creator/scripts/__pycache__/run_eval.cpython-314.pyc +0 -0
  136. package/skills/ct-skill-creator/scripts/__pycache__/run_loop.cpython-314.pyc +0 -0
  137. package/skills/ct-skill-creator/scripts/__pycache__/utils.cpython-314.pyc +0 -0
  138. package/skills/ct-skill-creator/scripts/aggregate_benchmark.py +401 -0
  139. package/skills/ct-skill-creator/scripts/generate_report.py +326 -0
  140. package/skills/ct-skill-creator/scripts/improve_description.py +247 -0
  141. package/skills/ct-skill-creator/scripts/init_skill.py +306 -0
  142. package/skills/ct-skill-creator/scripts/package_skill.py +110 -0
  143. package/skills/ct-skill-creator/scripts/quick_validate.py +97 -0
  144. package/skills/ct-skill-creator/scripts/run_eval.py +310 -0
  145. package/skills/ct-skill-creator/scripts/run_loop.py +328 -0
  146. package/skills/ct-skill-creator/scripts/utils.py +47 -0
  147. package/skills/ct-skill-validator/SKILL.md +178 -0
  148. package/skills/ct-skill-validator/agents/ecosystem-checker.md +151 -0
  149. package/skills/ct-skill-validator/assets/valid-skill-example.md +13 -0
  150. package/skills/ct-skill-validator/evals/eval_set.json +14 -0
  151. package/skills/ct-skill-validator/evals/evals.json +52 -0
  152. package/skills/ct-skill-validator/manifest-entry.json +20 -0
  153. package/skills/ct-skill-validator/references/cleo-ecosystem-rules.md +163 -0
  154. package/skills/ct-skill-validator/references/validation-rules.md +168 -0
  155. package/skills/ct-skill-validator/scripts/__init__.py +0 -0
  156. package/skills/ct-skill-validator/scripts/__pycache__/audit_body.cpython-314.pyc +0 -0
  157. package/skills/ct-skill-validator/scripts/__pycache__/check_ecosystem.cpython-314.pyc +0 -0
  158. package/skills/ct-skill-validator/scripts/__pycache__/generate_validation_report.cpython-314.pyc +0 -0
  159. package/skills/ct-skill-validator/scripts/__pycache__/validate.cpython-314.pyc +0 -0
  160. package/skills/ct-skill-validator/scripts/audit_body.py +242 -0
  161. package/skills/ct-skill-validator/scripts/check_ecosystem.py +169 -0
  162. package/skills/ct-skill-validator/scripts/check_manifest.py +172 -0
  163. package/skills/ct-skill-validator/scripts/generate_validation_report.py +442 -0
  164. package/skills/ct-skill-validator/scripts/validate.py +422 -0
  165. package/skills/ct-spec-writer/SKILL.md +189 -0
  166. package/skills/ct-stickynote/README.md +14 -0
  167. package/skills/ct-stickynote/SKILL.md +46 -0
  168. package/skills/ct-task-executor/SKILL.md +296 -0
  169. package/skills/ct-validator/SKILL.md +216 -0
  170. package/skills/manifest.json +469 -0
  171. package/skills.json +281 -0
@@ -0,0 +1,225 @@
1
+ # Grader Agent
2
+
3
+ Evaluate expectations against an execution transcript and outputs.
4
+
5
+ ## Role
6
+
7
+ The Grader reviews a transcript and output files, then determines whether each expectation passes or fails. Provide clear evidence for each judgment.
8
+
9
+ You have two jobs: grade the outputs, and critique the evals themselves. A passing grade on a weak assertion is worse than useless — it creates false confidence. When you notice an assertion that's trivially satisfied, or an important outcome that no assertion checks, say so.
10
+
11
+ ## Inputs
12
+
13
+ You receive these parameters in your prompt:
14
+
15
+ - **expectations**: List of expectations to evaluate (strings)
16
+ - **transcript_path**: Path to the execution transcript (markdown file)
17
+ - **outputs_dir**: Directory containing output files from execution
18
+
19
+ ## Process
20
+
21
+ ### Step 1: Read the Transcript
22
+
23
+ 1. Read the transcript file completely
24
+ 2. Note the eval prompt, execution steps, and final result
25
+ 3. Identify any issues or errors documented
26
+
27
+ ### Step 2: Examine Output Files
28
+
29
+ 1. List files in outputs_dir
30
+ 2. Read/examine each file relevant to the expectations. If outputs aren't plain text, use the inspection tools provided in your prompt — don't rely solely on what the transcript says the executor produced.
31
+ 3. Note contents, structure, and quality
32
+
33
+ ### Step 3: Evaluate Each Assertion
34
+
35
+ For each expectation:
36
+
37
+ 1. **Search for evidence** in the transcript and outputs
38
+ 2. **Determine verdict**:
39
+ - **PASS**: Clear evidence the expectation is true AND the evidence reflects genuine task completion, not just surface-level compliance
40
+ - **FAIL**: No evidence, or evidence contradicts the expectation, or the evidence is superficial (e.g., correct filename but empty/wrong content)
41
+ 3. **Cite the evidence**: Quote the specific text or describe what you found
42
+
43
+ ### Step 4: Extract and Verify Claims
44
+
45
+ Beyond the predefined expectations, extract implicit claims from the outputs and verify them:
46
+
47
+ 1. **Extract claims** from the transcript and outputs:
48
+ - Factual statements ("The form has 12 fields")
49
+ - Process claims ("Used pypdf to fill the form")
50
+ - Quality claims ("All fields were filled correctly")
51
+
52
+ 2. **Verify each claim**:
53
+ - **Factual claims**: Can be checked against the outputs or external sources
54
+ - **Process claims**: Can be verified from the transcript
55
+ - **Quality claims**: Evaluate whether the claim is justified
56
+
57
+ 3. **Flag unverifiable claims**: Note claims that cannot be verified with available information
58
+
59
+ This catches issues that predefined expectations might miss.
60
+
61
+ ### Step 5: Read User Notes
62
+
63
+ If `{outputs_dir}/user_notes.md` exists:
64
+ 1. Read it and note any uncertainties or issues flagged by the executor
65
+ 2. Include relevant concerns in the grading output
66
+ 3. These may reveal problems even when expectations pass
67
+
68
+ ### Step 6: Critique the Evals
69
+
70
+ After grading, consider whether the evals themselves could be improved. Only surface suggestions when there's a clear gap.
71
+
72
+ Good suggestions test meaningful outcomes — assertions that are hard to satisfy without actually doing the work correctly. Think about what makes an assertion *discriminating*: it passes when the skill genuinely succeeds and fails when it doesn't.
73
+
74
+ Suggestions worth raising:
75
+ - An assertion that passed but would also pass for a clearly wrong output (e.g., checking filename existence but not file content)
76
+ - An important outcome you observed — good or bad — that no assertion covers at all
77
+ - An assertion that can't actually be verified from the available outputs
78
+
79
+ Keep the bar high. The goal is to flag things the eval author would say "good catch" about, not to nitpick every assertion.
80
+
81
+ ### Step 7: Write Grading Results
82
+
83
+ Save results to `{outputs_dir}/../grading.json` (sibling to outputs_dir).
84
+
85
+ ## Grading Criteria
86
+
87
+ **PASS when**:
88
+ - The transcript or outputs clearly demonstrate the expectation is true
89
+ - Specific evidence can be cited
90
+ - The evidence reflects genuine substance, not just surface compliance (e.g., a file exists AND contains correct content, not just the right filename)
91
+
92
+ **FAIL when**:
93
+ - No evidence found for the expectation
94
+ - Evidence contradicts the expectation
95
+ - The expectation cannot be verified from available information
96
+ - The evidence is superficial — the assertion is technically satisfied but the underlying task outcome is wrong or incomplete
97
+ - The output appears to meet the assertion by coincidence rather than by actually doing the work
98
+
99
+ **When uncertain**: Default to FAIL — an expectation passes only when affirmative evidence supports it; the burden of proof is on the expectation, not on the grader to disprove it.
100
+
101
+ ### Step 8: Read Executor Metrics and Timing
102
+
103
+ 1. If `{outputs_dir}/metrics.json` exists, read it and include in grading output
104
+ 2. If `{outputs_dir}/../timing.json` exists, read it and include timing data
105
+
106
+ ## Output Format
107
+
108
+ Write a JSON file with this structure:
109
+
110
+ ```json
111
+ {
112
+ "expectations": [
113
+ {
114
+ "text": "The output includes the name 'John Smith'",
115
+ "passed": true,
116
+ "evidence": "Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'"
117
+ },
118
+ {
119
+ "text": "The spreadsheet has a SUM formula in cell B10",
120
+ "passed": false,
121
+ "evidence": "No spreadsheet was created. The output was a text file."
122
+ },
123
+ {
124
+ "text": "The assistant used the skill's OCR script",
125
+ "passed": true,
126
+ "evidence": "Transcript Step 2 shows: 'Tool: Bash - python ocr_script.py image.png'"
127
+ }
128
+ ],
129
+ "summary": {
130
+ "passed": 2,
131
+ "failed": 1,
132
+ "total": 3,
133
+ "pass_rate": 0.67
134
+ },
135
+ "execution_metrics": {
136
+ "tool_calls": {
137
+ "Read": 5,
138
+ "Write": 2,
139
+ "Bash": 8
140
+ },
141
+ "total_tool_calls": 15,
142
+ "total_steps": 6,
143
+ "errors_encountered": 0,
144
+ "output_chars": 12450,
145
+ "transcript_chars": 3200
146
+ },
147
+ "timing": {
148
+ "executor_duration_seconds": 165.0,
149
+ "grader_duration_seconds": 26.0,
150
+ "total_duration_seconds": 191.0
151
+ },
152
+ "claims": [
153
+ {
154
+ "claim": "The form has 12 fillable fields",
155
+ "type": "factual",
156
+ "verified": true,
157
+ "evidence": "Counted 12 fields in field_info.json"
158
+ },
159
+ {
160
+ "claim": "All required fields were populated",
161
+ "type": "quality",
162
+ "verified": false,
163
+ "evidence": "Reference section was left blank despite data being available"
164
+ }
165
+ ],
166
+ "user_notes_summary": {
167
+ "uncertainties": ["Used 2023 data, may be stale"],
168
+ "needs_review": [],
169
+ "workarounds": ["Fell back to text overlay for non-fillable fields"]
170
+ },
171
+ "eval_feedback": {
172
+ "suggestions": [
173
+ {
174
+ "assertion": "The output includes the name 'John Smith'",
175
+ "reason": "A hallucinated document that mentions the name would also pass — consider checking it appears as the primary contact with matching phone and email from the input"
176
+ },
177
+ {
178
+ "reason": "No assertion checks whether the extracted phone numbers match the input — I observed incorrect numbers in the output that went uncaught"
179
+ }
180
+ ],
181
+ "overall": "Assertions check presence but not correctness. Consider adding content verification."
182
+ }
183
+ }
184
+ ```
185
+
186
+ ## Field Descriptions
187
+
188
+ - **expectations**: Array of graded expectations
189
+ - **text**: The original expectation text
190
+ - **passed**: Boolean - true if expectation passes
191
+ - **evidence**: Specific quote or description supporting the verdict
192
+ - **summary**: Aggregate statistics
193
+ - **passed**: Count of passed expectations
194
+ - **failed**: Count of failed expectations
195
+ - **total**: Total expectations evaluated
196
+ - **pass_rate**: Fraction passed (0.0 to 1.0)
197
+ - **execution_metrics**: Copied from executor's metrics.json (if available)
198
+ - **output_chars**: Total character count of output files (proxy for tokens)
199
+ - **transcript_chars**: Character count of transcript
200
+ - **timing**: Wall clock timing from timing.json (if available)
201
+ - **executor_duration_seconds**: Time spent in executor subagent
202
+ - **total_duration_seconds**: Total elapsed time for the run
203
+ - **claims**: Extracted and verified claims from the output
204
+ - **claim**: The statement being verified
205
+ - **type**: "factual", "process", or "quality"
206
+ - **verified**: Boolean - whether the claim holds
207
+ - **evidence**: Supporting or contradicting evidence
208
+ - **user_notes_summary**: Issues flagged by the executor
209
+ - **uncertainties**: Things the executor wasn't sure about
210
+ - **needs_review**: Items requiring human attention
211
+ - **workarounds**: Places where the skill didn't work as expected
212
+ - **eval_feedback**: Improvement suggestions for the evals (only when warranted)
213
+ - **suggestions**: List of concrete suggestions, each with a `reason` and optionally an `assertion` it relates to
214
+ - **overall**: Brief assessment — can be "No suggestions, evals look solid" if nothing to flag
215
+
216
+ ## Guidelines
217
+
218
+ - **Be objective**: Base verdicts on evidence, not assumptions
219
+ - **Be specific**: Quote the exact text that supports your verdict
220
+ - **Be thorough**: Check both transcript and output files
221
+ - **Be consistent**: Apply the same standard to each expectation
222
+ - **Explain failures**: Make it clear why evidence was insufficient
223
+ - **No partial credit**: Each expectation is pass or fail, not partial
224
+
225
+ See [references/schemas.md](../references/schemas.md) for the complete grading.json schema definition.
@@ -0,0 +1,146 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Eval Set Review - __SKILL_NAME_PLACEHOLDER__</title>
7
+ <link rel="preconnect" href="https://fonts.googleapis.com">
8
+ <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
9
+ <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@500;600&family=Lora:wght@400;500&display=swap" rel="stylesheet">
10
+ <style>
11
+ * { box-sizing: border-box; margin: 0; padding: 0; }
12
+ body { font-family: 'Lora', Georgia, serif; background: #faf9f5; padding: 2rem; color: #141413; }
13
+ h1 { font-family: 'Poppins', sans-serif; margin-bottom: 0.5rem; font-size: 1.5rem; }
14
+ .description { color: #b0aea5; margin-bottom: 1.5rem; font-style: italic; max-width: 900px; }
15
+ .controls { margin-bottom: 1rem; display: flex; gap: 0.5rem; }
16
+ .btn { font-family: 'Poppins', sans-serif; padding: 0.5rem 1rem; border: none; border-radius: 6px; cursor: pointer; font-size: 0.875rem; font-weight: 500; }
17
+ .btn-add { background: #6a9bcc; color: white; }
18
+ .btn-add:hover { background: #5889b8; }
19
+ .btn-export { background: #d97757; color: white; }
20
+ .btn-export:hover { background: #c4613f; }
21
+ table { width: 100%; max-width: 1100px; border-collapse: collapse; background: white; border-radius: 6px; overflow: hidden; box-shadow: 0 1px 3px rgba(0,0,0,0.08); }
22
+ th { font-family: 'Poppins', sans-serif; background: #141413; color: #faf9f5; padding: 0.75rem 1rem; text-align: left; font-size: 0.875rem; }
23
+ td { padding: 0.75rem 1rem; border-bottom: 1px solid #e8e6dc; vertical-align: top; }
24
+ tr:nth-child(even) td { background: #faf9f5; }
25
+ tr:hover td { background: #f3f1ea; }
26
+ .section-header td { background: #e8e6dc; font-family: 'Poppins', sans-serif; font-weight: 500; font-size: 0.8rem; color: #141413; text-transform: uppercase; letter-spacing: 0.05em; }
27
+ .query-input { width: 100%; padding: 0.4rem; border: 1px solid #e8e6dc; border-radius: 4px; font-size: 0.875rem; font-family: 'Lora', Georgia, serif; resize: vertical; min-height: 60px; }
28
+ .query-input:focus { outline: none; border-color: #d97757; box-shadow: 0 0 0 2px rgba(217,119,87,0.15); }
29
+ .toggle { position: relative; display: inline-block; width: 44px; height: 24px; }
30
+ .toggle input { opacity: 0; width: 0; height: 0; }
31
+ .toggle .slider { position: absolute; inset: 0; background: #b0aea5; border-radius: 24px; cursor: pointer; transition: 0.2s; }
32
+ .toggle .slider::before { content: ""; position: absolute; width: 18px; height: 18px; left: 3px; bottom: 3px; background: white; border-radius: 50%; transition: 0.2s; }
33
+ .toggle input:checked + .slider { background: #d97757; }
34
+ .toggle input:checked + .slider::before { transform: translateX(20px); }
35
+ .btn-delete { background: #c44; color: white; padding: 0.3rem 0.6rem; border: none; border-radius: 4px; cursor: pointer; font-size: 0.75rem; font-family: 'Poppins', sans-serif; }
36
+ .btn-delete:hover { background: #a33; }
37
+ .summary { margin-top: 1rem; color: #b0aea5; font-size: 0.875rem; }
38
+ </style>
39
+ </head>
40
+ <body>
41
+ <h1>Eval Set Review: <span id="skill-name">__SKILL_NAME_PLACEHOLDER__</span></h1>
42
+ <p class="description">Current description: <span id="skill-desc">__SKILL_DESCRIPTION_PLACEHOLDER__</span></p>
43
+
44
+ <div class="controls">
45
+ <button class="btn btn-add" onclick="addRow()">+ Add Query</button>
46
+ <button class="btn btn-export" onclick="exportEvalSet()">Export Eval Set</button>
47
+ </div>
48
+
49
+ <table>
50
+ <thead>
51
+ <tr>
52
+ <th style="width:65%">Query</th>
53
+ <th style="width:18%">Should Trigger</th>
54
+ <th style="width:10%">Actions</th>
55
+ </tr>
56
+ </thead>
57
+ <tbody id="eval-body"></tbody>
58
+ </table>
59
+
60
+ <p class="summary" id="summary"></p>
61
+
62
+ <script>
63
+ const EVAL_DATA = __EVAL_DATA_PLACEHOLDER__;
64
+
65
+ let evalItems = [...EVAL_DATA];
66
+
67
// Rebuild the whole table body from evalItems.
//
// Rows are grouped with every should-trigger query first, followed by
// the should-not-trigger queries, with a section-header row at each
// group boundary. Each item is tagged with its index in evalItems
// before sorting so the inline onchange/onclick handlers can address
// the backing array directly regardless of display order.
function render() {
  const tbody = document.getElementById('eval-body');
  tbody.innerHTML = '';

  const ordered = evalItems
    .map((item, origIdx) => ({ ...item, origIdx }))
    .sort((a, b) => (b.should_trigger ? 1 : 0) - (a.should_trigger ? 1 : 0));

  let currentGroup = null;
  for (const item of ordered) {
    const group = item.should_trigger ? 'trigger' : 'no-trigger';
    // Emit a header row whenever we cross a group boundary.
    if (group !== currentGroup) {
      const headerRow = document.createElement('tr');
      headerRow.className = 'section-header';
      headerRow.innerHTML = `<td colspan="3">${item.should_trigger ? 'Should Trigger' : 'Should NOT Trigger'}</td>`;
      tbody.appendChild(headerRow);
      currentGroup = group;
    }

    const idx = item.origIdx;
    const row = document.createElement('tr');
    // Query text is HTML-escaped before being embedded inside the
    // textarea so user-entered markup cannot break out of it.
    row.innerHTML = `
      <td><textarea class="query-input" onchange="updateQuery(${idx}, this.value)">${escapeHtml(item.query)}</textarea></td>
      <td>
        <label class="toggle">
          <input type="checkbox" ${item.should_trigger ? 'checked' : ''} onchange="updateTrigger(${idx}, this.checked)">
          <span class="slider"></span>
        </label>
        <span style="margin-left:8px;font-size:0.8rem;color:#b0aea5">${item.should_trigger ? 'Yes' : 'No'}</span>
      </td>
      <td><button class="btn-delete" onclick="deleteRow(${idx})">Delete</button></td>
    `;
    tbody.appendChild(row);
  }
  updateSummary();
}
104
+
105
// Escape &, < and > by round-tripping the string through a detached
// DOM node, making it safe to embed in an innerHTML template.
function escapeHtml(text) {
  const scratch = document.createElement('div');
  scratch.textContent = text;
  return scratch.innerHTML;
}
110
+
111
// Write an edited query back to the backing array. Only the summary
// line can change here, so a full re-render is skipped to avoid
// stealing focus from the textarea being edited.
function updateQuery(idx, value) {
  evalItems[idx].query = value;
  updateSummary();
}

// Flip an item's should_trigger flag, then re-render so the row moves
// into its new section.
function updateTrigger(idx, value) {
  evalItems[idx].should_trigger = value;
  render();
}

// Remove an item and rebuild the table (indices shift after splice,
// so every inline handler must be regenerated).
function deleteRow(idx) {
  evalItems.splice(idx, 1);
  render();
}
114
+
115
// Append a blank should-trigger query and focus its textarea.
//
// Bug fix: render() sorts rows so the "Should NOT Trigger" section is
// displayed last, so the previous `inputs[inputs.length - 1].focus()`
// focused the final no-trigger row — not the row just added — whenever
// any no-trigger rows existed. The new row is now located via the
// item index baked into its inline onchange handler.
function addRow() {
  evalItems.push({ query: '', should_trigger: true });
  const newIdx = evalItems.length - 1;
  render();
  const target = [...document.querySelectorAll('.query-input')].find(
    (el) => el.getAttribute('onchange') === `updateQuery(${newIdx}, this.value)`
  );
  // Guard with ?. in case the table failed to render for any reason.
  target?.focus();
}
121
+
122
// Refresh the counts line shown beneath the table.
function updateSummary() {
  let trigger = 0;
  for (const item of evalItems) {
    if (item.should_trigger) trigger += 1;
  }
  // Every item is either trigger or no-trigger, so derive the
  // complement instead of filtering a second time.
  const noTrigger = evalItems.length - trigger;
  document.getElementById('summary').textContent =
    `${evalItems.length} queries total: ${trigger} should trigger, ${noTrigger} should not trigger`;
}
128
+
129
// Serialize the non-empty queries to eval_set.json and hand the file
// to the browser as a download via a temporary object URL.
function exportEvalSet() {
  // Drop blank rows and strip surrounding whitespace; keep only the
  // two fields the eval-set schema expects.
  const payload = evalItems
    .filter((item) => item.query.trim() !== '')
    .map((item) => ({ query: item.query.trim(), should_trigger: item.should_trigger }));
  const blob = new Blob([JSON.stringify(payload, null, 2)], { type: 'application/json' });
  const url = URL.createObjectURL(blob);
  // Programmatic <a download> click is the standard no-server way to
  // save a generated file from the page.
  const link = document.createElement('a');
  link.href = url;
  link.download = 'eval_set.json';
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
  // Release the object URL so the blob can be garbage-collected.
  URL.revokeObjectURL(url);
}
142
+
143
+ render();
144
+ </script>
145
+ </body>
146
+ </html>