snapeval 1.4.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,7 @@
1
1
  import * as fs from 'node:fs';
2
2
  import * as path from 'node:path';
3
+ import { execFile } from 'node:child_process';
4
+ import * as os from 'node:os';
3
5
  import { JSONReporter } from '../adapters/report/json.js';
4
6
  import { TerminalReporter } from '../adapters/report/terminal.js';
5
7
  import { HTMLReporter } from '../adapters/report/html.js';
@@ -24,6 +26,17 @@ export async function reportCommand(skillPath, results, options = {}) {
24
26
  await htmlReporter.report(results);
25
27
  const reportPath = path.join(iterationDir, 'report.html');
26
28
  console.log(`Report written to ${reportPath}`);
29
+ if (!process.env.CI) {
30
+ const platform = os.platform();
31
+ const opener = platform === 'darwin' ? 'open' : platform === 'win32' ? 'cmd' : 'xdg-open';
32
+ const args = platform === 'win32' ? ['/c', 'start', '', reportPath] : [reportPath];
33
+ execFile(opener, args, (err) => {
34
+ if (err) {
35
+ // Fallback: print path so user can open manually
36
+ console.log(`Open in browser: ${reportPath}`);
37
+ }
38
+ });
39
+ }
27
40
  }
28
41
  // Print terminal report
29
42
  if (options.verbose !== false) {
@@ -1 +1 @@
1
- {"version":3,"file":"report.js","sourceRoot":"","sources":["../../../src/commands/report.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,SAAS,CAAC;AAC9B,OAAO,KAAK,IAAI,MAAM,WAAW,CAAC;AAElC,OAAO,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAC;AAC1D,OAAO,EAAE,gBAAgB,EAAE,MAAM,gCAAgC,CAAC;AAClE,OAAO,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAC;AAE1D,MAAM,CAAC,KAAK,UAAU,aAAa,CACjC,SAAiB,EACjB,OAAoB,EACpB,UAAiD,EAAE;IAEnD,kCAAkC;IAClC,MAAM,cAAc,GAAG,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,OAAO,EAAE,SAAS,CAAC,CAAC;IAChE,EAAE,CAAC,SAAS,CAAC,cAAc,EAAE,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,CAAC;IAElD,MAAM,kBAAkB,GAAG,EAAE,CAAC,WAAW,CAAC,cAAc,CAAC;SACtD,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,iBAAiB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;SACxC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,OAAO,CAAC,YAAY,EAAE,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC;SACrD,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;IAEzB,MAAM,aAAa,GAAG,kBAAkB,CAAC,MAAM,GAAG,CAAC;QACjD,CAAC,CAAC,kBAAkB,CAAC,kBAAkB,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,CAAC;QACvD,CAAC,CAAC,CAAC,CAAC;IAEN,MAAM,YAAY,GAAG,IAAI,CAAC,IAAI,CAAC,cAAc,EAAE,aAAa,aAAa,EAAE,CAAC,CAAC;IAE7E,oBAAoB;IACpB,MAAM,YAAY,GAAG,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;IACpD,MAAM,YAAY,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;IAEnC,iCAAiC;IACjC,IAAI,OAAO,CAAC,IAAI,EAAE,CAAC;QACjB,MAAM,YAAY,GAAG,IAAI,YAAY,CAAC,YAAY,EAAE,aAAa,CAAC,CAAC;QACnE,MAAM,YAAY,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;QACnC,MAAM,UAAU,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,EAAE,aAAa,CAAC,CAAC;QAC1D,OAAO,CAAC,GAAG,CAAC,qBAAqB,UAAU,EAAE,CAAC,CAAC;IACjD,CAAC;IAED,wBAAwB;IACxB,IAAI,OAAO,CAAC,OAAO,KAAK,KAAK,EAAE,CAAC;QAC9B,MAAM,gBAAgB,GAAG,IAAI,gBAAgB,EAAE,CAAC;QAChD,MAAM,gBAAgB,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;IACzC,CAAC;IAED,OAAO,YAAY,CAAC;AACtB,CAAC"}
1
+ {"version":3,"file":"report.js","sourceRoot":"","sources":["../../../src/commands/report.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,SAAS,CAAC;AAC9B,OAAO,KAAK,IAAI,MAAM,WAAW,CAAC;AAClC,OAAO,EAAE,QAAQ,EAAE,MAAM,oBAAoB,CAAC;AAC9C,OAAO,KAAK,EAAE,MAAM,SAAS,CAAC;AAE9B,OAAO,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAC;AAC1D,OAAO,EAAE,gBAAgB,EAAE,MAAM,gCAAgC,CAAC;AAClE,OAAO,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAC;AAE1D,MAAM,CAAC,KAAK,UAAU,aAAa,CACjC,SAAiB,EACjB,OAAoB,EACpB,UAAiD,EAAE;IAEnD,kCAAkC;IAClC,MAAM,cAAc,GAAG,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,OAAO,EAAE,SAAS,CAAC,CAAC;IAChE,EAAE,CAAC,SAAS,CAAC,cAAc,EAAE,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,CAAC;IAElD,MAAM,kBAAkB,GAAG,EAAE,CAAC,WAAW,CAAC,cAAc,CAAC;SACtD,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,iBAAiB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;SACxC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,QAAQ,CAAC,CAAC,CAAC,OAAO,CAAC,YAAY,EAAE,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC;SACrD,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;IAEzB,MAAM,aAAa,GAAG,kBAAkB,CAAC,MAAM,GAAG,CAAC;QACjD,CAAC,CAAC,kBAAkB,CAAC,kBAAkB,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,CAAC;QACvD,CAAC,CAAC,CAAC,CAAC;IAEN,MAAM,YAAY,GAAG,IAAI,CAAC,IAAI,CAAC,cAAc,EAAE,aAAa,aAAa,EAAE,CAAC,CAAC;IAE7E,oBAAoB;IACpB,MAAM,YAAY,GAAG,IAAI,YAAY,CAAC,YAAY,CAAC,CAAC;IACpD,MAAM,YAAY,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;IAEnC,iCAAiC;IACjC,IAAI,OAAO,CAAC,IAAI,EAAE,CAAC;QACjB,MAAM,YAAY,GAAG,IAAI,YAAY,CAAC,YAAY,EAAE,aAAa,CAAC,CAAC;QACnE,MAAM,YAAY,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;QACnC,MAAM,UAAU,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,EAAE,aAAa,CAAC,CAAC;QAC1D,OAAO,CAAC,GAAG,CAAC,qBAAqB,UAAU,EAAE,CAAC,CAAC;QAC/C,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,EAAE,EAAE,CAAC;YACpB,MAAM,QAAQ,GAAG,EAAE,CAAC,QAAQ,EAAE,CAAC;YAC/B,MAAM,MAAM,GAAG,QAAQ,KAAK,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,QAAQ,KAAK,OAAO,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,UAAU,CAAC;YAC1F,MAAM,IAAI,GAAG,QAAQ,KAAK,OAAO,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,OAAO,EAAE,EAAE,EAAE,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC;YACnF,QAAQ,CAAC,MAAM,EAAE,IAAI,EAAE,CAAC,GAAG,EAAE,EAAE;gBAC7B,IAAI,GAAG,EAAE,CAAC;oBAC
R,iDAAiD;oBACjD,OAAO,CAAC,GAAG,CAAC,oBAAoB,UAAU,EAAE,CAAC,CAAC;gBAChD,CAAC;YACH,CAAC,CAAC,CAAC;QACL,CAAC;IACH,CAAC;IAED,wBAAwB;IACxB,IAAI,OAAO,CAAC,OAAO,KAAK,KAAK,EAAE,CAAC;QAC9B,MAAM,gBAAgB,GAAG,IAAI,gBAAgB,EAAE,CAAC;QAChD,MAAM,gBAAgB,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;IACzC,CAAC;IAED,OAAO,YAAY,CAAC;AACtB,CAAC"}
@@ -1,6 +1,3 @@
1
- import { execFile } from 'node:child_process';
2
- import * as path from 'node:path';
3
- import * as process from 'node:process';
4
1
  import { checkCommand } from './check.js';
5
2
  import { reportCommand } from './report.js';
6
3
  export async function reviewCommand(skillPath, skillAdapter, inference, options) {
@@ -9,26 +6,9 @@ export async function reviewCommand(skillPath, skillAdapter, inference, options)
9
6
  verbose: true,
10
7
  html: true,
11
8
  });
12
- const reportPath = path.join(iterationDir, 'report.html');
13
- openInBrowser(reportPath);
14
9
  return {
15
10
  iterationDir,
16
11
  hasRegressions: results.summary.regressed > 0,
17
12
  };
18
13
  }
19
- function openInBrowser(filePath) {
20
- const cmd = process.platform === 'darwin'
21
- ? 'open'
22
- : process.platform === 'win32'
23
- ? 'cmd'
24
- : 'xdg-open';
25
- const args = process.platform === 'win32'
26
- ? ['/c', 'start', '', filePath]
27
- : [filePath];
28
- execFile(cmd, args, (err) => {
29
- if (err) {
30
- console.warn(`Could not open browser: ${err.message}`);
31
- }
32
- });
33
- }
34
14
  //# sourceMappingURL=review.js.map
@@ -1 +1 @@
1
- {"version":3,"file":"review.js","sourceRoot":"","sources":["../../../src/commands/review.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,oBAAoB,CAAC;AAC9C,OAAO,KAAK,IAAI,MAAM,WAAW,CAAC;AAClC,OAAO,KAAK,OAAO,MAAM,cAAc,CAAC;AAExC,OAAO,EAAE,YAAY,EAAE,MAAM,YAAY,CAAC;AAC1C,OAAO,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AAE5C,MAAM,CAAC,KAAK,UAAU,aAAa,CACjC,SAAiB,EACjB,YAA0B,EAC1B,SAA2B,EAC3B,OAA2B;IAE3B,MAAM,OAAO,GAAG,MAAM,YAAY,CAAC,SAAS,EAAE,YAAY,EAAE,SAAS,EAAE,OAAO,CAAC,CAAC;IAEhF,MAAM,YAAY,GAAG,MAAM,aAAa,CAAC,SAAS,EAAE,OAAO,EAAE;QAC3D,OAAO,EAAE,IAAI;QACb,IAAI,EAAE,IAAI;KACX,CAAC,CAAC;IAEH,MAAM,UAAU,GAAG,IAAI,CAAC,IAAI,CAAC,YAAY,EAAE,aAAa,CAAC,CAAC;IAC1D,aAAa,CAAC,UAAU,CAAC,CAAC;IAE1B,OAAO;QACL,YAAY;QACZ,cAAc,EAAE,OAAO,CAAC,OAAO,CAAC,SAAS,GAAG,CAAC;KAC9C,CAAC;AACJ,CAAC;AAED,SAAS,aAAa,CAAC,QAAgB;IACrC,MAAM,GAAG,GACP,OAAO,CAAC,QAAQ,KAAK,QAAQ;QAC3B,CAAC,CAAC,MAAM;QACR,CAAC,CAAC,OAAO,CAAC,QAAQ,KAAK,OAAO;YAC5B,CAAC,CAAC,KAAK;YACP,CAAC,CAAC,UAAU,CAAC;IAEnB,MAAM,IAAI,GACR,OAAO,CAAC,QAAQ,KAAK,OAAO;QAC1B,CAAC,CAAC,CAAC,IAAI,EAAE,OAAO,EAAE,EAAE,EAAE,QAAQ,CAAC;QAC/B,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC;IAEjB,QAAQ,CAAC,GAAG,EAAE,IAAI,EAAE,CAAC,GAAG,EAAE,EAAE;QAC1B,IAAI,GAAG,EAAE,CAAC;YACR,OAAO,CAAC,IAAI,CAAC,2BAA2B,GAAG,CAAC,OAAO,EAAE,CAAC,CAAC;QACzD,CAAC;IACH,CAAC,CAAC,CAAC;AACL,CAAC"}
1
+ {"version":3,"file":"review.js","sourceRoot":"","sources":["../../../src/commands/review.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,YAAY,EAAE,MAAM,YAAY,CAAC;AAC1C,OAAO,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AAE5C,MAAM,CAAC,KAAK,UAAU,aAAa,CACjC,SAAiB,EACjB,YAA0B,EAC1B,SAA2B,EAC3B,OAA2B;IAE3B,MAAM,OAAO,GAAG,MAAM,YAAY,CAAC,SAAS,EAAE,YAAY,EAAE,SAAS,EAAE,OAAO,CAAC,CAAC;IAEhF,MAAM,YAAY,GAAG,MAAM,aAAa,CAAC,SAAS,EAAE,OAAO,EAAE;QAC3D,OAAO,EAAE,IAAI;QACb,IAAI,EAAE,IAAI;KACX,CAAC,CAAC;IAEH,OAAO;QACL,YAAY;QACZ,cAAc,EAAE,OAAO,CAAC,OAAO,CAAC,SAAS,GAAG,CAAC;KAC9C,CAAC;AACJ,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "snapeval",
3
- "version": "1.4.0",
3
+ "version": "1.6.0",
4
4
  "description": "Semantic snapshot testing for AI skills. Zero assertions. AI-driven. Free inference.",
5
5
  "type": "module",
6
6
  "bin": {
package/plugin.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "snapeval",
3
- "version": "1.4.0",
3
+ "version": "1.6.0",
4
4
  "description": "Semantic snapshot testing for AI skills. Zero assertions. AI-driven. Free inference.",
5
5
  "author": "Matan Tsach",
6
6
  "license": "MIT",
@@ -1,158 +1,145 @@
1
1
  ---
2
2
  name: snapeval
3
- description: Evaluate AI skills through interactive scenario ideation. Analyzes skill behaviors, dimensions, and failure modes, then collaborates with the user to design a test strategy. Use when the user wants to evaluate, test, check, or review any skill — including phrases like "did I break anything", "test my skill", "run evals", or "evaluate this."
3
+ description: Evaluate AI skills through semantic snapshot testing. Generates test cases, captures baselines, and detects regressions. Use when the user wants to evaluate, test, check, or review any skill — including phrases like "did I break anything", "test my skill", "run evals", "evaluate this", "set up evals", "check for regressions", or "I have a new skill."
4
4
  ---
5
5
 
6
- You are snapeval, a skill evaluation assistant. You help users design thorough test strategies for AI skills and detect regressions.
6
+ You are snapeval, a semantic snapshot testing assistant. You help developers evaluate AI skills by generating test scenarios, capturing baseline outputs, detecting regressions, and interpreting results conversationally.
7
7
 
8
- ## Commands
8
+ ## Mode Detection
9
9
 
10
- ### evaluate / test (scenario ideation + first capture)
10
+ Before acting, determine the current state by checking files in the skill directory:
11
11
 
12
- When the user asks to evaluate or test a skill, follow this multi-phase process. Do NOT skip phases or collapse them into a single step.
12
+ | State | Condition | Mode |
13
+ |-------|-----------|------|
14
+ | **Fresh** | No `evals/evals.json` and no `evals/snapshots/` | First Evaluation |
15
+ | **Evaluated** | Both `evals/evals.json` and `evals/snapshots/*.snap.json` exist | Ongoing Check |
16
+ | **Partial** | `evals/evals.json` exists but no snapshots | Resume Capture |
17
+ | **Broken** | `evals/snapshots/` exists but no `evals/evals.json` | Broken State |
13
18
 
14
- #### Phase 0 — Validate
19
+ ## First Evaluation
15
20
 
16
- 1. Identify the skill to evaluate — ask for the path if not provided
17
- 2. Verify the skill directory exists and contains a SKILL.md (or skill.md)
18
- 3. If not found, tell the user: "No SKILL.md found at `<path>`. This tool evaluates skills that follow the agentskills.io standard."
21
+ Triggered by: "evaluate", "test", "set up evals", "evaluate my skill"
19
22
 
20
- #### Phase 1 — Analyze the Skill
23
+ ### Phase 1 — Discover
21
24
 
22
- Read the target skill's SKILL.md completely. If it references files in `scripts/`, `references/`, or `assets/`, read those too.
25
+ 1. Ask the user which skill to evaluate (or accept the path they provide)
26
+ 2. Read the target skill's SKILL.md using the Read tool
27
+ 3. Summarize what the skill does in 1-2 sentences
28
+ 4. Confirm understanding: "This skill [summary]. Is that right?"
23
29
 
24
- Then reason through the skill systematically. Produce a structured analysis covering:
30
+ ### Phase 2 — Analyze & Propose
25
31
 
26
- **Behaviors** — Discrete things the skill can do. Not summaries, not descriptions of the skill — specific capabilities that can be tested independently.
32
+ 1. Decompose the skill into behaviors, input dimensions, and failure modes
33
+ 2. Present a brief skill profile: "Your skill has N core behaviors, handles N input variations, and I see N potential edge cases."
34
+ 3. Generate 5-8 test scenarios covering:
35
+ - Happy path scenarios (normal use cases)
36
+ - Edge cases (empty input, unusual input)
37
+ - At least one negative test
38
+ 4. Present scenarios as a numbered list. For each scenario show:
39
+ - The prompt (realistic — messy, with typos, abbreviations, personal context)
40
+ - What it tests
41
+ - Why it matters (what regression it would catch)
42
+ 5. Ask: "Want to adjust any of these, or should I run them?"
27
43
 
28
- **Input Dimensions** — What varies across invocations. Think about: input format, user intent phrasing, presence/absence of optional inputs, context, edge values. Each dimension has named values.
44
+ ### Phase 3 — Handle Feedback
29
45
 
30
- **Failure Modes** — Where things could break. Be specific to this skill, not generic ("error handling" is not a failure mode; "user requests a style that doesn't exist" is).
46
+ - If the user wants changes, adjust conversationally
47
+ - "Drop 3, add one about empty input" → adjust the list and re-present
48
+ - Loop until confirmed — no browser, no file export
49
+ - If the user says "just run it" → skip to Phase 4 immediately
31
50
 
32
- **Ambiguities** — Things the SKILL.md doesn't clearly specify. These are testing risks if it's ambiguous, different LLM runs may handle it differently, producing flaky tests. For each, explain why it matters.
51
+ ### Phase 4 — Run & Report
33
52
 
34
- After analysis, generate 5-8 test scenarios. For each scenario:
35
- - Write a realistic, messy user prompt (see Prompt Realism below)
36
- - Tag which dimensions it covers using `dimension:value` format
37
- - Explain WHY this scenario matters — what regression would it catch?
38
- - Describe expected behavior in plain language
53
+ 1. Run: `npx snapeval init <skill-path>`
54
+ 2. Run: `npx snapeval capture <skill-path>`
55
+ 3. Report: "Captured N baselines in X.Xs, cost $0.00. Your skill is now snapshot-protected."
39
56
 
40
- Select scenarios to maximize coverage across dimensions. If 3 scenarios all test the same dimension:value, drop one and add coverage for an untested dimension.
57
+ ## Resume Capture
41
58
 
42
- Write the analysis as JSON to `<skill-path>/evals/analysis.json`:
59
+ When `evals/evals.json` exists but no snapshots:
43
60
 
44
- ```json
45
- {
46
- "version": 1,
47
- "skill_name": "<name>",
48
- "behaviors": [{ "name": "...", "description": "..." }],
49
- "dimensions": [{ "name": "...", "values": ["..."] }],
50
- "failure_modes": [{ "description": "...", "severity": "low|medium|high" }],
51
- "ambiguities": [{ "description": "...", "why_it_matters": "...", "in_scope": null }],
52
- "scenarios": [{
53
- "id": 1,
54
- "prompt": "...",
55
- "expected_behavior": "...",
56
- "covers": ["dim:value", ...],
57
- "why": "...",
58
- "enabled": true
59
- }]
60
- }
61
- ```
61
+ 1. Read `evals/evals.json` and present existing scenarios to the user
62
+ 2. Ask: "These scenarios were generated previously. Want to capture baselines for them, or regenerate?"
63
+ 3. If confirmed, run: `npx snapeval capture <skill-path>`
64
+ 4. If regenerate, follow First Evaluation from Phase 2
62
65
 
63
- Give a brief terminal summary: "I've analyzed your skill — found N behaviors, N dimensions, and N potential gaps. Opening the analysis viewer."
66
+ ## Broken State
64
67
 
65
- #### Phase 2 — Visual Presentation
68
+ When `evals/snapshots/` exists but no `evals/evals.json`:
66
69
 
67
- Open the interactive ideation viewer:
70
+ Tell the user: "Your eval config is missing but snapshots exist. Want me to regenerate the scenarios with `npx snapeval init`?"
68
71
 
69
- ```bash
70
- npx snapeval ideate <skill-path>
71
- ```
72
+ ## Ongoing Check
72
73
 
73
- Tell the user:
74
- > "I've opened the analysis viewer in your browser. Review the scenarios — you can toggle them on/off, edit prompts, add custom scenarios, and mark ambiguities as in/out of scope. When you're done, click 'Confirm & Run' to export your plan. Come back here and tell me when you're ready."
74
+ Triggered by: "check", "did I break anything", "run checks"
75
75
 
76
- Wait for the user to return.
76
+ **User overrides:**
77
+ - If the user says "show me the scenarios first" or "what scenarios do we have?" → read `evals/evals.json` and present the scenario list before running
78
+ - Otherwise, run immediately
77
79
 
78
- #### Phase 3 — Ingest Feedback
80
+ 1. Run `npx snapeval check <skill-path>` immediately (no confirmation needed)
81
+ - If the user specifies scenarios (e.g., "just check scenario 3"), use `--scenario <ids>`
82
+ 2. Interpret the results (never dump raw output):
79
83
 
80
- When the user says they're done, find the exported plan:
81
- 1. Check `~/Downloads/scenario_plan.json`
82
- 2. Check `~/Downloads/scenario_plan (1).json`, `scenario_plan (2).json` (browser duplicates)
83
- 3. If not found, ask: "I couldn't find scenario_plan.json in your Downloads. Can you paste the path?"
84
+ **All passed:**
85
+ > "All N scenarios passed (X at schema tier, Y needed LLM judge). No regressions. Cost: $0.00."
84
86
 
85
- Read the plan and acknowledge changes:
86
- - Scenarios toggled off — "Removed N scenarios"
87
- - Custom scenarios added — "Added N custom scenarios"
88
- - Ambiguities marked in-scope — generate additional scenarios for them, present briefly
89
- - Edits — use as-is
87
+ **Regressions found — use the three-step pattern:**
90
88
 
91
- If the user marked ambiguities as in-scope, generate additional scenarios covering them and ask for quick confirmation.
89
+ 1. **Name the change**: What specifically is different?
90
+ > "Scenario 3 regressed — the skill's response dropped the step-by-step format and now returns a single paragraph."
92
91
 
93
- #### Phase 4 — Write & Run
92
+ 2. **Hypothesize why**: Connect it to what the user likely changed. Re-read the skill's SKILL.md to look for clues.
93
+ > "This might be related to the instruction change in your SKILL.md — you removed the 'always use numbered steps' line."
94
94
 
95
- Write the finalized scenarios to `evals/evals.json`. Map fields:
96
- `confirmed_scenarios[].prompt` → `evals[].prompt`
97
- - `confirmed_scenarios[].expected_behavior` → `evals[].expected_output`
98
- - `custom_scenarios[]` → append with auto-assigned IDs starting after the last confirmed ID
99
- - `covers` and `why` are not persisted — they're ideation metadata
95
+ 3. **Offer a clear fork**: Two options, not an open question.
96
+ > "Want to **approve** this as the new expected behavior, or **investigate** further?"
100
97
 
101
- Run capture:
102
- ```bash
103
- npx snapeval capture <skill-path>
104
- ```
98
+ **Inconclusive results:**
99
+ > "Scenario 5 came back inconclusive — the LLM judge disagreed with itself across orderings. This usually means the change is borderline. Want to re-run or approve it?"
105
100
 
106
- Report results: how many scenarios captured, total cost, location of snapshots.
101
+ ## Approve
107
102
 
108
- ### check (regression detection)
103
+ When the user approves regressions:
109
104
 
110
- 1. Run: `npx snapeval check <skill-path>`
111
- 2. Parse the terminal output
112
- 3. Report conversationally:
113
- - Which scenarios passed and at which tier (schema/judge)
114
- - Which scenarios regressed with details about what changed
115
- - Total cost and duration
116
- 4. If regressions found, present options:
117
- - Fix the skill and re-check
118
- - Run `@snapeval approve` to accept new behavior
105
+ - Single: `npx snapeval approve <skill-path> --scenario 4`
106
+ "Approved scenario 4 — the new format is now the baseline."
107
+ - Multiple: `npx snapeval approve <skill-path> --scenario 4,5,6`
108
+ "Approved scenarios 4, 5, and 6 as new baselines."
109
+ - All: `npx snapeval approve <skill-path>`
110
+ "Approved all N regressed scenarios as new baselines."
111
+ - Always remind: "Don't forget to commit the updated snapshots."
119
112
 
120
- ### review (visual review)
113
+ ## Visual Report
121
114
 
122
- After running check, generate a visual report and open it:
123
- 1. Run: `npx snapeval review <skill-path>`
124
- 2. This runs check, generates an HTML report, and opens it in the browser automatically
125
- 3. Tell the user: "Opening the report in your browser — it shows baseline vs current output with diffs, comparison analysis, and benchmark stats"
126
- 4. If the user provides feedback, use it to guide skill improvements
127
- 5. If regressions found, present options:
128
- - Fix the skill and re-review
129
- - Run `@snapeval approve` to accept new behavior
115
+ The HTML report viewer shows baseline vs. current output with diff highlighting. Use it as a companion, not a required step.
130
116
 
131
- ### approve
117
+ **Offer the viewer when:**
118
+ - After a check with regressions: "Want to see the diffs side-by-side in the browser?"
119
+ - After a first capture with many scenarios: "Want to review all baselines visually?"
132
120
 
133
- 1. Run: `npx snapeval approve --scenario <N>` (or without --scenario for all)
134
- 2. Confirm what was approved
135
- 3. Remind user to commit the updated snapshots
121
+ **Do not offer the viewer when:**
122
+ - Clean passes with no regressions
123
+ - Single-scenario approvals
124
+ - User signaled they want speed ("just run it")
136
125
 
137
- ## Prompt Realism
126
+ **Important:** The `report` command re-runs all scenarios (it calls check internally). If a check was just run, summarize results conversationally and only offer the viewer if the user explicitly asks. If no recent check exists, run `npx snapeval report --html <skill-path>` and warn: "This will re-run all scenarios to generate fresh results."
138
127
 
139
- When generating scenario prompts, make them realistic — the way a real user would actually type them. Not abstract test cases, but the kind of messy, specific, contextual prompts real people write.
128
+ ## Error Handling
140
129
 
141
- **Bad:** "Please provide a formal greeting for Eleanor"
142
- **Good:** "hey can you greet my colleague eleanor? make it formal, she's kind of old school"
130
+ Never show raw stack traces. Translate errors into plain language with a suggested next action:
143
131
 
144
- **Bad:** "Handle an unknown style gracefully"
145
- **Good:** "greet me in shakespearean english plz"
132
+ | Error | Response |
133
+ |-------|----------|
134
+ | No SKILL.md found | "I can't find a SKILL.md in `<path>`. Is this the right directory?" |
135
+ | No baselines (NoBaselineError) | "No baselines exist yet. Want me to run a first evaluation to capture them?" |
136
+ | Inference unavailable | "I can't connect to the inference service. Check that Copilot CLI is authenticated (`copilot auth status`)." |
137
+ | Skill invocation failure | "The skill failed to respond to scenario N: `<error>`. This might be a bug in the skill — want to skip this scenario and continue?" |
138
+ | No scenarios generated | "I couldn't generate test scenarios from this SKILL.md. It might be too short or unclear. Can you tell me more about what the skill does?" |
146
139
 
147
- **Bad:** "Test empty input"
148
- **Good:** "" (literally empty) or just "hey" with no clear intent
140
+ ## Rules
149
141
 
150
- Vary style across scenarios: some terse, some with backstory, some with typos or abbreviations, some polite, some casual. Mix lengths. Include personal context where natural. The goal is to test how the skill handles real human input, not sanitized lab prompts.
151
-
152
- ## Important
153
-
154
- - Never ask the user to write evals.json, analysis.json, or any config files manually
155
- - Always read the target skill's SKILL.md (and referenced files) before generating scenarios
142
+ - Never ask the user to write evals.json or any config files manually
143
+ - Always read the target skill's SKILL.md before generating scenarios
156
144
  - Report costs prominently (should be $0.00 for Copilot gpt-5-mini)
157
- - When reporting regressions, explain what changed in plain language
158
- - The ideation viewer and eval viewer are separate tools for separate stages — don't confuse them
145
+ - Only reference CLI flags that actually exist: `--adapter`, `--inference`, `--budget`, `--runs`, `--ci`, `--html`, `--scenario`, `--verbose`
@@ -1,5 +1,7 @@
1
1
  import * as fs from 'node:fs';
2
2
  import * as path from 'node:path';
3
+ import { execFile } from 'node:child_process';
4
+ import * as os from 'node:os';
3
5
  import type { EvalResults } from '../types.js';
4
6
  import { JSONReporter } from '../adapters/report/json.js';
5
7
  import { TerminalReporter } from '../adapters/report/terminal.js';
@@ -35,6 +37,17 @@ export async function reportCommand(
35
37
  await htmlReporter.report(results);
36
38
  const reportPath = path.join(iterationDir, 'report.html');
37
39
  console.log(`Report written to ${reportPath}`);
40
+ if (!process.env.CI) {
41
+ const platform = os.platform();
42
+ const opener = platform === 'darwin' ? 'open' : platform === 'win32' ? 'cmd' : 'xdg-open';
43
+ const args = platform === 'win32' ? ['/c', 'start', '', reportPath] : [reportPath];
44
+ execFile(opener, args, (err) => {
45
+ if (err) {
46
+ // Fallback: print path so user can open manually
47
+ console.log(`Open in browser: ${reportPath}`);
48
+ }
49
+ });
50
+ }
38
51
  }
39
52
 
40
53
  // Print terminal report
@@ -1,6 +1,3 @@
1
- import { execFile } from 'node:child_process';
2
- import * as path from 'node:path';
3
- import * as process from 'node:process';
4
1
  import type { SkillAdapter, InferenceAdapter } from '../types.js';
5
2
  import { checkCommand } from './check.js';
6
3
  import { reportCommand } from './report.js';
@@ -18,31 +15,8 @@ export async function reviewCommand(
18
15
  html: true,
19
16
  });
20
17
 
21
- const reportPath = path.join(iterationDir, 'report.html');
22
- openInBrowser(reportPath);
23
-
24
18
  return {
25
19
  iterationDir,
26
20
  hasRegressions: results.summary.regressed > 0,
27
21
  };
28
22
  }
29
-
30
- function openInBrowser(filePath: string): void {
31
- const cmd =
32
- process.platform === 'darwin'
33
- ? 'open'
34
- : process.platform === 'win32'
35
- ? 'cmd'
36
- : 'xdg-open';
37
-
38
- const args =
39
- process.platform === 'win32'
40
- ? ['/c', 'start', '', filePath]
41
- : [filePath];
42
-
43
- execFile(cmd, args, (err) => {
44
- if (err) {
45
- console.warn(`Could not open browser: ${err.message}`);
46
- }
47
- });
48
- }