@staff0rd/assist 0.139.0 → 0.140.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,119 @@
1
+ ---
2
+ description: Incrementally increase test coverage by identifying and testing uncovered files
3
+ allowed_args: "[number of files to cover, default 1]"
4
+ ---
5
+
6
+ You are increasing test coverage for this project. Your goal is to pick the highest-value uncovered file(s) and write thorough tests for them.
7
+
8
+ ## Step 1: Measure current coverage
9
+
10
+ Run coverage against all source files to identify what is untested:
11
+
12
+ ```
13
+ npx vitest run --coverage --coverage.include='src/**/*.ts' --coverage.all --coverage.reporter=json 2>&1
14
+ ```
15
+
16
+ Read the JSON coverage report at `coverage/coverage-final.json` to get per-file statement coverage percentages. Identify files with 0% or low coverage.
17
+
18
+ ## Step 2: Prioritise files
19
+
20
+ From the uncovered files, prioritise by:
21
+
22
+ 1. **Pure logic** — functions with clear inputs/outputs, no side effects (parsers, validators, transformers)
23
+ 2. **Shared utilities** — files under `src/shared/` used by multiple consumers
24
+ 3. **Complexity** — files with branching logic, edge cases, or error handling
25
+
26
+ Skip files that are primarily:
27
+ - Thin CLI wrappers (just parse args and call another function)
28
+ - UI components (React/TSX)
29
+ - Files that only re-export or wire things together
30
+
31
+ Select the number of files specified by `$ARGUMENTS` (default: 1).
32
+
33
+ ## Step 3: Read the source and existing tests
34
+
35
+ Read each selected source file thoroughly. Also read any existing test files nearby to understand the project's testing patterns.
36
+
37
+ ## Step 4: Write tests
38
+
39
+ Create a test file colocated with the source file, named `<source-basename>.test.ts` (e.g., `parse.ts` → `parse.test.ts`).
40
+
41
+ Follow these conventions exactly:
42
+
43
+ - Import from `vitest`: `import { describe, expect, it } from "vitest";` (add `vi` only if mocking)
44
+ - Use Vitest's native `expect()` — no external assertion libraries
45
+ - Define helper functions locally in the test file when needed (e.g., factory functions for test data)
46
+ - Mock dependencies with `vi.mock()` only when necessary — prefer real implementations
47
+
48
+ ### BDD structure
49
+
50
+ Use a behavioural, BDD-style structure:
51
+
52
+ - **Outer `describe`** — the function or module under test
53
+ - **Inner `describe("when ...")`** — groups tests that share the same setup/scenario
54
+ - **`it("should ...")`** — asserts a single rule or behaviour
55
+
56
+ Each test follows an **Arrange, Act, Assert** pattern. Do NOT add `// arrange`, `// act`, `// assert` comments — just structure the code in that order with whitespace separating the three sections.
57
+
58
+ Keep assertions per test to a minimum — ideally one, at most two closely related assertions. If you need to assert multiple things about the same action, split them into separate `it` blocks under the same `describe("when ...")`.
59
+
60
+ Example:
61
+
62
+ ```typescript
63
+ describe("parseToken", () => {
64
+ describe("when given a valid token", () => {
65
+ it("should return the decoded payload", () => {
66
+ const token = createToken({ sub: "user-1" });
67
+
68
+ const result = parseToken(token);
69
+
70
+ expect(result.sub).toBe("user-1");
71
+ });
72
+ });
73
+
74
+ describe("when given an expired token", () => {
75
+ it("should throw an expiration error", () => {
76
+ const token = createToken({ exp: pastDate() });
77
+
78
+ expect(() => parseToken(token)).toThrow("expired");
79
+ });
80
+ });
81
+ });
82
+ ```
83
+
84
+ ### Coverage targets
85
+
86
+ Cover:
87
+ - Happy path for each exported function
88
+ - Edge cases (empty input, undefined, boundary values)
89
+ - Error cases and invalid input
90
+ - Branch coverage — ensure each conditional path is exercised
91
+
92
+ ## Step 5: Run and verify
93
+
94
+ Run the tests to confirm they pass:
95
+
96
+ ```
97
+ npx vitest run <test-file-path> 2>&1
98
+ ```
99
+
100
+ If any tests fail, fix them. Then re-run coverage to confirm the file now has >90% statement coverage:
101
+
102
+ ```
103
+ npx vitest run --coverage --coverage.include='<source-file-path>' 2>&1
104
+ ```
105
+
106
+ ## Step 6: Run /verify
107
+
108
+ Run `/verify` to ensure nothing is broken.
109
+
110
+ ## Step 7: Report
111
+
112
+ Show a before/after summary:
113
+
114
+ ```
115
+ File | Before | After
116
+ <file path> | 0% | 95%
117
+ ```
118
+
119
+ And the new repo-wide coverage number.
@@ -0,0 +1,75 @@
1
+ ---
2
+ description: Review existing tests for quality, coverage gaps, and adherence to conventions
3
+ allowed_args: "[file or directory path to review, default: all test files]"
4
+ ---
5
+
6
+ You are reviewing existing tests for quality. Your goal is to identify tests that are weak, misleading, or missing important coverage — and to recommend specific improvements.
7
+
8
+ ## Step 1: Find test files
9
+
10
+ If `$ARGUMENTS` specifies a path, scope to that. Otherwise, find all `*.test.ts` files under `src/`.
11
+
12
+ ## Step 2: Read each test file and its source
13
+
14
+ For each test file, read both the test and the source file it covers. You need both to judge whether the tests are adequate.
15
+
16
+ ## Step 3: Evaluate each test file
17
+
18
+ Assess each test file against these criteria:
19
+
20
+ ### Correctness
21
+ - Do assertions actually verify the behaviour, or are they tautological (e.g., testing that a mock returns exactly what you configured it to return)?
22
+ - Are expected values correct and meaningful, not just copied from implementation output?
23
+ - Do tests assert the right thing — return values, side effects, thrown errors — for each scenario?
24
+
25
+ ### Coverage of behaviour
26
+ - Are all exported functions tested?
27
+ - Are conditional branches exercised (if/else, switch, early returns, error paths)?
28
+ - Are edge cases covered (empty input, null/undefined, boundary values, large input)?
29
+ - Are error cases tested (invalid arguments, missing data, thrown exceptions)?
30
+
31
+ ### Test independence
32
+ - Can each test run in isolation, or do tests depend on shared mutable state or execution order?
33
+ - Are mocks reset properly between tests?
34
+
35
+ ### BDD structure and Arrange-Act-Assert
36
+ - Does the outer `describe` name the function or module under test?
37
+ - Do inner `describe("when ...")` blocks group tests by shared setup/scenario?
38
+ - Do `it` blocks use `should` phrasing (e.g., `it("should return an empty array when there are no matches")`)?
39
+ - Does each test follow Arrange, Act, Assert ordering (without comments labelling the sections)?
40
+ - Are assertions minimal per test — ideally one, at most two closely related? If multiple things are asserted about the same action, are they split into separate `it` blocks under the same `describe("when ...")`?
41
+
42
+ ### Mocking discipline
43
+ - Are mocks used only when necessary (external I/O, complex dependencies)?
44
+ - Do mocks faithfully represent the real dependency's contract, or do they mask bugs?
45
+ - Is there a risk that mocked tests pass but real integration would fail?
46
+
47
+ ### Missing tests
48
+ - Are there exported functions or significant code paths with no corresponding test?
49
+ - Are there recently added functions (check git log) that lack tests?
50
+
51
+ ## Step 4: Report findings
52
+
53
+ For each test file, report:
54
+
55
+ **File:** `path/to/file.test.ts`
56
+
57
+ **Verdict:** Good / Needs improvement / Weak
58
+
59
+ **Strengths:**
60
+ - (what the tests do well)
61
+
62
+ **Issues:**
63
+ - (specific problems, each with a concrete recommendation)
64
+
65
+ **Missing coverage:**
66
+ - (untested functions or paths, with suggested test cases)
67
+
68
+ ## Step 5: Summary
69
+
70
+ End with an overall summary:
71
+
72
+ - Total test files reviewed
73
+ - Breakdown by verdict (Good / Needs improvement / Weak)
74
+ - Top 3 highest-priority improvements across all files
75
+ - Whether any tests risk giving false confidence (passing despite bugs)
@@ -89,6 +89,10 @@
89
89
  "SlashCommand(/screenshot)",
90
90
  "Skill(draft)",
91
91
  "SlashCommand(/draft)",
92
+ "Skill(test-cover)",
93
+ "SlashCommand(/test-cover)",
94
+ "Skill(test-review)",
95
+ "SlashCommand(/test-review)",
92
96
  "WebFetch(domain:staffordwilliams.com)"
93
97
  ],
94
98
  "deny": ["Bash(git commit:*)", "Bash(npm run:*)", "Bash(npx assist:*)"]
package/dist/index.js CHANGED
@@ -6,7 +6,7 @@ import { Command } from "commander";
6
6
  // package.json
7
7
  var package_default = {
8
8
  name: "@staff0rd/assist",
9
- version: "0.139.0",
9
+ version: "0.140.1",
10
10
  type: "module",
11
11
  main: "dist/index.js",
12
12
  bin: {
@@ -63,6 +63,7 @@ var package_default = {
63
63
  "@types/react-dom": "^19.2.3",
64
64
  "@types/semver": "^7.7.1",
65
65
  "@types/shell-quote": "^1.7.5",
66
+ "@vitest/coverage-v8": "^4.1.2",
66
67
  esbuild: "^0.27.3",
67
68
  jotai: "^2.18.0",
68
69
  jscpd: "^4.0.5",
@@ -2818,6 +2819,14 @@ async function run2(id) {
2818
2819
  if (!plan2) return;
2819
2820
  setStatus(id, "in-progress");
2820
2821
  const startPhase = item.currentPhase ?? 0;
2822
+ if (startPhase >= plan2.length) {
2823
+ setStatus(id, "done");
2824
+ console.log(
2825
+ chalk35.green(`All phases already complete for #${id}: ${item.name}`)
2826
+ );
2827
+ console.log(chalk35.dim("Review the changes, then use /commit when ready."));
2828
+ return;
2829
+ }
2821
2830
  console.log(chalk35.bold(`Running plan for #${id}: ${item.name}`));
2822
2831
  if (startPhase > 0) {
2823
2832
  console.log(
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@staff0rd/assist",
3
- "version": "0.139.0",
3
+ "version": "0.140.1",
4
4
  "type": "module",
5
5
  "main": "dist/index.js",
6
6
  "bin": {
@@ -57,6 +57,7 @@
57
57
  "@types/react-dom": "^19.2.3",
58
58
  "@types/semver": "^7.7.1",
59
59
  "@types/shell-quote": "^1.7.5",
60
+ "@vitest/coverage-v8": "^4.1.2",
60
61
  "esbuild": "^0.27.3",
61
62
  "jotai": "^2.18.0",
62
63
  "jscpd": "^4.0.5",