@kennethsolomon/shipkit 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +321 -0
- package/bin/shipkit.js +146 -0
- package/commands/sk/brainstorm.md +63 -0
- package/commands/sk/branch.md +35 -0
- package/commands/sk/config.md +96 -0
- package/commands/sk/execute-plan.md +85 -0
- package/commands/sk/features.md +238 -0
- package/commands/sk/finish-feature.md +154 -0
- package/commands/sk/help.md +103 -0
- package/commands/sk/hotfix.md +61 -0
- package/commands/sk/plan.md +30 -0
- package/commands/sk/release.md +72 -0
- package/commands/sk/security-check.md +188 -0
- package/commands/sk/set-profile.md +71 -0
- package/commands/sk/status.md +25 -0
- package/commands/sk/update-task.md +35 -0
- package/commands/sk/write-plan.md +72 -0
- package/package.json +23 -0
- package/skills/sk:accessibility/LICENSE.txt +177 -0
- package/skills/sk:accessibility/SKILL.md +150 -0
- package/skills/sk:api-design/LICENSE.txt +177 -0
- package/skills/sk:api-design/SKILL.md +158 -0
- package/skills/sk:brainstorming/SKILL.md +124 -0
- package/skills/sk:debug/SKILL.md +252 -0
- package/skills/sk:debug/debug_conductor.py +177 -0
- package/skills/sk:debug/lib/__init__.py +1 -0
- package/skills/sk:debug/lib/bug_gatherer.py +55 -0
- package/skills/sk:debug/lib/context_reader.py +139 -0
- package/skills/sk:debug/lib/findings_writer.py +76 -0
- package/skills/sk:debug/lib/lessons_writer.py +165 -0
- package/skills/sk:debug/lib/step_runner.py +326 -0
- package/skills/sk:features/SKILL.md +238 -0
- package/skills/sk:frontend-design/LICENSE.txt +177 -0
- package/skills/sk:frontend-design/SKILL.md +191 -0
- package/skills/sk:laravel-init/SKILL.md +37 -0
- package/skills/sk:laravel-new/SKILL.md +68 -0
- package/skills/sk:lint/SKILL.md +113 -0
- package/skills/sk:perf/LICENSE.txt +177 -0
- package/skills/sk:perf/SKILL.md +188 -0
- package/skills/sk:release/SKILL.md +113 -0
- package/skills/sk:release/references/android-checklist.md +269 -0
- package/skills/sk:release/references/ios-checklist.md +339 -0
- package/skills/sk:release/release.sh +378 -0
- package/skills/sk:review/SKILL.md +346 -0
- package/skills/sk:review/references/security-checklist.md +223 -0
- package/skills/sk:schema-migrate/SKILL.md +125 -0
- package/skills/sk:schema-migrate/orms/drizzle.md +546 -0
- package/skills/sk:schema-migrate/orms/laravel.md +367 -0
- package/skills/sk:schema-migrate/orms/prisma.md +357 -0
- package/skills/sk:schema-migrate/orms/rails.md +351 -0
- package/skills/sk:schema-migrate/orms/sqlalchemy.md +385 -0
- package/skills/sk:schema-migrate/references/detection.md +110 -0
- package/skills/sk:setup-claude/SKILL.md +365 -0
- package/skills/sk:setup-claude/references/detection.md +6 -0
- package/skills/sk:setup-claude/references/templates.md +11 -0
- package/skills/sk:setup-claude/scripts/apply_setup_claude.py +443 -0
- package/skills/sk:setup-claude/scripts/detect_arch_changes.py +437 -0
- package/skills/sk:setup-claude/templates/.claude/docs/arch-changelog-guide.md.template +6 -0
- package/skills/sk:setup-claude/templates/.claude/docs/changelog-guide.md.template +12 -0
- package/skills/sk:setup-claude/templates/CHANGELOG.md.template +21 -0
- package/skills/sk:setup-claude/templates/CLAUDE.md.template +299 -0
- package/skills/sk:setup-claude/templates/arch-changelog-guide.md.template +3 -0
- package/skills/sk:setup-claude/templates/changelog-guide.md.template +3 -0
- package/skills/sk:setup-claude/templates/commands/brainstorm.md.template +74 -0
- package/skills/sk:setup-claude/templates/commands/execute-plan.md.template +57 -0
- package/skills/sk:setup-claude/templates/commands/features.md.template +238 -0
- package/skills/sk:setup-claude/templates/commands/finish-feature.md.template +155 -0
- package/skills/sk:setup-claude/templates/commands/plan.md.template +30 -0
- package/skills/sk:setup-claude/templates/commands/re-setup.md.template +38 -0
- package/skills/sk:setup-claude/templates/commands/release.md.template +74 -0
- package/skills/sk:setup-claude/templates/commands/security-check.md.template +172 -0
- package/skills/sk:setup-claude/templates/commands/status.md.template +17 -0
- package/skills/sk:setup-claude/templates/commands/write-plan.md.template +34 -0
- package/skills/sk:setup-claude/templates/finish-feature.md.template +3 -0
- package/skills/sk:setup-claude/templates/plan.md.template +3 -0
- package/skills/sk:setup-claude/templates/status.md.template +3 -0
- package/skills/sk:setup-claude/templates/tasks/findings.md.template +19 -0
- package/skills/sk:setup-claude/templates/tasks/lessons.md.template +26 -0
- package/skills/sk:setup-claude/templates/tasks/progress.md.template +20 -0
- package/skills/sk:setup-claude/templates/tasks/security-findings.md.template +5 -0
- package/skills/sk:setup-claude/templates/tasks/todo.md.template +26 -0
- package/skills/sk:setup-claude/templates/tasks/workflow-status.md.template +31 -0
- package/skills/sk:setup-claude/templates/tasks-findings.md.template +3 -0
- package/skills/sk:setup-claude/templates/tasks-lessons.md.template +3 -0
- package/skills/sk:setup-claude/templates/tasks-progress.md.template +3 -0
- package/skills/sk:setup-claude/templates/tasks-todo.md.template +3 -0
- package/skills/sk:setup-claude/tests/test_apply_setup_claude.py +193 -0
- package/skills/sk:setup-optimizer/SKILL.md +184 -0
- package/skills/sk:setup-optimizer/lib/__init__.py +24 -0
- package/skills/sk:setup-optimizer/lib/detect.py +205 -0
- package/skills/sk:setup-optimizer/lib/discover.py +221 -0
- package/skills/sk:setup-optimizer/lib/enrich.py +163 -0
- package/skills/sk:setup-optimizer/lib/merge.py +277 -0
- package/skills/sk:setup-optimizer/lib/sidecar.py +129 -0
- package/skills/sk:setup-optimizer/optimize_claude.py +174 -0
- package/skills/sk:setup-optimizer/templates/CLAUDE.md.template +105 -0
- package/skills/sk:skill-creator/LICENSE.txt +202 -0
- package/skills/sk:skill-creator/SKILL.md +479 -0
- package/skills/sk:skill-creator/agents/analyzer.md +274 -0
- package/skills/sk:skill-creator/agents/comparator.md +202 -0
- package/skills/sk:skill-creator/agents/grader.md +223 -0
- package/skills/sk:skill-creator/assets/eval_review.html +146 -0
- package/skills/sk:skill-creator/eval-viewer/generate_review.py +471 -0
- package/skills/sk:skill-creator/eval-viewer/viewer.html +1325 -0
- package/skills/sk:skill-creator/references/schemas.md +430 -0
- package/skills/sk:skill-creator/scripts/aggregate_benchmark.py +401 -0
- package/skills/sk:skill-creator/scripts/generate_report.py +326 -0
- package/skills/sk:skill-creator/scripts/improve_description.py +248 -0
- package/skills/sk:skill-creator/scripts/package_skill.py +136 -0
- package/skills/sk:skill-creator/scripts/quick_validate.py +103 -0
- package/skills/sk:skill-creator/scripts/run_eval.py +310 -0
- package/skills/sk:skill-creator/scripts/run_loop.py +332 -0
- package/skills/sk:skill-creator/scripts/utils.py +47 -0
- package/skills/sk:smart-commit/SKILL.md +175 -0
- package/skills/sk:test/SKILL.md +171 -0
- package/skills/sk:write-tests/SKILL.md +195 -0
- package/skills/sk:write-tests/references/patterns.md +209 -0
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: sk:test
|
|
3
|
+
description: "Auto-detect BE + FE test runners, run both in parallel, verify 100% coverage on new code, fix failures and re-run until all pass."
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Test Verification
|
|
7
|
+
|
|
8
|
+
## Overview
|
|
9
|
+
|
|
10
|
+
Auto-detect the project's backend and frontend testing frameworks from config files, run all suites in parallel, and verify 100% coverage on new code. This is the **verification step** (TDD green check) — tests should already exist from `/sk:write-tests`. This skill does NOT write tests.
|
|
11
|
+
|
|
12
|
+
## Allowed Tools
|
|
13
|
+
|
|
14
|
+
Bash, Read, Glob, Grep
|
|
15
|
+
|
|
16
|
+
## Steps
|
|
17
|
+
|
|
18
|
+
You MUST complete these steps in order:
|
|
19
|
+
|
|
20
|
+
### 0. Check Project Lessons
|
|
21
|
+
|
|
22
|
+
If `tasks/lessons.md` exists, read it before doing anything else. Apply every active lesson as a standing constraint. Look for:
|
|
23
|
+
- Known flaky tests in this project
|
|
24
|
+
- Environment-specific runner issues
|
|
25
|
+
- Coverage tool quirks
|
|
26
|
+
|
|
27
|
+
### 1. Detect Testing Frameworks
|
|
28
|
+
|
|
29
|
+
Scan for **all** testing stacks by checking config files:
|
|
30
|
+
|
|
31
|
+
**Backend detection:**
|
|
32
|
+
```bash
|
|
33
|
+
cat composer.json 2>/dev/null # Pest / PHPUnit
|
|
34
|
+
cat pyproject.toml 2>/dev/null # pytest
|
|
35
|
+
cat go.mod 2>/dev/null # Go testing
|
|
36
|
+
cat Cargo.toml 2>/dev/null # Rust cargo test
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
**Frontend detection:**
|
|
40
|
+
```bash
|
|
41
|
+
cat package.json 2>/dev/null # Vitest / Jest
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
**Detection table:**
|
|
45
|
+
|
|
46
|
+
| Config file | Indicator | Runner | Command |
|
|
47
|
+
|-------------|-----------|--------|---------|
|
|
48
|
+
| `composer.json` | `pestphp/pest` in require-dev | Pest | `./vendor/bin/pest --coverage --compact` |
|
|
49
|
+
| `composer.json` | `phpunit/phpunit` (no Pest) | PHPUnit | `./vendor/bin/phpunit --coverage-text` |
|
|
50
|
+
| `package.json` | `vitest` in devDependencies | Vitest | `npx vitest run --coverage` |
|
|
51
|
+
| `package.json` | `jest` in devDependencies | Jest | `npx jest --coverage` |
|
|
52
|
+
| `pyproject.toml` | `pytest` in dependencies | pytest | `python -m pytest --cov` |
|
|
53
|
+
| `go.mod` | present | Go test | `go test -cover ./...` |
|
|
54
|
+
| `Cargo.toml` | present | cargo test | `cargo test` |
|
|
55
|
+
|
|
56
|
+
Report what was detected:
|
|
57
|
+
```
|
|
58
|
+
Backend: [runner] — [command]
|
|
59
|
+
Frontend: [runner] — [command]
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
If only one stack exists, report that and proceed with what is available.
|
|
63
|
+
|
|
64
|
+
### 2. Setup Vitest (conditional)
|
|
65
|
+
|
|
66
|
+
**Only if Vitest is detected but not yet configured** (no `vitest.config.ts` exists):
|
|
67
|
+
|
|
68
|
+
Install dependencies:
|
|
69
|
+
```bash
|
|
70
|
+
npm install -D vitest @testing-library/react @testing-library/jest-dom @testing-library/user-event jsdom @vitejs/plugin-react @vitest/coverage-v8
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
Create `vitest.config.ts`:
|
|
74
|
+
```ts
|
|
75
|
+
import { defineConfig } from 'vitest/config';
|
|
76
|
+
import react from '@vitejs/plugin-react';
|
|
77
|
+
|
|
78
|
+
export default defineConfig({
|
|
79
|
+
plugins: [react()],
|
|
80
|
+
test: {
|
|
81
|
+
environment: 'jsdom',
|
|
82
|
+
globals: true,
|
|
83
|
+
setupFiles: ['./resources/js/__tests__/setup.ts'],
|
|
84
|
+
include: ['resources/js/__tests__/**/*.{test,spec}.{ts,tsx}'],
|
|
85
|
+
coverage: {
|
|
86
|
+
provider: 'v8',
|
|
87
|
+
include: ['resources/js/**/*.{ts,tsx}'],
|
|
88
|
+
exclude: ['resources/js/__tests__/**', 'resources/js/types/**'],
|
|
89
|
+
},
|
|
90
|
+
},
|
|
91
|
+
resolve: {
|
|
92
|
+
alias: {
|
|
93
|
+
'@': '/resources/js',
|
|
94
|
+
},
|
|
95
|
+
},
|
|
96
|
+
});
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
Create `resources/js/__tests__/setup.ts` if missing:
|
|
100
|
+
```ts
|
|
101
|
+
import '@testing-library/jest-dom';
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
Skip this step entirely if Vitest config already exists or a different FE runner was detected.
|
|
105
|
+
|
|
106
|
+
### 3. Run All Test Suites
|
|
107
|
+
|
|
108
|
+
Run BE and FE test suites **in parallel using sub-agents** since they are fully independent:
|
|
109
|
+
|
|
110
|
+
```
|
|
111
|
+
Sub-agent 1 (BE): [detected BE command]
|
|
112
|
+
Sub-agent 2 (FE): [detected FE command]
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
If only one stack exists, run it directly — no sub-agent needed.
|
|
116
|
+
|
|
117
|
+
For large BE suites, you may split further:
|
|
118
|
+
```
|
|
119
|
+
Sub-agent 1: ./vendor/bin/pest --filter=Feature --coverage --compact
|
|
120
|
+
Sub-agent 2: ./vendor/bin/pest --filter=Unit --coverage --compact
|
|
121
|
+
Sub-agent 3: [FE command]
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
### 4. If Tests Fail
|
|
125
|
+
|
|
126
|
+
- Read the failure output carefully — identify the root cause
|
|
127
|
+
- Fix the failing **implementation code** or test setup, not the test assertions (tests define expected behavior)
|
|
128
|
+
- Do NOT skip, mark incomplete, or delete failing tests
|
|
129
|
+
- Re-run the failing suite until all tests pass
|
|
130
|
+
- If a fix changes behavior, confirm with the user before applying
|
|
131
|
+
|
|
132
|
+
### 5. Verify Coverage
|
|
133
|
+
|
|
134
|
+
- **100% coverage on new code** is required for both suites
|
|
135
|
+
- Check the coverage output from each runner
|
|
136
|
+
- If coverage is below 100% on new code, identify the uncovered lines and report them — do NOT write new tests (that is `/sk:write-tests` responsibility)
|
|
137
|
+
|
|
138
|
+
### 6. Report Results
|
|
139
|
+
|
|
140
|
+
Output the final status in this exact format:
|
|
141
|
+
|
|
142
|
+
```
|
|
143
|
+
BE: X tests passed, X failed — coverage X%
|
|
144
|
+
FE: X tests passed, X failed — coverage X%
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
- Omit a line if that stack was not detected
|
|
148
|
+
- List any failing tests with `file:line`
|
|
149
|
+
- List any uncovered new code with `file:line`
|
|
150
|
+
|
|
151
|
+
## Pass Criteria
|
|
152
|
+
|
|
153
|
+
All detected suites pass with 100% coverage on new code. Both lines of the report show zero failures.
|
|
154
|
+
|
|
155
|
+
---
|
|
156
|
+
|
|
157
|
+
## Model Routing
|
|
158
|
+
|
|
159
|
+
Read `.shipkit/config.json` from the project root if it exists.
|
|
160
|
+
|
|
161
|
+
- If `model_overrides["sk:test"]` is set, use that model — it takes precedence.
|
|
162
|
+
- Otherwise use the `profile` field. Default: `balanced`.
|
|
163
|
+
|
|
164
|
+
| Profile | Model |
|
|
165
|
+
|---------|-------|
|
|
166
|
+
| `full-sail` | sonnet |
|
|
167
|
+
| `quality` | sonnet |
|
|
168
|
+
| `balanced` | haiku |
|
|
169
|
+
| `budget` | haiku |
|
|
170
|
+
|
|
171
|
+
> `opus` = inherit (uses the current session model). When spawning sub-agents via the Agent tool, pass `model: "<resolved-model>"`.
|
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: sk:write-tests
|
|
3
|
+
description: "TDD: Auto-detect BE + FE testing stacks, write failing tests before implementation. Updates existing tests when behavior changes."
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Test Generation (TDD)
|
|
7
|
+
|
|
8
|
+
## Overview
|
|
9
|
+
|
|
10
|
+
Auto-detect the project's backend AND frontend testing frameworks, read the plan from `tasks/todo.md`, and write comprehensive failing tests BEFORE implementation. Tests define the expected behavior — implementation makes them pass.
|
|
11
|
+
|
|
12
|
+
## Allowed Tools
|
|
13
|
+
|
|
14
|
+
Bash, Read, Write, Edit, Glob, Grep
|
|
15
|
+
|
|
16
|
+
**When the detected framework is `@playwright/test`**, also use:
|
|
17
|
+
mcp__plugin_playwright_playwright__browser_snapshot, mcp__plugin_playwright_playwright__browser_run_code, mcp__plugin_playwright_playwright__browser_navigate, mcp__plugin_playwright_playwright__browser_take_screenshot
|
|
18
|
+
|
|
19
|
+
## Steps
|
|
20
|
+
|
|
21
|
+
You MUST complete these steps in order:
|
|
22
|
+
|
|
23
|
+
### 0. Check Project Lessons
|
|
24
|
+
|
|
25
|
+
If `tasks/lessons.md` exists, read it before doing anything else. Apply every active lesson as a standing constraint. Look for:
|
|
26
|
+
- Known flaky test patterns in this project
|
|
27
|
+
- Mocking approaches that caused issues before
|
|
28
|
+
- Framework-specific gotchas
|
|
29
|
+
- File location conventions that broke in the past
|
|
30
|
+
|
|
31
|
+
### 1. Read the Plan
|
|
32
|
+
|
|
33
|
+
- Read `tasks/todo.md` to understand what will be implemented
|
|
34
|
+
- Read `tasks/progress.md` for context from brainstorm/design steps
|
|
35
|
+
- Identify all code that will be created or modified
|
|
36
|
+
- This is the **source of truth** for what tests to write
|
|
37
|
+
|
|
38
|
+
### 2. Detect ALL Testing Frameworks
|
|
39
|
+
|
|
40
|
+
Scan for **both backend and frontend** testing stacks:
|
|
41
|
+
|
|
42
|
+
**Backend detection:**
|
|
43
|
+
```bash
|
|
44
|
+
cat composer.json 2>/dev/null # PHPUnit / Pest
|
|
45
|
+
cat pyproject.toml 2>/dev/null # pytest
|
|
46
|
+
cat go.mod 2>/dev/null # Go testing
|
|
47
|
+
cat Cargo.toml 2>/dev/null # Rust #[cfg(test)]
|
|
48
|
+
cat Gemfile 2>/dev/null # RSpec / Minitest
|
|
49
|
+
cat build.gradle 2>/dev/null # JUnit
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
**Frontend detection:**
|
|
53
|
+
```bash
|
|
54
|
+
cat package.json 2>/dev/null # Jest, Vitest, Mocha, Cypress, Playwright
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
Check for framework-specific config:
|
|
58
|
+
- `vitest.config.ts` / `vite.config.ts` (Vitest)
|
|
59
|
+
- `jest.config.js` / `jest.config.ts` (Jest)
|
|
60
|
+
- `phpunit.xml` / `pest` in composer.json (PHPUnit / Pest)
|
|
61
|
+
- `pytest.ini` / `conftest.py` (pytest)
|
|
62
|
+
- `cypress.config.ts` (Cypress)
|
|
63
|
+
- `playwright.config.ts` (Playwright)
|
|
64
|
+
|
|
65
|
+
Report ALL detected frameworks:
|
|
66
|
+
```
|
|
67
|
+
Backend: [framework] ([language]) — [test runner command]
|
|
68
|
+
Frontend: [framework] ([language]) — [test runner command]
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
If only one stack exists (e.g., API-only with no FE, or FE-only SPA), report that and proceed with what's available.
|
|
72
|
+
|
|
73
|
+
### 3. Check Existing Tests
|
|
74
|
+
|
|
75
|
+
- Find existing tests related to the code being changed
|
|
76
|
+
- If modifying existing behavior: **update those tests first** to expect the new behavior
|
|
77
|
+
- If adding new code: identify what test files need to be created
|
|
78
|
+
- Report: "Updating X existing test files, creating Y new test files"
|
|
79
|
+
|
|
80
|
+
### 4. Learn Project Test Conventions
|
|
81
|
+
|
|
82
|
+
Find and read 1-2 existing test files **per stack** to learn patterns:
|
|
83
|
+
|
|
84
|
+
From existing tests, learn:
|
|
85
|
+
- Import style and aliases
|
|
86
|
+
- Test structure (describe/it, test(), func TestX)
|
|
87
|
+
- Assertion library and patterns
|
|
88
|
+
- Mocking approach
|
|
89
|
+
- Setup/teardown patterns
|
|
90
|
+
- File naming convention
|
|
91
|
+
- Test file location (co-located vs `tests/` directory)
|
|
92
|
+
|
|
93
|
+
If **no existing tests** are found, use `references/patterns.md` for framework-appropriate templates.
|
|
94
|
+
|
|
95
|
+
### 5. Analyze Target Code from Plan
|
|
96
|
+
|
|
97
|
+
Based on the plan in `tasks/todo.md`, identify test cases for each piece of planned code:
|
|
98
|
+
|
|
99
|
+
**Backend tests:**
|
|
100
|
+
- **Happy path**: Normal expected behavior for each endpoint/function
|
|
101
|
+
- **Edge cases**: Empty inputs, boundary values, null/undefined
|
|
102
|
+
- **Error handling**: Invalid inputs, thrown exceptions, error responses
|
|
103
|
+
- **Authorization**: Ensure policies/guards are tested
|
|
104
|
+
- **Validation**: All form request / input validation rules
|
|
105
|
+
|
|
106
|
+
**Frontend tests:**
|
|
107
|
+
- **Component rendering**: Correct output for given props
|
|
108
|
+
- **User interactions**: Click, type, submit, navigate
|
|
109
|
+
- **Conditional rendering**: Show/hide based on state
|
|
110
|
+
- **Error states**: Loading, empty, error displays
|
|
111
|
+
- **Form handling**: Validation, submission, reset
|
|
112
|
+
|
|
113
|
+
### 6. Determine Test File Locations
|
|
114
|
+
|
|
115
|
+
Follow the project's existing convention:
|
|
116
|
+
|
|
117
|
+
| Convention | Pattern | Example |
|
|
118
|
+
|-----------|---------|---------|
|
|
119
|
+
| Co-located | Same directory as source | `src/auth/login.test.ts` |
|
|
120
|
+
| Mirror `tests/` | Parallel directory structure | `tests/auth/login.test.ts` |
|
|
121
|
+
| `__tests__/` | Jest/Vitest convention | `src/auth/__tests__/login.test.ts` |
|
|
122
|
+
| `test_` prefix | Python convention | `tests/test_login.py` |
|
|
123
|
+
| `_test` suffix | Go convention | `auth/login_test.go` |
|
|
124
|
+
| `tests/Feature/` + `tests/Unit/` | Laravel/Pest convention | `tests/Feature/ServerTest.php` |
|
|
125
|
+
|
|
126
|
+
### 7. Write Backend Test Files
|
|
127
|
+
|
|
128
|
+
Generate complete test files matching the project's style:
|
|
129
|
+
- One test per behavior, not per line of code
|
|
130
|
+
- Descriptive test names that explain expected behavior
|
|
131
|
+
- Arrange-Act-Assert pattern
|
|
132
|
+
- Mock external dependencies, not the code under test
|
|
133
|
+
- Test behavior, not implementation details
|
|
134
|
+
|
|
135
|
+
### 8. Write Frontend Test Files
|
|
136
|
+
|
|
137
|
+
If a frontend stack was detected, generate FE test files:
|
|
138
|
+
- Component tests for every new/modified component
|
|
139
|
+
- Page tests for every new/modified page
|
|
140
|
+
- Hook tests for custom hooks
|
|
141
|
+
- Mock framework helpers (e.g., Inertia's `useForm`, Next.js `useRouter`, SvelteKit `goto`)
|
|
142
|
+
- Use `@testing-library` conventions: prefer `getByRole`, `getByText`, `getByLabelText`
|
|
143
|
+
|
|
144
|
+
Skip this step if no FE stack was detected.
|
|
145
|
+
|
|
146
|
+
### 8b. Playwright-Specific (conditional)
|
|
147
|
+
|
|
148
|
+
**Only if `@playwright/test` is detected:**
|
|
149
|
+
|
|
150
|
+
Use the Playwright MCP plugin to inspect live page state for more accurate selectors:
|
|
151
|
+
|
|
152
|
+
1. Navigate to target URL
|
|
153
|
+
2. Capture accessibility snapshot for role-based selectors
|
|
154
|
+
3. Screenshot for visual reference
|
|
155
|
+
4. Optionally run inline assertions for complex interactions
|
|
156
|
+
|
|
157
|
+
### 9. Verify Tests Fail (Red Phase)
|
|
158
|
+
|
|
159
|
+
Run both suites to confirm tests fail as expected:
|
|
160
|
+
|
|
161
|
+
- **Tests SHOULD fail** — this confirms they're testing the right thing
|
|
162
|
+
- If tests pass without implementation, they're not testing anything useful — rewrite them
|
|
163
|
+
- Report which tests fail and why (missing class, missing route, missing component, etc.)
|
|
164
|
+
|
|
165
|
+
### 10. Report
|
|
166
|
+
|
|
167
|
+
Output:
|
|
168
|
+
```
|
|
169
|
+
BE tests written: X tests in Y files ([framework])
|
|
170
|
+
FE tests written: X tests in Y files ([framework]) ← omit if no FE stack
|
|
171
|
+
Existing tests updated: X files
|
|
172
|
+
Status: RED (tests fail as expected — ready for implementation)
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
## Key Principle
|
|
176
|
+
|
|
177
|
+
Tests define the **expected behavior**. Implementation makes them pass. If you're unsure what a piece of code should do, the test is where you decide.
|
|
178
|
+
|
|
179
|
+
---
|
|
180
|
+
|
|
181
|
+
## Model Routing
|
|
182
|
+
|
|
183
|
+
Read `.shipkit/config.json` from the project root if it exists.
|
|
184
|
+
|
|
185
|
+
- If `model_overrides["sk:write-tests"]` is set, use that model — it takes precedence.
|
|
186
|
+
- Otherwise use the `profile` field. Default: `balanced`.
|
|
187
|
+
|
|
188
|
+
| Profile | Model |
|
|
189
|
+
|---------|-------|
|
|
190
|
+
| `full-sail` | opus (inherit) |
|
|
191
|
+
| `quality` | sonnet |
|
|
192
|
+
| `balanced` | sonnet |
|
|
193
|
+
| `budget` | haiku |
|
|
194
|
+
|
|
195
|
+
> `opus` = inherit (uses the current session model). When spawning sub-agents via the Agent tool, pass `model: "<resolved-model>"`.
|
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
# Test Framework Templates
|
|
2
|
+
|
|
3
|
+
Use these templates when the project has **no existing test files** to learn from. Adapt to the project's specific needs.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Vitest + React Testing Library
|
|
8
|
+
|
|
9
|
+
```tsx
|
|
10
|
+
import { describe, it, expect, vi, beforeEach } from 'vitest'
|
|
11
|
+
import { render, screen, fireEvent, waitFor } from '@testing-library/react'
|
|
12
|
+
import { ComponentName } from './ComponentName'
|
|
13
|
+
|
|
14
|
+
describe('ComponentName', () => {
|
|
15
|
+
beforeEach(() => {
|
|
16
|
+
vi.clearAllMocks()
|
|
17
|
+
})
|
|
18
|
+
|
|
19
|
+
it('renders with default props', () => {
|
|
20
|
+
render(<ComponentName />)
|
|
21
|
+
expect(screen.getByText('expected text')).toBeInTheDocument()
|
|
22
|
+
})
|
|
23
|
+
|
|
24
|
+
it('handles user interaction', async () => {
|
|
25
|
+
const onAction = vi.fn()
|
|
26
|
+
render(<ComponentName onAction={onAction} />)
|
|
27
|
+
fireEvent.click(screen.getByRole('button', { name: /submit/i }))
|
|
28
|
+
expect(onAction).toHaveBeenCalledOnce()
|
|
29
|
+
})
|
|
30
|
+
|
|
31
|
+
it('displays error state', () => {
|
|
32
|
+
render(<ComponentName error="Something went wrong" />)
|
|
33
|
+
expect(screen.getByRole('alert')).toHaveTextContent('Something went wrong')
|
|
34
|
+
})
|
|
35
|
+
})
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
---
|
|
39
|
+
|
|
40
|
+
## Jest (Node.js)
|
|
41
|
+
|
|
42
|
+
```ts
|
|
43
|
+
import { functionName } from './module'
|
|
44
|
+
|
|
45
|
+
describe('functionName', () => {
|
|
46
|
+
it('returns expected result for valid input', () => {
|
|
47
|
+
expect(functionName('input')).toBe('expected')
|
|
48
|
+
})
|
|
49
|
+
|
|
50
|
+
it('throws on invalid input', () => {
|
|
51
|
+
expect(() => functionName(null)).toThrow('Expected error message')
|
|
52
|
+
})
|
|
53
|
+
|
|
54
|
+
it('handles edge case', () => {
|
|
55
|
+
expect(functionName('')).toBe('default')
|
|
56
|
+
})
|
|
57
|
+
})
|
|
58
|
+
|
|
59
|
+
// Mocking example
|
|
60
|
+
jest.mock('./dependency', () => ({
|
|
61
|
+
depFunction: jest.fn().mockResolvedValue('mocked'),
|
|
62
|
+
}))
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
## pytest (Python)
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
import pytest
|
|
71
|
+
from module import function_name
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class TestFunctionName:
|
|
75
|
+
def test_returns_expected_for_valid_input(self):
|
|
76
|
+
result = function_name("input")
|
|
77
|
+
assert result == "expected"
|
|
78
|
+
|
|
79
|
+
def test_raises_on_invalid_input(self):
|
|
80
|
+
with pytest.raises(ValueError, match="expected message"):
|
|
81
|
+
function_name(None)
|
|
82
|
+
|
|
83
|
+
def test_handles_empty_input(self):
|
|
84
|
+
assert function_name("") == "default"
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
# Fixture example
|
|
88
|
+
@pytest.fixture
|
|
89
|
+
def sample_data():
|
|
90
|
+
return {"key": "value"}
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def test_with_fixture(sample_data):
|
|
94
|
+
result = function_name(sample_data)
|
|
95
|
+
assert result is not None
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
# Mocking example
|
|
99
|
+
def test_with_mock(mocker):
|
|
100
|
+
mock_dep = mocker.patch("module.dependency.dep_function")
|
|
101
|
+
mock_dep.return_value = "mocked"
|
|
102
|
+
result = function_name("input")
|
|
103
|
+
assert result == "mocked"
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
---
|
|
107
|
+
|
|
108
|
+
## Go testing
|
|
109
|
+
|
|
110
|
+
```go
|
|
111
|
+
package mypackage
|
|
112
|
+
|
|
113
|
+
import (
|
|
114
|
+
"testing"
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
func TestFunctionName(t *testing.T) {
|
|
118
|
+
tests := []struct {
|
|
119
|
+
name string
|
|
120
|
+
input string
|
|
121
|
+
expected string
|
|
122
|
+
wantErr bool
|
|
123
|
+
}{
|
|
124
|
+
{
|
|
125
|
+
name: "valid input",
|
|
126
|
+
input: "hello",
|
|
127
|
+
expected: "HELLO",
|
|
128
|
+
},
|
|
129
|
+
{
|
|
130
|
+
name: "empty input returns error",
|
|
131
|
+
input: "",
|
|
132
|
+
wantErr: true,
|
|
133
|
+
},
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
for _, tt := range tests {
|
|
137
|
+
t.Run(tt.name, func(t *testing.T) {
|
|
138
|
+
got, err := FunctionName(tt.input)
|
|
139
|
+
if (err != nil) != tt.wantErr {
|
|
140
|
+
t.Errorf("FunctionName() error = %v, wantErr %v", err, tt.wantErr)
|
|
141
|
+
return
|
|
142
|
+
}
|
|
143
|
+
if got != tt.expected {
|
|
144
|
+
t.Errorf("FunctionName() = %v, want %v", got, tt.expected)
|
|
145
|
+
}
|
|
146
|
+
})
|
|
147
|
+
}
|
|
148
|
+
}
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
---
|
|
152
|
+
|
|
153
|
+
## Rust
|
|
154
|
+
|
|
155
|
+
```rust
|
|
156
|
+
#[cfg(test)]
|
|
157
|
+
mod tests {
|
|
158
|
+
use super::*;
|
|
159
|
+
|
|
160
|
+
#[test]
|
|
161
|
+
fn test_valid_input() {
|
|
162
|
+
let result = function_name("input");
|
|
163
|
+
assert_eq!(result, "expected");
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
#[test]
|
|
167
|
+
#[should_panic(expected = "error message")]
|
|
168
|
+
fn test_invalid_input_panics() {
|
|
169
|
+
function_name("");
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
#[test]
|
|
173
|
+
fn test_returns_none_for_missing() {
|
|
174
|
+
assert!(function_name("missing").is_none());
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
---
|
|
180
|
+
|
|
181
|
+
## Mocha + Chai
|
|
182
|
+
|
|
183
|
+
```ts
|
|
184
|
+
import { expect } from 'chai'
|
|
185
|
+
import sinon from 'sinon'
|
|
186
|
+
import { functionName } from './module'
|
|
187
|
+
|
|
188
|
+
describe('functionName', () => {
|
|
189
|
+
afterEach(() => {
|
|
190
|
+
sinon.restore()
|
|
191
|
+
})
|
|
192
|
+
|
|
193
|
+
it('should return expected result for valid input', () => {
|
|
194
|
+
const result = functionName('input')
|
|
195
|
+
expect(result).to.equal('expected')
|
|
196
|
+
})
|
|
197
|
+
|
|
198
|
+
it('should throw on invalid input', () => {
|
|
199
|
+
expect(() => functionName(null)).to.throw('Expected error message')
|
|
200
|
+
})
|
|
201
|
+
|
|
202
|
+
it('should call dependency correctly', () => {
|
|
203
|
+
const stub = sinon.stub(dependency, 'method').returns('mocked')
|
|
204
|
+
const result = functionName('input')
|
|
205
|
+
expect(stub.calledOnce).to.be.true
|
|
206
|
+
expect(result).to.equal('mocked')
|
|
207
|
+
})
|
|
208
|
+
})
|
|
209
|
+
```
|