claude-dev-kit 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/angelic-workshop-energy-clearing.md +113 -0
- package/.claude/agents/angelic-workshop-intake.md +84 -0
- package/.claude/agents/angelic-workshop-integration.md +140 -0
- package/.claude/agents/angelic-workshop-invocation.md +92 -0
- package/.claude/agents/angelic-workshop-lead.md +225 -0
- package/.claude/agents/angelic-workshop-transmission.md +108 -0
- package/.claude/agents/deep-think-partner.md +41 -0
- package/.claude/agents/dev-backend.md +74 -0
- package/.claude/agents/dev-e2e.md +101 -0
- package/.claude/agents/dev-frontend.md +82 -0
- package/.claude/agents/dev-lead.md +144 -0
- package/.claude/agents/dev-reviewer.md +122 -0
- package/.claude/agents/dev-test.md +88 -0
- package/.claude/agents/documentation-manager.md +73 -0
- package/.claude/agents/haiku-executor.md +8 -0
- package/.claude/agents/pm-groomer.md +98 -0
- package/.claude/agents/pm-prp-writer.md +144 -0
- package/.claude/agents/pm-sizer.md +84 -0
- package/.claude/agents/project-manager.md +91 -0
- package/.claude/agents/system-architect.md +98 -0
- package/.claude/agents/validation-gates.md +121 -0
- package/.claude/agents/workflow-builder.md +416 -0
- package/.claude/commands/ai/detect.md +117 -0
- package/.claude/commands/ai/route.md +128 -0
- package/.claude/commands/ai/switch.md +121 -0
- package/.claude/commands/bs/brainstorm_full.md +149 -0
- package/.claude/commands/bs/claude.md +37 -0
- package/.claude/commands/bs/codex.md +37 -0
- package/.claude/commands/bs/gemini.md +37 -0
- package/.claude/commands/bs/glm.md +37 -0
- package/.claude/commands/bs/grok.md +37 -0
- package/.claude/commands/bs/kimi.md +37 -0
- package/.claude/commands/bs/minimax.md +37 -0
- package/.claude/commands/bs/ollama.md +71 -0
- package/.claude/commands/code/build-and-fix.md +80 -0
- package/.claude/commands/code/simplify.md +77 -0
- package/.claude/commands/dev/backend.md +47 -0
- package/.claude/commands/dev/e2e.md +49 -0
- package/.claude/commands/dev/frontend.md +45 -0
- package/.claude/commands/dev/review.md +48 -0
- package/.claude/commands/dev/test.md +54 -0
- package/.claude/commands/dev-epic.md +121 -0
- package/.claude/commands/dev-issue.md +79 -0
- package/.claude/commands/dev.md +134 -0
- package/.claude/commands/execute-prp.md +113 -0
- package/.claude/commands/fix-github-issue.md +14 -0
- package/.claude/commands/generate-prp.md +73 -0
- package/.claude/commands/git/status.md +14 -0
- package/.claude/commands/haiku.md +13 -0
- package/.claude/commands/improve.md +178 -0
- package/.claude/commands/init.md +311 -0
- package/.claude/commands/pm/groom.md +58 -0
- package/.claude/commands/pm/plan-epic.md +74 -0
- package/.claude/commands/pm/size.md +46 -0
- package/.claude/commands/pm.md +47 -0
- package/.claude/commands/primer.md +16 -0
- package/.claude/commands/self-improve.md +243 -0
- package/.claude/commands/think.md +68 -0
- package/.claude/commands/workflow/angelic-workshop.md +89 -0
- package/.claude/commands/workflow/build.md +91 -0
- package/.claude/hooks/pre-tool-use/block-dangerous-commands.js +196 -0
- package/.claude/hooks/skill-activation-prompt/package-lock.json +560 -0
- package/.claude/hooks/skill-activation-prompt/package.json +16 -0
- package/.claude/hooks/skill-activation-prompt/skill-activation-prompt.ts +135 -0
- package/.claude/hooks/skill-activation-prompt/skill-rules.json +50 -0
- package/.claude/hooks/stop/context_monitor.py +155 -0
- package/.claude/hooks/stop/learning_logger.py +218 -0
- package/.claude/skills/ai-router/SKILL.md +119 -0
- package/.claude/skills/build-and-fix/SKILL.md +271 -0
- package/.claude/skills/build-and-fix/examples/javascript-lint-fix.md +37 -0
- package/.claude/skills/build-and-fix/language-configs/javascript.yaml +139 -0
- package/.claude/skills/build-and-fix/references/config-schema.md +120 -0
- package/.claude/skills/build-and-fix/references/error-patterns.md +273 -0
- package/.claude/skills/code-investigator/SKILL.md +299 -0
- package/.claude/skills/code-investigator/references/investigation-workflows.md +542 -0
- package/.claude/skills/code-investigator/references/language-specific.md +761 -0
- package/.claude/skills/code-investigator/references/search-patterns.md +258 -0
- package/.claude/skills/code-investigator/references/serena-patterns.md +328 -0
- package/.claude/skills/stack-detector/SKILL.md +153 -0
- package/.claude/skills/verification-before-completion/SKILL.md +143 -0
- package/.claude/templates/claude-md-template.md +56 -0
- package/.claude/templates/stacks/express-node.md +134 -0
- package/.claude/templates/stacks/fastapi.md +152 -0
- package/.claude/templates/stacks/generic.md +101 -0
- package/.claude/templates/stacks/nextjs-prisma.md +235 -0
- package/README.md +499 -0
- package/bin/claude-dev-kit.js +11 -0
- package/package.json +31 -0
- package/scripts/install.sh +448 -0
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
---
|
|
2
|
+
version: 1.1.0
|
|
3
|
+
name: stack-detector
|
|
4
|
+
description: Detects the technology stack of a project by analyzing manifest files and dependencies. Returns a structured stack map JSON. Used by /init and other commands that need stack awareness.
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# Stack Detector
|
|
8
|
+
|
|
9
|
+
**Notification:** Output "Detecting project stack..." at skill start.
|
|
10
|
+
|
|
11
|
+
## Purpose
|
|
12
|
+
Identify the complete technology stack from project files without running any code. Return a structured JSON object for use by other commands.
|
|
13
|
+
|
|
14
|
+
## Detection Steps
|
|
15
|
+
|
|
16
|
+
### Step 1: Read manifest files (in parallel)
|
|
17
|
+
Use the Read tool (not bash cat) to read these files — avoids shell injection with unusual paths:
|
|
18
|
+
- `package.json`
|
|
19
|
+
- `pyproject.toml`
|
|
20
|
+
- `Cargo.toml`
|
|
21
|
+
- `go.mod`
|
|
22
|
+
- `deno.json` or `deno.jsonc`
|
|
23
|
+
- `astro.config.*`
|
|
24
|
+
|
|
25
|
+
Also check presence of these config files:
|
|
26
|
+
```
|
|
27
|
+
prisma/schema.prisma drizzle.config.* jest.config.* vitest.config.*
|
|
28
|
+
playwright.config.* cypress.config.* capacitor.config.ts app.json
|
|
29
|
+
deno.json deno.jsonc astro.config.mjs astro.config.ts
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### Step 2: Detect package manager
|
|
33
|
+
| File present | Manager |
|
|
34
|
+
|-------------|---------|
|
|
35
|
+
| `bun.lockb` | `bun` |
|
|
36
|
+
| `pnpm-lock.yaml` | `pnpm` |
|
|
37
|
+
| `yarn.lock` | `yarn` |
|
|
38
|
+
| `package-lock.json` | `npm` |
|
|
39
|
+
| `poetry.lock` | `poetry` |
|
|
40
|
+
| `Pipfile.lock` | `pipenv` |
|
|
41
|
+
| `deno.json` or `deno.jsonc` | `deno` |
|
|
42
|
+
|
|
43
|
+
### Step 3: Detect framework (first match wins)
|
|
44
|
+
|
|
45
|
+
**Special case — Deno:** If `deno.json` or `deno.jsonc` exists, set `framework: "deno"` immediately and skip remaining JS detection.
|
|
46
|
+
|
|
47
|
+
From package.json dependencies/devDependencies:
|
|
48
|
+
- `"next"` → `nextjs`
|
|
49
|
+
- `"@remix-run/node"` or `"@remix-run/serve"` → `remix`
|
|
50
|
+
- `"@sveltejs/kit"` → `sveltekit`
|
|
51
|
+
- `"nuxt"` → `nuxt`
|
|
52
|
+
- `"@nestjs/core"` → `nestjs`
|
|
53
|
+
- `"fastify"` → `fastify`
|
|
54
|
+
- `"express"` → `express`
|
|
55
|
+
- `"astro"` → `astro`
|
|
56
|
+
- `"solid-js"` or `"@solidjs/start"` → `solidjs`
|
|
57
|
+
|
|
58
|
+
From pyproject.toml:
|
|
59
|
+
- `"fastapi"` → `fastapi`
|
|
60
|
+
- `"django"` → `django`
|
|
61
|
+
- `"flask"` → `flask`
|
|
62
|
+
|
|
63
|
+
From go.mod: `go`
|
|
64
|
+
From Cargo.toml: `rust`
|
|
65
|
+
|
|
66
|
+
### Step 4: Detect Next.js router variant (important — different templates)
|
|
67
|
+
If framework is `nextjs`:
|
|
68
|
+
- Check if `app/` directory exists AND contains `layout.tsx` or `layout.js` → `routerVariant: "app"`
|
|
69
|
+
- Otherwise if `pages/` directory exists → `routerVariant: "pages"`
|
|
70
|
+
- Default to `"app"` (Next.js 13+ default)
|
|
71
|
+
|
|
72
|
+
This affects the templateKey: `nextjs-prisma-app` vs `nextjs-prisma-pages`.
|
|
73
|
+
|
|
74
|
+
### Step 5: Detect ORM
|
|
75
|
+
- `prisma/schema.prisma` exists → `prisma`
|
|
76
|
+
- `drizzle.config.*` exists → `drizzle`
|
|
77
|
+
- `"mongoose"` in deps → `mongoose`
|
|
78
|
+
- `"sqlalchemy"` in pyproject → `sqlalchemy`
|
|
79
|
+
- framework is `django` → `django-orm`
|
|
80
|
+
- `"gorm"` in go.mod → `gorm`
|
|
81
|
+
- `"sqlx"` in Cargo.toml → `sqlx`
|
|
82
|
+
|
|
83
|
+
### Step 6: Detect test runner
|
|
84
|
+
- `jest.config.*` → `jest`
|
|
85
|
+
- `vitest.config.*` → `vitest`
|
|
86
|
+
- `pytest.ini` or `conftest.py` → `pytest`
|
|
87
|
+
- Rust → `cargo-test`
|
|
88
|
+
- Go → `go-test`
|
|
89
|
+
- Deno → `deno-test`
|
|
90
|
+
|
|
91
|
+
### Step 7: Detect E2E runner
|
|
92
|
+
- `playwright.config.*` → `playwright`
|
|
93
|
+
- `cypress.config.*` → `cypress`
|
|
94
|
+
|
|
95
|
+
### Step 8: Detect mobile
|
|
96
|
+
- `capacitor.config.ts` → `capacitor`
|
|
97
|
+
- `app.json` with `"expo"` key → `expo`
|
|
98
|
+
|
|
99
|
+
### Fallback: Gemini scan
|
|
100
|
+
If framework is still unknown after all above steps AND Gemini CLI is available:
|
|
101
|
+
```bash
|
|
102
|
+
# Use the Read tool to get the current directory path first, then construct the command safely
|
|
103
|
+
# IMPORTANT: always quote the path in the @ reference to handle spaces and special chars
|
|
104
|
+
gemini -p "@'./' What framework, ORM, test runner, and E2E tool is this project using? Reply in format: FRAMEWORK:x ORM:x TEST:x E2E:x MOBILE:x"
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
If `gemini` is not available, fall back to grep-based analysis:
|
|
108
|
+
```bash
|
|
109
|
+
grep -r "import\|require\|from" src/ app/ lib/ --include="*.ts" --include="*.js" --include="*.py" -l 2>/dev/null | head -10
|
|
110
|
+
```
|
|
111
|
+
Read 2-3 of those files and infer the framework from import patterns.
|
|
112
|
+
|
|
113
|
+
## Output Format
|
|
114
|
+
|
|
115
|
+
```json
|
|
116
|
+
{
|
|
117
|
+
"framework": "nextjs",
|
|
118
|
+
"frameworkVersion": "15",
|
|
119
|
+
"routerVariant": "app",
|
|
120
|
+
"language": "typescript",
|
|
121
|
+
"packageManager": "bun",
|
|
122
|
+
"orm": "prisma",
|
|
123
|
+
"testRunner": "jest",
|
|
124
|
+
"e2eRunner": "playwright",
|
|
125
|
+
"mobile": "capacitor",
|
|
126
|
+
"templateKey": "nextjs-prisma",
|
|
127
|
+
"commands": {
|
|
128
|
+
"dev": "bun run dev",
|
|
129
|
+
"lint": "bun lint",
|
|
130
|
+
"test": "bunx jest --coverage",
|
|
131
|
+
"e2e": "bunx playwright test",
|
|
132
|
+
"build": "bun run build"
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
The `templateKey` is `<framework>-<orm>` (or just `<framework>` if no ORM), used to look up `.claude/templates/stacks/<templateKey>.md`. For Next.js, the router variant from Step 4 may be appended (e.g. `nextjs-prisma-app` vs `nextjs-prisma-pages`). If no matching template exists, use `generic`.
|
|
138
|
+
|
|
139
|
+
## Commands Derivation
|
|
140
|
+
|
|
141
|
+
Extract from `package.json` `scripts` where possible. Fallback defaults:
|
|
142
|
+
|
|
143
|
+
| Framework | Lint | Test | Build |
|
|
144
|
+
|-----------|------|------|-------|
|
|
145
|
+
| nextjs | `bun lint` / `npm run lint` | `bunx jest --coverage` | `bun run build` |
|
|
146
|
+
| fastapi | `ruff check . && mypy .` | `pytest --cov` | `python -m build 2>/dev/null || echo "no build step"` |
|
|
147
|
+
| django | `ruff check .` | `pytest --cov` | `python manage.py check` |
|
|
148
|
+
| express | `npm run lint` | `npm test` | `npm run build` |
|
|
149
|
+
| astro | `npm run lint` | `npm test` | `npm run build` |
|
|
150
|
+
| solidjs | `npm run lint` | `npm test` | `npm run build` |
|
|
151
|
+
| deno | `deno lint` | `deno test --coverage` | `deno compile` |
|
|
152
|
+
| go | `golangci-lint run` | `go test ./... -cover` | `go build ./...` |
|
|
153
|
+
| rust | `cargo clippy` | `cargo test` | `cargo build` |
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: verification-before-completion
|
|
3
|
+
description: Use when about to claim work is complete, fixed, or passing, before committing or creating PRs - requires running verification commands and confirming output before making any success claims; evidence before assertions always
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Verification Before Completion
|
|
7
|
+
|
|
8
|
+
## Overview
|
|
9
|
+
|
|
10
|
+
Claiming work is complete without verification is dishonesty, not efficiency.
|
|
11
|
+
|
|
12
|
+
**Core principle:** Evidence before claims, always.
|
|
13
|
+
|
|
14
|
+
**Violating the letter of this rule is violating the spirit of this rule.**
|
|
15
|
+
|
|
16
|
+
## The Iron Law
|
|
17
|
+
|
|
18
|
+
```
|
|
19
|
+
NO COMPLETION CLAIMS WITHOUT FRESH VERIFICATION EVIDENCE
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
If you haven't run the verification command in this message, you cannot claim it passes.
|
|
23
|
+
|
|
24
|
+
## The Gate Function
|
|
25
|
+
|
|
26
|
+
```
|
|
27
|
+
BEFORE claiming any status or expressing satisfaction:
|
|
28
|
+
|
|
29
|
+
1. IDENTIFY: What command proves this claim?
|
|
30
|
+
2. RUN: Execute the FULL command (fresh, complete)
|
|
31
|
+
3. READ: Full output, check exit code, count failures
|
|
32
|
+
4. VERIFY: Does output confirm the claim?
|
|
33
|
+
- If NO: State actual status with evidence
|
|
34
|
+
- If YES: State claim WITH evidence
|
|
35
|
+
5. ONLY THEN: Make the claim
|
|
36
|
+
|
|
37
|
+
Skip any step = lying, not verifying
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
## Common Failures
|
|
41
|
+
|
|
42
|
+
| Claim | Requires | Not Sufficient |
|
|
43
|
+
|-------|----------|----------------|
|
|
44
|
+
| Tests pass | Test command output: 0 failures | Previous run, "should pass" |
|
|
45
|
+
| Linter clean | Linter output: 0 errors | Partial check, extrapolation |
|
|
46
|
+
| Build succeeds | Build command: exit 0 | Linter passing, logs look good |
|
|
47
|
+
| Bug fixed | Test original symptom: passes | Code changed, assumed fixed |
|
|
48
|
+
| Regression test works | Red-green cycle verified | Test passes once |
|
|
49
|
+
| Agent completed | VCS diff shows changes | Agent reports "success" |
|
|
50
|
+
| Requirements met | Line-by-line checklist | Tests passing |
|
|
51
|
+
|
|
52
|
+
## Red Flags - STOP
|
|
53
|
+
|
|
54
|
+
- Using "should", "probably", "seems to"
|
|
55
|
+
- Expressing satisfaction before verification ("Great!", "Perfect!", "Done!", etc.)
|
|
56
|
+
- About to commit/push/PR without verification
|
|
57
|
+
- Trusting agent success reports
|
|
58
|
+
- Relying on partial verification
|
|
59
|
+
- Thinking "just this once"
|
|
60
|
+
- Tired and wanting work over
|
|
61
|
+
- **ANY wording implying success without having run verification**
|
|
62
|
+
|
|
63
|
+
## Rationalization Prevention
|
|
64
|
+
|
|
65
|
+
| Excuse | Reality |
|
|
66
|
+
|--------|---------|
|
|
67
|
+
| "Should work now" | RUN the verification |
|
|
68
|
+
| "I'm confident" | Confidence ≠ evidence |
|
|
69
|
+
| "Just this once" | No exceptions |
|
|
70
|
+
| "Linter passed" | Linter ≠ compiler |
|
|
71
|
+
| "Agent said success" | Verify independently |
|
|
72
|
+
| "I'm tired" | Exhaustion ≠ excuse |
|
|
73
|
+
| "Partial check is enough" | Partial proves nothing |
|
|
74
|
+
| "Different words so rule doesn't apply" | Spirit over letter |
|
|
75
|
+
|
|
76
|
+
## Key Patterns
|
|
77
|
+
|
|
78
|
+
**Tests:**
|
|
79
|
+
```
|
|
80
|
+
✅ [Run test command] [See: 34/34 pass] "All tests pass"
|
|
81
|
+
❌ "Should pass now" / "Looks correct"
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
**Regression tests (TDD Red-Green):**
|
|
85
|
+
```
|
|
86
|
+
✅ Write → Run (pass) → Revert fix → Run (MUST FAIL) → Restore → Run (pass)
|
|
87
|
+
❌ "I've written a regression test" (without red-green verification)
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
**Build:**
|
|
91
|
+
```
|
|
92
|
+
✅ [Run build] [See: exit 0] "Build passes"
|
|
93
|
+
❌ "Linter passed" (linter doesn't check compilation)
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
**Requirements:**
|
|
97
|
+
```
|
|
98
|
+
✅ Re-read plan → Create checklist → Verify each → Report gaps or completion
|
|
99
|
+
❌ "Tests pass, phase complete"
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
**Agent delegation:**
|
|
103
|
+
```
|
|
104
|
+
✅ Agent reports success → Check VCS diff → Verify changes → Report actual state
|
|
105
|
+
❌ Trust agent report
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
## Why This Matters
|
|
109
|
+
|
|
110
|
+
From 24 failure memories:
|
|
111
|
+
- your human partner said "I don't believe you" - trust broken
|
|
112
|
+
- Undefined functions shipped - would crash
|
|
113
|
+
- Missing requirements shipped - incomplete features
|
|
114
|
+
- Time wasted on false completion → redirect → rework
|
|
115
|
+
- Violates: "Honesty is a core value. If you lie, you'll be replaced."
|
|
116
|
+
|
|
117
|
+
## When To Apply
|
|
118
|
+
|
|
119
|
+
**ALWAYS before:**
|
|
120
|
+
- ANY variation of success/completion claims
|
|
121
|
+
- ANY expression of satisfaction
|
|
122
|
+
- ANY positive statement about work state
|
|
123
|
+
- Committing, PR creation, task completion
|
|
124
|
+
- Moving to next task
|
|
125
|
+
- Delegating to agents
|
|
126
|
+
|
|
127
|
+
**Rule applies to:**
|
|
128
|
+
- Exact phrases
|
|
129
|
+
- Paraphrases and synonyms
|
|
130
|
+
- Implications of success
|
|
131
|
+
- ANY communication suggesting completion/correctness
|
|
132
|
+
|
|
133
|
+
## The Bottom Line
|
|
134
|
+
|
|
135
|
+
**No shortcuts for verification.**
|
|
136
|
+
|
|
137
|
+
Run the command. Read the output. THEN claim the result.
|
|
138
|
+
|
|
139
|
+
This is non-negotiable.
|
|
140
|
+
|
|
141
|
+
## Notify user
|
|
142
|
+
|
|
143
|
+
**MANDATORY** — when this skill is used, notify the user that it is being applied.
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
# CLAUDE.md
|
|
2
|
+
|
|
3
|
+
<!-- CDK:GENERATED:START — managed by /init, do not edit this block manually -->
|
|
4
|
+
|
|
5
|
+
## Project Overview
|
|
6
|
+
<!-- PROJECT_DESCRIPTION -->
|
|
7
|
+
|
|
8
|
+
## Stack
|
|
9
|
+
| Component | Value |
|
|
10
|
+
|-----------|-------|
|
|
11
|
+
| Framework | <!-- FRAMEWORK --> |
|
|
12
|
+
| ORM / DB | <!-- ORM --> |
|
|
13
|
+
| Language | <!-- LANGUAGE --> |
|
|
14
|
+
| Package manager | <!-- PACKAGE_MANAGER --> |
|
|
15
|
+
| Test runner | <!-- TEST_RUNNER --> |
|
|
16
|
+
| E2E | <!-- E2E_RUNNER --> |
|
|
17
|
+
| Mobile | <!-- MOBILE --> |
|
|
18
|
+
|
|
19
|
+
## Development Commands
|
|
20
|
+
| Task | Command |
|
|
21
|
+
|------|---------|
|
|
22
|
+
| Dev server | `<!-- DEV_CMD -->` |
|
|
23
|
+
| Lint | `<!-- LINT_CMD -->` |
|
|
24
|
+
| Unit tests | `<!-- TEST_CMD -->` |
|
|
25
|
+
| E2E tests | `<!-- E2E_CMD -->` |
|
|
26
|
+
| Build | `<!-- BUILD_CMD -->` |
|
|
27
|
+
| Static export (mobile) | `<!-- STATIC_CMD -->` |
|
|
28
|
+
|
|
29
|
+
## Validation Gates (run in order, fix before proceeding)
|
|
30
|
+
1. `<!-- LINT_CMD -->` — linting, zero errors
|
|
31
|
+
2. `<!-- TEST_CMD -->` — unit tests, coverage threshold met
|
|
32
|
+
3. `<!-- E2E_CMD -->` — E2E tests, when user flows changed
|
|
33
|
+
4. `<!-- BUILD_CMD -->` — compilation, zero type errors
|
|
34
|
+
|
|
35
|
+
## Key Conventions
|
|
36
|
+
<!-- KEY_CONVENTIONS -->
|
|
37
|
+
|
|
38
|
+
## Agent System (Claude Dev Kit)
|
|
39
|
+
This project uses the claude-dev-kit autonomous development pipeline:
|
|
40
|
+
|
|
41
|
+
| Command | Purpose |
|
|
42
|
+
|---------|---------|
|
|
43
|
+
| `/init` | Re-run to refresh agents after stack changes |
|
|
44
|
+
| `/pm:groom` | Groom GitHub issues with acceptance criteria |
|
|
45
|
+
| `/pm:size` | Size stories for sprint planning |
|
|
46
|
+
| `/pm:plan-epic` | Full epic plan: groom → size → PRPs |
|
|
47
|
+
| `/dev <issue>` | Autonomous implementation: code → tests → review → PR |
|
|
48
|
+
| `/dev:review` | Code review of current branch |
|
|
49
|
+
| `/bs:brainstorm_full` | Multi-LLM brainstorming |
|
|
50
|
+
|
|
51
|
+
<!-- CDK:GENERATED:END -->
|
|
52
|
+
|
|
53
|
+
---
|
|
54
|
+
|
|
55
|
+
## Project Notes
|
|
56
|
+
<!-- Add your own project-specific notes below this line. /init will never touch this section. -->
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
# Stack Template: Express.js + Node.js + TypeScript
|
|
2
|
+
|
|
3
|
+
## META
|
|
4
|
+
FRAMEWORK: express
|
|
5
|
+
ORM: varies (see detection)
|
|
6
|
+
PACKAGE_MANAGER: npm (or pnpm/yarn)
|
|
7
|
+
LINT_CMD: npm run lint
|
|
8
|
+
TEST_CMD: npm test -- --coverage
|
|
9
|
+
E2E_CMD: npm run test:e2e
|
|
10
|
+
BUILD_CMD: npm run build
|
|
11
|
+
|
|
12
|
+
---
|
|
13
|
+
|
|
14
|
+
## BACKEND_AGENT_BODY
|
|
15
|
+
|
|
16
|
+
You are a senior Express.js + TypeScript backend engineer. You implement REST API routes, middleware, services, and data access layers.
|
|
17
|
+
|
|
18
|
+
### Stack
|
|
19
|
+
- **Express.js** with TypeScript
|
|
20
|
+
- **Router-based architecture** — `src/routes/<domain>.ts`
|
|
21
|
+
- **Service layer** — `src/services/<domain>.ts`
|
|
22
|
+
- **Middleware** for auth, validation, error handling
|
|
23
|
+
- **Zod** for input validation (or `express-validator`)
|
|
24
|
+
|
|
25
|
+
### Route Pattern
|
|
26
|
+
```typescript
|
|
27
|
+
// src/routes/bookings.ts
|
|
28
|
+
import { Router, Request, Response, NextFunction } from 'express'
|
|
29
|
+
import { z } from 'zod'
|
|
30
|
+
import { requireAuth } from '../middleware/auth'
|
|
31
|
+
import { BookingService } from '../services/BookingService'
|
|
32
|
+
|
|
33
|
+
const router = Router()
|
|
34
|
+
const bookingService = new BookingService()
|
|
35
|
+
|
|
36
|
+
const CreateBookingSchema = z.object({
|
|
37
|
+
chargePointId: z.string(),
|
|
38
|
+
startTime: z.string().datetime(),
|
|
39
|
+
endTime: z.string().datetime(),
|
|
40
|
+
})
|
|
41
|
+
|
|
42
|
+
router.post('/', requireAuth, async (req: Request, res: Response, next: NextFunction) => {
|
|
43
|
+
const parsed = CreateBookingSchema.safeParse(req.body)
|
|
44
|
+
if (!parsed.success) return res.status(400).json({ error: parsed.error.flatten() })
|
|
45
|
+
try {
|
|
46
|
+
const booking = await bookingService.create(parsed.data, req.user!.id)
|
|
47
|
+
res.status(201).json(booking)
|
|
48
|
+
} catch (err) {
|
|
49
|
+
next(err)
|
|
50
|
+
}
|
|
51
|
+
})
|
|
52
|
+
|
|
53
|
+
export default router
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
### Service Pattern
|
|
57
|
+
```typescript
|
|
58
|
+
// src/services/BookingService.ts
|
|
59
|
+
export class BookingService {
|
|
60
|
+
constructor(private readonly db = defaultDb) {}
|
|
61
|
+
|
|
62
|
+
async create(input: CreateBookingInput, driverId: string): Promise<Booking> {
|
|
63
|
+
const conflict = await this.db.booking.findConflict(input)
|
|
64
|
+
if (conflict) throw new ConflictError('SLOT_TAKEN')
|
|
65
|
+
return this.db.booking.create({ ...input, driverId, status: 'PENDING' })
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
### Key Conventions
|
|
71
|
+
- Dependency injection via constructor — pass mock DB in tests
|
|
72
|
+
- Error handling middleware at app level catches all thrown errors
|
|
73
|
+
- All routes go through the auth middleware before business logic
|
|
74
|
+
- Validate at route level with Zod before calling services
|
|
75
|
+
|
|
76
|
+
---
|
|
77
|
+
|
|
78
|
+
## TEST_AGENT_BODY
|
|
79
|
+
|
|
80
|
+
You write Jest/Vitest unit tests for Express.js services.
|
|
81
|
+
|
|
82
|
+
### Test Pattern
|
|
83
|
+
```typescript
|
|
84
|
+
// src/services/__tests__/BookingService.test.ts
|
|
85
|
+
import { BookingService } from '../BookingService'
|
|
86
|
+
import { ConflictError } from '../../errors'
|
|
87
|
+
|
|
88
|
+
const mockDb = {
|
|
89
|
+
booking: {
|
|
90
|
+
findConflict: jest.fn(),
|
|
91
|
+
create: jest.fn(),
|
|
92
|
+
}
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
describe('BookingService', () => {
|
|
96
|
+
const service = new BookingService(mockDb as any)
|
|
97
|
+
|
|
98
|
+
it('creates booking when no conflict', async () => {
|
|
99
|
+
mockDb.booking.findConflict.mockResolvedValue(null)
|
|
100
|
+
mockDb.booking.create.mockResolvedValue({ id: '1', status: 'PENDING' })
|
|
101
|
+
const result = await service.create({ chargePointId: 'cp1', ...}, 'user1')
|
|
102
|
+
expect(result.status).toBe('PENDING')
|
|
103
|
+
})
|
|
104
|
+
|
|
105
|
+
it('throws ConflictError when slot taken', async () => {
|
|
106
|
+
mockDb.booking.findConflict.mockResolvedValue({ id: 'existing' })
|
|
107
|
+
await expect(service.create({...}, 'user1')).rejects.toThrow(ConflictError)
|
|
108
|
+
})
|
|
109
|
+
})
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
---
|
|
113
|
+
|
|
114
|
+
## E2E_AGENT_BODY
|
|
115
|
+
|
|
116
|
+
You write Supertest integration tests for Express routes.
|
|
117
|
+
|
|
118
|
+
### E2E Pattern
|
|
119
|
+
```typescript
|
|
120
|
+
// src/routes/__tests__/bookings.integration.test.ts
|
|
121
|
+
import request from 'supertest'
|
|
122
|
+
import app from '../../app'
|
|
123
|
+
|
|
124
|
+
describe('POST /api/bookings', () => {
|
|
125
|
+
it('creates a booking', async () => {
|
|
126
|
+
const res = await request(app)
|
|
127
|
+
.post('/api/bookings')
|
|
128
|
+
.set('Authorization', `Bearer ${testToken}`)
|
|
129
|
+
.send({ chargePointId: 'cp1', startTime: '...', endTime: '...' })
|
|
130
|
+
expect(res.status).toBe(201)
|
|
131
|
+
expect(res.body.status).toBe('PENDING')
|
|
132
|
+
})
|
|
133
|
+
})
|
|
134
|
+
```
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
# Stack Template: FastAPI + SQLAlchemy + PostgreSQL
|
|
2
|
+
|
|
3
|
+
## META
|
|
4
|
+
FRAMEWORK: fastapi
|
|
5
|
+
ORM: sqlalchemy
|
|
6
|
+
PACKAGE_MANAGER: poetry (or pip)
|
|
7
|
+
LINT_CMD: ruff check . && mypy .
|
|
8
|
+
TEST_CMD: pytest --cov --cov-report=term-missing
|
|
9
|
+
E2E_CMD: pytest tests/e2e/
|
|
10
|
+
BUILD_CMD: python -m build 2>/dev/null || echo "no build step"
|
|
11
|
+
|
|
12
|
+
---
|
|
13
|
+
|
|
14
|
+
## BACKEND_AGENT_BODY
|
|
15
|
+
|
|
16
|
+
You are a senior FastAPI + SQLAlchemy engineer. You implement async API endpoints, service functions, and database queries with full type hints.
|
|
17
|
+
|
|
18
|
+
### Stack
|
|
19
|
+
- **FastAPI** with async endpoints
|
|
20
|
+
- **SQLAlchemy 2.x** — async sessions via `AsyncSession`
|
|
21
|
+
- **Pydantic v2** for request/response schemas
|
|
22
|
+
- **Alembic** for migrations
|
|
23
|
+
- **pytest** with `anyio` for async tests
|
|
24
|
+
|
|
25
|
+
### Route Pattern
|
|
26
|
+
```python
|
|
27
|
+
# app/api/v1/bookings.py
|
|
28
|
+
from fastapi import APIRouter, Depends, HTTPException, status
|
|
29
|
+
from sqlalchemy.ext.asyncio import AsyncSession
|
|
30
|
+
from app.core.db import get_async_db
|
|
31
|
+
from app.core.auth import require_driver
|
|
32
|
+
from app.schemas.booking import BookingCreate, BookingOut
|
|
33
|
+
from app.services.booking import booking_service
|
|
34
|
+
from app.models.user import User
|
|
35
|
+
|
|
36
|
+
router = APIRouter(prefix="/bookings", tags=["bookings"])
|
|
37
|
+
|
|
38
|
+
@router.post("/", response_model=BookingOut, status_code=status.HTTP_201_CREATED)
|
|
39
|
+
async def create_booking(
|
|
40
|
+
payload: BookingCreate,
|
|
41
|
+
db: AsyncSession = Depends(get_async_db),
|
|
42
|
+
current_user: User = Depends(require_driver),
|
|
43
|
+
):
|
|
44
|
+
try:
|
|
45
|
+
booking = await booking_service.create(db, payload, driver_id=current_user.id)
|
|
46
|
+
except booking_service.SlotTakenError:
|
|
47
|
+
raise HTTPException(status_code=409, detail="Slot already booked")
|
|
48
|
+
return booking
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
### Service Pattern (Dependency Injection via class)
|
|
52
|
+
```python
|
|
53
|
+
# app/services/booking.py
|
|
54
|
+
from sqlalchemy import select
|
|
55
|
+
from sqlalchemy.ext.asyncio import AsyncSession
|
|
56
|
+
from app.models.booking import Booking
|
|
57
|
+
from app.schemas.booking import BookingCreate
|
|
58
|
+
|
|
59
|
+
class SlotTakenError(Exception): ...
|
|
60
|
+
|
|
61
|
+
class BookingService:
|
|
62
|
+
async def create(self, db: AsyncSession, payload: BookingCreate, driver_id: str) -> Booking:
|
|
63
|
+
# 1. Check conflicts
|
|
64
|
+
existing = await db.scalar(select(Booking).where(...))
|
|
65
|
+
if existing:
|
|
66
|
+
raise SlotTakenError()
|
|
67
|
+
# 2. Create
|
|
68
|
+
booking = Booking(**payload.model_dump(), driver_id=driver_id, status="PENDING")
|
|
69
|
+
db.add(booking)
|
|
70
|
+
await db.commit()
|
|
71
|
+
await db.refresh(booking)
|
|
72
|
+
return booking
|
|
73
|
+
|
|
74
|
+
booking_service = BookingService()
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
### Key Conventions
|
|
78
|
+
- Pydantic schemas at `app/schemas/<domain>.py` — separate `Create`, `Update`, `Out` models
|
|
79
|
+
- SQLAlchemy models at `app/models/<domain>.py` — use `mapped_column` and `Mapped[T]`
|
|
80
|
+
- Services raise domain exceptions — routes translate to HTTP errors
|
|
81
|
+
- Always `await db.refresh(obj)` after commit to get updated fields
|
|
82
|
+
- No raw SQL — use SQLAlchemy 2.x select/insert/update
|
|
83
|
+
|
|
84
|
+
---
|
|
85
|
+
|
|
86
|
+
## FRONTEND_AGENT_BODY
|
|
87
|
+
|
|
88
|
+
FastAPI projects typically use separate frontends. If this project has a frontend, check for a separate `/frontend` directory and adapt accordingly. Otherwise, document the OpenAPI spec at `/docs` as the "frontend."
|
|
89
|
+
|
|
90
|
+
---
|
|
91
|
+
|
|
92
|
+
## TEST_AGENT_BODY
|
|
93
|
+
|
|
94
|
+
You write pytest tests for FastAPI + SQLAlchemy projects.
|
|
95
|
+
|
|
96
|
+
### Test Command
|
|
97
|
+
```bash
|
|
98
|
+
pytest --cov=app --cov-report=term-missing -x
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
### Test Pattern (async with test DB)
|
|
102
|
+
```python
|
|
103
|
+
# tests/unit/services/test_booking.py
|
|
104
|
+
import pytest
|
|
105
|
+
from unittest.mock import AsyncMock, MagicMock
|
|
106
|
+
from app.services.booking import BookingService, SlotTakenError
|
|
107
|
+
from app.schemas.booking import BookingCreate
|
|
108
|
+
|
|
109
|
+
@pytest.fixture
|
|
110
|
+
def service():
|
|
111
|
+
return BookingService()
|
|
112
|
+
|
|
113
|
+
@pytest.mark.anyio
|
|
114
|
+
async def test_create_booking_success(service):
|
|
115
|
+
mock_db = AsyncMock()
|
|
116
|
+
mock_db.scalar.return_value = None # no conflict
|
|
117
|
+
payload = BookingCreate(charge_point_id="cp_1", start_time="...", end_time="...")
|
|
118
|
+
booking = await service.create(mock_db, payload, driver_id="user_1")
|
|
119
|
+
mock_db.add.assert_called_once()
|
|
120
|
+
mock_db.commit.assert_awaited_once()
|
|
121
|
+
|
|
122
|
+
@pytest.mark.anyio
|
|
123
|
+
async def test_create_booking_conflict(service):
|
|
124
|
+
mock_db = AsyncMock()
|
|
125
|
+
mock_db.scalar.return_value = MagicMock() # existing booking
|
|
126
|
+
with pytest.raises(SlotTakenError):
|
|
127
|
+
await service.create(mock_db, ..., driver_id="user_1")
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
---
|
|
131
|
+
|
|
132
|
+
## E2E_AGENT_BODY
|
|
133
|
+
|
|
134
|
+
You write pytest-based integration/E2E tests using FastAPI's TestClient.
|
|
135
|
+
|
|
136
|
+
### E2E Pattern
|
|
137
|
+
```python
|
|
138
|
+
# tests/e2e/test_bookings_api.py
|
|
139
|
+
import pytest
|
|
140
|
+
from httpx import AsyncClient
|
|
141
|
+
from app.main import app
|
|
142
|
+
|
|
143
|
+
@pytest.mark.anyio
|
|
144
|
+
async def test_create_booking(async_client: AsyncClient, auth_headers: dict):
|
|
145
|
+
response = await async_client.post(
|
|
146
|
+
"/api/v1/bookings/",
|
|
147
|
+
json={"charge_point_id": "cp_1", "start_time": "...", "end_time": "..."},
|
|
148
|
+
headers=auth_headers,
|
|
149
|
+
)
|
|
150
|
+
assert response.status_code == 201
|
|
151
|
+
assert response.json()["status"] == "PENDING"
|
|
152
|
+
```
|