ccsetup 1.1.1 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +144 -342
- package/bin/create-project.js +1246 -90
- package/bin/lib/claudeInterface.js +209 -0
- package/lib/aiAgentSelector.js +155 -0
- package/lib/templates/README.md +176 -0
- package/lib/templates/catalog.js +230 -0
- package/lib/templates/filter.js +257 -0
- package/lib/templates/index.js +45 -0
- package/lib/templates/metadata/agents.json +413 -0
- package/lib/templates/metadata-extractor.js +329 -0
- package/lib/templates/search.js +356 -0
- package/package.json +13 -5
- package/template/{agents → .claude/agents}/checker.md +29 -0
- package/template/.claude/settings.json +32 -0
- package/template/.claude/skills/codex-review/SKILL.md +139 -0
- package/template/.claude/skills/prd/SKILL.md +343 -0
- package/template/.claude/skills/ralph/SKILL.md +339 -0
- package/template/.claude/skills/secops/SKILL.md +259 -0
- package/template/.codex/skills/codex-review/SKILL.md +139 -0
- package/template/.codex/skills/prd/SKILL.md +343 -0
- package/template/.codex/skills/ralph/SKILL.md +339 -0
- package/template/AGENTS.md +43 -0
- package/template/CLAUDE.md +141 -21
- package/template/CONTRIBUTING.md +37 -0
- package/template/agents/README.md +15 -171
- package/template/docs/ROADMAP.md +0 -36
- package/template/docs/agent-orchestration.md +24 -141
- package/template/docs/codex-setup.md +32 -0
- package/template/hooks/codex-review/index.js +105 -0
- package/template/hooks/workflow-selector/index.js +398 -0
- package/template/scripts/codex-review/codex-review.sh +266 -0
- package/template/scripts/ralph/CLAUDE.md +174 -0
- package/template/scripts/ralph/CODEX.md +76 -0
- package/template/scripts/ralph/ralph.sh +150 -0
- package/template/tickets/ticket-list.md +17 -68
- package/template/agents/ai-engineer.md +0 -31
- package/template/agents/api-documenter.md +0 -31
- package/template/agents/architect-review.md +0 -42
- package/template/agents/backend-architect.md +0 -29
- package/template/agents/business-analyst.md +0 -34
- package/template/agents/c-pro.md +0 -34
- package/template/agents/cloud-architect.md +0 -31
- package/template/agents/code-reviewer.md +0 -28
- package/template/agents/content-marketer.md +0 -34
- package/template/agents/context-manager.md +0 -63
- package/template/agents/cpp-pro.md +0 -37
- package/template/agents/customer-support.md +0 -34
- package/template/agents/data-engineer.md +0 -31
- package/template/agents/data-scientist.md +0 -28
- package/template/agents/database-admin.md +0 -31
- package/template/agents/database-optimizer.md +0 -31
- package/template/agents/debugger.md +0 -29
- package/template/agents/deployment-engineer.md +0 -31
- package/template/agents/devops-troubleshooter.md +0 -31
- package/template/agents/dx-optimizer.md +0 -62
- package/template/agents/error-detective.md +0 -31
- package/template/agents/frontend-developer.md +0 -30
- package/template/agents/golang-pro.md +0 -31
- package/template/agents/graphql-architect.md +0 -31
- package/template/agents/incident-responder.md +0 -73
- package/template/agents/javascript-pro.md +0 -34
- package/template/agents/legacy-modernizer.md +0 -31
- package/template/agents/ml-engineer.md +0 -31
- package/template/agents/mlops-engineer.md +0 -56
- package/template/agents/mobile-developer.md +0 -31
- package/template/agents/network-engineer.md +0 -31
- package/template/agents/payment-integration.md +0 -31
- package/template/agents/performance-engineer.md +0 -31
- package/template/agents/prompt-engineer.md +0 -58
- package/template/agents/python-pro.md +0 -31
- package/template/agents/quant-analyst.md +0 -31
- package/template/agents/risk-manager.md +0 -40
- package/template/agents/rust-pro.md +0 -34
- package/template/agents/sales-automator.md +0 -34
- package/template/agents/search-specialist.md +0 -58
- package/template/agents/security-auditor.md +0 -31
- package/template/agents/sql-pro.md +0 -34
- package/template/agents/terraform-specialist.md +0 -34
- package/template/agents/test-automator.md +0 -31
- /package/template/{agents → .claude/agents}/backend.md +0 -0
- /package/template/{agents → .claude/agents}/blockchain.md +0 -0
- /package/template/{agents → .claude/agents}/coder.md +0 -0
- /package/template/{agents → .claude/agents}/frontend.md +0 -0
- /package/template/{agents → .claude/agents}/planner.md +0 -0
- /package/template/{agents → .claude/agents}/researcher.md +0 -0
- /package/template/{agents → .claude/agents}/shadcn.md +0 -0
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "ccsetup",
|
|
3
|
-
"version": "1.1.1",
|
|
4
|
-
"description": "
|
|
3
|
+
"version": "1.2.1",
|
|
4
|
+
"description": "Interactive setup for Claude Code projects with smart context scanning, merge strategies, 8 core agents, and orchestration workflows",
|
|
5
5
|
"bin": {
|
|
6
6
|
"ccsetup": "bin/create-project.js"
|
|
7
7
|
},
|
|
@@ -26,6 +26,7 @@
|
|
|
26
26
|
"homepage": "https://github.com/MrMarciaOng/ccsetup#readme",
|
|
27
27
|
"files": [
|
|
28
28
|
"bin/",
|
|
29
|
+
"lib/",
|
|
29
30
|
"template/",
|
|
30
31
|
"README.md"
|
|
31
32
|
],
|
|
@@ -34,14 +35,21 @@
|
|
|
34
35
|
},
|
|
35
36
|
"dependencies": {
|
|
36
37
|
"@inquirer/checkbox": "^4.0.4",
|
|
37
|
-
"@inquirer/select": "^4.0.4"
|
|
38
|
+
"@inquirer/select": "^4.0.4",
|
|
39
|
+
"@inquirer/confirm": "^5.0.0",
|
|
40
|
+
"@inquirer/prompts": "^7.0.0",
|
|
41
|
+
"clipboardy": "^4.0.0",
|
|
42
|
+
"minimatch": "^9.0.0"
|
|
38
43
|
},
|
|
39
44
|
"devDependencies": {
|
|
40
45
|
"jest": "^30.0.5"
|
|
41
46
|
},
|
|
42
47
|
"scripts": {
|
|
43
|
-
"test": "jest",
|
|
48
|
+
"test": "jest --testPathIgnorePatterns integration",
|
|
49
|
+
"test:integration": "jest --testPathPatterns integration",
|
|
44
50
|
"test:watch": "jest --watch",
|
|
45
|
-
"test:coverage": "jest --coverage"
|
|
51
|
+
"test:coverage": "jest --coverage",
|
|
52
|
+
"metadata:generate": "node scripts/generate-metadata.js",
|
|
53
|
+
"catalog:test": "node scripts/test-catalog.js"
|
|
46
54
|
}
|
|
47
55
|
}
|
|
@@ -66,4 +66,33 @@ Structure your findings as:
|
|
|
66
66
|
4. Document all findings with clear reproduction steps
|
|
67
67
|
5. Verify fixes and re-test as needed
|
|
68
68
|
|
|
69
|
+
## Test File Management:
|
|
70
|
+
### CRITICAL: Working Directory Rules
|
|
71
|
+
- **ALWAYS** create test files only within the project directory
|
|
72
|
+
- **NEVER** use absolute paths outside the project (e.g., /tmp, /var, ~/)
|
|
73
|
+
- **ALWAYS** use relative paths from the project root
|
|
74
|
+
- Create test files in appropriate subdirectories:
|
|
75
|
+
- `__test__/` or `__tests__/` for test files
|
|
76
|
+
- `test/` for test utilities and fixtures
|
|
77
|
+
- `spec/` for specification tests
|
|
78
|
+
- Project-specific test directories as defined
|
|
79
|
+
|
|
80
|
+
### Test File Guidelines:
|
|
81
|
+
1. **Location**: Place test files adjacent to the code being tested or in designated test directories
|
|
82
|
+
2. **Naming**: Follow project conventions (e.g., `*.test.js`, `*.spec.ts`, `test_*.py`)
|
|
83
|
+
3. **Structure**: Mirror the source code structure in test directories
|
|
84
|
+
4. **Cleanup**: Ensure test files don't pollute the project with temporary data
|
|
85
|
+
5. **Isolation**: Tests should be self-contained and not depend on external paths
|
|
86
|
+
|
|
87
|
+
### Example Test File Creation:
|
|
88
|
+
```bash
|
|
89
|
+
# GOOD - Project relative paths
|
|
90
|
+
mkdir -p __test__/unit
|
|
91
|
+
echo "test content" > __test__/unit/example.test.js
|
|
92
|
+
|
|
93
|
+
# BAD - External paths (NEVER DO THIS)
|
|
94
|
+
# echo "test" > /tmp/test.js
|
|
95
|
+
# mkdir ~/test-files
|
|
96
|
+
```
|
|
97
|
+
|
|
69
98
|
Be thorough but practical - focus on issues that impact functionality, security, or maintainability.
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
{
|
|
2
|
+
"permissions": {
|
|
3
|
+
"allow": [
|
|
4
|
+
"Bash(osv-scanner scan:*)",
|
|
5
|
+
"Bash(osv-scanner:*)"
|
|
6
|
+
]
|
|
7
|
+
},
|
|
8
|
+
"hooks": {
|
|
9
|
+
"UserPromptSubmit": [
|
|
10
|
+
{
|
|
11
|
+
"matcher": ".*",
|
|
12
|
+
"hooks": [
|
|
13
|
+
{
|
|
14
|
+
"type": "command",
|
|
15
|
+
"command": "node $CLAUDE_PROJECT_DIR/.claude/hooks/workflow-selector/index.js"
|
|
16
|
+
}
|
|
17
|
+
]
|
|
18
|
+
}
|
|
19
|
+
],
|
|
20
|
+
"Stop": [
|
|
21
|
+
{
|
|
22
|
+
"matcher": ".*",
|
|
23
|
+
"hooks": [
|
|
24
|
+
{
|
|
25
|
+
"type": "command",
|
|
26
|
+
"command": "node $CLAUDE_PROJECT_DIR/.claude/hooks/codex-review/index.js"
|
|
27
|
+
}
|
|
28
|
+
]
|
|
29
|
+
}
|
|
30
|
+
]
|
|
31
|
+
}
|
|
32
|
+
}
|
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: codex-review
|
|
3
|
+
description: "Get a second-opinion review from Codex CLI. Auto-detects: plan review, implementation review (plan + code changes), or code review (just changes). Runs up to 3 feedback iterations. Triggers on: codex review, second opinion, review this plan, review my code, review implementation, validate changes, codex feedback, code review."
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Codex Review — Plan, Implementation, and Code Review
|
|
7
|
+
|
|
8
|
+
Get a review from OpenAI's Codex CLI. The script auto-detects what to review based on context:
|
|
9
|
+
|
|
10
|
+
- **Plan review** — when a plan file is provided and no git changes exist
|
|
11
|
+
- **Implementation review** — when a plan file is provided and git changes exist (validates code against the plan)
|
|
12
|
+
- **Code review** — when no plan file is provided but git changes exist
|
|
13
|
+
|
|
14
|
+
Iterates up to 3 times, refining based on feedback.
|
|
15
|
+
|
|
16
|
+
---
|
|
17
|
+
|
|
18
|
+
## The Job
|
|
19
|
+
|
|
20
|
+
1. Determine what to review based on user intent and context
|
|
21
|
+
2. Find a plan file if needed (or skip for pure code review)
|
|
22
|
+
3. Call the review script
|
|
23
|
+
4. Present feedback, iterate up to 3 times
|
|
24
|
+
|
|
25
|
+
**Important:** This skill requires the `codex` CLI to be installed (`npm install -g @openai/codex`) and an OpenAI API key configured.
|
|
26
|
+
|
|
27
|
+
---
|
|
28
|
+
|
|
29
|
+
## Step 1: Determine Review Type
|
|
30
|
+
|
|
31
|
+
Based on user intent:
|
|
32
|
+
- User says "review this plan", "second opinion" → find the plan file, pass it to the script
|
|
33
|
+
- User says "review my implementation", "validate changes", "does this match the plan" → **find the plan file and pass it to the script** (the script auto-includes git diff when changes exist, producing an implementation review)
|
|
34
|
+
- User says "review my code", "code review" → no plan file needed, run the script with no arguments
|
|
35
|
+
|
|
36
|
+
**Important:** For implementation reviews, you MUST pass the plan file path as an argument. The script uses it to compare the plan against the git diff. Without the plan file, you get a standalone code review instead.
|
|
37
|
+
|
|
38
|
+
If ambiguous, check:
|
|
39
|
+
1. Is there a recent plan file in `plans/` or `*plan*.md`?
|
|
40
|
+
2. Are there git changes (`git diff HEAD`)?
|
|
41
|
+
3. If a plan file exists and git changes exist, pass the plan file — the script auto-detects implementation review mode
|
|
42
|
+
4. If unsure, ask the user
|
|
43
|
+
|
|
44
|
+
---
|
|
45
|
+
|
|
46
|
+
## Step 2: Find the Plan (if needed)
|
|
47
|
+
|
|
48
|
+
Skip this step for pure code reviews (no plan context).
|
|
49
|
+
|
|
50
|
+
If the user provides a path argument, use that file.
|
|
51
|
+
|
|
52
|
+
Otherwise, find the most recently modified plan file:
|
|
53
|
+
1. Use Glob to search for `plans/**/*.md` and `*plan*.md`
|
|
54
|
+
2. Sort by modification time (most recent first)
|
|
55
|
+
3. Use the most recent file
|
|
56
|
+
|
|
57
|
+
If no plan file is found and one is needed, ask the user which file to review.
|
|
58
|
+
|
|
59
|
+
---
|
|
60
|
+
|
|
61
|
+
## Step 3: Review Loop (max 3 iterations)
|
|
62
|
+
|
|
63
|
+
For each iteration:
|
|
64
|
+
|
|
65
|
+
### 3a. Get Review
|
|
66
|
+
|
|
67
|
+
Run the review script using the Bash tool:
|
|
68
|
+
|
|
69
|
+
```bash
|
|
70
|
+
# With a plan file (plan review or implementation review — auto-detected)
|
|
71
|
+
bash scripts/codex-review/codex-review.sh <plan-file-path>
|
|
72
|
+
|
|
73
|
+
# Without a plan file (code review of git changes)
|
|
74
|
+
bash scripts/codex-review/codex-review.sh
|
|
75
|
+
|
|
76
|
+
# Override model
|
|
77
|
+
bash scripts/codex-review/codex-review.sh [plan-file] --model o3-mini
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### 3b. Present Feedback
|
|
81
|
+
|
|
82
|
+
Show the user the review output with an iteration counter:
|
|
83
|
+
|
|
84
|
+
```
|
|
85
|
+
## Codex Review (Iteration 1/3)
|
|
86
|
+
|
|
87
|
+
[review output]
|
|
88
|
+
|
|
89
|
+
---
|
|
90
|
+
Would you like me to update the [plan/code] based on this feedback and run another review?
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
### 3c. Apply Changes
|
|
94
|
+
|
|
95
|
+
If the user wants to continue:
|
|
96
|
+
|
|
97
|
+
**For plan reviews:** Edit the plan file based on feedback, then re-review.
|
|
98
|
+
|
|
99
|
+
**For implementation/code reviews:** Fix the code based on feedback, then re-review (the git diff changes between iterations as code is updated).
|
|
100
|
+
|
|
101
|
+
If the user is satisfied, stop iterating.
|
|
102
|
+
|
|
103
|
+
---
|
|
104
|
+
|
|
105
|
+
## Step 4: Final Summary
|
|
106
|
+
|
|
107
|
+
After all iterations (or when the user stops):
|
|
108
|
+
|
|
109
|
+
```
|
|
110
|
+
## Review Complete (N/3 iterations)
|
|
111
|
+
|
|
112
|
+
### Changes Made
|
|
113
|
+
- [bullet list of improvements applied]
|
|
114
|
+
|
|
115
|
+
### Remaining Suggestions (not applied)
|
|
116
|
+
- [any suggestions the user chose to skip]
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
---
|
|
120
|
+
|
|
121
|
+
## Error Handling
|
|
122
|
+
|
|
123
|
+
Handle script exit codes:
|
|
124
|
+
- **Exit 1** — codex CLI not installed: "Install Codex CLI with `npm install -g @openai/codex`"
|
|
125
|
+
- **Exit 1** — nothing to review: "No plan file or git changes found. Provide a plan file or make some code changes first."
|
|
126
|
+
- **Exit 2** — Auth error: "Check your OpenAI API key configuration"
|
|
127
|
+
- **Exit 3** — Timeout: "Review timed out. Try a shorter plan or run again"
|
|
128
|
+
|
|
129
|
+
---
|
|
130
|
+
|
|
131
|
+
## Checklist
|
|
132
|
+
|
|
133
|
+
Before running:
|
|
134
|
+
- [ ] If reviewing a plan: plan file exists and has content
|
|
135
|
+
- [ ] If reviewing code: there are git changes to review
|
|
136
|
+
- [ ] `codex` CLI is available (the script checks this)
|
|
137
|
+
- [ ] Present iteration count clearly (1/3, 2/3, 3/3)
|
|
138
|
+
- [ ] After each iteration, ask user before continuing
|
|
139
|
+
- [ ] Stop after 3 iterations or user satisfaction
|
|
@@ -0,0 +1,343 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: prd
|
|
3
|
+
description: "Generate a Product Requirements Document (PRD) for a new feature. Use when planning a feature, starting a new project, or when asked to create a PRD. Triggers on: create a prd, write prd for, plan this feature, requirements for, spec out."
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# PRD Generator
|
|
7
|
+
|
|
8
|
+
Create detailed Product Requirements Documents that are clear, actionable, and grounded in the actual codebase.
|
|
9
|
+
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## The Job
|
|
13
|
+
|
|
14
|
+
1. **Scan the codebase** to understand tech stack, architecture, and existing patterns
|
|
15
|
+
2. Receive a feature description from the user
|
|
16
|
+
3. Ask 3-5 **codebase-informed** clarifying questions (with lettered options)
|
|
17
|
+
4. Generate a structured PRD that references real files, components, and patterns
|
|
18
|
+
5. Save to `tasks/prd-[feature-name].md`
|
|
19
|
+
|
|
20
|
+
**Important:** Do NOT start implementing. Just create the PRD.
|
|
21
|
+
|
|
22
|
+
---
|
|
23
|
+
|
|
24
|
+
## Step 1: Codebase Reconnaissance
|
|
25
|
+
|
|
26
|
+
Before asking the user anything, silently scan the project using Claude Code tools:
|
|
27
|
+
|
|
28
|
+
### Detect Tech Stack
|
|
29
|
+
Use Glob and Read to find and inspect:
|
|
30
|
+
- `package.json`, `tsconfig.json`, `next.config.*` (Node/TypeScript/Next.js)
|
|
31
|
+
- `requirements.txt`, `pyproject.toml`, `setup.py` (Python)
|
|
32
|
+
- `go.mod` (Go)
|
|
33
|
+
- `Cargo.toml` (Rust)
|
|
34
|
+
- `Gemfile` (Ruby)
|
|
35
|
+
- `composer.json` (PHP)
|
|
36
|
+
|
|
37
|
+
Record: language, framework, package manager, key dependencies.
|
|
38
|
+
|
|
39
|
+
### Detect Quality Checks
|
|
40
|
+
From the config files found above, identify:
|
|
41
|
+
- **Typecheck**: `tsconfig.json` exists → "Typecheck passes"
|
|
42
|
+
- **Linter**: `eslint`, `biome`, `ruff`, `golangci-lint` in deps or config → "Lint passes"
|
|
43
|
+
- **Test framework**: `jest`, `vitest`, `pytest`, `go test` → "Tests pass"
|
|
44
|
+
- **Build**: check `scripts` in package.json for `build`, `check`, etc.
|
|
45
|
+
|
|
46
|
+
These become the **default quality criteria** appended to every user story.
|
|
47
|
+
|
|
48
|
+
### Scan Architecture
|
|
49
|
+
Use Glob to map the project structure:
|
|
50
|
+
- `src/**/*.{ts,tsx,js,jsx,py,go,rs}` — source file layout
|
|
51
|
+
- `**/schema.prisma`, `**/migrations/**`, `**/models/**` — database layer
|
|
52
|
+
- `**/api/**`, `**/routes/**`, `**/app/**/route.*` — API endpoints
|
|
53
|
+
- `**/components/**` — UI components
|
|
54
|
+
- `**/hooks/**`, `**/utils/**`, `**/lib/**` — shared utilities
|
|
55
|
+
|
|
56
|
+
### Read Project Context
|
|
57
|
+
- Read `CLAUDE.md` (root and any nested) for project instructions and conventions
|
|
58
|
+
- Read existing PRDs in `tasks/prd-*.md` for format consistency
|
|
59
|
+
- Read `docs/ROADMAP.md` if it exists for current priorities
|
|
60
|
+
|
|
61
|
+
### Build Context Summary
|
|
62
|
+
Compile findings into an internal context block (do not show to user):
|
|
63
|
+
```
|
|
64
|
+
Tech: [framework] + [language] + [key deps]
|
|
65
|
+
Quality: [typecheck] [lint] [test] [build commands]
|
|
66
|
+
DB: [ORM/schema location]
|
|
67
|
+
API: [routing pattern, e.g., "Next.js App Router at app/api/"]
|
|
68
|
+
UI: [component library, e.g., "shadcn/ui in src/components/ui/"]
|
|
69
|
+
Existing patterns: [key files and conventions discovered]
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
---
|
|
73
|
+
|
|
74
|
+
## Step 2: Clarifying Questions
|
|
75
|
+
|
|
76
|
+
Ask only critical questions where the initial prompt is ambiguous. **Tailor questions to what you found in the codebase** — don't ask about tech stack if you already know it.
|
|
77
|
+
|
|
78
|
+
Focus on:
|
|
79
|
+
|
|
80
|
+
- **Problem/Goal:** What problem does this solve?
|
|
81
|
+
- **Core Functionality:** What are the key actions?
|
|
82
|
+
- **Scope/Boundaries:** What should it NOT do?
|
|
83
|
+
- **Success Criteria:** How do we know it's done?
|
|
84
|
+
|
|
85
|
+
### Format Questions Like This:
|
|
86
|
+
|
|
87
|
+
```
|
|
88
|
+
1. What is the primary goal of this feature?
|
|
89
|
+
A. Improve user onboarding experience
|
|
90
|
+
B. Increase user retention
|
|
91
|
+
C. Reduce support burden
|
|
92
|
+
D. Other: [please specify]
|
|
93
|
+
|
|
94
|
+
2. Who is the target user?
|
|
95
|
+
A. New users only
|
|
96
|
+
B. Existing users only
|
|
97
|
+
C. All users
|
|
98
|
+
D. Admin users only
|
|
99
|
+
|
|
100
|
+
3. What is the scope?
|
|
101
|
+
A. Minimal viable version
|
|
102
|
+
B. Full-featured implementation
|
|
103
|
+
C. Just the backend/API
|
|
104
|
+
D. Just the UI
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
This lets users respond with "1A, 2C, 3B" for quick iteration.
|
|
108
|
+
|
|
109
|
+
### Codebase-Informed Questions
|
|
110
|
+
|
|
111
|
+
If your scan revealed relevant context, weave it into the questions:
|
|
112
|
+
|
|
113
|
+
- Found a database schema → "I see you have a `users` table with [fields]. Should this feature extend that table or create a new one?"
|
|
114
|
+
- Found existing components → "You already have a `DataTable` component in `src/components/ui/`. Should this feature reuse it?"
|
|
115
|
+
- Found API patterns → "Your API routes follow [pattern]. Should this feature add new routes under the same structure?"
|
|
116
|
+
|
|
117
|
+
Skip questions the codebase already answers. If the tech stack is clear, don't ask "What framework are you using?"
|
|
118
|
+
|
|
119
|
+
---
|
|
120
|
+
|
|
121
|
+
## Step 3: PRD Structure
|
|
122
|
+
|
|
123
|
+
Generate the PRD with these sections:
|
|
124
|
+
|
|
125
|
+
### 1. Introduction/Overview
|
|
126
|
+
Brief description of the feature and the problem it solves.
|
|
127
|
+
|
|
128
|
+
### 2. Tech Context
|
|
129
|
+
Auto-generated from your codebase scan. Include:
|
|
130
|
+
- **Stack:** Language, framework, key dependencies
|
|
131
|
+
- **Quality gates:** Which checks apply (typecheck, lint, test, build)
|
|
132
|
+
- **Relevant existing code:** File paths and patterns the feature should build on
|
|
133
|
+
|
|
134
|
+
This section helps implementers (human or AI) understand the codebase without re-scanning.
|
|
135
|
+
|
|
136
|
+
### 3. Goals
|
|
137
|
+
Specific, measurable objectives (bullet list).
|
|
138
|
+
|
|
139
|
+
### 4. User Stories
|
|
140
|
+
Each story needs:
|
|
141
|
+
- **Title:** Short descriptive name
|
|
142
|
+
- **Description:** "As a [user], I want [feature] so that [benefit]"
|
|
143
|
+
- **Acceptance Criteria:** Verifiable checklist of what "done" means
|
|
144
|
+
|
|
145
|
+
Each story should be small enough to implement in one focused session.
|
|
146
|
+
|
|
147
|
+
**Format:**
|
|
148
|
+
```markdown
|
|
149
|
+
### US-001: [Title]
|
|
150
|
+
**Description:** As a [user], I want [feature] so that [benefit].
|
|
151
|
+
|
|
152
|
+
**Acceptance Criteria:**
|
|
153
|
+
- [ ] Specific verifiable criterion
|
|
154
|
+
- [ ] Another criterion
|
|
155
|
+
- [ ] {auto-detected quality checks from Step 1}
|
|
156
|
+
- [ ] **[UI stories only]** Verify in browser using dev-browser skill
|
|
157
|
+
```
|
|
158
|
+
|
|
159
|
+
**Important:**
|
|
160
|
+
- Acceptance criteria must be verifiable, not vague. "Works correctly" is bad. "Button shows confirmation dialog before deleting" is good.
|
|
161
|
+
- **For any story with UI changes:** Always include "Verify in browser using dev-browser skill" as acceptance criteria.
|
|
162
|
+
- **Quality criteria are auto-appended** based on Step 1 detection. Do not hardcode "Typecheck passes" — use whatever the project actually has.
|
|
163
|
+
- **Reference real file paths** when a story modifies or extends existing code (e.g., "Add column to schema in `prisma/schema.prisma`").
|
|
164
|
+
|
|
165
|
+
### 5. Functional Requirements
|
|
166
|
+
Numbered list of specific functionalities:
|
|
167
|
+
- "FR-1: The system must allow users to..."
|
|
168
|
+
- "FR-2: When a user clicks X, the system must..."
|
|
169
|
+
|
|
170
|
+
Be explicit and unambiguous.
|
|
171
|
+
|
|
172
|
+
### 6. Non-Goals (Out of Scope)
|
|
173
|
+
What this feature will NOT include. Critical for managing scope.
|
|
174
|
+
|
|
175
|
+
### 7. Design Considerations (Optional)
|
|
176
|
+
- UI/UX requirements
|
|
177
|
+
- Link to mockups if available
|
|
178
|
+
- **Existing components to reuse** (reference actual paths found in scan, e.g., "`src/components/ui/DataTable.tsx`")
|
|
179
|
+
|
|
180
|
+
### 8. Technical Considerations
|
|
181
|
+
- Known constraints or dependencies
|
|
182
|
+
- Integration points with existing systems (reference actual files/modules)
|
|
183
|
+
- Performance requirements
|
|
184
|
+
- **Dependency order:** Which stories must be completed before others (schema → backend → UI)
|
|
185
|
+
|
|
186
|
+
### 9. Success Metrics
|
|
187
|
+
How will success be measured?
|
|
188
|
+
- "Reduce time to complete X by 50%"
|
|
189
|
+
- "Increase conversion rate by 10%"
|
|
190
|
+
|
|
191
|
+
### 10. Open Questions
|
|
192
|
+
Remaining questions or areas needing clarification.
|
|
193
|
+
|
|
194
|
+
---
|
|
195
|
+
|
|
196
|
+
## Writing for AI Agents and Junior Developers
|
|
197
|
+
|
|
198
|
+
The PRD reader may be a junior developer or an autonomous AI agent (like Ralph). Therefore:
|
|
199
|
+
|
|
200
|
+
- Be explicit and unambiguous
|
|
201
|
+
- Reference actual file paths, not abstract descriptions
|
|
202
|
+
- Provide enough detail to understand purpose and core logic
|
|
203
|
+
- Number requirements for easy reference
|
|
204
|
+
- Use concrete examples where helpful
|
|
205
|
+
- Include the quality gates the project actually uses, not generic ones
|
|
206
|
+
|
|
207
|
+
---
|
|
208
|
+
|
|
209
|
+
## Output
|
|
210
|
+
|
|
211
|
+
- **Format:** Markdown (`.md`)
|
|
212
|
+
- **Location:** `tasks/`
|
|
213
|
+
- **Filename:** `prd-[feature-name].md` (kebab-case)
|
|
214
|
+
|
|
215
|
+
---
|
|
216
|
+
|
|
217
|
+
## Example PRD
|
|
218
|
+
|
|
219
|
+
```markdown
|
|
220
|
+
# PRD: Task Priority System
|
|
221
|
+
|
|
222
|
+
## Introduction
|
|
223
|
+
|
|
224
|
+
Add priority levels to tasks so users can focus on what matters most. Tasks can be marked as high, medium, or low priority, with visual indicators and filtering to help users manage their workload effectively.
|
|
225
|
+
|
|
226
|
+
## Tech Context
|
|
227
|
+
|
|
228
|
+
- **Stack:** Next.js 14 (App Router) + TypeScript + Tailwind CSS
|
|
229
|
+
- **DB:** Prisma with PostgreSQL (`prisma/schema.prisma`)
|
|
230
|
+
- **UI:** shadcn/ui components in `src/components/ui/`
|
|
231
|
+
- **Quality gates:** Typecheck (`tsc --noEmit`), Lint (`eslint`), Tests (`vitest`)
|
|
232
|
+
- **Relevant code:**
|
|
233
|
+
- Task model: `prisma/schema.prisma` (Task table)
|
|
234
|
+
- Task list component: `src/components/TaskList.tsx`
|
|
235
|
+
- Task card component: `src/components/TaskCard.tsx`
|
|
236
|
+
- Existing badge component: `src/components/ui/Badge.tsx`
|
|
237
|
+
- API routes: `src/app/api/tasks/`
|
|
238
|
+
|
|
239
|
+
## Goals
|
|
240
|
+
|
|
241
|
+
- Allow assigning priority (high/medium/low) to any task
|
|
242
|
+
- Provide clear visual differentiation between priority levels
|
|
243
|
+
- Enable filtering and sorting by priority
|
|
244
|
+
- Default new tasks to medium priority
|
|
245
|
+
|
|
246
|
+
## User Stories
|
|
247
|
+
|
|
248
|
+
### US-001: Add priority field to database
|
|
249
|
+
**Description:** As a developer, I need to store task priority so it persists across sessions.
|
|
250
|
+
|
|
251
|
+
**Acceptance Criteria:**
|
|
252
|
+
- [ ] Add `priority` enum ('high' | 'medium' | 'low', default 'medium') to Task model in `prisma/schema.prisma`
|
|
253
|
+
- [ ] Generate and run migration successfully
|
|
254
|
+
- [ ] Typecheck passes
|
|
255
|
+
- [ ] Lint passes
|
|
256
|
+
|
|
257
|
+
### US-002: Display priority indicator on task cards
|
|
258
|
+
**Description:** As a user, I want to see task priority at a glance so I know what needs attention first.
|
|
259
|
+
|
|
260
|
+
**Acceptance Criteria:**
|
|
261
|
+
- [ ] Extend `src/components/TaskCard.tsx` to show priority using existing `Badge` component from `src/components/ui/Badge.tsx`
|
|
262
|
+
- [ ] Badge colors: red=high, yellow=medium, gray=low
|
|
263
|
+
- [ ] Priority visible without hovering or clicking
|
|
264
|
+
- [ ] Typecheck passes
|
|
265
|
+
- [ ] Lint passes
|
|
266
|
+
- [ ] Verify in browser using dev-browser skill
|
|
267
|
+
|
|
268
|
+
### US-003: Add priority selector to task edit
|
|
269
|
+
**Description:** As a user, I want to change a task's priority when editing it.
|
|
270
|
+
|
|
271
|
+
**Acceptance Criteria:**
|
|
272
|
+
- [ ] Priority dropdown in task edit modal using shadcn `Select` component
|
|
273
|
+
- [ ] Shows current priority as selected
|
|
274
|
+
- [ ] Saves via existing PATCH `/api/tasks/[id]` route
|
|
275
|
+
- [ ] Typecheck passes
|
|
276
|
+
- [ ] Lint passes
|
|
277
|
+
- [ ] Verify in browser using dev-browser skill
|
|
278
|
+
|
|
279
|
+
### US-004: Filter tasks by priority
|
|
280
|
+
**Description:** As a user, I want to filter the task list to see only high-priority items when I'm focused.
|
|
281
|
+
|
|
282
|
+
**Acceptance Criteria:**
|
|
283
|
+
- [ ] Add filter dropdown to `src/components/TaskList.tsx` with options: All | High | Medium | Low
|
|
284
|
+
- [ ] Filter persists in URL search params
|
|
285
|
+
- [ ] Empty state message when no tasks match filter
|
|
286
|
+
- [ ] Typecheck passes
|
|
287
|
+
- [ ] Lint passes
|
|
288
|
+
- [ ] Tests pass
|
|
289
|
+
- [ ] Verify in browser using dev-browser skill
|
|
290
|
+
|
|
291
|
+
## Functional Requirements
|
|
292
|
+
|
|
293
|
+
- FR-1: Add `priority` field to Task model in `prisma/schema.prisma` ('high' | 'medium' | 'low', default 'medium')
|
|
294
|
+
- FR-2: Display colored priority badge on each task card via `Badge` component
|
|
295
|
+
- FR-3: Include priority selector in task edit modal using shadcn `Select`
|
|
296
|
+
- FR-4: Add priority filter dropdown to `TaskList` header
|
|
297
|
+
- FR-5: Sort by priority within each status column (high → medium → low)
|
|
298
|
+
|
|
299
|
+
## Non-Goals
|
|
300
|
+
|
|
301
|
+
- No priority-based notifications or reminders
|
|
302
|
+
- No automatic priority assignment based on due date
|
|
303
|
+
- No priority inheritance for subtasks
|
|
304
|
+
|
|
305
|
+
## Design Considerations
|
|
306
|
+
|
|
307
|
+
- Reuse existing `src/components/ui/Badge.tsx` with color variants
|
|
308
|
+
- Use shadcn `Select` for the filter and edit dropdowns (already in project deps)
|
|
309
|
+
|
|
310
|
+
## Technical Considerations
|
|
311
|
+
|
|
312
|
+
- Filter state managed via URL search params (Next.js `useSearchParams`)
|
|
313
|
+
- Priority stored in database, not computed
|
|
314
|
+
- **Dependency order:** US-001 (schema) → US-002 (display) → US-003 (edit) → US-004 (filter)
|
|
315
|
+
|
|
316
|
+
## Success Metrics
|
|
317
|
+
|
|
318
|
+
- Users can change priority in under 2 clicks
|
|
319
|
+
- High-priority tasks immediately visible at top of lists
|
|
320
|
+
- No regression in task list performance
|
|
321
|
+
|
|
322
|
+
## Open Questions
|
|
323
|
+
|
|
324
|
+
- Should priority affect task ordering within a column?
|
|
325
|
+
- Should we add keyboard shortcuts for priority changes?
|
|
326
|
+
```
|
|
327
|
+
|
|
328
|
+
---
|
|
329
|
+
|
|
330
|
+
## Checklist
|
|
331
|
+
|
|
332
|
+
Before saving the PRD:
|
|
333
|
+
|
|
334
|
+
- [ ] Ran codebase reconnaissance (Step 1)
|
|
335
|
+
- [ ] Tech Context section reflects actual project stack and quality gates
|
|
336
|
+
- [ ] Asked codebase-informed clarifying questions with lettered options
|
|
337
|
+
- [ ] Incorporated user's answers
|
|
338
|
+
- [ ] User stories reference real file paths where applicable
|
|
339
|
+
- [ ] User stories are small and specific (one focused session each)
|
|
340
|
+
- [ ] Quality criteria match what the project actually uses (not hardcoded)
|
|
341
|
+
- [ ] Functional requirements are numbered and unambiguous
|
|
342
|
+
- [ ] Non-goals section defines clear boundaries
|
|
343
|
+
- [ ] Saved to `tasks/prd-[feature-name].md`
|