cc-dev-template 0.1.81 → 0.1.82

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. package/bin/install.js +10 -1
  2. package/package.json +1 -1
  3. package/src/agents/objective-researcher.md +52 -0
  4. package/src/agents/question-generator.md +70 -0
  5. package/src/scripts/restrict-to-spec-dir.sh +23 -0
  6. package/src/skills/ship/SKILL.md +46 -0
  7. package/src/skills/ship/references/step-1-intent.md +50 -0
  8. package/src/skills/ship/references/step-2-questions.md +42 -0
  9. package/src/skills/ship/references/step-3-research.md +44 -0
  10. package/src/skills/ship/references/step-4-design.md +70 -0
  11. package/src/skills/ship/references/step-5-spec.md +86 -0
  12. package/src/skills/ship/references/step-6-tasks.md +83 -0
  13. package/src/skills/ship/references/step-7-implement.md +61 -0
  14. package/src/skills/ship/references/step-8-reflect.md +21 -0
  15. package/src/skills/execute-spec/SKILL.md +0 -40
  16. package/src/skills/execute-spec/references/phase-1-hydrate.md +0 -74
  17. package/src/skills/execute-spec/references/phase-2-build.md +0 -65
  18. package/src/skills/execute-spec/references/phase-3-validate.md +0 -73
  19. package/src/skills/execute-spec/references/phase-4-triage.md +0 -79
  20. package/src/skills/execute-spec/references/phase-5-reflect.md +0 -32
  21. package/src/skills/research/SKILL.md +0 -14
  22. package/src/skills/research/references/step-1-check-existing.md +0 -25
  23. package/src/skills/research/references/step-2-conduct-research.md +0 -65
  24. package/src/skills/research/references/step-3-reflect.md +0 -29
  25. package/src/skills/spec-interview/SKILL.md +0 -17
  26. package/src/skills/spec-interview/references/critic-prompt.md +0 -140
  27. package/src/skills/spec-interview/references/pragmatist-prompt.md +0 -76
  28. package/src/skills/spec-interview/references/researcher-prompt.md +0 -46
  29. package/src/skills/spec-interview/references/step-1-opening.md +0 -78
  30. package/src/skills/spec-interview/references/step-2-ideation.md +0 -73
  31. package/src/skills/spec-interview/references/step-3-ui-ux.md +0 -83
  32. package/src/skills/spec-interview/references/step-4-deep-dive.md +0 -137
  33. package/src/skills/spec-interview/references/step-5-research-needs.md +0 -53
  34. package/src/skills/spec-interview/references/step-6-verification.md +0 -89
  35. package/src/skills/spec-interview/references/step-7-finalize.md +0 -60
  36. package/src/skills/spec-interview/references/step-8-reflect.md +0 -32
  37. package/src/skills/spec-review/SKILL.md +0 -91
  38. package/src/skills/spec-sanity-check/SKILL.md +0 -82
  39. package/src/skills/spec-to-tasks/SKILL.md +0 -24
  40. package/src/skills/spec-to-tasks/references/step-1-identify-spec.md +0 -39
  41. package/src/skills/spec-to-tasks/references/step-2-explore.md +0 -43
  42. package/src/skills/spec-to-tasks/references/step-3-generate.md +0 -67
  43. package/src/skills/spec-to-tasks/references/step-4-review.md +0 -90
  44. package/src/skills/spec-to-tasks/references/step-5-reflect.md +0 -22
  45. package/src/skills/spec-to-tasks/templates/task.md +0 -30
  46. package/src/skills/task-review/SKILL.md +0 -18
  47. package/src/skills/task-review/references/checklist.md +0 -153
package/bin/install.js CHANGED
@@ -280,7 +280,16 @@ console.log('\nCleanup:');
280
280
  let cleanupPerformed = false;
281
281
 
282
282
  // Remove deprecated skills
283
- const deprecatedSkills = ['youtube-to-notes'];
283
+ const deprecatedSkills = [
284
+ 'youtube-to-notes',
285
+ 'spec-interview',
286
+ 'spec-to-tasks',
287
+ 'execute-spec',
288
+ 'research',
289
+ 'spec-review',
290
+ 'spec-sanity-check',
291
+ 'task-review',
292
+ ];
284
293
  deprecatedSkills.forEach(skill => {
285
294
  const skillPath = path.join(CLAUDE_DIR, 'skills', skill);
286
295
  if (fs.existsSync(skillPath)) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "cc-dev-template",
3
- "version": "0.1.81",
3
+ "version": "0.1.82",
4
4
  "description": "Structured AI-assisted development framework for Claude Code",
5
5
  "bin": {
6
6
  "cc-dev-template": "./bin/install.js"
@@ -0,0 +1,52 @@
1
+ ---
2
+ name: objective-researcher
3
+ description: Answers codebase questions objectively without knowing the feature being built. Produces factual research documentation.
4
+ tools: Read, Grep, Glob, Bash, Write
5
+ permissionMode: bypassPermissions
6
+ maxTurns: 30
7
+ model: sonnet
8
+ ---
9
+
10
+ You are an objective codebase researcher. You receive questions about a codebase and answer them by exploring the code.
11
+
12
+ ## Critical Rule
13
+
14
+ You do NOT know what feature is being built. You only have questions. Answer them factually.
15
+
16
+ - Report what EXISTS, not what SHOULD exist
17
+ - Do not suggest implementations or improvements
18
+ - Do not speculate about what someone might want to build
19
+ - If you find multiple patterns for the same thing, report ALL of them with locations
20
+ - If a question cannot be answered from the codebase, say so explicitly
21
+
22
+ ## Process
23
+
24
+ 1. Read the questions file at the path provided in your prompt
25
+ 2. For each question, explore the codebase using Grep, Glob, and Read
26
+ 3. Write findings to the output path provided
27
+
28
+ ## Output Format
29
+
30
+ For each question in the questions file:
31
+
32
+ ```markdown
33
+ ## Q: {Original question}
34
+
35
+ **Finding**: {Your answer with specific details}
36
+
37
+ **Evidence**:
38
+ - `path/to/file.ts:42` — {what this file shows}
39
+ - `path/to/other.ts:108` — {what this code does}
40
+
41
+ **Confidence**: high | medium | low
42
+
43
+ **Notes**: {Any ambiguities, multiple patterns found, or related observations}
44
+ ```
45
+
46
+ After answering all questions, add a final section:
47
+
48
+ ```markdown
49
+ ## Additional Observations
50
+
51
+ {Anything noteworthy you discovered while researching that wasn't directly asked about but seems relevant to understanding this area of the codebase}
52
+ ```
@@ -0,0 +1,70 @@
1
+ ---
2
+ name: question-generator
3
+ description: Generates research questions from a feature intent document. Cannot explore the codebase — produces questions only.
4
+ tools: Read, Write
5
+ permissionMode: bypassPermissions
6
+ maxTurns: 5
7
+ model: sonnet
8
+ hooks:
9
+ PreToolUse:
10
+ - matcher: "Read"
11
+ hooks:
12
+ - type: command
13
+ command: "$HOME/.claude/scripts/restrict-to-spec-dir.sh"
14
+ - matcher: "Write"
15
+ hooks:
16
+ - type: command
17
+ command: "$HOME/.claude/scripts/restrict-to-spec-dir.sh"
18
+ ---
19
+
20
+ You are a question generator. You read a feature intent document and produce research questions that a senior engineer would need answered about the codebase before implementing this feature.
21
+
22
+ ## Your Role
23
+
24
+ You generate questions. You do NOT:
25
+
26
+ - Research or explore the codebase (you have no search tools)
27
+ - Suggest implementations or architectural decisions
28
+ - Write code or pseudocode
29
+ - Speculate about how the codebase might work
30
+
31
+ ## Process
32
+
33
+ 1. Read the intent document at the path provided in your prompt
34
+ 2. Think about what a senior engineer would need to know about the codebase before they could implement this
35
+ 3. Write organized, specific questions to the output path provided
36
+
37
+ ## Output Format
38
+
39
+ Write a markdown file with questions organized by category:
40
+
41
+ ```markdown
42
+ # Research Questions
43
+
44
+ ## Existing Patterns
45
+ - How does the codebase currently handle {relevant functionality}?
46
+ - What patterns are used for {similar features}?
47
+ - Are there existing utilities or helpers for {relevant operations}?
48
+
49
+ ## Data Model
50
+ - What data structures exist for {relevant entities}?
51
+ - How is {relevant data} currently stored and accessed?
52
+
53
+ ## Integration Points
54
+ - What services or modules would this feature interact with?
55
+ - Are there existing APIs or interfaces it should conform to?
56
+
57
+ ## Testing Infrastructure
58
+ - What testing patterns does the project use for {relevant scenarios}?
59
+ - What test runners and frameworks are configured?
60
+
61
+ ## Dependencies
62
+ - Are there existing libraries in the project for {relevant functionality}?
63
+ - What versions of key frameworks are in use?
64
+ ```
65
+
66
+ Each question must be:
67
+
68
+ - **Specific**: "How does the auth module handle session tokens?" not "How does auth work?"
69
+ - **Codebase-answerable**: Answerable by reading project files, not by asking the user
70
+ - **Relevant**: Directly related to implementing the feature described in the intent
@@ -0,0 +1,23 @@
#!/bin/bash
# Restricts Read/Write operations to the docs/specs/ directory.
# Used by the question-generator sub-agent to prevent codebase exploration.
#
# Claude Code PreToolUse hook contract:
#   - receives JSON on stdin with tool_name and tool_input
#   - exit 0 with no output => allow the tool call
#   - exit 0 with a JSON permission decision on stdout => enforce that decision

input=$(cat)

# Extract the target path from the tool input. `// empty` yields an empty
# string when the field is absent, so the -z check below works.
file_path=$(echo "$input" | jq -r '.tool_input.file_path // empty')

# If there is no file_path in the tool input (or jq produced nothing), allow
# the operation — there is no path to police.
# NOTE(review): this also fails open when jq is not installed; confirm jq is
# a guaranteed dependency of the install environment.
if [ -z "$file_path" ]; then
  exit 0
fi

# Allow access to the docs/specs/ directory. Match both absolute paths
# (/repo/docs/specs/...) and relative paths (docs/specs/...) — the original
# leading-slash-only pattern silently missed relative paths.
if [[ "$file_path" == *"/docs/specs/"* || "$file_path" == "docs/specs/"* ]]; then
  exit 0
fi

# Deny everything else, using the documented PreToolUse decision schema
# (a bare top-level "permissionDecision" key is not recognized by the hook
# runner, which would cause the deny to be ignored).
cat <<'EOF'
{"hookSpecificOutput":{"hookEventName":"PreToolUse","permissionDecision":"deny","permissionDecisionReason":"Access restricted to docs/specs/ directory only."}}
EOF
exit 0
@@ -0,0 +1,46 @@
1
+ ---
2
+ name: ship
3
+ description: End-to-end workflow for shipping complex features through intent discovery, contamination-free research, design discussion, spec generation, task breakdown, and implementation. Use when building a non-trivial feature that needs deliberate design and planning.
4
+ argument-hint: <feature-name>
5
+ allowed-tools: Read, Write, Edit, Grep, Glob, Bash, Agent, TaskCreate, TaskList, TaskUpdate, TaskGet, AskUserQuestion
6
+ ---
7
+
8
+ # Ship
9
+
10
+ You orchestrate a multi-phase workflow for shipping complex features. Each phase produces artifacts that feed the next, with clean context separation to prevent intent contamination.
11
+
12
+ Every invocation of this skill is for a complex feature. For simple changes, the user would use Claude Code's built-in plan mode instead.
13
+
14
+ ## Determine Feature
15
+
16
+ If an argument was provided, use it as the feature name (convert to hyphen-case). If no argument, ask the user what feature they want to build and derive a short hyphen-case name from their description.
17
+
18
+ The spec directory is `docs/specs/{feature-name}/`.
19
+
20
+ ## Check State
21
+
22
+ Look for `docs/specs/{feature-name}/state.yaml`.
23
+
24
+ **If state.yaml exists**, read it. The `phase` field tells you where to resume:
25
+
26
+ | Phase | Step file |
27
+ |---|---|
28
+ | intent | `references/step-1-intent.md` |
29
+ | questions | `references/step-2-questions.md` |
30
+ | research | `references/step-3-research.md` |
31
+ | design | `references/step-4-design.md` |
32
+ | spec | `references/step-5-spec.md` |
33
+ | tasks | `references/step-6-tasks.md` |
34
+ | implement | `references/step-7-implement.md` |
35
+
36
+ Read the step file for the current phase and follow its instructions.
37
+
38
+ **If state.yaml does not exist**, this is a new feature. Create the spec directory and an initial state.yaml:
39
+
40
+ ```yaml
41
+ feature: {feature-name}
42
+ phase: intent
43
+ dir: docs/specs/{feature-name}
44
+ ```
45
+
46
+ Then read `references/step-1-intent.md` to begin.
@@ -0,0 +1,50 @@
1
+ # Intent Discovery
2
+
3
+ Capture what the user wants to build through conversation. This produces the intent document — a clear statement of the feature, not a spec, not a plan.
4
+
5
+ ## Create Tasks
6
+
7
+ Create these tasks and work through them in order:
8
+
9
+ 1. "Discuss the feature with the user" — understand what, why, and constraints
10
+ 2. "Write intent.md" — capture the understanding
11
+ 3. "Begin question generation" — proceed to the next phase
12
+
13
+ ## Task 1: Discuss
14
+
15
+ Have a conversation with the user about:
16
+
17
+ - **What** they want to build — the feature or change
18
+ - **Why** it matters — the problem it solves, who it's for
19
+ - **Constraints** — any technical limitations, patterns to follow, things to avoid
20
+ - **Success** — how they'll know it works (high-level, not formal acceptance criteria yet)
21
+
22
+ Keep this natural. A few rounds of back-and-forth is enough. You're building understanding, not conducting an interrogation.
23
+
24
+ ## Task 2: Write intent.md
25
+
26
+ When you have enough understanding, write `{spec_dir}/intent.md`:
27
+
28
+ ```markdown
29
+ # Feature: {Feature Name}
30
+
31
+ ## What
32
+ {Clear description of what's being built}
33
+
34
+ ## Why
35
+ {The problem this solves and who benefits}
36
+
37
+ ## Constraints
38
+ {Any stated technical limitations, patterns, or preferences}
39
+
40
+ ## Success Criteria
41
+ {High-level criteria for knowing it's done}
42
+ ```
43
+
44
+ Present the intent document to the user for confirmation. Adjust if they have corrections.
45
+
46
+ ## Task 3: Proceed
47
+
48
+ Update `{spec_dir}/state.yaml` — set `phase: questions`.
49
+
50
+ Use the Read tool on `references/step-2-questions.md` to generate research questions from the intent.
@@ -0,0 +1,42 @@
1
+ # Question Generation
2
+
3
+ Generate research questions that a senior engineer would need answered about the codebase before implementing this feature. A restricted sub-agent handles this — it reads the intent and produces questions but cannot explore the codebase.
4
+
5
+ The intent document captures what the user wants. The questions document will be used by a DIFFERENT agent to research the codebase — an agent that will NOT see the intent. This separation prevents implementation opinions from contaminating the research.
6
+
7
+ ## Create Tasks
8
+
9
+ Create these tasks and work through them in order:
10
+
11
+ 1. "Generate research questions from intent" — spawn the question-generator
12
+ 2. "Review questions with user" — present and refine
13
+ 3. "Begin objective research" — proceed to the next phase
14
+
15
+ ## Task 1: Generate Questions
16
+
17
+ Spawn a sub-agent to generate questions:
18
+
19
+ ```
20
+ Agent tool:
21
+ subagent_type: "question-generator"
22
+ prompt: "Read the intent document at {spec_dir}/intent.md. Write research questions to {spec_dir}/questions.md."
23
+ model: "sonnet"
24
+ ```
25
+
26
+ The question-generator is restricted — it can only read and write within docs/specs/. It has no search tools. It cannot explore the codebase even if it wanted to.
27
+
28
+ ## Task 2: Review Questions
29
+
30
+ Read `{spec_dir}/questions.md` and present the questions to the user. Ask:
31
+
32
+ - Are these the right questions?
33
+ - Any questions missing?
34
+ - Any questions that aren't relevant?
35
+
36
+ Update `questions.md` based on user feedback. The user may add questions about patterns, existing implementations, or concerns they have.
37
+
38
+ ## Task 3: Proceed
39
+
40
+ Update `{spec_dir}/state.yaml` — set `phase: research`.
41
+
42
+ Use the Read tool on `references/step-3-research.md` to begin objective codebase research.
@@ -0,0 +1,44 @@
1
+ # Objective Research
2
+
3
+ A sub-agent researches the codebase using ONLY the questions — it does not see the intent document. This produces factual, objective documentation about the codebase without implementation bias.
4
+
5
+ This is the critical contamination prevention step. The research agent answers questions about what EXISTS in the codebase. It does not know what feature is being built, so it cannot steer findings toward a particular implementation.
6
+
7
+ ## Create Tasks
8
+
9
+ Create these tasks and work through them in order:
10
+
11
+ 1. "Research codebase using questions" — spawn the objective-researcher
12
+ 2. "Review research with user" — present findings
13
+ 3. "Begin design discussion" — proceed to the next phase
14
+
15
+ ## Task 1: Research Codebase
16
+
17
+ Spawn a sub-agent to research the codebase:
18
+
19
+ ```
20
+ Agent tool:
21
+ subagent_type: "objective-researcher"
22
+ prompt: "Read the questions at {spec_dir}/questions.md. Research the codebase to answer each question. Write your findings to {spec_dir}/research.md. Do NOT read any other files in {spec_dir} — only questions.md."
23
+ model: "sonnet"
24
+ ```
25
+
26
+ The objective-researcher has full codebase access (Read, Grep, Glob, Bash) but no knowledge of the feature being built. It only sees the questions.
27
+
28
+ ## Task 2: Review Research
29
+
30
+ Read `{spec_dir}/research.md` and present a summary to the user. Highlight:
31
+
32
+ - Any questions the researcher couldn't answer (gaps)
33
+ - Places where multiple patterns were found (need disambiguation)
34
+ - Anything surprising or noteworthy
35
+
36
+ The user may add context the researcher missed, or flag patterns that are outdated.
37
+
38
+ If the research is thin or missing critical areas, spawn the objective-researcher again with additional targeted questions.
39
+
40
+ ## Task 3: Proceed
41
+
42
+ Update `{spec_dir}/state.yaml` — set `phase: design`.
43
+
44
+ Use the Read tool on `references/step-4-design.md` to begin the design discussion with the user.
@@ -0,0 +1,70 @@
1
+ # Design Discussion
2
+
3
+ This is the highest-leverage phase. You now have the objective research AND the original intent. Combine them to make design decisions with the user.
4
+
5
+ Read `{spec_dir}/intent.md` and `{spec_dir}/research.md` before proceeding.
6
+
7
+ ## Create Tasks
8
+
9
+ Create these tasks and work through them in order:
10
+
11
+ 1. "Surface patterns and present design questions" — analyze research, identify decisions needed
12
+ 2. "Resolve design decisions with user" — interactive discussion
13
+ 3. "Write design.md" — capture resolved decisions
14
+ 4. "Begin spec generation" — proceed to the next phase
15
+
16
+ ## Task 1: Surface Patterns
17
+
18
+ From the research document, identify:
19
+
20
+ **Patterns to Follow**: Code patterns from the codebase relevant to this feature. If the research found multiple patterns for the same thing (e.g., old way vs. new way of handling forms), list them all and flag for disambiguation.
21
+
22
+ **Design Questions**: Decisions that need human input before implementation. For each question:
23
+
24
+ - State the question clearly
25
+ - Provide Option A and Option B (grounded in what the research found)
26
+ - Note which option you'd recommend and why
27
+ - Leave room for the user to propose Option C
28
+
29
+ Common design questions include:
30
+
31
+ - Which existing pattern to follow when there are multiple
32
+ - Where new code should live (which directory, which module)
33
+ - How to handle edge cases and error states
34
+ - What to do about backward compatibility
35
+ - Testing strategy (unit, integration, E2E)
36
+
37
+ ## Task 2: Resolve Decisions
38
+
39
+ Present the patterns and design questions to the user. Work through each decision:
40
+
41
+ - Let the user choose options or propose alternatives
42
+ - If the user isn't sure, provide your reasoning
43
+ - If a decision requires understanding external technology the codebase doesn't use yet, note it as an open item — the spec phase can handle external research
44
+
45
+ This is interactive. Go back and forth until all design questions are resolved.
46
+
47
+ ## Task 3: Write design.md
48
+
49
+ Write `{spec_dir}/design.md`:
50
+
51
+ ```markdown
52
+ # Design Decisions: {Feature Name}
53
+
54
+ ## Patterns to Follow
55
+ {List each pattern with code location and why it's relevant}
56
+
57
+ ## Resolved Decisions
58
+ {For each design question: the question, the chosen option, and the rationale}
59
+
60
+ ## Open Items
61
+ {Anything requiring external research or further investigation}
62
+ ```
63
+
64
+ Present to the user for confirmation.
65
+
66
+ ## Task 4: Proceed
67
+
68
+ Update `{spec_dir}/state.yaml` — set `phase: spec`.
69
+
70
+ Use the Read tool on `references/step-5-spec.md` to generate the implementation specification.
@@ -0,0 +1,86 @@
1
+ # Spec Generation
2
+
3
+ The spec you produce here is a first draft — you will review, refine, and present it to the user before proceeding.
4
+
5
+ Generate an implementation-ready specification from the intent, research, and design decisions. Read all three documents before writing:
6
+
7
+ - `{spec_dir}/intent.md`
8
+ - `{spec_dir}/research.md`
9
+ - `{spec_dir}/design.md`
10
+
11
+ ## Create Tasks
12
+
13
+ Create these tasks and work through them in order:
14
+
15
+ 1. "Conduct any needed external research" — resolve open items from design.md
16
+ 2. "Write spec.md" — generate the specification
17
+ 3. "Review spec with user" — present and refine
18
+ 4. "Begin task breakdown" — proceed to the next phase
19
+
20
+ ## Task 1: External Research (if needed)
21
+
22
+ Check `{spec_dir}/design.md` for open items. If any require research into external libraries, frameworks, or paradigms:
23
+
24
+ ```
25
+ Agent tool:
26
+ subagent_type: "general-purpose"
27
+ prompt: "Research {topic}. Write findings to {spec_dir}/research-{topic-slug}.md. Focus on: API surface, integration patterns, gotchas, and typical usage."
28
+ model: "sonnet"
29
+ ```
30
+
31
+ Skip this task if there are no open items.
32
+
33
+ ## Task 2: Write spec.md
34
+
35
+ Write `{spec_dir}/spec.md` using this structure:
36
+
37
+ ```markdown
38
+ # Spec: {Feature Name}
39
+
40
+ ## Overview
41
+ {What this feature does, 2-3 sentences}
42
+
43
+ ## Data Model
44
+ {New or modified data structures, schemas, types}
45
+
46
+ ## API Contracts
47
+ {Endpoints, function signatures, input/output shapes — specific enough that tests can be written against these contracts}
48
+
49
+ ## Integration Points
50
+ {How this feature connects to existing systems — which files, which patterns, which services. Reference specific code from research.md.}
51
+
52
+ ## Acceptance Criteria
53
+
54
+ ### AC-1: {Criterion name}
55
+ - **Given**: {precondition}
56
+ - **When**: {action}
57
+ - **Then**: {expected result}
58
+ - **Verification**: {how to test — unit test, integration test, manual check}
59
+
60
+ ### AC-2: ...
61
+
62
+ ## Implementation Notes
63
+ {Patterns to follow from design.md, ordering considerations, things to watch out for}
64
+
65
+ ## Out of Scope
66
+ {Explicitly what this feature does NOT do}
67
+ ```
68
+
69
+ The acceptance criteria and API contracts are the most important sections. They must be specific enough that an agent can write tests against them without additional context.
70
+
71
+ ## Task 3: Review Spec
72
+
73
+ Present the full spec to the user. Walk through each section. Pay particular attention to:
74
+
75
+ - Are the API contracts correct and complete?
76
+ - Are the acceptance criteria independently testable?
77
+ - Are the integration points accurate (grounded in the research)?
78
+ - Anything missing or out of scope that should be in scope?
79
+
80
+ Revise based on user feedback.
81
+
82
+ ## Task 4: Proceed
83
+
84
+ Update `{spec_dir}/state.yaml` — set `phase: tasks`.
85
+
86
+ Use the Read tool on `references/step-6-tasks.md` to break the spec into implementation tasks.
@@ -0,0 +1,83 @@
1
+ # Task Breakdown
2
+
3
+ The task files you produce here are a first draft — you will review and refine them with the user before proceeding to implementation.
4
+
5
+ Break the spec into implementation tasks ordered as tracer bullets — vertical slices through the stack, not horizontal layers.
6
+
7
+ Read `{spec_dir}/spec.md` before proceeding.
8
+
9
+ ## Create Tasks
10
+
11
+ Create these tasks and work through them in order:
12
+
13
+ 1. "Design tracer bullet task ordering" — plan the vertical implementation order
14
+ 2. "Write task files" — create individual task files
15
+ 3. "Review tasks with user" — present and refine
16
+ 4. "Begin implementation" — proceed to the next phase
17
+
18
+ ## Task 1: Design Task Ordering
19
+
20
+ Do NOT create horizontal plans. A horizontal plan looks like:
21
+
22
+ - Task 1: Create all database models
23
+ - Task 2: Create all service layer functions
24
+ - Task 3: Create all API endpoints
25
+ - Task 4: Create all frontend components
26
+
27
+ Nothing is testable until the end.
28
+
29
+ Instead, create **vertical / tracer bullet** ordering:
30
+
31
+ - Task 1: Wire end-to-end with mock data (create the endpoint, return hardcoded data, render a placeholder in the UI)
32
+ - Task 2: Add real data layer for the first acceptance criterion
33
+ - Task 3: Add real logic for the second acceptance criterion
34
+ - ...
35
+
36
+ Each task touches all necessary layers of the stack and produces something independently testable.
37
+
38
+ Map each acceptance criterion from the spec to one or more tasks. Every AC must be covered.
39
+
40
+ ## Task 2: Write Task Files
41
+
42
+ Create `{spec_dir}/tasks/` directory. Write one file per task.
43
+
44
+ `{spec_dir}/tasks/T001-{short-name}.md`:
45
+
46
+ ```markdown
47
+ ---
48
+ id: T001
49
+ title: {Short descriptive title}
50
+ status: pending
51
+ depends_on: []
52
+ ---
53
+
54
+ ### Criterion
55
+ {Which acceptance criterion this task addresses}
56
+
57
+ ### Files
58
+ {Which files will be created or modified, with brief description of changes}
59
+
60
+ ### Verification
61
+ {How to verify this task is done — specific test commands, manual checks}
62
+
63
+ ### Implementation Notes
64
+ {Patterns to follow, edge cases, things to watch out for}
65
+ ```
66
+
67
+ Include testing in each task — not as a separate task at the end. If a task adds a feature, the same task adds the test for that feature.
68
+
69
+ ## Task 3: Review Tasks
70
+
71
+ Present the task breakdown to the user. For each task, show:
72
+
73
+ - What it does
74
+ - Why it's in this order (the vertical reasoning)
75
+ - How it can be independently verified
76
+
77
+ Revise based on user feedback.
78
+
79
+ ## Task 4: Proceed
80
+
81
+ Update `{spec_dir}/state.yaml` — set `phase: implement`.
82
+
83
+ Use the Read tool on `references/step-7-implement.md` to begin implementation.
@@ -0,0 +1,61 @@
1
+ # Implementation
2
+
3
+ Orchestrate implementation using spec-implementer and spec-validator sub-agents. This follows the execute-spec pattern — you dispatch agents, you do not write code yourself.
4
+
5
+ Read `{spec_dir}/spec.md` and list all task files in `{spec_dir}/tasks/`.
6
+
7
+ ## Create Tasks
8
+
9
+ Create these tasks and work through them in order:
10
+
11
+ 1. "Hydrate task system" — load task files into Claude Code's task system
12
+ 2. "Implement tasks" — dispatch implementer agents
13
+ 3. "Validate tasks" — dispatch validator agents
14
+ 4. "Handle failures" — triage and re-attempt if needed
15
+ 5. "Finalize" — present results
16
+
17
+ ## Task 1: Hydrate
18
+
19
+ Read each task file in `{spec_dir}/tasks/`. For each one, create a Claude Code task (TaskCreate) with the task title and description. Set up `blockedBy` relationships based on the `depends_on` field in each task file's frontmatter.
20
+
21
+ ## Task 2: Implement
22
+
23
+ For each task that is ready (no blockers), dispatch an implementer:
24
+
25
+ ```
26
+ Agent tool:
27
+ subagent_type: "spec-implementer"
28
+ prompt: "Implement the task described in {task_file_path}. Read the task file for requirements, files to modify, and verification steps. Also read {spec_dir}/spec.md for overall context. After implementation, run the verification steps described in the task file."
29
+ ```
30
+
31
+ Run independent tasks in parallel when possible. Mark each Claude Code task as completed when its implementer finishes successfully.
32
+
33
+ ## Task 3: Validate
34
+
35
+ For each completed task, dispatch a validator:
36
+
37
+ ```
38
+ Agent tool:
39
+ subagent_type: "spec-validator"
40
+ prompt: "Validate the task described in {task_file_path}. Review the code changes, run tests, and verify against the acceptance criteria in {spec_dir}/spec.md. Report pass/fail with details."
41
+ ```
42
+
43
+ ## Task 4: Handle Failures
44
+
45
+ If any validation fails:
46
+
47
+ - Re-dispatch the spec-implementer with the validation feedback appended to the prompt
48
+ - Re-validate after fixes
49
+ - If a task fails validation twice, flag it for the user with the error details and ask how to proceed
50
+
51
+ ## Task 5: Finalize
52
+
53
+ Update `{spec_dir}/state.yaml` — set `phase: complete`.
54
+
55
+ Present a summary to the user:
56
+
57
+ - What was implemented
58
+ - What tests pass
59
+ - Any tasks that needed manual intervention
60
+
61
+ Use the Read tool on `references/step-8-reflect.md` to review and improve this workflow.
@@ -0,0 +1,21 @@
1
+ # Reflect
2
+
3
+ Review how this workflow performed and identify improvements.
4
+
5
+ ## Self-Assessment
6
+
7
+ Consider each phase:
8
+
9
+ 1. **Intent capture**: Did the intent document accurately capture what the user wanted? Did the spec drift from the original intent?
10
+ 2. **Question quality**: Were the research questions comprehensive? Were any critical questions missing that caused problems later?
11
+ 3. **Research objectivity**: Did the research stay objective? Did the contamination prevention work — or did implementation opinions leak in despite the separation?
12
+ 4. **Design decisions**: Were the design questions the right ones? Did the user have to course-correct on things that should have been caught earlier?
13
+ 5. **Spec completeness**: Were the API contracts and acceptance criteria specific enough for implementation agents?
14
+ 6. **Task ordering**: Did the tracer bullet ordering work? Were there dependency issues or tasks that should have been ordered differently?
15
+ 7. **Implementation**: Did agents struggle with any tasks? Were the task descriptions clear enough?
16
+
17
+ ## Skill Improvement
18
+
19
+ If you identified concrete improvements to this workflow, edit the relevant step file in `references/` to address the issue. Only make changes that are clearly beneficial based on this session's experience — do not speculate about hypothetical improvements.
20
+
21
+ Present your reflection and any changes to the user.