context-engineer 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74) hide show
  1. package/README.md +88 -0
  2. package/bin/cli.mjs +91 -0
  3. package/lib/copy.mjs +102 -0
  4. package/lib/init.mjs +166 -0
  5. package/lib/prompts.mjs +144 -0
  6. package/lib/update.mjs +198 -0
  7. package/package.json +35 -0
  8. package/templates/checksums.json +68 -0
  9. package/templates/claude/.claude/rules/context-maintenance.md +38 -0
  10. package/templates/claude/.claude/rules/experience-capture.md +46 -0
  11. package/templates/claude/.claude/settings.project.json +22 -0
  12. package/templates/claude/.claude/skills/bootstrap/SKILL.md +223 -0
  13. package/templates/claude/.claude/skills/dev/SKILL.md +119 -0
  14. package/templates/claude/.claude/skills/dev-capture/SKILL.md +111 -0
  15. package/templates/claude/.claude/skills/dev-commit/SKILL.md +90 -0
  16. package/templates/claude/.claude/skills/dev-decompose/SKILL.md +113 -0
  17. package/templates/claude/.claude/skills/dev-deps/SKILL.md +108 -0
  18. package/templates/claude/.claude/skills/dev-execute/SKILL.md +196 -0
  19. package/templates/claude/.claude/skills/dev-prd/SKILL.md +100 -0
  20. package/templates/claude/.claude/skills/dev-quality/SKILL.md +109 -0
  21. package/templates/claude/.claude/skills/dev-requirements/SKILL.md +75 -0
  22. package/templates/claude/.claude/skills/review-context/SKILL.md +120 -0
  23. package/templates/claude/.claude/skills/sync/SKILL.md +107 -0
  24. package/templates/claude/.claude/skills/update-context/SKILL.md +105 -0
  25. package/templates/claude/.claude/workflow/agents/implementer.md +65 -0
  26. package/templates/claude/.claude/workflow/agents/reviewer.md +96 -0
  27. package/templates/claude/.claude/workflow/agents/team-config.md +97 -0
  28. package/templates/claude/.claude/workflow/agents/tester.md +98 -0
  29. package/templates/claude/.claude/workflow/interfaces/phase-contract.md +157 -0
  30. package/templates/claude/CLAUDE.md +50 -0
  31. package/templates/core/.context/_meta/concepts.md +9 -0
  32. package/templates/core/.context/_meta/drift-report.md +16 -0
  33. package/templates/core/.context/_meta/last-sync.json +6 -0
  34. package/templates/core/.context/_meta/schema.md +242 -0
  35. package/templates/core/.context/architecture/api-surface.md +52 -0
  36. package/templates/core/.context/architecture/class-index.md +49 -0
  37. package/templates/core/.context/architecture/data-flow.md +103 -0
  38. package/templates/core/.context/architecture/data-model.md +35 -0
  39. package/templates/core/.context/architecture/decisions/001-template.md +35 -0
  40. package/templates/core/.context/architecture/dependencies.md +35 -0
  41. package/templates/core/.context/architecture/infrastructure.md +42 -0
  42. package/templates/core/.context/architecture/module-graph.md +68 -0
  43. package/templates/core/.context/architecture/overview.md +87 -0
  44. package/templates/core/.context/business/domain-model.md +43 -0
  45. package/templates/core/.context/business/glossary.md +23 -0
  46. package/templates/core/.context/business/overview.md +29 -0
  47. package/templates/core/.context/business/workflows.md +61 -0
  48. package/templates/core/.context/constitution.md +84 -0
  49. package/templates/core/.context/conventions/code-style.md +47 -0
  50. package/templates/core/.context/conventions/error-handling.md +50 -0
  51. package/templates/core/.context/conventions/git.md +46 -0
  52. package/templates/core/.context/conventions/patterns.md +41 -0
  53. package/templates/core/.context/conventions/testing.md +49 -0
  54. package/templates/core/.context/experience/debugging.md +21 -0
  55. package/templates/core/.context/experience/incidents.md +26 -0
  56. package/templates/core/.context/experience/lessons.md +23 -0
  57. package/templates/core/.context/experience/performance.md +29 -0
  58. package/templates/core/.context/index.md +93 -0
  59. package/templates/core/.context/progress/backlog.md +23 -0
  60. package/templates/core/.context/progress/status.md +30 -0
  61. package/templates/core/.context/workflow/artifacts/.gitkeep +0 -0
  62. package/templates/core/.context/workflow/config.md +35 -0
  63. package/templates/core/AGENTS.md +53 -0
  64. package/templates/core/scripts/compact-experience.sh +83 -0
  65. package/templates/core/scripts/detect-drift.sh +388 -0
  66. package/templates/core/scripts/extract-structure.sh +757 -0
  67. package/templates/core/scripts/sync-context.sh +510 -0
  68. package/templates/cursor/.cursor/rules/always.mdc +18 -0
  69. package/templates/cursor/.cursor/rules/backend.mdc +16 -0
  70. package/templates/cursor/.cursor/rules/database.mdc +16 -0
  71. package/templates/cursor/.cursor/rules/frontend.mdc +13 -0
  72. package/templates/cursor/.cursorrules +23 -0
  73. package/templates/github/.github/copilot-instructions.md +15 -0
  74. package/templates/github/.github/workflows/context-drift.yml +73 -0
@@ -0,0 +1,196 @@
1
+ # /dev-execute — P4: Agent Execution
2
+
3
+ > Orchestrates an agent team (Implementer → Reviewer → Tester) to execute tasks according to the dependency graph, managing parallelism, isolation, and collaboration patterns.
4
+
5
+ ## Input
6
+ - `.context/workflow/artifacts/dep-graph.json` (from P3)
7
+ - `.context/workflow/artifacts/tasks.json` (from P2)
8
+ - `.claude/workflow/agents/team-config.md` — team composition and patterns
9
+ - `.claude/workflow/agents/*.md` — agent role definitions
10
+ - Context: Per-task `context_files` from tasks.json + `.context/conventions/*`
11
+
12
+ ## Output
13
+ - `.context/workflow/artifacts/execution-log.md`
14
+ - Actual code changes in the repository
15
+
16
+ ## Instructions
17
+
18
+ ### Step 1: Load Configuration
19
+
20
+ 1. Read `.context/workflow/artifacts/dep-graph.json` — parallel groups and isolation strategy
21
+ 2. Read `.context/workflow/artifacts/tasks.json` — full task specifications
22
+ 3. Read `.claude/workflow/agents/team-config.md` — team patterns and task type mapping
23
+ 4. Read `.context/workflow/config.md` for `max_parallel_agents` (default: 3)
24
+
25
+ ### Step 2: Load Agent Role Definitions
26
+
27
+ Read the following agent role files:
28
+ - `.claude/workflow/agents/implementer.md`
29
+ - `.claude/workflow/agents/reviewer.md`
30
+ - `.claude/workflow/agents/tester.md`
31
+
32
+ These are prompt templates that define how each agent behaves.
33
+
34
+ ### Step 3: Prepare Execution Log
35
+
36
+ Create `.context/workflow/artifacts/execution-log.md`:
37
+
38
+ ```markdown
39
+ # Execution Log
40
+
41
+ - **Feature**: [feature name from tasks.json]
42
+ - **Started**: [ISO-8601 timestamp]
43
+ - **Total tasks**: [count]
44
+ - **Total groups**: [count]
45
+ - **Team pattern**: agent team (implementer → reviewer → tester)
46
+
47
+ ## Execution Progress
48
+ ```
49
+
50
+ ### Step 4: Execute Groups in Order
51
+
52
+ For each group in `dep-graph.json` (ordered by `order` field):
53
+
54
+ #### 4a. Determine Team Pattern for Each Task
55
+
56
+ For each task in the group, look up the team pattern using the mapping in `team-config.md`:
57
+ - Match on `task.type` + `task.complexity`
58
+ - Result: `default`, `lite`, or `implement-only`
59
+
60
+ #### 4b. Prepare Context for Agents
61
+
62
+ For each task, read the shared context that all agents for this task will receive:
63
+ 1. Project conventions: `.context/conventions/code-style.md`, `.context/conventions/patterns.md`
64
+ 2. Testing conventions: `.context/conventions/testing.md`
65
+ 3. Task-specific context: each file in the task's `context_files` array
66
+
67
+ #### 4c. Execute Task Team (per task)
68
+
69
+ For each task, run the agent team according to the determined pattern:
70
+
71
+ **Pattern: `default` (implement → review → test)**
72
+
73
+ ```
74
+ Round 0:
75
+ 1. IMPLEMENTER AGENT
76
+ Prompt = implementer.md role + task spec + context + conventions
77
+ Launch: Agent(prompt=..., subagent_type="general-purpose", isolation=per dep-graph)
78
+ → Code changes produced
79
+
80
+ 2. REVIEWER AGENT
81
+ Prompt = reviewer.md role + task spec + git diff of implementer's changes
82
+ Launch: Agent(prompt=..., subagent_type="general-purpose")
83
+ → Decision: APPROVED or REVISE with feedback
84
+
85
+ 3. If REVISE and round < 2:
86
+ IMPLEMENTER AGENT (revision)
87
+ Prompt = implementer.md role + task spec + reviewer feedback
88
+ Launch: Agent(prompt=..., subagent_type="general-purpose")
89
+ → Go to step 2 (round + 1)
90
+
91
+ 4. If REVISE and round >= 2:
92
+ Log "Review escalation needed" → pause for human
93
+
94
+ 5. If APPROVED:
95
+ TESTER AGENT
96
+ Prompt = tester.md role + task spec + current code state
97
+ Launch: Agent(prompt=..., subagent_type="general-purpose")
98
+ → Test results: PASS or FAIL
99
+ ```
100
+
101
+ **Pattern: `lite` (implement → test)**
102
+
103
+ ```
104
+ 1. IMPLEMENTER AGENT (same as above)
105
+ 2. TESTER AGENT (same as step 5 above)
106
+ ```
107
+
108
+ **Pattern: `implement-only`**
109
+
110
+ ```
111
+ 1. IMPLEMENTER AGENT (same as above)
112
+ ```
113
+
114
+ #### 4d. Parallelism Within Groups
115
+
116
+ Tasks within the same group can run in parallel, but the agent team for each task runs **sequentially** (implementer → reviewer → tester).
117
+
118
+ **How to parallelize:**
119
+ - For tasks with `isolation: "none"` — launch Implementer agents for all tasks in parallel
120
+ - Wait for all Implementers to complete
121
+ - Launch Reviewer agents in parallel (each reviewing its own task's changes)
122
+ - Wait for all Reviewers to complete
123
+ - Handle any REVISE loops
124
+ - Launch Tester agents in parallel
125
+ - Wait for all Testers to complete
126
+
127
+ **For tasks with `isolation: "worktree"`:**
128
+ - Launch the full team pipeline inside the worktree (the Implementer agent uses `isolation: "worktree"`)
129
+ - Reviewer and Tester run in the same worktree context
130
+ - After the full team pipeline completes, merge the worktree branch
131
+
132
+ Respect `max_parallel_agents` — if a group has more tasks than the limit, split into sub-batches.
133
+
134
+ #### 4e. Post-Group Validation
135
+
136
+ After all tasks in a group complete:
137
+ 1. Run a quick build check using the build command from `.context/constitution.md`
138
+ 2. If build fails:
139
+ - Attempt one auto-fix (read error, fix, rebuild)
140
+ - If still fails → stop and report
141
+ 3. If worktree branches were used → merge them into the current branch
142
+ - If merge conflict → stop, report conflict details, ask user
143
+
144
+ #### 4f. Update Execution Log
145
+
146
+ Append to `execution-log.md` for each task:
147
+
148
+ ```markdown
149
+ ### Group [N]
150
+ - **Status**: [completed | failed | partial]
151
+
152
+ #### T[id]: [title]
153
+ - **Pattern**: [default | lite | implement-only]
154
+ - **Status**: [completed | failed]
155
+ - **Isolation**: [none | worktree]
156
+
157
+ ##### Implementer
158
+ - **Files changed**: [list]
159
+ - **Summary**: [agent's summary]
160
+
161
+ ##### Reviewer (if applicable)
162
+ - **Decision**: [APPROVED | REVISE]
163
+ - **Rounds**: [1 | 2]
164
+ - **Issues found**: [count]
165
+
166
+ ##### Tester (if applicable)
167
+ - **Tests written**: [count]
168
+ - **Tests passed**: [count]
169
+ - **Tests failed**: [count]
170
+ - **Overall**: [PASS | FAIL]
171
+ ```
172
+
173
+ ### Step 5: Handle Failures
174
+
175
+ - **Implementer failure**: Log error, mark task failed. Dependent tasks in later groups cannot proceed.
176
+ - **Reviewer escalation** (2 rounds exceeded): Pause, show reviewer's feedback, ask human to decide.
177
+ - **Tester failure** (tests fail): Log the failing tests. Do NOT auto-loop back to the Implementer. Report and continue with the next task (the quality gate in P5 will catch this).
178
+ - **Build failure after group**: Attempt one fix. If fails, stop and report.
179
+ - **Merge conflict**: Stop, show details, ask user.
180
+
181
+ ### Step 6: Finalize
182
+
183
+ After all groups complete, update execution log:
184
+
185
+ ```markdown
186
+ ## Summary
187
+ - **Completed**: [ISO-8601 timestamp]
188
+ - **Tasks completed**: [N/total]
189
+ - **Tasks failed**: [N]
190
+ - **Review rounds total**: [N]
191
+ - **Tests written**: [N]
192
+ - **Tests passed**: [N]
193
+ - **Build status**: [pass/fail]
194
+ ```
195
+
196
+ Show the user a summary of execution results.
@@ -0,0 +1,100 @@
1
+ # /dev-prd — P1: PRD Generation
2
+
3
+ > Generates a Product Requirements Document from structured requirements and project context.
4
+
5
+ ## Input
6
+ - `.context/workflow/artifacts/requirements.md` (from P0)
7
+ - Context: `.context/business/*`, `.context/architecture/overview.md`, `.context/architecture/api-surface.md`
8
+
9
+ ## Output
10
+ - `.context/workflow/artifacts/prd.md`
11
+
12
+ ## Instructions
13
+
14
+ ### Step 1: Load Inputs
15
+
16
+ 1. Read `.context/workflow/artifacts/requirements.md` — the structured requirements from P0
17
+ 2. Read the following context files (skip any that don't exist):
18
+ - `.context/business/overview.md` — product vision and positioning
19
+ - `.context/business/domain-model.md` — domain entities and relationships
20
+ - `.context/business/workflows.md` — existing business processes
21
+ - `.context/architecture/overview.md` — current system architecture
22
+ - `.context/architecture/api-surface.md` — existing API endpoints
23
+ - `.context/architecture/data-model.md` — current data model
24
+
25
+ ### Step 2: Analyze Requirements in Context
26
+
27
+ Consider:
28
+ - How does this feature fit into the existing architecture?
29
+ - Which existing modules/components are affected?
30
+ - Are there existing patterns that should be followed?
31
+ - Are there potential conflicts with current functionality?
32
+ - What data model changes might be needed?
33
+ - What API changes are needed?
34
+
35
+ ### Step 3: Generate PRD
36
+
37
+ Write `.context/workflow/artifacts/prd.md` with this structure:
38
+
39
+ ```markdown
40
+ # PRD: [Feature Name]
41
+
42
+ ## Overview
43
+ [2-3 sentence description of the feature, its purpose, and expected impact]
44
+
45
+ ## Background
46
+ [Why this feature is needed — business context, user pain points, strategic alignment]
47
+
48
+ ## Goals & Success Criteria
49
+ - [ ] [Measurable goal 1]
50
+ - [ ] [Measurable goal 2]
51
+
52
+ ## Functional Requirements
53
+
54
+ ### [Requirement Group 1]
55
+ - **FR-1.1**: [Detailed requirement]
56
+ - Acceptance: [How to verify this is done correctly]
57
+ - **FR-1.2**: [Detailed requirement]
58
+ - Acceptance: [How to verify]
59
+
60
+ ### [Requirement Group 2]
61
+ - **FR-2.1**: [Detailed requirement]
62
+ - Acceptance: [How to verify]
63
+
64
+ ## Non-Functional Requirements
65
+ - **Performance**: [Specific targets, e.g., "< 200ms response time"]
66
+ - **Security**: [Security considerations]
67
+ - **Scalability**: [Scale expectations]
68
+ - **Compatibility**: [Browser, OS, API version requirements]
69
+
70
+ ## Technical Design Notes
71
+ - **Affected modules**: [List of modules/components that need changes]
72
+ - **Data model changes**: [New entities, modified fields, migrations]
73
+ - **API changes**: [New endpoints, modified endpoints]
74
+ - **Dependencies**: [New libraries or services needed]
75
+
76
+ ## Scope Boundaries
77
+ ### In Scope
78
+ - [Explicit list of what's included]
79
+
80
+ ### Out of Scope
81
+ - [Explicit list of what's NOT included]
82
+
83
+ ## Risks & Mitigations
84
+ | Risk | Likelihood | Impact | Mitigation |
85
+ |------|-----------|--------|------------|
86
+ | [Risk 1] | [H/M/L] | [H/M/L] | [How to mitigate] |
87
+
88
+ ## Open Questions
89
+ - [ ] [Question that needs resolution]
90
+ ```
91
+
92
+ ### Step 4: Validate
93
+
94
+ - Ensure every user story from requirements.md is covered by at least one functional requirement
95
+ - Ensure acceptance criteria are testable and specific
96
+ - Flag any requirements that seem contradictory or incomplete
97
+
98
+ If running independently (not through `/dev` orchestrator):
99
+ - Present the PRD to the user for review
100
+ - Incorporate feedback and update the artifact
@@ -0,0 +1,109 @@
1
+ # /dev-quality — P5: Quality Gate
2
+
3
+ > Runs quality checks on code changes: build, lint, test. Reports pass/fail with details.
4
+
5
+ ## Input
6
+ - Code changes from P4 (in the working tree)
7
+ - `.context/workflow/artifacts/execution-log.md` (from P4)
8
+ - Context: `.context/conventions/testing.md`, `.context/constitution.md`
9
+
10
+ ## Output
11
+ - `.context/workflow/artifacts/quality-report.md`
12
+
13
+ ## Instructions
14
+
15
+ ### Step 1: Determine Quality Commands
16
+
17
+ Read `.context/constitution.md` to find the build, test, and lint commands:
18
+ - Look for the `## Build & Test Commands` section
19
+ - Extract: BUILD_COMMAND, TEST_COMMAND, LINT_COMMAND
20
+
21
+ If the commands are still placeholders (`[BUILD_COMMAND]`, etc.), check for common patterns:
22
+ - `package.json` exists → try `npm run build`, `npm test`, `npm run lint`
23
+ - `*.csproj` exists → try `dotnet build`, `dotnet test`
24
+ - `Cargo.toml` exists → try `cargo build`, `cargo test`
25
+ - `go.mod` exists → try `go build ./...`, `go test ./...`
26
+ - `requirements.txt` / `pyproject.toml` exists → try `python -m pytest`
27
+
28
+ If no commands can be determined, skip automated checks and note this in the report.
29
+
30
+ ### Step 2: Run Quality Checks
31
+
32
+ Execute each check in order. For each check, capture stdout/stderr and exit code.
33
+
34
+ #### Check 1: Build
35
+ ```bash
36
+ [BUILD_COMMAND]
37
+ ```
38
+ - Pass: exit code 0
39
+ - Fail: capture error output
40
+
41
+ #### Check 2: Lint (if available)
42
+ ```bash
43
+ [LINT_COMMAND]
44
+ ```
45
+ - Pass: exit code 0 (or only warnings)
46
+ - Fail: capture lint errors
47
+
48
+ #### Check 3: Test
49
+ ```bash
50
+ [TEST_COMMAND]
51
+ ```
52
+ - Pass: exit code 0, all tests pass
53
+ - Fail: capture failing test names and output
54
+
55
+ ### Step 3: Handle Failures
56
+
57
+ If any check fails:
58
+
59
+ 1. **First attempt**: Analyze the error output. If the error is clearly caused by the recent changes and looks fixable (syntax error, missing import, type error):
60
+ - Attempt to fix the issue
61
+ - Re-run the failed check
62
+ - If it passes now, record as "fixed on retry" in the report
63
+
64
+ 2. **Second failure**: Do NOT attempt further fixes. Record the failure in the report and let the orchestrator handle it (pause for human guidance).
65
+
66
+ ### Step 4: Generate Quality Report
67
+
68
+ Write `.context/workflow/artifacts/quality-report.md`:
69
+
70
+ ```markdown
71
+ # Quality Report
72
+
73
+ - **Date**: [ISO-8601]
74
+ - **Overall Status**: [PASS | FAIL]
75
+
76
+ ## Check Results
77
+
78
+ ### Build
79
+ - **Status**: [PASS | FAIL | SKIPPED]
80
+ - **Command**: `[command used]`
81
+ - **Output**: [summary of output, full output if failed]
82
+
83
+ ### Lint
84
+ - **Status**: [PASS | FAIL | SKIPPED]
85
+ - **Command**: `[command used]`
86
+ - **Warnings**: [count]
87
+ - **Errors**: [count, details if any]
88
+
89
+ ### Test
90
+ - **Status**: [PASS | FAIL | SKIPPED]
91
+ - **Command**: `[command used]`
92
+ - **Tests run**: [count]
93
+ - **Tests passed**: [count]
94
+ - **Tests failed**: [count]
95
+ - **Failed tests**: [list of failing test names and error messages]
96
+
97
+ ## Auto-Fix Attempts
98
+ - [Description of any auto-fix attempts and their outcomes]
99
+
100
+ ## Recommendations
101
+ - [Any suggestions for manual fixes if checks failed]
102
+ ```
103
+
104
+ ### Step 5: Report
105
+
106
+ If running independently (not through orchestrator):
107
+ - Show the quality report to the user
108
+ - If FAIL: suggest what needs to be fixed
109
+ - If PASS: confirm all checks passed
@@ -0,0 +1,75 @@
1
+ # /dev-requirements — P0: Requirements Gathering
2
+
3
+ > Collects and structures user requirements into a formal requirements document.
4
+
5
+ ## Input
6
+ - User conversation, issue links, or raw requirements text
7
+ - Context: `.context/business/overview.md`, `.context/business/domain-model.md`
8
+
9
+ ## Output
10
+ - `.context/workflow/artifacts/requirements.md`
11
+
12
+ ## Instructions
13
+
14
+ ### Step 1: Gather Context
15
+
16
+ Read the following files to understand the project's business domain (skip any that don't exist):
17
+ - `.context/business/overview.md` — product vision, target users
18
+ - `.context/business/domain-model.md` — core entities and domain language
19
+ - `.context/business/glossary.md` — terminology
20
+
21
+ ### Step 2: Collect Requirements
22
+
23
+ Check if requirements were provided inline (as arguments to `/dev` or `/dev-requirements`).
24
+
25
+ If requirements were provided inline:
26
+ - Use them directly as the raw input
27
+
28
+ If no requirements were provided:
29
+ - Ask the user to describe what they want to build or change
30
+ - Ask clarifying questions to understand:
31
+ - **What**: What is the feature/change/fix?
32
+ - **Why**: What problem does it solve? What's the motivation?
33
+ - **Who**: Who are the users affected?
34
+ - **Scope**: What's in scope and out of scope?
35
+ - **Constraints**: Any technical constraints, deadlines, or dependencies?
36
+
37
+ ### Step 3: Structure Requirements
38
+
39
+ Create `.context/workflow/artifacts/requirements.md` with this structure:
40
+
41
+ ```markdown
42
+ # Requirements: [Feature/Change Name]
43
+
44
+ ## Summary
45
+ [1-2 sentence overview of what is needed and why]
46
+
47
+ ## User Stories
48
+ - As a [user type], I want to [action] so that [benefit]
49
+ - ...
50
+
51
+ ## Functional Requirements
52
+ 1. [FR-1]: [Description]
53
+ 2. [FR-2]: [Description]
54
+ ...
55
+
56
+ ## Non-Functional Requirements
57
+ - Performance: [if applicable]
58
+ - Security: [if applicable]
59
+ - Compatibility: [if applicable]
60
+
61
+ ## Constraints
62
+ - [Any technical, business, or timeline constraints]
63
+
64
+ ## Out of Scope
65
+ - [What is explicitly NOT part of this work]
66
+
67
+ ## Open Questions
68
+ - [Any unresolved questions that need answers before proceeding]
69
+ ```
70
+
71
+ ### Step 4: Present to User
72
+
73
+ Show the structured requirements to the user. If running within the `/dev` orchestrator, the orchestrator handles checkpoint logic. If running independently:
74
+ - Ask the user if the requirements look correct
75
+ - Incorporate any feedback and update the artifact
@@ -0,0 +1,120 @@
1
+ ---
2
+ name: review-context
3
+ description: Audit the health of the .context/ system, detect drift, and report on context quality
4
+ triggers:
5
+ - review context
6
+ - audit context
7
+ - check context health
8
+ - context status
9
+ ---
10
+
11
+ # Review Context Skill
12
+
13
+ Perform a comprehensive health check of the `.context/` system to identify drift, staleness, missing information, and quality issues.
14
+
15
+ ## Workflow
16
+
17
+ ### Step 1: Completeness Check
18
+
19
+ Verify all expected context files exist and are populated:
20
+
21
+ 1. **Required files**: Check that all files listed in `.context/index.md` exist
22
+ 2. **Non-placeholder content**: Check that files contain actual content, not just template placeholders
23
+ 3. **Confidence levels**: Report the distribution of confidence markers (high/medium/low)
24
+
25
+ ### Step 2: Drift Detection
26
+
27
+ Run comprehensive drift analysis:
28
+
29
+ 1. **Auto-generated file freshness**:
30
+ - Compare `architecture/data-model.md` against current ORM models / migrations
31
+ - Compare `architecture/api-surface.md` against current route definitions
32
+ - Compare `architecture/dependencies.md` against current package manifests
33
+ - Compare `architecture/class-index.md` against current source class/interface definitions
34
+ - Compare `architecture/module-graph.md` against current module structure and imports
35
+
36
+ 2. **Timestamp analysis**:
37
+ - Compare `_meta/last-sync.json` timestamps against source file modification times
38
+ - Flag context files that have not been updated in more than 30 days while their underlying source code has changed
39
+
40
+ 3. **Content consistency**:
41
+ - Check that entities mentioned in `business/domain-model.md` exist in code
42
+ - Check that API endpoints in `api-surface.md` match actual routes
43
+ - Check that build/test commands in `constitution.md` actually work
44
+
45
+ 4. **Index completeness**:
46
+ - Scan `.context/architecture/decisions/` for ADR files not listed in `index.md`
47
+ - Scan `.context/specs/` for spec directories not listed in `index.md`
48
+ - Scan `.context/changes/` for change proposals not listed in `index.md`
49
+ - Report any orphaned entries (listed in `index.md` but file does not exist)
50
+
51
+ ### Step 3: Size Budget Check
52
+
53
+ Verify no files exceed their size budgets:
54
+
55
+ | File Category | Budget |
56
+ |--------------|--------|
57
+ | constitution.md | 200 lines |
58
+ | AGENTS.md | 150 lines |
59
+ | Business files | 500 lines |
60
+ | Architecture overview | 800 lines |
61
+ | Architecture files (other) | 500 lines |
62
+ | Convention files | 400 lines |
63
+ | ADR files | 200 lines |
64
+ | Experience files | 500 lines |
65
+
66
+ Flag any files exceeding their budget.
67
+
68
+ ### Step 4: Experience Health
69
+
70
+ 1. Check if `experience/lessons.md` needs compaction (> 500 lines)
71
+ 2. Check if recent bug fixes have corresponding debugging entries
72
+ 3. Check if recent architectural decisions have corresponding ADRs
73
+
74
+ ### Step 5: Generate Report
75
+
76
+ Produce a health report with:
77
+
78
+ ```markdown
79
+ # Context Health Report — YYYY-MM-DD
80
+
81
+ ## Overall Score: [A/B/C/D/F]
82
+
83
+ ## Completeness: [X/Y files populated]
84
+ - [List of missing or placeholder-only files]
85
+
86
+ ## Drift Detection: [N issues found]
87
+ - [List of drifted files with details]
88
+
89
+ ## Size Budgets: [N files over budget]
90
+ - [List of over-budget files]
91
+
92
+ ## Experience Health: [status]
93
+ - [Compaction needed? Recent bugs tracked?]
94
+
95
+ ## Recommendations
96
+ 1. [Most important action item]
97
+ 2. [Second action item]
98
+ 3. [...]
99
+ ```
100
+
101
+ ### Step 6: Auto-Fix (Default)
102
+
103
+ Unless the user specifies `--report-only`, automatically apply fixes:
104
+
105
+ 1. **Regenerate all auto-generated files**:
106
+ - Run `scripts/extract-structure.sh --class-index` → rebuild `architecture/class-index.md`
107
+ - Run `scripts/extract-structure.sh --module-graph` → rebuild auto-generated sections of `architecture/module-graph.md`
108
+ - Run `scripts/sync-context.sh` → rebuild `architecture/dependencies.md`
109
+ 2. **Update index.md**: Add entries for files discovered in Step 2.4 (index completeness), remove orphaned entries
110
+ 3. **Rebuild concept index**: Regenerate `_meta/concepts.md` from all context files
111
+ 4. **Compact experience files** that exceed budget (using `scripts/compact-experience.sh`)
112
+ 5. **Update `_meta/last-sync.json`** timestamps
113
+ 6. **Re-run drift detection** to verify fixes
114
+
115
+ If `--report-only` is specified, skip this step and only output the report from Step 5.
116
+
117
+ ## Output
118
+
119
+ - Context health report (printed to user and saved to `_meta/drift-report.md`)
120
+ - Auto-fixed files (regenerated auto-generated files, updated index, rebuilt concept index)