rpi-kit 2.2.2 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/.claude-plugin/marketplace.json +3 -2
  2. package/.claude-plugin/plugin.json +1 -1
  3. package/.gemini/commands/opsx/apply.toml +149 -0
  4. package/.gemini/commands/opsx/archive.toml +154 -0
  5. package/.gemini/commands/opsx/bulk-archive.toml +239 -0
  6. package/.gemini/commands/opsx/continue.toml +111 -0
  7. package/.gemini/commands/opsx/explore.toml +170 -0
  8. package/.gemini/commands/opsx/ff.toml +94 -0
  9. package/.gemini/commands/opsx/new.toml +66 -0
  10. package/.gemini/commands/opsx/onboard.toml +547 -0
  11. package/.gemini/commands/opsx/propose.toml +103 -0
  12. package/.gemini/commands/opsx/sync.toml +131 -0
  13. package/.gemini/commands/opsx/verify.toml +161 -0
  14. package/.gemini/commands/rpi/archive.toml +140 -0
  15. package/.gemini/commands/rpi/docs-gen.toml +210 -0
  16. package/.gemini/commands/rpi/docs.toml +153 -0
  17. package/.gemini/commands/rpi/evolve.toml +411 -0
  18. package/.gemini/commands/rpi/fix.toml +290 -0
  19. package/.gemini/commands/rpi/implement.toml +272 -0
  20. package/.gemini/commands/rpi/init.toml +180 -0
  21. package/.gemini/commands/rpi/learn.toml +105 -0
  22. package/.gemini/commands/rpi/new.toml +158 -0
  23. package/.gemini/commands/rpi/onboarding.toml +236 -0
  24. package/.gemini/commands/rpi/party.toml +204 -0
  25. package/.gemini/commands/rpi/plan.toml +623 -0
  26. package/.gemini/commands/rpi/research.toml +265 -0
  27. package/.gemini/commands/rpi/review.toml +443 -0
  28. package/.gemini/commands/rpi/rpi.toml +114 -0
  29. package/.gemini/commands/rpi/simplify.toml +214 -0
  30. package/.gemini/commands/rpi/status.toml +194 -0
  31. package/.gemini/commands/rpi/update.toml +107 -0
  32. package/.gemini/skills/openspec-apply-change/SKILL.md +156 -0
  33. package/.gemini/skills/openspec-archive-change/SKILL.md +114 -0
  34. package/.gemini/skills/openspec-bulk-archive-change/SKILL.md +246 -0
  35. package/.gemini/skills/openspec-continue-change/SKILL.md +118 -0
  36. package/.gemini/skills/openspec-explore/SKILL.md +288 -0
  37. package/.gemini/skills/openspec-ff-change/SKILL.md +101 -0
  38. package/.gemini/skills/openspec-new-change/SKILL.md +74 -0
  39. package/.gemini/skills/openspec-onboard/SKILL.md +554 -0
  40. package/.gemini/skills/openspec-propose/SKILL.md +110 -0
  41. package/.gemini/skills/openspec-sync-specs/SKILL.md +138 -0
  42. package/.gemini/skills/openspec-verify-change/SKILL.md +168 -0
  43. package/CHANGELOG.md +15 -0
  44. package/README.md +6 -6
  45. package/agents/atlas.md +40 -0
  46. package/agents/clara.md +40 -0
  47. package/agents/forge.md +40 -0
  48. package/agents/hawk.md +40 -0
  49. package/agents/luna.md +40 -0
  50. package/agents/mestre.md +46 -0
  51. package/agents/nexus.md +52 -0
  52. package/agents/pixel.md +40 -0
  53. package/agents/quill.md +40 -0
  54. package/agents/razor.md +40 -0
  55. package/agents/sage.md +46 -0
  56. package/agents/scout.md +40 -0
  57. package/agents/shield.md +40 -0
  58. package/bin/cli.js +60 -18
  59. package/commands/rpi/docs.md +29 -1
  60. package/commands/rpi/fix.md +301 -0
  61. package/commands/rpi/implement.md +37 -0
  62. package/commands/rpi/plan.md +66 -1
  63. package/commands/rpi/research.md +48 -1
  64. package/commands/rpi/review.md +48 -1
  65. package/commands/rpi/rpi.md +1 -1
  66. package/commands/rpi/simplify.md +31 -1
  67. package/commands/rpi/status.md +69 -0
  68. package/marketplace.json +3 -2
  69. package/package.json +2 -1
@@ -0,0 +1,153 @@
1
+ description = "Quill generates and updates documentation based on the implementation."
2
+
3
+ prompt = """
4
+ # /rpi:docs — Docs Phase
5
+
6
+ Quill reads all feature artifacts and generates documentation: README updates, changelog entries, API docs, and inline comments where non-obvious.
7
+
8
+ ---
9
+
10
+ ## Step 1: Load config and validate
11
+
12
+ 1. Read `.rpi.yaml` for config. Apply defaults if missing:
13
+ - `folder`: `rpi/features`
14
+ - `context_file`: `rpi/context.md`
15
+ - `commit_style`: `conventional`
16
+ 2. Parse `$ARGUMENTS` to extract `{slug}`.
17
+ 3. Validate `rpi/features/{slug}/implement/IMPLEMENT.md` exists. If not:
18
+ ```
19
+ IMPLEMENT.md not found for '{slug}'. Run /rpi:implement {slug} first.
20
+ ```
21
+ Stop.
22
+
23
+ ## Step 2: Validate review verdict
24
+
25
+ 1. Look for a review verdict in `rpi/features/{slug}/implement/IMPLEMENT.md`.
26
+ 2. The verdict appears in a `## Review` section as `PASS`, `PASS with concerns`, or `FAIL`.
27
+ 3. If verdict is `FAIL`:
28
+ ```
29
+ Review verdict is FAIL for '{slug}'.
30
+ Fix the issues identified in IMPLEMENT.md and re-run: /rpi:review {slug}
31
+ ```
32
+ Stop.
33
+ 4. If no review verdict is found:
34
+ ```
35
+ No review verdict found for '{slug}'. Run /rpi:review {slug} first.
36
+ ```
37
+ Stop.
38
+
39
+ ## Step 3: Gather context
40
+
41
+ 1. Read `rpi/features/{slug}/REQUEST.md` — store as `$REQUEST`.
42
+ 2. Read `rpi/features/{slug}/plan/PLAN.md` — store as `$PLAN`.
43
+ 3. Read `rpi/features/{slug}/implement/IMPLEMENT.md` — store as `$IMPLEMENT`.
44
+ 4. Read `rpi/context.md` (project context) if it exists — store as `$CONTEXT`.
45
+ 5. Scan `rpi/features/{slug}/delta/` for all files in ADDED/, MODIFIED/, and REMOVED/ — store as `$DELTA_CONTENTS`.
46
+ 6. Read `README.md` from the project root if it exists — store as `$CURRENT_README`.
47
+ 7. Read `CHANGELOG.md` from the project root if it exists — store as `$CURRENT_CHANGELOG`.
48
+
49
+ ## Step 4: Launch Quill
50
+
51
+ Launch Quill agent with this prompt:
52
+
53
+ ```
54
+ You are Quill. Generate and update documentation for feature: {slug}
55
+
56
+ ## Request
57
+ {$REQUEST}
58
+
59
+ ## Plan
60
+ {$PLAN}
61
+
62
+ ## Implementation
63
+ {$IMPLEMENT}
64
+
65
+ ## Delta Specs
66
+ {$DELTA_CONTENTS}
67
+
68
+ ## Project Context
69
+ {$CONTEXT}
70
+
71
+ ## Current README
72
+ {$CURRENT_README or "No README.md found."}
73
+
74
+ ## Current CHANGELOG
75
+ {$CURRENT_CHANGELOG or "No CHANGELOG.md found."}
76
+
77
+ Your task:
78
+ 1. Update README.md with new feature documentation (if the feature adds user-facing behavior or public API)
79
+ - Add a section or update an existing section — don't rewrite the entire README
80
+ - Include usage examples with concrete values
81
+ - If the feature is internal/refactoring only, skip README updates
82
+ 2. Write a changelog entry in conventional format
83
+ - Use the appropriate category: Added, Changed, Fixed, Removed
84
+ - Reference the feature slug
85
+ - If CHANGELOG.md exists, prepend the new entry under the correct version
86
+ - If CHANGELOG.md doesn't exist, create it with a header and the first entry
87
+ 3. Add API docs for new public interfaces
88
+ - Document exported functions, classes, or endpoints introduced by this feature
89
+ - Include parameter types, return types, and one usage example per interface
90
+ - Write docs where the project convention places them (JSDoc, docstrings, doc comments, or separate files)
91
+ 4. Add inline comments only where the code is non-obvious
92
+ - Explain WHY, not WHAT
93
+ - Focus on: non-obvious business rules, workarounds, performance tradeoffs, external API quirks
94
+ - Do NOT add comments that restate the code
95
+
96
+ Rules:
97
+ - Keep docs DRY — don't repeat what the code already says
98
+ - Match existing documentation style and tone
99
+ - Use concrete examples, not abstract descriptions
100
+ - If the code says WHAT, the docs should say WHY
101
+
102
+ After documentation updates, append your activity to rpi/features/{slug}/ACTIVITY.md:
103
+
104
+ ### {current_date} — Quill (Docs)
105
+ - **Action:** Documentation updates for {slug}
106
+ - **Key decisions:** {for each <decision> tag you emitted: "summary (rationale)", separated by semicolons. If none: "No decisions in this phase."}
107
+ - **Files updated:** {list}
108
+ - **Changelog entry:** {yes|no}
109
+ - **Quality:** {your quality gate result}
110
+ ```
111
+
112
+ Store the output as `$QUILL_OUTPUT`.
113
+
114
+ ## Step 5: Commit documentation changes
115
+
116
+ 1. Stage all documentation files changed by Quill:
117
+ ```bash
118
+ git add -A
119
+ ```
120
+ 2. Commit with a conventional message:
121
+ ```bash
122
+ git commit -m "docs({slug}): update documentation for {slug}"
123
+ ```
124
+
125
+ ## Step 6: Consolidate decisions to DECISIONS.md
126
+
127
+ 1. Read `rpi/features/{slug}/ACTIVITY.md`.
128
+ 2. Extract all `<decision>` tags from entries belonging to the Docs phase (Quill entries from this run).
129
+ 3. If no decisions found, skip this step.
130
+ 4. Read `rpi/features/{slug}/DECISIONS.md` if it exists (to get the last decision number for sequential numbering).
131
+ 5. Append a new section to `rpi/features/{slug}/DECISIONS.md`:
132
+
133
+ ```markdown
134
+ ## Docs Phase
135
+ _Generated: {current_date}_
136
+
137
+ | # | Type | Decision | Alternatives | Rationale | Impact |
138
+ |---|------|----------|-------------|-----------|--------|
139
+ | {N} | {type} | {summary} | {alternatives} | {rationale} | {impact} |
140
+ ```
141
+
142
+ 6. Number decisions sequentially, continuing from the last number in DECISIONS.md.
143
+
144
+ ## Step 7: Output summary
145
+
146
+ ```
147
+ Documentation complete: {slug}
148
+
149
+ {$QUILL_OUTPUT summary — list of files updated and what changed}
150
+
151
+ Next: /rpi:archive {slug}
152
+ ```
153
+ """
@@ -0,0 +1,411 @@
1
+ description = "Analyze the entire project for technical health, code quality, test coverage, ecosystem status, and product gaps. Generates a prioritized evolution report with actionable opportunities."
2
+
3
+ prompt = """
4
+ # /rpi:evolve — Product Evolution Analysis
5
+
6
+ Standalone utility command — launches 5 agents in parallel to analyze the project from different perspectives, then Nexus synthesizes into a prioritized evolution report.
7
+
8
+ Use `--quick` for a fast technical-only health check (Atlas + Nexus only).
9
+
10
+ ---
11
+
12
+ ## Step 1: Load config and context
13
+
14
+ 1. Read `.rpi.yaml` from the project root. If missing, use defaults silently.
15
+ 2. Read `rpi/context.md` if it exists — store as `$PROJECT_CONTEXT`.
16
+ 3. If `rpi/context.md` does not exist, note that Atlas will generate context from scratch.
17
+ 4. Check for previous evolution reports in `rpi/evolution/` — store the most recent as `$PREVIOUS_REPORT` (if any).
18
+ 5. Parse `$ARGUMENTS` for `--quick` flag.
19
+
20
+ ## Step 2: Create output directory
21
+
22
+ ```bash
23
+ mkdir -p rpi/evolution
24
+ ```
25
+
26
+ ## Step 3: Launch analysis agents
27
+
28
+ If `--quick` flag is set, launch only Agent 1 (Atlas) — skip Agents 2–5 — and then proceed directly to Step 4.
29
+
30
+ Launch **5 agents in parallel** using the Agent tool. Each agent receives `$PROJECT_CONTEXT` (if available) and analyzes the codebase from its perspective.
31
+
32
+ ### Agent 1: Atlas — Technical Health
33
+
34
+ ```
35
+ You are Atlas. Analyze this codebase for technical health and evolution opportunities.
36
+
37
+ {If $PROJECT_CONTEXT exists:}
38
+ ## Existing Project Context
39
+ {$PROJECT_CONTEXT}
40
+ {End if}
41
+
42
+ Your task:
43
+ 1. Read config files (package.json, tsconfig.json, pyproject.toml, etc.)
44
+ 2. Scan directory structure for architecture patterns
45
+ 3. Identify technical debt: dead code, unused exports, inconsistent patterns
46
+ 4. Check dependency health: outdated versions, abandoned packages, duplicates
47
+ 5. Evaluate architecture: clean separation, coupling issues, scaling concerns
48
+ 6. Check documentation completeness: README, CLAUDE.md, inline docs
49
+
50
+ Produce your analysis with this structure:
51
+
52
+ ## [Atlas — Technical Health]
53
+
54
+ ### Strengths
55
+ - {strength 1 with evidence (file:line)}
56
+ - {strength 2}
57
+
58
+ ### Technical Debt
59
+ Severity: {LOW|MEDIUM|HIGH}
60
+ - {debt item 1 with evidence}
61
+ - {debt item 2}
62
+
63
+ ### Dependencies
64
+ - Outdated: {list with current vs latest}
65
+ - Abandoned: {deps with no recent updates}
66
+ - Duplicates: {overlapping deps}
67
+
68
+ ### Architecture Issues
69
+ - {issue 1 with evidence}
70
+ - {issue 2}
71
+
72
+ ### Quick Wins
73
+ - {actionable item that can be fixed in < 1 hour}
74
+
75
+ RULES:
76
+ - Be specific — cite files, lines, versions
77
+ - Only report what you can verify from the code
78
+ - Prioritize by impact, not by ease
79
+ - If a section has no findings, write "No issues found" and move on
80
+ ```
81
+
82
+ Store output as `$ATLAS_FINDINGS`.
83
+
84
+ ### Agent 2: Sage — Test Coverage
85
+
86
+ ```
87
+ You are Sage. Analyze the test coverage and testing strategy of this codebase.
88
+
89
+ {If $PROJECT_CONTEXT exists:}
90
+ ## Existing Project Context
91
+ {$PROJECT_CONTEXT}
92
+ {End if}
93
+
94
+ Your task:
95
+ 1. Identify the test framework(s) in use
96
+ 2. Map which modules/components have tests and which don't
97
+ 3. Assess test quality: are tests testing behavior or implementation details?
98
+ 4. Check for missing test types: unit, integration, e2e, edge cases
99
+ 5. Look for test anti-patterns: brittle assertions, test interdependencies, missing error cases
100
+
101
+ Produce your analysis with this structure:
102
+
103
+ ## [Sage — Test Coverage]
104
+
105
+ ### Coverage Map
106
+ - {module/file}: {has tests | no tests | partial}
107
+ - ...
108
+
109
+ ### Gaps (prioritized by risk)
110
+ - {untested module with risk assessment}
111
+ - ...
112
+
113
+ ### Test Quality
114
+ - Framework: {name}
115
+ - Anti-patterns found: {list or "none"}
116
+ - Missing test types: {unit|integration|e2e|edge cases}
117
+
118
+ ### Recommendations
119
+ - {recommendation 1 with effort estimate S|M|L}
120
+ - {recommendation 2}
121
+
122
+ RULES:
123
+ - Focus on what's NOT tested rather than what is
124
+ - Prioritize gaps by business risk, not code volume
125
+ - Be specific about which files/functions lack coverage
126
+ ```
127
+
128
+ Store output as `$SAGE_FINDINGS`.
129
+
130
+ ### Agent 3: Hawk — Code Quality
131
+
132
+ ```
133
+ You are Hawk. Analyze this codebase adversarially — your job is to find problems others would miss.
134
+
135
+ {If $PROJECT_CONTEXT exists:}
136
+ ## Existing Project Context
137
+ {$PROJECT_CONTEXT}
138
+ {End if}
139
+
140
+ Your task:
141
+ 1. Find anti-patterns and code smells
142
+ 2. Identify complexity hotspots (functions/files that are too complex)
143
+ 3. Look for copy-paste code and duplication
144
+ 4. Check error handling: swallowed errors, missing validation, inconsistent patterns
145
+ 5. Assess naming and readability issues
146
+ 6. Check for security risks: hardcoded values, exposed secrets, injection vectors
147
+
148
+ Produce your analysis with this structure:
149
+
150
+ ## [Hawk — Code Quality]
151
+
152
+ ### Problems
153
+ #### CRITICAL
154
+ - {problem with file:line and why it matters}
155
+
156
+ #### HIGH
157
+ - {problem with evidence}
158
+
159
+ #### MEDIUM
160
+ - {problem with evidence}
161
+
162
+ #### LOW
163
+ - {problem with evidence}
164
+
165
+ ### Quick Wins
166
+ - {fix that improves quality with minimal effort}
167
+
168
+ ### Risks
169
+ - {potential future problem based on current patterns}
170
+
171
+ RULES:
172
+ - You MUST find at least 3 issues — look harder if you think the code is perfect
173
+ - Severity must be justified with impact assessment
174
+ - Every finding must cite specific file:line
175
+ - Focus on real problems, not style preferences
176
+ ```
177
+
178
+ Store output as `$HAWK_FINDINGS`.
179
+
180
+ ### Agent 4: Scout — Ecosystem Analysis
181
+
182
+ ```
183
+ You are Scout. Analyze this project's ecosystem health and external dependencies.
184
+
185
+ {If $PROJECT_CONTEXT exists:}
186
+ ## Existing Project Context
187
+ {$PROJECT_CONTEXT}
188
+ {End if}
189
+
190
+ Your task:
191
+ 1. Check all dependencies for outdated versions (compare package.json/pyproject.toml against known latest)
192
+ 2. Identify dependencies with known security vulnerabilities
193
+ 3. Find deprecated APIs or patterns being used
194
+ 4. Look for better alternatives to current dependencies
195
+ 5. Check if the project follows current ecosystem best practices
196
+
197
+ Produce your analysis with this structure:
198
+
199
+ ## [Scout — Ecosystem Analysis]
200
+
201
+ ### Outdated Dependencies
202
+ | Package | Current | Latest | Breaking Changes? |
203
+ |---------|---------|--------|-------------------|
204
+ | {name} | {ver} | {ver} | {yes/no} |
205
+
206
+ ### Security Concerns
207
+ - {CVE or vulnerability with affected package}
208
+
209
+ ### Deprecated Patterns
210
+ - {deprecated API/pattern with recommended replacement}
211
+
212
+ ### Better Alternatives
213
+ - {current dep} → {alternative} — {why it's better}
214
+
215
+ ### Ecosystem Best Practices
216
+ - Following: {list}
217
+ - Missing: {list}
218
+
219
+ RULES:
220
+ - Only flag outdated deps that are significantly behind (skip minor patches)
221
+ - Security concerns must reference specific CVEs or advisories when possible
222
+ - "Better alternatives" must have concrete justification, not opinions
223
+ ```
224
+
225
+ Store output as `$SCOUT_FINDINGS`.
226
+
227
+ ### Agent 5: Clara — Product Analysis
228
+
229
+ ```
230
+ You are Clara. Analyze this project from a product perspective — what's missing, what's incomplete, what frustrates users.
231
+
232
+ {If $PROJECT_CONTEXT exists:}
233
+ ## Existing Project Context
234
+ {$PROJECT_CONTEXT}
235
+ {End if}
236
+
237
+ Your task:
238
+ 1. Map the user-facing features and assess completeness
239
+ 2. Identify incomplete user flows (started but not finished)
240
+ 3. Find UX friction points (confusing APIs, missing error messages, poor defaults)
241
+ 4. Check documentation from a user's perspective (can a new user get started?)
242
+ 5. Identify features that exist in code but aren't documented or discoverable
243
+ 6. Assess onboarding experience
244
+
245
+ Produce your analysis with this structure:
246
+
247
+ ## [Clara — Product Analysis]
248
+
249
+ ### Feature Completeness
250
+ - {feature}: {complete | partial | stub}
251
+ - ...
252
+
253
+ ### Missing Features
254
+ - {feature that users would expect but doesn't exist}
255
+
256
+ ### UX Friction Points
257
+ - {friction point with evidence}
258
+
259
+ ### Documentation Gaps
260
+ - {what's missing from user-facing docs}
261
+
262
+ ### Undiscoverable Features
263
+ - {feature that exists but users can't find}
264
+
265
+ ### Recommendations
266
+ - {recommendation with effort S|M|L and impact HIGH|MED|LOW}
267
+
268
+ RULES:
269
+ - Think as a user, not a developer
270
+ - Focus on the first 5 minutes of experience
271
+ - Missing error messages count as friction
272
+ - Score completeness honestly — partial is fine
273
+ ```
274
+
275
+ Store output as `$CLARA_FINDINGS`.
276
+
277
+ ## Step 4: Synthesize with Nexus
278
+
279
+ Launch Nexus agent with all findings:
280
+
281
+ ```
282
+ You are Nexus. Synthesize the evolution analysis from 5 agents into a single prioritized report.
283
+
284
+ {If --quick, only $ATLAS_FINDINGS is available:}
285
+ ## Atlas Findings (Technical Health)
286
+ {$ATLAS_FINDINGS}
287
+ {Else:}
288
+ ## Atlas Findings (Technical Health)
289
+ {$ATLAS_FINDINGS}
290
+
291
+ ## Sage Findings (Test Coverage)
292
+ {$SAGE_FINDINGS}
293
+
294
+ ## Hawk Findings (Code Quality)
295
+ {$HAWK_FINDINGS}
296
+
297
+ ## Scout Findings (Ecosystem)
298
+ {$SCOUT_FINDINGS}
299
+
300
+ ## Clara Findings (Product)
301
+ {$CLARA_FINDINGS}
302
+ {End if}
303
+
304
+ {If $PREVIOUS_REPORT exists:}
305
+ ## Previous Evolution Report
306
+ {$PREVIOUS_REPORT}
307
+ Note: Compare with previous findings. Highlight what improved and what regressed.
308
+ {End if}
309
+
310
+ Your tasks:
311
+
312
+ ### Task 1: Write the Evolution Report
313
+
314
+ Produce a complete report with this structure:
315
+
316
+ # Evolution Report — {Project Name}
317
+
318
+ ## Executive Summary
319
+ Health: {score}/10 | Opportunities: {N} | Critical: {N}
320
+ {2-3 sentence summary of the project's current state}
321
+
322
+ {If previous report exists:}
323
+ ### Changes Since Last Report
324
+ - Improved: {list}
325
+ - Regressed: {list}
326
+ - New: {list}
327
+ {End if}
328
+
329
+ ## Technical Health (Atlas)
330
+ {Summarize Atlas findings — keep the strongest evidence, drop noise}
331
+
332
+ ## Test Coverage (Sage)
333
+ {Summarize Sage findings}
334
+
335
+ ## Code Quality (Hawk)
336
+ {Summarize Hawk findings — group by severity}
337
+
338
+ ## Ecosystem (Scout)
339
+ {Summarize Scout findings}
340
+
341
+ ## Product Analysis (Clara)
342
+ {Summarize Clara findings}
343
+
344
+ ## Prioritized Recommendations
345
+ {Merge recommendations from all agents, remove duplicates, sort by impact/effort ratio}
346
+
347
+ 1. [{CRITICAL|HIGH|MEDIUM|LOW}] {recommendation} — Effort: {S|M|L|XL}
348
+ 2. ...
349
+
350
+ ### Task 2: Generate Opportunities List
351
+
352
+ Produce a separate document:
353
+
354
+ # Evolution Opportunities
355
+
356
+ ## Ready for /rpi:new
357
+ - [ ] **{slug}** — {S|M|L|XL} | {description}
358
+ - ...
359
+
360
+ ## Needs More Research
361
+ - [ ] **{slug}** — {S|M|L|XL} | {description}
362
+ - ...
363
+
364
+ Separate the two documents clearly with a --- delimiter.
365
+
366
+ ### Task 3: Health Score
367
+
368
+ Calculate a heuristic health score (1-10) based on:
369
+ - Technical debt severity (Atlas)
370
+ - Test coverage completeness (Sage)
371
+ - Code quality issues count and severity (Hawk)
372
+ - Dependency health (Scout)
373
+ - Feature completeness (Clara)
374
+
375
+ The score is a quick-read indicator, not a precise metric. Include it in the Executive Summary.
376
+
377
+ RULES:
378
+ 1. No contradictions left unresolved — if agents disagree, note the disagreement and your resolution
379
+ 2. Remove duplicate findings across agents
380
+ 3. Prioritize by impact × feasibility (high impact + low effort first)
381
+ 4. Every recommendation must have an effort estimate
382
+ 5. Opportunities must have slugs suitable for /rpi:new (kebab-case, descriptive)
383
+ 6. If only Atlas findings are available (--quick mode), adjust the report structure accordingly
384
+ ```
385
+
386
+ Store the output as `$NEXUS_SYNTHESIS`. Split at the `---` delimiter into `$REPORT_CONTENT` and `$OPPORTUNITIES_CONTENT`.
387
+
388
+ ## Step 5: Write outputs
389
+
390
+ 1. Write `$REPORT_CONTENT` to `rpi/evolution/{YYYY-MM-DD}-report.md`.
391
+ 2. Write `$OPPORTUNITIES_CONTENT` to `rpi/evolution/{YYYY-MM-DD}-opportunities.md`.
392
+
393
+ ## Step 6: Output terminal summary
394
+
395
+ ```
396
+ Evolution Report: {Project Name} ({date})
397
+
398
+ Health Score: {score}/10
399
+
400
+ Top 3 Opportunities:
401
+ 1. [{category}] {description} ({source agent})
402
+ 2. [{category}] {description} ({source agent})
403
+ 3. [{category}] {description} ({source agent})
404
+
405
+ Full report: rpi/evolution/{date}-report.md
406
+ Opportunities: rpi/evolution/{date}-opportunities.md
407
+
408
+ To start working on an opportunity:
409
+ /rpi:new {first-opportunity-slug}
410
+ ```
411
+ """