rpi-kit 2.2.2 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. package/.claude-plugin/marketplace.json +3 -2
  2. package/.claude-plugin/plugin.json +1 -1
  3. package/.gemini/commands/opsx/apply.toml +149 -0
  4. package/.gemini/commands/opsx/archive.toml +154 -0
  5. package/.gemini/commands/opsx/bulk-archive.toml +239 -0
  6. package/.gemini/commands/opsx/continue.toml +111 -0
  7. package/.gemini/commands/opsx/explore.toml +170 -0
  8. package/.gemini/commands/opsx/ff.toml +94 -0
  9. package/.gemini/commands/opsx/new.toml +66 -0
  10. package/.gemini/commands/opsx/onboard.toml +547 -0
  11. package/.gemini/commands/opsx/propose.toml +103 -0
  12. package/.gemini/commands/opsx/sync.toml +131 -0
  13. package/.gemini/commands/opsx/verify.toml +161 -0
  14. package/.gemini/commands/rpi/archive.toml +140 -0
  15. package/.gemini/commands/rpi/docs-gen.toml +210 -0
  16. package/.gemini/commands/rpi/docs.toml +153 -0
  17. package/.gemini/commands/rpi/evolve.toml +411 -0
  18. package/.gemini/commands/rpi/fix.toml +290 -0
  19. package/.gemini/commands/rpi/implement.toml +272 -0
  20. package/.gemini/commands/rpi/init.toml +180 -0
  21. package/.gemini/commands/rpi/learn.toml +105 -0
  22. package/.gemini/commands/rpi/new.toml +158 -0
  23. package/.gemini/commands/rpi/onboarding.toml +236 -0
  24. package/.gemini/commands/rpi/party.toml +204 -0
  25. package/.gemini/commands/rpi/plan.toml +623 -0
  26. package/.gemini/commands/rpi/research.toml +265 -0
  27. package/.gemini/commands/rpi/review.toml +443 -0
  28. package/.gemini/commands/rpi/rpi.toml +114 -0
  29. package/.gemini/commands/rpi/simplify.toml +214 -0
  30. package/.gemini/commands/rpi/status.toml +194 -0
  31. package/.gemini/commands/rpi/update.toml +107 -0
  32. package/.gemini/skills/openspec-apply-change/SKILL.md +156 -0
  33. package/.gemini/skills/openspec-archive-change/SKILL.md +114 -0
  34. package/.gemini/skills/openspec-bulk-archive-change/SKILL.md +246 -0
  35. package/.gemini/skills/openspec-continue-change/SKILL.md +118 -0
  36. package/.gemini/skills/openspec-explore/SKILL.md +288 -0
  37. package/.gemini/skills/openspec-ff-change/SKILL.md +101 -0
  38. package/.gemini/skills/openspec-new-change/SKILL.md +74 -0
  39. package/.gemini/skills/openspec-onboard/SKILL.md +554 -0
  40. package/.gemini/skills/openspec-propose/SKILL.md +110 -0
  41. package/.gemini/skills/openspec-sync-specs/SKILL.md +138 -0
  42. package/.gemini/skills/openspec-verify-change/SKILL.md +168 -0
  43. package/CHANGELOG.md +15 -0
  44. package/README.md +6 -6
  45. package/agents/atlas.md +40 -0
  46. package/agents/clara.md +40 -0
  47. package/agents/forge.md +40 -0
  48. package/agents/hawk.md +40 -0
  49. package/agents/luna.md +40 -0
  50. package/agents/mestre.md +46 -0
  51. package/agents/nexus.md +52 -0
  52. package/agents/pixel.md +40 -0
  53. package/agents/quill.md +40 -0
  54. package/agents/razor.md +40 -0
  55. package/agents/sage.md +46 -0
  56. package/agents/scout.md +40 -0
  57. package/agents/shield.md +40 -0
  58. package/bin/cli.js +60 -18
  59. package/commands/rpi/docs.md +29 -1
  60. package/commands/rpi/fix.md +301 -0
  61. package/commands/rpi/implement.md +37 -0
  62. package/commands/rpi/plan.md +66 -1
  63. package/commands/rpi/research.md +48 -1
  64. package/commands/rpi/review.md +48 -1
  65. package/commands/rpi/rpi.md +1 -1
  66. package/commands/rpi/simplify.md +31 -1
  67. package/commands/rpi/status.md +69 -0
  68. package/marketplace.json +3 -2
  69. package/package.json +2 -1
@@ -0,0 +1,623 @@
1
+ description = "Interview developer, generate specs with Mestre/Clara/Pixel, then adversarial review with Nexus."
2
+
3
+ prompt = """
4
+ # /rpi:plan — Plan Phase (v2: Interview-Driven)
5
+
6
+ Nexus interviews the developer, then Mestre (architecture), Clara (product), and Pixel (UX, conditional) generate specs informed by the interview. Nexus performs adversarial review, surfacing contradictions for developer resolution.
7
+
8
+ ---
9
+
10
+ ## Step 1: Load config and validate
11
+
12
+ 1. Read `.rpi.yaml` for config. Apply defaults if missing:
13
+ - `folder`: `rpi/features`
14
+ - `specs_dir`: `rpi/specs`
15
+ - `context_file`: `rpi/context.md`
16
+ - `ux_agent`: `auto`
17
+ 2. Parse `$ARGUMENTS` to extract `{slug}` and optional `--force` flag.
18
+ 3. Validate `rpi/features/{slug}/research/RESEARCH.md` exists. If not:
19
+ ```
20
+ RESEARCH.md not found for '{slug}'. Run /rpi:research {slug} first.
21
+ ```
22
+ Stop.
23
+
24
+ ## Step 2: Check research verdict
25
+
26
+ 1. Read `rpi/features/{slug}/research/RESEARCH.md`.
27
+ 2. Look for the `## Verdict` section.
28
+ 3. If verdict is `NO-GO` and `--force` was NOT passed:
29
+ ```
30
+ Research verdict is NO-GO for '{slug}'.
31
+ Review RESEARCH.md for details and alternatives.
32
+ To override: /rpi:plan {slug} --force
33
+ ```
34
+ Stop.
35
+ 4. If `--force` was passed: proceed despite NO-GO verdict.
36
+
37
+ ## Step 3: Check existing plan
38
+
39
+ 1. Check if `rpi/features/{slug}/plan/PLAN.md` already exists.
40
+ 2. If it exists and `--force` was NOT passed:
41
+ - Ask the user: "PLAN.md already exists for '{slug}'. Overwrite? (yes/no)"
42
+ - If no: stop.
43
+ 3. If `--force` was passed or user confirms: proceed (will overwrite).
44
+
45
+ ## Step 4: Gather context
46
+
47
+ 1. Read `rpi/features/{slug}/REQUEST.md` — store as `$REQUEST`.
48
+ 2. Read `rpi/features/{slug}/research/RESEARCH.md` — store as `$RESEARCH`.
49
+ 3. Read `rpi/context.md` (project context) if it exists — store as `$CONTEXT`.
50
+ 4. Scan `rpi/specs/` for specs relevant to the feature — store as `$RELEVANT_SPECS`.
51
+
52
+ ## Step 5: Detect frontend
53
+
54
+ Check the project root for frontend framework config files:
55
+ - `next.config.*` (`.js`, `.mjs`, or `.ts`) → Next.js
56
+ - `vite.config.*` → Vite (React/Vue/Svelte)
57
+ - `angular.json` → Angular
58
+ - `svelte.config.*` → Svelte/SvelteKit
59
+ - `nuxt.config.*` → Nuxt
60
+ - `package.json` containing `react`, `vue`, `angular`, or `svelte` in dependencies
61
+
62
+ Set `$HAS_FRONTEND` to `true` if any of these are detected.
63
+
64
+ Read `ux_agent` from `.rpi.yaml`:
65
+ - If `always`: set `$RUN_PIXEL` to `true` regardless of frontend detection.
66
+ - If `never`: set `$RUN_PIXEL` to `false` regardless.
67
+ - If `auto` (default): set `$RUN_PIXEL` to `$HAS_FRONTEND`.
68
+
69
+ ## Step 6: Assess complexity
70
+
71
+ Analyze `$REQUEST` and `$RESEARCH` to determine interview depth.
72
+
73
+ 1. Count files mentioned in RESEARCH.md (file changes, affected components).
74
+ 2. Check if the feature involves new architecture (new system/service) vs modification of existing.
75
+ 3. Check if it spans multiple system layers (frontend + backend + database, or multiple services).
76
+ 4. Count open questions and risks flagged in RESEARCH.md.
77
+ 5. Determine complexity and interview depth:
78
+
79
+ | Complexity | Files affected | Layers | Interview depth |
80
+ |-----------|---------------|--------|----------------|
81
+ | S | 1-3 | single | 3-4 questions |
82
+ | M | 4-8 | 1-2 | 4-5 questions |
83
+ | L | 9-15 | multiple | 5-6 questions |
84
+ | XL | 16+ | cross-cutting | 6-8 questions |
85
+
86
+ 6. Store as `$COMPLEXITY` and `$INTERVIEW_DEPTH`.
87
+ 7. Output to user:
88
+ ```
89
+ Complexity: {$COMPLEXITY} — Interview depth: {$INTERVIEW_DEPTH} questions
90
+ ```
91
+
92
+ ## Step 7: Launch Nexus — developer interview
93
+
94
+ Launch Nexus agent to interview the developer before spec generation:
95
+
96
+ ```
97
+ You are Nexus. You are interviewing the developer about feature: {slug}
98
+ before the planning agents (Mestre, Clara, Pixel) generate their specs.
99
+
100
+ Your goal: surface decisions, constraints, and preferences that will
101
+ shape the plan. You are a FACILITATOR — you don't make decisions,
102
+ you help the developer make informed ones.
103
+
104
+ ## Context
105
+ ### REQUEST.md
106
+ {$REQUEST}
107
+
108
+ ### RESEARCH.md
109
+ {$RESEARCH}
110
+
111
+ ### Project Context
112
+ {$CONTEXT}
113
+
114
+ ### Complexity Assessment
115
+ Complexity: {$COMPLEXITY}
116
+ Interview depth: {$INTERVIEW_DEPTH} questions
117
+
118
+ ## Interview Protocol
119
+
120
+ ### Phase 1: Analyze Context (internal, no output)
121
+ 1. Read REQUEST.md and identify:
122
+ - Ambiguous requirements (multiple valid interpretations)
123
+ - Unstated assumptions
124
+ - Missing technical decisions
125
+ 2. Read RESEARCH.md and identify:
126
+ - Open questions flagged by Atlas/Scout
127
+ - Risks without clear mitigations
128
+ - Alternative approaches not yet chosen
129
+ - Contradictions between research findings
130
+ 3. Prioritize: rank discovered gaps by impact on plan quality
131
+ 4. Select top {$INTERVIEW_DEPTH} questions across categories
132
+
133
+ ### Phase 2: Interview (interactive)
134
+ Ask questions ONE AT A TIME using the AskUserQuestion tool.
135
+
136
+ Rules:
137
+ - Each question MUST reference specific content from REQUEST or RESEARCH
138
+ - Provide 2-4 concrete options when possible (not vague open-ended)
139
+ - Include your recommendation as first option with "(Recommended)"
140
+ - After each answer, acknowledge briefly and ask the next question
141
+ - If an answer reveals NEW ambiguity, add a follow-up (within limit)
142
+ - Categories to cover (pick based on what's most impactful):
143
+
144
+ TECHNICAL APPROACH (at least 1 question):
145
+ - Architecture pattern choice
146
+ - Technology/library selection
147
+ - Integration strategy
148
+ - Error handling philosophy
149
+
150
+ SCOPE BOUNDARIES (at least 1 question):
151
+ - Must-have vs nice-to-have features
152
+ - Edge cases: in or out?
153
+ - MVP definition
154
+
155
+ TRADE-OFFS (if complexity >= L):
156
+ - Speed vs quality
157
+ - Simplicity vs flexibility
158
+ - Convention vs optimal
159
+
160
+ RISKS & CONSTRAINTS (if RESEARCH flags risks):
161
+ - Risk mitigation preference
162
+ - Deadline/dependency impacts
163
+ - Performance requirements
164
+
165
+ ### Phase 3: Compile
166
+ After all questions answered, compile the interview results using your
167
+ [Nexus — Developer Interview] output format.
168
+
169
+ Return the compiled interview content.
170
+
171
+ After the interview, append your activity to rpi/features/{slug}/ACTIVITY.md:
172
+
173
+ ### {current_date} — Nexus (Plan Interview)
174
+ - **Action:** Developer interview for {slug}
175
+ - **Key decisions:** {for each <decision> tag you emitted: "summary (rationale)", separated by semicolons. If none: "No decisions in this phase."}
176
+ - **Questions asked:** {count}
177
+ - **Quality:** {your quality gate result}
178
+ ```
179
+
180
+ Store the output as `$INTERVIEW`.
181
+
182
+ ## Step 8: Write INTERVIEW.md
183
+
184
+ 1. Ensure directory exists: `rpi/features/{slug}/plan/`
185
+ 2. Write `rpi/features/{slug}/plan/INTERVIEW.md` with `$INTERVIEW` content, using this format:
186
+
187
+ ```markdown
188
+ # Interview: {Feature Name}
189
+ Date: {current date}
190
+ Complexity: {$COMPLEXITY}
191
+ Questions: {N asked} / {$INTERVIEW_DEPTH planned}
192
+
193
+ {$INTERVIEW content organized by category:
194
+ - Technical Decisions (Q&A pairs with impact notes)
195
+ - Scope Boundaries (Q&A pairs with impact notes)
196
+ - Trade-offs (Q&A pairs with impact notes)
197
+ - Key Constraints Identified
198
+ - Open Items (flagged for agents)}
199
+
200
+ ## Resolved Contradictions
201
+ (Populated by Step 14-15)
202
+ ```
203
+
204
+ 3. Output to user:
205
+ ```
206
+ Interview saved: rpi/features/{slug}/plan/INTERVIEW.md ({N} questions)
207
+ ```
208
+
209
+ ## Step 9: Launch Mestre — first pass (eng.md)
210
+
211
+ Launch Mestre agent with this prompt:
212
+
213
+ ```
214
+ You are Mestre. Generate the engineering specification for feature: {slug}
215
+
216
+ ## Request
217
+ {$REQUEST}
218
+
219
+ ## Research
220
+ {$RESEARCH}
221
+
222
+ ## Project Context
223
+ {$CONTEXT}
224
+
225
+ ## Relevant Specs
226
+ {$RELEVANT_SPECS}
227
+
228
+ ## Developer Interview
229
+ {$INTERVIEW}
230
+
231
+ IMPORTANT: Your output MUST align with the developer's stated preferences
232
+ in the interview. If the developer chose approach X, use approach X.
233
+ If they marked something as out-of-scope, exclude it.
234
+ If an item is listed under "Open Items", use your best judgment but note your assumption.
235
+
236
+ Your task:
237
+ 1. Read the request and research findings carefully
238
+ 2. Make technical decisions: approach, architecture, patterns to follow
239
+ 3. Identify files to create, modify, and remove
240
+ 4. List architectural risks with mitigations
241
+ 5. Output using your eng.md format: [Mestre -- Engineering Specification]
242
+
243
+ Be pragmatic. Follow existing codebase patterns from context.md and research findings. No over-engineering.
244
+
245
+ After generating eng.md, append your activity to rpi/features/{slug}/ACTIVITY.md:
246
+
247
+ ### {current_date} — Mestre (Plan — eng.md)
248
+ - **Action:** Engineering specification for {slug}
249
+ - **Key decisions:** {for each <decision> tag you emitted: "summary (rationale)", separated by semicolons. If none: "No decisions in this phase."}
250
+ - **Architecture decisions:** {count}
251
+ - **Files planned:** {count create + modify}
252
+ - **Quality:** {your quality gate result}
253
+ ```
254
+
255
+ Store the output as `$ENG_OUTPUT`.
256
+
257
+ ## Step 10: Launch Clara — pm.md
258
+
259
+ Launch Clara agent with this prompt:
260
+
261
+ ```
262
+ You are Clara. Generate the product specification for feature: {slug}
263
+
264
+ ## Request
265
+ {$REQUEST}
266
+
267
+ ## Research
268
+ {$RESEARCH}
269
+
270
+ ## Project Context
271
+ {$CONTEXT}
272
+
273
+ ## Developer Interview
274
+ {$INTERVIEW}
275
+
276
+ IMPORTANT: Your output MUST align with the developer's stated preferences
277
+ in the interview. If the developer chose approach X, use approach X.
278
+ If they marked something as out-of-scope, exclude it.
279
+ If an item is listed under "Open Items", use your best judgment but note your assumption.
280
+
281
+ Your task:
282
+ 1. Define user stories with concrete acceptance criteria (Given/When/Then)
283
+ 2. Classify requirements: must-have, nice-to-have, out-of-scope
284
+ 3. Cut anything that doesn't map to the core problem in REQUEST.md
285
+ 4. Define success metrics
286
+ 5. Output using your pm.md format: [Clara -- Product Specification]
287
+
288
+ Be ruthless with scope. Every requirement must have acceptance criteria.
289
+
290
+ After generating pm.md, append your activity to rpi/features/{slug}/ACTIVITY.md:
291
+
292
+ ### {current_date} — Clara (Plan — pm.md)
293
+ - **Action:** Product specification for {slug}
294
+ - **Key decisions:** {for each <decision> tag you emitted: "summary (rationale)", separated by semicolons. If none: "No decisions in this phase."}
295
+ - **User stories:** {count}
296
+ - **Acceptance criteria:** {count}
297
+ - **Scope cuts:** {count of out-of-scope items}
298
+ - **Quality:** {your quality gate result}
299
+ ```
300
+
301
+ Store the output as `$PM_OUTPUT`.
302
+
303
+ ## Step 11: Launch Pixel — ux.md (conditional)
304
+
305
+ Only if `$RUN_PIXEL` is `true`:
306
+
307
+ Launch Pixel agent with this prompt:
308
+
309
+ ```
310
+ You are Pixel. Generate the UX specification for feature: {slug}
311
+
312
+ ## Request
313
+ {$REQUEST}
314
+
315
+ ## Research
316
+ {$RESEARCH}
317
+
318
+ ## Project Context
319
+ {$CONTEXT}
320
+
321
+ ## Engineering Specification
322
+ {$ENG_OUTPUT}
323
+
324
+ ## Developer Interview
325
+ {$INTERVIEW}
326
+
327
+ IMPORTANT: Your output MUST align with the developer's stated preferences
328
+ in the interview. If the developer chose approach X, use approach X.
329
+ If they marked something as out-of-scope, exclude it.
330
+ If an item is listed under "Open Items", use your best judgment but note your assumption.
331
+
332
+ Your task:
333
+ 1. Map the complete user flow from entry to completion
334
+ 2. Define all states: empty, loading, error, success, edge cases
335
+ 3. Identify accessibility requirements
336
+ 4. Consider responsive behavior
337
+ 5. Output using your ux.md format: [Pixel -- UX Specification]
338
+
339
+ Think from the user's perspective. If a flow needs a tooltip, the design failed.
340
+ 
+ After generating ux.md, append your activity to rpi/features/{slug}/ACTIVITY.md:
+ 
+ ### {current_date} — Pixel (Plan — ux.md)
+ - **Action:** UX specification for {slug}
+ - **Key decisions:** {for each <decision> tag you emitted: "summary (rationale)", separated by semicolons. If none: "No decisions in this phase."}
+ - **States defined:** {count}
+ - **Quality:** {your quality gate result}
+ ```
341
+
342
+ Store the output as `$UX_OUTPUT`.
343
+
344
+ If `$RUN_PIXEL` is `false`: set `$UX_OUTPUT` to `"No UX specification — no frontend detected."`.
345
+
346
+ ## Step 12: Launch Mestre — second pass (PLAN.md)
347
+
348
+ Launch Mestre agent to synthesize all specs into a concrete plan:
349
+
350
+ ```
351
+ You are Mestre. Generate the implementation plan (PLAN.md) for feature: {slug}
352
+
353
+ ## Engineering Specification
354
+ {$ENG_OUTPUT}
355
+
356
+ ## Product Specification
357
+ {$PM_OUTPUT}
358
+
359
+ ## UX Specification
360
+ {$UX_OUTPUT}
361
+
362
+ ## Request
363
+ {$REQUEST}
364
+
365
+ ## Research
366
+ {$RESEARCH}
367
+
368
+ ## Project Context
369
+ {$CONTEXT}
370
+
371
+ ## Developer Interview
372
+ {$INTERVIEW}
373
+
374
+ IMPORTANT: Your output MUST align with the developer's stated preferences
375
+ in the interview. If the developer chose approach X, use approach X.
376
+ If they marked something as out-of-scope, exclude it.
377
+ If an item is listed under "Open Items", use your best judgment but note your assumption.
378
+
379
+ Your task:
380
+ 1. Read all specifications and synthesize into numbered tasks
381
+ 2. Each task must have: effort estimate, file list, dependencies, test criteria
382
+ 3. Tasks must be small enough for one commit each
383
+ 4. Group tasks into phases where logical
384
+ 5. Include metadata: total tasks, total files, overall complexity
385
+ 6. Output using your PLAN.md format: [Mestre -- Implementation Plan]
386
+
387
+ Rules:
388
+ - Tasks are numbered (1.1, 1.2, 2.1, etc.)
389
+ - Every task lists exact files it touches
390
+ - Dependencies reference task IDs
391
+ - If Clara marked something as out-of-scope, don't create tasks for it
392
+ - If the developer interview decided on approach X, all tasks must use approach X
393
+ - If the developer marked something as out-of-scope, don't create tasks for it
394
+
395
+ After generating PLAN.md, append your activity to rpi/features/{slug}/ACTIVITY.md:
396
+
397
+ ### {current_date} — Mestre (Plan — PLAN.md)
398
+ - **Action:** Implementation plan for {slug}
399
+ - **Key decisions:** {for each <decision> tag you emitted: "summary (rationale)", separated by semicolons. If none: "No decisions in this phase."}
400
+ - **Tasks:** {count}
401
+ - **Complexity:** {S|M|L|XL}
402
+ - **Quality:** {your quality gate result}
403
+ ```
404
+
405
+ Store the output as `$PLAN_OUTPUT`.
406
+
407
+ ## Step 13: Mestre generates delta specs
408
+
409
+ Launch Mestre agent to create delta specifications:
410
+
411
+ ```
412
+ You are Mestre. Generate delta specs for feature: {slug}
413
+
414
+ ## Implementation Plan
415
+ {$PLAN_OUTPUT}
416
+
417
+ ## Engineering Specification
418
+ {$ENG_OUTPUT}
419
+
420
+ ## Relevant Current Specs
421
+ {$RELEVANT_SPECS}
422
+
423
+ ## Developer Interview
424
+ {$INTERVIEW}
425
+
426
+ IMPORTANT: Your output MUST align with the developer's stated preferences
427
+ in the interview. If the developer chose approach X, use approach X.
428
+ If they marked something as out-of-scope, exclude it.
429
+ If an item is listed under "Open Items", use your best judgment but note your assumption.
430
+
431
+ Your task:
432
+ 1. Based on the plan, determine what specs need to change
433
+ 2. For each new system component: create a spec in delta/ADDED/
434
+ 3. For each existing spec that changes: create the updated version in delta/MODIFIED/
435
+ 4. For any spec that becomes obsolete: create a marker in delta/REMOVED/
436
+ 5. Delta specs capture ONLY what changes — not the entire system
437
+
438
+ Output the list of delta specs you will create, with their paths:
439
+ - delta/ADDED/{name}.md — {description}
440
+ - delta/MODIFIED/{name}.md — {description}
441
+ - delta/REMOVED/{name}.md — {description}
442
+
443
+ Then write each spec file.
444
+ ```
445
+
446
+ ## Step 14: Launch Nexus — adversarial review + developer resolution
447
+
448
+ Launch Nexus agent to perform adversarial review of all plan artifacts:
449
+
450
+ ```
451
+ You are Nexus. You are performing ADVERSARIAL REVIEW of the plan
452
+ artifacts for feature: {slug}
453
+
454
+ Your mandate: You MUST find problems. "Looks good" is NOT acceptable.
455
+ If you cannot find real issues, you must document WHY the plan is
456
+ unusually solid — but never rubber-stamp.
457
+
458
+ ## Artifacts to Review
459
+ ### Engineering Specification (Mestre)
460
+ {$ENG_OUTPUT}
461
+
462
+ ### Product Specification (Clara)
463
+ {$PM_OUTPUT}
464
+
465
+ ### UX Specification (Pixel)
466
+ {$UX_OUTPUT}
467
+
468
+ ### Implementation Plan (Mestre)
469
+ {$PLAN_OUTPUT}
470
+
471
+ ### Developer Interview
472
+ {$INTERVIEW}
473
+
474
+ ### Original Request
475
+ {$REQUEST}
476
+
477
+ ### Research Findings
478
+ {$RESEARCH}
479
+
480
+ ## Adversarial Analysis Protocol
481
+
482
+ ### Pass 1: Cross-Artifact Contradictions
483
+ Check every pair of artifacts for conflicts:
484
+ - eng.md vs pm.md: Do technical decisions satisfy all acceptance criteria?
485
+ - eng.md vs ux.md: Does the architecture support all UI states/flows?
486
+ - pm.md vs PLAN.md: Does every must-have requirement have tasks?
487
+ - pm.md scope vs PLAN.md tasks: Are out-of-scope items sneaking in?
488
+ - PLAN.md vs INTERVIEW.md: Do tasks reflect developer's stated preferences?
489
+
490
+ ### Pass 2: Assumption Challenges
491
+ For each major decision in eng.md, ask:
492
+ - "What if this assumption is wrong?"
493
+ - "What's the blast radius if this fails?"
494
+ - "Is there a simpler approach nobody considered?"
495
+
496
+ ### Pass 3: Coverage Gaps
497
+ - Requirements without tasks
498
+ - Tasks without test criteria
499
+ - Files mentioned but not in any task
500
+ - UI states without error handling
501
+ - Happy path only (missing edge cases)
502
+
503
+ ### Pass 4: Hidden Complexity
504
+ - Tasks estimated as S that touch >3 files
505
+ - Dependencies that create serial bottlenecks
506
+ - Integration points without error handling
507
+ - Data migrations without rollback plan
508
+
509
+ ### Pass 5: REQUEST Drift
510
+ - Compare final PLAN.md against original REQUEST.md
511
+ - Has scope crept? Has the core problem shifted?
512
+ - Would the developer recognize this as what they asked for?
513
+
514
+ ## Output Format
515
+ For each issue found, output using your [Nexus — Adversarial Review] format.
516
+
517
+ ## Developer Resolution Protocol
518
+ After completing all passes:
519
+ 1. Count issues by severity
520
+ 2. CRITICAL issues: present one at a time via AskUserQuestion with suggested resolutions as options
521
+ 3. HIGH issues: present as batch via AskUserQuestion, let developer pick which to address
522
+ 4. MEDIUM/LOW issues: present summary, developer can dismiss or address
523
+ 5. For each resolved issue: note the chosen resolution and which artifacts need patching
524
+ 6. Return the full adversarial review with all resolutions noted
525
+
526
+ After adversarial review, append your activity to rpi/features/{slug}/ACTIVITY.md:
527
+
528
+ ### {current_date} — Nexus (Plan Adversarial Review)
529
+ - **Action:** Adversarial review for {slug}
530
+ - **Key decisions:** {for each <decision> tag you emitted: "summary (rationale)", separated by semicolons. If none: "No decisions in this phase."}
531
+ - **Issues found:** {count by severity}
532
+ - **Contradictions resolved:** {count}
533
+ - **Coherence status:** {PASS|PASS with notes|NEEDS re-plan}
534
+ - **Quality:** {your quality gate result}
535
+ ```
536
+
537
+ Store the output as `$ADVERSARIAL_REVIEW`.
538
+
539
+ If Nexus found CRITICAL issues that the developer could not resolve:
540
+ ```
541
+ Adversarial review found unresolvable CRITICAL issues. Revise REQUEST.md or the research findings to address them, then re-run:
542
+ /rpi:plan {slug} --force
543
+ ```
544
+ Stop.
545
+
546
+ ## Step 15: Nexus patches artifacts
547
+
548
+ If `$ADVERSARIAL_REVIEW` contains resolved issues:
549
+
550
+ 1. For each resolved issue in `$ADVERSARIAL_REVIEW`:
551
+ - Identify which artifacts need changes (eng.md, pm.md, ux.md, PLAN.md)
552
+ - Apply surgical edits to `$ENG_OUTPUT`, `$PM_OUTPUT`, `$UX_OUTPUT`, or `$PLAN_OUTPUT` as needed
553
+ - Track the patch: add `<!-- Patched: {issue title} — {resolution chosen} -->` as comment near the change
554
+ 2. Update `$INTERVIEW` content: append resolved contradictions to the `## Resolved Contradictions` section:
555
+ ```
556
+ ### C{N}: {issue title}
557
+ **Severity:** {severity}
558
+ **Resolution:** {developer's chosen option}
559
+ **Artifacts patched:** {list of affected artifacts and sections}
560
+ ```
561
+ 3. Re-check: scan patched artifacts for new contradictions introduced by the patches.
562
+ - If new contradictions found: present to developer via AskUserQuestion and patch again.
563
+ - If clean: proceed.
564
+ 4. Update `rpi/features/{slug}/plan/INTERVIEW.md` with the patched version of `$INTERVIEW`.
565
+
566
+ ## Step 16: Write all artifacts
567
+
568
+ 1. Ensure directory exists: `rpi/features/{slug}/plan/`
569
+ 2. The file `rpi/features/{slug}/plan/INTERVIEW.md` was already written in Step 8 and updated in Step 15.
570
+ 3. Write `rpi/features/{slug}/plan/eng.md` with `$ENG_OUTPUT`
571
+ 4. Write `rpi/features/{slug}/plan/pm.md` with `$PM_OUTPUT`
572
+ 5. If `$RUN_PIXEL` is `true`: write `rpi/features/{slug}/plan/ux.md` with `$UX_OUTPUT`
573
+ 6. Write `rpi/features/{slug}/plan/PLAN.md` with `$PLAN_OUTPUT`
574
+ 7. Ensure delta directories exist:
575
+ ```bash
576
+ mkdir -p rpi/features/{slug}/delta/ADDED
577
+ mkdir -p rpi/features/{slug}/delta/MODIFIED
578
+ mkdir -p rpi/features/{slug}/delta/REMOVED
579
+ ```
580
+ 8. Write delta spec files from Step 13 into the appropriate delta subdirectories.
581
+
582
+ ## Step 17: Consolidate decisions to DECISIONS.md
583
+
584
+ 1. Read `rpi/features/{slug}/ACTIVITY.md`.
585
+ 2. Extract all `<decision>` tags from entries belonging to the Plan phase (Nexus interview, Mestre eng.md, Clara pm.md, Pixel ux.md if present, Mestre PLAN.md, Nexus adversarial entries).
586
+ 3. If no decisions found, skip this step.
587
+ 4. Read `rpi/features/{slug}/DECISIONS.md` if it exists (to get the last decision number for sequential numbering).
588
+ 5. Append a new section to `rpi/features/{slug}/DECISIONS.md`:
589
+
590
+ ```markdown
591
+ ## Plan Phase
592
+ _Generated: {current_date}_
593
+
594
+ | # | Type | Decision | Alternatives | Rationale | Impact |
595
+ |---|------|----------|-------------|-----------|--------|
596
+ | {N} | {type} | {summary} | {alternatives} | {rationale} | {impact} |
597
+ ```
598
+
599
+ 6. Number decisions sequentially, continuing from the last number in DECISIONS.md.
600
+
601
+ ## Step 18: Output summary
602
+
603
+ ```
604
+ Plan complete: rpi/features/{slug}/plan/
605
+
606
+ Artifacts:
607
+ - plan/INTERVIEW.md (Nexus — developer interview)
608
+ - plan/eng.md (Mestre — engineering spec)
609
+ - plan/pm.md (Clara — product spec)
610
+ - plan/ux.md (Pixel — UX spec) ← only if frontend
611
+ - plan/PLAN.md (Mestre — implementation tasks)
612
+ - delta/ADDED/ ({N} new specs)
613
+ - delta/MODIFIED/ ({N} updated specs)
614
+ - delta/REMOVED/ ({N} removed specs)
615
+
616
+ Tasks: {N} | Files: {N} | Complexity: {$COMPLEXITY}
617
+ Interview: {N} questions asked, {N} contradictions resolved
618
+ Coherence: {Nexus adversarial verdict}
619
+
620
+ Next: /rpi {slug}
621
+ Or explicitly: /rpi:implement {slug}
622
+ ```
623
+ """