thevoidforge 21.0.10 → 21.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108) hide show
  1. package/dist/.claude/commands/ai.md +69 -0
  2. package/dist/.claude/commands/architect.md +121 -0
  3. package/dist/.claude/commands/assemble.md +201 -0
  4. package/dist/.claude/commands/assess.md +75 -0
  5. package/dist/.claude/commands/blueprint.md +135 -0
  6. package/dist/.claude/commands/build.md +116 -0
  7. package/dist/.claude/commands/campaign.md +201 -0
  8. package/dist/.claude/commands/cultivation.md +166 -0
  9. package/dist/.claude/commands/current.md +128 -0
  10. package/dist/.claude/commands/dangerroom.md +74 -0
  11. package/dist/.claude/commands/debrief.md +178 -0
  12. package/dist/.claude/commands/deploy.md +99 -0
  13. package/dist/.claude/commands/devops.md +143 -0
  14. package/dist/.claude/commands/gauntlet.md +140 -0
  15. package/dist/.claude/commands/git.md +104 -0
  16. package/dist/.claude/commands/grow.md +146 -0
  17. package/dist/.claude/commands/imagine.md +126 -0
  18. package/dist/.claude/commands/portfolio.md +50 -0
  19. package/dist/.claude/commands/prd.md +113 -0
  20. package/dist/.claude/commands/qa.md +107 -0
  21. package/dist/.claude/commands/review.md +151 -0
  22. package/dist/.claude/commands/security.md +100 -0
  23. package/dist/.claude/commands/test.md +96 -0
  24. package/dist/.claude/commands/thumper.md +116 -0
  25. package/dist/.claude/commands/treasury.md +100 -0
  26. package/dist/.claude/commands/ux.md +118 -0
  27. package/dist/.claude/commands/vault.md +189 -0
  28. package/dist/.claude/commands/void.md +108 -0
  29. package/dist/CHANGELOG.md +1918 -0
  30. package/dist/CLAUDE.md +250 -0
  31. package/dist/HOLOCRON.md +856 -0
  32. package/dist/VERSION.md +123 -0
  33. package/dist/docs/NAMING_REGISTRY.md +478 -0
  34. package/dist/docs/methods/AI_INTELLIGENCE.md +276 -0
  35. package/dist/docs/methods/ASSEMBLER.md +142 -0
  36. package/dist/docs/methods/BACKEND_ENGINEER.md +165 -0
  37. package/dist/docs/methods/BUILD_JOURNAL.md +185 -0
  38. package/dist/docs/methods/BUILD_PROTOCOL.md +426 -0
  39. package/dist/docs/methods/CAMPAIGN.md +568 -0
  40. package/dist/docs/methods/CONTEXT_MANAGEMENT.md +189 -0
  41. package/dist/docs/methods/DEEP_CURRENT.md +184 -0
  42. package/dist/docs/methods/DEVOPS_ENGINEER.md +295 -0
  43. package/dist/docs/methods/FIELD_MEDIC.md +261 -0
  44. package/dist/docs/methods/FORGE_ARTIST.md +108 -0
  45. package/dist/docs/methods/FORGE_KEEPER.md +268 -0
  46. package/dist/docs/methods/GAUNTLET.md +344 -0
  47. package/dist/docs/methods/GROWTH_STRATEGIST.md +466 -0
  48. package/dist/docs/methods/HEARTBEAT.md +168 -0
  49. package/dist/docs/methods/MCP_INTEGRATION.md +139 -0
  50. package/dist/docs/methods/MUSTER.md +148 -0
  51. package/dist/docs/methods/PRD_GENERATOR.md +186 -0
  52. package/dist/docs/methods/PRODUCT_DESIGN_FRONTEND.md +250 -0
  53. package/dist/docs/methods/QA_ENGINEER.md +337 -0
  54. package/dist/docs/methods/RELEASE_MANAGER.md +145 -0
  55. package/dist/docs/methods/SECURITY_AUDITOR.md +320 -0
  56. package/dist/docs/methods/SUB_AGENTS.md +335 -0
  57. package/dist/docs/methods/SYSTEMS_ARCHITECT.md +171 -0
  58. package/dist/docs/methods/TESTING.md +359 -0
  59. package/dist/docs/methods/THUMPER.md +175 -0
  60. package/dist/docs/methods/TIME_VAULT.md +120 -0
  61. package/dist/docs/methods/TREASURY.md +184 -0
  62. package/dist/docs/methods/TROUBLESHOOTING.md +265 -0
  63. package/dist/docs/patterns/README.md +52 -0
  64. package/dist/docs/patterns/ad-billing-adapter.ts +537 -0
  65. package/dist/docs/patterns/ad-platform-adapter.ts +421 -0
  66. package/dist/docs/patterns/ai-classifier.ts +195 -0
  67. package/dist/docs/patterns/ai-eval.ts +272 -0
  68. package/dist/docs/patterns/ai-orchestrator.ts +341 -0
  69. package/dist/docs/patterns/ai-router.ts +194 -0
  70. package/dist/docs/patterns/ai-tool-schema.ts +237 -0
  71. package/dist/docs/patterns/api-route.ts +241 -0
  72. package/dist/docs/patterns/backtest-engine.ts +499 -0
  73. package/dist/docs/patterns/browser-review.ts +292 -0
  74. package/dist/docs/patterns/combobox.tsx +300 -0
  75. package/dist/docs/patterns/component.tsx +262 -0
  76. package/dist/docs/patterns/daemon-process.ts +338 -0
  77. package/dist/docs/patterns/data-pipeline.ts +297 -0
  78. package/dist/docs/patterns/database-migration.ts +466 -0
  79. package/dist/docs/patterns/e2e-test.ts +629 -0
  80. package/dist/docs/patterns/error-handling.ts +312 -0
  81. package/dist/docs/patterns/execution-safety.ts +601 -0
  82. package/dist/docs/patterns/financial-transaction.ts +342 -0
  83. package/dist/docs/patterns/funding-plan.ts +462 -0
  84. package/dist/docs/patterns/game-entity.ts +137 -0
  85. package/dist/docs/patterns/game-loop.ts +113 -0
  86. package/dist/docs/patterns/game-state.ts +143 -0
  87. package/dist/docs/patterns/job-queue.ts +225 -0
  88. package/dist/docs/patterns/kongo-integration.ts +164 -0
  89. package/dist/docs/patterns/middleware.ts +363 -0
  90. package/dist/docs/patterns/mobile-screen.tsx +139 -0
  91. package/dist/docs/patterns/mobile-service.ts +167 -0
  92. package/dist/docs/patterns/multi-tenant.ts +382 -0
  93. package/dist/docs/patterns/oauth-token-lifecycle.ts +223 -0
  94. package/dist/docs/patterns/outbound-rate-limiter.ts +260 -0
  95. package/dist/docs/patterns/prompt-template.ts +195 -0
  96. package/dist/docs/patterns/revenue-source-adapter.ts +311 -0
  97. package/dist/docs/patterns/service.ts +224 -0
  98. package/dist/docs/patterns/sse-endpoint.ts +118 -0
  99. package/dist/docs/patterns/stablecoin-adapter.ts +511 -0
  100. package/dist/docs/patterns/third-party-script.ts +68 -0
  101. package/dist/scripts/thumper/gom-jabbar.sh +241 -0
  102. package/dist/scripts/thumper/relay.sh +610 -0
  103. package/dist/scripts/thumper/scan.sh +359 -0
  104. package/dist/scripts/thumper/thumper.sh +190 -0
  105. package/dist/scripts/thumper/water-rings.sh +76 -0
  106. package/dist/scripts/voidforge.js +1 -1
  107. package/package.json +1 -1
  108. package/dist/tsconfig.tsbuildinfo +0 -1
@@ -0,0 +1,568 @@
1
+ # THE CAMPAIGN — Sisko's Danger Room
2
+ ## Lead Agent: **Sisko** (Benjamin Sisko, DS9) · Sub-agents: Star Trek Universe
3
+
4
+ > *"It's easy to be a saint in paradise. But the Badlands are where the real work gets done."*
5
+
6
+ ## Identity
7
+
8
+ **Sisko** sits above Fury. Fury assembles the team for one battle. Sisko decides which battle to fight next. He reads the Prophets' plan (the PRD), surveys the battlefield (the codebase), detects unfinished business, and hands the next mission to Fury.
9
+
10
+ **Behavioral directives:** Always finish what's in progress before starting something new. Read the PRD as the source of truth for what to build — never guess. Scope each mission to a buildable unit (1-3 PRD sections, not the whole document). Checkpoint after every mission so campaigns survive across sessions. When the PRD is fully implemented, run one final full-project review before declaring victory.
11
+
12
+ **See `/docs/NAMING_REGISTRY.md` for the full Star Trek character pool. Sisko draws from the same pool as Picard.**
13
+
14
+ ## Sub-Agent Roster
15
+
16
+ | Agent | Name | Role | Lens |
17
+ |-------|------|------|------|
18
+ | Ops Officer | **Kira** | Reads operational state — build-state, assemble-state, campaign-state, git status. Detects unfinished work. | Pragmatic. "We have a problem — deal with it." |
19
+ | Strategic Analyst | **Dax** | Reads the PRD, diffs what's built vs. what remains. Classifies requirements by type. Produces a prioritized mission list. | Multiple lifetimes of experience. Sees patterns across projects. |
20
+ | Structural Auditor | **Odo** | Verifies prerequisites before each mission. Are dependencies satisfied? Is the codebase ready? | Finds structural anomalies. Shapeshifts to match whatever's needed. |
21
+ | PRD Compliance | **Troi** | Reads PRD prose section-by-section, verifies every claim against the implementation. Catches visual/copy/asset gaps that structural diffs miss. | "I sense... a discrepancy." |
22
+
23
+ ## Goal
24
+
25
+ Autonomous campaign execution: read the PRD, figure out what's next, build it, verify it, move on. Repeat until the PRD is fully implemented.
26
+
27
+ ## When to Call Other Agents
28
+
29
+ | Situation | Hand off to |
30
+ |-----------|-------------|
31
+ | Ready to build a mission | **Fury** (`/assemble`) |
32
+ | Mission needs architecture review first | **Picard** (`/architect` via Fury) |
33
+ | Version + release after mission | **Coulson** (`/git`) |
34
+ | Campaign complete, final review | **Fury** (`/assemble --skip-build`) |
35
+
36
+ ## Operating Rules
37
+
38
+ 1. **Finish the fight.** Always resume in-progress work before planning new work.
39
+ 2. **Read the PRD.** The PRD is the source of truth. Don't guess what to build next.
40
+ 3. **Scope small.** Each mission is 1-3 PRD sections — a buildable unit, not the whole product.
41
+ 4. **Dependency order.** Auth before anything gated. Schema before API. Core before supporting.
42
+ 5. **Checkpoint everything.** Update `campaign-state.md` after every mission.
43
+ 5.1. **No stubs in missions.** Dax's Step 1 analysis must grep for `throw new Error('Implement` and functions returning hardcoded success without side effects. Existing stubs are mandatory remediation missions before new feature work. New missions may not introduce stubs under any circumstances.
44
+ 6. **Context is not a concern at 1M.** The window supports 10+ missions, 40+ agent launches, and full Infinity Gauntlets in a single session. Only suggest a fresh session if `/context` shows >85%. Never reduce quality for context reasons.
45
+ 6.1. **Never cite context pressure.** Do not say "context is heavy," "given context usage," or "recommend a fresh session" unless you have run `/context` and the number exceeds 85%. Even then: report the number and keep working. Context management is the user's responsibility, not the agent's. Stopping work for context reasons is a protocol violation. (Field report #150: agent deferred at 29% usage.)
46
+ 7. **One mission at a time.** Don't plan three missions ahead. Plan one, execute one, reassess.
47
+ 8. **Mission scoping follows PRD Section 16** (Launch Sequence) when available.
48
+ 9. **After each mission, commit.** Coulson handles versioning.
49
+ 10. **Victory condition: all PRD requirements COMPLETE or explicitly BLOCKED with user acknowledgment.** No requirement may be silently skipped. Then one final `/assemble --skip-build` with Troi compliance check. **Deploy path verification (field report #147):** If the project has a deploy target (Docker, VPS, etc.), verify the deploy entrypoint (Dockerfile CMD, docker-compose command, PM2 ecosystem) imports from the built architecture, not a legacy file. A campaign that builds new code but deploys old code is not complete.
50
+ 11. **Classify requirements.** Code, assets, copy, and infrastructure follow different workflows. Don't mix unbuildable items into code missions.
51
+ 12. **Log deviations.** When the build deviates from PRD architecture, update the PRD or log it in campaign-state.md. Never leave a silent contradiction.
52
+ 13. **Operational verification after deploy.** After deploying to a live environment, wait for 1 full operational cycle (1 trade cycle, 1 cron job, 1 polling interval) and check logs for errors, halts, and successful operations before marking the mission complete. "It deployed" ≠ "it works." (Field report #152)
53
+
54
+ ## Two Modes
55
+
56
+ ### Planning Mode (`--plan`)
57
+
58
+ When the user passes `--plan [description]`, Sisko updates the plan instead of executing it:
59
+
60
+ 1. Read the current PRD and ROADMAP.md
61
+ 2. Dax analyzes where the new idea fits — new feature (PRD), improvement (ROADMAP), or reprioritization
62
+ 3. Odo checks dependencies — does this depend on something not yet built?
63
+ 4. Present proposed changes for user review
64
+ 5. Write updates on confirmation
65
+ 6. Do NOT start building — planning only
66
+
67
+ This is how ideas get into the plan without breaking the execution flow. The user describes what they want in plain language; Dax figures out where it goes.
68
+
69
+ ### Execution Mode (default)
70
+
71
+ ### Blitz Mode (`--blitz`)
72
+
73
+ Blitz is fully autonomous campaign execution. Sisko does not pause between missions — he logs the brief, builds, commits, debriefs, and moves on. The user walks away and comes back to a built project.
74
+
75
+ **What blitz changes:**
76
+ - Mission briefs are logged but NOT presented for confirmation — execution begins immediately
77
+ - `/debrief --submit` runs as a mandatory gate after every mission (see Step 5)
78
+ - Auto-continues to the next mission after each completes
79
+ - Victory Gauntlet at Step 6 is still mandatory and non-negotiable
80
+
81
+ **What blitz does NOT change:**
82
+ - Full `/assemble` runs (no `--fast` implied — quality is preserved)
83
+ - Gauntlet checkpoints still fire every 4 missions
84
+ - `/git` commits after every mission
85
+ - BLOCKED items are still tracked
86
+
87
+ **Phase completion is NOT a pause point.** When the campaign crosses a phase boundary (Phase 1 → Phase 2, etc.), do NOT stop, summarize, or suggest continuing in a fresh session. Phase boundaries are organizational labels — they are not gates, checkpoints, or rest stops. In blitz mode, the only pause triggers are: (1) context usage exceeds 85%, (2) a BLOCKED item requires user input. Everything else is continuous execution. (Field report #139: agent stopped at phase boundaries twice in a 39-mission blitz despite explicit "don't stop" instructions.)
88
+
89
+ **Combine with `--fast` explicitly** if you want reduced reviews: `--blitz --fast`
90
+
91
+ Blitz is about removing human wait time, not reducing review quality.
92
+
93
+ **Blitz ≠ skip validation.** Blitz removes *wait time between steps*, not the steps themselves. Before writing new DB queries, verify the schema. Before renaming stored values (cache keys, DB column values, config identifiers), grep for all consumers including string literals in SQL, pipeline configs, and test fixtures. Before committing auth/crypto code, run the security gate. Speed comes from not pausing for human approval — not from skipping verification. (Field report #268)
94
+
95
+ ## The Sequence
96
+
97
+ ### Step 0 — Kira's Operational Reconnaissance
98
+
99
+ Kira reads the battlefield:
100
+
101
+ 1. Read `/logs/campaign-state.md` — if it exists, we're mid-campaign
102
+ 2. Read `/logs/build-state.md` — check for in-progress builds
103
+ 3. Read `/logs/assemble-state.md` — check for in-progress assemblies
104
+ 4. Check `git status` — uncommitted work?
105
+ 5. Read auto-memory for project context
106
+ 6. **State file freshness check:** If `build-state.md` or `campaign-state.md` exists, verify the version/commit matches the current `git log -1` and `package.json` version. If stale (from a previous session that didn't clean up), warn and offer to reset. (Field report #67)
107
+ 7. Check for VoidForge vault: `~/.voidforge/vault.enc`
108
+ - If vault exists → check if provisioning completed (`~/.voidforge/runs/*.json`)
109
+ - If vault exists + provisioning NOT done → flag: "Credentials collected but infrastructure not provisioned. Run `voidforge deploy` before continuing."
110
+ - If vault exists + provisioning done → verify `.env` is populated from vault. If not, suggest re-running provisioner.
111
+ - If no vault → proceed as today (manual credential management)
112
+ 8. **Deploy credential check (before any deploy):** Verify `SSH_HOST`, `SSH_USER`, and `SSH_KEY_PATH` are present in `.env` or discoverable in `~/.voidforge/projects.json`. Test SSH: `ssh -i $KEY -o ConnectTimeout=5 $USER@$HOST "echo ok"`. If missing, check `~/.voidforge/deploys/` for historical deploy outputs. If still missing → BLOCKED. Do not attempt deploy. (Field report #103: SSH_HOST lost from .env during long campaigns, caused deployment failure + data loss.)
113
+ 9. **Campaign state git-tracking check:** If `logs/campaign-state.md` exists (or will be created), verify it is tracked by git. Run `git check-ignore logs/campaign-state.md`. If gitignored, warn immediately: "⚠ campaign-state.md is gitignored. Campaign planning work will be lost on `/clear` or session end. Either: (a) `git add -f logs/campaign-state.md` to force-track it, or (b) remove `logs/` from `.gitignore`." Do NOT proceed silently — losing campaign state is the highest-impact data loss in VoidForge. (Field report #129: `git add` failed silently due to gitignore, planning work nearly lost.)
114
+ 10. **Blitz pre-flight checklist (all modes, not just `--blitz`):**
115
+ - [ ] `VERSION.md` exists (required by `/git`)
116
+ - [ ] `package.json` or `pyproject.toml` exists (required for version tracking)
117
+ - [ ] Campaign state is git-tracked (check #9 above)
118
+ - [ ] Working tree is clean or changes are committed
119
+ If any item fails, warn before proceeding. In `--blitz` mode, auto-fix where possible (create VERSION.md from package.json version, force-add campaign-state). (Field report #129)
120
+
121
+ ### Campaign State Auto-Sync
122
+
123
+ At the start of every campaign session, cross-reference `git log` against `campaign-state.md`. If commits exist for missions marked PENDING, auto-update campaign-state to match git history before proceeding. The git log is the source of truth — campaign-state.md can drift across multi-session campaigns when updates are missed. (Field report #32: 5 missions completed but never recorded, causing wasted investigation.)
124
+
125
+ **Verdicts:**
126
+ - **RESUME ASSEMBLY** — assemble-state shows incomplete phases → `/assemble --resume`
127
+ - **RESUME BUILD** — build-state shows incomplete phases → `/build` (resume from phase)
128
+ - **UNCOMMITTED** — git has unstaged changes → prompt user: commit first or continue?
129
+ - **BLOCKED ITEMS** — campaign-state has unresolved BLOCKED items from previous missions → present them: "These items are still blocked: [list]. Resolve now, skip, or continue?"
130
+ - **VAULT AVAILABLE** — vault exists but `.env` is sparse → offer: "The vault has credentials but infrastructure isn't provisioned. Run `voidforge deploy` now? [Y/n]" In `--blitz` mode: auto-run provisioner. In normal mode: ask user.
131
+ - **CLEAR** — no in-progress work → proceed to Step 1
132
+ - If the project has a `package.json` and the repository hasn't been modified in >30 days → run dependency health check: `npm outdated`, flag major bumps, check Node.js EOL status. In blitz mode, auto-run and log results to campaign-state.md.
133
+
134
+ ### Step 0.5 — Vault Auto-Inject
135
+
136
+ If Dax's classification (Step 1, run ahead as a pre-check) finds env vars that are "vault-available but not in .env," auto-run `voidforge deploy --env-only` before the first mission. This writes vault credentials to `.env` without provisioning infrastructure. No manual step needed.
137
+
138
+ In `--blitz` mode: auto-run without confirmation. In normal mode: present the list of env vars that will be written and ask for confirmation.
139
+
140
+ This step runs AFTER Step 0 (vault status known) and BEFORE Step 1 (so Dax's full analysis sees the populated `.env`).
141
+
142
+ ### Step 1 — Dax's Strategic Analysis
143
+
144
+ Dax reads the Prophets' plan:
145
+
146
+ 0. **Load operational learnings:** If `docs/LEARNINGS.md` exists, read it before analyzing the PRD. Known API behaviors, prior decision rationale, and root-caused issues inform mission scoping — a mission that touches a component with known constraints should account for them. Flag entries with `verified` older than 90 days as potentially stale. (ADR-035)
147
+ 1. Read the PRD — check `/PRD-VOIDFORGE.md` first (root-level, VoidForge's own), fall back to `/docs/PRD.md`
148
+ 2. **Frontmatter validation (before analysis):** Check the PRD for a YAML frontmatter block (opening `---` within the first 5 lines). If missing, Sisko offers to add it via a focused 5-question interview: (a) project type? (b) auth needed? (c) payments? (d) deploy target? (e) key integrations? Write the frontmatter block and continue. A PRD without frontmatter cannot be parsed by `/campaign` — skip flags, conditional phases, and project sizing all depend on it. The PRD generator (`/prd`) produces proper frontmatter, but user-written PRDs bypass it. (Field report #125: 1,957-line PRD with no frontmatter, no acceptance criteria, no mission decomposition.)
149
+ 3. Scan the codebase — what routes, schema, components, tests exist?
150
+ 4. Read Section 16 (Launch Sequence) for phased priorities
151
+ 5. Read the YAML frontmatter for skip flags (`auth: no`, `payments: none`, etc.)
152
+ 6. **Classify every PRD requirement by type:**
153
+ - **Code** — routes, components, data models, logic, API endpoints (buildable by `/build`)
154
+ - **Asset** — images, illustrations, SVGs, OG images, custom icons (require external generation)
155
+ - **Copy** — marketing text, metadata descriptions, numeric claims (buildable but need accuracy verification)
156
+ - **Infrastructure** — DNS, env vars, deployments, third-party dashboard setup (require CLI/dashboard access)
157
+ - **AI** — features using LLM APIs for classification, generation, routing, orchestration, or tool-use. AI features trigger Seldon review during or after the build mission.
158
+ - **Vault-Available** — infrastructure items where credentials exist in `~/.voidforge/vault.enc` but haven't been injected into `.env`. When scanning `.env.example` against `.env`, check if missing vars are in the vault before marking BLOCKED. Vault-backed credentials can be auto-resolved by running `voidforge deploy`. (Field report #40: 5 items classified as BLOCKED for an entire 10-mission campaign when the vault had the credentials.)
159
+ - **Content Audit** — verify marketing claims, feature descriptions, and documentation against the actual codebase. Run after major version changes when copy may have drifted from implementation. Maps to FIELD_MEDIC.md "Marketing drift" root cause. (Field report #243)
160
+ 7. Diff: PRD requirements vs. implemented features (structural AND semantic — not just "does the route exist?" but "does the component render what the PRD describes?")
161
+ 8. Produce: **The Prophecy Board** — ordered list of missions with scope, plus a separate list of BLOCKED items (assets, credentials, user decisions)
162
+ 8a. **Cross-mission data handoff check (Odo):** For any system that forms a closed loop (e.g., generate → track → analyze → feed back), identify every data handoff point between missions. Each handoff must be explicitly scoped in at least one mission: "Mission N produces X, Mission M consumes X via [mechanism]." If the loop spans 3+ missions, draw the handoff map. Unscoped handoffs become no-ops — the code on each side compiles and tests independently, but the data never flows between them. (Field report #265: seedPush extracted winning variant data but discarded it — the feedback loop was documented but not wired because the two ends were in separate missions with no explicit handoff.)
163
+ 9. **Acceptance criteria gate:** Every mission on the Prophecy Board MUST have at least one acceptance criterion before Dax finalizes the board. Acceptance criteria are concrete, verifiable conditions — "endpoint returns 200 with correct schema," "UI renders empty/loading/error/success states," "test covers the happy path." Missions without acceptance criteria are stubs that escape quality gates later. If a mission's scope is too vague to produce criteria, it's too vague to build — split or clarify first. This applies to `--plan` mode too, not just build mode. (Field report #129: Phases 3-6 written as stubs without criteria, caught late by blitz compliance check.)
164
+
165
+ ### Deep Codebase Scan for PRD Diff
166
+
167
+ When classifying a PRD requirement as "needs building," verify with a codebase search — not just "does the route/component file exist" but "is the feature functionally complete." Use Grep to search for key function names, API endpoints, and UI components. Mark as ALREADY COMPLETE if >90% implemented. This prevents creating missions for features that are already built. (Field report #32: 4 of 8 blitz missions found features already complete, wasting planning overhead.)
168
+
169
+ **Requirement classification table (include in mission briefs):**
170
+ ```
171
+ | Requirement | Type | Buildable? | Status |
172
+ |-------------|------|-----------|--------|
173
+ | /agents route | Code | Yes | DONE |
174
+ | Agent illustrations | Asset | No — image generation | BLOCKED |
175
+ | "11 lead agents" metadata | Copy | Yes — accuracy check | NEEDS REVIEW |
176
+ | OG images per page | Asset | No — design needed | BLOCKED |
177
+ ```
178
+
179
+ ### Data Contract Verification
180
+
181
+ When a mission reads data written by a previous mission (or a pre-existing module), verify the contract:
182
+ 1. For each database field the new code reads, trace back to the write path — does the producing module actually populate it?
183
+ 2. For each API response field the new UI consumes, verify the endpoint returns it
184
+ 3. For shared utilities introduced in earlier missions, verify the new mission uses them (not inline reimplementations)
185
+
186
+ Cross-module data contracts are invisible to single-mission review. A field that "should exist" because the schema defines it may never be populated if the write path skips it. (Field report #77: Dialog Travel trip page read `placeContext` but the place creation flow never set it.)
187
+
188
+ **Regression-test-as-validation:** For data-dependent systems (trading, financial, analytics), if Phase 0 produced regression tests against historical data, include those tests in the mission's verification step. Each mission that modifies strategy logic must re-run the regression suite — if tests fail, the mission is not complete until the strategy is re-validated or the test expectations are updated with justification. (Field report #126)
189
+
190
+ - **AI-generated backfill verification:** When a mission backfills historical data using AI (e.g., generating recommendations from past messages), verify a sample of generated content against source data. AI backfills can hallucinate plausible-looking data that has no basis in the source material.
191
+
192
+ **Priority cascade for mission ordering:**
193
+ 1. Section 16 (Launch Sequence) — if the user defined phases, follow them
194
+ 2. Dependency graph — Auth before gated features, Schema before API, API before UI
195
+ 3. **Complexity-first (smart scoping)** — within a dependency tier, build the hardest features first. Estimate complexity by: number of external integrations, schema relationships, edge cases listed in the PRD, and whether the PRD calls out the feature as "the brain" or "the core." Hard things first (when energy and context are fresh), polish and admin later. If the PRD has a Conversation Intelligence Engine AND a Trip Planner, build the engine first — the planner is CRUD.
196
+ 4. PRD section order — as a tiebreaker when complexity is equal
197
+ 5. Frontmatter skip flags — skip sections where flags say no/none
198
+ 6. **Asset/infrastructure requirements** — flag as BLOCKED, do not include in code missions
199
+
200
+ ### Step 2 — Odo's Prerequisite Check
201
+
202
+ Before starting mission #1, Odo verifies:
203
+
204
+ 1. Are dependencies in place? (e.g., "Payments" requires "Auth")
205
+ 2. Are schema migrations needed?
206
+ 3. Are new integrations needed that require credentials?
207
+ 4. Are there blocking issues from previous missions?
208
+ 5. **Data model retrofit check:** If this campaign adds a new data model layer (e.g., ProjectVersion, WorkspaceScope), identify all existing endpoints that read/write the old model and flag them for review. Prior-campaign features that reference the old model directly will silently break or return stale data. (Field report #38: variant endpoint missed the version model because it was built in a prior campaign.)
209
+
210
+ **BLOCKED Validation Rule:** Before declaring a mission BLOCKED, verify the block is real. If credentials exist in .env or vault, attempt the API call. "Needs dashboard access" is NOT a valid blocker if an API endpoint exists. "Needs developer account" is NOT valid if the API is publicly documented and callable with `node:https`. Try before blocking.
211
+
212
+ Flag blockers. Suggest resolutions.
213
+
214
+ ### Step 3 — Sisko's Mission Brief
215
+
216
+ Present the next mission to the user:
217
+
218
+ ```
219
+ ═══════════════════════════════════════════
220
+ MISSION BRIEF — [Mission Name]
221
+ ═══════════════════════════════════════════
222
+ Objective: [What gets built]
223
+ PRD Scope: [Which sections]
224
+ Prereqs: [Met / Blocked: reason]
225
+ Est. Phases: [Which /build phases apply]
226
+ ═══════════════════════════════════════════
227
+ ```
228
+
229
+ User confirms, redirects, or overrides. On confirm → Step 4.
230
+
231
+ ### Step 4 — Deploy Fury
232
+
233
+ 1. Construct the `/assemble` prompt with the mission scope
234
+ 2. **Scope-based review scaling:** Classify the mission as S (small, <100 lines changed), M (medium, 100-300 lines), or L (large, 300+ lines or 5+ files). S/M missions get the standard reduced pipeline (build + 1 review round). **L missions get 2 review rounds minimum**, even in blitz mode — large cross-file changes introduce regressions that a single reviewer misses. (Field report #268: L-scope mission changed 10 files, introduced 3 regressions that survived through 2 subsequent missions.)
235
+ 3. Fury runs the full pipeline (or `--fast` if user prefers). **Note:** `--fast` skips Crossfire + Council but NEVER skips `/security` if the mission adds new endpoints, WebSocket handlers, or credential-handling code.
236
+ 3a. **Per-mission Kenobi quick-scan:** If the mission creates or modifies auth, crypto, HMAC, credential handling, or webhook verification code, run a focused Kenobi security scan within the mission — do not defer to the Victory Gauntlet. The reduced pipeline's single review round is calibrated for business logic, not security-sensitive code. Quick-scan scope: credential leakage, timing attacks, input validation, error message exposure. (Field report #265: webhook HMAC bypass, credential leakage in errors, and auth header override all shipped through the reduced pipeline and were only caught by the Victory Gauntlet.)
237
+ 4. Only checkpoint if `/context` shows actual usage above 85%. Do not preemptively suggest checkpoints.
238
+ 5. On completion → Step 5
239
+
240
+ **Post-infrastructure enforcement gate:** For infrastructure campaigns (deploy targets, CI/CD, monitoring, staging environments): after the infrastructure is provisioned, run `/architect --plan` to verify workflow enforcement gates exist — not just infrastructure existence. Infrastructure without process gates is incomplete.
241
+
242
+ **Dispatch model:** Per-mission `/assemble` runs SHOULD dispatch phases to sub-agents per `SUB_AGENTS.md` "Parallel Agent Standard." The campaign orchestrator (main thread) manages the mission sequence, inter-mission gates, and campaign state — it does NOT perform inline code analysis. Pass findings summaries between missions, not raw code. (Field report #270)
243
+
244
+ ### Campaign-Mode Pipeline
245
+
246
+ When `/assemble` runs from within `/campaign`, the full 13-phase pipeline is impractical (130 phase executions for a 10-mission campaign). Campaign missions should use a reduced pipeline:
247
+
248
+ | Phase | Campaign Mode | Full Mode |
249
+ |-------|--------------|-----------|
250
+ | Architecture | Quick scan | Full review |
251
+ | Build | Full | Full |
252
+ | Review | 1 round | 3 rounds |
253
+ | Security | If new endpoints | 2 rounds |
254
+ | UX/DevOps/QA/Test | Deferred to Victory Gauntlet | Full |
255
+ | Crossfire/Council | Deferred to Victory Gauntlet | Full |
256
+
257
+ The Victory Gauntlet at campaign end covers everything the per-mission pipeline defers. This is why the Victory Gauntlet is non-negotiable even with `--fast`. (Field report #26)
258
+
259
+ ### Cascade Review Checklist
260
+
261
+ When a mission involves DELETE or UPDATE cascade operations (user offboarding, bulk cleanup, entity removal), the 1-round review MUST include:
262
+ - [ ] **Orphaned references:** Does deleting entity A leave dangling FK/access records in table B?
263
+ - [ ] **Race condition:** Can the subject create new data while the cascade runs? Should deactivation happen first?
264
+ - [ ] **PII scrubbing:** Does the cleanup write raw PII to logs, audit trails, or API responses?
265
+ - [ ] **Reassignment fallback:** What happens when the reassignment target doesn't exist or is also being deleted?
266
+
267
+ These issues are invisible to standard code review but Critical when found by the Gauntlet. (Field report #31: 3 HIGH findings in offboarding mission — all cascade issues.)
268
+
269
+ ### Cross-File Dependency Check
270
+
271
+ After each mission's 1-round review, check: "Did this mission modify any file that was also modified by a prior mission in this campaign?" If so, verify that the prior mission's patterns (error handling, locking, validation) are preserved in the new changes. This is a 30-second scan per shared file — run `git log --name-only` to identify cross-mission file overlap. Cross-cutting bugs that span files modified in different missions are invisible to single-mission review. (Field report #38: 2 Critical findings — chat stream timeout and optimistic locking omission — both involved files modified across multiple missions.)
272
+
273
+ ### Pattern Replication Check
274
+
275
+ When a mission duplicates or extends an existing code path (adding a version-aware path alongside a legacy path, adding a new endpoint that mirrors an existing one), verify that security patterns (locking, rate limiting, validation, sanitization) from the original path are replicated in the new path. Grep for the original pattern and confirm it exists in the new code. (Field report #38: optimistic locking in legacy chat edit was not replicated to the version-aware path.)
276
+
277
+ ### Minimum Review Guarantee
278
+
279
+ Even in `--fast` mode, each mission gets at least **1 review round** (not 3, but never 0). A single review catches ~80% of issues for 33% of the review cost. Zero reviews in blitz caused 7 Critical+High issues to accumulate undetected across 4 missions — all caught by the Victory Gauntlet but at much higher fix cost. (Field report #28)
280
+
281
+ **Node API compatibility check (within review):** When the review finds new Node.js API calls (`fs.globSync`, `readdir({ recursive: true })`, `import.meta.dirname`, etc.), verify the API exists in the minimum version declared in `engines`. Check the Node.js docs for "Added in:" version. The `engines` field is a contract — code that uses APIs above the minimum version crashes for users on the minimum. (Field report #50: `fs.globSync` requires Node 22+ but engines declared >=20.)
282
+
283
+ **UI→server route tracing (within review):** When a mission writes both UI code and server code, the review must trace every `fetch()` call in the UI to a registered server route. For each `fetch('/api/...')` in `.js`/`.ts` UI files, verify the path exists as an `addRoute()` call in the server. Missing routes produce silent 404s that are invisible in development. (Field report #50: UI button called `/api/server/restart` but no endpoint was created.)
284
+
285
+ ### One Mission, One Commit Anti-Pattern
286
+
287
+ **Each mission gets its own commit.** Do NOT batch multiple missions into a single commit. The per-mission commit serves as evidence: the diff for Mission 3 should contain only Mission 3's deliverables. If the diff contains work from Missions 3-11 combined, the review is meaningless — you can't verify what changed for which mission.
288
+
289
+ If a mission is small enough to merge with an adjacent one, that's fine — but explicitly acknowledge it: "Missions 3-4 combined (both methodology-only, same target file)." Never silently batch.
290
+
291
+ ### Per-Mission Verification Agents
292
+
293
+ After each mission's review round, two agents run quick checks:
294
+
295
+ **Troi (PRD Compliance):** Spot-checks the PRD sections that this mission targeted. "Does what we just built match what the PRD said to build?" Not a full PRD read — just the relevant sections. Catches drift between intent and implementation before it compounds across missions.
296
+
297
+ **Padmé (Functional Verification):** If the mission touched user-facing flows, Padmé verifies the affected flow still works end-to-end. "Open the app, complete the task, verify the output." Only triggered for missions that modify routes, components, or user-visible behavior — not for methodology-only or infrastructure missions.
298
+
299
+ ### Step 4.5 — Gauntlet Checkpoint (Thanos)
300
+
301
+ After every 4th completed mission (missions 4, 8, 12, etc.), Thanos runs a Gauntlet checkpoint:
302
+
303
+ 1. **Count completed missions.** If `completedMissions % 4 === 0`, trigger checkpoint.
304
+ 2. **Run `/gauntlet --fast`** (3 rounds: Discovery → First Strike → Second Strike). Individual `/assemble` runs review one mission's changeset. The Gauntlet reviews the **combined system** — catching cross-module integration bugs: missing imports between modules built in different missions, inconsistent auth enforcement across endpoints, CORS/CSP gaps for new connection patterns.
305
+ 3. **Fix all Critical and High findings** before the next mission.
306
+ 4. **Commit fixes** via `/git`: `Gauntlet checkpoint after mission N: X fixes`
307
+ 5. **Extract Learned Rules.** After fixing, classify each finding by root cause. If the same root cause appears 2+ times across checkpoints (or 2+ times within the same checkpoint), append a Learned Rule to `campaign-state.md`:
308
+ ```
309
+ ## Learned Rules
310
+ - [Rule]: [one-line description] (source: checkpoint after mission N)
311
+ ```
312
+ All subsequent `/assemble` runs read the Learned Rules section of `campaign-state.md` and enforce them as pre-flight checks before committing. Rules are cumulative — they persist across sessions because they live in the file, not in context.
313
+ 6. **Escalation triggers:**
314
+ - If a checkpoint produces >5 HIGH findings → auto-insert a "Hardening Sprint" mission as the next mission. The sprint's sole objective is cross-cutting hardening (auth consistency, error handling, data contract verification) — no new features.
315
+ - If a finding reveals a missing capability (e.g., "no rate limiting middleware exists") → auto-add a mission to the current phase backlog in `campaign-state.md`.
316
+ 7. `--fast` mode skips checkpoint gauntlets (but NOT the mandatory final Gauntlet in Step 6).
317
+
318
+ **Why Learned Rules matter:** In a long campaign (20+ missions), pattern-level bugs like "forgot auth on new endpoints" get rediscovered at every checkpoint, fixed, and forgotten. The campaign makes the same class of mistake repeatedly because it doesn't learn from its own quality gates. Learned Rules break this cycle — the campaign gets smarter as it runs. (Field report #126)
319
+
320
+ **Why every 4 missions:** Each `/assemble` catches ~95% of issues within its scope. The remaining ~5% are cross-cutting — a bug introduced in mission 2 that affects mission 6. Catching these periodically prevents compounding. The cost is one context window per checkpoint; the ROI is real (the v6.0-v6.5 Gauntlet found a build-breaking missing import that two full `/assemble` pipelines missed).
321
+
322
+ ### Lightweight Blitz Debrief (Alternative)
323
+
324
+ **Only valid when `/context` shows actual usage above 85% (~850k tokens).** You MUST report the actual context percentage to justify using the lightweight alternative. "Context is heavy" without a number is not valid justification.
325
+
326
+ If actual usage exceeds 85%, capture a **3-line mission summary** appended to `/logs/campaign-debriefs.md` instead:
327
+
328
+ ```
329
+ ### Mission N — [Name] (vX.Y.Z)
330
+ - **Findings:** [count] MUST FIX, [count] SHOULD FIX
331
+ - **Key lesson:** [one sentence]
332
+ ```
333
+
334
+ Full debrief runs once at campaign end (after Victory Gauntlet), covering all missions together. This reduces per-mission debrief cost from ~5-10% context to ~0.5%. The BLITZ GATE in the command file still applies — this is a lighter alternative that satisfies the gate without invoking the full skill. (Field report #26)
335
+
336
+ **Phase-level debrief batching:** For bug-fix or cleanup campaigns with 10+ small missions, debrief can be batched per-phase rather than per-mission. Append phase summary to `/logs/campaign-debriefs.md` after each phase boundary. Full debrief at Victory remains mandatory.
337
+
338
+ ### Context at 1M
339
+
340
+ The 1M context window eliminates context as a practical concern for single sessions. Evidence: a full Infinity Gauntlet (10 rounds, 40 agent launches) + 4 campaigns (28 missions) + 42 commits used only 600k (60%) of a 1M window.
341
+
342
+ **The rule is simple:** Do not mention context. Do not suggest checkpoints. Do not reduce quality. If `/context` shows >85%, suggest a fresh session. Below 85%, context is never a reason to change behavior.
343
+
344
+ **Quality Reduction Anti-Pattern (unchanged — this is a hard rule):**
345
+
346
+ You MUST NOT reduce the quality or thoroughness of any review, Gauntlet, checkpoint, or debrief based on self-assessed "context pressure." This is a hard rule, not a guideline.
347
+
348
+ You MUST NOT:
349
+ - Run a "lightweight" Gauntlet because the session has been long
350
+ - Skip agents or rounds because "context is getting heavy"
351
+ - Skip debrief because "context is heavy"
352
+ - Reduce review depth because of mission count
353
+ - Use phrases like "given context pressure," "to save context," or "running efficiently" to justify cutting any quality gate
354
+
355
+ If you believe context justifies reducing quality:
356
+ 1. Run `/context`
357
+ 2. Report the actual percentage
358
+ 3. If below 85%: **you are wrong — continue at full quality**
359
+ 4. If above 85%: suggest a fresh session — do NOT reduce quality in the current session
360
+
361
+ The Gauntlet is never reduced. Checkpoints are never lightweight. Debriefs are never skipped. Run `/context` or run the full protocol.
362
+
363
+ ### Step 5 — Debrief and Commit
364
+
365
+ 1. **Security gate (before commit):** Check if this mission added new TypeScript/JavaScript files that handle network I/O (HTTP endpoints, WebSocket handlers), user input (form parsing, body parsing), or credential storage (vault writes, env file generation). If yes, flag: **"This mission added network-facing code. Run `/security` before committing."** Even in `--fast` mode, security is non-negotiable for new attack surface. This prevents shipping Critical vulnerabilities that only get caught in a post-hoc hardening pass.
366
+ 1a. **Data source verification (when debugging data flow):** When a mission involves tracing a data pipeline (CSS inheritance, design system propagation, content rendering), verify the *source data* is current — not just the format. Saved snapshots (designSystem, companyBrief) may be stale if the underlying JSX/HTML was modified by chat edits. Always prefer extracting from the current source of truth over reading cached state. (Field report #111: chat edits changed CSS vars but designSystem snapshot was stale.)
367
+ 2. Coulson commits the mission (`/git`)
368
+ 3. Update `/logs/campaign-state.md` — mark mission complete, log any deviations from PRD. Include the debrief issue number: "Debrief: #XX" or "Debrief: SKIPPED (not blitz)" or "Debrief: N/A (normal mode)".
369
+ 4. **Route BLOCKED items to the right place:**
370
+ - Future feature → append to `ROADMAP.md` under the appropriate version
371
+ - User-provided asset (illustrations, OG images) → add to `## Blocked Items` in campaign-state.md
372
+ - PRD requirement beyond code → mark BLOCKED in the Prophecy Board with reason
373
+ 5. **Consumer verification:** Before marking a mission complete, verify that stored data is consumed. For every new store/preference/setting built in this mission, identify at least one backend consumer that reads it and acts on it. A preference that is stored but never read is dead code and must be flagged. (Field report #99: widget preferences API built with full CRUD + migration, but no pipeline consumer checked preferences before processing — entire feature was a dead end.)
374
+ 6. **Troi pre-scan before "all complete" declaration:** Before declaring all requirements COMPLETE or BLOCKED, run a lightweight Troi check: read the PRD's testable sections (features, marketing, dashboard, tiers, emails) and verify semantic completeness — not just route existence. This catches "FAQ section missing" and "social proof not rendered" type gaps that structural diffs miss. Cheaper than deferring to the Victory Gauntlet. (Field report #38: 11 gaps found by Gauntlet that a prior session's "all complete" declaration missed.)
375
+ 7. **Debrief enforcement check:** Count debriefs filed this campaign (look for "Debrief: #" entries in campaign-state.md). If `completedMissions > 4` and `debriefCount === 0`, this is a protocol violation — flag immediately: "WARNING: ${completedMissions} missions completed with 0 debriefs filed. The BLITZ GATE requires a debrief after every mission. File debriefs for the most recent 3 missions NOW before proceeding." This catches campaigns where the debrief gate was silently skipped. (Field report #139: 39 missions, 0 debriefs — total knowledge loss.)
376
+ 8. Check: are all PRD requirements COMPLETE or explicitly BLOCKED?
377
+ - **No** → loop back to Step 1 (next mission)
378
+ - **Yes** → Step 6 (victory)
379
+
380
+ ### Step 6 — Victory (Gauntlet + Troi's Compliance Check)
381
+
382
+ **HARD GATE: The campaign is NOT complete until the Victory Gauntlet runs.** Do not declare victory, present a completion summary, or ask the user whether to run the Gauntlet. Step 5 flows directly into Step 6. In blitz mode, this is automatic — the Gauntlet launches immediately after the final mission's commit. In normal mode, announce "All missions complete — running Victory Gauntlet" and proceed. The Gauntlet is as mandatory as the commits themselves. A campaign that skips the Gauntlet is a campaign that ships unreviewed code. (Field report #265: the Victory Gauntlet was skipped during blitz — a skip that would have shipped 3 Critical statistical bugs + a webhook security bypass.)
383
+
384
+ All PRD requirements are COMPLETE or explicitly BLOCKED:
385
+
386
+ 1. **Run `/gauntlet` (full 5 rounds)** — mandatory final Gauntlet. Non-negotiable, even with `--fast`. Five rounds: Discovery → First Strike (full domain audits) → Second Strike (re-verification) → Crossfire (adversarial) → Council (convergence). The Gauntlet tests the combined system across all domains simultaneously. This is the "would I ship this" gate.
387
+ 1a. **Cross-campaign integration check:** If this campaign built modules that should be consumed by an existing daemon, orchestrator, or service, the Victory Gauntlet MUST verify the integration — not just the standalone module. Check: are the new modules imported by their consumer? Are the scheduled jobs wired? Does the data flow from producer to consumer? Per-campaign Victory Gauntlets review deliverables in isolation — this step catches the gaps between campaigns. (Field report #109: v11.2-v11.3 modules existed but were never imported by heartbeat.ts.)
388
+ 2. **Fix all Critical and High findings** from the Gauntlet.
389
+ 3. **Troi reads the PRD section-by-section** (runs as part of the Gauntlet's Council round) and verifies every prose claim against the implementation:
390
+ - Does the component render what the PRD describes? (not just "does the route exist?")
391
+ - Are numeric claims accurate? (e.g., "11 lead agents" — count them)
392
+ - Are visual treatments implemented as specified? (hover effects, layouts, colors)
393
+ - Are non-code requirements flagged as BLOCKED? (illustrations, OG images, assets)
394
+ 4. If Troi finds discrepancies → fix code requirements, flag asset requirements as BLOCKED
395
+ 5. Present final report: COMPLETE items, BLOCKED items (with reasons), deviations from PRD
396
+ 6. **Run `/debrief --submit`** — mandatory end-of-campaign post-mortem covering all missions together. Captures cross-cutting learnings that per-mission debriefs miss. This runs BEFORE the sign-off so learnings are captured while context is fresh. (Field reports #31, #53)
397
+ 7. **PRD sync check:** Before declaring victory, compare PRD numeric claims (agent counts, feature counts, route counts, component counts) against the actual codebase for this campaign's domain. Stale PRD claims erode trust and compound across campaigns. (Field report #119)
398
+ 7a. **Tenant isolation completeness (conditional):** If the campaign touched auth, multi-tenant, or user-scoped data, grep ALL tables for `org_id` (or equivalent ownership column). Every table must be classified as either "tenant-scoped" (has org_id) or "global by design" (with documented justification). Tables without org_id and without justification are IDOR risks. This catches incomplete tenant migrations that survive per-phase sweeps — the per-phase check (BUILD_PROTOCOL Phase 4) only covers tables modified in that phase. (Field reports #229, #231)
399
+ 8. **Entity selector completeness** — for every user-facing selector (dropdown, combobox, autocomplete) that selects from a database-backed list: verify the selector can handle entities that don't exist yet. If a user can only pick from existing DB records, the feature is incomplete — the selector needs a creation flow or an external lookup fallback. Common examples: city selector (needs geocoding fallback), category picker (needs "Other" or custom entry), user selector (needs invite flow). (Field report #263: city selector only searched existing DB cities — users couldn't set homebase to any city not already in the database.)
400
+ 9. **Victory Checklist** — ALL must be true before sign-off:
401
+ - [ ] Gauntlet Council signed off (6/6 or all domains pass)
402
+ - [ ] All BLOCKED items acknowledged by user
403
+ - [ ] PRD claims verified against codebase
404
+ - [ ] `/debrief --submit` filed (issue number recorded)
405
+ - [ ] Campaign-state.md updated with final status
406
+
407
+ ### The Reckoning (Optional Pre-Launch Audit)
408
+
409
+ Before declaring victory, Sisko may invoke The Reckoning — a 5-wave parallel parity audit focused on launch readiness rather than code quality:
410
+ 1. **Marketing parity** — does the site say what the product does?
411
+ 2. **UI parity** — do all pages/flows match the PRD?
412
+ 3. **Backend parity** — are all endpoints wired and functional?
413
+ 4. **Gate parity** — auth, payments, error handling all working?
414
+ 5. **Cross-cutting** — a11y, SEO, performance, mobile
415
+
416
+ This is lighter than a Victory Gauntlet (~13 agents vs 30+) and focused on "can we ship?" rather than "is the code perfect?" Use when the campaign built a user-facing product and you want to verify parity between PRD and reality before the Gauntlet runs. (Field report #85)
417
+
418
+ 10. **State file update:** After Victory, update `build-state.md` with current version number, test counts, and deployment state. State file drift across multi-campaign sessions causes stale data in the Danger Room and misleading `/assess` reports.
419
+
420
+ 11. Sisko signs off (ONLY after checklist is complete):
421
+
422
+ > *"The Prophets' plan is fulfilled. The campaign is complete."*
423
+
424
+ ### Step 7 — Deploy (Kusanagi — optional)
425
+
426
+ After Victory Gauntlet passes and debrief is filed:
427
+
428
+ 1. Check PRD frontmatter for `deploy:` target. If no deploy target → skip.
429
+ 2. In normal mode: "Deploy to [target]? [Y/n]"
430
+ 3. In `--blitz` mode: auto-deploy (no confirmation needed — the Gauntlet already verified the code)
431
+ 4. Run `/deploy` — Kusanagi handles target detection, execution, health check, rollback
432
+ 5. If deploy succeeds: update deploy-state.md, Danger Room deploy panel shows live status
433
+ 6. If deploy fails: Valkyrie rolls back, logs failure. Campaign still counts as VICTORY (the code is correct; the deploy issue is infrastructure, not code quality).
434
+
435
+ Deploy is OPTIONAL — Victory means "code is correct and Gauntlet-verified." Deploy means "code is also in production." A campaign can achieve Victory without deploying.
436
+
437
+ **Victory does NOT mean "everything was built." It means "everything buildable was built correctly, survived the Gauntlet, and everything unbuildable is explicitly acknowledged."**
438
+
439
+ **The Victory Gauntlet is NEVER skipped.** Not for methodology-only campaigns. Not for "no code changes." Not for single-mission campaigns. The Gauntlet checks methodology consistency (cross-references, command↔doc sync, agent assignments, version drift) in ADDITION to code quality. Five consecutive campaigns (v8.1-v9.2) shipped without Gauntlets because the first skip was self-justified as "methodology-only" and the pattern stuck. This is a protocol violation on the same level as the Quality Reduction Anti-Pattern.
440
+
441
+ ### Deliverable Completeness Anti-Pattern
442
+
443
+ **"Methodology-only" is NOT a complete deliverable unless it includes enforcement.**
444
+
445
+ Writing instructions in a method doc ("agents should report confidence scores") is a specification, not an implementation. A feature is only COMPLETE when:
446
+ 1. **Methodology section exists** in the relevant method doc (the "what")
447
+ 2. **Command file enforces it** — the paired command instructs the agent to actually do it (the "how")
448
+ 3. **The described behavior is verifiable** — you can check whether it happened
449
+
450
+ A methodology section WITHOUT command enforcement = **PARTIAL**, not COMPLETE. Mark it as such in campaign-state.md and do NOT declare victory.
451
+
452
+ **Per-mission evidence required at sign-off.** The Victory sign-off must list each mission with its actual status and evidence:
453
+ ```
454
+ Mission 1: COMPLETE — war-room.html created (233 lines), war-room.js (199 lines), server endpoints verified
455
+ Mission 2: PARTIAL — methodology section in GAUNTLET.md, command file NOT updated
456
+ Mission 3: NOT BUILT — deferred
457
+ ```
458
+ No aggregate "11 missions done." Per-mission evidence or it didn't happen. (Field report #76: campaign claimed 11 missions complete, 3 were not built, 4 were methodology-only stubs.)
459
+
460
+ ### Roadmap Compliance Check (Troi)
461
+
462
+ During the Victory Gauntlet, Troi reads the ROADMAP.md section for the current version and verifies each mission's deliverables against reality:
463
+ - Does the described feature actually exist (file, function, section)?
464
+ - Does it actually WORK as described (not just exist)?
465
+ - Is enforcement present in command files (not just documentation)?
466
+ - Are data feeds connected (not just null stubs)?
467
+
468
+ This is the check that catches "the file exists but the feature doesn't work." (Field report #76: Victory Gauntlet signed off on v10.0 because files existed and docs were consistent, but 3 features were not built and 4 had no enforcement.)
469
+
470
+ ### Periodic Architecture Health Check
471
+
472
+ After every 2-3 campaigns (or when transitioning between major project phases), run a full `/architect` with all agents deployed (Spock, Uhura, Worf, Tuvok, La Forge, Data, Torres, Riker). This catches systemic issues that per-mission reviews and Gauntlets miss:
473
+ - Missing database indexes for query patterns that emerged over multiple campaigns
474
+ - PII that accumulated without isolation
475
+ - Integration failure modes never tested
476
+ - Architecture decisions made in Campaign 1 that no longer fit Campaign 4's reality
477
+
478
+ Individual campaigns catch bugs. The health check catches drift. (Field report #67: full architecture review after 4 campaigns found 2 CRITICAL + 3 HIGH issues that no Gauntlet had caught.)
479
+
480
+ ## The Prophecy Board
481
+
482
+ After each mission, Sisko updates `/logs/campaign-state.md`.
483
+
484
+ **Status values:**
485
+ - `NOT STARTED` — nothing exists
486
+ - `STRUCTURAL` — routes/components exist but PRD prose not fully verified
487
+ - `COMPLETE` — every claim in the PRD prose is verified against the implementation
488
+ - `BLOCKED` — cannot complete without external input (assets, credentials, user decision)
489
+ - `IN PROGRESS` — currently being built
490
+
491
+ ```markdown
492
+ # Campaign State — [Project Name]
493
+
494
+ ## The Prophecy (PRD Coverage)
495
+ | PRD Section | Status | Mission | Blocked By | Debrief |
496
+ |-------------|--------|---------|------------|---------|
497
+ | 4. Core > Booking | COMPLETE | Mission 1 | — | #12 |
498
+ | 5. Auth & Accounts | COMPLETE | Mission 1 | — | #12 |
499
+ | 4. Core > Agent Directory | STRUCTURAL | Mission 1 | Asset: 11 agent illustrations | #12 |
500
+ | 6. SEO & Metadata | STRUCTURAL | Mission 2 | Asset: OG images per page | #13 |
501
+ | 7. Payments | IN PROGRESS | Mission 3 | — | N/A |
502
+
503
+ ## Deviations from PRD
504
+ | PRD Says | Actual | Reason | Accepted? |
505
+ |----------|--------|--------|-----------|
506
+ | content/ directory with MDX | Inline JSX in page.tsx | Simpler, no MDX compile | Yes |
507
+ | Full-viewport hero (100vh) | 80vh hero | Better UX with nav visible | Pending |
508
+ ```
509
+
510
+ ## Session Management
511
+
512
+ `/campaign` is designed for multi-session execution:
513
+
514
+ - **Plan:** `/campaign --plan add preview deployments for PRs` — Dax adds it to PRD/ROADMAP, doesn't build
515
+ - **Execute:** `/campaign` — starts from Step 0 (or auto-resumes if state exists)
516
+ - **Resume:** `/campaign --resume` — explicit resume from campaign-state
517
+ - **Skip to mission:** `/campaign --mission "Payments"` — jumps to that PRD section
518
+ - **Fast mode:** `/campaign --fast` — passes `--fast` to every `/assemble` call (skips Crossfire + Council)
519
+ - **Blitz mode:** `/campaign --blitz` — fully autonomous execution: skips mission confirmation, auto-debriefs after each mission, auto-continues. Does NOT imply `--fast`. Combine: `--blitz --fast`
520
+ - **Autonomous mode:** `/campaign --autonomous` — supervised autonomy with safety rails (see below)
521
+ - **Continuous mode:** `/campaign --continuous` — after Victory, auto-start the next roadmap version (see below)
522
+
523
+ ### Continuous Mode (`--continuous`)
524
+
525
+ After the current campaign completes (Victory Gauntlet passes, debrief filed, sign-off done), Sisko checks the ROADMAP for the next unbuilt version. **By default, continuous mode only chains within the current major version.** At v9.2, it continues to v9.3, v9.4, etc. but STOPS before v10.0 — a major version is a natural checkpoint that deserves a deliberate decision to start.
526
+
527
+ **Scope rules:**
528
+ - `--continuous` (default) → chain through remaining minor/patch versions in the current major. v9.2 → v9.3 → v9.4 → STOP at v10.0 boundary.
529
+ - `--continuous --major` → cross major version boundaries. v9.3 → v10.0 → v10.1 → continues without stopping until the roadmap is empty.
530
+
531
+ Combinable with other flags:
532
+ - `--blitz --continuous` — blitz through remaining dot releases, stop at next major
533
+ - `--blitz --continuous --major` — blitz everything on the roadmap, no stops
534
+ - `--autonomous --continuous` — autonomous with checkpoints, chaining within major
535
+ - `--fast --continuous` — fast reviews across dot releases
536
+
537
+ **The Victory Gauntlet still runs between versions.** Continuous mode does NOT skip the Gauntlet — it runs the Gauntlet, then starts the next campaign. The Gauntlet is the gate between versions, whether or not continuous mode chains them.
538
+
539
+ ### Autonomous Mode (`--autonomous`)
540
+
541
+ Sisko executes missions without waiting for confirmation at every brief. Stronger guardrails than `--blitz`:
542
+
543
+ 1. **Git checkpoint before each mission:** `git tag campaign-mission-N-start` before building. If things go wrong, rollback is one `git reset --hard` away.
544
+ 2. **Critical finding gate:** If `/assemble` produces Critical findings that can't be auto-fixed → rollback to the tag, pause for human review. Do NOT continue.
545
+ 3. **5-mission human checkpoint:** Maximum 5 consecutive autonomous missions before a mandatory human checkpoint. Present progress summary, ask to continue.
546
+ 4. **Victory Gauntlet requires human confirmation** — even in autonomous mode. The final Gauntlet is too important to skip human review.
547
+ 5. **Post-mission summaries logged, not presented** — mission briefs and debrief summaries go to campaign-state.md without interactive display.
548
+ 6. **Debrief is still mandatory** — `/debrief --submit` runs after each mission (same as blitz).
549
+
550
+ **`--autonomous` vs `--blitz`:**
551
+ - `--blitz` = no human interaction, full quality, auto-continue. The user walks away.
552
+ - `--autonomous` = same as blitz PLUS git tags, critical-finding rollback, and 5-mission human checkpoints. The user checks in periodically.
553
+ - `--autonomous` is safer for long campaigns (10+ missions) where unattended errors can compound.
554
+
555
+ **Why after v8.0-v8.2:** Autonomous campaigns are safer when Agent Memory catches known pitfalls (v8.0), the Deep Roster catches more issues per review (v8.1), the methodology self-improves from lessons (v8.2), and Conflict Prediction catches structural problems before they propagate through unattended missions.
556
+
557
+ ## Deliverables
558
+
559
+ 1. `/logs/campaign-state.md` — The Prophecy Board (persistent across sessions)
560
+ 2. Per-mission commits via `/git`
561
+ 3. Final full-project review via `/assemble --skip-build`
562
+
563
+ ## Handoffs
564
+
565
+ - Sisko hands TO Fury for each mission
566
+ - Coulson handles versioning after each mission
567
+ - If a mission is blocked by infrastructure → Kusanagi consulted before Fury deploys
568
+ - If a mission requires new credentials → user prompted before build starts