thevoidforge 21.0.11 → 21.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107) hide show
  1. package/dist/.claude/commands/ai.md +69 -0
  2. package/dist/.claude/commands/architect.md +121 -0
  3. package/dist/.claude/commands/assemble.md +201 -0
  4. package/dist/.claude/commands/assess.md +75 -0
  5. package/dist/.claude/commands/blueprint.md +135 -0
  6. package/dist/.claude/commands/build.md +116 -0
  7. package/dist/.claude/commands/campaign.md +201 -0
  8. package/dist/.claude/commands/cultivation.md +166 -0
  9. package/dist/.claude/commands/current.md +128 -0
  10. package/dist/.claude/commands/dangerroom.md +74 -0
  11. package/dist/.claude/commands/debrief.md +178 -0
  12. package/dist/.claude/commands/deploy.md +99 -0
  13. package/dist/.claude/commands/devops.md +143 -0
  14. package/dist/.claude/commands/gauntlet.md +140 -0
  15. package/dist/.claude/commands/git.md +104 -0
  16. package/dist/.claude/commands/grow.md +146 -0
  17. package/dist/.claude/commands/imagine.md +126 -0
  18. package/dist/.claude/commands/portfolio.md +50 -0
  19. package/dist/.claude/commands/prd.md +113 -0
  20. package/dist/.claude/commands/qa.md +107 -0
  21. package/dist/.claude/commands/review.md +151 -0
  22. package/dist/.claude/commands/security.md +100 -0
  23. package/dist/.claude/commands/test.md +96 -0
  24. package/dist/.claude/commands/thumper.md +116 -0
  25. package/dist/.claude/commands/treasury.md +100 -0
  26. package/dist/.claude/commands/ux.md +118 -0
  27. package/dist/.claude/commands/vault.md +189 -0
  28. package/dist/.claude/commands/void.md +108 -0
  29. package/dist/CHANGELOG.md +1918 -0
  30. package/dist/CLAUDE.md +250 -0
  31. package/dist/HOLOCRON.md +856 -0
  32. package/dist/VERSION.md +123 -0
  33. package/dist/docs/NAMING_REGISTRY.md +478 -0
  34. package/dist/docs/methods/AI_INTELLIGENCE.md +276 -0
  35. package/dist/docs/methods/ASSEMBLER.md +142 -0
  36. package/dist/docs/methods/BACKEND_ENGINEER.md +165 -0
  37. package/dist/docs/methods/BUILD_JOURNAL.md +185 -0
  38. package/dist/docs/methods/BUILD_PROTOCOL.md +426 -0
  39. package/dist/docs/methods/CAMPAIGN.md +568 -0
  40. package/dist/docs/methods/CONTEXT_MANAGEMENT.md +189 -0
  41. package/dist/docs/methods/DEEP_CURRENT.md +184 -0
  42. package/dist/docs/methods/DEVOPS_ENGINEER.md +295 -0
  43. package/dist/docs/methods/FIELD_MEDIC.md +261 -0
  44. package/dist/docs/methods/FORGE_ARTIST.md +108 -0
  45. package/dist/docs/methods/FORGE_KEEPER.md +268 -0
  46. package/dist/docs/methods/GAUNTLET.md +344 -0
  47. package/dist/docs/methods/GROWTH_STRATEGIST.md +466 -0
  48. package/dist/docs/methods/HEARTBEAT.md +168 -0
  49. package/dist/docs/methods/MCP_INTEGRATION.md +139 -0
  50. package/dist/docs/methods/MUSTER.md +148 -0
  51. package/dist/docs/methods/PRD_GENERATOR.md +186 -0
  52. package/dist/docs/methods/PRODUCT_DESIGN_FRONTEND.md +250 -0
  53. package/dist/docs/methods/QA_ENGINEER.md +337 -0
  54. package/dist/docs/methods/RELEASE_MANAGER.md +145 -0
  55. package/dist/docs/methods/SECURITY_AUDITOR.md +320 -0
  56. package/dist/docs/methods/SUB_AGENTS.md +335 -0
  57. package/dist/docs/methods/SYSTEMS_ARCHITECT.md +171 -0
  58. package/dist/docs/methods/TESTING.md +359 -0
  59. package/dist/docs/methods/THUMPER.md +175 -0
  60. package/dist/docs/methods/TIME_VAULT.md +120 -0
  61. package/dist/docs/methods/TREASURY.md +184 -0
  62. package/dist/docs/methods/TROUBLESHOOTING.md +265 -0
  63. package/dist/docs/patterns/README.md +52 -0
  64. package/dist/docs/patterns/ad-billing-adapter.ts +537 -0
  65. package/dist/docs/patterns/ad-platform-adapter.ts +421 -0
  66. package/dist/docs/patterns/ai-classifier.ts +195 -0
  67. package/dist/docs/patterns/ai-eval.ts +272 -0
  68. package/dist/docs/patterns/ai-orchestrator.ts +341 -0
  69. package/dist/docs/patterns/ai-router.ts +194 -0
  70. package/dist/docs/patterns/ai-tool-schema.ts +237 -0
  71. package/dist/docs/patterns/api-route.ts +241 -0
  72. package/dist/docs/patterns/backtest-engine.ts +499 -0
  73. package/dist/docs/patterns/browser-review.ts +292 -0
  74. package/dist/docs/patterns/combobox.tsx +300 -0
  75. package/dist/docs/patterns/component.tsx +262 -0
  76. package/dist/docs/patterns/daemon-process.ts +338 -0
  77. package/dist/docs/patterns/data-pipeline.ts +297 -0
  78. package/dist/docs/patterns/database-migration.ts +466 -0
  79. package/dist/docs/patterns/e2e-test.ts +629 -0
  80. package/dist/docs/patterns/error-handling.ts +312 -0
  81. package/dist/docs/patterns/execution-safety.ts +601 -0
  82. package/dist/docs/patterns/financial-transaction.ts +342 -0
  83. package/dist/docs/patterns/funding-plan.ts +462 -0
  84. package/dist/docs/patterns/game-entity.ts +137 -0
  85. package/dist/docs/patterns/game-loop.ts +113 -0
  86. package/dist/docs/patterns/game-state.ts +143 -0
  87. package/dist/docs/patterns/job-queue.ts +225 -0
  88. package/dist/docs/patterns/kongo-integration.ts +164 -0
  89. package/dist/docs/patterns/middleware.ts +363 -0
  90. package/dist/docs/patterns/mobile-screen.tsx +139 -0
  91. package/dist/docs/patterns/mobile-service.ts +167 -0
  92. package/dist/docs/patterns/multi-tenant.ts +382 -0
  93. package/dist/docs/patterns/oauth-token-lifecycle.ts +223 -0
  94. package/dist/docs/patterns/outbound-rate-limiter.ts +260 -0
  95. package/dist/docs/patterns/prompt-template.ts +195 -0
  96. package/dist/docs/patterns/revenue-source-adapter.ts +311 -0
  97. package/dist/docs/patterns/service.ts +224 -0
  98. package/dist/docs/patterns/sse-endpoint.ts +118 -0
  99. package/dist/docs/patterns/stablecoin-adapter.ts +511 -0
  100. package/dist/docs/patterns/third-party-script.ts +68 -0
  101. package/dist/scripts/thumper/gom-jabbar.sh +241 -0
  102. package/dist/scripts/thumper/relay.sh +610 -0
  103. package/dist/scripts/thumper/scan.sh +359 -0
  104. package/dist/scripts/thumper/thumper.sh +190 -0
  105. package/dist/scripts/thumper/water-rings.sh +76 -0
  106. package/package.json +1 -1
  107. package/dist/tsconfig.tsbuildinfo +0 -1
@@ -0,0 +1,151 @@
1
+ # /review — Picard's Code Review
2
+
3
+ > Pattern compliance, code quality, and maintainability review. Picard-affiliated (Star Trek).
4
+
5
+ ## Context Setup
6
+ 1. Read `/logs/build-state.md` — understand current project state
7
+ 2. Read the relevant pattern files from `/docs/patterns/` for the code being reviewed
8
+ 3. Read `/docs/LESSONS.md` — check for review-relevant lessons (integration tracing gaps, render loops, cross-module issues). Flag matches during review.
9
+
10
+ ## Step 0 — Scope
11
+ Determine what to review:
12
+ - If `$ARGUMENTS` specifies files/directories, review those
13
+ - If no arguments, review files changed in the last commit (plus any uncommitted changes): `git diff --name-only HEAD~1`
14
+ - If reviewing a feature branch: `git diff --name-only main...HEAD`
15
+
16
+ List all files in scope and their types (API route, service, component, middleware, config).
17
+
18
+ ## Agent Deployment Manifest
19
+
20
+ **Lead:** Picard (Star Trek) — architecture lens, final arbiter
21
+ **Core team (always deployed):**
22
+ - **Spock** — pattern compliance + integration tracing
23
+ - **Seven** — code quality, dead code, complexity
24
+ - **Data** — maintainability, error paths, state flow
25
+
26
+ **Stark's Marvel team (deployed on backend-heavy reviews):**
27
+ - **Rogers** — API design: HTTP semantics, consistent response shapes, REST conventions
28
+ - **Banner** — database: query patterns, N+1, missing indexes, schema concerns
29
+ - **Strange** — service architecture: separation of concerns, business logic placement
30
+ - **Barton** — error handling: try/catch completeness, error propagation, user-facing messages
31
+ - **Romanoff** — security implications in reviewed code (lightweight — flags for Kenobi, doesn't audit)
32
+ - **Thor** — performance: unnecessary re-renders, expensive computations, missing memoization
33
+ - **Wanda** — state management: store design, prop drilling, context boundaries
34
+ - **T'Challa** — API integration: external service calls, retry logic, fallback behavior
35
+
36
+ **Cross-domain agents (deployed based on content):**
37
+ - **Nightwing** (DC) — auth flow end-to-end: when auth code is in scope, trace signup→verify→login→protected→logout
38
+ - **Bilbo** (Tolkien) — copy audit: error messages, UI text, API response descriptions — are they clear and human?
39
+ - **Troi** (Star Trek) — PRD compliance: does the code match what the PRD describes?
40
+ - **Constantine** (DC) — cursed code: logic that works by accident, tautological checks, shadowed vars
41
+ - **Samwise** (Tolkien) — a11y spot-check: when components are in scope, check keyboard nav and ARIA
42
+
43
+ ## Step 1 — Parallel Analysis
44
+ Use the Agent tool to run these in parallel — all are read-only analysis:
45
+
46
+ **Agent 1 (Spock — Pattern Compliance + Integration Tracing):**
47
+ For each file, check against its matching pattern in `/docs/patterns/`:
48
+ - API routes follow `api-route.ts` — validate → auth → service → respond
49
+ - Services follow `service.ts` — business logic not in routes, ownership checks, typed errors
50
+ - Components follow `component.tsx` — all four states, keyboard accessible
51
+ - Middleware follows `middleware.ts` — auth, logging, rate limiting
52
+ - Error handling follows `error-handling.ts` — consistent types, no leaked internals
53
+ - Queues follow `job-queue.ts` — idempotent, retry, dead letter
54
+ - Multi-tenant follows `multi-tenant.ts` — workspace scoped, role-based
55
+
56
+ **INTEGRATION TRACING (mandatory):** When reviewed code generates URLs, references other API endpoints, constructs storage keys, or produces data consumed by other modules — you MUST read the consuming code to verify compatibility. Examples:
57
+ - File uploaded with key prefix `avatars/` → read the asset proxy to verify it serves that prefix
58
+ - API returns error `{ code: "CONFLICT" }` → read the UI that calls this API to verify it displays the error
59
+ - Middleware sets header `x-request-id` → read a sample API route to verify it can access the header
60
+ - Service generates a URL → read the route/proxy that handles that URL pattern
61
+
62
+ **Agent 2 (Seven — Code Quality):**
63
+ - Unnecessary complexity (can this be simpler?)
64
+ - Dead code, unused imports, unreachable branches
65
+ - Duplicated logic that should be extracted
66
+ - Inconsistent naming or style
67
+ - Missing TypeScript types or `any` usage
68
+ - Functions doing too many things (SRP violations)
69
+
70
+ **Agent 3 (Data — Maintainability + Error Paths + State Flow):**
71
+ - Wrong abstractions (over-engineered or under-abstracted)
72
+ - Coupling between modules that should be independent
73
+ - Missing error handling at system boundaries
74
+ - Hardcoded values that should be config
75
+ - Missing or misleading comments on non-obvious logic
76
+
77
+ **Agent 4 (Rogers + Banner + Strange — Backend Review, if backend code in scope):**
78
+ - Rogers: API endpoints follow REST conventions, consistent response shapes, proper HTTP status codes
79
+ - Banner: database queries are efficient (no N+1), indexes exist for query patterns, schema is normalized
80
+ - Strange: business logic is in services not routes, separation of concerns is clean, no god functions
81
+
82
+ **Agent 5 (Nightwing + Constantine — Cross-Domain, if auth or complex logic in scope):**
83
+ - Nightwing: if auth code changed, trace the full signup→verify→login→protected→logout flow
84
+ - Constantine: scan fixed/refactored areas for logic that only works by coincidence
85
+
86
+ **Agent 6 (Bilbo + Troi — Copy + PRD, if UI or user-facing code in scope):**
87
+ - Bilbo: error messages are clear and human, not generic "Something went wrong"
88
+ - Troi: implementation matches PRD descriptions (not just "route exists" but "renders what PRD says")
89
+
90
+ **ROUTE COLLISION CHECK (mandatory for web apps):** When a new router/route file is added, list ALL registered routes (method + path) across ALL routers. Check for duplicate method+path combinations. Frameworks like FastAPI silently shadow duplicate routes — the first registered wins.
91
+
92
+ **REACT STATE FLOW ANALYSIS (mandatory for React projects):**
93
+ For every `useEffect` in new/modified components:
94
+ 1. List what store values it reads (dependency array)
95
+ 2. List what store actions it calls (effect body)
96
+ 3. Check: does any action trigger a store update that changes a value in the dependency array? If yes → infinite render loop.
97
+ 4. Check: does the effect call `.focus()` or other DOM methods that should only run once? If yes → needs a ref guard.
98
+ 5. If a component has 3+ `useEffect` hooks with store dependencies, flag for manual render-cycle review.
99
+
100
+ **ERROR PATH VERIFICATION (mandatory):** For every API route that returns error responses (4xx, 5xx), identify the client that calls this endpoint and verify:
101
+ - The client reads the response body (not just checks `res.ok`)
102
+ - The specific error message/code is displayed to the user
103
+ - Generic fallback messages are only used when the server truly returns no useful error info
104
+ - The UI form state after error allows retry without losing user input
105
+
106
+ ## Step 1.5 — Conflict Detection
107
+ After parallel analysis completes, scan findings from all agents for conflicts:
108
+ - **Same code, different verdicts:** Spock says "pattern violation" but Data says "intentional trade-off"
109
+ - **Severity disagreements:** Seven says "Must Fix" but Spock says "Consider"
110
+ - **Contradictory fixes:** One agent's fix would break another agent's recommendation
111
+
112
+ For each conflict, trigger the debate protocol (see SUB_AGENTS.md "Agent Debate Protocol"): Agent A states finding → Agent B responds → Agent A rebuts → Arbiter (Picard) decides. 3 exchanges max. Log the debate transcript as an ADR. The winning position becomes the canonical finding in Step 2. Do NOT list both opinions — resolve them.
113
+
114
+ ## Step 2 — Synthesize Findings
115
+ Merge all findings into a review table (conflicts already resolved via Step 1.5):
116
+
117
+ | # | File | Line | Category | Severity | Confidence | Finding | Suggestion |
118
+ |---|------|------|----------|----------|------------|---------|-----------|
119
+
120
+ Categories: Pattern, Quality, Maintainability
121
+ Severity: Must Fix > Should Fix > Consider > Nit
122
+
123
+ **Confidence scoring is mandatory.** Every finding includes a confidence score (0-100). If confidence is below 60, escalate to a second agent from a different universe (e.g., if Spock found it, escalate to Oracle or Stark) to verify before including. If the second agent disagrees, drop the finding. High-confidence findings (90+) skip re-verification in Step 3.5.
124
+
125
+ ## Step 3 — Fix (small batches)
126
+ Fix "Must Fix" and "Should Fix" items. After each batch:
127
+ 1. Re-run `npm test`
128
+ 2. Verify the fix didn't change behavior
129
+ 3. Update review table status
130
+
131
+ "Consider" and "Nit" items are presented to the user for decision.
132
+
133
+ ## Step 3.5 — Re-Verify Fixes
134
+ After fixes are applied:
135
+ - **Spock** re-checks pattern compliance on modified files
136
+ - **Seven** confirms no new complexity or dead code introduced by fixes
137
+
138
+ If new issues found, fix and re-verify.
139
+
140
+ ## Step 4 — Deliverables
141
+ 1. Review findings table (in phase log or conversation)
142
+ 2. Code fixes for Must Fix and Should Fix items
143
+ 3. Remaining suggestions for user decision
144
+
145
+ ## Handoffs
146
+ - Security findings → Kenobi (`/security`)
147
+ - UX/a11y findings → Galadriel (`/ux`)
148
+ - Architecture concerns → Picard (`/architect`)
149
+ - Bug discoveries → Batman (`/qa`)
150
+
151
+ Log all handoffs to `/logs/handoffs.md`.
@@ -0,0 +1,100 @@
1
+ # /security — Kenobi's Security Audit
2
+
3
+ **AGENT DEPLOYMENT IS MANDATORY.** Phase 1 specifies parallel agent launches via the Agent tool. You MUST launch Leia, Chewie, Rex+Bo-Katan, and Maul as separate sub-processes. Phase 2 agents (Yoda, Windu, Ahsoka, Padmé, Qui-Gon) run sequentially but each MUST be a separate agent invocation. Do NOT shortcut to inline analysis. (Field report #68)
4
+
5
+ ## Context Setup
6
+ 1. Read `/logs/build-state.md` — understand current project state
7
+ 2. Read `/docs/methods/SECURITY_AUDITOR.md`
8
+ 3. Read `/docs/LESSONS.md` — check for security-relevant lessons (prior vulnerabilities, auth gotchas). Flag matches during audit.
9
+
10
+ ## Audit Sequence
11
+
12
+ ### Phase 0.5 — First Strike (**Han** + **Cassian**)
13
+ Before the deep audits, two agents do fast recon:
14
+ - **Han (First Strike):** Quick OWASP top 10 scan — finds the obvious vulnerabilities that shouldn't require deep analysis. Shoots first.
15
+ - **Cassian (Intelligence):** Threat modeling and attack surface mapping — maps all endpoints, identifies high-value targets, produces the threat model that guides the rest of the audit.
16
+
17
+ ### Phase 1 — Independent audits (parallel analysis)
18
+ Use the Agent tool to run these simultaneously — all are read-only analysis:
19
+ - **Agent 1 (Leia — Secrets):** Scan source code for hardcoded secrets, check .env is gitignored, check git history for leaked keys (`git log -p --all -S 'password' -S 'secret' -S 'api_key'`), verify different secrets dev/prod
20
+ - **Agent 2 (Chewie — Dependencies):** Run `npm audit`, check for critical/high vulns, verify lock file committed, check for deprecated packages
21
+ - **Agent 3 (Rex + Bo-Katan — Infrastructure + Perimeter):** Check security headers (HSTS, CSP, X-Frame-Options, CORS), verify TLS config, check for exposed ports/debug endpoints. **Bo-Katan** focuses on network perimeter: firewall rules, exposed ports, CORS policy enforcement.
22
+ - **Agent 4 (Maul — Red Team):** For each endpoint and flow, ask: "How would I exploit this?" Chain vulnerabilities. Test trust boundaries. Attempt privilege escalation. **RUNTIME EXPLOITATION (mandatory):** When the server is running, Maul must execute actual attack requests via curl/fetch — not just theorize. Upload a file then fetch the URL. Submit conflicting data. Send requests with stolen/expired tokens. If the server isn't running, document what couldn't be runtime-tested.
23
+
24
+ ### Phase 2 — Sequential audits (depend on understanding the codebase)
25
+ These require full codebase context — run sequentially:
26
+
27
+ **Yoda — Auth:**
28
+ - Password hashing (bcrypt >= 12 rounds, no plaintext anywhere)
29
+ - Session management (crypto random, httpOnly/secure/sameSite, invalidated on logout)
30
+ - OAuth (state param, redirect whitelist, server-side exchange)
31
+ - Reset tokens (single-use, expire, rate limited)
32
+ - Reference `/docs/patterns/middleware.ts` for auth middleware patterns
33
+
34
+ **Windu — Input:**
35
+ - SQL injection (parameterized queries everywhere)
36
+ - XSS (escaped output, no dangerouslySetInnerHTML without sanitization, CSP)
37
+ - SSRF (URL allowlist if user provides URLs)
38
+ - Command injection (no user input in shell commands)
39
+ - Path traversal (sanitized filenames)
40
+
41
+ **Ahsoka — Access Control:**
42
+ - Every endpoint verifies ownership (no IDOR)
43
+ - UUIDs not sequential IDs in URLs
44
+ - Admin verified server-side (not just hidden UI)
45
+ - Tier features verified server-side
46
+ - Rate limiting per-user and per-IP
47
+ - Reference `/docs/patterns/multi-tenant.ts` if multi-tenant
48
+ - **AUTH CHAIN TRACING (mandatory):** Don't just verify each endpoint checks auth — trace the full chain: Is the auth middleware actually applied to this route? Is the user/tenant context carried from middleware → service → DB query? Are there routes that SHOULD have auth middleware but don't? Read the middleware registration and verify every protected route is covered.
49
+
50
+ **Padmé — Data:**
51
+ - PII identified and cataloged
52
+ - PII not in logs, error messages, or URLs
53
+ - Deletion possible (GDPR right to erasure)
54
+ - Backups encrypted
55
+
56
+ **Qui-Gon — Subtle Vulnerabilities** (after sequential audits):
57
+ - Timing-based attacks, race conditions in auth flows, logic errors that are technically correct but exploitable
58
+ - The vulnerabilities that pass every standard check
59
+
60
+ **Sabine — Unconventional** (conditional — if project has external dependencies):
61
+ - Supply chain attacks, dependency confusion, prototype pollution, CSP bypass via CDN
62
+
63
+ **Bail Organa — Governance** (conditional — if project has regulatory requirements):
64
+ - GDPR data handling, SOC2 controls, HIPAA mapping
65
+
66
+ ### Phase 3 — Remediate
67
+ Write all findings to `/logs/phase-11-security-audit.md` (or appropriate phase log):
68
+
69
+ | ID | Finding | Severity | Confidence | Category | Location | Remediation | Status |
70
+ |----|---------|----------|------------|----------|----------|-------------|--------|
71
+
72
+ Severity = exploitability × impact. Critical (auth bypass, data leak) > High (injection, IDOR) > Medium (missing headers, weak config) > Low (best practice)
73
+
74
+ **Confidence scoring is mandatory.** Every finding includes a confidence score (0-100). If confidence is below 60, escalate to a second agent from a different universe (e.g., if Maul found it, escalate to Deathstroke or Constantine) to verify before including. If the second agent disagrees, drop the finding. High-confidence findings (90+) skip re-verification in Phase 4.
75
+
76
+ Fix critical and high findings immediately. Medium findings get tracked. For each fix:
77
+ 1. Apply the fix
78
+ 2. Verify it works
79
+ 3. Check it didn't break anything (`npm test`)
80
+ 4. Update the finding status in the log
81
+
82
+ ### Phase 4 — Re-Verification (Maul + Anakin + Din Djarin)
83
+ After remediations are applied:
84
+ - **Maul** re-probes all remediated vulnerabilities — verify fixes hold under adversarial conditions. Execute actual HTTP requests against the running server.
85
+ - **Anakin** attempts to bypass remediations using dark-side techniques — JWT algorithm confusion, auth library edge cases, prototype pollution, framework misuse.
86
+ - **Din Djarin** bounty-hunts for anything Maul and Anakin missed — post-remediation sweep with Mandalorian tenacity.
87
+
88
+ If any agent finds new issues, fix and re-verify until clean.
89
+
90
+ ### Phase 5 — Deliverables
91
+ 1. SECURITY_AUDIT.md — prioritized findings with evidence
92
+ 2. SECURITY_CHECKLIST.md — reusable pre-deploy verification list
93
+ 3. Remediation code fixes
94
+ 4. INCIDENT_RESPONSE.md — if none exists, create template
95
+
96
+ ## Handoffs
97
+ - Backend refactoring needed → Stark, log to `/logs/handoffs.md`
98
+ - UI changes needed → Galadriel, log to `/logs/handoffs.md`
99
+ - Infrastructure changes → Kusanagi, log to `/logs/handoffs.md`
100
+ - Verify fixes didn't break → Batman, log to `/logs/handoffs.md`
@@ -0,0 +1,96 @@
1
+ # /test — Batman's Test-Writing Mode
2
+
3
+ > Different from `/qa` (which finds bugs). `/test` writes and improves tests.
4
+
5
+ ## Context Setup
6
+ 1. Read `/logs/build-state.md` — understand current project state
7
+ 2. Read `/docs/methods/QA_ENGINEER.md`
8
+ 3. Read `/docs/methods/TESTING.md` — testing pyramid, patterns, framework mapping
9
+
10
+ ## Step 0 — Orient (Oracle)
11
+ 1. Detect: test framework, test runner, test directory structure, existing coverage
12
+ 2. Run `npm test` to establish baseline — how many tests, how many pass, how many fail
13
+ 3. Document in phase log: framework, runner, config, current state
14
+
15
+ ## Step 1 — Coverage Analysis (Oracle + Alfred in parallel)
16
+ Use the Agent tool to run these in parallel:
17
+ - **Agent 1 (Oracle — Gap Analysis):** Scan all source files. For each service, API route, component, and utility, check: does a corresponding test file exist? What paths are tested? What paths are missing?
18
+ - **Agent 2 (Alfred — Test Infrastructure):** Review test config, fixtures, factories, mocks, test utilities. Are they well-organized? Is there a test database? Are there shared helpers?
19
+
20
+ Synthesize into a coverage map:
21
+
22
+ | Module | Type | Test File | Paths Covered | Paths Missing | Priority |
23
+ |--------|------|-----------|---------------|---------------|----------|
24
+
25
+ Priority: Critical path > User-facing > Internal > Utility
26
+
27
+ ## Step 2 — Test Architecture (Nightwing)
28
+ Review existing tests for quality:
29
+ - Are tests testing behavior or implementation details?
30
+ - Are tests isolated (no test-order dependency)?
31
+ - Are assertions specific (not just "doesn't throw")?
32
+ - Are test names descriptive ("should reject expired tokens" not "test auth")?
33
+ - Is there appropriate use of the testing pyramid (unit > integration > e2e)?
34
+
35
+ Flag anti-patterns:
36
+ - Tests that always pass (testing nothing)
37
+ - Tests that test the framework, not the code
38
+ - Excessive mocking that hides real bugs
39
+ - Tests coupled to implementation details
40
+
41
+ ## Step 3 — Write Missing Tests (Batman leads)
42
+ Write tests in priority order from Step 1. For each module:
43
+
44
+ 1. **Unit tests** for pure business logic (services, utils, validators)
45
+ - Happy path + edge cases + error cases
46
+ - Follow patterns from `/docs/methods/TESTING.md`
47
+
48
+ 2. **Integration tests** for API routes
49
+ - Request validation (missing fields, wrong types, unauthorized)
50
+ - Success path with response shape verification
51
+ - Error responses (404, 403, 422, 500)
52
+
53
+ 3. **Component tests** for UI (if applicable)
54
+ - All four states: loading, empty, error, success
55
+ - User interactions (click, type, submit)
56
+ - Keyboard navigation
57
+
58
+ Work in small batches — write tests for one module, run `npm test`, verify they pass, then move to the next.
59
+
60
+ ## Step 3.5 — Integration Tests (Oracle)
61
+ For each new feature, write at least one test that exercises the full cross-module path:
62
+ - **File handling:** upload file → verify returned URL → fetch URL → verify 200 + correct content-type
63
+ - **Form save with conflict:** submit with duplicate/conflicting value → verify response includes specific error message (not generic)
64
+ - **Bulk operations:** upload CSV/batch → verify created count + per-row error details
65
+ - **Generated URLs/keys:** verify the URL/key pattern is accepted by the serving endpoint (proxy, CDN, static handler)
66
+ - **Error propagation:** trigger a server error → verify the client receives and can display the specific error
67
+
68
+ These can use mocked databases but MUST cross module boundaries — the test should touch at least two modules that would be reviewed by different agents.
69
+
70
+ ## Step 4 — Hardening (Red Hood)
71
+ Red Hood writes adversarial tests:
72
+ - Boundary values (0, -1, MAX_INT, empty string, null, undefined)
73
+ - Unicode and special characters in all string inputs
74
+ - Concurrent operations (race conditions, double-submit)
75
+ - Large payloads (100MB upload, 10K item list)
76
+ - Missing/malformed auth tokens
77
+
78
+ ## Step 5 — Verify Suite
79
+ 1. Run full test suite: `npm test`
80
+ 2. All tests pass (fix any that don't)
81
+ 3. No flaky tests (run suite 3x if suspicious)
82
+ 4. Tests run in < 60 seconds (flag slow tests)
83
+ 5. Update `/docs/qa-prompt.md` with new test coverage state
84
+
85
+ ## Deliverables
86
+ 1. New and improved test files
87
+ 2. Test utilities/helpers/fixtures (if created)
88
+ 3. Updated coverage map in phase log
89
+ 4. List of remaining gaps (backlog)
90
+
91
+ ## Handoffs
92
+ - Security test gaps → Kenobi (`/security`)
93
+ - UI test gaps → Galadriel (`/ux`)
94
+ - Infrastructure test issues → Kusanagi (`/devops`)
95
+
96
+ Log all handoffs to `/logs/handoffs.md`.
@@ -0,0 +1,116 @@
1
+ Plant the thumper. Ride the worm. Command Claude Code from anywhere via Telegram.
2
+
3
+ ## If `$ARGUMENTS` is `setup`:
4
+
5
+ Guide the user through Telegram bot setup conversationally — do NOT run the interactive `scan.sh` (it requires stdin which doesn't work in Claude Code).
6
+
7
+ ### Step 1 — Get the bot token
8
+
9
+ Tell the user:
10
+
11
+ > To set up the Telegram bridge, you need a bot token from Telegram:
12
+ >
13
+ > 1. Open Telegram and search for **@BotFather**
14
+ > 2. Send `/newbot`
15
+ > 3. Choose a name (e.g., "VoidForge Bridge")
16
+ > 4. Choose a username ending in `bot` (e.g., `myforge_bot`)
17
+ > 5. BotFather will reply with a token — paste it here
18
+
19
+ Wait for the user to paste their bot token.
20
+
21
+ ### Step 2 — Validate and detect chat ID
22
+
23
+ Once the user provides the token:
24
+
25
+ 1. Validate it: `curl -s "https://api.telegram.org/bot<TOKEN>/getMe"` — check for `"ok":true`
26
+ 2. Tell the user: "Token validated! Now **send any message to your bot** on Telegram (just type 'hello') and tell me when done."
27
+ 3. When they confirm, detect the chat ID: `curl -s "https://api.telegram.org/bot<TOKEN>/getUpdates?limit=10"` — extract the first private chat ID
28
+ 4. If no chat found, ask them to try again
29
+
30
+ ### Step 3 — Run scan.sh non-interactive
31
+
32
+ Once you have both token and chat ID:
33
+
34
+ ```bash
35
+ bash scripts/thumper/scan.sh --token "<TOKEN>" --chat-id "<CHAT_ID>"
36
+ ```
37
+
38
+ Report the output. The sietch vault is sealed.
39
+
40
+ ### Step 3.5 — Personalize the bot
41
+
42
+ After the sietch vault is sealed, personalize the bot using the Telegram Bot API. Read the project's `CLAUDE.md` to get the project name, and `docs/PRD.md` (or the root-level PRD) for the one-liner description.
43
+
44
+ **Set bot identity:**
45
+
46
+ ```bash
47
+ # Bot display name — project-branded
48
+ curl -s -X POST "https://api.telegram.org/bot<TOKEN>/setMyName" \
49
+ -d "name=VoidForge — <PROJECT_NAME>"
50
+
51
+ # Description (shown when user opens bot for the first time)
52
+ # Bilbo writes this: warm, confident, one sentence about the project + what the bridge does
53
+ curl -s -X POST "https://api.telegram.org/bot<TOKEN>/setMyDescription" \
54
+ --data-urlencode "description=<BILBO_DESCRIPTION>"
55
+
56
+ # Short description (shown in bot search results and sharing)
57
+ curl -s -X POST "https://api.telegram.org/bot<TOKEN>/setMyShortDescription" \
58
+ --data-urlencode "short_description=VoidForge bridge for <PROJECT_NAME>. Command your build from anywhere."
59
+ ```
60
+
61
+ For the description, Bilbo should write something like: "Your direct line to [Project Name]'s forge. Send commands, check status, and build from anywhere. Powered by VoidForge — from nothing, everything."
62
+
63
+ **Register command menu:**
64
+
65
+ ```bash
66
+ curl -s -X POST "https://api.telegram.org/bot<TOKEN>/setMyCommands" \
67
+ -H "Content-Type: application/json" \
68
+ -d '{
69
+ "commands": [
70
+ {"command": "build", "description": "Execute the build protocol"},
71
+ {"command": "campaign", "description": "Run the campaign (add --blitz or --fast)"},
72
+ {"command": "qa", "description": "Batman'\''s QA pass"},
73
+ {"command": "review", "description": "Picard'\''s code review"},
74
+ {"command": "security", "description": "Kenobi'\''s security audit"},
75
+ {"command": "ux", "description": "Galadriel'\''s UX/UI review"},
76
+ {"command": "devops", "description": "Kusanagi'\''s infrastructure audit"},
77
+ {"command": "architect", "description": "Picard'\''s architecture review"},
78
+ {"command": "gauntlet", "description": "Thanos'\''s review (add --fast for 3 rounds)"},
79
+ {"command": "test", "description": "Batman'\''s test-writing mode"},
80
+ {"command": "debrief", "description": "Bashir'\''s post-mission analysis"},
81
+ {"command": "git", "description": "Coulson'\''s version & release"},
82
+ {"command": "void", "description": "Bombadil'\''s forge sync"},
83
+ {"command": "imagine", "description": "Celebrimbor'\''s image generation"},
84
+ {"command": "thumper", "description": "Thumper bridge control (on/off/status)"}
85
+ ]
86
+ }'
87
+ ```
88
+
89
+ Verify the response shows `"ok":true` for each API call.
90
+
91
+ **Optional — Generate and set profile photo:**
92
+
93
+ If the vault has an `openai-api-key`, use `/imagine` to generate a project-themed avatar:
94
+ - Éowyn's prompt: "Minimalist icon representing [project name], [brand personality from PRD], dark background, clean lines, suitable as a small Telegram avatar, no text"
95
+ - Generate via OpenAI, save to `.voidforge/thumper/bot-avatar.png`
96
+ - Upload: `curl -s -F photo=@.voidforge/thumper/bot-avatar.png "https://api.telegram.org/bot<TOKEN>/setMyPhoto"`
97
+
98
+ If no OpenAI key, skip with a note: "Set a profile photo manually in @BotFather, or add an OpenAI API key to the vault and run `/thumper setup` again."
99
+
100
+ ### Step 4 — Offer to start
101
+
102
+ Ask: "Thumper is configured and personalized. Want me to start the bridge now? (`/thumper on`)"
103
+
104
+ ---
105
+
106
+ ## For all other arguments (`on`, `off`, `status`, or no args):
107
+
108
+ Note: `status` subcommand is also available as `--status` flag for consistency.
109
+
110
+ Run the shell script directly:
111
+
112
+ ```bash
113
+ bash scripts/thumper/thumper.sh $ARGUMENTS
114
+ ```
115
+
116
+ Report the output exactly as returned.
@@ -0,0 +1,100 @@
1
+ # /treasury — Dockson's Financial Operations
2
+
3
+ > *"Every coin has a story. I know them all."*
4
+
5
+ Read `/docs/methods/TREASURY.md` for operating rules.
6
+ Read `/docs/methods/HEARTBEAT.md` for daemon architecture.
7
+
8
+ ## Agent Deployment Manifest
9
+
10
+ **Lead:** Dockson (Cosmere — Mistborn)
11
+ **Core team:**
12
+ - **Steris** — budget allocation, forecasting, contingency plans
13
+ - **Vin** — revenue analytics, attribution, pattern detection
14
+ - **Szeth** — financial compliance, tax records, platform ToS
15
+ - **Breeze** — platform relations, API credentials, OAuth management
16
+ - **Wax** — spend execution, campaign budget management
17
+
18
+ ## Prerequisites
19
+ If `packages/voidforge/wizard/server.ts` does not exist (scaffold/core users):
20
+ 1. Offer: "Treasury requires the wizard server. Pull it from upstream? [Y/n]"
21
+ 2. On yes: `git fetch voidforge main 2>/dev/null || { git remote add voidforge https://github.com/tmcleod3/voidforge.git && git fetch voidforge main; }` then `git checkout voidforge/main -- packages/voidforge/` then `npm install`
22
+ 3. On no: stop with "Run manually: `git checkout voidforge/main -- packages/voidforge/`"
23
+
24
+ ## Context Setup
25
+ 1. Check if financial vault exists (`~/.voidforge/treasury/vault.enc`)
26
+ 2. Check if heartbeat daemon is running (`~/.voidforge/heartbeat.json`)
27
+ 3. If no vault: route to setup flow
28
+ 4. If vault exists but no args: show `--status`
29
+
30
+ ## First-Run Experience (§9.15.1)
31
+
32
+ When no treasury vault exists:
33
+ 1. "Treasury manages your project's finances — revenue tracking, ad spend budgets, and reconciliation."
34
+ 2. Start guided setup: connect one revenue source first (recommend Stripe).
35
+ 3. After first source connected: "Financial operations require two-factor authentication. Set up now? [Y/n]"
36
+ 4. TOTP required before connecting ad platforms or enabling spend.
37
+ 5. After setup: show treasury status with next steps.
38
+
39
+ ## Setup Flow
40
+
41
+ `/treasury setup`:
42
+ 1. "Which revenue sources?" → Stripe / Paddle / Skip for now
43
+ 2. For each selected source:
44
+ - Stripe: "Paste your restricted API key (read-only). Find it at https://dashboard.stripe.com/apikeys"
45
+ - Paddle: "Paste your API key (read-only). Find it in your Paddle dashboard."
46
+ 3. For each: encrypt → store in financial vault → connection test → initial data pull
47
+ 4. Show: current balance, last 30 days revenue
48
+ 5. Offer TOTP setup if not configured
49
+ 6. Offer heartbeat daemon start if not running
50
+
51
+ ## Commands
52
+
53
+ ### Viewing
54
+ - `/treasury` or `/treasury --status` — financial summary
55
+ - `/treasury --report` — monthly report (JSON/CSV/markdown)
56
+
57
+ ### Managing
58
+ - `/treasury --budget N` — set total monthly budget ($N)
59
+ - `/treasury --reconcile` — trigger manual reconciliation
60
+ - `/treasury --launch [file]` — launch campaigns from growth-campaigns.json
61
+ - `/treasury --hard-stop N` — set daily hard stop amount
62
+ - `/treasury --export [path]` — export all financial data (encrypted)
63
+
64
+ ### Preview
65
+ - `/treasury --dry-run` — Show what --launch would do without executing. Preview campaign submissions and spend amounts.
66
+
67
+ ### Emergency
68
+ - `/treasury --freeze` — pause ALL automated spending immediately
69
+ - `/treasury --unfreeze` — resume (requires vault password + TOTP)
70
+ - `/treasury --setup-2fa` — configure or reconfigure TOTP
71
+
72
+ ## Status Display
73
+
74
+ ```
75
+ ═══════════════════════════════════════════
76
+ TREASURY — [Month Year]
77
+ ═══════════════════════════════════════════
78
+ Revenue (Stripe): $X,XXX
79
+ Ad Spend (all platforms): $X,XXX
80
+ Net: $X,XXX
81
+ Blended ROAS: X.Xx
82
+ Budget remaining: $XXX
83
+ Reconciliation: ✓ MATCHED
84
+ Heartbeat: ● Online
85
+ ═══════════════════════════════════════════
86
+ ```
87
+
88
+ ### Stablecoin Funding
89
+ - `/treasury setup --crypto` — First-time stablecoin funding setup: provider selection (Circle / Bridge / manual), destination bank, treasury mode (maintain-buffer / just-in-time), buffer threshold, freeze thresholds, TOTP verification
90
+ - `/treasury --balances` — Show stablecoin source balance, bank available balance, reserved balance, and per-platform runway
91
+ - `/treasury --funding-status` — Show end-to-end funding chain: pending off-ramps, unsettled invoices, expected debits, freeze state, and funding sub-state
92
+ - `/treasury --offramp --amount N` — Initiate off-ramp of $N from stablecoin provider to destination bank (requires vault + TOTP)
93
+ - `/treasury --target-balance N` — Set minimum USD operating balance target at destination bank
94
+ - `/treasury --runway` — Forecast days of runway based on projected campaign spend vs available fiat
95
+ - `/treasury --invoice-pay [platform] [invoice-id]` — Settle a specific platform invoice (requires vault + TOTP)
96
+ - `/treasury --reconcile` — Same command as listed under "Managing"; when stablecoin funding is configured, reconciliation additionally covers stablecoin transfers, bank settlements, and platform spend
97
+ - `/treasury --simulate-funding` — Dry-run: show projected 14-day spend, required float, recommended off-ramp amount, settlement lead time, and freeze triggers
98
+
99
+ ## Arguments
100
+ $ARGUMENTS