happy-stacks 0.3.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. package/README.md +93 -40
  2. package/bin/happys.mjs +158 -16
  3. package/docs/codex-mcp-resume.md +130 -0
  4. package/docs/commit-audits/happy/leeroy-wip.commit-analysis.md +17640 -0
  5. package/docs/commit-audits/happy/leeroy-wip.commit-export.fuller-stat.md +3845 -0
  6. package/docs/commit-audits/happy/leeroy-wip.commit-inventory.md +102 -0
  7. package/docs/commit-audits/happy/leeroy-wip.commit-manual-review.md +1452 -0
  8. package/docs/commit-audits/happy/leeroy-wip.manual-review-queue.md +116 -0
  9. package/docs/happy-development.md +3 -4
  10. package/docs/isolated-linux-vm.md +82 -0
  11. package/docs/mobile-ios.md +112 -54
  12. package/docs/monorepo-migration.md +286 -0
  13. package/docs/server-flavors.md +19 -3
  14. package/docs/stacks.md +35 -0
  15. package/package.json +5 -1
  16. package/scripts/auth.mjs +32 -10
  17. package/scripts/build.mjs +55 -8
  18. package/scripts/daemon.mjs +166 -10
  19. package/scripts/dev.mjs +198 -50
  20. package/scripts/doctor.mjs +0 -4
  21. package/scripts/edison.mjs +6 -4
  22. package/scripts/env.mjs +150 -0
  23. package/scripts/env_cmd.test.mjs +128 -0
  24. package/scripts/init.mjs +8 -3
  25. package/scripts/install.mjs +207 -69
  26. package/scripts/lint.mjs +24 -4
  27. package/scripts/migrate.mjs +3 -12
  28. package/scripts/mobile.mjs +88 -104
  29. package/scripts/mobile_dev_client.mjs +83 -0
  30. package/scripts/monorepo.mjs +1096 -0
  31. package/scripts/monorepo_port.test.mjs +1470 -0
  32. package/scripts/provision/linux-ubuntu-review-pr.sh +51 -0
  33. package/scripts/review.mjs +908 -0
  34. package/scripts/review_pr.mjs +353 -0
  35. package/scripts/run.mjs +101 -21
  36. package/scripts/service.mjs +2 -2
  37. package/scripts/setup.mjs +189 -68
  38. package/scripts/setup_pr.mjs +586 -38
  39. package/scripts/stack.mjs +990 -196
  40. package/scripts/stack_archive_cmd.test.mjs +91 -0
  41. package/scripts/stack_editor_workspace_monorepo_root.test.mjs +65 -0
  42. package/scripts/stack_env_cmd.test.mjs +87 -0
  43. package/scripts/stack_happy_cmd.test.mjs +126 -0
  44. package/scripts/stack_interactive_monorepo_group.test.mjs +71 -0
  45. package/scripts/stack_monorepo_defaults.test.mjs +62 -0
  46. package/scripts/stack_monorepo_server_light_from_happy_spec.test.mjs +66 -0
  47. package/scripts/stack_server_flavors_defaults.test.mjs +55 -0
  48. package/scripts/stack_shorthand_cmd.test.mjs +55 -0
  49. package/scripts/stack_wt_list.test.mjs +128 -0
  50. package/scripts/tailscale.mjs +37 -1
  51. package/scripts/test.mjs +45 -8
  52. package/scripts/tui.mjs +395 -39
  53. package/scripts/typecheck.mjs +24 -4
  54. package/scripts/utils/auth/daemon_gate.mjs +55 -0
  55. package/scripts/utils/auth/daemon_gate.test.mjs +37 -0
  56. package/scripts/utils/auth/guided_pr_auth.mjs +79 -0
  57. package/scripts/utils/auth/guided_stack_web_login.mjs +75 -0
  58. package/scripts/utils/auth/interactive_stack_auth.mjs +72 -0
  59. package/scripts/utils/auth/login_ux.mjs +32 -13
  60. package/scripts/utils/auth/sources.mjs +26 -0
  61. package/scripts/utils/auth/stack_guided_login.mjs +353 -0
  62. package/scripts/utils/cli/cli_registry.mjs +43 -4
  63. package/scripts/utils/cli/cwd_scope.mjs +136 -0
  64. package/scripts/utils/cli/cwd_scope.test.mjs +110 -0
  65. package/scripts/utils/cli/log_forwarder.mjs +157 -0
  66. package/scripts/utils/cli/prereqs.mjs +75 -0
  67. package/scripts/utils/cli/prereqs.test.mjs +34 -0
  68. package/scripts/utils/cli/progress.mjs +126 -0
  69. package/scripts/utils/cli/verbosity.mjs +12 -0
  70. package/scripts/utils/cli/wizard.mjs +17 -9
  71. package/scripts/utils/cli/wizard_prompt_worktree_source_lazy.test.mjs +60 -0
  72. package/scripts/utils/dev/daemon.mjs +61 -4
  73. package/scripts/utils/dev/expo_dev.mjs +430 -0
  74. package/scripts/utils/dev/expo_dev.test.mjs +76 -0
  75. package/scripts/utils/dev/server.mjs +36 -42
  76. package/scripts/utils/dev_auth_key.mjs +169 -0
  77. package/scripts/utils/edison/git_roots.mjs +29 -0
  78. package/scripts/utils/edison/git_roots.test.mjs +36 -0
  79. package/scripts/utils/env/env.mjs +7 -3
  80. package/scripts/utils/env/env_file.mjs +4 -2
  81. package/scripts/utils/env/env_file.test.mjs +44 -0
  82. package/scripts/utils/expo/command.mjs +52 -0
  83. package/scripts/utils/expo/expo.mjs +20 -1
  84. package/scripts/utils/expo/metro_ports.mjs +114 -0
  85. package/scripts/utils/git/git.mjs +67 -0
  86. package/scripts/utils/git/worktrees.mjs +80 -25
  87. package/scripts/utils/git/worktrees_monorepo.test.mjs +54 -0
  88. package/scripts/utils/handy_master_secret.mjs +94 -0
  89. package/scripts/utils/mobile/config.mjs +31 -0
  90. package/scripts/utils/mobile/dev_client_links.mjs +60 -0
  91. package/scripts/utils/mobile/identifiers.mjs +47 -0
  92. package/scripts/utils/mobile/identifiers.test.mjs +42 -0
  93. package/scripts/utils/mobile/ios_xcodeproj_patch.mjs +128 -0
  94. package/scripts/utils/mobile/ios_xcodeproj_patch.test.mjs +98 -0
  95. package/scripts/utils/net/lan_ip.mjs +24 -0
  96. package/scripts/utils/net/ports.mjs +9 -1
  97. package/scripts/utils/net/tcp_forward.mjs +162 -0
  98. package/scripts/utils/net/url.mjs +30 -0
  99. package/scripts/utils/net/url.test.mjs +20 -0
  100. package/scripts/utils/paths/localhost_host.mjs +50 -3
  101. package/scripts/utils/paths/paths.mjs +159 -40
  102. package/scripts/utils/paths/paths_monorepo.test.mjs +58 -0
  103. package/scripts/utils/paths/paths_server_flavors.test.mjs +45 -0
  104. package/scripts/utils/proc/commands.mjs +2 -3
  105. package/scripts/utils/proc/parallel.mjs +25 -0
  106. package/scripts/utils/proc/pm.mjs +176 -22
  107. package/scripts/utils/proc/pm_spawn.test.mjs +76 -0
  108. package/scripts/utils/proc/pm_stack_cache_env.test.mjs +142 -0
  109. package/scripts/utils/proc/proc.mjs +136 -4
  110. package/scripts/utils/proc/proc.test.mjs +77 -0
  111. package/scripts/utils/review/base_ref.mjs +74 -0
  112. package/scripts/utils/review/base_ref.test.mjs +54 -0
  113. package/scripts/utils/review/chunks.mjs +55 -0
  114. package/scripts/utils/review/chunks.test.mjs +51 -0
  115. package/scripts/utils/review/findings.mjs +165 -0
  116. package/scripts/utils/review/findings.test.mjs +85 -0
  117. package/scripts/utils/review/head_slice.mjs +153 -0
  118. package/scripts/utils/review/head_slice.test.mjs +91 -0
  119. package/scripts/utils/review/instructions/deep.md +20 -0
  120. package/scripts/utils/review/runners/coderabbit.mjs +61 -0
  121. package/scripts/utils/review/runners/coderabbit.test.mjs +59 -0
  122. package/scripts/utils/review/runners/codex.mjs +61 -0
  123. package/scripts/utils/review/runners/codex.test.mjs +35 -0
  124. package/scripts/utils/review/slices.mjs +140 -0
  125. package/scripts/utils/review/slices.test.mjs +32 -0
  126. package/scripts/utils/review/targets.mjs +24 -0
  127. package/scripts/utils/review/targets.test.mjs +36 -0
  128. package/scripts/utils/sandbox/review_pr_sandbox.mjs +106 -0
  129. package/scripts/utils/server/flavor_scripts.mjs +98 -0
  130. package/scripts/utils/server/flavor_scripts.test.mjs +146 -0
  131. package/scripts/utils/server/mobile_api_url.mjs +61 -0
  132. package/scripts/utils/server/mobile_api_url.test.mjs +41 -0
  133. package/scripts/utils/server/prisma_import.mjs +37 -0
  134. package/scripts/utils/server/prisma_import.test.mjs +70 -0
  135. package/scripts/utils/server/ui_env.mjs +14 -0
  136. package/scripts/utils/server/ui_env.test.mjs +46 -0
  137. package/scripts/utils/server/urls.mjs +14 -4
  138. package/scripts/utils/server/validate.mjs +53 -16
  139. package/scripts/utils/server/validate.test.mjs +89 -0
  140. package/scripts/utils/service/autostart_darwin.mjs +42 -2
  141. package/scripts/utils/service/autostart_darwin.test.mjs +50 -0
  142. package/scripts/utils/stack/context.mjs +2 -2
  143. package/scripts/utils/stack/editor_workspace.mjs +6 -6
  144. package/scripts/utils/stack/interactive_stack_config.mjs +185 -0
  145. package/scripts/utils/stack/pr_stack_name.mjs +16 -0
  146. package/scripts/utils/stack/runtime_state.mjs +2 -1
  147. package/scripts/utils/stack/startup.mjs +120 -13
  148. package/scripts/utils/stack/startup_server_light_dirs.test.mjs +64 -0
  149. package/scripts/utils/stack/startup_server_light_generate.test.mjs +70 -0
  150. package/scripts/utils/stack/startup_server_light_legacy.test.mjs +88 -0
  151. package/scripts/utils/stack/stop.mjs +15 -4
  152. package/scripts/utils/stack_context.mjs +23 -0
  153. package/scripts/utils/stack_runtime_state.mjs +104 -0
  154. package/scripts/utils/stacks.mjs +38 -0
  155. package/scripts/utils/tailscale/ip.mjs +116 -0
  156. package/scripts/utils/ui/ansi.mjs +39 -0
  157. package/scripts/utils/ui/qr.mjs +17 -0
  158. package/scripts/utils/validate.mjs +88 -0
  159. package/scripts/where.mjs +2 -2
  160. package/scripts/worktrees.mjs +755 -179
  161. package/scripts/worktrees_archive_cmd.test.mjs +245 -0
  162. package/scripts/worktrees_cursor_monorepo_root.test.mjs +63 -0
  163. package/scripts/worktrees_list_specs_no_recurse.test.mjs +33 -0
  164. package/scripts/worktrees_monorepo_use_group.test.mjs +67 -0
  165. package/scripts/utils/dev/expo_web.mjs +0 -112
@@ -0,0 +1,908 @@
1
+ import './utils/env/env.mjs';
2
+ import { parseArgs } from './utils/cli/args.mjs';
3
+ import { printResult, wantsHelp, wantsJson } from './utils/cli/cli.mjs';
4
+ import { coerceHappyMonorepoRootFromPath, getComponentDir, getRootDir } from './utils/paths/paths.mjs';
5
+ import { getInvokedCwd, inferComponentFromCwd } from './utils/cli/cwd_scope.mjs';
6
+ import { assertCliPrereqs } from './utils/cli/prereqs.mjs';
7
+ import { resolveBaseRef } from './utils/review/base_ref.mjs';
8
+ import { isStackMode, resolveDefaultStackReviewComponents } from './utils/review/targets.mjs';
9
+ import { planCommitChunks } from './utils/review/chunks.mjs';
10
+ import { planPathSlices } from './utils/review/slices.mjs';
11
+ import { createHeadSliceCommits, getChangedOps } from './utils/review/head_slice.mjs';
12
+ import { runWithConcurrencyLimit } from './utils/proc/parallel.mjs';
13
+ import { runCodeRabbitReview } from './utils/review/runners/coderabbit.mjs';
14
+ import { extractCodexReviewFromJsonl, runCodexReview } from './utils/review/runners/codex.mjs';
15
+ import { formatTriageMarkdown, parseCodeRabbitPlainOutput, parseCodexReviewText } from './utils/review/findings.mjs';
16
+ import { join } from 'node:path';
17
+ import { ensureDir } from './utils/fs/ops.mjs';
18
+ import { copyFile, writeFile } from 'node:fs/promises';
19
+ import { existsSync } from 'node:fs';
20
+ import { runCapture } from './utils/proc/proc.mjs';
21
+
22
+ const DEFAULT_COMPONENTS = ['happy', 'happy-cli', 'happy-server-light', 'happy-server'];
23
+ const VALID_COMPONENTS = DEFAULT_COMPONENTS;
24
+ const VALID_REVIEWERS = ['coderabbit', 'codex'];
25
+ const VALID_DEPTHS = ['deep', 'normal'];
26
+ const DEFAULT_REVIEW_MAX_FILES = 50;
27
+
28
/**
 * Split a comma-separated string into trimmed, non-empty entries.
 * Null/undefined input yields an empty array.
 * @param {unknown} raw - Raw CSV text (typically a CLI flag value).
 * @returns {string[]} Trimmed entries, empties removed.
 */
function parseCsv(raw) {
  const entries = [];
  for (const piece of String(raw ?? '').split(',')) {
    const trimmed = piece.trim();
    if (trimmed) entries.push(trimmed);
  }
  return entries;
}
34
+
35
/**
 * Normalize a reviewer list: lowercase, trim, drop empties, dedupe
 * (first occurrence wins). Falls back to ['coderabbit'] when nothing remains.
 * @param {unknown} list - Candidate reviewer names.
 * @returns {string[]} Unique lowercase reviewer names, never empty.
 */
function normalizeReviewers(list) {
  const seen = new Set();
  if (Array.isArray(list)) {
    for (const entry of list) {
      const name = String(entry).trim().toLowerCase();
      if (name) seen.add(name);
    }
  }
  return seen.size > 0 ? [...seen] : ['coderabbit'];
}
41
+
42
/**
 * Build the multi-line usage/help text for `happys review`.
 * The component/reviewer/depth lists are interpolated from the module
 * constants so the help stays in sync with flag validation.
 * @returns {string} Usage text (newline-joined, no trailing newline).
 */
function usage() {
  return [
    '[review] usage:',
    ' happys review [component...] [--reviewers=coderabbit,codex] [--base-remote=<remote>] [--base-branch=<branch>] [--base-ref=<ref>] [--concurrency=N] [--depth=deep|normal] [--chunks|--no-chunks] [--chunking=auto|head-slice|commit-window] [--chunk-max-files=N] [--coderabbit-type=committed|uncommitted|all] [--coderabbit-max-files=N] [--coderabbit-chunks|--no-coderabbit-chunks] [--codex-chunks|--no-codex-chunks] [--run-label=<label>] [--no-stream] [--json]',
    '',
    'components:',
    ` ${VALID_COMPONENTS.join(' | ')}`,
    '',
    'reviewers:',
    ` ${VALID_REVIEWERS.join(' | ')}`,
    '',
    'depth:',
    ` ${VALID_DEPTHS.join(' | ')}`,
    '',
    'notes:',
    '- If run from inside a component checkout/worktree and no components are provided, defaults to that component.',
    '- In stack mode (invoked via `happys stack review <stack>`), if no components are provided, defaults to stack-pinned non-default components only.',
    '',
    'examples:',
    ' happys review',
    ' happys review happy-cli --reviewers=coderabbit,codex',
    ' happys stack review exp1 --reviewers=codex',
    ' happys review happy --base-remote=upstream --base-branch=main',
  ].join('\n');
}
67
+
68
/**
 * Infer which component the user is standing in, if any.
 * Thin wrapper over inferComponentFromCwd pinned to DEFAULT_COMPONENTS.
 * @param {{ rootDir: string, invokedCwd: string }} opts
 * @returns {object|null} Inference result from inferComponentFromCwd, or null.
 */
function resolveComponentFromCwdOrNull({ rootDir, invokedCwd }) {
  return inferComponentFromCwd({ rootDir, invokedCwd, components: DEFAULT_COMPONENTS });
}
71
+
72
/**
 * Read the stack remote fallback from the environment.
 * HAPPY_STACKS_STACK_REMOTE takes precedence over the legacy
 * HAPPY_LOCAL_STACK_REMOTE; only null/undefined fall through.
 * @param {NodeJS.ProcessEnv} env - Environment map to consult.
 * @returns {string} Trimmed remote name, or '' when unset.
 */
function stackRemoteFallbackFromEnv(env) {
  const value = env.HAPPY_STACKS_STACK_REMOTE ?? env.HAPPY_LOCAL_STACK_REMOTE ?? '';
  return String(value).trim();
}
75
+
76
/**
 * Sanitize arbitrary text into a filesystem/label-safe slug:
 * lowercase, runs of disallowed characters collapsed to '-', and
 * leading/trailing dashes stripped.
 * @param {unknown} raw - Raw label text.
 * @returns {string} Slug containing only [a-z0-9._-], possibly ''.
 */
function sanitizeLabel(raw) {
  const lowered = String(raw ?? '').trim().toLowerCase();
  const dashed = lowered.replace(/[^a-z0-9._-]+/g, '-');
  return dashed.replace(/^-+|-+$/g, '');
}
83
+
84
/**
 * Return the last `n` lines of `text`, joined and right-trimmed.
 * Fix: the original used `.slice(-n)` directly, so `n === 0` became
 * `slice(-0)` === `slice(0)` and returned the ENTIRE text instead of
 * nothing (JS negative-zero footgun). Non-positive or non-numeric `n`
 * now yields ''.
 * @param {unknown} text - Source text; null/undefined treated as ''.
 * @param {number} n - Number of trailing lines to keep.
 * @returns {string} Last `n` lines, trailing whitespace removed.
 */
function tailLines(text, n) {
  const count = Math.floor(Number(n) || 0);
  if (count <= 0) return '';
  return String(text ?? '')
    .split('\n')
    .slice(-count)
    .join('\n')
    .trimEnd();
}
92
+
93
/**
 * Print triage guidance for the human/LLM operating the review run
 * (not for the reviewer model itself). Emits a single console.log call.
 */
function printReviewOperatorGuidance() {
  const guidance = [
    '[review] operator guidance:',
    '- Treat reviewer output as suggestions; verify against best practices + this codebase before applying.',
    '- Triage every single finding (no skipping): apply / adjust / defer-with-rationale.',
    '- Do not apply changes blindly; when uncertain, record in the report for discussion.',
    '- When a suggestion references external standards, verify via official docs (or note what you checked).',
    '- Prefer unified fixes; avoid duplication; avoid brittle tests (no exact wording assertions).',
    '- This command writes a triage checklist file; work through it item-by-item and record decisions + commits.',
    '',
  ];
  // eslint-disable-next-line no-console
  console.log(guidance.join('\n'));
}
109
+
110
/**
 * Map a component name to the monorepo subdirectory Codex should scope
 * its diff to. Both server flavors live under 'server'.
 * @param {string} component - Component name.
 * @returns {string|null} Subtree path (no trailing slash), or null when
 *   the component is unknown and the full repo should be reviewed.
 */
function codexScopePathForComponent(component) {
  const scopeByComponent = new Map([
    ['happy', 'expo-app'],
    ['happy-cli', 'cli'],
    ['happy-server-light', 'server'],
    ['happy-server', 'server'],
  ]);
  return scopeByComponent.get(component) ?? null;
}
123
+
124
/**
 * Build the prompt text for a deep Codex review of one component checkout.
 * When the component maps to a monorepo subtree, the suggested diff command
 * is path-filtered to that subtree; otherwise the full repo diff is used.
 * @param {{ component: string, baseRef: string }} opts - Component name and git base ref.
 * @returns {string} Multi-line prompt, ending with the FINDINGS_JSON protocol.
 */
function buildCodexDeepPrompt({ component, baseRef }) {
  // null scope => review the whole repository.
  const scopePath = codexScopePathForComponent(component);
  const diffCmd = scopePath
    ? `cd \"$(git rev-parse --show-toplevel)\" && git diff ${baseRef}...HEAD -- ${scopePath}/`
    : `cd \"$(git rev-parse --show-toplevel)\" && git diff ${baseRef}...HEAD`;

  return [
    'Run a deep, long-form code review.',
    '',
    `Base for review: ${baseRef}`,
    scopePath ? `Scope: ${scopePath}/` : 'Scope: full repo (no path filter)',
    '',
    'Instructions:',
    `- Use: ${diffCmd}`,
    '- Focus on correctness, edge cases, reliability, performance, and security.',
    '- Prefer unified/coherent fixes; avoid duplication.',
    '- Avoid brittle tests that assert on wording/phrasing/config; test real behavior and observable outcomes.',
    '- Ensure i18n coverage is complete: do not introduce hardcoded user-visible strings; add translation keys across locales as needed.',
    '- Treat every recommendation as a suggestion: validate it against best practices and this codebase’s existing patterns. Do not propose changes that violate project invariants.',
    '- Be exhaustive: list all findings you notice, not only the highest-signal ones.',
    '- Clearly mark any item that is uncertain, has tradeoffs, or needs product/UX decisions as "needs discussion".',
    '',
    'Output format:',
    '- Start with a short overall verdict.',
    '- Then list findings as bullets with severity (blocker/major/minor/nit) and a concrete fix suggestion.',
    '',
    'Machine-readable output (required):',
    '- After your review, output a JSON array of findings preceded by a line containing exactly: ===FINDINGS_JSON===',
    '- Each finding should include: severity, file, (optional) lines, title, description, recommendation, needsDiscussion (boolean).',
  ].join('\n');
}
155
+
156
/**
 * Build the prompt text for a Codex review of one HEAD-sliced chunk of the
 * monorepo. The base commit is synthetic (upstream + all non-slice changes),
 * so the diff is slice-only while the checkout is the real final HEAD — the
 * prompt explains that to the model explicitly.
 * @param {{ sliceLabel: string, baseCommit: string, baseRef: string }} opts
 *   Slice display label, synthetic base commit SHA, and the human-meaningful base ref.
 * @returns {string} Multi-line prompt, ending with the FINDINGS_JSON protocol.
 */
function buildCodexMonorepoSlicePrompt({ sliceLabel, baseCommit, baseRef }) {
  const diffCmd = `cd \"$(git rev-parse --show-toplevel)\" && git diff ${baseCommit}...HEAD`;
  return [
    'Run a deep, long-form code review on the monorepo.',
    '',
    `Base ref: ${baseRef}`,
    `Slice: ${sliceLabel}`,
    '',
    'Important:',
    '- The base commit for this slice is synthetic: it represents upstream plus all NON-slice changes.',
    '- Therefore, the diff below contains ONLY the changes for this slice, but the checked-out code is the full final HEAD.',
    '',
    'Instructions:',
    `- Use: ${diffCmd}`,
    '- You may inspect any file in the repo for cross-references (server/cli/ui), but keep findings scoped to this slice diff.',
    '- Focus on correctness, edge cases, reliability, performance, and security.',
    '- Prefer unified/coherent fixes; avoid duplication.',
    '- Avoid brittle tests that assert on wording/phrasing/config; test real behavior and observable outcomes.',
    '- Ensure i18n coverage is complete: do not introduce hardcoded user-visible strings; add translation keys across locales as needed.',
    '- Treat every recommendation as a suggestion: validate it against best practices and this codebase’s existing patterns. Do not propose changes that violate project invariants.',
    '- Be exhaustive within this slice: list all findings you notice, not only the highest-signal ones.',
    '- Clearly mark any item that is uncertain, has tradeoffs, or needs product/UX decisions as "needs discussion".',
    '',
    'Output format:',
    '- Start with a short overall verdict.',
    '- Then list findings as bullets with severity (blocker/major/minor/nit) and a concrete fix suggestion.',
    '',
    'Machine-readable output (required):',
    '- After your review, output a JSON array of findings preceded by a line containing exactly: ===FINDINGS_JSON===',
    '- Each finding should include: severity, file, (optional) lines, title, description, recommendation, needsDiscussion (boolean).',
  ].join('\n');
}
188
+
189
/**
 * Run a git command and return its stdout as an array of lines,
 * right-trimmed with blank lines removed.
 * @param {{ cwd: string, args: string[], env: NodeJS.ProcessEnv }} opts
 * @returns {Promise<string[]>} Non-empty output lines.
 */
async function gitLines({ cwd, args, env }) {
  const raw = await runCapture('git', args, { cwd, env });
  const result = [];
  for (const line of String(raw ?? '').split('\n')) {
    const cleaned = line.trimEnd();
    if (cleaned) result.push(cleaned);
  }
  return result;
}
196
+
197
/**
 * Count files changed between `base` and HEAD using the triple-dot diff
 * (i.e. since their merge-base), via `git diff --name-only`.
 * @param {{ cwd: string, base: string, env: NodeJS.ProcessEnv }} opts
 * @returns {Promise<number>} Number of changed file paths.
 */
async function countChangedFiles({ cwd, base, env }) {
  const lines = await gitLines({ cwd, env, args: ['diff', '--name-only', `${base}...HEAD`] });
  return lines.length;
}
201
+
202
/**
 * Count files changed between two arbitrary refs using the triple-dot diff
 * (changes on `head` since the merge-base with `base`).
 * @param {{ cwd: string, base: string, head: string, env: NodeJS.ProcessEnv }} opts
 * @returns {Promise<number>} Number of changed file paths.
 */
async function countChangedFilesBetween({ cwd, base, head, env }) {
  const lines = await gitLines({ cwd, env, args: ['diff', '--name-only', `${base}...${head}`] });
  return lines.length;
}
206
+
207
/**
 * Resolve the merge-base commit of two refs.
 * @param {{ cwd: string, a: string, b: string, env: NodeJS.ProcessEnv }} opts
 * @returns {Promise<string>} Merge-base SHA.
 * @throws {Error} When git produces no merge-base output.
 */
async function mergeBase({ cwd, a, b, env }) {
  const raw = await runCapture('git', ['merge-base', a, b], { cwd, env });
  const commit = String(raw ?? '').trim();
  if (!commit) throw new Error('[review] failed to compute merge-base');
  return commit;
}
213
+
214
/**
 * List commit SHAs reachable from `head` but not `base`, oldest first
 * (`rev-list --reverse`).
 * @param {{ cwd: string, base: string, head: string, env: NodeJS.ProcessEnv }} opts
 * @returns {Promise<string[]>} Commit SHAs in chronological order.
 */
async function listCommitsBetween({ cwd, base, head, env }) {
  return await gitLines({ cwd, env, args: ['rev-list', '--reverse', `${base}..${head}`] });
}
217
+
218
/**
 * Create a temporary detached git worktree at `headCommit`, run `fn` with the
 * worktree directory, then remove the worktree again (best effort).
 * The worktree lives under <toplevel>/.project/review-worktrees/<label>-<sha12>.
 * @param {{ repoDir: string, headCommit: string, label: string, env: NodeJS.ProcessEnv }} opts
 * @param {(dir: string) => Promise<any>} fn - Callback run inside the worktree.
 * @returns {Promise<any>} Whatever `fn` resolves to.
 * @throws {Error} When the git toplevel cannot be resolved.
 */
async function withDetachedWorktree({ repoDir, headCommit, label, env }, fn) {
  // Anchor the worktree at the repository toplevel, not a nested cwd.
  const root = (await runCapture('git', ['rev-parse', '--show-toplevel'], { cwd: repoDir, env })).toString().trim();
  if (!root) throw new Error('[review] failed to resolve git toplevel');

  // Same slug rules as sanitizeLabel(), applied to the directory name.
  const safeLabel = String(label ?? 'worktree')
    .toLowerCase()
    .replace(/[^a-z0-9._-]+/g, '-')
    .replace(/^-+|-+$/g, '');
  const short = String(headCommit).slice(0, 12);
  const dir = join(root, '.project', 'review-worktrees', `${safeLabel}-${short}`);

  await ensureDir(join(root, '.project', 'review-worktrees'));

  try {
    await runCapture('git', ['worktree', 'add', '--detach', dir, headCommit], { cwd: repoDir, env });
    return await fn(dir);
  } finally {
    // Cleanup runs even when `fn` throws; failures here are swallowed on
    // purpose so the original result/error is what the caller sees.
    try {
      await runCapture('git', ['worktree', 'remove', '--force', dir], { cwd: repoDir, env });
      await runCapture('git', ['worktree', 'prune'], { cwd: repoDir, env });
    } catch {
      // best-effort cleanup; leave an orphaned worktree if needed
    }
  }
}
243
+
244
/**
 * Binary-search the commit range base..HEAD for the EARLIEST base commit
 * whose diff against HEAD touches at most `maxFiles` files, so CodeRabbit
 * can review the largest window that still fits its file cap.
 * NOTE(review): the search assumes the changed-file count is monotonically
 * non-increasing as the base moves toward HEAD; with reverts/renames that
 * may not hold exactly — TODO confirm this is acceptable for chunking.
 * @param {{ cwd: string, baseRef: string, maxFiles: number, env: NodeJS.ProcessEnv }} opts
 * @returns {Promise<string|null>} Chosen base commit SHA, or null when the
 *   range is empty or no window fits.
 */
async function pickCoderabbitBaseCommitForMaxFiles({ cwd, baseRef, maxFiles, env }) {
  // Commits oldest-first; each candidate window starts at commits[mid].
  const commits = await gitLines({ cwd, env, args: ['rev-list', '--reverse', `${baseRef}..HEAD`] });
  if (!commits.length) return null;

  let lo = 0;
  let hi = commits.length - 1;
  let best = null;

  while (lo <= hi) {
    const mid = Math.floor((lo + hi) / 2);
    const startCommit = commits[mid];
    let baseCommit = '';
    try {
      // Base of the window is the parent of the first included commit...
      baseCommit = (await runCapture('git', ['rev-parse', `${startCommit}^`], { cwd, env })).toString().trim();
    } catch {
      // ...except for a root commit with no parent: fall back to the commit itself.
      baseCommit = (await runCapture('git', ['rev-parse', startCommit], { cwd, env })).toString().trim();
    }

    const n = await countChangedFiles({ cwd, env, base: baseCommit });
    if (n <= maxFiles) {
      // Window fits: remember it and try to extend further back in history.
      best = baseCommit;
      hi = mid - 1;
    } else {
      // Too many files: shrink the window toward HEAD.
      lo = mid + 1;
    }
  }

  return best;
}
273
+
274
+ async function main() {
275
+ const argv = process.argv.slice(2);
276
+ const { flags, kv } = parseArgs(argv);
277
+ const json = wantsJson(argv, { flags });
278
+ const stream = !json && !flags.has('--no-stream');
279
+
280
+ if (wantsHelp(argv, { flags })) {
281
+ printResult({ json, data: { usage: usage() }, text: usage() });
282
+ return;
283
+ }
284
+
285
+ const rootDir = getRootDir(import.meta.url);
286
+ const invokedCwd = getInvokedCwd(process.env);
287
+ const positionals = argv.filter((a) => !a.startsWith('--'));
288
+
289
+ const reviewers = normalizeReviewers(parseCsv(kv.get('--reviewers') ?? ''));
290
+ for (const r of reviewers) {
291
+ if (!VALID_REVIEWERS.includes(r)) {
292
+ throw new Error(`[review] unknown reviewer: ${r} (expected one of: ${VALID_REVIEWERS.join(', ')})`);
293
+ }
294
+ }
295
+
296
+ await assertCliPrereqs({
297
+ git: true,
298
+ coderabbit: reviewers.includes('coderabbit'),
299
+ codex: reviewers.includes('codex'),
300
+ });
301
+
302
+ const inferred = positionals.length === 0 ? resolveComponentFromCwdOrNull({ rootDir, invokedCwd }) : null;
303
+ if (inferred) {
304
+ // Make downstream getComponentDir() resolve to the inferred repo dir for this run.
305
+ process.env[`HAPPY_STACKS_COMPONENT_DIR_${inferred.component.toUpperCase().replace(/[^A-Z0-9]+/g, '_')}`] = inferred.repoDir;
306
+ }
307
+
308
+ const inStackMode = isStackMode(process.env);
309
+ const requestedComponents = positionals.length ? positionals : inferred ? [inferred.component] : ['all'];
310
+ const wantAll = requestedComponents.includes('all');
311
+
312
+ let components = wantAll ? DEFAULT_COMPONENTS : requestedComponents;
313
+ if (!positionals.length && !inferred && inStackMode) {
314
+ const pinned = resolveDefaultStackReviewComponents({ rootDir, components: DEFAULT_COMPONENTS });
315
+ components = pinned.length ? pinned : [];
316
+ }
317
+
318
+ for (const c of components) {
319
+ if (!VALID_COMPONENTS.includes(c)) {
320
+ throw new Error(`[review] unknown component: ${c} (expected one of: ${VALID_COMPONENTS.join(', ')})`);
321
+ }
322
+ }
323
+
324
+ if (!components.length) {
325
+ const msg = inStackMode ? '[review] no non-default stack-pinned components to review' : '[review] no components selected';
326
+ printResult({ json, data: { ok: true, skipped: true, reason: msg }, text: msg });
327
+ return;
328
+ }
329
+
330
+ const baseRefOverride = (kv.get('--base-ref') ?? '').trim();
331
+ const baseRemoteOverride = (kv.get('--base-remote') ?? '').trim();
332
+ const baseBranchOverride = (kv.get('--base-branch') ?? '').trim();
333
+ const stackRemoteFallback = stackRemoteFallbackFromEnv(process.env);
334
+ const concurrency = (kv.get('--concurrency') ?? '').trim();
335
+ const limit = concurrency ? Number(concurrency) : 4;
336
+ const depth = (kv.get('--depth') ?? 'deep').toString().trim().toLowerCase();
337
+ const coderabbitType = (kv.get('--coderabbit-type') ?? 'committed').toString().trim().toLowerCase();
338
+ const chunkingMode = (kv.get('--chunking') ?? 'auto').toString().trim().toLowerCase();
339
+ const chunkMaxFilesRaw = (kv.get('--chunk-max-files') ?? '').toString().trim();
340
+ const coderabbitMaxFilesRaw = (kv.get('--coderabbit-max-files') ?? '').toString().trim();
341
+ const coderabbitMaxFiles = coderabbitMaxFilesRaw ? Number(coderabbitMaxFilesRaw) : DEFAULT_REVIEW_MAX_FILES;
342
+ const chunkMaxFiles = chunkMaxFilesRaw ? Number(chunkMaxFilesRaw) : coderabbitMaxFiles;
343
+ const globalChunks = flags.has('--chunks') ? true : flags.has('--no-chunks') ? false : null;
344
+ const coderabbitChunksOverride = flags.has('--coderabbit-chunks')
345
+ ? true
346
+ : flags.has('--no-coderabbit-chunks')
347
+ ? false
348
+ : null;
349
+ const codexChunksOverride = flags.has('--codex-chunks') ? true : flags.has('--no-codex-chunks') ? false : null;
350
+ if (!VALID_DEPTHS.includes(depth)) {
351
+ throw new Error(`[review] invalid --depth=${depth} (expected: ${VALID_DEPTHS.join(' | ')})`);
352
+ }
353
+ if (!['auto', 'head-slice', 'commit-window'].includes(chunkingMode)) {
354
+ throw new Error('[review] invalid --chunking (expected: auto|head-slice|commit-window)');
355
+ }
356
+
357
+ const deepInstructionsPath = join(rootDir, 'scripts', 'utils', 'review', 'instructions', 'deep.md');
358
+ const coderabbitConfigFiles = depth === 'deep' ? [deepInstructionsPath] : [];
359
+
360
+ if (reviewers.includes('coderabbit')) {
361
+ const coderabbitHomeKey = 'HAPPY_STACKS_CODERABBIT_HOME_DIR';
362
+ if (!(process.env[coderabbitHomeKey] ?? '').toString().trim()) {
363
+ process.env[coderabbitHomeKey] = join(rootDir, '.project', 'coderabbit-home');
364
+ }
365
+ await ensureDir(process.env[coderabbitHomeKey]);
366
+ }
367
+
368
+ if (reviewers.includes('codex')) {
369
+ const codexHomeKey = 'HAPPY_STACKS_CODEX_HOME_DIR';
370
+ if (!(process.env[codexHomeKey] ?? '').toString().trim()) {
371
+ process.env[codexHomeKey] = join(rootDir, '.project', 'codex-home');
372
+ }
373
+ await ensureDir(process.env[codexHomeKey]);
374
+
375
+ if (!(process.env.HAPPY_STACKS_CODEX_SANDBOX ?? '').toString().trim()) {
376
+ process.env.HAPPY_STACKS_CODEX_SANDBOX = 'workspace-write';
377
+ }
378
+
379
+ // Seed Codex auth/config into the isolated CODEX_HOME to avoid sandbox permission issues
380
+ // writing under the real ~/.codex. We never print or inspect auth contents.
381
+ try {
382
+ const realHome = (process.env.HOME ?? '').toString().trim();
383
+ const overrideHome = process.env[codexHomeKey];
384
+ if (realHome && overrideHome && realHome !== overrideHome) {
385
+ const srcAuth = join(realHome, '.codex', 'auth.json');
386
+ const srcCfg = join(realHome, '.codex', 'config.toml');
387
+ const destAuth = join(overrideHome, 'auth.json');
388
+ const destCfg = join(overrideHome, 'config.toml');
389
+ if (existsSync(srcAuth) && !existsSync(destAuth)) await copyFile(srcAuth, destAuth);
390
+ if (existsSync(srcCfg) && !existsSync(destCfg)) await copyFile(srcCfg, destCfg);
391
+ }
392
+ } catch {
393
+ // ignore (codex will surface auth issues if seeding fails)
394
+ }
395
+ }
396
+
397
+ if (stream) {
398
+ // eslint-disable-next-line no-console
399
+ console.log('[review] note: this can take a long time (up to 60+ minutes per reviewer). No timeout is enforced.');
400
+ printReviewOperatorGuidance();
401
+ }
402
+
403
+ const resolved = components.map((component) => ({ component, repoDir: getComponentDir(rootDir, component) }));
404
+ const monoRoots = new Set(resolved.map((x) => coerceHappyMonorepoRootFromPath(x.repoDir)).filter(Boolean));
405
+ if (monoRoots.size > 1) {
406
+ const roots = Array.from(monoRoots).sort();
407
+ throw new Error(
408
+ `[review] multiple monorepo roots detected across selected component dirs:\n` +
409
+ roots.map((r) => `- ${r}`).join('\n') +
410
+ `\n\n` +
411
+ `Fix: ensure all monorepo components (happy/happy-cli/happy-server(-light)) point at the same worktree.\n` +
412
+ `- Stack mode: use \`happys stack wt <stack> -- use happy <worktree>\` (monorepo-aware)\n` +
413
+ `- One-shot: pass --happy=... --happy-cli=... --happy-server-light=... all pointing into the same monorepo worktree`
414
+ );
415
+ }
416
+ const monorepoRoot = monoRoots.size === 1 ? Array.from(monoRoots)[0] : null;
417
+
418
+ const jobs = monorepoRoot
419
+ ? [{ component: 'monorepo', repoDir: monorepoRoot, monorepo: true }]
420
+ : resolved.map((x) => ({ component: x.component, repoDir: x.repoDir, monorepo: false }));
421
+
422
+ // Review artifacts: always create a per-run directory containing raw outputs + a triage checklist.
423
+ const reviewsRootDir = join(rootDir, '.project', 'reviews');
424
+ await ensureDir(reviewsRootDir);
425
+ const runLabelOverride = (kv.get('--run-label') ?? '').toString().trim();
426
+ const ts = new Date().toISOString().replace(/[:.]/g, '-');
427
+ const stackName = (process.env.HAPPY_STACKS_STACK ?? process.env.HAPPY_LOCAL_STACK ?? '').toString().trim();
428
+ const defaultLabel = `review-${ts}${stackName ? `-${sanitizeLabel(stackName)}` : ''}`;
429
+ const runLabel = sanitizeLabel(runLabelOverride || defaultLabel) || defaultLabel;
430
+ const runDir = join(reviewsRootDir, runLabel);
431
+ await ensureDir(runDir);
432
+ await ensureDir(join(runDir, 'raw'));
433
+
434
+ const jobResults = await runWithConcurrencyLimit({
435
+ items: jobs,
436
+ limit,
437
// Review worker for one job (a single component checkout).
// Resolves the base ref to diff against, then runs every configured reviewer
// ('coderabbit' / 'codex') in parallel. Per reviewer, one of three strategies
// is used:
//   - monorepo "head-slice": path-sliced synthetic commits reviewed at HEAD state,
//   - "commit-window": the commit range split into windows of <= maxFiles files,
//   - single whole-diff review (optionally partial via a picked --base-commit).
// Returns { component, repoDir, base, results } consumed later for triage output.
// NOTE(review): relies on many closure variables from the enclosing command
// (runDir, reviewers, stream, depth, json, override flags, helper fns).
fn: async (job) => {
  const { component, repoDir, monorepo } = job;
  // Base ref resolution: CLI overrides take precedence, with a stack-remote
  // fallback (see resolveBaseRef for the exact precedence).
  const base = await resolveBaseRef({
    cwd: repoDir,
    baseRefOverride,
    baseRemoteOverride,
    baseBranchOverride,
    stackRemoteFallback,
  });

  // Per-chunk file budget; 300 when no positive finite override was given.
  const maxFiles = Number.isFinite(chunkMaxFiles) && chunkMaxFiles > 0 ? chunkMaxFiles : 300;
  // Per-reviewer chunking switches; nullish means "decide automatically".
  const wantChunksCoderabbit = coderabbitChunksOverride ?? globalChunks;
  const wantChunksCodex = codexChunksOverride ?? globalChunks;
  // 'auto' maps to head-slice for monorepos and commit-window otherwise.
  const effectiveChunking = chunkingMode === 'auto' ? (monorepo ? 'head-slice' : 'commit-window') : chunkingMode;

  if (monorepo && stream) {
    // eslint-disable-next-line no-console
    console.log(`[review] monorepo detected at ${repoDir}; running a single unified review (chunking=${effectiveChunking}).`);
  }

  // Run all reviewers concurrently against the same checkout.
  const perReviewer = await Promise.all(
    reviewers.map(async (reviewer) => {
      if (reviewer === 'coderabbit') {
        const fileCount = await countChangedFiles({ cwd: repoDir, env: process.env, base: base.baseRef });
        // Auto-chunk when the diff exceeds the per-chunk file budget.
        const autoChunks = fileCount > maxFiles;

        let coderabbitBaseCommit = null;
        let note = '';

        // Monorepo: prefer HEAD-sliced chunking so each slice is reviewed in the final HEAD state.
        if (monorepo && effectiveChunking === 'head-slice' && (wantChunksCoderabbit ?? autoChunks)) {
          const headCommit = (await runCapture('git', ['rev-parse', 'HEAD'], { cwd: repoDir, env: process.env })).trim();
          const baseCommit = (await runCapture('git', ['rev-parse', base.baseRef], { cwd: repoDir, env: process.env })).trim();
          const ops = await getChangedOps({ cwd: repoDir, baseRef: baseCommit, headRef: headCommit, env: process.env });
          const slices = planPathSlices({ changedPaths: Array.from(ops.all), maxFiles });

          const sliceResults = [];
          // Slices run sequentially: each needs its own detached worktree.
          for (let i = 0; i < slices.length; i += 1) {
            const slice = slices[i];
            const logFile = join(runDir, 'raw', `coderabbit-slice-${i + 1}-of-${slices.length}-${sanitizeLabel(slice.label)}.log`);
            // eslint-disable-next-line no-await-in-loop
            const rr = await withDetachedWorktree(
              // Worktree starts at the base commit; the slice commits are built on top.
              { repoDir, headCommit: baseCommit, label: `coderabbit-${i + 1}-of-${slices.length}`, env: process.env },
              async (worktreeDir) => {
                const { baseSliceCommit } = await createHeadSliceCommits({
                  cwd: worktreeDir,
                  env: process.env,
                  baseRef: baseCommit,
                  headCommit,
                  ops,
                  slicePaths: slice.paths,
                  // Strip trailing slashes so the label is a clean path-ish name.
                  label: slice.label.replace(/\/+$/g, ''),
                });
                return await runCodeRabbitReview({
                  repoDir: worktreeDir,
                  baseRef: null,
                  baseCommit: baseSliceCommit,
                  env: process.env,
                  type: coderabbitType,
                  configFiles: coderabbitConfigFiles,
                  streamLabel: stream ? `monorepo:coderabbit:${i + 1}/${slices.length}` : undefined,
                  teeFile: logFile,
                  teeLabel: `monorepo:coderabbit:${i + 1}/${slices.length}`,
                });
              }
            );
            sliceResults.push({
              index: i + 1,
              of: slices.length,
              slice: slice.label,
              fileCount: slice.paths.length,
              logFile,
              ok: Boolean(rr.ok),
              exitCode: rr.exitCode,
              signal: rr.signal,
              durationMs: rr.durationMs,
              stdout: rr.stdout ?? '',
              stderr: rr.stderr ?? '',
            });
          }

          // Aggregate: the reviewer "passes" only if every slice passed.
          const okAll = sliceResults.every((r) => r.ok);
          return {
            reviewer,
            ok: okAll,
            exitCode: okAll ? 0 : 1,
            signal: null,
            durationMs: sliceResults.reduce((acc, r) => acc + (r.durationMs ?? 0), 0),
            stdout: '',
            stderr: '',
            note: `monorepo head-slice: ${sliceResults.length} slices (maxFiles=${maxFiles})`,
            slices: sliceResults,
          };
        }

        // Non-monorepo or non-sliced: optionally chunk by commit windows (older behavior).
        if (fileCount > maxFiles && effectiveChunking === 'commit-window' && (wantChunksCoderabbit ?? false)) {
          // fall through to commit-window chunking below
        } else if (fileCount > maxFiles && (wantChunksCoderabbit === false || wantChunksCoderabbit == null)) {
          // Over budget without chunking: try to pick a later base commit that
          // keeps the diff under the limit, accepting a partial review.
          coderabbitBaseCommit = await pickCoderabbitBaseCommitForMaxFiles({
            cwd: repoDir,
            env: process.env,
            baseRef: base.baseRef,
            maxFiles,
          });
          note = coderabbitBaseCommit
            ? `diff too large (${fileCount} files vs limit ${maxFiles}); using --base-commit ${coderabbitBaseCommit} for a partial review`
            : `diff too large (${fileCount} files vs limit ${maxFiles}); unable to pick a --base-commit automatically`;
          // eslint-disable-next-line no-console
          console.log(`[review] coderabbit: ${note}`);
        }

        // Default path: one CodeRabbit run over the whole (possibly truncated) diff.
        if (!(fileCount > maxFiles && effectiveChunking === 'commit-window' && (wantChunksCoderabbit ?? false))) {
          const logFile = join(runDir, 'raw', `coderabbit-${sanitizeLabel(component)}.log`);
          const res = await runCodeRabbitReview({
            repoDir,
            // baseRef and baseCommit are mutually exclusive inputs.
            baseRef: coderabbitBaseCommit ? null : base.baseRef,
            baseCommit: coderabbitBaseCommit,
            env: process.env,
            type: coderabbitType,
            configFiles: coderabbitConfigFiles,
            streamLabel: stream ? `${component}:coderabbit` : undefined,
            teeFile: logFile,
            teeLabel: `${component}:coderabbit`,
          });
          return {
            reviewer,
            ok: Boolean(res.ok),
            exitCode: res.exitCode,
            signal: res.signal,
            durationMs: res.durationMs,
            stdout: res.stdout ?? '',
            stderr: res.stderr ?? '',
            note,
            logFile,
          };
        }

        // Chunked mode: split the commit range into <=maxFiles windows and review each window by
        // running CodeRabbit in a detached worktree checked out at the window head.
        const mb = await mergeBase({ cwd: repoDir, env: process.env, a: base.baseRef, b: 'HEAD' });
        const commits = await listCommitsBetween({ cwd: repoDir, env: process.env, base: mb, head: 'HEAD' });
        const planned = await planCommitChunks({
          baseCommit: mb,
          commits,
          maxFiles,
          countFilesBetween: async ({ base: baseCommit, head }) =>
            await countChangedFilesBetween({ cwd: repoDir, env: process.env, base: baseCommit, head }),
        });

        const chunks = planned.map((ch) => ({
          baseCommit: ch.base,
          headCommit: ch.head,
          fileCount: ch.fileCount,
          // overLimit flags windows that could not be split under the budget.
          overLimit: Boolean(ch.overLimit),
        }));

        const chunkResults = [];
        for (let i = 0; i < chunks.length; i += 1) {
          const ch = chunks[i];
          const logFile = join(
            runDir,
            'raw',
            `coderabbit-${sanitizeLabel(component)}-window-${i + 1}-of-${chunks.length}-${String(ch.headCommit).slice(0, 12)}.log`
          );
          // eslint-disable-next-line no-await-in-loop
          const rr = await withDetachedWorktree(
            { repoDir, headCommit: ch.headCommit, label: `coderabbit-${component}-${i + 1}-of-${chunks.length}`, env: process.env },
            async (worktreeDir) => {
              return await runCodeRabbitReview({
                repoDir: worktreeDir,
                baseRef: null,
                baseCommit: ch.baseCommit,
                env: process.env,
                type: coderabbitType,
                configFiles: coderabbitConfigFiles,
                streamLabel: stream ? `${component}:coderabbit:${i + 1}/${chunks.length}` : undefined,
                teeFile: logFile,
                teeLabel: `${component}:coderabbit:${i + 1}/${chunks.length}`,
              });
            }
          );
          chunkResults.push({
            index: i + 1,
            of: chunks.length,
            baseCommit: ch.baseCommit,
            headCommit: ch.headCommit,
            fileCount: ch.fileCount,
            overLimit: ch.overLimit,
            logFile,
            ok: Boolean(rr.ok),
            exitCode: rr.exitCode,
            signal: rr.signal,
            durationMs: rr.durationMs,
            stdout: rr.stdout ?? '',
            stderr: rr.stderr ?? '',
          });
        }

        const okAll = chunkResults.every((r) => r.ok);
        return {
          reviewer,
          ok: okAll,
          exitCode: okAll ? 0 : 1,
          signal: null,
          durationMs: chunkResults.reduce((acc, r) => acc + (r.durationMs ?? 0), 0),
          stdout: '',
          stderr: '',
          note: `chunked: ${chunkResults.length} windows (maxFiles=${maxFiles})`,
          chunks: chunkResults,
        };
      }
      if (reviewer === 'codex') {
        // In JSON mode codex output is JSONL; streaming labels are suppressed.
        const jsonMode = json;
        // Deep reviews drive codex with an explicit prompt instead of a base ref.
        const usePromptMode = depth === 'deep';
        const fileCount = await countChangedFiles({ cwd: repoDir, env: process.env, base: base.baseRef });
        const autoChunks = usePromptMode && fileCount > maxFiles;

        // Monorepo head-slice path (mirrors the coderabbit branch above, but
        // only applies in prompt mode).
        if (monorepo && effectiveChunking === 'head-slice' && usePromptMode && (wantChunksCodex ?? autoChunks)) {
          const headCommit = (await runCapture('git', ['rev-parse', 'HEAD'], { cwd: repoDir, env: process.env })).trim();
          const baseCommit = (await runCapture('git', ['rev-parse', base.baseRef], { cwd: repoDir, env: process.env })).trim();
          const ops = await getChangedOps({ cwd: repoDir, baseRef: baseCommit, headRef: headCommit, env: process.env });
          const slices = planPathSlices({ changedPaths: Array.from(ops.all), maxFiles });

          const sliceResults = [];
          for (let i = 0; i < slices.length; i += 1) {
            const slice = slices[i];
            const logFile = join(runDir, 'raw', `codex-slice-${i + 1}-of-${slices.length}-${sanitizeLabel(slice.label)}.log`);
            // eslint-disable-next-line no-await-in-loop
            const rr = await withDetachedWorktree(
              { repoDir, headCommit: baseCommit, label: `codex-${i + 1}-of-${slices.length}`, env: process.env },
              async (worktreeDir) => {
                const { baseSliceCommit } = await createHeadSliceCommits({
                  cwd: worktreeDir,
                  env: process.env,
                  baseRef: baseCommit,
                  headCommit,
                  ops,
                  slicePaths: slice.paths,
                  label: slice.label.replace(/\/+$/g, ''),
                });
                const prompt = buildCodexMonorepoSlicePrompt({ sliceLabel: slice.label, baseCommit: baseSliceCommit, baseRef: base.baseRef });
                return await runCodexReview({
                  repoDir: worktreeDir,
                  baseRef: null,
                  env: process.env,
                  jsonMode,
                  prompt,
                  streamLabel: stream && !jsonMode ? `monorepo:codex:${i + 1}/${slices.length}` : undefined,
                  teeFile: logFile,
                  teeLabel: `monorepo:codex:${i + 1}/${slices.length}`,
                });
              }
            );
            // In JSON mode, pull the review text out of the JSONL transcript.
            const extracted = jsonMode ? extractCodexReviewFromJsonl(rr.stdout ?? '') : null;
            sliceResults.push({
              index: i + 1,
              of: slices.length,
              slice: slice.label,
              fileCount: slice.paths.length,
              logFile,
              ok: Boolean(rr.ok),
              exitCode: rr.exitCode,
              signal: rr.signal,
              durationMs: rr.durationMs,
              stdout: rr.stdout ?? '',
              stderr: rr.stderr ?? '',
              review_output: extracted,
            });
          }

          const okAll = sliceResults.every((r) => r.ok);
          return {
            reviewer,
            ok: okAll,
            exitCode: okAll ? 0 : 1,
            signal: null,
            durationMs: sliceResults.reduce((acc, r) => acc + (r.durationMs ?? 0), 0),
            stdout: '',
            stderr: '',
            note: `monorepo head-slice: ${sliceResults.length} slices (maxFiles=${maxFiles})`,
            slices: sliceResults,
          };
        }

        // Single codex run: deep mode supplies a prompt, shallow mode a base ref.
        const prompt = usePromptMode ? buildCodexDeepPrompt({ component, baseRef: base.baseRef }) : '';
        const logFile = join(runDir, 'raw', `codex-${sanitizeLabel(component)}.log`);
        const res = await runCodexReview({
          repoDir,
          baseRef: usePromptMode ? null : base.baseRef,
          env: process.env,
          jsonMode,
          prompt,
          streamLabel: stream && !jsonMode ? `${component}:codex` : undefined,
          teeFile: logFile,
          teeLabel: `${component}:codex`,
        });
        const extracted = jsonMode ? extractCodexReviewFromJsonl(res.stdout ?? '') : null;
        return {
          reviewer,
          ok: Boolean(res.ok),
          exitCode: res.exitCode,
          signal: res.signal,
          durationMs: res.durationMs,
          stdout: res.stdout ?? '',
          stderr: res.stderr ?? '',
          review_output: extracted,
          logFile,
        };
      }
      // Unknown reviewer name: report failure instead of throwing.
      return { reviewer, ok: false, exitCode: null, signal: null, durationMs: 0, stdout: '', stderr: 'unknown reviewer\n' };
    })
  );

  return { component, repoDir, base, results: perReviewer };
},
753
+ });
754
+
755
+ // Persist a structured triage checklist for the operator (human/LLM) to work through.
756
+ try {
757
+ const meta = {
758
+ runLabel,
759
+ startedAt: ts,
760
+ stackName: stackName || null,
761
+ reviewers,
762
+ jobs: jobs.map((j) => ({ component: j.component, repoDir: j.repoDir, monorepo: j.monorepo })),
763
+ depth,
764
+ chunkMaxFiles: Number.isFinite(chunkMaxFiles) ? chunkMaxFiles : null,
765
+ coderabbitMaxFiles,
766
+ chunkingMode,
767
+ argv,
768
+ };
769
+ await writeFile(join(runDir, 'meta.json'), JSON.stringify(meta, null, 2), 'utf-8');
770
+
771
+ const allFindings = [];
772
+ let cr = 0;
773
+ let cx = 0;
774
+
775
+ for (const job of jobResults) {
776
+ for (const rr of job.results) {
777
+ if (rr.reviewer === 'coderabbit') {
778
+ const sliceLike = rr.slices ?? rr.chunks ?? null;
779
+ if (Array.isArray(sliceLike)) {
780
+ for (const s of sliceLike) {
781
+ const parsed = parseCodeRabbitPlainOutput(s.stdout ?? '');
782
+ for (const f of parsed) {
783
+ cr += 1;
784
+ allFindings.push({
785
+ ...f,
786
+ id: `CR-${String(cr).padStart(3, '0')}`,
787
+ job: job.component,
788
+ slice: s.slice ?? `${s.index}/${s.of}`,
789
+ sourceLog: s.logFile ?? null,
790
+ });
791
+ }
792
+ }
793
+ } else {
794
+ const parsed = parseCodeRabbitPlainOutput(rr.stdout ?? '');
795
+ for (const f of parsed) {
796
+ cr += 1;
797
+ allFindings.push({
798
+ ...f,
799
+ id: `CR-${String(cr).padStart(3, '0')}`,
800
+ job: job.component,
801
+ slice: null,
802
+ sourceLog: rr.logFile ?? null,
803
+ });
804
+ }
805
+ }
806
+ }
807
+
808
+ if (rr.reviewer === 'codex') {
809
+ const sliceLike = rr.slices ?? rr.chunks ?? null;
810
// Parse one codex review transcript and append every finding to the shared
// `allFindings` list, assigning sequential CX-### ids (mutates the outer
// `cx` counter and `allFindings` from the enclosing scope).
const consumeText = (reviewText, slice, sourceLog) => {
  const parsed = parseCodexReviewText(reviewText);
  for (const f of parsed) {
    cx += 1;
    allFindings.push({
      ...f,
      // Zero-padded sequential id, e.g. "CX-007".
      id: `CX-${String(cx).padStart(3, '0')}`,
      job: job.component,
      slice,
      sourceLog: sourceLog ?? null,
    });
  }
};
823
+
824
+ if (Array.isArray(sliceLike)) {
825
+ for (const s of sliceLike) {
826
+ const reviewText = s.review_output ?? extractCodexReviewFromJsonl(s.stdout ?? '') ?? (s.stdout ?? '');
827
+ consumeText(reviewText, s.slice ?? `${s.index}/${s.of}`, s.logFile ?? null);
828
+ }
829
+ } else {
830
+ const reviewText = rr.review_output ?? extractCodexReviewFromJsonl(rr.stdout ?? '') ?? (rr.stdout ?? '');
831
+ consumeText(reviewText, null, rr.logFile ?? null);
832
+ }
833
+ }
834
+ }
835
+ }
836
+
837
+ await writeFile(join(runDir, 'findings.json'), JSON.stringify(allFindings, null, 2), 'utf-8');
838
+ const triage = formatTriageMarkdown({ runLabel, baseRef: jobResults?.[0]?.base?.baseRef ?? '', findings: allFindings });
839
+ await writeFile(join(runDir, 'triage.md'), triage, 'utf-8');
840
+
841
+ if (stream) {
842
+ // eslint-disable-next-line no-console
843
+ console.log(`[review] trust/triage checklist (READ THIS NEXT): ${join(runDir, 'triage.md')}`);
844
+ // eslint-disable-next-line no-console
845
+ console.log(`[review] findings (raw, parsed): ${join(runDir, 'findings.json')}`);
846
+ // eslint-disable-next-line no-console
847
+ console.log(`[review] raw outputs: ${join(runDir, 'raw')}`);
848
+ // eslint-disable-next-line no-console
849
+ console.log(
850
+ [
851
+ '[review] next steps (mandatory):',
852
+ `- STOP: open ${join(runDir, 'triage.md')} now and load it into your context before doing anything else.`,
853
+ `- Then load ${join(runDir, 'findings.json')} (full parsed finding details + source logs).`,
854
+ `- Treat reviewer output as suggestions: verify against codebase invariants + best practices (use web search when needed) before applying.`,
855
+ `- For each finding: verify in the validation worktree, decide apply/adjust/defer, and record rationale + commit refs in triage.md.`,
856
+ `- For tests: validate behavior/logic; avoid brittle "wording/policing" assertions.`,
857
+ `- Do not start a new review run until the checklist has no remaining TBD decisions.`,
858
+ ].join('\n')
859
+ );
860
+ }
861
+ } catch (e) {
862
+ if (stream) {
863
+ // eslint-disable-next-line no-console
864
+ console.warn('[review] warning: failed to write triage artifacts:', e);
865
+ }
866
+ }
867
+
868
+ const ok = jobResults.every((r) => r.results.every((x) => x.ok));
869
+ if (json) {
870
+ printResult({ json, data: { ok, reviewers, components, results: jobResults } });
871
+ if (!ok) process.exit(1);
872
+ return;
873
+ }
874
+
875
+ const lines = [];
876
+ lines.push('[review] results:');
877
+ for (const r of jobResults) {
878
+ lines.push('============================================================================');
879
+ lines.push(`component: ${r.component}`);
880
+ lines.push(`dir: ${r.repoDir}`);
881
+ lines.push(`baseRef: ${r.base.baseRef}`);
882
+ for (const rr of r.results) {
883
+ lines.push('');
884
+ const status = rr.ok ? '✅ ok' : '❌ failed';
885
+ lines.push(`[${rr.reviewer}] ${status} (exit=${rr.exitCode ?? 'null'} durMs=${rr.durationMs ?? '?'})`);
886
+ if (rr.note) lines.push(`note: ${rr.note}`);
887
+ if (!rr.ok) {
888
+ if (rr.stderr) {
889
+ lines.push('--- stderr (tail) ---');
890
+ lines.push(tailLines(rr.stderr, 120));
891
+ }
892
+ if (rr.stdout) {
893
+ lines.push('--- stdout (tail) ---');
894
+ lines.push(tailLines(rr.stdout, 120));
895
+ }
896
+ }
897
+ }
898
+ lines.push('');
899
+ }
900
+ lines.push(ok ? '[review] ok' : '[review] failed');
901
+ printResult({ json: false, text: lines.join('\n') });
902
+ if (!ok) process.exit(1);
903
+ }
904
+
905
// Entry point: run the review CLI and convert any unhandled rejection into
// a non-zero exit so shell callers and CI observe the failure.
const onFatalError = (err) => {
  console.error('[review] failed:', err);
  process.exit(1);
};

main().catch(onFatalError);