happy-stacks 0.4.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/README.md +64 -33
  2. package/bin/happys.mjs +44 -1
  3. package/docs/codex-mcp-resume.md +130 -0
  4. package/docs/commit-audits/happy/leeroy-wip.commit-analysis.md +17640 -0
  5. package/docs/commit-audits/happy/leeroy-wip.commit-export.fuller-stat.md +3845 -0
  6. package/docs/commit-audits/happy/leeroy-wip.commit-inventory.md +102 -0
  7. package/docs/commit-audits/happy/leeroy-wip.commit-manual-review.md +1452 -0
  8. package/docs/commit-audits/happy/leeroy-wip.manual-review-queue.md +116 -0
  9. package/docs/happy-development.md +1 -2
  10. package/docs/monorepo-migration.md +286 -0
  11. package/docs/server-flavors.md +19 -3
  12. package/docs/stacks.md +35 -0
  13. package/package.json +1 -1
  14. package/scripts/auth.mjs +21 -3
  15. package/scripts/build.mjs +1 -1
  16. package/scripts/dev.mjs +20 -7
  17. package/scripts/doctor.mjs +0 -4
  18. package/scripts/edison.mjs +2 -2
  19. package/scripts/env.mjs +150 -0
  20. package/scripts/env_cmd.test.mjs +128 -0
  21. package/scripts/init.mjs +5 -2
  22. package/scripts/install.mjs +99 -57
  23. package/scripts/migrate.mjs +3 -12
  24. package/scripts/monorepo.mjs +1096 -0
  25. package/scripts/monorepo_port.test.mjs +1470 -0
  26. package/scripts/review.mjs +715 -24
  27. package/scripts/review_pr.mjs +5 -20
  28. package/scripts/run.mjs +21 -15
  29. package/scripts/setup.mjs +147 -25
  30. package/scripts/setup_pr.mjs +19 -28
  31. package/scripts/stack.mjs +493 -157
  32. package/scripts/stack_archive_cmd.test.mjs +91 -0
  33. package/scripts/stack_editor_workspace_monorepo_root.test.mjs +65 -0
  34. package/scripts/stack_env_cmd.test.mjs +87 -0
  35. package/scripts/stack_happy_cmd.test.mjs +126 -0
  36. package/scripts/stack_interactive_monorepo_group.test.mjs +71 -0
  37. package/scripts/stack_monorepo_defaults.test.mjs +62 -0
  38. package/scripts/stack_monorepo_server_light_from_happy_spec.test.mjs +66 -0
  39. package/scripts/stack_server_flavors_defaults.test.mjs +55 -0
  40. package/scripts/stack_shorthand_cmd.test.mjs +55 -0
  41. package/scripts/stack_wt_list.test.mjs +128 -0
  42. package/scripts/tui.mjs +88 -2
  43. package/scripts/utils/cli/cli_registry.mjs +20 -5
  44. package/scripts/utils/cli/cwd_scope.mjs +56 -2
  45. package/scripts/utils/cli/cwd_scope.test.mjs +40 -7
  46. package/scripts/utils/cli/prereqs.mjs +8 -5
  47. package/scripts/utils/cli/prereqs.test.mjs +34 -0
  48. package/scripts/utils/cli/wizard.mjs +17 -9
  49. package/scripts/utils/cli/wizard_prompt_worktree_source_lazy.test.mjs +60 -0
  50. package/scripts/utils/dev/daemon.mjs +14 -1
  51. package/scripts/utils/dev/expo_dev.mjs +188 -4
  52. package/scripts/utils/dev/server.mjs +21 -17
  53. package/scripts/utils/edison/git_roots.mjs +29 -0
  54. package/scripts/utils/edison/git_roots.test.mjs +36 -0
  55. package/scripts/utils/env/env.mjs +7 -3
  56. package/scripts/utils/env/env_file.mjs +4 -2
  57. package/scripts/utils/env/env_file.test.mjs +44 -0
  58. package/scripts/utils/git/worktrees.mjs +63 -12
  59. package/scripts/utils/git/worktrees_monorepo.test.mjs +54 -0
  60. package/scripts/utils/net/tcp_forward.mjs +162 -0
  61. package/scripts/utils/paths/paths.mjs +118 -3
  62. package/scripts/utils/paths/paths_monorepo.test.mjs +58 -0
  63. package/scripts/utils/paths/paths_server_flavors.test.mjs +45 -0
  64. package/scripts/utils/proc/commands.mjs +2 -3
  65. package/scripts/utils/proc/pm.mjs +113 -16
  66. package/scripts/utils/proc/pm_spawn.test.mjs +76 -0
  67. package/scripts/utils/proc/pm_stack_cache_env.test.mjs +142 -0
  68. package/scripts/utils/proc/proc.mjs +68 -10
  69. package/scripts/utils/proc/proc.test.mjs +77 -0
  70. package/scripts/utils/review/chunks.mjs +55 -0
  71. package/scripts/utils/review/chunks.test.mjs +51 -0
  72. package/scripts/utils/review/findings.mjs +165 -0
  73. package/scripts/utils/review/findings.test.mjs +85 -0
  74. package/scripts/utils/review/head_slice.mjs +153 -0
  75. package/scripts/utils/review/head_slice.test.mjs +91 -0
  76. package/scripts/utils/review/instructions/deep.md +20 -0
  77. package/scripts/utils/review/runners/coderabbit.mjs +56 -14
  78. package/scripts/utils/review/runners/coderabbit.test.mjs +59 -0
  79. package/scripts/utils/review/runners/codex.mjs +32 -22
  80. package/scripts/utils/review/runners/codex.test.mjs +35 -0
  81. package/scripts/utils/review/slices.mjs +140 -0
  82. package/scripts/utils/review/slices.test.mjs +32 -0
  83. package/scripts/utils/server/flavor_scripts.mjs +98 -0
  84. package/scripts/utils/server/flavor_scripts.test.mjs +146 -0
  85. package/scripts/utils/server/prisma_import.mjs +37 -0
  86. package/scripts/utils/server/prisma_import.test.mjs +70 -0
  87. package/scripts/utils/server/ui_env.mjs +14 -0
  88. package/scripts/utils/server/ui_env.test.mjs +46 -0
  89. package/scripts/utils/server/validate.mjs +53 -16
  90. package/scripts/utils/server/validate.test.mjs +89 -0
  91. package/scripts/utils/stack/editor_workspace.mjs +4 -4
  92. package/scripts/utils/stack/interactive_stack_config.mjs +185 -0
  93. package/scripts/utils/stack/startup.mjs +113 -13
  94. package/scripts/utils/stack/startup_server_light_dirs.test.mjs +64 -0
  95. package/scripts/utils/stack/startup_server_light_generate.test.mjs +70 -0
  96. package/scripts/utils/stack/startup_server_light_legacy.test.mjs +88 -0
  97. package/scripts/utils/tailscale/ip.mjs +116 -0
  98. package/scripts/utils/ui/ansi.mjs +39 -0
  99. package/scripts/where.mjs +2 -2
  100. package/scripts/worktrees.mjs +627 -137
  101. package/scripts/worktrees_archive_cmd.test.mjs +245 -0
  102. package/scripts/worktrees_cursor_monorepo_root.test.mjs +63 -0
  103. package/scripts/worktrees_list_specs_no_recurse.test.mjs +33 -0
  104. package/scripts/worktrees_monorepo_use_group.test.mjs +67 -0
package/scripts/utils/review/chunks.mjs
@@ -0,0 +1,55 @@
+ export async function planCommitChunks({ baseCommit, commits, maxFiles, countFilesBetween }) {
+   if (!Array.isArray(commits)) throw new Error('[review] planCommitChunks: commits must be an array');
+   const max = Number(maxFiles);
+   if (!Number.isFinite(max) || max <= 0) throw new Error('[review] planCommitChunks: maxFiles must be a positive number');
+   if (typeof countFilesBetween !== 'function') throw new Error('[review] planCommitChunks: countFilesBetween must be a function');
+
+   const list = commits.map((c) => String(c ?? '').trim()).filter(Boolean);
+   if (!list.length) return [];
+
+   const chunks = [];
+   let base = String(baseCommit ?? '').trim();
+   if (!base) throw new Error('[review] planCommitChunks: baseCommit is required');
+   let startIndex = 0;
+
+   while (startIndex < list.length) {
+     let lo = startIndex;
+     let hi = list.length - 1;
+     let bestIndex = -1;
+     let bestCount = -1;
+
+     while (lo <= hi) {
+       const mid = Math.floor((lo + hi) / 2);
+       const head = list[mid];
+       // eslint-disable-next-line no-await-in-loop
+       const n = await countFilesBetween({ base, head });
+       if (!Number.isFinite(n) || n < 0) throw new Error('[review] planCommitChunks: countFilesBetween returned invalid count');
+
+       if (n <= max) {
+         bestIndex = mid;
+         bestCount = n;
+         lo = mid + 1;
+       } else {
+         hi = mid - 1;
+       }
+     }
+
+     // If even the smallest chunk exceeds the limit, emit a single over-limit chunk so the caller can decide what to do.
+     if (bestIndex === -1) {
+       const head = list[startIndex];
+       // eslint-disable-next-line no-await-in-loop
+       const n = await countFilesBetween({ base, head });
+       chunks.push({ base, head, fileCount: n, overLimit: true });
+       base = head;
+       startIndex += 1;
+       continue;
+     }
+
+     const head = list[bestIndex];
+     chunks.push({ base, head, fileCount: bestCount, overLimit: false });
+     base = head;
+     startIndex = bestIndex + 1;
+   }
+
+   return chunks;
+ }
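
As an orientation aid (not part of the published diff), here is a minimal sketch of how planCommitChunks could be driven against a real repository, assuming a hypothetical git-backed countFilesBetween helper built on node:child_process; the chunk size of 50 is an arbitrary illustrative value:

    import { execFile } from 'node:child_process';
    import { promisify } from 'node:util';
    import { planCommitChunks } from './chunks.mjs';

    const execFileAsync = promisify(execFile);

    // Hypothetical helper (not part of the package): count files changed between two commits.
    async function countFilesBetween({ base, head }, cwd) {
      const { stdout } = await execFileAsync('git', ['diff', '--name-only', `${base}...${head}`], { cwd });
      return stdout.split('\n').filter(Boolean).length;
    }

    // Split an ordered list of branch commits into review chunks of at most 50 changed files each.
    async function planChunksForRepo({ cwd, baseCommit, commits }) {
      return planCommitChunks({
        baseCommit,
        commits,
        maxFiles: 50,
        countFilesBetween: (args) => countFilesBetween(args, cwd),
      });
    }

The binary search inside each iteration keeps the number of git diff invocations roughly logarithmic in the number of commits per chunk, rather than probing every commit.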
package/scripts/utils/review/chunks.test.mjs
@@ -0,0 +1,51 @@
+ import test from 'node:test';
+ import assert from 'node:assert/strict';
+ import { planCommitChunks } from './chunks.mjs';
+
+ test('planCommitChunks greedily selects the largest end commit within maxFiles', async () => {
+   const commits = ['c1', 'c2', 'c3', 'c4'];
+
+   const counts = new Map([
+     ['BASE->c1', 1],
+     ['BASE->c2', 3],
+     ['BASE->c3', 4],
+     ['BASE->c4', 7],
+     ['c2->c3', 2],
+     ['c2->c4', 5],
+     ['c3->c4', 2],
+   ]);
+
+   const chunks = await planCommitChunks({
+     baseCommit: 'BASE',
+     commits,
+     maxFiles: 3,
+     countFilesBetween: async ({ base, head }) => counts.get(`${base}->${head}`),
+   });
+
+   assert.deepEqual(chunks, [
+     { base: 'BASE', head: 'c2', fileCount: 3, overLimit: false },
+     { base: 'c2', head: 'c3', fileCount: 2, overLimit: false },
+     { base: 'c3', head: 'c4', fileCount: 2, overLimit: false },
+   ]);
+ });
+
+ test('planCommitChunks marks overLimit when a single step exceeds maxFiles', async () => {
+   const commits = ['c1', 'c2'];
+
+   const counts = new Map([
+     ['BASE->c1', 10],
+     ['c1->c2', 2],
+   ]);
+
+   const chunks = await planCommitChunks({
+     baseCommit: 'BASE',
+     commits,
+     maxFiles: 3,
+     countFilesBetween: async ({ base, head }) => counts.get(`${base}->${head}`),
+   });
+
+   assert.deepEqual(chunks, [
+     { base: 'BASE', head: 'c1', fileCount: 10, overLimit: true },
+     { base: 'c1', head: 'c2', fileCount: 2, overLimit: false },
+   ]);
+ });
package/scripts/utils/review/findings.mjs
@@ -0,0 +1,165 @@
+ function parseLineRange(raw) {
+   const s = String(raw ?? '').trim();
+   // Common CodeRabbit format: "17 to 31"
+   const m = s.match(/^(\d+)\s+to\s+(\d+)$/i);
+   if (m) return { start: Number(m[1]), end: Number(m[2]) };
+   const n = s.match(/^(\d+)$/);
+   if (n) {
+     const v = Number(n[1]);
+     return { start: v, end: v };
+   }
+   return null;
+ }
+
+ export function parseCodeRabbitPlainOutput(text) {
+   const lines = String(text ?? '').split('\n');
+   const findings = [];
+
+   let current = null;
+   let mode = null; // 'comment' | 'prompt' | null
+
+   function flush() {
+     if (!current) return;
+     const comment = (current._commentLines ?? []).join('\n').trim();
+     const prompt = (current._promptLines ?? []).join('\n').trim();
+     const title =
+       current.title ??
+       comment
+         .split('\n')
+         .map((l) => l.trim())
+         .filter(Boolean)[0] ??
+       '';
+
+     findings.push({
+       reviewer: 'coderabbit',
+       file: current.file ?? '',
+       lines: current.lines ?? null,
+       type: current.type ?? '',
+       title,
+       comment,
+       prompt: prompt || null,
+     });
+     current = null;
+     mode = null;
+   }
+
+   for (let i = 0; i < lines.length; i += 1) {
+     const line = lines[i];
+     const trimmed = line.trimEnd();
+
+     if (trimmed.startsWith('============================================================================')) {
+       flush();
+       continue;
+     }
+     if (trimmed.startsWith('File: ')) {
+       flush();
+       current = { _commentLines: [], _promptLines: [] };
+       current.file = trimmed.slice('File: '.length).trim();
+       continue;
+     }
+     if (!current) continue;
+
+     if (trimmed.startsWith('Line: ')) {
+       const range = parseLineRange(trimmed.slice('Line: '.length).trim());
+       current.lines = range;
+       continue;
+     }
+     if (trimmed.startsWith('Type: ')) {
+       current.type = trimmed.slice('Type: '.length).trim();
+       continue;
+     }
+     if (trimmed === 'Comment:') {
+       mode = 'comment';
+       continue;
+     }
+     if (trimmed === 'Prompt for AI Agent:') {
+       mode = 'prompt';
+       continue;
+     }
+
+     if (mode === 'comment') {
+       // Title is first non-empty comment line.
+       if (!current.title && trimmed.trim()) current.title = trimmed.trim();
+       current._commentLines.push(trimmed);
+     } else if (mode === 'prompt') {
+       current._promptLines.push(trimmed);
+     }
+   }
+
+   flush();
+   // Drop empty placeholders
+   return findings.filter((f) => f.file && f.title);
+ }
+
+ export function parseCodexReviewText(reviewText) {
+   const s = String(reviewText ?? '');
+   const marker = '===FINDINGS_JSON===';
+   const idx = s.indexOf(marker);
+   if (idx < 0) return [];
+   const jsonText = s.slice(idx + marker.length).trim();
+   if (!jsonText) return [];
+
+   let parsed;
+   try {
+     parsed = JSON.parse(jsonText);
+   } catch {
+     return [];
+   }
+   if (!Array.isArray(parsed)) return [];
+   return parsed
+     .map((x) => ({
+       reviewer: 'codex',
+       severity: x?.severity ?? null,
+       file: x?.file ?? null,
+       title: x?.title ?? null,
+       recommendation: x?.recommendation ?? null,
+       needsDiscussion: Boolean(x?.needsDiscussion),
+     }))
+     .filter((x) => x.file && x.title);
+ }
+
+ export function formatTriageMarkdown({ runLabel, baseRef, findings }) {
+   const items = Array.isArray(findings) ? findings : [];
+   const header = [
+     `# Review triage: ${runLabel}`,
+     '',
+     `- Base ref: ${baseRef ?? ''}`,
+     `- Findings: ${items.length}`,
+     '',
+     '## Mandatory workflow',
+     '',
+     'For each finding below:',
+     '1) Open the referenced file/lines in the *validation worktree* (committed-only).',
+     '2) Decide if it is a real bug/risk/correctness gap, already fixed, expected behavior, or style preference.',
+     '3) Record a final decision + rationale here (`apply` / `adjust` / `defer`).',
+     '4) If `apply/adjust`: implement in the main worktree as a clean commit (no unrelated changes), then sync that commit to validation.',
+     '',
+     'Notes:',
+     '- Treat reviewer output as suggestions; verify against best practices and codebase invariants before applying.',
+     '- Avoid brittle tests that assert on wording/phrasing/config; test observable behavior.',
+     '',
+   ].join('\n');
+
+   const body = items
+     .map((f) => {
+       const lines = f.lines?.start ? `${f.lines.start}-${f.lines.end ?? f.lines.start}` : '';
+       const meta = [
+         `- [ ] \`${f.id ?? ''}\` reviewer=\`${f.reviewer ?? ''}\`${f.severity ? ` severity=\`${f.severity}\`` : ''}${
+           f.type ? ` type=\`${f.type}\`` : ''
+         } \`${f.file ?? ''}\`${lines ? ` (lines ${lines})` : ''}: ${f.title ?? ''}`,
+         f.sourceLog ? ` - Source log: \`${f.sourceLog}\`` : null,
+         ' - Final decision: **TBD** (apply|adjust|defer)',
+         ' - Verified in validation worktree: **TBD**',
+         ' - Rationale: **TBD**',
+         ' - Action taken: **TBD**',
+         ' - Commit: **TBD**',
+         ' - Needs discussion: **TBD**',
+       ];
+       if (f.comment) meta.push(` - Reviewer detail: ${String(f.comment).split('\n')[0].trim()}`);
+       if (f.recommendation) meta.push(` - Reviewer suggested fix: ${String(f.recommendation).split('\n')[0].trim()}`);
+       return meta.filter(Boolean).join('\n');
+     })
+     .join('\n\n');
+
+   return `${header}${body ? `${body}\n` : ''}`;
+ }
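
For context, a hedged sketch of how these findings helpers compose end to end; the reviewer text, run label, and base ref below are illustrative values, not output captured from the package:

    import { parseCodexReviewText, formatTriageMarkdown } from './findings.mjs';

    // Hypothetical Codex output: free-form prose followed by the JSON trailer
    // that parseCodexReviewText scans for.
    const reviewText = [
      'Overall the change looks reasonable.',
      '===FINDINGS_JSON===',
      JSON.stringify([
        { severity: 'minor', file: 'scripts/run.mjs', title: 'Handle empty args', recommendation: 'Guard against an empty argv.' },
      ]),
    ].join('\n');

    const findings = parseCodexReviewText(reviewText);

    // Render the triage checklist; runLabel and baseRef here are made-up example values.
    const markdown = formatTriageMarkdown({ runLabel: 'review-example', baseRef: 'upstream/main', findings });
    console.log(markdown);

Findings without both a file and a title are dropped by the parsers, so malformed or partial reviewer output degrades to an empty list rather than a broken triage document.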
package/scripts/utils/review/findings.test.mjs
@@ -0,0 +1,85 @@
+ import test from 'node:test';
+ import assert from 'node:assert/strict';
+
+ import { formatTriageMarkdown, parseCodeRabbitPlainOutput, parseCodexReviewText } from './findings.mjs';
+
+ test('parseCodeRabbitPlainOutput parses CodeRabbit plain blocks', () => {
+   const out = [
+     '============================================================================',
+     'File: cli/src/utils/spawnHappyCLI.invocation.test.ts',
+     'Line: 17 to 31',
+     'Type: potential_issue',
+     '',
+     'Comment:',
+     'Dynamic imports may be cached, causing test isolation issues.',
+     '',
+     'Some more details.',
+     '',
+     'Prompt for AI Agent:',
+     'Do the thing.',
+     '',
+     '============================================================================',
+     'File: expo-app/sources/app/(app)/_layout.tsx',
+     'Line: 29 to 35',
+     'Type: potential_issue',
+     '',
+     'Comment:',
+     "Hooks order violation: useUnistyles() called after conditional return.",
+     '',
+     'More details.',
+   ].join('\n');
+
+   const findings = parseCodeRabbitPlainOutput(out);
+   assert.equal(findings.length, 2);
+   assert.equal(findings[0].file, 'cli/src/utils/spawnHappyCLI.invocation.test.ts');
+   assert.deepEqual(findings[0].lines, { start: 17, end: 31 });
+   assert.equal(findings[0].type, 'potential_issue');
+   assert.equal(findings[0].title, 'Dynamic imports may be cached, causing test isolation issues.');
+   assert.match(findings[0].comment, /Some more details/);
+   assert.match(findings[0].prompt, /Do the thing/);
+ });
+
+ test('parseCodexReviewText extracts findings JSON trailer', () => {
+   const review = [
+     'Overall verdict: looks good.',
+     '',
+     '===FINDINGS_JSON===',
+     JSON.stringify(
+       [
+         {
+           severity: 'major',
+           file: 'server/sources/main.light.ts',
+           title: 'Do not exit after startup',
+           recommendation: 'Remove process.exit(0) on success.',
+         },
+       ],
+       null,
+       2
+     ),
+   ].join('\n');
+
+   const findings = parseCodexReviewText(review);
+   assert.equal(findings.length, 1);
+   assert.equal(findings[0].file, 'server/sources/main.light.ts');
+   assert.equal(findings[0].severity, 'major');
+ });
+
+ test('formatTriageMarkdown includes required workflow fields', () => {
+   const md = formatTriageMarkdown({
+     runLabel: 'review-123',
+     baseRef: 'upstream/main',
+     findings: [
+       {
+         reviewer: 'coderabbit',
+         id: 'CR-001',
+         file: 'cli/src/x.ts',
+         title: 'Thing',
+         type: 'potential_issue',
+       },
+     ],
+   });
+   assert.match(md, /Final decision: \*\*TBD\*\*/);
+   assert.match(md, /Verified in validation worktree:/);
+   assert.match(md, /Commit:/);
+ });
+
package/scripts/utils/review/head_slice.mjs
@@ -0,0 +1,153 @@
+ import { runCapture } from '../proc/proc.mjs';
+
+ function normalizePath(p) {
+   return String(p ?? '').replace(/\\/g, '/').replace(/^\/+/, '');
+ }
+
+ function parseNameStatusZ(buf) {
+   const raw = String(buf ?? '');
+   if (!raw) return [];
+   const parts = raw.split('\0').filter((x) => x.length);
+   const entries = [];
+   let i = 0;
+   while (i < parts.length) {
+     const status = parts[i++];
+     const code = status[0] ?? '';
+     if (!code) break;
+     if (code === 'R' || code === 'C') {
+       const from = parts[i++] ?? '';
+       const to = parts[i++] ?? '';
+       entries.push({ code, status, from, to });
+       continue;
+     }
+     const path = parts[i++] ?? '';
+     entries.push({ code, status, path });
+   }
+   return entries;
+ }
+
+ export async function getChangedOps({ cwd, baseRef, headRef = 'HEAD', env = process.env } = {}) {
+   const out = await runCapture('git', ['diff', '--name-status', '--find-renames', '-z', `${baseRef}...${headRef}`], { cwd, env });
+   const entries = parseNameStatusZ(out);
+   const checkout = new Set();
+   const remove = new Set();
+   for (const e of entries) {
+     if (e.code === 'A' || e.code === 'M' || e.code === 'T') {
+       checkout.add(normalizePath(e.path));
+       continue;
+     }
+     if (e.code === 'D') {
+       remove.add(normalizePath(e.path));
+       continue;
+     }
+     if (e.code === 'R' || e.code === 'C') {
+       if (e.from) remove.add(normalizePath(e.from));
+       if (e.to) checkout.add(normalizePath(e.to));
+       continue;
+     }
+   }
+   const all = new Set([...checkout, ...remove]);
+   return { checkout, remove, all };
+ }
+
+ function subset(set, allowed) {
+   const out = new Set();
+   for (const v of set) {
+     if (allowed.has(v)) out.add(v);
+   }
+   return out;
+ }
+
+ function difference(set, blocked) {
+   const out = new Set();
+   for (const v of set) {
+     if (!blocked.has(v)) out.add(v);
+   }
+   return out;
+ }
+
+ async function batched(args, batchSize, fn) {
+   const list = Array.from(args);
+   for (let i = 0; i < list.length; i += batchSize) {
+     // eslint-disable-next-line no-await-in-loop
+     await fn(list.slice(i, i + batchSize));
+   }
+ }
+
+ async function gitCommit({ cwd, env, message }) {
+   await runCapture(
+     'git',
+     [
+       '-c',
+       'user.name=Happy Review',
+       '-c',
+       'user.email=review@happy.local',
+       '-c',
+       'commit.gpgsign=false',
+       'commit',
+       '-q',
+       '--no-verify',
+       '-m',
+       message,
+     ],
+     { cwd, env }
+   );
+   const sha = (await runCapture('git', ['rev-parse', 'HEAD'], { cwd, env })).trim();
+   return sha;
+ }
+
+ async function applyOpsFromHead({ cwd, env, headCommit, checkoutPaths, removePaths }) {
+   if (removePaths.size) {
+     await batched(Array.from(removePaths), 200, async (batch) => {
+       await runCapture('git', ['rm', '-q', '--ignore-unmatch', '--', ...batch], { cwd, env });
+     });
+   }
+   if (checkoutPaths.size) {
+     // Prefer batching over pathspec-from-file to maximize compatibility.
+     await batched(Array.from(checkoutPaths), 100, async (batch) => {
+       await runCapture('git', ['checkout', headCommit, '--', ...batch], { cwd, env });
+     });
+   }
+   // Stage all changes introduced by the operations.
+   await runCapture('git', ['add', '-A'], { cwd, env });
+ }
+
+ /**
+  * Create two local commits inside an ephemeral worktree:
+  * - baseSliceCommit: baseRef plus all NON-slice changes from headCommit
+  * - headSliceCommit: baseSliceCommit plus slice changes from headCommit (resulting tree equals headCommit)
+  *
+  * These commits are intended solely for review tooling (CodeRabbit/Codex) so the reviewer sees:
+  * - full, final code at HEAD
+  * - a focused diff for the slice (baseSliceCommit..headSliceCommit)
+  */
+ export async function createHeadSliceCommits({
+   cwd,
+   env = process.env,
+   baseRef,
+   headCommit,
+   ops,
+   slicePaths,
+   label = 'slice',
+ } = {}) {
+   const sliceSet = new Set((Array.isArray(slicePaths) ? slicePaths : []).map(normalizePath).filter(Boolean));
+   const sliceCheckout = subset(ops.checkout, sliceSet);
+   const sliceRemove = subset(ops.remove, sliceSet);
+   const nonSliceCheckout = difference(ops.checkout, sliceSet);
+   const nonSliceRemove = difference(ops.remove, sliceSet);
+
+   // Start from baseRef.
+   await runCapture('git', ['checkout', '-q', '--detach', baseRef], { cwd, env });
+
+   // Commit non-slice changes.
+   await applyOpsFromHead({ cwd, env, headCommit, checkoutPaths: nonSliceCheckout, removePaths: nonSliceRemove });
+   const baseSliceCommit = await gitCommit({ cwd, env, message: `chore(review): base for ${label}` });
+
+   // Commit slice changes.
+   await applyOpsFromHead({ cwd, env, headCommit, checkoutPaths: sliceCheckout, removePaths: sliceRemove });
+   const headSliceCommit = await gitCommit({ cwd, env, message: `chore(review): ${label}` });
+
+   // Ensure working tree is at the head slice commit for downstream tools.
+   await runCapture('git', ['checkout', '-q', headSliceCommit], { cwd, env });
+   return { baseSliceCommit, headSliceCommit };
+ }
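
A hedged sketch of how getChangedOps and createHeadSliceCommits are meant to compose at a call site; the worktree location, label, and proc-helper defaults are assumptions, and the test in head_slice.test.mjs below remains the authoritative example:

    import { mkdtemp } from 'node:fs/promises';
    import { tmpdir } from 'node:os';
    import { join } from 'node:path';
    import { run } from '../proc/proc.mjs';
    import { getChangedOps, createHeadSliceCommits } from './head_slice.mjs';

    // Sketch: build a focused review range for one slice of a branch.
    async function buildSliceForReview({ repoCwd, baseRef, headCommit, slicePaths, label }) {
      // 1) Compute which paths the branch adds, modifies, or removes relative to baseRef.
      const ops = await getChangedOps({ cwd: repoCwd, baseRef, headRef: headCommit });

      // 2) Create an ephemeral detached worktree so the synthetic commits never touch the real checkout.
      const wt = join(await mkdtemp(join(tmpdir(), 'happy-review-slice-')), 'wt');
      await run('git', ['worktree', 'add', '--detach', wt, baseRef], { cwd: repoCwd, env: process.env });

      // 3) Build baseSliceCommit..headSliceCommit inside the worktree; the diff between them
      //    contains only slicePaths, while the worktree itself ends up with full HEAD content.
      const { baseSliceCommit, headSliceCommit } = await createHeadSliceCommits({
        cwd: wt,
        baseRef,
        headCommit,
        ops,
        slicePaths,
        label,
      });
      return { worktree: wt, baseSliceCommit, headSliceCommit };
    }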
package/scripts/utils/review/head_slice.test.mjs
@@ -0,0 +1,91 @@
+ import test from 'node:test';
+ import assert from 'node:assert/strict';
+ import { mkdtemp, mkdir, readFile, rm, writeFile } from 'node:fs/promises';
+ import { tmpdir } from 'node:os';
+ import { join } from 'node:path';
+ import { run, runCapture } from '../proc/proc.mjs';
+ import { createHeadSliceCommits, getChangedOps } from './head_slice.mjs';
+
+ function gitEnv() {
+   const clean = {};
+   for (const [k, v] of Object.entries(process.env)) {
+     if (k.startsWith('HAPPY_STACKS_') || k.startsWith('HAPPY_LOCAL_')) continue;
+     clean[k] = v;
+   }
+   return {
+     ...clean,
+     GIT_AUTHOR_NAME: 'Test',
+     GIT_AUTHOR_EMAIL: 'test@example.com',
+     GIT_COMMITTER_NAME: 'Test',
+     GIT_COMMITTER_EMAIL: 'test@example.com',
+   };
+ }
+
+ test('createHeadSliceCommits produces a focused diff while keeping full HEAD code', async (t) => {
+   const repo = await mkdtemp(join(tmpdir(), 'happy-review-head-slice-'));
+   const env = gitEnv();
+
+   const wt = join(repo, 'wt');
+   try {
+     await run('git', ['init', '-q'], { cwd: repo, env });
+     await run('git', ['checkout', '-q', '-b', 'main'], { cwd: repo, env });
+     await mkdir(join(repo, 'expo-app'), { recursive: true });
+     await mkdir(join(repo, 'cli'), { recursive: true });
+     await mkdir(join(repo, 'server'), { recursive: true });
+     await writeFile(join(repo, 'expo-app', 'a.txt'), 'base-a\n', 'utf-8');
+     await writeFile(join(repo, 'cli', 'c.txt'), 'base-c\n', 'utf-8');
+     await writeFile(join(repo, 'server', 'b.txt'), 'base-b\n', 'utf-8');
+     await run('git', ['add', '.'], { cwd: repo, env });
+     await run('git', ['commit', '-q', '-m', 'chore: base'], { cwd: repo, env });
+
+     // HEAD commit with mixed changes across areas.
+     await writeFile(join(repo, 'expo-app', 'a.txt'), 'head-a\n', 'utf-8');
+     await writeFile(join(repo, 'expo-app', 'new.txt'), 'new\n', 'utf-8');
+     await writeFile(join(repo, 'cli', 'c.txt'), 'head-c\n', 'utf-8');
+     await rm(join(repo, 'server', 'b.txt'));
+     await run('git', ['add', '-A'], { cwd: repo, env });
+     await run('git', ['commit', '-q', '-m', 'feat: head'], { cwd: repo, env });
+
+     const headCommit = (await runCapture('git', ['rev-parse', 'HEAD'], { cwd: repo, env })).trim();
+     const baseCommit = (await runCapture('git', ['rev-parse', 'HEAD^'], { cwd: repo, env })).trim();
+
+     // Create an ephemeral worktree to run the slice commit builder in isolation.
+     await run('git', ['worktree', 'add', '--detach', wt, baseCommit], { cwd: repo, env });
+
+     const ops = await getChangedOps({ cwd: repo, baseRef: baseCommit, headRef: headCommit, env });
+     const { baseSliceCommit, headSliceCommit } = await createHeadSliceCommits({
+       cwd: wt,
+       env,
+       baseRef: baseCommit,
+       headCommit,
+       ops,
+       slicePaths: ['expo-app/a.txt', 'expo-app/new.txt'],
+       label: 'expo-app',
+     });
+
+     // Working tree should match full HEAD.
+     const a = await readFile(join(wt, 'expo-app', 'a.txt'), 'utf-8');
+     const c = await readFile(join(wt, 'cli', 'c.txt'), 'utf-8');
+     assert.equal(a, 'head-a\n');
+     assert.equal(c, 'head-c\n');
+     await assert.rejects(async () => await readFile(join(wt, 'server', 'b.txt'), 'utf-8'));
+
+     // Diff between slice commits should include only expo-app changes.
+     const diffNames = (
+       await runCapture('git', ['diff', '--name-only', `${baseSliceCommit}...${headSliceCommit}`], { cwd: wt, env })
+     )
+       .trim()
+       .split('\n')
+       .filter(Boolean)
+       .sort();
+     assert.deepEqual(diffNames, ['expo-app/a.txt', 'expo-app/new.txt']);
+   } finally {
+     try {
+       await run('git', ['worktree', 'remove', '--force', wt], { cwd: repo, env });
+       await run('git', ['worktree', 'prune'], { cwd: repo, env });
+     } catch {
+       // ignore cleanup errors (best-effort)
+     }
+     await rm(repo, { recursive: true, force: true });
+   }
+ });
package/scripts/utils/review/instructions/deep.md
@@ -0,0 +1,20 @@
+ You are running a deep, long-form code review.
+
+ Goals:
+ - Find correctness bugs, edge cases, and regressions vs upstream/main.
+ - Find performance problems (big-O, unnecessary allocations, redundant work) and reliability issues.
+ - Find security and safety issues (filesystem access, env handling, process spawning, injection risks).
+ - Find maintainability issues (duplication, unclear ownership boundaries, inconsistent patterns).
+ - Ensure i18n coverage is complete: do not introduce hardcoded user-visible strings.
+
+ Constraints:
+ - Prefer fixes that are unified/coherent and avoid duplicating logic.
+ - Avoid “brittle” tests that assert on wording/phrasing or hardcoded text; test real behavior and observable outcomes.
+ - Do not suggest broad refactors unless clearly justified and low-risk.
+ - Treat every recommendation as a suggestion: validate against best practices and the existing codebase patterns; do not propose changes that conflict with project invariants.
+ - If a recommendation is uncertain, depends on product/UX decisions, or might have hidden tradeoffs, explicitly mark it as "needs discussion".
+
+ Output:
+ - Provide specific, actionable recommendations with file paths and a brief rationale.
+ - Call out any items that are uncertain or require product/UX decisions separately.
+ - Be exhaustive: include all findings you notice, not only the highest-signal ones.