@whatasoda/agent-tools 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/dist/agents/codex-review/body.md +98 -0
  2. package/dist/agents/team-reviewer/body.md +78 -0
  3. package/dist/agents/team-worker/body.md +46 -0
  4. package/dist/scripts/codex-review.js +237 -0
  5. package/dist/scripts/detect-base-branch.js +185 -0
  6. package/dist/scripts/resolve-session.js +76 -0
  7. package/dist/skills/soda-brief/body.md +73 -0
  8. package/dist/skills/soda-discuss/README.md +25 -0
  9. package/dist/skills/soda-discuss/body.md +216 -0
  10. package/dist/skills/soda-fix/body.md +137 -0
  11. package/dist/skills/soda-plan/body.md +333 -0
  12. package/dist/skills/soda-research/body.md +127 -0
  13. package/dist/skills/soda-review/body.md +165 -0
  14. package/dist/skills/soda-review-todos/body.md +19 -0
  15. package/dist/skills/soda-team-init/body.md +313 -0
  16. package/dist/skills/soda-team-init/references/coordination-files.md +188 -0
  17. package/dist/skills/soda-team-run/body.md +329 -0
  18. package/dist/skills/soda-todo/body.md +86 -0
  19. package/dist/src/cli/commands/agent.js +29 -0
  20. package/dist/src/cli/commands/codex-review.js +14 -0
  21. package/dist/src/cli/commands/decision.js +103 -0
  22. package/dist/src/cli/commands/import.js +174 -0
  23. package/dist/src/cli/commands/link.js +52 -0
  24. package/dist/src/cli/commands/list.js +12 -0
  25. package/dist/src/cli/commands/node.js +118 -0
  26. package/dist/src/cli/commands/review.js +23 -0
  27. package/dist/src/cli/commands/session.js +23 -0
  28. package/dist/src/cli/commands/skill.js +29 -0
  29. package/dist/src/cli/commands/tag.js +31 -0
  30. package/dist/src/cli/helpers.js +51 -0
  31. package/dist/src/cli/index.js +48 -0
  32. package/dist/src/cli.js +59 -0
  33. package/dist/src/core/database.js +209 -0
  34. package/dist/src/core/ensure-dirs.js +8 -0
  35. package/dist/src/core/index.js +1 -0
  36. package/dist/src/core/kinds.js +46 -0
  37. package/dist/src/core/schema.sql +36 -0
  38. package/dist/src/core/schemas.js +41 -0
  39. package/dist/src/core/search.js +80 -0
  40. package/dist/src/core/types.js +0 -0
  41. package/dist/src/tui/App.js +130 -0
  42. package/dist/src/tui/actions.js +9 -0
  43. package/dist/src/tui/components/FilterBar.js +46 -0
  44. package/dist/src/tui/components/LinkList.js +53 -0
  45. package/dist/src/tui/components/NodeDetail.js +111 -0
  46. package/dist/src/tui/components/NodeList.js +62 -0
  47. package/dist/src/tui/components/StatusBar.js +90 -0
  48. package/dist/src/tui/hooks/useNavigation.js +57 -0
  49. package/dist/src/tui/hooks/useNodes.js +44 -0
  50. package/dist/src/tui/index.js +4 -0
  51. package/package.json +29 -0
@@ -0,0 +1,98 @@
1
+
2
+ # Codex Review Agent
3
+
4
+ You are a codex-review agent. Your job is to run the codex review command, parse its output, and — if critical issues are found — revise the content and re-run the review.
5
+
6
+ ## Constraints
7
+
8
+ - Do NOT use AskUserQuestion, EnterPlanMode, or any interactive tools.
9
+ - Only run `sd codex-review` commands and `sd session resolve`. Do NOT run other Bash commands.
10
+ - Use the Write tool to write content to temp files before running Bash commands. Do NOT use heredoc or inline content in Bash commands. Generate a unique suffix (e.g., 8 random hex chars) for each temp file to avoid collisions across concurrent runs.
11
+
12
+ ## Input Format
13
+
14
+ The prompt must contain a `## Codex Review Request` section with the following fields:
15
+
16
+ - **Mode**: `init`, `resume`, or `findings`
17
+ - **Instruction**: The review instruction string (in quotes)
18
+ - **Ref Path**: (optional) Path to a reference CLAUDE.md
19
+ - **Session Path**: (optional) Path to session JSONL file for context-aware review
20
+ - **Session ID**: (required for resume only) Session ID from a prior init call
21
+ - **Review File**: (required for resume only) Review file path from a prior init call
22
+
23
+ Content to review follows under a `### Content` header.
24
+
25
+ ## Workflow
26
+
27
+ ### Step 1: Construct and run the command
28
+
29
+ Parse the `## Codex Review Request` fields and build the Bash command:
30
+
31
+ Generate a unique suffix `<ID>` (8 random hex chars) at the start. Use this same `<ID>` for all temp files in this run.
32
+
33
+ For `init` or `findings` mode:
34
+ 1. Write `<Content>` to `/tmp/codex-review-<ID>.md` using the Write tool.
35
+ 2. Run:
36
+ ```bash
37
+ sd codex-review <Mode> "<Instruction>" --file /tmp/codex-review-<ID>.md [--ref "<Ref Path>"] [--session "<Session Path>"]
38
+ ```
39
+
40
+ For `resume` mode:
41
+ 1. Write `<Content>` to `/tmp/codex-review-<ID>-revised.md` using the Write tool.
42
+ 2. Run:
43
+ ```bash
44
+ sd codex-review resume <Session ID> <Review File> "<Instruction>" [--ref "<Ref Path>"] [--session "<Session Path>"] < /tmp/codex-review-<ID>-revised.md
45
+ ```
46
+
47
+ ### Step 2: Parse the script output
48
+
49
+ Extract from stdout:
50
+ - `review_file:` line — extract the file path
51
+ - `session_id:` line — extract the session ID value
52
+ - Review findings from the codex output (after the `---` separator)
53
+
54
+ ### Step 3: Classify the result
55
+
56
+ - **Skipped**: The script emitted a skip warning, exited with an error, or timed out → go to Step 6
57
+ - **Mode is `findings`**: Skip revision entirely → go to Step 6 with Status: "Findings only"
58
+ - **No critical issues**: The review passed or only found trivial issues → go to Step 6
59
+ - **Critical issues found**: The review identified problems → go to Step 4
60
+
61
+ ### Step 4: Revise
62
+
63
+ Using the critical issues and the original content, produce a revised version that addresses the issues.
64
+
65
+ ### Step 5: Re-review
66
+
67
+ 1. Write revised content to `/tmp/codex-review-<ID>-revised.md` using the Write tool (same `<ID>` from Step 1).
68
+ 2. Run:
69
+ ```bash
70
+ sd codex-review resume <session_id> <review_file> "<same-instruction>" [--ref "<Ref Path>"] [--session "<Session Path>"] < /tmp/codex-review-<ID>-revised.md
71
+ ```
72
+
73
+ - If `session_id` is "none" or unavailable, skip re-review and report the issues as unresolved.
74
+ - Parse the resume output for any remaining critical issues.
75
+
76
+ ### Step 6: Return
77
+
78
+ Return findings in the output format below.
79
+
80
+ ## Output Format
81
+
82
+ ```
83
+ ### Review Result
84
+ - **review_file**: (path from script output)
85
+ - **session_id**: (value or "none")
86
+ - **Status**: No critical issues | Revised and re-reviewed | Critical issues (unresolved) | Skipped | Findings only
87
+ - **Revision Applied**: Yes | No
88
+ ### Issues
89
+ - (remaining issues after revision, or initial issues if no revision, or "none")
90
+ ### Revised Content
91
+ (full revised content if revision was applied — omit this section entirely if no revision)
92
+ ```
93
+
94
+ ## Error Handling
95
+
96
+ - If the init script outputs a skip warning or exits with a non-zero code, report Status as "Skipped".
97
+ - If the resume command fails, report Status as "Critical issues (unresolved)" with the original issues and note the resume failure.
98
+ - If any Bash command fails entirely, report Status as "Skipped" with a brief error description in Issues.
@@ -0,0 +1,78 @@
1
+
2
+ # Team Reviewer Agent
3
+
4
+ You are a code review agent (Reviewer). Your job is to evaluate whether a task implementation meets its acceptance criteria and adheres to architecture decisions.
5
+
6
+ ## Constraints
7
+
8
+ - Do NOT use AskUserQuestion, EnterPlanMode, or any interactive tools.
9
+ - Be specific in your findings — include file paths and line numbers.
10
+ - Run all validation commands from the task definition and verify they pass.
11
+ - You may apply trivial fixes (see Trivial Fix Policy) — but do NOT make non-trivial changes.
12
+
13
+ ## Trivial Fix Policy
14
+
15
+ You may directly fix issues that meet ALL of these criteria:
16
+ - The fix is 1-2 lines
17
+ - The correct change is unambiguous (no judgment required)
18
+ - Examples: typo, import path, config value, missing semicolon
19
+
20
+ If you apply a trivial fix, commit it and record it in the Trivial Fixes Applied section.
21
+ If a fix requires judgment or is more than 2 lines, mark it as FAIL.
22
+
23
+ ## Input Format
24
+
25
+ The prompt must contain the following sections:
26
+
27
+ - `## Task Definition` — contents of the TASK-NNN.md file
28
+ - `## Architecture Decisions` — contents of ARCHITECTURE.md (or relevant ADRs only if file is large)
29
+ - `## Working Directory` — absolute path to the worktree (run validation commands and apply trivial fixes here)
30
+ - `## Changes to Review` — git diff of the Worker's worktree branch vs base
31
+
32
+ ## Workflow
33
+
34
+ 1. Change to the working directory
35
+ 2. Read the git diff from the `## Changes to Review` section to understand the scope of changes
36
+ 3. Run all validation commands from the task definition — if any fail, this is an immediate FAIL signal
37
+ 4. Evaluate the implementation against the Review Criteria below
38
+ 5. Check ADR compliance for each relevant ADR listed in the task's Design Constraints
39
+ 6. Check for implicit design decisions — changes that introduce design judgments not specified in the task definition or Design Constraints
40
+ 7. If trivial fixes are needed and eligible under the Trivial Fix Policy, apply them and commit
41
+ 8. Return results in the output format below
42
+
43
+ ## Review Criteria
44
+
45
+ 1. Does the implementation satisfy all acceptance criteria in the task?
46
+ 2. Does it comply with the relevant ADRs listed in the task's Design Constraints?
47
+ 3. Do all validation commands pass? (Run them yourself — do not trust Worker's self-report)
48
+ 4. Are there obvious bugs, security vulnerabilities, or regressions?
49
+ 5. Is the implementation consistent with existing codebase patterns?
50
+ 6. Does the implementation introduce design decisions not specified in the task definition or Design Constraints? (e.g., new data structures, error handling strategies, API shapes, architectural patterns not mentioned in the task). These are not necessarily wrong, but must be surfaced for review.
51
+
52
+ ## Output Format
53
+
54
+ ```
55
+ ### Verdict: PASS | PASS_WITH_FIX | FAIL | ESCALATE
56
+ ### Summary
57
+ {{1-2 sentence overview}}
58
+ ### Findings
59
+ - **[PASS|FAIL|WARN]** {{criterion}} — {{evidence with file paths}}
60
+ ### ADR Compliance
61
+ - ADR-NNN: {{OK | VIOLATION — description}}
62
+ ### Trivial Fixes Applied
63
+ {{PASS_WITH_FIX or ESCALATE with trivial fixes — list each fix with file path and line number}}
64
+ ### For Next Worker
65
+ {{FAIL only — concrete instructions for re-implementation}}
66
+ ### Escalation
67
+ {{ESCALATE only — problem description for Architect}}
68
+ ### Implicit Decisions Detected
69
+ - **[file:line]** {{decision description}} — not covered by task definition or Design Constraints
70
+ ```
71
+
72
+ ## Verdict Logic for Implicit Decisions
73
+
74
+ When implicit design decisions are detected (criterion 6):
75
+ - If no other FAIL-worthy issues exist → verdict is **ESCALATE**. List implicit decisions in both `### Implicit Decisions Detected` and `### Escalation` sections. If trivial fixes were also applied, include them in `### Trivial Fixes Applied`.
76
+ - If FAIL-worthy issues coexist → verdict remains **FAIL** (FAIL takes priority). Still list implicit decisions in `### Implicit Decisions Detected` and reference them in `### For Next Worker`.
77
+
78
+ > **Why ESCALATE, not FAIL**: Task definitions cannot exhaustively specify every implementation detail. Workers may need to make judgment calls. These decisions should be surfaced for Architect/user review, not treated as implementation failures that trigger re-implementation loops.
@@ -0,0 +1,46 @@
1
+
2
+ # Team Worker Agent
3
+
4
+ You are an implementation agent (Worker). Your job is to implement exactly one task on an isolated git worktree.
5
+
6
+ ## Constraints
7
+
8
+ - Do NOT use AskUserQuestion, EnterPlanMode, or any interactive tools.
9
+ - Do NOT modify files outside the scope defined in the task.
10
+ - Commit your changes with the commit message provided in the prompt.
11
+ - If you encounter a blocker you cannot resolve, write a BLOCKER.md file in the worktree root describing the issue, then stop.
12
+
13
+ ## Input Format
14
+
15
+ The prompt must contain the following sections:
16
+
17
+ - `## Task` — contents of the TASK-NNN.md file (Definition, Design Constraints, Context, Validation, History)
18
+ - `## Commit Message` — imperative mood description for the commit
19
+ - `## Working Directory` — absolute path to the worktree
20
+
21
+ ## Workflow
22
+
23
+ 1. Read the task definition from the `## Task` section
24
+ 2. Change to the working directory
25
+ 3. Implement the task according to the Definition, Design Constraints, and Context
26
+ 4. Run the validation commands specified in the task
27
+ 5. Commit all changes with the provided commit message
28
+ 6. Return results in the output format below
29
+
30
+ If a validation command fails, attempt to fix the issue. If the fix is not straightforward, report it in the Notes section.
31
+
32
+ If you encounter a blocker that prevents you from completing the task:
33
+ 1. Write a `BLOCKER.md` file in the worktree root with a clear description of the issue
34
+ 2. Return with Status: BLOCKED
35
+
36
+ ## Output Format
37
+
38
+ ```
39
+ ### Status: DONE | BLOCKED
40
+ ### Validation Results
41
+ - `{{command}}` — {{PASS | FAIL: details}}
42
+ ### Files Changed
43
+ - `{{path}}` — {{what was changed}}
44
+ ### Notes
45
+ - {{anything the Reviewer should know}}
46
+ ```
@@ -0,0 +1,237 @@
1
+ import { mkdtemp, writeFile } from "node:fs/promises";
2
+ import { tmpdir } from "node:os";
3
+ import { join } from "node:path";
4
+ import { isatty } from "node:tty";
5
/**
 * Parse CLI arguments for the codex-review script.
 *
 * Accepted shapes (argv includes the runtime and script path at [0]/[1]):
 *   init|findings "instruction" [--file <path>] [--ref <path>] [--session <path>]
 *   resume <session-id> <review-file> "instruction" [--ref <path>] [--session <path>]
 *
 * @param {string[]} argv - process.argv-style argument vector.
 * @returns {object|null} Parsed options object, or null when the mode is
 *   missing/unknown (caller prints usage). Flag values are `undefined` when
 *   the flag is absent.
 */
function parseArgs(argv) {
  const args = argv.slice(2);
  const mode = args[0];
  if (!mode || !["init", "resume", "findings"].includes(mode)) {
    return null;
  }
  // Indices consumed by flags and their values; everything else is positional.
  const flagIndices = new Set();
  const takeFlag = (name) => {
    const idx = args.indexOf(name);
    if (idx === -1) {
      return undefined;
    }
    flagIndices.add(idx);
    flagIndices.add(idx + 1);
    return args[idx + 1];
  };
  const refPath = takeFlag("--ref");
  const filePath = takeFlag("--file");
  const sessionJsonlPath = takeFlag("--session");
  const positional = args.filter((_, i) => !flagIndices.has(i));
  // positional[0] is the mode itself.
  if (mode === "init" || mode === "findings") {
    return {
      mode,
      instruction: positional[1],
      refPath,
      filePath,
      sessionJsonlPath
    };
  }
  return {
    mode,
    sessionId: positional[1],
    reviewFile: positional[2],
    instruction: positional[3],
    refPath,
    filePath,
    sessionJsonlPath
  };
}
52
/**
 * Read all of stdin as a UTF-8 string.
 * Returns "" immediately when stdin is an interactive terminal (nothing piped).
 */
async function readStdin() {
  if (isatty(0)) {
    return "";
  }
  const parts = [];
  for await (const part of Bun.stdin.stream()) {
    parts.push(part);
  }
  return Buffer.concat(parts).toString("utf-8");
}
61
/**
 * Resolve the reference CLAUDE.md path.
 * An explicit --ref value wins; otherwise use CLAUDE.md at the git repo root,
 * falling back to a CWD-relative "CLAUDE.md" when not inside a repo.
 */
async function getRefPath(refPath) {
  if (refPath) {
    return refPath;
  }
  const proc = Bun.spawn(["git", "rev-parse", "--show-toplevel"], {
    stdout: "pipe",
    stderr: "pipe"
  });
  const repoRoot = (await new Response(proc.stdout).text()).trim();
  const exitCode = await proc.exited;
  return exitCode === 0 && repoRoot ? `${repoRoot}/CLAUDE.md` : "CLAUDE.md";
}
75
/**
 * Spawn the `codex` CLI with the given arguments.
 * stderr streams through to the user; stdout is captured for parsing.
 * @returns {{output: string, exitCode: number}}
 */
async function runCodex(args) {
  const proc = Bun.spawn(["codex", ...args], {
    stdout: "pipe",
    stderr: "inherit"
  });
  const [output, exitCode] = await Promise.all([
    new Response(proc.stdout).text(),
    proc.exited
  ]);
  return { output, exitCode };
}
84
/**
 * Flatten a Claude message `content` field to plain text.
 * Content is either a plain string or an array of blocks; only non-empty
 * `text` blocks are kept, joined by a blank line.
 */
function extractTextBlocks(content) {
  if (typeof content === "string") {
    return content;
  }
  const texts = [];
  for (const block of content) {
    if (block.type === "text" && block.text) {
      texts.push(block.text);
    }
  }
  return texts.join("\n\n");
}
91
/**
 * Condense a session JSONL transcript into a markdown digest of user and
 * assistant turns, separated by horizontal rules.
 * Returns null when the file is missing (with a warning) or contains no
 * usable turns. Unparsable lines are skipped silently.
 */
async function preprocessSession(jsonlPath) {
  const file = Bun.file(jsonlPath);
  if (!await file.exists()) {
    console.error(`⚠ セッションファイルが見つかりません: ${jsonlPath}`);
    return null;
  }
  const turns = [];
  for (const line of (await file.text()).split("\n")) {
    if (!line) {
      continue;
    }
    let entry;
    try {
      entry = JSON.parse(line);
    } catch {
      continue; // tolerate malformed JSONL lines
    }
    const header =
      entry.type === "user" ? "## User" :
      entry.type === "assistant" ? "## Assistant" :
      null;
    if (!header || !entry.message) {
      continue;
    }
    const content = extractTextBlocks(entry.message.content);
    if (content) {
      turns.push(`${header}\n${content}`);
    }
  }
  return turns.length > 0 ? turns.join("\n\n---\n\n") : null;
}
125
/**
 * Build the optional "(session: <file>)" suffix for the codex prompt.
 * Writes the session digest to session.md inside tmpDir (reused when given,
 * otherwise a fresh private temp dir). Returns "" when there is no session
 * path or no usable digest.
 */
async function buildSessionPart(sessionJsonlPath, tmpDir) {
  if (!sessionJsonlPath) {
    return "";
  }
  const digest = await preprocessSession(sessionJsonlPath);
  if (!digest) {
    return "";
  }
  const dir = tmpDir ?? await mkdtemp(join(tmpdir(), "codex-review-"));
  const sessionDigestFile = join(dir, "session.md");
  await writeFile(sessionDigestFile, digest);
  return ` (session: ${sessionDigestFile})`;
}
136
/**
 * Pull the session id out of codex output (a line like "session id: <token>").
 * Case-insensitive; returns null when no id is present.
 */
function extractSessionId(output) {
  const match = /session id:\s*(\S+)/i.exec(output);
  return match?.[1] ?? null;
}
140
// Print CLI usage for all three modes to stderr.
function printUsage() {
  const lines = [
    "Usage:",
    ' bun codex-review.ts init "instruction" [--file <path>] [--ref <path>] [--session <path>]',
    ' bun codex-review.ts findings "instruction" [--file <path>] [--ref <path>] [--session <path>]',
    ' bun codex-review.ts resume <session-id> <review-file> "instruction" [--ref <path>] [--session <path>]'
  ];
  for (const line of lines) {
    console.error(line);
  }
}
146
/**
 * CLI entry point. Dispatches on the parsed mode:
 * - "init"/"findings": run a fresh codex review over --file content or stdin.
 * - "resume": continue a prior codex session, optionally overwriting the
 *   review file with revised content from stdin.
 *
 * Output contract (parsed by the codex-review agent): "review_file:" and
 * "session_id:" lines, then a "---" separator, then raw codex output.
 * Any codex failure is reported as a skip warning on stderr with exit 0,
 * so callers treat it as "Skipped" rather than a hard error.
 */
async function main() {
  const parsed = parseArgs(process.argv);
  if (!parsed) {
    printUsage();
    process.exit(1);
  }
  // Resolve the reference CLAUDE.md (explicit --ref wins, else repo root).
  const refPath = await getRefPath(parsed.refPath);
  if (parsed.mode === "init" || parsed.mode === "findings") {
    if (!parsed.instruction) {
      printUsage();
      process.exit(1);
    }
    let reviewFile;
    let tmpDir;
    if (parsed.filePath) {
      // Content already on disk: review it in place.
      reviewFile = parsed.filePath;
    } else {
      // No --file: take content from stdin and stage it in a temp dir.
      const content = await readStdin();
      if (!content.trim()) {
        console.error("⚠ codex レビューをスキップします(入力なし)");
        process.exit(0);
      }
      tmpDir = await mkdtemp(join(tmpdir(), "codex-review-"));
      reviewFile = join(tmpDir, "review.md");
      await writeFile(reviewFile, content);
    }
    // Optional session digest is appended to the prompt as a file reference.
    const sessionPart = await buildSessionPart(parsed.sessionJsonlPath, tmpDir);
    const prompt = `${parsed.instruction}: ${reviewFile} (ref: ${refPath})${sessionPart}`;
    try {
      const { output, exitCode } = await runCodex([
        "exec",
        "-m",
        "gpt-5.4",
        prompt
      ]);
      if (exitCode !== 0) {
        // 126/127 = codex missing or not executable; anything else is generic failure.
        if (exitCode === 126 || exitCode === 127) {
          console.error(`⚠ codex レビューをスキップします(codex コマンドが見つからない、または実行権限がありません — exit code: ${exitCode})`);
        } else {
          console.error(`⚠ codex レビューをスキップします(コマンド実行失敗 — exit code: ${exitCode})`);
        }
        // Still report the review file so callers can inspect the staged content.
        console.log(`review_file: ${reviewFile}`);
        process.exit(0);
      }
      const sessionId = extractSessionId(output);
      console.log(`review_file: ${reviewFile}`);
      if (sessionId) {
        console.log(`session_id: ${sessionId}`);
      }
      console.log("---");
      console.log(output);
    } catch {
      // Spawning codex itself threw (e.g. not installed): treat as a skip.
      console.error("⚠ codex レビューをスキップします(コマンド実行失敗)");
      console.log(`review_file: ${reviewFile}`);
      process.exit(0);
    }
  } else if (parsed.mode === "resume") {
    if (!parsed.sessionId || !parsed.reviewFile || !parsed.instruction) {
      printUsage();
      process.exit(1);
    }
    // Revised content on stdin overwrites the original review file in place.
    const content = await readStdin();
    if (content.trim()) {
      await writeFile(parsed.reviewFile, content);
    }
    const sessionPart = await buildSessionPart(parsed.sessionJsonlPath);
    const prompt = `${parsed.instruction}: ${parsed.reviewFile} (ref: ${refPath})${sessionPart}`;
    try {
      const { output, exitCode } = await runCodex([
        "exec",
        "resume",
        "-m",
        "gpt-5.4",
        parsed.sessionId,
        prompt
      ]);
      if (exitCode !== 0) {
        if (exitCode === 126 || exitCode === 127) {
          console.error(`⚠ codex 再レビューをスキップします(codex コマンドが見つからない、または実行権限がありません — exit code: ${exitCode})`);
        } else {
          console.error(`⚠ codex 再レビューをスキップします(コマンド実行失敗 — exit code: ${exitCode})`);
        }
        process.exit(0);
      }
      console.log(output);
    } catch {
      console.error("⚠ codex 再レビューをスキップします(コマンド実行失敗)");
      process.exit(0);
    }
  }
}
237
// Handle unexpected rejections instead of leaving the promise floating
// (matches detect-base-branch.js, which already attaches a .catch handler).
main().catch((e) => {
  console.error(`⚠ codex レビューをスキップします(予期しないエラー: ${e instanceof Error ? e.message : String(e)})`);
  process.exit(1);
});
@@ -0,0 +1,185 @@
1
+ import { $ } from "bun";
2
// Branch names treated as "base" branches, in priority order.
// A trailing "/*" matches any branch with that prefix (e.g. release/1.2).
const BASE_BRANCH_PATTERNS = ["release/*", "epic/*", "main"];
3
// Current branch name; empty string when HEAD is detached.
async function getCurrentBranch() {
  return (await $`git branch --show-current`.text()).trim();
}
7
/**
 * Base branch of the open PR for the current branch, via the gh CLI.
 * Returns null when gh fails (no PR, gh missing, not authenticated) or
 * reports an empty base ref.
 */
async function getPRBaseBranch() {
  try {
    const branch = (await $`gh pr view --json baseRefName -q .baseRefName`.text()).trim();
    return branch !== "" ? branch : null;
  } catch {
    return null;
  }
}
16
// Remote branch names with the leading "origin/" stripped.
async function getRemoteBranches() {
  const raw = await $`git branch -r --format='%(refname:short)'`.text();
  const branches = [];
  for (const name of raw.trim().split("\n")) {
    if (name) {
      branches.push(name.replace(/^origin\//, ""));
    }
  }
  return branches;
}
21
/**
 * Merge-base commit of HEAD and the given branch.
 * Tries the remote-tracking ref first, then a local branch of the same name.
 * Returns null when neither resolves.
 */
async function getMergeBase(branch) {
  try {
    let out;
    try {
      out = await $`git merge-base HEAD origin/${branch}`.text();
    } catch {
      out = await $`git merge-base HEAD ${branch}`.text();
    }
    const sha = out.trim();
    return sha !== "" ? sha : null;
  } catch {
    return null;
  }
}
34
// Number of commits in from..to; 0 on git failure or unparsable output.
async function getCommitCount(from, to) {
  try {
    const raw = (await $`git rev-list --count ${from}..${to}`.text()).trim();
    const count = Number.parseInt(raw, 10);
    return Number.isNaN(count) ? 0 : count;
  } catch {
    return 0;
  }
}
42
/**
 * Files changed between mergeBase and HEAD, from `git diff --name-status`.
 * Each entry is { status, file } — renames/copies (R/C) additionally carry
 * oldFile. Status is normalized to its first letter (e.g. "R100" → "R").
 */
async function getChangedFiles(mergeBase) {
  const raw = await $`git diff --name-status ${mergeBase}..HEAD`.text();
  const entries = [];
  for (const line of raw.trim().split("\n")) {
    if (!line) {
      continue;
    }
    const [statusCode, ...paths] = line.split("\t");
    const status = statusCode[0];
    if (status === "R" || status === "C") {
      // Rename/copy lines carry two paths: source then destination.
      entries.push({ status, oldFile: paths[0], file: paths[1] });
    } else {
      entries.push({ status, file: paths[0] });
    }
  }
  return entries;
}
62
/**
 * Paths changed on the base branch since the merge-base.
 * Prefers the remote-tracking ref, falls back to the local branch;
 * returns [] when the diff cannot be computed.
 */
async function getBaseChangedFiles(mergeBase, baseBranch) {
  try {
    let raw;
    try {
      raw = await $`git diff --name-only ${mergeBase}..origin/${baseBranch}`.text();
    } catch {
      raw = await $`git diff --name-only ${mergeBase}..${baseBranch}`.text();
    }
    return raw.trim().split("\n").filter(Boolean);
  } catch {
    return [];
  }
}
76
/**
 * Paths changed on both sides — files the base branch touched that this
 * branch also touched (counting both sides of a rename/copy).
 * @param {Array<{file: string, oldFile?: string}>} yourFiles
 * @param {string[]} baseFiles
 * @returns {string[]} base paths that overlap with this branch's changes.
 */
function detectConflicts(yourFiles, baseFiles) {
  const touched = new Set();
  for (const entry of yourFiles) {
    touched.add(entry.file);
    if (entry.oldFile) {
      touched.add(entry.oldFile);
    }
  }
  return baseFiles.filter((path) => touched.has(path));
}
86
/**
 * Among candidate base branches, pick the one whose merge-base is the fewest
 * commits behind HEAD. Candidates without a merge-base are skipped; returns
 * null when none resolve.
 */
async function findNearestBaseBranch(candidates) {
  let best = null;
  for (const branch of candidates) {
    const mergeBase = await getMergeBase(branch);
    if (!mergeBase) {
      continue;
    }
    const distance = await getCommitCount(mergeBase, "HEAD");
    if (best === null || distance < best.distance) {
      best = { branch, mergeBase, distance };
    }
  }
  if (best === null) {
    return null;
  }
  return { branch: best.branch, mergeBase: best.mergeBase };
}
99
/**
 * Expand BASE_BRANCH_PATTERNS against the remote branch list.
 * Wildcard patterns ("x/*") collect every remote branch with that prefix;
 * literal patterns are included only when the remote branch exists.
 */
async function findBaseBranchCandidates() {
  const remoteBranches = await getRemoteBranches();
  const candidates = [];
  for (const pattern of BASE_BRANCH_PATTERNS) {
    if (!pattern.includes("*")) {
      if (remoteBranches.includes(pattern)) {
        candidates.push(pattern);
      }
      continue;
    }
    const prefix = pattern.replace("/*", "/");
    for (const branch of remoteBranches) {
      if (branch.startsWith(prefix)) {
        candidates.push(branch);
      }
    }
  }
  return candidates;
}
115
/**
 * Entry point: detect the base branch for the current work branch and print
 * a JSON summary (baseBranch, mergeBase, commit counts, changed files,
 * potential conflicts, and ready-made diff/log commands) to stdout.
 * Error conditions are also reported as JSON ({ error: ... }) on stdout,
 * since consumers parse stdout.
 */
async function main() {
  const currentBranch = await getCurrentBranch();
  // Refuse to run while sitting on a base branch (or detached HEAD) —
  // there is no "work branch" to diff against.
  const isBaseBranch = BASE_BRANCH_PATTERNS.some((pattern) => {
    if (pattern.includes("*")) {
      const prefix = pattern.replace("/*", "/");
      return currentBranch.startsWith(prefix);
    }
    return currentBranch === pattern;
  });
  if (isBaseBranch || currentBranch === "") {
    const result = {
      error: `Cannot detect changes: currently on base branch "${currentBranch || "detached HEAD"}"`
    };
    console.log(JSON.stringify(result, null, 2));
    return;
  }
  // Prefer the open PR's declared base; otherwise detect the nearest
  // candidate base branch by merge-base distance.
  let baseBranch = await getPRBaseBranch();
  let source = "pr";
  let detectedMergeBase = null;
  if (!baseBranch) {
    source = "detected";
    const candidates = await findBaseBranchCandidates();
    const nearest = await findNearestBaseBranch(candidates);
    if (nearest) {
      baseBranch = nearest.branch;
      detectedMergeBase = nearest.mergeBase;
    } else {
      // Last-resort default when nothing matched.
      baseBranch = "main";
    }
  }
  // Detection already computed the merge-base; the PR path still needs it.
  const mergeBase = detectedMergeBase ?? await getMergeBase(baseBranch);
  if (!mergeBase) {
    const result = {
      error: `Cannot find merge-base with "${baseBranch}"`
    };
    console.log(JSON.stringify(result, null, 2));
    return;
  }
  const yourCommits = await getCommitCount(mergeBase, "HEAD");
  // How far the base has moved since we branched; fall back to the local
  // branch when the remote-tracking count is zero (e.g. origin not fetched).
  let baseAhead = await getCommitCount(mergeBase, `origin/${baseBranch}`);
  if (baseAhead === 0) {
    const localBaseAhead = await getCommitCount(mergeBase, baseBranch);
    if (localBaseAhead > 0) {
      baseAhead = localBaseAhead;
    }
  }
  const changedFiles = await getChangedFiles(mergeBase);
  const baseChangedFiles = await getBaseChangedFiles(mergeBase, baseBranch);
  const potentialConflicts = detectConflicts(changedFiles, baseChangedFiles);
  const output = {
    baseBranch,
    mergeBase: mergeBase.substring(0, 7),
    source,
    yourCommits,
    baseAhead,
    changedFiles,
    potentialConflicts,
    // Copy-pasteable commands for inspecting the branch's changes.
    commands: {
      diff: `git diff ${mergeBase.substring(0, 7)}..HEAD`,
      log: `git log --oneline ${mergeBase.substring(0, 7)}..HEAD`
    }
  };
  console.log(JSON.stringify(output, null, 2));
}
179
// Surface unexpected failures as a JSON error payload on stdout (consumers
// parse stdout), and exit non-zero so callers can tell detection failed.
main().catch((e) => {
  const result = {
    error: `Unexpected error: ${e instanceof Error ? e.message : String(e)}`
  };
  console.log(JSON.stringify(result, null, 2));
  process.exit(1);
});