@delegance/claude-autopilot 2.2.0 → 2.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,26 @@
1
1
  # Changelog
2
2
 
3
+ ## [2.4.0] — 2026-04-22
4
+
5
+ ### Added
6
+ - **`ignore:` config key** — embed suppression rules in `autopilot.config.yaml` via `ignore: ['tests/**', { rule: hardcoded-secrets, path: src/vendor/** }]`; merged with `.autopilot-ignore` file rules at run time
7
+ - **Per-run cost log** — appends `{timestamp, files, inputTokens, outputTokens, costUSD, durationMs}` to `.autopilot-cache/costs.jsonl` after every run; corrupt lines skipped on read; `readCostLog()` exported for tooling
8
+ - **`--inline-comments`** — posts a GitHub PR review with per-line inline comments for every finding that has a `file:line`; re-runs dismiss the previous autopilot review before posting a new one; `autopilot ci` enables this by default (`--no-inline-comments` to opt out)
9
+ - **`reviewStrategy: auto-diff`** — tries diff first, falls back to full-file `auto` when diff is empty (new files, no git history); `--diff` flag still forces pure diff mode
10
+ - `src/cli/pr-review-comments.ts` — `postReviewComments()` using `gh api repos/{nwo}/pulls/{pr}/reviews`
11
+ - `src/core/persist/cost-log.ts` — `appendCostLog()`, `readCostLog()`
12
+ - 9 new tests — **257 total**
13
+
14
+ ## [2.3.0] — 2026-04-22
15
+
16
+ ### Added
17
+ - **Parallel chunk review** — file-level chunks are now reviewed concurrently (default parallelism: 3, configurable via `chunking.parallelism`); serial fallback preserved when `cost.budgetUSD` is set so budget enforcement remains accurate
18
+ - **`.autopilot-ignore`** — project-level suppression file; format: `<rule-id> <glob>` or bare `<glob>` (matches any finding on that path); comments and blank lines ignored; suppressed count printed dim after run
19
+ - **`--delta` mode** — only reports findings new since the previous run; pre-existing findings are hidden and the count is printed dim; findings always persisted to `.autopilot-cache/findings.json` after each run (gitignored)
20
+ - `src/core/ignore/index.ts` — `loadIgnoreRules()`, `applyIgnoreRules()`
21
+ - `src/core/persist/findings-cache.ts` — `loadCachedFindings()`, `saveCachedFindings()`, `filterNewFindings()`
22
+ - 15 new tests — **248 total**
23
+
3
24
  ## [2.2.0] — 2026-04-22
4
25
 
5
26
  ### Added
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@delegance/claude-autopilot",
3
- "version": "2.2.0",
3
+ "version": "2.4.0",
4
4
  "type": "module",
5
5
  "description": "Claude Code automation pipeline: spec → plan → implement → validate → PR",
6
6
  "keywords": [
package/src/cli/ci.ts CHANGED
@@ -7,6 +7,7 @@ export interface CiCommandOptions {
7
7
  postComments?: boolean;
8
8
  sarifOutput?: string;
9
9
  diff?: boolean;
10
+ inlineComments?: boolean;
10
11
  }
11
12
 
12
13
  /**
@@ -36,5 +37,6 @@ export async function runCi(options: CiCommandOptions = {}): Promise<number> {
36
37
  format: 'sarif',
37
38
  outputPath: sarifOutput,
38
39
  diff: options.diff,
40
+ inlineComments: options.inlineComments ?? true,
39
41
  });
40
42
  }
package/src/cli/index.ts CHANGED
@@ -67,6 +67,8 @@ Options (run):
67
67
  --files <a,b,c> Explicit comma-separated file list (skips git detection)
68
68
  --dry-run Show what would run without executing
69
69
  --diff Send git diff hunks instead of full files (~70% fewer tokens)
70
+ --delta Only report findings new since last run (suppress pre-existing)
71
+ --inline-comments Post per-line review comments on the PR diff
70
72
  --post-comments Post/update a summary comment on the open PR
71
73
  --format <text|sarif> Output format (default: text)
72
74
  --output <path> Output file path (required with --format sarif)
@@ -122,6 +124,8 @@ switch (subcommand) {
122
124
  const filesArg = flag('files');
123
125
  const dryRun = boolFlag('dry-run');
124
126
  const diff = boolFlag('diff');
127
+ const delta = boolFlag('delta');
128
+ const inlineComments = boolFlag('inline-comments');
125
129
  const postComments = boolFlag('post-comments');
126
130
  const formatArg = flag('format');
127
131
  const outputPath = flag('output');
@@ -141,6 +145,8 @@ switch (subcommand) {
141
145
  files: filesArg ? filesArg.split(',').map(f => f.trim()) : undefined,
142
146
  dryRun,
143
147
  diff,
148
+ delta,
149
+ inlineComments,
144
150
  postComments,
145
151
  format: formatArg as 'text' | 'sarif' | undefined,
146
152
  outputPath,
@@ -154,12 +160,14 @@ switch (subcommand) {
154
160
  const config = flag('config');
155
161
  const outputPath = flag('output');
156
162
  const noPostComments = boolFlag('no-post-comments');
163
+ const noInlineComments = boolFlag('no-inline-comments');
157
164
  const diff = boolFlag('diff');
158
165
  const code = await runCi({
159
166
  configPath: config,
160
167
  base,
161
168
  sarifOutput: outputPath,
162
169
  postComments: noPostComments ? false : undefined,
170
+ inlineComments: noInlineComments ? false : undefined,
163
171
  diff,
164
172
  });
165
173
  process.exit(code);
@@ -0,0 +1,92 @@
1
+ import { runSafe } from '../core/shell.ts';
2
+ import type { Finding } from '../core/findings/types.ts';
3
+
4
+ const REVIEW_MARKER = '<!-- autopilot-inline -->';
5
+
6
+ function getRepoNwo(cwd: string): string | null {
7
+ const raw = runSafe('gh', ['repo', 'view', '--json', 'nameWithOwner', '--jq', '.nameWithOwner'], { cwd });
8
+ return raw ? raw.trim() : null;
9
+ }
10
+
11
+ /** Returns the id of a previous autopilot review on this PR (identified by REVIEW_MARKER), or null if none — used to dismiss it on re-runs so reviews don't stack. */
12
+ function findExistingReviewId(pr: number, nwo: string, cwd: string): number | null {
13
+ const raw = runSafe('gh', [
14
+ 'api', `repos/${nwo}/pulls/${pr}/reviews`,
15
+ '--jq', `[.[] | select(.body | startswith("${REVIEW_MARKER}")) | .id] | first`,
16
+ ], { cwd });
17
+ if (!raw) return null;
18
+ const n = parseInt(raw.trim(), 10);
19
+ return isNaN(n) ? null : n;
20
+ }
21
+
22
+ export interface PostReviewCommentsResult {
23
+ posted: number;
24
+ skipped: number; // findings with no line number
25
+ }
26
+
27
+ /**
28
+ * Posts (or re-submits) a PR review with inline comments for each finding
29
+ * that has a file + line number. Findings without line numbers are skipped.
30
+ * Re-runs dismiss the previous autopilot review first to avoid stacking.
31
+ */
32
+ export async function postReviewComments(
33
+ pr: number,
34
+ findings: Finding[],
35
+ cwd: string,
36
+ ): Promise<PostReviewCommentsResult> {
37
+ const nwo = getRepoNwo(cwd);
38
+ if (!nwo) throw new Error('Could not determine repository name — is gh authenticated?');
39
+
40
+ const commentable = findings.filter(
41
+ f => f.line !== undefined && f.file && f.file !== '<unspecified>' && f.file !== '<pipeline>',
42
+ );
43
+ const skipped = findings.length - commentable.length;
44
+
45
+ if (commentable.length === 0) return { posted: 0, skipped };
46
+
47
+ // Dismiss existing review so we don't stack on re-runs. NOTE(review): this review is submitted with event 'COMMENT', and GitHub's dismissals endpoint only dismisses APPROVED/CHANGES_REQUESTED reviews (422 for COMMENTED state); runSafe likely swallows that failure, so stale reviews may still stack — verify against the GitHub REST docs.
48
+ const existingId = findExistingReviewId(pr, nwo, cwd);
49
+ if (existingId) {
50
+ runSafe('gh', [
51
+ 'api', `repos/${nwo}/pulls/${pr}/reviews/${existingId}/dismissals`,
52
+ '--method', 'PUT',
53
+ '--field', 'message=Superseded by updated autopilot review',
54
+ ], { cwd });
55
+ }
56
+
57
+ // Build review body
58
+ const body = [
59
+ REVIEW_MARKER,
60
+ `**Autopilot** found ${commentable.length} inline finding${commentable.length !== 1 ? 's' : ''}.`,
61
+ ].join('\n');
62
+
63
+ // Build comments array as JSON
64
+ const comments = commentable.map(f => ({
65
+ path: f.file,
66
+ line: f.line,
67
+ side: 'RIGHT',
68
+ body: formatFindingBody(f),
69
+ }));
70
+
71
+ // gh api doesn't support array fields well via --field, use --input with JSON
72
+ const payload = JSON.stringify({ body, event: 'COMMENT', comments });
73
+ const result = runSafe('gh', [
74
+ 'api', `repos/${nwo}/pulls/${pr}/reviews`,
75
+ '--method', 'POST',
76
+ '--input', '-',
77
+ ], { cwd, input: payload });
78
+
79
+ if (!result) throw new Error('Failed to post review — gh api returned no output');
80
+
81
+ return { posted: commentable.length, skipped };
82
+ }
83
+
84
+ function formatFindingBody(f: Finding): string {
85
+ const sev = f.severity === 'critical' ? '🚨 **CRITICAL**'
86
+ : f.severity === 'warning' ? '⚠️ **Warning**'
87
+ : '💡 **Note**';
88
+ const lines = [`${sev} — ${f.message}`];
89
+ if (f.suggestion) lines.push(`\n> **Suggestion:** ${f.suggestion}`);
90
+ lines.push(`\n*[@delegance/claude-autopilot](https://github.com/axledbetter/claude-autopilot)*`);
91
+ return lines.join('');
92
+ }
package/src/cli/run.ts CHANGED
@@ -38,6 +38,10 @@ import { detectProtectedPaths } from '../core/detect/protected-paths.ts';
38
38
  import { detectGitContext } from '../core/detect/git-context.ts';
39
39
  import { detectProject } from './detector.ts';
40
40
  import { detectPrNumber, formatComment, postPrComment } from './pr-comment.ts';
41
+ import { postReviewComments } from './pr-review-comments.ts';
42
+ import { loadIgnoreRules, parseConfigIgnore, applyIgnoreRules } from '../core/ignore/index.ts';
43
+ import { loadCachedFindings, saveCachedFindings, filterNewFindings } from '../core/persist/findings-cache.ts';
44
+ import { appendCostLog } from '../core/persist/cost-log.ts';
41
45
 
42
46
  function readToolVersion(): string {
43
47
  const pkgPath = path.join(path.dirname(fileURLToPath(import.meta.url)), '../../package.json');
@@ -64,7 +68,9 @@ export interface RunCommandOptions {
64
68
  base?: string; // git base ref (default HEAD~1)
65
69
  files?: string[]; // explicit file list (skips git detection)
66
70
  dryRun?: boolean; // skip review, print what would run
67
- diff?: boolean; // use diff strategy (send git hunks instead of full files)
71
+ diff?: boolean; // use diff strategy (send git hunks instead of full files)
72
+ delta?: boolean; // only report findings not present in last run's baseline
73
+ inlineComments?: boolean; // post per-line review comments on the PR diff
68
74
  format?: 'text' | 'sarif';
69
75
  outputPath?: string;
70
76
  postComments?: boolean; // post/update summary comment on the open PR
@@ -187,6 +193,47 @@ export async function runCommand(options: RunCommandOptions = {}): Promise<numbe
187
193
  console.log('');
188
194
  const result = await runAutopilot(input);
189
195
 
196
+ // Apply .autopilot-ignore + config ignore: rules
197
+ const ignoreRules = [...loadIgnoreRules(cwd), ...parseConfigIgnore(config.ignore)];
198
+ if (ignoreRules.length > 0) {
199
+ const before = result.allFindings.length;
200
+ result.allFindings = applyIgnoreRules(result.allFindings, ignoreRules);
201
+ for (const phase of result.phases) {
202
+ phase.findings = applyIgnoreRules(phase.findings, ignoreRules);
203
+ }
204
+ const suppressed = before - result.allFindings.length;
205
+ if (suppressed > 0) {
206
+ console.log(fmt('dim', ` [run] ${suppressed} finding${suppressed !== 1 ? 's' : ''} suppressed by .autopilot-ignore`));
207
+ }
208
+ }
209
+
210
+ // Delta mode: filter to only new findings vs last run's baseline, then persist
211
+ if (options.delta) {
212
+ const cached = loadCachedFindings(cwd);
213
+ const before = result.allFindings.length;
214
+ result.allFindings = filterNewFindings(result.allFindings, cached);
215
+ for (const phase of result.phases) {
216
+ phase.findings = filterNewFindings(phase.findings, cached);
217
+ }
218
+ const existing = before - result.allFindings.length;
219
+ if (existing > 0) {
220
+ console.log(fmt('dim', ` [run] ${existing} pre-existing finding${existing !== 1 ? 's' : ''} hidden (--delta mode)`));
221
+ }
222
+ }
223
+ // Persist the new baseline. BUG(review): in --delta mode result.allFindings has already been replaced by the filtered list above, so pre-existing findings are dropped from the saved baseline and will resurface as "new" on the next run — snapshot the unfiltered findings before filtering and persist that snapshot here instead.
224
+ saveCachedFindings(cwd, result.allFindings);
225
+
226
+ // Append to per-run cost log
227
+ const reviewPhase = result.phases.find(p => p.phase === 'review') as { usage?: { input: number; output: number } } | undefined;
228
+ appendCostLog(cwd, {
229
+ timestamp: new Date().toISOString(),
230
+ files: touchedFiles.length,
231
+ inputTokens: reviewPhase?.usage?.input ?? 0,
232
+ outputTokens: reviewPhase?.usage?.output ?? 0,
233
+ costUSD: result.totalCostUSD ?? 0,
234
+ durationMs: result.durationMs,
235
+ });
236
+
190
237
  // emitAnnotations is a no-op unless GITHUB_ACTIONS=true
191
238
  emitAnnotations(result.allFindings);
192
239
 
@@ -198,6 +245,21 @@ export async function runCommand(options: RunCommandOptions = {}): Promise<numbe
198
245
  console.log(fmt('dim', `[run] SARIF written to ${options.outputPath}`));
199
246
  }
200
247
 
248
+ // Post inline PR review comments if requested
249
+ if (options.inlineComments) {
250
+ const pr = detectPrNumber(cwd);
251
+ if (!pr) {
252
+ console.log(fmt('yellow', ' [run] --inline-comments: no open PR found — skipping'));
253
+ } else {
254
+ try {
255
+ const { posted, skipped } = await postReviewComments(pr, result.allFindings, cwd);
256
+ console.log(fmt('dim', ` [run] PR #${pr} inline review: ${posted} comment${posted !== 1 ? 's' : ''} posted${skipped > 0 ? `, ${skipped} skipped (no line number)` : ''}`));
257
+ } catch (err) {
258
+ console.error(fmt('yellow', ` [run] Failed to post inline comments: ${err instanceof Error ? err.message : String(err)}`));
259
+ }
260
+ }
261
+ }
262
+
201
263
  // Post PR comment if requested
202
264
  if (options.postComments) {
203
265
  const pr = detectPrNumber(cwd);
@@ -13,12 +13,12 @@ export interface ReviewChunk {
13
13
 
14
14
  export interface BuildChunksInput {
15
15
  touchedFiles: string[];
16
- strategy: 'auto' | 'single-pass' | 'file-level' | 'diff';
16
+ strategy: 'auto' | 'single-pass' | 'file-level' | 'diff' | 'auto-diff';
17
17
  chunking?: AutopilotConfig['chunking'];
18
18
  engine: ReviewEngine;
19
19
  cwd?: string;
20
20
  protectedPaths?: string[];
21
- base?: string; // git base ref — required for 'diff' strategy
21
+ base?: string; // git base ref — required for 'diff'/'auto-diff' strategy
22
22
  }
23
23
 
24
24
  const DEFAULT_SMALL_TIER_TOKENS = 8000;
@@ -33,6 +33,14 @@ export async function buildReviewChunks(input: BuildChunksInput): Promise<Review
33
33
  return buildDiffChunks(input);
34
34
  }
35
35
 
36
+ // auto-diff: try diff first; fall back to full-file auto if diff is empty. NOTE(review): buildDiffChunks is called below without 'await' — if it is async (the 'diff' branch above returns it un-awaited, which hides this), diffChunks would be a Promise and '.length > 0' always false, silently disabling the diff path; confirm its signature and add 'await' if needed.
37
+ // (handles new files, initial commits, or repos with no base ref)
38
+ if (input.strategy === 'auto-diff') {
39
+ const diffChunks = buildDiffChunks(input);
40
+ if (diffChunks.length > 0) return diffChunks;
41
+ // fall through to auto with full files
42
+ }
43
+
36
44
  const ranked = rankByRisk(input.touchedFiles, { protectedPaths: input.protectedPaths });
37
45
  const fileContents = await readFiles(ranked, input.cwd);
38
46
 
@@ -27,7 +27,8 @@ export interface AutopilotConfig {
27
27
  maxCodexRetries?: number;
28
28
  maxBugbotRounds?: number;
29
29
  };
30
- reviewStrategy?: 'auto' | 'single-pass' | 'file-level' | 'diff';
30
+ ignore?: Array<string | { rule?: string; path: string }>;
31
+ reviewStrategy?: 'auto' | 'single-pass' | 'file-level' | 'diff' | 'auto-diff';
31
32
  chunking?: {
32
33
  smallTierMaxTokens?: number;
33
34
  partialReviewTokens?: number;
@@ -0,0 +1,54 @@
1
+ import * as fs from 'node:fs';
2
+ import * as path from 'node:path';
3
+ import { minimatch } from 'minimatch';
4
+ import type { Finding } from '../findings/types.ts';
5
+ import type { AutopilotConfig } from '../config/types.ts';
6
+
7
+ export interface IgnoreRule {
8
+ ruleId: string | '*'; // finding id prefix or '*' for any
9
+ pathGlob: string | null; // null = match all paths
10
+ }
11
+
12
+ export function loadIgnoreRules(cwd: string): IgnoreRule[] {
13
+ const filePath = path.join(cwd, '.autopilot-ignore');
14
+ if (!fs.existsSync(filePath)) return [];
15
+
16
+ const rules: IgnoreRule[] = [];
17
+ for (const raw of fs.readFileSync(filePath, 'utf8').split('\n')) {
18
+ const line = raw.trim();
19
+ if (!line || line.startsWith('#')) continue;
20
+
21
+ const parts = line.split(/\s+/);
22
+ if (parts.length === 1) {
23
+ // bare glob — suppress any finding whose file matches
24
+ rules.push({ ruleId: '*', pathGlob: parts[0]! });
25
+ } else {
26
+ // <rule-id-or-*> <path-glob>
27
+ rules.push({ ruleId: parts[0]!, pathGlob: parts[1]! });
28
+ }
29
+ }
30
+ return rules;
31
+ }
32
+
33
+ function matchesRule(finding: Finding, rule: IgnoreRule): boolean {
34
+ const ruleMatches = rule.ruleId === '*' || finding.id.startsWith(rule.ruleId);
35
+ if (!ruleMatches) return false;
36
+ if (rule.pathGlob === null) return true;
37
+ return minimatch(finding.file.replace(/\\/g, '/'), rule.pathGlob, { matchBase: true });
38
+ }
39
+
40
+ /** Convert `ignore:` entries from autopilot.config.yaml into IgnoreRules. */
41
+ export function parseConfigIgnore(entries: AutopilotConfig['ignore']): IgnoreRule[] {
42
+ if (!entries || entries.length === 0) return [];
43
+ return entries.map(entry => {
44
+ if (typeof entry === 'string') {
45
+ return { ruleId: '*', pathGlob: entry };
46
+ }
47
+ return { ruleId: entry.rule ?? '*', pathGlob: entry.path };
48
+ });
49
+ }
50
+
51
+ export function applyIgnoreRules(findings: Finding[], rules: IgnoreRule[]): Finding[] {
52
+ if (rules.length === 0) return findings;
53
+ return findings.filter(f => !rules.some(r => matchesRule(f, r)));
54
+ }
@@ -0,0 +1,30 @@
1
+ import * as fs from 'node:fs';
2
+ import * as path from 'node:path';
3
+
4
+ const CACHE_DIR = '.autopilot-cache';
5
+ const LOG_FILE = 'costs.jsonl';
6
+
7
+ export interface CostLogEntry {
8
+ timestamp: string;
9
+ files: number;
10
+ inputTokens: number;
11
+ outputTokens: number;
12
+ costUSD: number;
13
+ durationMs: number;
14
+ }
15
+
16
+ export function appendCostLog(cwd: string, entry: CostLogEntry): void {
17
+ const dir = path.join(cwd, CACHE_DIR);
18
+ fs.mkdirSync(dir, { recursive: true });
19
+ fs.appendFileSync(path.join(dir, LOG_FILE), JSON.stringify(entry) + '\n', 'utf8');
20
+ }
21
+
22
+ export function readCostLog(cwd: string): CostLogEntry[] {
23
+ const p = path.join(cwd, CACHE_DIR, LOG_FILE);
24
+ if (!fs.existsSync(p)) return [];
25
+ return fs.readFileSync(p, 'utf8')
26
+ .split('\n')
27
+ .filter(Boolean)
28
+ .map(line => { try { return JSON.parse(line) as CostLogEntry; } catch { return null; } })
29
+ .filter((e): e is CostLogEntry => e !== null);
30
+ }
@@ -0,0 +1,43 @@
1
+ import * as fs from 'node:fs';
2
+ import * as path from 'node:path';
3
+ import type { Finding } from '../findings/types.ts';
4
+
5
+ const CACHE_DIR = '.autopilot-cache';
6
+ const CACHE_FILE = 'findings.json';
7
+
8
+ function cacheFilePath(cwd: string): string {
9
+ return path.join(cwd, CACHE_DIR, CACHE_FILE);
10
+ }
11
+
12
+ function findingKey(f: Finding): string {
13
+ return `${f.id}::${f.file}::${f.line ?? ''}`;
14
+ }
15
+
16
+ export function loadCachedFindings(cwd: string): Finding[] {
17
+ const p = cacheFilePath(cwd);
18
+ if (!fs.existsSync(p)) return [];
19
+ try {
20
+ return JSON.parse(fs.readFileSync(p, 'utf8')) as Finding[];
21
+ } catch {
22
+ return [];
23
+ }
24
+ }
25
+
26
+ export function saveCachedFindings(cwd: string, findings: Finding[]): void {
27
+ const dir = path.join(cwd, CACHE_DIR);
28
+ fs.mkdirSync(dir, { recursive: true });
29
+ // atomic write
30
+ const tmp = cacheFilePath(cwd) + '.tmp';
31
+ fs.writeFileSync(tmp, JSON.stringify(findings, null, 2), 'utf8');
32
+ fs.renameSync(tmp, cacheFilePath(cwd));
33
+ }
34
+
35
+ /**
36
+ * Returns only findings not present in the cached baseline.
37
+ * Two findings are considered the same when id + file + line all match.
38
+ */
39
+ export function filterNewFindings(current: Finding[], cached: Finding[]): Finding[] {
40
+ if (cached.length === 0) return current;
41
+ const seen = new Set(cached.map(findingKey));
42
+ return current.filter(f => !seen.has(findingKey(f)));
43
+ }
@@ -1,7 +1,7 @@
1
1
  import type { ReviewEngine } from '../../adapters/review-engine/types.ts';
2
2
  import type { Finding } from '../findings/types.ts';
3
3
  import type { AutopilotConfig } from '../config/types.ts';
4
- import { buildReviewChunks } from '../chunking/index.ts';
4
+ import { buildReviewChunks, type ReviewChunk } from '../chunking/index.ts';
5
5
 
6
6
  export interface ReviewPhaseResult {
7
7
  phase: 'review';
@@ -22,6 +22,48 @@ export interface ReviewPhaseInput {
22
22
  base?: string;
23
23
  }
24
24
 
25
+ interface ChunkResult {
26
+ findings: Finding[];
27
+ inputTokens: number;
28
+ outputTokens: number;
29
+ costUSD: number;
30
+ }
31
+
32
+ async function reviewChunk(chunk: ReviewChunk, input: ReviewPhaseInput): Promise<ChunkResult> {
33
+ const output = await input.engine.review({
34
+ content: chunk.content,
35
+ kind: chunk.kind,
36
+ context: { stack: input.config.stack, cwd: input.cwd, gitSummary: input.gitSummary },
37
+ });
38
+ return {
39
+ findings: output.findings,
40
+ inputTokens: output.usage?.input ?? 0,
41
+ outputTokens: output.usage?.output ?? 0,
42
+ costUSD: output.usage?.costUSD ?? 0,
43
+ };
44
+ }
45
+
46
+ /** Run up to `limit` promises concurrently, preserving result order. */
47
+ async function pMap<T, R>(
48
+ items: T[],
49
+ fn: (item: T, index: number) => Promise<R>,
50
+ limit: number,
51
+ ): Promise<R[]> {
52
+ const results: R[] = new Array(items.length);
53
+ let next = 0;
54
+
55
+ async function worker(): Promise<void> {
56
+ while (next < items.length) {
57
+ const i = next++;
58
+ results[i] = await fn(items[i]!, i);
59
+ }
60
+ }
61
+
62
+ const workers = Array.from({ length: Math.min(limit, items.length) }, () => worker());
63
+ await Promise.all(workers);
64
+ return results;
65
+ }
66
+
25
67
  export async function runReviewPhase(input: ReviewPhaseInput): Promise<ReviewPhaseResult> {
26
68
  const start = Date.now();
27
69
 
@@ -39,41 +81,51 @@ export async function runReviewPhase(input: ReviewPhaseInput): Promise<ReviewPha
39
81
  base: input.base,
40
82
  });
41
83
 
42
- const allFindings: Finding[] = [];
43
- let totalInputTokens = 0;
44
- let totalOutputTokens = 0;
45
- let totalCostUSD = 0;
46
- let budgetExceeded = false;
84
+ const parallelism = input.config.chunking?.parallelism ?? 3;
85
+ const budgetUSD = input.budgetRemainingUSD;
47
86
 
48
- for (const chunk of chunks) {
49
- if (input.budgetRemainingUSD !== undefined && totalCostUSD >= input.budgetRemainingUSD) {
50
- budgetExceeded = true;
51
- break;
87
+ // For budget tracking we still need to enforce it — run serially if budget set,
88
+ // parallel otherwise (budget check between serial chunks is the safe path).
89
+ let chunkResults: ChunkResult[];
90
+ if (budgetUSD !== undefined) {
91
+ chunkResults = [];
92
+ let spent = 0;
93
+ let budgetExceeded = false;
94
+ for (const chunk of chunks) {
95
+ if (spent >= budgetUSD) { budgetExceeded = true; break; }
96
+ const r = await reviewChunk(chunk, input);
97
+ spent += r.costUSD;
98
+ chunkResults.push(r);
52
99
  }
53
- const output = await input.engine.review({
54
- content: chunk.content,
55
- kind: chunk.kind,
56
- context: { stack: input.config.stack, cwd: input.cwd, gitSummary: input.gitSummary },
57
- });
58
- allFindings.push(...output.findings);
59
- if (output.usage) {
60
- totalInputTokens += output.usage.input;
61
- totalOutputTokens += output.usage.output;
62
- if (output.usage.costUSD !== undefined) totalCostUSD += output.usage.costUSD;
100
+ if (budgetExceeded) {
101
+ chunkResults.push({
102
+ findings: [{
103
+ id: 'budget-exceeded',
104
+ source: 'pipeline',
105
+ severity: 'warning',
106
+ category: 'budget',
107
+ file: '<pipeline>',
108
+ message: `Review budget of $${budgetUSD} USD exceeded — remaining chunks skipped`,
109
+ protectedPath: false,
110
+ createdAt: new Date().toISOString(),
111
+ }],
112
+ inputTokens: 0, outputTokens: 0, costUSD: 0,
113
+ });
63
114
  }
115
+ } else {
116
+ chunkResults = await pMap(chunks, chunk => reviewChunk(chunk, input), parallelism);
64
117
  }
65
118
 
66
- if (budgetExceeded) {
67
- allFindings.push({
68
- id: 'budget-exceeded',
69
- source: 'pipeline',
70
- severity: 'warning',
71
- category: 'budget',
72
- file: '<pipeline>',
73
- message: `Review budget of $${input.budgetRemainingUSD} USD exceeded — remaining chunks skipped`,
74
- protectedPath: false,
75
- createdAt: new Date().toISOString(),
76
- });
119
+ let totalInputTokens = 0;
120
+ let totalOutputTokens = 0;
121
+ let totalCostUSD = 0;
122
+ const allFindings: Finding[] = [];
123
+
124
+ for (const r of chunkResults) {
125
+ allFindings.push(...r.findings);
126
+ totalInputTokens += r.inputTokens;
127
+ totalOutputTokens += r.outputTokens;
128
+ totalCostUSD += r.costUSD;
77
129
  }
78
130
 
79
131
  const hasCritical = allFindings.some(f => f.severity === 'critical');