specvector 0.3.3 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/pipeline/batcher.ts +543 -0
- package/src/pipeline/classifier.ts +361 -0
- package/src/pipeline/index.ts +34 -0
- package/src/pipeline/merger.ts +329 -0
- package/src/review/engine.ts +5 -4
- package/src/review/json-parser.ts +283 -0
package/package.json
CHANGED
|
@@ -0,0 +1,543 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Batched Parallel Review Dispatcher for the Scalable Review Pipeline.
|
|
3
|
+
*
|
|
4
|
+
* Dispatches classified files to the appropriate review path:
|
|
5
|
+
* - FAST_PASS: batched (max 10), single LLM call per batch, no tools
|
|
6
|
+
* - DEEP_DIVE: individual, full agent loop with codebase exploration tools
|
|
7
|
+
* - SKIP: excluded entirely
|
|
8
|
+
*
|
|
9
|
+
* All batches execute in parallel via Promise.allSettled.
|
|
10
|
+
* Individual failures do not block other batches.
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import type { ClassifiedFile, ClassificationResult } from "./classifier";
|
|
14
|
+
import type { ReviewFinding, ContextSource } from "../types/review";
|
|
15
|
+
import type { DiffFile } from "../types/diff";
|
|
16
|
+
import type { ReviewConfig } from "../review/engine";
|
|
17
|
+
import { runReview } from "../review/engine";
|
|
18
|
+
import { parseReviewResponseWithFallback, REVIEW_JSON_INSTRUCTION } from "../review/json-parser";
|
|
19
|
+
import { createProvider } from "../llm";
|
|
20
|
+
import type { LLMProvider } from "../llm/provider";
|
|
21
|
+
import { withRetry } from "../llm/provider";
|
|
22
|
+
import { loadConfig, getStrictnessModifier } from "../config";
|
|
23
|
+
import { getLinearContextForReview, getADRContextForReview } from "../context";
|
|
24
|
+
|
|
25
|
+
// ---------------------------------------------------------------------------
|
|
26
|
+
// Types
|
|
27
|
+
// ---------------------------------------------------------------------------
|
|
28
|
+
|
|
29
|
+
/**
 * Configuration for batch sizes and concurrency.
 *
 * Values at or below zero are tolerated downstream: splitIntoBatches treats
 * a non-positive maxBatchSize as "one batch with everything", and
 * runWithConcurrencyLimit clamps maxConcurrentDeepDives to at least 1.
 */
export interface BatchConfig {
  /** Maximum files per FAST_PASS batch (default: 10). */
  maxBatchSize: number;
  /** Maximum concurrent DEEP_DIVE reviews (default: 5). */
  maxConcurrentDeepDives: number;
}
|
|
36
|
+
|
|
37
|
+
/** Error from a single batch or file review. Other batches still complete. */
export interface BatchError {
  /** Label identifying the batch (e.g., "fast-pass-1", "deep-dive:src/auth/login.ts"). */
  batch: string;
  /** Error message extracted from the rejection reason. */
  message: string;
  /** Paths of the files that were covered by the failed batch. */
  filesAffected: string[];
}
|
|
46
|
+
|
|
47
|
+
/** Result of the entire batched review pipeline stage. */
export interface BatchResult {
  /** All findings aggregated from all batches. */
  findings: ReviewFinding[];
  /** Errors from failed batches (other batches still completed). */
  errors: BatchError[];
  /**
   * Timing breakdown in milliseconds. Because batches run in parallel,
   * fastPassMs and deepDiveMs are the MAXIMUM duration observed for any
   * single batch/file of that type, not a sum; totalMs is wall-clock time
   * for the whole stage.
   */
  timing: {
    totalMs: number;
    fastPassMs: number;
    deepDiveMs: number;
  };
  /** Context sources (Linear tickets, ADRs) used across all reviews. */
  contextSources: ContextSource[];
}
|
|
62
|
+
|
|
63
|
+
// ---------------------------------------------------------------------------
|
|
64
|
+
// Constants
|
|
65
|
+
// ---------------------------------------------------------------------------
|
|
66
|
+
|
|
67
|
+
/**
 * Defaults for the batching stage; any field can be overridden via the
 * optional `batchConfig` argument to runBatchedReviews.
 */
const DEFAULT_BATCH_CONFIG: Required<BatchConfig> = {
  maxBatchSize: 10,
  maxConcurrentDeepDives: 5,
};
|
|
71
|
+
|
|
72
|
+
/**
 * Maximum total characters for the multi-file FAST_PASS prompt.
 * buildFastPassTask divides this budget (minus header/footer and per-file
 * overhead) evenly across the files in a batch and truncates diffs that
 * exceed their share.
 */
const MAX_FAST_PASS_PROMPT_CHARS = 15_000;
|
|
74
|
+
|
|
75
|
+
/**
 * System prompt for FAST_PASS reviews.
 *
 * Same review quality expectations as the full prompt, but without tool-use
 * instructions since FAST_PASS reviews have no codebase exploration tools.
 * The "Maximum 3 findings" instruction at the end is enforced in code by
 * MAX_FAST_PASS_FINDINGS — keep the two in sync if either changes.
 *
 * NOTE: this is a runtime string sent to the LLM; edits here change review
 * behavior, not just documentation.
 */
export const FAST_PASS_SYSTEM_PROMPT = `You are a pragmatic code reviewer. Your job is to catch REAL problems, not nitpick.

## What to Look For (in priority order)
1. **CRITICAL**: Security vulnerabilities, data loss, crashes
2. **HIGH**: Bugs that WILL break functionality in production
3. **MEDIUM**: Significant code quality issues (not style nits)

## Business Logic Patterns to Detect
Focus on real logic errors that cause incorrect behavior:
- **Off-by-one errors**: Wrong boundary conditions, < vs <=, array index issues
- **Null/undefined handling**: Missing null checks on values that can be null
- **Race conditions**: Shared state without synchronization, async ordering bugs
- **Incorrect boolean logic**: Inverted conditions, wrong operator (AND vs OR)
- **Missing error paths**: Happy-path-only code that ignores failure cases in data flows
- **Wrong operator**: Using = instead of ==, + instead of -, incorrect comparisons
- **State management bugs**: Mutating shared state, stale closures, incorrect resets
- **Type coercion issues**: Implicit conversions causing unexpected behavior

## What NOT to Flag
- Style preferences or "I would do it differently"
- Theoretical performance issues without evidence
- Missing edge case tests for working code
- "Could be refactored" suggestions
- Code that works but isn't perfect
- Naming convention preferences
- Comment formatting or missing comments
- Import ordering or grouping

## Key Principle
Most PRs should have 0-2 findings. If you're finding 5+ issues, you're being too picky.
Only flag issues you'd actually block a PR for in a real code review.

## Response Format
SUMMARY: [1-2 sentences - is this code ready to merge?]

FINDINGS:
- [CRITICAL|HIGH|MEDIUM] [Category]: [Title]
  [Brief description + how to fix]
  File: [filename]

If the code is ready to merge, respond with:
SUMMARY: [Positive assessment]
FINDINGS: None

Maximum 3 findings per file batch. Focus on what matters.`;
|
|
125
|
+
|
|
126
|
+
// ---------------------------------------------------------------------------
|
|
127
|
+
// Main Entry Point
|
|
128
|
+
// ---------------------------------------------------------------------------
|
|
129
|
+
|
|
130
|
+
/**
 * Run batched parallel reviews on classified files.
 *
 * FAST_PASS files → batched, single LLM call per batch, no tools.
 * DEEP_DIVE files → individual, full agent loop with codebase tools.
 * SKIP files → excluded.
 * All batches execute in parallel; individual failures don't block others.
 *
 * @param classification Output of the classifier stage (files + skip counts).
 * @param config         Review configuration (working dir, provider, model, PR metadata).
 * @param batchConfig    Optional overrides for DEFAULT_BATCH_CONFIG.
 * @returns Aggregated findings, per-batch errors, timing, and context sources.
 */
export async function runBatchedReviews(
  classification: ClassificationResult,
  config: ReviewConfig,
  batchConfig?: Partial<BatchConfig>,
): Promise<BatchResult> {
  const totalStart = Date.now();
  const cfg: Required<BatchConfig> = { ...DEFAULT_BATCH_CONFIG, ...batchConfig };

  // Separate files by risk level; anything else (SKIP) is excluded entirely.
  const fastPassFiles = classification.files.filter((f) => f.risk === "FAST_PASS");
  const deepDiveFiles = classification.files.filter((f) => f.risk === "DEEP_DIVE");

  // Early return if nothing to review.
  if (fastPassFiles.length === 0 && deepDiveFiles.length === 0) {
    console.log(`📊 Nothing to review (${classification.counts.skip} files skipped)`);
    return {
      findings: [],
      errors: [],
      timing: { totalMs: 0, fastPassMs: 0, deepDiveMs: 0 },
      contextSources: [],
    };
  }

  console.log(
    `🚀 Dispatching: ${fastPassFiles.length} FAST_PASS, ${deepDiveFiles.length} DEEP_DIVE (${classification.counts.skip} skipped)`,
  );

  // --- Setup shared resources ---
  // CLI/config precedence: explicit config wins over file config over defaults.
  const fileConfig = await loadConfig(config.workingDir);
  const providerName = config.provider || fileConfig.provider || "openrouter";
  const model = config.model || fileConfig.model || "anthropic/claude-sonnet-4.5";
  const strictness = fileConfig.strictness || "normal";

  // Fetch external context (Linear ticket, ADRs) once; shared by every batch.
  const contextSources: ContextSource[] = [];
  let contextPrefix = "";

  const linearResult = await getLinearContextForReview(
    config.branchName,
    config.prTitle,
    config.prBody,
  );
  if (linearResult.context) {
    contextPrefix += linearResult.context + "\n\n";
    if (linearResult.ticketId) {
      contextSources.push({
        type: "linear",
        id: linearResult.ticketId,
        title: linearResult.ticketTitle,
        url: linearResult.ticketUrl,
      });
    }
  }

  const adrResult = await getADRContextForReview(config.workingDir, fileConfig.adrPath);
  if (adrResult) {
    contextPrefix += adrResult.formatted + "\n\n";
    for (const adrFile of adrResult.context.files) {
      contextSources.push({
        type: "adr",
        id: adrFile.name,
        title: adrFile.name.replace(".md", ""),
      });
    }
  }

  // Build the FAST_PASS system prompt: context + base prompt + JSON output
  // instruction + strictness modifier.
  const strictnessGuidance = getStrictnessModifier(strictness);
  const fastPassSystemPrompt =
    contextPrefix +
    FAST_PASS_SYSTEM_PROMPT +
    REVIEW_JSON_INSTRUCTION +
    `\n\n## Strictness Setting: ${strictness.toUpperCase()}\n${strictnessGuidance}`;

  // --- Build all review promises ---

  // One unit of dispatchable work; `promise` is already running when stored.
  interface WorkItem {
    label: string;
    type: "fast_pass" | "deep_dive";
    files: string[];
    promise: Promise<TimedResult<ReviewFinding[]>>;
  }

  const work: WorkItem[] = [];

  // Create one shared provider for all FAST_PASS batches.
  // NOTE(review): only OPENROUTER_API_KEY is forwarded here even when
  // providerName is something else — confirm createProvider resolves other
  // providers' keys from their own env vars.
  let sharedProvider: LLMProvider | null = null;
  if (fastPassFiles.length > 0) {
    const providerResult = createProvider({
      provider: providerName,
      model,
      apiKey: process.env.OPENROUTER_API_KEY,
    });
    if (providerResult.ok) {
      sharedProvider = providerResult.value;
    } else {
      console.error(`❌ Failed to create provider for FAST_PASS: ${providerResult.error.message}`);
    }
  }

  // FAST_PASS batch promises.
  const fastPassBatches = splitIntoBatches(fastPassFiles, cfg.maxBatchSize);
  for (let i = 0; i < fastPassBatches.length; i++) {
    const batch = fastPassBatches[i]!;
    const label = `fast-pass-${i + 1}`;
    const files = batch.map((f) => f.path);

    if (sharedProvider) {
      work.push({
        label,
        type: "fast_pass",
        files,
        promise: timed(() => reviewFastPassBatch(batch, sharedProvider!, fastPassSystemPrompt)),
      });
    } else {
      // Provider creation failed: record a pre-rejected promise so each batch
      // still surfaces as a BatchError. Safe only because Promise.allSettled
      // below attaches handlers in the same tick (no await in between) — do
      // not insert an await before the allSettled call.
      work.push({
        label,
        type: "fast_pass",
        files,
        promise: Promise.reject(new Error("LLM provider creation failed")),
      });
    }
  }

  // DEEP_DIVE individual promises — concurrency-limited.
  // The tasks are started (or queued) here, inside runWithConcurrencyLimit.
  const deepDivePromises = runWithConcurrencyLimit(
    deepDiveFiles,
    cfg.maxConcurrentDeepDives,
    (file) => reviewDeepDiveFile(file, config),
  );
  for (let i = 0; i < deepDiveFiles.length; i++) {
    work.push({
      label: `deep-dive:${deepDiveFiles[i]!.path}`,
      type: "deep_dive",
      files: [deepDiveFiles[i]!.path],
      // NOTE(review): durationMs here includes time spent waiting in the
      // concurrency queue, not just review time — confirm that's intended.
      promise: timed(() => deepDivePromises[i]!),
    });
  }

  // --- Execute all in parallel ---
  // allSettled: one failed batch never blocks or fails the others.
  const settled = await Promise.allSettled(work.map((w) => w.promise));

  // --- Collect results ---
  const findings: ReviewFinding[] = [];
  const errors: BatchError[] = [];
  // Batches ran in parallel, so track the slowest of each type, not the sum.
  let maxFastPassMs = 0;
  let maxDeepDiveMs = 0;

  for (let i = 0; i < settled.length; i++) {
    const result = settled[i]!;
    const item = work[i]!;

    if (result.status === "fulfilled") {
      findings.push(...result.value.result);
      if (item.type === "fast_pass") {
        maxFastPassMs = Math.max(maxFastPassMs, result.value.durationMs);
      } else {
        maxDeepDiveMs = Math.max(maxDeepDiveMs, result.value.durationMs);
      }
    } else {
      errors.push({
        batch: item.label,
        message: result.reason instanceof Error ? result.reason.message : String(result.reason),
        filesAffected: item.files,
      });
    }
  }

  const totalMs = Date.now() - totalStart;

  console.log(
    `✅ Batched review complete: ${findings.length} findings, ${errors.length} errors in ${totalMs}ms`,
  );

  return {
    findings,
    errors,
    timing: { totalMs, fastPassMs: maxFastPassMs, deepDiveMs: maxDeepDiveMs },
    contextSources,
  };
}
|
|
319
|
+
|
|
320
|
+
// ---------------------------------------------------------------------------
|
|
321
|
+
// FAST_PASS Review
|
|
322
|
+
// ---------------------------------------------------------------------------
|
|
323
|
+
|
|
324
|
+
/**
|
|
325
|
+
* Review a batch of FAST_PASS files with a single LLM call (no agent loop).
|
|
326
|
+
*/
|
|
327
|
+
export async function reviewFastPassBatch(
|
|
328
|
+
files: ClassifiedFile[],
|
|
329
|
+
provider: LLMProvider,
|
|
330
|
+
systemPrompt: string,
|
|
331
|
+
): Promise<ReviewFinding[]> {
|
|
332
|
+
const task = buildFastPassTask(files);
|
|
333
|
+
|
|
334
|
+
const result = await withRetry(
|
|
335
|
+
() =>
|
|
336
|
+
provider.chat(
|
|
337
|
+
[
|
|
338
|
+
{ role: "system", content: systemPrompt },
|
|
339
|
+
{ role: "user", content: task },
|
|
340
|
+
],
|
|
341
|
+
{ temperature: 0.2 },
|
|
342
|
+
),
|
|
343
|
+
{ maxRetries: 1, delayMs: 2000 },
|
|
344
|
+
);
|
|
345
|
+
|
|
346
|
+
if (!result.ok) {
|
|
347
|
+
throw new Error(`FAST_PASS LLM call failed: ${result.error.message}`);
|
|
348
|
+
}
|
|
349
|
+
|
|
350
|
+
const response = result.value.content ?? "";
|
|
351
|
+
const summary = `${files.length} files changed`;
|
|
352
|
+
const parsed = parseReviewResponseWithFallback(response, summary);
|
|
353
|
+
|
|
354
|
+
// Cap findings to prevent noisy FAST_PASS batches
|
|
355
|
+
return parsed.findings.slice(0, MAX_FAST_PASS_FINDINGS);
|
|
356
|
+
}
|
|
357
|
+
|
|
358
|
+
/**
 * Maximum findings per FAST_PASS batch. Must match the "Maximum 3 findings
 * per file batch" instruction at the end of FAST_PASS_SYSTEM_PROMPT.
 */
const MAX_FAST_PASS_FINDINGS = 3;
|
|
360
|
+
|
|
361
|
+
// ---------------------------------------------------------------------------
|
|
362
|
+
// DEEP_DIVE Review
|
|
363
|
+
// ---------------------------------------------------------------------------
|
|
364
|
+
|
|
365
|
+
/**
|
|
366
|
+
* Review a single DEEP_DIVE file with the full agent loop.
|
|
367
|
+
*/
|
|
368
|
+
export async function reviewDeepDiveFile(
|
|
369
|
+
file: ClassifiedFile,
|
|
370
|
+
config: ReviewConfig,
|
|
371
|
+
): Promise<ReviewFinding[]> {
|
|
372
|
+
const diff = reconstructFileDiff(file.diffFile);
|
|
373
|
+
const summary = buildFileSummary(file);
|
|
374
|
+
|
|
375
|
+
const result = await runReview(diff, summary, config);
|
|
376
|
+
|
|
377
|
+
if (!result.ok) {
|
|
378
|
+
throw new Error(`DEEP_DIVE review failed for ${file.path}: ${result.error.message}`);
|
|
379
|
+
}
|
|
380
|
+
|
|
381
|
+
return result.value.findings;
|
|
382
|
+
}
|
|
383
|
+
|
|
384
|
+
// ---------------------------------------------------------------------------
|
|
385
|
+
// Prompt Builders
|
|
386
|
+
// ---------------------------------------------------------------------------
|
|
387
|
+
|
|
388
|
+
/**
|
|
389
|
+
* Build the multi-file review task for a FAST_PASS batch.
|
|
390
|
+
* Includes each file's diff with path headers, truncating if needed.
|
|
391
|
+
*/
|
|
392
|
+
export function buildFastPassTask(files: ClassifiedFile[]): string {
|
|
393
|
+
const header = `Please review these ${files.length} files in a single pass.\n\n`;
|
|
394
|
+
const footer =
|
|
395
|
+
"\n\n## Instructions\n" +
|
|
396
|
+
"Review each file for real issues. " +
|
|
397
|
+
"Report findings with the file path in the File: field.\n";
|
|
398
|
+
|
|
399
|
+
// Per-file overhead: header lines (~80 chars) + code fences (~15 chars) + separator (~2 chars)
|
|
400
|
+
const PER_FILE_OVERHEAD = 100;
|
|
401
|
+
const totalOverhead = files.length * PER_FILE_OVERHEAD;
|
|
402
|
+
const availableChars = MAX_FAST_PASS_PROMPT_CHARS - header.length - footer.length - totalOverhead;
|
|
403
|
+
const perFileLimit = Math.floor(Math.max(availableChars, 0) / Math.max(files.length, 1));
|
|
404
|
+
|
|
405
|
+
const sections: string[] = [];
|
|
406
|
+
|
|
407
|
+
for (const file of files) {
|
|
408
|
+
const diff = reconstructFileDiff(file.diffFile);
|
|
409
|
+
const truncatedDiff =
|
|
410
|
+
diff.length > perFileLimit ? diff.slice(0, perFileLimit) + "\n(truncated)" : diff;
|
|
411
|
+
|
|
412
|
+
sections.push(
|
|
413
|
+
`### File: ${file.path}\n` +
|
|
414
|
+
`Status: ${file.diffFile.status} (+${file.diffFile.additions}/-${file.diffFile.deletions})\n\n` +
|
|
415
|
+
"```diff\n" +
|
|
416
|
+
truncatedDiff +
|
|
417
|
+
"\n```",
|
|
418
|
+
);
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
return header + sections.join("\n\n") + footer;
|
|
422
|
+
}
|
|
423
|
+
|
|
424
|
+
// ---------------------------------------------------------------------------
|
|
425
|
+
// Helpers
|
|
426
|
+
// ---------------------------------------------------------------------------
|
|
427
|
+
|
|
428
|
+
/**
|
|
429
|
+
* Reconstruct raw diff text from a parsed DiffFile.
|
|
430
|
+
*/
|
|
431
|
+
export function reconstructFileDiff(file: DiffFile): string {
|
|
432
|
+
const resolvedOldPath = file.oldPath ?? "unknown";
|
|
433
|
+
const resolvedNewPath = file.newPath ?? "unknown";
|
|
434
|
+
|
|
435
|
+
// For git header, use /dev/null for the missing side
|
|
436
|
+
const headerA = file.status === "added" ? "/dev/null" : `a/${resolvedOldPath}`;
|
|
437
|
+
const headerB = file.status === "deleted" ? "/dev/null" : `b/${resolvedNewPath}`;
|
|
438
|
+
|
|
439
|
+
const lines: string[] = [];
|
|
440
|
+
lines.push(`diff --git ${headerA} ${headerB}`);
|
|
441
|
+
|
|
442
|
+
if (file.status === "added") {
|
|
443
|
+
lines.push("new file mode 100644");
|
|
444
|
+
} else if (file.status === "deleted") {
|
|
445
|
+
lines.push("deleted file mode 100644");
|
|
446
|
+
}
|
|
447
|
+
|
|
448
|
+
lines.push(`--- ${file.status === "added" ? "/dev/null" : `a/${resolvedOldPath}`}`);
|
|
449
|
+
lines.push(`+++ ${file.status === "deleted" ? "/dev/null" : `b/${resolvedNewPath}`}`);
|
|
450
|
+
|
|
451
|
+
for (const hunk of file.hunks) {
|
|
452
|
+
lines.push(hunk.content);
|
|
453
|
+
}
|
|
454
|
+
|
|
455
|
+
return lines.join("\n");
|
|
456
|
+
}
|
|
457
|
+
|
|
458
|
+
/**
|
|
459
|
+
* Build a summary string for a single file.
|
|
460
|
+
*/
|
|
461
|
+
function buildFileSummary(file: ClassifiedFile): string {
|
|
462
|
+
const status = file.diffFile.status;
|
|
463
|
+
return [
|
|
464
|
+
`Files changed: 1`,
|
|
465
|
+
`Additions: +${file.diffFile.additions}`,
|
|
466
|
+
`Deletions: -${file.diffFile.deletions}`,
|
|
467
|
+
"",
|
|
468
|
+
"Files:",
|
|
469
|
+
` ${status}: ${file.path} +${file.diffFile.additions}/-${file.diffFile.deletions}`,
|
|
470
|
+
].join("\n");
|
|
471
|
+
}
|
|
472
|
+
|
|
473
|
+
/**
|
|
474
|
+
* Split an array into batches of a given size.
|
|
475
|
+
* Returns all items in a single batch if batchSize is <= 0.
|
|
476
|
+
*/
|
|
477
|
+
export function splitIntoBatches<T>(items: T[], batchSize: number): T[][] {
|
|
478
|
+
if (items.length === 0) return [];
|
|
479
|
+
if (batchSize <= 0) return [items];
|
|
480
|
+
const batches: T[][] = [];
|
|
481
|
+
for (let i = 0; i < items.length; i += batchSize) {
|
|
482
|
+
batches.push(items.slice(i, i + batchSize));
|
|
483
|
+
}
|
|
484
|
+
return batches;
|
|
485
|
+
}
|
|
486
|
+
|
|
487
|
+
/** A promise's value paired with how long it took to settle (see timed). */
interface TimedResult<T> {
  /** The value the wrapped promise resolved with. */
  result: T;
  /** Elapsed wall-clock milliseconds from invocation to resolution. */
  durationMs: number;
}
|
|
492
|
+
|
|
493
|
+
async function timed<T>(fn: () => Promise<T>): Promise<TimedResult<T>> {
|
|
494
|
+
const start = Date.now();
|
|
495
|
+
const result = await fn();
|
|
496
|
+
return { result, durationMs: Date.now() - start };
|
|
497
|
+
}
|
|
498
|
+
|
|
499
|
+
/**
|
|
500
|
+
* Run async tasks with a concurrency limit.
|
|
501
|
+
* Returns an array of promises (one per item) that resolve in order,
|
|
502
|
+
* but at most `limit` tasks run simultaneously.
|
|
503
|
+
*/
|
|
504
|
+
export function runWithConcurrencyLimit<T, R>(
|
|
505
|
+
items: T[],
|
|
506
|
+
limit: number,
|
|
507
|
+
fn: (item: T) => Promise<R>,
|
|
508
|
+
): Promise<R>[] {
|
|
509
|
+
const effectiveLimit = Math.max(limit, 1);
|
|
510
|
+
let running = 0;
|
|
511
|
+
const results: Promise<R>[] = [];
|
|
512
|
+
const queue: Array<() => void> = [];
|
|
513
|
+
|
|
514
|
+
for (const item of items) {
|
|
515
|
+
results.push(
|
|
516
|
+
new Promise<R>((resolve, reject) => {
|
|
517
|
+
const execute = () => {
|
|
518
|
+
running++;
|
|
519
|
+
fn(item).then(
|
|
520
|
+
(value) => {
|
|
521
|
+
running--;
|
|
522
|
+
resolve(value);
|
|
523
|
+
if (queue.length > 0) queue.shift()!();
|
|
524
|
+
},
|
|
525
|
+
(error) => {
|
|
526
|
+
running--;
|
|
527
|
+
reject(error);
|
|
528
|
+
if (queue.length > 0) queue.shift()!();
|
|
529
|
+
},
|
|
530
|
+
);
|
|
531
|
+
};
|
|
532
|
+
|
|
533
|
+
if (running < effectiveLimit) {
|
|
534
|
+
execute();
|
|
535
|
+
} else {
|
|
536
|
+
queue.push(execute);
|
|
537
|
+
}
|
|
538
|
+
}),
|
|
539
|
+
);
|
|
540
|
+
}
|
|
541
|
+
|
|
542
|
+
return results;
|
|
543
|
+
}
|