@contextrail/code-review-agent 0.1.1 → 0.1.2-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,52 @@
1
+ import { createLogger, parseLogLevel } from './logging/logger.js';
2
+ import { serializeError, toError } from './errors/error-utils.js';
3
// Shared module state: the MCP client that gracefulShutdown should close,
// plus a latch that makes shutdown idempotent across repeated signals.
let mcpClientInstance = null;
let isShuttingDown = false;
/**
 * Register the active MCP client so gracefulShutdown can close it on exit.
 */
export function setMcpClient(client) {
    mcpClientInstance = client;
}
8
/**
 * Gracefully shut the process down in response to a signal or fatal error.
 * Idempotent: calls made while a shutdown is already in progress only log
 * and return. Closes the registered MCP client (if any) before exiting.
 * Exits with code 1 when an error triggered the shutdown, 0 otherwise.
 *
 * @param signal - Label for what triggered shutdown, used in log output.
 * @param error - Optional fatal error that caused the shutdown.
 */
export const gracefulShutdown = async (signal, error) => {
    const log = createLogger(parseLogLevel(process.env.DEBUG));
    if (isShuttingDown) {
        log.warn({ msg: 'Shutdown already in progress', signal });
        return;
    }
    isShuttingDown = true;
    log.warn({ msg: `Received ${signal}. Shutting down...`, signal });
    try {
        if (mcpClientInstance) {
            await mcpClientInstance.close();
        }
        process.exit(error ? 1 : 0);
    }
    catch (shutdownError) {
        // Renamed from `error`: the original binding shadowed the function's
        // `error` parameter inside this catch block, which was confusing and
        // made the triggering error unreachable here.
        const closeError = toError(shutdownError);
        log.error({ msg: 'Error during shutdown', error: serializeError(closeError) });
        process.exit(1);
    }
};
28
/**
 * Wire process-level lifecycle hooks so fatal errors and termination
 * signals all funnel through gracefulShutdown.
 */
export const setupLifecycleHandlers = () => {
    // Log a fatal condition with a freshly configured logger (DEBUG env may
    // have changed since startup).
    const logFatal = (msg, fatalError) => {
        const log = createLogger(parseLogLevel(process.env.DEBUG));
        log.error({
            msg,
            error: serializeError(fatalError),
        });
    };
    process.on('uncaughtException', async (error) => {
        logFatal('Uncaught exception', error);
        await gracefulShutdown('SIGTERM', error);
    });
    process.on('unhandledRejection', async (reason) => {
        const error = toError(reason);
        logFatal('Unhandled rejection', error);
        await gracefulShutdown('SIGTERM', error);
    });
    // Termination signals trigger a clean (exit code 0) shutdown.
    for (const signal of ['SIGTERM', 'SIGINT']) {
        process.on(signal, async () => {
            await gracefulShutdown(signal);
        });
    }
};
@@ -0,0 +1,3 @@
1
import type { Logger } from '../logging/logger.js';
import type { ReviewResult, ReviewerResult } from './schema.js';
/**
 * Log a human-readable summary of a completed review run: counts by
 * severity, per-reviewer findings, the final decision, and any failures.
 *
 * @param log - Logger the summary is written to.
 * @param result - Aggregated review result.
 * @param reviewerResults - Raw per-reviewer outputs backing the summary.
 */
export declare const logReviewSummary: (log: Logger, result: ReviewResult, reviewerResults: ReviewerResult[]) => void;
@@ -0,0 +1,81 @@
1
/**
 * Log a human-readable summary of a completed review run.
 * Emits headline counts and the decision at info level; per-finding detail
 * goes to debug level; reviewer failures are warned.
 *
 * @param log - Logger the summary is written to.
 * @param result - Aggregated review result (metadata, summary, decision, synthesis, failures).
 * @param reviewerResults - Raw per-reviewer outputs backing the summary.
 */
export const logReviewSummary = (log, result, reviewerResults) => {
    log.info('\n═══════════════════════════════════════════════════════════');
    log.info('Review Summary');
    log.info('═══════════════════════════════════════════════════════════');
    log.info(` Files reviewed: ${result.metadata.fileCount}`);
    log.info(` Reviewers executed: ${reviewerResults.length}`);
    log.info(` Total issue findings: ${result.summary.totalFindings}`);
    log.info(` Critical: ${result.summary.bySeverity.critical}`);
    log.info(` Major: ${result.summary.bySeverity.major}`);
    log.info(` Minor: ${result.summary.bySeverity.minor}`);
    log.info(` Info: ${result.summary.bySeverity.info}`);
    log.info(` Pass signals: ${result.summary.bySeverity.pass}`);
    if (result.summary.totalFindings > 0) {
        log.info('\n Findings by reviewer:');
        for (const [reviewer, count] of Object.entries(result.summary.byReviewer)) {
            log.info(` ${reviewer}: ${count} finding(s)`);
        }
    }
    log.info(`\n Decision: ${result.decision.decision.toUpperCase()}`);
    log.info(` Summary: ${result.decision.summary}`);
    log.info(` Rationale: ${result.decision.rationale}`);
    // NOTE(review): this filter admits any validated reviewer that has notes,
    // even one whose findings include real issues — confirm that is intended
    // for a section labelled "pass summaries".
    const passReviewers = reviewerResults
        .map((rr) => ({
        reviewer: rr.reviewer,
        passFinding: rr.findings.find((f) => f.severity === 'pass'),
        notes: rr.notes,
        validated: rr.validated,
    }))
        .filter((rr) => rr.validated && (rr.passFinding || rr.notes));
    if (passReviewers.length > 0) {
        log.info('\n Reviewer pass summaries:');
        for (const rr of passReviewers) {
            const passLine = rr.notes?.trim() ?? rr.passFinding?.title ?? 'PASS';
            // split() always yields at least one element, so the `?? passLine`
            // fallback is purely defensive.
            log.info(` ${rr.reviewer}: ${passLine.split('\n')[0] ?? passLine}`);
        }
    }
    if (result.synthesis && result.synthesis.findings.length > 0) {
        // Preferred path: deduplicated findings from the synthesis pass.
        log.debug('\n Deduplicated findings (synthesis):');
        for (const finding of result.synthesis.findings) {
            log.debug(` [${finding.severity.toUpperCase()}] ${finding.title}`);
            if (finding.file) {
                log.debug(` File: ${finding.file}${finding.line ? `:${finding.line}` : ''}`);
            }
            if (finding.sourceReviewers && finding.sourceReviewers.length > 0) {
                log.debug(` Source reviewers: ${finding.sourceReviewers.join(', ')}`);
            }
            if (finding.contextTitles && finding.contextTitles.length > 0) {
                log.debug(` ContextRail Standards: ${finding.contextTitles.join(', ')}`);
            }
            log.debug(` ${finding.description}`);
        }
    }
    else {
        // Fallback when no synthesis is available: dump raw per-reviewer findings.
        const hasAnyEntries = reviewerResults.some((rr) => rr.findings.length > 0);
        if (hasAnyEntries) {
            log.debug('\n All findings:');
            for (const rr of reviewerResults) {
                if (rr.findings.length > 0) {
                    log.debug(`\n ${rr.reviewer}:`);
                    for (const finding of rr.findings) {
                        log.debug(` [${finding.severity.toUpperCase()}] ${finding.title}`);
                        if (finding.file) {
                            log.debug(` File: ${finding.file}${finding.line ? `:${finding.line}` : ''}`);
                        }
                        if (finding.contextTitles && finding.contextTitles.length > 0) {
                            log.debug(` ContextRail Standards: ${finding.contextTitles.join(', ')}`);
                        }
                        log.debug(` ${finding.description}`);
                    }
                }
            }
        }
    }
    if (result.failures && result.failures.length > 0) {
        log.warn('\n Reviewer failures:');
        for (const failure of result.failures) {
            log.warn(` ${failure.reviewer}: ${failure.message}`);
        }
    }
    log.info('═══════════════════════════════════════════════════════════\n');
};
@@ -0,0 +1,25 @@
1
import type { ValidatedReviewAgentConfig } from './config/index.js';
import type { Logger } from './logging/logger.js';
import { McpClient } from './mcp/client.js';
/** Inputs for a single review pipeline run. */
export type PipelineInput = {
    /** Validated agent configuration (models, MCP server, limits). */
    config: ValidatedReviewAgentConfig;
    /** Path to the repository under review. */
    repoPath: string;
    /** Directory that receives result.json and token-budget.json. */
    outputDir: string;
    /** Explicit file list; when set (non-empty), overrides the `from`/`to` diff range. */
    files?: string[];
    /** Diff base ref — used only when `files` is absent/empty. */
    from?: string;
    /** Diff head ref — used only when `files` is absent/empty. */
    to?: string;
    /** Optional PR description included in model prompts. */
    prDescription?: string;
    log: Logger;
    /** Invoked with the MCP client right after construction (before connect). */
    onMcpClientReady?: (client: McpClient) => void;
};
/** Outcome of a review pipeline run. */
export type PipelineResult = {
    /** Final (normalized) review decision. */
    decision: string;
    /** Absolute/joined path to the written result.json. */
    resultPath: string;
    /** Total issue findings across the run. */
    totalFindings: number;
    /** True when at least one reviewer did not validate its output. */
    hasValidationFailures: boolean;
    /** Per-reviewer failure records (empty when all reviewers succeeded). */
    failures: Array<{
        reviewer: string;
        message: string;
    }>;
};
/** Run the full review pipeline; see the implementation for stage ordering. */
export declare const runReview: (input: PipelineInput) => Promise<PipelineResult>;
@@ -0,0 +1,267 @@
1
+ import path from 'node:path';
2
+ import { DEFAULT_ORCHESTRATOR_MODEL, DEFAULT_REVIEWER_MODEL } from './config/defaults.js';
3
+ import { McpClient } from './mcp/client.js';
4
+ import { buildReviewInputs, triagePr } from './review-inputs/index.js';
5
+ import { runOrchestrator } from './orchestrator/agentic-orchestrator.js';
6
+ import { runReviewerLoop } from './reviewers/executor.js';
7
+ import { aggregateResults, writeResult, writeTokenBudgetMetrics } from './output/writer.js';
8
+ import { logReviewSummary } from './output/summary-logger.js';
9
+ import { metadataSchema, reviewerFindingsSchema } from './output/schema.js';
10
+ import { generateReviewDecision, normalizeDecisionWithSynthesis, synthesizeFindings } from './output/aggregator.js';
11
+ import { serializeError, toError } from './errors/error-utils.js';
12
/**
 * Run the full code-review pipeline:
 *   1. connect to the MCP server,
 *   2. build review inputs (explicit file list or git diff range),
 *   3. triage the PR for triviality,
 *   4. run the orchestrator to select reviewers,
 *   5. run all reviewers in parallel (failures degrade to empty results),
 *   6. synthesize/deduplicate findings, generate and normalize a decision,
 *   7. write result.json and token-budget metrics, log a summary.
 * The MCP client is always closed in the finally block, even on error.
 *
 * @param input - See PipelineInput.
 * @returns PipelineResult with the decision, output path, and failure info.
 * @throws Error when the MCP connection fails (wrapped with `cause`).
 */
export const runReview = async (input) => {
    const { config, repoPath, outputDir, files, from, to, prDescription, log, onMcpClientReady } = input;
    log.info('Starting code review...');
    log.info(`Repository: ${repoPath}`);
    log.info(`Output: ${outputDir}`);
    if (files && files.length > 0) {
        log.info(`Files: ${files.join(', ')}`);
    }
    else {
        log.info(`From: ${from}`);
        log.info(`To: ${to}`);
    }
    const mcpClient = new McpClient({
        serverUrl: config.mcpServerUrl,
        authToken: config.mcpAuthToken,
        clientName: 'code-review-agent',
        // NOTE(review): hard-coded '0.1.0' lags the published package version —
        // confirm whether this should track releases.
        clientVersion: '0.1.0',
        logger: log,
    });
    // Expose the client before connect() so the caller can close it on shutdown.
    onMcpClientReady?.(mcpClient);
    try {
        try {
            await mcpClient.connect();
            log.info('Connected to MCP server');
        }
        catch (error) {
            const connectionError = toError(error);
            log.error({
                msg: 'Failed to connect to MCP server',
                serverUrl: config.mcpServerUrl,
                error: serializeError(connectionError),
            });
            throw new Error(`Failed to connect to MCP server at ${config.mcpServerUrl}: ${connectionError.message}`, {
                cause: connectionError,
            });
        }
        // Build review inputs: explicit file list takes precedence over a diff range.
        log.info('Building review inputs...');
        const inputs = files && files.length > 0
            ? await buildReviewInputs({
                mode: 'file-list',
                files,
                basePath: repoPath,
                surroundingContext: {
                    enabled: true,
                    maxTokensPerFile: config.maxTokensPerFile,
                    contextLines: config.contextLines,
                },
            })
            : await buildReviewInputs({
                mode: 'diff',
                repoPath,
                from: from,
                to: to,
                surroundingContext: {
                    enabled: true,
                    maxTokensPerFile: config.maxTokensPerFile,
                    contextLines: config.contextLines,
                },
            });
        log.info(`Found ${inputs.files.length} files to review`);
        if (inputs.files.length > 0) {
            log.debug(` Files: ${inputs.files.slice(0, 10).join(', ')}${inputs.files.length > 10 ? ` ... and ${inputs.files.length - 10} more` : ''}`);
        }
        // Triage PR to determine if it's trivial
        const triageResult = triagePr(inputs);
        log.info(`PR triage: ${triageResult.reason}`);
        if (triageResult.isTrivial) {
            log.info(`Trivial PR detected (${triageResult.isDocsOnly ? 'docs-only' : 'small'}). Skipping full reviewer flow.`);
            // For trivial PRs, we still run orchestrator but with awareness
            // The orchestrator can use this information to select fewer reviewers or skip entirely
        }
        // Run orchestrator
        log.info('Running orchestrator...');
        if (prDescription) {
            log.debug('PR description provided, will be included in prompts');
        }
        if (config.reviewDomains && config.reviewDomains.length > 0) {
            log.debug(`Review domains provided: ${config.reviewDomains.join(', ')}`);
        }
        const orchestratorOutput = await runOrchestrator(inputs, outputDir, {
            mcpClient,
            config: {
                openRouterApiKey: config.openRouterApiKey,
                orchestratorModel: config.orchestratorModel ?? DEFAULT_ORCHESTRATOR_MODEL,
                maxSteps: config.maxSteps,
                prDescription,
                reviewDomains: config.reviewDomains,
            },
            logger: log,
        });
        log.info(`Selected reviewers: ${orchestratorOutput.reviewers.join(', ')}`);
        log.debug(`Orchestrator understanding:\n${orchestratorOutput.understanding}`);
        // Run reviewers in parallel with progress logging. A failing reviewer
        // records a failure entry and degrades to an empty, unvalidated result
        // instead of rejecting the whole Promise.all.
        log.info(`Running ${orchestratorOutput.reviewers.length} reviewer(s) in parallel...`);
        const reviewerFailures = [];
        const reviewerResults = await Promise.all(orchestratorOutput.reviewers.map(async (reviewer) => {
            const startTime = Date.now();
            log.info(`[${reviewer}] Starting review...`);
            try {
                const rawFindings = await runReviewerLoop(reviewer, inputs, orchestratorOutput.understanding, outputDir, {
                    mcpClient,
                    config: {
                        openRouterApiKey: config.openRouterApiKey,
                        reviewerModel: config.reviewerModel ?? DEFAULT_REVIEWER_MODEL,
                        criticModel: config.criticModel,
                        maxSteps: config.maxSteps,
                        maxIterations: config.maxIterations,
                        prDescription,
                        reviewDomains: config.reviewDomains,
                    },
                    logger: log,
                });
                // Validate the model's payload; surface only the first 3 schema issues.
                const parsedFindings = reviewerFindingsSchema.safeParse(rawFindings);
                if (!parsedFindings.success) {
                    throw new Error(`Reviewer ${reviewer} returned invalid findings payload: ${parsedFindings.error.issues
                        .slice(0, 3)
                        .map((issue) => `${issue.path.join('.') || 'root'}: ${issue.message}`)
                        .join('; ')}`, { cause: parsedFindings.error });
                }
                const findings = parsedFindings.data;
                const result = {
                    reviewer,
                    findings: findings.findings,
                    validated: findings.validated,
                    notes: findings.notes ?? undefined,
                };
                const duration = ((Date.now() - startTime) / 1000).toFixed(1);
                log.info(`[${reviewer}] Completed in ${duration}s`);
                const issueCount = findings.findings.filter((f) => f.severity !== 'pass').length;
                const passCount = findings.findings.filter((f) => f.severity === 'pass').length;
                if (issueCount === 0) {
                    log.info(` ✓ ${reviewer}: Clean pass (pass signals: ${passCount}, validated: ${findings.validated})`);
                }
                else {
                    log.info(` ✓ ${reviewer}: ${issueCount} issue(s) (pass signals: ${passCount}, validated: ${findings.validated})`);
                }
                log.debug(` Findings: ${JSON.stringify(findings.findings, null, 2)}`);
                if (findings.notes) {
                    log.debug(` Notes: ${findings.notes}`);
                }
                return result;
            }
            catch (error) {
                const duration = ((Date.now() - startTime) / 1000).toFixed(1);
                const reviewerError = toError(error);
                const message = reviewerError.message;
                log.error({
                    msg: `[${reviewer}] Failed after ${duration}s`,
                    reviewer,
                    durationSeconds: Number(duration),
                    error: serializeError(reviewerError),
                });
                // Special-cased guidance for structured-output failures from the model.
                if (message.includes('No object generated') || message.includes('No output generated')) {
                    log.error({
                        msg: `[${reviewer}] TROUBLESHOOTING`,
                        reviewer,
                        guidance: [
                            'The model may not support structured output well.',
                            'Try using a different model (e.g., anthropic/claude-haiku-4.5).',
                            'Check model compatibility with structured output.',
                            "Review the model's response format requirements.",
                        ],
                    });
                }
                reviewerFailures.push({ reviewer, message });
                return {
                    reviewer,
                    findings: [],
                    validated: false,
                    notes: `Error: ${message}`,
                };
            }
        }));
        // Aggregate results
        log.info('Aggregating results...');
        log.debug(` Total issue findings to aggregate: ${reviewerResults.reduce((sum, rr) => sum + rr.findings.filter((f) => f.severity !== 'pass').length, 0)}`);
        log.debug(` Total pass signals to aggregate: ${reviewerResults.reduce((sum, rr) => sum + rr.findings.filter((f) => f.severity === 'pass').length, 0)}`);
        log.debug(` Reviewers validated: ${reviewerResults.filter((rr) => rr.validated).length}/${reviewerResults.length}`);
        const metadata = metadataSchema.parse({
            timestamp: new Date().toISOString(),
            mode: inputs.mode,
            fileCount: inputs.files.length,
        });
        // Synthesize findings across reviewers (deduplication, contradictions, compound risks)
        log.info(`Synthesis pass starting (model: ${config.orchestratorModel ?? DEFAULT_ORCHESTRATOR_MODEL}, reviewers: ${reviewerResults.length})`);
        const synthesisResult = await synthesizeFindings(reviewerResults, {
            openRouterApiKey: config.openRouterApiKey,
            model: config.orchestratorModel ?? DEFAULT_ORCHESTRATOR_MODEL,
            maxSteps: config.aggregationMaxSteps,
            logger: log,
        });
        log.info(`Synthesis pass complete: ${synthesisResult.synthesis.findings.length} deduped findings, ${synthesisResult.synthesis.contradictions.length} contradictions, ${synthesisResult.synthesis.compoundRisks.length} compound risks`);
        log.debug(` Synthesis: ${synthesisResult.synthesis.findings.length} deduplicated findings`);
        log.debug(` Contradictions: ${synthesisResult.synthesis.contradictions.length}`);
        log.debug(` Compound risks: ${synthesisResult.synthesis.compoundRisks.length}`);
        // Tally synthesized findings per severity for debug output.
        const synthesisSeverityCounts = synthesisResult.synthesis.findings.reduce((acc, finding) => {
            acc[finding.severity] += 1;
            return acc;
        }, { critical: 0, major: 0, minor: 0, info: 0, pass: 0 });
        log.debug(` Synthesis severity: ${JSON.stringify(synthesisSeverityCounts)}`);
        // Generate review decision based on synthesized findings
        log.info(`Decision pass starting (model: ${config.orchestratorModel ?? DEFAULT_ORCHESTRATOR_MODEL}, synthesis findings: ${synthesisResult.synthesis.findings.length})`);
        const decisionResult = await generateReviewDecision(orchestratorOutput.understanding, synthesisResult.synthesis, {
            openRouterApiKey: config.openRouterApiKey,
            model: config.orchestratorModel ?? DEFAULT_ORCHESTRATOR_MODEL,
            maxSteps: config.aggregationMaxSteps,
            logger: log,
        });
        // The model's decision may be overridden to stay consistent with findings.
        const normalizedDecision = normalizeDecisionWithSynthesis(decisionResult.decision, synthesisResult.synthesis);
        const decisionEvidence = {
            findings: synthesisResult.synthesis.findings.length,
            blockingFindings: synthesisResult.synthesis.findings.filter((f) => f.severity === 'critical' || f.severity === 'major').length,
            contradictions: synthesisResult.synthesis.contradictions.length,
            compoundRisks: synthesisResult.synthesis.compoundRisks.length,
            modelDecision: decisionResult.decision.decision,
            normalizedDecision: normalizedDecision.decision,
        };
        log.debug(` Decision evidence: ${JSON.stringify(decisionEvidence)}`);
        if (normalizedDecision.decision !== decisionResult.decision.decision) {
            log.warn(`Decision normalized from ${decisionResult.decision.decision} to ${normalizedDecision.decision} to match synthesized findings.`);
        }
        log.info(`Decision pass complete: ${normalizedDecision.decision}`);
        log.debug(` Aggregation decision: ${normalizedDecision.decision}`);
        log.debug(` Decision summary: ${normalizedDecision.summary}`);
        log.debug(` Decision rationale: ${normalizedDecision.rationale}`);
        const result = aggregateResults(metadata, orchestratorOutput.understanding, normalizedDecision, reviewerResults, reviewerFailures, synthesisResult.synthesis);
        // Write result.json
        await writeResult(outputDir, result);
        const resultPath = path.join(outputDir, 'result.json');
        log.info(`Results written to ${resultPath}`);
        log.debug(` Full review results available at: ${resultPath}`);
        // Write token budget metrics
        const aggregationUsage = [];
        if (synthesisResult.usage) {
            aggregationUsage.push(synthesisResult.usage);
        }
        if (decisionResult.usage) {
            aggregationUsage.push(decisionResult.usage);
        }
        await writeTokenBudgetMetrics(outputDir, orchestratorOutput.reviewers, aggregationUsage.length > 0 ? aggregationUsage : undefined);
        log.info(`Token budget metrics written to ${path.join(outputDir, 'token-budget.json')}`);
        // Print summary
        logReviewSummary(log, result, reviewerResults);
        return {
            decision: result.decision.decision,
            resultPath,
            totalFindings: result.summary.totalFindings,
            hasValidationFailures: reviewerResults.some((rr) => !rr.validated),
            failures: result.failures ?? [],
        };
    }
    finally {
        // Always release the MCP connection, even when the pipeline throws.
        await mcpClient.close();
    }
};
@@ -8,7 +8,21 @@ export type FilteringConfig = {
8
8
  * Default: DEFAULT_EXCLUDE_PATTERNS
9
9
  */
10
10
  excludePatterns?: ReadonlyArray<string>;
11
+ /**
12
+ * Patterns to explicitly re-include after exclusion matching.
13
+ * Intended for `.gitignore` negation rules (`!pattern`).
14
+ */
15
+ includePatterns?: ReadonlyArray<string>;
16
+ };
17
/**
 * Glob pattern sets derived from a root `.gitignore` file.
 */
export type GitignorePatterns = {
    /** Non-negated rules, converted to exclude globs. */
    excludePatterns: string[];
    /** Negated (`!`) rules, converted to re-include globs. */
    includePatterns: string[];
};
/**
 * Parse root `.gitignore` contents into exclude/include glob sets.
 * Negations (`!pattern`) become include patterns used to re-include files.
 *
 * @param contents - Raw text of the root `.gitignore` file.
 * @returns Exclude and re-include glob pattern arrays.
 */
export declare const parseGitignoreContent: (contents: string) => GitignorePatterns;
12
26
  /**
13
27
  * Filter files based on exclude patterns.
14
28
  *
@@ -1,22 +1,102 @@
1
1
  import { DEFAULT_EXCLUDE_PATTERNS } from '../config/defaults.js';
2
2
  import { fileMatchesPatterns } from './file-patterns.js';
3
3
/**
 * Check whether a file path matches any of the given include patterns.
 * Delegates to fileMatchesPatterns, which uses minimatch for safe glob
 * matching (prevents ReDoS).
 *
 * @param filePath - File path to check (relative or absolute)
 * @param patterns - Glob patterns to match against
 * @returns True if file matches at least one pattern
 */
const matchesIncludePatternSet = (filePath, patterns) =>
    // fileMatchesPatterns expects a FilePatterns-shaped object; a file that
    // matches any include pattern should be kept.
    fileMatchesPatterns(filePath, { include: Array.from(patterns) });
20
/**
 * Check whether a file path is matched by any exclude pattern.
 *
 * @param filePath - File path to check
 * @param patterns - Exclude patterns
 * @returns True when file is matched by exclusion rules
 */
const matchesExcludePatternSet = (filePath, patterns) => {
    // fileMatchesPatterns reports false when an exclude pattern hits, so a
    // false result means the file WAS excluded — invert for the caller.
    const keptByPatterns = fileMatchesPatterns(filePath, { exclude: Array.from(patterns) });
    return !keptByPatterns;
};
34
/**
 * Convert a root `.gitignore` rule into a minimatch-style glob pattern.
 * This is intentionally low-lift (root file only) and does not attempt full git parity.
 *
 * @param rule - A single `.gitignore` line with any leading `!` already stripped.
 * @returns A glob pattern, or null when the rule is blank or a comment.
 */
const gitignoreRuleToGlob = (rule) => {
    let pattern = rule.trim();
    if (!pattern) {
        return null;
    }
    // Keep escaped literals for leading comment/negation markers.
    if (pattern.startsWith('\\#')) {
        pattern = pattern.slice(1);
    }
    else if (pattern.startsWith('#')) {
        return null;
    }
    if (pattern.startsWith('\\!')) {
        pattern = pattern.slice(1);
    }
    // Normalize separators for cross-platform matching.
    pattern = pattern.replace(/\\/g, '/');
    const isRootAnchored = pattern.startsWith('/');
    if (isRootAnchored) {
        pattern = pattern.slice(1);
    }
    const isDirectoryPattern = pattern.endsWith('/');
    if (isDirectoryPattern) {
        pattern = pattern.slice(0, -1);
    }
    if (!pattern) {
        return null;
    }
    // Per gitignore semantics, a rule is anchored to the root when it starts
    // with `/` or contains a slash anywhere (other than a trailing one); only
    // slash-free rules float to any depth. The previous expression dropped the
    // anchor for leading-slash rules without an inner slash (e.g. `/dist`
    // became `**/dist` and wrongly matched nested `dist` directories).
    const hasSlash = pattern.includes('/');
    let glob = isRootAnchored || hasSlash ? pattern : `**/${pattern}`;
    if (isDirectoryPattern) {
        // Directory rules exclude everything beneath the directory.
        glob = `${glob}/**`;
    }
    return glob;
};
/**
 * Parse root `.gitignore` contents into exclude/include glob sets.
 * Negations (`!pattern`) become include patterns used to re-include files.
 * Blank lines and `#` comments are skipped.
 *
 * @param contents - Raw text of the root `.gitignore` file.
 * @returns Exclude and re-include glob pattern arrays.
 */
export const parseGitignoreContent = (contents) => {
    const excludePatterns = [];
    const includePatterns = [];
    for (const rawLine of contents.split(/\r?\n/)) {
        const line = rawLine.trim();
        if (!line) {
            continue;
        }
        // `\!` escapes a literal leading `!`; only an unescaped `!` negates.
        const isNegation = line.startsWith('!') && !line.startsWith('\\!');
        const normalizedRule = isNegation ? line.slice(1) : line;
        const glob = gitignoreRuleToGlob(normalizedRule);
        if (!glob) {
            continue;
        }
        if (isNegation) {
            includePatterns.push(glob);
        }
        else {
            excludePatterns.push(glob);
        }
    }
    return { excludePatterns, includePatterns };
};
20
100
  /**
21
101
  * Filter files based on exclude patterns.
22
102
  *
@@ -25,8 +105,17 @@ const matchesExcludePattern = (filePath, patterns) => {
25
105
  * @returns Filtered array of file paths
26
106
  */
27
107
export const filterFiles = (files, config = {}) => {
    const exclude = config.excludePatterns ?? DEFAULT_EXCLUDE_PATTERNS;
    const reInclude = config.includePatterns ?? [];
    return files.filter((file) => {
        // Files not hit by any exclude rule are always kept.
        if (!matchesExcludePatternSet(file, exclude)) {
            return true;
        }
        // `.gitignore` negations can re-include previously excluded files.
        return reInclude.length > 0 && matchesIncludePatternSet(file, reInclude);
    });
};
31
120
  /**
32
121
  * Filter diff inputs by removing excluded files and their diffs.