@delegance/claude-autopilot 1.4.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,38 @@
1
1
  # Changelog
2
2
 
3
+ ## [1.6.0] — 2026-04-22
4
+
5
+ ### Added
6
+ - **Provider usage scanner** (`src/core/detect/provider-usage.ts`) — walks project source files, counts per-provider API key and SDK references (capped at 1 per file to avoid skew), returns `ProviderCounts`
7
+ - **`dominantProvider()`** — returns the provider with the highest file-reference count
8
+ - **Smart `auto` tiebreaker** — when multiple API keys are present, `auto` scans the codebase and prefers the provider already used there; falls back to env-key priority order if counts are all zero
9
+ - `ReviewInput.context.cwd` — threads working directory through to the review engine so `auto` knows where to scan; `review-phase.ts` now passes `cwd` in context
10
+ - 12 new tests for `detectProviderUsage` and `dominantProvider` — **181 total**
11
+
12
+ ## [1.5.0] — 2026-04-22
13
+
14
+ ### Added
15
+ - **Gemini adapter** (`gemini`) — Google Gemini 2.5 Pro via `@google/generative-ai`; accepts `GEMINI_API_KEY` or `GOOGLE_API_KEY`; 1M token context window
16
+ - **OpenAI-compatible adapter** (`openai-compatible`) — works with any OpenAI-API-compatible endpoint (Groq, Ollama, Together AI, etc.); requires `options.model`; auto-selects API key via `options.apiKeyEnv` → `OPENAI_API_KEY` → `'ollama'`
17
+ - **Updated auto adapter** — full priority chain: `ANTHROPIC_API_KEY` → `GEMINI_API_KEY`/`GOOGLE_API_KEY` → `OPENAI_API_KEY` → `GROQ_API_KEY` (wraps openai-compatible with Groq config)
18
+ - `run.ts` no-key warning now lists all four key options
19
+
20
+ ### Changed
21
+ - 169 tests total (up from 136)
22
+
23
+ ## [1.4.0] — 2026-04-21
24
+
25
+ ### Added
26
+ - **Static rules registry** (`src/core/static-rules/registry.ts`) — lazy-loads built-in rules by name; fixes critical bug where config `staticRules` was always silently ignored
27
+ - **7 built-in rules**: `hardcoded-secrets`, `npm-audit`, `package-lock-sync`, `console-log`, `todo-fixme`, `large-file`, `missing-tests`
28
+ - **Claude adapter** (`claude`) — Anthropic Claude Opus 4.7 via `@anthropic-ai/sdk`; configurable model via `context.model`
29
+ - **Auto adapter** (`auto`) — detects best available key at runtime; checked in priority order
30
+ - `doctor` now checks `ANTHROPIC_API_KEY` in addition to `OPENAI_API_KEY`
31
+ - 136 tests total
32
+
33
+ ### Fixed
34
+ - **Critical**: `staticRules` in `RunInput` was never populated — config-listed rules were silently ignored. `loadRulesFromConfig()` now wired into `run.ts`
35
+
3
36
  ## [1.2.8] — 2026-04-21
4
37
 
5
38
  ### Added
package/README.md CHANGED
@@ -118,7 +118,7 @@ Presets: `nextjs-supabase`, `t3`, `python-fastapi`, `rails-postgres`, `go`.
118
118
  ```yaml
119
119
  configVersion: 1
120
120
  reviewEngine:
121
- adapter: codex
121
+ adapter: auto # auto-detects best available key at runtime
122
122
  testCommand: npm test
123
123
  protectedPaths:
124
124
  - src/core/**
@@ -130,6 +130,37 @@ staticRules:
130
130
 
131
131
  Full schema and preset defaults: `presets/<name>/autopilot.config.yaml`.
132
132
 
133
+ ### Review Engine Adapters
134
+
135
+ | Adapter | Key required | Notes |
136
+ |---|---|---|
137
+ | `auto` | any below | Auto-selects best available (recommended) |
138
+ | `claude` | `ANTHROPIC_API_KEY` | Opus 4.7 default |
139
+ | `gemini` | `GEMINI_API_KEY` or `GOOGLE_API_KEY` | Gemini 2.5 Pro, 1M context |
140
+ | `codex` | `OPENAI_API_KEY` | GPT-5 Codex |
141
+ | `openai-compatible` | configurable | Groq, Ollama, Together AI, etc. |
142
+
143
+ `auto` priority: Anthropic → Gemini → OpenAI → Groq.
144
+
145
+ **Groq example:**
146
+ ```yaml
147
+ reviewEngine:
148
+ adapter: openai-compatible
149
+ options:
150
+ model: llama-3.3-70b-versatile
151
+ baseUrl: https://api.groq.com/openai/v1
152
+ apiKeyEnv: GROQ_API_KEY
153
+ ```
154
+
155
+ **Ollama (local, no key):**
156
+ ```yaml
157
+ reviewEngine:
158
+ adapter: openai-compatible
159
+ options:
160
+ model: llama3.2
161
+ baseUrl: http://localhost:11434/v1
162
+ ```
163
+
133
164
  ## GitHub Actions
134
165
 
135
166
  ```yaml
@@ -175,7 +206,7 @@ Four pluggable adapter points:
175
206
 
176
207
  | Point | Built-in | Purpose |
177
208
  |---|---|---|
178
- | `review-engine` | `codex` | LLM code review (OpenAI) |
209
+ | `review-engine` | `auto`, `claude`, `gemini`, `codex`, `openai-compatible` | LLM code review |
179
210
  | `vcs-host` | `github` | PR comments + SARIF upload |
180
211
  | `migration-runner` | `supabase` | DB migration execution |
181
212
  | `review-bot-parser` | `cursor` | Parse review bot comments |
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@delegance/claude-autopilot",
3
- "version": "1.4.0",
3
+ "version": "1.6.0",
4
4
  "type": "module",
5
5
  "description": "Claude Code automation pipeline: spec → plan → implement → validate → PR",
6
6
  "keywords": [
@@ -46,6 +46,7 @@
46
46
  },
47
47
  "dependencies": {
48
48
  "@anthropic-ai/sdk": "^0.90.0",
49
+ "@google/generative-ai": "^0.24.1",
49
50
  "ajv": "^8",
50
51
  "dotenv": ">=16",
51
52
  "js-yaml": "^4",
@@ -16,6 +16,8 @@ const BUILTIN_PATHS: Record<IntegrationPoint, Record<string, string>> = {
16
16
  'review-engine': {
17
17
  codex: './review-engine/codex.ts',
18
18
  claude: './review-engine/claude.ts',
19
+ gemini: './review-engine/gemini.ts',
20
+ 'openai-compatible': './review-engine/openai-compatible.ts',
19
21
  auto: './review-engine/auto.ts',
20
22
  },
21
23
  'vcs-host': { github: './vcs-host/github.ts' },
@@ -1,21 +1,74 @@
1
1
  import type { Capabilities } from '../base.ts';
2
2
  import type { ReviewEngine, ReviewInput, ReviewOutput } from './types.ts';
3
3
  import { AutopilotError } from '../../core/errors.ts';
4
+ import { detectProviderUsage, dominantProvider, type Provider } from '../../core/detect/provider-usage.ts';
4
5
 
5
- // Priority order: ANTHROPIC_API_KEY → claude, OPENAI_API_KEY → codex
6
- async function resolveAdapter(): Promise<ReviewEngine> {
6
+ interface AvailableProvider {
7
+ provider: Provider;
8
+ load: () => Promise<ReviewEngine>;
9
+ }
10
+
11
+ function buildGroqAdapter(base: ReviewEngine): ReviewEngine {
12
+ return {
13
+ ...base,
14
+ name: 'auto',
15
+ review(input: ReviewInput) {
16
+ return base.review({
17
+ ...input,
18
+ context: {
19
+ ...input.context,
20
+ model: 'llama-3.3-70b-versatile',
21
+ baseUrl: 'https://api.groq.com/openai/v1',
22
+ apiKeyEnv: 'GROQ_API_KEY',
23
+ } as typeof input.context,
24
+ });
25
+ },
26
+ };
27
+ }
28
+
29
+ function getAvailableProviders(): AvailableProvider[] {
30
+ const available: AvailableProvider[] = [];
7
31
  if (process.env.ANTHROPIC_API_KEY) {
8
- const { claudeAdapter } = await import('./claude.ts');
9
- return claudeAdapter;
32
+ available.push({ provider: 'anthropic', load: async () => (await import('./claude.ts')).claudeAdapter });
33
+ }
34
+ if (process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY) {
35
+ available.push({ provider: 'gemini', load: async () => (await import('./gemini.ts')).geminiAdapter });
10
36
  }
11
37
  if (process.env.OPENAI_API_KEY) {
12
- const { codexAdapter } = await import('./codex.ts');
13
- return codexAdapter;
38
+ available.push({ provider: 'openai', load: async () => (await import('./codex.ts')).codexAdapter });
39
+ }
40
+ if (process.env.GROQ_API_KEY) {
41
+ available.push({
42
+ provider: 'groq',
43
+ load: async () => buildGroqAdapter((await import('./openai-compatible.ts')).openaiCompatibleAdapter),
44
+ });
45
+ }
46
+ return available;
47
+ }
48
+
49
+ async function resolveAdapter(cwd: string): Promise<ReviewEngine> {
50
+ const available = getAvailableProviders();
51
+
52
+ if (available.length === 0) {
53
+ throw new AutopilotError(
54
+ 'No LLM API key found. Set one of: ANTHROPIC_API_KEY, GEMINI_API_KEY, OPENAI_API_KEY, GROQ_API_KEY',
55
+ { code: 'auth', provider: 'auto' },
56
+ );
14
57
  }
15
- throw new AutopilotError(
16
- 'No LLM API key found set ANTHROPIC_API_KEY (recommended) or OPENAI_API_KEY to enable review',
17
- { code: 'auth', provider: 'auto' }
18
- );
58
+
59
+ // Single provider — no need to scan
60
+ if (available.length === 1) return available[0]!.load();
61
+
62
+ // Multiple keys present — prefer the provider most referenced in source code
63
+ const counts = detectProviderUsage(cwd);
64
+ const dominant = dominantProvider(counts);
65
+ if (dominant) {
66
+ const match = available.find(p => p.provider === dominant);
67
+ if (match) return match.load();
68
+ }
69
+
70
+ // Fallback to first available (env-key priority order)
71
+ return available[0]!.load();
19
72
  }
20
73
 
21
74
  export const autoAdapter: ReviewEngine = {
@@ -31,7 +84,9 @@ export const autoAdapter: ReviewEngine = {
31
84
  },
32
85
 
33
86
  async review(input: ReviewInput): Promise<ReviewOutput> {
34
- const adapter = await resolveAdapter();
87
+ const cwd = (input.context as Record<string, unknown> | undefined)?.['cwd'] as string | undefined
88
+ ?? process.cwd();
89
+ const adapter = await resolveAdapter(cwd);
35
90
  return adapter.review(input);
36
91
  },
37
92
  };
@@ -0,0 +1,131 @@
1
+ import { GoogleGenerativeAI } from '@google/generative-ai';
2
+ import type { Finding } from '../../core/findings/types.ts';
3
+ import { AutopilotError } from '../../core/errors.ts';
4
+ import type { Capabilities } from '../base.ts';
5
+ import type { ReviewEngine, ReviewInput, ReviewOutput } from './types.ts';
6
+
7
+ const DEFAULT_MODEL = 'gemini-2.5-pro-preview-05-06';
8
+ const MAX_OUTPUT_TOKENS = 4096;
9
+
10
+ // Cost per million tokens (USD) — gemini-2.5-pro pricing (<200k context)
11
+ const COST_PER_M_INPUT = 1.25;
12
+ const COST_PER_M_OUTPUT = 10.0;
13
+
14
+ const PROMPT_TEMPLATE = `You are a senior software architect reviewing code changes for quality, security, and correctness.
15
+
16
+ The codebase context:
17
+ {STACK}
18
+
19
+ Please review the following:
20
+
21
+ ---
22
+
23
+ {CONTENT}
24
+
25
+ ---
26
+
27
+ Provide structured feedback in exactly this format:
28
+
29
+ ## Review Summary
30
+ One paragraph overall assessment.
31
+
32
+ ## Findings
33
+
34
+ For each finding, use this format:
35
+ ### [CRITICAL|WARNING|NOTE] <short title>
36
+ <explanation>
37
+ **Suggestion:** <actionable fix>
38
+
39
+ Rules:
40
+ - CRITICAL: Blocks merge (security issues, data loss risks, broken contracts)
41
+ - WARNING: Should address before merging (logic errors, missing error handling, test gaps)
42
+ - NOTE: Improvement suggestion (style, performance, clarity)
43
+ - Maximum 10 findings, ranked by severity
44
+ - Be specific and constructive
45
+ - Reference the file and line when possible`;
46
+
47
+ export const geminiAdapter: ReviewEngine = {
48
+ name: 'gemini',
49
+ apiVersion: '1.0.0',
50
+
51
+ getCapabilities(): Capabilities {
52
+ return { structuredOutput: false, streaming: false, maxContextTokens: 1000000, inlineComments: false };
53
+ },
54
+
55
+ estimateTokens(content: string): number {
56
+ return Math.ceil(content.length / 4);
57
+ },
58
+
59
+ async review(input: ReviewInput): Promise<ReviewOutput> {
60
+ const apiKey = process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY;
61
+ if (!apiKey) {
62
+ throw new AutopilotError('GEMINI_API_KEY (or GOOGLE_API_KEY) not set', { code: 'auth', provider: 'gemini' });
63
+ }
64
+
65
+ const model = (input.context as Record<string, unknown> | undefined)?.['model'] as string | undefined ?? DEFAULT_MODEL;
66
+ const stack = input.context?.stack ?? 'A web application — stack details unspecified.';
67
+ const prompt = PROMPT_TEMPLATE.replace('{STACK}', stack).replace('{CONTENT}', input.content);
68
+
69
+ const genAI = new GoogleGenerativeAI(apiKey);
70
+ const genModel = genAI.getGenerativeModel({
71
+ model,
72
+ generationConfig: { maxOutputTokens: MAX_OUTPUT_TOKENS },
73
+ });
74
+
75
+ let result: Awaited<ReturnType<typeof genModel.generateContent>>;
76
+ try {
77
+ result = await genModel.generateContent(prompt);
78
+ } catch (err) {
79
+ const message = err instanceof Error ? err.message : String(err);
80
+ const isRateLimit = /rate.limit|429|quota/i.test(message);
81
+ const isAuth = /api.key|unauthorized|403/i.test(message);
82
+ throw new AutopilotError(`Gemini review call failed: ${message}`, {
83
+ code: isAuth ? 'auth' : isRateLimit ? 'rate_limit' : 'transient_network',
84
+ provider: 'gemini',
85
+ retryable: isRateLimit,
86
+ });
87
+ }
88
+
89
+ const rawOutput = result.response.text();
90
+ const usage = result.response.usageMetadata;
91
+ const costUSD = usage
92
+ ? (usage.promptTokenCount / 1_000_000) * COST_PER_M_INPUT +
93
+ (usage.candidatesTokenCount / 1_000_000) * COST_PER_M_OUTPUT
94
+ : undefined;
95
+
96
+ return {
97
+ findings: parseGeminiOutput(rawOutput),
98
+ rawOutput,
99
+ usage: usage
100
+ ? { input: usage.promptTokenCount, output: usage.candidatesTokenCount, costUSD }
101
+ : undefined,
102
+ };
103
+ },
104
+ };
105
+
106
+ export default geminiAdapter;
107
+
108
+ function parseGeminiOutput(output: string): Finding[] {
109
+ const findings: Finding[] = [];
110
+ const regex = /### \[(CRITICAL|WARNING|NOTE)\]\s*(.+?)(?=\n### \[|## Review Summary|$)/gs;
111
+ let match: RegExpExecArray | null;
112
+ while ((match = regex.exec(output)) !== null) {
113
+ const severity = match[1]!.toLowerCase() as Finding['severity'];
114
+ const body = match[2]!.trim();
115
+ const titleEnd = body.indexOf('\n');
116
+ const title = (titleEnd > 0 ? body.slice(0, titleEnd) : body).trim();
117
+ const suggestion = body.match(/\*\*Suggestion:\*\*\s*(.+)/s)?.[1]?.trim();
118
+ findings.push({
119
+ id: `gemini-${findings.length}`,
120
+ source: 'review-engine',
121
+ severity,
122
+ category: 'gemini-review',
123
+ file: '<unspecified>',
124
+ message: title,
125
+ suggestion,
126
+ protectedPath: false,
127
+ createdAt: new Date().toISOString(),
128
+ });
129
+ }
130
+ return findings;
131
+ }
@@ -0,0 +1,126 @@
1
+ import OpenAI from 'openai';
2
+ import type { Finding } from '../../core/findings/types.ts';
3
+ import { AutopilotError } from '../../core/errors.ts';
4
+ import type { Capabilities } from '../base.ts';
5
+ import type { ReviewEngine, ReviewInput, ReviewOutput } from './types.ts';
6
+
7
+ const MAX_OUTPUT_TOKENS = 4096;
8
+
9
+ const SYSTEM_PROMPT_TEMPLATE = `You are a senior software architect reviewing code changes for quality, security, and correctness.
10
+
11
+ The codebase context:
12
+ {STACK}
13
+
14
+ Provide structured feedback in exactly this format:
15
+
16
+ ## Review Summary
17
+ One paragraph overall assessment.
18
+
19
+ ## Findings
20
+
21
+ For each finding, use this format:
22
+ ### [CRITICAL|WARNING|NOTE] <short title>
23
+ <explanation>
24
+ **Suggestion:** <actionable fix>
25
+
26
+ Rules:
27
+ - CRITICAL: Blocks merge (security issues, data loss risks, broken contracts)
28
+ - WARNING: Should address before merging (logic errors, missing error handling, test gaps)
29
+ - NOTE: Improvement suggestion (style, performance, clarity)
30
+ - Maximum 10 findings, ranked by severity
31
+ - Be specific and constructive
32
+ - Reference the file and line when possible`;
33
+
34
+ export const openaiCompatibleAdapter: ReviewEngine = {
35
+ name: 'openai-compatible',
36
+ apiVersion: '1.0.0',
37
+
38
+ getCapabilities(): Capabilities {
39
+ return { structuredOutput: false, streaming: false, maxContextTokens: 128000, inlineComments: false };
40
+ },
41
+
42
+ estimateTokens(content: string): number {
43
+ return Math.ceil(content.length / 4);
44
+ },
45
+
46
+ async review(input: ReviewInput): Promise<ReviewOutput> {
47
+ const opts = (input.context as Record<string, unknown> | undefined) ?? {};
48
+
49
+ // API key: options.apiKey → named env var → OPENAI_API_KEY
50
+ const apiKeyEnv = (opts['apiKeyEnv'] as string | undefined) ?? 'OPENAI_API_KEY';
51
+ const apiKey = (opts['apiKey'] as string | undefined) ?? process.env[apiKeyEnv] ?? 'ollama';
52
+
53
+ const baseURL = (opts['baseUrl'] as string | undefined) ??
54
+ process.env.OPENAI_BASE_URL ??
55
+ undefined;
56
+
57
+ const model = opts['model'] as string | undefined;
58
+ if (!model) {
59
+ throw new AutopilotError(
60
+ 'openai-compatible adapter requires options.model to be set in autopilot.config.yaml',
61
+ { code: 'invalid_config', provider: 'openai-compatible' },
62
+ );
63
+ }
64
+
65
+ const stack = input.context?.stack ?? 'A web application — stack details unspecified.';
66
+ const systemPrompt = SYSTEM_PROMPT_TEMPLATE.replace('{STACK}', stack);
67
+ const client = new OpenAI({ apiKey, ...(baseURL ? { baseURL } : {}) });
68
+
69
+ let response: OpenAI.Chat.ChatCompletion;
70
+ try {
71
+ response = await client.chat.completions.create({
72
+ model,
73
+ max_tokens: MAX_OUTPUT_TOKENS,
74
+ messages: [
75
+ { role: 'system', content: systemPrompt },
76
+ { role: 'user', content: `Please review the following:\n\n---\n\n${input.content}` },
77
+ ],
78
+ });
79
+ } catch (err) {
80
+ const message = err instanceof Error ? err.message : String(err);
81
+ const isRateLimit = /rate.limit|429/i.test(message);
82
+ const isAuth = /unauthorized|401|invalid.api.key/i.test(message);
83
+ throw new AutopilotError(`openai-compatible review call failed: ${message}`, {
84
+ code: isAuth ? 'auth' : isRateLimit ? 'rate_limit' : 'transient_network',
85
+ provider: 'openai-compatible',
86
+ retryable: isRateLimit,
87
+ });
88
+ }
89
+
90
+ const rawOutput = response.choices[0]?.message.content ?? '';
91
+ return {
92
+ findings: parseOutput(rawOutput),
93
+ rawOutput,
94
+ usage: response.usage
95
+ ? { input: response.usage.prompt_tokens, output: response.usage.completion_tokens }
96
+ : undefined,
97
+ };
98
+ },
99
+ };
100
+
101
+ export default openaiCompatibleAdapter;
102
+
103
+ function parseOutput(output: string): Finding[] {
104
+ const findings: Finding[] = [];
105
+ const regex = /### \[(CRITICAL|WARNING|NOTE)\]\s*(.+?)(?=\n### \[|## Review Summary|$)/gs;
106
+ let match: RegExpExecArray | null;
107
+ while ((match = regex.exec(output)) !== null) {
108
+ const severity = match[1]!.toLowerCase() as Finding['severity'];
109
+ const body = match[2]!.trim();
110
+ const titleEnd = body.indexOf('\n');
111
+ const title = (titleEnd > 0 ? body.slice(0, titleEnd) : body).trim();
112
+ const suggestion = body.match(/\*\*Suggestion:\*\*\s*(.+)/s)?.[1]?.trim();
113
+ findings.push({
114
+ id: `openai-compatible-${findings.length}`,
115
+ source: 'review-engine',
116
+ severity,
117
+ category: 'openai-compatible-review',
118
+ file: '<unspecified>',
119
+ message: title,
120
+ suggestion,
121
+ protectedPath: false,
122
+ createdAt: new Date().toISOString(),
123
+ });
124
+ }
125
+ return findings;
126
+ }
@@ -4,7 +4,7 @@ import type { Finding } from '../../core/findings/types.ts';
4
4
  export interface ReviewInput {
5
5
  content: string;
6
6
  kind: 'spec' | 'pr-diff' | 'file-batch';
7
- context?: { spec?: string; plan?: string; stack?: string };
7
+ context?: { spec?: string; plan?: string; stack?: string; cwd?: string };
8
8
  }
9
9
 
10
10
  export interface ReviewOutput {
package/src/cli/run.ts CHANGED
@@ -112,9 +112,10 @@ export async function runCommand(options: RunCommandOptions = {}): Promise<numbe
112
112
  let reviewEngine: ReviewEngine | undefined;
113
113
  if (config.reviewEngine) {
114
114
  const ref = typeof config.reviewEngine === 'string' ? config.reviewEngine : config.reviewEngine.adapter;
115
- const hasAnyKey = !!(process.env.ANTHROPIC_API_KEY || process.env.OPENAI_API_KEY);
116
- if (!hasAnyKey && (ref === 'auto' || ref === 'claude' || ref === 'codex')) {
117
- console.log(fmt('yellow', '\n [run] No LLM API key found — set ANTHROPIC_API_KEY or OPENAI_API_KEY to enable review'));
115
+ const hasAnyKey = !!(process.env.ANTHROPIC_API_KEY || process.env.GEMINI_API_KEY ||
116
+ process.env.GOOGLE_API_KEY || process.env.OPENAI_API_KEY || process.env.GROQ_API_KEY);
117
+ if (!hasAnyKey && ['auto', 'claude', 'gemini', 'codex', 'openai-compatible'].includes(ref)) {
118
+ console.log(fmt('yellow', '\n [run] No LLM API key found — set ANTHROPIC_API_KEY, GEMINI_API_KEY, OPENAI_API_KEY, or GROQ_API_KEY to enable review'));
118
119
  } else {
119
120
  try {
120
121
  reviewEngine = await loadAdapter<ReviewEngine>({
@@ -0,0 +1,74 @@
1
+ import * as fs from 'node:fs';
2
+ import * as path from 'node:path';
3
+
4
+ export type Provider = 'anthropic' | 'gemini' | 'openai' | 'groq';
5
+
6
+ const PROVIDER_PATTERNS: Record<Provider, RegExp> = {
7
+ anthropic: /ANTHROPIC_API_KEY|@anthropic-ai\/sdk|anthropic\.com|claude-[a-z0-9]/gi,
8
+ gemini: /GEMINI_API_KEY|GOOGLE_API_KEY|@google\/generative-ai|generativelanguage\.googleapis/gi,
9
+ openai: /OPENAI_API_KEY|openai\.com|gpt-[0-9]/gi,
10
+ groq: /GROQ_API_KEY|api\.groq\.com/gi,
11
+ };
12
+
13
+ const SOURCE_EXTENSIONS = new Set(['.ts', '.tsx', '.js', '.jsx', '.mjs', '.cjs', '.py', '.go', '.rb']);
14
+
15
+ const SKIP_DIRS = new Set(['node_modules', '.git', 'dist', 'build', '.next', '.nuxt', 'out',
16
+ 'coverage', '__pycache__', '.venv', 'venv', 'target', '.gradle', '.cache', '.turbo']);
17
+
18
+ function walkSync(dir: string, files: string[] = []): string[] {
19
+ let entries: fs.Dirent[];
20
+ try {
21
+ entries = fs.readdirSync(dir, { withFileTypes: true });
22
+ } catch {
23
+ return files;
24
+ }
25
+ for (const entry of entries) {
26
+ if (SKIP_DIRS.has(entry.name)) continue;
27
+ const full = path.join(dir, entry.name);
28
+ if (entry.isDirectory()) {
29
+ walkSync(full, files);
30
+ } else if (entry.isFile() && SOURCE_EXTENSIONS.has(path.extname(entry.name))) {
31
+ files.push(full);
32
+ }
33
+ }
34
+ return files;
35
+ }
36
+
37
+ export interface ProviderCounts {
38
+ anthropic: number;
39
+ gemini: number;
40
+ openai: number;
41
+ groq: number;
42
+ }
43
+
44
+ /**
45
+ * Scans source files under `cwd` and returns per-provider match counts.
46
+ * Counts are capped at 1 per file to avoid skewing on generated lock files.
47
+ */
48
+ export function detectProviderUsage(cwd: string): ProviderCounts {
49
+ const counts: ProviderCounts = { anthropic: 0, gemini: 0, openai: 0, groq: 0 };
50
+ const files = walkSync(cwd);
51
+ for (const file of files) {
52
+ let content: string;
53
+ try {
54
+ content = fs.readFileSync(file, 'utf8');
55
+ } catch {
56
+ continue;
57
+ }
58
+ for (const [provider, pattern] of Object.entries(PROVIDER_PATTERNS) as [Provider, RegExp][]) {
59
+ pattern.lastIndex = 0;
60
+ if (pattern.test(content)) counts[provider]++;
61
+ }
62
+ }
63
+ return counts;
64
+ }
65
+
66
+ /**
67
+ * Returns the provider with the highest usage count, or null if all zero.
68
+ */
69
+ export function dominantProvider(counts: ProviderCounts): Provider | null {
70
+ const entries = Object.entries(counts) as [Provider, number][];
71
+ const max = Math.max(...entries.map(([, v]) => v));
72
+ if (max === 0) return null;
73
+ return entries.find(([, v]) => v === max)![0];
74
+ }
@@ -49,7 +49,7 @@ export async function runReviewPhase(input: ReviewPhaseInput): Promise<ReviewPha
49
49
  const output = await input.engine.review({
50
50
  content: chunk.content,
51
51
  kind: chunk.kind,
52
- context: { stack: input.config.stack },
52
+ context: { stack: input.config.stack, cwd: input.cwd },
53
53
  });
54
54
  allFindings.push(...output.findings);
55
55
  if (output.usage) {