@delegance/claude-autopilot 1.4.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@delegance/claude-autopilot",
3
- "version": "1.4.0",
3
+ "version": "1.5.0",
4
4
  "type": "module",
5
5
  "description": "Claude Code automation pipeline: spec → plan → implement → validate → PR",
6
6
  "keywords": [
@@ -46,6 +46,7 @@
46
46
  },
47
47
  "dependencies": {
48
48
  "@anthropic-ai/sdk": "^0.90.0",
49
+ "@google/generative-ai": "^0.24.1",
49
50
  "ajv": "^8",
50
51
  "dotenv": ">=16",
51
52
  "js-yaml": "^4",
(file header missing in diff — hunk below appears to modify the adapter-registry module defining BUILTIN_PATHS)
@@ -16,6 +16,8 @@ const BUILTIN_PATHS: Record<IntegrationPoint, Record<string, string>> = {
16
16
  'review-engine': {
17
17
  codex: './review-engine/codex.ts',
18
18
  claude: './review-engine/claude.ts',
19
+ gemini: './review-engine/gemini.ts',
20
+ 'openai-compatible': './review-engine/openai-compatible.ts',
19
21
  auto: './review-engine/auto.ts',
20
22
  },
21
23
  'vcs-host': { github: './vcs-host/github.ts' },
(file header missing in diff — hunk below appears to modify the 'auto' review-engine resolver, ./review-engine/auto.ts)
@@ -2,19 +2,42 @@ import type { Capabilities } from '../base.ts';
2
2
  import type { ReviewEngine, ReviewInput, ReviewOutput } from './types.ts';
3
3
  import { AutopilotError } from '../../core/errors.ts';
4
4
 
5
- // Priority order: ANTHROPIC_API_KEY claude, OPENAI_API_KEY → codex
5
+ // Priority order for key detection
6
6
  async function resolveAdapter(): Promise<ReviewEngine> {
7
7
  if (process.env.ANTHROPIC_API_KEY) {
8
8
  const { claudeAdapter } = await import('./claude.ts');
9
9
  return claudeAdapter;
10
10
  }
11
+ if (process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY) {
12
+ const { geminiAdapter } = await import('./gemini.ts');
13
+ return geminiAdapter;
14
+ }
11
15
  if (process.env.OPENAI_API_KEY) {
12
16
  const { codexAdapter } = await import('./codex.ts');
13
17
  return codexAdapter;
14
18
  }
19
+ if (process.env.GROQ_API_KEY) {
20
+ const { openaiCompatibleAdapter } = await import('./openai-compatible.ts');
21
+ // Wrap with Groq config injected into review() context
22
+ return {
23
+ ...openaiCompatibleAdapter,
24
+ name: 'auto',
25
+ review(input: ReviewInput) {
26
+ return openaiCompatibleAdapter.review({
27
+ ...input,
28
+ context: {
29
+ ...input.context,
30
+ model: 'llama-3.3-70b-versatile',
31
+ baseUrl: 'https://api.groq.com/openai/v1',
32
+ apiKeyEnv: 'GROQ_API_KEY',
33
+ } as typeof input.context,
34
+ });
35
+ },
36
+ };
37
+ }
15
38
  throw new AutopilotError(
16
- 'No LLM API key found set ANTHROPIC_API_KEY (recommended) or OPENAI_API_KEY to enable review',
17
- { code: 'auth', provider: 'auto' }
39
+ 'No LLM API key found. Set one of: ANTHROPIC_API_KEY, GEMINI_API_KEY, OPENAI_API_KEY, GROQ_API_KEY',
40
+ { code: 'auth', provider: 'auto' },
18
41
  );
19
42
  }
20
43
 
(file header missing in diff — new file added: the Gemini review-engine adapter, ./review-engine/gemini.ts)
@@ -0,0 +1,131 @@
1
+ import { GoogleGenerativeAI } from '@google/generative-ai';
2
+ import type { Finding } from '../../core/findings/types.ts';
3
+ import { AutopilotError } from '../../core/errors.ts';
4
+ import type { Capabilities } from '../base.ts';
5
+ import type { ReviewEngine, ReviewInput, ReviewOutput } from './types.ts';
6
+
7
+ const DEFAULT_MODEL = 'gemini-2.5-pro-preview-05-06';
8
+ const MAX_OUTPUT_TOKENS = 4096;
9
+
10
+ // Cost per million tokens (USD) — gemini-2.5-pro pricing (<200k context)
11
+ const COST_PER_M_INPUT = 1.25;
12
+ const COST_PER_M_OUTPUT = 10.0;
13
+
14
+ const PROMPT_TEMPLATE = `You are a senior software architect reviewing code changes for quality, security, and correctness.
15
+
16
+ The codebase context:
17
+ {STACK}
18
+
19
+ Please review the following:
20
+
21
+ ---
22
+
23
+ {CONTENT}
24
+
25
+ ---
26
+
27
+ Provide structured feedback in exactly this format:
28
+
29
+ ## Review Summary
30
+ One paragraph overall assessment.
31
+
32
+ ## Findings
33
+
34
+ For each finding, use this format:
35
+ ### [CRITICAL|WARNING|NOTE] <short title>
36
+ <explanation>
37
+ **Suggestion:** <actionable fix>
38
+
39
+ Rules:
40
+ - CRITICAL: Blocks merge (security issues, data loss risks, broken contracts)
41
+ - WARNING: Should address before merging (logic errors, missing error handling, test gaps)
42
+ - NOTE: Improvement suggestion (style, performance, clarity)
43
+ - Maximum 10 findings, ranked by severity
44
+ - Be specific and constructive
45
+ - Reference the file and line when possible`;
46
+
47
+ export const geminiAdapter: ReviewEngine = {
48
+ name: 'gemini',
49
+ apiVersion: '1.0.0',
50
+
51
+ getCapabilities(): Capabilities {
52
+ return { structuredOutput: false, streaming: false, maxContextTokens: 1000000, inlineComments: false };
53
+ },
54
+
55
+ estimateTokens(content: string): number {
56
+ return Math.ceil(content.length / 4);
57
+ },
58
+
59
+ async review(input: ReviewInput): Promise<ReviewOutput> {
60
+ const apiKey = process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY;
61
+ if (!apiKey) {
62
+ throw new AutopilotError('GEMINI_API_KEY (or GOOGLE_API_KEY) not set', { code: 'auth', provider: 'gemini' });
63
+ }
64
+
65
+ const model = (input.context as Record<string, unknown> | undefined)?.['model'] as string | undefined ?? DEFAULT_MODEL;
66
+ const stack = input.context?.stack ?? 'A web application — stack details unspecified.';
67
+ const prompt = PROMPT_TEMPLATE.replace('{STACK}', stack).replace('{CONTENT}', input.content);
68
+
69
+ const genAI = new GoogleGenerativeAI(apiKey);
70
+ const genModel = genAI.getGenerativeModel({
71
+ model,
72
+ generationConfig: { maxOutputTokens: MAX_OUTPUT_TOKENS },
73
+ });
74
+
75
+ let result: Awaited<ReturnType<typeof genModel.generateContent>>;
76
+ try {
77
+ result = await genModel.generateContent(prompt);
78
+ } catch (err) {
79
+ const message = err instanceof Error ? err.message : String(err);
80
+ const isRateLimit = /rate.limit|429|quota/i.test(message);
81
+ const isAuth = /api.key|unauthorized|403/i.test(message);
82
+ throw new AutopilotError(`Gemini review call failed: ${message}`, {
83
+ code: isAuth ? 'auth' : isRateLimit ? 'rate_limit' : 'transient_network',
84
+ provider: 'gemini',
85
+ retryable: isRateLimit,
86
+ });
87
+ }
88
+
89
+ const rawOutput = result.response.text();
90
+ const usage = result.response.usageMetadata;
91
+ const costUSD = usage
92
+ ? (usage.promptTokenCount / 1_000_000) * COST_PER_M_INPUT +
93
+ (usage.candidatesTokenCount / 1_000_000) * COST_PER_M_OUTPUT
94
+ : undefined;
95
+
96
+ return {
97
+ findings: parseGeminiOutput(rawOutput),
98
+ rawOutput,
99
+ usage: usage
100
+ ? { input: usage.promptTokenCount, output: usage.candidatesTokenCount, costUSD }
101
+ : undefined,
102
+ };
103
+ },
104
+ };
105
+
106
+ export default geminiAdapter;
107
+
108
+ function parseGeminiOutput(output: string): Finding[] {
109
+ const findings: Finding[] = [];
110
+ const regex = /### \[(CRITICAL|WARNING|NOTE)\]\s*(.+?)(?=\n### \[|## Review Summary|$)/gs;
111
+ let match: RegExpExecArray | null;
112
+ while ((match = regex.exec(output)) !== null) {
113
+ const severity = match[1]!.toLowerCase() as Finding['severity'];
114
+ const body = match[2]!.trim();
115
+ const titleEnd = body.indexOf('\n');
116
+ const title = (titleEnd > 0 ? body.slice(0, titleEnd) : body).trim();
117
+ const suggestion = body.match(/\*\*Suggestion:\*\*\s*(.+)/s)?.[1]?.trim();
118
+ findings.push({
119
+ id: `gemini-${findings.length}`,
120
+ source: 'review-engine',
121
+ severity,
122
+ category: 'gemini-review',
123
+ file: '<unspecified>',
124
+ message: title,
125
+ suggestion,
126
+ protectedPath: false,
127
+ createdAt: new Date().toISOString(),
128
+ });
129
+ }
130
+ return findings;
131
+ }
(file header missing in diff — new file added: the OpenAI-compatible review-engine adapter, ./review-engine/openai-compatible.ts)
@@ -0,0 +1,126 @@
1
+ import OpenAI from 'openai';
2
+ import type { Finding } from '../../core/findings/types.ts';
3
+ import { AutopilotError } from '../../core/errors.ts';
4
+ import type { Capabilities } from '../base.ts';
5
+ import type { ReviewEngine, ReviewInput, ReviewOutput } from './types.ts';
6
+
7
+ const MAX_OUTPUT_TOKENS = 4096;
8
+
9
+ const SYSTEM_PROMPT_TEMPLATE = `You are a senior software architect reviewing code changes for quality, security, and correctness.
10
+
11
+ The codebase context:
12
+ {STACK}
13
+
14
+ Provide structured feedback in exactly this format:
15
+
16
+ ## Review Summary
17
+ One paragraph overall assessment.
18
+
19
+ ## Findings
20
+
21
+ For each finding, use this format:
22
+ ### [CRITICAL|WARNING|NOTE] <short title>
23
+ <explanation>
24
+ **Suggestion:** <actionable fix>
25
+
26
+ Rules:
27
+ - CRITICAL: Blocks merge (security issues, data loss risks, broken contracts)
28
+ - WARNING: Should address before merging (logic errors, missing error handling, test gaps)
29
+ - NOTE: Improvement suggestion (style, performance, clarity)
30
+ - Maximum 10 findings, ranked by severity
31
+ - Be specific and constructive
32
+ - Reference the file and line when possible`;
33
+
34
+ export const openaiCompatibleAdapter: ReviewEngine = {
35
+ name: 'openai-compatible',
36
+ apiVersion: '1.0.0',
37
+
38
+ getCapabilities(): Capabilities {
39
+ return { structuredOutput: false, streaming: false, maxContextTokens: 128000, inlineComments: false };
40
+ },
41
+
42
+ estimateTokens(content: string): number {
43
+ return Math.ceil(content.length / 4);
44
+ },
45
+
46
+ async review(input: ReviewInput): Promise<ReviewOutput> {
47
+ const opts = (input.context as Record<string, unknown> | undefined) ?? {};
48
+
49
+ // API key: options.apiKey → named env var → OPENAI_API_KEY
50
+ const apiKeyEnv = (opts['apiKeyEnv'] as string | undefined) ?? 'OPENAI_API_KEY';
51
+ const apiKey = (opts['apiKey'] as string | undefined) ?? process.env[apiKeyEnv] ?? 'ollama';
52
+
53
+ const baseURL = (opts['baseUrl'] as string | undefined) ??
54
+ process.env.OPENAI_BASE_URL ??
55
+ undefined;
56
+
57
+ const model = opts['model'] as string | undefined;
58
+ if (!model) {
59
+ throw new AutopilotError(
60
+ 'openai-compatible adapter requires options.model to be set in autopilot.config.yaml',
61
+ { code: 'invalid_config', provider: 'openai-compatible' },
62
+ );
63
+ }
64
+
65
+ const stack = input.context?.stack ?? 'A web application — stack details unspecified.';
66
+ const systemPrompt = SYSTEM_PROMPT_TEMPLATE.replace('{STACK}', stack);
67
+ const client = new OpenAI({ apiKey, ...(baseURL ? { baseURL } : {}) });
68
+
69
+ let response: OpenAI.Chat.ChatCompletion;
70
+ try {
71
+ response = await client.chat.completions.create({
72
+ model,
73
+ max_tokens: MAX_OUTPUT_TOKENS,
74
+ messages: [
75
+ { role: 'system', content: systemPrompt },
76
+ { role: 'user', content: `Please review the following:\n\n---\n\n${input.content}` },
77
+ ],
78
+ });
79
+ } catch (err) {
80
+ const message = err instanceof Error ? err.message : String(err);
81
+ const isRateLimit = /rate.limit|429/i.test(message);
82
+ const isAuth = /unauthorized|401|invalid.api.key/i.test(message);
83
+ throw new AutopilotError(`openai-compatible review call failed: ${message}`, {
84
+ code: isAuth ? 'auth' : isRateLimit ? 'rate_limit' : 'transient_network',
85
+ provider: 'openai-compatible',
86
+ retryable: isRateLimit,
87
+ });
88
+ }
89
+
90
+ const rawOutput = response.choices[0]?.message.content ?? '';
91
+ return {
92
+ findings: parseOutput(rawOutput),
93
+ rawOutput,
94
+ usage: response.usage
95
+ ? { input: response.usage.prompt_tokens, output: response.usage.completion_tokens }
96
+ : undefined,
97
+ };
98
+ },
99
+ };
100
+
101
+ export default openaiCompatibleAdapter;
102
+
103
+ function parseOutput(output: string): Finding[] {
104
+ const findings: Finding[] = [];
105
+ const regex = /### \[(CRITICAL|WARNING|NOTE)\]\s*(.+?)(?=\n### \[|## Review Summary|$)/gs;
106
+ let match: RegExpExecArray | null;
107
+ while ((match = regex.exec(output)) !== null) {
108
+ const severity = match[1]!.toLowerCase() as Finding['severity'];
109
+ const body = match[2]!.trim();
110
+ const titleEnd = body.indexOf('\n');
111
+ const title = (titleEnd > 0 ? body.slice(0, titleEnd) : body).trim();
112
+ const suggestion = body.match(/\*\*Suggestion:\*\*\s*(.+)/s)?.[1]?.trim();
113
+ findings.push({
114
+ id: `openai-compatible-${findings.length}`,
115
+ source: 'review-engine',
116
+ severity,
117
+ category: 'openai-compatible-review',
118
+ file: '<unspecified>',
119
+ message: title,
120
+ suggestion,
121
+ protectedPath: false,
122
+ createdAt: new Date().toISOString(),
123
+ });
124
+ }
125
+ return findings;
126
+ }
package/src/cli/run.ts CHANGED
@@ -112,9 +112,10 @@ export async function runCommand(options: RunCommandOptions = {}): Promise<numbe
112
112
  let reviewEngine: ReviewEngine | undefined;
113
113
  if (config.reviewEngine) {
114
114
  const ref = typeof config.reviewEngine === 'string' ? config.reviewEngine : config.reviewEngine.adapter;
115
- const hasAnyKey = !!(process.env.ANTHROPIC_API_KEY || process.env.OPENAI_API_KEY);
116
- if (!hasAnyKey && (ref === 'auto' || ref === 'claude' || ref === 'codex')) {
117
- console.log(fmt('yellow', '\n [run] No LLM API key found — set ANTHROPIC_API_KEY or OPENAI_API_KEY to enable review'));
115
+ const hasAnyKey = !!(process.env.ANTHROPIC_API_KEY || process.env.GEMINI_API_KEY ||
116
+ process.env.GOOGLE_API_KEY || process.env.OPENAI_API_KEY || process.env.GROQ_API_KEY);
117
+ if (!hasAnyKey && ['auto', 'claude', 'gemini', 'codex', 'openai-compatible'].includes(ref)) {
118
+ console.log(fmt('yellow', '\n [run] No LLM API key found — set ANTHROPIC_API_KEY, GEMINI_API_KEY, OPENAI_API_KEY, or GROQ_API_KEY to enable review'));
118
119
  } else {
119
120
  try {
120
121
  reviewEngine = await loadAdapter<ReviewEngine>({