@elvatis_com/openclaw-cli-bridge-elvatis 2.7.3 → 2.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -1
- package/SKILL.md +1 -1
- package/openclaw.plugin.json +5 -3
- package/package.json +2 -1
- package/src/config.ts +4 -1
- package/src/gemini-api-runner.ts +470 -0
- package/src/proxy-server.ts +96 -0
- package/test/config.test.ts +4 -1
- package/test/gemini-api-proxy.test.ts +181 -0
- package/test/gemini-api-runner.test.ts +134 -0
package/README.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
> OpenClaw plugin that bridges locally installed AI CLIs (Codex, Gemini, Claude Code, OpenCode, Pi) as model providers — with slash commands for instant model switching, restore, health testing, and model listing.
|
|
4
4
|
|
|
5
|
-
**Current version:** `2.7.3`
|
|
5
|
+
**Current version:** `2.8.1`
|
|
6
6
|
|
|
7
7
|
---
|
|
8
8
|
|
|
@@ -406,6 +406,17 @@ npm run ci # lint + typecheck + test
|
|
|
406
406
|
|
|
407
407
|
## Changelog
|
|
408
408
|
|
|
409
|
+
### v2.8.1
|
|
410
|
+
- **fix:** increase Sonnet-4-6 base timeout from 180s to 300s to prevent premature SIGTERM kills causing FailoverError fallback to gpt-5.2-codex
|
|
411
|
+
|
|
412
|
+
### v2.8.0
|
|
413
|
+
- **feat:** Gemini API provider (`gemini-api/gemini-2.5-flash`, `gemini-api/gemini-2.5-pro`) — direct Google Generative AI SDK integration with native **image generation** support via `responseModalities: ["TEXT", "IMAGE"]`. No CLI subprocess overhead, no browser needed.
|
|
414
|
+
- **feat:** Images returned as base64 data URIs in OpenAI-compatible `content_parts` format — works with OpenClaw multimodal rendering
|
|
415
|
+
- **feat:** Native Gemini tool calling — converts OpenAI tool format to Gemini `functionDeclarations`, parses `functionCall` responses back to `tool_calls`
|
|
416
|
+
- **feat:** Real token usage from Gemini API (no estimation needed)
|
|
417
|
+
- **config:** API key via `GOOGLE_API_KEY` env var or `~/.openclaw/.env`
|
|
418
|
+
- **test:** 17 new tests — message conversion, tool conversion, proxy routing, streaming (278 total)
|
|
419
|
+
|
|
409
420
|
### v2.7.3
|
|
410
421
|
- **fix:** Gemini image generation timeouts — Gemini Pro models bumped from 180s → 300s base timeout, Flash models from 90s → 180s. Image generation needs significantly more time than text completion.
|
|
411
422
|
- **tune:** Per-tool timeout bonus increased from 5s → 7s per tool definition (21 tools = 147s instead of 105s)
|
package/SKILL.md
CHANGED
package/openclaw.plugin.json
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
"id": "openclaw-cli-bridge-elvatis",
|
|
3
3
|
"slug": "openclaw-cli-bridge-elvatis",
|
|
4
4
|
"name": "OpenClaw CLI Bridge",
|
|
5
|
-
"version": "2.7.3",
|
|
5
|
+
"version": "2.8.1",
|
|
6
6
|
"license": "MIT",
|
|
7
7
|
"description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
|
|
8
8
|
"providers": [
|
|
@@ -44,7 +44,7 @@
|
|
|
44
44
|
},
|
|
45
45
|
"default": {
|
|
46
46
|
"cli-claude/claude-opus-4-6": 300000,
|
|
47
|
-
"cli-claude/claude-sonnet-4-6": 180000,
|
|
47
|
+
"cli-claude/claude-sonnet-4-6": 300000,
|
|
48
48
|
"cli-claude/claude-haiku-4-5": 90000,
|
|
49
49
|
"cli-gemini/gemini-2.5-pro": 300000,
|
|
50
50
|
"cli-gemini/gemini-2.5-flash": 180000,
|
|
@@ -52,7 +52,9 @@
|
|
|
52
52
|
"cli-gemini/gemini-3-flash-preview": 180000,
|
|
53
53
|
"openai-codex/gpt-5.4": 300000,
|
|
54
54
|
"openai-codex/gpt-5.3-codex": 180000,
|
|
55
|
-
"openai-codex/gpt-5.1-codex-mini": 90000
|
|
55
|
+
"openai-codex/gpt-5.1-codex-mini": 90000,
|
|
56
|
+
"gemini-api/gemini-2.5-pro": 300000,
|
|
57
|
+
"gemini-api/gemini-2.5-flash": 180000
|
|
56
58
|
}
|
|
57
59
|
}
|
|
58
60
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@elvatis_com/openclaw-cli-bridge-elvatis",
|
|
3
|
-
"version": "2.7.3",
|
|
3
|
+
"version": "2.8.1",
|
|
4
4
|
"description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"openclaw": {
|
|
@@ -24,6 +24,7 @@
|
|
|
24
24
|
"vitest": "^4.0.18"
|
|
25
25
|
},
|
|
26
26
|
"dependencies": {
|
|
27
|
+
"@google/genai": "^1.49.0",
|
|
27
28
|
"playwright": "^1.58.2"
|
|
28
29
|
},
|
|
29
30
|
"license": "Apache-2.0"
|
package/src/config.ts
CHANGED
|
@@ -92,7 +92,7 @@ export const PROVIDER_SESSION_SWEEP_MS = 10 * 60 * 1_000; // 10 min
|
|
|
92
92
|
*/
|
|
93
93
|
export const DEFAULT_MODEL_TIMEOUTS: Record<string, number> = {
|
|
94
94
|
"cli-claude/claude-opus-4-6": 300_000, // 5 min
|
|
95
|
-
"cli-claude/claude-sonnet-4-6": 180_000, // 3 min
|
|
95
|
+
"cli-claude/claude-sonnet-4-6": 300_000, // 5 min — match idleTimeoutSeconds
|
|
96
96
|
"cli-claude/claude-haiku-4-5": 90_000, // 90s
|
|
97
97
|
"cli-gemini/gemini-2.5-pro": 300_000, // 5 min — image generation needs more time
|
|
98
98
|
"cli-gemini/gemini-2.5-flash": 180_000, // 3 min
|
|
@@ -101,6 +101,8 @@ export const DEFAULT_MODEL_TIMEOUTS: Record<string, number> = {
|
|
|
101
101
|
"openai-codex/gpt-5.4": 300_000,
|
|
102
102
|
"openai-codex/gpt-5.3-codex": 180_000,
|
|
103
103
|
"openai-codex/gpt-5.1-codex-mini": 90_000,
|
|
104
|
+
"gemini-api/gemini-2.5-pro": 300_000, // 5 min — image generation needs time
|
|
105
|
+
"gemini-api/gemini-2.5-flash": 180_000, // 3 min
|
|
104
106
|
};
|
|
105
107
|
|
|
106
108
|
// ──────────────────────────────────────────────────────────────────────────────
|
|
@@ -116,6 +118,7 @@ export const DEFAULT_MODEL_FALLBACKS: Record<string, string> = {
|
|
|
116
118
|
"cli-gemini/gemini-3-pro-preview": "cli-gemini/gemini-3-flash-preview",
|
|
117
119
|
"cli-claude/claude-opus-4-6": "cli-claude/claude-sonnet-4-6",
|
|
118
120
|
"cli-claude/claude-sonnet-4-6": "cli-claude/claude-haiku-4-5",
|
|
121
|
+
"gemini-api/gemini-2.5-pro": "gemini-api/gemini-2.5-flash",
|
|
119
122
|
};
|
|
120
123
|
|
|
121
124
|
// ──────────────────────────────────────────────────────────────────────────────
|
|
@@ -0,0 +1,470 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* gemini-api-runner.ts
|
|
3
|
+
*
|
|
4
|
+
* Direct Gemini API integration via @google/genai SDK.
|
|
5
|
+
* Supports native text + image generation (responseModalities: ["TEXT", "IMAGE"]).
|
|
6
|
+
*
|
|
7
|
+
* Unlike CLI runners, this calls the Gemini API directly — no subprocess overhead.
|
|
8
|
+
* Images are returned as base64 data URIs in OpenAI-compatible content_parts format.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { GoogleGenAI } from "@google/genai";
|
|
12
|
+
import { readFileSync } from "node:fs";
|
|
13
|
+
import { join } from "node:path";
|
|
14
|
+
import { homedir } from "node:os";
|
|
15
|
+
import type { ToolDefinition, ToolCall } from "./tool-protocol.js";
|
|
16
|
+
import { generateCallId } from "./tool-protocol.js";
|
|
17
|
+
import type { ChatMessage } from "./cli-runner.js";
|
|
18
|
+
|
|
19
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
20
|
+
// Types
|
|
21
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
22
|
+
|
|
23
|
+
export interface ContentPart {
|
|
24
|
+
type: "text" | "image_url";
|
|
25
|
+
text?: string;
|
|
26
|
+
image_url?: { url: string };
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
export interface GeminiApiResult {
|
|
30
|
+
/** String for text-only, array for multimodal (text + images) */
|
|
31
|
+
content: string | ContentPart[];
|
|
32
|
+
finishReason: string;
|
|
33
|
+
promptTokens?: number;
|
|
34
|
+
completionTokens?: number;
|
|
35
|
+
tool_calls?: ToolCall[];
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
39
|
+
// API Key resolution
|
|
40
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
41
|
+
|
|
42
|
+
let cachedApiKey: string | null = null;
|
|
43
|
+
|
|
44
|
+
export function getApiKey(): string {
|
|
45
|
+
if (cachedApiKey) return cachedApiKey;
|
|
46
|
+
|
|
47
|
+
// 1. Environment variable
|
|
48
|
+
if (process.env.GOOGLE_API_KEY) {
|
|
49
|
+
cachedApiKey = process.env.GOOGLE_API_KEY;
|
|
50
|
+
return cachedApiKey;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
// 2. Read from OpenClaw .env file
|
|
54
|
+
const envPath = join(homedir(), ".openclaw", ".env");
|
|
55
|
+
try {
|
|
56
|
+
const envContent = readFileSync(envPath, "utf-8");
|
|
57
|
+
const match = envContent.match(/^GOOGLE_API_KEY=(.+)$/m);
|
|
58
|
+
if (match?.[1]) {
|
|
59
|
+
cachedApiKey = match[1].trim();
|
|
60
|
+
return cachedApiKey;
|
|
61
|
+
}
|
|
62
|
+
} catch {
|
|
63
|
+
// File not found — fall through
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
throw new Error(
|
|
67
|
+
"GOOGLE_API_KEY not found. Set it as an environment variable or add it to ~/.openclaw/.env"
|
|
68
|
+
);
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
/** Reset cached key (for testing). */
|
|
72
|
+
export function _resetApiKeyCache(): void {
|
|
73
|
+
cachedApiKey = null;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
77
|
+
// Singleton client
|
|
78
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
79
|
+
|
|
80
|
+
let client: GoogleGenAI | null = null;
|
|
81
|
+
|
|
82
|
+
function getClient(): GoogleGenAI {
|
|
83
|
+
if (!client) {
|
|
84
|
+
client = new GoogleGenAI({ apiKey: getApiKey() });
|
|
85
|
+
}
|
|
86
|
+
return client;
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
/** Reset client (for testing). */
|
|
90
|
+
export function _resetClient(): void {
|
|
91
|
+
client = null;
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
95
|
+
// Message conversion: OpenAI → Gemini
|
|
96
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
97
|
+
|
|
98
|
+
interface GeminiContent {
|
|
99
|
+
role: "user" | "model";
|
|
100
|
+
parts: GeminiPart[];
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
type GeminiPart =
|
|
104
|
+
| { text: string }
|
|
105
|
+
| { inlineData: { mimeType: string; data: string } }
|
|
106
|
+
| { functionCall: { name: string; args: Record<string, unknown> } }
|
|
107
|
+
| { functionResponse: { name: string; response: Record<string, unknown> } };
|
|
108
|
+
|
|
109
|
+
/**
|
|
110
|
+
* Convert OpenAI-format messages to Gemini API format.
|
|
111
|
+
* System messages → systemInstruction. Tool results → functionResponse parts.
|
|
112
|
+
*/
|
|
113
|
+
export function convertMessages(messages: ChatMessage[]): {
|
|
114
|
+
systemInstruction?: { parts: Array<{ text: string }> };
|
|
115
|
+
contents: GeminiContent[];
|
|
116
|
+
} {
|
|
117
|
+
const systemParts: Array<{ text: string }> = [];
|
|
118
|
+
const contents: GeminiContent[] = [];
|
|
119
|
+
|
|
120
|
+
for (const msg of messages) {
|
|
121
|
+
const role = msg.role;
|
|
122
|
+
|
|
123
|
+
if (role === "system") {
|
|
124
|
+
const text = typeof msg.content === "string"
|
|
125
|
+
? msg.content
|
|
126
|
+
: Array.isArray(msg.content)
|
|
127
|
+
? msg.content.map((p: Record<string, unknown>) => String(p.text ?? "")).join("\n")
|
|
128
|
+
: "";
|
|
129
|
+
if (text) systemParts.push({ text });
|
|
130
|
+
continue;
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
if (role === "tool") {
|
|
134
|
+
// Tool result → functionResponse
|
|
135
|
+
const toolMsg = msg as ChatMessage & { tool_call_id?: string; name?: string };
|
|
136
|
+
const name = toolMsg.name ?? toolMsg.tool_call_id ?? "unknown";
|
|
137
|
+
const responseText = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
|
|
138
|
+
let responseObj: Record<string, unknown>;
|
|
139
|
+
try {
|
|
140
|
+
responseObj = JSON.parse(responseText);
|
|
141
|
+
if (typeof responseObj !== "object" || responseObj === null) {
|
|
142
|
+
responseObj = { result: responseText };
|
|
143
|
+
}
|
|
144
|
+
} catch {
|
|
145
|
+
responseObj = { result: responseText };
|
|
146
|
+
}
|
|
147
|
+
contents.push({
|
|
148
|
+
role: "user",
|
|
149
|
+
parts: [{ functionResponse: { name, response: responseObj } }],
|
|
150
|
+
});
|
|
151
|
+
continue;
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
// assistant → model, user → user
|
|
155
|
+
const geminiRole: "user" | "model" = role === "assistant" ? "model" : "user";
|
|
156
|
+
const parts: GeminiPart[] = [];
|
|
157
|
+
|
|
158
|
+
// Handle tool_calls in assistant messages
|
|
159
|
+
const assistantMsg = msg as ChatMessage & { tool_calls?: Array<{ function: { name: string; arguments: string } }> };
|
|
160
|
+
if (assistantMsg.tool_calls?.length) {
|
|
161
|
+
for (const tc of assistantMsg.tool_calls) {
|
|
162
|
+
let args: Record<string, unknown> = {};
|
|
163
|
+
try { args = JSON.parse(tc.function.arguments); } catch { /* empty */ }
|
|
164
|
+
parts.push({ functionCall: { name: tc.function.name, args } });
|
|
165
|
+
}
|
|
166
|
+
if (typeof msg.content === "string" && msg.content) {
|
|
167
|
+
parts.unshift({ text: msg.content });
|
|
168
|
+
}
|
|
169
|
+
contents.push({ role: geminiRole, parts });
|
|
170
|
+
continue;
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
// Regular content
|
|
174
|
+
if (typeof msg.content === "string") {
|
|
175
|
+
if (msg.content) parts.push({ text: msg.content });
|
|
176
|
+
} else if (Array.isArray(msg.content)) {
|
|
177
|
+
for (const part of msg.content as Array<Record<string, unknown>>) {
|
|
178
|
+
if (part.type === "text" && typeof part.text === "string") {
|
|
179
|
+
parts.push({ text: part.text });
|
|
180
|
+
} else if (part.type === "image_url") {
|
|
181
|
+
const imageUrl = part.image_url as { url?: string } | undefined;
|
|
182
|
+
const url = imageUrl?.url ?? "";
|
|
183
|
+
const match = url.match(/^data:(image\/[\w+]+);base64,(.+)$/);
|
|
184
|
+
if (match) {
|
|
185
|
+
parts.push({ inlineData: { mimeType: match[1], data: match[2] } });
|
|
186
|
+
} else if (url) {
|
|
187
|
+
parts.push({ text: `[Image: ${url}]` });
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
}
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
if (parts.length > 0) {
|
|
194
|
+
contents.push({ role: geminiRole, parts });
|
|
195
|
+
}
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
return {
|
|
199
|
+
systemInstruction: systemParts.length > 0 ? { parts: systemParts } : undefined,
|
|
200
|
+
contents,
|
|
201
|
+
};
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
205
|
+
// Tool conversion: OpenAI → Gemini
|
|
206
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
207
|
+
|
|
208
|
+
export function convertTools(tools: ToolDefinition[]): Array<{
|
|
209
|
+
functionDeclarations: Array<{
|
|
210
|
+
name: string;
|
|
211
|
+
description: string;
|
|
212
|
+
parameters: Record<string, unknown>;
|
|
213
|
+
}>;
|
|
214
|
+
}> {
|
|
215
|
+
return [
|
|
216
|
+
{
|
|
217
|
+
functionDeclarations: tools.map((t) => ({
|
|
218
|
+
name: t.function.name,
|
|
219
|
+
description: t.function.description,
|
|
220
|
+
parameters: t.function.parameters,
|
|
221
|
+
})),
|
|
222
|
+
},
|
|
223
|
+
];
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
227
|
+
// Response parsing
|
|
228
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
229
|
+
|
|
230
|
+
function parseResponseParts(
|
|
231
|
+
parts: Array<Record<string, unknown>> | undefined
|
|
232
|
+
): { content: string | ContentPart[]; tool_calls?: ToolCall[] } {
|
|
233
|
+
if (!parts?.length) return { content: "" };
|
|
234
|
+
|
|
235
|
+
const textParts: string[] = [];
|
|
236
|
+
const imageParts: ContentPart[] = [];
|
|
237
|
+
const toolCalls: ToolCall[] = [];
|
|
238
|
+
|
|
239
|
+
for (const part of parts) {
|
|
240
|
+
if (typeof part.text === "string") {
|
|
241
|
+
textParts.push(part.text);
|
|
242
|
+
}
|
|
243
|
+
if (part.inlineData) {
|
|
244
|
+
const data = part.inlineData as { mimeType: string; data: string };
|
|
245
|
+
imageParts.push({
|
|
246
|
+
type: "image_url",
|
|
247
|
+
image_url: { url: `data:${data.mimeType};base64,${data.data}` },
|
|
248
|
+
});
|
|
249
|
+
}
|
|
250
|
+
if (part.functionCall) {
|
|
251
|
+
const fc = part.functionCall as { name: string; args: Record<string, unknown> };
|
|
252
|
+
toolCalls.push({
|
|
253
|
+
id: generateCallId(),
|
|
254
|
+
type: "function",
|
|
255
|
+
function: {
|
|
256
|
+
name: fc.name,
|
|
257
|
+
arguments: JSON.stringify(fc.args ?? {}),
|
|
258
|
+
},
|
|
259
|
+
});
|
|
260
|
+
}
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
if (toolCalls.length > 0) {
|
|
264
|
+
return { content: textParts.join("") || null as unknown as string, tool_calls: toolCalls };
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
// Multimodal: text + images → content_parts array
|
|
268
|
+
if (imageParts.length > 0) {
|
|
269
|
+
const contentParts: ContentPart[] = [];
|
|
270
|
+
const joinedText = textParts.join("");
|
|
271
|
+
if (joinedText) contentParts.push({ type: "text", text: joinedText });
|
|
272
|
+
contentParts.push(...imageParts);
|
|
273
|
+
return { content: contentParts };
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
// Text-only
|
|
277
|
+
return { content: textParts.join("") };
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
281
|
+
// Non-streaming completion
|
|
282
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
283
|
+
|
|
284
|
+
export interface GeminiApiOptions {
|
|
285
|
+
model: string;
|
|
286
|
+
timeoutMs?: number;
|
|
287
|
+
tools?: ToolDefinition[];
|
|
288
|
+
log?: (msg: string) => void;
|
|
289
|
+
}
|
|
290
|
+
|
|
291
|
+
export async function geminiApiComplete(
|
|
292
|
+
messages: ChatMessage[],
|
|
293
|
+
opts: GeminiApiOptions
|
|
294
|
+
): Promise<GeminiApiResult> {
|
|
295
|
+
const ai = getClient();
|
|
296
|
+
const modelId = stripPrefix(opts.model);
|
|
297
|
+
const { systemInstruction, contents } = convertMessages(messages);
|
|
298
|
+
|
|
299
|
+
const config: Record<string, unknown> = {};
|
|
300
|
+
|
|
301
|
+
// Enable image generation for models that support it
|
|
302
|
+
if (!opts.tools?.length) {
|
|
303
|
+
config.responseModalities = ["TEXT", "IMAGE"];
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
const requestOpts: Record<string, unknown> = {
|
|
307
|
+
model: modelId,
|
|
308
|
+
contents,
|
|
309
|
+
config: {
|
|
310
|
+
...config,
|
|
311
|
+
...(systemInstruction ? { systemInstruction } : {}),
|
|
312
|
+
...(opts.tools?.length ? { tools: convertTools(opts.tools) } : {}),
|
|
313
|
+
},
|
|
314
|
+
};
|
|
315
|
+
|
|
316
|
+
opts.log?.(`[gemini-api] ${modelId} · non-streaming · tools=${opts.tools?.length ?? 0}`);
|
|
317
|
+
|
|
318
|
+
const controller = new AbortController();
|
|
319
|
+
const timer = opts.timeoutMs
|
|
320
|
+
? setTimeout(() => controller.abort(), opts.timeoutMs)
|
|
321
|
+
: null;
|
|
322
|
+
|
|
323
|
+
try {
|
|
324
|
+
const response = await ai.models.generateContent(requestOpts as Parameters<typeof ai.models.generateContent>[0]);
|
|
325
|
+
|
|
326
|
+
const candidate = response.candidates?.[0];
|
|
327
|
+
const parts = candidate?.content?.parts as Array<Record<string, unknown>> | undefined;
|
|
328
|
+
const parsed = parseResponseParts(parts);
|
|
329
|
+
|
|
330
|
+
const finishReason = mapFinishReason(candidate?.finishReason as string);
|
|
331
|
+
|
|
332
|
+
return {
|
|
333
|
+
content: parsed.content,
|
|
334
|
+
finishReason,
|
|
335
|
+
promptTokens: response.usageMetadata?.promptTokenCount,
|
|
336
|
+
completionTokens: response.usageMetadata?.candidatesTokenCount,
|
|
337
|
+
tool_calls: parsed.tool_calls,
|
|
338
|
+
};
|
|
339
|
+
} finally {
|
|
340
|
+
if (timer) clearTimeout(timer);
|
|
341
|
+
}
|
|
342
|
+
}
|
|
343
|
+
|
|
344
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
345
|
+
// Streaming completion
|
|
346
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
347
|
+
|
|
348
|
+
export async function geminiApiCompleteStream(
|
|
349
|
+
messages: ChatMessage[],
|
|
350
|
+
opts: GeminiApiOptions,
|
|
351
|
+
onToken: (token: string) => void
|
|
352
|
+
): Promise<GeminiApiResult> {
|
|
353
|
+
const ai = getClient();
|
|
354
|
+
const modelId = stripPrefix(opts.model);
|
|
355
|
+
const { systemInstruction, contents } = convertMessages(messages);
|
|
356
|
+
|
|
357
|
+
const config: Record<string, unknown> = {};
|
|
358
|
+
|
|
359
|
+
// Image generation not supported in streaming — use text only
|
|
360
|
+
// (Gemini streams text tokens but images arrive as complete blobs)
|
|
361
|
+
|
|
362
|
+
const requestOpts: Record<string, unknown> = {
|
|
363
|
+
model: modelId,
|
|
364
|
+
contents,
|
|
365
|
+
config: {
|
|
366
|
+
...config,
|
|
367
|
+
...(systemInstruction ? { systemInstruction } : {}),
|
|
368
|
+
...(opts.tools?.length ? { tools: convertTools(opts.tools) } : {}),
|
|
369
|
+
},
|
|
370
|
+
};
|
|
371
|
+
|
|
372
|
+
opts.log?.(`[gemini-api] ${modelId} · streaming · tools=${opts.tools?.length ?? 0}`);
|
|
373
|
+
|
|
374
|
+
const controller = new AbortController();
|
|
375
|
+
const timer = opts.timeoutMs
|
|
376
|
+
? setTimeout(() => controller.abort(), opts.timeoutMs)
|
|
377
|
+
: null;
|
|
378
|
+
|
|
379
|
+
try {
|
|
380
|
+
const stream = await ai.models.generateContentStream(requestOpts as Parameters<typeof ai.models.generateContentStream>[0]);
|
|
381
|
+
|
|
382
|
+
let fullText = "";
|
|
383
|
+
const allImageParts: ContentPart[] = [];
|
|
384
|
+
const allToolCalls: ToolCall[] = [];
|
|
385
|
+
let finishReason = "stop";
|
|
386
|
+
let promptTokens: number | undefined;
|
|
387
|
+
let completionTokens: number | undefined;
|
|
388
|
+
|
|
389
|
+
for await (const chunk of stream) {
|
|
390
|
+
const candidate = chunk.candidates?.[0];
|
|
391
|
+
const parts = candidate?.content?.parts as Array<Record<string, unknown>> | undefined;
|
|
392
|
+
|
|
393
|
+
if (parts) {
|
|
394
|
+
for (const part of parts) {
|
|
395
|
+
if (typeof part.text === "string") {
|
|
396
|
+
fullText += part.text;
|
|
397
|
+
onToken(part.text);
|
|
398
|
+
}
|
|
399
|
+
if (part.inlineData) {
|
|
400
|
+
const data = part.inlineData as { mimeType: string; data: string };
|
|
401
|
+
allImageParts.push({
|
|
402
|
+
type: "image_url",
|
|
403
|
+
image_url: { url: `data:${data.mimeType};base64,${data.data}` },
|
|
404
|
+
});
|
|
405
|
+
}
|
|
406
|
+
if (part.functionCall) {
|
|
407
|
+
const fc = part.functionCall as { name: string; args: Record<string, unknown> };
|
|
408
|
+
allToolCalls.push({
|
|
409
|
+
id: generateCallId(),
|
|
410
|
+
type: "function",
|
|
411
|
+
function: {
|
|
412
|
+
name: fc.name,
|
|
413
|
+
arguments: JSON.stringify(fc.args ?? {}),
|
|
414
|
+
},
|
|
415
|
+
});
|
|
416
|
+
}
|
|
417
|
+
}
|
|
418
|
+
}
|
|
419
|
+
|
|
420
|
+
if (candidate?.finishReason) {
|
|
421
|
+
finishReason = mapFinishReason(candidate.finishReason as string);
|
|
422
|
+
}
|
|
423
|
+
if (chunk.usageMetadata) {
|
|
424
|
+
promptTokens = chunk.usageMetadata.promptTokenCount;
|
|
425
|
+
completionTokens = chunk.usageMetadata.candidatesTokenCount;
|
|
426
|
+
}
|
|
427
|
+
}
|
|
428
|
+
|
|
429
|
+
let content: string | ContentPart[];
|
|
430
|
+
if (allImageParts.length > 0) {
|
|
431
|
+
const parts: ContentPart[] = [];
|
|
432
|
+
if (fullText) parts.push({ type: "text", text: fullText });
|
|
433
|
+
parts.push(...allImageParts);
|
|
434
|
+
content = parts;
|
|
435
|
+
} else {
|
|
436
|
+
content = fullText;
|
|
437
|
+
}
|
|
438
|
+
|
|
439
|
+
return {
|
|
440
|
+
content,
|
|
441
|
+
finishReason,
|
|
442
|
+
promptTokens,
|
|
443
|
+
completionTokens,
|
|
444
|
+
tool_calls: allToolCalls.length > 0 ? allToolCalls : undefined,
|
|
445
|
+
};
|
|
446
|
+
} finally {
|
|
447
|
+
if (timer) clearTimeout(timer);
|
|
448
|
+
}
|
|
449
|
+
}
|
|
450
|
+
|
|
451
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
452
|
+
// Helpers
|
|
453
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
454
|
+
|
|
455
|
+
/** Strip provider prefix: "gemini-api/gemini-2.5-flash" → "gemini-2.5-flash" */
|
|
456
|
+
function stripPrefix(model: string): string {
|
|
457
|
+
const slash = model.indexOf("/");
|
|
458
|
+
return slash >= 0 ? model.slice(slash + 1) : model;
|
|
459
|
+
}
|
|
460
|
+
|
|
461
|
+
/** Map Gemini finish reasons to OpenAI format. */
|
|
462
|
+
function mapFinishReason(reason?: string): string {
|
|
463
|
+
switch (reason) {
|
|
464
|
+
case "STOP": return "stop";
|
|
465
|
+
case "MAX_TOKENS": return "length";
|
|
466
|
+
case "SAFETY": return "content_filter";
|
|
467
|
+
case "RECITATION": return "content_filter";
|
|
468
|
+
default: return "stop";
|
|
469
|
+
}
|
|
470
|
+
}
|
package/src/proxy-server.ts
CHANGED
|
@@ -16,6 +16,7 @@ import { grokComplete, grokCompleteStream, type ChatMessage as GrokChatMessage }
|
|
|
16
16
|
import { geminiComplete, geminiCompleteStream, type ChatMessage as GeminiBrowserChatMessage } from "./gemini-browser.js";
|
|
17
17
|
import { claudeComplete, claudeCompleteStream, type ChatMessage as ClaudeBrowserChatMessage } from "./claude-browser.js";
|
|
18
18
|
import { chatgptComplete, chatgptCompleteStream, type ChatMessage as ChatGPTBrowserChatMessage } from "./chatgpt-browser.js";
|
|
19
|
+
import { geminiApiComplete, geminiApiCompleteStream, type GeminiApiResult, type ContentPart } from "./gemini-api-runner.js";
|
|
19
20
|
import type { BrowserContext } from "playwright";
|
|
20
21
|
import { renderStatusPage, type StatusProvider } from "./status-template.js";
|
|
21
22
|
import { sessionManager } from "./session-manager.js";
|
|
@@ -74,6 +75,10 @@ export interface ProxyServerOptions {
|
|
|
74
75
|
_chatgptComplete?: typeof chatgptComplete;
|
|
75
76
|
/** Override for testing — replaces chatgptCompleteStream */
|
|
76
77
|
_chatgptCompleteStream?: typeof chatgptCompleteStream;
|
|
78
|
+
/** Override for testing — replaces geminiApiComplete */
|
|
79
|
+
_geminiApiComplete?: typeof geminiApiComplete;
|
|
80
|
+
/** Override for testing — replaces geminiApiCompleteStream */
|
|
81
|
+
_geminiApiCompleteStream?: typeof geminiApiCompleteStream;
|
|
77
82
|
/** Returns human-readable expiry string for each web provider (null = no login yet) */
|
|
78
83
|
getExpiryInfo?: () => {
|
|
79
84
|
grok: string | null;
|
|
@@ -140,6 +145,9 @@ export const CLI_MODELS = [
|
|
|
140
145
|
{ id: "web-gemini/gemini-3-flash", name: "Gemini 3 Flash (web session)", contextWindow: 1_048_576, maxTokens: 65_536 },
|
|
141
146
|
// Claude → use cli-claude/* instead (web-claude removed in v1.6.x)
|
|
142
147
|
// ChatGPT → use openai-codex/* or copilot-proxy instead (web-chatgpt removed in v1.6.x)
|
|
148
|
+
// ── Gemini API (native SDK, supports image generation) ─────────────────
|
|
149
|
+
{ id: "gemini-api/gemini-2.5-flash", name: "Gemini 2.5 Flash (API)", contextWindow: 1_048_576, maxTokens: 65_535 },
|
|
150
|
+
{ id: "gemini-api/gemini-2.5-pro", name: "Gemini 2.5 Pro (API)", contextWindow: 1_048_576, maxTokens: 65_535 },
|
|
143
151
|
// ── OpenCode CLI ──────────────────────────────────────────────────────────
|
|
144
152
|
{ id: "opencode/default", name: "OpenCode (CLI)", contextWindow: 128_000, maxTokens: 16_384 },
|
|
145
153
|
// ── Pi CLI ──────────────────────────────────────────────────────────────
|
|
@@ -564,6 +572,94 @@ async function handleRequest(
|
|
|
564
572
|
}
|
|
565
573
|
// ─────────────────────────────────────────────────────────────────────────
|
|
566
574
|
|
|
575
|
+
// ── Gemini API routing (native SDK — supports image generation) ─────────
|
|
576
|
+
// Strip vllm/ prefix if present — OpenClaw sends full provider path
|
|
577
|
+
const geminiApiModel = model.startsWith("vllm/") ? model.slice(5) : model;
|
|
578
|
+
if (geminiApiModel.startsWith("gemini-api/")) {
|
|
579
|
+
const doComplete = opts._geminiApiComplete ?? geminiApiComplete;
|
|
580
|
+
const doCompleteStream = opts._geminiApiCompleteStream ?? geminiApiCompleteStream;
|
|
581
|
+
const perModelTimeout = opts.modelTimeouts?.[geminiApiModel];
|
|
582
|
+
const timeoutMs = perModelTimeout ?? opts.timeoutMs ?? 180_000;
|
|
583
|
+
const apiStart = Date.now();
|
|
584
|
+
const apiOpts = { model: geminiApiModel, timeoutMs, tools: hasTools ? tools : undefined, log: opts.log };
|
|
585
|
+
try {
|
|
586
|
+
if (stream) {
|
|
587
|
+
res.writeHead(200, { "Content-Type": "text/event-stream", "Cache-Control": "no-cache", Connection: "keep-alive", ...corsHeaders() });
|
|
588
|
+
sendSseChunk(res, { id, created, model: geminiApiModel, delta: { role: "assistant" }, finish_reason: null });
|
|
589
|
+
const result = await doCompleteStream(
|
|
590
|
+
cleanMessages,
|
|
591
|
+
apiOpts,
|
|
592
|
+
(token) => sendSseChunk(res, { id, created, model: geminiApiModel, delta: { content: token }, finish_reason: null })
|
|
593
|
+
);
|
|
594
|
+
const estComp = typeof result.content === "string" ? estimateTokens(result.content) : (result.completionTokens ?? 0);
|
|
595
|
+
metrics.recordRequest(geminiApiModel, Date.now() - apiStart, true, estPromptTokens, estComp);
|
|
596
|
+
// If images were generated during streaming, send the full multimodal content as a final chunk
|
|
597
|
+
if (Array.isArray(result.content)) {
|
|
598
|
+
sendSseChunk(res, { id, created, model: geminiApiModel, delta: { content: JSON.stringify(result.content) }, finish_reason: null });
|
|
599
|
+
}
|
|
600
|
+
if (result.tool_calls?.length) {
|
|
601
|
+
const toolCalls = result.tool_calls;
|
|
602
|
+
sendSseChunk(res, {
|
|
603
|
+
id, created, model: geminiApiModel,
|
|
604
|
+
delta: {
|
|
605
|
+
tool_calls: toolCalls.map((tc, idx) => ({
|
|
606
|
+
index: idx, id: tc.id, type: "function",
|
|
607
|
+
function: { name: tc.function.name, arguments: "" },
|
|
608
|
+
})),
|
|
609
|
+
},
|
|
610
|
+
finish_reason: null,
|
|
611
|
+
});
|
|
612
|
+
for (let idx = 0; idx < toolCalls.length; idx++) {
|
|
613
|
+
sendSseChunk(res, {
|
|
614
|
+
id, created, model: geminiApiModel,
|
|
615
|
+
delta: { tool_calls: [{ index: idx, function: { arguments: toolCalls[idx].function.arguments } }] },
|
|
616
|
+
finish_reason: null,
|
|
617
|
+
});
|
|
618
|
+
}
|
|
619
|
+
sendSseChunk(res, { id, created, model: geminiApiModel, delta: {}, finish_reason: "tool_calls" });
|
|
620
|
+
} else {
|
|
621
|
+
sendSseChunk(res, { id, created, model: geminiApiModel, delta: {}, finish_reason: result.finishReason });
|
|
622
|
+
}
|
|
623
|
+
res.write("data: [DONE]\n\n");
|
|
624
|
+
res.end();
|
|
625
|
+
} else {
|
|
626
|
+
const result = await doComplete(cleanMessages, apiOpts);
|
|
627
|
+
const estComp = typeof result.content === "string"
|
|
628
|
+
? estimateTokens(result.content)
|
|
629
|
+
: (result.completionTokens ?? 0);
|
|
630
|
+
metrics.recordRequest(geminiApiModel, Date.now() - apiStart, true, estPromptTokens, estComp);
|
|
631
|
+
const message: Record<string, unknown> = { role: "assistant" };
|
|
632
|
+
if (result.tool_calls?.length) {
|
|
633
|
+
message.content = null;
|
|
634
|
+
message.tool_calls = result.tool_calls;
|
|
635
|
+
} else {
|
|
636
|
+
message.content = result.content;
|
|
637
|
+
}
|
|
638
|
+
const finishReason = result.tool_calls?.length ? "tool_calls" : result.finishReason;
|
|
639
|
+
res.writeHead(200, { "Content-Type": "application/json", ...corsHeaders() });
|
|
640
|
+
res.end(JSON.stringify({
|
|
641
|
+
id, object: "chat.completion", created, model: geminiApiModel,
|
|
642
|
+
choices: [{ index: 0, message, finish_reason: finishReason }],
|
|
643
|
+
usage: {
|
|
644
|
+
prompt_tokens: result.promptTokens ?? estPromptTokens,
|
|
645
|
+
completion_tokens: result.completionTokens ?? estComp,
|
|
646
|
+
total_tokens: (result.promptTokens ?? estPromptTokens) + (result.completionTokens ?? estComp),
|
|
647
|
+
},
|
|
648
|
+
}));
|
|
649
|
+
}
|
|
650
|
+
} catch (err) {
|
|
651
|
+
metrics.recordRequest(geminiApiModel, Date.now() - apiStart, false, estPromptTokens);
|
|
652
|
+
const msg = (err as Error).message;
|
|
653
|
+
opts.warn(`[cli-bridge] Gemini API error for ${geminiApiModel}: ${msg}`);
|
|
654
|
+
if (!res.headersSent) {
|
|
655
|
+
res.writeHead(500, { "Content-Type": "application/json", ...corsHeaders() });
|
|
656
|
+
res.end(JSON.stringify({ error: { message: msg, type: "gemini_api_error" } }));
|
|
657
|
+
}
|
|
658
|
+
}
|
|
659
|
+
return;
|
|
660
|
+
}
|
|
661
|
+
// ─────────────────────────────────────────────────────────────────────────
|
|
662
|
+
|
|
567
663
|
// ── BitNet local inference routing ────────────────────────────────────────
|
|
568
664
|
if (model.startsWith("local-bitnet/")) {
|
|
569
665
|
const bitnetUrl = opts.getBitNetServerUrl?.() ?? DEFAULT_BITNET_SERVER_URL;
|
package/test/config.test.ts
CHANGED
|
@@ -62,17 +62,20 @@ describe("config.ts exports", () => {
|
|
|
62
62
|
|
|
63
63
|
it("exports per-model timeouts for all major models", () => {
|
|
64
64
|
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-opus-4-6"]).toBe(300_000);
|
|
65
|
-
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-sonnet-4-6"]).toBe(
|
|
65
|
+
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-sonnet-4-6"]).toBe(300_000);
|
|
66
66
|
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-haiku-4-5"]).toBe(90_000);
|
|
67
67
|
expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-pro"]).toBe(300_000);
|
|
68
68
|
expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-flash"]).toBe(180_000);
|
|
69
69
|
expect(DEFAULT_MODEL_TIMEOUTS["openai-codex/gpt-5.4"]).toBe(300_000);
|
|
70
|
+
expect(DEFAULT_MODEL_TIMEOUTS["gemini-api/gemini-2.5-pro"]).toBe(300_000);
|
|
71
|
+
expect(DEFAULT_MODEL_TIMEOUTS["gemini-api/gemini-2.5-flash"]).toBe(180_000);
|
|
70
72
|
});
|
|
71
73
|
|
|
72
74
|
it("exports model fallback chains", () => {
|
|
73
75
|
expect(DEFAULT_MODEL_FALLBACKS["cli-claude/claude-sonnet-4-6"]).toBe("cli-claude/claude-haiku-4-5");
|
|
74
76
|
expect(DEFAULT_MODEL_FALLBACKS["cli-claude/claude-opus-4-6"]).toBe("cli-claude/claude-sonnet-4-6");
|
|
75
77
|
expect(DEFAULT_MODEL_FALLBACKS["cli-gemini/gemini-2.5-pro"]).toBe("cli-gemini/gemini-2.5-flash");
|
|
78
|
+
expect(DEFAULT_MODEL_FALLBACKS["gemini-api/gemini-2.5-pro"]).toBe("gemini-api/gemini-2.5-flash");
|
|
76
79
|
});
|
|
77
80
|
|
|
78
81
|
it("exports path constants rooted in ~/.openclaw", () => {
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* test/gemini-api-proxy.test.ts
|
|
3
|
+
*
|
|
4
|
+
* Integration tests for Gemini API routing in the cli-bridge proxy.
|
|
5
|
+
* Uses _geminiApiComplete/_geminiApiCompleteStream DI overrides (no real API calls).
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { describe, it, expect, beforeAll, afterAll, vi } from "vitest";
|
|
9
|
+
import http from "node:http";
|
|
10
|
+
import type { AddressInfo } from "node:net";
|
|
11
|
+
import { startProxyServer, CLI_MODELS } from "../src/proxy-server.js";
|
|
12
|
+
import type { GeminiApiResult, GeminiApiOptions, ContentPart } from "../src/gemini-api-runner.js";
|
|
13
|
+
import type { ChatMessage } from "../src/cli-runner.js";
|
|
14
|
+
|
|
15
|
+
const stubComplete = vi.fn(async (
|
|
16
|
+
messages: ChatMessage[],
|
|
17
|
+
opts: GeminiApiOptions
|
|
18
|
+
): Promise<GeminiApiResult> => ({
|
|
19
|
+
content: `api mock: ${typeof messages[messages.length - 1]?.content === "string" ? messages[messages.length - 1].content : "multipart"}`,
|
|
20
|
+
finishReason: "stop",
|
|
21
|
+
promptTokens: 10,
|
|
22
|
+
completionTokens: 5,
|
|
23
|
+
}));
|
|
24
|
+
|
|
25
|
+
// Stub that returns multimodal content_parts (text + base64 image data URI),
// mirroring a GeminiApiResult for an image-generation response.
// NOTE(review): defined but not referenced anywhere in this file — presumably
// kept for a future multimodal routing test; confirm intent or remove.
const stubCompleteMultimodal = vi.fn(async (
  _messages: ChatMessage[],
  _opts: GeminiApiOptions
): Promise<GeminiApiResult> => ({
  content: [
    { type: "text", text: "Here is the image:" },
    { type: "image_url", image_url: { url: "data:image/png;base64,iVBOR..." } },
  ] as ContentPart[],
  finishReason: "stop",
  promptTokens: 15,
  completionTokens: 100,
}));
|
|
37
|
+
|
|
38
|
+
const stubCompleteStream = vi.fn(async (
|
|
39
|
+
messages: ChatMessage[],
|
|
40
|
+
opts: GeminiApiOptions,
|
|
41
|
+
onToken: (t: string) => void
|
|
42
|
+
): Promise<GeminiApiResult> => {
|
|
43
|
+
const tokens = ["api ", "stream ", "response"];
|
|
44
|
+
for (const t of tokens) onToken(t);
|
|
45
|
+
return { content: tokens.join(""), finishReason: "stop", promptTokens: 10, completionTokens: 8 };
|
|
46
|
+
});
|
|
47
|
+
|
|
48
|
+
// Stub that returns an OpenAI-shaped tool_calls array instead of text,
// mirroring how the runner surfaces Gemini functionCall responses.
// NOTE(review): defined but not referenced anywhere in this file — presumably
// kept for a future tool-calling routing test; confirm intent or remove.
const stubCompleteToolCalls = vi.fn(async (
  _messages: ChatMessage[],
  _opts: GeminiApiOptions
): Promise<GeminiApiResult> => ({
  content: "",
  finishReason: "stop",
  tool_calls: [
    { id: "call_abc123", type: "function", function: { name: "search", arguments: '{"q":"test"}' } },
  ],
}));
|
|
58
|
+
|
|
59
|
+
async function httpPost(url: string, body: unknown): Promise<{ status: number; body: unknown; raw: string }> {
|
|
60
|
+
return new Promise((resolve, reject) => {
|
|
61
|
+
const data = JSON.stringify(body);
|
|
62
|
+
const u = new URL(url);
|
|
63
|
+
const req = http.request(
|
|
64
|
+
{ hostname: u.hostname, port: Number(u.port), path: u.pathname, method: "POST",
|
|
65
|
+
headers: { "Content-Type": "application/json", "Content-Length": Buffer.byteLength(data) } },
|
|
66
|
+
(res) => { let raw = ""; res.on("data", c => raw += c); res.on("end", () => { try { resolve({ status: res.statusCode ?? 0, body: JSON.parse(raw), raw }); } catch { resolve({ status: res.statusCode ?? 0, body: raw, raw }); } }); }
|
|
67
|
+
);
|
|
68
|
+
req.on("error", reject); req.write(data); req.end();
|
|
69
|
+
});
|
|
70
|
+
}
|
|
71
|
+
async function httpGet(url: string): Promise<{ status: number; body: unknown }> {
|
|
72
|
+
return new Promise((resolve, reject) => {
|
|
73
|
+
const u = new URL(url);
|
|
74
|
+
const req = http.request({ hostname: u.hostname, port: Number(u.port), path: u.pathname, method: "GET" },
|
|
75
|
+
(res) => { let raw = ""; res.on("data", c => raw += c); res.on("end", () => { try { resolve({ status: res.statusCode ?? 0, body: JSON.parse(raw) }); } catch { resolve({ status: res.statusCode ?? 0, body: raw }); } }); }
|
|
76
|
+
);
|
|
77
|
+
req.on("error", reject); req.end();
|
|
78
|
+
});
|
|
79
|
+
}
|
|
80
|
+
async function httpPostRaw(url: string, body: unknown): Promise<{ status: number; raw: string }> {
|
|
81
|
+
return new Promise((resolve, reject) => {
|
|
82
|
+
const data = JSON.stringify(body);
|
|
83
|
+
const u = new URL(url);
|
|
84
|
+
const req = http.request(
|
|
85
|
+
{ hostname: u.hostname, port: Number(u.port), path: u.pathname, method: "POST",
|
|
86
|
+
headers: { "Content-Type": "application/json", "Content-Length": Buffer.byteLength(data) } },
|
|
87
|
+
(res) => { let raw = ""; res.on("data", c => raw += c); res.on("end", () => resolve({ status: res.statusCode ?? 0, raw })); }
|
|
88
|
+
);
|
|
89
|
+
req.on("error", reject); req.write(data); req.end();
|
|
90
|
+
});
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
// Shared suite state: one proxy server instance reused by every test.
let server: http.Server;
let baseUrl: string;

// Boot the proxy once for the whole suite with DI stubs in place of the real
// Gemini API calls; port 0 lets the OS pick a free ephemeral port, which we
// read back from server.address() to build baseUrl.
beforeAll(async () => {
  server = await startProxyServer({
    port: 0, log: () => {}, warn: () => {},
    // @ts-expect-error — stub types close enough for testing
    _geminiApiComplete: stubComplete,
    // @ts-expect-error — stub types close enough for testing
    _geminiApiCompleteStream: stubCompleteStream,
  });
  baseUrl = `http://127.0.0.1:${(server.address() as AddressInfo).port}`;
});
// Tear the server down so vitest can exit cleanly.
afterAll(() => server.close());
|
|
107
|
+
|
|
108
|
+
describe("Gemini API routing — model list", () => {
|
|
109
|
+
it("includes gemini-api/* models in /v1/models", async () => {
|
|
110
|
+
const res = await httpGet(`${baseUrl}/v1/models`);
|
|
111
|
+
expect(res.status).toBe(200);
|
|
112
|
+
const data = res.body as { data: Array<{ id: string }> };
|
|
113
|
+
const ids = data.data.map(m => m.id);
|
|
114
|
+
expect(ids).toContain("gemini-api/gemini-2.5-flash");
|
|
115
|
+
expect(ids).toContain("gemini-api/gemini-2.5-pro");
|
|
116
|
+
});
|
|
117
|
+
|
|
118
|
+
it("gemini-api models exist in CLI_MODELS constant", () => {
|
|
119
|
+
const ids = CLI_MODELS.map(m => m.id);
|
|
120
|
+
expect(ids).toContain("gemini-api/gemini-2.5-flash");
|
|
121
|
+
expect(ids).toContain("gemini-api/gemini-2.5-pro");
|
|
122
|
+
});
|
|
123
|
+
});
|
|
124
|
+
|
|
125
|
+
describe("Gemini API routing — non-streaming", () => {
|
|
126
|
+
it("returns text response for gemini-api model", async () => {
|
|
127
|
+
const res = await httpPost(`${baseUrl}/v1/chat/completions`, {
|
|
128
|
+
model: "gemini-api/gemini-2.5-flash",
|
|
129
|
+
messages: [{ role: "user", content: "Hello" }],
|
|
130
|
+
});
|
|
131
|
+
expect(res.status).toBe(200);
|
|
132
|
+
const data = res.body as { choices: Array<{ message: { content: string } }> };
|
|
133
|
+
expect(data.choices[0].message.content).toContain("api mock");
|
|
134
|
+
});
|
|
135
|
+
|
|
136
|
+
it("returns usage with real token counts from API", async () => {
|
|
137
|
+
const res = await httpPost(`${baseUrl}/v1/chat/completions`, {
|
|
138
|
+
model: "gemini-api/gemini-2.5-flash",
|
|
139
|
+
messages: [{ role: "user", content: "test" }],
|
|
140
|
+
});
|
|
141
|
+
const data = res.body as { usage: { prompt_tokens: number; completion_tokens: number } };
|
|
142
|
+
expect(data.usage.prompt_tokens).toBe(10);
|
|
143
|
+
expect(data.usage.completion_tokens).toBe(5);
|
|
144
|
+
});
|
|
145
|
+
|
|
146
|
+
it("returns correct model in response", async () => {
|
|
147
|
+
const res = await httpPost(`${baseUrl}/v1/chat/completions`, {
|
|
148
|
+
model: "gemini-api/gemini-2.5-pro",
|
|
149
|
+
messages: [{ role: "user", content: "pro model" }],
|
|
150
|
+
});
|
|
151
|
+
expect(res.status).toBe(200);
|
|
152
|
+
const data = res.body as { model: string };
|
|
153
|
+
expect(data.model).toBe("gemini-api/gemini-2.5-pro");
|
|
154
|
+
});
|
|
155
|
+
|
|
156
|
+
it("accepts vllm/ prefix and routes correctly", async () => {
|
|
157
|
+
const res = await httpPost(`${baseUrl}/v1/chat/completions`, {
|
|
158
|
+
model: "vllm/gemini-api/gemini-2.5-flash",
|
|
159
|
+
messages: [{ role: "user", content: "with prefix" }],
|
|
160
|
+
});
|
|
161
|
+
expect(res.status).toBe(200);
|
|
162
|
+
const data = res.body as { choices: Array<{ message: { content: string } }> };
|
|
163
|
+
expect(data.choices[0].message.content).toContain("api mock");
|
|
164
|
+
});
|
|
165
|
+
});
|
|
166
|
+
|
|
167
|
+
describe("Gemini API routing — streaming", () => {
|
|
168
|
+
it("returns SSE stream with text tokens", async () => {
|
|
169
|
+
const res = await httpPostRaw(`${baseUrl}/v1/chat/completions`, {
|
|
170
|
+
model: "gemini-api/gemini-2.5-flash",
|
|
171
|
+
messages: [{ role: "user", content: "stream test" }],
|
|
172
|
+
stream: true,
|
|
173
|
+
});
|
|
174
|
+
expect(res.status).toBe(200);
|
|
175
|
+
expect(res.raw).toContain("data: ");
|
|
176
|
+
expect(res.raw).toContain("[DONE]");
|
|
177
|
+
// Should contain the streamed tokens
|
|
178
|
+
expect(res.raw).toContain("api ");
|
|
179
|
+
expect(res.raw).toContain("stream ");
|
|
180
|
+
});
|
|
181
|
+
});
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
import { describe, it, expect } from "vitest";
|
|
2
|
+
import { convertMessages, convertTools, type ContentPart } from "../src/gemini-api-runner.js";
|
|
3
|
+
import type { ChatMessage } from "../src/cli-runner.js";
|
|
4
|
+
import type { ToolDefinition } from "../src/tool-protocol.js";
|
|
5
|
+
|
|
6
|
+
describe("gemini-api-runner", () => {
  // OpenAI-style ChatMessage[] → Gemini { systemInstruction, contents } conversion.
  describe("convertMessages", () => {
    it("converts system messages to systemInstruction", () => {
      const messages: ChatMessage[] = [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "Hello" },
      ];
      const result = convertMessages(messages);
      // System prompt moves out of contents into a dedicated systemInstruction.
      expect(result.systemInstruction).toEqual({
        parts: [{ text: "You are a helpful assistant." }],
      });
      expect(result.contents).toHaveLength(1);
      expect(result.contents[0].role).toBe("user");
    });

    it("maps assistant role to model", () => {
      const messages: ChatMessage[] = [
        { role: "user", content: "Hi" },
        { role: "assistant", content: "Hello!" },
      ];
      const result = convertMessages(messages);
      expect(result.contents[0].role).toBe("user");
      // Gemini uses "model" where OpenAI uses "assistant".
      expect(result.contents[1].role).toBe("model");
    });

    it("converts tool results to functionResponse", () => {
      const messages: ChatMessage[] = [
        { role: "tool", content: '{"result": "42"}', name: "calculator" } as ChatMessage & { name: string },
      ];
      const result = convertMessages(messages);
      // Tool results are carried on a "user" turn as a functionResponse part;
      // JSON string content is parsed into the response object.
      expect(result.contents[0].role).toBe("user");
      const part = result.contents[0].parts[0] as { functionResponse: { name: string; response: Record<string, unknown> } };
      expect(part.functionResponse.name).toBe("calculator");
      expect(part.functionResponse.response).toEqual({ result: "42" });
    });

    it("converts assistant tool_calls to functionCall parts", () => {
      const messages = [
        {
          role: "assistant" as const,
          content: null,
          tool_calls: [
            { id: "call_1", type: "function" as const, function: { name: "search", arguments: '{"q":"test"}' } },
          ],
        },
      ] as unknown as ChatMessage[];
      const result = convertMessages(messages);
      expect(result.contents[0].role).toBe("model");
      // OpenAI's JSON-string `arguments` becomes Gemini's parsed `args` object.
      const part = result.contents[0].parts[0] as { functionCall: { name: string; args: Record<string, unknown> } };
      expect(part.functionCall.name).toBe("search");
      expect(part.functionCall.args).toEqual({ q: "test" });
    });

    it("converts image_url content parts to inlineData", () => {
      const messages: ChatMessage[] = [
        {
          role: "user",
          content: [
            { type: "text", text: "What is this?" },
            { type: "image_url", image_url: { url: "data:image/png;base64,iVBOR" } },
          ] as unknown as string,
        },
      ];
      const result = convertMessages(messages);
      expect(result.contents[0].parts).toHaveLength(2);
      // The data URI is split: media type → mimeType, base64 payload → data.
      const imgPart = result.contents[0].parts[1] as { inlineData: { mimeType: string; data: string } };
      expect(imgPart.inlineData.mimeType).toBe("image/png");
      expect(imgPart.inlineData.data).toBe("iVBOR");
    });

    it("handles multiple system messages", () => {
      const messages: ChatMessage[] = [
        { role: "system", content: "Rule 1" },
        { role: "system", content: "Rule 2" },
        { role: "user", content: "Go" },
      ];
      const result = convertMessages(messages);
      // Each system message contributes one part to the single systemInstruction.
      expect(result.systemInstruction?.parts).toHaveLength(2);
      expect(result.contents).toHaveLength(1);
    });

    it("skips empty content messages", () => {
      const messages: ChatMessage[] = [
        { role: "user", content: "" },
        { role: "user", content: "Hello" },
      ];
      const result = convertMessages(messages);
      // Empty-string content produces no Gemini content entry at all.
      expect(result.contents).toHaveLength(1);
    });

    it("handles plain string tool result", () => {
      const messages: ChatMessage[] = [
        { role: "tool", content: "plain text result", name: "myTool" } as ChatMessage & { name: string },
      ];
      const result = convertMessages(messages);
      // Non-JSON tool output is wrapped as { result: <text> }.
      const part = result.contents[0].parts[0] as { functionResponse: { name: string; response: Record<string, unknown> } };
      expect(part.functionResponse.response).toEqual({ result: "plain text result" });
    });
  });

  // OpenAI tool definitions → Gemini functionDeclarations conversion.
  describe("convertTools", () => {
    it("wraps tool definitions in functionDeclarations", () => {
      const tools: ToolDefinition[] = [
        {
          type: "function",
          function: {
            name: "search",
            description: "Search the web",
            parameters: { type: "object", properties: { q: { type: "string" } } },
          },
        },
      ];
      const result = convertTools(tools);
      // All tools collapse into a single element holding functionDeclarations.
      expect(result).toHaveLength(1);
      expect(result[0].functionDeclarations).toHaveLength(1);
      expect(result[0].functionDeclarations[0].name).toBe("search");
      expect(result[0].functionDeclarations[0].description).toBe("Search the web");
    });

    it("handles multiple tools", () => {
      const tools: ToolDefinition[] = [
        { type: "function", function: { name: "a", description: "A", parameters: {} } },
        { type: "function", function: { name: "b", description: "B", parameters: {} } },
      ];
      const result = convertTools(tools);
      expect(result[0].functionDeclarations).toHaveLength(2);
    });
  });
});
|