@elvatis_com/openclaw-cli-bridge-elvatis 0.2.29 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,253 @@
1
+ /**
2
+ * chatgpt-browser.ts
3
+ *
4
+ * ChatGPT web automation via Playwright DOM-polling.
5
+ * Strategy identical to claude-browser.ts / grok-client.ts.
6
+ *
7
+ * DOM structure (confirmed 2026-03-11):
8
+ * Editor: #prompt-textarea (ProseMirror — use execCommand)
9
+ * Send btn: button[data-testid="send-button"]
10
+ * Response: [data-message-author-role="assistant"] (last element)
11
+ * Streaming indicator: button[data-testid="stop-button"]
12
+ */
13
+
14
+ import type { BrowserContext, Page } from "playwright";
15
+
16
/** A single chat turn in OpenAI-style role/content form. */
export interface ChatMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

/** Options for one ChatGPT web-automation completion. */
export interface ChatGPTBrowserOptions {
  // Conversation to send; flattened into a single prompt string.
  messages: ChatMessage[];
  // Model id, optionally prefixed with "web-chatgpt/".
  model?: string;
  // Overall response timeout in milliseconds (default: DEFAULT_TIMEOUT_MS).
  timeoutMs?: number;
}

/** Result of a completed ChatGPT web-automation round-trip. */
export interface ChatGPTBrowserResult {
  content: string;
  model: string;
  finishReason: string;
}

// Polling parameters: a response counts as final once its text is unchanged
// for STABLE_CHECKS consecutive polls taken STABLE_INTERVAL_MS apart.
const DEFAULT_TIMEOUT_MS = 120_000;
const STABLE_CHECKS = 3;
const STABLE_INTERVAL_MS = 500;
const CHATGPT_HOME = "https://chatgpt.com";

// Maps CLI-bridge model ids to the names ChatGPT expects.
// Ids not listed here are passed through unchanged by resolveModel().
const MODEL_MAP: Record<string, string> = {
  "gpt-4o": "gpt-4o",
  "gpt-4o-mini": "gpt-4o-mini",
  "gpt-o3": "o3",
  "gpt-o4-mini": "o4-mini",
  "gpt-4-1": "gpt-4.1",
  "gpt-5": "gpt-5",
};
46
+
47
+ function resolveModel(m?: string): string {
48
+ const clean = (m ?? "gpt-4o").replace("web-chatgpt/", "");
49
+ return MODEL_MAP[clean] ?? clean;
50
+ }
51
+
52
+ function flattenMessages(messages: ChatMessage[]): string {
53
+ if (messages.length === 1) return messages[0].content;
54
+ return messages
55
+ .map((m) => {
56
+ if (m.role === "system") return `[System]: ${m.content}`;
57
+ if (m.role === "assistant") return `[Assistant]: ${m.content}`;
58
+ return m.content;
59
+ })
60
+ .join("\n\n");
61
+ }
62
+
63
+ /**
64
+ * Get or create a chatgpt.com page in the given context.
65
+ */
66
+ export async function getOrCreateChatGPTPage(
67
+ context: BrowserContext
68
+ ): Promise<{ page: Page; owned: boolean }> {
69
+ const existing = context.pages().filter((p) => p.url().startsWith("https://chatgpt.com"));
70
+ if (existing.length > 0) return { page: existing[0], owned: false };
71
+ const page = await context.newPage();
72
+ await page.goto(CHATGPT_HOME, { waitUntil: "domcontentloaded", timeout: 15_000 });
73
+ await new Promise((r) => setTimeout(r, 2_000));
74
+ return { page, owned: true };
75
+ }
76
+
77
+ /**
78
+ * Count assistant messages on the page.
79
+ */
80
+ async function countAssistantMessages(page: Page): Promise<number> {
81
+ return page.evaluate(() =>
82
+ document.querySelectorAll('[data-message-author-role="assistant"]').length
83
+ );
84
+ }
85
+
86
+ /**
87
+ * Get the text of the last assistant message.
88
+ */
89
+ async function getLastAssistantText(page: Page): Promise<string> {
90
+ return page.evaluate(() => {
91
+ const els = [...document.querySelectorAll('[data-message-author-role="assistant"]')];
92
+ return els[els.length - 1]?.textContent?.trim() ?? "";
93
+ });
94
+ }
95
+
96
+ /**
97
+ * Check if ChatGPT is still generating (stop button visible).
98
+ */
99
+ async function isStreaming(page: Page): Promise<boolean> {
100
+ return page.evaluate(() =>
101
+ !!document.querySelector('button[data-testid="stop-button"]')
102
+ );
103
+ }
104
+
105
+ /**
106
+ * Send a message and wait for stable response.
107
+ */
108
+ async function sendAndWait(
109
+ page: Page,
110
+ message: string,
111
+ timeoutMs: number,
112
+ log: (msg: string) => void
113
+ ): Promise<string> {
114
+ const countBefore = await countAssistantMessages(page);
115
+
116
+ // Type into ProseMirror via execCommand
117
+ await page.evaluate((msg: string) => {
118
+ const ed = document.querySelector("#prompt-textarea") as HTMLElement | null;
119
+ if (!ed) throw new Error("ChatGPT editor (#prompt-textarea) not found");
120
+ ed.focus();
121
+ document.execCommand("insertText", false, msg);
122
+ }, message);
123
+
124
+ await new Promise((r) => setTimeout(r, 300));
125
+
126
+ // Click send button (preferred) or Enter
127
+ const sendBtn = page.locator('button[data-testid="send-button"]').first();
128
+ const hasSendBtn = await sendBtn.isVisible().catch(() => false);
129
+ if (hasSendBtn) {
130
+ await sendBtn.click();
131
+ } else {
132
+ await page.keyboard.press("Enter");
133
+ }
134
+
135
+ log(`chatgpt-browser: message sent (${message.length} chars), waiting…`);
136
+
137
+ const deadline = Date.now() + timeoutMs;
138
+ let lastText = "";
139
+ let stableCount = 0;
140
+
141
+ while (Date.now() < deadline) {
142
+ await new Promise((r) => setTimeout(r, STABLE_INTERVAL_MS));
143
+
144
+ const currentCount = await countAssistantMessages(page);
145
+ if (currentCount <= countBefore) continue;
146
+
147
+ // Still generating?
148
+ const streaming = await isStreaming(page);
149
+ if (streaming) { stableCount = 0; continue; }
150
+
151
+ const text = await getLastAssistantText(page);
152
+ if (!text) continue;
153
+
154
+ if (text === lastText) {
155
+ stableCount++;
156
+ if (stableCount >= STABLE_CHECKS) {
157
+ log(`chatgpt-browser: response stable (${text.length} chars)`);
158
+ return text;
159
+ }
160
+ } else {
161
+ stableCount = 0;
162
+ lastText = text;
163
+ }
164
+ }
165
+
166
+ throw new Error(`chatgpt.com response timeout after ${timeoutMs}ms`);
167
+ }
168
+
169
+ // ─────────────────────────────────────────────────────────────────────────────
170
+
171
+ export async function chatgptComplete(
172
+ context: BrowserContext,
173
+ opts: ChatGPTBrowserOptions,
174
+ log: (msg: string) => void
175
+ ): Promise<ChatGPTBrowserResult> {
176
+ const { page, owned } = await getOrCreateChatGPTPage(context);
177
+ const model = resolveModel(opts.model);
178
+ const prompt = flattenMessages(opts.messages);
179
+ const timeoutMs = opts.timeoutMs ?? DEFAULT_TIMEOUT_MS;
180
+
181
+ log(`chatgpt-browser: complete model=${model}`);
182
+
183
+ try {
184
+ const content = await sendAndWait(page, prompt, timeoutMs, log);
185
+ return { content, model, finishReason: "stop" };
186
+ } finally {
187
+ if (owned) await page.close().catch(() => {});
188
+ }
189
+ }
190
+
191
+ export async function chatgptCompleteStream(
192
+ context: BrowserContext,
193
+ opts: ChatGPTBrowserOptions,
194
+ onToken: (token: string) => void,
195
+ log: (msg: string) => void
196
+ ): Promise<ChatGPTBrowserResult> {
197
+ const { page, owned } = await getOrCreateChatGPTPage(context);
198
+ const model = resolveModel(opts.model);
199
+ const prompt = flattenMessages(opts.messages);
200
+ const timeoutMs = opts.timeoutMs ?? DEFAULT_TIMEOUT_MS;
201
+
202
+ log(`chatgpt-browser: stream model=${model}`);
203
+
204
+ const countBefore = await countAssistantMessages(page);
205
+
206
+ await page.evaluate((msg: string) => {
207
+ const ed = document.querySelector("#prompt-textarea") as HTMLElement | null;
208
+ if (!ed) throw new Error("ChatGPT editor not found");
209
+ ed.focus();
210
+ document.execCommand("insertText", false, msg);
211
+ }, prompt);
212
+ await new Promise((r) => setTimeout(r, 300));
213
+ const sendBtn = page.locator('button[data-testid="send-button"]').first();
214
+ if (await sendBtn.isVisible().catch(() => false)) await sendBtn.click();
215
+ else await page.keyboard.press("Enter");
216
+
217
+ const deadline = Date.now() + timeoutMs;
218
+ let emittedLength = 0;
219
+ let lastText = "";
220
+ let stableCount = 0;
221
+
222
+ while (Date.now() < deadline) {
223
+ await new Promise((r) => setTimeout(r, STABLE_INTERVAL_MS));
224
+
225
+ const currentCount = await countAssistantMessages(page);
226
+ if (currentCount <= countBefore) continue;
227
+
228
+ const text = await getLastAssistantText(page);
229
+
230
+ if (text.length > emittedLength) {
231
+ onToken(text.slice(emittedLength));
232
+ emittedLength = text.length;
233
+ }
234
+
235
+ const streaming = await isStreaming(page);
236
+ if (streaming) { stableCount = 0; continue; }
237
+
238
+ if (text && text === lastText) {
239
+ stableCount++;
240
+ if (stableCount >= STABLE_CHECKS) {
241
+ log(`chatgpt-browser: stream done (${text.length} chars)`);
242
+ if (owned) await page.close().catch(() => {});
243
+ return { content: text, model, finishReason: "stop" };
244
+ }
245
+ } else {
246
+ stableCount = 0;
247
+ lastText = text;
248
+ }
249
+ }
250
+
251
+ if (owned) await page.close().catch(() => {});
252
+ throw new Error(`chatgpt.com stream timeout after ${timeoutMs}ms`);
253
+ }
@@ -0,0 +1,242 @@
1
+ /**
2
+ * gemini-browser.ts
3
+ *
4
+ * Gemini web automation via Playwright DOM-polling.
5
+ * Strategy identical to grok-client.ts / claude-browser.ts.
6
+ *
7
+ * DOM structure (confirmed 2026-03-11):
8
+ * Editor: .ql-editor (Quill — use page.type(), NOT execCommand)
9
+ * Response: message-content (custom element, innerText = clean response)
10
+ * Also: .markdown (same content, markdown-rendered)
11
+ */
12
+
13
+ import type { BrowserContext, Page } from "playwright";
14
+
15
/** A single chat turn in OpenAI-style role/content form. */
export interface ChatMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

/** Options for one Gemini web-automation completion. */
export interface GeminiBrowserOptions {
  // Conversation to send; flattened into a single prompt string.
  messages: ChatMessage[];
  // Model id, optionally prefixed with "web-gemini/".
  model?: string;
  // Overall response timeout in milliseconds (default: DEFAULT_TIMEOUT_MS).
  timeoutMs?: number;
}

/** Result of a completed Gemini web-automation round-trip. */
export interface GeminiBrowserResult {
  content: string;
  model: string;
  finishReason: string;
}

// Polling parameters: a response counts as final once its text is unchanged
// for STABLE_CHECKS consecutive polls taken STABLE_INTERVAL_MS apart.
const DEFAULT_TIMEOUT_MS = 120_000;
const STABLE_CHECKS = 3;
const STABLE_INTERVAL_MS = 600; // slightly longer — Gemini streams slower
const GEMINI_HOME = "https://gemini.google.com/app";

// Maps dash-normalised CLI-bridge model ids to canonical Gemini names.
// Ids not listed here fall back to "gemini-2.5-pro" in resolveModel().
const MODEL_MAP: Record<string, string> = {
  "gemini-2-5-pro": "gemini-2.5-pro",
  "gemini-2-5-flash": "gemini-2.5-flash",
  "gemini-flash": "gemini-flash",
  "gemini-pro": "gemini-pro",
  "gemini-3-pro": "gemini-3-pro",
  "gemini-3-flash": "gemini-3-flash",
};
45
+
46
+ function resolveModel(m?: string): string {
47
+ const clean = (m ?? "gemini-2-5-pro").replace("web-gemini/", "").replace(/\./g, "-");
48
+ return MODEL_MAP[clean] ?? "gemini-2.5-pro";
49
+ }
50
+
51
+ function flattenMessages(messages: ChatMessage[]): string {
52
+ if (messages.length === 1) return messages[0].content;
53
+ return messages
54
+ .map((m) => {
55
+ if (m.role === "system") return `[System]: ${m.content}`;
56
+ if (m.role === "assistant") return `[Assistant]: ${m.content}`;
57
+ return m.content;
58
+ })
59
+ .join("\n\n");
60
+ }
61
+
62
+ /**
63
+ * Get or create a Gemini page in the given context.
64
+ */
65
+ export async function getOrCreateGeminiPage(
66
+ context: BrowserContext
67
+ ): Promise<{ page: Page; owned: boolean }> {
68
+ const existing = context.pages().filter((p) => p.url().startsWith("https://gemini.google.com"));
69
+ if (existing.length > 0) return { page: existing[0], owned: false };
70
+ const page = await context.newPage();
71
+ await page.goto(GEMINI_HOME, { waitUntil: "domcontentloaded", timeout: 15_000 });
72
+ await new Promise((r) => setTimeout(r, 2_000));
73
+ return { page, owned: true };
74
+ }
75
+
76
+ /**
77
+ * Count model-response elements on the page (= number of assistant turns).
78
+ */
79
+ async function countResponses(page: Page): Promise<number> {
80
+ return page.evaluate(() => document.querySelectorAll("model-response").length);
81
+ }
82
+
83
+ /**
84
+ * Get the text of the last model-response via message-content element.
85
+ * Uses message-content (cleanest, no "Gemini hat gesagt" prefix).
86
+ */
87
+ async function getLastResponseText(page: Page): Promise<string> {
88
+ return page.evaluate(() => {
89
+ const els = [...document.querySelectorAll("message-content")];
90
+ if (!els.length) return "";
91
+ return els[els.length - 1].textContent?.trim() ?? "";
92
+ });
93
+ }
94
+
95
+ /**
96
+ * Check if Gemini is still generating (streaming indicator present).
97
+ */
98
+ async function isStreaming(page: Page): Promise<boolean> {
99
+ return page.evaluate(() => {
100
+ // Gemini shows a stop button while streaming
101
+ const stopBtn = document.querySelector('button[aria-label*="stop"], button[aria-label*="Stop"], button[aria-label*="stopp"]');
102
+ return !!stopBtn;
103
+ });
104
+ }
105
+
106
+ /**
107
+ * Send a message and wait for a stable response via DOM-polling.
108
+ */
109
+ async function sendAndWait(
110
+ page: Page,
111
+ message: string,
112
+ timeoutMs: number,
113
+ log: (msg: string) => void
114
+ ): Promise<string> {
115
+ const countBefore = await countResponses(page);
116
+
117
+ // Quill editor: use page.type() (not execCommand — Quill ignores it)
118
+ const editor = page.locator(".ql-editor");
119
+ await editor.click();
120
+ await editor.type(message, { delay: 10 });
121
+ await new Promise((r) => setTimeout(r, 300));
122
+ await page.keyboard.press("Enter");
123
+
124
+ log(`gemini-browser: message sent (${message.length} chars), waiting…`);
125
+
126
+ const deadline = Date.now() + timeoutMs;
127
+ let lastText = "";
128
+ let stableCount = 0;
129
+
130
+ while (Date.now() < deadline) {
131
+ await new Promise((r) => setTimeout(r, STABLE_INTERVAL_MS));
132
+
133
+ // Wait for new response to appear
134
+ const currentCount = await countResponses(page);
135
+ if (currentCount <= countBefore) continue;
136
+
137
+ // Still streaming? Don't start stable-check yet
138
+ const streaming = await isStreaming(page);
139
+ if (streaming) {
140
+ stableCount = 0;
141
+ lastText = await getLastResponseText(page);
142
+ continue;
143
+ }
144
+
145
+ const text = await getLastResponseText(page);
146
+ if (!text) continue;
147
+
148
+ if (text === lastText) {
149
+ stableCount++;
150
+ if (stableCount >= STABLE_CHECKS) {
151
+ log(`gemini-browser: response stable (${text.length} chars)`);
152
+ return text;
153
+ }
154
+ } else {
155
+ stableCount = 0;
156
+ lastText = text;
157
+ }
158
+ }
159
+
160
+ throw new Error(`gemini.google.com response timeout after ${timeoutMs}ms`);
161
+ }
162
+
163
+ // ─────────────────────────────────────────────────────────────────────────────
164
+
165
+ export async function geminiComplete(
166
+ context: BrowserContext,
167
+ opts: GeminiBrowserOptions,
168
+ log: (msg: string) => void
169
+ ): Promise<GeminiBrowserResult> {
170
+ const { page, owned } = await getOrCreateGeminiPage(context);
171
+ const model = resolveModel(opts.model);
172
+ const prompt = flattenMessages(opts.messages);
173
+ const timeoutMs = opts.timeoutMs ?? DEFAULT_TIMEOUT_MS;
174
+
175
+ log(`gemini-browser: complete model=${model}`);
176
+
177
+ try {
178
+ const content = await sendAndWait(page, prompt, timeoutMs, log);
179
+ return { content, model, finishReason: "stop" };
180
+ } finally {
181
+ if (owned) await page.close().catch(() => {});
182
+ }
183
+ }
184
+
185
+ export async function geminiCompleteStream(
186
+ context: BrowserContext,
187
+ opts: GeminiBrowserOptions,
188
+ onToken: (token: string) => void,
189
+ log: (msg: string) => void
190
+ ): Promise<GeminiBrowserResult> {
191
+ const { page, owned } = await getOrCreateGeminiPage(context);
192
+ const model = resolveModel(opts.model);
193
+ const prompt = flattenMessages(opts.messages);
194
+ const timeoutMs = opts.timeoutMs ?? DEFAULT_TIMEOUT_MS;
195
+
196
+ log(`gemini-browser: stream model=${model}`);
197
+
198
+ const countBefore = await countResponses(page);
199
+
200
+ const editor = page.locator(".ql-editor");
201
+ await editor.click();
202
+ await editor.type(prompt, { delay: 10 });
203
+ await new Promise((r) => setTimeout(r, 300));
204
+ await page.keyboard.press("Enter");
205
+
206
+ const deadline = Date.now() + timeoutMs;
207
+ let emittedLength = 0;
208
+ let lastText = "";
209
+ let stableCount = 0;
210
+
211
+ while (Date.now() < deadline) {
212
+ await new Promise((r) => setTimeout(r, STABLE_INTERVAL_MS));
213
+
214
+ const currentCount = await countResponses(page);
215
+ if (currentCount <= countBefore) continue;
216
+
217
+ const text = await getLastResponseText(page);
218
+
219
+ if (text.length > emittedLength) {
220
+ onToken(text.slice(emittedLength));
221
+ emittedLength = text.length;
222
+ }
223
+
224
+ const streaming = await isStreaming(page);
225
+ if (streaming) { stableCount = 0; continue; }
226
+
227
+ if (text && text === lastText) {
228
+ stableCount++;
229
+ if (stableCount >= STABLE_CHECKS) {
230
+ log(`gemini-browser: stream done (${text.length} chars)`);
231
+ if (owned) await page.close().catch(() => {});
232
+ return { content: text, model, finishReason: "stop" };
233
+ }
234
+ } else {
235
+ stableCount = 0;
236
+ lastText = text;
237
+ }
238
+ }
239
+
240
+ if (owned) await page.close().catch(() => {});
241
+ throw new Error(`gemini.google.com stream timeout after ${timeoutMs}ms`);
242
+ }
@@ -14,6 +14,8 @@ import { type ChatMessage, routeToCliRunner } from "./cli-runner.js";
14
14
  import { scheduleTokenRefresh, setAuthLogger, stopTokenRefresh } from "./claude-auth.js";
15
15
  import { grokComplete, grokCompleteStream, type ChatMessage as GrokChatMessage } from "./grok-client.js";
16
16
  import { claudeComplete, claudeCompleteStream, type ChatMessage as ClaudeBrowserChatMessage } from "./claude-browser.js";
17
+ import { geminiComplete, geminiCompleteStream, type ChatMessage as GeminiBrowserChatMessage } from "./gemini-browser.js";
18
+ import { chatgptComplete, chatgptCompleteStream, type ChatMessage as ChatGPTBrowserChatMessage } from "./chatgpt-browser.js";
17
19
  import type { BrowserContext } from "playwright";
18
20
 
19
21
  export type GrokCompleteOptions = Parameters<typeof grokComplete>[1];
@@ -42,6 +44,22 @@ export interface ProxyServerOptions {
42
44
  _claudeComplete?: typeof claudeComplete;
43
45
  /** Override for testing — replaces claudeCompleteStream */
44
46
  _claudeCompleteStream?: typeof claudeCompleteStream;
47
+ /** Returns the current authenticated Gemini BrowserContext (null if not logged in) */
48
+ getGeminiContext?: () => BrowserContext | null;
49
+ /** Async lazy connect — called when getGeminiContext returns null */
50
+ connectGeminiContext?: () => Promise<BrowserContext | null>;
51
+ /** Override for testing — replaces geminiComplete */
52
+ _geminiComplete?: typeof geminiComplete;
53
+ /** Override for testing — replaces geminiCompleteStream */
54
+ _geminiCompleteStream?: typeof geminiCompleteStream;
55
+ /** Returns the current authenticated ChatGPT BrowserContext */
56
+ getChatGPTContext?: () => BrowserContext | null;
57
+ /** Async lazy connect for ChatGPT */
58
+ connectChatGPTContext?: () => Promise<BrowserContext | null>;
59
+ /** Override for testing */
60
+ _chatgptComplete?: typeof chatgptComplete;
61
+ /** Override for testing */
62
+ _chatgptCompleteStream?: typeof chatgptCompleteStream;
45
63
  }
46
64
 
47
65
  /** Available CLI bridge models for GET /v1/models */
@@ -91,6 +109,17 @@ export const CLI_MODELS = [
91
109
  { id: "web-claude/claude-sonnet", name: "Claude Sonnet (web session)", contextWindow: 200_000, maxTokens: 8192 },
92
110
  { id: "web-claude/claude-opus", name: "Claude Opus (web session)", contextWindow: 200_000, maxTokens: 8192 },
93
111
  { id: "web-claude/claude-haiku", name: "Claude Haiku (web session)", contextWindow: 200_000, maxTokens: 8192 },
112
+ // Gemini web-session models (requires /gemini-login)
113
+ { id: "web-gemini/gemini-2-5-pro", name: "Gemini 2.5 Pro (web session)", contextWindow: 1_000_000, maxTokens: 8192 },
114
+ { id: "web-gemini/gemini-2-5-flash", name: "Gemini 2.5 Flash (web session)", contextWindow: 1_000_000, maxTokens: 8192 },
115
+ { id: "web-gemini/gemini-3-pro", name: "Gemini 3 Pro (web session)", contextWindow: 1_000_000, maxTokens: 8192 },
116
+ { id: "web-gemini/gemini-3-flash", name: "Gemini 3 Flash (web session)", contextWindow: 1_000_000, maxTokens: 8192 },
117
+ // ChatGPT web-session models (requires /chatgpt-login)
118
+ { id: "web-chatgpt/gpt-4o", name: "GPT-4o (web session)", contextWindow: 128_000, maxTokens: 16_384 },
119
+ { id: "web-chatgpt/gpt-4o-mini", name: "GPT-4o Mini (web session)", contextWindow: 128_000, maxTokens: 16_384 },
120
+ { id: "web-chatgpt/gpt-o3", name: "o3 (web session)", contextWindow: 200_000, maxTokens: 100_000 },
121
+ { id: "web-chatgpt/gpt-o4-mini", name: "o4-mini (web session)", contextWindow: 200_000, maxTokens: 100_000 },
122
+ { id: "web-chatgpt/gpt-5", name: "GPT-5 (web session)", contextWindow: 1_000_000, maxTokens: 32_768 },
94
123
  ];
95
124
 
96
125
  // ──────────────────────────────────────────────────────────────────────────────
@@ -311,6 +340,92 @@ async function handleRequest(
311
340
  }
312
341
  // ─────────────────────────────────────────────────────────────────────────
313
342
 
343
+ // ── Gemini web-session routing ────────────────────────────────────────────
344
+ if (model.startsWith("web-gemini/")) {
345
+ let geminiCtx = opts.getGeminiContext?.() ?? null;
346
+ if (!geminiCtx && opts.connectGeminiContext) {
347
+ geminiCtx = await opts.connectGeminiContext();
348
+ }
349
+ if (!geminiCtx) {
350
+ res.writeHead(503, { "Content-Type": "application/json" });
351
+ res.end(JSON.stringify({ error: { message: "No active gemini.google.com session. Use /gemini-login to authenticate.", code: "no_gemini_session" } }));
352
+ return;
353
+ }
354
+ const timeoutMs = opts.timeoutMs ?? 120_000;
355
+ const geminiMessages = messages as GeminiBrowserChatMessage[];
356
+ const doGeminiComplete = opts._geminiComplete ?? geminiComplete;
357
+ const doGeminiCompleteStream = opts._geminiCompleteStream ?? geminiCompleteStream;
358
+ try {
359
+ if (stream) {
360
+ res.writeHead(200, { "Content-Type": "text/event-stream", "Cache-Control": "no-cache", Connection: "keep-alive", ...corsHeaders() });
361
+ sendSseChunk(res, { id, created, model, delta: { role: "assistant" }, finish_reason: null });
362
+ const result = await doGeminiCompleteStream(
363
+ geminiCtx,
364
+ { messages: geminiMessages, model, timeoutMs },
365
+ (token) => sendSseChunk(res, { id, created, model, delta: { content: token }, finish_reason: null }),
366
+ opts.log
367
+ );
368
+ sendSseChunk(res, { id, created, model, delta: {}, finish_reason: result.finishReason });
369
+ res.write("data: [DONE]\n\n");
370
+ res.end();
371
+ } else {
372
+ const result = await doGeminiComplete(geminiCtx, { messages: geminiMessages, model, timeoutMs }, opts.log);
373
+ res.writeHead(200, { "Content-Type": "application/json", ...corsHeaders() });
374
+ res.end(JSON.stringify({
375
+ id, object: "chat.completion", created, model,
376
+ choices: [{ index: 0, message: { role: "assistant", content: result.content }, finish_reason: result.finishReason }],
377
+ usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
378
+ }));
379
+ }
380
+ } catch (err) {
381
+ const msg = (err as Error).message;
382
+ opts.warn(`[cli-bridge] Gemini browser error for ${model}: ${msg}`);
383
+ if (!res.headersSent) {
384
+ res.writeHead(500, { "Content-Type": "application/json" });
385
+ res.end(JSON.stringify({ error: { message: msg, type: "gemini_browser_error" } }));
386
+ }
387
+ }
388
+ return;
389
+ }
390
+ // ─────────────────────────────────────────────────────────────────────────
391
+
392
+ // ── ChatGPT web-session routing ───────────────────────────────────────────
393
+ if (model.startsWith("web-chatgpt/")) {
394
+ let chatgptCtx = opts.getChatGPTContext?.() ?? null;
395
+ if (!chatgptCtx && opts.connectChatGPTContext) chatgptCtx = await opts.connectChatGPTContext();
396
+ if (!chatgptCtx) {
397
+ res.writeHead(503, { "Content-Type": "application/json" });
398
+ res.end(JSON.stringify({ error: { message: "No active chatgpt.com session. Use /chatgpt-login to authenticate.", code: "no_chatgpt_session" } }));
399
+ return;
400
+ }
401
+ const timeoutMs = opts.timeoutMs ?? 120_000;
402
+ const msgs = messages as ChatGPTBrowserChatMessage[];
403
+ const doComplete = opts._chatgptComplete ?? chatgptComplete;
404
+ const doStream = opts._chatgptCompleteStream ?? chatgptCompleteStream;
405
+ try {
406
+ if (stream) {
407
+ res.writeHead(200, { "Content-Type": "text/event-stream", "Cache-Control": "no-cache", Connection: "keep-alive", ...corsHeaders() });
408
+ sendSseChunk(res, { id, created, model, delta: { role: "assistant" }, finish_reason: null });
409
+ const result = await doStream(chatgptCtx, { messages: msgs, model, timeoutMs },
410
+ (token) => sendSseChunk(res, { id, created, model, delta: { content: token }, finish_reason: null }), opts.log);
411
+ sendSseChunk(res, { id, created, model, delta: {}, finish_reason: result.finishReason });
412
+ res.write("data: [DONE]\n\n"); res.end();
413
+ } else {
414
+ const result = await doComplete(chatgptCtx, { messages: msgs, model, timeoutMs }, opts.log);
415
+ res.writeHead(200, { "Content-Type": "application/json", ...corsHeaders() });
416
+ res.end(JSON.stringify({ id, object: "chat.completion", created, model,
417
+ choices: [{ index: 0, message: { role: "assistant", content: result.content }, finish_reason: result.finishReason }],
418
+ usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 } }));
419
+ }
420
+ } catch (err) {
421
+ const msg = (err as Error).message;
422
+ opts.warn(`[cli-bridge] ChatGPT browser error for ${model}: ${msg}`);
423
+ if (!res.headersSent) { res.writeHead(500, { "Content-Type": "application/json" }); res.end(JSON.stringify({ error: { message: msg, type: "chatgpt_browser_error" } })); }
424
+ }
425
+ return;
426
+ }
427
+ // ─────────────────────────────────────────────────────────────────────────
428
+
314
429
  // ── CLI runner routing (Gemini / Claude Code) ─────────────────────────────
315
430
  let content: string;
316
431
  try {