opencode-cursor-oauth 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/proxy.js ADDED
@@ -0,0 +1,992 @@
1
+ /**
2
+ * Local OpenAI-compatible proxy that translates requests to Cursor's gRPC protocol.
3
+ *
4
+ * Accepts POST /v1/chat/completions in OpenAI format, translates to Cursor's
5
+ * protobuf/HTTP2 Connect protocol, and streams back OpenAI-format SSE.
6
+ *
7
+ * Tool calling uses Cursor's native MCP tool protocol:
8
+ * - OpenAI tool defs → McpToolDefinition in RequestContext
9
+ * - Cursor toolCallStarted/Delta/Completed → OpenAI tool_calls SSE chunks
10
+ * - mcpArgs exec → pause stream, return tool_calls to caller
11
+ * - Follow-up request with tool results → resume bridge with mcpResult
12
+ *
13
+ * HTTP/2 transport is delegated to a Node child process (h2-bridge.mjs)
14
+ * because Bun's node:http2 module is broken.
15
+ */
16
+ import { create, fromBinary, fromJson, toBinary, toJson } from "@bufbuild/protobuf";
17
+ import { ValueSchema } from "@bufbuild/protobuf/wkt";
18
+ import { AgentClientMessageSchema, AgentRunRequestSchema, AgentServerMessageSchema, ClientHeartbeatSchema, ConversationActionSchema, ConversationStateStructureSchema, ConversationStepSchema, AgentConversationTurnStructureSchema, ConversationTurnStructureSchema, AssistantMessageSchema, BackgroundShellSpawnResultSchema, DeleteResultSchema, DeleteRejectedSchema, DiagnosticsResultSchema, ExecClientMessageSchema, FetchErrorSchema, FetchResultSchema, GetBlobResultSchema, GrepErrorSchema, GrepResultSchema, KvClientMessageSchema, LsRejectedSchema, LsResultSchema, McpErrorSchema, McpResultSchema, McpSuccessSchema, McpTextContentSchema, McpToolDefinitionSchema, McpToolResultContentItemSchema, ModelDetailsSchema, ReadRejectedSchema, ReadResultSchema, RequestContextResultSchema, RequestContextSchema, RequestContextSuccessSchema, SetBlobResultSchema, ShellRejectedSchema, ShellResultSchema, UserMessageActionSchema, UserMessageSchema, WriteRejectedSchema, WriteResultSchema, WriteShellStdinErrorSchema, WriteShellStdinResultSchema, } from "./proto/agent_pb";
19
+ import { createHash } from "node:crypto";
20
+ import { resolve as pathResolve } from "node:path";
21
// Cursor's production API origin; the bridge opens its HTTP/2 stream here.
const CURSOR_API_URL = "https://api2.cursor.sh";
// Connect streaming protocol: frame flag bit set on the end-of-stream trailer frame.
const CONNECT_END_STREAM_FLAG = 0b00000010;
// Node child script that owns the HTTP/2 connection (see file header for why).
const BRIDGE_PATH = pathResolve(import.meta.dir, "h2-bridge.mjs");
// Active bridges keyed by a session token (derived from conversation state).
// When tool_calls are returned, the bridge stays alive. The next request
// with tool results looks up the bridge and sends mcpResult messages.
const activeBridges = new Map();
28
// --- H2 Bridge IPC ---
/** Length-prefix a message: [4-byte BE length][payload] */
function lpEncode(data) {
    const header = Buffer.alloc(4);
    header.writeUInt32BE(data.length, 0);
    return Buffer.concat([header, Buffer.from(data)], 4 + data.length);
}
36
/** Connect protocol frame: [1-byte flags][4-byte BE length][payload] */
function frameConnectMessage(data, flags = 0) {
    const header = Buffer.alloc(5);
    header[0] = flags;
    header.writeUInt32BE(data.length, 1);
    return Buffer.concat([header, Buffer.from(data)], 5 + data.length);
}
44
/**
 * Spawn the Node H2 bridge and return read/write handles.
 * The bridge uses length-prefixed framing on stdin/stdout.
 */
function spawnBridge(accessToken) {
    const proc = Bun.spawn(["node", BRIDGE_PATH], {
        stdin: "pipe",
        stdout: "pipe",
        stderr: "ignore",
    });
    // The very first stdin message is the JSON connection config.
    const config = JSON.stringify({
        accessToken,
        url: CURSOR_API_URL,
        path: "/agent.v1.AgentService/Run",
    });
    proc.stdin.write(lpEncode(new TextEncoder().encode(config)));
    const callbacks = { data: null, close: null };
    // Background pump: reassemble length-prefixed frames from stdout.
    (async () => {
        const reader = proc.stdout.getReader();
        let buffered = Buffer.alloc(0);
        try {
            for (;;) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffered = Buffer.concat([buffered, Buffer.from(value)]);
                // Drain every complete [len][payload] frame currently buffered.
                while (buffered.length >= 4) {
                    const frameLen = buffered.readUInt32BE(0);
                    if (buffered.length < 4 + frameLen)
                        break;
                    const payload = Buffer.from(buffered.subarray(4, 4 + frameLen));
                    buffered = buffered.subarray(4 + frameLen);
                    callbacks.data?.(payload);
                }
            }
        }
        catch {
            // Stream ended
        }
        proc.exited.then((code) => callbacks.close?.(code ?? 1));
    })();
    return {
        proc,
        write(data) {
            try {
                proc.stdin.write(lpEncode(data));
            }
            catch { }
        },
        end() {
            try {
                // Zero-length frame tells the bridge to half-close cleanly.
                proc.stdin.write(lpEncode(new Uint8Array(0)));
                proc.stdin.end();
            }
            catch { }
        },
        onData(cb) { callbacks.data = cb; },
        onClose(cb) { callbacks.close = cb; },
    };
}
107
// --- Proxy Server ---
// Singleton server handle and its bound port; both undefined until startProxy().
let proxyServer;
let proxyPort;
/** Port the proxy is currently listening on, or undefined when stopped. */
export function getProxyPort() {
    return proxyPort;
}
113
/**
 * Start the local OpenAI-compatible proxy on an ephemeral port.
 * Idempotent: if a server is already running, its port is returned.
 *
 * @param getAccessToken async () => string — supplies a fresh Cursor access
 *   token for each chat-completion request.
 * @returns the bound port number.
 * @throws Error when the server fails to bind a port.
 */
export async function startProxy(getAccessToken) {
    if (proxyServer && proxyPort)
        return proxyPort;
    proxyServer = Bun.serve({
        port: 0,
        idleTimeout: 255, // max — Cursor responses can take 30s+
        async fetch(req) {
            const url = new URL(req.url);
            if (req.method === "GET" && url.pathname === "/v1/models") {
                return new Response(JSON.stringify({ object: "list", data: [] }), { headers: { "Content-Type": "application/json" } });
            }
            if (req.method === "POST" && url.pathname === "/v1/chat/completions") {
                let body;
                try {
                    body = await req.json();
                }
                catch {
                    // Malformed JSON is the client's fault: report 400
                    // invalid_request_error, not 500 server_error.
                    return new Response(JSON.stringify({
                        error: { message: "Invalid JSON body", type: "invalid_request_error", code: "invalid_json" },
                    }), { status: 400, headers: { "Content-Type": "application/json" } });
                }
                try {
                    const accessToken = await getAccessToken();
                    return handleChatCompletion(body, accessToken);
                }
                catch (err) {
                    const message = err instanceof Error ? err.message : String(err);
                    return new Response(JSON.stringify({
                        error: { message, type: "server_error", code: "internal_error" },
                    }), { status: 500, headers: { "Content-Type": "application/json" } });
                }
            }
            return new Response("Not Found", { status: 404 });
        },
    });
    proxyPort = proxyServer.port;
    if (!proxyPort)
        throw new Error("Failed to bind proxy to a port");
    return proxyPort;
}
145
/** Stop the proxy server and tear down any bridges still waiting on tool results. */
export function stopProxy() {
    if (proxyServer) {
        proxyServer.stop();
        proxyServer = undefined;
        proxyPort = undefined;
    }
    // Clean up any lingering bridges
    for (const active of activeBridges.values()) {
        clearInterval(active.heartbeatTimer);
        active.bridge.end();
    }
    activeBridges.clear();
}
158
// --- Chat Completion Handler ---
/**
 * Route one /v1/chat/completions request: either resume a paused bridge
 * with tool results, or build a fresh Cursor run request and stream it.
 */
function handleChatCompletion(body, accessToken) {
    const parsed = parseMessages(body.messages);
    const modelId = body.model;
    const toolDefs = body.tools ?? [];
    if (!parsed.userText && parsed.toolResults.length === 0) {
        const errBody = JSON.stringify({
            error: {
                message: "No user message found",
                type: "invalid_request_error",
            },
        });
        return new Response(errBody, { status: 400, headers: { "Content-Type": "application/json" } });
    }
    // Check for an active bridge waiting for tool results
    const bridgeKey = deriveBridgeKey(modelId, body.messages);
    const existing = activeBridges.get(bridgeKey);
    if (existing) {
        activeBridges.delete(bridgeKey);
        if (parsed.toolResults.length > 0) {
            // Continuation: feed the tool results into the still-open bridge.
            return handleToolResultResume(existing, parsed.toolResults, modelId, toolDefs, accessToken, bridgeKey);
        }
        // Stale bridge for this conversation — tear it down and start over.
        clearInterval(existing.heartbeatTimer);
        existing.bridge.end();
    }
    const payload = buildCursorRequest(modelId, parsed.systemPrompt, parsed.userText, parsed.turns);
    payload.mcpTools = buildMcpToolDefinitions(toolDefs);
    return body.stream === false
        ? handleNonStreamingResponse(payload, accessToken, modelId)
        : handleStreamingResponse(payload, accessToken, modelId, bridgeKey);
}
193
/**
 * Split an OpenAI messages array into: the joined system prompt, the current
 * user message, prior (user, assistant) turns, and trailing tool results.
 */
function parseMessages(messages) {
    const systemParts = [];
    const turns = [];
    const toolResults = [];
    let pendingUser = "";
    for (const msg of messages) {
        switch (msg.role) {
            case "system":
                systemParts.push(msg.content ?? "");
                break;
            case "tool":
                toolResults.push({
                    toolCallId: msg.tool_call_id ?? "",
                    content: msg.content ?? "",
                });
                break;
            case "user":
                // Two users in a row: the earlier one becomes a turn with no reply.
                if (pendingUser) {
                    turns.push({ userText: pendingUser, assistantText: "" });
                }
                pendingUser = msg.content ?? "";
                break;
            case "assistant":
                // An assistant reply closes the pending user turn. Assistant
                // messages with no pending user (e.g. tool_calls only) are dropped.
                if (pendingUser) {
                    turns.push({ userText: pendingUser, assistantText: msg.content ?? "" });
                    pendingUser = "";
                }
                break;
        }
    }
    let userText = pendingUser;
    if (!userText && turns.length > 0 && toolResults.length === 0) {
        // No trailing user message: replay the last turn's user text as current.
        userText = turns.pop().userText;
    }
    const systemPrompt = systemParts.length > 0
        ? systemParts.join("\n")
        : "You are a helpful assistant.";
    return { systemPrompt, userText, turns, toolResults };
}
239
// --- MCP Tool Definitions ---
/** Convert OpenAI tool definitions to Cursor's MCP tool protobuf format. */
function buildMcpToolDefinitions(tools) {
    const emptyObjectSchema = { type: "object", properties: {}, required: [] };
    return tools.map(({ function: fn }) => {
        // Fall back to an empty object schema when no parameters are declared.
        const params = fn.parameters && typeof fn.parameters === "object"
            ? fn.parameters
            : emptyObjectSchema;
        return create(McpToolDefinitionSchema, {
            name: fn.name,
            description: fn.description || "",
            providerIdentifier: "opencode",
            toolName: fn.name,
            inputSchema: toBinary(ValueSchema, fromJson(ValueSchema, params)),
        });
    });
}
257
/** Decode a Cursor MCP arg value (protobuf Value bytes) to a JS value. */
function decodeMcpArgValue(value) {
    try {
        return toJson(ValueSchema, fromBinary(ValueSchema, value));
    }
    catch {
        // Not a valid protobuf Value — best-effort fallback to UTF-8 text.
        return new TextDecoder().decode(value);
    }
}
266
/** Decode a map of MCP arg values. */
function decodeMcpArgsMap(args) {
    return Object.fromEntries(Object.entries(args).map(([key, value]) => [key, decodeMcpArgValue(value)]));
}
274
// --- gRPC Request Building ---
/**
 * Build the initial AgentService/Run client message for a conversation.
 *
 * Encodes prior turns as AgentConversationTurnStructure entries (user message
 * plus an optional assistant step), stores the system prompt in a local blob
 * store addressed by its SHA-256 (Cursor fetches it back via the KV
 * getBlobArgs handshake — see handleKvMessage), and wraps the current user
 * message in a runRequest envelope.
 *
 * @param modelId       Cursor model identifier (used verbatim for display fields too).
 * @param systemPrompt  joined system prompt text.
 * @param userText      the current user message.
 * @param turns         prior { userText, assistantText } pairs from parseMessages().
 * @returns { requestBytes, blobStore, mcpTools } — mcpTools starts empty and is
 *          filled in by the caller (handleChatCompletion).
 */
function buildCursorRequest(modelId, systemPrompt, userText, turns) {
    const blobStore = new Map();
    const turnBytes = [];
    for (const turn of turns) {
        const userMsg = create(UserMessageSchema, {
            text: turn.userText,
            messageId: crypto.randomUUID(),
        });
        const userMsgBytes = toBinary(UserMessageSchema, userMsg);
        const stepBytes = [];
        // Only emit an assistant step when the turn actually had a reply.
        if (turn.assistantText) {
            const step = create(ConversationStepSchema, {
                message: {
                    case: "assistantMessage",
                    value: create(AssistantMessageSchema, { text: turn.assistantText }),
                },
            });
            stepBytes.push(toBinary(ConversationStepSchema, step));
        }
        const agentTurn = create(AgentConversationTurnStructureSchema, {
            userMessage: userMsgBytes,
            steps: stepBytes,
        });
        const turnStructure = create(ConversationTurnStructureSchema, {
            turn: { case: "agentConversationTurn", value: agentTurn },
        });
        turnBytes.push(toBinary(ConversationTurnStructureSchema, turnStructure));
    }
    // System prompt → blob store (Cursor requests it back via KV handshake)
    const systemJson = JSON.stringify({ role: "system", content: systemPrompt });
    const systemBytes = new TextEncoder().encode(systemJson);
    // Blob id is the SHA-256 of the blob's bytes (content-addressed).
    const systemBlobId = new Uint8Array(createHash("sha256").update(systemBytes).digest());
    blobStore.set(Buffer.from(systemBlobId).toString("hex"), systemBytes);
    const conversationState = create(ConversationStateStructureSchema, {
        rootPromptMessagesJson: [systemBlobId],
        turns: turnBytes,
        // Remaining state fields are intentionally empty — this proxy carries
        // no workspace, todo, or summary state between requests.
        todos: [],
        pendingToolCalls: [],
        previousWorkspaceUris: [],
        fileStates: {},
        fileStatesV2: {},
        summaryArchives: [],
        turnTimings: [],
        subagentStates: {},
        selfSummaryCount: 0,
        readPaths: [],
    });
    const userMessage = create(UserMessageSchema, {
        text: userText,
        messageId: crypto.randomUUID(),
    });
    const action = create(ConversationActionSchema, {
        action: {
            case: "userMessageAction",
            value: create(UserMessageActionSchema, { userMessage }),
        },
    });
    const modelDetails = create(ModelDetailsSchema, {
        modelId,
        displayModelId: modelId,
        displayName: modelId,
    });
    const runRequest = create(AgentRunRequestSchema, {
        conversationState,
        action,
        modelDetails,
        conversationId: crypto.randomUUID(),
    });
    const clientMessage = create(AgentClientMessageSchema, {
        message: { case: "runRequest", value: runRequest },
    });
    return {
        requestBytes: toBinary(AgentClientMessageSchema, clientMessage),
        blobStore,
        mcpTools: [],
    };
}
352
// --- Connect Protocol Helpers ---
/**
 * Parse a Connect end-of-stream trailer payload.
 * Returns an Error when the trailer carries one (or cannot be parsed),
 * or null on a clean end of stream.
 */
function parseConnectEndStream(data) {
    let payload;
    try {
        payload = JSON.parse(new TextDecoder().decode(data));
    }
    catch {
        return new Error("Failed to parse Connect end stream");
    }
    const error = payload?.error;
    if (!error)
        return null;
    const code = typeof error.code === "string" ? error.code : "unknown";
    const message = typeof error.message === "string" ? error.message : "Unknown error";
    return new Error(`Connect error ${code}: ${message}`);
}
368
/**
 * Build a framed clientHeartbeat message. Sent on a timer by the streaming
 * handlers to keep the long-lived Run stream open while waiting on the model.
 */
function makeHeartbeatBytes() {
    const heartbeat = create(AgentClientMessageSchema, {
        message: {
            case: "clientHeartbeat",
            value: create(ClientHeartbeatSchema, {}),
        },
    });
    return frameConnectMessage(toBinary(AgentClientMessageSchema, heartbeat));
}
377
/**
 * Dispatch one decoded AgentServerMessage to its handler:
 * interaction updates → onText; KV blob traffic → handleKvMessage;
 * exec requests → handleExecMessage (which may surface MCP tool calls
 * via onMcpExec). Unknown message cases are ignored.
 */
function processServerMessage(msg, blobStore, mcpTools, sendFrame, state, onText, onMcpExec) {
    switch (msg.message.case) {
        case "interactionUpdate":
            handleInteractionUpdate(msg.message.value, onText);
            break;
        case "kvServerMessage":
            handleKvMessage(msg.message.value, blobStore, sendFrame);
            break;
        case "execServerMessage":
            handleExecMessage(msg.message.value, mcpTools, sendFrame, onMcpExec);
            break;
    }
}
389
/**
 * Handle interaction updates — text and thinking only.
 * MCP tool calls are handled entirely via mcpArgs exec messages,
 * not through interaction update callbacks.
 */
function handleInteractionUpdate(update, onText) {
    const kind = update.message?.case;
    const isThinking = kind === "thinkingDelta";
    if (kind !== "textDelta" && !isThinking) {
        // toolCallStarted, partialToolCall, toolCallDelta, toolCallCompleted
        // are intentionally ignored. MCP tool calls flow through the exec
        // message path (mcpArgs → mcpResult), not interaction updates.
        return;
    }
    const delta = update.message.value.text || "";
    if (delta)
        onText(delta, isThinking);
}
410
/**
 * Answer Cursor's KV blob handshake against the in-memory blob store.
 * getBlobArgs → getBlobResult (empty result when the blob is unknown);
 * setBlobArgs → store the blob and ack with an empty setBlobResult.
 */
function handleKvMessage(kvMsg, blobStore, sendFrame) {
    // Wrap a KV result in the client-message envelopes and send it.
    const reply = (resultCase, resultValue) => {
        const kvClient = create(KvClientMessageSchema, {
            id: kvMsg.id,
            message: { case: resultCase, value: resultValue },
        });
        const wrapped = create(AgentClientMessageSchema, {
            message: { case: "kvClientMessage", value: kvClient },
        });
        sendFrame(frameConnectMessage(toBinary(AgentClientMessageSchema, wrapped)));
    };
    switch (kvMsg.message.case) {
        case "getBlobArgs": {
            // Blobs are keyed by the hex of their id bytes.
            const key = Buffer.from(kvMsg.message.value.blobId).toString("hex");
            const blobData = blobStore.get(key);
            reply("getBlobResult", create(GetBlobResultSchema, blobData ? { blobData } : {}));
            break;
        }
        case "setBlobArgs": {
            const { blobId, blobData } = kvMsg.message.value;
            blobStore.set(Buffer.from(blobId).toString("hex"), blobData);
            reply("setBlobResult", create(SetBlobResultSchema, {}));
            break;
        }
    }
}
444
/**
 * Answer an ExecServerMessage from Cursor. Three categories:
 *  - requestContextArgs: reply with the caller's MCP tool definitions so the
 *    model learns which tools it may call.
 *  - mcpArgs: the model invoked one of our MCP tools — decode the args and
 *    hand off to onMcpExec (the streaming handler then emits tool_calls and
 *    pauses the bridge).
 *  - every native Cursor tool (read/ls/grep/write/delete/shell/fetch/…):
 *    answered with a rejection or error so the model falls back to MCP tools.
 */
function handleExecMessage(execMsg, mcpTools, sendFrame, onMcpExec) {
    const execCase = execMsg.message.case;
    if (execCase === "requestContextArgs") {
        // Advertise only the caller's tools; all other context fields are empty.
        const requestContext = create(RequestContextSchema, {
            rules: [],
            repositoryInfo: [],
            tools: mcpTools,
            gitRepos: [],
            projectLayouts: [],
            mcpInstructions: [],
            fileContents: {},
            customSubagents: [],
        });
        const result = create(RequestContextResultSchema, {
            result: {
                case: "success",
                value: create(RequestContextSuccessSchema, { requestContext }),
            },
        });
        sendExecResult(execMsg, "requestContextResult", result, sendFrame);
        return;
    }
    if (execCase === "mcpArgs") {
        const mcpArgs = execMsg.message.value;
        const decoded = decodeMcpArgsMap(mcpArgs.args ?? {});
        // No result is sent here — the caller pauses the stream and sends
        // mcpResult later, once the OpenAI client returns tool output.
        onMcpExec({
            execId: execMsg.execId,
            execMsgId: execMsg.id,
            toolCallId: mcpArgs.toolCallId || crypto.randomUUID(),
            toolName: mcpArgs.toolName || mcpArgs.name,
            decodedArgs: JSON.stringify(decoded),
        });
        return;
    }
    // --- Reject native Cursor tools ---
    // The model tries these first. We must respond with rejection/error
    // so it falls back to our MCP tools (registered via RequestContext).
    const REJECT_REASON = "Tool not available in this environment. Use the MCP tools provided instead.";
    if (execCase === "readArgs") {
        const args = execMsg.message.value;
        const result = create(ReadResultSchema, {
            result: { case: "rejected", value: create(ReadRejectedSchema, { path: args.path, reason: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "readResult", result, sendFrame);
        return;
    }
    if (execCase === "lsArgs") {
        const args = execMsg.message.value;
        const result = create(LsResultSchema, {
            result: { case: "rejected", value: create(LsRejectedSchema, { path: args.path, reason: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "lsResult", result, sendFrame);
        return;
    }
    if (execCase === "grepArgs") {
        // Grep has no "rejected" variant in the schema used here — report as error.
        const result = create(GrepResultSchema, {
            result: { case: "error", value: create(GrepErrorSchema, { error: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "grepResult", result, sendFrame);
        return;
    }
    if (execCase === "writeArgs") {
        const args = execMsg.message.value;
        const result = create(WriteResultSchema, {
            result: { case: "rejected", value: create(WriteRejectedSchema, { path: args.path, reason: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "writeResult", result, sendFrame);
        return;
    }
    if (execCase === "deleteArgs") {
        const args = execMsg.message.value;
        const result = create(DeleteResultSchema, {
            result: { case: "rejected", value: create(DeleteRejectedSchema, { path: args.path, reason: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "deleteResult", result, sendFrame);
        return;
    }
    if (execCase === "shellArgs" || execCase === "shellStreamArgs") {
        const args = execMsg.message.value;
        const result = create(ShellResultSchema, {
            result: {
                case: "rejected",
                value: create(ShellRejectedSchema, {
                    command: args.command ?? "",
                    workingDirectory: args.workingDirectory ?? "",
                    reason: REJECT_REASON,
                    isReadonly: false,
                }),
            },
        });
        sendExecResult(execMsg, "shellResult", result, sendFrame);
        return;
    }
    if (execCase === "backgroundShellSpawnArgs") {
        const args = execMsg.message.value;
        const result = create(BackgroundShellSpawnResultSchema, {
            result: {
                case: "rejected",
                value: create(ShellRejectedSchema, {
                    command: args.command ?? "",
                    workingDirectory: args.workingDirectory ?? "",
                    reason: REJECT_REASON,
                    isReadonly: false,
                }),
            },
        });
        sendExecResult(execMsg, "backgroundShellSpawnResult", result, sendFrame);
        return;
    }
    if (execCase === "writeShellStdinArgs") {
        const result = create(WriteShellStdinResultSchema, {
            result: { case: "error", value: create(WriteShellStdinErrorSchema, { error: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "writeShellStdinResult", result, sendFrame);
        return;
    }
    if (execCase === "fetchArgs") {
        const args = execMsg.message.value;
        const result = create(FetchResultSchema, {
            result: { case: "error", value: create(FetchErrorSchema, { url: args.url ?? "", error: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "fetchResult", result, sendFrame);
        return;
    }
    if (execCase === "diagnosticsArgs") {
        // Empty diagnostics result — "nothing to report" rather than a rejection.
        const result = create(DiagnosticsResultSchema, {});
        sendExecResult(execMsg, "diagnosticsResult", result, sendFrame);
        return;
    }
    // MCP resource/screen/computer exec types
    // NOTE(review): these are all answered with an empty McpResult message —
    // presumably an empty message is wire-compatible with each result case;
    // confirm against agent.proto.
    const miscCaseMap = {
        listMcpResourcesExecArgs: "listMcpResourcesExecResult",
        readMcpResourceExecArgs: "readMcpResourceExecResult",
        recordScreenArgs: "recordScreenResult",
        computerUseArgs: "computerUseResult",
    };
    const resultCase = miscCaseMap[execCase];
    if (resultCase) {
        sendExecResult(execMsg, resultCase, create(McpResultSchema, {}), sendFrame);
        return;
    }
    // Unknown exec type — log and ignore
    console.error(`[proxy] unhandled exec: ${execCase}`);
}
588
/**
 * Wrap an exec result in the ExecClientMessage/AgentClientMessage envelopes
 * (echoing the request's id and execId), frame it, and send it to Cursor.
 */
function sendExecResult(execMsg, messageCase, value, sendFrame) {
    const envelope = create(AgentClientMessageSchema, {
        message: {
            case: "execClientMessage",
            value: create(ExecClientMessageSchema, {
                id: execMsg.id,
                execId: execMsg.execId,
                message: { case: messageCase, value },
            }),
        },
    });
    sendFrame(frameConnectMessage(toBinary(AgentClientMessageSchema, envelope)));
}
600
// --- Bridge Key ---
/**
 * Derive a stable key to associate a bridge with a conversation: a truncated
 * SHA-256 of the model id plus the first 200 chars of the first user message.
 * Imperfect, but sufficient for single-session use.
 */
function deriveBridgeKey(modelId, messages) {
    const firstUserText = messages.find((m) => m.role === "user")?.content ?? "";
    const fingerprint = `${modelId}:${firstUserText.slice(0, 200)}`;
    return createHash("sha256").update(fingerprint).digest("hex").slice(0, 16);
}
611
// --- Streaming Handler ---
/**
 * Run one Cursor request over a fresh H2 bridge and stream the reply back as
 * OpenAI-format SSE chunks.
 *
 * Thinking deltas are wrapped in <think>…</think> inside the content stream.
 * When the model invokes an MCP tool (onMcpExec), the handler emits a
 * tool_calls chunk, parks the still-open bridge in activeBridges under
 * bridgeKey, and ends the SSE stream — the follow-up request with tool
 * results resumes it via handleToolResultResume.
 *
 * @param payload     { requestBytes, blobStore, mcpTools } from buildCursorRequest.
 * @param accessToken Cursor access token for the bridge's H2 connection.
 * @param modelId     echoed into every SSE chunk's "model" field.
 * @param bridgeKey   key under which a paused bridge is parked.
 * @returns a text/event-stream Response.
 */
function handleStreamingResponse(payload, accessToken, modelId, bridgeKey) {
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    const stream = new ReadableStream({
        start(controller) {
            const encoder = new TextEncoder();
            // Guards every emitter below: once closed, all sends are no-ops.
            let closed = false;
            const sendSSE = (data) => {
                if (closed)
                    return;
                controller.enqueue(encoder.encode(`data: ${JSON.stringify(data)}\n\n`));
            };
            const sendDone = () => {
                if (closed)
                    return;
                controller.enqueue(encoder.encode("data: [DONE]\n\n"));
            };
            const closeController = () => {
                if (closed)
                    return;
                closed = true;
                controller.close();
            };
            // One OpenAI chat.completion.chunk envelope around a delta.
            const makeChunk = (delta, finishReason = null) => ({
                id: completionId,
                object: "chat.completion.chunk",
                created,
                model: modelId,
                choices: [{ index: 0, delta, finish_reason: finishReason }],
            });
            const state = {
                thinkingActive: false,
                toolCallIndex: 0,
                pendingExecs: [],
            };
            let mcpExecReceived = false;
            const bridge = spawnBridge(accessToken);
            bridge.write(frameConnectMessage(payload.requestBytes));
            // Periodic heartbeat keeps the Run stream alive during long generations.
            const heartbeatTimer = setInterval(() => {
                bridge.write(makeHeartbeatBytes());
            }, 5_000);
            // Reassemble Connect frames ([flags][len][payload]) from bridge output.
            let pendingBuffer = Buffer.alloc(0);
            const processChunk = (incoming) => {
                pendingBuffer = Buffer.concat([pendingBuffer, incoming]);
                while (pendingBuffer.length >= 5) {
                    const flags = pendingBuffer[0];
                    const msgLen = pendingBuffer.readUInt32BE(1);
                    if (pendingBuffer.length < 5 + msgLen)
                        break;
                    const messageBytes = pendingBuffer.subarray(5, 5 + msgLen);
                    pendingBuffer = pendingBuffer.subarray(5 + msgLen);
                    if (flags & CONNECT_END_STREAM_FLAG) {
                        // End-of-stream trailer: surface any server error as content.
                        const endError = parseConnectEndStream(messageBytes);
                        if (endError) {
                            sendSSE(makeChunk({ content: `\n[Error: ${endError.message}]` }));
                        }
                        continue;
                    }
                    try {
                        const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
                        processServerMessage(serverMessage, payload.blobStore, payload.mcpTools, (data) => bridge.write(data), state,
                        // onText
                        (text, isThinking) => {
                            if (isThinking) {
                                if (!state.thinkingActive) {
                                    state.thinkingActive = true;
                                    sendSSE(makeChunk({ role: "assistant", content: "<think>" }));
                                }
                                sendSSE(makeChunk({ content: text }));
                            }
                            else {
                                if (state.thinkingActive) {
                                    state.thinkingActive = false;
                                    sendSSE(makeChunk({ content: "</think>" }));
                                }
                                sendSSE(makeChunk({ content: text }));
                            }
                        },
                        // onMcpExec — the model wants to execute a tool.
                        (exec) => {
                            state.pendingExecs.push(exec);
                            mcpExecReceived = true;
                            // Close thinking if active
                            if (state.thinkingActive) {
                                sendSSE(makeChunk({ content: "</think>" }));
                                state.thinkingActive = false;
                            }
                            // Emit tool_calls with decoded arguments
                            const toolCallIndex = state.toolCallIndex++;
                            sendSSE(makeChunk({
                                tool_calls: [{
                                        index: toolCallIndex,
                                        id: exec.toolCallId,
                                        type: "function",
                                        function: {
                                            name: exec.toolName,
                                            arguments: exec.decodedArgs,
                                        },
                                    }],
                            }));
                            // Keep the bridge alive for tool result continuation.
                            activeBridges.set(bridgeKey, {
                                bridge,
                                heartbeatTimer,
                                blobStore: payload.blobStore,
                                mcpTools: payload.mcpTools,
                                pendingExecs: state.pendingExecs,
                                onResume: null,
                            });
                            sendSSE(makeChunk({}, "tool_calls"));
                            sendDone();
                            closeController();
                        });
                    }
                    catch {
                        // Skip unparseable messages
                    }
                }
            };
            bridge.onData(processChunk);
            bridge.onClose(() => {
                clearInterval(heartbeatTimer);
                if (!mcpExecReceived) {
                    // Normal completion — no pending tool calls
                    if (state.thinkingActive) {
                        sendSSE(makeChunk({ content: "</think>" }));
                    }
                    sendSSE(makeChunk({}, "stop"));
                    sendDone();
                    closeController();
                }
                // If mcpExecReceived, we already closed the controller in onMcpExec
            });
        },
    });
    return new Response(stream, {
        headers: {
            "Content-Type": "text/event-stream",
            "Cache-Control": "no-cache",
            Connection: "keep-alive",
        },
    });
}
755
+ // --- Tool Result Resume ---
756
+ /** Resume a paused bridge by sending MCP results and continuing to stream. */
757
+ function handleToolResultResume(active, toolResults, modelId, tools, accessToken, bridgeKey) {
758
+ const { bridge, heartbeatTimer, blobStore, mcpTools, pendingExecs } = active;
759
+ // Send mcpResult for each pending exec that has a matching tool result
760
+ for (const exec of pendingExecs) {
761
+ const result = toolResults.find((r) => r.toolCallId === exec.toolCallId);
762
+ const mcpResult = result
763
+ ? create(McpResultSchema, {
764
+ result: {
765
+ case: "success",
766
+ value: create(McpSuccessSchema, {
767
+ content: [
768
+ create(McpToolResultContentItemSchema, {
769
+ content: {
770
+ case: "text",
771
+ value: create(McpTextContentSchema, { text: result.content }),
772
+ },
773
+ }),
774
+ ],
775
+ isError: false,
776
+ }),
777
+ },
778
+ })
779
+ : create(McpResultSchema, {
780
+ result: {
781
+ case: "error",
782
+ value: create(McpErrorSchema, { error: "Tool result not provided" }),
783
+ },
784
+ });
785
+ const execClientMessage = create(ExecClientMessageSchema, {
786
+ id: exec.execMsgId,
787
+ execId: exec.execId,
788
+ message: {
789
+ case: "mcpResult",
790
+ value: mcpResult,
791
+ },
792
+ });
793
+ const clientMessage = create(AgentClientMessageSchema, {
794
+ message: { case: "execClientMessage", value: execClientMessage },
795
+ });
796
+ bridge.write(frameConnectMessage(toBinary(AgentClientMessageSchema, clientMessage)));
797
+ }
798
+ // Now stream the continuation response.
799
+ // Reuse the same bridgeKey so subsequent tool calls can be found by deriveBridgeKey().
800
+ const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
801
+ const created = Math.floor(Date.now() / 1000);
802
+ const stream = new ReadableStream({
803
+ start(controller) {
804
+ const encoder = new TextEncoder();
805
+ let closed = false;
806
+ const sendSSE = (data) => {
807
+ if (closed)
808
+ return;
809
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(data)}\n\n`));
810
+ };
811
+ const sendDone = () => {
812
+ if (closed)
813
+ return;
814
+ controller.enqueue(encoder.encode("data: [DONE]\n\n"));
815
+ };
816
+ const closeController = () => {
817
+ if (closed)
818
+ return;
819
+ closed = true;
820
+ controller.close();
821
+ };
822
+ const makeChunk = (delta, finishReason = null) => ({
823
+ id: completionId,
824
+ object: "chat.completion.chunk",
825
+ created,
826
+ model: modelId,
827
+ choices: [{ index: 0, delta, finish_reason: finishReason }],
828
+ });
829
+ const state = {
830
+ thinkingActive: false,
831
+ toolCallIndex: 0,
832
+ pendingExecs: [],
833
+ };
834
+ let mcpExecReceived = false;
835
+ let pendingBuffer = Buffer.alloc(0);
836
+ const processChunk = (incoming) => {
837
+ pendingBuffer = Buffer.concat([pendingBuffer, incoming]);
838
+ while (pendingBuffer.length >= 5) {
839
+ const flags = pendingBuffer[0];
840
+ const msgLen = pendingBuffer.readUInt32BE(1);
841
+ if (pendingBuffer.length < 5 + msgLen)
842
+ break;
843
+ const messageBytes = pendingBuffer.subarray(5, 5 + msgLen);
844
+ pendingBuffer = pendingBuffer.subarray(5 + msgLen);
845
+ if (flags & CONNECT_END_STREAM_FLAG) {
846
+ const endError = parseConnectEndStream(messageBytes);
847
+ if (endError) {
848
+ sendSSE(makeChunk({ content: `\n[Error: ${endError.message}]` }));
849
+ }
850
+ continue;
851
+ }
852
+ try {
853
+ const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
854
+ processServerMessage(serverMessage, blobStore, mcpTools, (data) => bridge.write(data), state, (text, isThinking) => {
855
+ if (isThinking) {
856
+ if (!state.thinkingActive) {
857
+ state.thinkingActive = true;
858
+ sendSSE(makeChunk({ role: "assistant", content: "<think>" }));
859
+ }
860
+ sendSSE(makeChunk({ content: text }));
861
+ }
862
+ else {
863
+ if (state.thinkingActive) {
864
+ state.thinkingActive = false;
865
+ sendSSE(makeChunk({ content: "</think>" }));
866
+ }
867
+ sendSSE(makeChunk({ content: text }));
868
+ }
869
+ }, (exec) => {
870
+ state.pendingExecs.push(exec);
871
+ mcpExecReceived = true;
872
+ if (state.thinkingActive) {
873
+ sendSSE(makeChunk({ content: "</think>" }));
874
+ state.thinkingActive = false;
875
+ }
876
+ const toolCallIndex = state.toolCallIndex++;
877
+ sendSSE(makeChunk({
878
+ tool_calls: [{
879
+ index: toolCallIndex,
880
+ id: exec.toolCallId,
881
+ type: "function",
882
+ function: {
883
+ name: exec.toolName,
884
+ arguments: exec.decodedArgs,
885
+ },
886
+ }],
887
+ }));
888
+ activeBridges.set(bridgeKey, {
889
+ bridge,
890
+ heartbeatTimer,
891
+ blobStore,
892
+ mcpTools,
893
+ pendingExecs: state.pendingExecs,
894
+ onResume: null,
895
+ });
896
+ sendSSE(makeChunk({}, "tool_calls"));
897
+ sendDone();
898
+ closeController();
899
+ });
900
+ }
901
+ catch {
902
+ // Skip
903
+ }
904
+ }
905
+ };
906
+ // Re-attach data handler to the existing bridge
907
+ bridge.onData(processChunk);
908
+ bridge.onClose(() => {
909
+ clearInterval(heartbeatTimer);
910
+ if (!mcpExecReceived) {
911
+ if (state.thinkingActive) {
912
+ sendSSE(makeChunk({ content: "</think>" }));
913
+ }
914
+ sendSSE(makeChunk({}, "stop"));
915
+ sendDone();
916
+ closeController();
917
+ }
918
+ });
919
+ },
920
+ });
921
+ return new Response(stream, {
922
+ headers: {
923
+ "Content-Type": "text/event-stream",
924
+ "Cache-Control": "no-cache",
925
+ Connection: "keep-alive",
926
+ },
927
+ });
928
+ }
929
+ // --- Non-Streaming Handler ---
930
/**
 * Handles a non-streaming chat completion request: collects the complete
 * assistant text from the Cursor bridge, then wraps it in a single
 * OpenAI-format `chat.completion` JSON response.
 *
 * @param {object} payload - Prepared bridge request (requestBytes, blobStore, mcpTools).
 * @param {string} accessToken - Cursor access token used to spawn the bridge.
 * @param {string} modelId - Model identifier echoed back in the response body.
 * @returns {Promise<Response>} Resolves with the JSON completion response.
 */
async function handleNonStreamingResponse(payload, accessToken, modelId) {
    // OpenAI-style completion id: "chatcmpl-" plus 28 hex chars of a UUID.
    const id = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const createdAt = Math.floor(Date.now() / 1000);
    const assistantText = await collectFullResponse(payload, accessToken);
    const body = {
        id,
        object: "chat.completion",
        created: createdAt,
        model: modelId,
        choices: [
            {
                index: 0,
                message: { role: "assistant", content: assistantText },
                finish_reason: "stop",
            },
        ],
        // Token accounting is not available from the bridge; report zeros.
        usage: {
            prompt_tokens: 0,
            completion_tokens: 0,
            total_tokens: 0,
        },
    };
    return new Response(JSON.stringify(body), {
        headers: { "Content-Type": "application/json" },
    });
}
953
/**
 * Runs one full agent turn through the h2 bridge and resolves with the
 * concatenated assistant text once the bridge closes.
 *
 * Writes the prepared Connect request frame, keeps the upstream stream alive
 * with 5-second heartbeats, and incrementally parses incoming Connect frames
 * (1 flag byte + 4-byte big-endian length prefix + message). Tool-call execs
 * are intentionally ignored in this non-streaming path (no-op exec callback).
 *
 * @param {object} payload - { requestBytes, blobStore, mcpTools }.
 * @param {string} accessToken - Cursor access token for spawnBridge.
 * @returns {Promise<string>} Full assistant text (thinking + content deltas).
 */
async function collectFullResponse(payload, accessToken) {
    const { promise, resolve } = Promise.withResolvers();
    let fullText = "";
    const bridge = spawnBridge(accessToken);
    bridge.write(frameConnectMessage(payload.requestBytes));
    // Periodic heartbeat so the upstream HTTP/2 stream is not idled out.
    const heartbeatTimer = setInterval(() => {
        bridge.write(makeHeartbeatBytes());
    }, 5_000);
    let pendingBuffer = Buffer.alloc(0);
    const state = {
        thinkingActive: false,
        toolCallIndex: 0,
        pendingExecs: [],
    };
    bridge.onData((incoming) => {
        pendingBuffer = Buffer.concat([pendingBuffer, incoming]);
        // Connect frame layout: [flags:1][length:4 BE][message:length].
        while (pendingBuffer.length >= 5) {
            const flags = pendingBuffer[0];
            const msgLen = pendingBuffer.readUInt32BE(1);
            if (pendingBuffer.length < 5 + msgLen)
                break; // partial frame — wait for more data
            const messageBytes = pendingBuffer.subarray(5, 5 + msgLen);
            pendingBuffer = pendingBuffer.subarray(5 + msgLen);
            if (flags & CONNECT_END_STREAM_FLAG) {
                // Fix: surface end-of-stream errors in the collected text
                // instead of silently dropping them, matching the streaming
                // handler's behavior for the same frame type.
                const endError = parseConnectEndStream(messageBytes);
                if (endError) {
                    fullText += `\n[Error: ${endError.message}]`;
                }
                continue;
            }
            try {
                const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
                processServerMessage(serverMessage, payload.blobStore, payload.mcpTools, (data) => bridge.write(data), state, (text) => { fullText += text; }, () => { });
            }
            catch {
                // Skip frames that fail protobuf decoding.
            }
        }
    });
    bridge.onClose(() => {
        clearInterval(heartbeatTimer);
        resolve(fullText);
    });
    return promise;
}