@playwo/opencode-cursor-oauth 0.0.0-dev.194e3412ea47 → 0.0.0-dev.2a59bf1639ea
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/auth.js +1 -2
- package/dist/constants.d.ts +2 -0
- package/dist/constants.js +2 -0
- package/dist/cursor/bidi-session.d.ts +13 -0
- package/dist/cursor/bidi-session.js +149 -0
- package/dist/cursor/config.d.ts +4 -0
- package/dist/cursor/config.js +4 -0
- package/dist/cursor/connect-framing.d.ts +10 -0
- package/dist/cursor/connect-framing.js +80 -0
- package/dist/cursor/headers.d.ts +6 -0
- package/dist/cursor/headers.js +16 -0
- package/dist/cursor/index.d.ts +5 -0
- package/dist/cursor/index.js +5 -0
- package/dist/cursor/unary-rpc.d.ts +13 -0
- package/dist/cursor/unary-rpc.js +181 -0
- package/dist/index.d.ts +2 -14
- package/dist/index.js +2 -306
- package/dist/logger.js +7 -2
- package/dist/models.js +1 -23
- package/dist/openai/index.d.ts +3 -0
- package/dist/openai/index.js +3 -0
- package/dist/openai/messages.d.ts +39 -0
- package/dist/openai/messages.js +223 -0
- package/dist/openai/tools.d.ts +7 -0
- package/dist/openai/tools.js +58 -0
- package/dist/openai/types.d.ts +41 -0
- package/dist/openai/types.js +1 -0
- package/dist/plugin/cursor-auth-plugin.d.ts +3 -0
- package/dist/plugin/cursor-auth-plugin.js +140 -0
- package/dist/proto/agent_pb.js +637 -319
- package/dist/provider/index.d.ts +2 -0
- package/dist/provider/index.js +2 -0
- package/dist/provider/model-cost.d.ts +9 -0
- package/dist/provider/model-cost.js +206 -0
- package/dist/provider/models.d.ts +8 -0
- package/dist/provider/models.js +86 -0
- package/dist/proxy/bridge-non-streaming.d.ts +3 -0
- package/dist/proxy/bridge-non-streaming.js +119 -0
- package/dist/proxy/bridge-session.d.ts +5 -0
- package/dist/proxy/bridge-session.js +13 -0
- package/dist/proxy/bridge-streaming.d.ts +5 -0
- package/dist/proxy/bridge-streaming.js +311 -0
- package/dist/proxy/bridge.d.ts +3 -0
- package/dist/proxy/bridge.js +3 -0
- package/dist/proxy/chat-completion.d.ts +2 -0
- package/dist/proxy/chat-completion.js +113 -0
- package/dist/proxy/conversation-meta.d.ts +12 -0
- package/dist/proxy/conversation-meta.js +1 -0
- package/dist/proxy/conversation-state.d.ts +35 -0
- package/dist/proxy/conversation-state.js +95 -0
- package/dist/proxy/cursor-request.d.ts +5 -0
- package/dist/proxy/cursor-request.js +86 -0
- package/dist/proxy/index.d.ts +12 -0
- package/dist/proxy/index.js +12 -0
- package/dist/proxy/server.d.ts +6 -0
- package/dist/proxy/server.js +89 -0
- package/dist/proxy/sse.d.ts +5 -0
- package/dist/proxy/sse.js +5 -0
- package/dist/proxy/state-sync.d.ts +2 -0
- package/dist/proxy/state-sync.js +17 -0
- package/dist/proxy/stream-dispatch.d.ts +42 -0
- package/dist/proxy/stream-dispatch.js +491 -0
- package/dist/proxy/stream-state.d.ts +9 -0
- package/dist/proxy/stream-state.js +1 -0
- package/dist/proxy/title.d.ts +1 -0
- package/dist/proxy/title.js +103 -0
- package/dist/proxy/types.d.ts +27 -0
- package/dist/proxy/types.js +1 -0
- package/dist/proxy.d.ts +2 -20
- package/dist/proxy.js +2 -1852
- package/package.json +1 -1
package/dist/proxy.js
CHANGED
|
@@ -1,1852 +1,2 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
*
|
|
4
|
-
* Accepts POST /v1/chat/completions in OpenAI format, translates to Cursor's
|
|
5
|
-
* protobuf/Connect protocol, and streams back OpenAI-format SSE.
|
|
6
|
-
*
|
|
7
|
-
* Tool calling uses Cursor's native MCP tool protocol:
|
|
8
|
-
* - OpenAI tool defs → McpToolDefinition in RequestContext
|
|
9
|
-
* - Cursor toolCallStarted/Delta/Completed → OpenAI tool_calls SSE chunks
|
|
10
|
-
* - mcpArgs exec → pause stream, return tool_calls to caller
|
|
11
|
-
* - Follow-up request with tool results → resume bridge with mcpResult
|
|
12
|
-
*
|
|
13
|
-
* Cursor agent streaming runs via RunSSE + BidiAppend, avoiding any Node sidecar.
|
|
14
|
-
*/
|
|
15
|
-
import { create, fromBinary, fromJson, toBinary, toJson } from "@bufbuild/protobuf";
|
|
16
|
-
import { ValueSchema } from "@bufbuild/protobuf/wkt";
|
|
17
|
-
import { AgentClientMessageSchema, AgentRunRequestSchema, AgentServerMessageSchema, BidiRequestIdSchema, ClientHeartbeatSchema, ConversationActionSchema, ConversationStateStructureSchema, ConversationStepSchema, AgentConversationTurnStructureSchema, ConversationTurnStructureSchema, AssistantMessageSchema, BackgroundShellSpawnResultSchema, DeleteResultSchema, DeleteRejectedSchema, DiagnosticsResultSchema, ExecClientMessageSchema, FetchErrorSchema, FetchResultSchema, GetBlobResultSchema, GrepErrorSchema, GrepResultSchema, KvClientMessageSchema, LsRejectedSchema, LsResultSchema, McpErrorSchema, McpResultSchema, McpSuccessSchema, McpTextContentSchema, McpToolDefinitionSchema, McpToolResultContentItemSchema, ModelDetailsSchema, NameAgentRequestSchema, NameAgentResponseSchema, ReadRejectedSchema, ReadResultSchema, RequestContextResultSchema, RequestContextSchema, RequestContextSuccessSchema, SetBlobResultSchema, ShellRejectedSchema, ShellResultSchema, UserMessageActionSchema, UserMessageSchema, WriteRejectedSchema, WriteResultSchema, WriteShellStdinErrorSchema, WriteShellStdinResultSchema, } from "./proto/agent_pb";
|
|
18
|
-
import { createHash } from "node:crypto";
|
|
19
|
-
import { connect as connectHttp2 } from "node:http2";
|
|
20
|
-
import { errorDetails, logPluginError, logPluginWarn } from "./logger";
|
|
21
|
-
const CURSOR_API_URL = process.env.CURSOR_API_URL ?? "https://api2.cursor.sh";
|
|
22
|
-
const CURSOR_CLIENT_VERSION = "cli-2026.01.09-231024f";
|
|
23
|
-
const CURSOR_CONNECT_PROTOCOL_VERSION = "1";
|
|
24
|
-
const CONNECT_END_STREAM_FLAG = 0b00000010;
|
|
25
|
-
const OPENCODE_TITLE_REQUEST_MARKER = "Generate a title for this conversation:";
|
|
26
|
-
const SSE_HEADERS = {
|
|
27
|
-
"Content-Type": "text/event-stream",
|
|
28
|
-
"Cache-Control": "no-cache",
|
|
29
|
-
Connection: "keep-alive",
|
|
30
|
-
};
|
|
31
|
-
// Active bridges keyed by a session token (derived from conversation state).
|
|
32
|
-
// When tool_calls are returned, the bridge stays alive. The next request
|
|
33
|
-
// with tool results looks up the bridge and sends mcpResult messages.
|
|
34
|
-
const activeBridges = new Map();
|
|
35
|
-
const conversationStates = new Map();
|
|
36
|
-
const CONVERSATION_TTL_MS = 30 * 60 * 1000; // 30 minutes
|
|
37
|
-
function evictStaleConversations() {
|
|
38
|
-
const now = Date.now();
|
|
39
|
-
for (const [key, stored] of conversationStates) {
|
|
40
|
-
if (now - stored.lastAccessMs > CONVERSATION_TTL_MS) {
|
|
41
|
-
conversationStates.delete(key);
|
|
42
|
-
}
|
|
43
|
-
}
|
|
44
|
-
}
|
|
45
|
-
function normalizeAgentKey(agentKey) {
|
|
46
|
-
const trimmed = agentKey?.trim();
|
|
47
|
-
return trimmed ? trimmed : "default";
|
|
48
|
-
}
|
|
49
|
-
function hashString(value) {
|
|
50
|
-
return createHash("sha256").update(value).digest("hex");
|
|
51
|
-
}
|
|
52
|
-
function createStoredConversation() {
|
|
53
|
-
return {
|
|
54
|
-
conversationId: crypto.randomUUID(),
|
|
55
|
-
checkpoint: null,
|
|
56
|
-
blobStore: new Map(),
|
|
57
|
-
lastAccessMs: Date.now(),
|
|
58
|
-
systemPromptHash: "",
|
|
59
|
-
completedTurnsFingerprint: "",
|
|
60
|
-
};
|
|
61
|
-
}
|
|
62
|
-
function resetStoredConversation(stored) {
|
|
63
|
-
stored.conversationId = crypto.randomUUID();
|
|
64
|
-
stored.checkpoint = null;
|
|
65
|
-
stored.blobStore = new Map();
|
|
66
|
-
stored.lastAccessMs = Date.now();
|
|
67
|
-
stored.systemPromptHash = "";
|
|
68
|
-
stored.completedTurnsFingerprint = "";
|
|
69
|
-
}
|
|
70
|
-
/** Connect protocol frame: [1-byte flags][4-byte BE length][payload] */
|
|
71
|
-
function frameConnectMessage(data, flags = 0) {
|
|
72
|
-
const frame = Buffer.alloc(5 + data.length);
|
|
73
|
-
frame[0] = flags;
|
|
74
|
-
frame.writeUInt32BE(data.length, 1);
|
|
75
|
-
frame.set(data, 5);
|
|
76
|
-
return frame;
|
|
77
|
-
}
|
|
78
|
-
function decodeConnectUnaryBody(payload) {
|
|
79
|
-
if (payload.length < 5)
|
|
80
|
-
return null;
|
|
81
|
-
let offset = 0;
|
|
82
|
-
while (offset + 5 <= payload.length) {
|
|
83
|
-
const flags = payload[offset];
|
|
84
|
-
const view = new DataView(payload.buffer, payload.byteOffset + offset, payload.byteLength - offset);
|
|
85
|
-
const messageLength = view.getUint32(1, false);
|
|
86
|
-
const frameEnd = offset + 5 + messageLength;
|
|
87
|
-
if (frameEnd > payload.length)
|
|
88
|
-
return null;
|
|
89
|
-
if ((flags & 0b0000_0001) !== 0)
|
|
90
|
-
return null;
|
|
91
|
-
if ((flags & CONNECT_END_STREAM_FLAG) === 0) {
|
|
92
|
-
return payload.subarray(offset + 5, frameEnd);
|
|
93
|
-
}
|
|
94
|
-
offset = frameEnd;
|
|
95
|
-
}
|
|
96
|
-
return null;
|
|
97
|
-
}
|
|
98
|
-
function buildCursorHeaders(options, contentType, extra = {}) {
|
|
99
|
-
const headers = new Headers(buildCursorHeaderValues(options, contentType, extra));
|
|
100
|
-
return headers;
|
|
101
|
-
}
|
|
102
|
-
function buildCursorHeaderValues(options, contentType, extra = {}) {
|
|
103
|
-
return {
|
|
104
|
-
authorization: `Bearer ${options.accessToken}`,
|
|
105
|
-
"content-type": contentType,
|
|
106
|
-
"x-ghost-mode": "true",
|
|
107
|
-
"x-cursor-client-version": CURSOR_CLIENT_VERSION,
|
|
108
|
-
"x-cursor-client-type": "cli",
|
|
109
|
-
"x-request-id": crypto.randomUUID(),
|
|
110
|
-
...extra,
|
|
111
|
-
};
|
|
112
|
-
}
|
|
113
|
-
function encodeVarint(value) {
|
|
114
|
-
if (!Number.isSafeInteger(value) || value < 0) {
|
|
115
|
-
throw new Error(`Unsupported varint value: ${value}`);
|
|
116
|
-
}
|
|
117
|
-
const bytes = [];
|
|
118
|
-
let current = value;
|
|
119
|
-
while (current >= 0x80) {
|
|
120
|
-
bytes.push((current & 0x7f) | 0x80);
|
|
121
|
-
current = Math.floor(current / 128);
|
|
122
|
-
}
|
|
123
|
-
bytes.push(current);
|
|
124
|
-
return Uint8Array.from(bytes);
|
|
125
|
-
}
|
|
126
|
-
function encodeProtoField(tag, wireType, value) {
|
|
127
|
-
const key = encodeVarint((tag << 3) | wireType);
|
|
128
|
-
const out = new Uint8Array(key.length + value.length);
|
|
129
|
-
out.set(key, 0);
|
|
130
|
-
out.set(value, key.length);
|
|
131
|
-
return out;
|
|
132
|
-
}
|
|
133
|
-
function encodeProtoStringField(tag, value) {
|
|
134
|
-
const bytes = new TextEncoder().encode(value);
|
|
135
|
-
const len = encodeVarint(bytes.length);
|
|
136
|
-
const payload = new Uint8Array(len.length + bytes.length);
|
|
137
|
-
payload.set(len, 0);
|
|
138
|
-
payload.set(bytes, len.length);
|
|
139
|
-
return encodeProtoField(tag, 2, payload);
|
|
140
|
-
}
|
|
141
|
-
function encodeProtoMessageField(tag, value) {
|
|
142
|
-
const len = encodeVarint(value.length);
|
|
143
|
-
const payload = new Uint8Array(len.length + value.length);
|
|
144
|
-
payload.set(len, 0);
|
|
145
|
-
payload.set(value, len.length);
|
|
146
|
-
return encodeProtoField(tag, 2, payload);
|
|
147
|
-
}
|
|
148
|
-
function encodeProtoVarintField(tag, value) {
|
|
149
|
-
return encodeProtoField(tag, 0, encodeVarint(value));
|
|
150
|
-
}
|
|
151
|
-
function concatBytes(parts) {
|
|
152
|
-
const total = parts.reduce((sum, part) => sum + part.length, 0);
|
|
153
|
-
const out = new Uint8Array(total);
|
|
154
|
-
let offset = 0;
|
|
155
|
-
for (const part of parts) {
|
|
156
|
-
out.set(part, offset);
|
|
157
|
-
offset += part.length;
|
|
158
|
-
}
|
|
159
|
-
return out;
|
|
160
|
-
}
|
|
161
|
-
function toFetchBody(data) {
|
|
162
|
-
return data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength);
|
|
163
|
-
}
|
|
164
|
-
function encodeBidiAppendRequest(dataHex, requestId, appendSeqno) {
|
|
165
|
-
const requestIdBytes = toBinary(BidiRequestIdSchema, create(BidiRequestIdSchema, { requestId }));
|
|
166
|
-
return concatBytes([
|
|
167
|
-
encodeProtoStringField(1, dataHex),
|
|
168
|
-
encodeProtoMessageField(2, requestIdBytes),
|
|
169
|
-
encodeProtoVarintField(3, appendSeqno),
|
|
170
|
-
]);
|
|
171
|
-
}
|
|
172
|
-
async function createCursorSession(options) {
|
|
173
|
-
const response = await fetch(new URL("/agent.v1.AgentService/RunSSE", options.url ?? CURSOR_API_URL), {
|
|
174
|
-
method: "POST",
|
|
175
|
-
headers: buildCursorHeaders(options, "application/connect+proto", {
|
|
176
|
-
accept: "text/event-stream",
|
|
177
|
-
"connect-protocol-version": "1",
|
|
178
|
-
}),
|
|
179
|
-
body: toFetchBody(frameConnectMessage(toBinary(BidiRequestIdSchema, create(BidiRequestIdSchema, { requestId: options.requestId })))),
|
|
180
|
-
});
|
|
181
|
-
if (!response.ok || !response.body) {
|
|
182
|
-
const errorBody = await response.text().catch(() => "");
|
|
183
|
-
logPluginError("Cursor RunSSE request failed", {
|
|
184
|
-
requestId: options.requestId,
|
|
185
|
-
status: response.status,
|
|
186
|
-
responseBody: errorBody,
|
|
187
|
-
});
|
|
188
|
-
throw new Error(`RunSSE failed: ${response.status}${errorBody ? ` ${errorBody}` : ""}`);
|
|
189
|
-
}
|
|
190
|
-
const cbs = {
|
|
191
|
-
data: null,
|
|
192
|
-
close: null,
|
|
193
|
-
};
|
|
194
|
-
const abortController = new AbortController();
|
|
195
|
-
const reader = response.body.getReader();
|
|
196
|
-
let appendSeqno = 0;
|
|
197
|
-
let alive = true;
|
|
198
|
-
let closeCode = 0;
|
|
199
|
-
let writeChain = Promise.resolve();
|
|
200
|
-
const pendingChunks = [];
|
|
201
|
-
const finish = (code) => {
|
|
202
|
-
if (!alive)
|
|
203
|
-
return;
|
|
204
|
-
alive = false;
|
|
205
|
-
closeCode = code;
|
|
206
|
-
cbs.close?.(code);
|
|
207
|
-
};
|
|
208
|
-
const append = async (data) => {
|
|
209
|
-
const requestBody = encodeBidiAppendRequest(Buffer.from(data).toString("hex"), options.requestId, appendSeqno++);
|
|
210
|
-
const appendResponse = await fetch(new URL("/aiserver.v1.BidiService/BidiAppend", options.url ?? CURSOR_API_URL), {
|
|
211
|
-
method: "POST",
|
|
212
|
-
headers: buildCursorHeaders(options, "application/proto"),
|
|
213
|
-
body: toFetchBody(requestBody),
|
|
214
|
-
signal: abortController.signal,
|
|
215
|
-
});
|
|
216
|
-
if (!appendResponse.ok) {
|
|
217
|
-
const errorBody = await appendResponse.text().catch(() => "");
|
|
218
|
-
logPluginError("Cursor BidiAppend request failed", {
|
|
219
|
-
requestId: options.requestId,
|
|
220
|
-
appendSeqno: appendSeqno - 1,
|
|
221
|
-
status: appendResponse.status,
|
|
222
|
-
responseBody: errorBody,
|
|
223
|
-
});
|
|
224
|
-
throw new Error(`BidiAppend failed: ${appendResponse.status}${errorBody ? ` ${errorBody}` : ""}`);
|
|
225
|
-
}
|
|
226
|
-
await appendResponse.arrayBuffer().catch(() => undefined);
|
|
227
|
-
};
|
|
228
|
-
(async () => {
|
|
229
|
-
try {
|
|
230
|
-
while (true) {
|
|
231
|
-
const { done, value } = await reader.read();
|
|
232
|
-
if (done) {
|
|
233
|
-
finish(0);
|
|
234
|
-
break;
|
|
235
|
-
}
|
|
236
|
-
if (value && value.length > 0) {
|
|
237
|
-
const chunk = Buffer.from(value);
|
|
238
|
-
if (cbs.data) {
|
|
239
|
-
cbs.data(chunk);
|
|
240
|
-
}
|
|
241
|
-
else {
|
|
242
|
-
pendingChunks.push(chunk);
|
|
243
|
-
}
|
|
244
|
-
}
|
|
245
|
-
}
|
|
246
|
-
}
|
|
247
|
-
catch (error) {
|
|
248
|
-
logPluginWarn("Cursor stream reader closed with error", {
|
|
249
|
-
requestId: options.requestId,
|
|
250
|
-
...errorDetails(error),
|
|
251
|
-
});
|
|
252
|
-
finish(alive ? 1 : closeCode);
|
|
253
|
-
}
|
|
254
|
-
})();
|
|
255
|
-
return {
|
|
256
|
-
get alive() {
|
|
257
|
-
return alive;
|
|
258
|
-
},
|
|
259
|
-
write(data) {
|
|
260
|
-
if (!alive)
|
|
261
|
-
return;
|
|
262
|
-
writeChain = writeChain
|
|
263
|
-
.then(() => append(data))
|
|
264
|
-
.catch((error) => {
|
|
265
|
-
logPluginError("Cursor stream append failed", {
|
|
266
|
-
requestId: options.requestId,
|
|
267
|
-
...errorDetails(error),
|
|
268
|
-
});
|
|
269
|
-
try {
|
|
270
|
-
abortController.abort();
|
|
271
|
-
}
|
|
272
|
-
catch { }
|
|
273
|
-
try {
|
|
274
|
-
reader.cancel();
|
|
275
|
-
}
|
|
276
|
-
catch { }
|
|
277
|
-
finish(1);
|
|
278
|
-
});
|
|
279
|
-
},
|
|
280
|
-
end() {
|
|
281
|
-
try {
|
|
282
|
-
abortController.abort();
|
|
283
|
-
}
|
|
284
|
-
catch { }
|
|
285
|
-
try {
|
|
286
|
-
reader.cancel();
|
|
287
|
-
}
|
|
288
|
-
catch { }
|
|
289
|
-
finish(0);
|
|
290
|
-
},
|
|
291
|
-
onData(cb) {
|
|
292
|
-
cbs.data = cb;
|
|
293
|
-
while (pendingChunks.length > 0) {
|
|
294
|
-
cb(pendingChunks.shift());
|
|
295
|
-
}
|
|
296
|
-
},
|
|
297
|
-
onClose(cb) {
|
|
298
|
-
if (!alive) {
|
|
299
|
-
queueMicrotask(() => cb(closeCode));
|
|
300
|
-
}
|
|
301
|
-
else {
|
|
302
|
-
cbs.close = cb;
|
|
303
|
-
}
|
|
304
|
-
},
|
|
305
|
-
};
|
|
306
|
-
}
|
|
307
|
-
export async function callCursorUnaryRpc(options) {
|
|
308
|
-
const target = new URL(options.rpcPath, options.url ?? CURSOR_API_URL);
|
|
309
|
-
const transport = options.transport ?? "auto";
|
|
310
|
-
if (transport === "http2" || (transport === "auto" && target.protocol === "https:")) {
|
|
311
|
-
const http2Result = await callCursorUnaryRpcOverHttp2(options, target);
|
|
312
|
-
if (transport === "http2" || http2Result.timedOut || http2Result.exitCode !== 1) {
|
|
313
|
-
return http2Result;
|
|
314
|
-
}
|
|
315
|
-
}
|
|
316
|
-
return callCursorUnaryRpcOverFetch(options, target);
|
|
317
|
-
}
|
|
318
|
-
async function callCursorUnaryRpcOverFetch(options, target) {
|
|
319
|
-
let timedOut = false;
|
|
320
|
-
const timeoutMs = options.timeoutMs ?? 5_000;
|
|
321
|
-
const controller = new AbortController();
|
|
322
|
-
const timeout = timeoutMs > 0
|
|
323
|
-
? setTimeout(() => {
|
|
324
|
-
timedOut = true;
|
|
325
|
-
controller.abort();
|
|
326
|
-
}, timeoutMs)
|
|
327
|
-
: undefined;
|
|
328
|
-
try {
|
|
329
|
-
const response = await fetch(target, {
|
|
330
|
-
method: "POST",
|
|
331
|
-
headers: buildCursorHeaders(options, "application/proto", {
|
|
332
|
-
accept: "application/proto, application/json",
|
|
333
|
-
"connect-protocol-version": CURSOR_CONNECT_PROTOCOL_VERSION,
|
|
334
|
-
"connect-timeout-ms": String(timeoutMs),
|
|
335
|
-
}),
|
|
336
|
-
body: toFetchBody(options.requestBody),
|
|
337
|
-
signal: controller.signal,
|
|
338
|
-
});
|
|
339
|
-
const body = new Uint8Array(await response.arrayBuffer());
|
|
340
|
-
return {
|
|
341
|
-
body,
|
|
342
|
-
exitCode: response.ok ? 0 : response.status,
|
|
343
|
-
timedOut,
|
|
344
|
-
};
|
|
345
|
-
}
|
|
346
|
-
catch {
|
|
347
|
-
logPluginError("Cursor unary fetch transport failed", {
|
|
348
|
-
rpcPath: options.rpcPath,
|
|
349
|
-
url: target.toString(),
|
|
350
|
-
timeoutMs,
|
|
351
|
-
timedOut,
|
|
352
|
-
});
|
|
353
|
-
return {
|
|
354
|
-
body: new Uint8Array(),
|
|
355
|
-
exitCode: timedOut ? 124 : 1,
|
|
356
|
-
timedOut,
|
|
357
|
-
};
|
|
358
|
-
}
|
|
359
|
-
finally {
|
|
360
|
-
if (timeout)
|
|
361
|
-
clearTimeout(timeout);
|
|
362
|
-
}
|
|
363
|
-
}
|
|
364
|
-
async function callCursorUnaryRpcOverHttp2(options, target) {
|
|
365
|
-
const timeoutMs = options.timeoutMs ?? 5_000;
|
|
366
|
-
const authority = `${target.protocol}//${target.host}`;
|
|
367
|
-
return new Promise((resolve) => {
|
|
368
|
-
let settled = false;
|
|
369
|
-
let timedOut = false;
|
|
370
|
-
let session;
|
|
371
|
-
let stream;
|
|
372
|
-
const finish = (result) => {
|
|
373
|
-
if (settled)
|
|
374
|
-
return;
|
|
375
|
-
settled = true;
|
|
376
|
-
if (timeout)
|
|
377
|
-
clearTimeout(timeout);
|
|
378
|
-
try {
|
|
379
|
-
stream?.close();
|
|
380
|
-
}
|
|
381
|
-
catch { }
|
|
382
|
-
try {
|
|
383
|
-
session?.close();
|
|
384
|
-
}
|
|
385
|
-
catch { }
|
|
386
|
-
resolve(result);
|
|
387
|
-
};
|
|
388
|
-
const timeout = timeoutMs > 0
|
|
389
|
-
? setTimeout(() => {
|
|
390
|
-
timedOut = true;
|
|
391
|
-
finish({
|
|
392
|
-
body: new Uint8Array(),
|
|
393
|
-
exitCode: 124,
|
|
394
|
-
timedOut: true,
|
|
395
|
-
});
|
|
396
|
-
}, timeoutMs)
|
|
397
|
-
: undefined;
|
|
398
|
-
try {
|
|
399
|
-
session = connectHttp2(authority);
|
|
400
|
-
session.once("error", (error) => {
|
|
401
|
-
logPluginError("Cursor unary HTTP/2 session failed", {
|
|
402
|
-
rpcPath: options.rpcPath,
|
|
403
|
-
url: target.toString(),
|
|
404
|
-
timedOut,
|
|
405
|
-
...errorDetails(error),
|
|
406
|
-
});
|
|
407
|
-
finish({
|
|
408
|
-
body: new Uint8Array(),
|
|
409
|
-
exitCode: timedOut ? 124 : 1,
|
|
410
|
-
timedOut,
|
|
411
|
-
});
|
|
412
|
-
});
|
|
413
|
-
const headers = {
|
|
414
|
-
":method": "POST",
|
|
415
|
-
":path": `${target.pathname}${target.search}`,
|
|
416
|
-
...buildCursorHeaderValues(options, "application/proto", {
|
|
417
|
-
accept: "application/proto, application/json",
|
|
418
|
-
"connect-protocol-version": CURSOR_CONNECT_PROTOCOL_VERSION,
|
|
419
|
-
"connect-timeout-ms": String(timeoutMs),
|
|
420
|
-
}),
|
|
421
|
-
};
|
|
422
|
-
stream = session.request(headers);
|
|
423
|
-
let statusCode = 0;
|
|
424
|
-
const chunks = [];
|
|
425
|
-
stream.once("response", (responseHeaders) => {
|
|
426
|
-
const statusHeader = responseHeaders[":status"];
|
|
427
|
-
statusCode = typeof statusHeader === "number"
|
|
428
|
-
? statusHeader
|
|
429
|
-
: Number(statusHeader ?? 0);
|
|
430
|
-
});
|
|
431
|
-
stream.on("data", (chunk) => {
|
|
432
|
-
chunks.push(Buffer.from(chunk));
|
|
433
|
-
});
|
|
434
|
-
stream.once("end", () => {
|
|
435
|
-
const body = new Uint8Array(Buffer.concat(chunks));
|
|
436
|
-
finish({
|
|
437
|
-
body,
|
|
438
|
-
exitCode: statusCode >= 200 && statusCode < 300 ? 0 : (statusCode || 1),
|
|
439
|
-
timedOut,
|
|
440
|
-
});
|
|
441
|
-
});
|
|
442
|
-
stream.once("error", (error) => {
|
|
443
|
-
logPluginError("Cursor unary HTTP/2 stream failed", {
|
|
444
|
-
rpcPath: options.rpcPath,
|
|
445
|
-
url: target.toString(),
|
|
446
|
-
timedOut,
|
|
447
|
-
...errorDetails(error),
|
|
448
|
-
});
|
|
449
|
-
finish({
|
|
450
|
-
body: new Uint8Array(),
|
|
451
|
-
exitCode: timedOut ? 124 : 1,
|
|
452
|
-
timedOut,
|
|
453
|
-
});
|
|
454
|
-
});
|
|
455
|
-
// Bun's node:http2 client currently breaks on end(Buffer.alloc(0)) against
|
|
456
|
-
// Cursor's HTTPS endpoint, but a header-only end() succeeds for empty unary bodies.
|
|
457
|
-
if (options.requestBody.length > 0) {
|
|
458
|
-
stream.end(Buffer.from(options.requestBody));
|
|
459
|
-
}
|
|
460
|
-
else {
|
|
461
|
-
stream.end();
|
|
462
|
-
}
|
|
463
|
-
}
|
|
464
|
-
catch (error) {
|
|
465
|
-
logPluginError("Cursor unary HTTP/2 setup failed", {
|
|
466
|
-
rpcPath: options.rpcPath,
|
|
467
|
-
url: target.toString(),
|
|
468
|
-
timedOut,
|
|
469
|
-
...errorDetails(error),
|
|
470
|
-
});
|
|
471
|
-
finish({
|
|
472
|
-
body: new Uint8Array(),
|
|
473
|
-
exitCode: timedOut ? 124 : 1,
|
|
474
|
-
timedOut,
|
|
475
|
-
});
|
|
476
|
-
}
|
|
477
|
-
});
|
|
478
|
-
}
|
|
479
|
-
let proxyServer;
|
|
480
|
-
let proxyPort;
|
|
481
|
-
let proxyAccessTokenProvider;
|
|
482
|
-
let proxyModels = [];
|
|
483
|
-
function buildOpenAIModelList(models) {
|
|
484
|
-
return models.map((model) => ({
|
|
485
|
-
id: model.id,
|
|
486
|
-
object: "model",
|
|
487
|
-
created: 0,
|
|
488
|
-
owned_by: "cursor",
|
|
489
|
-
}));
|
|
490
|
-
}
|
|
491
|
-
export function getProxyPort() {
|
|
492
|
-
return proxyPort;
|
|
493
|
-
}
|
|
494
|
-
export async function startProxy(getAccessToken, models = []) {
|
|
495
|
-
proxyAccessTokenProvider = getAccessToken;
|
|
496
|
-
proxyModels = models.map((model) => ({
|
|
497
|
-
id: model.id,
|
|
498
|
-
name: model.name,
|
|
499
|
-
}));
|
|
500
|
-
if (proxyServer && proxyPort)
|
|
501
|
-
return proxyPort;
|
|
502
|
-
proxyServer = Bun.serve({
|
|
503
|
-
port: 0,
|
|
504
|
-
idleTimeout: 255, // max — Cursor responses can take 30s+
|
|
505
|
-
async fetch(req) {
|
|
506
|
-
const url = new URL(req.url);
|
|
507
|
-
if (req.method === "GET" && url.pathname === "/v1/models") {
|
|
508
|
-
return new Response(JSON.stringify({
|
|
509
|
-
object: "list",
|
|
510
|
-
data: buildOpenAIModelList(proxyModels),
|
|
511
|
-
}), { headers: { "Content-Type": "application/json" } });
|
|
512
|
-
}
|
|
513
|
-
if (req.method === "POST" && url.pathname === "/v1/chat/completions") {
|
|
514
|
-
try {
|
|
515
|
-
const body = (await req.json());
|
|
516
|
-
if (!proxyAccessTokenProvider) {
|
|
517
|
-
throw new Error("Cursor proxy access token provider not configured");
|
|
518
|
-
}
|
|
519
|
-
const accessToken = await proxyAccessTokenProvider();
|
|
520
|
-
const sessionId = req.headers.get("x-opencode-session-id")
|
|
521
|
-
?? req.headers.get("x-session-id")
|
|
522
|
-
?? undefined;
|
|
523
|
-
const agentKey = req.headers.get("x-opencode-agent") ?? undefined;
|
|
524
|
-
return handleChatCompletion(body, accessToken, { sessionId, agentKey });
|
|
525
|
-
}
|
|
526
|
-
catch (err) {
|
|
527
|
-
const message = err instanceof Error ? err.message : String(err);
|
|
528
|
-
logPluginError("Cursor proxy request failed", {
|
|
529
|
-
path: url.pathname,
|
|
530
|
-
method: req.method,
|
|
531
|
-
...errorDetails(err),
|
|
532
|
-
});
|
|
533
|
-
return new Response(JSON.stringify({
|
|
534
|
-
error: { message, type: "server_error", code: "internal_error" },
|
|
535
|
-
}), { status: 500, headers: { "Content-Type": "application/json" } });
|
|
536
|
-
}
|
|
537
|
-
}
|
|
538
|
-
return new Response("Not Found", { status: 404 });
|
|
539
|
-
},
|
|
540
|
-
});
|
|
541
|
-
proxyPort = proxyServer.port;
|
|
542
|
-
if (!proxyPort)
|
|
543
|
-
throw new Error("Failed to bind proxy to a port");
|
|
544
|
-
return proxyPort;
|
|
545
|
-
}
|
|
546
|
-
export function stopProxy() {
|
|
547
|
-
if (proxyServer) {
|
|
548
|
-
proxyServer.stop();
|
|
549
|
-
proxyServer = undefined;
|
|
550
|
-
proxyPort = undefined;
|
|
551
|
-
proxyAccessTokenProvider = undefined;
|
|
552
|
-
proxyModels = [];
|
|
553
|
-
}
|
|
554
|
-
// Clean up any lingering bridges
|
|
555
|
-
for (const active of activeBridges.values()) {
|
|
556
|
-
clearInterval(active.heartbeatTimer);
|
|
557
|
-
active.bridge.end();
|
|
558
|
-
}
|
|
559
|
-
activeBridges.clear();
|
|
560
|
-
conversationStates.clear();
|
|
561
|
-
}
|
|
562
|
-
function handleChatCompletion(body, accessToken, context = {}) {
|
|
563
|
-
const parsed = parseMessages(body.messages);
|
|
564
|
-
const { systemPrompt, userText, turns, toolResults, pendingAssistantSummary, completedTurnsFingerprint, } = parsed;
|
|
565
|
-
const modelId = body.model;
|
|
566
|
-
const normalizedAgentKey = normalizeAgentKey(context.agentKey);
|
|
567
|
-
const titleDetection = detectTitleRequest(body);
|
|
568
|
-
const isTitleAgent = titleDetection.matched;
|
|
569
|
-
if (isTitleAgent) {
|
|
570
|
-
const titleSourceText = buildTitleSourceText(userText, turns, pendingAssistantSummary, toolResults);
|
|
571
|
-
if (!titleSourceText) {
|
|
572
|
-
return new Response(JSON.stringify({
|
|
573
|
-
error: {
|
|
574
|
-
message: "No title source text found",
|
|
575
|
-
type: "invalid_request_error",
|
|
576
|
-
},
|
|
577
|
-
}), { status: 400, headers: { "Content-Type": "application/json" } });
|
|
578
|
-
}
|
|
579
|
-
return handleTitleGenerationRequest(titleSourceText, accessToken, modelId, body.stream !== false);
|
|
580
|
-
}
|
|
581
|
-
const tools = selectToolsForChoice(body.tools ?? [], body.tool_choice);
|
|
582
|
-
if (!userText && toolResults.length === 0) {
|
|
583
|
-
return new Response(JSON.stringify({
|
|
584
|
-
error: {
|
|
585
|
-
message: "No user message found",
|
|
586
|
-
type: "invalid_request_error",
|
|
587
|
-
},
|
|
588
|
-
}), { status: 400, headers: { "Content-Type": "application/json" } });
|
|
589
|
-
}
|
|
590
|
-
// bridgeKey: session/agent-scoped, for active tool-call bridges
|
|
591
|
-
// convKey: model-independent, for conversation state that survives model switches
|
|
592
|
-
const bridgeKey = deriveBridgeKey(modelId, body.messages, context.sessionId, context.agentKey);
|
|
593
|
-
const convKey = deriveConversationKey(body.messages, context.sessionId, context.agentKey);
|
|
594
|
-
const activeBridge = activeBridges.get(bridgeKey);
|
|
595
|
-
if (activeBridge && toolResults.length > 0) {
|
|
596
|
-
activeBridges.delete(bridgeKey);
|
|
597
|
-
if (activeBridge.bridge.alive) {
|
|
598
|
-
if (activeBridge.modelId !== modelId) {
|
|
599
|
-
logPluginWarn("Resuming pending Cursor tool call on original model after model switch", {
|
|
600
|
-
requestedModelId: modelId,
|
|
601
|
-
resumedModelId: activeBridge.modelId,
|
|
602
|
-
convKey,
|
|
603
|
-
bridgeKey,
|
|
604
|
-
});
|
|
605
|
-
}
|
|
606
|
-
// Resume the live bridge with tool results
|
|
607
|
-
return handleToolResultResume(activeBridge, toolResults, bridgeKey, convKey);
|
|
608
|
-
}
|
|
609
|
-
// Bridge died (timeout, server disconnect, etc.).
|
|
610
|
-
// Clean up and fall through to start a fresh bridge.
|
|
611
|
-
clearInterval(activeBridge.heartbeatTimer);
|
|
612
|
-
activeBridge.bridge.end();
|
|
613
|
-
}
|
|
614
|
-
// Clean up stale bridge if present
|
|
615
|
-
if (activeBridge && activeBridges.has(bridgeKey)) {
|
|
616
|
-
clearInterval(activeBridge.heartbeatTimer);
|
|
617
|
-
activeBridge.bridge.end();
|
|
618
|
-
activeBridges.delete(bridgeKey);
|
|
619
|
-
}
|
|
620
|
-
let stored = conversationStates.get(convKey);
|
|
621
|
-
if (!stored) {
|
|
622
|
-
stored = createStoredConversation();
|
|
623
|
-
conversationStates.set(convKey, stored);
|
|
624
|
-
}
|
|
625
|
-
const systemPromptHash = hashString(systemPrompt);
|
|
626
|
-
if (stored.checkpoint
|
|
627
|
-
&& (stored.systemPromptHash !== systemPromptHash
|
|
628
|
-
|| (turns.length > 0 && stored.completedTurnsFingerprint !== completedTurnsFingerprint))) {
|
|
629
|
-
resetStoredConversation(stored);
|
|
630
|
-
}
|
|
631
|
-
stored.systemPromptHash = systemPromptHash;
|
|
632
|
-
stored.completedTurnsFingerprint = completedTurnsFingerprint;
|
|
633
|
-
stored.lastAccessMs = Date.now();
|
|
634
|
-
evictStaleConversations();
|
|
635
|
-
// Build the request. When tool results are present but the bridge died,
|
|
636
|
-
// we must still include the last user text so Cursor has context.
|
|
637
|
-
const mcpTools = buildMcpToolDefinitions(tools);
|
|
638
|
-
const needsInitialHandoff = !stored.checkpoint && (turns.length > 0 || pendingAssistantSummary || toolResults.length > 0);
|
|
639
|
-
const replayTurns = needsInitialHandoff ? [] : turns;
|
|
640
|
-
let effectiveUserText = needsInitialHandoff
|
|
641
|
-
? buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults)
|
|
642
|
-
: toolResults.length > 0
|
|
643
|
-
? buildToolResumePrompt(userText, pendingAssistantSummary, toolResults)
|
|
644
|
-
: userText;
|
|
645
|
-
const payload = buildCursorRequest(modelId, systemPrompt, effectiveUserText, replayTurns, stored.conversationId, stored.checkpoint, stored.blobStore);
|
|
646
|
-
payload.mcpTools = mcpTools;
|
|
647
|
-
if (body.stream === false) {
|
|
648
|
-
return handleNonStreamingResponse(payload, accessToken, modelId, convKey, {
|
|
649
|
-
systemPrompt,
|
|
650
|
-
systemPromptHash,
|
|
651
|
-
completedTurnsFingerprint,
|
|
652
|
-
turns,
|
|
653
|
-
userText,
|
|
654
|
-
agentKey: normalizedAgentKey,
|
|
655
|
-
});
|
|
656
|
-
}
|
|
657
|
-
return handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, {
|
|
658
|
-
systemPrompt,
|
|
659
|
-
systemPromptHash,
|
|
660
|
-
completedTurnsFingerprint,
|
|
661
|
-
turns,
|
|
662
|
-
userText,
|
|
663
|
-
agentKey: normalizedAgentKey,
|
|
664
|
-
});
|
|
665
|
-
}
|
|
666
|
-
/** Normalize OpenAI message content to a plain string. */
|
|
667
|
-
function textContent(content) {
|
|
668
|
-
if (content == null)
|
|
669
|
-
return "";
|
|
670
|
-
if (typeof content === "string")
|
|
671
|
-
return content;
|
|
672
|
-
return content
|
|
673
|
-
.filter((p) => p.type === "text" && p.text)
|
|
674
|
-
.map((p) => p.text)
|
|
675
|
-
.join("\n");
|
|
676
|
-
}
|
|
677
|
-
/**
 * Split an OpenAI chat `messages` array into the pieces the Cursor bridge
 * needs: the combined system prompt, the completed user/assistant turns, and
 * the trailing "in-flight" state (latest user text, pending tool results, and
 * a summary of assistant activity that has not been answered yet).
 *
 * @param {Array} messages - OpenAI-style chat messages (system/user/assistant/tool roles).
 * @returns {{systemPrompt: string, userText: string, turns: Array,
 *            toolResults: Array, pendingAssistantSummary: string,
 *            completedTurnsFingerprint: string}}
 */
function parseMessages(messages) {
    let systemPrompt = "You are a helpful assistant.";
    // Collect system messages
    const systemParts = messages
        .filter((m) => m.role === "system")
        .map((m) => textContent(m.content));
    if (systemParts.length > 0) {
        systemPrompt = systemParts.join("\n");
    }
    const nonSystem = messages.filter((m) => m.role !== "system");
    // Group the remaining messages into turns: each turn begins at a user
    // message and accumulates the assistant/tool segments that follow it.
    const parsedTurns = [];
    let currentTurn;
    for (const msg of nonSystem) {
        if (msg.role === "user") {
            if (currentTurn)
                parsedTurns.push(currentTurn);
            currentTurn = {
                userText: textContent(msg.content),
                segments: [],
            };
            continue;
        }
        // Assistant/tool message before any user message: open a synthetic
        // turn with empty user text so nothing is dropped.
        if (!currentTurn) {
            currentTurn = { userText: "", segments: [] };
        }
        if (msg.role === "assistant") {
            const text = textContent(msg.content);
            if (text) {
                currentTurn.segments.push({ kind: "assistantText", text });
            }
            if (msg.tool_calls?.length) {
                currentTurn.segments.push({
                    kind: "assistantToolCalls",
                    toolCalls: msg.tool_calls,
                });
            }
            continue;
        }
        if (msg.role === "tool") {
            currentTurn.segments.push({
                kind: "toolResult",
                result: {
                    toolCallId: msg.tool_call_id ?? "",
                    content: textContent(msg.content),
                },
            });
        }
    }
    if (currentTurn)
        parsedTurns.push(currentTurn);
    let userText = "";
    let toolResults = [];
    let pendingAssistantSummary = "";
    let completedTurnStates = parsedTurns;
    // The final turn may still be "in flight": either it carries trailing tool
    // results awaiting an answer, or it is a bare user message with no reply.
    const lastTurn = parsedTurns.at(-1);
    if (lastTurn) {
        const trailingSegments = splitTrailingToolResults(lastTurn.segments);
        const hasAssistantSummary = trailingSegments.base.length > 0;
        if (trailingSegments.trailing.length > 0 && hasAssistantSummary) {
            // Pending tool results: peel the last turn off the completed list
            // and surface its components individually.
            completedTurnStates = parsedTurns.slice(0, -1);
            userText = lastTurn.userText;
            toolResults = trailingSegments.trailing.map((segment) => segment.result);
            pendingAssistantSummary = summarizeTurnSegments(trailingSegments.base);
        }
        else if (lastTurn.userText && lastTurn.segments.length === 0) {
            // Fresh user message with no assistant activity yet.
            completedTurnStates = parsedTurns.slice(0, -1);
            userText = lastTurn.userText;
        }
    }
    // Flatten completed turns into {userText, assistantText} pairs, dropping
    // turns that are empty on both sides.
    const turns = completedTurnStates
        .map((turn) => ({
        userText: turn.userText,
        assistantText: summarizeTurnSegments(turn.segments),
    }))
        .filter((turn) => turn.userText || turn.assistantText);
    return {
        systemPrompt,
        userText,
        turns,
        toolResults,
        pendingAssistantSummary,
        completedTurnsFingerprint: buildCompletedTurnsFingerprint(systemPrompt, turns),
    };
}
|
|
761
|
-
/**
 * Split a turn's segments into the leading portion and the uninterrupted run
 * of "toolResult" segments at the very end.
 */
function splitTrailingToolResults(segments) {
    let boundary = segments.length;
    for (let i = segments.length - 1; i >= 0; i -= 1) {
        if (segments[i]?.kind !== "toolResult") {
            break;
        }
        boundary = i;
    }
    return {
        base: segments.slice(0, boundary),
        trailing: segments.slice(boundary).filter((segment) => segment.kind === "toolResult"),
    };
}
|
|
771
|
-
/**
 * Collapse a turn's segments into a single text summary: assistant text is
 * kept verbatim, tool calls and tool results are rendered as bracketed
 * summaries, all joined with blank lines.
 */
function summarizeTurnSegments(segments) {
    const pieces = [];
    for (const segment of segments) {
        switch (segment.kind) {
            case "assistantText": {
                const trimmed = segment.text.trim();
                if (trimmed) {
                    pieces.push(trimmed);
                }
                break;
            }
            case "assistantToolCalls": {
                const summary = segment.toolCalls.map(formatToolCallSummary).join("\n\n");
                if (summary) {
                    pieces.push(summary);
                }
                break;
            }
            default:
                // toolResult
                pieces.push(formatToolResultSummary(segment.result));
                break;
        }
    }
    return pieces.join("\n\n").trim();
}
|
|
790
|
-
/** Render one assistant tool-call request as a bracketed summary line. */
function formatToolCallSummary(call) {
    const header = `[assistant requested tool ${call.function.name} id=${call.id}]`;
    const args = call.function.arguments?.trim();
    if (!args) {
        return header;
    }
    return `${header}\n${args}`;
}
|
|
796
|
-
/** Render one tool result as a bracketed summary (label + trimmed content). */
function formatToolResultSummary(result) {
    const label = result.toolCallId
        ? `[tool result id=${result.toolCallId}]`
        : "[tool result]";
    const body = result.content.trim();
    if (!body) {
        return label;
    }
    return `${label}\n${body}`;
}
|
|
803
|
-
/** Hash the system prompt plus completed turns into a stable fingerprint. */
function buildCompletedTurnsFingerprint(systemPrompt, turns) {
    const serialized = JSON.stringify({ systemPrompt, turns });
    return hashString(serialized);
}
|
|
806
|
-
/**
 * Build the prompt used to resume a turn after tool execution: the user text,
 * then the prior assistant tool activity, then the tool results.
 */
function buildToolResumePrompt(userText, pendingAssistantSummary, toolResults) {
    const sections = [userText.trim()];
    const summary = pendingAssistantSummary.trim();
    if (summary) {
        sections.push(`[previous assistant tool activity]\n${summary}`);
    }
    if (toolResults.length > 0) {
        sections.push(toolResults.map(formatToolResultSummary).join("\n\n"));
    }
    return sections.filter(Boolean).join("\n\n");
}
|
|
816
|
-
/**
 * Build the first prompt sent to Cursor when handing an existing OpenCode
 * session over (no checkpoint yet): the completed turns are rendered as a
 * transcript, the in-progress turn is appended, and the latest user message
 * follows the transcript. Falls back to the raw user text when there is no
 * history to carry over.
 */
function buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults) {
    const historyBlocks = [];
    turns.forEach((turn, index) => {
        const lines = [`Turn ${index + 1}`];
        const user = turn.userText.trim();
        if (user) {
            lines.push(`User: ${user}`);
        }
        const assistant = turn.assistantText.trim();
        if (assistant) {
            lines.push(`Assistant: ${assistant}`);
        }
        historyBlocks.push(lines.join("\n"));
    });
    const inProgress = buildToolResumePrompt("", pendingAssistantSummary, toolResults).trim();
    if (inProgress) {
        historyBlocks.push(`In-progress turn\n${inProgress}`);
    }
    const history = historyBlocks.join("\n\n").trim();
    if (!history) {
        return userText;
    }
    const promptLines = [
        "[OpenCode session handoff]",
        "You are continuing an existing session that previously ran on another provider/model.",
        "Treat the transcript below as prior conversation history before answering the latest user message.",
        "",
        "<previous-session-transcript>",
        history,
        "</previous-session-transcript>",
        "",
        "Latest user message:",
        userText.trim(),
    ];
    // NOTE: filter(Boolean) also drops the "" separator entries, matching the
    // original single-newline joined layout.
    return promptLines.filter(Boolean).join("\n");
}
|
|
845
|
-
/**
 * Gather the conversation text that feeds title generation, skipping the
 * synthetic title-request marker messages themselves.
 */
function buildTitleSourceText(userText, turns, pendingAssistantSummary, toolResults) {
    const pieces = [];
    for (const turn of turns) {
        const user = isTitleRequestMarker(turn.userText) ? "" : turn.userText.trim();
        const assistant = turn.assistantText.trim();
        const combined = [user, assistant].filter(Boolean).join("\n");
        if (combined) {
            pieces.push(combined);
        }
    }
    const summary = pendingAssistantSummary.trim();
    if (summary) {
        pieces.push(summary);
    }
    if (toolResults.length > 0) {
        pieces.push(toolResults.map(formatToolResultSummary).join("\n\n"));
    }
    const latest = userText.trim();
    if (latest && !isTitleRequestMarker(userText)) {
        pieces.push(latest);
    }
    return pieces.join("\n\n").trim();
}
|
|
863
|
-
/**
 * Decide whether this chat-completion request is OpenCode's synthetic
 * title-generation request. Requests that carry tools never qualify.
 * @returns {{matched: boolean, reason: string}}
 */
function detectTitleRequest(body) {
    if ((body.tools?.length ?? 0) > 0) {
        return { matched: false, reason: "tools-present" };
    }
    const firstNonSystem = body.messages.find((message) => message.role !== "system");
    const hasMarker = firstNonSystem?.role === "user"
        && isTitleRequestMarker(textContent(firstNonSystem.content));
    return hasMarker
        ? { matched: true, reason: "opencode-title-marker" }
        : { matched: false, reason: "no-title-marker" };
}
|
|
873
|
-
/** True when the text is exactly the OpenCode title-request marker (ignoring surrounding whitespace). */
function isTitleRequestMarker(text) {
    const normalized = text.trim();
    return normalized === OPENCODE_TITLE_REQUEST_MARKER;
}
|
|
876
|
-
/**
 * Apply an OpenAI `tool_choice` to the tool list.
 * "none" disables all tools; a {type:"function"} choice narrows to that tool;
 * anything else (auto/required/null/unknown) leaves the list untouched.
 */
function selectToolsForChoice(tools, toolChoice) {
    if (tools.length === 0) {
        return [];
    }
    if (toolChoice === "none") {
        return [];
    }
    if (toolChoice != null && typeof toolChoice === "object") {
        const { type, function: fn } = toolChoice;
        if (type === "function" && typeof fn?.name === "string") {
            return tools.filter((tool) => tool.function.name === fn.name);
        }
    }
    return tools;
}
|
|
893
|
-
/** Convert OpenAI tool definitions to Cursor's MCP tool protobuf format. */
function buildMcpToolDefinitions(tools) {
    const emptyObjectSchema = { type: "object", properties: {}, required: [] };
    return tools.map((tool) => {
        const fn = tool.function;
        // Fall back to an empty object schema when the tool declares no parameters.
        const jsonSchema = fn.parameters && typeof fn.parameters === "object"
            ? fn.parameters
            : emptyObjectSchema;
        return create(McpToolDefinitionSchema, {
            name: fn.name,
            description: fn.description || "",
            providerIdentifier: "opencode",
            toolName: fn.name,
            inputSchema: toBinary(ValueSchema, fromJson(ValueSchema, jsonSchema)),
        });
    });
}
|
|
910
|
-
/** Decode a Cursor MCP arg value (protobuf Value bytes) to a JS value. */
function decodeMcpArgValue(value) {
    try {
        return toJson(ValueSchema, fromBinary(ValueSchema, value));
    }
    catch {
        // Not a protobuf Value — fall back to interpreting the bytes as UTF-8 text.
        return new TextDecoder().decode(value);
    }
}
|
|
919
|
-
/** Decode a map of MCP arg values. */
function decodeMcpArgsMap(args) {
    return Object.fromEntries(
        Object.entries(args).map(([key, value]) => [key, decodeMcpArgValue(value)]),
    );
}
|
|
927
|
-
/**
 * Assemble the AgentClientMessage payload for a Cursor agent run.
 *
 * The system prompt is stored in the blob store keyed by its SHA-256 (Cursor
 * fetches it back through the KV handshake rather than receiving it inline).
 * Conversation state comes either from a serialized checkpoint (resuming) or
 * is rebuilt from the completed {userText, assistantText} turns.
 *
 * @param {string} modelId - Cursor model identifier.
 * @param {string} systemPrompt - Combined system prompt text.
 * @param {string} userText - Latest user message to send as the action.
 * @param {Array} turns - Completed turns to replay when no checkpoint exists.
 * @param {string} conversationId - Stable conversation identifier.
 * @param {Uint8Array|undefined} checkpoint - Serialized ConversationStateStructure, if any.
 * @param {Map|undefined} existingBlobStore - Prior blob store to carry forward.
 * @returns {{requestBytes: Uint8Array, blobStore: Map, mcpTools: Array}}
 */
function buildCursorRequest(modelId, systemPrompt, userText, turns, conversationId, checkpoint, existingBlobStore) {
    const blobStore = new Map(existingBlobStore ?? []);
    // System prompt → blob store (Cursor requests it back via KV handshake)
    const systemJson = JSON.stringify({ role: "system", content: systemPrompt });
    const systemBytes = new TextEncoder().encode(systemJson);
    // Blob IDs are content-addressed: SHA-256 of the blob bytes.
    const systemBlobId = new Uint8Array(createHash("sha256").update(systemBytes).digest());
    blobStore.set(Buffer.from(systemBlobId).toString("hex"), systemBytes);
    let conversationState;
    if (checkpoint) {
        // Resume: the checkpoint already encodes the full conversation state.
        conversationState = fromBinary(ConversationStateStructureSchema, checkpoint);
    }
    else {
        // Fresh state: serialize each completed turn (user message plus an
        // optional assistant-message step) into protobuf bytes.
        const turnBytes = [];
        for (const turn of turns) {
            const userMsg = create(UserMessageSchema, {
                text: turn.userText,
                messageId: crypto.randomUUID(),
            });
            const userMsgBytes = toBinary(UserMessageSchema, userMsg);
            const stepBytes = [];
            if (turn.assistantText) {
                const step = create(ConversationStepSchema, {
                    message: {
                        case: "assistantMessage",
                        value: create(AssistantMessageSchema, { text: turn.assistantText }),
                    },
                });
                stepBytes.push(toBinary(ConversationStepSchema, step));
            }
            const agentTurn = create(AgentConversationTurnStructureSchema, {
                userMessage: userMsgBytes,
                steps: stepBytes,
            });
            const turnStructure = create(ConversationTurnStructureSchema, {
                turn: { case: "agentConversationTurn", value: agentTurn },
            });
            turnBytes.push(toBinary(ConversationTurnStructureSchema, turnStructure));
        }
        conversationState = create(ConversationStateStructureSchema, {
            // Root prompt references the system-prompt blob by its ID.
            rootPromptMessagesJson: [systemBlobId],
            turns: turnBytes,
            todos: [],
            pendingToolCalls: [],
            previousWorkspaceUris: [],
            fileStates: {},
            fileStatesV2: {},
            summaryArchives: [],
            turnTimings: [],
            subagentStates: {},
            selfSummaryCount: 0,
            readPaths: [],
        });
    }
    // The action delivers the latest user message to the agent.
    const userMessage = create(UserMessageSchema, {
        text: userText,
        messageId: crypto.randomUUID(),
    });
    const action = create(ConversationActionSchema, {
        action: {
            case: "userMessageAction",
            value: create(UserMessageActionSchema, { userMessage }),
        },
    });
    const modelDetails = create(ModelDetailsSchema, {
        modelId,
        displayModelId: modelId,
        displayName: modelId,
    });
    const runRequest = create(AgentRunRequestSchema, {
        conversationState,
        action,
        modelDetails,
        conversationId,
    });
    const clientMessage = create(AgentClientMessageSchema, {
        message: { case: "runRequest", value: runRequest },
    });
    return {
        requestBytes: toBinary(AgentClientMessageSchema, clientMessage),
        blobStore,
        // Callers attach MCP tool definitions after construction.
        mcpTools: [],
    };
}
|
|
1010
|
-
/**
 * Parse a Connect protocol EndStreamResponse payload.
 *
 * @param {Uint8Array} data - Raw bytes of the end-stream frame (JSON).
 * @returns {Error|null} An Error describing the server-reported failure, or
 *   null when the stream ended successfully.
 */
function parseConnectEndStream(data) {
    const text = new TextDecoder().decode(data).trim();
    // Fix: an empty end-stream payload previously fell into the JSON.parse
    // catch and was misreported as a parse failure; treat it as success.
    if (!text) {
        return null;
    }
    try {
        const payload = JSON.parse(text);
        const error = payload?.error;
        if (error) {
            const code = error.code ?? "unknown";
            const message = error.message ?? "Unknown error";
            return new Error(`Connect error ${code}: ${message}`);
        }
        return null;
    }
    catch {
        // Non-empty but malformed payload — surface it as a protocol error.
        return new Error("Failed to parse Connect end stream");
    }
}
|
|
1025
|
-
/** Serialize a client heartbeat message for the agent stream. */
function makeHeartbeatBytes() {
    const message = {
        case: "clientHeartbeat",
        value: create(ClientHeartbeatSchema, {}),
    };
    const heartbeat = create(AgentClientMessageSchema, { message });
    return toBinary(AgentClientMessageSchema, heartbeat);
}
|
|
1034
|
-
/** End the bridge on the next microtask, skipping bridges that died in the meantime. */
function scheduleBridgeEnd(bridge) {
    queueMicrotask(() => {
        if (!bridge.alive) {
            return;
        }
        bridge.end();
    });
}
|
|
1040
|
-
/**
 * Create a stateful parser for Connect protocol frames.
 * Handles buffering partial data across chunks.
 *
 * Frame layout: 1 flag byte, 4-byte big-endian length, then the message body.
 */
function createConnectFrameParser(onMessage, onEndStream) {
    let buffered = Buffer.alloc(0);
    return (chunk) => {
        buffered = Buffer.concat([buffered, chunk]);
        for (;;) {
            // Need at least the 5-byte envelope header.
            if (buffered.length < 5) {
                return;
            }
            const flags = buffered[0];
            const bodyLen = buffered.readUInt32BE(1);
            if (buffered.length < 5 + bodyLen) {
                // Body not fully arrived yet — wait for the next chunk.
                return;
            }
            const body = buffered.subarray(5, 5 + bodyLen);
            buffered = buffered.subarray(5 + bodyLen);
            if (flags & CONNECT_END_STREAM_FLAG) {
                onEndStream(body);
            }
            else {
                onMessage(body);
            }
        }
    };
}
|
|
1064
|
-
// Tag names some models wrap around chain-of-thought text; matched
// case-insensitively by the thinking-tag filter.
const THINKING_TAG_NAMES = ['think', 'thinking', 'reasoning', 'thought', 'think_intent'];
// Upper bound on a complete tag's length: a trailing '<...' fragment shorter
// than this may still grow into a thinking tag and must be buffered.
const MAX_THINKING_TAG_LEN = 16; // </think_intent> is 15 chars
|
|
1066
|
-
/**
 * Strip thinking tags from streamed text, routing tagged content to reasoning.
 * Buffers partial tags across chunk boundaries.
 *
 * Returns an object with:
 *  - process(text): splits the chunk into { content, reasoning } according to
 *    open/close thinking tags, carrying open-tag state between calls;
 *  - flush(): drains any buffered partial-tag text at end of stream.
 */
function createThinkingTagFilter() {
    // Holds a trailing fragment that might be the start of a tag split across chunks.
    let buffer = '';
    // True while between an opening and closing thinking tag.
    let inThinking = false;
    return {
        process(text) {
            const input = buffer + text;
            buffer = '';
            let content = '';
            let reasoning = '';
            let lastIdx = 0;
            // Fresh regex per call so /g lastIndex state never leaks between chunks.
            const re = new RegExp(`<(/?)(?:${THINKING_TAG_NAMES.join('|')})\\s*>`, 'gi');
            let match;
            while ((match = re.exec(input)) !== null) {
                // Route the text before this tag based on the current mode.
                const before = input.slice(lastIdx, match.index);
                if (inThinking)
                    reasoning += before;
                else
                    content += before;
                // Opening tag enters thinking mode; closing tag ("/" capture) exits it.
                inThinking = match[1] !== '/';
                lastIdx = re.lastIndex;
            }
            const rest = input.slice(lastIdx);
            // Buffer a trailing '<' that could be the start of a thinking tag.
            const ltPos = rest.lastIndexOf('<');
            if (ltPos >= 0 && rest.length - ltPos < MAX_THINKING_TAG_LEN && /^<\/?[a-z_]*$/i.test(rest.slice(ltPos))) {
                buffer = rest.slice(ltPos);
                const before = rest.slice(0, ltPos);
                if (inThinking)
                    reasoning += before;
                else
                    content += before;
            }
            else {
                if (inThinking)
                    reasoning += rest;
                else
                    content += rest;
            }
            return { content, reasoning };
        },
        flush() {
            // Emit whatever partial-tag fragment is still buffered, attributed
            // to the mode we were in when it was buffered.
            const b = buffer;
            buffer = '';
            if (!b)
                return { content: '', reasoning: '' };
            return inThinking ? { content: '', reasoning: b } : { content: b, reasoning: '' };
        },
    };
}
|
|
1119
|
-
/**
 * Derive OpenAI-style token usage from stream state. When the server never
 * reported a total, the completion count stands in for it (prompt = 0).
 */
function computeUsage(state) {
    const completionTokens = state.outputTokens;
    const totalTokens = state.totalTokens || completionTokens;
    const promptTokens = Math.max(0, totalTokens - completionTokens);
    return {
        prompt_tokens: promptTokens,
        completion_tokens: completionTokens,
        total_tokens: totalTokens,
    };
}
|
|
1125
|
-
/**
 * Route one decoded AgentServerMessage to the appropriate handler:
 * interaction updates (text/thinking/token deltas), KV blob traffic,
 * exec requests, or conversation checkpoints.
 */
function processServerMessage(msg, blobStore, mcpTools, sendFrame, state, onText, onMcpExec, onCheckpoint) {
    switch (msg.message.case) {
        case "interactionUpdate":
            handleInteractionUpdate(msg.message.value, state, onText);
            break;
        case "kvServerMessage":
            handleKvMessage(msg.message.value, blobStore, sendFrame);
            break;
        case "execServerMessage":
            handleExecMessage(msg.message.value, mcpTools, sendFrame, onMcpExec);
            break;
        case "conversationCheckpointUpdate": {
            const stateStructure = msg.message.value;
            // Checkpoints carry the authoritative token usage for the conversation.
            if (stateStructure.tokenDetails) {
                state.totalTokens = stateStructure.tokenDetails.usedTokens;
            }
            if (onCheckpoint) {
                onCheckpoint(toBinary(ConversationStateStructureSchema, stateStructure));
            }
            break;
        }
        default:
            // Other server message kinds are ignored.
            break;
    }
}
|
|
1146
|
-
/**
 * Apply a single interaction update: forward text/thinking deltas to onText
 * (the boolean flag marks reasoning text) and accumulate output tokens.
 */
function handleInteractionUpdate(update, state, onText) {
    switch (update.message?.case) {
        case "textDelta": {
            const delta = update.message.value.text || "";
            if (delta) {
                onText(delta, false);
            }
            break;
        }
        case "thinkingDelta": {
            const delta = update.message.value.text || "";
            if (delta) {
                onText(delta, true);
            }
            break;
        }
        case "tokenDelta":
            state.outputTokens += update.message.value.tokens ?? 0;
            break;
        default:
            // toolCallStarted, partialToolCall, toolCallDelta, toolCallCompleted
            // are intentionally ignored. MCP tool calls flow through the exec
            // message path (mcpArgs → mcpResult), not interaction updates.
            break;
    }
}
|
|
1165
|
-
/** Send a KV client response back to Cursor. */
function sendKvResponse(kvMsg, messageCase, value, sendFrame) {
    const kvClientMessage = create(KvClientMessageSchema, {
        // Echo the request id so Cursor can correlate the reply.
        id: kvMsg.id,
        message: { case: messageCase, value },
    });
    const envelope = create(AgentClientMessageSchema, {
        message: { case: "kvClientMessage", value: kvClientMessage },
    });
    sendFrame(toBinary(AgentClientMessageSchema, envelope));
}
|
|
1176
|
-
/**
 * Serve Cursor's KV blob handshake from the in-memory blob store:
 * getBlobArgs looks a blob up by its hex id, setBlobArgs stores one.
 */
function handleKvMessage(kvMsg, blobStore, sendFrame) {
    switch (kvMsg.message.case) {
        case "getBlobArgs": {
            const blobIdKey = Buffer.from(kvMsg.message.value.blobId).toString("hex");
            const blobData = blobStore.get(blobIdKey);
            if (!blobData) {
                // Missing blobs are answered with an empty result, but logged
                // because they usually indicate lost conversation state.
                logPluginWarn("Cursor requested missing blob", {
                    blobId: blobIdKey,
                    knownBlobCount: blobStore.size,
                });
            }
            sendKvResponse(kvMsg, "getBlobResult", create(GetBlobResultSchema, blobData ? { blobData } : {}), sendFrame);
            break;
        }
        case "setBlobArgs": {
            const { blobId, blobData } = kvMsg.message.value;
            blobStore.set(Buffer.from(blobId).toString("hex"), blobData);
            sendKvResponse(kvMsg, "setBlobResult", create(SetBlobResultSchema, {}), sendFrame);
            break;
        }
    }
}
|
|
1196
|
-
/**
 * Dispatch one exec request from Cursor.
 *
 * - requestContextArgs: answer the context handshake, advertising our MCP tools.
 * - mcpArgs: forward the MCP tool invocation to the caller via onMcpExec
 *   (the result is sent back later, outside this function).
 * - Native Cursor tools (read/ls/grep/write/delete/shell/fetch/…): reject or
 *   error them so the model falls back to the MCP tools we registered.
 * - Remaining known exec types get an empty result; unknown ones are logged.
 */
function handleExecMessage(execMsg, mcpTools, sendFrame, onMcpExec) {
    const execCase = execMsg.message.case;
    if (execCase === "requestContextArgs") {
        // Context handshake: only our MCP tools are advertised; every other
        // context field is deliberately empty.
        const requestContext = create(RequestContextSchema, {
            rules: [],
            repositoryInfo: [],
            tools: mcpTools,
            gitRepos: [],
            projectLayouts: [],
            mcpInstructions: [],
            fileContents: {},
            customSubagents: [],
        });
        const result = create(RequestContextResultSchema, {
            result: {
                case: "success",
                value: create(RequestContextSuccessSchema, { requestContext }),
            },
        });
        sendExecResult(execMsg, "requestContextResult", result, sendFrame);
        return;
    }
    if (execCase === "mcpArgs") {
        // MCP tool invocation: decode the protobuf-encoded args and hand the
        // call to the bridge together with the ids needed to answer it later.
        const mcpArgs = execMsg.message.value;
        const decoded = decodeMcpArgsMap(mcpArgs.args ?? {});
        onMcpExec({
            execId: execMsg.execId,
            execMsgId: execMsg.id,
            toolCallId: mcpArgs.toolCallId || crypto.randomUUID(),
            toolName: mcpArgs.toolName || mcpArgs.name,
            decodedArgs: JSON.stringify(decoded),
        });
        return;
    }
    // --- Reject native Cursor tools ---
    // The model tries these first. We must respond with rejection/error
    // so it falls back to our MCP tools (registered via RequestContext).
    const REJECT_REASON = "Tool not available in this environment. Use the MCP tools provided instead.";
    if (execCase === "readArgs") {
        const args = execMsg.message.value;
        const result = create(ReadResultSchema, {
            result: { case: "rejected", value: create(ReadRejectedSchema, { path: args.path, reason: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "readResult", result, sendFrame);
        return;
    }
    if (execCase === "lsArgs") {
        const args = execMsg.message.value;
        const result = create(LsResultSchema, {
            result: { case: "rejected", value: create(LsRejectedSchema, { path: args.path, reason: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "lsResult", result, sendFrame);
        return;
    }
    if (execCase === "grepArgs") {
        // Grep has no "rejected" variant — use its error result instead.
        const result = create(GrepResultSchema, {
            result: { case: "error", value: create(GrepErrorSchema, { error: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "grepResult", result, sendFrame);
        return;
    }
    if (execCase === "writeArgs") {
        const args = execMsg.message.value;
        const result = create(WriteResultSchema, {
            result: { case: "rejected", value: create(WriteRejectedSchema, { path: args.path, reason: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "writeResult", result, sendFrame);
        return;
    }
    if (execCase === "deleteArgs") {
        const args = execMsg.message.value;
        const result = create(DeleteResultSchema, {
            result: { case: "rejected", value: create(DeleteRejectedSchema, { path: args.path, reason: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "deleteResult", result, sendFrame);
        return;
    }
    if (execCase === "shellArgs" || execCase === "shellStreamArgs") {
        // Streaming and non-streaming shell share the same rejection shape.
        const args = execMsg.message.value;
        const result = create(ShellResultSchema, {
            result: {
                case: "rejected",
                value: create(ShellRejectedSchema, {
                    command: args.command ?? "",
                    workingDirectory: args.workingDirectory ?? "",
                    reason: REJECT_REASON,
                    isReadonly: false,
                }),
            },
        });
        sendExecResult(execMsg, "shellResult", result, sendFrame);
        return;
    }
    if (execCase === "backgroundShellSpawnArgs") {
        const args = execMsg.message.value;
        const result = create(BackgroundShellSpawnResultSchema, {
            result: {
                case: "rejected",
                value: create(ShellRejectedSchema, {
                    command: args.command ?? "",
                    workingDirectory: args.workingDirectory ?? "",
                    reason: REJECT_REASON,
                    isReadonly: false,
                }),
            },
        });
        sendExecResult(execMsg, "backgroundShellSpawnResult", result, sendFrame);
        return;
    }
    if (execCase === "writeShellStdinArgs") {
        const result = create(WriteShellStdinResultSchema, {
            result: { case: "error", value: create(WriteShellStdinErrorSchema, { error: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "writeShellStdinResult", result, sendFrame);
        return;
    }
    if (execCase === "fetchArgs") {
        const args = execMsg.message.value;
        const result = create(FetchResultSchema, {
            result: { case: "error", value: create(FetchErrorSchema, { url: args.url ?? "", error: REJECT_REASON }) },
        });
        sendExecResult(execMsg, "fetchResult", result, sendFrame);
        return;
    }
    if (execCase === "diagnosticsArgs") {
        // Diagnostics: answer with an empty (no-diagnostics) result.
        const result = create(DiagnosticsResultSchema, {});
        sendExecResult(execMsg, "diagnosticsResult", result, sendFrame);
        return;
    }
    // MCP resource/screen/computer exec types
    // All of these accept an empty McpResult; map args case → result case.
    const miscCaseMap = {
        listMcpResourcesExecArgs: "listMcpResourcesExecResult",
        readMcpResourceExecArgs: "readMcpResourceExecResult",
        recordScreenArgs: "recordScreenResult",
        computerUseArgs: "computerUseResult",
    };
    const resultCase = miscCaseMap[execCase];
    if (resultCase) {
        sendExecResult(execMsg, resultCase, create(McpResultSchema, {}), sendFrame);
        return;
    }
    // Unknown exec type — log and ignore
    console.error(`[proxy] unhandled exec: ${execCase}`);
}
|
|
1340
|
-
/** Send an exec client message back to Cursor. */
function sendExecResult(execMsg, messageCase, value, sendFrame) {
    // Echo both ids so Cursor can match the result to its exec request.
    const execClientMessage = create(ExecClientMessageSchema, {
        id: execMsg.id,
        execId: execMsg.execId,
        message: { case: messageCase, value },
    });
    const envelope = create(AgentClientMessageSchema, {
        message: { case: "execClientMessage", value: execClientMessage },
    });
    sendFrame(toBinary(AgentClientMessageSchema, envelope));
}
|
|
1352
|
-
/** Derive a key for active bridge lookup (tool-call continuations). */
function deriveBridgeKey(modelId, messages, sessionId, agentKey) {
    const normalizedAgent = normalizeAgentKey(agentKey);
    let seed;
    if (sessionId) {
        // Session-scoped: stable across requests in the same OpenCode session.
        seed = `bridge:${sessionId}:${normalizedAgent}`;
    }
    else {
        // Fallback: fingerprint the first user message (capped at 200 chars).
        const firstUserMsg = messages.find((m) => m.role === "user");
        const firstUserText = firstUserMsg ? textContent(firstUserMsg.content) : "";
        seed = `bridge:${normalizedAgent}:${modelId}:${firstUserText.slice(0, 200)}`;
    }
    return createHash("sha256").update(seed).digest("hex").slice(0, 16);
}
|
|
1369
|
-
/** Derive a key for conversation state. Model-independent so context survives model switches. */
function deriveConversationKey(messages, sessionId, agentKey) {
    const normalizedAgent = normalizeAgentKey(agentKey);
    const seed = sessionId
        ? `session:${sessionId}:${normalizedAgent}`
        : `${normalizedAgent}:${buildConversationFingerprint(messages)}`;
    return createHash("sha256").update(seed).digest("hex").slice(0, 16);
}
|
|
1383
|
-
/** Build a stable textual fingerprint of a message list (roles, content, tool-call ids). */
function buildConversationFingerprint(messages) {
    const lines = [];
    for (const message of messages) {
        const toolCallIDs = (message.tool_calls ?? []).map((call) => call.id).join(",");
        lines.push(`${message.role}:${textContent(message.content)}:${message.tool_call_id ?? ""}:${toolCallIDs}`);
    }
    return lines.join("\n---\n");
}
|
|
1389
|
-
/**
 * Refresh a stored conversation's fingerprint after a completion finishes,
 * folding the just-completed turn into the fingerprint when there was one.
 */
function updateStoredConversationAfterCompletion(convKey, metadata, assistantText) {
    const stored = conversationStates.get(convKey);
    if (!stored) {
        return;
    }
    let nextTurns = metadata.turns;
    if (metadata.userText) {
        nextTurns = [
            ...metadata.turns,
            { userText: metadata.userText, assistantText: assistantText.trim() },
        ];
    }
    stored.systemPromptHash = metadata.systemPromptHash;
    stored.completedTurnsFingerprint = buildCompletedTurnsFingerprint(metadata.systemPrompt, nextTurns);
    stored.lastAccessMs = Date.now();
}
|
|
1400
|
-
/**
 * Derive a short title from raw text when no model-generated title exists:
 * strip markup/brackets/punctuation, keep the first six words, title-case them.
 */
function deriveFallbackTitle(text) {
    const cleaned = text
        .replace(/<[^>]+>/g, " ")
        .replace(/\[[^\]]+\]/g, " ")
        .replace(/[^\p{L}\p{N}'’\-\s]+/gu, " ")
        .replace(/\s+/g, " ")
        .trim();
    if (!cleaned) {
        return "";
    }
    const titled = cleaned
        .split(" ")
        .filter(Boolean)
        .slice(0, 6)
        .map(titleCaseWord)
        .join(" ");
    return finalizeTitle(titled);
}
|
|
1412
|
-
/**
 * Upper-case the first character of a word, leaving the rest untouched.
 *
 * Iterates by code point so a first letter outside the BMP (e.g. Deseret)
 * is upper-cased as a whole character; the previous `word[0]` form grabbed
 * only the leading UTF-16 unit, and upper-casing a lone surrogate is a no-op.
 *
 * @param {string} word - Word to title-case; may be empty.
 * @returns {string} The word with its first code point upper-cased.
 */
function titleCaseWord(word) {
    if (!word) {
        return word;
    }
    const [first] = word; // first full code point, not just the first UTF-16 unit
    return first.toUpperCase() + word.slice(first.length);
}
|
|
1417
|
-
/**
 * Normalize a candidate title: drop a leading markdown heading marker and
 * trailing sentence punctuation, collapse whitespace, and cap at 80 chars.
 */
function finalizeTitle(value) {
    let title = value.replace(/^#{1,6}\s*/, "");
    title = title.replace(/[.!?,:;]+$/g, "");
    title = title.replace(/\s+/g, " ").trim();
    // Cap the length, then trim again in case the cut landed after a space.
    return title.slice(0, 80).trim();
}
|
|
1426
|
-
/**
 * Build a Response that replays an already-complete text answer as a short
 * SSE stream: one content chunk, a stop chunk, a usage chunk, then [DONE].
 */
function createBufferedSSETextResponse(modelId, text, usage) {
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    // Shared envelope fields for every chunk.
    const base = { id: completionId, object: "chat.completion.chunk", created, model: modelId };
    const chunks = [
        { ...base, choices: [{ index: 0, delta: { content: text }, finish_reason: null }] },
        { ...base, choices: [{ index: 0, delta: {}, finish_reason: "stop" }] },
        { ...base, choices: [], usage },
    ];
    let payload = "";
    for (const chunk of chunks) {
        payload += `data: ${JSON.stringify(chunk)}\n\n`;
    }
    payload += "data: [DONE]\n\n";
    return new Response(payload, { headers: SSE_HEADERS });
}
|
|
1455
|
-
/**
 * Generate a session title via Cursor's NameAgent RPC, falling back to a
 * locally derived title and finally to "Untitled Session" when the remote
 * name is empty. Returns an OpenAI-shaped completion, streamed or plain.
 */
async function handleTitleGenerationRequest(sourceText, accessToken, modelId, stream) {
    const request = create(NameAgentRequestSchema, { userMessage: sourceText });
    const response = await callCursorUnaryRpc({
        accessToken,
        rpcPath: "/agent.v1.AgentService/NameAgent",
        requestBody: toBinary(NameAgentRequestSchema, request),
        timeoutMs: 5_000,
    });
    if (response.timedOut) {
        throw new Error("Cursor title generation timed out");
    }
    if (response.exitCode !== 0) {
        throw new Error(`Cursor title generation failed with HTTP ${response.exitCode}`);
    }
    // The body may or may not be Connect-framed; fall back to the raw bytes.
    const payload = decodeConnectUnaryBody(response.body) ?? response.body;
    const decoded = fromBinary(NameAgentResponseSchema, payload);
    const title = finalizeTitle(decoded.name) || deriveFallbackTitle(sourceText) || "Untitled Session";
    const usage = { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
    if (stream) {
        return createBufferedSSETextResponse(modelId, title, usage);
    }
    const body = {
        id: `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`,
        object: "chat.completion",
        created: Math.floor(Date.now() / 1000),
        model: modelId,
        choices: [{ index: 0, message: { role: "assistant", content: title }, finish_reason: "stop" }],
        usage,
    };
    return new Response(JSON.stringify(body), { headers: { "Content-Type": "application/json" } });
}
|
|
1495
|
-
/** Create an SSE streaming Response that reads from a live bridge. */
function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, metadata) {
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    const stream = new ReadableStream({
        start(controller) {
            const encoder = new TextEncoder();
            // Guards every enqueue/close so nothing touches the controller after it is closed.
            let closed = false;
            // Emit one SSE data frame (JSON payload) unless the stream is already closed.
            const sendSSE = (data) => {
                if (closed)
                    return;
                controller.enqueue(encoder.encode(`data: ${JSON.stringify(data)}\n\n`));
            };
            // Emit the OpenAI-style terminal sentinel frame.
            const sendDone = () => {
                if (closed)
                    return;
                controller.enqueue(encoder.encode("data: [DONE]\n\n"));
            };
            // Idempotently close the ReadableStream.
            const closeController = () => {
                if (closed)
                    return;
                closed = true;
                controller.close();
            };
            // One chat.completion.chunk with a single choice carrying `delta`.
            const makeChunk = (delta, finishReason = null) => ({
                id: completionId,
                object: "chat.completion.chunk",
                created,
                model: modelId,
                choices: [{ index: 0, delta, finish_reason: finishReason }],
            });
            // Final usage-only chunk (empty choices), derived from the shared state.
            const makeUsageChunk = () => {
                const { prompt_tokens, completion_tokens, total_tokens } = computeUsage(state);
                return {
                    id: completionId,
                    object: "chat.completion.chunk",
                    created,
                    model: modelId,
                    choices: [],
                    usage: { prompt_tokens, completion_tokens, total_tokens },
                };
            };
            // Mutable per-stream state shared with processServerMessage/computeUsage.
            const state = {
                toolCallIndex: 0,
                pendingExecs: [],
                outputTokens: 0,
                totalTokens: 0,
            };
            const tagFilter = createThinkingTagFilter();
            // Accumulates assistant-visible text across this stream (seeded when resuming after tool calls).
            let assistantText = metadata.assistantSeedText ?? "";
            let mcpExecReceived = false;
            let endStreamError = null;
            const processChunk = createConnectFrameParser((messageBytes) => {
                try {
                    const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
                    // onText — route thinking text straight to reasoning_content; run normal
                    // text through the tag filter, which may split it into reasoning + content.
                    processServerMessage(serverMessage, blobStore, mcpTools, (data) => bridge.write(data), state, (text, isThinking) => {
                        if (isThinking) {
                            sendSSE(makeChunk({ reasoning_content: text }));
                        }
                        else {
                            const { content, reasoning } = tagFilter.process(text);
                            if (reasoning)
                                sendSSE(makeChunk({ reasoning_content: reasoning }));
                            if (content) {
                                assistantText += content;
                                sendSSE(makeChunk({ content }));
                            }
                        }
                    },
                    // onMcpExec — the model wants to execute a tool.
                    (exec) => {
                        state.pendingExecs.push(exec);
                        mcpExecReceived = true;
                        // Drain anything buffered in the tag filter before pausing the turn.
                        const flushed = tagFilter.flush();
                        if (flushed.reasoning)
                            sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
                        if (flushed.content) {
                            assistantText += flushed.content;
                            sendSSE(makeChunk({ content: flushed.content }));
                        }
                        // Seed text carried into the resumed turn: text so far plus a summary of this call.
                        const assistantSeedText = [
                            assistantText.trim(),
                            formatToolCallSummary({
                                id: exec.toolCallId,
                                type: "function",
                                function: {
                                    name: exec.toolName,
                                    arguments: exec.decodedArgs,
                                },
                            }),
                        ].filter(Boolean).join("\n\n");
                        const toolCallIndex = state.toolCallIndex++;
                        sendSSE(makeChunk({
                            tool_calls: [{
                                index: toolCallIndex,
                                id: exec.toolCallId,
                                type: "function",
                                function: {
                                    name: exec.toolName,
                                    arguments: exec.decodedArgs,
                                },
                            }],
                        }));
                        // Keep the bridge alive for tool result continuation.
                        activeBridges.set(bridgeKey, {
                            bridge,
                            heartbeatTimer,
                            blobStore,
                            mcpTools,
                            pendingExecs: state.pendingExecs,
                            modelId,
                            metadata: {
                                ...metadata,
                                assistantSeedText,
                            },
                        });
                        // End this SSE response with finish_reason "tool_calls"; the client
                        // is expected to come back with tool results to resume the bridge.
                        sendSSE(makeChunk({}, "tool_calls"));
                        sendDone();
                        closeController();
                    }, (checkpointBytes) => {
                        // onCheckpoint — persist the latest checkpoint on the stored conversation.
                        const stored = conversationStates.get(convKey);
                        if (stored) {
                            stored.checkpoint = checkpointBytes;
                            stored.lastAccessMs = Date.now();
                        }
                    });
                }
                catch {
                    // Skip unparseable messages
                }
            }, (endStreamBytes) => {
                // Connect end-of-stream trailer: may carry an error; either way, wind down the bridge.
                endStreamError = parseConnectEndStream(endStreamBytes);
                if (endStreamError) {
                    logPluginError("Cursor stream returned Connect end-stream error", {
                        modelId,
                        bridgeKey,
                        convKey,
                        ...errorDetails(endStreamError),
                    });
                }
                scheduleBridgeEnd(bridge);
            });
            bridge.onData(processChunk);
            bridge.onClose((code) => {
                clearInterval(heartbeatTimer);
                // Merge blobs produced during this stream back into the stored conversation.
                const stored = conversationStates.get(convKey);
                if (stored) {
                    for (const [k, v] of blobStore)
                        stored.blobStore.set(k, v);
                    stored.lastAccessMs = Date.now();
                }
                // Upstream reported an error in its trailer: surface it to the SSE consumer.
                if (endStreamError) {
                    activeBridges.delete(bridgeKey);
                    if (!closed) {
                        closed = true;
                        controller.error(endStreamError);
                    }
                    return;
                }
                if (!mcpExecReceived) {
                    // Normal completion: flush remaining filtered text, persist the turn, finish the stream.
                    const flushed = tagFilter.flush();
                    if (flushed.reasoning)
                        sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
                    if (flushed.content) {
                        assistantText += flushed.content;
                        sendSSE(makeChunk({ content: flushed.content }));
                    }
                    updateStoredConversationAfterCompletion(convKey, metadata, assistantText);
                    sendSSE(makeChunk({}, "stop"));
                    sendSSE(makeUsageChunk());
                    sendDone();
                    closeController();
                }
                else {
                    activeBridges.delete(bridgeKey);
                    if (code !== 0 && !closed) {
                        // Bridge died while tool calls are pending (timeout, crash, etc.).
                        // Close the SSE stream so the client doesn't hang forever.
                        sendSSE(makeChunk({ content: "\n[Error: bridge connection lost]" }));
                        sendSSE(makeChunk({}, "stop"));
                        sendSSE(makeUsageChunk());
                        sendDone();
                        closeController();
                    }
                }
            });
        },
    });
    return new Response(stream, { headers: SSE_HEADERS });
}
|
|
1685
|
-
/** Start a Cursor RunSSE session, send the initial request, and start heartbeats. */
async function startBridge(accessToken, requestBytes) {
    const bridge = await createCursorSession({ accessToken, requestId: crypto.randomUUID() });
    bridge.write(requestBytes);
    // Ping every 5s so the upstream connection is not idled out.
    const heartbeatTimer = setInterval(() => {
        bridge.write(makeHeartbeatBytes());
    }, 5_000);
    return { bridge, heartbeatTimer };
}
|
|
1696
|
-
/** Open a fresh bridge for this request and stream its output back as SSE. */
async function handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, metadata) {
    const { requestBytes, blobStore, mcpTools } = payload;
    const { bridge, heartbeatTimer } = await startBridge(accessToken, requestBytes);
    return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, metadata);
}
|
|
1700
|
-
/** Resume a paused bridge by sending MCP results and continuing to stream. */
function handleToolResultResume(active, toolResults, bridgeKey, convKey) {
    const { bridge, heartbeatTimer, blobStore, mcpTools, pendingExecs, modelId, metadata } = active;
    // Fold the textual tool-result summaries into the seed text for the resumed turn.
    const seedParts = [
        metadata.assistantSeedText?.trim() ?? "",
        toolResults.map(formatToolResultSummary).join("\n\n"),
    ];
    const resumeMetadata = {
        ...metadata,
        assistantSeedText: seedParts.filter(Boolean).join("\n\n"),
    };
    // Wrap a plain text result in the MCP success envelope.
    const successResult = (text) => create(McpResultSchema, {
        result: {
            case: "success",
            value: create(McpSuccessSchema, {
                content: [
                    create(McpToolResultContentItemSchema, {
                        content: {
                            case: "text",
                            value: create(McpTextContentSchema, { text }),
                        },
                    }),
                ],
                isError: false,
            }),
        },
    });
    // Error envelope used when the client supplied no result for an exec.
    const missingResult = () => create(McpResultSchema, {
        result: {
            case: "error",
            value: create(McpErrorSchema, { error: "Tool result not provided" }),
        },
    });
    // Answer every pending exec; execs without a matching tool result get an error.
    for (const exec of pendingExecs) {
        const match = toolResults.find((r) => r.toolCallId === exec.toolCallId);
        const mcpResult = match ? successResult(match.content) : missingResult();
        const clientMessage = create(AgentClientMessageSchema, {
            message: {
                case: "execClientMessage",
                value: create(ExecClientMessageSchema, {
                    id: exec.execMsgId,
                    execId: exec.execId,
                    message: { case: "mcpResult", value: mcpResult },
                }),
            },
        });
        bridge.write(toBinary(AgentClientMessageSchema, clientMessage));
    }
    return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, resumeMetadata);
}
|
|
1751
|
-
/** Collect the whole bridge response and wrap it as a single OpenAI chat completion. */
async function handleNonStreamingResponse(payload, accessToken, modelId, convKey, metadata) {
    // Stamp id/created up front so they reflect when the request began.
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    const { text, usage, finishReason, toolCalls } = await collectFullResponse(payload, accessToken, modelId, convKey, metadata);
    let message;
    if (finishReason === "tool_calls") {
        message = { role: "assistant", content: null, tool_calls: toolCalls };
    }
    else {
        message = { role: "assistant", content: text };
    }
    const body = {
        id: completionId,
        object: "chat.completion",
        created,
        model: modelId,
        choices: [{ index: 0, message, finish_reason: finishReason }],
        usage,
    };
    return new Response(JSON.stringify(body), { headers: { "Content-Type": "application/json" } });
}
|
|
1773
|
-
/**
 * Drive one full bridge round-trip without streaming: accumulate the assistant
 * text (thinking text is discarded), collect any tool calls the model emits,
 * and resolve with { text, usage, finishReason, toolCalls } once the bridge
 * closes. Rejects with the Connect end-stream error when the trailer carried one.
 */
async function collectFullResponse(payload, accessToken, modelId, convKey, metadata) {
    const { promise, resolve, reject } = Promise.withResolvers();
    let fullText = "";
    let endStreamError = null;
    const pendingToolCalls = [];
    const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
    // Mutable state shared with processServerMessage/computeUsage.
    const state = {
        toolCallIndex: 0,
        pendingExecs: [],
        outputTokens: 0,
        totalTokens: 0,
    };
    const tagFilter = createThinkingTagFilter();
    bridge.onData(createConnectFrameParser((messageBytes) => {
        try {
            const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
            // onText — drop thinking text entirely; keep only the filtered content.
            processServerMessage(serverMessage, payload.blobStore, payload.mcpTools, (data) => bridge.write(data), state, (text, isThinking) => {
                if (isThinking)
                    return;
                const { content } = tagFilter.process(text);
                fullText += content;
            }, (exec) => {
                // onMcpExec — record the tool call in OpenAI shape; the caller will
                // report finish_reason "tool_calls". End the bridge, since a
                // non-streaming response cannot pause and resume.
                pendingToolCalls.push({
                    id: exec.toolCallId,
                    type: "function",
                    function: {
                        name: exec.toolName,
                        arguments: exec.decodedArgs,
                    },
                });
                scheduleBridgeEnd(bridge);
            }, (checkpointBytes) => {
                // onCheckpoint — persist the latest checkpoint on the stored conversation.
                const stored = conversationStates.get(convKey);
                if (stored) {
                    stored.checkpoint = checkpointBytes;
                    stored.lastAccessMs = Date.now();
                }
            });
        }
        catch {
            // Skip
        }
    }, (endStreamBytes) => {
        // Connect end-of-stream trailer: may carry an error; either way, wind down the bridge.
        endStreamError = parseConnectEndStream(endStreamBytes);
        if (endStreamError) {
            logPluginError("Cursor non-streaming response returned Connect end-stream error", {
                modelId,
                convKey,
                ...errorDetails(endStreamError),
            });
        }
        scheduleBridgeEnd(bridge);
    }));
    bridge.onClose(() => {
        clearInterval(heartbeatTimer);
        // Merge blobs produced during this request back into the stored conversation.
        const stored = conversationStates.get(convKey);
        if (stored) {
            for (const [k, v] of payload.blobStore)
                stored.blobStore.set(k, v);
            stored.lastAccessMs = Date.now();
        }
        // Flush whatever the tag filter still buffers before settling the promise.
        const flushed = tagFilter.flush();
        fullText += flushed.content;
        if (endStreamError) {
            reject(endStreamError);
            return;
        }
        // Only a turn without pending tool calls counts as a completed turn.
        if (pendingToolCalls.length === 0) {
            updateStoredConversationAfterCompletion(convKey, metadata, fullText);
        }
        const usage = computeUsage(state);
        resolve({
            text: fullText,
            usage,
            finishReason: pendingToolCalls.length > 0 ? "tool_calls" : "stop",
            toolCalls: pendingToolCalls,
        });
    });
    return promise;
}
|
|
1
|
+
export { getProxyPort, startProxy, stopProxy } from "./proxy/index";
|
|
2
|
+
export { callCursorUnaryRpc } from "./cursor";
|