@playwo/opencode-cursor-oauth 0.0.9 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +19 -91
- package/dist/auth.js +26 -1
- package/dist/index.js +128 -51
- package/dist/logger.d.ts +6 -0
- package/dist/logger.js +142 -0
- package/dist/models.d.ts +3 -0
- package/dist/models.js +79 -31
- package/dist/proxy.d.ts +1 -0
- package/dist/proxy.js +739 -108
- package/package.json +2 -2
package/dist/proxy.js
CHANGED
|
@@ -14,11 +14,15 @@
|
|
|
14
14
|
*/
|
|
15
15
|
import { create, fromBinary, fromJson, toBinary, toJson } from "@bufbuild/protobuf";
|
|
16
16
|
import { ValueSchema } from "@bufbuild/protobuf/wkt";
|
|
17
|
-
import { AgentClientMessageSchema, AgentRunRequestSchema, AgentServerMessageSchema, BidiRequestIdSchema, ClientHeartbeatSchema, ConversationActionSchema, ConversationStateStructureSchema, ConversationStepSchema, AgentConversationTurnStructureSchema, ConversationTurnStructureSchema, AssistantMessageSchema, BackgroundShellSpawnResultSchema, DeleteResultSchema, DeleteRejectedSchema, DiagnosticsResultSchema, ExecClientMessageSchema, FetchErrorSchema, FetchResultSchema, GetBlobResultSchema, GrepErrorSchema, GrepResultSchema, KvClientMessageSchema, LsRejectedSchema, LsResultSchema, McpErrorSchema, McpResultSchema, McpSuccessSchema, McpTextContentSchema, McpToolDefinitionSchema, McpToolResultContentItemSchema, ModelDetailsSchema, ReadRejectedSchema, ReadResultSchema, RequestContextResultSchema, RequestContextSchema, RequestContextSuccessSchema, SetBlobResultSchema, ShellRejectedSchema, ShellResultSchema, UserMessageActionSchema, UserMessageSchema, WriteRejectedSchema, WriteResultSchema, WriteShellStdinErrorSchema, WriteShellStdinResultSchema, } from "./proto/agent_pb";
|
|
17
|
+
import { AgentClientMessageSchema, AgentRunRequestSchema, AgentServerMessageSchema, BidiRequestIdSchema, ClientHeartbeatSchema, ConversationActionSchema, ConversationStateStructureSchema, ConversationStepSchema, AgentConversationTurnStructureSchema, ConversationTurnStructureSchema, AssistantMessageSchema, BackgroundShellSpawnResultSchema, DeleteResultSchema, DeleteRejectedSchema, DiagnosticsResultSchema, ExecClientMessageSchema, FetchErrorSchema, FetchResultSchema, GetBlobResultSchema, GrepErrorSchema, GrepResultSchema, KvClientMessageSchema, LsRejectedSchema, LsResultSchema, McpErrorSchema, McpResultSchema, McpSuccessSchema, McpTextContentSchema, McpToolDefinitionSchema, McpToolResultContentItemSchema, ModelDetailsSchema, NameAgentRequestSchema, NameAgentResponseSchema, ReadRejectedSchema, ReadResultSchema, RequestContextResultSchema, RequestContextSchema, RequestContextSuccessSchema, SetBlobResultSchema, ShellRejectedSchema, ShellResultSchema, UserMessageActionSchema, UserMessageSchema, WriteRejectedSchema, WriteResultSchema, WriteShellStdinErrorSchema, WriteShellStdinResultSchema, } from "./proto/agent_pb";
|
|
18
18
|
import { createHash } from "node:crypto";
|
|
19
|
+
import { connect as connectHttp2 } from "node:http2";
|
|
20
|
+
import { errorDetails, logPluginError, logPluginWarn } from "./logger";
|
|
19
21
|
const CURSOR_API_URL = process.env.CURSOR_API_URL ?? "https://api2.cursor.sh";
|
|
20
22
|
const CURSOR_CLIENT_VERSION = "cli-2026.01.09-231024f";
|
|
23
|
+
const CURSOR_CONNECT_PROTOCOL_VERSION = "1";
|
|
21
24
|
const CONNECT_END_STREAM_FLAG = 0b00000010;
|
|
25
|
+
const OPENCODE_TITLE_REQUEST_MARKER = "Generate a title for this conversation:";
|
|
22
26
|
const SSE_HEADERS = {
|
|
23
27
|
"Content-Type": "text/event-stream",
|
|
24
28
|
"Cache-Control": "no-cache",
|
|
@@ -38,6 +42,31 @@ function evictStaleConversations() {
|
|
|
38
42
|
}
|
|
39
43
|
}
|
|
40
44
|
}
|
|
45
|
+
function normalizeAgentKey(agentKey) {
|
|
46
|
+
const trimmed = agentKey?.trim();
|
|
47
|
+
return trimmed ? trimmed : "default";
|
|
48
|
+
}
|
|
49
|
+
function hashString(value) {
|
|
50
|
+
return createHash("sha256").update(value).digest("hex");
|
|
51
|
+
}
|
|
52
|
+
function createStoredConversation() {
|
|
53
|
+
return {
|
|
54
|
+
conversationId: crypto.randomUUID(),
|
|
55
|
+
checkpoint: null,
|
|
56
|
+
blobStore: new Map(),
|
|
57
|
+
lastAccessMs: Date.now(),
|
|
58
|
+
systemPromptHash: "",
|
|
59
|
+
completedTurnsFingerprint: "",
|
|
60
|
+
};
|
|
61
|
+
}
|
|
62
|
+
function resetStoredConversation(stored) {
|
|
63
|
+
stored.conversationId = crypto.randomUUID();
|
|
64
|
+
stored.checkpoint = null;
|
|
65
|
+
stored.blobStore = new Map();
|
|
66
|
+
stored.lastAccessMs = Date.now();
|
|
67
|
+
stored.systemPromptHash = "";
|
|
68
|
+
stored.completedTurnsFingerprint = "";
|
|
69
|
+
}
|
|
41
70
|
/** Connect protocol frame: [1-byte flags][4-byte BE length][payload] */
|
|
42
71
|
function frameConnectMessage(data, flags = 0) {
|
|
43
72
|
const frame = Buffer.alloc(5 + data.length);
|
|
@@ -46,19 +75,40 @@ function frameConnectMessage(data, flags = 0) {
|
|
|
46
75
|
frame.set(data, 5);
|
|
47
76
|
return frame;
|
|
48
77
|
}
|
|
78
|
+
function decodeConnectUnaryBody(payload) {
|
|
79
|
+
if (payload.length < 5)
|
|
80
|
+
return null;
|
|
81
|
+
let offset = 0;
|
|
82
|
+
while (offset + 5 <= payload.length) {
|
|
83
|
+
const flags = payload[offset];
|
|
84
|
+
const view = new DataView(payload.buffer, payload.byteOffset + offset, payload.byteLength - offset);
|
|
85
|
+
const messageLength = view.getUint32(1, false);
|
|
86
|
+
const frameEnd = offset + 5 + messageLength;
|
|
87
|
+
if (frameEnd > payload.length)
|
|
88
|
+
return null;
|
|
89
|
+
if ((flags & 0b0000_0001) !== 0)
|
|
90
|
+
return null;
|
|
91
|
+
if ((flags & CONNECT_END_STREAM_FLAG) === 0) {
|
|
92
|
+
return payload.subarray(offset + 5, frameEnd);
|
|
93
|
+
}
|
|
94
|
+
offset = frameEnd;
|
|
95
|
+
}
|
|
96
|
+
return null;
|
|
97
|
+
}
|
|
49
98
|
function buildCursorHeaders(options, contentType, extra = {}) {
|
|
50
|
-
const headers = new Headers(
|
|
99
|
+
const headers = new Headers(buildCursorHeaderValues(options, contentType, extra));
|
|
100
|
+
return headers;
|
|
101
|
+
}
|
|
102
|
+
function buildCursorHeaderValues(options, contentType, extra = {}) {
|
|
103
|
+
return {
|
|
51
104
|
authorization: `Bearer ${options.accessToken}`,
|
|
52
105
|
"content-type": contentType,
|
|
53
106
|
"x-ghost-mode": "true",
|
|
54
107
|
"x-cursor-client-version": CURSOR_CLIENT_VERSION,
|
|
55
108
|
"x-cursor-client-type": "cli",
|
|
56
109
|
"x-request-id": crypto.randomUUID(),
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
headers.set(key, value);
|
|
60
|
-
}
|
|
61
|
-
return headers;
|
|
110
|
+
...extra,
|
|
111
|
+
};
|
|
62
112
|
}
|
|
63
113
|
function encodeVarint(value) {
|
|
64
114
|
if (!Number.isSafeInteger(value) || value < 0) {
|
|
@@ -130,6 +180,11 @@ async function createCursorSession(options) {
|
|
|
130
180
|
});
|
|
131
181
|
if (!response.ok || !response.body) {
|
|
132
182
|
const errorBody = await response.text().catch(() => "");
|
|
183
|
+
logPluginError("Cursor RunSSE request failed", {
|
|
184
|
+
requestId: options.requestId,
|
|
185
|
+
status: response.status,
|
|
186
|
+
responseBody: errorBody,
|
|
187
|
+
});
|
|
133
188
|
throw new Error(`RunSSE failed: ${response.status}${errorBody ? ` ${errorBody}` : ""}`);
|
|
134
189
|
}
|
|
135
190
|
const cbs = {
|
|
@@ -160,6 +215,12 @@ async function createCursorSession(options) {
|
|
|
160
215
|
});
|
|
161
216
|
if (!appendResponse.ok) {
|
|
162
217
|
const errorBody = await appendResponse.text().catch(() => "");
|
|
218
|
+
logPluginError("Cursor BidiAppend request failed", {
|
|
219
|
+
requestId: options.requestId,
|
|
220
|
+
appendSeqno: appendSeqno - 1,
|
|
221
|
+
status: appendResponse.status,
|
|
222
|
+
responseBody: errorBody,
|
|
223
|
+
});
|
|
163
224
|
throw new Error(`BidiAppend failed: ${appendResponse.status}${errorBody ? ` ${errorBody}` : ""}`);
|
|
164
225
|
}
|
|
165
226
|
await appendResponse.arrayBuffer().catch(() => undefined);
|
|
@@ -183,7 +244,11 @@ async function createCursorSession(options) {
|
|
|
183
244
|
}
|
|
184
245
|
}
|
|
185
246
|
}
|
|
186
|
-
catch {
|
|
247
|
+
catch (error) {
|
|
248
|
+
logPluginWarn("Cursor stream reader closed with error", {
|
|
249
|
+
requestId: options.requestId,
|
|
250
|
+
...errorDetails(error),
|
|
251
|
+
});
|
|
187
252
|
finish(alive ? 1 : closeCode);
|
|
188
253
|
}
|
|
189
254
|
})();
|
|
@@ -196,7 +261,11 @@ async function createCursorSession(options) {
|
|
|
196
261
|
return;
|
|
197
262
|
writeChain = writeChain
|
|
198
263
|
.then(() => append(data))
|
|
199
|
-
.catch(() => {
|
|
264
|
+
.catch((error) => {
|
|
265
|
+
logPluginError("Cursor stream append failed", {
|
|
266
|
+
requestId: options.requestId,
|
|
267
|
+
...errorDetails(error),
|
|
268
|
+
});
|
|
200
269
|
try {
|
|
201
270
|
abortController.abort();
|
|
202
271
|
}
|
|
@@ -236,6 +305,17 @@ async function createCursorSession(options) {
|
|
|
236
305
|
};
|
|
237
306
|
}
|
|
238
307
|
export async function callCursorUnaryRpc(options) {
|
|
308
|
+
const target = new URL(options.rpcPath, options.url ?? CURSOR_API_URL);
|
|
309
|
+
const transport = options.transport ?? "auto";
|
|
310
|
+
if (transport === "http2" || (transport === "auto" && target.protocol === "https:")) {
|
|
311
|
+
const http2Result = await callCursorUnaryRpcOverHttp2(options, target);
|
|
312
|
+
if (transport === "http2" || http2Result.timedOut || http2Result.exitCode !== 1) {
|
|
313
|
+
return http2Result;
|
|
314
|
+
}
|
|
315
|
+
}
|
|
316
|
+
return callCursorUnaryRpcOverFetch(options, target);
|
|
317
|
+
}
|
|
318
|
+
async function callCursorUnaryRpcOverFetch(options, target) {
|
|
239
319
|
let timedOut = false;
|
|
240
320
|
const timeoutMs = options.timeoutMs ?? 5_000;
|
|
241
321
|
const controller = new AbortController();
|
|
@@ -246,9 +326,13 @@ export async function callCursorUnaryRpc(options) {
|
|
|
246
326
|
}, timeoutMs)
|
|
247
327
|
: undefined;
|
|
248
328
|
try {
|
|
249
|
-
const response = await fetch(
|
|
329
|
+
const response = await fetch(target, {
|
|
250
330
|
method: "POST",
|
|
251
|
-
headers: buildCursorHeaders(options, "application/proto"
|
|
331
|
+
headers: buildCursorHeaders(options, "application/proto", {
|
|
332
|
+
accept: "application/proto, application/json",
|
|
333
|
+
"connect-protocol-version": CURSOR_CONNECT_PROTOCOL_VERSION,
|
|
334
|
+
"connect-timeout-ms": String(timeoutMs),
|
|
335
|
+
}),
|
|
252
336
|
body: toFetchBody(options.requestBody),
|
|
253
337
|
signal: controller.signal,
|
|
254
338
|
});
|
|
@@ -260,6 +344,12 @@ export async function callCursorUnaryRpc(options) {
|
|
|
260
344
|
};
|
|
261
345
|
}
|
|
262
346
|
catch {
|
|
347
|
+
logPluginError("Cursor unary fetch transport failed", {
|
|
348
|
+
rpcPath: options.rpcPath,
|
|
349
|
+
url: target.toString(),
|
|
350
|
+
timeoutMs,
|
|
351
|
+
timedOut,
|
|
352
|
+
});
|
|
263
353
|
return {
|
|
264
354
|
body: new Uint8Array(),
|
|
265
355
|
exitCode: timedOut ? 124 : 1,
|
|
@@ -271,6 +361,121 @@ export async function callCursorUnaryRpc(options) {
|
|
|
271
361
|
clearTimeout(timeout);
|
|
272
362
|
}
|
|
273
363
|
}
|
|
364
|
+
async function callCursorUnaryRpcOverHttp2(options, target) {
|
|
365
|
+
const timeoutMs = options.timeoutMs ?? 5_000;
|
|
366
|
+
const authority = `${target.protocol}//${target.host}`;
|
|
367
|
+
return new Promise((resolve) => {
|
|
368
|
+
let settled = false;
|
|
369
|
+
let timedOut = false;
|
|
370
|
+
let session;
|
|
371
|
+
let stream;
|
|
372
|
+
const finish = (result) => {
|
|
373
|
+
if (settled)
|
|
374
|
+
return;
|
|
375
|
+
settled = true;
|
|
376
|
+
if (timeout)
|
|
377
|
+
clearTimeout(timeout);
|
|
378
|
+
try {
|
|
379
|
+
stream?.close();
|
|
380
|
+
}
|
|
381
|
+
catch { }
|
|
382
|
+
try {
|
|
383
|
+
session?.close();
|
|
384
|
+
}
|
|
385
|
+
catch { }
|
|
386
|
+
resolve(result);
|
|
387
|
+
};
|
|
388
|
+
const timeout = timeoutMs > 0
|
|
389
|
+
? setTimeout(() => {
|
|
390
|
+
timedOut = true;
|
|
391
|
+
finish({
|
|
392
|
+
body: new Uint8Array(),
|
|
393
|
+
exitCode: 124,
|
|
394
|
+
timedOut: true,
|
|
395
|
+
});
|
|
396
|
+
}, timeoutMs)
|
|
397
|
+
: undefined;
|
|
398
|
+
try {
|
|
399
|
+
session = connectHttp2(authority);
|
|
400
|
+
session.once("error", (error) => {
|
|
401
|
+
logPluginError("Cursor unary HTTP/2 session failed", {
|
|
402
|
+
rpcPath: options.rpcPath,
|
|
403
|
+
url: target.toString(),
|
|
404
|
+
timedOut,
|
|
405
|
+
...errorDetails(error),
|
|
406
|
+
});
|
|
407
|
+
finish({
|
|
408
|
+
body: new Uint8Array(),
|
|
409
|
+
exitCode: timedOut ? 124 : 1,
|
|
410
|
+
timedOut,
|
|
411
|
+
});
|
|
412
|
+
});
|
|
413
|
+
const headers = {
|
|
414
|
+
":method": "POST",
|
|
415
|
+
":path": `${target.pathname}${target.search}`,
|
|
416
|
+
...buildCursorHeaderValues(options, "application/proto", {
|
|
417
|
+
accept: "application/proto, application/json",
|
|
418
|
+
"connect-protocol-version": CURSOR_CONNECT_PROTOCOL_VERSION,
|
|
419
|
+
"connect-timeout-ms": String(timeoutMs),
|
|
420
|
+
}),
|
|
421
|
+
};
|
|
422
|
+
stream = session.request(headers);
|
|
423
|
+
let statusCode = 0;
|
|
424
|
+
const chunks = [];
|
|
425
|
+
stream.once("response", (responseHeaders) => {
|
|
426
|
+
const statusHeader = responseHeaders[":status"];
|
|
427
|
+
statusCode = typeof statusHeader === "number"
|
|
428
|
+
? statusHeader
|
|
429
|
+
: Number(statusHeader ?? 0);
|
|
430
|
+
});
|
|
431
|
+
stream.on("data", (chunk) => {
|
|
432
|
+
chunks.push(Buffer.from(chunk));
|
|
433
|
+
});
|
|
434
|
+
stream.once("end", () => {
|
|
435
|
+
const body = new Uint8Array(Buffer.concat(chunks));
|
|
436
|
+
finish({
|
|
437
|
+
body,
|
|
438
|
+
exitCode: statusCode >= 200 && statusCode < 300 ? 0 : (statusCode || 1),
|
|
439
|
+
timedOut,
|
|
440
|
+
});
|
|
441
|
+
});
|
|
442
|
+
stream.once("error", (error) => {
|
|
443
|
+
logPluginError("Cursor unary HTTP/2 stream failed", {
|
|
444
|
+
rpcPath: options.rpcPath,
|
|
445
|
+
url: target.toString(),
|
|
446
|
+
timedOut,
|
|
447
|
+
...errorDetails(error),
|
|
448
|
+
});
|
|
449
|
+
finish({
|
|
450
|
+
body: new Uint8Array(),
|
|
451
|
+
exitCode: timedOut ? 124 : 1,
|
|
452
|
+
timedOut,
|
|
453
|
+
});
|
|
454
|
+
});
|
|
455
|
+
// Bun's node:http2 client currently breaks on end(Buffer.alloc(0)) against
|
|
456
|
+
// Cursor's HTTPS endpoint, but a header-only end() succeeds for empty unary bodies.
|
|
457
|
+
if (options.requestBody.length > 0) {
|
|
458
|
+
stream.end(Buffer.from(options.requestBody));
|
|
459
|
+
}
|
|
460
|
+
else {
|
|
461
|
+
stream.end();
|
|
462
|
+
}
|
|
463
|
+
}
|
|
464
|
+
catch (error) {
|
|
465
|
+
logPluginError("Cursor unary HTTP/2 setup failed", {
|
|
466
|
+
rpcPath: options.rpcPath,
|
|
467
|
+
url: target.toString(),
|
|
468
|
+
timedOut,
|
|
469
|
+
...errorDetails(error),
|
|
470
|
+
});
|
|
471
|
+
finish({
|
|
472
|
+
body: new Uint8Array(),
|
|
473
|
+
exitCode: timedOut ? 124 : 1,
|
|
474
|
+
timedOut,
|
|
475
|
+
});
|
|
476
|
+
}
|
|
477
|
+
});
|
|
478
|
+
}
|
|
274
479
|
let proxyServer;
|
|
275
480
|
let proxyPort;
|
|
276
481
|
let proxyAccessTokenProvider;
|
|
@@ -312,10 +517,19 @@ export async function startProxy(getAccessToken, models = []) {
|
|
|
312
517
|
throw new Error("Cursor proxy access token provider not configured");
|
|
313
518
|
}
|
|
314
519
|
const accessToken = await proxyAccessTokenProvider();
|
|
315
|
-
|
|
520
|
+
const sessionId = req.headers.get("x-opencode-session-id")
|
|
521
|
+
?? req.headers.get("x-session-id")
|
|
522
|
+
?? undefined;
|
|
523
|
+
const agentKey = req.headers.get("x-opencode-agent") ?? undefined;
|
|
524
|
+
return handleChatCompletion(body, accessToken, { sessionId, agentKey });
|
|
316
525
|
}
|
|
317
526
|
catch (err) {
|
|
318
527
|
const message = err instanceof Error ? err.message : String(err);
|
|
528
|
+
logPluginError("Cursor proxy request failed", {
|
|
529
|
+
path: url.pathname,
|
|
530
|
+
method: req.method,
|
|
531
|
+
...errorDetails(err),
|
|
532
|
+
});
|
|
319
533
|
return new Response(JSON.stringify({
|
|
320
534
|
error: { message, type: "server_error", code: "internal_error" },
|
|
321
535
|
}), { status: 500, headers: { "Content-Type": "application/json" } });
|
|
@@ -345,10 +559,26 @@ export function stopProxy() {
|
|
|
345
559
|
activeBridges.clear();
|
|
346
560
|
conversationStates.clear();
|
|
347
561
|
}
|
|
348
|
-
function handleChatCompletion(body, accessToken) {
|
|
349
|
-
const
|
|
562
|
+
function handleChatCompletion(body, accessToken, context = {}) {
|
|
563
|
+
const parsed = parseMessages(body.messages);
|
|
564
|
+
const { systemPrompt, userText, turns, toolResults, pendingAssistantSummary, completedTurnsFingerprint, } = parsed;
|
|
350
565
|
const modelId = body.model;
|
|
351
|
-
const
|
|
566
|
+
const normalizedAgentKey = normalizeAgentKey(context.agentKey);
|
|
567
|
+
const titleDetection = detectTitleRequest(body);
|
|
568
|
+
const isTitleAgent = titleDetection.matched;
|
|
569
|
+
if (isTitleAgent) {
|
|
570
|
+
const titleSourceText = buildTitleSourceText(userText, turns, pendingAssistantSummary, toolResults);
|
|
571
|
+
if (!titleSourceText) {
|
|
572
|
+
return new Response(JSON.stringify({
|
|
573
|
+
error: {
|
|
574
|
+
message: "No title source text found",
|
|
575
|
+
type: "invalid_request_error",
|
|
576
|
+
},
|
|
577
|
+
}), { status: 400, headers: { "Content-Type": "application/json" } });
|
|
578
|
+
}
|
|
579
|
+
return handleTitleGenerationRequest(titleSourceText, accessToken, modelId, body.stream !== false);
|
|
580
|
+
}
|
|
581
|
+
const tools = selectToolsForChoice(body.tools ?? [], body.tool_choice);
|
|
352
582
|
if (!userText && toolResults.length === 0) {
|
|
353
583
|
return new Response(JSON.stringify({
|
|
354
584
|
error: {
|
|
@@ -357,16 +587,24 @@ function handleChatCompletion(body, accessToken) {
|
|
|
357
587
|
},
|
|
358
588
|
}), { status: 400, headers: { "Content-Type": "application/json" } });
|
|
359
589
|
}
|
|
360
|
-
// bridgeKey:
|
|
590
|
+
// bridgeKey: session/agent-scoped, for active tool-call bridges
|
|
361
591
|
// convKey: model-independent, for conversation state that survives model switches
|
|
362
|
-
const bridgeKey = deriveBridgeKey(modelId, body.messages);
|
|
363
|
-
const convKey = deriveConversationKey(body.messages);
|
|
592
|
+
const bridgeKey = deriveBridgeKey(modelId, body.messages, context.sessionId, context.agentKey);
|
|
593
|
+
const convKey = deriveConversationKey(body.messages, context.sessionId, context.agentKey);
|
|
364
594
|
const activeBridge = activeBridges.get(bridgeKey);
|
|
365
595
|
if (activeBridge && toolResults.length > 0) {
|
|
366
596
|
activeBridges.delete(bridgeKey);
|
|
367
597
|
if (activeBridge.bridge.alive) {
|
|
598
|
+
if (activeBridge.modelId !== modelId) {
|
|
599
|
+
logPluginWarn("Resuming pending Cursor tool call on original model after model switch", {
|
|
600
|
+
requestedModelId: modelId,
|
|
601
|
+
resumedModelId: activeBridge.modelId,
|
|
602
|
+
convKey,
|
|
603
|
+
bridgeKey,
|
|
604
|
+
});
|
|
605
|
+
}
|
|
368
606
|
// Resume the live bridge with tool results
|
|
369
|
-
return handleToolResultResume(activeBridge, toolResults,
|
|
607
|
+
return handleToolResultResume(activeBridge, toolResults, bridgeKey, convKey);
|
|
370
608
|
}
|
|
371
609
|
// Bridge died (timeout, server disconnect, etc.).
|
|
372
610
|
// Clean up and fall through to start a fresh bridge.
|
|
@@ -381,28 +619,49 @@ function handleChatCompletion(body, accessToken) {
|
|
|
381
619
|
}
|
|
382
620
|
let stored = conversationStates.get(convKey);
|
|
383
621
|
if (!stored) {
|
|
384
|
-
stored =
|
|
385
|
-
conversationId: deterministicConversationId(convKey),
|
|
386
|
-
checkpoint: null,
|
|
387
|
-
blobStore: new Map(),
|
|
388
|
-
lastAccessMs: Date.now(),
|
|
389
|
-
};
|
|
622
|
+
stored = createStoredConversation();
|
|
390
623
|
conversationStates.set(convKey, stored);
|
|
391
624
|
}
|
|
625
|
+
const systemPromptHash = hashString(systemPrompt);
|
|
626
|
+
if (stored.checkpoint
|
|
627
|
+
&& (stored.systemPromptHash !== systemPromptHash
|
|
628
|
+
|| (turns.length > 0 && stored.completedTurnsFingerprint !== completedTurnsFingerprint))) {
|
|
629
|
+
resetStoredConversation(stored);
|
|
630
|
+
}
|
|
631
|
+
stored.systemPromptHash = systemPromptHash;
|
|
632
|
+
stored.completedTurnsFingerprint = completedTurnsFingerprint;
|
|
392
633
|
stored.lastAccessMs = Date.now();
|
|
393
634
|
evictStaleConversations();
|
|
394
635
|
// Build the request. When tool results are present but the bridge died,
|
|
395
636
|
// we must still include the last user text so Cursor has context.
|
|
396
637
|
const mcpTools = buildMcpToolDefinitions(tools);
|
|
397
|
-
const
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
638
|
+
const needsInitialHandoff = !stored.checkpoint && (turns.length > 0 || pendingAssistantSummary || toolResults.length > 0);
|
|
639
|
+
const replayTurns = needsInitialHandoff ? [] : turns;
|
|
640
|
+
let effectiveUserText = needsInitialHandoff
|
|
641
|
+
? buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults)
|
|
642
|
+
: toolResults.length > 0
|
|
643
|
+
? buildToolResumePrompt(userText, pendingAssistantSummary, toolResults)
|
|
644
|
+
: userText;
|
|
645
|
+
const payload = buildCursorRequest(modelId, systemPrompt, effectiveUserText, replayTurns, stored.conversationId, stored.checkpoint, stored.blobStore);
|
|
401
646
|
payload.mcpTools = mcpTools;
|
|
402
647
|
if (body.stream === false) {
|
|
403
|
-
return handleNonStreamingResponse(payload, accessToken, modelId, convKey
|
|
648
|
+
return handleNonStreamingResponse(payload, accessToken, modelId, convKey, {
|
|
649
|
+
systemPrompt,
|
|
650
|
+
systemPromptHash,
|
|
651
|
+
completedTurnsFingerprint,
|
|
652
|
+
turns,
|
|
653
|
+
userText,
|
|
654
|
+
agentKey: normalizedAgentKey,
|
|
655
|
+
});
|
|
404
656
|
}
|
|
405
|
-
return handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey
|
|
657
|
+
return handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, {
|
|
658
|
+
systemPrompt,
|
|
659
|
+
systemPromptHash,
|
|
660
|
+
completedTurnsFingerprint,
|
|
661
|
+
turns,
|
|
662
|
+
userText,
|
|
663
|
+
agentKey: normalizedAgentKey,
|
|
664
|
+
});
|
|
406
665
|
}
|
|
407
666
|
/** Normalize OpenAI message content to a plain string. */
|
|
408
667
|
function textContent(content) {
|
|
@@ -417,8 +676,6 @@ function textContent(content) {
|
|
|
417
676
|
}
|
|
418
677
|
function parseMessages(messages) {
|
|
419
678
|
let systemPrompt = "You are a helpful assistant.";
|
|
420
|
-
const pairs = [];
|
|
421
|
-
const toolResults = [];
|
|
422
679
|
// Collect system messages
|
|
423
680
|
const systemParts = messages
|
|
424
681
|
.filter((m) => m.role === "system")
|
|
@@ -426,40 +683,212 @@ function parseMessages(messages) {
|
|
|
426
683
|
if (systemParts.length > 0) {
|
|
427
684
|
systemPrompt = systemParts.join("\n");
|
|
428
685
|
}
|
|
429
|
-
// Separate tool results from conversation turns
|
|
430
686
|
const nonSystem = messages.filter((m) => m.role !== "system");
|
|
431
|
-
|
|
687
|
+
const parsedTurns = [];
|
|
688
|
+
let currentTurn;
|
|
432
689
|
for (const msg of nonSystem) {
|
|
433
|
-
if (msg.role === "
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
690
|
+
if (msg.role === "user") {
|
|
691
|
+
if (currentTurn)
|
|
692
|
+
parsedTurns.push(currentTurn);
|
|
693
|
+
currentTurn = {
|
|
694
|
+
userText: textContent(msg.content),
|
|
695
|
+
segments: [],
|
|
696
|
+
};
|
|
697
|
+
continue;
|
|
438
698
|
}
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
pairs.push({ userText: pendingUser, assistantText: "" });
|
|
442
|
-
}
|
|
443
|
-
pendingUser = textContent(msg.content);
|
|
699
|
+
if (!currentTurn) {
|
|
700
|
+
currentTurn = { userText: "", segments: [] };
|
|
444
701
|
}
|
|
445
|
-
|
|
446
|
-
// Skip assistant messages that are just tool_calls with no text
|
|
702
|
+
if (msg.role === "assistant") {
|
|
447
703
|
const text = textContent(msg.content);
|
|
448
|
-
if (
|
|
449
|
-
|
|
450
|
-
|
|
704
|
+
if (text) {
|
|
705
|
+
currentTurn.segments.push({ kind: "assistantText", text });
|
|
706
|
+
}
|
|
707
|
+
if (msg.tool_calls?.length) {
|
|
708
|
+
currentTurn.segments.push({
|
|
709
|
+
kind: "assistantToolCalls",
|
|
710
|
+
toolCalls: msg.tool_calls,
|
|
711
|
+
});
|
|
451
712
|
}
|
|
713
|
+
continue;
|
|
714
|
+
}
|
|
715
|
+
if (msg.role === "tool") {
|
|
716
|
+
currentTurn.segments.push({
|
|
717
|
+
kind: "toolResult",
|
|
718
|
+
result: {
|
|
719
|
+
toolCallId: msg.tool_call_id ?? "",
|
|
720
|
+
content: textContent(msg.content),
|
|
721
|
+
},
|
|
722
|
+
});
|
|
723
|
+
}
|
|
724
|
+
}
|
|
725
|
+
if (currentTurn)
|
|
726
|
+
parsedTurns.push(currentTurn);
|
|
727
|
+
let userText = "";
|
|
728
|
+
let toolResults = [];
|
|
729
|
+
let pendingAssistantSummary = "";
|
|
730
|
+
let completedTurnStates = parsedTurns;
|
|
731
|
+
const lastTurn = parsedTurns.at(-1);
|
|
732
|
+
if (lastTurn) {
|
|
733
|
+
const trailingSegments = splitTrailingToolResults(lastTurn.segments);
|
|
734
|
+
const hasAssistantSummary = trailingSegments.base.length > 0;
|
|
735
|
+
if (trailingSegments.trailing.length > 0 && hasAssistantSummary) {
|
|
736
|
+
completedTurnStates = parsedTurns.slice(0, -1);
|
|
737
|
+
userText = lastTurn.userText;
|
|
738
|
+
toolResults = trailingSegments.trailing.map((segment) => segment.result);
|
|
739
|
+
pendingAssistantSummary = summarizeTurnSegments(trailingSegments.base);
|
|
740
|
+
}
|
|
741
|
+
else if (lastTurn.userText && lastTurn.segments.length === 0) {
|
|
742
|
+
completedTurnStates = parsedTurns.slice(0, -1);
|
|
743
|
+
userText = lastTurn.userText;
|
|
744
|
+
}
|
|
745
|
+
}
|
|
746
|
+
const turns = completedTurnStates
|
|
747
|
+
.map((turn) => ({
|
|
748
|
+
userText: turn.userText,
|
|
749
|
+
assistantText: summarizeTurnSegments(turn.segments),
|
|
750
|
+
}))
|
|
751
|
+
.filter((turn) => turn.userText || turn.assistantText);
|
|
752
|
+
return {
|
|
753
|
+
systemPrompt,
|
|
754
|
+
userText,
|
|
755
|
+
turns,
|
|
756
|
+
toolResults,
|
|
757
|
+
pendingAssistantSummary,
|
|
758
|
+
completedTurnsFingerprint: buildCompletedTurnsFingerprint(systemPrompt, turns),
|
|
759
|
+
};
|
|
760
|
+
}
|
|
761
|
+
function splitTrailingToolResults(segments) {
|
|
762
|
+
let index = segments.length;
|
|
763
|
+
while (index > 0 && segments[index - 1]?.kind === "toolResult") {
|
|
764
|
+
index -= 1;
|
|
765
|
+
}
|
|
766
|
+
return {
|
|
767
|
+
base: segments.slice(0, index),
|
|
768
|
+
trailing: segments.slice(index).filter((segment) => segment.kind === "toolResult"),
|
|
769
|
+
};
|
|
770
|
+
}
|
|
771
|
+
function summarizeTurnSegments(segments) {
|
|
772
|
+
const parts = [];
|
|
773
|
+
for (const segment of segments) {
|
|
774
|
+
if (segment.kind === "assistantText") {
|
|
775
|
+
const trimmed = segment.text.trim();
|
|
776
|
+
if (trimmed)
|
|
777
|
+
parts.push(trimmed);
|
|
778
|
+
continue;
|
|
779
|
+
}
|
|
780
|
+
if (segment.kind === "assistantToolCalls") {
|
|
781
|
+
const summary = segment.toolCalls.map(formatToolCallSummary).join("\n\n");
|
|
782
|
+
if (summary)
|
|
783
|
+
parts.push(summary);
|
|
784
|
+
continue;
|
|
452
785
|
}
|
|
786
|
+
parts.push(formatToolResultSummary(segment.result));
|
|
787
|
+
}
|
|
788
|
+
return parts.join("\n\n").trim();
|
|
789
|
+
}
|
|
790
|
+
function formatToolCallSummary(call) {
|
|
791
|
+
const args = call.function.arguments?.trim();
|
|
792
|
+
return args
|
|
793
|
+
? `[assistant requested tool ${call.function.name} id=${call.id}]\n${args}`
|
|
794
|
+
: `[assistant requested tool ${call.function.name} id=${call.id}]`;
|
|
795
|
+
}
|
|
796
|
+
function formatToolResultSummary(result) {
|
|
797
|
+
const label = result.toolCallId
|
|
798
|
+
? `[tool result id=${result.toolCallId}]`
|
|
799
|
+
: "[tool result]";
|
|
800
|
+
const content = result.content.trim();
|
|
801
|
+
return content ? `${label}\n${content}` : label;
|
|
802
|
+
}
|
|
803
|
+
function buildCompletedTurnsFingerprint(systemPrompt, turns) {
|
|
804
|
+
return hashString(JSON.stringify({ systemPrompt, turns }));
|
|
805
|
+
}
|
|
806
|
+
function buildToolResumePrompt(userText, pendingAssistantSummary, toolResults) {
|
|
807
|
+
const parts = [userText.trim()];
|
|
808
|
+
if (pendingAssistantSummary.trim()) {
|
|
809
|
+
parts.push(`[previous assistant tool activity]\n${pendingAssistantSummary.trim()}`);
|
|
810
|
+
}
|
|
811
|
+
if (toolResults.length > 0) {
|
|
812
|
+
parts.push(toolResults.map(formatToolResultSummary).join("\n\n"));
|
|
813
|
+
}
|
|
814
|
+
return parts.filter(Boolean).join("\n\n");
|
|
815
|
+
}
|
|
816
|
+
function buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults) {
|
|
817
|
+
const transcript = turns.map((turn, index) => {
|
|
818
|
+
const sections = [`Turn ${index + 1}`];
|
|
819
|
+
if (turn.userText.trim())
|
|
820
|
+
sections.push(`User: ${turn.userText.trim()}`);
|
|
821
|
+
if (turn.assistantText.trim())
|
|
822
|
+
sections.push(`Assistant: ${turn.assistantText.trim()}`);
|
|
823
|
+
return sections.join("\n");
|
|
824
|
+
});
|
|
825
|
+
const inProgress = buildToolResumePrompt("", pendingAssistantSummary, toolResults).trim();
|
|
826
|
+
const history = [
|
|
827
|
+
...transcript,
|
|
828
|
+
...(inProgress ? [`In-progress turn\n${inProgress}`] : []),
|
|
829
|
+
].join("\n\n").trim();
|
|
830
|
+
if (!history)
|
|
831
|
+
return userText;
|
|
832
|
+
return [
|
|
833
|
+
"[OpenCode session handoff]",
|
|
834
|
+
"You are continuing an existing session that previously ran on another provider/model.",
|
|
835
|
+
"Treat the transcript below as prior conversation history before answering the latest user message.",
|
|
836
|
+
"",
|
|
837
|
+
"<previous-session-transcript>",
|
|
838
|
+
history,
|
|
839
|
+
"</previous-session-transcript>",
|
|
840
|
+
"",
|
|
841
|
+
"Latest user message:",
|
|
842
|
+
userText.trim(),
|
|
843
|
+
].filter(Boolean).join("\n");
|
|
844
|
+
}
|
|
845
|
+
function buildTitleSourceText(userText, turns, pendingAssistantSummary, toolResults) {
|
|
846
|
+
const history = turns
|
|
847
|
+
.map((turn) => [
|
|
848
|
+
isTitleRequestMarker(turn.userText) ? "" : turn.userText.trim(),
|
|
849
|
+
turn.assistantText.trim(),
|
|
850
|
+
].filter(Boolean).join("\n"))
|
|
851
|
+
.filter(Boolean);
|
|
852
|
+
if (pendingAssistantSummary.trim()) {
|
|
853
|
+
history.push(pendingAssistantSummary.trim());
|
|
854
|
+
}
|
|
855
|
+
if (toolResults.length > 0) {
|
|
856
|
+
history.push(toolResults.map(formatToolResultSummary).join("\n\n"));
|
|
857
|
+
}
|
|
858
|
+
if (userText.trim() && !isTitleRequestMarker(userText)) {
|
|
859
|
+
history.push(userText.trim());
|
|
860
|
+
}
|
|
861
|
+
return history.join("\n\n").trim();
|
|
862
|
+
}
|
|
863
|
+
function detectTitleRequest(body) {
|
|
864
|
+
if ((body.tools?.length ?? 0) > 0) {
|
|
865
|
+
return { matched: false, reason: "tools-present" };
|
|
866
|
+
}
|
|
867
|
+
const firstNonSystem = body.messages.find((message) => message.role !== "system");
|
|
868
|
+
if (firstNonSystem?.role === "user" && isTitleRequestMarker(textContent(firstNonSystem.content))) {
|
|
869
|
+
return { matched: true, reason: "opencode-title-marker" };
|
|
453
870
|
}
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
871
|
+
return { matched: false, reason: "no-title-marker" };
|
|
872
|
+
}
|
|
873
|
+
function isTitleRequestMarker(text) {
|
|
874
|
+
return text.trim() === OPENCODE_TITLE_REQUEST_MARKER;
|
|
875
|
+
}
|
|
876
|
+
/**
 * Filter the advertised tool list according to an OpenAI `tool_choice`.
 *
 * - empty tool list, or tool_choice "none"         -> []
 * - unset / "auto" / "required"                    -> all tools
 * - { type: "function", function: { name } }       -> only the named tool
 * - anything else                                  -> all tools (fall-through)
 *
 * @param {Array} tools - OpenAI tool definitions.
 * @param {string|object|null|undefined} toolChoice - OpenAI tool_choice value.
 * @returns {Array} The subset of tools the request may use.
 */
function selectToolsForChoice(tools, toolChoice) {
    if (tools.length === 0) {
        return [];
    }
    switch (toolChoice) {
        case undefined:
        case null:
        case "auto":
        case "required":
            return tools;
        case "none":
            return [];
        default:
            break;
    }
    if (typeof toolChoice === "object") {
        const wantsNamedFunction = toolChoice.type === "function" && typeof toolChoice.function?.name === "string";
        if (wantsNamedFunction) {
            return tools.filter((tool) => tool.function.name === toolChoice.function.name);
        }
    }
    return tools;
}
|
|
464
893
|
/** Convert OpenAI tool definitions to Cursor's MCP tool protobuf format. */
|
|
465
894
|
function buildMcpToolDefinitions(tools) {
|
|
@@ -602,6 +1031,12 @@ function makeHeartbeatBytes() {
|
|
|
602
1031
|
});
|
|
603
1032
|
return toBinary(AgentClientMessageSchema, heartbeat);
|
|
604
1033
|
}
|
|
1034
|
+
/**
 * Defer ending the bridge to a microtask so any handlers still running in the
 * current tick finish first; skips the call if the bridge is no longer alive.
 */
function scheduleBridgeEnd(bridge) {
    queueMicrotask(() => {
        if (!bridge.alive) {
            return;
        }
        bridge.end();
    });
}
|
|
605
1040
|
/**
|
|
606
1041
|
* Create a stateful parser for Connect protocol frames.
|
|
607
1042
|
* Handles buffering partial data across chunks.
|
|
@@ -744,6 +1179,12 @@ function handleKvMessage(kvMsg, blobStore, sendFrame) {
|
|
|
744
1179
|
const blobId = kvMsg.message.value.blobId;
|
|
745
1180
|
const blobIdKey = Buffer.from(blobId).toString("hex");
|
|
746
1181
|
const blobData = blobStore.get(blobIdKey);
|
|
1182
|
+
if (!blobData) {
|
|
1183
|
+
logPluginWarn("Cursor requested missing blob", {
|
|
1184
|
+
blobId: blobIdKey,
|
|
1185
|
+
knownBlobCount: blobStore.size,
|
|
1186
|
+
});
|
|
1187
|
+
}
|
|
747
1188
|
sendKvResponse(kvMsg, "getBlobResult", create(GetBlobResultSchema, blobData ? { blobData } : {}), sendFrame);
|
|
748
1189
|
}
|
|
749
1190
|
else if (kvCase === "setBlobArgs") {
|
|
@@ -908,42 +1349,151 @@ function sendExecResult(execMsg, messageCase, value, sendFrame) {
|
|
|
908
1349
|
});
|
|
909
1350
|
sendFrame(toBinary(AgentClientMessageSchema, clientMessage));
|
|
910
1351
|
}
|
|
911
|
-
/** Derive a key for active bridge lookup (tool-call continuations). */
function deriveBridgeKey(modelId, messages, sessionId, agentKey) {
    // 16 hex chars of SHA-256 over the chosen seed string.
    const shortHash = (seed) => createHash("sha256").update(seed).digest("hex").slice(0, 16);
    const agent = normalizeAgentKey(agentKey);
    if (sessionId) {
        // Session-scoped key: stable across requests within the same session/agent.
        return shortHash(`bridge:${sessionId}:${agent}`);
    }
    // Fallback: key on agent, model, and the first user message's leading text.
    const firstUser = messages.find((message) => message.role === "user");
    const seedText = firstUser ? textContent(firstUser.content) : "";
    return shortHash(`bridge:${agent}:${modelId}:${seedText.slice(0, 200)}`);
}
|
|
920
1369
|
/** Derive a key for conversation state. Model-independent so context survives model switches. */
function deriveConversationKey(messages, sessionId, agentKey) {
    // 16 hex chars of SHA-256 over the chosen seed string.
    const shortHash = (seed) => createHash("sha256").update(seed).digest("hex").slice(0, 16);
    if (sessionId) {
        // Session-scoped key: one conversation state per session/agent pair.
        return shortHash(`session:${sessionId}:${normalizeAgentKey(agentKey)}`);
    }
    // Fallback: fingerprint the full message history.
    return shortHash(`${normalizeAgentKey(agentKey)}:${buildConversationFingerprint(messages)}`);
}
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
1383
|
+
/**
 * Build a stable fingerprint of the message list: one line per message
 * combining role, text content, and tool-call linkage, joined by a separator.
 */
function buildConversationFingerprint(messages) {
    const entries = [];
    for (const message of messages) {
        const callIds = (message.tool_calls ?? []).map((call) => call.id).join(",");
        entries.push(`${message.role}:${textContent(message.content)}:${message.tool_call_id ?? ""}:${callIds}`);
    }
    return entries.join("\n---\n");
}
|
|
1389
|
+
/**
 * After a completed (non-tool-call) turn, refresh the cached conversation
 * state's system-prompt hash, completed-turns fingerprint, and access time.
 * No-op if no state is cached under `convKey`.
 */
function updateStoredConversationAfterCompletion(convKey, metadata, assistantText) {
    const stored = conversationStates.get(convKey);
    if (stored === undefined) {
        return;
    }
    // Append the finished turn only when there was actual user input.
    let nextTurns = metadata.turns;
    if (metadata.userText) {
        nextTurns = [...metadata.turns, { userText: metadata.userText, assistantText: assistantText.trim() }];
    }
    stored.systemPromptHash = metadata.systemPromptHash;
    stored.completedTurnsFingerprint = buildCompletedTurnsFingerprint(metadata.systemPrompt, nextTurns);
    stored.lastAccessMs = Date.now();
}
|
|
1400
|
+
/**
 * Build a fallback title from raw text: strip tag-like and bracketed spans
 * and non-word characters, then keep up to six title-cased words.
 * Returns "" when nothing usable remains.
 */
function deriveFallbackTitle(text) {
    let cleaned = text.replace(/<[^>]+>/g, " ");
    cleaned = cleaned.replace(/\[[^\]]+\]/g, " ");
    cleaned = cleaned.replace(/[^\p{L}\p{N}'’\-\s]+/gu, " ");
    cleaned = cleaned.replace(/\s+/g, " ").trim();
    if (cleaned === "") {
        return "";
    }
    const words = cleaned.split(" ").filter(Boolean).slice(0, 6);
    const titled = words.map(titleCaseWord).join(" ");
    return finalizeTitle(titled);
}
|
|
1412
|
+
/** Uppercase the first UTF-16 unit of a word, leaving the rest unchanged; falsy input is returned as-is. */
function titleCaseWord(word) {
    return word ? word[0].toUpperCase() + word.slice(1) : word;
}
|
|
1417
|
+
/**
 * Normalize a candidate title: drop a leading markdown heading marker and
 * trailing punctuation, collapse whitespace, and cap the result at 80 chars.
 */
function finalizeTitle(value) {
    const withoutHeading = value.replace(/^#{1,6}\s*/, "");
    const withoutTrailingPunct = withoutHeading.replace(/[.!?,:;]+$/g, "");
    const collapsed = withoutTrailingPunct.replace(/\s+/g, " ").trim();
    return collapsed.slice(0, 80).trim();
}
|
|
1426
|
+
/**
 * Build a complete (non-incremental) SSE response delivering `text` as a
 * single OpenAI-style chat.completion.chunk, followed by a stop chunk, a
 * usage chunk, and the [DONE] sentinel.
 *
 * @param {string} modelId - Model name echoed in each chunk.
 * @param {string} text - Full assistant text to deliver in one delta.
 * @param {object} usage - OpenAI-style usage object for the final chunk.
 * @returns {Response} SSE response with SSE_HEADERS.
 */
function createBufferedSSETextResponse(modelId, text, usage) {
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    // Shared envelope; spread order keeps JSON key order identical across chunks.
    const envelope = { id: completionId, object: "chat.completion.chunk", created, model: modelId };
    const chunks = [
        { ...envelope, choices: [{ index: 0, delta: { content: text }, finish_reason: null }] },
        { ...envelope, choices: [{ index: 0, delta: {}, finish_reason: "stop" }] },
        { ...envelope, choices: [], usage },
    ];
    const frames = chunks.map((chunk) => `data: ${JSON.stringify(chunk)}\n\n`);
    const payload = `${frames.join("")}data: [DONE]\n\n`;
    return new Response(payload, { headers: SSE_HEADERS });
}
|
|
1455
|
+
/**
 * Generate a session title via Cursor's NameAgent RPC and wrap it in an
 * OpenAI-compatible chat completion response.
 *
 * Falls back to a locally-derived title (or "Untitled Session") when the RPC
 * returns nothing usable.
 *
 * @param {string} sourceText - Conversation text to derive the title from.
 * @param {string} accessToken - Cursor access token.
 * @param {string} modelId - Model name echoed in the response.
 * @param {boolean} stream - When true, reply as a buffered SSE stream.
 * @returns {Promise<Response>} OpenAI-shaped completion response.
 * @throws {Error} On RPC timeout or non-zero HTTP status.
 */
async function handleTitleGenerationRequest(sourceText, accessToken, modelId, stream) {
    const request = create(NameAgentRequestSchema, { userMessage: sourceText });
    const response = await callCursorUnaryRpc({
        accessToken,
        rpcPath: "/agent.v1.AgentService/NameAgent",
        requestBody: toBinary(NameAgentRequestSchema, request),
        timeoutMs: 5_000,
    });
    if (response.timedOut) {
        throw new Error("Cursor title generation timed out");
    }
    if (response.exitCode !== 0) {
        throw new Error(`Cursor title generation failed with HTTP ${response.exitCode}`);
    }
    // Body may or may not be Connect-framed; fall back to the raw bytes.
    const payload = decodeConnectUnaryBody(response.body) ?? response.body;
    const decoded = fromBinary(NameAgentResponseSchema, payload);
    const title = finalizeTitle(decoded.name) || deriveFallbackTitle(sourceText) || "Untitled Session";
    const usage = { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
    if (stream) {
        return createBufferedSSETextResponse(modelId, title, usage);
    }
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    const body = {
        id: completionId,
        object: "chat.completion",
        created,
        model: modelId,
        choices: [
            {
                index: 0,
                message: { role: "assistant", content: title },
                finish_reason: "stop",
            },
        ],
        usage,
    };
    return new Response(JSON.stringify(body), { headers: { "Content-Type": "application/json" } });
}
|
|
945
1495
|
/** Create an SSE streaming Response that reads from a live bridge. */
|
|
946
|
-
function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey) {
|
|
1496
|
+
function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, metadata) {
|
|
947
1497
|
const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
|
|
948
1498
|
const created = Math.floor(Date.now() / 1000);
|
|
949
1499
|
const stream = new ReadableStream({
|
|
@@ -991,7 +1541,9 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
|
|
|
991
1541
|
totalTokens: 0,
|
|
992
1542
|
};
|
|
993
1543
|
const tagFilter = createThinkingTagFilter();
|
|
1544
|
+
let assistantText = metadata.assistantSeedText ?? "";
|
|
994
1545
|
let mcpExecReceived = false;
|
|
1546
|
+
let endStreamError = null;
|
|
995
1547
|
const processChunk = createConnectFrameParser((messageBytes) => {
|
|
996
1548
|
try {
|
|
997
1549
|
const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
|
|
@@ -1003,8 +1555,10 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
|
|
|
1003
1555
|
const { content, reasoning } = tagFilter.process(text);
|
|
1004
1556
|
if (reasoning)
|
|
1005
1557
|
sendSSE(makeChunk({ reasoning_content: reasoning }));
|
|
1006
|
-
if (content)
|
|
1558
|
+
if (content) {
|
|
1559
|
+
assistantText += content;
|
|
1007
1560
|
sendSSE(makeChunk({ content }));
|
|
1561
|
+
}
|
|
1008
1562
|
}
|
|
1009
1563
|
},
|
|
1010
1564
|
// onMcpExec — the model wants to execute a tool.
|
|
@@ -1014,8 +1568,21 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
|
|
|
1014
1568
|
const flushed = tagFilter.flush();
|
|
1015
1569
|
if (flushed.reasoning)
|
|
1016
1570
|
sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
|
|
1017
|
-
if (flushed.content)
|
|
1571
|
+
if (flushed.content) {
|
|
1572
|
+
assistantText += flushed.content;
|
|
1018
1573
|
sendSSE(makeChunk({ content: flushed.content }));
|
|
1574
|
+
}
|
|
1575
|
+
const assistantSeedText = [
|
|
1576
|
+
assistantText.trim(),
|
|
1577
|
+
formatToolCallSummary({
|
|
1578
|
+
id: exec.toolCallId,
|
|
1579
|
+
type: "function",
|
|
1580
|
+
function: {
|
|
1581
|
+
name: exec.toolName,
|
|
1582
|
+
arguments: exec.decodedArgs,
|
|
1583
|
+
},
|
|
1584
|
+
}),
|
|
1585
|
+
].filter(Boolean).join("\n\n");
|
|
1019
1586
|
const toolCallIndex = state.toolCallIndex++;
|
|
1020
1587
|
sendSSE(makeChunk({
|
|
1021
1588
|
tool_calls: [{
|
|
@@ -1035,6 +1602,11 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
|
|
|
1035
1602
|
blobStore,
|
|
1036
1603
|
mcpTools,
|
|
1037
1604
|
pendingExecs: state.pendingExecs,
|
|
1605
|
+
modelId,
|
|
1606
|
+
metadata: {
|
|
1607
|
+
...metadata,
|
|
1608
|
+
assistantSeedText,
|
|
1609
|
+
},
|
|
1038
1610
|
});
|
|
1039
1611
|
sendSSE(makeChunk({}, "tool_calls"));
|
|
1040
1612
|
sendDone();
|
|
@@ -1051,10 +1623,16 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
|
|
|
1051
1623
|
// Skip unparseable messages
|
|
1052
1624
|
}
|
|
1053
1625
|
}, (endStreamBytes) => {
|
|
1054
|
-
|
|
1055
|
-
if (
|
|
1056
|
-
|
|
1626
|
+
endStreamError = parseConnectEndStream(endStreamBytes);
|
|
1627
|
+
if (endStreamError) {
|
|
1628
|
+
logPluginError("Cursor stream returned Connect end-stream error", {
|
|
1629
|
+
modelId,
|
|
1630
|
+
bridgeKey,
|
|
1631
|
+
convKey,
|
|
1632
|
+
...errorDetails(endStreamError),
|
|
1633
|
+
});
|
|
1057
1634
|
}
|
|
1635
|
+
scheduleBridgeEnd(bridge);
|
|
1058
1636
|
});
|
|
1059
1637
|
bridge.onData(processChunk);
|
|
1060
1638
|
bridge.onClose((code) => {
|
|
@@ -1065,27 +1643,39 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
|
|
|
1065
1643
|
stored.blobStore.set(k, v);
|
|
1066
1644
|
stored.lastAccessMs = Date.now();
|
|
1067
1645
|
}
|
|
1646
|
+
if (endStreamError) {
|
|
1647
|
+
activeBridges.delete(bridgeKey);
|
|
1648
|
+
if (!closed) {
|
|
1649
|
+
closed = true;
|
|
1650
|
+
controller.error(endStreamError);
|
|
1651
|
+
}
|
|
1652
|
+
return;
|
|
1653
|
+
}
|
|
1068
1654
|
if (!mcpExecReceived) {
|
|
1069
1655
|
const flushed = tagFilter.flush();
|
|
1070
1656
|
if (flushed.reasoning)
|
|
1071
1657
|
sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
|
|
1072
|
-
if (flushed.content)
|
|
1658
|
+
if (flushed.content) {
|
|
1659
|
+
assistantText += flushed.content;
|
|
1073
1660
|
sendSSE(makeChunk({ content: flushed.content }));
|
|
1661
|
+
}
|
|
1662
|
+
updateStoredConversationAfterCompletion(convKey, metadata, assistantText);
|
|
1074
1663
|
sendSSE(makeChunk({}, "stop"));
|
|
1075
1664
|
sendSSE(makeUsageChunk());
|
|
1076
1665
|
sendDone();
|
|
1077
1666
|
closeController();
|
|
1078
1667
|
}
|
|
1079
|
-
else
|
|
1080
|
-
// Bridge died while tool calls are pending (timeout, crash, etc.).
|
|
1081
|
-
// Close the SSE stream so the client doesn't hang forever.
|
|
1082
|
-
sendSSE(makeChunk({ content: "\n[Error: bridge connection lost]" }));
|
|
1083
|
-
sendSSE(makeChunk({}, "stop"));
|
|
1084
|
-
sendSSE(makeUsageChunk());
|
|
1085
|
-
sendDone();
|
|
1086
|
-
closeController();
|
|
1087
|
-
// Remove stale entry so the next request doesn't try to resume it.
|
|
1668
|
+
else {
|
|
1088
1669
|
activeBridges.delete(bridgeKey);
|
|
1670
|
+
if (code !== 0 && !closed) {
|
|
1671
|
+
// Bridge died while tool calls are pending (timeout, crash, etc.).
|
|
1672
|
+
// Close the SSE stream so the client doesn't hang forever.
|
|
1673
|
+
sendSSE(makeChunk({ content: "\n[Error: bridge connection lost]" }));
|
|
1674
|
+
sendSSE(makeChunk({}, "stop"));
|
|
1675
|
+
sendSSE(makeUsageChunk());
|
|
1676
|
+
sendDone();
|
|
1677
|
+
closeController();
|
|
1678
|
+
}
|
|
1089
1679
|
}
|
|
1090
1680
|
});
|
|
1091
1681
|
},
|
|
@@ -1103,13 +1693,20 @@ async function startBridge(accessToken, requestBytes) {
|
|
|
1103
1693
|
const heartbeatTimer = setInterval(() => bridge.write(makeHeartbeatBytes()), 5_000);
|
|
1104
1694
|
return { bridge, heartbeatTimer };
|
|
1105
1695
|
}
|
|
1106
|
-
async function handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey) {
|
|
1696
|
+
async function handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, metadata) {
|
|
1107
1697
|
const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
|
|
1108
|
-
return createBridgeStreamResponse(bridge, heartbeatTimer, payload.blobStore, payload.mcpTools, modelId, bridgeKey, convKey);
|
|
1698
|
+
return createBridgeStreamResponse(bridge, heartbeatTimer, payload.blobStore, payload.mcpTools, modelId, bridgeKey, convKey, metadata);
|
|
1109
1699
|
}
|
|
1110
1700
|
/** Resume a paused bridge by sending MCP results and continuing to stream. */
|
|
1111
|
-
function handleToolResultResume(active, toolResults,
|
|
1112
|
-
const { bridge, heartbeatTimer, blobStore, mcpTools, pendingExecs } = active;
|
|
1701
|
+
function handleToolResultResume(active, toolResults, bridgeKey, convKey) {
|
|
1702
|
+
const { bridge, heartbeatTimer, blobStore, mcpTools, pendingExecs, modelId, metadata } = active;
|
|
1703
|
+
const resumeMetadata = {
|
|
1704
|
+
...metadata,
|
|
1705
|
+
assistantSeedText: [
|
|
1706
|
+
metadata.assistantSeedText?.trim() ?? "",
|
|
1707
|
+
toolResults.map(formatToolResultSummary).join("\n\n"),
|
|
1708
|
+
].filter(Boolean).join("\n\n"),
|
|
1709
|
+
};
|
|
1113
1710
|
// Send mcpResult for each pending exec that has a matching tool result
|
|
1114
1711
|
for (const exec of pendingExecs) {
|
|
1115
1712
|
const result = toolResults.find((r) => r.toolCallId === exec.toolCallId);
|
|
@@ -1149,12 +1746,15 @@ function handleToolResultResume(active, toolResults, modelId, bridgeKey, convKey
|
|
|
1149
1746
|
});
|
|
1150
1747
|
bridge.write(toBinary(AgentClientMessageSchema, clientMessage));
|
|
1151
1748
|
}
|
|
1152
|
-
return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey);
|
|
1749
|
+
return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, resumeMetadata);
|
|
1153
1750
|
}
|
|
1154
|
-
async function handleNonStreamingResponse(payload, accessToken, modelId, convKey) {
|
|
1751
|
+
async function handleNonStreamingResponse(payload, accessToken, modelId, convKey, metadata) {
|
|
1155
1752
|
const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
|
|
1156
1753
|
const created = Math.floor(Date.now() / 1000);
|
|
1157
|
-
const { text, usage } = await collectFullResponse(payload, accessToken, convKey);
|
|
1754
|
+
const { text, usage, finishReason, toolCalls } = await collectFullResponse(payload, accessToken, modelId, convKey, metadata);
|
|
1755
|
+
const message = finishReason === "tool_calls"
|
|
1756
|
+
? { role: "assistant", content: null, tool_calls: toolCalls }
|
|
1757
|
+
: { role: "assistant", content: text };
|
|
1158
1758
|
return new Response(JSON.stringify({
|
|
1159
1759
|
id: completionId,
|
|
1160
1760
|
object: "chat.completion",
|
|
@@ -1163,16 +1763,18 @@ async function handleNonStreamingResponse(payload, accessToken, modelId, convKey
|
|
|
1163
1763
|
choices: [
|
|
1164
1764
|
{
|
|
1165
1765
|
index: 0,
|
|
1166
|
-
message
|
|
1167
|
-
finish_reason:
|
|
1766
|
+
message,
|
|
1767
|
+
finish_reason: finishReason,
|
|
1168
1768
|
},
|
|
1169
1769
|
],
|
|
1170
1770
|
usage,
|
|
1171
1771
|
}), { headers: { "Content-Type": "application/json" } });
|
|
1172
1772
|
}
|
|
1173
|
-
async function collectFullResponse(payload, accessToken, convKey) {
|
|
1174
|
-
const { promise, resolve } = Promise.withResolvers();
|
|
1773
|
+
async function collectFullResponse(payload, accessToken, modelId, convKey, metadata) {
|
|
1774
|
+
const { promise, resolve, reject } = Promise.withResolvers();
|
|
1175
1775
|
let fullText = "";
|
|
1776
|
+
let endStreamError = null;
|
|
1777
|
+
const pendingToolCalls = [];
|
|
1176
1778
|
const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
|
|
1177
1779
|
const state = {
|
|
1178
1780
|
toolCallIndex: 0,
|
|
@@ -1189,7 +1791,17 @@ async function collectFullResponse(payload, accessToken, convKey) {
|
|
|
1189
1791
|
return;
|
|
1190
1792
|
const { content } = tagFilter.process(text);
|
|
1191
1793
|
fullText += content;
|
|
1192
|
-
}, () => {
|
|
1794
|
+
}, (exec) => {
|
|
1795
|
+
pendingToolCalls.push({
|
|
1796
|
+
id: exec.toolCallId,
|
|
1797
|
+
type: "function",
|
|
1798
|
+
function: {
|
|
1799
|
+
name: exec.toolName,
|
|
1800
|
+
arguments: exec.decodedArgs,
|
|
1801
|
+
},
|
|
1802
|
+
});
|
|
1803
|
+
scheduleBridgeEnd(bridge);
|
|
1804
|
+
}, (checkpointBytes) => {
|
|
1193
1805
|
const stored = conversationStates.get(convKey);
|
|
1194
1806
|
if (stored) {
|
|
1195
1807
|
stored.checkpoint = checkpointBytes;
|
|
@@ -1200,7 +1812,17 @@ async function collectFullResponse(payload, accessToken, convKey) {
|
|
|
1200
1812
|
catch {
|
|
1201
1813
|
// Skip
|
|
1202
1814
|
}
|
|
1203
|
-
}, () => {
|
|
1815
|
+
}, (endStreamBytes) => {
|
|
1816
|
+
endStreamError = parseConnectEndStream(endStreamBytes);
|
|
1817
|
+
if (endStreamError) {
|
|
1818
|
+
logPluginError("Cursor non-streaming response returned Connect end-stream error", {
|
|
1819
|
+
modelId,
|
|
1820
|
+
convKey,
|
|
1821
|
+
...errorDetails(endStreamError),
|
|
1822
|
+
});
|
|
1823
|
+
}
|
|
1824
|
+
scheduleBridgeEnd(bridge);
|
|
1825
|
+
}));
|
|
1204
1826
|
bridge.onClose(() => {
|
|
1205
1827
|
clearInterval(heartbeatTimer);
|
|
1206
1828
|
const stored = conversationStates.get(convKey);
|
|
@@ -1211,10 +1833,19 @@ async function collectFullResponse(payload, accessToken, convKey) {
|
|
|
1211
1833
|
}
|
|
1212
1834
|
const flushed = tagFilter.flush();
|
|
1213
1835
|
fullText += flushed.content;
|
|
1836
|
+
if (endStreamError) {
|
|
1837
|
+
reject(endStreamError);
|
|
1838
|
+
return;
|
|
1839
|
+
}
|
|
1840
|
+
if (pendingToolCalls.length === 0) {
|
|
1841
|
+
updateStoredConversationAfterCompletion(convKey, metadata, fullText);
|
|
1842
|
+
}
|
|
1214
1843
|
const usage = computeUsage(state);
|
|
1215
1844
|
resolve({
|
|
1216
1845
|
text: fullText,
|
|
1217
1846
|
usage,
|
|
1847
|
+
finishReason: pendingToolCalls.length > 0 ? "tool_calls" : "stop",
|
|
1848
|
+
toolCalls: pendingToolCalls,
|
|
1218
1849
|
});
|
|
1219
1850
|
});
|
|
1220
1851
|
return promise;
|