@playwo/opencode-cursor-oauth 0.0.0-dev.b8e6dd72a8b6 → 0.0.0-dev.c1f285cb4d7e

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/proxy.js CHANGED
@@ -14,9 +14,10 @@
14
14
  */
15
15
  import { create, fromBinary, fromJson, toBinary, toJson } from "@bufbuild/protobuf";
16
16
  import { ValueSchema } from "@bufbuild/protobuf/wkt";
17
- import { AgentClientMessageSchema, AgentRunRequestSchema, AgentServerMessageSchema, BidiRequestIdSchema, ClientHeartbeatSchema, ConversationActionSchema, ConversationStateStructureSchema, ConversationStepSchema, AgentConversationTurnStructureSchema, ConversationTurnStructureSchema, AssistantMessageSchema, BackgroundShellSpawnResultSchema, DeleteResultSchema, DeleteRejectedSchema, DiagnosticsResultSchema, ExecClientMessageSchema, FetchErrorSchema, FetchResultSchema, GetBlobResultSchema, GrepErrorSchema, GrepResultSchema, KvClientMessageSchema, LsRejectedSchema, LsResultSchema, McpErrorSchema, McpResultSchema, McpSuccessSchema, McpTextContentSchema, McpToolDefinitionSchema, McpToolResultContentItemSchema, ModelDetailsSchema, ReadRejectedSchema, ReadResultSchema, RequestContextResultSchema, RequestContextSchema, RequestContextSuccessSchema, SetBlobResultSchema, ShellRejectedSchema, ShellResultSchema, UserMessageActionSchema, UserMessageSchema, WriteRejectedSchema, WriteResultSchema, WriteShellStdinErrorSchema, WriteShellStdinResultSchema, } from "./proto/agent_pb";
17
+ import { AgentClientMessageSchema, AgentRunRequestSchema, AgentServerMessageSchema, BidiRequestIdSchema, ClientHeartbeatSchema, ConversationActionSchema, ConversationStateStructureSchema, ConversationStepSchema, AgentConversationTurnStructureSchema, ConversationTurnStructureSchema, AssistantMessageSchema, BackgroundShellSpawnResultSchema, DeleteResultSchema, DeleteRejectedSchema, DiagnosticsResultSchema, ExecClientMessageSchema, FetchErrorSchema, FetchResultSchema, GetBlobResultSchema, GrepErrorSchema, GrepResultSchema, KvClientMessageSchema, LsRejectedSchema, LsResultSchema, McpErrorSchema, McpResultSchema, McpSuccessSchema, McpTextContentSchema, McpToolDefinitionSchema, McpToolResultContentItemSchema, ModelDetailsSchema, NameAgentRequestSchema, NameAgentResponseSchema, ReadRejectedSchema, ReadResultSchema, RequestContextResultSchema, RequestContextSchema, RequestContextSuccessSchema, SetBlobResultSchema, ShellRejectedSchema, ShellResultSchema, UserMessageActionSchema, UserMessageSchema, WriteRejectedSchema, WriteResultSchema, WriteShellStdinErrorSchema, WriteShellStdinResultSchema, } from "./proto/agent_pb";
18
18
  import { createHash } from "node:crypto";
19
19
  import { connect as connectHttp2 } from "node:http2";
20
+ import { errorDetails, logPluginError, logPluginWarn } from "./logger";
20
21
  const CURSOR_API_URL = process.env.CURSOR_API_URL ?? "https://api2.cursor.sh";
21
22
  const CURSOR_CLIENT_VERSION = "cli-2026.01.09-231024f";
22
23
  const CURSOR_CONNECT_PROTOCOL_VERSION = "1";
@@ -40,6 +41,31 @@ function evictStaleConversations() {
40
41
  }
41
42
  }
42
43
  }
44
/** Collapse a missing or blank agent key into the shared "default" bucket. */
function normalizeAgentKey(agentKey) {
    if (agentKey == null)
        return "default";
    const key = agentKey.trim();
    if (key.length === 0)
        return "default";
    return key;
}
48
/** Hex-encoded SHA-256 of a string; used for cache keys and fingerprints. */
function hashString(value) {
    const hasher = createHash("sha256");
    hasher.update(value);
    return hasher.digest("hex");
}
51
+ function createStoredConversation() {
52
+ return {
53
+ conversationId: crypto.randomUUID(),
54
+ checkpoint: null,
55
+ blobStore: new Map(),
56
+ lastAccessMs: Date.now(),
57
+ systemPromptHash: "",
58
+ completedTurnsFingerprint: "",
59
+ };
60
+ }
61
+ function resetStoredConversation(stored) {
62
+ stored.conversationId = crypto.randomUUID();
63
+ stored.checkpoint = null;
64
+ stored.blobStore = new Map();
65
+ stored.lastAccessMs = Date.now();
66
+ stored.systemPromptHash = "";
67
+ stored.completedTurnsFingerprint = "";
68
+ }
43
69
  /** Connect protocol frame: [1-byte flags][4-byte BE length][payload] */
44
70
  function frameConnectMessage(data, flags = 0) {
45
71
  const frame = Buffer.alloc(5 + data.length);
@@ -48,6 +74,26 @@ function frameConnectMessage(data, flags = 0) {
48
74
  frame.set(data, 5);
49
75
  return frame;
50
76
  }
77
/**
 * Extract the first uncompressed message payload from a Connect unary
 * response body, which is a sequence of [1-byte flags][4-byte BE length]
 * envelopes. Returns null for truncated input, compressed frames, or when
 * only end-stream frames are present.
 */
function decodeConnectUnaryBody(payload) {
    const COMPRESSED_FLAG = 0b0000_0001;
    let cursor = 0;
    while (cursor + 5 <= payload.length) {
        const flags = payload[cursor];
        const header = new DataView(payload.buffer, payload.byteOffset + cursor, payload.byteLength - cursor);
        const bodyLength = header.getUint32(1, false);
        const frameEnd = cursor + 5 + bodyLength;
        if (frameEnd > payload.length)
            return null;
        if ((flags & COMPRESSED_FLAG) !== 0)
            return null;
        if ((flags & CONNECT_END_STREAM_FLAG) !== 0) {
            // Skip end-stream (trailer) frames and keep scanning.
            cursor = frameEnd;
            continue;
        }
        return payload.subarray(cursor + 5, frameEnd);
    }
    return null;
}
51
97
  function buildCursorHeaders(options, contentType, extra = {}) {
52
98
  const headers = new Headers(buildCursorHeaderValues(options, contentType, extra));
53
99
  return headers;
@@ -133,6 +179,11 @@ async function createCursorSession(options) {
133
179
  });
134
180
  if (!response.ok || !response.body) {
135
181
  const errorBody = await response.text().catch(() => "");
182
+ logPluginError("Cursor RunSSE request failed", {
183
+ requestId: options.requestId,
184
+ status: response.status,
185
+ responseBody: errorBody,
186
+ });
136
187
  throw new Error(`RunSSE failed: ${response.status}${errorBody ? ` ${errorBody}` : ""}`);
137
188
  }
138
189
  const cbs = {
@@ -163,6 +214,12 @@ async function createCursorSession(options) {
163
214
  });
164
215
  if (!appendResponse.ok) {
165
216
  const errorBody = await appendResponse.text().catch(() => "");
217
+ logPluginError("Cursor BidiAppend request failed", {
218
+ requestId: options.requestId,
219
+ appendSeqno: appendSeqno - 1,
220
+ status: appendResponse.status,
221
+ responseBody: errorBody,
222
+ });
166
223
  throw new Error(`BidiAppend failed: ${appendResponse.status}${errorBody ? ` ${errorBody}` : ""}`);
167
224
  }
168
225
  await appendResponse.arrayBuffer().catch(() => undefined);
@@ -186,7 +243,11 @@ async function createCursorSession(options) {
186
243
  }
187
244
  }
188
245
  }
189
- catch {
246
+ catch (error) {
247
+ logPluginWarn("Cursor stream reader closed with error", {
248
+ requestId: options.requestId,
249
+ ...errorDetails(error),
250
+ });
190
251
  finish(alive ? 1 : closeCode);
191
252
  }
192
253
  })();
@@ -199,7 +260,11 @@ async function createCursorSession(options) {
199
260
  return;
200
261
  writeChain = writeChain
201
262
  .then(() => append(data))
202
- .catch(() => {
263
+ .catch((error) => {
264
+ logPluginError("Cursor stream append failed", {
265
+ requestId: options.requestId,
266
+ ...errorDetails(error),
267
+ });
203
268
  try {
204
269
  abortController.abort();
205
270
  }
@@ -278,6 +343,12 @@ async function callCursorUnaryRpcOverFetch(options, target) {
278
343
  };
279
344
  }
280
345
  catch {
346
+ logPluginError("Cursor unary fetch transport failed", {
347
+ rpcPath: options.rpcPath,
348
+ url: target.toString(),
349
+ timeoutMs,
350
+ timedOut,
351
+ });
281
352
  return {
282
353
  body: new Uint8Array(),
283
354
  exitCode: timedOut ? 124 : 1,
@@ -325,7 +396,13 @@ async function callCursorUnaryRpcOverHttp2(options, target) {
325
396
  : undefined;
326
397
  try {
327
398
  session = connectHttp2(authority);
328
- session.once("error", () => {
399
+ session.once("error", (error) => {
400
+ logPluginError("Cursor unary HTTP/2 session failed", {
401
+ rpcPath: options.rpcPath,
402
+ url: target.toString(),
403
+ timedOut,
404
+ ...errorDetails(error),
405
+ });
329
406
  finish({
330
407
  body: new Uint8Array(),
331
408
  exitCode: timedOut ? 124 : 1,
@@ -361,16 +438,35 @@ async function callCursorUnaryRpcOverHttp2(options, target) {
361
438
  timedOut,
362
439
  });
363
440
  });
364
- stream.once("error", () => {
441
+ stream.once("error", (error) => {
442
+ logPluginError("Cursor unary HTTP/2 stream failed", {
443
+ rpcPath: options.rpcPath,
444
+ url: target.toString(),
445
+ timedOut,
446
+ ...errorDetails(error),
447
+ });
365
448
  finish({
366
449
  body: new Uint8Array(),
367
450
  exitCode: timedOut ? 124 : 1,
368
451
  timedOut,
369
452
  });
370
453
  });
371
- stream.end(Buffer.from(options.requestBody));
454
+ // Bun's node:http2 client currently breaks on end(Buffer.alloc(0)) against
455
+ // Cursor's HTTPS endpoint, but a header-only end() succeeds for empty unary bodies.
456
+ if (options.requestBody.length > 0) {
457
+ stream.end(Buffer.from(options.requestBody));
458
+ }
459
+ else {
460
+ stream.end();
461
+ }
372
462
  }
373
- catch {
463
+ catch (error) {
464
+ logPluginError("Cursor unary HTTP/2 setup failed", {
465
+ rpcPath: options.rpcPath,
466
+ url: target.toString(),
467
+ timedOut,
468
+ ...errorDetails(error),
469
+ });
374
470
  finish({
375
471
  body: new Uint8Array(),
376
472
  exitCode: timedOut ? 124 : 1,
@@ -420,10 +516,19 @@ export async function startProxy(getAccessToken, models = []) {
420
516
  throw new Error("Cursor proxy access token provider not configured");
421
517
  }
422
518
  const accessToken = await proxyAccessTokenProvider();
423
- return handleChatCompletion(body, accessToken);
519
+ const sessionId = req.headers.get("x-opencode-session-id")
520
+ ?? req.headers.get("x-session-id")
521
+ ?? undefined;
522
+ const agentKey = req.headers.get("x-opencode-agent") ?? undefined;
523
+ return handleChatCompletion(body, accessToken, { sessionId, agentKey });
424
524
  }
425
525
  catch (err) {
426
526
  const message = err instanceof Error ? err.message : String(err);
527
+ logPluginError("Cursor proxy request failed", {
528
+ path: url.pathname,
529
+ method: req.method,
530
+ ...errorDetails(err),
531
+ });
427
532
  return new Response(JSON.stringify({
428
533
  error: { message, type: "server_error", code: "internal_error" },
429
534
  }), { status: 500, headers: { "Content-Type": "application/json" } });
@@ -453,10 +558,25 @@ export function stopProxy() {
453
558
  activeBridges.clear();
454
559
  conversationStates.clear();
455
560
  }
456
- function handleChatCompletion(body, accessToken) {
457
- const { systemPrompt, userText, turns, toolResults } = parseMessages(body.messages);
561
+ function handleChatCompletion(body, accessToken, context = {}) {
562
+ const parsed = parseMessages(body.messages);
563
+ const { systemPrompt, userText, turns, toolResults, pendingAssistantSummary, completedTurnsFingerprint, } = parsed;
458
564
  const modelId = body.model;
459
- const tools = body.tools ?? [];
565
+ const normalizedAgentKey = normalizeAgentKey(context.agentKey);
566
+ const isTitleAgent = normalizedAgentKey === "title";
567
+ if (isTitleAgent) {
568
+ const titleSourceText = buildTitleSourceText(userText, turns, pendingAssistantSummary, toolResults);
569
+ if (!titleSourceText) {
570
+ return new Response(JSON.stringify({
571
+ error: {
572
+ message: "No title source text found",
573
+ type: "invalid_request_error",
574
+ },
575
+ }), { status: 400, headers: { "Content-Type": "application/json" } });
576
+ }
577
+ return handleTitleGenerationRequest(titleSourceText, accessToken, modelId, body.stream !== false);
578
+ }
579
+ const tools = selectToolsForChoice(body.tools ?? [], body.tool_choice);
460
580
  if (!userText && toolResults.length === 0) {
461
581
  return new Response(JSON.stringify({
462
582
  error: {
@@ -465,16 +585,24 @@ function handleChatCompletion(body, accessToken) {
465
585
  },
466
586
  }), { status: 400, headers: { "Content-Type": "application/json" } });
467
587
  }
468
- // bridgeKey: model-specific, for active tool-call bridges
588
+ // bridgeKey: session/agent-scoped, for active tool-call bridges
469
589
  // convKey: model-independent, for conversation state that survives model switches
470
- const bridgeKey = deriveBridgeKey(modelId, body.messages);
471
- const convKey = deriveConversationKey(body.messages);
590
+ const bridgeKey = deriveBridgeKey(modelId, body.messages, context.sessionId, context.agentKey);
591
+ const convKey = deriveConversationKey(body.messages, context.sessionId, context.agentKey);
472
592
  const activeBridge = activeBridges.get(bridgeKey);
473
593
  if (activeBridge && toolResults.length > 0) {
474
594
  activeBridges.delete(bridgeKey);
475
595
  if (activeBridge.bridge.alive) {
596
+ if (activeBridge.modelId !== modelId) {
597
+ logPluginWarn("Resuming pending Cursor tool call on original model after model switch", {
598
+ requestedModelId: modelId,
599
+ resumedModelId: activeBridge.modelId,
600
+ convKey,
601
+ bridgeKey,
602
+ });
603
+ }
476
604
  // Resume the live bridge with tool results
477
- return handleToolResultResume(activeBridge, toolResults, modelId, bridgeKey, convKey);
605
+ return handleToolResultResume(activeBridge, toolResults, bridgeKey, convKey);
478
606
  }
479
607
  // Bridge died (timeout, server disconnect, etc.).
480
608
  // Clean up and fall through to start a fresh bridge.
@@ -489,28 +617,49 @@ function handleChatCompletion(body, accessToken) {
489
617
  }
490
618
  let stored = conversationStates.get(convKey);
491
619
  if (!stored) {
492
- stored = {
493
- conversationId: deterministicConversationId(convKey),
494
- checkpoint: null,
495
- blobStore: new Map(),
496
- lastAccessMs: Date.now(),
497
- };
620
+ stored = createStoredConversation();
498
621
  conversationStates.set(convKey, stored);
499
622
  }
623
+ const systemPromptHash = hashString(systemPrompt);
624
+ if (stored.checkpoint
625
+ && (stored.systemPromptHash !== systemPromptHash
626
+ || (turns.length > 0 && stored.completedTurnsFingerprint !== completedTurnsFingerprint))) {
627
+ resetStoredConversation(stored);
628
+ }
629
+ stored.systemPromptHash = systemPromptHash;
630
+ stored.completedTurnsFingerprint = completedTurnsFingerprint;
500
631
  stored.lastAccessMs = Date.now();
501
632
  evictStaleConversations();
502
633
  // Build the request. When tool results are present but the bridge died,
503
634
  // we must still include the last user text so Cursor has context.
504
635
  const mcpTools = buildMcpToolDefinitions(tools);
505
- const effectiveUserText = userText || (toolResults.length > 0
506
- ? toolResults.map((r) => r.content).join("\n")
507
- : "");
508
- const payload = buildCursorRequest(modelId, systemPrompt, effectiveUserText, turns, stored.conversationId, stored.checkpoint, stored.blobStore);
636
+ const needsInitialHandoff = !stored.checkpoint && (turns.length > 0 || pendingAssistantSummary || toolResults.length > 0);
637
+ const replayTurns = needsInitialHandoff ? [] : turns;
638
+ let effectiveUserText = needsInitialHandoff
639
+ ? buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults)
640
+ : toolResults.length > 0
641
+ ? buildToolResumePrompt(userText, pendingAssistantSummary, toolResults)
642
+ : userText;
643
+ const payload = buildCursorRequest(modelId, systemPrompt, effectiveUserText, replayTurns, stored.conversationId, stored.checkpoint, stored.blobStore);
509
644
  payload.mcpTools = mcpTools;
510
645
  if (body.stream === false) {
511
- return handleNonStreamingResponse(payload, accessToken, modelId, convKey);
646
+ return handleNonStreamingResponse(payload, accessToken, modelId, convKey, {
647
+ systemPrompt,
648
+ systemPromptHash,
649
+ completedTurnsFingerprint,
650
+ turns,
651
+ userText,
652
+ agentKey: normalizedAgentKey,
653
+ });
512
654
  }
513
- return handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey);
655
+ return handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, {
656
+ systemPrompt,
657
+ systemPromptHash,
658
+ completedTurnsFingerprint,
659
+ turns,
660
+ userText,
661
+ agentKey: normalizedAgentKey,
662
+ });
514
663
  }
515
664
  /** Normalize OpenAI message content to a plain string. */
516
665
  function textContent(content) {
@@ -525,8 +674,6 @@ function textContent(content) {
525
674
  }
526
675
  function parseMessages(messages) {
527
676
  let systemPrompt = "You are a helpful assistant.";
528
- const pairs = [];
529
- const toolResults = [];
530
677
  // Collect system messages
531
678
  const systemParts = messages
532
679
  .filter((m) => m.role === "system")
@@ -534,40 +681,194 @@ function parseMessages(messages) {
534
681
  if (systemParts.length > 0) {
535
682
  systemPrompt = systemParts.join("\n");
536
683
  }
537
- // Separate tool results from conversation turns
538
684
  const nonSystem = messages.filter((m) => m.role !== "system");
539
- let pendingUser = "";
685
+ const parsedTurns = [];
686
+ let currentTurn;
540
687
  for (const msg of nonSystem) {
541
- if (msg.role === "tool") {
542
- toolResults.push({
543
- toolCallId: msg.tool_call_id ?? "",
544
- content: textContent(msg.content),
545
- });
688
+ if (msg.role === "user") {
689
+ if (currentTurn)
690
+ parsedTurns.push(currentTurn);
691
+ currentTurn = {
692
+ userText: textContent(msg.content),
693
+ segments: [],
694
+ };
695
+ continue;
546
696
  }
547
- else if (msg.role === "user") {
548
- if (pendingUser) {
549
- pairs.push({ userText: pendingUser, assistantText: "" });
550
- }
551
- pendingUser = textContent(msg.content);
697
+ if (!currentTurn) {
698
+ currentTurn = { userText: "", segments: [] };
552
699
  }
553
- else if (msg.role === "assistant") {
554
- // Skip assistant messages that are just tool_calls with no text
700
+ if (msg.role === "assistant") {
555
701
  const text = textContent(msg.content);
556
- if (pendingUser) {
557
- pairs.push({ userText: pendingUser, assistantText: text });
558
- pendingUser = "";
702
+ if (text) {
703
+ currentTurn.segments.push({ kind: "assistantText", text });
559
704
  }
705
+ if (msg.tool_calls?.length) {
706
+ currentTurn.segments.push({
707
+ kind: "assistantToolCalls",
708
+ toolCalls: msg.tool_calls,
709
+ });
710
+ }
711
+ continue;
712
+ }
713
+ if (msg.role === "tool") {
714
+ currentTurn.segments.push({
715
+ kind: "toolResult",
716
+ result: {
717
+ toolCallId: msg.tool_call_id ?? "",
718
+ content: textContent(msg.content),
719
+ },
720
+ });
721
+ }
722
+ }
723
+ if (currentTurn)
724
+ parsedTurns.push(currentTurn);
725
+ let userText = "";
726
+ let toolResults = [];
727
+ let pendingAssistantSummary = "";
728
+ let completedTurnStates = parsedTurns;
729
+ const lastTurn = parsedTurns.at(-1);
730
+ if (lastTurn) {
731
+ const trailingSegments = splitTrailingToolResults(lastTurn.segments);
732
+ const hasAssistantSummary = trailingSegments.base.length > 0;
733
+ if (trailingSegments.trailing.length > 0 && hasAssistantSummary) {
734
+ completedTurnStates = parsedTurns.slice(0, -1);
735
+ userText = lastTurn.userText;
736
+ toolResults = trailingSegments.trailing.map((segment) => segment.result);
737
+ pendingAssistantSummary = summarizeTurnSegments(trailingSegments.base);
738
+ }
739
+ else if (lastTurn.userText && lastTurn.segments.length === 0) {
740
+ completedTurnStates = parsedTurns.slice(0, -1);
741
+ userText = lastTurn.userText;
742
+ }
743
+ }
744
+ const turns = completedTurnStates
745
+ .map((turn) => ({
746
+ userText: turn.userText,
747
+ assistantText: summarizeTurnSegments(turn.segments),
748
+ }))
749
+ .filter((turn) => turn.userText || turn.assistantText);
750
+ return {
751
+ systemPrompt,
752
+ userText,
753
+ turns,
754
+ toolResults,
755
+ pendingAssistantSummary,
756
+ completedTurnsFingerprint: buildCompletedTurnsFingerprint(systemPrompt, turns),
757
+ };
758
+ }
759
/**
 * Split a turn's segments into the leading portion (`base`) and the run of
 * trailing toolResult segments (`trailing`) that are awaiting a resume.
 */
function splitTrailingToolResults(segments) {
    let cut = segments.length;
    for (; cut > 0; cut -= 1) {
        if (segments[cut - 1]?.kind !== "toolResult")
            break;
    }
    const base = segments.slice(0, cut);
    const trailing = segments.slice(cut).filter((segment) => segment.kind === "toolResult");
    return { base, trailing };
}
769
+ function summarizeTurnSegments(segments) {
770
+ const parts = [];
771
+ for (const segment of segments) {
772
+ if (segment.kind === "assistantText") {
773
+ const trimmed = segment.text.trim();
774
+ if (trimmed)
775
+ parts.push(trimmed);
776
+ continue;
560
777
  }
778
+ if (segment.kind === "assistantToolCalls") {
779
+ const summary = segment.toolCalls.map(formatToolCallSummary).join("\n\n");
780
+ if (summary)
781
+ parts.push(summary);
782
+ continue;
783
+ }
784
+ parts.push(formatToolResultSummary(segment.result));
561
785
  }
562
- let lastUserText = "";
563
- if (pendingUser) {
564
- lastUserText = pendingUser;
786
+ return parts.join("\n\n").trim();
787
+ }
788
/** Transcript summary line for an assistant tool call, with its args when present. */
function formatToolCallSummary(call) {
    const header = `[assistant requested tool ${call.function.name} id=${call.id}]`;
    const args = call.function.arguments?.trim();
    if (!args)
        return header;
    return `${header}\n${args}`;
}
794
/** Transcript summary line for a tool result, labeled with the call id when known. */
function formatToolResultSummary(result) {
    let label = "[tool result]";
    if (result.toolCallId)
        label = `[tool result id=${result.toolCallId}]`;
    const body = result.content.trim();
    if (!body)
        return label;
    return `${label}\n${body}`;
}
801
/** Stable fingerprint of the system prompt plus every completed turn. */
function buildCompletedTurnsFingerprint(systemPrompt, turns) {
    const canonical = JSON.stringify({ systemPrompt, turns });
    return hashString(canonical);
}
804
+ function buildToolResumePrompt(userText, pendingAssistantSummary, toolResults) {
805
+ const parts = [userText.trim()];
806
+ if (pendingAssistantSummary.trim()) {
807
+ parts.push(`[previous assistant tool activity]\n${pendingAssistantSummary.trim()}`);
565
808
  }
566
- else if (pairs.length > 0 && toolResults.length === 0) {
567
- const last = pairs.pop();
568
- lastUserText = last.userText;
809
+ if (toolResults.length > 0) {
810
+ parts.push(toolResults.map(formatToolResultSummary).join("\n\n"));
569
811
  }
570
- return { systemPrompt, userText: lastUserText, turns: pairs, toolResults };
812
+ return parts.filter(Boolean).join("\n\n");
813
+ }
814
/**
 * First-request prompt for a conversation whose history Cursor has never
 * seen (e.g. the session previously ran on another provider/model): inlines
 * completed turns and any in-progress tool activity as a transcript block,
 * followed by the latest user message. Falls back to the bare user text when
 * there is no history to hand off.
 */
function buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults) {
    const transcript = [];
    for (let i = 0; i < turns.length; i += 1) {
        const turn = turns[i];
        const sections = [`Turn ${i + 1}`];
        const user = turn.userText.trim();
        if (user)
            sections.push(`User: ${user}`);
        const assistant = turn.assistantText.trim();
        if (assistant)
            sections.push(`Assistant: ${assistant}`);
        transcript.push(sections.join("\n"));
    }
    const inProgress = buildToolResumePrompt("", pendingAssistantSummary, toolResults).trim();
    if (inProgress)
        transcript.push(`In-progress turn\n${inProgress}`);
    const history = transcript.join("\n\n").trim();
    if (!history)
        return userText;
    const lines = [
        "[OpenCode session handoff]",
        "You are continuing an existing session that previously ran on another provider/model.",
        "Treat the transcript below as prior conversation history before answering the latest user message.",
        "",
        "<previous-session-transcript>",
        history,
        "</previous-session-transcript>",
        "",
        "Latest user message:",
        userText.trim(),
    ];
    return lines.filter(Boolean).join("\n");
}
843
/**
 * Flatten the conversation — completed turns, in-flight assistant/tool
 * activity, latest user text — into one text blob for title generation.
 */
function buildTitleSourceText(userText, turns, pendingAssistantSummary, toolResults) {
    const sections = [];
    for (const turn of turns) {
        const joined = [turn.userText.trim(), turn.assistantText.trim()].filter(Boolean).join("\n");
        if (joined)
            sections.push(joined);
    }
    const summary = pendingAssistantSummary.trim();
    if (summary)
        sections.push(summary);
    if (toolResults.length > 0)
        sections.push(toolResults.map(formatToolResultSummary).join("\n\n"));
    const latest = userText.trim();
    if (latest)
        sections.push(latest);
    return sections.join("\n\n").trim();
}
856
/**
 * Apply an OpenAI `tool_choice` to the advertised tool list: "none" disables
 * tools, a specific function choice narrows to that one tool, and everything
 * else ("auto", "required", absent, or unrecognized) passes the list through.
 */
function selectToolsForChoice(tools, toolChoice) {
    if (tools.length === 0)
        return [];
    if (toolChoice === "none")
        return [];
    if (toolChoice == null || toolChoice === "auto" || toolChoice === "required")
        return tools;
    if (typeof toolChoice === "object") {
        const wantedName = toolChoice.type === "function" ? toolChoice.function?.name : undefined;
        if (typeof wantedName === "string") {
            return tools.filter((tool) => tool.function.name === wantedName);
        }
    }
    return tools;
}
572
873
  /** Convert OpenAI tool definitions to Cursor's MCP tool protobuf format. */
573
874
  function buildMcpToolDefinitions(tools) {
@@ -710,6 +1011,12 @@ function makeHeartbeatBytes() {
710
1011
  });
711
1012
  return toBinary(AgentClientMessageSchema, heartbeat);
712
1013
  }
1014
/** End the bridge on the next microtask, skipping bridges that already died. */
function scheduleBridgeEnd(bridge) {
    queueMicrotask(() => {
        if (!bridge.alive)
            return;
        bridge.end();
    });
}
713
1020
  /**
714
1021
  * Create a stateful parser for Connect protocol frames.
715
1022
  * Handles buffering partial data across chunks.
@@ -852,6 +1159,12 @@ function handleKvMessage(kvMsg, blobStore, sendFrame) {
852
1159
  const blobId = kvMsg.message.value.blobId;
853
1160
  const blobIdKey = Buffer.from(blobId).toString("hex");
854
1161
  const blobData = blobStore.get(blobIdKey);
1162
+ if (!blobData) {
1163
+ logPluginWarn("Cursor requested missing blob", {
1164
+ blobId: blobIdKey,
1165
+ knownBlobCount: blobStore.size,
1166
+ });
1167
+ }
855
1168
  sendKvResponse(kvMsg, "getBlobResult", create(GetBlobResultSchema, blobData ? { blobData } : {}), sendFrame);
856
1169
  }
857
1170
  else if (kvCase === "setBlobArgs") {
@@ -1016,42 +1329,151 @@ function sendExecResult(execMsg, messageCase, value, sendFrame) {
1016
1329
  });
1017
1330
  sendFrame(toBinary(AgentClientMessageSchema, clientMessage));
1018
1331
  }
1019
- /** Derive a key for active bridge lookup (tool-call continuations). Model-specific. */
1020
- function deriveBridgeKey(modelId, messages) {
1332
+ /** Derive a key for active bridge lookup (tool-call continuations). */
1333
+ function deriveBridgeKey(modelId, messages, sessionId, agentKey) {
1334
+ if (sessionId) {
1335
+ const normalizedAgent = normalizeAgentKey(agentKey);
1336
+ return createHash("sha256")
1337
+ .update(`bridge:${sessionId}:${normalizedAgent}`)
1338
+ .digest("hex")
1339
+ .slice(0, 16);
1340
+ }
1021
1341
  const firstUserMsg = messages.find((m) => m.role === "user");
1022
1342
  const firstUserText = firstUserMsg ? textContent(firstUserMsg.content) : "";
1343
+ const normalizedAgent = normalizeAgentKey(agentKey);
1023
1344
  return createHash("sha256")
1024
- .update(`bridge:${modelId}:${firstUserText.slice(0, 200)}`)
1345
+ .update(`bridge:${normalizedAgent}:${modelId}:${firstUserText.slice(0, 200)}`)
1025
1346
  .digest("hex")
1026
1347
  .slice(0, 16);
1027
1348
  }
1028
1349
  /** Derive a key for conversation state. Model-independent so context survives model switches. */
1029
- function deriveConversationKey(messages) {
1030
- const firstUserMsg = messages.find((m) => m.role === "user");
1031
- const firstUserText = firstUserMsg ? textContent(firstUserMsg.content) : "";
1350
+ function deriveConversationKey(messages, sessionId, agentKey) {
1351
+ if (sessionId) {
1352
+ const normalizedAgent = normalizeAgentKey(agentKey);
1353
+ return createHash("sha256")
1354
+ .update(`session:${sessionId}:${normalizedAgent}`)
1355
+ .digest("hex")
1356
+ .slice(0, 16);
1357
+ }
1032
1358
  return createHash("sha256")
1033
- .update(`conv:${firstUserText.slice(0, 200)}`)
1359
+ .update(`${normalizeAgentKey(agentKey)}:${buildConversationFingerprint(messages)}`)
1034
1360
  .digest("hex")
1035
1361
  .slice(0, 16);
1036
1362
  }
1037
- /** Deterministic UUID derived from convKey so Cursor's server-side conversation
1038
- * persists across proxy restarts. Formats 16 bytes of SHA-256 as a v4-shaped UUID. */
1039
- function deterministicConversationId(convKey) {
1040
- const hex = createHash("sha256")
1041
- .update(`cursor-conv-id:${convKey}`)
1042
- .digest("hex")
1043
- .slice(0, 32);
1044
- // Format as UUID: xxxxxxxx-xxxx-4xxx-Nxxx-xxxxxxxxxxxx
1045
- return [
1046
- hex.slice(0, 8),
1047
- hex.slice(8, 12),
1048
- `4${hex.slice(13, 16)}`,
1049
- `${(0x8 | (parseInt(hex[16], 16) & 0x3)).toString(16)}${hex.slice(17, 20)}`,
1050
- hex.slice(20, 32),
1051
- ].join("-");
1363
/**
 * Fallback conversation identity when no session-id header is available:
 * one line per message (role, normalized text, tool linkage ids), joined
 * with an explicit separator.
 */
function buildConversationFingerprint(messages) {
    const lines = [];
    for (const message of messages) {
        const toolCallIDs = (message.tool_calls ?? []).map((call) => call.id).join(",");
        lines.push(`${message.role}:${textContent(message.content)}:${message.tool_call_id ?? ""}:${toolCallIDs}`);
    }
    return lines.join("\n---\n");
}
1369
/**
 * After a completed (non-tool-call) response, advance the stored
 * conversation's fingerprints so the next request is recognized as a
 * continuation instead of triggering a reset. No-op for unknown convKeys.
 */
function updateStoredConversationAfterCompletion(convKey, metadata, assistantText) {
    const stored = conversationStates.get(convKey);
    if (stored === undefined)
        return;
    let nextTurns = metadata.turns;
    if (metadata.userText) {
        // The just-finished exchange becomes a completed turn.
        nextTurns = [...metadata.turns, { userText: metadata.userText, assistantText: assistantText.trim() }];
    }
    stored.systemPromptHash = metadata.systemPromptHash;
    stored.completedTurnsFingerprint = buildCompletedTurnsFingerprint(metadata.systemPrompt, nextTurns);
    stored.lastAccessMs = Date.now();
}
1380
/**
 * Local fallback when Cursor's NameAgent yields nothing usable: strip markup,
 * bracketed labels, and punctuation, then title-case the first six words.
 */
function deriveFallbackTitle(text) {
    const withoutTags = text.replace(/<[^>]+>/g, " ");
    const withoutBrackets = withoutTags.replace(/\[[^\]]+\]/g, " ");
    const lettersOnly = withoutBrackets.replace(/[^\p{L}\p{N}'’\-\s]+/gu, " ");
    const cleaned = lettersOnly.replace(/\s+/g, " ").trim();
    if (cleaned === "")
        return "";
    const words = cleaned.split(" ").filter(Boolean).slice(0, 6);
    const titled = words.map(titleCaseWord).join(" ");
    return finalizeTitle(titled);
}
1392
/** Uppercase a word's first character; empty/falsy input passes through unchanged. */
function titleCaseWord(word) {
    if (!word)
        return word;
    const head = word.charAt(0).toUpperCase();
    return head + word.slice(1);
}
1397
/**
 * Normalize a candidate session title: drop a leading markdown heading
 * marker and trailing punctuation, collapse whitespace, cap at 80 chars.
 */
function finalizeTitle(value) {
    let title = value.replace(/^#{1,6}\s*/, "");
    title = title.replace(/[.!?,:;]+$/g, "");
    title = title.replace(/\s+/g, " ").trim();
    title = title.slice(0, 80);
    return title.trim();
}
1406
+ function createBufferedSSETextResponse(modelId, text, usage) {
1407
+ const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
1408
+ const created = Math.floor(Date.now() / 1000);
1409
+ const payload = [
1410
+ {
1411
+ id: completionId,
1412
+ object: "chat.completion.chunk",
1413
+ created,
1414
+ model: modelId,
1415
+ choices: [{ index: 0, delta: { content: text }, finish_reason: null }],
1416
+ },
1417
+ {
1418
+ id: completionId,
1419
+ object: "chat.completion.chunk",
1420
+ created,
1421
+ model: modelId,
1422
+ choices: [{ index: 0, delta: {}, finish_reason: "stop" }],
1423
+ },
1424
+ {
1425
+ id: completionId,
1426
+ object: "chat.completion.chunk",
1427
+ created,
1428
+ model: modelId,
1429
+ choices: [],
1430
+ usage,
1431
+ },
1432
+ ].map((chunk) => `data: ${JSON.stringify(chunk)}\n\n`).join("") + "data: [DONE]\n\n";
1433
+ return new Response(payload, { headers: SSE_HEADERS });
1434
+ }
1435
/**
 * Generate a session title via Cursor's NameAgent unary RPC and wrap it as
 * an OpenAI chat completion (buffered SSE when streaming, plain JSON
 * otherwise). Falls back to a locally derived title, then "Untitled
 * Session", when the RPC yields nothing usable.
 */
async function handleTitleGenerationRequest(sourceText, accessToken, modelId, stream) {
    const request = create(NameAgentRequestSchema, { userMessage: sourceText });
    const requestBody = toBinary(NameAgentRequestSchema, request);
    const response = await callCursorUnaryRpc({
        accessToken,
        rpcPath: "/agent.v1.AgentService/NameAgent",
        requestBody,
        timeoutMs: 5_000,
    });
    if (response.timedOut) {
        throw new Error("Cursor title generation timed out");
    }
    if (response.exitCode !== 0) {
        throw new Error(`Cursor title generation failed with HTTP ${response.exitCode}`);
    }
    // The unary body may arrive as Connect envelopes; fall back to raw bytes.
    const payload = decodeConnectUnaryBody(response.body) ?? response.body;
    const decoded = fromBinary(NameAgentResponseSchema, payload);
    const title = finalizeTitle(decoded.name) || deriveFallbackTitle(sourceText) || "Untitled Session";
    const usage = { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
    if (stream) {
        return createBufferedSSETextResponse(modelId, title, usage);
    }
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    const completion = {
        id: completionId,
        object: "chat.completion",
        created,
        model: modelId,
        choices: [
            {
                index: 0,
                message: { role: "assistant", content: title },
                finish_reason: "stop",
            },
        ],
        usage,
    };
    return new Response(JSON.stringify(completion), { headers: { "Content-Type": "application/json" } });
}
1053
1475
  /** Create an SSE streaming Response that reads from a live bridge. */
1054
- function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey) {
1476
+ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, metadata) {
1055
1477
  const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
1056
1478
  const created = Math.floor(Date.now() / 1000);
1057
1479
  const stream = new ReadableStream({
@@ -1099,7 +1521,9 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
1099
1521
  totalTokens: 0,
1100
1522
  };
1101
1523
  const tagFilter = createThinkingTagFilter();
1524
+ let assistantText = metadata.assistantSeedText ?? "";
1102
1525
  let mcpExecReceived = false;
1526
+ let endStreamError = null;
1103
1527
  const processChunk = createConnectFrameParser((messageBytes) => {
1104
1528
  try {
1105
1529
  const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
@@ -1111,8 +1535,10 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
1111
1535
  const { content, reasoning } = tagFilter.process(text);
1112
1536
  if (reasoning)
1113
1537
  sendSSE(makeChunk({ reasoning_content: reasoning }));
1114
- if (content)
1538
+ if (content) {
1539
+ assistantText += content;
1115
1540
  sendSSE(makeChunk({ content }));
1541
+ }
1116
1542
  }
1117
1543
  },
1118
1544
  // onMcpExec — the model wants to execute a tool.
@@ -1122,8 +1548,21 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
1122
1548
  const flushed = tagFilter.flush();
1123
1549
  if (flushed.reasoning)
1124
1550
  sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
1125
- if (flushed.content)
1551
+ if (flushed.content) {
1552
+ assistantText += flushed.content;
1126
1553
  sendSSE(makeChunk({ content: flushed.content }));
1554
+ }
1555
+ const assistantSeedText = [
1556
+ assistantText.trim(),
1557
+ formatToolCallSummary({
1558
+ id: exec.toolCallId,
1559
+ type: "function",
1560
+ function: {
1561
+ name: exec.toolName,
1562
+ arguments: exec.decodedArgs,
1563
+ },
1564
+ }),
1565
+ ].filter(Boolean).join("\n\n");
1127
1566
  const toolCallIndex = state.toolCallIndex++;
1128
1567
  sendSSE(makeChunk({
1129
1568
  tool_calls: [{
@@ -1143,6 +1582,11 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
1143
1582
  blobStore,
1144
1583
  mcpTools,
1145
1584
  pendingExecs: state.pendingExecs,
1585
+ modelId,
1586
+ metadata: {
1587
+ ...metadata,
1588
+ assistantSeedText,
1589
+ },
1146
1590
  });
1147
1591
  sendSSE(makeChunk({}, "tool_calls"));
1148
1592
  sendDone();
@@ -1159,10 +1603,16 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
1159
1603
  // Skip unparseable messages
1160
1604
  }
1161
1605
  }, (endStreamBytes) => {
1162
- const endError = parseConnectEndStream(endStreamBytes);
1163
- if (endError) {
1164
- sendSSE(makeChunk({ content: `\n[Error: ${endError.message}]` }));
1606
+ endStreamError = parseConnectEndStream(endStreamBytes);
1607
+ if (endStreamError) {
1608
+ logPluginError("Cursor stream returned Connect end-stream error", {
1609
+ modelId,
1610
+ bridgeKey,
1611
+ convKey,
1612
+ ...errorDetails(endStreamError),
1613
+ });
1165
1614
  }
1615
+ scheduleBridgeEnd(bridge);
1166
1616
  });
1167
1617
  bridge.onData(processChunk);
1168
1618
  bridge.onClose((code) => {
@@ -1173,27 +1623,39 @@ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools,
1173
1623
  stored.blobStore.set(k, v);
1174
1624
  stored.lastAccessMs = Date.now();
1175
1625
  }
1626
+ if (endStreamError) {
1627
+ activeBridges.delete(bridgeKey);
1628
+ if (!closed) {
1629
+ closed = true;
1630
+ controller.error(endStreamError);
1631
+ }
1632
+ return;
1633
+ }
1176
1634
  if (!mcpExecReceived) {
1177
1635
  const flushed = tagFilter.flush();
1178
1636
  if (flushed.reasoning)
1179
1637
  sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
1180
- if (flushed.content)
1638
+ if (flushed.content) {
1639
+ assistantText += flushed.content;
1181
1640
  sendSSE(makeChunk({ content: flushed.content }));
1641
+ }
1642
+ updateStoredConversationAfterCompletion(convKey, metadata, assistantText);
1182
1643
  sendSSE(makeChunk({}, "stop"));
1183
1644
  sendSSE(makeUsageChunk());
1184
1645
  sendDone();
1185
1646
  closeController();
1186
1647
  }
1187
- else if (code !== 0) {
1188
- // Bridge died while tool calls are pending (timeout, crash, etc.).
1189
- // Close the SSE stream so the client doesn't hang forever.
1190
- sendSSE(makeChunk({ content: "\n[Error: bridge connection lost]" }));
1191
- sendSSE(makeChunk({}, "stop"));
1192
- sendSSE(makeUsageChunk());
1193
- sendDone();
1194
- closeController();
1195
- // Remove stale entry so the next request doesn't try to resume it.
1648
+ else {
1196
1649
  activeBridges.delete(bridgeKey);
1650
+ if (code !== 0 && !closed) {
1651
+ // Bridge died while tool calls are pending (timeout, crash, etc.).
1652
+ // Close the SSE stream so the client doesn't hang forever.
1653
+ sendSSE(makeChunk({ content: "\n[Error: bridge connection lost]" }));
1654
+ sendSSE(makeChunk({}, "stop"));
1655
+ sendSSE(makeUsageChunk());
1656
+ sendDone();
1657
+ closeController();
1658
+ }
1197
1659
  }
1198
1660
  });
1199
1661
  },
@@ -1211,13 +1673,20 @@ async function startBridge(accessToken, requestBytes) {
1211
1673
  const heartbeatTimer = setInterval(() => bridge.write(makeHeartbeatBytes()), 5_000);
1212
1674
  return { bridge, heartbeatTimer };
1213
1675
  }
1214
- async function handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey) {
1676
+ async function handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, metadata) {
1215
1677
  const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
1216
- return createBridgeStreamResponse(bridge, heartbeatTimer, payload.blobStore, payload.mcpTools, modelId, bridgeKey, convKey);
1678
+ return createBridgeStreamResponse(bridge, heartbeatTimer, payload.blobStore, payload.mcpTools, modelId, bridgeKey, convKey, metadata);
1217
1679
  }
1218
1680
  /** Resume a paused bridge by sending MCP results and continuing to stream. */
1219
- function handleToolResultResume(active, toolResults, modelId, bridgeKey, convKey) {
1220
- const { bridge, heartbeatTimer, blobStore, mcpTools, pendingExecs } = active;
1681
+ function handleToolResultResume(active, toolResults, bridgeKey, convKey) {
1682
+ const { bridge, heartbeatTimer, blobStore, mcpTools, pendingExecs, modelId, metadata } = active;
1683
+ const resumeMetadata = {
1684
+ ...metadata,
1685
+ assistantSeedText: [
1686
+ metadata.assistantSeedText?.trim() ?? "",
1687
+ toolResults.map(formatToolResultSummary).join("\n\n"),
1688
+ ].filter(Boolean).join("\n\n"),
1689
+ };
1221
1690
  // Send mcpResult for each pending exec that has a matching tool result
1222
1691
  for (const exec of pendingExecs) {
1223
1692
  const result = toolResults.find((r) => r.toolCallId === exec.toolCallId);
@@ -1257,12 +1726,15 @@ function handleToolResultResume(active, toolResults, modelId, bridgeKey, convKey
1257
1726
  });
1258
1727
  bridge.write(toBinary(AgentClientMessageSchema, clientMessage));
1259
1728
  }
1260
- return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey);
1729
+ return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, resumeMetadata);
1261
1730
  }
1262
- async function handleNonStreamingResponse(payload, accessToken, modelId, convKey) {
1731
+ async function handleNonStreamingResponse(payload, accessToken, modelId, convKey, metadata) {
1263
1732
  const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
1264
1733
  const created = Math.floor(Date.now() / 1000);
1265
- const { text, usage } = await collectFullResponse(payload, accessToken, convKey);
1734
+ const { text, usage, finishReason, toolCalls } = await collectFullResponse(payload, accessToken, modelId, convKey, metadata);
1735
+ const message = finishReason === "tool_calls"
1736
+ ? { role: "assistant", content: null, tool_calls: toolCalls }
1737
+ : { role: "assistant", content: text };
1266
1738
  return new Response(JSON.stringify({
1267
1739
  id: completionId,
1268
1740
  object: "chat.completion",
@@ -1271,16 +1743,18 @@ async function handleNonStreamingResponse(payload, accessToken, modelId, convKey
1271
1743
  choices: [
1272
1744
  {
1273
1745
  index: 0,
1274
- message: { role: "assistant", content: text },
1275
- finish_reason: "stop",
1746
+ message,
1747
+ finish_reason: finishReason,
1276
1748
  },
1277
1749
  ],
1278
1750
  usage,
1279
1751
  }), { headers: { "Content-Type": "application/json" } });
1280
1752
  }
1281
- async function collectFullResponse(payload, accessToken, convKey) {
1282
- const { promise, resolve } = Promise.withResolvers();
1753
+ async function collectFullResponse(payload, accessToken, modelId, convKey, metadata) {
1754
+ const { promise, resolve, reject } = Promise.withResolvers();
1283
1755
  let fullText = "";
1756
+ let endStreamError = null;
1757
+ const pendingToolCalls = [];
1284
1758
  const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
1285
1759
  const state = {
1286
1760
  toolCallIndex: 0,
@@ -1297,7 +1771,17 @@ async function collectFullResponse(payload, accessToken, convKey) {
1297
1771
  return;
1298
1772
  const { content } = tagFilter.process(text);
1299
1773
  fullText += content;
1300
- }, () => { }, (checkpointBytes) => {
1774
+ }, (exec) => {
1775
+ pendingToolCalls.push({
1776
+ id: exec.toolCallId,
1777
+ type: "function",
1778
+ function: {
1779
+ name: exec.toolName,
1780
+ arguments: exec.decodedArgs,
1781
+ },
1782
+ });
1783
+ scheduleBridgeEnd(bridge);
1784
+ }, (checkpointBytes) => {
1301
1785
  const stored = conversationStates.get(convKey);
1302
1786
  if (stored) {
1303
1787
  stored.checkpoint = checkpointBytes;
@@ -1308,7 +1792,17 @@ async function collectFullResponse(payload, accessToken, convKey) {
1308
1792
  catch {
1309
1793
  // Skip
1310
1794
  }
1311
- }, () => { }));
1795
+ }, (endStreamBytes) => {
1796
+ endStreamError = parseConnectEndStream(endStreamBytes);
1797
+ if (endStreamError) {
1798
+ logPluginError("Cursor non-streaming response returned Connect end-stream error", {
1799
+ modelId,
1800
+ convKey,
1801
+ ...errorDetails(endStreamError),
1802
+ });
1803
+ }
1804
+ scheduleBridgeEnd(bridge);
1805
+ }));
1312
1806
  bridge.onClose(() => {
1313
1807
  clearInterval(heartbeatTimer);
1314
1808
  const stored = conversationStates.get(convKey);
@@ -1319,10 +1813,19 @@ async function collectFullResponse(payload, accessToken, convKey) {
1319
1813
  }
1320
1814
  const flushed = tagFilter.flush();
1321
1815
  fullText += flushed.content;
1816
+ if (endStreamError) {
1817
+ reject(endStreamError);
1818
+ return;
1819
+ }
1820
+ if (pendingToolCalls.length === 0) {
1821
+ updateStoredConversationAfterCompletion(convKey, metadata, fullText);
1822
+ }
1322
1823
  const usage = computeUsage(state);
1323
1824
  resolve({
1324
1825
  text: fullText,
1325
1826
  usage,
1827
+ finishReason: pendingToolCalls.length > 0 ? "tool_calls" : "stop",
1828
+ toolCalls: pendingToolCalls,
1326
1829
  });
1327
1830
  });
1328
1831
  return promise;