@playwo/opencode-cursor-oauth 0.0.0-dev.de8f891a2e99 → 0.0.0-dev.e3644b4a140d

This diff shows the published contents of two versions of this package as they appear in their public registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -1,103 +1,31 @@
- # @playwo/opencode-cursor-oauth
+ # opencode-cursor-oauth
 
- OpenCode plugin that connects to Cursor's API, giving you access to Cursor
- models inside OpenCode with full tool-calling support.
+ Use Cursor models (Claude, GPT, Gemini, etc.) inside [OpenCode](https://opencode.ai).
 
- ## Install in OpenCode
+ ## What it does
 
- Add this to `~/.config/opencode/opencode.json`:
+ - **OAuth login** to Cursor via browser
+ - **Model discovery** — automatically fetches your available Cursor models
+ - **Local proxy** — runs an OpenAI-compatible endpoint that translates to Cursor's gRPC protocol
+ - **Auto-refresh** — handles token expiration automatically
 
- ```jsonc
- {
-   "$schema": "https://opencode.ai/config.json",
-   "plugin": [
-     "@playwo/opencode-cursor-oauth"
-   ],
-   "provider": {
-     "cursor": {
-       "name": "Cursor"
-     }
-   }
- }
- ```
-
- The `cursor` provider stub is required because OpenCode drops providers that do
- not already exist in its bundled provider catalog.
-
- OpenCode installs npm plugins automatically at startup, so users do not need to
- clone this repository.
-
- ## Authenticate
-
- ```sh
- opencode auth login --provider cursor
- ```
-
- This opens Cursor OAuth in the browser. Tokens are stored in
- `~/.local/share/opencode/auth.json` and refreshed automatically.
-
- ## Use
-
- Start OpenCode and select any Cursor model. The plugin starts a local
- OpenAI-compatible proxy on demand and routes requests through Cursor's gRPC API.
-
- ## How it works
-
- 1. OAuth — browser-based login to Cursor via PKCE.
- 2. Model discovery — queries Cursor's gRPC API for all available models; if discovery fails, the plugin disables the Cursor provider for that load and shows a visible error toast instead of crashing OpenCode.
- 3. Local proxy — translates `POST /v1/chat/completions` into Cursor's
-    protobuf/Connect protocol.
- 4. Native tool routing — rejects Cursor's built-in filesystem/shell tools and
-    exposes OpenCode's tool surface via Cursor MCP instead.
-
- Cursor agent streaming uses Cursor's `RunSSE` + `BidiAppend` transport, so the
- plugin runs entirely inside OpenCode without a Node sidecar.
+ ## Install
 
- ## Architecture
+ Add to your `opencode.json`:
 
+ ```json
+ {
+   "plugin": ["@playwo/opencode-cursor-oauth"]
+ }
  ```
- OpenCode --> /v1/chat/completions --> Bun.serve (proxy)
-                      |
-         RunSSE stream + BidiAppend writes
-                      |
-          Cursor Connect/SSE transport
-                      |
-            api2.cursor.sh gRPC
- ```
-
- ### Tool call flow
-
- ```
- 1. Cursor model receives OpenAI tools via RequestContext (as MCP tool defs)
- 2. Model tries native tools (readArgs, shellArgs, etc.)
- 3. Proxy rejects each with typed error (ReadRejected, ShellRejected, etc.)
- 4. Model falls back to MCP tool -> mcpArgs exec message
- 5. Proxy emits OpenAI tool_calls SSE chunk, pauses the Cursor stream
- 6. OpenCode executes tool, sends result in follow-up request
- 7. Proxy resumes the Cursor stream with mcpResult and continues streaming
- ```
-
- ## Develop locally
-
- ```sh
- bun install
- bun run build
- bun test/smoke.ts
- ```
-
- ## Publish
 
- GitHub Actions publishes this package with `.github/workflows/publish-npm.yml`.
+ Then authenticate via the OpenCode UI (Settings → Providers → Cursor → Login).
 
- - branch pushes publish a `dev` build as `0.0.0-dev.<sha>`
- - versioned releases publish `latest` using the `package.json` version and upload the packed `.tgz` to the GitHub release
-
- Repository secrets required:
+ ## Requirements
 
- - `NPM_TOKEN` for npm publish access
+ - Cursor account with API access
+ - OpenCode 1.2+
 
- ## Requirements
+ ## License
 
- - [OpenCode](https://opencode.ai)
- - [Bun](https://bun.sh)
- - Active [Cursor](https://cursor.com) subscription
+ MIT
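
The minimal config in the new README omits the provider stub that the removed section says OpenCode needs in order to keep an unknown provider in its catalog. If that constraint still holds, a combined `opencode.json` would look like this sketch (all values taken from the two snippets above):

```jsonc
{
  "$schema": "https://opencode.ai/config.json",
  // OpenCode installs npm plugins automatically at startup.
  "plugin": ["@playwo/opencode-cursor-oauth"],
  // Stub entry so OpenCode does not drop the unknown "cursor" provider.
  "provider": {
    "cursor": { "name": "Cursor" }
  }
}
```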
package/dist/index.js CHANGED
@@ -1,7 +1,7 @@
  import { generateCursorAuthParams, getTokenExpiry, pollCursorAuth, refreshCursorToken, } from "./auth";
  import { configurePluginLogger, errorDetails, logPluginError, logPluginWarn } from "./logger";
  import { getCursorModels } from "./models";
- import { startProxy, stopProxy } from "./proxy";
+ import { startProxy, stopProxy, } from "./proxy";
  const CURSOR_PROVIDER_ID = "cursor";
  let lastModelDiscoveryError = null;
  /**
@@ -128,6 +128,15 @@ export const CursorAuthPlugin = async (input) => {
              },
          ],
      },
+     async "chat.headers"(incoming, output) {
+         if (incoming.model.providerID !== CURSOR_PROVIDER_ID)
+             return;
+         output.headers["x-opencode-session-id"] = incoming.sessionID;
+         output.headers["x-session-id"] = incoming.sessionID;
+         if (incoming.agent) {
+             output.headers["x-opencode-agent"] = incoming.agent;
+         }
+     },
      };
  };
  function buildCursorProviderModels(models, port) {
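
The `chat.headers` hook above tags every Cursor-bound request with session and agent identifiers; `dist/proxy.js` (next file) reads the same headers to scope its bridge and conversation keys. A sketch of exercising that contract against the local proxy directly; the port and model id are illustrative placeholders, not values defined by this package:

```ts
// Hypothetical direct request to the plugin's local OpenAI-compatible proxy.
// Port 8787 and the model id are assumptions for illustration only.
const res = await fetch("http://127.0.0.1:8787/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    // Same headers the chat.headers hook sets:
    "x-opencode-session-id": "ses_example",
    "x-opencode-agent": "build",
  },
  body: JSON.stringify({
    model: "example-model",
    stream: false,
    messages: [{ role: "user", content: "hello" }],
  }),
});
console.log(await res.json());
```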
package/dist/proxy.js CHANGED
@@ -41,6 +41,31 @@ function evictStaleConversations() {
          }
      }
  }
+ function normalizeAgentKey(agentKey) {
+     const trimmed = agentKey?.trim();
+     return trimmed ? trimmed : "default";
+ }
+ function hashString(value) {
+     return createHash("sha256").update(value).digest("hex");
+ }
+ function createStoredConversation() {
+     return {
+         conversationId: crypto.randomUUID(),
+         checkpoint: null,
+         blobStore: new Map(),
+         lastAccessMs: Date.now(),
+         systemPromptHash: "",
+         completedTurnsFingerprint: "",
+     };
+ }
+ function resetStoredConversation(stored) {
+     stored.conversationId = crypto.randomUUID();
+     stored.checkpoint = null;
+     stored.blobStore = new Map();
+     stored.lastAccessMs = Date.now();
+     stored.systemPromptHash = "";
+     stored.completedTurnsFingerprint = "";
+ }
  /** Connect protocol frame: [1-byte flags][4-byte BE length][payload] */
  function frameConnectMessage(data, flags = 0) {
      const frame = Buffer.alloc(5 + data.length);
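
The docblock above pins down the Connect frame layout the proxy speaks: one flags byte, a 4-byte big-endian payload length, then the payload. A self-contained sketch of an encoder and a matching incremental decoder for that layout (illustrative names, Node/Bun `Buffer` assumed):

```ts
// [1-byte flags][4-byte BE length][payload], per the comment above.
function encodeFrame(payload: Buffer, flags = 0): Buffer {
  const frame = Buffer.alloc(5 + payload.length);
  frame.writeUInt8(flags, 0);             // flags byte
  frame.writeUInt32BE(payload.length, 1); // big-endian payload length
  payload.copy(frame, 5);                 // payload follows the 5-byte header
  return frame;
}

// Decode one frame from the front of a buffer; null means "need more bytes".
function decodeFrame(buf: Buffer): { flags: number; payload: Buffer; rest: Buffer } | null {
  if (buf.length < 5) return null;
  const length = buf.readUInt32BE(1);
  if (buf.length < 5 + length) return null;
  return { flags: buf.readUInt8(0), payload: buf.subarray(5, 5 + length), rest: buf.subarray(5 + length) };
}
```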
@@ -471,7 +496,11 @@ export async function startProxy(getAccessToken, models = []) {
              throw new Error("Cursor proxy access token provider not configured");
          }
          const accessToken = await proxyAccessTokenProvider();
-         return handleChatCompletion(body, accessToken);
+         const sessionId = req.headers.get("x-opencode-session-id")
+             ?? req.headers.get("x-session-id")
+             ?? undefined;
+         const agentKey = req.headers.get("x-opencode-agent") ?? undefined;
+         return handleChatCompletion(body, accessToken, { sessionId, agentKey });
      }
      catch (err) {
          const message = err instanceof Error ? err.message : String(err);
@@ -509,10 +538,11 @@ export function stopProxy() {
      activeBridges.clear();
      conversationStates.clear();
  }
- function handleChatCompletion(body, accessToken) {
-     const { systemPrompt, userText, turns, toolResults } = parseMessages(body.messages);
+ function handleChatCompletion(body, accessToken, context = {}) {
+     const parsed = parseMessages(body.messages);
+     const { systemPrompt, userText, turns, toolResults, pendingAssistantSummary, completedTurnsFingerprint, } = parsed;
      const modelId = body.model;
-     const tools = body.tools ?? [];
+     const tools = selectToolsForChoice(body.tools ?? [], body.tool_choice);
      if (!userText && toolResults.length === 0) {
          return new Response(JSON.stringify({
              error: {
@@ -521,16 +551,24 @@ function handleChatCompletion(body, accessToken) {
              },
          }), { status: 400, headers: { "Content-Type": "application/json" } });
      }
-     // bridgeKey: model-specific, for active tool-call bridges
+     // bridgeKey: session/agent-scoped, for active tool-call bridges
      // convKey: model-independent, for conversation state that survives model switches
-     const bridgeKey = deriveBridgeKey(modelId, body.messages);
-     const convKey = deriveConversationKey(body.messages);
+     const bridgeKey = deriveBridgeKey(modelId, body.messages, context.sessionId, context.agentKey);
+     const convKey = deriveConversationKey(body.messages, context.sessionId, context.agentKey);
      const activeBridge = activeBridges.get(bridgeKey);
      if (activeBridge && toolResults.length > 0) {
          activeBridges.delete(bridgeKey);
          if (activeBridge.bridge.alive) {
+             if (activeBridge.modelId !== modelId) {
+                 logPluginWarn("Resuming pending Cursor tool call on original model after model switch", {
+                     requestedModelId: modelId,
+                     resumedModelId: activeBridge.modelId,
+                     convKey,
+                     bridgeKey,
+                 });
+             }
              // Resume the live bridge with tool results
-             return handleToolResultResume(activeBridge, toolResults, modelId, bridgeKey, convKey);
+             return handleToolResultResume(activeBridge, toolResults, bridgeKey, convKey);
          }
          // Bridge died (timeout, server disconnect, etc.).
          // Clean up and fall through to start a fresh bridge.
@@ -545,28 +583,47 @@ function handleChatCompletion(body, accessToken) {
      }
      let stored = conversationStates.get(convKey);
      if (!stored) {
-         stored = {
-             conversationId: deterministicConversationId(convKey),
-             checkpoint: null,
-             blobStore: new Map(),
-             lastAccessMs: Date.now(),
-         };
+         stored = createStoredConversation();
          conversationStates.set(convKey, stored);
      }
+     const systemPromptHash = hashString(systemPrompt);
+     if (stored.checkpoint
+         && (stored.systemPromptHash !== systemPromptHash
+             || (turns.length > 0 && stored.completedTurnsFingerprint !== completedTurnsFingerprint))) {
+         resetStoredConversation(stored);
+     }
+     stored.systemPromptHash = systemPromptHash;
+     stored.completedTurnsFingerprint = completedTurnsFingerprint;
      stored.lastAccessMs = Date.now();
      evictStaleConversations();
      // Build the request. When tool results are present but the bridge died,
      // we must still include the last user text so Cursor has context.
      const mcpTools = buildMcpToolDefinitions(tools);
-     const effectiveUserText = userText || (toolResults.length > 0
-         ? toolResults.map((r) => r.content).join("\n")
-         : "");
-     const payload = buildCursorRequest(modelId, systemPrompt, effectiveUserText, turns, stored.conversationId, stored.checkpoint, stored.blobStore);
+     const needsInitialHandoff = !stored.checkpoint && (turns.length > 0 || pendingAssistantSummary || toolResults.length > 0);
+     const replayTurns = needsInitialHandoff ? [] : turns;
+     const effectiveUserText = needsInitialHandoff
+         ? buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults)
+         : toolResults.length > 0
+             ? buildToolResumePrompt(userText, pendingAssistantSummary, toolResults)
+             : userText;
+     const payload = buildCursorRequest(modelId, systemPrompt, effectiveUserText, replayTurns, stored.conversationId, stored.checkpoint, stored.blobStore);
      payload.mcpTools = mcpTools;
      if (body.stream === false) {
-         return handleNonStreamingResponse(payload, accessToken, modelId, convKey);
+         return handleNonStreamingResponse(payload, accessToken, modelId, convKey, {
+             systemPrompt,
+             systemPromptHash,
+             completedTurnsFingerprint,
+             turns,
+             userText,
+         });
      }
-     return handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey);
+     return handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, {
+         systemPrompt,
+         systemPromptHash,
+         completedTurnsFingerprint,
+         turns,
+         userText,
+     });
  }
  /** Normalize OpenAI message content to a plain string. */
  function textContent(content) {
@@ -581,8 +638,6 @@ function textContent(content) {
  }
  function parseMessages(messages) {
      let systemPrompt = "You are a helpful assistant.";
-     const pairs = [];
-     const toolResults = [];
      // Collect system messages
      const systemParts = messages
          .filter((m) => m.role === "system")
@@ -590,40 +645,181 @@ function parseMessages(messages) {
      if (systemParts.length > 0) {
          systemPrompt = systemParts.join("\n");
      }
-     // Separate tool results from conversation turns
      const nonSystem = messages.filter((m) => m.role !== "system");
-     let pendingUser = "";
+     const parsedTurns = [];
+     let currentTurn;
      for (const msg of nonSystem) {
-         if (msg.role === "tool") {
-             toolResults.push({
-                 toolCallId: msg.tool_call_id ?? "",
-                 content: textContent(msg.content),
-             });
+         if (msg.role === "user") {
+             if (currentTurn)
+                 parsedTurns.push(currentTurn);
+             currentTurn = {
+                 userText: textContent(msg.content),
+                 segments: [],
+             };
+             continue;
          }
-         else if (msg.role === "user") {
-             if (pendingUser) {
-                 pairs.push({ userText: pendingUser, assistantText: "" });
-             }
-             pendingUser = textContent(msg.content);
+         if (!currentTurn) {
+             currentTurn = { userText: "", segments: [] };
          }
-         else if (msg.role === "assistant") {
-             // Skip assistant messages that are just tool_calls with no text
+         if (msg.role === "assistant") {
              const text = textContent(msg.content);
-             if (pendingUser) {
-                 pairs.push({ userText: pendingUser, assistantText: text });
-                 pendingUser = "";
+             if (text) {
+                 currentTurn.segments.push({ kind: "assistantText", text });
              }
+             if (msg.tool_calls?.length) {
+                 currentTurn.segments.push({
+                     kind: "assistantToolCalls",
+                     toolCalls: msg.tool_calls,
+                 });
+             }
+             continue;
+         }
+         if (msg.role === "tool") {
+             currentTurn.segments.push({
+                 kind: "toolResult",
+                 result: {
+                     toolCallId: msg.tool_call_id ?? "",
+                     content: textContent(msg.content),
+                 },
+             });
+         }
+     }
+     if (currentTurn)
+         parsedTurns.push(currentTurn);
+     let userText = "";
+     let toolResults = [];
+     let pendingAssistantSummary = "";
+     let completedTurnStates = parsedTurns;
+     const lastTurn = parsedTurns.at(-1);
+     if (lastTurn) {
+         const trailingSegments = splitTrailingToolResults(lastTurn.segments);
+         const hasAssistantSummary = trailingSegments.base.length > 0;
+         if (trailingSegments.trailing.length > 0 && hasAssistantSummary) {
+             completedTurnStates = parsedTurns.slice(0, -1);
+             userText = lastTurn.userText;
+             toolResults = trailingSegments.trailing.map((segment) => segment.result);
+             pendingAssistantSummary = summarizeTurnSegments(trailingSegments.base);
+         }
+         else if (lastTurn.userText && lastTurn.segments.length === 0) {
+             completedTurnStates = parsedTurns.slice(0, -1);
+             userText = lastTurn.userText;
+         }
+     }
+     const turns = completedTurnStates
+         .map((turn) => ({
+             userText: turn.userText,
+             assistantText: summarizeTurnSegments(turn.segments),
+         }))
+         .filter((turn) => turn.userText || turn.assistantText);
+     return {
+         systemPrompt,
+         userText,
+         turns,
+         toolResults,
+         pendingAssistantSummary,
+         completedTurnsFingerprint: buildCompletedTurnsFingerprint(systemPrompt, turns),
+     };
+ }
+ function splitTrailingToolResults(segments) {
+     let index = segments.length;
+     while (index > 0 && segments[index - 1]?.kind === "toolResult") {
+         index -= 1;
+     }
+     return {
+         base: segments.slice(0, index),
+         trailing: segments.slice(index).filter((segment) => segment.kind === "toolResult"),
+     };
+ }
+ function summarizeTurnSegments(segments) {
+     const parts = [];
+     for (const segment of segments) {
+         if (segment.kind === "assistantText") {
+             const trimmed = segment.text.trim();
+             if (trimmed)
+                 parts.push(trimmed);
+             continue;
          }
+         if (segment.kind === "assistantToolCalls") {
+             const summary = segment.toolCalls.map(formatToolCallSummary).join("\n\n");
+             if (summary)
+                 parts.push(summary);
+             continue;
+         }
+         parts.push(formatToolResultSummary(segment.result));
      }
-     let lastUserText = "";
-     if (pendingUser) {
-         lastUserText = pendingUser;
+     return parts.join("\n\n").trim();
+ }
+ function formatToolCallSummary(call) {
+     const args = call.function.arguments?.trim();
+     return args
+         ? `[assistant requested tool ${call.function.name} id=${call.id}]\n${args}`
+         : `[assistant requested tool ${call.function.name} id=${call.id}]`;
+ }
+ function formatToolResultSummary(result) {
+     const label = result.toolCallId
+         ? `[tool result id=${result.toolCallId}]`
+         : "[tool result]";
+     const content = result.content.trim();
+     return content ? `${label}\n${content}` : label;
+ }
+ function buildCompletedTurnsFingerprint(systemPrompt, turns) {
+     return hashString(JSON.stringify({ systemPrompt, turns }));
+ }
+ function buildToolResumePrompt(userText, pendingAssistantSummary, toolResults) {
+     const parts = [userText.trim()];
+     if (pendingAssistantSummary.trim()) {
+         parts.push(`[previous assistant tool activity]\n${pendingAssistantSummary.trim()}`);
      }
-     else if (pairs.length > 0 && toolResults.length === 0) {
-         const last = pairs.pop();
-         lastUserText = last.userText;
+     if (toolResults.length > 0) {
+         parts.push(toolResults.map(formatToolResultSummary).join("\n\n"));
      }
-     return { systemPrompt, userText: lastUserText, turns: pairs, toolResults };
+     return parts.filter(Boolean).join("\n\n");
+ }
+ function buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults) {
+     const transcript = turns.map((turn, index) => {
+         const sections = [`Turn ${index + 1}`];
+         if (turn.userText.trim())
+             sections.push(`User: ${turn.userText.trim()}`);
+         if (turn.assistantText.trim())
+             sections.push(`Assistant: ${turn.assistantText.trim()}`);
+         return sections.join("\n");
+     });
+     const inProgress = buildToolResumePrompt("", pendingAssistantSummary, toolResults).trim();
+     const history = [
+         ...transcript,
+         ...(inProgress ? [`In-progress turn\n${inProgress}`] : []),
+     ].join("\n\n").trim();
+     if (!history)
+         return userText;
+     return [
+         "[OpenCode session handoff]",
+         "You are continuing an existing session that previously ran on another provider/model.",
+         "Treat the transcript below as prior conversation history before answering the latest user message.",
+         "",
+         "<previous-session-transcript>",
+         history,
+         "</previous-session-transcript>",
+         "",
+         "Latest user message:",
+         userText.trim(),
+     ].filter(Boolean).join("\n");
+ }
+ function selectToolsForChoice(tools, toolChoice) {
+     if (!tools.length)
+         return [];
+     if (toolChoice === undefined || toolChoice === null || toolChoice === "auto" || toolChoice === "required") {
+         return tools;
+     }
+     if (toolChoice === "none") {
+         return [];
+     }
+     if (typeof toolChoice === "object") {
+         const choice = toolChoice;
+         if (choice.type === "function" && typeof choice.function?.name === "string") {
+             return tools.filter((tool) => tool.function.name === choice.function.name);
+         }
+     }
+     return tools;
  }
  /** Convert OpenAI tool definitions to Cursor's MCP tool protobuf format. */
  function buildMcpToolDefinitions(tools) {
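
To make the parsing above concrete, here is a hand-worked sketch of what `parseMessages` returns for a short request whose last turn ends in a trailing tool result (values derived by reading the logic above, not by running the package):

```ts
// Input in OpenAI chat format:
const messages = [
  { role: "system", content: "Be terse." },
  { role: "user", content: "List files" },
  {
    role: "assistant",
    content: "",
    tool_calls: [{ id: "call_1", type: "function", function: { name: "ls", arguments: "{}" } }],
  },
  { role: "tool", tool_call_id: "call_1", content: "a.txt" },
];
// Expected result, following the code above:
// systemPrompt:            "Be terse."
// userText:                "List files"   (the in-progress turn's user text)
// turns:                   []             (no earlier completed turns)
// toolResults:             [{ toolCallId: "call_1", content: "a.txt" }]
// pendingAssistantSummary: "[assistant requested tool ls id=call_1]\n{}"
```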
@@ -732,6 +928,7 @@ function buildCursorRequest(modelId, systemPrompt, userText, turns, conversation
          action,
          modelDetails,
          conversationId,
+         customSystemPrompt: systemPrompt,
      });
      const clientMessage = create(AgentClientMessageSchema, {
          message: { case: "runRequest", value: runRequest },
@@ -766,6 +963,12 @@ function makeHeartbeatBytes() {
      });
      return toBinary(AgentClientMessageSchema, heartbeat);
  }
+ function scheduleBridgeEnd(bridge) {
+     queueMicrotask(() => {
+         if (bridge.alive)
+             bridge.end();
+     });
+ }
  /**
   * Create a stateful parser for Connect protocol frames.
   * Handles buffering partial data across chunks.
@@ -908,6 +1111,12 @@ function handleKvMessage(kvMsg, blobStore, sendFrame) {
      const blobId = kvMsg.message.value.blobId;
      const blobIdKey = Buffer.from(blobId).toString("hex");
      const blobData = blobStore.get(blobIdKey);
+     if (!blobData) {
+         logPluginWarn("Cursor requested missing blob", {
+             blobId: blobIdKey,
+             knownBlobCount: blobStore.size,
+         });
+     }
      sendKvResponse(kvMsg, "getBlobResult", create(GetBlobResultSchema, blobData ? { blobData } : {}), sendFrame);
  }
  else if (kvCase === "setBlobArgs") {
@@ -1072,42 +1281,56 @@ function sendExecResult(execMsg, messageCase, value, sendFrame) {
      });
      sendFrame(toBinary(AgentClientMessageSchema, clientMessage));
  }
- /** Derive a key for active bridge lookup (tool-call continuations). Model-specific. */
- function deriveBridgeKey(modelId, messages) {
+ /** Derive a key for active bridge lookup (tool-call continuations). */
+ function deriveBridgeKey(modelId, messages, sessionId, agentKey) {
+     if (sessionId) {
+         const normalizedAgent = normalizeAgentKey(agentKey);
+         return createHash("sha256")
+             .update(`bridge:${sessionId}:${normalizedAgent}`)
+             .digest("hex")
+             .slice(0, 16);
+     }
      const firstUserMsg = messages.find((m) => m.role === "user");
      const firstUserText = firstUserMsg ? textContent(firstUserMsg.content) : "";
+     const normalizedAgent = normalizeAgentKey(agentKey);
      return createHash("sha256")
-         .update(`bridge:${modelId}:${firstUserText.slice(0, 200)}`)
+         .update(`bridge:${normalizedAgent}:${modelId}:${firstUserText.slice(0, 200)}`)
          .digest("hex")
          .slice(0, 16);
  }
  /** Derive a key for conversation state. Model-independent so context survives model switches. */
- function deriveConversationKey(messages) {
-     const firstUserMsg = messages.find((m) => m.role === "user");
-     const firstUserText = firstUserMsg ? textContent(firstUserMsg.content) : "";
+ function deriveConversationKey(messages, sessionId, agentKey) {
+     if (sessionId) {
+         const normalizedAgent = normalizeAgentKey(agentKey);
+         return createHash("sha256")
+             .update(`session:${sessionId}:${normalizedAgent}`)
+             .digest("hex")
+             .slice(0, 16);
+     }
      return createHash("sha256")
-         .update(`conv:${firstUserText.slice(0, 200)}`)
+         .update(`${normalizeAgentKey(agentKey)}:${buildConversationFingerprint(messages)}`)
          .digest("hex")
          .slice(0, 16);
  }
- /** Deterministic UUID derived from convKey so Cursor's server-side conversation
-  * persists across proxy restarts. Formats 16 bytes of SHA-256 as a v4-shaped UUID. */
- function deterministicConversationId(convKey) {
-     const hex = createHash("sha256")
-         .update(`cursor-conv-id:${convKey}`)
-         .digest("hex")
-         .slice(0, 32);
-     // Format as UUID: xxxxxxxx-xxxx-4xxx-Nxxx-xxxxxxxxxxxx
-     return [
-         hex.slice(0, 8),
-         hex.slice(8, 12),
-         `4${hex.slice(13, 16)}`,
-         `${(0x8 | (parseInt(hex[16], 16) & 0x3)).toString(16)}${hex.slice(17, 20)}`,
-         hex.slice(20, 32),
-     ].join("-");
+ function buildConversationFingerprint(messages) {
+     return messages.map((message) => {
+         const toolCallIDs = (message.tool_calls ?? []).map((call) => call.id).join(",");
+         return `${message.role}:${textContent(message.content)}:${message.tool_call_id ?? ""}:${toolCallIDs}`;
+     }).join("\n---\n");
+ }
+ function updateStoredConversationAfterCompletion(convKey, metadata, assistantText) {
+     const stored = conversationStates.get(convKey);
+     if (!stored)
+         return;
+     const nextTurns = metadata.userText
+         ? [...metadata.turns, { userText: metadata.userText, assistantText: assistantText.trim() }]
+         : metadata.turns;
+     stored.systemPromptHash = metadata.systemPromptHash;
+     stored.completedTurnsFingerprint = buildCompletedTurnsFingerprint(metadata.systemPrompt, nextTurns);
+     stored.lastAccessMs = Date.now();
  }
  /** Create an SSE streaming Response that reads from a live bridge. */
- function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey) {
+ function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, metadata) {
      const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
      const created = Math.floor(Date.now() / 1000);
      const stream = new ReadableStream({
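
A small self-contained illustration of the session-scoped branch of the key derivation above: when a `sessionId` is present, both keys hash only session and agent, so neither the model id nor the message history changes them (ids below are made up):

```ts
import { createHash } from "node:crypto";

// Mirrors the `session:${sessionId}:${agent}` branch above (illustrative).
const scopedKey = (prefix: string, sessionId: string, agent = "default") =>
  createHash("sha256").update(`${prefix}:${sessionId}:${agent}`).digest("hex").slice(0, 16);

const a = scopedKey("session", "ses_42", "build");
const b = scopedKey("session", "ses_42", "build");
console.log(a === b); // true: same session and agent, regardless of model or messages
// The bridge key uses a "bridge" prefix but likewise ignores the model id when a
// sessionId is present, which is what lets a pending tool call resume on the
// original model after a mid-session model switch.
```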
@@ -1155,7 +1378,9 @@
          totalTokens: 0,
      };
      const tagFilter = createThinkingTagFilter();
+     let assistantText = metadata.assistantSeedText ?? "";
      let mcpExecReceived = false;
+     let endStreamError = null;
      const processChunk = createConnectFrameParser((messageBytes) => {
          try {
              const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
@@ -1167,8 +1392,10 @@
              const { content, reasoning } = tagFilter.process(text);
              if (reasoning)
                  sendSSE(makeChunk({ reasoning_content: reasoning }));
-             if (content)
+             if (content) {
+                 assistantText += content;
                  sendSSE(makeChunk({ content }));
+             }
          }
      },
      // onMcpExec — the model wants to execute a tool.
@@ -1178,8 +1405,21 @@
          const flushed = tagFilter.flush();
          if (flushed.reasoning)
              sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
-         if (flushed.content)
+         if (flushed.content) {
+             assistantText += flushed.content;
              sendSSE(makeChunk({ content: flushed.content }));
+         }
+         const assistantSeedText = [
+             assistantText.trim(),
+             formatToolCallSummary({
+                 id: exec.toolCallId,
+                 type: "function",
+                 function: {
+                     name: exec.toolName,
+                     arguments: exec.decodedArgs,
+                 },
+             }),
+         ].filter(Boolean).join("\n\n");
          const toolCallIndex = state.toolCallIndex++;
          sendSSE(makeChunk({
              tool_calls: [{
@@ -1199,6 +1439,11 @@
              blobStore,
              mcpTools,
              pendingExecs: state.pendingExecs,
+             modelId,
+             metadata: {
+                 ...metadata,
+                 assistantSeedText,
+             },
          });
          sendSSE(makeChunk({}, "tool_calls"));
          sendDone();
@@ -1215,10 +1460,16 @@
              // Skip unparseable messages
          }
      }, (endStreamBytes) => {
-         const endError = parseConnectEndStream(endStreamBytes);
-         if (endError) {
-             sendSSE(makeChunk({ content: `\n[Error: ${endError.message}]` }));
+         endStreamError = parseConnectEndStream(endStreamBytes);
+         if (endStreamError) {
+             logPluginError("Cursor stream returned Connect end-stream error", {
+                 modelId,
+                 bridgeKey,
+                 convKey,
+                 ...errorDetails(endStreamError),
+             });
          }
+         scheduleBridgeEnd(bridge);
      });
      bridge.onData(processChunk);
      bridge.onClose((code) => {
@@ -1229,27 +1480,39 @@
                  stored.blobStore.set(k, v);
              stored.lastAccessMs = Date.now();
          }
+         if (endStreamError) {
+             activeBridges.delete(bridgeKey);
+             if (!closed) {
+                 closed = true;
+                 controller.error(endStreamError);
+             }
+             return;
+         }
          if (!mcpExecReceived) {
              const flushed = tagFilter.flush();
              if (flushed.reasoning)
                  sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
-             if (flushed.content)
+             if (flushed.content) {
+                 assistantText += flushed.content;
                  sendSSE(makeChunk({ content: flushed.content }));
+             }
+             updateStoredConversationAfterCompletion(convKey, metadata, assistantText);
              sendSSE(makeChunk({}, "stop"));
              sendSSE(makeUsageChunk());
              sendDone();
              closeController();
          }
-         else if (code !== 0) {
-             // Bridge died while tool calls are pending (timeout, crash, etc.).
-             // Close the SSE stream so the client doesn't hang forever.
-             sendSSE(makeChunk({ content: "\n[Error: bridge connection lost]" }));
-             sendSSE(makeChunk({}, "stop"));
-             sendSSE(makeUsageChunk());
-             sendDone();
-             closeController();
-             // Remove stale entry so the next request doesn't try to resume it.
+         else {
              activeBridges.delete(bridgeKey);
+             if (code !== 0 && !closed) {
+                 // Bridge died while tool calls are pending (timeout, crash, etc.).
+                 // Close the SSE stream so the client doesn't hang forever.
+                 sendSSE(makeChunk({ content: "\n[Error: bridge connection lost]" }));
+                 sendSSE(makeChunk({}, "stop"));
+                 sendSSE(makeUsageChunk());
+                 sendDone();
+                 closeController();
+             }
          }
      });
  },
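
For reference, the `makeChunk`/`sendSSE` calls above emit OpenAI-compatible `chat.completion.chunk` events. Assuming the standard OpenAI chunk shape (field values illustrative), a content delta followed by the terminating events looks like:

```
data: {"id":"chatcmpl-…","object":"chat.completion.chunk","created":1700000000,"model":"example-model","choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}

data: {"id":"chatcmpl-…","object":"chat.completion.chunk","created":1700000000,"model":"example-model","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}

data: [DONE]
```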
@@ -1267,13 +1530,20 @@ async function startBridge(accessToken, requestBytes) {
      const heartbeatTimer = setInterval(() => bridge.write(makeHeartbeatBytes()), 5_000);
      return { bridge, heartbeatTimer };
  }
- async function handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey) {
+ async function handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, metadata) {
      const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
-     return createBridgeStreamResponse(bridge, heartbeatTimer, payload.blobStore, payload.mcpTools, modelId, bridgeKey, convKey);
+     return createBridgeStreamResponse(bridge, heartbeatTimer, payload.blobStore, payload.mcpTools, modelId, bridgeKey, convKey, metadata);
  }
  /** Resume a paused bridge by sending MCP results and continuing to stream. */
- function handleToolResultResume(active, toolResults, modelId, bridgeKey, convKey) {
-     const { bridge, heartbeatTimer, blobStore, mcpTools, pendingExecs } = active;
+ function handleToolResultResume(active, toolResults, bridgeKey, convKey) {
+     const { bridge, heartbeatTimer, blobStore, mcpTools, pendingExecs, modelId, metadata } = active;
+     const resumeMetadata = {
+         ...metadata,
+         assistantSeedText: [
+             metadata.assistantSeedText?.trim() ?? "",
+             toolResults.map(formatToolResultSummary).join("\n\n"),
+         ].filter(Boolean).join("\n\n"),
+     };
      // Send mcpResult for each pending exec that has a matching tool result
      for (const exec of pendingExecs) {
          const result = toolResults.find((r) => r.toolCallId === exec.toolCallId);
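
The resume path above is driven by the client's follow-up request: after receiving a `tool_calls` chunk, OpenCode runs the tool and POSTs the result back as a `role: "tool"` message whose `tool_call_id` matches a pending exec. A sketch of that follow-up body (ids illustrative):

```json
{
  "model": "example-model",
  "messages": [
    { "role": "user", "content": "List files" },
    {
      "role": "assistant",
      "content": null,
      "tool_calls": [
        { "id": "call_1", "type": "function", "function": { "name": "ls", "arguments": "{}" } }
      ]
    },
    { "role": "tool", "tool_call_id": "call_1", "content": "a.txt" }
  ]
}
```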
@@ -1313,12 +1583,15 @@ function handleToolResultResume(active, toolResults, modelId, bridgeKey, convKey
          });
          bridge.write(toBinary(AgentClientMessageSchema, clientMessage));
      }
-     return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey);
+     return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, mcpTools, modelId, bridgeKey, convKey, resumeMetadata);
  }
- async function handleNonStreamingResponse(payload, accessToken, modelId, convKey) {
+ async function handleNonStreamingResponse(payload, accessToken, modelId, convKey, metadata) {
      const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
      const created = Math.floor(Date.now() / 1000);
-     const { text, usage } = await collectFullResponse(payload, accessToken, convKey);
+     const { text, usage, finishReason, toolCalls } = await collectFullResponse(payload, accessToken, modelId, convKey, metadata);
+     const message = finishReason === "tool_calls"
+         ? { role: "assistant", content: null, tool_calls: toolCalls }
+         : { role: "assistant", content: text };
      return new Response(JSON.stringify({
          id: completionId,
          object: "chat.completion",
@@ -1327,16 +1600,18 @@ async function handleNonStreamingResponse(payload, accessToken, modelId, convKey
          choices: [
              {
                  index: 0,
-                 message: { role: "assistant", content: text },
-                 finish_reason: "stop",
+                 message,
+                 finish_reason: finishReason,
              },
          ],
          usage,
      }), { headers: { "Content-Type": "application/json" } });
  }
- async function collectFullResponse(payload, accessToken, convKey) {
-     const { promise, resolve } = Promise.withResolvers();
+ async function collectFullResponse(payload, accessToken, modelId, convKey, metadata) {
+     const { promise, resolve, reject } = Promise.withResolvers();
      let fullText = "";
+     let endStreamError = null;
+     const pendingToolCalls = [];
      const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
      const state = {
          toolCallIndex: 0,
@@ -1353,7 +1628,17 @@ async function collectFullResponse(payload, accessToken, convKey) {
              return;
          const { content } = tagFilter.process(text);
          fullText += content;
-     }, () => { }, (checkpointBytes) => {
+     }, (exec) => {
+         pendingToolCalls.push({
+             id: exec.toolCallId,
+             type: "function",
+             function: {
+                 name: exec.toolName,
+                 arguments: exec.decodedArgs,
+             },
+         });
+         scheduleBridgeEnd(bridge);
+     }, (checkpointBytes) => {
          const stored = conversationStates.get(convKey);
          if (stored) {
              stored.checkpoint = checkpointBytes;
@@ -1364,7 +1649,17 @@ async function collectFullResponse(payload, accessToken, convKey) {
      catch {
          // Skip
      }
-     }, () => { }));
+     }, (endStreamBytes) => {
+         endStreamError = parseConnectEndStream(endStreamBytes);
+         if (endStreamError) {
+             logPluginError("Cursor non-streaming response returned Connect end-stream error", {
+                 modelId,
+                 convKey,
+                 ...errorDetails(endStreamError),
+             });
+         }
+         scheduleBridgeEnd(bridge);
+     }));
      bridge.onClose(() => {
          clearInterval(heartbeatTimer);
          const stored = conversationStates.get(convKey);
@@ -1375,10 +1670,19 @@ async function collectFullResponse(payload, accessToken, convKey) {
          }
          const flushed = tagFilter.flush();
          fullText += flushed.content;
+         if (endStreamError) {
+             reject(endStreamError);
+             return;
+         }
+         if (pendingToolCalls.length === 0) {
+             updateStoredConversationAfterCompletion(convKey, metadata, fullText);
+         }
          const usage = computeUsage(state);
          resolve({
              text: fullText,
              usage,
+             finishReason: pendingToolCalls.length > 0 ? "tool_calls" : "stop",
+             toolCalls: pendingToolCalls,
          });
      });
      return promise;
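
With `stream: false`, the non-streaming path above returns one `chat.completion` object whose `finish_reason` is either `"stop"` (plain text) or `"tool_calls"` (with `content: null`). A sketch of the text case; the exact `usage` fields come from `computeUsage` and are assumed here:

```json
{
  "id": "chatcmpl-abc123",
  "object": "chat.completion",
  "created": 1700000000,
  "model": "example-model",
  "choices": [
    {
      "index": 0,
      "message": { "role": "assistant", "content": "a.txt" },
      "finish_reason": "stop"
    }
  ],
  "usage": { "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0 }
}
```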
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@playwo/opencode-cursor-oauth",
-   "version": "0.0.0-dev.de8f891a2e99",
+   "version": "0.0.0-dev.e3644b4a140d",
    "description": "OpenCode plugin that connects Cursor's API to OpenCode via OAuth, model discovery, and a local OpenAI-compatible proxy.",
    "license": "MIT",
    "type": "module",