@playwo/opencode-cursor-oauth 0.0.0-dev.c80ebcb27754 → 0.0.0-dev.da5538092563

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +32 -83
  2. package/dist/auth.js +27 -3
  3. package/dist/constants.d.ts +2 -0
  4. package/dist/constants.js +2 -0
  5. package/dist/cursor/bidi-session.d.ts +12 -0
  6. package/dist/cursor/bidi-session.js +164 -0
  7. package/dist/cursor/config.d.ts +4 -0
  8. package/dist/cursor/config.js +4 -0
  9. package/dist/cursor/connect-framing.d.ts +10 -0
  10. package/dist/cursor/connect-framing.js +80 -0
  11. package/dist/cursor/headers.d.ts +6 -0
  12. package/dist/cursor/headers.js +16 -0
  13. package/dist/cursor/index.d.ts +5 -0
  14. package/dist/cursor/index.js +5 -0
  15. package/dist/cursor/unary-rpc.d.ts +12 -0
  16. package/dist/cursor/unary-rpc.js +124 -0
  17. package/dist/index.d.ts +2 -14
  18. package/dist/index.js +2 -229
  19. package/dist/logger.d.ts +7 -0
  20. package/dist/logger.js +150 -0
  21. package/dist/models.d.ts +3 -0
  22. package/dist/models.js +80 -54
  23. package/dist/openai/index.d.ts +3 -0
  24. package/dist/openai/index.js +3 -0
  25. package/dist/openai/messages.d.ts +39 -0
  26. package/dist/openai/messages.js +228 -0
  27. package/dist/openai/tools.d.ts +7 -0
  28. package/dist/openai/tools.js +58 -0
  29. package/dist/openai/types.d.ts +41 -0
  30. package/dist/openai/types.js +1 -0
  31. package/dist/plugin/cursor-auth-plugin.d.ts +3 -0
  32. package/dist/plugin/cursor-auth-plugin.js +139 -0
  33. package/dist/proto/agent_pb.js +637 -319
  34. package/dist/provider/index.d.ts +2 -0
  35. package/dist/provider/index.js +2 -0
  36. package/dist/provider/model-cost.d.ts +9 -0
  37. package/dist/provider/model-cost.js +206 -0
  38. package/dist/provider/models.d.ts +8 -0
  39. package/dist/provider/models.js +86 -0
  40. package/dist/proxy/bridge-close-controller.d.ts +6 -0
  41. package/dist/proxy/bridge-close-controller.js +37 -0
  42. package/dist/proxy/bridge-non-streaming.d.ts +3 -0
  43. package/dist/proxy/bridge-non-streaming.js +123 -0
  44. package/dist/proxy/bridge-session.d.ts +5 -0
  45. package/dist/proxy/bridge-session.js +11 -0
  46. package/dist/proxy/bridge-streaming.d.ts +5 -0
  47. package/dist/proxy/bridge-streaming.js +409 -0
  48. package/dist/proxy/bridge.d.ts +3 -0
  49. package/dist/proxy/bridge.js +3 -0
  50. package/dist/proxy/chat-completion.d.ts +2 -0
  51. package/dist/proxy/chat-completion.js +153 -0
  52. package/dist/proxy/conversation-meta.d.ts +12 -0
  53. package/dist/proxy/conversation-meta.js +1 -0
  54. package/dist/proxy/conversation-state.d.ts +35 -0
  55. package/dist/proxy/conversation-state.js +95 -0
  56. package/dist/proxy/cursor-request.d.ts +6 -0
  57. package/dist/proxy/cursor-request.js +101 -0
  58. package/dist/proxy/index.d.ts +12 -0
  59. package/dist/proxy/index.js +12 -0
  60. package/dist/proxy/server.d.ts +6 -0
  61. package/dist/proxy/server.js +107 -0
  62. package/dist/proxy/sse.d.ts +5 -0
  63. package/dist/proxy/sse.js +5 -0
  64. package/dist/proxy/state-sync.d.ts +2 -0
  65. package/dist/proxy/state-sync.js +17 -0
  66. package/dist/proxy/stream-dispatch.d.ts +42 -0
  67. package/dist/proxy/stream-dispatch.js +641 -0
  68. package/dist/proxy/stream-state.d.ts +7 -0
  69. package/dist/proxy/stream-state.js +1 -0
  70. package/dist/proxy/title.d.ts +1 -0
  71. package/dist/proxy/title.js +103 -0
  72. package/dist/proxy/types.d.ts +32 -0
  73. package/dist/proxy/types.js +1 -0
  74. package/dist/proxy.d.ts +2 -19
  75. package/dist/proxy.js +2 -1221
  76. package/package.json +1 -2
@@ -0,0 +1,409 @@
1
+ import { create, fromBinary, toBinary } from "@bufbuild/protobuf";
2
+ import { AgentClientMessageSchema, AgentServerMessageSchema, ExecClientMessageSchema, McpErrorSchema, McpResultSchema, McpSuccessSchema, McpTextContentSchema, McpToolResultContentItemSchema, } from "../proto/agent_pb";
3
+ import { errorDetails, logPluginError, logPluginInfo, logPluginWarn, } from "../logger";
4
+ import { formatToolCallSummary, formatToolResultSummary, } from "../openai/messages";
5
+ import { activeBridges, updateStoredConversationAfterCompletion, } from "./conversation-state";
6
+ import { startBridge } from "./bridge-session";
7
+ import { updateConversationCheckpoint, syncStoredBlobStore, } from "./state-sync";
8
+ import { SSE_HEADERS } from "./sse";
9
+ import { computeUsage, createConnectFrameParser, createThinkingTagFilter, parseConnectEndStream, processServerMessage, scheduleBridgeEnd, } from "./stream-dispatch";
10
+ import { createBridgeCloseController } from "./bridge-close-controller";
// Interval between SSE comment keepalives (": keep-alive") written to the
// client while a Cursor bridge stream is open.
const SSE_KEEPALIVE_INTERVAL_MS = 15_000;
/**
 * Wrap a live Cursor bridge in an OpenAI-compatible SSE `Response`.
 *
 * Cursor AgentServerMessage frames arriving on `bridge` are translated into
 * `chat.completion.chunk` events. When Cursor requests an MCP exec (a tool
 * call), the bridge is parked in `activeBridges` keyed by `bridgeKey` so a
 * follow-up request carrying tool results can resume it, and this stream is
 * finished with `finish_reason: "tool_calls"`.
 *
 * @param bridge          Live Cursor bridge connection (write/onData/onClose/end).
 * @param heartbeatTimer  Interval handle owned by the bridge session; cleared on close/cancel.
 * @param blobStore       Conversation blob store, synced back on close/cancel.
 * @param cloudRule       Forwarded to processServerMessage; only its truthiness is logged here.
 * @param mcpTools        Tool definitions exposed to Cursor for this run.
 * @param modelId         Model id echoed in every chunk.
 * @param bridgeKey       Key for activeBridges (session/agent scoped).
 * @param convKey         Key for persistent conversation state.
 * @param metadata        Request metadata; assistantSeedText seeds the transcript.
 * @returns Response whose body is the SSE stream.
 */
function createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, cloudRule, mcpTools, modelId, bridgeKey, convKey, metadata) {
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    let keepaliveTimer;
    const bridgeCloseController = createBridgeCloseController(bridge);
    const stopKeepalive = () => {
        if (!keepaliveTimer)
            return;
        clearInterval(keepaliveTimer);
        keepaliveTimer = undefined;
    };
    const stream = new ReadableStream({
        start(controller) {
            const encoder = new TextEncoder();
            // Guards every enqueue; flipped exactly once by closeController.
            let closed = false;
            const state = {
                toolCallIndex: 0,
                pendingExecs: [],
                outputTokens: 0,
                totalTokens: 0,
            };
            const tagFilter = createThinkingTagFilter();
            // Running assistant transcript; starts from the resume seed if any.
            let assistantText = metadata.assistantSeedText ?? "";
            let mcpExecReceived = false;
            let endStreamError = null;
            const sendSSE = (data) => {
                if (closed)
                    return;
                controller.enqueue(encoder.encode(`data: ${JSON.stringify(data)}\n\n`));
            };
            const sendKeepalive = () => {
                if (closed)
                    return;
                controller.enqueue(encoder.encode(": keep-alive\n\n"));
            };
            const sendDone = () => {
                if (closed)
                    return;
                controller.enqueue(encoder.encode("data: [DONE]\n\n"));
            };
            // Emit an OpenAI-style error event, terminate the SSE stream.
            const failStream = (message, code) => {
                if (closed)
                    return;
                sendSSE({
                    error: {
                        message,
                        type: "server_error",
                        ...(code ? { code } : {}),
                    },
                });
                sendDone();
                closeController();
            };
            const closeController = () => {
                if (closed)
                    return;
                closed = true;
                stopKeepalive();
                controller.close();
            };
            const makeChunk = (delta, finishReason = null) => ({
                id: completionId,
                object: "chat.completion.chunk",
                created,
                model: modelId,
                choices: [{ index: 0, delta, finish_reason: finishReason }],
            });
            // Final usage chunk (empty choices array, OpenAI stream_options style).
            const makeUsageChunk = () => {
                const { prompt_tokens, completion_tokens, total_tokens } = computeUsage(state);
                return {
                    id: completionId,
                    object: "chat.completion.chunk",
                    created,
                    model: modelId,
                    choices: [],
                    usage: { prompt_tokens, completion_tokens, total_tokens },
                };
            };
            // Connect-framing parser: first callback per protobuf message frame,
            // second callback for the end-stream frame.
            const processChunk = createConnectFrameParser((messageBytes) => {
                try {
                    const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
                    processServerMessage(serverMessage, blobStore, cloudRule, mcpTools, (data) => bridge.write(data), state, (text, isThinking) => {
                        // Text delta: thinking goes to reasoning_content,
                        // everything else is run through the thinking-tag filter.
                        if (isThinking) {
                            sendSSE(makeChunk({ reasoning_content: text }));
                            return;
                        }
                        const { content, reasoning } = tagFilter.process(text);
                        if (reasoning)
                            sendSSE(makeChunk({ reasoning_content: reasoning }));
                        if (content) {
                            assistantText += content;
                            sendSSE(makeChunk({ content }));
                        }
                    }, (exec) => {
                        // MCP exec (tool call) requested by Cursor. Replace a
                        // duplicate toolCallId in-place rather than appending.
                        const existingIndex = state.pendingExecs.findIndex((candidate) => candidate.toolCallId === exec.toolCallId);
                        if (existingIndex >= 0) {
                            state.pendingExecs[existingIndex] = exec;
                        }
                        else {
                            state.pendingExecs.push(exec);
                        }
                        mcpExecReceived = true;
                        // Flush any buffered filter output before ending the turn.
                        const flushed = tagFilter.flush();
                        if (flushed.reasoning)
                            sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
                        if (flushed.content) {
                            assistantText += flushed.content;
                            sendSSE(makeChunk({ content: flushed.content }));
                        }
                        // Seed text so the resumed stream can rebuild the transcript.
                        const assistantSeedText = [
                            assistantText.trim(),
                            formatToolCallSummary({
                                id: exec.toolCallId,
                                type: "function",
                                function: {
                                    name: exec.toolName,
                                    arguments: exec.decodedArgs,
                                },
                            }),
                        ]
                            .filter(Boolean)
                            .join("\n\n");
                        sendSSE(makeChunk({
                            tool_calls: [
                                {
                                    index: state.toolCallIndex++,
                                    id: exec.toolCallId,
                                    type: "function",
                                    function: {
                                        name: exec.toolName,
                                        arguments: exec.decodedArgs,
                                    },
                                },
                            ],
                        }));
                        // Park the live bridge for the tool-result follow-up request.
                        activeBridges.set(bridgeKey, {
                            bridge,
                            heartbeatTimer,
                            blobStore,
                            cloudRule,
                            mcpTools,
                            pendingExecs: state.pendingExecs,
                            modelId,
                            metadata: {
                                ...metadata,
                                assistantSeedText,
                            },
                        });
                        sendSSE(makeChunk({}, "tool_calls"));
                        sendDone();
                        closeController();
                    }, (checkpointBytes) => {
                        updateConversationCheckpoint(convKey, checkpointBytes);
                        bridgeCloseController.noteCheckpoint();
                    }, () => bridgeCloseController.noteTurnEnded(), (info) => {
                        // Unsupported server message: record error, close bridge.
                        endStreamError = new Error(`Cursor returned unsupported ${info.category}: ${info.caseName}${info.detail ? ` (${info.detail})` : ""}`);
                        logPluginError("Closing Cursor bridge after unsupported message", {
                            modelId,
                            bridgeKey,
                            convKey,
                            category: info.category,
                            caseName: info.caseName,
                            detail: info.detail,
                        });
                        scheduleBridgeEnd(bridge);
                    }, (info) => {
                        // Unsupported exec type: record error, close bridge.
                        endStreamError = new Error(`Cursor requested unsupported exec type: ${info.execCase}`);
                        logPluginError("Closing Cursor bridge after unsupported exec", {
                            modelId,
                            bridgeKey,
                            convKey,
                            execCase: info.execCase,
                            execId: info.execId,
                            execMsgId: info.execMsgId,
                        });
                        scheduleBridgeEnd(bridge);
                    });
                }
                catch {
                    // Skip unparseable messages.
                }
            }, (endStreamBytes) => {
                endStreamError = parseConnectEndStream(endStreamBytes);
                if (endStreamError) {
                    logPluginError("Cursor stream returned Connect end-stream error", {
                        modelId,
                        bridgeKey,
                        convKey,
                        ...errorDetails(endStreamError),
                    });
                }
                scheduleBridgeEnd(bridge);
            });
            keepaliveTimer = setInterval(() => {
                try {
                    sendKeepalive();
                }
                catch (error) {
                    // Enqueue can throw once the client is gone; stop trying.
                    logPluginWarn("Failed to write SSE keepalive", {
                        modelId,
                        bridgeKey,
                        convKey,
                        ...errorDetails(error),
                    });
                    stopKeepalive();
                }
            }, SSE_KEEPALIVE_INTERVAL_MS);
            logPluginInfo("Opened Cursor streaming bridge", {
                modelId,
                bridgeKey,
                convKey,
                mcpToolCount: mcpTools.length,
                hasCloudRule: Boolean(cloudRule),
            });
            bridge.onData(processChunk);
            bridge.onClose((code) => {
                logPluginInfo("Cursor streaming bridge closed", {
                    modelId,
                    bridgeKey,
                    convKey,
                    code,
                    mcpExecReceived,
                    hadEndStreamError: Boolean(endStreamError),
                });
                bridgeCloseController.dispose();
                clearInterval(heartbeatTimer);
                stopKeepalive();
                syncStoredBlobStore(convKey, blobStore);
                if (endStreamError) {
                    activeBridges.delete(bridgeKey);
                    failStream(endStreamError.message, "cursor_bridge_closed");
                    return;
                }
                if (!mcpExecReceived) {
                    // Normal completion: flush remaining filter output, persist
                    // the transcript, finish with "stop" + usage.
                    const flushed = tagFilter.flush();
                    if (flushed.reasoning)
                        sendSSE(makeChunk({ reasoning_content: flushed.reasoning }));
                    if (flushed.content) {
                        assistantText += flushed.content;
                        sendSSE(makeChunk({ content: flushed.content }));
                    }
                    updateStoredConversationAfterCompletion(convKey, metadata, assistantText);
                    sendSSE(makeChunk({}, "stop"));
                    sendSSE(makeUsageChunk());
                    sendDone();
                    closeController();
                    return;
                }
                // An exec was emitted: the stream already ended with tool_calls.
                // A nonzero close code while still open means the bridge died.
                activeBridges.delete(bridgeKey);
                if (code !== 0 && !closed) {
                    failStream("Cursor bridge connection lost", "cursor_bridge_closed");
                }
            });
        },
        cancel(reason) {
            // Client disconnected mid-stream: tear everything down.
            bridgeCloseController.dispose();
            stopKeepalive();
            clearInterval(heartbeatTimer);
            syncStoredBlobStore(convKey, blobStore);
            const active = activeBridges.get(bridgeKey);
            if (active?.bridge === bridge) {
                activeBridges.delete(bridgeKey);
            }
            logPluginWarn("OpenCode client disconnected from Cursor SSE stream", {
                modelId,
                bridgeKey,
                convKey,
                reason: reason instanceof Error ? reason.message : String(reason ?? ""),
            });
            bridge.end();
        },
    });
    return new Response(stream, { headers: SSE_HEADERS });
}
/**
 * Open a fresh Cursor bridge for `payload` and return it as an SSE Response.
 *
 * @param payload      Built Cursor request (requestBytes, blobStore, cloudRule, mcpTools).
 * @param accessToken  Cursor access token for the bridge connection.
 * @returns Promise resolving to the SSE streaming Response.
 */
export async function handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, metadata) {
    logPluginInfo("Starting Cursor streaming response", {
        modelId,
        bridgeKey,
        convKey,
        mcpToolCount: payload.mcpTools.length,
    });
    const session = await startBridge(accessToken, payload.requestBytes);
    return createBridgeStreamResponse(session.bridge, session.heartbeatTimer, payload.blobStore, payload.cloudRule, payload.mcpTools, modelId, bridgeKey, convKey, metadata);
}
/**
 * Poll until every pending exec matching one of `toolResults` has received its
 * exec metadata (execMsgId !== 0), or until `timeoutMs` elapses.
 *
 * @returns The execs that are still unresolved (empty when all resolved in time).
 */
async function waitForResolvablePendingExecs(active, toolResults, timeoutMs = 2_000) {
    const wantedIds = new Set(toolResults.map((item) => item.toolCallId));
    // An exec is unresolved while Cursor has not yet assigned it a message id.
    const collectUnresolved = () => active.pendingExecs.filter((exec) => wantedIds.has(exec.toolCallId) && exec.execMsgId === 0);
    const deadline = Date.now() + timeoutMs;
    while (Date.now() < deadline) {
        const stillMissing = collectUnresolved();
        if (stillMissing.length === 0) {
            return stillMissing;
        }
        // Short poll; exec metadata normally arrives within a few ticks.
        await new Promise((resolve) => setTimeout(resolve, 25));
    }
    const stillMissing = collectUnresolved();
    if (stillMissing.length > 0) {
        logPluginWarn("Cursor exec metadata did not arrive before tool-result resume", {
            bridgeToolCallIds: stillMissing.map((exec) => exec.toolCallId),
            modelId: active.modelId,
        });
    }
    return stillMissing;
}
/**
 * Resume a parked Cursor bridge with OpenAI tool results.
 *
 * Waits for exec metadata to become resolvable, then sends one McpResult per
 * pending exec over the bridge (success with text content when a matching
 * tool result was supplied, error otherwise) and re-attaches the bridge to a
 * new SSE Response.
 *
 * @param active      ActiveBridge entry previously stored by the streaming path.
 * @param toolResults Tool results parsed from the follow-up OpenAI request.
 * @returns SSE Response for the resumed stream, or a 400 JSON Response when
 *          exec metadata never arrived.
 */
export async function handleToolResultResume(active, toolResults, bridgeKey, convKey) {
    const { bridge, heartbeatTimer, blobStore, cloudRule, mcpTools, pendingExecs, modelId, metadata, } = active;
    // Fold the tool-result summaries into the seed text so the resumed stream
    // can reconstruct the transcript so far.
    const resumeMetadata = {
        ...metadata,
        assistantSeedText: [
            metadata.assistantSeedText?.trim() ?? "",
            toolResults.map(formatToolResultSummary).join("\n\n"),
        ]
            .filter(Boolean)
            .join("\n\n"),
    };
    logPluginInfo("Preparing Cursor tool-result resume", {
        bridgeKey,
        convKey,
        modelId,
        toolResults,
        pendingExecs,
    });
    const unresolved = await waitForResolvablePendingExecs(active, toolResults);
    logPluginInfo("Resolved pending exec state before Cursor tool-result resume", {
        bridgeKey,
        convKey,
        modelId,
        toolResults,
        pendingExecs,
        unresolvedPendingExecs: unresolved,
    });
    if (unresolved.length > 0) {
        // Deliberate hard failure: resuming with synthetic ids is unsafe.
        clearInterval(heartbeatTimer);
        bridge.end();
        return new Response(JSON.stringify({
            error: {
                message: "Cursor requested a tool call but never provided resumable exec metadata. Aborting instead of retrying with synthetic ids.",
                type: "invalid_request_error",
                code: "cursor_missing_exec_metadata",
            },
        }), { status: 400, headers: { "Content-Type": "application/json" } });
    }
    for (const exec of pendingExecs) {
        const result = toolResults.find((toolResult) => toolResult.toolCallId === exec.toolCallId);
        // Success with a single text content item when the caller supplied a
        // result; explicit McpError otherwise.
        const mcpResult = result
            ? create(McpResultSchema, {
                result: {
                    case: "success",
                    value: create(McpSuccessSchema, {
                        content: [
                            create(McpToolResultContentItemSchema, {
                                content: {
                                    case: "text",
                                    value: create(McpTextContentSchema, {
                                        text: result.content,
                                    }),
                                },
                            }),
                        ],
                        isError: false,
                    }),
                },
            })
            : create(McpResultSchema, {
                result: {
                    case: "error",
                    value: create(McpErrorSchema, {
                        error: "Tool result not provided",
                    }),
                },
            });
        const execClientMessage = create(ExecClientMessageSchema, {
            id: exec.execMsgId,
            execId: exec.execId,
            message: {
                case: "mcpResult",
                value: mcpResult,
            },
        });
        const clientMessage = create(AgentClientMessageSchema, {
            message: { case: "execClientMessage", value: execClientMessage },
        });
        logPluginInfo("Sending Cursor tool-result resume message", {
            bridgeKey,
            convKey,
            modelId,
            toolCallId: exec.toolCallId,
            toolName: exec.toolName,
            source: exec.source,
            execId: exec.execId,
            execMsgId: exec.execMsgId,
            cursorCallId: exec.cursorCallId,
            modelCallId: exec.modelCallId,
            matchedToolResult: result,
        });
        bridge.write(toBinary(AgentClientMessageSchema, clientMessage));
    }
    // Re-attach the same bridge to a fresh SSE stream for the next turn.
    return createBridgeStreamResponse(bridge, heartbeatTimer, blobStore, cloudRule, mcpTools, modelId, bridgeKey, convKey, resumeMetadata);
}
@@ -0,0 +1,3 @@
// Barrel declarations for the Cursor bridge: session startup plus the
// streaming and non-streaming response handlers.
export { startBridge } from "./bridge-session";
export { handleStreamingResponse, handleToolResultResume, } from "./bridge-streaming";
export { handleNonStreamingResponse } from "./bridge-non-streaming";
@@ -0,0 +1,3 @@
// Barrel module for the Cursor bridge: session startup plus the streaming and
// non-streaming response handlers.
export { startBridge } from "./bridge-session";
export { handleStreamingResponse, handleToolResultResume, } from "./bridge-streaming";
export { handleNonStreamingResponse } from "./bridge-non-streaming";
@@ -0,0 +1,2 @@
import type { ChatCompletionRequest, ChatRequestContext } from "../openai/types";
/**
 * Handle an OpenAI-compatible chat-completion request against Cursor.
 * Returns a Response synchronously for validation/title short-circuits, or a
 * Promise<Response> for the streaming / non-streaming bridge paths.
 */
export declare function handleChatCompletion(body: ChatCompletionRequest, accessToken: string, context?: ChatRequestContext): Response | Promise<Response>;
@@ -0,0 +1,153 @@
1
+ import { logPluginInfo, logPluginWarn } from "../logger";
2
+ import { buildInitialHandoffPrompt, buildTitleSourceText, buildToolResumePrompt, detectTitleRequest, parseMessages, } from "../openai/messages";
3
+ import { buildMcpToolDefinitions, selectToolsForChoice } from "../openai/tools";
4
+ import { activeBridges, conversationStates, createStoredConversation, deriveBridgeKey, deriveConversationKey, evictStaleConversations, hashString, normalizeAgentKey, resetStoredConversation, } from "./conversation-state";
5
+ import { buildCursorRequest } from "./cursor-request";
6
+ import { handleNonStreamingResponse, handleStreamingResponse, handleToolResultResume, } from "./bridge";
7
+ import { handleTitleGenerationRequest } from "./title";
/**
 * Core OpenAI-compatible chat-completion handler.
 *
 * Routes a request to one of three paths:
 *  1. title-generation requests (detected from the body),
 *  2. tool-result resumption on a still-live parked Cursor bridge,
 *  3. a fresh Cursor run (streaming or non-streaming), after reconciling
 *     stored conversation state (checkpoint, fingerprints, blob store).
 *
 * @param body        OpenAI ChatCompletionRequest payload.
 * @param accessToken Cursor access token used to open the bridge.
 * @param context     Optional session/agent identifiers used to scope keys.
 * @returns Response (validation errors) or Promise<Response> (bridge paths).
 */
export function handleChatCompletion(body, accessToken, context = {}) {
    const parsed = parseMessages(body.messages);
    const { systemPrompt, userText, turns, toolResults, pendingAssistantSummary, completedTurnsFingerprint, } = parsed;
    const modelId = body.model;
    const normalizedAgentKey = normalizeAgentKey(context.agentKey);
    logPluginInfo("Handling Cursor chat completion request", {
        modelId,
        stream: body.stream !== false,
        messageCount: body.messages.length,
        toolCount: body.tools?.length ?? 0,
        toolChoice: body.tool_choice,
        sessionId: context.sessionId,
        agentKey: normalizedAgentKey,
        parsedUserText: userText,
        parsedToolResults: toolResults,
        hasPendingAssistantSummary: pendingAssistantSummary.trim().length > 0,
        turnCount: turns.length,
    });
    // Title requests are answered by a dedicated lightweight path.
    const titleDetection = detectTitleRequest(body);
    const isTitleAgent = titleDetection.matched;
    if (isTitleAgent) {
        const titleSourceText = buildTitleSourceText(userText, turns, pendingAssistantSummary, toolResults);
        if (!titleSourceText) {
            return new Response(JSON.stringify({
                error: {
                    message: "No title source text found",
                    type: "invalid_request_error",
                },
            }), { status: 400, headers: { "Content-Type": "application/json" } });
        }
        return handleTitleGenerationRequest(titleSourceText, accessToken, modelId, body.stream !== false);
    }
    const tools = selectToolsForChoice(body.tools ?? [], body.tool_choice);
    if (!userText && toolResults.length === 0) {
        return new Response(JSON.stringify({
            error: {
                message: "No user message found",
                type: "invalid_request_error",
            },
        }), { status: 400, headers: { "Content-Type": "application/json" } });
    }
    // bridgeKey: session/agent-scoped, for active tool-call bridges
    // convKey: model-independent, for conversation state that survives model switches
    const bridgeKey = deriveBridgeKey(modelId, body.messages, context.sessionId, context.agentKey);
    const convKey = deriveConversationKey(body.messages, context.sessionId, context.agentKey);
    const activeBridge = activeBridges.get(bridgeKey);
    logPluginInfo("Resolved Cursor conversation keys", {
        modelId,
        bridgeKey,
        convKey,
        hasActiveBridge: Boolean(activeBridge),
        sessionId: context.sessionId,
        agentKey: normalizedAgentKey,
    });
    if (activeBridge && toolResults.length > 0) {
        logPluginInfo("Matched OpenAI tool results to active Cursor bridge", {
            bridgeKey,
            convKey,
            requestedModelId: modelId,
            activeBridgeModelId: activeBridge.modelId,
            toolResults,
            pendingExecs: activeBridge.pendingExecs,
        });
        activeBridges.delete(bridgeKey);
        if (activeBridge.bridge.alive) {
            if (activeBridge.modelId !== modelId) {
                // Tool calls must resume on the bridge (and model) that issued them.
                logPluginWarn("Resuming pending Cursor tool call on original model after model switch", {
                    requestedModelId: modelId,
                    resumedModelId: activeBridge.modelId,
                    convKey,
                    bridgeKey,
                });
            }
            // Resume the live bridge with tool results
            return handleToolResultResume(activeBridge, toolResults, bridgeKey, convKey);
        }
        // Bridge died (timeout, server disconnect, etc.).
        // Clean up and fall through to start a fresh bridge.
        clearInterval(activeBridge.heartbeatTimer);
        activeBridge.bridge.end();
    }
    // Clean up stale bridge if present
    if (activeBridge && activeBridges.has(bridgeKey)) {
        clearInterval(activeBridge.heartbeatTimer);
        activeBridge.bridge.end();
        activeBridges.delete(bridgeKey);
    }
    let stored = conversationStates.get(convKey);
    if (!stored) {
        stored = createStoredConversation();
        conversationStates.set(convKey, stored);
    }
    // Reset the stored checkpoint when the system prompt changed or the
    // completed-turn history no longer matches what we replayed to Cursor.
    const systemPromptHash = hashString(systemPrompt);
    if (stored.checkpoint &&
        (stored.systemPromptHash !== systemPromptHash ||
            (turns.length > 0 &&
                stored.completedTurnsFingerprint !== completedTurnsFingerprint))) {
        resetStoredConversation(stored);
    }
    stored.systemPromptHash = systemPromptHash;
    stored.completedTurnsFingerprint = completedTurnsFingerprint;
    stored.lastAccessMs = Date.now();
    evictStaleConversations();
    // Build the request. When tool results are present but the bridge died,
    // we must still include the last user text so Cursor has context.
    const mcpTools = buildMcpToolDefinitions(tools);
    const hasPendingAssistantSummary = pendingAssistantSummary.trim().length > 0;
    // No checkpoint but existing history: hand the whole conversation to
    // Cursor as a single prompt rather than replaying turns.
    const needsInitialHandoff = !stored.checkpoint &&
        (turns.length > 0 || hasPendingAssistantSummary || toolResults.length > 0);
    const replayTurns = needsInitialHandoff ? [] : turns;
    let effectiveUserText = needsInitialHandoff
        ? buildInitialHandoffPrompt(userText, turns, pendingAssistantSummary, toolResults)
        : toolResults.length > 0 || hasPendingAssistantSummary
            ? buildToolResumePrompt(userText, pendingAssistantSummary, toolResults)
            : userText;
    const payload = buildCursorRequest(modelId, systemPrompt, effectiveUserText, replayTurns, stored.conversationId, stored.checkpoint, stored.blobStore);
    payload.mcpTools = mcpTools;
    logPluginInfo("Built Cursor run request payload", {
        modelId,
        bridgeKey,
        convKey,
        mcpToolCount: mcpTools.length,
        conversationId: stored.conversationId,
        hasCheckpoint: Boolean(stored.checkpoint),
        replayTurnCount: replayTurns.length,
        effectiveUserText,
    });
    if (body.stream === false) {
        return handleNonStreamingResponse(payload, accessToken, modelId, convKey, {
            systemPrompt,
            systemPromptHash,
            completedTurnsFingerprint,
            turns,
            userText,
            agentKey: normalizedAgentKey,
        });
    }
    return handleStreamingResponse(payload, accessToken, modelId, bridgeKey, convKey, {
        systemPrompt,
        systemPromptHash,
        completedTurnsFingerprint,
        turns,
        userText,
        agentKey: normalizedAgentKey,
    });
}
@@ -0,0 +1,12 @@
/**
 * Request-derived metadata carried alongside a conversation so a completed
 * turn can update stored conversation state.
 */
export interface ConversationRequestMetadata {
    systemPrompt: string;
    /** Hash of systemPrompt, used to detect prompt changes between requests. */
    systemPromptHash: string;
    completedTurnsFingerprint: string;
    /** Completed user/assistant exchanges parsed from the OpenAI message list. */
    turns: Array<{
        userText: string;
        assistantText: string;
    }>;
    /** Latest user message text for the in-flight turn. */
    userText: string;
    /** Seed text for the assistant transcript when resuming after tool calls. */
    assistantSeedText?: string;
    /** Normalized agent identifier — presumably from the OpenCode session; verify against caller. */
    agentKey?: string;
}
@@ -0,0 +1 @@
// Type-only module (see conversation-meta.d.ts); emits an empty ES module.
export {};
@@ -0,0 +1,35 @@
import type { OpenAIMessage } from "../openai/types";
import type { ActiveBridge } from "./types";
/** Live Cursor bridges parked mid tool-call, keyed by bridge key. */
export declare const activeBridges: Map<string, ActiveBridge>;
/** Per-conversation Cursor state that persists across requests. */
export interface StoredConversation {
    conversationId: string;
    /** Latest Cursor checkpoint bytes, or null before the first completion. */
    checkpoint: Uint8Array | null;
    blobStore: Map<string, Uint8Array>;
    /** Last-access timestamp (ms) used by evictStaleConversations. */
    lastAccessMs: number;
    systemPromptHash: string;
    completedTurnsFingerprint: string;
}
export declare const conversationStates: Map<string, StoredConversation>;
export declare function evictStaleConversations(): void;
export declare function normalizeAgentKey(agentKey?: string): string;
export declare function hashString(value: string): string;
export declare function createStoredConversation(): StoredConversation;
export declare function resetStoredConversation(stored: StoredConversation): void;
/** Derive a key for active tool-call bridges; includes the model id, so it is session/agent/model scoped. */
export declare function deriveBridgeKey(modelId: string, messages: OpenAIMessage[], sessionId?: string, agentKey?: string): string;
/** Derive a key for conversation state. Model-independent so context survives model switches. */
export declare function deriveConversationKey(messages: OpenAIMessage[], sessionId?: string, agentKey?: string): string;
export declare function buildConversationFingerprint(messages: OpenAIMessage[]): string;
// NOTE(review): duplicates the exported ConversationRequestMetadata in
// ./conversation-meta — consider importing that declaration instead.
interface ConversationRequestMetadata {
    systemPrompt: string;
    systemPromptHash: string;
    completedTurnsFingerprint: string;
    turns: Array<{
        userText: string;
        assistantText: string;
    }>;
    userText: string;
    assistantSeedText?: string;
    agentKey?: string;
}
export declare function updateStoredConversationAfterCompletion(convKey: string, metadata: ConversationRequestMetadata, assistantText: string): void;
export {};