@playwo/opencode-cursor-oauth 0.0.0-dev.2b58f52bd11a → 0.0.0-dev.2c48be2f48c9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/auth.js +1 -2
- package/dist/constants.d.ts +2 -0
- package/dist/constants.js +2 -0
- package/dist/cursor/bidi-session.d.ts +13 -0
- package/dist/cursor/bidi-session.js +149 -0
- package/dist/cursor/config.d.ts +4 -0
- package/dist/cursor/config.js +4 -0
- package/dist/cursor/connect-framing.d.ts +10 -0
- package/dist/cursor/connect-framing.js +80 -0
- package/dist/cursor/headers.d.ts +6 -0
- package/dist/cursor/headers.js +16 -0
- package/dist/cursor/index.d.ts +5 -0
- package/dist/cursor/index.js +5 -0
- package/dist/cursor/unary-rpc.d.ts +13 -0
- package/dist/cursor/unary-rpc.js +181 -0
- package/dist/index.d.ts +2 -14
- package/dist/index.js +2 -306
- package/dist/logger.js +7 -2
- package/dist/models.js +1 -23
- package/dist/openai/index.d.ts +3 -0
- package/dist/openai/index.js +3 -0
- package/dist/openai/messages.d.ts +39 -0
- package/dist/openai/messages.js +223 -0
- package/dist/openai/tools.d.ts +7 -0
- package/dist/openai/tools.js +58 -0
- package/dist/openai/types.d.ts +41 -0
- package/dist/openai/types.js +1 -0
- package/dist/plugin/cursor-auth-plugin.d.ts +3 -0
- package/dist/plugin/cursor-auth-plugin.js +140 -0
- package/dist/proto/agent_pb.js +637 -319
- package/dist/provider/index.d.ts +2 -0
- package/dist/provider/index.js +2 -0
- package/dist/provider/model-cost.d.ts +9 -0
- package/dist/provider/model-cost.js +206 -0
- package/dist/provider/models.d.ts +8 -0
- package/dist/provider/models.js +86 -0
- package/dist/proxy/bridge-non-streaming.d.ts +3 -0
- package/dist/proxy/bridge-non-streaming.js +107 -0
- package/dist/proxy/bridge-session.d.ts +5 -0
- package/dist/proxy/bridge-session.js +13 -0
- package/dist/proxy/bridge-streaming.d.ts +5 -0
- package/dist/proxy/bridge-streaming.js +298 -0
- package/dist/proxy/bridge.d.ts +3 -0
- package/dist/proxy/bridge.js +3 -0
- package/dist/proxy/chat-completion.d.ts +2 -0
- package/dist/proxy/chat-completion.js +113 -0
- package/dist/proxy/conversation-meta.d.ts +12 -0
- package/dist/proxy/conversation-meta.js +1 -0
- package/dist/proxy/conversation-state.d.ts +35 -0
- package/dist/proxy/conversation-state.js +95 -0
- package/dist/proxy/cursor-request.d.ts +5 -0
- package/dist/proxy/cursor-request.js +86 -0
- package/dist/proxy/index.d.ts +12 -0
- package/dist/proxy/index.js +12 -0
- package/dist/proxy/server.d.ts +6 -0
- package/dist/proxy/server.js +89 -0
- package/dist/proxy/sse.d.ts +5 -0
- package/dist/proxy/sse.js +5 -0
- package/dist/proxy/state-sync.d.ts +2 -0
- package/dist/proxy/state-sync.js +17 -0
- package/dist/proxy/stream-dispatch.d.ts +37 -0
- package/dist/proxy/stream-dispatch.js +401 -0
- package/dist/proxy/stream-state.d.ts +7 -0
- package/dist/proxy/stream-state.js +1 -0
- package/dist/proxy/title.d.ts +1 -0
- package/dist/proxy/title.js +103 -0
- package/dist/proxy/types.d.ts +27 -0
- package/dist/proxy/types.js +1 -0
- package/dist/proxy.d.ts +2 -20
- package/dist/proxy.js +2 -1852
- package/package.json +1 -1
|
@@ -0,0 +1,206 @@
|
|
|
1
|
+
// USD cost per million tokens for known Cursor model ids.
// `input`/`output` are prompt/completion rates; `cache.read`/`cache.write`
// are the prompt-cache rates (write: 0 = no separate cache-write billing).
const MODEL_COST_TABLE = {
    "claude-4-sonnet": { input: 3, output: 15, cache: { read: 0.3, write: 3.75 } },
    "claude-4-sonnet-1m": { input: 6, output: 22.5, cache: { read: 0.6, write: 7.5 } },
    "claude-4.5-haiku": { input: 1, output: 5, cache: { read: 0.1, write: 1.25 } },
    "claude-4.5-opus": { input: 5, output: 25, cache: { read: 0.5, write: 6.25 } },
    "claude-4.5-sonnet": { input: 3, output: 15, cache: { read: 0.3, write: 3.75 } },
    "claude-4.6-opus": { input: 5, output: 25, cache: { read: 0.5, write: 6.25 } },
    "claude-4.6-opus-fast": { input: 30, output: 150, cache: { read: 3, write: 37.5 } },
    "claude-4.6-sonnet": { input: 3, output: 15, cache: { read: 0.3, write: 3.75 } },
    "composer-1": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "composer-1.5": { input: 3.5, output: 17.5, cache: { read: 0.35, write: 0 } },
    "composer-2": { input: 0.5, output: 2.5, cache: { read: 0.2, write: 0 } },
    "composer-2-fast": { input: 1.5, output: 7.5, cache: { read: 0.2, write: 0 } },
    "gemini-2.5-flash": { input: 0.3, output: 2.5, cache: { read: 0.03, write: 0 } },
    "gemini-3-flash": { input: 0.5, output: 3, cache: { read: 0.05, write: 0 } },
    "gemini-3-pro": { input: 2, output: 12, cache: { read: 0.2, write: 0 } },
    "gemini-3-pro-image": { input: 2, output: 12, cache: { read: 0.2, write: 0 } },
    "gemini-3.1-pro": { input: 2, output: 12, cache: { read: 0.2, write: 0 } },
    "gpt-5": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "gpt-5-fast": { input: 2.5, output: 20, cache: { read: 0.25, write: 0 } },
    "gpt-5-mini": { input: 0.25, output: 2, cache: { read: 0.025, write: 0 } },
    "gpt-5-codex": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "gpt-5.1-codex": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "gpt-5.1-codex-max": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "gpt-5.1-codex-mini": { input: 0.25, output: 2, cache: { read: 0.025, write: 0 } },
    "gpt-5.2": { input: 1.75, output: 14, cache: { read: 0.175, write: 0 } },
    "gpt-5.2-codex": { input: 1.75, output: 14, cache: { read: 0.175, write: 0 } },
    "gpt-5.3-codex": { input: 1.75, output: 14, cache: { read: 0.175, write: 0 } },
    "gpt-5.4": { input: 2.5, output: 15, cache: { read: 0.25, write: 0 } },
    "gpt-5.4-mini": { input: 0.75, output: 4.5, cache: { read: 0.075, write: 0 } },
    "gpt-5.4-nano": { input: 0.2, output: 1.25, cache: { read: 0.02, write: 0 } },
    "grok-4.20": { input: 2, output: 6, cache: { read: 0.2, write: 0 } },
    "kimi-k2.5": { input: 0.6, output: 3, cache: { read: 0.1, write: 0 } },
};
// Fallback matchers for ids not present in the table verbatim.
// Order matters: more specific patterns must precede more general ones
// (e.g. "claude…opus…fast" before "claude…opus" before bare "claude").
const MODEL_COST_PATTERNS = [
    { match: (id) => /claude.*opus.*fast/i.test(id), cost: MODEL_COST_TABLE["claude-4.6-opus-fast"] },
    { match: (id) => /claude.*opus/i.test(id), cost: MODEL_COST_TABLE["claude-4.6-opus"] },
    { match: (id) => /claude.*haiku/i.test(id), cost: MODEL_COST_TABLE["claude-4.5-haiku"] },
    { match: (id) => /claude.*sonnet/i.test(id), cost: MODEL_COST_TABLE["claude-4.6-sonnet"] },
    { match: (id) => /claude/i.test(id), cost: MODEL_COST_TABLE["claude-4.6-sonnet"] },
    { match: (id) => /composer-?2/i.test(id), cost: MODEL_COST_TABLE["composer-2"] },
    { match: (id) => /composer-?1\.5/i.test(id), cost: MODEL_COST_TABLE["composer-1.5"] },
    { match: (id) => /composer/i.test(id), cost: MODEL_COST_TABLE["composer-1"] },
    { match: (id) => /gpt-5\.4.*nano/i.test(id), cost: MODEL_COST_TABLE["gpt-5.4-nano"] },
    { match: (id) => /gpt-5\.4.*mini/i.test(id), cost: MODEL_COST_TABLE["gpt-5.4-mini"] },
    { match: (id) => /gpt-5\.4/i.test(id), cost: MODEL_COST_TABLE["gpt-5.4"] },
    { match: (id) => /gpt-5\.3/i.test(id), cost: MODEL_COST_TABLE["gpt-5.3-codex"] },
    { match: (id) => /gpt-5\.2/i.test(id), cost: MODEL_COST_TABLE["gpt-5.2"] },
    { match: (id) => /gpt-5\.1.*mini/i.test(id), cost: MODEL_COST_TABLE["gpt-5.1-codex-mini"] },
    { match: (id) => /gpt-5\.1/i.test(id), cost: MODEL_COST_TABLE["gpt-5.1-codex"] },
    { match: (id) => /gpt-5.*mini/i.test(id), cost: MODEL_COST_TABLE["gpt-5-mini"] },
    { match: (id) => /gpt-5.*fast/i.test(id), cost: MODEL_COST_TABLE["gpt-5-fast"] },
    { match: (id) => /gpt-5/i.test(id), cost: MODEL_COST_TABLE["gpt-5"] },
    { match: (id) => /gemini.*3\.1/i.test(id), cost: MODEL_COST_TABLE["gemini-3.1-pro"] },
    { match: (id) => /gemini.*3.*flash/i.test(id), cost: MODEL_COST_TABLE["gemini-3-flash"] },
    { match: (id) => /gemini.*3/i.test(id), cost: MODEL_COST_TABLE["gemini-3-pro"] },
    { match: (id) => /gemini.*flash/i.test(id), cost: MODEL_COST_TABLE["gemini-2.5-flash"] },
    { match: (id) => /gemini/i.test(id), cost: MODEL_COST_TABLE["gemini-3.1-pro"] },
    { match: (id) => /grok/i.test(id), cost: MODEL_COST_TABLE["grok-4.20"] },
    { match: (id) => /kimi/i.test(id), cost: MODEL_COST_TABLE["kimi-k2.5"] },
];
// Used when neither the table nor any pattern matches.
const DEFAULT_COST = {
    input: 3,
    output: 15,
    cache: { read: 0.3, write: 0 },
};
// Variant suffixes that do not change pricing. The `+` quantifier strips
// STACKED suffixes (e.g. "-thinking-high") in one pass; the previous
// /-(...)$/g form could only ever remove the final suffix because of the
// `$` anchor, so "composer-2-fast-thinking-high" missed its exact entry.
const VARIANT_SUFFIX_RE = /(?:-(?:high|medium|low|preview|thinking|spark-preview))+$/;
/**
 * Estimate per-million-token pricing for a Cursor model id.
 *
 * Resolution order:
 *   1. exact (case-insensitive) table lookup;
 *   2. table lookup after stripping variant suffixes ("-high", "-thinking", ...);
 *   3. first matching fallback pattern;
 *   4. DEFAULT_COST.
 *
 * Table lookups use an own-property check so ids colliding with
 * Object.prototype keys (e.g. "constructor") fall through to the default
 * instead of returning a prototype member.
 *
 * @param {string} modelId - Cursor model identifier, any casing.
 * @returns {{input: number, output: number, cache: {read: number, write: number}}}
 */
export function estimateModelCost(modelId) {
    const normalized = modelId.toLowerCase();
    if (Object.prototype.hasOwnProperty.call(MODEL_COST_TABLE, normalized)) {
        return MODEL_COST_TABLE[normalized];
    }
    const stripped = normalized.replace(VARIANT_SUFFIX_RE, "");
    if (Object.prototype.hasOwnProperty.call(MODEL_COST_TABLE, stripped)) {
        return MODEL_COST_TABLE[stripped];
    }
    return (MODEL_COST_PATTERNS.find((pattern) => pattern.match(normalized))?.cost ??
        DEFAULT_COST);
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import type { CursorModel } from "../models";
/** Minimal shape of a provider entry that carries an opencode model map. */
export interface ProviderWithModels {
    models?: Record<string, unknown>;
}
/** Assigns `models` onto `provider` when it is a non-null object; no-op otherwise. */
export declare function setProviderModels(provider: unknown, models: Record<string, unknown>): void;
/** Builds the opencode model-config map (keyed by model id) for the given Cursor models, routing each through the local proxy on `port`. */
export declare function buildCursorProviderModels(models: CursorModel[], port: number): Record<string, unknown>;
/** Provider config whose `fetch` always answers locally with a 503 JSON error carrying `message` (used when model discovery fails). */
export declare function buildDisabledProviderConfig(message: string): Record<string, unknown>;
/** Removes any `authorization` header from `init.headers`; mutates `init` in place and returns it. */
export declare function stripAuthorizationHeader(init?: RequestInit): RequestInit | undefined;
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import { CURSOR_PROVIDER_ID } from "../constants";
|
|
2
|
+
import { estimateModelCost } from "./model-cost";
|
|
3
|
+
export function setProviderModels(provider, models) {
|
|
4
|
+
if (!provider || typeof provider !== "object")
|
|
5
|
+
return;
|
|
6
|
+
provider.models = models;
|
|
7
|
+
}
|
|
8
|
+
/**
 * Build the opencode model-config map for a set of Cursor models.
 * Each entry is keyed by model id and routes requests through the local
 * OpenAI-compatible proxy listening on `port`.
 */
export function buildCursorProviderModels(models, port) {
    const configs = {};
    for (const model of models) {
        configs[model.id] = buildModelEntry(model, port);
    }
    return configs;
}
// One opencode model-config entry for a single Cursor model.
function buildModelEntry(model, port) {
    // Cursor models currently only exchange text in both directions.
    const textOnly = { text: true, audio: false, image: false, video: false, pdf: false };
    return {
        id: model.id,
        providerID: CURSOR_PROVIDER_ID,
        api: {
            id: model.id,
            url: `http://localhost:${port}/v1`,
            npm: "@ai-sdk/openai-compatible",
        },
        name: model.name,
        capabilities: {
            temperature: true,
            reasoning: model.reasoning,
            attachment: false,
            toolcall: true,
            input: { ...textOnly },
            output: { ...textOnly },
            interleaved: false,
        },
        cost: estimateModelCost(model.id),
        limit: {
            context: model.contextWindow,
            output: model.maxTokens,
        },
        status: "active",
        options: {},
        headers: {},
        release_date: "",
        variants: {},
    };
}
|
|
54
|
+
export function buildDisabledProviderConfig(message) {
|
|
55
|
+
return {
|
|
56
|
+
baseURL: "http://127.0.0.1/cursor-disabled/v1",
|
|
57
|
+
apiKey: "cursor-disabled",
|
|
58
|
+
async fetch() {
|
|
59
|
+
return new Response(JSON.stringify({
|
|
60
|
+
error: {
|
|
61
|
+
message,
|
|
62
|
+
type: "server_error",
|
|
63
|
+
code: "cursor_model_discovery_failed",
|
|
64
|
+
},
|
|
65
|
+
}), {
|
|
66
|
+
status: 503,
|
|
67
|
+
headers: { "Content-Type": "application/json" },
|
|
68
|
+
});
|
|
69
|
+
},
|
|
70
|
+
};
|
|
71
|
+
}
|
|
72
|
+
export function stripAuthorizationHeader(init) {
|
|
73
|
+
if (!init?.headers)
|
|
74
|
+
return init;
|
|
75
|
+
if (init.headers instanceof Headers) {
|
|
76
|
+
init.headers.delete("authorization");
|
|
77
|
+
return init;
|
|
78
|
+
}
|
|
79
|
+
if (Array.isArray(init.headers)) {
|
|
80
|
+
init.headers = init.headers.filter(([key]) => key.toLowerCase() !== "authorization");
|
|
81
|
+
return init;
|
|
82
|
+
}
|
|
83
|
+
delete init.headers["authorization"];
|
|
84
|
+
delete init.headers["Authorization"];
|
|
85
|
+
return init;
|
|
86
|
+
}
|
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
import type { ConversationRequestMetadata } from "./conversation-meta";
import type { CursorRequestPayload } from "./types";
/**
 * Runs a Cursor request to completion and returns one non-streamed OpenAI
 * `chat.completion` JSON response. The returned promise rejects when Cursor
 * reports a Connect end-stream error or requests an unsupported exec type.
 */
export declare function handleNonStreamingResponse(payload: CursorRequestPayload, accessToken: string, modelId: string, convKey: string, metadata: ConversationRequestMetadata): Promise<Response>;
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
import { fromBinary } from "@bufbuild/protobuf";
|
|
2
|
+
import { AgentServerMessageSchema } from "../proto/agent_pb";
|
|
3
|
+
import { errorDetails, logPluginError } from "../logger";
|
|
4
|
+
import { updateStoredConversationAfterCompletion } from "./conversation-state";
|
|
5
|
+
import { startBridge } from "./bridge-session";
|
|
6
|
+
import { updateConversationCheckpoint, syncStoredBlobStore, } from "./state-sync";
|
|
7
|
+
import { computeUsage, createConnectFrameParser, createThinkingTagFilter, parseConnectEndStream, processServerMessage, scheduleBridgeEnd, } from "./stream-dispatch";
|
|
8
|
+
/**
 * Drive one Cursor request to completion and wrap the result as a single
 * OpenAI-style `chat.completion` JSON response (no SSE streaming).
 */
export async function handleNonStreamingResponse(payload, accessToken, modelId, convKey, metadata) {
    // OpenAI-style completion id: "chatcmpl-" + 28 hex-ish chars.
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    const result = await collectFullResponse(payload, accessToken, modelId, convKey, metadata);
    // Tool-call turns carry no text content; plain turns carry the full text.
    const message = result.finishReason === "tool_calls"
        ? { role: "assistant", content: null, tool_calls: result.toolCalls }
        : { role: "assistant", content: result.text };
    const body = {
        id: completionId,
        object: "chat.completion",
        created,
        model: modelId,
        choices: [
            {
                index: 0,
                message,
                finish_reason: result.finishReason,
            },
        ],
        usage: result.usage,
    };
    return new Response(JSON.stringify(body), { headers: { "Content-Type": "application/json" } });
}
|
|
30
|
+
// Opens a Cursor bridge for one request and buffers the entire response:
// accumulates assistant text, collects client-side tool calls, tracks token
// usage, and settles the returned promise when the bridge closes.
// Rejects on a Connect end-stream error or an unsupported exec request.
async function collectFullResponse(payload, accessToken, modelId, convKey, metadata) {
    const { promise, resolve, reject } = Promise.withResolvers();
    let fullText = "";
    let endStreamError = null;
    const pendingToolCalls = [];
    // Bridge starts sending frames immediately; heartbeatTimer keeps it alive
    // and is cleared in onClose below.
    const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
    // Mutable per-stream state shared with processServerMessage/computeUsage.
    const state = {
        toolCallIndex: 0,
        pendingExecs: [],
        outputTokens: 0,
        totalTokens: 0,
    };
    // Strips <think>-style tags from assistant text before buffering.
    const tagFilter = createThinkingTagFilter();
    // Each Connect frame carries one protobuf AgentServerMessage.
    bridge.onData(createConnectFrameParser((messageBytes) => {
        try {
            const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
            processServerMessage(serverMessage, payload.blobStore, payload.mcpTools, (data) => bridge.write(data), state, (text, isThinking) => {
                // Text callback: drop thinking text; buffer filtered content.
                if (isThinking)
                    return;
                const { content } = tagFilter.process(text);
                fullText += content;
            }, (exec) => {
                // Client tool-call callback: record it and end the bridge —
                // tool results come back in a separate request.
                pendingToolCalls.push({
                    id: exec.toolCallId,
                    type: "function",
                    function: {
                        name: exec.toolName,
                        arguments: exec.decodedArgs,
                    },
                });
                scheduleBridgeEnd(bridge);
            }, (checkpointBytes) => updateConversationCheckpoint(convKey, checkpointBytes), (info) => {
                // Unsupported-exec callback: fail the whole request.
                endStreamError = new Error(`Cursor requested unsupported exec type: ${info.execCase}`);
                logPluginError("Closing non-streaming Cursor bridge after unsupported exec", {
                    modelId,
                    convKey,
                    execCase: info.execCase,
                    execId: info.execId,
                    execMsgId: info.execMsgId,
                });
                scheduleBridgeEnd(bridge);
            });
        }
        catch {
            // Skip unparseable messages.
        }
    }, (endStreamBytes) => {
        // Connect end-stream frame: may carry a structured error.
        endStreamError = parseConnectEndStream(endStreamBytes);
        if (endStreamError) {
            logPluginError("Cursor non-streaming response returned Connect end-stream error", {
                modelId,
                convKey,
                ...errorDetails(endStreamError),
            });
        }
        scheduleBridgeEnd(bridge);
    }));
    // Close handler settles the promise exactly once.
    bridge.onClose(() => {
        clearInterval(heartbeatTimer);
        syncStoredBlobStore(convKey, payload.blobStore);
        // Flush any text still buffered inside the thinking-tag filter.
        const flushed = tagFilter.flush();
        fullText += flushed.content;
        if (endStreamError) {
            reject(endStreamError);
            return;
        }
        // Only persist the conversation when the turn fully completed
        // (tool-call turns continue in a follow-up request).
        if (pendingToolCalls.length === 0) {
            updateStoredConversationAfterCompletion(convKey, metadata, fullText);
        }
        resolve({
            text: fullText,
            usage: computeUsage(state),
            finishReason: pendingToolCalls.length > 0 ? "tool_calls" : "stop",
            toolCalls: pendingToolCalls,
        });
    });
    return promise;
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { createCursorSession } from "../cursor/bidi-session";
|
|
2
|
+
import { makeHeartbeatBytes } from "./stream-dispatch";
|
|
3
|
+
// Interval between keepalive frames on the Cursor bridge.
const HEARTBEAT_INTERVAL_MS = 5_000;
/**
 * Open a bidirectional Cursor session, send the initial request bytes, and
 * start the heartbeat. The caller owns `heartbeatTimer` and must clear it
 * once the bridge closes.
 */
export async function startBridge(accessToken, requestBytes) {
    const bridge = await createCursorSession({
        accessToken,
        requestId: crypto.randomUUID(),
    });
    bridge.write(requestBytes);
    const sendHeartbeat = () => bridge.write(makeHeartbeatBytes());
    const heartbeatTimer = setInterval(sendHeartbeat, HEARTBEAT_INTERVAL_MS);
    return { bridge, heartbeatTimer };
}
|
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
import { type ToolResultInfo } from "../openai/messages";
import type { ConversationRequestMetadata } from "./conversation-meta";
import type { ActiveBridge, CursorRequestPayload } from "./types";
/**
 * Streaming counterpart of `handleNonStreamingResponse`.
 * NOTE(review): implementation (bridge-streaming.js) is not visible in this
 * chunk — presumably returns an SSE `Response`; confirm against the source.
 */
export declare function handleStreamingResponse(payload: CursorRequestPayload, accessToken: string, modelId: string, bridgeKey: string, convKey: string, metadata: ConversationRequestMetadata): Promise<Response>;
/**
 * Resumes an already-active bridge with tool results from the client.
 * NOTE(review): implementation not visible in this chunk — verify behavior
 * against bridge-streaming.js.
 */
export declare function handleToolResultResume(active: ActiveBridge, toolResults: ToolResultInfo[], bridgeKey: string, convKey: string): Response;
|