@playwo/opencode-cursor-oauth 0.0.0-dev.e3644b4a140d → 0.0.0-dev.eb79b6283515

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71) hide show
  1. package/dist/auth.js +1 -2
  2. package/dist/constants.d.ts +2 -0
  3. package/dist/constants.js +2 -0
  4. package/dist/cursor/bidi-session.d.ts +12 -0
  5. package/dist/cursor/bidi-session.js +164 -0
  6. package/dist/cursor/config.d.ts +4 -0
  7. package/dist/cursor/config.js +4 -0
  8. package/dist/cursor/connect-framing.d.ts +10 -0
  9. package/dist/cursor/connect-framing.js +80 -0
  10. package/dist/cursor/headers.d.ts +6 -0
  11. package/dist/cursor/headers.js +16 -0
  12. package/dist/cursor/index.d.ts +5 -0
  13. package/dist/cursor/index.js +5 -0
  14. package/dist/cursor/unary-rpc.d.ts +12 -0
  15. package/dist/cursor/unary-rpc.js +124 -0
  16. package/dist/index.d.ts +2 -14
  17. package/dist/index.js +2 -306
  18. package/dist/logger.js +7 -2
  19. package/dist/models.js +1 -23
  20. package/dist/openai/index.d.ts +3 -0
  21. package/dist/openai/index.js +3 -0
  22. package/dist/openai/messages.d.ts +39 -0
  23. package/dist/openai/messages.js +228 -0
  24. package/dist/openai/tools.d.ts +7 -0
  25. package/dist/openai/tools.js +58 -0
  26. package/dist/openai/types.d.ts +41 -0
  27. package/dist/openai/types.js +1 -0
  28. package/dist/plugin/cursor-auth-plugin.d.ts +3 -0
  29. package/dist/plugin/cursor-auth-plugin.js +139 -0
  30. package/dist/proto/agent_pb.js +637 -319
  31. package/dist/provider/index.d.ts +2 -0
  32. package/dist/provider/index.js +2 -0
  33. package/dist/provider/model-cost.d.ts +9 -0
  34. package/dist/provider/model-cost.js +206 -0
  35. package/dist/provider/models.d.ts +8 -0
  36. package/dist/provider/models.js +86 -0
  37. package/dist/proxy/bridge-non-streaming.d.ts +3 -0
  38. package/dist/proxy/bridge-non-streaming.js +119 -0
  39. package/dist/proxy/bridge-session.d.ts +5 -0
  40. package/dist/proxy/bridge-session.js +11 -0
  41. package/dist/proxy/bridge-streaming.d.ts +5 -0
  42. package/dist/proxy/bridge-streaming.js +317 -0
  43. package/dist/proxy/bridge.d.ts +3 -0
  44. package/dist/proxy/bridge.js +3 -0
  45. package/dist/proxy/chat-completion.d.ts +2 -0
  46. package/dist/proxy/chat-completion.js +114 -0
  47. package/dist/proxy/conversation-meta.d.ts +12 -0
  48. package/dist/proxy/conversation-meta.js +1 -0
  49. package/dist/proxy/conversation-state.d.ts +35 -0
  50. package/dist/proxy/conversation-state.js +95 -0
  51. package/dist/proxy/cursor-request.d.ts +6 -0
  52. package/dist/proxy/cursor-request.js +104 -0
  53. package/dist/proxy/index.d.ts +12 -0
  54. package/dist/proxy/index.js +12 -0
  55. package/dist/proxy/server.d.ts +6 -0
  56. package/dist/proxy/server.js +107 -0
  57. package/dist/proxy/sse.d.ts +5 -0
  58. package/dist/proxy/sse.js +5 -0
  59. package/dist/proxy/state-sync.d.ts +2 -0
  60. package/dist/proxy/state-sync.js +17 -0
  61. package/dist/proxy/stream-dispatch.d.ts +42 -0
  62. package/dist/proxy/stream-dispatch.js +619 -0
  63. package/dist/proxy/stream-state.d.ts +9 -0
  64. package/dist/proxy/stream-state.js +1 -0
  65. package/dist/proxy/title.d.ts +1 -0
  66. package/dist/proxy/title.js +103 -0
  67. package/dist/proxy/types.d.ts +27 -0
  68. package/dist/proxy/types.js +1 -0
  69. package/dist/proxy.d.ts +2 -20
  70. package/dist/proxy.js +2 -1689
  71. package/package.json +1 -1
@@ -0,0 +1,2 @@
1
+ export * from "./model-cost";
2
+ export * from "./models";
@@ -0,0 +1,2 @@
1
+ export * from "./model-cost";
2
+ export * from "./models";
@@ -0,0 +1,9 @@
1
/** Per-million-token pricing for a single Cursor model, in USD. */
export interface ModelCost {
    /** Prompt (input) token rate. */
    input: number;
    /** Completion (output) token rate. */
    output: number;
    /** Prompt-cache rates; `write` is 0 for providers that do not bill cache writes. */
    cache: {
        read: number;
        write: number;
    };
}
/**
 * Resolves pricing for `modelId`: exact table lookup first, then a lookup with
 * trailing variant suffixes (e.g. "-high", "-thinking") stripped, then heuristic
 * model-family patterns, and finally a conservative default.
 */
export declare function estimateModelCost(modelId: string): ModelCost;
@@ -0,0 +1,206 @@
1
// Cursor model pricing in USD per million tokens. `input`/`output` are
// prompt/completion rates; `cache.read`/`cache.write` are prompt-cache hit and
// write rates (write is 0 where the provider does not bill cache writes).
const MODEL_COST_TABLE = {
    "claude-4-sonnet": { input: 3, output: 15, cache: { read: 0.3, write: 3.75 } },
    "claude-4-sonnet-1m": { input: 6, output: 22.5, cache: { read: 0.6, write: 7.5 } },
    "claude-4.5-haiku": { input: 1, output: 5, cache: { read: 0.1, write: 1.25 } },
    "claude-4.5-opus": { input: 5, output: 25, cache: { read: 0.5, write: 6.25 } },
    "claude-4.5-sonnet": { input: 3, output: 15, cache: { read: 0.3, write: 3.75 } },
    "claude-4.6-opus": { input: 5, output: 25, cache: { read: 0.5, write: 6.25 } },
    "claude-4.6-opus-fast": { input: 30, output: 150, cache: { read: 3, write: 37.5 } },
    "claude-4.6-sonnet": { input: 3, output: 15, cache: { read: 0.3, write: 3.75 } },
    "composer-1": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "composer-1.5": { input: 3.5, output: 17.5, cache: { read: 0.35, write: 0 } },
    "composer-2": { input: 0.5, output: 2.5, cache: { read: 0.2, write: 0 } },
    "composer-2-fast": { input: 1.5, output: 7.5, cache: { read: 0.2, write: 0 } },
    "gemini-2.5-flash": { input: 0.3, output: 2.5, cache: { read: 0.03, write: 0 } },
    "gemini-3-flash": { input: 0.5, output: 3, cache: { read: 0.05, write: 0 } },
    "gemini-3-pro": { input: 2, output: 12, cache: { read: 0.2, write: 0 } },
    "gemini-3-pro-image": { input: 2, output: 12, cache: { read: 0.2, write: 0 } },
    "gemini-3.1-pro": { input: 2, output: 12, cache: { read: 0.2, write: 0 } },
    "gpt-5": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "gpt-5-fast": { input: 2.5, output: 20, cache: { read: 0.25, write: 0 } },
    "gpt-5-mini": { input: 0.25, output: 2, cache: { read: 0.025, write: 0 } },
    "gpt-5-codex": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "gpt-5.1-codex": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "gpt-5.1-codex-max": { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
    "gpt-5.1-codex-mini": { input: 0.25, output: 2, cache: { read: 0.025, write: 0 } },
    "gpt-5.2": { input: 1.75, output: 14, cache: { read: 0.175, write: 0 } },
    "gpt-5.2-codex": { input: 1.75, output: 14, cache: { read: 0.175, write: 0 } },
    "gpt-5.3-codex": { input: 1.75, output: 14, cache: { read: 0.175, write: 0 } },
    "gpt-5.4": { input: 2.5, output: 15, cache: { read: 0.25, write: 0 } },
    "gpt-5.4-mini": { input: 0.75, output: 4.5, cache: { read: 0.075, write: 0 } },
    "gpt-5.4-nano": { input: 0.2, output: 1.25, cache: { read: 0.02, write: 0 } },
    "grok-4.20": { input: 2, output: 6, cache: { read: 0.2, write: 0 } },
    "kimi-k2.5": { input: 0.6, output: 3, cache: { read: 0.1, write: 0 } },
};
// Heuristic family fallbacks for model ids with no table entry. Order matters:
// more specific patterns (e.g. "opus fast") must precede broader ones ("opus").
const MODEL_COST_PATTERNS = [
    { match: (id) => /claude.*opus.*fast/i.test(id), cost: MODEL_COST_TABLE["claude-4.6-opus-fast"] },
    { match: (id) => /claude.*opus/i.test(id), cost: MODEL_COST_TABLE["claude-4.6-opus"] },
    { match: (id) => /claude.*haiku/i.test(id), cost: MODEL_COST_TABLE["claude-4.5-haiku"] },
    { match: (id) => /claude.*sonnet/i.test(id), cost: MODEL_COST_TABLE["claude-4.6-sonnet"] },
    { match: (id) => /claude/i.test(id), cost: MODEL_COST_TABLE["claude-4.6-sonnet"] },
    { match: (id) => /composer-?2/i.test(id), cost: MODEL_COST_TABLE["composer-2"] },
    { match: (id) => /composer-?1\.5/i.test(id), cost: MODEL_COST_TABLE["composer-1.5"] },
    { match: (id) => /composer/i.test(id), cost: MODEL_COST_TABLE["composer-1"] },
    { match: (id) => /gpt-5\.4.*nano/i.test(id), cost: MODEL_COST_TABLE["gpt-5.4-nano"] },
    { match: (id) => /gpt-5\.4.*mini/i.test(id), cost: MODEL_COST_TABLE["gpt-5.4-mini"] },
    { match: (id) => /gpt-5\.4/i.test(id), cost: MODEL_COST_TABLE["gpt-5.4"] },
    { match: (id) => /gpt-5\.3/i.test(id), cost: MODEL_COST_TABLE["gpt-5.3-codex"] },
    { match: (id) => /gpt-5\.2/i.test(id), cost: MODEL_COST_TABLE["gpt-5.2"] },
    { match: (id) => /gpt-5\.1.*mini/i.test(id), cost: MODEL_COST_TABLE["gpt-5.1-codex-mini"] },
    { match: (id) => /gpt-5\.1/i.test(id), cost: MODEL_COST_TABLE["gpt-5.1-codex"] },
    { match: (id) => /gpt-5.*mini/i.test(id), cost: MODEL_COST_TABLE["gpt-5-mini"] },
    { match: (id) => /gpt-5.*fast/i.test(id), cost: MODEL_COST_TABLE["gpt-5-fast"] },
    { match: (id) => /gpt-5/i.test(id), cost: MODEL_COST_TABLE["gpt-5"] },
    { match: (id) => /gemini.*3\.1/i.test(id), cost: MODEL_COST_TABLE["gemini-3.1-pro"] },
    { match: (id) => /gemini.*3.*flash/i.test(id), cost: MODEL_COST_TABLE["gemini-3-flash"] },
    { match: (id) => /gemini.*3/i.test(id), cost: MODEL_COST_TABLE["gemini-3-pro"] },
    { match: (id) => /gemini.*flash/i.test(id), cost: MODEL_COST_TABLE["gemini-2.5-flash"] },
    { match: (id) => /gemini/i.test(id), cost: MODEL_COST_TABLE["gemini-3.1-pro"] },
    { match: (id) => /grok/i.test(id), cost: MODEL_COST_TABLE["grok-4.20"] },
    { match: (id) => /kimi/i.test(id), cost: MODEL_COST_TABLE["kimi-k2.5"] },
];
// Used when nothing matches; mid-range Sonnet-like pricing.
const DEFAULT_COST = {
    input: 3,
    output: 15,
    cache: { read: 0.3, write: 0 },
};
// One trailing variant suffix (reasoning-effort / preview markers). Applied
// repeatedly below so stacked suffixes ("-high-thinking") are fully removed.
const VARIANT_SUFFIX_RE = /-(?:high|medium|low|preview|thinking|spark-preview)$/;
/**
 * Estimates per-million-token pricing for a Cursor model id.
 *
 * Resolution order: exact table lookup, table lookup after stripping trailing
 * variant suffixes one at a time, heuristic family patterns, then DEFAULT_COST.
 *
 * @param {string} modelId - Model identifier (any casing).
 * @returns {object} A MODEL_COST_TABLE-shaped cost entry.
 */
export function estimateModelCost(modelId) {
    const normalized = modelId.toLowerCase();
    const exact = MODEL_COST_TABLE[normalized];
    if (exact)
        return exact;
    // Strip ALL stacked suffixes, checking the table after each strip. The
    // previous single-pass replace removed only the last suffix, so ids like
    // "composer-2-fast-high-thinking" missed their exact entry and fell
    // through to a coarser (mispriced) pattern match.
    let stripped = normalized;
    while (VARIANT_SUFFIX_RE.test(stripped)) {
        stripped = stripped.replace(VARIANT_SUFFIX_RE, "");
        const strippedMatch = MODEL_COST_TABLE[stripped];
        if (strippedMatch)
            return strippedMatch;
    }
    return (MODEL_COST_PATTERNS.find((pattern) => pattern.match(normalized))?.cost ??
        DEFAULT_COST);
}
@@ -0,0 +1,8 @@
1
import type { CursorModel } from "../models";
/** Minimal shape of a provider object that can carry a model map. */
export interface ProviderWithModels {
    models?: Record<string, unknown>;
}
/** Assigns `models` onto `provider` in place when it is a non-null object; no-op otherwise. */
export declare function setProviderModels(provider: unknown, models: Record<string, unknown>): void;
/** Builds the provider model map for the given Cursor models, routed through the local proxy on `port`. */
export declare function buildCursorProviderModels(models: CursorModel[], port: number): Record<string, unknown>;
/** Returns a provider config whose fetch always answers 503 with a JSON error carrying `message`. */
export declare function buildDisabledProviderConfig(message: string): Record<string, unknown>;
/** Removes any Authorization header (Headers, entry-array, or plain-object form) from `init`, mutating it. */
export declare function stripAuthorizationHeader(init?: RequestInit): RequestInit | undefined;
@@ -0,0 +1,86 @@
1
+ import { CURSOR_PROVIDER_ID } from "../constants";
2
+ import { estimateModelCost } from "./model-cost";
3
/**
 * Attaches a model map to a provider object in place.
 * Anything that is not a non-null object (null, primitives, functions) is
 * silently ignored.
 */
export function setProviderModels(provider, models) {
    const isAssignable = provider !== null && typeof provider === "object";
    if (isAssignable) {
        provider.models = models;
    }
}
8
/**
 * Builds the opencode provider model map for the discovered Cursor models,
 * pointing each model's API endpoint at the local OpenAI-compatible proxy
 * listening on `port`.
 */
export function buildCursorProviderModels(models, port) {
    const proxyUrl = `http://localhost:${port}/v1`;
    const entries = {};
    for (const model of models) {
        // Text-only modality in both directions; attachments unsupported.
        const textOnly = { text: true, audio: false, image: false, video: false, pdf: false };
        entries[model.id] = {
            id: model.id,
            providerID: CURSOR_PROVIDER_ID,
            api: {
                id: model.id,
                url: proxyUrl,
                npm: "@ai-sdk/openai-compatible",
            },
            name: model.name,
            capabilities: {
                temperature: true,
                reasoning: model.reasoning,
                attachment: false,
                toolcall: true,
                input: { ...textOnly },
                output: { ...textOnly },
                interleaved: false,
            },
            cost: estimateModelCost(model.id),
            limit: {
                context: model.contextWindow,
                output: model.maxTokens,
            },
            status: "active",
            options: {},
            headers: {},
            release_date: "",
            variants: {},
        };
    }
    return entries;
}
54
/**
 * Returns a stand-in provider config used when Cursor model discovery fails:
 * its fetch never hits the network and always answers 503 with a JSON error
 * body carrying `message`.
 */
export function buildDisabledProviderConfig(message) {
    const errorPayload = {
        error: {
            message,
            type: "server_error",
            code: "cursor_model_discovery_failed",
        },
    };
    return {
        baseURL: "http://127.0.0.1/cursor-disabled/v1",
        apiKey: "cursor-disabled",
        async fetch() {
            const init = {
                status: 503,
                headers: { "Content-Type": "application/json" },
            };
            return new Response(JSON.stringify(errorPayload), init);
        },
    };
}
72
/**
 * Removes any Authorization header from a RequestInit so the proxy's own
 * credentials replace the caller's, whichever representation the headers use
 * (Headers instance, [name, value] entry array, or plain object).
 * Mutates and returns the same `init`; returns it untouched when there are
 * no headers.
 */
export function stripAuthorizationHeader(init) {
    if (!init?.headers)
        return init;
    if (init.headers instanceof Headers) {
        // Headers.delete matches names case-insensitively by spec.
        init.headers.delete("authorization");
        return init;
    }
    if (Array.isArray(init.headers)) {
        init.headers = init.headers.filter(([key]) => key.toLowerCase() !== "authorization");
        return init;
    }
    // Plain-object form: header names may use any casing, so scan keys
    // case-insensitively instead of deleting only the exact spellings
    // "authorization"/"Authorization" (which let e.g. "AUTHORIZATION" leak).
    for (const key of Object.keys(init.headers)) {
        if (key.toLowerCase() === "authorization") {
            delete init.headers[key];
        }
    }
    return init;
}
@@ -0,0 +1,3 @@
1
import type { ConversationRequestMetadata } from "./conversation-meta";
import type { CursorRequestPayload } from "./types";
/**
 * Handles a non-streaming chat completion: drains the Cursor bridge to
 * completion and returns a single OpenAI-style `chat.completion` Response.
 */
export declare function handleNonStreamingResponse(payload: CursorRequestPayload, accessToken: string, modelId: string, convKey: string, metadata: ConversationRequestMetadata): Promise<Response>;
@@ -0,0 +1,119 @@
1
+ import { fromBinary } from "@bufbuild/protobuf";
2
+ import { AgentServerMessageSchema } from "../proto/agent_pb";
3
+ import { errorDetails, logPluginError } from "../logger";
4
+ import { updateStoredConversationAfterCompletion } from "./conversation-state";
5
+ import { startBridge } from "./bridge-session";
6
+ import { updateConversationCheckpoint, syncStoredBlobStore, } from "./state-sync";
7
+ import { computeUsage, createConnectFrameParser, createThinkingTagFilter, parseConnectEndStream, processServerMessage, scheduleBridgeEnd, } from "./stream-dispatch";
8
/**
 * Handles a non-streaming /v1/chat/completions request: drains the Cursor
 * bridge to completion via collectFullResponse, then wraps the result in a
 * single OpenAI-compatible `chat.completion` JSON Response.
 */
export async function handleNonStreamingResponse(payload, accessToken, modelId, convKey, metadata) {
    // OpenAI-style completion id: "chatcmpl-" + 28 hex-ish chars.
    const completionId = `chatcmpl-${crypto.randomUUID().replace(/-/g, "").slice(0, 28)}`;
    const created = Math.floor(Date.now() / 1000);
    const result = await collectFullResponse(payload, accessToken, modelId, convKey, metadata);
    const { text, usage, finishReason, toolCalls } = result;
    let message;
    if (finishReason === "tool_calls") {
        message = { role: "assistant", content: null, tool_calls: toolCalls };
    }
    else {
        message = { role: "assistant", content: text };
    }
    const completionBody = {
        id: completionId,
        object: "chat.completion",
        created,
        model: modelId,
        choices: [
            {
                index: 0,
                message,
                finish_reason: finishReason,
            },
        ],
        usage,
    };
    return new Response(JSON.stringify(completionBody), { headers: { "Content-Type": "application/json" } });
}
30
// Drives a Cursor bridge session to completion and accumulates the full
// assistant turn: visible text, tool calls, and usage. Resolves when the
// bridge closes; rejects if Cursor reported an end-stream or unsupported
// message/exec error. NOTE(review): callback order into processServerMessage
// is positional — presumably (text, tool-exec, checkpoint, end, unsupported
// message, unsupported exec); confirm against its definition.
async function collectFullResponse(payload, accessToken, modelId, convKey, metadata) {
    const { promise, resolve, reject } = Promise.withResolvers();
    let fullText = "";
    let endStreamError = null;
    const pendingToolCalls = [];
    // startBridge also starts a heartbeat timer; we own clearing it on close.
    const { bridge, heartbeatTimer } = await startBridge(accessToken, payload.requestBytes);
    // Mutable per-stream state shared with processServerMessage / computeUsage.
    const state = {
        toolCallIndex: 0,
        pendingExecs: [],
        outputTokens: 0,
        totalTokens: 0,
        interactionToolArgsText: new Map(),
        emittedToolCallIds: new Set(),
    };
    // Filters <thinking>-style tags out of visible text across chunk boundaries.
    const tagFilter = createThinkingTagFilter();
    bridge.onData(createConnectFrameParser((messageBytes) => {
        try {
            const serverMessage = fromBinary(AgentServerMessageSchema, messageBytes);
            processServerMessage(serverMessage, payload.blobStore, payload.mcpTools, (data) => bridge.write(data), state, (text, isThinking) => {
                // Accumulate only visible (non-thinking) text, tag-filtered.
                if (isThinking)
                    return;
                const { content } = tagFilter.process(text);
                fullText += content;
            }, (exec) => {
                // Tool call requested: record it and wind the bridge down —
                // the client must run the tool and resume in a new request.
                pendingToolCalls.push({
                    id: exec.toolCallId,
                    type: "function",
                    function: {
                        name: exec.toolName,
                        arguments: exec.decodedArgs,
                    },
                });
                scheduleBridgeEnd(bridge);
            }, (checkpointBytes) => updateConversationCheckpoint(convKey, checkpointBytes), () => scheduleBridgeEnd(bridge), (info) => {
                // Unsupported server message: surface as an error and close.
                endStreamError = new Error(`Cursor returned unsupported ${info.category}: ${info.caseName}${info.detail ? ` (${info.detail})` : ""}`);
                logPluginError("Closing non-streaming Cursor bridge after unsupported message", {
                    modelId,
                    convKey,
                    category: info.category,
                    caseName: info.caseName,
                    detail: info.detail,
                });
                scheduleBridgeEnd(bridge);
            }, (info) => {
                // Unsupported exec type: same treatment as above.
                endStreamError = new Error(`Cursor requested unsupported exec type: ${info.execCase}`);
                logPluginError("Closing non-streaming Cursor bridge after unsupported exec", {
                    modelId,
                    convKey,
                    execCase: info.execCase,
                    execId: info.execId,
                    execMsgId: info.execMsgId,
                });
                scheduleBridgeEnd(bridge);
            });
        }
        catch {
            // Skip unparseable messages.
        }
    }, (endStreamBytes) => {
        // Connect end-stream frame: null on clean end, Error on failure.
        endStreamError = parseConnectEndStream(endStreamBytes);
        if (endStreamError) {
            logPluginError("Cursor non-streaming response returned Connect end-stream error", {
                modelId,
                convKey,
                ...errorDetails(endStreamError),
            });
        }
        scheduleBridgeEnd(bridge);
    }));
    bridge.onClose(() => {
        // Single settle point: stop the heartbeat, persist state, then
        // resolve/reject based on what the stream produced.
        clearInterval(heartbeatTimer);
        syncStoredBlobStore(convKey, payload.blobStore);
        const flushed = tagFilter.flush();
        fullText += flushed.content;
        if (endStreamError) {
            reject(endStreamError);
            return;
        }
        // Only persist the completed turn when the model finished its answer;
        // a tool_calls finish means the turn continues in a follow-up request.
        if (pendingToolCalls.length === 0) {
            updateStoredConversationAfterCompletion(convKey, metadata, fullText);
        }
        resolve({
            text: fullText,
            usage: computeUsage(state),
            finishReason: pendingToolCalls.length > 0 ? "tool_calls" : "stop",
            toolCalls: pendingToolCalls,
        });
    });
    return promise;
}
@@ -0,0 +1,5 @@
1
import type { CursorSession } from "../cursor/bidi-session";
/**
 * Opens a Cursor bidi session seeded with the encoded request and starts a
 * periodic keep-alive heartbeat. The caller must clear `heartbeatTimer`
 * when the bridge closes.
 */
export declare function startBridge(accessToken: string, requestBytes: Uint8Array): Promise<{
    bridge: CursorSession;
    heartbeatTimer: NodeJS.Timeout;
}>;
@@ -0,0 +1,11 @@
1
+ import { createCursorSession } from "../cursor/bidi-session";
2
+ import { makeHeartbeatBytes } from "./stream-dispatch";
3
// How often to ping the session so it is not idled out while we wait.
const HEARTBEAT_INTERVAL_MS = 5_000;
/**
 * Opens a Cursor bidi session seeded with the encoded request bytes and
 * starts a periodic heartbeat. Ownership of `heartbeatTimer` passes to the
 * caller, who must clear it when the bridge closes.
 */
export async function startBridge(accessToken, requestBytes) {
    const sessionOptions = {
        accessToken,
        initialRequestBytes: requestBytes,
    };
    const bridge = await createCursorSession(sessionOptions);
    const pingBridge = () => bridge.write(makeHeartbeatBytes());
    const heartbeatTimer = setInterval(pingBridge, HEARTBEAT_INTERVAL_MS);
    return { bridge, heartbeatTimer };
}
@@ -0,0 +1,5 @@
1
+ import { type ToolResultInfo } from "../openai/messages";
2
+ import type { ConversationRequestMetadata } from "./conversation-meta";
3
+ import type { ActiveBridge, CursorRequestPayload } from "./types";
4
+ export declare function handleStreamingResponse(payload: CursorRequestPayload, accessToken: string, modelId: string, bridgeKey: string, convKey: string, metadata: ConversationRequestMetadata): Promise<Response>;
5
+ export declare function handleToolResultResume(active: ActiveBridge, toolResults: ToolResultInfo[], bridgeKey: string, convKey: string): Response;