opencode-cursor-proxy 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +139 -0
- package/README.zh-CN.md +136 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +2 -0
- package/dist/index.js.map +1 -0
- package/dist/lib/api/agent-service.d.ts +136 -0
- package/dist/lib/api/agent-service.js +938 -0
- package/dist/lib/api/agent-service.js.map +1 -0
- package/dist/lib/api/ai-service.d.ts +26 -0
- package/dist/lib/api/ai-service.js +38 -0
- package/dist/lib/api/ai-service.js.map +1 -0
- package/dist/lib/api/cursor-client.d.ts +119 -0
- package/dist/lib/api/cursor-client.js +511 -0
- package/dist/lib/api/cursor-client.js.map +1 -0
- package/dist/lib/api/cursor-models.d.ts +13 -0
- package/dist/lib/api/cursor-models.js +34 -0
- package/dist/lib/api/cursor-models.js.map +1 -0
- package/dist/lib/api/openai-compat.d.ts +10 -0
- package/dist/lib/api/openai-compat.js +262 -0
- package/dist/lib/api/openai-compat.js.map +1 -0
- package/dist/lib/api/proto/agent-messages.d.ts +25 -0
- package/dist/lib/api/proto/agent-messages.js +132 -0
- package/dist/lib/api/proto/agent-messages.js.map +1 -0
- package/dist/lib/api/proto/bidi.d.ts +17 -0
- package/dist/lib/api/proto/bidi.js +24 -0
- package/dist/lib/api/proto/bidi.js.map +1 -0
- package/dist/lib/api/proto/decoding.d.ts +19 -0
- package/dist/lib/api/proto/decoding.js +118 -0
- package/dist/lib/api/proto/decoding.js.map +1 -0
- package/dist/lib/api/proto/encoding.d.ts +64 -0
- package/dist/lib/api/proto/encoding.js +180 -0
- package/dist/lib/api/proto/encoding.js.map +1 -0
- package/dist/lib/api/proto/exec.d.ts +12 -0
- package/dist/lib/api/proto/exec.js +383 -0
- package/dist/lib/api/proto/exec.js.map +1 -0
- package/dist/lib/api/proto/index.d.ts +13 -0
- package/dist/lib/api/proto/index.js +10 -0
- package/dist/lib/api/proto/index.js.map +1 -0
- package/dist/lib/api/proto/interaction.d.ts +15 -0
- package/dist/lib/api/proto/interaction.js +99 -0
- package/dist/lib/api/proto/interaction.js.map +1 -0
- package/dist/lib/api/proto/kv.d.ts +52 -0
- package/dist/lib/api/proto/kv.js +156 -0
- package/dist/lib/api/proto/kv.js.map +1 -0
- package/dist/lib/api/proto/tool-calls.d.ts +9 -0
- package/dist/lib/api/proto/tool-calls.js +144 -0
- package/dist/lib/api/proto/tool-calls.js.map +1 -0
- package/dist/lib/api/proto/types.d.ts +201 -0
- package/dist/lib/api/proto/types.js +10 -0
- package/dist/lib/api/proto/types.js.map +1 -0
- package/dist/lib/auth/helpers.d.ts +40 -0
- package/dist/lib/auth/helpers.js +103 -0
- package/dist/lib/auth/helpers.js.map +1 -0
- package/dist/lib/auth/index.d.ts +7 -0
- package/dist/lib/auth/index.js +10 -0
- package/dist/lib/auth/index.js.map +1 -0
- package/dist/lib/auth/login.d.ts +55 -0
- package/dist/lib/auth/login.js +184 -0
- package/dist/lib/auth/login.js.map +1 -0
- package/dist/lib/config.d.ts +153 -0
- package/dist/lib/config.js +182 -0
- package/dist/lib/config.js.map +1 -0
- package/dist/lib/openai-compat/handler.d.ts +40 -0
- package/dist/lib/openai-compat/handler.js +808 -0
- package/dist/lib/openai-compat/handler.js.map +1 -0
- package/dist/lib/openai-compat/index.d.ts +9 -0
- package/dist/lib/openai-compat/index.js +13 -0
- package/dist/lib/openai-compat/index.js.map +1 -0
- package/dist/lib/openai-compat/types.d.ts +127 -0
- package/dist/lib/openai-compat/types.js +6 -0
- package/dist/lib/openai-compat/types.js.map +1 -0
- package/dist/lib/openai-compat/utils.d.ts +143 -0
- package/dist/lib/openai-compat/utils.js +348 -0
- package/dist/lib/openai-compat/utils.js.map +1 -0
- package/dist/lib/session-reuse.d.ts +88 -0
- package/dist/lib/session-reuse.js +198 -0
- package/dist/lib/session-reuse.js.map +1 -0
- package/dist/lib/storage.d.ts +55 -0
- package/dist/lib/storage.js +159 -0
- package/dist/lib/storage.js.map +1 -0
- package/dist/lib/utils/cache.d.ts +131 -0
- package/dist/lib/utils/cache.js +297 -0
- package/dist/lib/utils/cache.js.map +1 -0
- package/dist/lib/utils/fetch.d.ts +84 -0
- package/dist/lib/utils/fetch.js +261 -0
- package/dist/lib/utils/fetch.js.map +1 -0
- package/dist/lib/utils/index.d.ts +13 -0
- package/dist/lib/utils/index.js +22 -0
- package/dist/lib/utils/index.js.map +1 -0
- package/dist/lib/utils/jwt.d.ts +40 -0
- package/dist/lib/utils/jwt.js +102 -0
- package/dist/lib/utils/jwt.js.map +1 -0
- package/dist/lib/utils/logger.d.ts +107 -0
- package/dist/lib/utils/logger.js +227 -0
- package/dist/lib/utils/logger.js.map +1 -0
- package/dist/lib/utils/model-resolver.d.ts +49 -0
- package/dist/lib/utils/model-resolver.js +503 -0
- package/dist/lib/utils/model-resolver.js.map +1 -0
- package/dist/lib/utils/request-pool.d.ts +38 -0
- package/dist/lib/utils/request-pool.js +105 -0
- package/dist/lib/utils/request-pool.js.map +1 -0
- package/dist/lib/utils/request-transformer.d.ts +87 -0
- package/dist/lib/utils/request-transformer.js +154 -0
- package/dist/lib/utils/request-transformer.js.map +1 -0
- package/dist/lib/utils/tokenizer.d.ts +14 -0
- package/dist/lib/utils/tokenizer.js +76 -0
- package/dist/lib/utils/tokenizer.js.map +1 -0
- package/dist/plugin/index.d.ts +8 -0
- package/dist/plugin/index.js +9 -0
- package/dist/plugin/index.js.map +1 -0
- package/dist/plugin/plugin.d.ts +21 -0
- package/dist/plugin/plugin.js +309 -0
- package/dist/plugin/plugin.js.map +1 -0
- package/dist/plugin/types.d.ts +120 -0
- package/dist/plugin/types.js +7 -0
- package/dist/plugin/types.js.map +1 -0
- package/dist/server.d.ts +15 -0
- package/dist/server.js +95 -0
- package/dist/server.js.map +1 -0
- package/package.json +79 -0
package/dist/lib/openai-compat/handler.js
@@ -0,0 +1,808 @@
/**
 * OpenAI-Compatible Request Handler
 *
 * Core request handling logic that can be used by both:
 * - Plugin's custom fetch function (serverless)
 * - Standalone server (Bun.serve)
 */
import { createAgentServiceClient, AgentMode, } from "../api/agent-service";
import { CursorClient } from "../api/cursor-client";
import { listCursorModels } from "../api/cursor-models";
import { generateCompletionId, messagesToPrompt, messagesToPromptWithImages, mapExecRequestToTool, createErrorResponse, createSSEChunk, createSSEDone, makeStreamResponse, handleCORS, createStreamChunk, generateToolCallId, hasMultimodalContent, } from "./utils";
import { resolveModel, getModelOwner, supportsVision, getModelConfig, } from "../utils/model-resolver";
import { transformMessages, validateMessagesForModel, } from "../utils/request-transformer";
import { calculateTokenUsage } from "../utils/tokenizer";
import { cleanupExpiredSessions, collectToolMessages, createSessionId, findSessionIdInMessages, makeToolCallId, selectCallBase, } from "../session-reuse";
import { config, isSessionReuseEnabled, shouldLogFilteredIds } from "../config";
import { openaiLogger as logger, LRUCache, logRequestTransform, logRequest, logMultimodalContent, } from "../utils";
// --- Model Cache ---
const modelCache = new LRUCache({
    max: 1,
    ttl: config.cache.modelTtlMs,
});
const MODEL_CACHE_KEY = "cursor-models";
// --- Session Cache ---
const sessionMap = new LRUCache({
    max: config.session.maxSessions,
    ttl: config.session.timeoutMs,
    onEvict: async (_key, session) => {
        try {
            await session.iterator.return?.();
        }
        catch {
            // Ignore cleanup errors
        }
    },
});
/**
 * Get cached models or fetch fresh ones
 */
async function getCachedModels(accessToken) {
    const cached = modelCache.get(MODEL_CACHE_KEY);
    if (cached) {
        return cached;
    }
    const cursorClient = new CursorClient(accessToken);
    const models = await listCursorModels(cursorClient);
    modelCache.set(MODEL_CACHE_KEY, models);
    return models;
}
/**
 * Create a request handler function that can be used with custom fetch
 *
 * @example
 * ```ts
 * const handler = createRequestHandler({ accessToken: "..." });
 *
 * // Use in plugin
 * return {
 *   fetch: (input, init) => handler(new Request(input, init)),
 * };
 *
 * // Use in server
 * Bun.serve({ fetch: handler });
 * ```
 */
export function createRequestHandler(options) {
    const { accessToken, log = () => { } } = options;
    return async function handleRequest(req) {
        const url = new URL(req.url);
        const method = req.method;
        // Normalize pathname - handle both /v1/... and /chat/completions
        const pathname = url.pathname;
        log(`[OpenAI Compat] ${method} ${pathname}`);
        if (method === "OPTIONS") {
            return handleCORS();
        }
        // Handle chat completions - match both /v1/chat/completions and /chat/completions
        if ((pathname === "/v1/chat/completions" || pathname === "/chat/completions") && method === "POST") {
            return handleChatCompletions(req, accessToken, log);
        }
        // Handle models - match both /v1/models and /models
        if ((pathname === "/v1/models" || pathname === "/models") && method === "GET") {
            return handleModels(accessToken, log);
        }
        if (pathname === "/health" || pathname === "/") {
            return new Response(JSON.stringify({ status: "ok" }), {
                headers: {
                    "Content-Type": "application/json",
                    "Access-Control-Allow-Origin": "*",
                },
            });
        }
        return createErrorResponse(`Unknown endpoint: ${method} ${url.pathname}`, "not_found", 404);
    };
}
/**
 * Handle /v1/chat/completions requests
 */
async function handleChatCompletions(req, accessToken, log) {
    let body;
    try {
        body = await req.json();
    }
    catch {
        return createErrorResponse("Invalid JSON body");
    }
    if (!body.messages || !Array.isArray(body.messages) || body.messages.length === 0) {
        return createErrorResponse("messages is required and must be a non-empty array");
    }
    // Resolve model name to internal model ID
    let model;
    try {
        const models = await getCachedModels(accessToken);
        model = resolveModel(body.model ?? "auto", models);
        log(`[OpenAI Compat] Resolved model "${body.model ?? "auto"}" to "${model}"`);
    }
    catch (err) {
        log("[OpenAI Compat] Failed to fetch models, using requested model directly:", err);
        model = body.model ?? "default";
    }
    // Get model configuration
    const modelConfig = getModelConfig(model);
    const modelSupportsVision = supportsVision(model);
    // Transform messages: filter item_reference, strip IDs
    const transformResult = transformMessages(body.messages, { logStats: shouldLogFilteredIds() });
    const transformedMessages = transformResult.messages;
    // Log transformation statistics if enabled
    if (transformResult.stats.itemReferencesFiltered > 0 || transformResult.stats.idsStripped > 0) {
        logRequestTransform(transformResult.stats);
    }
    // Validate messages for model capabilities (check for images with non-vision models)
    const validation = validateMessagesForModel(transformedMessages, modelSupportsVision);
    if (!validation.valid) {
        for (const warning of validation.warnings) {
            log(`[OpenAI Compat] Warning: ${warning}`);
        }
    }
    // Log multimodal content detection
    transformedMessages.forEach((msg, i) => {
        if (msg && hasMultimodalContent(msg.content)) {
            const content = msg.content;
            const imageCount = Array.isArray(content)
                ? content.filter((p) => p.type === "image_url").length
                : 0;
            const hasBase64 = Array.isArray(content)
                ? content.some((p) => p.type === "image_url" && p.image_url.url.startsWith("data:"))
                : false;
            logMultimodalContent({
                messageIndex: i,
                imageCount,
                hasBase64,
                modelSupportsVision,
            });
        }
    });
    // Convert messages to prompt with multimodal handling
    const promptResult = messagesToPromptWithImages(transformedMessages, {
        supportsVision: modelSupportsVision,
        includeImageReferences: true,
    });
    const prompt = promptResult.prompt;
    // Log request details
    const stream = body.stream ?? false;
    const tools = body.tools;
    const toolsProvided = tools && tools.length > 0;
    logRequest("POST", "/v1/chat/completions", {
        model,
        messageCount: transformedMessages.length,
        hasTools: toolsProvided,
        stream,
    });
    // Log tool call status for debugging
    const toolCallCount = transformedMessages
        .filter(m => m.role === "assistant" && m.tool_calls)
        .reduce((acc, m) => acc + (m.tool_calls?.length ?? 0), 0);
    const toolResultCount = transformedMessages
        .filter(m => m.role === "tool").length;
    if (toolCallCount > 0) {
        log(`[OpenAI Compat] ${toolResultCount}/${toolCallCount} tool calls have results, passing ${tools?.length ?? 0} tools`);
    }
    const client = createAgentServiceClient(accessToken);
    const completionId = generateCompletionId();
    const created = Math.floor(Date.now() / 1000);
    if (stream) {
        return streamChatCompletion({
            client,
            prompt,
            model,
            tools,
            toolsProvided: toolsProvided ?? false,
            messages: transformedMessages,
            completionId,
            created,
            log,
        });
    }
    // Non-streaming response
    try {
        const content = await client.chat({ message: prompt, model, mode: AgentMode.AGENT, tools });
        const usage = calculateTokenUsage(prompt, content, model);
        return new Response(JSON.stringify({
            id: completionId,
            object: "chat.completion",
            created,
            model,
            choices: [{
                    index: 0,
                    message: { role: "assistant", content },
                    finish_reason: "stop",
                }],
            usage,
        }), {
            headers: {
                "Content-Type": "application/json",
                "Access-Control-Allow-Origin": "*",
            },
        });
    }
    catch (err) {
        const message = err instanceof Error ? err.message : "Unknown error";
        return createErrorResponse(message, "server_error", 500);
    }
}
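// Usage sketch: calling the non-streaming path of handleChatCompletions above from a
// client. The base URL and port are assumptions (any address the standalone server or
// the plugin's custom fetch is reachable at will do); the response shape matches the
// object built above.
async function exampleNonStreamingCall() {
    const res = await fetch("http://localhost:3000/v1/chat/completions", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            model: "auto",
            messages: [{ role: "user", content: "Hello" }],
        }),
    });
    const completion = await res.json();
    return completion.choices[0].message.content;
}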
async function streamChatCompletion(params) {
    const { client, prompt, model, tools, toolsProvided, messages, completionId, created, log } = params;
    // Route to session reuse if:
    // 1. Tools are provided (new request that may result in tool calls), OR
    // 2. Messages contain tool results (continuation with tool results)
    const hasToolMessages = messages.some(m => m.role === "tool" && m.tool_call_id);
    const shouldUseSessionReuse = isSessionReuseEnabled() && (toolsProvided || hasToolMessages);
    if (shouldUseSessionReuse) {
        return streamChatCompletionWithSessionReuse({
            client,
            prompt,
            model,
            tools,
            toolsProvided,
            messages,
            completionId,
            created,
            log,
        });
    }
    const encoder = new TextEncoder();
    let isClosed = false;
    let mcpToolCallIndex = 0;
    let pendingEditToolCall = null;
    let accumulatedContent = "";
    let isInThinkingBlock = false;
    const readable = new ReadableStream({
        async start(controller) {
            try {
                // Send initial chunk with role
                controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, { role: "assistant" }))));
                // Stream content
                for await (const chunk of client.chatStream({ message: prompt, model, mode: AgentMode.AGENT, tools })) {
                    if (isClosed)
                        break;
                    if (chunk.type === "text" || chunk.type === "token") {
                        if (chunk.content) {
                            // Close thinking block if we were in one and now receiving text
                            let prefix = "";
                            if (isInThinkingBlock) {
                                prefix = "\n</think>\n\n";
                                isInThinkingBlock = false;
                            }
                            const outputContent = prefix + chunk.content;
                            accumulatedContent += outputContent;
                            controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, { content: outputContent }))));
                        }
                    }
                    else if (chunk.type === "thinking") {
                        // Handle thinking/reasoning content from thinking models
                        // Wrap in <think> tags so clients can identify and style reasoning content
                        if (chunk.content) {
                            let prefix = "";
                            if (!isInThinkingBlock) {
                                prefix = "<think>\n";
                                isInThinkingBlock = true;
                            }
                            const outputContent = prefix + chunk.content;
                            accumulatedContent += outputContent;
                            controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, { content: outputContent }))));
                        }
                    }
                    else if (chunk.type === "kv_blob_assistant" && chunk.blobContent) {
                        log("[OpenAI Compat] Emitting assistant content from KV blob");
                        accumulatedContent += chunk.blobContent;
                        controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, { content: chunk.blobContent }))));
                    }
                    else if (chunk.type === "tool_call_started" && chunk.toolCall) {
                        // Track file-modifying tool calls
                        if (chunk.toolCall.name === "edit" || chunk.toolCall.name === "apply_diff") {
                            pendingEditToolCall = chunk.toolCall.callId;
                            log("[OpenAI Compat] File-modifying tool started, will handle internal read locally");
                        }
                    }
                    else if (chunk.type === "exec_request" && chunk.execRequest) {
                        const execReq = chunk.execRequest;
                        // Skip context requests
                        if (execReq.type === "request_context") {
                            continue;
                        }
                        // Handle internal reads for edit flows
                        if (execReq.type === "read" && pendingEditToolCall) {
                            log("[OpenAI Compat] Handling internal read for edit flow locally");
                            try {
                                const file = Bun.file(execReq.path);
                                const content = await file.text();
                                const stats = await file.stat();
                                const totalLines = content.split("\n").length;
                                await client.sendReadResult(execReq.id, execReq.execId, content, execReq.path, totalLines, BigInt(stats.size), false);
                                log("[OpenAI Compat] Internal read completed for edit flow");
                            }
                            catch (err) {
                                const message = err instanceof Error ? err.message : "Unknown error";
                                await client.sendReadResult(execReq.id, execReq.execId, `Error: ${message}`, execReq.path, 0, 0n, false);
                            }
                            continue;
                        }
                        // Emit exec requests as OpenAI tool calls when tools are provided
                        if (toolsProvided) {
                            const { toolName, toolArgs } = mapExecRequestToTool(execReq);
                            if (toolName && toolArgs) {
                                const currentIndex = mcpToolCallIndex++;
                                const openaiToolCallId = generateToolCallId(completionId, currentIndex);
                                log(`[OpenAI Compat] Emitting tool call: ${toolName} (type: ${execReq.type})`);
                                // Emit the tool call
                                const toolCallChunk = {
                                    id: completionId,
                                    object: "chat.completion.chunk",
                                    created,
                                    model,
                                    choices: [{
                                            index: 0,
                                            delta: {
                                                tool_calls: [{
                                                        index: currentIndex,
                                                        id: openaiToolCallId,
                                                        type: "function",
                                                        function: {
                                                            name: toolName,
                                                            arguments: JSON.stringify(toolArgs),
                                                        },
                                                    }],
                                            },
                                            finish_reason: null,
                                        }],
                                };
                                controller.enqueue(encoder.encode(createSSEChunk(toolCallChunk)));
                                // Emit finish with tool_calls reason
                                controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, {}, "tool_calls"))));
                                controller.enqueue(encoder.encode(createSSEDone()));
                                isClosed = true;
                                controller.close();
                                return;
                            }
                        }
                        // Execute built-in tools internally when no tools provided
                        if (!toolsProvided && execReq.type !== "mcp") {
                            await executeBuiltinTool(client, execReq, log);
                        }
                    }
                    else if (chunk.type === "error") {
                        controller.enqueue(encoder.encode(createSSEChunk({
                            error: { message: chunk.error ?? "Unknown error", type: "server_error" },
                        })));
                        break;
                    }
                    else if (chunk.type === "done") {
                        break;
                    }
                }
                // Send final chunk with usage
                if (!isClosed) {
                    const usage = calculateTokenUsage(prompt, accumulatedContent, model);
                    const finalChunk = {
                        id: completionId,
                        object: "chat.completion.chunk",
                        created,
                        model,
                        choices: [{
                                index: 0,
                                delta: {},
                                finish_reason: "stop",
                            }],
                        usage,
                    };
                    controller.enqueue(encoder.encode(createSSEChunk(finalChunk)));
                    controller.enqueue(encoder.encode(createSSEDone()));
                    controller.close();
                }
            }
            catch (err) {
                if (!isClosed) {
                    try {
                        controller.error(err);
                    }
                    catch {
                        // Controller may already be closed
                    }
                }
            }
        },
        cancel() {
            isClosed = true;
        },
    });
    return makeStreamResponse(readable);
}
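// Usage sketch: consuming the SSE stream produced by streamChatCompletion above.
// Assumes the proxy is reachable at http://localhost:3000 (placeholder) and that the
// stream ends with the conventional "data: [DONE]" line written by createSSEDone.
// Reasoning from thinking models arrives inline, wrapped in <think>...</think> tags.
async function exampleStreamingCall() {
    const res = await fetch("http://localhost:3000/v1/chat/completions", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            model: "auto",
            stream: true,
            messages: [{ role: "user", content: "Hello" }],
        }),
    });
    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    let text = "";
    while (true) {
        const { done, value } = await reader.read();
        if (done)
            break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() ?? "";
        for (const line of lines) {
            if (!line.startsWith("data: ") || line.includes("[DONE]"))
                continue;
            const chunk = JSON.parse(line.slice("data: ".length));
            text += chunk.choices?.[0]?.delta?.content ?? "";
        }
    }
    return text;
}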
async function streamChatCompletionWithSessionReuse(params) {
    const { client, prompt, model, tools, messages, completionId, created, log } = params;
    // LRUCache automatically handles expiration, just prune occasionally
    sessionMap.prune();
    const encoder = new TextEncoder();
    let isClosed = false;
    let mcpToolCallIndex = 0;
    let pendingEditToolCall = null;
    let accumulatedContent = "";
    let isInThinkingBlock = false;
    // ARCHITECTURAL NOTE: We always start fresh requests when tool results arrive.
    // See session-reuse.ts for detailed explanation of why true session reuse isn't possible.
    // The session infrastructure below is retained for internal read handling and future improvements.
    const existingSessionId = findSessionIdInMessages(messages);
    const toolMessages = collectToolMessages(messages);
    log(`[Session Reuse] existingSessionId=${existingSessionId ?? "null"}, toolMessages.length=${toolMessages.length}`);
    if (toolMessages.length > 0) {
        log(`[Session Reuse] toolMessages tool_call_ids: ${toolMessages.map(m => m.tool_call_id).join(", ")}`);
    }
    let sessionId = existingSessionId ?? createSessionId();
    let session = existingSessionId ? sessionMap.get(existingSessionId) : undefined;
    log(`[Session Reuse] sessionId=${sessionId}, session found=${!!session}, sessionMap.size=${sessionMap.size}`);
    // IMPORTANT: bidiAppend tool results don't trigger server continuation - start fresh request instead
    if (toolMessages.length > 0 && session) {
        log(`[Session Reuse] Tool messages present - closing old session ${sessionId} and starting fresh`);
        try {
            await session.iterator.return?.();
        }
        catch (err) {
            log("[Session Reuse] Failed to close prior session iterator:", err);
        }
        sessionMap.delete(sessionId);
        session = undefined;
        sessionId = createSessionId();
    }
    if (!session) {
        log(`[Session Reuse] Creating NEW session ${sessionId}`);
        const iterator = client
            .chatStream({ message: prompt, model, mode: AgentMode.AGENT, tools })[Symbol.asyncIterator]();
        session = {
            id: sessionId,
            iterator,
            pendingExecs: new Map(),
            createdAt: Date.now(),
            lastActivity: Date.now(),
            state: "running",
            client: {
                sendToolResult: client.sendToolResult.bind(client),
                sendShellResult: client.sendShellResult.bind(client),
                sendReadResult: client.sendReadResult.bind(client),
                sendLsResult: client.sendLsResult.bind(client),
                sendGrepResult: client.sendGrepResult.bind(client),
                sendWriteResult: client.sendWriteResult.bind(client),
                sendResumeAction: client.sendResumeAction.bind(client),
            },
        };
        sessionMap.set(sessionId, session);
    }
    else {
        sessionId = session.id;
    }
    const activeSession = session;
    activeSession.lastActivity = Date.now();
    const readable = new ReadableStream({
        async start(controller) {
            try {
                controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, { role: "assistant" }))));
                while (!isClosed) {
                    log(`[OpenAI Compat] Waiting for next chunk from iterator for session ${sessionId}...`);
                    const { done, value } = await activeSession.iterator.next();
                    log(`[OpenAI Compat] Iterator returned: done=${done}, value type=${value?.type || 'N/A'}`);
                    if (done) {
                        sessionMap.delete(sessionId);
                        const usage = calculateTokenUsage(prompt, accumulatedContent, model);
                        const finalChunk = {
                            id: completionId,
                            object: "chat.completion.chunk",
                            created,
                            model,
                            choices: [
                                {
                                    index: 0,
                                    delta: {},
                                    finish_reason: "stop",
                                },
                            ],
                            usage,
                        };
                        controller.enqueue(encoder.encode(createSSEChunk(finalChunk)));
                        controller.enqueue(encoder.encode(createSSEDone()));
                        controller.close();
                        return;
                    }
                    const chunk = value;
                    if (chunk.type === "text" || chunk.type === "token") {
                        if (chunk.content) {
                            // Close thinking block if we were in one and now receiving text
                            let prefix = "";
                            if (isInThinkingBlock) {
                                prefix = "\n</think>\n\n";
                                isInThinkingBlock = false;
                            }
                            const outputContent = prefix + chunk.content;
                            accumulatedContent += outputContent;
                            activeSession.lastActivity = Date.now();
                            controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, { content: outputContent }))));
                        }
                        continue;
                    }
                    if (chunk.type === "thinking") {
                        // Handle thinking/reasoning content from thinking models
                        // Wrap in <think> tags so clients can identify and style reasoning content
                        if (chunk.content) {
                            let prefix = "";
                            if (!isInThinkingBlock) {
                                prefix = "<think>\n";
                                isInThinkingBlock = true;
                            }
                            const outputContent = prefix + chunk.content;
                            accumulatedContent += outputContent;
                            activeSession.lastActivity = Date.now();
                            controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, { content: outputContent }))));
                        }
                        continue;
                    }
                    if (chunk.type === "kv_blob_assistant" && chunk.blobContent) {
                        accumulatedContent += chunk.blobContent;
                        session.lastActivity = Date.now();
                        controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, { content: chunk.blobContent }))));
                        continue;
                    }
                    if (chunk.type === "tool_call_started" && chunk.toolCall) {
                        if (chunk.toolCall.name === "edit" || chunk.toolCall.name === "apply_diff") {
                            pendingEditToolCall = chunk.toolCall.callId;
                        }
                        continue;
                    }
                    if (chunk.type === "exec_request" && chunk.execRequest) {
                        const execReq = chunk.execRequest;
                        if (execReq.type === "request_context") {
                            continue;
                        }
                        if (execReq.type === "read" && pendingEditToolCall) {
                            try {
                                const file = Bun.file(execReq.path);
                                const content = await file.text();
                                const stats = await file.stat();
                                const totalLines = content.split("\n").length;
                                await client.sendReadResult(execReq.id, execReq.execId, content, execReq.path, totalLines, BigInt(stats.size), false);
                            }
                            catch (err) {
                                const message = err instanceof Error ? err.message : "Unknown error";
                                await client.sendReadResult(execReq.id, execReq.execId, `Error: ${message}`, execReq.path, 0, 0n, false);
                            }
                            try {
                                await client.sendResumeAction();
                            }
                            catch (err) {
                                log("[OpenAI Compat] Failed to send ResumeAction:", err);
                            }
                            continue;
                        }
                        const { toolName, toolArgs } = mapExecRequestToTool(execReq);
                        if (toolName && toolArgs) {
                            const currentIndex = mcpToolCallIndex++;
                            const callBase = selectCallBase(execReq);
                            const toolCallId = makeToolCallId(sessionId, callBase);
                            log(`[Session ${sessionId}] Storing pendingExec: toolCallId=${toolCallId}, callBase=${callBase}, execReq.type=${execReq.type}, execReq.execId=${execReq.execId ?? "undefined"}, execReq.id=${execReq.id ?? "undefined"}`);
                            activeSession.pendingExecs.set(toolCallId, execReq);
                            activeSession.state = "waiting_tool";
                            activeSession.lastActivity = Date.now();
                            const toolCallChunk = {
                                id: completionId,
                                object: "chat.completion.chunk",
                                created,
                                model,
                                choices: [
                                    {
                                        index: 0,
                                        delta: {
                                            tool_calls: [
                                                {
                                                    index: currentIndex,
                                                    id: toolCallId,
                                                    type: "function",
                                                    function: {
                                                        name: toolName,
                                                        arguments: JSON.stringify(toolArgs),
                                                    },
                                                },
                                            ],
                                        },
                                        finish_reason: null,
                                    },
                                ],
                            };
                            controller.enqueue(encoder.encode(createSSEChunk(toolCallChunk)));
                            controller.enqueue(encoder.encode(createSSEChunk(createStreamChunk(completionId, model, created, {}, "tool_calls"))));
                            controller.enqueue(encoder.encode(createSSEDone()));
                            isClosed = true;
                            controller.close();
                            return;
                        }
                        await executeBuiltinTool(client, execReq, log);
                        continue;
                    }
                    if (chunk.type === "error") {
                        sessionMap.delete(sessionId);
                        controller.enqueue(encoder.encode(createSSEChunk({
                            error: { message: chunk.error ?? "Unknown error", type: "server_error" },
                        })));
                        controller.enqueue(encoder.encode(createSSEDone()));
                        controller.close();
                        return;
                    }
                    if (chunk.type === "done") {
                        sessionMap.delete(sessionId);
                        break;
                    }
                }
                const usage = calculateTokenUsage(prompt, accumulatedContent, model);
                const finalChunk = {
                    id: completionId,
                    object: "chat.completion.chunk",
                    created,
                    model,
                    choices: [
                        {
                            index: 0,
                            delta: {},
                            finish_reason: "stop",
                        },
                    ],
                    usage,
                };
                controller.enqueue(encoder.encode(createSSEChunk(finalChunk)));
                controller.enqueue(encoder.encode(createSSEDone()));
                controller.close();
            }
            catch (err) {
                if (!isClosed) {
                    try {
                        controller.error(err);
                    }
                    catch (innerErr) {
                        log("[OpenAI Compat] Failed to signal stream error:", innerErr);
                    }
                }
            }
        },
        cancel() {
            isClosed = true;
        },
    });
    return makeStreamResponse(readable);
}
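// Usage sketch: the follow-up request a client could send after the session-reuse path
// above emits a tool call and finishes with finish_reason "tool_calls". The tool_call_id
// produced by makeToolCallId embeds the session id, which findSessionIdInMessages later
// recovers from these messages before the handler starts a fresh upstream request that
// carries the tool result. All literal values below are placeholders.
function exampleToolFollowUpBody(toolCall, toolOutput) {
    return {
        model: "auto",
        stream: true,
        messages: [
            { role: "user", content: "List the files in the project root" },
            { role: "assistant", content: null, tool_calls: [toolCall] },
            { role: "tool", tool_call_id: toolCall.id, content: toolOutput },
        ],
    };
}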
/**
 * Execute built-in tool internally
 */
async function executeBuiltinTool(client, execReq, log) {
    log(`[OpenAI Compat] Executing built-in tool internally: ${execReq.type}`);
    if (execReq.type === "shell") {
        const cwd = execReq.cwd || process.cwd();
        const startTime = Date.now();
        try {
            const proc = Bun.spawn(["sh", "-c", execReq.command], { cwd, stdout: "pipe", stderr: "pipe" });
            const stdout = await new Response(proc.stdout).text();
            const stderr = await new Response(proc.stderr).text();
            const exitCode = await proc.exited;
            const executionTimeMs = Date.now() - startTime;
            await client.sendShellResult(execReq.id, execReq.execId, execReq.command, cwd, stdout, stderr, exitCode, executionTimeMs);
        }
        catch (err) {
            const message = err instanceof Error ? err.message : "Unknown error";
            const executionTimeMs = Date.now() - startTime;
            await client.sendShellResult(execReq.id, execReq.execId, execReq.command, cwd, "", `Error: ${message}`, 1, executionTimeMs);
        }
    }
    else if (execReq.type === "read") {
        try {
            const file = Bun.file(execReq.path);
            const content = await file.text();
            const stats = await file.stat();
            const totalLines = content.split("\n").length;
            await client.sendReadResult(execReq.id, execReq.execId, content, execReq.path, totalLines, BigInt(stats.size), false);
        }
        catch (err) {
            const message = err instanceof Error ? err.message : "Unknown error";
            await client.sendReadResult(execReq.id, execReq.execId, `Error: ${message}`, execReq.path, 0, 0n, false);
        }
    }
    else if (execReq.type === "ls") {
        try {
            const { readdir } = await import("node:fs/promises");
            const entries = await readdir(execReq.path, { withFileTypes: true });
            const files = entries.map(e => e.isDirectory() ? `${e.name}/` : e.name).join("\n");
            await client.sendLsResult(execReq.id, execReq.execId, files);
        }
        catch (err) {
            const message = err instanceof Error ? err.message : "Unknown error";
            await client.sendLsResult(execReq.id, execReq.execId, `Error: ${message}`);
        }
    }
    else if (execReq.type === "grep") {
        try {
            let files = [];
            if (execReq.glob) {
                const globber = new Bun.Glob(execReq.glob);
                files = Array.from(globber.scanSync(execReq.path || process.cwd()));
            }
            else if (execReq.pattern) {
                const rg = Bun.spawn(["rg", "-l", execReq.pattern, execReq.path || process.cwd()], { stdout: "pipe", stderr: "pipe" });
                const stdout = await new Response(rg.stdout).text();
                files = stdout.split("\n").filter(f => f.length > 0);
            }
            await client.sendGrepResult(execReq.id, execReq.execId, execReq.pattern || execReq.glob || "", execReq.path || process.cwd(), files);
        }
        catch {
            await client.sendGrepResult(execReq.id, execReq.execId, execReq.pattern || execReq.glob || "", execReq.path || process.cwd(), []);
        }
    }
    else if (execReq.type === "write") {
        try {
            const { dirname } = await import("node:path");
            const { mkdir } = await import("node:fs/promises");
            const dir = dirname(execReq.path);
            await mkdir(dir, { recursive: true });
            const content = execReq.fileBytes && execReq.fileBytes.length > 0
                ? execReq.fileBytes
                : execReq.fileText;
            await Bun.write(execReq.path, content);
            const file = Bun.file(execReq.path);
            const stats = await file.stat();
            const linesCreated = typeof content === "string"
                ? content.split("\n").length
                : new TextDecoder().decode(content).split("\n").length;
            await client.sendWriteResult(execReq.id, execReq.execId, {
                success: {
                    path: execReq.path,
                    linesCreated,
                    fileSize: Number(stats.size),
                    fileContentAfterWrite: execReq.returnFileContentAfterWrite ? await file.text() : undefined,
                },
            });
        }
        catch (err) {
            const message = err instanceof Error ? err.message : "Unknown error";
            await client.sendWriteResult(execReq.id, execReq.execId, {
                error: { path: execReq.path, error: message },
            });
        }
    }
}
/**
 * Handle /v1/models requests
 */
async function handleModels(accessToken, _log) {
    try {
        const cursorClient = new CursorClient(accessToken);
        const models = await listCursorModels(cursorClient);
        const openaiModels = models.map(m => ({
            id: m.displayModelId || m.modelId,
            object: "model",
            created: Math.floor(Date.now() / 1000),
            owned_by: getModelOwner(m.displayName ?? ""),
        }));
        const response = {
            object: "list",
            data: openaiModels,
        };
        return new Response(JSON.stringify(response), {
            headers: {
                "Content-Type": "application/json",
                "Access-Control-Allow-Origin": "*",
            },
        });
    }
    catch (err) {
        const message = err instanceof Error ? err.message : "Failed to fetch models";
        return createErrorResponse(message, "server_error", 500);
    }
}
/**
 * Create a custom fetch handler for use in OpenCode plugin
 *
 * This wraps createRequestHandler to match the FetchInput signature
 * that OpenCode expects from plugins.
 */
export function createPluginFetch(options) {
    const handler = createRequestHandler(options);
    return async (input, init) => {
        // Create a proper Request object
        const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
        const request = new Request(url, init);
        return handler(request);
    };
}
//# sourceMappingURL=handler.js.map
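A minimal end-to-end sketch, mirroring the @example in the createRequestHandler JSDoc: serve the handler with Bun and query the models endpoint it exposes. The port and the environment variable holding the access token are assumptions, not part of the package.

const handler = createRequestHandler({ accessToken: process.env.CURSOR_ACCESS_TOKEN });
Bun.serve({ port: 3000, fetch: handler });

// The /v1/models route returns { object: "list", data: [...] } as built in handleModels.
const models = await fetch("http://localhost:3000/v1/models").then(r => r.json());
console.log(models.data.map(m => m.id));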