sub-bridge 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursor/commands/mcp-only.md +1 -0
- package/.github/workflows/npm-publish.yml +33 -0
- package/.github/workflows/pages.yml +40 -0
- package/.github/workflows/release-please.yml +21 -0
- package/.release-please-manifest.json +3 -0
- package/CHANGELOG.md +8 -0
- package/DEVELOPMENT.md +31 -0
- package/LICENSE +21 -0
- package/README.md +87 -0
- package/api/index.ts +12 -0
- package/bun.lock +102 -0
- package/dist/auth/oauth-flow.d.ts +24 -0
- package/dist/auth/oauth-flow.d.ts.map +1 -0
- package/dist/auth/oauth-flow.js +184 -0
- package/dist/auth/oauth-flow.js.map +1 -0
- package/dist/auth/oauth-manager.d.ts +13 -0
- package/dist/auth/oauth-manager.d.ts.map +1 -0
- package/dist/auth/oauth-manager.js +25 -0
- package/dist/auth/oauth-manager.js.map +1 -0
- package/dist/auth/provider.d.ts +42 -0
- package/dist/auth/provider.d.ts.map +1 -0
- package/dist/auth/provider.js +270 -0
- package/dist/auth/provider.js.map +1 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +91 -0
- package/dist/cli.js.map +1 -0
- package/dist/mcp/proxy.d.ts +16 -0
- package/dist/mcp/proxy.d.ts.map +1 -0
- package/dist/mcp/proxy.js +85 -0
- package/dist/mcp/proxy.js.map +1 -0
- package/dist/mcp.d.ts +3 -0
- package/dist/mcp.d.ts.map +1 -0
- package/dist/mcp.js +50 -0
- package/dist/mcp.js.map +1 -0
- package/dist/routes/auth.d.ts +6 -0
- package/dist/routes/auth.d.ts.map +1 -0
- package/dist/routes/auth.js +149 -0
- package/dist/routes/auth.js.map +1 -0
- package/dist/routes/chat.d.ts +6 -0
- package/dist/routes/chat.d.ts.map +1 -0
- package/dist/routes/chat.js +808 -0
- package/dist/routes/chat.js.map +1 -0
- package/dist/routes/tunnels.d.ts +7 -0
- package/dist/routes/tunnels.d.ts.map +1 -0
- package/dist/routes/tunnels.js +44 -0
- package/dist/routes/tunnels.js.map +1 -0
- package/dist/server.d.ts +25 -0
- package/dist/server.d.ts.map +1 -0
- package/dist/server.js +157 -0
- package/dist/server.js.map +1 -0
- package/dist/tunnel/providers/cloudflare.d.ts +9 -0
- package/dist/tunnel/providers/cloudflare.d.ts.map +1 -0
- package/dist/tunnel/providers/cloudflare.js +47 -0
- package/dist/tunnel/providers/cloudflare.js.map +1 -0
- package/dist/tunnel/providers/index.d.ts +4 -0
- package/dist/tunnel/providers/index.d.ts.map +1 -0
- package/dist/tunnel/providers/index.js +13 -0
- package/dist/tunnel/providers/index.js.map +1 -0
- package/dist/tunnel/providers/ngrok.d.ts +10 -0
- package/dist/tunnel/providers/ngrok.d.ts.map +1 -0
- package/dist/tunnel/providers/ngrok.js +52 -0
- package/dist/tunnel/providers/ngrok.js.map +1 -0
- package/dist/tunnel/providers/tailscale.d.ts +10 -0
- package/dist/tunnel/providers/tailscale.d.ts.map +1 -0
- package/dist/tunnel/providers/tailscale.js +48 -0
- package/dist/tunnel/providers/tailscale.js.map +1 -0
- package/dist/tunnel/registry.d.ts +14 -0
- package/dist/tunnel/registry.d.ts.map +1 -0
- package/dist/tunnel/registry.js +86 -0
- package/dist/tunnel/registry.js.map +1 -0
- package/dist/tunnel/types.d.ts +26 -0
- package/dist/tunnel/types.d.ts.map +1 -0
- package/dist/tunnel/types.js +6 -0
- package/dist/tunnel/types.js.map +1 -0
- package/dist/tunnel/utils.d.ts +18 -0
- package/dist/tunnel/utils.d.ts.map +1 -0
- package/dist/tunnel/utils.js +57 -0
- package/dist/tunnel/utils.js.map +1 -0
- package/dist/types.d.ts +52 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +4 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/anthropic-to-openai-converter.d.ts +103 -0
- package/dist/utils/anthropic-to-openai-converter.d.ts.map +1 -0
- package/dist/utils/anthropic-to-openai-converter.js +376 -0
- package/dist/utils/anthropic-to-openai-converter.js.map +1 -0
- package/dist/utils/chat-to-responses.d.ts +59 -0
- package/dist/utils/chat-to-responses.d.ts.map +1 -0
- package/dist/utils/chat-to-responses.js +395 -0
- package/dist/utils/chat-to-responses.js.map +1 -0
- package/dist/utils/chatgpt-instructions.d.ts +3 -0
- package/dist/utils/chatgpt-instructions.d.ts.map +1 -0
- package/dist/utils/chatgpt-instructions.js +12 -0
- package/dist/utils/chatgpt-instructions.js.map +1 -0
- package/dist/utils/cli-args.d.ts +3 -0
- package/dist/utils/cli-args.d.ts.map +1 -0
- package/dist/utils/cli-args.js +10 -0
- package/dist/utils/cli-args.js.map +1 -0
- package/dist/utils/cors-bypass.d.ts +4 -0
- package/dist/utils/cors-bypass.d.ts.map +1 -0
- package/dist/utils/cors-bypass.js +30 -0
- package/dist/utils/cors-bypass.js.map +1 -0
- package/dist/utils/cursor-byok-bypass.d.ts +37 -0
- package/dist/utils/cursor-byok-bypass.d.ts.map +1 -0
- package/dist/utils/cursor-byok-bypass.js +53 -0
- package/dist/utils/cursor-byok-bypass.js.map +1 -0
- package/dist/utils/logger.d.ts +19 -0
- package/dist/utils/logger.d.ts.map +1 -0
- package/dist/utils/logger.js +192 -0
- package/dist/utils/logger.js.map +1 -0
- package/dist/utils/port.d.ts +27 -0
- package/dist/utils/port.d.ts.map +1 -0
- package/dist/utils/port.js +78 -0
- package/dist/utils/port.js.map +1 -0
- package/dist/utils/setup-instructions.d.ts +10 -0
- package/dist/utils/setup-instructions.d.ts.map +1 -0
- package/dist/utils/setup-instructions.js +49 -0
- package/dist/utils/setup-instructions.js.map +1 -0
- package/env.example +25 -0
- package/index.html +992 -0
- package/package.json +57 -0
- package/public/.nojekyll +0 -0
- package/public/assets/chat.png +0 -0
- package/public/assets/demo.gif +0 -0
- package/public/assets/demo.mp4 +0 -0
- package/public/assets/setup.png +0 -0
- package/public/assets/ui.png +0 -0
- package/public/index.html +292 -0
- package/release-please-config.json +10 -0
- package/src/auth/provider.ts +412 -0
- package/src/cli.ts +97 -0
- package/src/mcp/proxy.ts +64 -0
- package/src/mcp.ts +56 -0
- package/src/oauth/authorize.ts +270 -0
- package/src/oauth/crypto.ts +198 -0
- package/src/oauth/dcr.ts +129 -0
- package/src/oauth/metadata.ts +40 -0
- package/src/oauth/token.ts +173 -0
- package/src/routes/auth.ts +149 -0
- package/src/routes/chat.ts +983 -0
- package/src/routes/oauth.ts +220 -0
- package/src/routes/tunnels.ts +45 -0
- package/src/server.ts +204 -0
- package/src/tunnel/providers/cloudflare.ts +50 -0
- package/src/tunnel/providers/index.ts +7 -0
- package/src/tunnel/providers/ngrok.ts +56 -0
- package/src/tunnel/providers/tailscale.ts +50 -0
- package/src/tunnel/registry.ts +96 -0
- package/src/tunnel/types.ts +32 -0
- package/src/tunnel/utils.ts +59 -0
- package/src/types.ts +55 -0
- package/src/utils/anthropic-to-openai-converter.ts +578 -0
- package/src/utils/chat-to-responses.ts +512 -0
- package/src/utils/chatgpt-instructions.ts +7 -0
- package/src/utils/cli-args.ts +8 -0
- package/src/utils/cors-bypass.ts +39 -0
- package/src/utils/cursor-byok-bypass.ts +56 -0
- package/src/utils/logger.ts +174 -0
- package/src/utils/port.ts +99 -0
- package/src/utils/setup-instructions.ts +59 -0
- package/tsconfig.json +22 -0
- package/vercel.json +20 -0
|
@@ -0,0 +1,808 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.createChatRoutes = createChatRoutes;
|
|
4
|
+
/**
|
|
5
|
+
* Chat completion routes - handles OpenAI and Claude API proxying
|
|
6
|
+
*/
|
|
7
|
+
const hono_1 = require("hono");
|
|
8
|
+
const streaming_1 = require("hono/streaming");
|
|
9
|
+
const anthropic_to_openai_converter_1 = require("../utils/anthropic-to-openai-converter");
|
|
10
|
+
const cursor_byok_bypass_1 = require("../utils/cursor-byok-bypass");
|
|
11
|
+
const logger_1 = require("../utils/logger");
|
|
12
|
+
const chatgpt_instructions_1 = require("../utils/chatgpt-instructions");
|
|
13
|
+
const chat_to_responses_1 = require("../utils/chat-to-responses");
|
|
14
|
+
// Model alias mapping (short names → full Claude model IDs)
// Used by parseRoutedKeys to expand the alias on the right-hand side of a
// "requested=alias" routing entry in the Authorization header.
// NOTE(review): the dated model ids are hard-coded; verify they match the
// currently published Anthropic model identifiers.
const MODEL_ALIASES = {
    'opus-4.5': 'claude-opus-4-5-20251101',
    'sonnet-4.5': 'claude-sonnet-4-5-20250514',
};
|
|
19
|
+
// Base URL of the ChatGPT Codex backend; overridable via env (e.g. for a proxy).
const CHATGPT_BASE_URL = process.env.CHATGPT_BASE_URL || 'https://chatgpt.com/backend-api/codex';
// Model used when the client does not request a specific codex model.
const CHATGPT_DEFAULT_MODEL = process.env.CHATGPT_DEFAULT_MODEL || 'gpt-5.2-codex';
|
|
21
|
+
/**
 * Split a raw Authorization-header value into individual provider tokens.
 *
 * Whitespace is the primary separator; a comma-separated fallback is
 * supported for a single token, in which case a "mappings:key" prefix stays
 * attached to the first key (e.g. "m=x:tok1,tok2" → ["m=x:tok1", "tok2"]).
 *
 * @param {string} fullToken - Header value with any "Bearer " prefix removed.
 * @returns {string[]} Individual provider tokens (possibly empty).
 */
function splitProviderTokens(fullToken) {
    if (!fullToken) {
        return [];
    }
    const whitespaceSplit = fullToken.split(/\s+/).filter(Boolean);
    if (whitespaceSplit.length > 1) {
        return whitespaceSplit;
    }
    const token = whitespaceSplit[0] || '';
    // A single token without commas needs no further splitting.
    if (!token.includes(',')) {
        return token ? [token] : [];
    }
    // Comma fallback: if the part after the last colon holds the comma-joined
    // keys, re-attach the mapping prefix to the first key only.
    const colonAt = token.lastIndexOf(':');
    if (colonAt !== -1) {
        const prefix = token.slice(0, colonAt);
        const keysPart = token.slice(colonAt + 1);
        if (keysPart.includes(',')) {
            const keys = keysPart.split(',').map((k) => k.trim()).filter(Boolean);
            if (keys.length > 0) {
                return [`${prefix}:${keys[0]}`, ...keys.slice(1)];
            }
        }
    }
    // Plain comma-separated keys with no colon structure.
    return token.split(',').map((k) => k.trim()).filter(Boolean);
}
|
|
43
|
+
/**
 * Split an API token of the form "token#accountId" into its two parts.
 *
 * The first "#" at a position greater than zero is the separator; a token
 * without one (or starting with "#") is returned unchanged with no accountId.
 *
 * @param {string} token - Raw token, possibly carrying an account suffix.
 * @returns {{token: string, accountId?: string}}
 */
function parseTokenWithAccount(token) {
    const sep = token.indexOf('#');
    // No separator, or a leading "#" (index 0): treat the whole value as the token.
    if (sep <= 0) {
        return { token };
    }
    return {
        token: token.slice(0, sep),
        accountId: token.slice(sep + 1),
    };
}
|
|
53
|
+
/**
 * Loosely detect a JWT: exactly three non-empty dot-separated segments
 * (header.payload.signature). Does not validate base64 or the signature.
 *
 * @param {string} token - Candidate token string.
 * @returns {boolean} True when the token has JWT shape.
 */
function isJwtToken(token) {
    const segments = token.split('.');
    if (segments.length !== 3) {
        return false;
    }
    return segments.every((segment) => segment.length > 0);
}
|
|
57
|
+
/**
 * Map a requested model name onto a ChatGPT Codex backend model.
 *
 * Any model name containing "codex" is passed through unchanged; everything
 * else — including an empty/missing name — falls back to
 * CHATGPT_DEFAULT_MODEL. The previous implementation special-cased the
 * "gpt-5.2" and "gpt-5" prefixes, but those branches and the final
 * fallthrough all returned the default, so they are collapsed into one.
 *
 * @param {string} requestedModel - Model name from the client request.
 * @returns {string} A model id accepted by the ChatGPT backend.
 */
function normalizeChatGptModel(requestedModel) {
    if (requestedModel && requestedModel.includes('codex')) {
        return requestedModel;
    }
    return CHATGPT_DEFAULT_MODEL;
}
|
|
68
|
+
/**
 * Parse routed API keys from Authorization header
 * Format: "o3=opus-4.5,o3-mini=sonnet-4.5:sk-ant-xxx sk-xxx" (space-separated tokens; comma fallback supported) or just "sk-ant-xxx" for default
 *
 * Each token is either a bare key (the first one becomes the default key)
 * or "from=to,...:key[#accountId]" describing model-routing mappings.
 *
 * @param {string|undefined} authHeader - Raw Authorization header value.
 * @returns {{configs: Array, defaultKey?: string, defaultAccountId?: string}}
 */
function parseRoutedKeys(authHeader) {
    if (!authHeader) {
        return { configs: [] };
    }
    const rawValue = authHeader.replace(/^Bearer\s+/i, '').trim();
    const configs = [];
    let defaultKey;
    let defaultAccountId;
    for (const token of splitProviderTokens(rawValue)) {
        if (!token) {
            continue;
        }
        // No "=" means this is a plain key, not a routing spec.
        if (!token.includes('=')) {
            // Only the first plain key becomes the default.
            if (!defaultKey) {
                const parsed = parseTokenWithAccount(token);
                defaultKey = parsed.token;
                defaultAccountId = parsed.accountId;
            }
            continue;
        }
        // The final colon separates the mapping list from the key itself.
        const colonAt = token.lastIndexOf(':');
        if (colonAt === -1) {
            // Malformed routed token (no key part) — ignore it.
            continue;
        }
        const parsed = parseTokenWithAccount(token.slice(colonAt + 1));
        const mappings = token.slice(0, colonAt).split(',').map((entry) => {
            const [from, to] = entry.split('=');
            // Expand short aliases (e.g. "opus-4.5") to full Claude model ids.
            return { from: from.trim(), to: MODEL_ALIASES[to] || to };
        });
        configs.push({ mappings, apiKey: parsed.token, accountId: parsed.accountId });
    }
    return { configs, defaultKey, defaultAccountId };
}
|
|
112
|
+
/**
 * Find the Claude model and API key for a given requested model.
 *
 * Explicit "from=to" route mappings take priority; otherwise the default key
 * is used with the requested model passed through unchanged (this covers
 * both native "claude-" models and passthrough names such as ultrathink).
 * The previous implementation had a separate "claude-" branch, but it
 * returned exactly the same value as the generic fallback, so the two
 * branches are merged.
 *
 * @param {string} requestedModel - Model name from the client request.
 * @param {{configs: Array, defaultKey?: string}} parsedKeys - Output of parseRoutedKeys.
 * @returns {{claudeModel: string, apiKey: string}|null} Routing result, or
 *   null when no mapping matches and no default key exists.
 */
function resolveModelRouting(requestedModel, parsedKeys) {
    // Check all configs for a matching explicit route.
    for (const config of parsedKeys.configs) {
        const match = config.mappings.find((m) => m.from === requestedModel);
        if (match) {
            return { claudeModel: match.to, apiKey: config.apiKey };
        }
    }
    // No route matched: fall back to the default key with the model as-is.
    if (parsedKeys.defaultKey) {
        return { claudeModel: requestedModel, apiKey: parsedKeys.defaultKey };
    }
    return null;
}
|
|
134
|
+
/**
 * Convert OpenAI/Responses-style chat messages into Claude Messages-API form.
 *
 * Handles five shapes in order: Responses tool-call items, Responses
 * tool-output items, assistant messages carrying tool_calls, role:"tool"
 * results, and assistant messages with array content. Consecutive tool_use
 * blocks are merged into the previous assistant message, and consecutive
 * tool_result blocks into the previous user message, matching Claude's
 * alternating-role requirement. A trailing assistant message is trimmed of
 * trailing whitespace ('...' substituted if empty) since the Claude API
 * rejects assistant turns ending in whitespace.
 *
 * @param {Array<object>} messages - Incoming messages in mixed formats.
 * @returns {Array<{role: string, content: any}>} Claude-format messages.
 */
function convertMessages(messages) {
    const converted = [];
    for (const msg of messages) {
        // Responses-API tool-call item → Claude tool_use block on an assistant turn.
        if (msg.type === 'custom_tool_call' || msg.type === 'function_call') {
            let toolInput = msg.input || msg.arguments;
            if (typeof toolInput === 'string') {
                try {
                    toolInput = JSON.parse(toolInput);
                }
                catch {
                    // Non-JSON argument string: wrap so Claude still gets an object.
                    toolInput = { command: toolInput };
                }
            }
            const toolUse = { type: 'tool_use', id: msg.call_id, name: msg.name, input: toolInput || {} };
            const last = converted[converted.length - 1];
            // Merge into the previous assistant turn when possible.
            if (last?.role === 'assistant' && Array.isArray(last.content))
                last.content.push(toolUse);
            else
                converted.push({ role: 'assistant', content: [toolUse] });
            continue;
        }
        // Responses-API tool-output item → Claude tool_result block on a user turn.
        if (msg.type === 'custom_tool_call_output' || msg.type === 'function_call_output') {
            const toolResult = { type: 'tool_result', tool_use_id: msg.call_id, content: msg.output || '' };
            const last = converted[converted.length - 1];
            if (last?.role === 'user' && Array.isArray(last.content) && last.content[0]?.type === 'tool_result')
                last.content.push(toolResult);
            else
                converted.push({ role: 'user', content: [toolResult] });
            continue;
        }
        // Items with neither a recognized type nor a role are dropped.
        if (!msg.role)
            continue;
        // OpenAI assistant message carrying tool_calls.
        if (msg.role === 'assistant' && msg.tool_calls?.length) {
            let content = [];
            if (msg.content) {
                if (typeof msg.content === 'string') {
                    content = [{ type: 'text', text: msg.content }];
                }
                else if (Array.isArray(msg.content)) {
                    // Preserve existing content blocks, converting to Claude format
                    for (const block of msg.content) {
                        if (block.type === 'text' && typeof block.text === 'string') {
                            content.push({ type: 'text', text: block.text });
                        }
                        else if (block.type === 'tool_use') {
                            content.push(block); // Already Claude format
                        }
                    }
                }
            }
            for (const tc of msg.tool_calls) {
                let input = tc.function?.arguments || tc.arguments || {};
                if (typeof input === 'string') {
                    try {
                        input = JSON.parse(input || '{}');
                    }
                    catch {
                        // Keep unparseable arguments rather than dropping them.
                        input = { raw: input };
                    }
                }
                content.push({
                    type: 'tool_use', id: tc.id, name: tc.function?.name || tc.name, input
                });
            }
            converted.push({ role: 'assistant', content });
            continue;
        }
        // OpenAI role:"tool" message → Claude tool_result block on a user turn.
        if (msg.role === 'tool') {
            const toolResult = { type: 'tool_result', tool_use_id: msg.tool_call_id, content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content) };
            const last = converted[converted.length - 1];
            if (last?.role === 'user' && Array.isArray(last.content) && last.content[0]?.type === 'tool_result')
                last.content.push(toolResult);
            else
                converted.push({ role: 'user', content: [toolResult] });
            continue;
        }
        // Handle assistant messages with array content (no tool_calls)
        if (msg.role === 'assistant' && Array.isArray(msg.content)) {
            const content = [];
            for (const block of msg.content) {
                if (block.type === 'text' && typeof block.text === 'string') {
                    content.push({ type: 'text', text: block.text });
                }
                else if (block.type === 'tool_use') {
                    content.push(block);
                }
            }
            converted.push({ role: 'assistant', content: content.length > 0 ? content : '' });
            continue;
        }
        // Everything else (user/system/plain assistant) passes through as-is.
        converted.push({ role: msg.role, content: msg.content ?? '' });
    }
    // Claude rejects a final assistant turn ending in whitespace: trim it,
    // substituting '...' when trimming leaves nothing.
    const last = converted[converted.length - 1];
    if (last?.role === 'assistant') {
        if (typeof last.content === 'string')
            last.content = last.content.trimEnd() || '...';
        else if (Array.isArray(last.content)) {
            for (const block of last.content) {
                if (block.type === 'text')
                    block.text = (block.text?.trimEnd()) || '...';
            }
        }
    }
    return converted;
}
|
|
239
|
+
/**
 * Build a Responses-API request body for the ChatGPT Codex backend from an
 * OpenAI chat-completions request.
 *
 * @param {object} body - Incoming chat-completions request body.
 * @param {string} requestedModel - Backend model id to send.
 * @param {boolean} isStreaming - Caller's streaming preference; intentionally
 *   ignored here because the backend REQUIRES stream: true (non-streaming
 *   callers aggregate the stream afterwards).
 * @returns {object} Request body for POST .../responses.
 */
function buildChatGptResponsesBody(body, requestedModel, isStreaming) {
    // ChatGPT backend requires EXACTLY the base codex prompt as instructions
    const instructions = (0, chatgpt_instructions_1.getChatGptInstructions)();
    // Use the robust converter to handle all message formats, then prepend
    // any developer messages ahead of the converted input items.
    const { input, developerMessages } = (0, chat_to_responses_1.convertToResponsesFormat)(body);
    const fullInput = [...developerMessages, ...input];
    // The backend rejects an empty input list, so seed a trivial user turn.
    if (fullInput.length === 0) {
        fullInput.push({
            type: 'message',
            role: 'user',
            content: [{ type: 'input_text', text: 'Hello.' }],
        });
    }
    return {
        model: requestedModel,
        instructions,
        input: fullInput,
        tools: Array.isArray(body.tools) ? body.tools : [],
        tool_choice: 'auto',
        parallel_tool_calls: false,
        stream: true, // Backend REQUIRES stream: true
        store: false,
    };
}
|
|
266
|
+
/**
 * Create the mutable accumulator used while translating one ChatGPT SSE
 * stream into OpenAI chat-completion chunks.
 *
 * @param {string} model - Model id to stamp on every emitted chunk.
 * @returns {object} Fresh per-stream state.
 */
function createChatGptStreamState(model) {
    return {
        buffer: '', // Unconsumed SSE bytes awaiting a complete "\n\n" frame.
        // Provisional id; replaced once `response.created` supplies the real one.
        id: `chatcmpl-${Date.now().toString(36)}`,
        model,
        created: Math.floor(Date.now() / 1000),
        roleSent: false, // Whether the first delta carried role: 'assistant'.
        sawTextDelta: false, // Suppresses re-emitting text from output_item.done.
        toolCallsSeen: false, // Drives the final finish_reason.
        toolCallIndex: 0,
        processedItemIds: new Set(), // Track processed item IDs to prevent duplicates
    };
}
|
|
280
|
+
/**
 * Shape one OpenAI-style streaming chunk (`chat.completion.chunk`) from the
 * accumulated stream state.
 *
 * @param {object} state - Stream state (id, created, model).
 * @param {object} delta - Delta payload for choices[0].
 * @param {string|null} finishReason - finish_reason for choices[0].
 * @param {object} [usage] - Optional usage object; attached only when truthy.
 * @returns {object} A chat.completion.chunk payload.
 */
function createChatChunk(state, delta, finishReason, usage) {
    const chunk = {
        id: state.id,
        object: 'chat.completion.chunk',
        created: state.created,
        model: state.model,
        choices: [{ index: 0, delta, finish_reason: finishReason }],
    };
    if (usage) {
        chunk.usage = usage;
    }
    return chunk;
}
|
|
296
|
+
/**
 * Translate Responses-API usage fields (input_tokens/output_tokens) into
 * chat-completions usage fields, tolerating either naming scheme.
 *
 * @param {object|null|undefined} usage - Upstream usage object.
 * @returns {{prompt_tokens: number, completion_tokens: number, total_tokens: number}|undefined}
 */
function mapUsage(usage) {
    if (!usage) {
        return undefined;
    }
    const promptTokens = usage.input_tokens ?? usage.prompt_tokens ?? 0;
    const completionTokens = usage.output_tokens ?? usage.completion_tokens ?? 0;
    return {
        prompt_tokens: promptTokens,
        completion_tokens: completionTokens,
        // Trust an explicit total when present; otherwise derive it.
        total_tokens: usage.total_tokens ?? (promptTokens + completionTokens),
    };
}
|
|
308
|
+
/**
 * Incrementally translate raw ChatGPT Responses-API SSE bytes into OpenAI
 * chat-completion chunk events.
 *
 * Buffers partial frames in state.buffer (frames are delimited by "\n\n"),
 * then maps each parsed event: `response.created` fixes the chunk id,
 * `response.output_text.delta` becomes a content delta,
 * `response.output_item.done` emits fallback text or a tool_call delta, and
 * `response.completed` emits the final finish chunk plus a `done` marker.
 *
 * @param {object} state - Accumulator from createChatGptStreamState (mutated).
 * @param {string} chunk - Newly decoded SSE text.
 * @returns {Array<{type: 'chunk', data: object}|{type: 'done'}>} Events to
 *   forward; note 'done' entries carry no `data` field.
 */
function processChatGptChunk(state, chunk) {
    state.buffer += chunk;
    const results = [];
    // Split on frame boundaries; the last (possibly partial) piece is kept
    // buffered for the next call.
    const parts = state.buffer.split('\n\n');
    state.buffer = parts.pop() || '';
    for (const part of parts) {
        const lines = part.split('\n');
        for (const line of lines) {
            if (!line.startsWith('data:'))
                continue;
            const data = line.slice(5).trim();
            if (!data || data === '[DONE]')
                continue;
            let payload;
            try {
                payload = JSON.parse(data);
            }
            catch {
                // Malformed JSON frame: skip silently.
                continue;
            }
            const kind = payload?.type;
            // Adopt the backend's response id (minus the "resp_" prefix).
            if (kind === 'response.created' && payload.response?.id) {
                state.id = `chatcmpl-${String(payload.response.id).replace(/^resp_/, '')}`;
                continue;
            }
            // Incremental assistant text.
            if (kind === 'response.output_text.delta' && typeof payload.delta === 'string') {
                const delta = { content: payload.delta };
                // The first delta of the stream must carry the assistant role.
                if (!state.roleSent) {
                    delta.role = 'assistant';
                    state.roleSent = true;
                }
                state.sawTextDelta = true;
                results.push({ type: 'chunk', data: createChatChunk(state, delta, null) });
                continue;
            }
            // Completed output items: fallback text and function calls.
            if (kind === 'response.output_item.done' && payload.item) {
                const item = payload.item;
                // Generate item identifier and skip if already processed
                const itemId = item.id || item.call_id || `${item.type}_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
                if (state.processedItemIds.has(itemId)) {
                    continue; // Already processed this item, skip to prevent duplicates
                }
                state.processedItemIds.add(itemId);
                // Only emit full-message text when no incremental deltas arrived,
                // otherwise the text would be duplicated.
                if (item.type === 'message' && item.role === 'assistant' && !state.sawTextDelta) {
                    const blocks = Array.isArray(item.content) ? item.content : [];
                    const text = blocks
                        .filter((b) => b?.type === 'output_text' && typeof b.text === 'string')
                        .map((b) => b.text)
                        .join('');
                    if (text) {
                        const delta = { content: text };
                        if (!state.roleSent) {
                            delta.role = 'assistant';
                            state.roleSent = true;
                        }
                        results.push({ type: 'chunk', data: createChatChunk(state, delta, null) });
                    }
                }
                else if (item.type === 'function_call') {
                    state.toolCallsSeen = true;
                    const toolCallId = item.call_id || item.id || `call_${state.toolCallIndex}`;
                    const delta = {
                        tool_calls: [
                            {
                                index: state.toolCallIndex++,
                                id: toolCallId,
                                type: 'function',
                                function: {
                                    name: item.name || 'unknown',
                                    arguments: item.arguments || '',
                                },
                            },
                        ],
                    };
                    if (!state.roleSent) {
                        delta.role = 'assistant';
                        state.roleSent = true;
                    }
                    results.push({ type: 'chunk', data: createChatChunk(state, delta, null) });
                }
                continue;
            }
            // End of response: emit the finish chunk (with usage) and a done marker.
            if (kind === 'response.completed') {
                const usage = mapUsage(payload.response?.usage);
                const finish = state.toolCallsSeen ? 'tool_calls' : 'stop';
                results.push({ type: 'chunk', data: createChatChunk(state, {}, finish, usage) });
                results.push({ type: 'done' });
            }
        }
    }
    return results;
}
|
|
400
|
+
/**
 * Proxy a chat-completions request straight through to the OpenAI API.
 *
 * The request body is forwarded unmodified with the caller's token; on
 * upstream error the OpenAI error JSON (or raw text if unparseable) is
 * relayed with the upstream status. Streaming responses are piped through
 * as-is; non-streaming responses are re-serialized via c.json.
 *
 * @param c - Hono context (presumably; matches usage of c.json elsewhere).
 * @param {object} body - Chat-completions request body to forward.
 * @param {string} requestedModel - Model name, used only for logging here.
 * @param {string} openaiToken - Bearer token for api.openai.com.
 * @param {boolean} isStreaming - Whether to pass the body through as SSE.
 * @returns {Promise<Response>} The proxied response.
 */
async function handleOpenAIProxy(c, body, requestedModel, openaiToken, isStreaming) {
    (0, logger_1.logRequest)('openai', requestedModel, {});
    // Forward request directly to OpenAI API
    const response = await fetch('https://api.openai.com/v1/chat/completions', {
        method: 'POST',
        headers: {
            'content-type': 'application/json',
            'authorization': `Bearer ${openaiToken}`,
        },
        body: JSON.stringify(body),
    });
    if (!response.ok) {
        const errorText = await response.text();
        (0, logger_1.logError)(errorText.slice(0, 200));
        // Try to parse OpenAI error
        try {
            const openAIError = JSON.parse(errorText);
            return c.json(openAIError, response.status);
        }
        catch (parseError) {
            // Not JSON: relay the raw error body with the upstream status.
            return new Response(errorText, { status: response.status });
        }
    }
    (0, logger_1.logResponse)(response.status);
    // For streaming, pass through the stream
    if (isStreaming) {
        return new Response(response.body, {
            status: response.status,
            headers: {
                'content-type': 'text/event-stream',
                'cache-control': 'no-cache',
                'connection': 'keep-alive',
            },
        });
    }
    else {
        // For non-streaming, pass through the JSON response
        // NOTE(review): this re-emits with c.json's default status rather than
        // the upstream status — confirm that is acceptable for 2xx responses.
        const responseData = await response.json();
        return c.json(responseData);
    }
}
|
|
441
|
+
/**
 * Proxy a chat-completions request to the ChatGPT Codex Responses backend.
 *
 * The backend only speaks SSE, so the upstream request always streams; for
 * streaming clients the events are translated on the fly into OpenAI chunk
 * format, while non-streaming clients get the stream aggregated into a single
 * chat.completion response. Upstream errors are mapped to OpenAI-style error
 * JSON with friendlier messages for auth/rate-limit/bad-request cases.
 *
 * Fix: the non-streaming aggregation loop previously read
 * `result.data.usage` for every result — including `{type: 'done'}` events,
 * which carry no `data` — throwing a TypeError whenever a
 * `response.completed` event was aggregated. Now guarded with optional
 * chaining.
 *
 * @param c - Hono context.
 * @param {object} body - Incoming chat-completions request body.
 * @param {string} requestedModel - Model name requested by the client.
 * @param {{token: string, accountId?: string}} tokenInfo - ChatGPT credentials.
 * @param {boolean} isStreaming - Whether the client requested SSE output.
 * @returns {Promise<Response>} The translated response.
 */
async function handleChatGptProxy(c, body, requestedModel, tokenInfo, isStreaming) {
    const chatgptModel = normalizeChatGptModel(requestedModel);
    const responseBody = buildChatGptResponsesBody(body, chatgptModel, isStreaming);
    (0, logger_1.logRequest)('chatgpt', `${requestedModel} → ${chatgptModel}`, {
        system: responseBody.instructions,
        messages: responseBody.input,
        tools: responseBody.tools,
    });
    // The backend requires the chatgpt-account-id header; fail fast without it.
    if (!tokenInfo.accountId) {
        return c.json({
            error: {
                message: 'ChatGPT account id missing. Re-login to refresh your ChatGPT token.',
                type: 'authentication_error',
                code: 'authentication_error',
            }
        }, 401);
    }
    const baseUrl = CHATGPT_BASE_URL.replace(/\/$/, '');
    if ((0, logger_1.isVerbose)()) {
        (0, logger_1.logHeaders)('Request Headers', {
            'content-type': 'application/json',
            'authorization': `Bearer ${tokenInfo.token}`,
            'chatgpt-account-id': tokenInfo.accountId || '',
            'originator': 'codex_cli_rs',
            'accept': 'text/event-stream',
        });
    }
    const response = await fetch(`${baseUrl}/responses`, {
        method: 'POST',
        headers: {
            'content-type': 'application/json',
            'authorization': `Bearer ${tokenInfo.token}`,
            'chatgpt-account-id': tokenInfo.accountId,
            'originator': 'codex_cli_rs',
            'accept': 'text/event-stream', // Backend always requires streaming
        },
        body: JSON.stringify(responseBody),
    });
    if (!response.ok) {
        const errorText = await response.text();
        (0, logger_1.logError)(errorText.slice(0, 200));
        try {
            const parsed = JSON.parse(errorText);
            const errorMessage = parsed.error?.message || parsed.message || 'Unknown error';
            const errorType = parsed.error?.type || parsed.type || 'api_error';
            // Map error types to user-friendly messages
            let userMessage = errorMessage;
            if (response.status === 401 || errorType === 'authentication_error') {
                userMessage = `ChatGPT authentication failed: ${errorMessage}. Try re-logging in.`;
            }
            else if (response.status === 429 || errorType === 'rate_limit_error') {
                userMessage = `ChatGPT rate limit exceeded: ${errorMessage}`;
            }
            else if (response.status === 400 || errorType === 'invalid_request_error') {
                userMessage = `Invalid request to ChatGPT: ${errorMessage}`;
            }
            return c.json({
                error: {
                    message: userMessage,
                    type: errorType,
                    code: errorType,
                }
            }, response.status);
        }
        catch {
            // Non-JSON upstream error body: relay a truncated excerpt.
            return c.json({
                error: {
                    message: `ChatGPT error: ${errorText.slice(0, 200)}`,
                    type: 'api_error',
                    code: 'api_error',
                }
            }, response.status);
        }
    }
    (0, logger_1.logResponse)(response.status);
    if ((0, logger_1.isVerbose)()) {
        const respHeaders = {};
        response.headers.forEach((value, key) => {
            respHeaders[key] = value;
        });
        (0, logger_1.logHeaders)('Response Headers', respHeaders);
    }
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    const state = createChatGptStreamState(chatgptModel);
    if (isStreaming) {
        // Streaming client: translate SSE events to OpenAI chunks on the fly.
        return (0, streaming_1.stream)(c, async (s) => {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                const chunk = decoder.decode(value, { stream: true });
                if ((0, logger_1.isVerbose)()) {
                    (0, logger_1.logStreamChunk)(chunk);
                }
                const results = processChatGptChunk(state, chunk);
                for (const result of results) {
                    if (result.type === 'chunk') {
                        await s.write(`data: ${JSON.stringify(result.data)}\n\n`);
                    }
                    else if (result.type === 'done') {
                        await s.write('data: [DONE]\n\n');
                    }
                }
            }
            reader.releaseLock();
        });
    }
    // Non-streaming: aggregate the stream into a final response
    let fullContent = '';
    const toolCallsMap = new Map(); // Use Map for deduplication by ID
    let usage = null;
    while (true) {
        const { done, value } = await reader.read();
        if (done)
            break;
        const chunk = decoder.decode(value, { stream: true });
        if ((0, logger_1.isVerbose)()) {
            (0, logger_1.logStreamChunk)(chunk);
        }
        const results = processChatGptChunk(state, chunk);
        for (const result of results) {
            if (result.type === 'chunk' && result.data?.choices?.[0]?.delta) {
                const delta = result.data.choices[0].delta;
                if (delta.content)
                    fullContent += delta.content;
                // Aggregate tool calls - merge by index, deduplicate by ID
                if (delta.tool_calls) {
                    for (const tc of delta.tool_calls) {
                        if (tc.id) {
                            // New tool call with ID - store by ID
                            if (!toolCallsMap.has(tc.id)) {
                                toolCallsMap.set(tc.id, { ...tc });
                            }
                        }
                        else if (tc.index !== undefined) {
                            // Continuation chunk (has index but no id): append its
                            // argument fragment to the matching existing call.
                            for (const existing of toolCallsMap.values()) {
                                if (existing.index === tc.index && tc.function?.arguments) {
                                    existing.function = existing.function || {};
                                    existing.function.arguments = (existing.function.arguments || '') + tc.function.arguments;
                                    break;
                                }
                            }
                        }
                    }
                }
            }
            // BUGFIX: 'done' results have no data — guard with optional chaining.
            if (result.data?.usage)
                usage = result.data.usage;
        }
    }
    reader.releaseLock();
    // Convert Map to array
    const toolCalls = Array.from(toolCallsMap.values());
    return c.json({
        id: state.id,
        object: 'chat.completion',
        created: state.created,
        model: chatgptModel,
        choices: [{
                index: 0,
                message: {
                    role: 'assistant',
                    content: fullContent || null,
                    ...(toolCalls.length > 0 ? { tool_calls: toolCalls } : {}),
                },
                finish_reason: toolCalls.length > 0 ? 'tool_calls' : 'stop',
            }],
        usage: usage || { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
    });
}
|
|
614
|
+
/**
 * Main chat-completions handler. Accepts an OpenAI-style request body and
 * routes it to one of three backends based on the API key / model routing:
 *   1. Cursor BYOK key-check bypass (canned response),
 *   2. OpenAI / ChatGPT proxy (non-Claude model with a default key),
 *   3. Anthropic Messages API (Claude models), translating request and
 *      response between OpenAI and Anthropic wire formats.
 *
 * @param c - Hono request context; body is read as JSON from it.
 * @returns A JSON response (OpenAI chat.completion shape) or an SSE stream.
 */
async function handleChatCompletion(c) {
    const body = await c.req.json();
    const requestedModel = body.model || '';
    const isStreaming = body.stream === true;
    // Cursor sends a synthetic "key check" request when validating a BYOK key;
    // answer it with a canned success response instead of hitting any backend.
    if ((0, cursor_byok_bypass_1.isCursorKeyCheck)(body)) {
        (0, logger_1.logRequest)('bypass', requestedModel, {});
        (0, logger_1.logResponse)(200);
        return c.json((0, cursor_byok_bypass_1.createCursorBypassResponse)());
    }
    // The Authorization header may encode model→Claude routings plus API keys.
    const parsedKeys = parseRoutedKeys(c.req.header('authorization'));
    const routing = resolveModelRouting(requestedModel, parsedKeys);
    // Treat as Claude if routing resolved to a claude-* model, or if the
    // requested model appears in any explicit mapping (note: the arrow's `c`
    // shadows the Hono context here — it is the key-config entry).
    const isClaude = routing !== null && (routing.claudeModel.startsWith('claude-') || parsedKeys.configs.some(c => c.mappings.some(m => m.from === requestedModel)));
    // If not a Claude model and we have a default key, proxy to OpenAI or ChatGPT backend
    if (!isClaude && parsedKeys.defaultKey) {
        const tokenInfo = {
            token: parsedKeys.defaultKey,
            accountId: parsedKeys.defaultAccountId,
        };
        // A ChatGPT account id or a JWT-shaped token means the ChatGPT backend;
        // otherwise fall through to the plain OpenAI API proxy.
        const useChatGpt = Boolean(tokenInfo.accountId) || isJwtToken(tokenInfo.token);
        if (useChatGpt) {
            return handleChatGptProxy(c, body, requestedModel, tokenInfo, isStreaming);
        }
        return handleOpenAIProxy(c, body, requestedModel, tokenInfo.token, isStreaming);
    }
    // No usable routing: return setup instructions as a normal assistant
    // message (HTTP 200) so clients surface the text instead of erroring.
    if (!isClaude || !routing) {
        (0, logger_1.logRequest)('bypass', requestedModel, {});
        const instructions = `Model "${requestedModel}" not configured. Set up routing in your API key:

Format: o3=opus-4.5,o3-mini=sonnet-4.5:sk-ant-xxx

Examples:
  o3=opus-4.5:sk-ant-xxx  # Single routing
  o3=opus-4.5,o3-mini=sonnet-4.5:sk-ant-xxx  # Multiple routings
  sk-ant-xxx  # Default key for claude-* models`;
        return c.json({
            id: 'error', object: 'chat.completion',
            choices: [{ index: 0, message: { role: 'assistant', content: instructions }, finish_reason: 'stop' }]
        });
    }
    // --- Claude path: translate OpenAI request shape to Anthropic Messages ---
    const { claudeModel, apiKey: anthropicKey } = routing;
    body.model = claudeModel;
    // Responses-API style `input` field: normalize into `messages`.
    if (body.input !== undefined && !body.messages) {
        if (typeof body.input === 'string')
            body.messages = [{ role: 'user', content: body.input }];
        else if (Array.isArray(body.input))
            body.messages = body.input;
        // NOTE(review): `user` is repurposed as a system prompt here — presumably
        // matching how the calling client uses the field; confirm against callers.
        if (body.user && typeof body.user === 'string')
            body.messages = [{ role: 'system', content: body.user }, ...body.messages];
    }
    // Anthropic takes system prompts via a top-level `system` field, not as
    // messages; pull them out of the message list.
    const systemMessages = body.messages?.filter((msg) => msg.role === 'system') || [];
    body.messages = body.messages?.filter((msg) => msg.role !== 'system') || [];
    if (body.messages.length === 0) {
        (0, logger_1.logError)('No user messages in request');
        return c.json({ error: 'No messages provided' }, 400);
    }
    // Fixed preamble blocks (required for the OAuth/subscription beta) followed
    // by the caller's own system messages.
    body.system = [
        { type: 'text', text: "You are Claude Code, Anthropic's official CLI for Claude." },
        { type: 'text', text: "[Proxied via Sub Bridge - user's Claude subscription]" },
        ...systemMessages.map((msg) => ({ type: 'text', text: msg.content || '' })),
    ];
    // Rough token estimate (~4 chars/token) used only for logging.
    const contextSize = JSON.stringify(body.messages || []).length;
    const contextTokensEstimate = Math.ceil(contextSize / 4);
    const systemText = body.system.map((s) => s.text).join('\n');
    (0, logger_1.logRequest)('claude', `${requestedModel} → ${claudeModel}`, {
        system: systemText, messages: body.messages, tools: body.tools, tokens: contextTokensEstimate
    });
    // max_tokens is mandatory for Anthropic; opus gets a lower cap.
    body.max_tokens = claudeModel.includes('opus') ? 32_000 : 64_000;
    body.messages = convertMessages(body.messages);
    // Convert OpenAI tool definitions ({type:'function', function:{...}}) to
    // Anthropic tool shape ({name, description, input_schema}).
    if (body.tools?.length) {
        body.tools = body.tools.map((tool, idx) => {
            let converted;
            if (tool.type === 'function' && tool.function) {
                converted = { name: tool.function.name, description: tool.function.description || '', input_schema: tool.function.parameters || { type: 'object', properties: {} } };
            }
            else if (tool.name) {
                converted = { name: tool.name, description: tool.description || '', input_schema: tool.input_schema || tool.parameters || { type: 'object', properties: {} } };
            }
            else {
                converted = tool;
            }
            // Mark the last tool for prompt caching (cache breakpoint).
            if (idx === body.tools.length - 1)
                converted.cache_control = { type: 'ephemeral' };
            return converted;
        });
    }
    // Map OpenAI tool_choice values onto Anthropic equivalents.
    if (body.tool_choice === 'auto')
        body.tool_choice = { type: 'auto' };
    else if (body.tool_choice === 'none' || body.tool_choice === null)
        delete body.tool_choice;
    else if (body.tool_choice === 'required')
        body.tool_choice = { type: 'any' };
    else if (body.tool_choice?.function?.name)
        body.tool_choice = { type: 'tool', name: body.tool_choice.function.name };
    // Cache breakpoint on the final system block as well.
    if (body.system.length > 0)
        body.system[body.system.length - 1].cache_control = { type: 'ephemeral' };
    // Whitelist only fields Anthropic accepts; drop everything else
    // (OpenAI-specific fields like frequency_penalty, n, user, ...).
    const cleanBody = {};
    const allowedFields = ['model', 'messages', 'max_tokens', 'stop_sequences', 'stream', 'system', 'temperature', 'top_p', 'top_k', 'tools', 'tool_choice'];
    for (const field of allowedFields)
        if (body[field] !== undefined)
            cleanBody[field] = body[field];
    const response = await fetch('https://api.anthropic.com/v1/messages', {
        method: 'POST',
        headers: {
            'content-type': 'application/json',
            // OAuth bearer token (subscription), not an x-api-key header.
            'authorization': `Bearer ${anthropicKey}`,
            'anthropic-beta': 'oauth-2025-04-20,prompt-caching-2024-07-31',
            'anthropic-version': '2023-06-01',
            'accept': isStreaming ? 'text/event-stream' : 'application/json',
        },
        body: JSON.stringify(cleanBody),
    });
    if (!response.ok) {
        const errorText = await response.text();
        (0, logger_1.logError)(errorText.slice(0, 200));
        // Try to parse the Anthropic error and convert to OpenAI format
        try {
            const anthropicError = JSON.parse(errorText);
            const errorMessage = anthropicError.error?.message || 'Unknown error';
            const errorType = anthropicError.error?.type || 'api_error';
            // Map Anthropic error types to user-friendly messages
            let userMessage = errorMessage;
            if (errorType === 'rate_limit_error') {
                userMessage = `Rate limit exceeded: ${errorMessage}`;
            }
            else if (errorType === 'authentication_error') {
                userMessage = `Authentication failed: ${errorMessage}`;
            }
            else if (errorType === 'invalid_request_error') {
                userMessage = `Invalid request: ${errorMessage}`;
            }
            // Return OpenAI-compatible error format
            const openAIError = {
                error: {
                    message: userMessage,
                    type: errorType,
                    code: errorType,
                }
            };
            return c.json(openAIError, response.status);
        }
        catch (parseError) {
            // If parsing fails, return raw error
            return new Response(errorText, { status: response.status });
        }
    }
    (0, logger_1.logResponse)(response.status);
    if (isStreaming) {
        // Re-emit Anthropic SSE events as OpenAI-format SSE chunks.
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        const converterState = (0, anthropic_to_openai_converter_1.createConverterState)();
        return (0, streaming_1.stream)(c, async (s) => {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                // stream:true keeps multi-byte sequences intact across chunks.
                const chunk = decoder.decode(value, { stream: true });
                const results = (0, anthropic_to_openai_converter_1.processChunk)(converterState, chunk, false);
                for (const result of results) {
                    if (result.type === 'chunk')
                        await s.write(`data: ${JSON.stringify(result.data)}\n\n`);
                    else if (result.type === 'done')
                        await s.write('data: [DONE]\n\n');
                }
            }
            reader.releaseLock();
        });
    }
    else {
        // Non-streaming: convert the whole Anthropic response in one shot.
        const responseData = await response.json();
        const openAIResponse = (0, anthropic_to_openai_converter_1.convertNonStreamingResponse)(responseData);
        return c.json(openAIResponse);
    }
}
|
|
787
|
+
/**
 * Builds the Hono sub-app exposing the OpenAI-compatible surface:
 *   GET  /models            - Anthropic model list sourced from models.dev
 *   POST /chat/completions  - chat handler (OpenAI route)
 *   POST /messages          - same handler (Anthropic-style route)
 *
 * @returns {Hono} The configured router, mounted by the caller.
 */
function createChatRoutes() {
    const app = new hono_1.Hono();
    // Models endpoint
    app.get('/models', async (c) => {
        try {
            const response = await fetch('https://models.dev/api.json');
            if (!response.ok)
                return c.json({ object: 'list', data: [] });
            const modelsData = await response.json();
            const anthropicModels = modelsData.anthropic?.models || {};
            const models = Object.entries(anthropicModels).map(([modelId, modelData]) => ({
                id: modelId, object: 'model',
                // OpenAI model objects carry a unix `created` timestamp; fall back
                // to epoch when the catalogue has no release date.
                created: Math.floor(new Date(modelData.release_date || '1970-01-01').getTime() / 1000),
                owned_by: 'anthropic',
            }));
            return c.json({ object: 'list', data: models });
        }
        catch (err) {
            // Network failure or malformed JSON previously escaped as an
            // unhandled rejection (HTTP 500). Degrade to an empty list,
            // matching the `!response.ok` fallback above.
            return c.json({ object: 'list', data: [] });
        }
    });
    // Chat completions
    app.post('/chat/completions', (c) => handleChatCompletion(c));
    app.post('/messages', (c) => handleChatCompletion(c));
    return app;
}
|
|
808
|
+
//# sourceMappingURL=chat.js.map
|