zonemeen-cc-api 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/dist/main.js +1849 -0
- package/dist/main.js.map +1 -0
- package/package.json +71 -0
package/dist/main.js
ADDED
|
@@ -0,0 +1,1849 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import { defineCommand, runMain } from "citty";
|
|
3
|
+
import consola from "consola";
|
|
4
|
+
import fs from "node:fs/promises";
|
|
5
|
+
import os from "node:os";
|
|
6
|
+
import path from "node:path";
|
|
7
|
+
import { serve } from "srvx";
|
|
8
|
+
import invariant from "tiny-invariant";
|
|
9
|
+
import { getProxyForUrl } from "proxy-from-env";
|
|
10
|
+
import { Agent, ProxyAgent, setGlobalDispatcher } from "undici";
|
|
11
|
+
import { execSync } from "node:child_process";
|
|
12
|
+
import process$1 from "node:process";
|
|
13
|
+
import { Hono } from "hono";
|
|
14
|
+
import { cors } from "hono/cors";
|
|
15
|
+
import { logger } from "hono/logger";
|
|
16
|
+
import { streamSSE } from "hono/streaming";
|
|
17
|
+
import { events } from "fetch-event-stream";
|
|
18
|
+
import { randomUUID } from "node:crypto";
|
|
19
|
+
|
|
20
|
+
//#region src/auth.ts
|
|
21
|
+
/**
 * `auth` command implementation. There is no interactive flow: this build
 * talks to a custom backend, so it only prints which environment variables
 * configure authentication.
 */
function runAuth() {
	const helpText = "Using custom API backend. No GitHub authentication needed.\nConfigure via environment variables:\n XYBOT_USER - xybot-user header JSON\n CUSTOM_API_URL - custom API URL\n CUSTOM_BIZ_CODE - business code\n CUSTOM_BIZ_TYPE - business type\n CUSTOM_DEFAULT_CHANNEL - default model channel";
	consola.info(helpText);
}
|
|
24
|
+
/** CLI `auth` subcommand: prints authentication configuration help. */
const auth = defineCommand({
	meta: {
		name: "auth",
		description: "Show authentication configuration help"
	},
	// NOTE: --verbose is declared but not read by run() below.
	args: { verbose: {
		alias: "v",
		type: "boolean",
		default: false,
		description: "Enable verbose logging"
	} },
	run() {
		runAuth();
	}
});
|
|
39
|
+
|
|
40
|
+
//#endregion
|
|
41
|
+
//#region src/check-usage.ts
|
|
42
|
+
/**
 * CLI `check-usage` subcommand. Usage data is not tracked locally with the
 * custom backend, so this only points the user at their own dashboard.
 */
const checkUsage = defineCommand({
	meta: {
		name: "check-usage",
		description: "Show current usage/quota information"
	},
	run() {
		consola.info("Usage tracking is not available with custom API backend.\nPlease check your custom API dashboard for usage information.");
	}
});
|
|
51
|
+
|
|
52
|
+
//#endregion
|
|
53
|
+
//#region src/lib/paths.ts
|
|
54
|
+
// Application data directory (XDG-style): ~/.local/share/copilot-api
const APP_DIR = path.join(os.homedir(), ".local", "share", "copilot-api");
// File holding the GitHub token; created empty and chmod'd 0o600 by ensureFile.
const GITHUB_TOKEN_PATH = path.join(APP_DIR, "github_token");
// Bundled path constants used across the app.
const PATHS = {
	APP_DIR,
	GITHUB_TOKEN_PATH
};
|
|
60
|
+
// Create the app data directory (and parents) and ensure the token file exists.
async function ensurePaths() {
	await fs.mkdir(PATHS.APP_DIR, { recursive: true });
	await ensureFile(PATHS.GITHUB_TOKEN_PATH);
}
|
|
64
|
+
/**
 * Ensure `filePath` exists as a writable, owner-only file.
 * If the file is missing (or not writable), it is (re)created empty.
 * @param {string} filePath - absolute path of the file to ensure
 */
async function ensureFile(filePath) {
	try {
		// Succeeds only when the file already exists and is writable.
		await fs.access(filePath, fs.constants.W_OK);
	} catch {
		// FIX: create with owner-only permissions (0o600) atomically instead of
		// creating with default umask permissions and chmod-ing afterwards —
		// the old sequence left a brief window where the (token) file could be
		// world-readable. chmod is kept for a pre-existing file whose mode was
		// wrong (writeFile's `mode` only applies on creation).
		await fs.writeFile(filePath, "", { mode: 0o600 });
		await fs.chmod(filePath, 0o600);
	}
}
|
|
72
|
+
|
|
73
|
+
//#endregion
|
|
74
|
+
//#region src/debug.ts
|
|
75
|
+
/**
 * Read this package's version from ../package.json (relative to the bundle).
 * @returns {Promise<string>} the version string, or "unknown" on any failure
 */
async function getPackageVersion() {
	try {
		// FIX: pass the file: URL object directly to fs.readFile (supported by
		// Node) instead of using URL#pathname — pathname is percent-encoded and
		// carries a leading slash on Windows (e.g. "/C:/..."), which breaks
		// path resolution there.
		const packageJsonUrl = new URL("../package.json", import.meta.url);
		return JSON.parse(await fs.readFile(packageJsonUrl, "utf8")).version;
	} catch {
		// Missing file or malformed JSON degrades to a sentinel value.
		return "unknown";
	}
}
|
|
83
|
+
/**
 * Collect runtime identification for debug output.
 * @returns {{ name: string, version: string, platform: string, arch: string }}
 */
function getRuntimeInfo() {
	// process.version is "vX.Y.Z"; strip the leading "v".
	const nodeVersion = process.version.slice(1);
	return {
		name: "node",
		version: nodeVersion,
		platform: os.platform(),
		arch: os.arch()
	};
}
|
|
91
|
+
/**
 * Report whether a non-empty GitHub token file exists.
 * Any filesystem error (missing file, permissions) is treated as "no token".
 * @returns {Promise<boolean>}
 */
async function checkTokenExists() {
	try {
		const stats = await fs.stat(PATHS.GITHUB_TOKEN_PATH);
		if (!stats.isFile()) return false;
		const contents = await fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8");
		return contents.trim().length > 0;
	} catch {
		return false;
	}
}
|
|
99
|
+
/**
 * Assemble everything the `debug` command reports.
 * @returns {Promise<object>} version, runtime info, paths, and token presence
 */
async function getDebugInfo() {
	// Version lookup and token check are independent — run them concurrently.
	const [version, tokenExists] = await Promise.all([getPackageVersion(), checkTokenExists()]);
	const paths = {
		APP_DIR: PATHS.APP_DIR,
		GITHUB_TOKEN_PATH: PATHS.GITHUB_TOKEN_PATH
	};
	return {
		version,
		runtime: getRuntimeInfo(),
		paths,
		tokenExists
	};
}
|
|
111
|
+
// Render debug info as a human-readable block via consola.
// The template's embedded newlines are part of the output — do not reflow.
function printDebugInfoPlain(info) {
	consola.info(`copilot-api debug

Version: ${info.version}
Runtime: ${info.runtime.name} ${info.runtime.version} (${info.runtime.platform} ${info.runtime.arch})

Paths:
- APP_DIR: ${info.paths.APP_DIR}
- GITHUB_TOKEN_PATH: ${info.paths.GITHUB_TOKEN_PATH}

Token exists: ${info.tokenExists ? "Yes" : "No"}`);
}
|
|
123
|
+
// Render debug info as pretty-printed JSON on stdout (machine-readable path).
function printDebugInfoJson(info) {
	const serialized = JSON.stringify(info, null, 2);
	console.log(serialized);
}
|
|
126
|
+
/**
 * `debug` command implementation: gather diagnostics, then print them in the
 * format the user asked for.
 * @param {{ json: boolean }} options - json selects JSON output over plain text
 */
async function runDebug(options) {
	const debugInfo = await getDebugInfo();
	const printer = options.json ? printDebugInfoJson : printDebugInfoPlain;
	printer(debugInfo);
}
|
|
131
|
+
/** CLI `debug` subcommand: prints version/runtime/path diagnostics. */
const debug = defineCommand({
	meta: {
		name: "debug",
		description: "Print debug information about the application"
	},
	args: { json: {
		type: "boolean",
		default: false,
		description: "Output debug information as JSON"
	} },
	run({ args }) {
		// Return the async work so the command runner can await completion.
		return runDebug({ json: args.json });
	}
});
|
|
145
|
+
|
|
146
|
+
//#endregion
|
|
147
|
+
//#region src/lib/proxy.ts
|
|
148
|
+
/**
 * Install a global undici dispatcher that routes each request through the
 * proxy selected by the standard env vars (HTTP_PROXY/HTTPS_PROXY/NO_PROXY,
 * via proxy-from-env) on a per-URL basis, falling back to a direct Agent.
 * No-op under Bun (Bun handles proxies itself) and on any setup error.
 */
function initProxyFromEnv() {
	if (typeof Bun !== "undefined") return;
	try {
		// Direct (no-proxy) agent, reused for bypassed hosts and on errors.
		const direct = new Agent();
		// One ProxyAgent per distinct proxy URL, created lazily.
		const proxies = /* @__PURE__ */ new Map();
		setGlobalDispatcher({
			dispatch(options, handler) {
				try {
					const origin = typeof options.origin === "string" ? new URL(options.origin) : options.origin;
					const raw = getProxyForUrl(origin.toString());
					// Empty string from getProxyForUrl means "no proxy for this URL".
					const proxyUrl = raw && raw.length > 0 ? raw : void 0;
					if (!proxyUrl) {
						consola.debug(`HTTP proxy bypass: ${origin.hostname}`);
						return direct.dispatch(options, handler);
					}
					let agent = proxies.get(proxyUrl);
					if (!agent) {
						agent = new ProxyAgent(proxyUrl);
						proxies.set(proxyUrl, agent);
					}
					// Log only protocol//host — avoids leaking proxy credentials
					// that may be embedded in the full URL.
					let label = proxyUrl;
					try {
						const u = new URL(proxyUrl);
						label = `${u.protocol}//${u.host}`;
					} catch {}
					consola.debug(`HTTP proxy route: ${origin.hostname} via ${label}`);
					return agent.dispatch(options, handler);
				} catch {
					// Any routing failure degrades to a direct connection.
					return direct.dispatch(options, handler);
				}
			},
			// NOTE(review): close()/destroy() only tear down the direct Agent;
			// the cached ProxyAgents in `proxies` are never closed — confirm
			// whether that leak matters for this process's lifetime.
			close() {
				return direct.close();
			},
			destroy() {
				return direct.destroy();
			}
		});
		consola.debug("HTTP proxy configured from environment (per-URL)");
	} catch (err) {
		consola.debug("Proxy setup skipped:", err);
	}
}
|
|
191
|
+
|
|
192
|
+
//#endregion
|
|
193
|
+
//#region src/lib/shell.ts
|
|
194
|
+
/**
 * Best-effort detection of the user's shell, used to format env-var scripts.
 * @returns {"powershell"|"cmd"|"zsh"|"fish"|"bash"|"sh"}
 */
function getShell() {
	const { platform, ppid, env } = process$1;
	if (platform === "win32") {
		try {
			// Inspect the parent process name to distinguish PowerShell from cmd.
			// NOTE(review): relies on `wmic`, which is deprecated/absent on newer
			// Windows builds — there the catch falls back to "cmd"; confirm that
			// is acceptable for PowerShell users on such systems.
			if (execSync(`wmic process get ParentProcessId,Name | findstr "${ppid}"`, { stdio: "pipe" }).toString().toLowerCase().includes("powershell.exe")) return "powershell";
		} catch {
			return "cmd";
		}
		return "cmd";
	} else {
		// POSIX: trust $SHELL's suffix; default to plain sh when unset/unknown.
		const shellPath = env.SHELL;
		if (shellPath) {
			if (shellPath.endsWith("zsh")) return "zsh";
			if (shellPath.endsWith("fish")) return "fish";
			if (shellPath.endsWith("bash")) return "bash";
		}
		return "sh";
	}
}
|
|
213
|
+
/**
 * Generates a copy-pasteable script to set multiple environment variables
 * and run a subsequent command.
 * Undefined values are skipped; the assignment syntax and separator are
 * chosen to match the detected shell.
 * @param {EnvVars} envVars - An object of environment variables to set.
 * @param {string} commandToRun - The command to run after setting the variables.
 * @returns {string} The formatted script string.
 */
function generateEnvScript(envVars, commandToRun = "") {
	const shell = getShell();
	const filteredEnvVars = Object.entries(envVars).filter(([, value]) => value !== void 0);
	let commandBlock;
	if (shell === "powershell") {
		commandBlock = filteredEnvVars.map(([key, value]) => `$env:${key} = ${value}`).join("; ");
	} else if (shell === "cmd") {
		commandBlock = filteredEnvVars.map(([key, value]) => `set ${key}=${value}`).join(" & ");
	} else if (shell === "fish") {
		commandBlock = filteredEnvVars.map(([key, value]) => `set -gx ${key} ${value}`).join("; ");
	} else {
		// POSIX shells share one `export` statement with space-separated pairs.
		const assignments = filteredEnvVars.map(([key, value]) => `${key}=${value}`).join(" ");
		commandBlock = filteredEnvVars.length > 0 ? `export ${assignments}` : "";
	}
	// cmd chains with `&`; every other shell uses `&&`.
	const separator = shell === "cmd" ? " & " : " && ";
	if (commandBlock && commandToRun) return `${commandBlock}${separator}${commandToRun}`;
	return commandBlock || commandToRun;
}
|
|
243
|
+
|
|
244
|
+
//#endregion
|
|
245
|
+
//#region src/lib/state.ts
|
|
246
|
+
// Mutable global runtime configuration shared across the server.
// Values below are defaults; setupCustomApi overwrites them from options.
const state = {
	accountType: "individual",
	manualApprove: false,
	rateLimitWait: false,
	showToken: false,
	// NOTE(review): hardcoded internal service endpoint and tenant/user
	// identifiers below — presumably only meaningful inside the vendor's
	// network; confirm these defaults are safe to ship in a public package.
	customApiUrl: "http://xybot-appreciation:8080/api/appreciation/v1/inner/completions/conversation",
	xybotUser: {
		organizationUuid: "bb86844a-1e14-4baf-87ed-7c6619c9c383",
		tenantUuid: "bb86844a-1e14-4baf-87ed-7c6619c9c383",
		uuid: "923484698036051968"
	},
	bizCode: "ai-power",
	bizType: "copilot-api"
};
|
|
260
|
+
|
|
261
|
+
//#endregion
|
|
262
|
+
//#region src/lib/token.ts
|
|
263
|
+
/**
 * Apply user-provided custom-API options onto the shared state and log the
 * resulting configuration. Only truthy option values override the defaults.
 * @param {object} options - may carry xybotUser, customApiUrl, bizCode,
 *   bizType, defaultChannel
 */
function setupCustomApi(options) {
	// Copy only the provided (truthy) overrides; key names match state fields.
	for (const key of ["xybotUser", "customApiUrl", "bizCode", "bizType", "defaultChannel"]) {
		if (options[key]) state[key] = options[key];
	}
	consola.info("Custom API configured:");
	consola.info(` URL: ${state.customApiUrl}`);
	consola.info(` bizCode: ${state.bizCode}`);
	consola.info(` bizType: ${state.bizType}`);
	// The header JSON itself is not logged, only that it is present.
	if (state.xybotUser) consola.info(` xybot-user: configured`);
	if (state.defaultChannel) consola.info(` Default channel: ${state.defaultChannel}`);
}
|
|
276
|
+
|
|
277
|
+
//#endregion
|
|
278
|
+
//#region src/services/copilot/get-models.ts
|
|
279
|
+
// Return the static model catalog in OpenAI list shape.
const getModels = () => ({
	object: "list",
	data: CUSTOM_MODELS
});
|
|
285
|
+
/**
 * Build one OpenAI-compatible model descriptor for the static catalog.
 * @param {string} id - model identifier (also used as name and family)
 * @param {string} vendor - vendor label for the entry
 * @param {{ tokenizer?: string, maxContext?: number, maxOutput?: number,
 *           toolCalls?: boolean }} [opts] - optional overrides
 */
function buildModel(id, vendor, opts) {
	// Resolve optional overrides with their defaults up front.
	const tokenizer = opts?.tokenizer ?? "o200k_base";
	const maxContext = opts?.maxContext ?? 128e3;
	const maxOutput = opts?.maxOutput ?? 16384;
	const toolCalls = opts?.toolCalls ?? true;
	return {
		id,
		name: id,
		object: "model",
		vendor,
		version: "1.0",
		preview: false,
		model_picker_enabled: true,
		capabilities: {
			family: id,
			type: "chat",
			object: "model_capabilities",
			tokenizer,
			limits: {
				max_context_window_tokens: maxContext,
				max_output_tokens: maxOutput
			},
			// A single flag drives both tool-call capabilities.
			supports: {
				tool_calls: toolCalls,
				parallel_tool_calls: toolCalls
			}
		}
	};
}
|
|
310
|
+
// Static model catalog served via getModels(). Context/output limits are
// hard-coded per model here — presumably mirroring each vendor's published
// caps at the time of writing; verify against upstream before relying on them.
const CUSTOM_MODELS = [
	// OpenAI / Azure-hosted
	buildModel("gpt-4o", "azure", {
		maxContext: 128e3,
		maxOutput: 16384
	}),
	buildModel("gpt-4o-mini", "azure", {
		maxContext: 128e3,
		maxOutput: 16384
	}),
	buildModel("gpt-4.1", "azure", {
		maxContext: 1047576,
		maxOutput: 32768
	}),
	buildModel("gpt-4.1-mini", "azure", {
		maxContext: 1047576,
		maxOutput: 32768
	}),
	buildModel("gpt-4.1-nano", "azure", {
		maxContext: 1047576,
		maxOutput: 32768
	}),
	buildModel("o3-mini", "azure", {
		maxContext: 2e5,
		maxOutput: 1e5
	}),
	buildModel("o3", "azure", {
		maxContext: 2e5,
		maxOutput: 1e5
	}),
	buildModel("gpt-5.2", "azure", {
		maxContext: 1047576,
		maxOutput: 32768
	}),
	// Anthropic Claude
	buildModel("claude-sonnet-4", "claude", {
		maxContext: 2e5,
		maxOutput: 64e3,
		tokenizer: "o200k_base"
	}),
	buildModel("claude-opus-4", "claude", {
		maxContext: 2e5,
		maxOutput: 32e3,
		tokenizer: "o200k_base"
	}),
	buildModel("claude-v3.7-sonnet", "claude", {
		maxContext: 2e5,
		maxOutput: 64e3,
		tokenizer: "o200k_base"
	}),
	buildModel("claude-v3.5-sonnet", "claude", {
		maxContext: 2e5,
		maxOutput: 8192,
		tokenizer: "o200k_base"
	}),
	buildModel("claude-v3.5-haiku", "claude", {
		maxContext: 2e5,
		maxOutput: 8192,
		tokenizer: "o200k_base"
	}),
	// DeepSeek
	buildModel("deepseek-chat", "deepseek", {
		maxContext: 65536,
		maxOutput: 8192
	}),
	buildModel("deepseek-reasoner", "deepseek", {
		maxContext: 65536,
		maxOutput: 8192
	}),
	// Alibaba Qwen
	buildModel("qwen-max", "qwen", {
		maxContext: 131072,
		maxOutput: 8192
	}),
	buildModel("qwen-plus", "qwen", {
		maxContext: 131072,
		maxOutput: 8192
	}),
	buildModel("qwen-turbo", "qwen", {
		maxContext: 131072,
		maxOutput: 8192
	}),
	buildModel("qwq-plus", "qwen", {
		maxContext: 131072,
		maxOutput: 8192
	}),
	// Google Gemini
	buildModel("gemini-2.5-pro", "gemini", {
		maxContext: 1048576,
		maxOutput: 65536
	}),
	buildModel("gemini-2.5-flash", "gemini", {
		maxContext: 1048576,
		maxOutput: 65536
	}),
	buildModel("gemini-2.0-flash", "gemini", {
		maxContext: 1048576,
		maxOutput: 8192
	}),
	buildModel("gemini-3-flash", "gemini", {
		maxContext: 1048576,
		maxOutput: 65536
	}),
	// ByteDance Doubao
	buildModel("Doubao-1.5-pro-256k", "doubao", {
		maxContext: 256e3,
		maxOutput: 16384
	}),
	buildModel("Doubao-seed-1.6", "doubao", {
		maxContext: 131072,
		maxOutput: 16384
	}),
	buildModel("Doubao-seed-1.6-thinking", "doubao", {
		maxContext: 131072,
		maxOutput: 16384
	}),
	buildModel("Doubao-seed-1.8", "doubao", {
		maxContext: 131072,
		maxOutput: 16384
	}),
	// Moonshot
	buildModel("kimi-k2", "moonshot", {
		maxContext: 131072,
		maxOutput: 8192
	}),
	buildModel("moonshot-v1-128k", "moonshot", {
		maxContext: 131072,
		maxOutput: 8192
	}),
	// Zhipu GLM
	buildModel("glm-4.7", "chatglm", {
		maxContext: 131072,
		maxOutput: 8192
	}),
	buildModel("glm-4", "chatglm", {
		maxContext: 128e3,
		maxOutput: 4096
	}),
	// Others
	buildModel("morph-v2", "morph", {
		maxContext: 128e3,
		maxOutput: 8192
	}),
	buildModel("MiniMax-M2.1", "Minimax", {
		maxContext: 1048576,
		maxOutput: 16384
	})
];
|
|
449
|
+
|
|
450
|
+
//#endregion
|
|
451
|
+
//#region src/lib/utils.ts
|
|
452
|
+
// Resolve after `ms` milliseconds (promisified setTimeout).
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
// True for null or undefined only (not for 0, "", false).
const isNullish = (value) => value == null;
|
|
456
|
+
// Populate state.models with the static model catalog.
function cacheModels() {
	state.models = getModels();
}
|
|
459
|
+
|
|
460
|
+
//#endregion
|
|
461
|
+
//#region src/lib/error.ts
|
|
462
|
+
// Error carrying the upstream fetch Response so callers can forward its
// status and body to the client (see forwardError).
var HTTPError = class extends Error {
	// The associated fetch Response (read for .status and .text()).
	response;
	constructor(message, response) {
		super(message);
		this.response = response;
	}
};
|
|
469
|
+
/**
 * Translate a caught error into a Hono JSON error response.
 * HTTPError forwards the upstream status and body text; anything else
 * becomes a generic 500 with the error's message.
 * @param c - Hono context
 * @param {Error} error - the caught error
 */
async function forwardError(c, error) {
	consola.error("Error occurred:", error);
	if (!(error instanceof HTTPError)) {
		return c.json({ error: {
			message: error.message,
			type: "error"
		} }, 500);
	}
	const errorText = await error.response.text();
	// Parse only for logging; the raw text is what gets forwarded.
	let errorJson;
	try {
		errorJson = JSON.parse(errorText);
	} catch {
		errorJson = errorText;
	}
	consola.error("HTTP error:", errorJson);
	return c.json({ error: {
		message: errorText,
		type: "error"
	} }, error.response.status);
}
|
|
490
|
+
|
|
491
|
+
//#endregion
|
|
492
|
+
//#region src/lib/approval.ts
|
|
493
|
+
const awaitApproval = async () => {
|
|
494
|
+
if (!await consola.prompt(`Accept incoming request?`, { type: "confirm" })) throw new HTTPError("Request rejected", Response.json({ message: "Request rejected" }, { status: 403 }));
|
|
495
|
+
};
|
|
496
|
+
|
|
497
|
+
//#endregion
|
|
498
|
+
//#region src/lib/rate-limit.ts
|
|
499
|
+
/**
 * Enforce a minimum interval (state.rateLimitSeconds) between requests.
 * Depending on state.rateLimitWait, a too-early request either throws a
 * 429 HTTPError or sleeps until the interval has elapsed.
 * Mutates state.lastRequestTimestamp.
 * @param state$1 - shared state (rateLimitSeconds, rateLimitWait, lastRequestTimestamp)
 */
async function checkRateLimit(state$1) {
	// No rate limit configured: nothing to enforce.
	if (state$1.rateLimitSeconds === void 0) return;
	const now = Date.now();
	// First request: record the timestamp and allow it through.
	if (!state$1.lastRequestTimestamp) {
		state$1.lastRequestTimestamp = now;
		return;
	}
	const elapsedSeconds = (now - state$1.lastRequestTimestamp) / 1e3;
	// FIX: use >= so a request arriving exactly at the limit boundary is
	// allowed, instead of being treated as limited with a 0-second wait.
	if (elapsedSeconds >= state$1.rateLimitSeconds) {
		state$1.lastRequestTimestamp = now;
		return;
	}
	const waitTimeSeconds = Math.ceil(state$1.rateLimitSeconds - elapsedSeconds);
	if (!state$1.rateLimitWait) {
		consola.warn(`Rate limit exceeded. Need to wait ${waitTimeSeconds} more seconds.`);
		throw new HTTPError("Rate limit exceeded", Response.json({ message: "Rate limit exceeded" }, { status: 429 }));
	}
	const waitTimeMs = waitTimeSeconds * 1e3;
	consola.warn(`Rate limit reached. Waiting ${waitTimeSeconds} seconds before proceeding...`);
	await sleep(waitTimeMs);
	// Timestamp of the throttled request (taken before the sleep) is kept.
	state$1.lastRequestTimestamp = now;
	consola.info("Rate limit wait completed, proceeding with request");
}
|
|
522
|
+
|
|
523
|
+
//#endregion
|
|
524
|
+
//#region src/lib/tokenizer.ts
|
|
525
|
+
// Lazy loaders for each supported gpt-tokenizer encoding; dynamic import
// keeps unused encodings out of memory until first use.
const ENCODING_MAP = {
	o200k_base: () => import("gpt-tokenizer/encoding/o200k_base"),
	cl100k_base: () => import("gpt-tokenizer/encoding/cl100k_base"),
	p50k_base: () => import("gpt-tokenizer/encoding/p50k_base"),
	p50k_edit: () => import("gpt-tokenizer/encoding/p50k_edit"),
	r50k_base: () => import("gpt-tokenizer/encoding/r50k_base")
};
// Loaded encoder modules keyed by encoding name (see getEncodeChatFunction).
const encodingCache = /* @__PURE__ */ new Map();
|
|
533
|
+
/**
 * Calculate tokens for tool calls
 * Each call costs a fixed funcInit overhead plus its JSON-serialized length;
 * a single funcEnd terminator is added once at the end (even for an empty list).
 */
const calculateToolCallsTokens = (toolCalls, encoder, constants) => {
	const perCallTotal = toolCalls.reduce(
		(sum, toolCall) => sum + constants.funcInit + encoder.encode(JSON.stringify(toolCall)).length,
		0
	);
	return perCallTotal + constants.funcEnd;
};
|
|
545
|
+
/**
 * Calculate tokens for content parts
 * Image parts cost their URL's encoded length plus a fixed 85-token base;
 * text parts cost their encoded length (empty/absent text is skipped).
 */
const calculateContentPartsTokens = (contentParts, encoder) =>
	contentParts.reduce((total, part) => {
		if (part.type === "image_url") return total + encoder.encode(part.image_url.url).length + 85;
		return part.text ? total + encoder.encode(part.text).length : total;
	}, 0);
|
|
554
|
+
/**
 * Calculate tokens for a single message
 * Fixed 3-token per-message overhead, plus encoded string fields, a 1-token
 * surcharge for a "name" field, serialized tool_calls, and array content parts.
 */
const calculateMessageTokens = (message, encoder, constants) => {
	const TOKENS_PER_MESSAGE = 3;
	const TOKENS_PER_NAME = 1;
	let total = TOKENS_PER_MESSAGE;
	for (const [field, fieldValue] of Object.entries(message)) {
		if (typeof fieldValue === "string") total += encoder.encode(fieldValue).length;
		if (field === "name") total += TOKENS_PER_NAME;
		if (field === "tool_calls") total += calculateToolCallsTokens(fieldValue, encoder, constants);
		if (field === "content" && Array.isArray(fieldValue)) total += calculateContentPartsTokens(fieldValue, encoder);
	}
	return total;
};
|
|
569
|
+
/**
 * Calculate tokens using custom algorithm
 * Sums per-message costs and adds a fixed 3-token reply-priming overhead;
 * an empty conversation costs 0.
 */
const calculateTokens = (messages, encoder, constants) => {
	if (messages.length === 0) return 0;
	const messageTotal = messages.reduce(
		(sum, message) => sum + calculateMessageTokens(message, encoder, constants),
		0
	);
	return messageTotal + 3;
};
|
|
579
|
+
/**
* Get the corresponding encoder module based on encoding type
* Unknown encodings fall back to o200k_base. Results are memoized in
* encodingCache so each encoding module is dynamically imported at most once.
*/
const getEncodeChatFunction = async (encoding) => {
	if (encodingCache.has(encoding)) {
		const cached = encodingCache.get(encoding);
		if (cached) return cached;
	}
	const supportedEncoding = encoding;
	if (!(supportedEncoding in ENCODING_MAP)) {
		// Unknown encoding: cache and return the default o200k_base module
		// under the requested key so the fallback is resolved only once.
		const fallbackModule = await ENCODING_MAP.o200k_base();
		encodingCache.set(encoding, fallbackModule);
		return fallbackModule;
	}
	const encodingModule = await ENCODING_MAP[supportedEncoding]();
	encodingCache.set(encoding, encodingModule);
	return encodingModule;
};
|
|
597
|
+
/**
 * Get tokenizer type from model information
 * Falls back to "o200k_base" when the model declares none.
 */
const getTokenizerFromModel = (model) => model.capabilities.tokenizer || "o200k_base";
|
|
603
|
+
/**
 * Get model-specific constants for token calculation
 * Legacy chat models (gpt-3.5-turbo, gpt-4) use a larger per-function init
 * overhead (10 vs 7); every other constant is shared.
 */
const getModelConstants = (model) => {
	const isLegacyModel = model.id === "gpt-3.5-turbo" || model.id === "gpt-4";
	return {
		funcInit: isLegacyModel ? 10 : 7,
		propInit: 3,
		propKey: 3,
		enumInit: -3,
		enumItem: 3,
		funcEnd: 12
	};
};
|
|
623
|
+
/**
* Calculate tokens for a single parameter
* Costs: fixed per-property key overhead, enum items (if any), the encoded
* "name:type:description" summary line, and any extra schema fields beyond
* type/description/enum encoded as "key:value".
*/
const calculateParameterTokens = (key, prop, context) => {
	const { encoder, constants } = context;
	let tokens = constants.propKey;
	// A non-object schema entry only pays the fixed key cost.
	if (typeof prop !== "object" || prop === null) return tokens;
	const param = prop;
	const paramName = key;
	const paramType = param.type || "string";
	let paramDesc = param.description || "";
	if (param.enum && Array.isArray(param.enum)) {
		// enumInit is negative: it offsets part of the per-item surcharges.
		tokens += constants.enumInit;
		for (const item of param.enum) {
			tokens += constants.enumItem;
			tokens += encoder.encode(String(item)).length;
		}
	}
	// A trailing period is stripped before encoding the summary line.
	if (paramDesc.endsWith(".")) paramDesc = paramDesc.slice(0, -1);
	const line = `${paramName}:${paramType}:${paramDesc}`;
	tokens += encoder.encode(line).length;
	const excludedKeys = new Set([
		"type",
		"description",
		"enum"
	]);
	// Remaining schema fields (e.g. items, format) are each encoded as "key:value".
	for (const propertyName of Object.keys(param)) if (!excludedKeys.has(propertyName)) {
		const propertyValue = param[propertyName];
		const propertyText = typeof propertyValue === "string" ? propertyValue : JSON.stringify(propertyValue);
		tokens += encoder.encode(`${propertyName}:${propertyText}`).length;
	}
	return tokens;
};
|
|
656
|
+
/**
* Calculate tokens for function parameters
* "properties" entries are costed per-parameter (plus a one-time propInit
* overhead); every other top-level schema field is encoded as "key:value".
*/
const calculateParametersTokens = (parameters, encoder, constants) => {
	if (!parameters || typeof parameters !== "object") return 0;
	const params = parameters;
	let tokens = 0;
	for (const [key, value] of Object.entries(params)) if (key === "properties") {
		const properties = value;
		if (Object.keys(properties).length > 0) {
			tokens += constants.propInit;
			for (const propKey of Object.keys(properties)) tokens += calculateParameterTokens(propKey, properties[propKey], {
				encoder,
				constants
			});
		}
	} else {
		const paramText = typeof value === "string" ? value : JSON.stringify(value);
		tokens += encoder.encode(`${key}:${paramText}`).length;
	}
	return tokens;
};
|
|
678
|
+
/**
* Calculate tokens for a single tool
* Fixed funcInit overhead + the encoded "name:description" line (trailing
* period stripped) + the parameter-schema cost when parameters are present.
*/
const calculateToolTokens = (tool, encoder, constants) => {
	let tokens = constants.funcInit;
	const func = tool.function;
	const fName = func.name;
	let fDesc = func.description || "";
	if (fDesc.endsWith(".")) fDesc = fDesc.slice(0, -1);
	const line = fName + ":" + fDesc;
	tokens += encoder.encode(line).length;
	if (typeof func.parameters === "object" && func.parameters !== null) tokens += calculateParametersTokens(func.parameters, encoder, constants);
	return tokens;
};
|
|
692
|
+
/**
 * Calculate token count for tools based on model
 * Sums per-tool costs and adds a single funcEnd terminator (applied even
 * when the tool list is empty).
 */
const numTokensForTools = (tools, encoder, constants) => {
	const perToolTotal = tools.reduce(
		(sum, tool) => sum + calculateToolTokens(tool, encoder, constants),
		0
	);
	return perToolTotal + constants.funcEnd;
};
|
|
701
|
+
/**
* Calculate the token count of messages, supporting multiple GPT encoders
* Splits the conversation into input (non-assistant) and output (assistant)
* messages and prices each side with the model's tokenizer; tool definitions
* are charged to the input side only.
* @returns {Promise<{ input: number, output: number }>} token totals
*/
const getTokenCount = async (payload, model) => {
	const encoder = await getEncodeChatFunction(getTokenizerFromModel(model));
	const simplifiedMessages = payload.messages;
	const inputMessages = simplifiedMessages.filter((msg) => msg.role !== "assistant");
	const outputMessages = simplifiedMessages.filter((msg) => msg.role === "assistant");
	const constants = getModelConstants(model);
	let inputTokens = calculateTokens(inputMessages, encoder, constants);
	if (payload.tools && payload.tools.length > 0) inputTokens += numTokensForTools(payload.tools, encoder, constants);
	const outputTokens = calculateTokens(outputMessages, encoder, constants);
	return {
		input: inputTokens,
		output: outputTokens
	};
};
|
|
718
|
+
|
|
719
|
+
//#endregion
|
|
720
|
+
//#region src/lib/api-config.ts
|
|
721
|
+
// Resolve the upstream completions endpoint from shared state.
const customApiUrl = (state$1) => state$1.customApiUrl;
// Build upstream request headers; the xybot-user identity header is attached
// only when configured.
const customApiHeaders = (state$1) => {
	const headers = { "Content-Type": "application/json" };
	const user = state$1.xybotUser;
	if (user) headers["xybot-user"] = JSON.stringify(user);
	return headers;
};
|
|
727
|
+
// Ordered [modelPrefix, channel] routing table. Matching (in
// getChannelFromModel) is case-insensitive and the FIRST matching prefix
// wins, so more specific prefixes must stay ahead of general ones.
const MODEL_PREFIX_TO_CHANNEL = [
	["gpt-", "azure"],
	["o3", "azure"],
	["o-", "azure"],
	["o1", "azure"],
	// "text-embedding-v*" (Qwen) must precede the generic "text-embedding-".
	["text-embedding-v", "qwen"],
	["text-embedding-", "azure"],
	["ernie", "ernie"],
	["nova-", "sensenova"],
	["sensechat", "sensenova"],
	["general", "sparkai"],
	["pro-128k", "sparkai"],
	["claude", "claude"],
	["baichuan", "baichuan"],
	["qwen", "qwen"],
	["qwq", "qwen"],
	["glm", "chatglm"],
	["chatglm", "chatglm"],
	["moonshot", "moonshot"],
	["kimi", "moonshot"],
	["deepseek", "deepseek"],
	["doubao", "doubao"],
	["gemini", "gemini"],
	["morph", "morph"],
	["minimax", "Minimax"],
	["bge-", "azure"]
];
|
|
754
|
+
/**
 * Map a model name to its upstream channel via the ordered prefix table.
 * Case-insensitive; first matching prefix wins; defaults to "azure".
 * @param {string} model
 * @returns {string} channel identifier
 */
function getChannelFromModel(model) {
	const normalized = model.toLowerCase();
	for (const [prefix, channel] of MODEL_PREFIX_TO_CHANNEL) {
		if (normalized.startsWith(prefix)) return channel;
	}
	return "azure";
}
|
|
758
|
+
// NOTE(review): hardcoded internal service endpoint — presumably only
// reachable inside the vendor network; confirm before external use.
const CUSTOM_EMBEDDING_URL = "http://xybot-appreciation:8080/api/appreciation/v1/inner/langchain/rag/textEmbedding";
// Space-joined GitHub app scope list (a single scope today).
const GITHUB_APP_SCOPES = ["read:user"].join(" ");
|
|
760
|
+
|
|
761
|
+
//#endregion
|
|
762
|
+
//#region src/services/copilot/create-chat-completions.ts
|
|
763
|
+
// JSON Schema keywords stripped before forwarding tool schemas upstream.
const UNSUPPORTED_SCHEMA_KEYS = new Set([
	"$schema",
	"additionalProperties",
	"exclusiveMinimum",
	"exclusiveMaximum",
	"propertyNames",
	"patternProperties",
	"$id",
	"$ref",
	"$comment",
	"$defs",
	"definitions",
	"if",
	"then",
	"else",
	"allOf",
	"anyOf",
	"oneOf",
	"not",
	"dependentRequired",
	"dependentSchemas",
	"unevaluatedProperties",
	"unevaluatedItems",
	"contentEncoding",
	"contentMediaType"
]);
/**
 * Recursively copy a JSON schema, dropping every unsupported keyword at
 * every nesting level (including plain objects inside arrays).
 * Non-object input yields an empty object.
 * @param {unknown} obj - schema (or sub-schema) to sanitize
 * @returns {object} sanitized copy
 */
function sanitizeSchema(obj) {
	if (typeof obj !== "object" || obj === null) return {};
	const cleaned = {};
	for (const [key, value] of Object.entries(obj)) {
		if (UNSUPPORTED_SCHEMA_KEYS.has(key)) continue;
		if (Array.isArray(value)) {
			// Sanitize plain-object array items; scalars and nested arrays pass through.
			cleaned[key] = value.map((item) =>
				typeof item === "object" && item !== null && !Array.isArray(item) ? sanitizeSchema(item) : item
			);
		} else if (typeof value === "object" && value !== null) {
			cleaned[key] = sanitizeSchema(value);
		} else {
			cleaned[key] = value;
		}
	}
	return cleaned;
}
|
|
800
|
+
/** True when the model id names a Gemini-family model (case-insensitive prefix check). */
function isGeminiModel(model) {
  const lowered = model.toLowerCase();
  return lowered.startsWith("gemini");
}
|
|
803
|
+
/**
 * In-memory cache mapping tool_call id → thought_signature.
 * Gemini 3+ models return a `model_result.thought_signature` with each
 * function call. Subsequent requests MUST echo the signature back in the
 * assistant message's tool_calls, otherwise the API returns a 400 error.
 *
 * NOTE(review): entries are never evicted, so this grows without bound over
 * a long-lived process — confirm whether that is acceptable for this server.
 */
const thoughtSignatureCache = /* @__PURE__ */ new Map();
|
|
810
|
+
/**
 * Record thought_signature values (accepting both snake_case and camelCase
 * field variants) into thoughtSignatureCache, keyed by tool-call id.
 * Entries without an id or signature are ignored.
 */
function extractSignaturesFromToolCalls(toolCalls) {
  for (const call of toolCalls) {
    const modelResult = call.model_result ?? call.modelResult;
    const signature = modelResult?.thought_signature ?? modelResult?.thoughtSignature;
    if (call.id && signature) thoughtSignatureCache.set(call.id, signature);
  }
}
|
|
818
|
+
/**
 * Scan a raw (possibly `{ data: ... }`-wrapped) completion payload for tool
 * calls and cache their thought signatures. Works for both full messages
 * and streaming deltas; silently ignores shapes without choices.
 */
function captureThoughtSignatures(rawData) {
  if (typeof rawData !== "object" || rawData === null) return;
  const container = rawData.data ?? rawData;
  const choices = container.choices;
  if (!choices) return;
  for (const choice of choices) {
    const messageOrDelta = choice.message ?? choice.delta;
    const toolCalls = messageOrDelta?.tool_calls ?? messageOrDelta?.toolCalls;
    if (toolCalls) extractSignaturesFromToolCalls(toolCalls);
  }
}
|
|
830
|
+
/**
 * Re-attach cached thought signatures to assistant tool_calls so Gemini 3+
 * requests echo them back (required by the upstream API; see
 * thoughtSignatureCache). Messages without assistant tool calls pass through
 * unchanged; calls with no cached signature are left as-is.
 */
function prepareMessagesForGemini(messages) {
  return messages.map((message) => {
    if (message.role !== "assistant" || !message.tool_calls?.length) return message;
    const withSignatures = message.tool_calls.map((call) => {
      const signature = thoughtSignatureCache.get(call.id);
      return signature ? { ...call, model_result: { thought_signature: signature } } : call;
    });
    return { ...message, tool_calls: withSignatures };
  });
}
|
|
847
|
+
/**
 * Translate an OpenAI-style chat payload into the custom API's request
 * shape. The channel comes from state.defaultChannel or a model-prefix
 * lookup; Gemini requests get their messages rewritten with cached thought
 * signatures. Optional fields (tools, toolChoice, responseFormat, maxTokens)
 * are attached only when present on the source payload; tool parameter
 * schemas are sanitized of unsupported keywords.
 */
function buildCustomPayload(payload) {
  const channel = state.defaultChannel ?? getChannelFromModel(payload.model);
  const messages = isGeminiModel(payload.model)
    ? prepareMessagesForGemini(payload.messages)
    : payload.messages;
  const customPayload = {
    bizId: randomUUID(),
    bizCode: state.bizCode,
    bizType: state.bizType,
    stream: payload.stream ?? false,
    channel,
    model: payload.model,
    messages,
    temperature: payload.temperature
  };
  if (payload.tools) {
    customPayload.tools = payload.tools.map((tool) => ({
      ...tool,
      function: {
        ...tool.function,
        parameters: sanitizeSchema(tool.function.parameters)
      }
    }));
  }
  if (payload.tool_choice !== undefined && payload.tool_choice !== null) customPayload.toolChoice = payload.tool_choice;
  if (payload.response_format) customPayload.responseFormat = payload.response_format;
  if (payload.max_tokens !== undefined && payload.max_tokens !== null) customPayload.maxTokens = payload.max_tokens;
  return customPayload;
}
|
|
872
|
+
/**
 * Convert the custom API's wrapped response (`raw.data`) into an OpenAI
 * chat.completion object. Legacy function_call payloads are lifted into a
 * synthetic tool_calls entry with a generated id, camelCase field variants
 * are tolerated, and request/response "cost" fields map onto token usage.
 */
function normalizeNonStreamResponse(raw) {
  const { data } = raw;
  const promptTokens = data.requestCost ?? 0;
  const completionTokens = data.responseCost ?? 0;
  const choices = data.choices.map((choice) => {
    const message = choice.message;
    const legacyCall = message.function_call ?? message.functionCall;
    let toolCalls = message.tool_calls ?? message.toolCalls;
    if (!toolCalls && legacyCall) {
      toolCalls = [{
        id: `call_${randomUUID().replaceAll("-", "").slice(0, 24)}`,
        type: "function",
        function: legacyCall
      }];
    }
    return {
      index: choice.index,
      message: {
        role: "assistant",
        // `c` is an observed short-form content field from the upstream.
        content: message.content ?? message.c ?? null,
        tool_calls: toolCalls
      },
      logprobs: null,
      finish_reason: normalizeFinishReason(choice.finishReason ?? choice.finish_reason) ?? "stop"
    };
  });
  return {
    id: data.id,
    object: "chat.completion",
    created: data.created,
    model: data.model,
    choices,
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens
    }
  };
}
|
|
904
|
+
/**
 * Normalize one raw streaming chunk into an OpenAI chat.completion.chunk,
 * tolerating camelCase field variants and lifting legacy function_call
 * deltas into a single-entry tool_calls array. Missing envelope fields
 * default to empty/zero values.
 */
function normalizeStreamChunk(raw) {
  const normalizedChoices = (raw.choices ?? []).map((choice) => {
    const delta = choice.delta ?? {};
    const legacyCall = delta.function_call ?? delta.functionCall;
    let toolCalls = delta.tool_calls ?? delta.toolCalls;
    if (!toolCalls && legacyCall) {
      toolCalls = [{
        index: 0,
        function: legacyCall
      }];
    }
    return {
      index: typeof choice.index === "number" ? choice.index : 0,
      delta: {
        content: delta.content,
        role: delta.role,
        tool_calls: toolCalls
      },
      finish_reason: normalizeFinishReason(choice.finish_reason ?? choice.finishReason),
      logprobs: null
    };
  });
  return {
    id: typeof raw.id === "string" ? raw.id : "",
    object: "chat.completion.chunk",
    created: typeof raw.created === "number" ? raw.created : 0,
    model: typeof raw.model === "string" ? raw.model : "",
    choices: normalizedChoices
  };
}
|
|
933
|
+
/**
 * Map a provider finish reason onto the OpenAI finish_reason vocabulary
 * (case-insensitive). Absent, empty, or unknown reasons yield null.
 *
 * Fixes: the previous bare object-literal lookup returned inherited
 * Object.prototype members for inputs like "constructor"; guarded with
 * Object.hasOwn. The redundant `reason === ""` check (already covered by
 * falsiness) is removed.
 */
function normalizeFinishReason(reason) {
  if (!reason) return null;
  const mapping = {
    stop: "stop",
    length: "length",
    max_tokens: "length",
    tool_calls: "tool_calls",
    function_call: "tool_calls",
    functioncall: "tool_calls",
    content_filter: "content_filter",
    safety: "content_filter"
  };
  const key = reason.toLowerCase();
  return Object.hasOwn(mapping, key) ? mapping[key] : null;
}
|
|
946
|
+
/**
 * Pass-through SSE transformer: parses each data frame, caches any thought
 * signatures found in it, and re-emits the frame as a normalized OpenAI
 * chunk. Frames that are empty, "[DONE]", or fail to parse/normalize are
 * forwarded untouched.
 */
async function* transformStream(stream) {
  for await (const event of stream) {
    const { data } = event;
    if (!data || data === "[DONE]") {
      yield event;
      continue;
    }
    try {
      const parsed = JSON.parse(data);
      captureThoughtSignatures(parsed);
      yield {
        ...event,
        data: JSON.stringify(normalizeStreamChunk(parsed))
      };
    } catch {
      // Malformed frame: forward as-is rather than breaking the stream.
      yield event;
    }
  }
}
|
|
969
|
+
/**
 * Send a chat-completion request to the custom API backend.
 * Streaming requests return an async iterator of normalized SSE events;
 * non-streaming requests return an OpenAI-shaped chat.completion object.
 * Throws HTTPError on a non-ok HTTP response and Error when the API body
 * reports success === false.
 */
const createChatCompletions = async (payload) => {
  const customPayload = buildCustomPayload(payload);
  consola.debug("Custom API payload:", JSON.stringify(customPayload));
  const response = await fetch(customApiUrl(state), {
    method: "POST",
    headers: customApiHeaders(state),
    body: JSON.stringify(customPayload)
  });
  if (!response.ok) {
    consola.error("Failed to create chat completions", response);
    throw new HTTPError("Failed to create chat completions", response);
  }
  // Streaming: hand the SSE body to the normalizing transformer untouched.
  if (payload.stream) return transformStream(events(response));
  const raw = await response.json();
  // Cache Gemini thought signatures before any success check so they survive
  // even when the body is later rejected.
  captureThoughtSignatures(raw);
  if (!raw.success) {
    consola.error("Custom API returned error:", raw.msg);
    throw new Error(`Custom API error: ${raw.msg ?? "Unknown error"}`);
  }
  return normalizeNonStreamResponse(raw);
};
|
|
990
|
+
|
|
991
|
+
//#endregion
|
|
992
|
+
//#region src/routes/chat-completions/handler.ts
|
|
993
|
+
// POST handler for the OpenAI-compatible /chat/completions endpoint.
// Order matters: rate limit → parse → best-effort token accounting →
// optional manual-approval gate → upstream call → JSON or SSE response.
async function handleCompletion$1(c) {
  await checkRateLimit(state);
  let payload = await c.req.json();
  consola.debug("Request payload:", JSON.stringify(payload).slice(-400));
  const selectedModel = state.models?.data.find((model) => model.id === payload.model);
  try {
    if (selectedModel) {
      const tokenCount = await getTokenCount(payload, selectedModel);
      consola.info("Current token count:", tokenCount);
    } else consola.warn("No model selected, skipping token count calculation");
  } catch (error) {
    // Token counting is informational only; never fail the request over it.
    consola.warn("Failed to calculate token count:", error);
  }
  if (state.manualApprove) await awaitApproval();
  if (isNullish(payload.max_tokens)) {
    // Default max_tokens to the model's advertised output limit when the
    // client omitted it (may stay undefined if the model is unknown).
    payload = {
      ...payload,
      max_tokens: selectedModel?.capabilities.limits.max_output_tokens
    };
    consola.debug("Set max_tokens to:", JSON.stringify(payload.max_tokens));
  }
  const response = await createChatCompletions(payload);
  if (isNonStreaming$1(response)) {
    consola.debug("Non-streaming response:", JSON.stringify(response));
    return c.json(response);
  }
  consola.debug("Streaming response");
  // Relay upstream SSE chunks to the client unchanged.
  return streamSSE(c, async (stream) => {
    for await (const chunk of response) {
      consola.debug("Streaming chunk:", JSON.stringify(chunk));
      await stream.writeSSE(chunk);
    }
  });
}
|
|
1027
|
+
// A non-streaming result is a materialized completion object (own `choices`
// property); streaming results are async iterators without one.
const isNonStreaming$1 = (response) => Object.hasOwn(response, "choices");
|
|
1028
|
+
|
|
1029
|
+
//#endregion
|
|
1030
|
+
//#region src/routes/chat-completions/route.ts
|
|
1031
|
+
// POST / — OpenAI-compatible chat-completions route; all errors are
// converted into HTTP responses by forwardError.
const completionRoutes = new Hono();
completionRoutes.post("/", async (c) => {
  try {
    return await handleCompletion$1(c);
  } catch (error) {
    return await forwardError(c, error);
  }
});
|
|
1039
|
+
|
|
1040
|
+
//#endregion
|
|
1041
|
+
//#region src/services/copilot/create-embeddings.ts
|
|
1042
|
+
/**
 * Produce an OpenAI-style embeddings response by calling the custom
 * embedding endpoint once per input string. Requests are issued
 * sequentially, preserving input order in the result. Token usage is
 * approximated by total character count. Throws on a non-ok HTTP status
 * or an unsuccessful API body.
 */
const createEmbeddings = async (payload) => {
  const inputs = typeof payload.input === "string" ? [payload.input] : payload.input;
  const channel = state.defaultChannel ?? getChannelFromModel(payload.model);
  const embeddings = [];
  for (const [index, text] of inputs.entries()) {
    const customPayload = {
      bizId: randomUUID(),
      bizCode: state.bizCode,
      bizType: state.bizType,
      channel,
      model: payload.model,
      text
    };
    consola.debug("Custom embedding payload:", JSON.stringify(customPayload));
    const response = await fetch(CUSTOM_EMBEDDING_URL, {
      method: "POST",
      headers: customApiHeaders(state),
      body: JSON.stringify(customPayload)
    });
    if (!response.ok) throw new Error(`Embedding request failed with status ${response.status}`);
    const raw = await response.json();
    if (!raw.success || !raw.data) throw new Error(`Embedding API error: ${raw.msg ?? "Unknown error"} (code: ${raw.code})`);
    embeddings.push({
      object: "embedding",
      embedding: raw.data.embedding,
      index
    });
  }
  const totalTokens = inputs.reduce((sum, text) => sum + text.length, 0);
  return {
    object: "list",
    data: embeddings,
    model: payload.model,
    usage: {
      prompt_tokens: totalTokens,
      total_tokens: totalTokens
    }
  };
};
|
|
1083
|
+
|
|
1084
|
+
//#endregion
|
|
1085
|
+
//#region src/routes/embeddings/route.ts
|
|
1086
|
+
// POST / — OpenAI-compatible embeddings route backed by the custom
// embedding service; errors are converted by forwardError.
const embeddingRoutes = new Hono();
embeddingRoutes.post("/", async (c) => {
  try {
    const response = await createEmbeddings(await c.req.json());
    return c.json(response);
  } catch (error) {
    return await forwardError(c, error);
  }
});
|
|
1095
|
+
|
|
1096
|
+
//#endregion
|
|
1097
|
+
//#region src/routes/messages/utils.ts
|
|
1098
|
+
/**
 * Translate an OpenAI finish_reason into the Anthropic stop_reason
 * vocabulary. Null stays null; unrecognized values yield undefined.
 *
 * Fixes: the previous bare object-literal lookup returned inherited
 * Object.prototype members for inputs like "constructor"; replaced with an
 * explicit switch so unknown values always yield undefined.
 */
function mapOpenAIStopReasonToAnthropic(finishReason) {
  if (finishReason === null) return null;
  switch (finishReason) {
    case "stop": return "end_turn";
    case "length": return "max_tokens";
    case "tool_calls": return "tool_use";
    case "content_filter": return "end_turn";
    default: return undefined;
  }
}
|
|
1107
|
+
|
|
1108
|
+
//#endregion
|
|
1109
|
+
//#region src/routes/messages/non-stream-translation.ts
|
|
1110
|
+
/**
 * Convert an Anthropic Messages API payload into the OpenAI
 * chat-completions request shape, translating the model name, message
 * list (including the system prompt), tools, and tool_choice.
 */
function translateToOpenAI(payload) {
  const messages = translateAnthropicMessagesToOpenAI(payload.messages, payload.system);
  return {
    model: translateModelName(payload.model),
    messages,
    max_tokens: payload.max_tokens,
    stop: payload.stop_sequences,
    stream: payload.stream,
    temperature: payload.temperature,
    top_p: payload.top_p,
    user: payload.metadata?.user_id,
    tools: translateAnthropicToolsToOpenAI(payload.tools),
    tool_choice: translateAnthropicToolChoiceToOpenAI(payload.tool_choice)
  };
}
|
|
1124
|
+
/**
 * Collapse dated/aliased Claude model ids onto the canonical ids the
 * custom API expects; anything unrecognized passes through unchanged.
 */
function translateModelName(model) {
  if (model.startsWith("claude-sonnet-4-")) return model.replace(/^claude-sonnet-4-.*/, "claude-sonnet-4");
  if (model.startsWith("claude-opus-4-")) return model.replace(/^claude-opus-4-.*/, "claude-opus-4");
  if (model.startsWith("claude-3-7-sonnet") || model.startsWith("claude-3.7-sonnet")) return "claude-v3.7-sonnet";
  if (model.startsWith("claude-3-5-sonnet") || model.startsWith("claude-3.5-sonnet")) return "claude-v3.5-sonnet";
  if (model.startsWith("claude-3-5-haiku") || model.startsWith("claude-3.5-haiku")) return "claude-v3.5-haiku";
  return model;
}
|
|
1132
|
+
/**
 * Flatten the Anthropic system prompt plus message list into one OpenAI
 * message array: system message(s) first, then each Anthropic message
 * expanded by role-specific handlers (which may emit multiple messages).
 */
function translateAnthropicMessagesToOpenAI(anthropicMessages, system) {
  const translated = [...handleSystemPrompt(system)];
  for (const message of anthropicMessages) {
    const mapped = message.role === "user" ? handleUserMessage(message) : handleAssistantMessage(message);
    translated.push(...mapped);
  }
  return translated;
}
|
|
1137
|
+
/**
 * Build the OpenAI system-message list from an Anthropic system prompt,
 * which may be a plain string, an array of text blocks (joined with blank
 * lines), or absent (empty list).
 */
function handleSystemPrompt(system) {
  if (!system) return [];
  const content = typeof system === "string"
    ? system
    : system.map((block) => block.text).join("\n\n");
  return [{ role: "system", content }];
}
|
|
1148
|
+
/**
 * Translate one Anthropic user message into OpenAI messages. tool_result
 * blocks each become an OpenAI "tool" message (emitted first, in order);
 * the remaining blocks form a single trailing "user" message. Plain string
 * content maps straight to one user message.
 */
function handleUserMessage(message) {
  if (!Array.isArray(message.content)) {
    return [{
      role: "user",
      content: mapContent(message.content)
    }];
  }
  const toolMessages = [];
  const remainder = [];
  for (const block of message.content) {
    if (block.type === "tool_result") {
      toolMessages.push({
        role: "tool",
        tool_call_id: block.tool_use_id,
        content: mapContent(block.content)
      });
    } else {
      remainder.push(block);
    }
  }
  if (remainder.length > 0) {
    toolMessages.push({
      role: "user",
      content: mapContent(remainder)
    });
  }
  return toolMessages;
}
|
|
1168
|
+
/**
 * Translate one Anthropic assistant message into OpenAI form. When the
 * content contains tool_use blocks they become OpenAI tool_calls with
 * JSON-encoded arguments, and all text blocks followed by all thinking
 * blocks are merged (blank-line separated) into the textual content
 * (null when empty). Otherwise the content is mapped directly.
 */
function handleAssistantMessage(message) {
  if (!Array.isArray(message.content)) {
    return [{
      role: "assistant",
      content: mapContent(message.content)
    }];
  }
  const toolUses = message.content.filter((block) => block.type === "tool_use");
  if (toolUses.length === 0) {
    return [{
      role: "assistant",
      content: mapContent(message.content)
    }];
  }
  // Preserve original ordering: every text block first, then every thinking block.
  const texts = message.content.filter((block) => block.type === "text").map((block) => block.text);
  const thinking = message.content.filter((block) => block.type === "thinking").map((block) => block.thinking);
  const mergedText = [...texts, ...thinking].join("\n\n");
  return [{
    role: "assistant",
    content: mergedText || null,
    tool_calls: toolUses.map((toolUse) => ({
      id: toolUse.id,
      type: "function",
      function: {
        name: toolUse.name,
        arguments: JSON.stringify(toolUse.input)
      }
    }))
  }];
}
|
|
1193
|
+
/**
 * Map Anthropic content (string or block array) to OpenAI message content.
 * Strings pass through; non-arrays yield null. Block arrays without images
 * collapse to one blank-line-joined string of text/thinking; arrays with
 * images become a multi-part list with base64 data-URL image entries.
 */
function mapContent(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return null;
  const hasImage = content.some((block) => block.type === "image");
  if (!hasImage) {
    const texts = [];
    for (const block of content) {
      if (block.type === "text") texts.push(block.text);
      else if (block.type === "thinking") texts.push(block.thinking);
    }
    return texts.join("\n\n");
  }
  const parts = [];
  for (const block of content) {
    if (block.type === "text") {
      parts.push({ type: "text", text: block.text });
    } else if (block.type === "thinking") {
      parts.push({ type: "text", text: block.thinking });
    } else if (block.type === "image") {
      parts.push({
        type: "image_url",
        image_url: { url: `data:${block.source.media_type};base64,${block.source.data}` }
      });
    }
  }
  return parts;
}
|
|
1220
|
+
/**
 * Wrap Anthropic tool definitions as OpenAI "function" tools; a missing
 * tool list passes through as undefined.
 */
function translateAnthropicToolsToOpenAI(anthropicTools) {
  if (!anthropicTools) return undefined;
  const wrapped = [];
  for (const tool of anthropicTools) {
    wrapped.push({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.input_schema
      }
    });
  }
  return wrapped;
}
|
|
1231
|
+
/**
 * Map Anthropic tool_choice onto the OpenAI equivalent: "auto"/"none" pass
 * through, "any" becomes "required", and a named tool becomes a function
 * selector. Absent input, unknown types, and "tool" without a name all
 * yield undefined.
 */
function translateAnthropicToolChoiceToOpenAI(anthropicToolChoice) {
  if (!anthropicToolChoice) return undefined;
  if (anthropicToolChoice.type === "auto") return "auto";
  if (anthropicToolChoice.type === "any") return "required";
  if (anthropicToolChoice.type === "none") return "none";
  if (anthropicToolChoice.type === "tool" && anthropicToolChoice.name) {
    return {
      type: "function",
      function: { name: anthropicToolChoice.name }
    };
  }
  return undefined;
}
|
|
1246
|
+
/**
 * Derive Anthropic usage numbers from OpenAI usage: cached prompt tokens
 * are subtracted from input_tokens and, when reported, surfaced as
 * cache_read_input_tokens. Missing usage fields default to zero.
 */
function buildAnthropicUsage(response) {
  const promptTokens = response.usage?.prompt_tokens ?? 0;
  const cachedTokens = response.usage?.prompt_tokens_details?.cached_tokens;
  const usage = {
    input_tokens: promptTokens - (cachedTokens ?? 0),
    output_tokens: response.usage?.completion_tokens ?? 0
  };
  if (cachedTokens !== undefined) usage.cache_read_input_tokens = cachedTokens;
  return usage;
}
|
|
1253
|
+
/**
 * Assemble a complete Anthropic message from a non-streaming OpenAI
 * response. Text and tool_use blocks from every choice are concatenated
 * (texts first, then tool uses); any tool_use block forces the "tool_use"
 * stop reason, otherwise the (tool_calls-preferring) per-choice finish
 * reason is mapped onto the Anthropic vocabulary.
 */
function translateToAnthropic(response) {
  const textBlocks = [];
  const toolUseBlocks = [];
  let stopReason = response.choices[0]?.finish_reason ?? null;
  for (const choice of response.choices) {
    textBlocks.push(...getAnthropicTextBlocks(choice.message.content));
    toolUseBlocks.push(...getAnthropicToolUseBlocks(choice.message.tool_calls));
    if (choice.finish_reason === "tool_calls" || stopReason === "stop") stopReason = choice.finish_reason;
  }
  return {
    id: response.id,
    type: "message",
    role: "assistant",
    model: response.model,
    content: [...textBlocks, ...toolUseBlocks],
    stop_reason: toolUseBlocks.length > 0 ? "tool_use" : mapOpenAIStopReasonToAnthropic(stopReason),
    stop_sequence: null,
    usage: buildAnthropicUsage(response)
  };
}
|
|
1274
|
+
/**
 * Extract Anthropic text blocks from OpenAI message content. A string
 * becomes a single text block; an array keeps only its "text" parts;
 * anything else yields an empty list.
 */
function getAnthropicTextBlocks(messageContent) {
  if (typeof messageContent === "string") {
    return [{ type: "text", text: messageContent }];
  }
  if (!Array.isArray(messageContent)) return [];
  const blocks = [];
  for (const part of messageContent) {
    if (part.type === "text") blocks.push({ type: "text", text: part.text });
  }
  return blocks;
}
|
|
1285
|
+
/**
 * Convert OpenAI tool_calls into Anthropic tool_use blocks. A missing call
 * id gets a synthetic `toolu_` id.
 *
 * Fixes: the previous unconditional JSON.parse threw on empty or malformed
 * argument strings (streamed no-argument tool calls commonly arrive with
 * `arguments: ""`); such inputs now fall back to an empty input object.
 */
function getAnthropicToolUseBlocks(toolCalls) {
  if (!toolCalls) return [];
  return toolCalls.map((toolCall) => {
    let input = {};
    const args = toolCall.function.arguments;
    if (args) {
      try {
        input = JSON.parse(args);
      } catch {
        // Malformed JSON arguments: keep the empty-object fallback.
      }
    }
    return {
      type: "tool_use",
      id: toolCall.id || `toolu_${randomUUID().replaceAll("-", "").slice(0, 24)}`,
      name: toolCall.function.name,
      input
    };
  });
}
|
|
1294
|
+
|
|
1295
|
+
//#endregion
|
|
1296
|
+
//#region src/routes/messages/count-tokens-handler.ts
|
|
1297
|
+
/**
 * Handles token counting for Anthropic messages.
 * Translates the Anthropic payload into OpenAI shape, counts tokens with
 * the selected model's counter, then applies fixed offsets and multipliers
 * per model family. Always responds with a count; any failure (including an
 * unknown model) falls back to `{ input_tokens: 1 }`.
 */
async function handleCountTokens(c) {
  try {
    const anthropicBeta = c.req.header("anthropic-beta");
    const anthropicPayload = await c.req.json();
    const openAIPayload = translateToOpenAI(anthropicPayload);
    const selectedModel = state.models?.data.find((model) => model.id === anthropicPayload.model);
    if (!selectedModel) {
      consola.warn("Model not found, returning default token count");
      return c.json({ input_tokens: 1 });
    }
    const tokenCount = await getTokenCount(openAIPayload, selectedModel);
    if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
      let mcpToolExist = false;
      // Claude Code clients name MCP tools with an "mcp__" prefix.
      if (anthropicBeta?.startsWith("claude-code")) mcpToolExist = anthropicPayload.tools.some((tool) => tool.name.startsWith("mcp__"));
      if (!mcpToolExist) {
        // Fixed per-model offsets — presumably hidden tool-prompt overhead
        // not visible in the payload; TODO confirm empirically.
        if (anthropicPayload.model.startsWith("claude")) tokenCount.input = tokenCount.input + 346;
        else if (anthropicPayload.model.startsWith("grok")) tokenCount.input = tokenCount.input + 480;
      }
    }
    let finalTokenCount = tokenCount.input + tokenCount.output;
    // NOTE(review): per-model fudge multipliers look like calibration
    // constants — verify against the upstream tokenizer before changing.
    if (anthropicPayload.model.startsWith("claude")) finalTokenCount = Math.round(finalTokenCount * 1.15);
    else if (anthropicPayload.model.startsWith("grok")) finalTokenCount = Math.round(finalTokenCount * 1.03);
    consola.info("Token count:", finalTokenCount);
    return c.json({ input_tokens: finalTokenCount });
  } catch (error) {
    consola.error("Error counting tokens:", error);
    return c.json({ input_tokens: 1 });
  }
}
|
|
1329
|
+
|
|
1330
|
+
//#endregion
|
|
1331
|
+
//#region src/routes/messages/stream-translation.ts
|
|
1332
|
+
/**
 * True when the currently open content block belongs to a tool call, i.e.
 * some tracked tool call was assigned the current Anthropic block index.
 */
function isToolBlockOpen(state$1) {
  if (!state$1.contentBlockOpen) return false;
  for (const toolCall of Object.values(state$1.toolCalls)) {
    if (toolCall.anthropicBlockIndex === state$1.contentBlockIndex) return true;
  }
  return false;
}
|
|
1336
|
+
// Incrementally translate one OpenAI stream chunk into Anthropic SSE
// events, mutating `state$1` (message/content-block bookkeeping shared
// across the whole stream). Only the first choice of each chunk is read.
function translateChunkToAnthropicEvents(chunk, state$1) {
  const events$1 = [];
  if (chunk.choices.length === 0) return events$1;
  const choice = chunk.choices[0];
  const { delta } = choice;
  if (!state$1.messageStartSent) {
    // First chunk of the stream: open the Anthropic message envelope,
    // reporting cached prompt tokens separately from input_tokens.
    events$1.push({
      type: "message_start",
      message: {
        id: chunk.id,
        type: "message",
        role: "assistant",
        content: [],
        model: chunk.model,
        stop_reason: null,
        stop_sequence: null,
        usage: {
          input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
          output_tokens: 0,
          ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
        }
      }
    });
    state$1.messageStartSent = true;
  }
  if (delta.content) {
    // Text arriving while a tool_use block is open: close the tool block
    // and advance to a fresh index before emitting text.
    if (isToolBlockOpen(state$1)) {
      events$1.push({
        type: "content_block_stop",
        index: state$1.contentBlockIndex
      });
      state$1.contentBlockIndex++;
      state$1.contentBlockOpen = false;
    }
    if (!state$1.contentBlockOpen) {
      events$1.push({
        type: "content_block_start",
        index: state$1.contentBlockIndex,
        content_block: {
          type: "text",
          text: ""
        }
      });
      state$1.contentBlockOpen = true;
    }
    events$1.push({
      type: "content_block_delta",
      index: state$1.contentBlockIndex,
      delta: {
        type: "text_delta",
        text: delta.content
      }
    });
  }
  if (delta.tool_calls) for (const toolCall of delta.tool_calls) {
    const toolName = toolCall.function?.name;
    // A named call with an unseen OpenAI index opens a new tool_use block.
    if (toolName && !(toolCall.index in state$1.toolCalls)) {
      if (state$1.contentBlockOpen) {
        events$1.push({
          type: "content_block_stop",
          index: state$1.contentBlockIndex
        });
        state$1.contentBlockIndex++;
        state$1.contentBlockOpen = false;
      }
      // Synthesize an id when the provider omitted one.
      const toolId = toolCall.id || `toolu_${randomUUID().replaceAll("-", "").slice(0, 24)}`;
      const anthropicBlockIndex = state$1.contentBlockIndex;
      state$1.toolCalls[toolCall.index] = {
        id: toolId,
        name: toolName,
        anthropicBlockIndex
      };
      events$1.push({
        type: "content_block_start",
        index: anthropicBlockIndex,
        content_block: {
          type: "tool_use",
          id: toolId,
          name: toolName,
          input: {}
        }
      });
      state$1.contentBlockOpen = true;
    }
    if (toolCall.function?.arguments) {
      // Argument fragments stream as input_json_delta on the block opened
      // for this tool call's index; fragments for unknown indexes are dropped.
      const toolCallInfo = state$1.toolCalls[toolCall.index];
      if (toolCallInfo) events$1.push({
        type: "content_block_delta",
        index: toolCallInfo.anthropicBlockIndex,
        delta: {
          type: "input_json_delta",
          partial_json: toolCall.function.arguments
        }
      });
    }
  }
  if (choice.finish_reason) {
    // Final chunk: close any open block, then emit message_delta with the
    // mapped stop reason (tool_use wins when any tool call was seen) and
    // message_stop.
    if (state$1.contentBlockOpen) {
      events$1.push({
        type: "content_block_stop",
        index: state$1.contentBlockIndex
      });
      state$1.contentBlockOpen = false;
    }
    const stopReason = Object.keys(state$1.toolCalls).length > 0 ? "tool_use" : mapOpenAIStopReasonToAnthropic(choice.finish_reason);
    events$1.push({
      type: "message_delta",
      delta: {
        stop_reason: stopReason,
        stop_sequence: null
      },
      usage: {
        input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
        output_tokens: chunk.usage?.completion_tokens ?? 0,
        ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
      }
    }, { type: "message_stop" });
    state$1.messageFinishSent = true;
  }
  return events$1;
}
|
|
1457
|
+
/**
 * Generate closing events for streams that ended without a `finish_reason`
 * chunk. Some providers (e.g. Gemini via the custom API) close the stream
 * or send `[DONE]` without a final finish_reason, which would leave the
 * Anthropic SSE stream without `message_delta` / `message_stop`. Returns
 * an empty list when the message never started or already finished.
 */
function getStreamClosingEvents(state$1) {
  if (!state$1.messageStartSent || state$1.messageFinishSent) return [];
  const closing = [];
  if (state$1.contentBlockOpen) {
    closing.push({
      type: "content_block_stop",
      index: state$1.contentBlockIndex
    });
  }
  const stopReason = Object.keys(state$1.toolCalls).length > 0 ? "tool_use" : "end_turn";
  closing.push(
    {
      type: "message_delta",
      delta: {
        stop_reason: stopReason,
        stop_sequence: null
      },
      usage: {
        input_tokens: 0,
        output_tokens: 0
      }
    },
    { type: "message_stop" }
  );
  return closing;
}
|
|
1484
|
+
|
|
1485
|
+
//#endregion
|
|
1486
|
+
//#region src/routes/messages/handler.ts
|
|
1487
|
+
// POST handler for the Anthropic-compatible messages endpoint: translate
// the payload to OpenAI shape, call the upstream, and translate the reply
// back — JSON for non-streaming, incremental Anthropic SSE events otherwise.
async function handleCompletion(c) {
  await checkRateLimit(state);
  const anthropicPayload = await c.req.json();
  consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload));
  const openAIPayload = translateToOpenAI(anthropicPayload);
  consola.debug("Translated OpenAI request payload:", JSON.stringify(openAIPayload));
  if (state.manualApprove) await awaitApproval();
  const response = await createChatCompletions(openAIPayload);
  if (isNonStreaming(response)) {
    consola.debug("Non-streaming response from Copilot:", JSON.stringify(response).slice(-400));
    const anthropicResponse = translateToAnthropic(response);
    consola.debug("Translated Anthropic response:", JSON.stringify(anthropicResponse));
    return c.json(anthropicResponse);
  }
  consola.debug("Streaming response from Copilot");
  return streamSSE(c, async (stream) => {
    // Mutable translation state shared across all chunks of this stream.
    const streamState = {
      messageStartSent: false,
      messageFinishSent: false,
      contentBlockIndex: 0,
      contentBlockOpen: false,
      toolCalls: {}
    };
    for await (const rawEvent of response) {
      consola.debug("Copilot raw stream event:", JSON.stringify(rawEvent));
      if (rawEvent.data === "[DONE]") break;
      if (!rawEvent.data) continue;
      const events$1 = translateChunkToAnthropicEvents(JSON.parse(rawEvent.data), streamState);
      for (const event of events$1) {
        consola.debug("Translated Anthropic event:", JSON.stringify(event));
        await stream.writeSSE({
          event: event.type,
          data: JSON.stringify(event)
        });
      }
    }
    // Some providers end the stream without a finish_reason chunk; emit any
    // missing closing events so the Anthropic stream terminates cleanly.
    const closingEvents = getStreamClosingEvents(streamState);
    for (const event of closingEvents) {
      consola.debug("Stream closing event:", JSON.stringify(event));
      await stream.writeSSE({
        event: event.type,
        data: JSON.stringify(event)
      });
    }
  });
}
|
|
1533
|
+
/**
 * Distinguishes a complete (non-SSE) chat-completion reply from a stream:
 * only a finished completion object carries a top-level own `choices` property.
 */
function isNonStreaming(response) {
  return Object.hasOwn(response, "choices");
}
|
|
1534
|
+
|
|
1535
|
+
//#endregion
|
|
1536
|
+
//#region src/routes/messages/route.ts
|
|
1537
|
+
// Anthropic-compatible /v1/messages routes.
const messageRoutes = new Hono();

/**
 * Wraps a route handler so that any thrown error is converted into an
 * error response via forwardError instead of crashing the request.
 */
const withForwardedErrors = (handler) => async (c) => {
  try {
    return await handler(c);
  } catch (error) {
    return await forwardError(c, error);
  }
};

messageRoutes.post("/", withForwardedErrors(handleCompletion));
messageRoutes.post("/count_tokens", withForwardedErrors(handleCountTokens));
|
|
1552
|
+
|
|
1553
|
+
//#endregion
|
|
1554
|
+
//#region src/routes/models/route.ts
|
|
1555
|
+
// /models listing route: exposes the cached upstream model catalogue in an
// OpenAI/Anthropic-compatible "list" envelope.
const modelRoutes = new Hono();
modelRoutes.get("/", async (c) => {
  try {
    // Lazily populate the model cache on first request.
    if (!state.models) await cacheModels();
    // Upstream does not report creation time, so epoch placeholders are used.
    const epochIso = new Date(0).toISOString();
    const data = state.models?.data.map((model) => ({
      id: model.id,
      object: "model",
      type: "model",
      created: 0,
      created_at: epochIso,
      owned_by: model.vendor,
      display_name: model.name,
    }));
    return c.json({
      object: "list",
      data,
      has_more: false,
    });
  } catch (error) {
    return await forwardError(c, error);
  }
});
|
|
1577
|
+
|
|
1578
|
+
//#endregion
|
|
1579
|
+
//#region src/routes/token/route.ts
|
|
1580
|
+
// /token route: in custom-API mode there is no Copilot token to expose,
// so a fixed placeholder payload is returned.
const tokenRoute = new Hono();
tokenRoute.get("/", (c) =>
  c.json({
    token: "custom-api-mode",
    message: "Using custom API backend, no Copilot token in use",
  }));
|
|
1587
|
+
|
|
1588
|
+
//#endregion
|
|
1589
|
+
//#region src/routes/usage/route.ts
|
|
1590
|
+
// /usage route: the custom API backend has no quota tracking, so this
// returns a static, all-zero/unlimited usage report for client compatibility.
const usageRoute = new Hono();

/** Builds one empty quota snapshot (all three snapshot kinds are identical). */
const emptyQuotaSnapshot = () => ({
  entitlement: 0,
  remaining: 0,
  percent_remaining: 0,
  unlimited: true,
  overage_count: 0,
  overage_permitted: false,
  quota_id: "",
  quota_remaining: 0,
});

usageRoute.get("/", (c) =>
  c.json({
    message: "Usage tracking is not available with custom API backend",
    copilot_plan: "custom",
    quota_snapshots: {
      chat: emptyQuotaSnapshot(),
      completions: emptyQuotaSnapshot(),
      premium_interactions: emptyQuotaSnapshot(),
    },
    quota_reset_date: "",
    access_type_sku: "custom",
    analytics_tracking_id: "",
    assigned_date: "",
    can_signup_for_limited: false,
    chat_enabled: true,
    organization_login_list: [],
    organization_list: [],
  }));
|
|
1637
|
+
|
|
1638
|
+
//#endregion
|
|
1639
|
+
//#region src/server.ts
|
|
1640
|
+
// Central Hono app: request logging + CORS middleware, a liveness probe,
// and all route groups mounted both at the root and under /v1 aliases.
const server = new Hono();
server.use(logger());
server.use(cors());
server.get("/", (c) => c.text("Server running"));

// [prefix, sub-app] mount table; prefixes are distinct so order is immaterial.
const mounts = [
  ["/chat/completions", completionRoutes],
  ["/models", modelRoutes],
  ["/embeddings", embeddingRoutes],
  ["/usage", usageRoute],
  ["/token", tokenRoute],
  ["/v1/chat/completions", completionRoutes],
  ["/v1/models", modelRoutes],
  ["/v1/embeddings", embeddingRoutes],
  ["/v1/messages", messageRoutes],
];
for (const [prefix, routes] of mounts) server.route(prefix, routes);
|
|
1653
|
+
|
|
1654
|
+
//#endregion
|
|
1655
|
+
//#region src/start.ts
|
|
1656
|
+
/**
 * Configure global state from CLI options, wire up the custom API backend,
 * optionally generate a Claude Code launch command, and start the HTTP server.
 *
 * @param {object} options - Parsed `start` command options (port, verbose,
 *   accountType, manual, rateLimit, rateLimitWait, claudeCode, showToken,
 *   proxyEnv, customApiUrl, xybotUser, uuid, bizCode, bizType, defaultChannel).
 * @throws {Error} If the provided xybot-user value is not valid JSON.
 */
async function runServer(options) {
  if (options.proxyEnv) initProxyFromEnv();
  if (options.verbose) {
    consola.level = 5; // consola's debug level
    consola.info("Verbose logging enabled");
  }
  state.accountType = options.accountType;
  state.manualApprove = options.manual;
  state.rateLimitSeconds = options.rateLimit;
  state.rateLimitWait = options.rateLimitWait;
  state.showToken = options.showToken;
  await ensurePaths();
  // CLI flags take precedence over environment variables throughout.
  let xybotUser;
  const xybotUserStr = options.xybotUser ?? process.env.XYBOT_USER;
  if (xybotUserStr) {
    try {
      xybotUser = JSON.parse(xybotUserStr);
    } catch (error) {
      consola.error("Failed to parse xybot-user JSON:", xybotUserStr);
      throw new Error("Invalid xybot-user JSON format", { cause: error });
    }
  }
  const customUuid = options.uuid ?? process.env.XYBOT_UUID;
  if (customUuid) {
    if (xybotUser) {
      xybotUser.uuid = customUuid;
    } else {
      // No explicit xybot-user provided: patch the uuid onto the default in state.
      state.xybotUser = {
        ...state.xybotUser,
        uuid: customUuid,
      };
    }
  }
  setupCustomApi({
    xybotUser,
    customApiUrl: options.customApiUrl ?? process.env.CUSTOM_API_URL,
    bizCode: options.bizCode ?? process.env.CUSTOM_BIZ_CODE,
    bizType: options.bizType ?? process.env.CUSTOM_BIZ_TYPE,
    defaultChannel: options.defaultChannel ?? process.env.CUSTOM_DEFAULT_CHANNEL
  });
  // FIX: await the model cache load. The original fired cacheModels() as a
  // floating promise, so state.models was read on the next line (and by the
  // claude-code invariant below) before it was populated, and any rejection
  // escaped as an unhandled promise rejection.
  await cacheModels();
  consola.info(`Available models: \n${state.models?.data.map((model) => `- ${model.id}`).join("\n")}`);
  const serverUrl = `http://localhost:${options.port}`;
  if (options.claudeCode) {
    invariant(state.models, "Models should be loaded by now");
    const selectedModel = await consola.prompt("Select a model to use with Claude Code", {
      type: "select",
      options: state.models.data.map((model) => model.id)
    });
    const selectedSmallModel = await consola.prompt("Select a small model to use with Claude Code", {
      type: "select",
      options: state.models.data.map((model) => model.id)
    });
    // Build a shell snippet exporting the env vars Claude Code needs to talk
    // to this proxy instead of the Anthropic API.
    const command = generateEnvScript({
      ANTHROPIC_BASE_URL: serverUrl,
      ANTHROPIC_AUTH_TOKEN: "dummy",
      ANTHROPIC_MODEL: selectedModel,
      ANTHROPIC_DEFAULT_SONNET_MODEL: selectedModel,
      ANTHROPIC_SMALL_FAST_MODEL: selectedSmallModel,
      ANTHROPIC_DEFAULT_HAIKU_MODEL: selectedSmallModel,
      DISABLE_NON_ESSENTIAL_MODEL_CALLS: "1",
      CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: "1"
    }, "claude");
    try {
      // clipboardy is optional at runtime; fall back to printing the command.
      const { default: clipboard } = await import("clipboardy");
      clipboard.writeSync(command);
      consola.success("Copied Claude Code command to clipboard!");
    } catch {
      consola.warn("Failed to copy to clipboard. Here is the Claude Code command:");
      consola.log(command);
    }
  }
  consola.box(`Server running at ${serverUrl}`);
  serve({
    fetch: server.fetch,
    port: options.port
  });
}
|
|
1727
|
+
// CLI "start" command: declares all flags (env-var fallbacks are resolved
// inside runServer) and boots the proxy server.
const start = defineCommand({
  meta: {
    name: "start",
    description: "Start the API proxy server",
  },
  args: {
    port: {
      alias: "p",
      type: "string",
      default: "4141",
      description: "Port to listen on",
    },
    verbose: {
      alias: "v",
      type: "boolean",
      default: false,
      description: "Enable verbose logging",
    },
    "account-type": {
      alias: "a",
      type: "string",
      default: "individual",
      description: "Account type (individual, business, enterprise)",
    },
    manual: {
      type: "boolean",
      default: false,
      description: "Enable manual request approval",
    },
    "rate-limit": {
      alias: "r",
      type: "string",
      description: "Rate limit in seconds between requests",
    },
    wait: {
      alias: "w",
      type: "boolean",
      default: false,
      description: "Wait instead of error when rate limit is hit. Has no effect if rate limit is not set",
    },
    "claude-code": {
      alias: "c",
      type: "boolean",
      default: false,
      description: "Generate a command to launch Claude Code with this API config",
    },
    "show-token": {
      type: "boolean",
      default: false,
      description: "Show tokens on fetch and refresh",
    },
    "proxy-env": {
      type: "boolean",
      default: false,
      description: "Initialize proxy from environment variables",
    },
    "custom-api-url": {
      type: "string",
      description: "Custom API URL (env: CUSTOM_API_URL)",
    },
    "xybot-user": {
      type: "string",
      description: "xybot-user header JSON, e.g. '{\"organizationUuid\":\"...\",\"tenantUuid\":\"...\",\"uuid\":\"...\"}' (env: XYBOT_USER)",
    },
    uuid: {
      alias: "u",
      type: "string",
      description: "Custom user UUID for xybot-user, overrides the uuid field in default/provided xybot-user (env: XYBOT_UUID)",
    },
    "biz-code": {
      type: "string",
      description: "Business code for custom API (env: CUSTOM_BIZ_CODE)",
    },
    "biz-type": {
      type: "string",
      description: "Business type for custom API (env: CUSTOM_BIZ_TYPE)",
    },
    "default-channel": {
      type: "string",
      description: "Default model channel, e.g. azure, claude, deepseek (env: CUSTOM_DEFAULT_CHANNEL)",
    },
  },
  run({ args }) {
    // "rate-limit" is optional; parse it only when the flag was supplied.
    const rawRateLimit = args["rate-limit"];
    return runServer({
      port: Number.parseInt(args.port, 10),
      verbose: args.verbose,
      accountType: args["account-type"],
      manual: args.manual,
      rateLimit: rawRateLimit === undefined ? undefined : Number.parseInt(rawRateLimit, 10),
      rateLimitWait: args.wait,
      claudeCode: args["claude-code"],
      showToken: args["show-token"],
      proxyEnv: args["proxy-env"],
      customApiUrl: args["custom-api-url"],
      xybotUser: args["xybot-user"],
      uuid: args.uuid,
      bizCode: args["biz-code"],
      bizType: args["biz-type"],
      defaultChannel: args["default-channel"],
    });
  },
});
|
|
1831
|
+
|
|
1832
|
+
//#endregion
|
|
1833
|
+
//#region src/main.ts
|
|
1834
|
+
// CLI entry point: dispatches to the auth / start / check-usage / debug subcommands.
const mainCommand = defineCommand({
  meta: {
    name: "copilot-api",
    description: "A wrapper around GitHub Copilot API to make it OpenAI compatible, making it usable for other tools."
  },
  subCommands: {
    auth,
    start,
    "check-usage": checkUsage,
    debug,
  },
});
await runMain(mainCommand);
|
|
1846
|
+
|
|
1847
|
+
//#endregion
|
|
1848
|
+
export { };
|
|
1849
|
+
//# sourceMappingURL=main.js.map
|