@franklin-ai/copilot-compat-proxy 1.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/main.js ADDED
@@ -0,0 +1,1503 @@
1
+ #!/usr/bin/env node
2
import { execSync } from "node:child_process";
import { randomUUID } from "node:crypto";
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import process$1 from "node:process";
import { fileURLToPath } from "node:url";
import { defineCommand, runMain } from "citty";
import clipboard from "clipboardy";
import consola from "consola";
import { events } from "fetch-event-stream";
import { Hono } from "hono";
import { cors } from "hono/cors";
import { logger } from "hono/logger";
import { streamSSE } from "hono/streaming";
import { getProxyForUrl } from "proxy-from-env";
import { serve } from "srvx";
import invariant from "tiny-invariant";
import { Agent, ProxyAgent, setGlobalDispatcher } from "undici";
20
+
21
+ //#region src/lib/paths.ts
22
// Application data directory (XDG-style) and the GitHub token file inside it.
const APP_DIR = path.join(os.homedir(), ".local", "share", "copilot-api");
const GITHUB_TOKEN_PATH = path.join(APP_DIR, "github_token");
const PATHS = { APP_DIR, GITHUB_TOKEN_PATH };

/** Create the app directory (if needed) and make sure the token file exists. */
async function ensurePaths() {
  await fs.mkdir(PATHS.APP_DIR, { recursive: true });
  await ensureFile(PATHS.GITHUB_TOKEN_PATH);
}

/**
 * Ensure `filePath` exists and is writable; otherwise create it empty with
 * owner-only permissions (0o600 === 384).
 */
async function ensureFile(filePath) {
  try {
    await fs.access(filePath, fs.constants.W_OK);
  } catch {
    await fs.writeFile(filePath, "");
    await fs.chmod(filePath, 0o600);
  }
}
40
+
41
+ //#endregion
42
+ //#region src/lib/state.ts
43
// Mutable runtime state shared across the app. Token, model and version
// fields (githubToken, copilotToken, models, vsCodeVersion, rateLimitSeconds,
// lastRequestTimestamp) are attached later by the setup/caching helpers.
const state = {
  accountType: "individual", // "individual" | "business" | "enterprise" — selects the Copilot API host
  manualApprove: false,      // when true, each request must be confirmed on the terminal
  rateLimitWait: false,      // when true, wait out the rate limit instead of replying 429
  showToken: false           // when true, print tokens to the console on fetch/refresh
};
49
+
50
+ //#endregion
51
+ //#region src/lib/api-config.ts
52
// Base JSON headers shared by GitHub and Copilot requests.
const standardHeaders = () => ({
  "content-type": "application/json",
  accept: "application/json"
});
// Version of the VS Code Copilot Chat extension this proxy impersonates.
const COPILOT_VERSION = "0.26.7";
const EDITOR_PLUGIN_VERSION = `copilot-chat/${COPILOT_VERSION}`;
const USER_AGENT = `GitHubCopilotChat/${COPILOT_VERSION}`;
// Value sent in the x-github-api-version header.
const API_VERSION = "2025-04-01";
60
/** Copilot API base URL; business/enterprise accounts use a subdomain. */
const copilotBaseUrl = (state$1) => {
  if (state$1.accountType === "individual") return "https://api.githubcopilot.com";
  return `https://api.${state$1.accountType}.githubcopilot.com`;
};
/**
 * Headers for Copilot API requests. Pass `vision = true` when the payload
 * contains image content so the extra vision header is included.
 */
const copilotHeaders = (state$1, vision = false) => ({
  Authorization: `Bearer ${state$1.copilotToken}`,
  "content-type": standardHeaders()["content-type"],
  "copilot-integration-id": "vscode-chat",
  "editor-version": `vscode/${state$1.vsCodeVersion}`,
  "editor-plugin-version": EDITOR_PLUGIN_VERSION,
  "user-agent": USER_AGENT,
  "openai-intent": "conversation-panel",
  "x-github-api-version": API_VERSION,
  "x-request-id": randomUUID(),
  "x-vscode-user-agent-library-version": "electron-fetch",
  ...(vision ? { "copilot-vision-request": "true" } : {})
});
77
const GITHUB_API_BASE_URL = "https://api.github.com";
// Headers for GitHub REST API calls, authenticated with the OAuth token.
const githubHeaders = (state$1) => ({
  ...standardHeaders(),
  authorization: `token ${state$1.githubToken}`,
  "editor-version": `vscode/${state$1.vsCodeVersion}`,
  "editor-plugin-version": EDITOR_PLUGIN_VERSION,
  "user-agent": USER_AGENT,
  "x-github-api-version": API_VERSION,
  "x-vscode-user-agent-library-version": "electron-fetch"
});
const GITHUB_BASE_URL = "https://github.com";
// OAuth app client id used for the device-code flow.
// NOTE(review): presumably the VS Code Copilot Chat app's id — confirm.
const GITHUB_CLIENT_ID = "Iv1.b507a08c87ecfe98";
const GITHUB_APP_SCOPES = ["read:user"].join(" ");
90
+
91
+ //#endregion
92
+ //#region src/lib/error.ts
93
/**
 * Error carrying the upstream fetch Response, so route handlers can forward
 * the original status and body to the client (see forwardError).
 */
var HTTPError = class extends Error {
  response;
  constructor(message, response) {
    super(message);
    this.name = "HTTPError"; // so logs show the subclass instead of plain "Error"
    this.response = response;
  }
};
100
/**
 * Convert an error thrown by a route handler into a JSON error response.
 * An HTTPError forwards the upstream body text with its original status;
 * anything else becomes a plain 500.
 */
async function forwardError(c, error) {
  consola.error("Error occurred:", error);
  if (!(error instanceof HTTPError)) {
    return c.json({ error: {
      message: error.message,
      type: "error"
    } }, 500);
  }
  const errorText = await error.response.text();
  let parsed = errorText;
  try {
    parsed = JSON.parse(errorText);
  } catch {
    // upstream body was not JSON — log the raw text instead
  }
  consola.error("HTTP error:", parsed);
  return c.json({ error: {
    message: errorText,
    type: "error"
  } }, error.response.status);
}
121
+
122
+ //#endregion
123
+ //#region src/services/github/get-copilot-token.ts
124
// Exchange the stored GitHub OAuth token for a short-lived Copilot bearer
// token. The JSON response also carries `refresh_in` (seconds), consumed by
// setupCopilotToken to schedule refreshes.
const getCopilotToken = async () => {
  const response = await fetch(`${GITHUB_API_BASE_URL}/copilot_internal/v2/token`, { headers: githubHeaders(state) });
  if (!response.ok) throw new HTTPError("Failed to get Copilot token", response);
  return await response.json();
};
129
+
130
+ //#endregion
131
+ //#region src/services/github/get-device-code.ts
132
// Start GitHub's OAuth device-code flow. The response provides device_code,
// user_code, verification_uri and the polling interval used downstream.
async function getDeviceCode() {
  const response = await fetch(`${GITHUB_BASE_URL}/login/device/code`, {
    method: "POST",
    headers: standardHeaders(),
    body: JSON.stringify({
      client_id: GITHUB_CLIENT_ID,
      scope: GITHUB_APP_SCOPES
    })
  });
  if (!response.ok) throw new HTTPError("Failed to get device code", response);
  return await response.json();
}
144
+
145
+ //#endregion
146
+ //#region src/services/github/get-user.ts
147
// Fetch the authenticated user's GitHub profile (used to greet after login).
async function getGitHubUser() {
  const response = await fetch(`${GITHUB_API_BASE_URL}/user`, { headers: {
    authorization: `token ${state.githubToken}`,
    ...standardHeaders()
  } });
  if (!response.ok) throw new HTTPError("Failed to get GitHub user", response);
  return await response.json();
}
155
+
156
+ //#endregion
157
+ //#region src/services/copilot/get-models.ts
158
// List the models available to this Copilot account; cached on state.models.
const getModels = async () => {
  const response = await fetch(`${copilotBaseUrl(state)}/models`, { headers: copilotHeaders(state) });
  if (!response.ok) throw new HTTPError("Failed to get models", response);
  return await response.json();
};
163
+
164
+ //#endregion
165
+ //#region src/services/get-vscode-version.ts
166
// Version reported when the lookup below fails or times out.
const FALLBACK = "1.104.3";
// Scrape the current VS Code version from the AUR PKGBUILD (5s timeout);
// the result ends up in the editor-version headers via cacheVSCodeVersion.
async function getVSCodeVersion() {
  const controller = new AbortController();
  const timeout = setTimeout(() => {
    controller.abort();
  }, 5e3);
  try {
    const match = (await (await fetch("https://aur.archlinux.org/cgit/aur.git/plain/PKGBUILD?h=visual-studio-code-bin", { signal: controller.signal })).text()).match(/pkgver=([0-9.]+)/);
    if (match) return match[1];
    return FALLBACK;
  } catch {
    return FALLBACK;
  } finally {
    clearTimeout(timeout);
  }
}
// NOTE(review): top-level await whose result is discarded — it performs a
// network fetch at import time, and cacheVSCodeVersion() fetches again later.
// Looks like a bundler hoisting artifact; confirm whether it can be removed.
await getVSCodeVersion();
183
+
184
+ //#endregion
185
+ //#region src/lib/utils.ts
186
/** Resolve after `ms` milliseconds. */
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
/** True only for null or undefined (unlike a plain falsiness check). */
const isNullish = (value) => value == null;
/** Fetch the model list once and keep it on shared state. */
async function cacheModels() {
  state.models = await getModels();
}
/** Fetch the VS Code version once, keep it on shared state, and log it. */
const cacheVSCodeVersion = async () => {
  const version = await getVSCodeVersion();
  state.vsCodeVersion = version;
  consola.info(`Using VSCode version: ${version}`);
};
198
+
199
+ //#endregion
200
+ //#region src/services/github/poll-access-token.ts
201
/**
 * Poll GitHub's OAuth device-flow token endpoint until the user authorizes.
 * Polls at the interval requested by the device-code response (plus 1s slack).
 * Throws on terminal device-flow errors instead of polling forever.
 */
async function pollAccessToken(deviceCode) {
  const sleepDuration = (deviceCode.interval + 1) * 1e3;
  consola.debug(`Polling access token with interval of ${sleepDuration}ms`);
  while (true) {
    const response = await fetch(`${GITHUB_BASE_URL}/login/oauth/access_token`, {
      method: "POST",
      headers: standardHeaders(),
      body: JSON.stringify({
        client_id: GITHUB_CLIENT_ID,
        device_code: deviceCode.device_code,
        grant_type: "urn:ietf:params:oauth:grant-type:device_code"
      })
    });
    if (!response.ok) {
      // Log BEFORE sleeping so the failure is visible immediately.
      consola.error("Failed to poll access token:", await response.text());
      await sleep(sleepDuration);
      continue;
    }
    const json = await response.json();
    consola.debug("Polling access token response:", json);
    if (json.access_token) return json.access_token;
    // GitHub reports device-flow state via an `error` code on a 200 response:
    // "authorization_pending"/"slow_down" mean keep polling; terminal codes
    // like "expired_token" or "access_denied" will never succeed, so bail out
    // instead of looping forever.
    if (json.error && json.error !== "authorization_pending" && json.error !== "slow_down") {
      throw new Error(`Device flow failed: ${json.error}`);
    }
    await sleep(sleepDuration);
  }
}
226
+
227
+ //#endregion
228
+ //#region src/lib/token.ts
229
// Read/persist the GitHub OAuth token at PATHS.GITHUB_TOKEN_PATH.
const readGithubToken = () => fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8");
const writeGithubToken = (token) => fs.writeFile(PATHS.GITHUB_TOKEN_PATH, token);
// Fetch the Copilot bearer token and keep it fresh: re-fetch 60 seconds
// before the server-announced expiry (`refresh_in`, in seconds).
const setupCopilotToken = async () => {
  const { token, refresh_in } = await getCopilotToken();
  state.copilotToken = token;
  consola.debug("GitHub Copilot Token fetched successfully!");
  if (state.showToken) consola.info("Copilot token:", token);
  const refreshInterval = (refresh_in - 60) * 1e3;
  setInterval(async () => {
    consola.debug("Refreshing Copilot token");
    try {
      const { token: token$1 } = await getCopilotToken();
      state.copilotToken = token$1;
      consola.debug("Copilot token refreshed");
      if (state.showToken) consola.info("Refreshed Copilot token:", token$1);
    } catch (error) {
      consola.error("Failed to refresh Copilot token:", error);
      // NOTE(review): rethrowing inside an async setInterval callback yields
      // an unhandled promise rejection (fatal on modern Node by default) —
      // confirm whether a hard crash on refresh failure is intended.
      throw error;
    }
  }, refreshInterval);
};
250
// Ensure a GitHub OAuth token is available on state: reuse the one stored on
// disk unless `options.force` is set, otherwise run the device-code flow,
// persist the new token, and log the authenticated user.
async function setupGitHubToken(options) {
  try {
    const githubToken = await readGithubToken();
    if (githubToken && !options?.force) {
      state.githubToken = githubToken;
      if (state.showToken) consola.info("GitHub token:", githubToken);
      await logUser();
      return;
    }
    consola.info("Not logged in, getting new access token");
    const response = await getDeviceCode();
    consola.debug("Device code response:", response);
    consola.info(`Please enter the code "${response.user_code}" in ${response.verification_uri}`);
    // Blocks until the user completes the flow in the browser.
    const token = await pollAccessToken(response);
    await writeGithubToken(token);
    state.githubToken = token;
    if (state.showToken) consola.info("GitHub token:", token);
    await logUser();
  } catch (error) {
    if (error instanceof HTTPError) {
      consola.error("Failed to get GitHub token:", await error.response.json());
      throw error;
    }
    consola.error("Failed to get GitHub token:", error);
    throw error;
  }
}
277
/** Fetch the authenticated GitHub user and announce who is logged in. */
async function logUser() {
  const { login } = await getGitHubUser();
  consola.info(`Logged in as ${login}`);
}
281
+
282
+ //#endregion
283
+ //#region src/auth.ts
284
// CLI entry for `auth`: always forces a fresh device-code login (force: true)
// and reports where the token was written.
async function runAuth(options) {
  if (options.verbose) {
    consola.level = 5; // consola debug level
    consola.info("Verbose logging enabled");
  }
  state.showToken = options.showToken;
  await ensurePaths();
  await setupGitHubToken({ force: true });
  consola.success("GitHub token written to", PATHS.GITHUB_TOKEN_PATH);
}
294
// `auth` subcommand definition: wires CLI flags through to runAuth.
const auth = defineCommand({
  meta: {
    name: "auth",
    description: "Run GitHub auth flow without running the server"
  },
  args: {
    verbose: {
      alias: "v",
      type: "boolean",
      default: false,
      description: "Enable verbose logging"
    },
    "show-token": {
      type: "boolean",
      default: false,
      description: "Show GitHub token on auth"
    }
  },
  run({ args }) {
    return runAuth({
      verbose: args.verbose,
      showToken: args["show-token"]
    });
  }
});
319
+
320
+ //#endregion
321
+ //#region src/services/github/get-copilot-usage.ts
322
// Fetch quota snapshots for the authenticated Copilot user (internal API);
// consumed by the `check-usage` command.
const getCopilotUsage = async () => {
  const response = await fetch(`${GITHUB_API_BASE_URL}/copilot_internal/user`, { headers: githubHeaders(state) });
  if (!response.ok) throw new HTTPError("Failed to get Copilot usage", response);
  return await response.json();
};
327
+
328
+ //#endregion
329
+ //#region src/check-usage.ts
330
// `check-usage` subcommand: prints plan, quota reset date, and one line per
// quota (premium interactions, chat, completions).
const checkUsage = defineCommand({
  meta: {
    name: "check-usage",
    description: "Show current GitHub Copilot usage/quota information"
  },
  async run() {
    await ensurePaths();
    await setupGitHubToken();
    // Render one quota snapshot as "<name>: used/total used (x% used, y% remaining)".
    // A missing snapshot (some plans omit quotas) renders as "N/A".
    function summarizeQuota(name, snap) {
      if (!snap) return `${name}: N/A`;
      const total = snap.entitlement;
      const used = total - snap.remaining;
      const percentUsed = total > 0 ? used / total * 100 : 0;
      const percentRemaining = snap.percent_remaining;
      return `${name}: ${used}/${total} used (${percentUsed.toFixed(1)}% used, ${percentRemaining.toFixed(1)}% remaining)`;
    }
    try {
      const usage = await getCopilotUsage();
      // All three lines share summarizeQuota — the premium line previously
      // duplicated the same computation inline (and crashed if the snapshot
      // was missing).
      const premiumLine = summarizeQuota("Premium", usage.quota_snapshots.premium_interactions);
      const chatLine = summarizeQuota("Chat", usage.quota_snapshots.chat);
      const completionsLine = summarizeQuota("Completions", usage.quota_snapshots.completions);
      consola.box(`Copilot Usage (plan: ${usage.copilot_plan})\nQuota resets: ${usage.quota_reset_date}\n\nQuotas:\n ${premiumLine}\n ${chatLine}\n ${completionsLine}`);
    } catch (err) {
      consola.error("Failed to fetch Copilot usage:", err);
      process.exit(1);
    }
  }
});
363
+
364
+ //#endregion
365
+ //#region src/debug.ts
366
/**
 * Read this package's version from the adjacent package.json.
 * Returns "unknown" when the file is missing or unreadable.
 */
async function getPackageVersion() {
  try {
    // fileURLToPath handles Windows drive letters and percent-encoded
    // characters correctly; URL#pathname does not (e.g. "/C:/..." on Windows).
    const packageJsonPath = fileURLToPath(new URL("../package.json", import.meta.url));
    return JSON.parse(await fs.readFile(packageJsonPath)).version;
  } catch {
    return "unknown";
  }
}
374
/** Describe the current JS runtime (bun vs node) and host platform. */
function getRuntimeInfo() {
  if (typeof Bun !== "undefined") {
    return {
      name: "bun",
      version: Bun.version,
      platform: os.platform(),
      arch: os.arch()
    };
  }
  return {
    name: "node",
    version: process.version.slice(1), // strip the leading "v"
    platform: os.platform(),
    arch: os.arch()
  };
}
383
/** True when the token file exists, is a regular file, and is non-empty. */
async function checkTokenExists() {
  try {
    const stats = await fs.stat(PATHS.GITHUB_TOKEN_PATH);
    if (!stats.isFile()) return false;
    const contents = await fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8");
    return contents.trim().length > 0;
  } catch {
    // A missing file or a permission error both mean "no usable token".
    return false;
  }
}
391
// Collect everything `copilot-api debug` prints: package version, runtime
// details, well-known paths, and whether a GitHub token is already stored.
async function getDebugInfo() {
  const [version, tokenExists] = await Promise.all([getPackageVersion(), checkTokenExists()]);
  return {
    version,
    runtime: getRuntimeInfo(),
    paths: {
      APP_DIR: PATHS.APP_DIR,
      GITHUB_TOKEN_PATH: PATHS.GITHUB_TOKEN_PATH
    },
    tokenExists
  };
}
403
// Human-readable rendering of the debug info (see printDebugInfoJson for the
// machine-readable variant). The template's embedded newlines are the output
// format — do not reflow.
function printDebugInfoPlain(info) {
  consola.info(`copilot-api debug

Version: ${info.version}
Runtime: ${info.runtime.name} ${info.runtime.version} (${info.runtime.platform} ${info.runtime.arch})

Paths:
- APP_DIR: ${info.paths.APP_DIR}
- GITHUB_TOKEN_PATH: ${info.paths.GITHUB_TOKEN_PATH}

Token exists: ${info.tokenExists ? "Yes" : "No"}`);
}
415
/** Emit debug info as pretty-printed JSON on stdout (machine-readable). */
function printDebugInfoJson(info) {
  const serialized = JSON.stringify(info, null, 2);
  console.log(serialized);
}
418
/** Gather debug info and print it in the requested format. */
async function runDebug(options) {
  const debugInfo = await getDebugInfo();
  if (options.json) {
    printDebugInfoJson(debugInfo);
  } else {
    printDebugInfoPlain(debugInfo);
  }
}
423
// `debug` subcommand definition: prints environment/paths/token diagnostics.
const debug = defineCommand({
  meta: {
    name: "debug",
    description: "Print debug information about the application"
  },
  args: { json: {
    type: "boolean",
    default: false,
    description: "Output debug information as JSON"
  } },
  run({ args }) {
    return runDebug({ json: args.json });
  }
});
437
+
438
+ //#endregion
439
+ //#region src/lib/proxy.ts
440
// Install a global undici dispatcher that routes each request through the
// proxy selected by the standard env vars (HTTP_PROXY/HTTPS_PROXY/NO_PROXY,
// resolved per-URL by proxy-from-env). Skipped under Bun, which does not use
// undici's global dispatcher. Any setup or per-request failure falls back to
// a direct connection.
function initProxyFromEnv() {
  if (typeof Bun !== "undefined") return;
  try {
    const direct = new Agent();
    // One ProxyAgent per distinct proxy URL, created lazily and reused.
    const proxies = /* @__PURE__ */ new Map();
    setGlobalDispatcher({
      dispatch(options, handler) {
        try {
          const origin = typeof options.origin === "string" ? new URL(options.origin) : options.origin;
          const raw = getProxyForUrl(origin.toString());
          const proxyUrl = raw && raw.length > 0 ? raw : void 0;
          if (!proxyUrl) {
            consola.debug(`HTTP proxy bypass: ${origin.hostname}`);
            return direct.dispatch(options, handler);
          }
          let agent = proxies.get(proxyUrl);
          if (!agent) {
            agent = new ProxyAgent(proxyUrl);
            proxies.set(proxyUrl, agent);
          }
          // Log only protocol://host of the proxy so credentials in the URL
          // never reach the logs.
          let label = proxyUrl;
          try {
            const u = new URL(proxyUrl);
            label = `${u.protocol}//${u.host}`;
          } catch {}
          consola.debug(`HTTP proxy route: ${origin.hostname} via ${label}`);
          return agent.dispatch(options, handler);
        } catch {
          // Malformed origin/proxy info — fail open to a direct connection.
          return direct.dispatch(options, handler);
        }
      },
      close() {
        return direct.close();
      },
      destroy() {
        return direct.destroy();
      }
    });
    consola.debug("HTTP proxy configured from environment (per-URL)");
  } catch (err) {
    consola.debug("Proxy setup skipped:", err);
  }
}
483
+
484
+ //#endregion
485
+ //#region src/lib/shell.ts
486
/**
 * Best-effort detection of the invoking shell, used to format env-var
 * snippets. Windows: inspects the parent process name for PowerShell and
 * otherwise assumes cmd. POSIX: sniffs $SHELL, defaulting to plain sh.
 */
function getShell() {
  const { platform, ppid, env } = process$1;
  if (platform !== "win32") {
    const shellPath = env.SHELL;
    if (shellPath?.endsWith("zsh")) return "zsh";
    if (shellPath?.endsWith("fish")) return "fish";
    if (shellPath?.endsWith("bash")) return "bash";
    return "sh";
  }
  try {
    // NOTE(review): relies on `wmic`, which is deprecated/absent on recent
    // Windows builds — the catch falls back to "cmd" when it is missing.
    const command = `wmic process get ParentProcessId,Name | findstr "${ppid}"`;
    const output = execSync(command, { stdio: "pipe" }).toString().toLowerCase();
    return output.includes("powershell.exe") ? "powershell" : "cmd";
  } catch {
    return "cmd";
  }
}
506
/**
 * Generates a copy-pasteable script to set multiple environment variables
 * and run a subsequent command, using the syntax of the shell detected by
 * getShell() (powershell / cmd / fish / POSIX sh).
 * @param {EnvVars} envVars - An object of environment variables to set.
 * @param {string} commandToRun - The command to run after setting the variables.
 * @returns {string} The formatted script string.
 */
function generateEnvScript(envVars, commandToRun = "") {
  const shell = getShell();
  // Drop entries whose value is undefined so they don't render as "KEY=undefined".
  const filteredEnvVars = Object.entries(envVars).filter(([, value]) => value !== void 0);
  let commandBlock;
  switch (shell) {
    case "powershell":
      // NOTE(review): values are emitted unquoted — PowerShell generally
      // requires quotes around string values ($env:K = "v"); verify with
      // values containing spaces or special characters.
      commandBlock = filteredEnvVars.map(([key, value]) => `$env:${key} = ${value}`).join("; ");
      break;
    case "cmd":
      commandBlock = filteredEnvVars.map(([key, value]) => `set ${key}=${value}`).join(" & ");
      break;
    case "fish":
      commandBlock = filteredEnvVars.map(([key, value]) => `set -gx ${key} ${value}`).join("; ");
      break;
    default: {
      // POSIX shells: a single `export K1=v1 K2=v2` statement.
      const assignments = filteredEnvVars.map(([key, value]) => `${key}=${value}`).join(" ");
      commandBlock = filteredEnvVars.length > 0 ? `export ${assignments}` : "";
      break;
    }
  }
  // cmd chains with a single `&`; every other shell uses `&&`.
  if (commandBlock && commandToRun) return `${commandBlock}${shell === "cmd" ? " & " : " && "}${commandToRun}`;
  return commandBlock || commandToRun;
}
536
+
537
+ //#endregion
538
+ //#region src/lib/approval.ts
539
// Interactive gate for manual-approve mode: prompt on the terminal and reject
// the in-flight request with a 403 HTTPError unless the operator confirms.
const awaitApproval = async () => {
  if (!await consola.prompt(`Accept incoming request?`, { type: "confirm" })) throw new HTTPError("Request rejected", Response.json({ message: "Request rejected" }, { status: 403 }));
};
542
+
543
+ //#endregion
544
+ //#region src/lib/rate-limit.ts
545
/**
 * Enforce a minimum gap of `state.rateLimitSeconds` between requests.
 * When `rateLimitWait` is set, waits out the remaining time instead of
 * rejecting with a 429 HTTPError. No-op when rate limiting is not configured.
 */
async function checkRateLimit(state$1) {
  if (state$1.rateLimitSeconds === void 0) return;
  const now = Date.now();
  if (!state$1.lastRequestTimestamp) {
    // First request: just start the clock.
    state$1.lastRequestTimestamp = now;
    return;
  }
  const elapsedSeconds = (now - state$1.lastRequestTimestamp) / 1e3;
  if (elapsedSeconds > state$1.rateLimitSeconds) {
    state$1.lastRequestTimestamp = now;
    return;
  }
  const waitTimeSeconds = Math.ceil(state$1.rateLimitSeconds - elapsedSeconds);
  if (!state$1.rateLimitWait) {
    consola.warn(`Rate limit exceeded. Need to wait ${waitTimeSeconds} more seconds.`);
    throw new HTTPError("Rate limit exceeded", Response.json({ message: "Rate limit exceeded" }, { status: 429 }));
  }
  const waitTimeMs = waitTimeSeconds * 1e3;
  consola.warn(`Rate limit reached. Waiting ${waitTimeSeconds} seconds before proceeding...`);
  await sleep(waitTimeMs);
  // Record the time AFTER the wait: the previous code stored the pre-wait
  // `now`, which let the very next request through immediately and defeated
  // the limiter's spacing.
  state$1.lastRequestTimestamp = Date.now();
  consola.info("Rate limit wait completed, proceeding with request");
}
568
+
569
+ //#endregion
570
+ //#region src/lib/tokenizer.ts
571
// Lazy loaders for each supported gpt-tokenizer encoding; each module is
// imported on first use only (see getEncodeChatFunction).
const ENCODING_MAP = {
  o200k_base: () => import("gpt-tokenizer/encoding/o200k_base"),
  cl100k_base: () => import("gpt-tokenizer/encoding/cl100k_base"),
  p50k_base: () => import("gpt-tokenizer/encoding/p50k_base"),
  p50k_edit: () => import("gpt-tokenizer/encoding/p50k_edit"),
  r50k_base: () => import("gpt-tokenizer/encoding/r50k_base")
};
// Memoized encoder modules, keyed by the requested encoding name.
const encodingCache = /* @__PURE__ */ new Map();
579
/**
 * Token cost of a list of tool calls: per-call init overhead plus the
 * encoded JSON of each call, with one trailing end-of-functions overhead
 * (charged even for an empty list, matching the heuristic).
 */
const calculateToolCallsTokens = (toolCalls, encoder, constants) => {
  const costOf = (toolCall) => constants.funcInit + encoder.encode(JSON.stringify(toolCall)).length;
  return toolCalls.reduce((total, toolCall) => total + costOf(toolCall), constants.funcEnd);
};
591
/**
 * Token cost of multimodal content parts: an image costs its encoded URL
 * plus a flat 85-token overhead; a text part costs its encoded length.
 * Parts with neither an image nor non-empty text contribute nothing.
 */
const calculateContentPartsTokens = (contentParts, encoder) => {
  let total = 0;
  for (const part of contentParts) {
    if (part.type === "image_url") {
      total += encoder.encode(part.image_url.url).length + 85;
    } else if (part.text) {
      total += encoder.encode(part.text).length;
    }
  }
  return total;
};
600
/**
 * Token cost of a single chat message: a fixed 3-token frame plus every
 * string field's encoded length, +1 when a name is present, plus tool-call
 * and array-content costs.
 */
const calculateMessageTokens = (message, encoder, constants) => {
  const TOKENS_PER_MESSAGE = 3;
  const TOKENS_PER_NAME = 1;
  let tokens = TOKENS_PER_MESSAGE;
  for (const [key, value] of Object.entries(message)) {
    if (typeof value === "string") tokens += encoder.encode(value).length;
    if (key === "name") tokens += TOKENS_PER_NAME;
    if (key === "tool_calls") tokens += calculateToolCallsTokens(value, encoder, constants);
    if (key === "content" && Array.isArray(value)) tokens += calculateContentPartsTokens(value, encoder);
  }
  return tokens;
};
615
/**
 * Total token count of a message list: per-message cost plus a fixed
 * 3-token reply-priming overhead. An empty list costs nothing.
 */
const calculateTokens = (messages, encoder, constants) => {
  if (messages.length === 0) return 0;
  const total = messages.reduce(
    (acc, message) => acc + calculateMessageTokens(message, encoder, constants),
    0
  );
  return total + 3;
};
625
/**
 * Get the corresponding encoder module based on encoding type.
 * Unknown encodings fall back to o200k_base; resolved modules are memoized
 * in `encodingCache` (the fallback is cached under the requested name too,
 * so the lookup only misses once).
 */
const getEncodeChatFunction = async (encoding) => {
  if (encodingCache.has(encoding)) {
    const cached = encodingCache.get(encoding);
    if (cached) return cached;
  }
  const supportedEncoding = encoding;
  if (!(supportedEncoding in ENCODING_MAP)) {
    const fallbackModule = await ENCODING_MAP.o200k_base();
    encodingCache.set(encoding, fallbackModule);
    return fallbackModule;
  }
  const encodingModule = await ENCODING_MAP[supportedEncoding]();
  encodingCache.set(encoding, encodingModule);
  return encodingModule;
};
643
/**
 * Tokenizer name advertised by the model's capabilities, defaulting to
 * o200k_base when absent or empty.
 */
const getTokenizerFromModel = (model) => model.capabilities.tokenizer || "o200k_base";
649
/**
 * Per-model constants for the tool-token heuristic. Only the function
 * initialization overhead differs: legacy GPT models (gpt-3.5-turbo, gpt-4)
 * charge 10, everything else charges 7.
 */
const getModelConstants = (model) => {
  const isLegacyGpt = model.id === "gpt-3.5-turbo" || model.id === "gpt-4";
  return {
    funcInit: isLegacyGpt ? 10 : 7,
    propInit: 3,
    propKey: 3,
    enumInit: -3,
    enumItem: 3,
    funcEnd: 12
  };
};
669
/**
 * Token cost of one JSON-schema property: fixed key overhead, enum items,
 * the "name:type:description" line (trailing period stripped), and any
 * schema fields beyond type/description/enum serialized verbatim.
 */
const calculateParameterTokens = (key, prop, context) => {
  const { encoder, constants } = context;
  let tokens = constants.propKey;
  if (typeof prop !== "object" || prop === null) return tokens;
  const param = prop;
  if (param.enum && Array.isArray(param.enum)) {
    tokens += constants.enumInit;
    for (const item of param.enum) {
      tokens += constants.enumItem + encoder.encode(String(item)).length;
    }
  }
  const paramType = param.type || "string";
  let paramDesc = param.description || "";
  if (paramDesc.endsWith(".")) paramDesc = paramDesc.slice(0, -1);
  tokens += encoder.encode(`${key}:${paramType}:${paramDesc}`).length;
  const excludedKeys = new Set(["type", "description", "enum"]);
  for (const propertyName of Object.keys(param)) {
    if (excludedKeys.has(propertyName)) continue;
    const propertyValue = param[propertyName];
    const propertyText = typeof propertyValue === "string" ? propertyValue : JSON.stringify(propertyValue);
    tokens += encoder.encode(`${propertyName}:${propertyText}`).length;
  }
  return tokens;
};
702
/**
 * Token cost of a tool's JSON-schema `parameters` object. A non-empty
 * `properties` map costs a fixed init plus a per-property cost; every other
 * key is serialized verbatim as "key:value".
 */
const calculateParametersTokens = (parameters, encoder, constants) => {
  if (!parameters || typeof parameters !== "object") return 0;
  let tokens = 0;
  for (const [key, value] of Object.entries(parameters)) {
    if (key !== "properties") {
      const paramText = typeof value === "string" ? value : JSON.stringify(value);
      tokens += encoder.encode(`${key}:${paramText}`).length;
      continue;
    }
    const properties = value;
    const propertyNames = Object.keys(properties);
    if (propertyNames.length === 0) continue;
    tokens += constants.propInit;
    for (const propKey of propertyNames) {
      tokens += calculateParameterTokens(propKey, properties[propKey], { encoder, constants });
    }
  }
  return tokens;
};
724
/**
 * Token cost of one tool definition: init overhead, the encoded
 * "name:description" line (trailing period stripped), plus the parameter
 * schema cost when a parameters object is present.
 */
const calculateToolTokens = (tool, encoder, constants) => {
  const func = tool.function;
  const description = func.description || "";
  const trimmedDesc = description.endsWith(".") ? description.slice(0, -1) : description;
  let tokens = constants.funcInit + encoder.encode(`${func.name}:${trimmedDesc}`).length;
  if (typeof func.parameters === "object" && func.parameters !== null) {
    tokens += calculateParametersTokens(func.parameters, encoder, constants);
  }
  return tokens;
};
738
/**
 * Total token cost of all tool definitions plus the single closing
 * end-of-functions overhead (charged even when the list is empty).
 */
const numTokensForTools = (tools, encoder, constants) =>
  tools.reduce((total, tool) => total + calculateToolTokens(tool, encoder, constants), constants.funcEnd);
747
/**
 * Calculate the token count of messages, supporting multiple GPT encoders.
 * Splits the conversation into input (non-assistant) and output (assistant)
 * messages; tool-definition overhead is added to the input side only.
 * Returns { input, output } token counts.
 */
const getTokenCount = async (payload, model) => {
  const tokenizer = getTokenizerFromModel(model);
  const encoder = await getEncodeChatFunction(tokenizer);
  const simplifiedMessages = payload.messages;
  const inputMessages = simplifiedMessages.filter((msg) => msg.role !== "assistant");
  const outputMessages = simplifiedMessages.filter((msg) => msg.role === "assistant");
  const constants = getModelConstants(model);
  let inputTokens = calculateTokens(inputMessages, encoder, constants);
  if (payload.tools && payload.tools.length > 0) inputTokens += numTokensForTools(payload.tools, encoder, constants);
  const outputTokens = calculateTokens(outputMessages, encoder, constants);
  return {
    input: inputTokens,
    output: outputTokens
  };
};
765
+
766
+ //#endregion
767
+ //#region src/services/copilot/create-chat-completions.ts
768
// POST the (OpenAI-shaped) payload to Copilot's /chat/completions endpoint.
// The vision header is enabled when any message carries image_url content;
// X-Initiator distinguishes agent-driven turns (assistant/tool messages
// present) from direct user turns. Returns an SSE event iterator when
// payload.stream is set, otherwise the parsed JSON body.
const createChatCompletions = async (payload) => {
  if (!state.copilotToken) throw new Error("Copilot token not found");
  const enableVision = payload.messages.some((x) => typeof x.content !== "string" && x.content?.some((x$1) => x$1.type === "image_url"));
  const isAgentCall = payload.messages.some((msg) => ["assistant", "tool"].includes(msg.role));
  const headers = {
    ...copilotHeaders(state, enableVision),
    "X-Initiator": isAgentCall ? "agent" : "user"
  };
  const response = await fetch(`${copilotBaseUrl(state)}/chat/completions`, {
    method: "POST",
    headers,
    body: JSON.stringify(payload)
  });
  if (!response.ok) {
    consola.error("Failed to create chat completions", response);
    throw new HTTPError("Failed to create chat completions", response);
  }
  if (payload.stream) return events(response);
  return await response.json();
};
788
+
789
+ //#endregion
790
+ //#region src/routes/chat-completions/handler.ts
791
// Handle POST /chat/completions: rate-limit, log an approximate token count,
// optionally require manual approval, default max_tokens from the selected
// model's limits, then proxy to Copilot (streaming or non-streaming).
async function handleCompletion$1(c) {
  await checkRateLimit(state);
  let payload = await c.req.json();
  // Only the tail of the payload is logged, to keep debug output bounded.
  consola.debug("Request payload:", JSON.stringify(payload).slice(-400));
  const selectedModel = state.models?.data.find((model) => model.id === payload.model);
  try {
    if (selectedModel) {
      const tokenCount = await getTokenCount(payload, selectedModel);
      consola.info("Current token count:", tokenCount);
    } else consola.warn("No model selected, skipping token count calculation");
  } catch (error) {
    // Token counting is informational only — never fail the request over it.
    consola.warn("Failed to calculate token count:", error);
  }
  if (state.manualApprove) await awaitApproval();
  if (isNullish(payload.max_tokens)) {
    payload = {
      ...payload,
      max_tokens: selectedModel?.capabilities.limits.max_output_tokens
    };
    consola.debug("Set max_tokens to:", JSON.stringify(payload.max_tokens));
  }
  const response = await createChatCompletions(payload);
  if (isNonStreaming$1(response)) {
    consola.debug("Non-streaming response:", JSON.stringify(response));
    return c.json(response);
  }
  consola.debug("Streaming response");
  // Relay upstream SSE chunks to the client as they arrive.
  return streamSSE(c, async (stream) => {
    for await (const chunk of response) {
      consola.debug("Streaming chunk:", JSON.stringify(chunk));
      await stream.writeSSE(chunk);
    }
  });
}
// Non-streaming completions are JSON objects carrying a `choices` array;
// streaming responses are async iterables without one.
const isNonStreaming$1 = (response) => Object.hasOwn(response, "choices");
826
+
827
+ //#endregion
828
+ //#region src/routes/chat-completions/route.ts
829
// Router for /chat/completions; errors are normalized by forwardError.
const completionRoutes = new Hono();
completionRoutes.post("/", async (c) => {
  try {
    return await handleCompletion$1(c);
  } catch (error) {
    return await forwardError(c, error);
  }
});
837
+
838
+ //#endregion
839
+ //#region src/services/copilot/create-embeddings.ts
840
// POST an (OpenAI-shaped) embeddings payload to Copilot's /embeddings
// endpoint and return the parsed JSON response.
const createEmbeddings = async (payload) => {
  if (!state.copilotToken) throw new Error("Copilot token not found");
  const response = await fetch(`${copilotBaseUrl(state)}/embeddings`, {
    method: "POST",
    headers: copilotHeaders(state),
    body: JSON.stringify(payload)
  });
  if (!response.ok) throw new HTTPError("Failed to create embeddings", response);
  return await response.json();
};
850
+
851
+ //#endregion
852
+ //#region src/routes/embeddings/route.ts
853
// OpenAI-compatible /embeddings router: body is forwarded verbatim to
// the Copilot embeddings endpoint.
const embeddingRoutes = new Hono();
embeddingRoutes.post("/", async (c) => {
  try {
    const payload = await c.req.json();
    const embeddings = await createEmbeddings(payload);
    return c.json(embeddings);
  } catch (error) {
    return await forwardError(c, error);
  }
});
863
+
864
+ //#endregion
865
+ //#region src/routes/messages/utils.ts
866
/**
 * Maps an OpenAI `finish_reason` to the Anthropic `stop_reason` vocabulary.
 *
 * @param {string|null} finishReason - OpenAI finish reason (or null while streaming).
 * @returns {string|null|undefined} Anthropic stop reason; null passes through,
 *   unknown values yield undefined.
 *
 * Uses a switch instead of an object-literal lookup so unexpected keys such
 * as "constructor" can never resolve through Object.prototype.
 */
function mapOpenAIStopReasonToAnthropic(finishReason) {
  if (finishReason === null) return null;
  switch (finishReason) {
    case "stop": return "end_turn";
    case "length": return "max_tokens";
    case "tool_calls": return "tool_use";
    case "content_filter": return "end_turn";
    default: return undefined;
  }
}
875
+
876
+ //#endregion
877
+ //#region src/routes/messages/non-stream-translation.ts
878
/**
 * Translates an Anthropic Messages API request into an OpenAI
 * chat-completions request payload.
 */
function translateToOpenAI(payload) {
  const {
    model,
    messages,
    system,
    max_tokens,
    stop_sequences,
    stream,
    temperature,
    top_p,
    metadata,
    tools,
    tool_choice
  } = payload;
  return {
    model: translateModelName(model),
    messages: translateAnthropicMessagesToOpenAI(messages, system),
    max_tokens,
    stop: stop_sequences,
    stream,
    temperature,
    top_p,
    user: metadata?.user_id,
    tools: translateAnthropicToolsToOpenAI(tools),
    tool_choice: translateAnthropicToolChoiceToOpenAI(tool_choice)
  };
}
892
/**
 * Normalizes dated Claude model ids (e.g. "claude-sonnet-4-20250514") to the
 * bare family names Copilot expects. Other ids pass through unchanged.
 */
function translateModelName(model) {
  if (model.startsWith("claude-sonnet-4-")) return "claude-sonnet-4";
  if (model.startsWith("claude-opus-4-")) return "claude-opus-4";
  return model;
}
897
/**
 * Builds the OpenAI message list: system prompt first (if any), then each
 * Anthropic message expanded into one or more OpenAI messages.
 */
function translateAnthropicMessagesToOpenAI(anthropicMessages, system) {
  const result = [...handleSystemPrompt(system)];
  for (const message of anthropicMessages) {
    const translated = message.role === "user"
      ? handleUserMessage(message)
      : handleAssistantMessage(message);
    result.push(...translated);
  }
  return result;
}
902
/**
 * Converts the Anthropic `system` field (string or text-block array) into
 * at most one OpenAI system message. Returns [] when absent.
 */
function handleSystemPrompt(system) {
  if (!system) return [];
  const content = typeof system === "string"
    ? system
    : system.map((block) => block.text).join("\n\n");
  return [{ role: "system", content }];
}
913
/**
 * Expands an Anthropic user message: each tool_result block becomes a
 * separate role:"tool" message (in order), and any remaining blocks are
 * folded into a single trailing role:"user" message.
 */
function handleUserMessage(message) {
  if (!Array.isArray(message.content)) {
    return [{ role: "user", content: mapContent(message.content) }];
  }
  const messages = [];
  const otherBlocks = [];
  for (const block of message.content) {
    if (block.type === "tool_result") {
      messages.push({
        role: "tool",
        tool_call_id: block.tool_use_id,
        content: mapContent(block.content)
      });
    } else {
      otherBlocks.push(block);
    }
  }
  if (otherBlocks.length > 0) {
    messages.push({ role: "user", content: mapContent(otherBlocks) });
  }
  return messages;
}
933
/**
 * Converts an Anthropic assistant message to OpenAI form. tool_use blocks
 * become `tool_calls`; text and thinking blocks are merged into the
 * message content (text first, then thinking, joined by blank lines).
 */
function handleAssistantMessage(message) {
  if (!Array.isArray(message.content)) {
    return [{ role: "assistant", content: mapContent(message.content) }];
  }
  const toolUseBlocks = [];
  const textPieces = [];
  const thinkingPieces = [];
  for (const block of message.content) {
    if (block.type === "tool_use") toolUseBlocks.push(block);
    else if (block.type === "text") textPieces.push(block.text);
    else if (block.type === "thinking") thinkingPieces.push(block.thinking);
  }
  if (toolUseBlocks.length === 0) {
    return [{ role: "assistant", content: mapContent(message.content) }];
  }
  const allTextContent = [...textPieces, ...thinkingPieces].join("\n\n");
  return [{
    role: "assistant",
    // OpenAI requires content: null (not "") alongside tool_calls.
    content: allTextContent || null,
    tool_calls: toolUseBlocks.map((toolUse) => ({
      id: toolUse.id,
      type: "function",
      function: {
        name: toolUse.name,
        arguments: JSON.stringify(toolUse.input)
      }
    }))
  }];
}
958
/**
 * Maps Anthropic content (string or block array) to OpenAI message content.
 * Without images the result is a single joined string of text/thinking
 * blocks; with images it is an array of OpenAI content parts. Non-string,
 * non-array input yields null.
 */
function mapContent(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return null;
  const hasImage = content.some((block) => block.type === "image");
  if (!hasImage) {
    const pieces = [];
    for (const block of content) {
      if (block.type === "text") pieces.push(block.text);
      else if (block.type === "thinking") pieces.push(block.thinking);
    }
    return pieces.join("\n\n");
  }
  const parts = [];
  for (const block of content) {
    if (block.type === "text") {
      parts.push({ type: "text", text: block.text });
    } else if (block.type === "thinking") {
      // Thinking blocks are surfaced as plain text parts.
      parts.push({ type: "text", text: block.thinking });
    } else if (block.type === "image") {
      // Inline images travel as data URLs.
      parts.push({
        type: "image_url",
        image_url: { url: `data:${block.source.media_type};base64,${block.source.data}` }
      });
    }
  }
  return parts;
}
985
/**
 * Maps Anthropic tool definitions to OpenAI function-tool definitions.
 * Returns undefined when no tools were supplied.
 */
function translateAnthropicToolsToOpenAI(anthropicTools) {
  if (!anthropicTools) return undefined;
  const tools = [];
  for (const { name, description, input_schema } of anthropicTools) {
    tools.push({
      type: "function",
      function: { name, description, parameters: input_schema }
    });
  }
  return tools;
}
996
/**
 * Maps Anthropic tool_choice to the OpenAI equivalent:
 * auto -> "auto", any -> "required", none -> "none", and a named tool
 * becomes a function selector. Anything else yields undefined.
 */
function translateAnthropicToolChoiceToOpenAI(anthropicToolChoice) {
  if (!anthropicToolChoice) return undefined;
  const { type, name } = anthropicToolChoice;
  if (type === "auto") return "auto";
  if (type === "any") return "required";
  if (type === "none") return "none";
  if (type === "tool" && name) {
    return { type: "function", function: { name } };
  }
  return undefined;
}
1011
/**
 * Converts a non-streaming OpenAI chat-completion response into an
 * Anthropic Messages API response: all text blocks first, then all
 * tool_use blocks, aggregated across every choice.
 */
function translateToAnthropic(response) {
  const allTextBlocks = [];
  const allToolUseBlocks = [];
  let stopReason = null;
  // Seed with the first choice's finish reason; the loop may override it.
  stopReason = response.choices[0]?.finish_reason ?? stopReason;
  for (const choice of response.choices) {
    const textBlocks = getAnthropicTextBlocks(choice.message.content);
    const toolUseBlocks = getAnthropicToolUseBlocks(choice.message.tool_calls);
    allTextBlocks.push(...textBlocks);
    allToolUseBlocks.push(...toolUseBlocks);
    // A "tool_calls" finish reason takes precedence; a plain "stop" may be
    // replaced by a later choice's reason.
    if (choice.finish_reason === "tool_calls" || stopReason === "stop") stopReason = choice.finish_reason;
  }
  return {
    id: response.id,
    type: "message",
    role: "assistant",
    model: response.model,
    content: [...allTextBlocks, ...allToolUseBlocks],
    stop_reason: mapOpenAIStopReasonToAnthropic(stopReason),
    stop_sequence: null,
    usage: {
      // Anthropic reports cached prompt tokens separately, so they are
      // subtracted from the raw prompt token count here.
      input_tokens: (response.usage?.prompt_tokens ?? 0) - (response.usage?.prompt_tokens_details?.cached_tokens ?? 0),
      output_tokens: response.usage?.completion_tokens ?? 0,
      ...response.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: response.usage.prompt_tokens_details.cached_tokens }
    }
  };
}
1038
/**
 * Converts OpenAI message content into Anthropic text blocks: a string
 * becomes one block, an array keeps only its text parts, anything else
 * yields an empty list.
 */
function getAnthropicTextBlocks(messageContent) {
  if (typeof messageContent === "string") {
    return [{ type: "text", text: messageContent }];
  }
  if (!Array.isArray(messageContent)) return [];
  const blocks = [];
  for (const part of messageContent) {
    if (part.type === "text") blocks.push({ type: "text", text: part.text });
  }
  return blocks;
}
1049
/**
 * Converts OpenAI tool calls into Anthropic tool_use blocks.
 *
 * @param {Array|undefined} toolCalls - OpenAI `message.tool_calls`.
 * @returns {Array} Anthropic tool_use blocks ([] when absent).
 *
 * Models occasionally emit an empty `arguments` string for zero-argument
 * tools; `JSON.parse("")` would throw, so an empty/missing string falls
 * back to an empty input object. Malformed non-empty JSON still throws so
 * the error is surfaced to the caller.
 */
function getAnthropicToolUseBlocks(toolCalls) {
  if (!toolCalls) return [];
  return toolCalls.map((toolCall) => {
    const rawArguments = toolCall.function.arguments;
    return {
      type: "tool_use",
      id: toolCall.id,
      name: toolCall.function.name,
      input: rawArguments ? JSON.parse(rawArguments) : {}
    };
  });
}
1058
+
1059
+ //#endregion
1060
+ //#region src/routes/messages/count-tokens-handler.ts
1061
/**
 * Handles token counting for Anthropic messages.
 *
 * Translates the Anthropic payload to OpenAI shape, runs the local
 * tokenizer, then applies per-model-family corrections. Always responds
 * 200; on any failure it falls back to { input_tokens: 1 }.
 */
async function handleCountTokens(c) {
  try {
    const anthropicBeta = c.req.header("anthropic-beta");
    const anthropicPayload = await c.req.json();
    const openAIPayload = translateToOpenAI(anthropicPayload);
    const selectedModel = state.models?.data.find((model) => model.id === anthropicPayload.model);
    if (!selectedModel) {
      consola.warn("Model not found, returning default token count");
      return c.json({ input_tokens: 1 });
    }
    const tokenCount = await getTokenCount(openAIPayload, selectedModel);
    if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
      let mcpToolExist = false;
      // Claude Code clients identify themselves via the anthropic-beta
      // header; the overhead correction below is skipped when MCP tools
      // (mcp__ prefix) are present.
      if (anthropicBeta?.startsWith("claude-code")) mcpToolExist = anthropicPayload.tools.some((tool) => tool.name.startsWith("mcp__"));
      if (!mcpToolExist) {
        // NOTE(review): 346 / 480 look like empirically measured per-family
        // tool-schema token overheads — confirm against upstream counts.
        if (anthropicPayload.model.startsWith("claude")) tokenCount.input = tokenCount.input + 346;
        else if (anthropicPayload.model.startsWith("grok")) tokenCount.input = tokenCount.input + 480;
      }
    }
    let finalTokenCount = tokenCount.input + tokenCount.output;
    // NOTE(review): fudge factors (1.15 / 1.03) presumably compensate for
    // tokenizer mismatch per model family — verify before changing.
    if (anthropicPayload.model.startsWith("claude")) finalTokenCount = Math.round(finalTokenCount * 1.15);
    else if (anthropicPayload.model.startsWith("grok")) finalTokenCount = Math.round(finalTokenCount * 1.03);
    consola.info("Token count:", finalTokenCount);
    return c.json({ input_tokens: finalTokenCount });
  } catch (error) {
    consola.error("Error counting tokens:", error);
    return c.json({ input_tokens: 1 });
  }
}
1093
+
1094
+ //#endregion
1095
+ //#region src/routes/messages/stream-translation.ts
1096
/**
 * Returns true when the currently open content block (if any) belongs to a
 * registered tool call, i.e. the stream is mid-tool_use.
 */
function isToolBlockOpen(state$1) {
  if (!state$1.contentBlockOpen) return false;
  for (const toolCall of Object.values(state$1.toolCalls)) {
    if (toolCall.anthropicBlockIndex === state$1.contentBlockIndex) return true;
  }
  return false;
}
1100
/**
 * Translates one OpenAI streaming chunk into zero or more Anthropic SSE
 * events, mutating `state$1` to track progress (message started, current
 * block index, open block, tool-call registry) across chunks. Only the
 * first choice of each chunk is considered.
 */
function translateChunkToAnthropicEvents(chunk, state$1) {
  const events$1 = [];
  if (chunk.choices.length === 0) return events$1;
  const choice = chunk.choices[0];
  const { delta } = choice;
  // Emit message_start exactly once, on the first chunk.
  if (!state$1.messageStartSent) {
    events$1.push({
      type: "message_start",
      message: {
        id: chunk.id,
        type: "message",
        role: "assistant",
        content: [],
        model: chunk.model,
        stop_reason: null,
        stop_sequence: null,
        usage: {
          // Cached prompt tokens are reported separately from input tokens.
          input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
          output_tokens: 0,
          ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
        }
      }
    });
    state$1.messageStartSent = true;
  }
  if (delta.content) {
    // Text arriving while a tool_use block is open: close that block and
    // advance to a new index before starting a text block.
    if (isToolBlockOpen(state$1)) {
      events$1.push({
        type: "content_block_stop",
        index: state$1.contentBlockIndex
      });
      state$1.contentBlockIndex++;
      state$1.contentBlockOpen = false;
    }
    // Open a text content block if none is open yet.
    if (!state$1.contentBlockOpen) {
      events$1.push({
        type: "content_block_start",
        index: state$1.contentBlockIndex,
        content_block: {
          type: "text",
          text: ""
        }
      });
      state$1.contentBlockOpen = true;
    }
    events$1.push({
      type: "content_block_delta",
      index: state$1.contentBlockIndex,
      delta: {
        type: "text_delta",
        text: delta.content
      }
    });
  }
  if (delta.tool_calls) for (const toolCall of delta.tool_calls) {
    // A chunk carrying both id and name announces a new tool call.
    if (toolCall.id && toolCall.function?.name) {
      // Close whatever block is open before starting the tool_use block.
      if (state$1.contentBlockOpen) {
        events$1.push({
          type: "content_block_stop",
          index: state$1.contentBlockIndex
        });
        state$1.contentBlockIndex++;
        state$1.contentBlockOpen = false;
      }
      const anthropicBlockIndex = state$1.contentBlockIndex;
      // Remember OpenAI tool-call index -> Anthropic block index so later
      // argument deltas can be routed to the correct block.
      state$1.toolCalls[toolCall.index] = {
        id: toolCall.id,
        name: toolCall.function.name,
        anthropicBlockIndex
      };
      events$1.push({
        type: "content_block_start",
        index: anthropicBlockIndex,
        content_block: {
          type: "tool_use",
          id: toolCall.id,
          name: toolCall.function.name,
          input: {}
        }
      });
      state$1.contentBlockOpen = true;
    }
    // Stream incremental JSON arguments for an already-registered tool call;
    // deltas for unknown indices are silently dropped.
    if (toolCall.function?.arguments) {
      const toolCallInfo = state$1.toolCalls[toolCall.index];
      if (toolCallInfo) events$1.push({
        type: "content_block_delta",
        index: toolCallInfo.anthropicBlockIndex,
        delta: {
          type: "input_json_delta",
          partial_json: toolCall.function.arguments
        }
      });
    }
  }
  // finish_reason ends the message: close any open block, then emit
  // message_delta (with final usage) followed by message_stop.
  if (choice.finish_reason) {
    if (state$1.contentBlockOpen) {
      events$1.push({
        type: "content_block_stop",
        index: state$1.contentBlockIndex
      });
      state$1.contentBlockOpen = false;
    }
    events$1.push({
      type: "message_delta",
      delta: {
        stop_reason: mapOpenAIStopReasonToAnthropic(choice.finish_reason),
        stop_sequence: null
      },
      usage: {
        input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
        output_tokens: chunk.usage?.completion_tokens ?? 0,
        ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
      }
    }, { type: "message_stop" });
  }
  return events$1;
}
1217
+
1218
+ //#endregion
1219
+ //#region src/routes/messages/handler.ts
1220
/**
 * Anthropic-compatible /v1/messages handler: enforces the configured rate
 * limit, translates the request to OpenAI chat-completions shape, forwards
 * it to Copilot, and translates the response (JSON body or SSE stream)
 * back into Anthropic messages/events.
 */
async function handleCompletion(c) {
  await checkRateLimit(state);
  const anthropicPayload = await c.req.json();
  consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload));
  const openAIPayload = translateToOpenAI(anthropicPayload);
  consola.debug("Translated OpenAI request payload:", JSON.stringify(openAIPayload));
  // Optional interactive gate (--manual flag).
  if (state.manualApprove) await awaitApproval();
  const response = await createChatCompletions(openAIPayload);
  if (isNonStreaming(response)) {
    consola.debug("Non-streaming response from Copilot:", JSON.stringify(response).slice(-400));
    const anthropicResponse = translateToAnthropic(response);
    consola.debug("Translated Anthropic response:", JSON.stringify(anthropicResponse));
    return c.json(anthropicResponse);
  }
  consola.debug("Streaming response from Copilot");
  return streamSSE(c, async (stream) => {
    // Per-request translation state shared across all chunks of this stream.
    const streamState = {
      messageStartSent: false,
      contentBlockIndex: 0,
      contentBlockOpen: false,
      toolCalls: {}
    };
    for await (const rawEvent of response) {
      consola.debug("Copilot raw stream event:", JSON.stringify(rawEvent));
      // "[DONE]" is the OpenAI stream terminator; empty events are keep-alives.
      if (rawEvent.data === "[DONE]") break;
      if (!rawEvent.data) continue;
      const chunk = JSON.parse(rawEvent.data);
      const events$1 = translateChunkToAnthropicEvents(chunk, streamState);
      for (const event of events$1) {
        consola.debug("Translated Anthropic event:", JSON.stringify(event));
        await stream.writeSSE({
          event: event.type,
          data: JSON.stringify(event)
        });
      }
    }
  });
}
1258
// A non-streaming Copilot response is a parsed JSON object with a
// "choices" key; streaming responses are async iterables of SSE events.
function isNonStreaming(response) {
  return Object.hasOwn(response, "choices");
}
1259
+
1260
+ //#endregion
1261
+ //#region src/routes/messages/route.ts
1262
// Anthropic-compatible /v1/messages router. Handler failures are turned
// into HTTP error responses by forwardError.
const messageRoutes = new Hono();
messageRoutes.post("/", (c) =>
  handleCompletion(c).catch((error) => forwardError(c, error)));
messageRoutes.post("/count_tokens", (c) =>
  handleCountTokens(c).catch((error) => forwardError(c, error)));
1277
+
1278
+ //#endregion
1279
+ //#region src/routes/models/route.ts
1280
// OpenAI/Anthropic-compatible /models listing, backed by the cached
// Copilot model catalogue (populated lazily on first request).
const modelRoutes = new Hono();
modelRoutes.get("/", async (c) => {
  try {
    if (!state.models) await cacheModels();
    const data = state.models?.data.map((model) => ({
      id: model.id,
      object: "model",
      type: "model",
      created: 0,
      created_at: new Date(0).toISOString(),
      owned_by: model.vendor,
      display_name: model.name
    }));
    return c.json({
      object: "list",
      data,
      has_more: false
    });
  } catch (error) {
    return await forwardError(c, error);
  }
});
1302
+
1303
+ //#endregion
1304
+ //#region src/routes/token/route.ts
1305
// Exposes the current Copilot bearer token (used by the usage viewer).
const tokenRoute = new Hono();
tokenRoute.get("/", (c) => {
  try {
    return c.json({ token: state.copilotToken });
  } catch (error) {
    console.error("Error fetching token:", error);
    const body = {
      error: "Failed to fetch token",
      token: null
    };
    return c.json(body, 500);
  }
});
1317
+
1318
+ //#endregion
1319
+ //#region src/routes/usage/route.ts
1320
// Proxies the GitHub Copilot usage/quota endpoint.
const usageRoute = new Hono();
usageRoute.get("/", async (c) => {
  try {
    const usage = await getCopilotUsage();
    return c.json(usage);
  } catch (error) {
    console.error("Error fetching Copilot usage:", error);
    const body = { error: "Failed to fetch Copilot usage" };
    return c.json(body, 500);
  }
});
1330
+
1331
+ //#endregion
1332
+ //#region src/server.ts
1333
// Root application: request logging, permissive CORS, health check, and
// each router mounted at its bare path plus (where applicable) under /v1.
const server = new Hono();
server.use(logger());
server.use(cors());
server.get("/", (c) => c.text("Server running"));
const mounts = [
  ["/chat/completions", completionRoutes],
  ["/models", modelRoutes],
  ["/embeddings", embeddingRoutes],
  ["/usage", usageRoute],
  ["/token", tokenRoute],
  ["/v1/chat/completions", completionRoutes],
  ["/v1/models", modelRoutes],
  ["/v1/embeddings", embeddingRoutes],
  ["/v1/messages", messageRoutes]
];
for (const [mountPath, routes] of mounts) server.route(mountPath, routes);
1346
+
1347
+ //#endregion
1348
+ //#region src/start.ts
1349
/**
 * Boots the proxy: applies CLI options to global state, acquires GitHub and
 * Copilot tokens, caches model metadata, optionally generates a Claude Code
 * launch command, then starts the HTTP server.
 */
async function runServer(options) {
  if (options.proxyEnv) initProxyFromEnv();
  if (options.verbose) {
    // consola level 5 enables debug output.
    consola.level = 5;
    consola.info("Verbose logging enabled");
  }
  state.accountType = options.accountType;
  if (options.accountType !== "individual") consola.info(`Using ${options.accountType} plan GitHub account`);
  state.manualApprove = options.manual;
  state.rateLimitSeconds = options.rateLimit;
  state.rateLimitWait = options.rateLimitWait;
  state.showToken = options.showToken;
  await ensurePaths();
  await cacheVSCodeVersion();
  if (options.githubToken) {
    state.githubToken = options.githubToken;
    consola.info("Using provided GitHub token");
  } else await setupGitHubToken();
  await setupCopilotToken();
  await cacheModels();
  consola.info(`Available models: \n${state.models?.data.map((model) => `- ${model.id}`).join("\n")}`);
  const serverUrl = `http://localhost:${options.port}`;
  if (options.claudeCode) {
    invariant(state.models, "Models should be loaded by now");
    const selectedModel = await consola.prompt("Select a model to use with Claude Code", {
      type: "select",
      options: state.models.data.map((model) => model.id)
    });
    const selectedSmallModel = await consola.prompt("Select a small model to use with Claude Code", {
      type: "select",
      options: state.models.data.map((model) => model.id)
    });
    // Environment variables that point Claude Code at this proxy.
    const command = generateEnvScript({
      ANTHROPIC_BASE_URL: serverUrl,
      ANTHROPIC_AUTH_TOKEN: "dummy",
      ANTHROPIC_MODEL: selectedModel,
      ANTHROPIC_DEFAULT_SONNET_MODEL: selectedModel,
      ANTHROPIC_SMALL_FAST_MODEL: selectedSmallModel,
      ANTHROPIC_DEFAULT_HAIKU_MODEL: selectedSmallModel,
      DISABLE_NON_ESSENTIAL_MODEL_CALLS: "1",
      CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: "1"
    }, "claude");
    try {
      // Best effort: clipboard access can fail in headless environments.
      clipboard.writeSync(command);
      consola.success("Copied Claude Code command to clipboard!");
    } catch {
      consola.warn("Failed to copy to clipboard. Here is the Claude Code command:");
      consola.log(command);
    }
  }
  consola.box(`🌐 Usage Viewer: https://ericc-ch.github.io/copilot-api?endpoint=${serverUrl}/usage`);
  serve({
    fetch: server.fetch,
    port: options.port
  });
}
1405
// citty definition for the `start` subcommand: declares the CLI flags and
// delegates to runServer with parsed option values.
const start = defineCommand({
  meta: {
    name: "start",
    description: "Start the Copilot API server"
  },
  args: {
    port: {
      alias: "p",
      type: "string",
      default: "4141",
      description: "Port to listen on"
    },
    verbose: {
      alias: "v",
      type: "boolean",
      default: false,
      description: "Enable verbose logging"
    },
    "account-type": {
      alias: "a",
      type: "string",
      default: "individual",
      description: "Account type to use (individual, business, enterprise)"
    },
    manual: {
      type: "boolean",
      default: false,
      description: "Enable manual request approval"
    },
    "rate-limit": {
      alias: "r",
      type: "string",
      description: "Rate limit in seconds between requests"
    },
    wait: {
      alias: "w",
      type: "boolean",
      default: false,
      description: "Wait instead of error when rate limit is hit. Has no effect if rate limit is not set"
    },
    "github-token": {
      alias: "g",
      type: "string",
      description: "Provide GitHub token directly (must be generated using the `auth` subcommand)"
    },
    "claude-code": {
      alias: "c",
      type: "boolean",
      default: false,
      description: "Generate a command to launch Claude Code with Copilot API config"
    },
    "show-token": {
      type: "boolean",
      default: false,
      description: "Show GitHub and Copilot tokens on fetch and refresh"
    },
    "proxy-env": {
      type: "boolean",
      default: false,
      description: "Initialize proxy from environment variables"
    }
  },
  run({ args }) {
    // citty delivers string values; numeric options are parsed here.
    const rateLimitRaw = args["rate-limit"];
    const rateLimit = rateLimitRaw === void 0 ? void 0 : Number.parseInt(rateLimitRaw, 10);
    return runServer({
      port: Number.parseInt(args.port, 10),
      verbose: args.verbose,
      accountType: args["account-type"],
      manual: args.manual,
      rateLimit,
      rateLimitWait: args.wait,
      githubToken: args["github-token"],
      claudeCode: args["claude-code"],
      showToken: args["show-token"],
      proxyEnv: args["proxy-env"]
    });
  }
});
1484
+
1485
+ //#endregion
1486
+ //#region src/main.ts
1487
// Root CLI command wiring all subcommands together.
const main = defineCommand({
  meta: {
    name: "copilot-api",
    description: "A wrapper around GitHub Copilot API to make it OpenAI compatible, making it usable for other tools."
  },
  subCommands: {
    auth,
    start,
    "check-usage": checkUsage,
    debug
  }
});
// Top-level await: citty parses argv and dispatches to the subcommand.
await runMain(main);
1500
+
1501
+ //#endregion
1502
+ export { };
1503
+ //# sourceMappingURL=main.js.map