@jer-y/copilot-proxy 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/main.js ADDED
@@ -0,0 +1,1712 @@
1
+ #!/usr/bin/env node
2
+ import { defineCommand, runMain } from "citty";
3
+ import consola from "consola";
4
+ import fs from "node:fs/promises";
5
+ import os from "node:os";
6
+ import path from "node:path";
7
+ import { randomUUID } from "node:crypto";
8
+ import process from "node:process";
9
+ import clipboard from "clipboardy";
10
+ import { serve } from "srvx";
11
+ import invariant from "tiny-invariant";
12
+ import { getProxyForUrl } from "proxy-from-env";
13
+ import { Agent, ProxyAgent, setGlobalDispatcher } from "undici";
14
+ import { execSync } from "node:child_process";
15
+ import { Hono } from "hono";
16
+ import { cors } from "hono/cors";
17
+ import { logger } from "hono/logger";
18
+ import { streamSSE } from "hono/streaming";
19
+ import { events } from "fetch-event-stream";
20
+
21
+ //#region src/lib/paths.ts
22
+ const APP_DIR = path.join(os.homedir(), ".local", "share", "copilot-proxy");
23
+ const GITHUB_TOKEN_PATH = path.join(APP_DIR, "github_token");
24
+ const PATHS = {
25
+ APP_DIR,
26
+ GITHUB_TOKEN_PATH
27
+ };
28
// Create the app data directory and make sure the token file exists with
// restrictive permissions (see ensureFile). Must run before any token I/O.
async function ensurePaths() {
  await fs.mkdir(PATHS.APP_DIR, { recursive: true });
  await ensureFile(PATHS.GITHUB_TOKEN_PATH);
}
32
/**
 * Ensure `filePath` exists and is writable. If it is missing (or the
 * writability probe fails) it is created empty with owner-only permissions,
 * since it will hold the GitHub OAuth token.
 * @param {string} filePath - absolute path of the file to ensure
 */
async function ensureFile(filePath) {
  try {
    await fs.access(filePath, fs.constants.W_OK);
  } catch {
    await fs.writeFile(filePath, "");
    // 0o600 (decimal 384): read/write for the owner only.
    await fs.chmod(filePath, 0o600);
  }
}
40
+
41
+ //#endregion
42
+ //#region src/lib/state.ts
43
// Mutable runtime configuration shared by all modules in this bundle.
const state = {
  accountType: "individual", // selects the Copilot API host (see copilotBaseUrl)
  manualApprove: false, // when true, each request must be confirmed on the console
  rateLimitWait: false, // when true, rate-limited requests wait instead of getting 429
  showToken: false // when true, GitHub/Copilot tokens are printed to the console
};

//#endregion
//#region src/lib/api-config.ts
// Base headers shared by GitHub and Copilot JSON requests.
function standardHeaders() {
  return {
    "content-type": "application/json",
    "accept": "application/json"
  };
}
// Version strings presenting this client as VS Code's Copilot Chat extension.
const COPILOT_VERSION = "0.26.7";
const EDITOR_PLUGIN_VERSION = `copilot-chat/${COPILOT_VERSION}`;
const USER_AGENT = `GitHubCopilotChat/${COPILOT_VERSION}`;
const API_VERSION = "2025-05-01";
62
/**
 * Copilot API origin for the configured account type. Individual accounts use
 * the bare host; business/enterprise get a subdomain-prefixed host.
 */
function copilotBaseUrl(state$1) {
  if (state$1.accountType === "individual") {
    return "https://api.githubcopilot.com";
  }
  return `https://api.${state$1.accountType}.githubcopilot.com`;
}
65
/**
 * Headers for Copilot API calls, presenting this proxy as VS Code's Copilot
 * Chat extension. A fresh x-request-id is generated on every call.
 * @param vision - adds the vision marker; set when the payload contains
 *   image_url content parts (see createChatCompletions).
 */
function copilotHeaders(state$1, vision = false) {
  const headers = {
    "Authorization": `Bearer ${state$1.copilotToken}`,
    "content-type": standardHeaders()["content-type"],
    "copilot-integration-id": "vscode-chat",
    "editor-version": `vscode/${state$1.vsCodeVersion}`,
    "editor-plugin-version": EDITOR_PLUGIN_VERSION,
    "user-agent": USER_AGENT,
    "openai-intent": "conversation-agent",
    "x-interaction-type": "conversation-agent",
    "x-github-api-version": API_VERSION,
    "x-request-id": randomUUID(),
    "x-vscode-user-agent-library-version": "electron-fetch"
  };
  if (vision) headers["copilot-vision-request"] = "true";
  return headers;
}
82
const GITHUB_API_BASE_URL = "https://api.github.com";
/** Headers for api.github.com calls authenticated with the GitHub OAuth token. */
function githubHeaders(state$1) {
  return {
    ...standardHeaders(),
    "authorization": `token ${state$1.githubToken}`,
    "editor-version": `vscode/${state$1.vsCodeVersion}`,
    "editor-plugin-version": EDITOR_PLUGIN_VERSION,
    "user-agent": USER_AGENT,
    "x-github-api-version": API_VERSION,
    "x-vscode-user-agent-library-version": "electron-fetch"
  };
}
const GITHUB_BASE_URL = "https://github.com";
// OAuth client id used for the device-code login flow (a public identifier,
// not a secret).
const GITHUB_CLIENT_ID = "Iv1.b507a08c87ecfe98";
const GITHUB_APP_SCOPES = ["read:user"].join(" ");
97
+
98
+ //#endregion
99
+ //#region src/lib/error.ts
100
/**
 * Error that carries the failed upstream `Response` so callers can forward
 * its status and body (see forwardError).
 */
var HTTPError = class extends Error {
  response;
  constructor(message, response) {
    super(message);
    // Without this, instances report as generic "Error" in logs/stacks.
    this.name = "HTTPError";
    this.response = response;
  }
};
107
/**
 * Convert an error thrown by a route handler into an OpenAI-style JSON error
 * response on the Hono context `c`: HTTPError forwards the upstream body and
 * status; anything else becomes a 500.
 */
async function forwardError(c, error) {
  consola.error("Error occurred:", error);
  if (error instanceof HTTPError) {
    const errorText = await error.response.text();
    let errorJson;
    try {
      errorJson = JSON.parse(errorText);
    } catch {
      errorJson = errorText; // upstream body was not JSON; log it verbatim
    }
    consola.error("HTTP error:", errorJson);
    return c.json({ error: {
      message: errorText,
      type: "error"
    } }, error.response.status);
  }
  // Non-Error throwables (strings, plain objects) have no `.message`;
  // stringify them instead of responding with `undefined`.
  const message = error instanceof Error ? error.message : String(error);
  return c.json({ error: {
    message,
    type: "error"
  } }, 500);
}
128
+
129
+ //#endregion
130
+ //#region src/services/github/get-copilot-token.ts
131
/** Exchange the GitHub OAuth token for a short-lived Copilot bearer token. */
async function getCopilotToken() {
  const url = `${GITHUB_API_BASE_URL}/copilot_internal/v2/token`;
  const response = await fetch(url, { headers: githubHeaders(state) });
  if (!response.ok) throw new HTTPError("Failed to get Copilot token", response);
  return response.json();
}
136
+
137
+ //#endregion
138
+ //#region src/services/github/get-device-code.ts
139
/** Start the OAuth device flow: obtain a user code and verification URL. */
async function getDeviceCode() {
  const body = JSON.stringify({
    client_id: GITHUB_CLIENT_ID,
    scope: GITHUB_APP_SCOPES
  });
  const response = await fetch(`${GITHUB_BASE_URL}/login/device/code`, {
    method: "POST",
    headers: standardHeaders(),
    body
  });
  if (!response.ok) throw new HTTPError("Failed to get device code", response);
  return response.json();
}
151
+
152
+ //#endregion
153
+ //#region src/services/github/get-user.ts
154
/** Fetch the authenticated user's GitHub profile. */
async function getGitHubUser() {
  const headers = {
    authorization: `token ${state.githubToken}`,
    ...standardHeaders()
  };
  const response = await fetch(`${GITHUB_API_BASE_URL}/user`, { headers });
  if (!response.ok) throw new HTTPError("Failed to get GitHub user", response);
  return response.json();
}
162
+
163
+ //#endregion
164
+ //#region src/services/copilot/get-models.ts
165
/** List the models available to the authenticated Copilot account. */
async function getModels() {
  const url = `${copilotBaseUrl(state)}/models`;
  const response = await fetch(url, { headers: copilotHeaders(state) });
  if (!response.ok) throw new HTTPError("Failed to get models", response);
  return response.json();
}
170
+
171
+ //#endregion
172
+ //#region src/services/get-vscode-version.ts
173
const FALLBACK = "1.104.3";
/**
 * Best-effort lookup of the latest VS Code version (scraped from the AUR
 * PKGBUILD for visual-studio-code-bin). Falls back to a pinned version on any
 * network, timeout, or parse failure.
 */
async function getVSCodeVersion() {
  try {
    // AbortSignal.timeout replaces the manual AbortController + setTimeout +
    // clearTimeout dance (and cannot leak a live timer).
    const response = await fetch("https://aur.archlinux.org/cgit/aur.git/plain/PKGBUILD?h=visual-studio-code-bin", { signal: AbortSignal.timeout(5e3) });
    const match = (await response.text()).match(/pkgver=([0-9.]+)/);
    return match ? match[1] : FALLBACK;
  } catch {
    return FALLBACK;
  }
}
// NOTE(review): this module-load call duplicates the fetch done later by
// cacheVSCodeVersion() and discards its result. Kept to preserve identical
// module side effects, but it looks removable — confirm against the source.
await getVSCodeVersion();
190
+
191
+ //#endregion
192
+ //#region src/lib/utils.ts
193
/** Promise-based delay: resolves after `ms` milliseconds. */
function sleep(ms) {
  return new Promise((done) => setTimeout(done, ms));
}
198
/** True only for `null` or `undefined` (falsy values like 0/"" stay false). */
function isNullish(value) {
  // `== null` matches exactly null and undefined.
  return value == null;
}
201
// Fetch the Copilot model list once and keep it on shared state.
async function cacheModels() {
  state.models = await getModels();
}
// Resolve the VS Code version to impersonate and keep it on shared state.
async function cacheVSCodeVersion() {
  const response = await getVSCodeVersion();
  state.vsCodeVersion = response;
  consola.info(`Using VSCode version: ${response}`);
}
209
+
210
+ //#endregion
211
+ //#region src/services/github/poll-access-token.ts
212
/**
 * Poll GitHub's device-flow token endpoint until the user completes login.
 * GitHub reports flow state via an `error` field on a 200 response
 * (RFC 8628 §3.5): `authorization_pending` keeps polling, `slow_down`
 * increases the interval, and `expired_token` / `access_denied` are terminal —
 * previously these looped forever.
 * @returns {Promise<string>} the OAuth access token
 */
async function pollAccessToken(deviceCode) {
  let sleepDuration = (deviceCode.interval + 1) * 1e3;
  consola.debug(`Polling access token with interval of ${sleepDuration}ms`);
  while (true) {
    const response = await fetch(`${GITHUB_BASE_URL}/login/oauth/access_token`, {
      method: "POST",
      headers: standardHeaders(),
      body: JSON.stringify({
        client_id: GITHUB_CLIENT_ID,
        device_code: deviceCode.device_code,
        grant_type: "urn:ietf:params:oauth:grant-type:device_code"
      })
    });
    if (!response.ok) {
      // Log first, then back off (previously the error sat unlogged during the sleep).
      consola.error("Failed to poll access token:", await response.text());
      await sleep(sleepDuration);
      continue;
    }
    const json = await response.json();
    consola.debug("Polling access token response:", json);
    const { access_token, error, interval } = json;
    if (access_token) return access_token;
    if (error === "slow_down") {
      // RFC 8628: add 5 seconds to the polling interval.
      sleepDuration = ((interval ?? deviceCode.interval) + 5) * 1e3;
    } else if (error === "expired_token" || error === "access_denied") {
      throw new Error(`GitHub device flow failed: ${error}`);
    }
    await sleep(sleepDuration);
  }
}
237
+
238
+ //#endregion
239
+ //#region src/lib/token.ts
240
// Read the persisted GitHub OAuth token ("" when none has been saved yet,
// because ensureFile creates the file empty).
const readGithubToken = () => fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8");
// Persist the GitHub OAuth token for future runs.
function writeGithubToken(token) {
  return fs.writeFile(PATHS.GITHUB_TOKEN_PATH, token);
}
244
/**
 * Fetch a Copilot bearer token, store it on `state`, and schedule automatic
 * refreshes shortly before the server-reported expiry.
 */
async function setupCopilotToken() {
  const { token, refresh_in } = await getCopilotToken();
  state.copilotToken = token;
  consola.debug("GitHub Copilot Token fetched successfully!");
  if (state.showToken) consola.info("Copilot token:", token);
  // Refresh one minute before expiry; clamp so a tiny/negative refresh_in
  // cannot turn the interval into a ~1ms hot loop hammering the API.
  const refreshInterval = Math.max((refresh_in - 60) * 1e3, 60 * 1e3);
  setInterval(async () => {
    consola.debug("Refreshing Copilot token");
    try {
      const { token: token$1 } = await getCopilotToken();
      state.copilotToken = token$1;
      consola.debug("Copilot token refreshed");
      if (state.showToken) consola.info("Refreshed Copilot token:", token$1);
    } catch (error) {
      // Do not rethrow: an exception escaping a fire-and-forget interval
      // callback becomes an unhandled rejection and can crash the process.
      // The previous token stays in place and the next tick retries.
      consola.error("Failed to refresh Copilot token:", error);
    }
  }, refreshInterval);
}
263
/**
 * Load the persisted GitHub token, or run the interactive device-code login
 * flow when none exists (or `options.force` is set). On success the token is
 * stored on `state` and the logged-in user is printed.
 * @param {{ force?: boolean }} [options] - force a fresh login even if a token exists
 * @throws rethrows any failure after logging it
 */
async function setupGitHubToken(options) {
  try {
    const githubToken = await readGithubToken();
    // Empty string (fresh install) is falsy and falls through to the login flow.
    if (githubToken && !options?.force) {
      state.githubToken = githubToken;
      if (state.showToken) consola.info("GitHub token:", githubToken);
      await logUser();
      return;
    }
    consola.info("Not logged in, getting new access token");
    const response = await getDeviceCode();
    consola.debug("Device code response:", response);
    consola.info(`Please enter the code "${response.user_code}" in ${response.verification_uri}`);
    // Blocks until the user completes the browser flow.
    const token = await pollAccessToken(response);
    await writeGithubToken(token);
    state.githubToken = token;
    if (state.showToken) consola.info("GitHub token:", token);
    await logUser();
  } catch (error) {
    if (error instanceof HTTPError) {
      consola.error("Failed to get GitHub token:", await error.response.json());
      throw error;
    }
    consola.error("Failed to get GitHub token:", error);
    throw error;
  }
}
290
// Print the GitHub login of the authenticated user (also validates the token).
async function logUser() {
  const user = await getGitHubUser();
  consola.info(`Logged in as ${user.login}`);
}
294
+
295
+ //#endregion
296
+ //#region src/auth.ts
297
// Run the GitHub device-code login flow (always forced) and persist the token.
async function runAuth(options) {
  if (options.verbose) {
    consola.level = 5;
    consola.info("Verbose logging enabled");
  }
  state.showToken = options.showToken;
  await ensurePaths();
  await setupGitHubToken({ force: true });
  consola.success("GitHub token written to", PATHS.GITHUB_TOKEN_PATH);
}
// `auth` CLI subcommand: login only, without starting the proxy server.
const auth = defineCommand({
  meta: {
    name: "auth",
    description: "Run GitHub auth flow without running the server"
  },
  args: {
    "verbose": {
      alias: "v",
      type: "boolean",
      default: false,
      description: "Enable verbose logging"
    },
    "show-token": {
      type: "boolean",
      default: false,
      description: "Show GitHub token on auth"
    }
  },
  run({ args }) {
    return runAuth({
      verbose: args.verbose,
      showToken: args["show-token"]
    });
  }
});
332
+
333
+ //#endregion
334
+ //#region src/services/github/get-copilot-usage.ts
335
/** Fetch the account's Copilot usage/quota snapshot from GitHub. */
async function getCopilotUsage() {
  const url = `${GITHUB_API_BASE_URL}/copilot_internal/user`;
  const response = await fetch(url, { headers: githubHeaders(state) });
  if (!response.ok) throw new HTTPError("Failed to get Copilot usage", response);
  return response.json();
}
340
+
341
+ //#endregion
342
+ //#region src/check-usage.ts
343
// `check-usage` CLI subcommand: print plan, reset date and per-quota usage.
const checkUsage = defineCommand({
  meta: {
    name: "check-usage",
    description: "Show current GitHub Copilot usage/quota information"
  },
  async run() {
    await ensurePaths();
    await setupGitHubToken();
    try {
      const usage = await getCopilotUsage();
      // One human-readable line per quota snapshot; "N/A" when the plan
      // doesn't include that quota.
      const summarizeQuota = (name, snap) => {
        if (!snap) return `${name}: N/A`;
        const total = snap.entitlement;
        const used = total - snap.remaining;
        const percentUsed = total > 0 ? used / total * 100 : 0;
        const percentRemaining = snap.percent_remaining;
        return `${name}: ${used}/${total} used (${percentUsed.toFixed(1)}% used, ${percentRemaining.toFixed(1)}% remaining)`;
      };
      // The premium line previously duplicated this logic inline and crashed
      // when the premium_interactions snapshot was absent.
      const premiumLine = summarizeQuota("Premium", usage.quota_snapshots.premium_interactions);
      const chatLine = summarizeQuota("Chat", usage.quota_snapshots.chat);
      const completionsLine = summarizeQuota("Completions", usage.quota_snapshots.completions);
      consola.box(`Copilot Usage (plan: ${usage.copilot_plan})\nQuota resets: ${usage.quota_reset_date}\n\nQuotas:\n ${premiumLine}\n ${chatLine}\n ${completionsLine}`);
    } catch (err) {
      consola.error("Failed to fetch Copilot usage:", err);
      process.exit(1);
    }
  }
});
376
+
377
+ //#endregion
378
+ //#region src/debug.ts
379
/**
 * Read this package's version from the adjacent package.json, or "unknown"
 * when it cannot be read/parsed.
 */
async function getPackageVersion() {
  try {
    // Pass the file URL straight to fs.readFile: `.pathname` of a file:// URL
    // (e.g. "/C:/...") is not a valid filesystem path on Windows.
    const packageJsonUrl = new URL("../package.json", import.meta.url);
    return JSON.parse(await fs.readFile(packageJsonUrl, "utf8")).version;
  } catch {
    return "unknown";
  }
}
387
/** Snapshot of the current JS runtime (bun or node) and the host platform. */
function getRuntimeInfo() {
  const isBun = typeof Bun !== "undefined";
  // process.version is "vX.Y.Z"; drop the leading "v" for display.
  const version = isBun ? Bun.version : process.version.slice(1);
  return {
    name: isBun ? "bun" : "node",
    version,
    platform: os.platform(),
    arch: os.arch()
  };
}
396
/** True when the token file exists, is a regular file, and is non-blank. */
async function checkTokenExists() {
  try {
    const stats = await fs.stat(PATHS.GITHUB_TOKEN_PATH);
    if (!stats.isFile()) return false;
    const contents = await fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8");
    return contents.trim().length > 0;
  } catch {
    // Missing file or permission problem both count as "no token".
    return false;
  }
}
404
// Gather everything the `debug` command prints, in parallel where possible.
async function getDebugInfo() {
  const [version, tokenExists] = await Promise.all([getPackageVersion(), checkTokenExists()]);
  return {
    version,
    runtime: getRuntimeInfo(),
    paths: {
      APP_DIR: PATHS.APP_DIR,
      GITHUB_TOKEN_PATH: PATHS.GITHUB_TOKEN_PATH
    },
    tokenExists
  };
}
// Human-readable rendering. The template literal is deliberately flush-left:
// any leading whitespace would end up inside the printed string.
function printDebugInfoPlain(info) {
  consola.info(`copilot-proxy debug

Version: ${info.version}
Runtime: ${info.runtime.name} ${info.runtime.version} (${info.runtime.platform} ${info.runtime.arch})

Paths:
- APP_DIR: ${info.paths.APP_DIR}
- GITHUB_TOKEN_PATH: ${info.paths.GITHUB_TOKEN_PATH}

Token exists: ${info.tokenExists ? "Yes" : "No"}`);
}
428
// Machine-readable rendering of the same debug info.
function printDebugInfoJson(info) {
  console.log(JSON.stringify(info, null, 2));
}
// Collect debug info and render it in the requested format.
async function runDebug(options) {
  const debugInfo = await getDebugInfo();
  if (options.json) printDebugInfoJson(debugInfo);
  else printDebugInfoPlain(debugInfo);
}
// `debug` CLI subcommand: print version, runtime, paths, and token status.
const debug = defineCommand({
  meta: {
    name: "debug",
    description: "Print debug information about the application"
  },
  args: { json: {
    type: "boolean",
    default: false,
    description: "Output debug information as JSON"
  } },
  run({ args }) {
    return runDebug({ json: args.json });
  }
});
450
+
451
+ //#endregion
452
+ //#region src/lib/proxy.ts
453
/**
 * Install a global undici dispatcher that routes each request through the
 * proxy selected by the standard env vars (HTTP(S)_PROXY / NO_PROXY) for that
 * specific URL, falling back to a direct connection. Best-effort: any setup or
 * per-request failure silently degrades to the direct agent.
 */
function initProxyFromEnv() {
  // Bun does not use undici's global dispatcher; nothing to configure there.
  if (typeof Bun !== "undefined") return;
  try {
    const direct = new Agent();
    // One ProxyAgent per distinct proxy URL, created lazily and reused.
    const proxies = /* @__PURE__ */ new Map();
    setGlobalDispatcher({
      dispatch(options, handler) {
        try {
          const origin = typeof options.origin === "string" ? new URL(options.origin) : options.origin;
          // getProxyForUrl honors NO_PROXY; empty string means "go direct".
          const raw = getProxyForUrl(origin.toString());
          const proxyUrl = raw && raw.length > 0 ? raw : void 0;
          if (!proxyUrl) {
            consola.debug(`HTTP proxy bypass: ${origin.hostname}`);
            return direct.dispatch(options, handler);
          }
          let agent = proxies.get(proxyUrl);
          if (!agent) {
            agent = new ProxyAgent(proxyUrl);
            proxies.set(proxyUrl, agent);
          }
          // Log only protocol+host of the proxy, not embedded credentials.
          let label = proxyUrl;
          try {
            const u = new URL(proxyUrl);
            label = `${u.protocol}//${u.host}`;
          } catch {}
          consola.debug(`HTTP proxy route: ${origin.hostname} via ${label}`);
          return agent.dispatch(options, handler);
        } catch {
          // Any per-request failure falls back to a direct connection.
          return direct.dispatch(options, handler);
        }
      },
      close() {
        return direct.close();
      },
      destroy() {
        return direct.destroy();
      }
    });
    consola.debug("HTTP proxy configured from environment (per-URL)");
  } catch (err) {
    consola.debug("Proxy setup skipped:", err);
  }
}
496
+
497
+ //#endregion
498
+ //#region src/lib/shell.ts
499
/** Best-effort detection of the invoking shell, used to format env scripts. */
function getShell() {
  const { platform, ppid, env } = process;
  if (platform === "win32") {
    try {
      // NOTE(review): wmic is deprecated/absent on recent Windows builds;
      // when it fails we fall back to cmd, which is the safe default.
      const command = `wmic process get ParentProcessId,Name | findstr "${ppid}"`;
      if (execSync(command, { stdio: "pipe" }).toString().toLowerCase().includes("powershell.exe")) return "powershell";
    } catch {
      return "cmd";
    }
    return "cmd";
  } else {
    const shellPath = env.SHELL;
    if (shellPath) {
      if (shellPath.endsWith("zsh")) return "zsh";
      if (shellPath.endsWith("fish")) return "fish";
      if (shellPath.endsWith("bash")) return "bash";
    }
    return "sh";
  }
}
/**
 * Generates a copy-pasteable script to set multiple environment variables
 * and run a subsequent command.
 * @param {EnvVars} envVars - An object of environment variables to set.
 * @param {string} commandToRun - The command to run after setting the variables.
 * @returns {string} The formatted script string.
 */
function generateEnvScript(envVars, commandToRun = "") {
  const shell = getShell();
  const filteredEnvVars = Object.entries(envVars).filter(([, value]) => value !== void 0);
  let commandBlock;
  switch (shell) {
    case "powershell":
      // Values must be quoted: a bare word on the right-hand side of a
      // PowerShell assignment is parsed as a command invocation and fails.
      commandBlock = filteredEnvVars.map(([key, value]) => `$env:${key} = "${value}"`).join("; ");
      break;
    case "cmd":
      // cmd takes everything after `=` literally; quotes would become part
      // of the value, so leave it unquoted.
      commandBlock = filteredEnvVars.map(([key, value]) => `set ${key}=${value}`).join(" & ");
      break;
    case "fish":
      commandBlock = filteredEnvVars.map(([key, value]) => `set -gx ${key} "${value}"`).join("; ");
      break;
    default: {
      // POSIX shells (bash/zsh/sh): quote so values with spaces survive.
      const assignments = filteredEnvVars.map(([key, value]) => `${key}="${value}"`).join(" ");
      commandBlock = filteredEnvVars.length > 0 ? `export ${assignments}` : "";
      break;
    }
  }
  if (commandBlock && commandToRun) return `${commandBlock}${shell === "cmd" ? " & " : " && "}${commandToRun}`;
  return commandBlock || commandToRun;
}
549
+
550
+ //#endregion
551
+ //#region src/lib/approval.ts
552
// Interactively gate a request: prompt on the console and reject with a 403
// HTTPError unless the operator confirms. Used when state.manualApprove is set.
async function awaitApproval() {
  if (!await consola.prompt(`Accept incoming request?`, { type: "confirm" })) throw new HTTPError("Request rejected", Response.json({ message: "Request rejected" }, { status: 403 }));
}
555
+
556
+ //#endregion
557
+ //#region src/lib/rate-limit.ts
558
/**
 * Enforce an optional minimum interval between requests.
 * - disabled (`rateLimitSeconds` undefined): no-op
 * - first request: record the timestamp and pass
 * - inside the window: throw a 429 HTTPError, or — when `rateLimitWait` is
 *   set — sleep out the remainder and then proceed.
 */
async function checkRateLimit(state$1) {
  if (state$1.rateLimitSeconds === void 0) return;
  const now = Date.now();
  if (!state$1.lastRequestTimestamp) {
    state$1.lastRequestTimestamp = now;
    return;
  }
  const elapsedSeconds = (now - state$1.lastRequestTimestamp) / 1e3;
  if (elapsedSeconds > state$1.rateLimitSeconds) {
    state$1.lastRequestTimestamp = now;
    return;
  }
  const waitTimeSeconds = Math.ceil(state$1.rateLimitSeconds - elapsedSeconds);
  if (!state$1.rateLimitWait) {
    consola.warn(`Rate limit exceeded. Need to wait ${waitTimeSeconds} more seconds.`);
    throw new HTTPError("Rate limit exceeded", Response.json({ message: "Rate limit exceeded" }, { status: 429 }));
  }
  const waitTimeMs = waitTimeSeconds * 1e3;
  consola.warn(`Rate limit reached. Waiting ${waitTimeSeconds} seconds before proceeding...`);
  await sleep(waitTimeMs);
  // Record the moment the request actually proceeds. Previously this stored
  // the pre-sleep `now`, which silently shortened the next rate-limit window
  // by the time spent waiting.
  state$1.lastRequestTimestamp = Date.now();
  consola.info("Rate limit wait completed, proceeding with request");
}
581
+
582
+ //#endregion
583
+ //#region src/lib/tokenizer.ts
584
// Lazy loaders for each supported gpt-tokenizer encoding; imported on demand
// so unused encodings are never paid for at startup.
const ENCODING_MAP = {
  o200k_base: () => import("gpt-tokenizer/encoding/o200k_base"),
  cl100k_base: () => import("gpt-tokenizer/encoding/cl100k_base"),
  p50k_base: () => import("gpt-tokenizer/encoding/p50k_base"),
  p50k_edit: () => import("gpt-tokenizer/encoding/p50k_edit"),
  r50k_base: () => import("gpt-tokenizer/encoding/r50k_base")
};
// encoding name -> already-loaded tokenizer module
const encodingCache = /* @__PURE__ */ new Map();
592
/**
 * Token cost of a message's tool calls: a per-call init cost plus the encoded
 * JSON of each call, plus a single trailing funcEnd cost.
 */
function calculateToolCallsTokens(toolCalls, encoder, constants) {
  const costOf = (toolCall) => constants.funcInit + encoder.encode(JSON.stringify(toolCall)).length;
  return toolCalls.reduce((total, toolCall) => total + costOf(toolCall), constants.funcEnd);
}
604
/**
 * Token cost of a multi-part message content array: images cost their URL's
 * encoded length plus a flat 85 tokens; text parts cost their encoded length.
 */
function calculateContentPartsTokens(contentParts, encoder) {
  let total = 0;
  for (const part of contentParts) {
    if (part.type === "image_url") {
      total += encoder.encode(part.image_url.url).length + 85;
    } else if (part.text) {
      total += encoder.encode(part.text).length;
    }
  }
  return total;
}
613
/**
 * Token cost of one chat message: a fixed 3-token framing cost, the encoded
 * length of every string field, +1 for a `name` field, plus tool-call and
 * multi-part-content costs via their dedicated helpers.
 */
function calculateMessageTokens(message, encoder, constants) {
  const TOKENS_PER_MESSAGE = 3;
  const TOKENS_PER_NAME = 1;
  let total = TOKENS_PER_MESSAGE;
  for (const [field, fieldValue] of Object.entries(message)) {
    if (typeof fieldValue === "string") total += encoder.encode(fieldValue).length;
    if (field === "name") total += TOKENS_PER_NAME;
    if (field === "tool_calls") total += calculateToolCallsTokens(fieldValue, encoder, constants);
    if (field === "content" && Array.isArray(fieldValue)) total += calculateContentPartsTokens(fieldValue, encoder);
  }
  return total;
}
628
/**
 * Total token count for a list of messages (per-message costs plus a fixed
 * 3-token reply priming cost). Empty input costs 0.
 */
function calculateTokens(messages, encoder, constants) {
  if (messages.length === 0) return 0;
  const subtotal = messages.reduce(
    (sum, message) => sum + calculateMessageTokens(message, encoder, constants),
    0
  );
  return subtotal + 3;
}
638
/**
 * Resolve (and cache) the tokenizer module for an encoding name, falling back
 * to o200k_base for unknown encodings. Results are memoized under the
 * requested name, including fallbacks.
 */
async function getEncodeChatFunction(encoding) {
  const cached = encodingCache.get(encoding);
  if (cached) return cached;
  // Object.hasOwn: the previous `in` check walked the prototype chain, so a
  // name like "toString" selected Object.prototype.toString as a "loader".
  const loader = Object.hasOwn(ENCODING_MAP, encoding)
    ? ENCODING_MAP[encoding]
    : ENCODING_MAP.o200k_base;
  const encodingModule = await loader();
  encodingCache.set(encoding, encodingModule);
  return encodingModule;
}
656
/**
 * Tokenizer name advertised by the model's capabilities, defaulting to
 * o200k_base when absent or empty.
 */
function getTokenizerFromModel(model) {
  const { tokenizer } = model.capabilities;
  return tokenizer || "o200k_base";
}
662
/**
 * Per-model constants for the tool-token accounting. Only funcInit differs:
 * gpt-3.5-turbo / gpt-4 carry a slightly larger per-function init cost.
 */
function getModelConstants(model) {
  const isLegacyGpt = model.id === "gpt-3.5-turbo" || model.id === "gpt-4";
  return {
    funcInit: isLegacyGpt ? 10 : 7,
    propInit: 3,
    propKey: 3,
    enumInit: -3,
    enumItem: 3,
    funcEnd: 12
  };
}
682
/**
 * Calculate tokens for a single parameter
 */
// Cost model: a flat propKey cost, enum items (enumInit + per-item cost +
// encoded item text), the "name:type:description" line (trailing period
// stripped), and "key:value" for any schema keys beyond type/description/enum.
function calculateParameterTokens(key, prop, context) {
  const { encoder, constants } = context;
  let tokens = constants.propKey;
  // Non-object schemas still cost the flat key price.
  if (typeof prop !== "object" || prop === null) return tokens;
  const param = prop;
  const paramName = key;
  const paramType = param.type || "string";
  let paramDesc = param.description || "";
  if (param.enum && Array.isArray(param.enum)) {
    tokens += constants.enumInit;
    for (const item of param.enum) {
      tokens += constants.enumItem;
      tokens += encoder.encode(String(item)).length;
    }
  }
  if (paramDesc.endsWith(".")) paramDesc = paramDesc.slice(0, -1);
  const line = `${paramName}:${paramType}:${paramDesc}`;
  tokens += encoder.encode(line).length;
  // These three are already accounted for above.
  const excludedKeys = new Set([
    "type",
    "description",
    "enum"
  ]);
  for (const propertyName of Object.keys(param)) if (!excludedKeys.has(propertyName)) {
    const propertyValue = param[propertyName];
    const propertyText = typeof propertyValue === "string" ? propertyValue : JSON.stringify(propertyValue);
    tokens += encoder.encode(`${propertyName}:${propertyText}`).length;
  }
  return tokens;
}
/**
 * Calculate tokens for function parameters
 */
// `properties` gets propInit plus a per-property cost via
// calculateParameterTokens; every other schema key costs its "key:value"
// encoded length.
function calculateParametersTokens(parameters, encoder, constants) {
  if (!parameters || typeof parameters !== "object") return 0;
  const params = parameters;
  let tokens = 0;
  for (const [key, value] of Object.entries(params)) if (key === "properties") {
    const properties = value;
    if (Object.keys(properties).length > 0) {
      tokens += constants.propInit;
      for (const propKey of Object.keys(properties)) tokens += calculateParameterTokens(propKey, properties[propKey], {
        encoder,
        constants
      });
    }
  } else {
    const paramText = typeof value === "string" ? value : JSON.stringify(value);
    tokens += encoder.encode(`${key}:${paramText}`).length;
  }
  return tokens;
}
737
/**
 * Token cost of one tool definition: the fixed funcInit cost, the encoded
 * "name:description" line (trailing period stripped), plus the parameter
 * schema cost when a parameters object is present.
 */
function calculateToolTokens(tool, encoder, constants) {
  const func = tool.function;
  let description = func.description || "";
  if (description.endsWith(".")) description = description.slice(0, -1);
  let tokens = constants.funcInit + encoder.encode(`${func.name}:${description}`).length;
  if (typeof func.parameters === "object" && func.parameters !== null) {
    tokens += calculateParametersTokens(func.parameters, encoder, constants);
  }
  return tokens;
}
751
/**
 * Total token cost of the tools array: per-tool costs plus one trailing
 * funcEnd cost for the whole block.
 */
function numTokensForTools(tools, encoder, constants) {
  return tools.reduce(
    (total, tool) => total + calculateToolTokens(tool, encoder, constants),
    constants.funcEnd
  );
}
760
/**
 * Estimate prompt and completion token counts for a chat payload: input is
 * everything except assistant messages (plus tool definitions, if any),
 * output is the assistant messages.
 * @returns {{ input: number, output: number }}
 */
async function getTokenCount(payload, model) {
  const encoder = await getEncodeChatFunction(getTokenizerFromModel(model));
  const constants = getModelConstants(model);
  const inputMessages = payload.messages.filter((msg) => msg.role !== "assistant");
  const outputMessages = payload.messages.filter((msg) => msg.role === "assistant");
  let inputTokens = calculateTokens(inputMessages, encoder, constants);
  if (payload.tools && payload.tools.length > 0) {
    inputTokens += numTokensForTools(payload.tools, encoder, constants);
  }
  const outputTokens = calculateTokens(outputMessages, encoder, constants);
  return {
    input: inputTokens,
    output: outputTokens
  };
}
778
+
779
+ //#endregion
780
+ //#region src/services/copilot/create-chat-completions.ts
781
/**
 * POST the chat payload to Copilot. Vision headers are enabled when any
 * message carries image_url content; X-Initiator distinguishes agent turns
 * (payload already contains assistant/tool messages) from user turns.
 * Streaming payloads return an SSE event iterator, otherwise parsed JSON.
 */
async function createChatCompletions(payload) {
  if (!state.copilotToken) throw new Error("Copilot token not found");
  const enableVision = payload.messages.some((x) => typeof x.content !== "string" && x.content?.some((x$1) => x$1.type === "image_url"));
  const isAgentCall = payload.messages.some((msg) => ["assistant", "tool"].includes(msg.role));
  const headers = {
    ...copilotHeaders(state, enableVision),
    "X-Initiator": isAgentCall ? "agent" : "user"
  };
  const response = await fetch(`${copilotBaseUrl(state)}/chat/completions`, {
    method: "POST",
    headers,
    body: JSON.stringify(payload)
  });
  if (!response.ok) {
    consola.error("Failed to create chat completions", response);
    throw new HTTPError("Failed to create chat completions", response);
  }
  if (payload.stream) return events(response);
  return await response.json();
}

//#endregion
//#region src/routes/chat-completions/handler.ts
/**
 * Chat-completions request pipeline: rate limit -> token-count logging
 * (best-effort) -> optional manual approval -> fill max_tokens from the model
 * limits when absent -> forward upstream and relay either JSON or SSE.
 */
async function handleCompletion$1(c) {
  await checkRateLimit(state);
  let payload = await c.req.json();
  // Only the payload tail is logged to keep debug output bounded.
  consola.debug("Request payload:", JSON.stringify(payload).slice(-400));
  const selectedModel = state.models?.data.find((model) => model.id === payload.model);
  try {
    if (selectedModel) {
      const tokenCount = await getTokenCount(payload, selectedModel);
      consola.info("Current token count:", tokenCount);
    } else consola.warn("No model selected, skipping token count calculation");
  } catch (error) {
    // Token counting is informational; never fail the request over it.
    consola.warn("Failed to calculate token count:", error);
  }
  if (state.manualApprove) await awaitApproval();
  if (isNullish(payload.max_tokens)) {
    payload = {
      ...payload,
      max_tokens: selectedModel?.capabilities.limits.max_output_tokens
    };
    consola.debug("Set max_tokens to:", JSON.stringify(payload.max_tokens));
  }
  const response = await createChatCompletions(payload);
  if (isNonStreaming$2(response)) {
    consola.debug("Non-streaming response:", JSON.stringify(response));
    return c.json(response);
  }
  consola.debug("Streaming response");
  // Relay upstream SSE chunks to the client one-for-one.
  return streamSSE(c, async (stream) => {
    for await (const chunk of response) {
      consola.debug("Streaming chunk:", JSON.stringify(chunk));
      await stream.writeSSE(chunk);
    }
  });
}
// Non-streaming completion objects carry a top-level `choices` array;
// streaming responses are async event iterators without it.
function isNonStreaming$2(response) {
  return Object.hasOwn(response, "choices");
}
841
+
842
+ //#endregion
843
+ //#region src/routes/chat-completions/route.ts
844
// POST / — OpenAI-compatible chat-completions endpoint; all failures are
// converted to OpenAI-style JSON errors by forwardError.
const completionRoutes = new Hono();
completionRoutes.post("/", async (c) => {
  try {
    return await handleCompletion$1(c);
  } catch (error) {
    return await forwardError(c, error);
  }
});
852
+
853
+ //#endregion
854
+ //#region src/services/copilot/create-embeddings.ts
855
/** POST the embeddings payload to Copilot unchanged and return parsed JSON. */
async function createEmbeddings(payload) {
  if (!state.copilotToken) throw new Error("Copilot token not found");
  const url = `${copilotBaseUrl(state)}/embeddings`;
  const response = await fetch(url, {
    method: "POST",
    headers: copilotHeaders(state),
    body: JSON.stringify(payload)
  });
  if (!response.ok) throw new HTTPError("Failed to create embeddings", response);
  return response.json();
}
865
+
866
+ //#endregion
867
+ //#region src/routes/embeddings/route.ts
868
// Router for the OpenAI-compatible embeddings endpoint.
const embeddingRoutes = new Hono();
// POST / — forward the request body to Copilot and relay the JSON result.
embeddingRoutes.post("/", async (c) => {
  try {
    const payload = await c.req.json();
    return c.json(await createEmbeddings(payload));
  } catch (error) {
    return await forwardError(c, error);
  }
});
878
+
879
+ //#endregion
880
+ //#region src/lib/model-config.ts
881
/**
 * Per-model behavior overrides, keyed by (base) model id.
 *
 * Fields:
 * - enableCacheControl: attach `copilot_cache_control` markers to the system
 *   message and the last tool definition (prompt caching).
 * - thinkingMode: the model handles its own reasoning; suppresses the
 *   defaultReasoningEffort fallback in translateToOpenAI.
 * - defaultReasoningEffort / supportedReasoningEfforts: reasoning-effort
 *   values forwarded upstream.
 * - supportsToolChoice / supportsParallelToolCalls: tool-calling capability
 *   flags.
 *
 * Lookup happens via getModelConfig (exact id first, then longest prefix).
 */
const MODEL_CONFIGS = {
  // Claude family: caching on, upstream tool_choice unsupported.
  "claude-sonnet-4": {
    enableCacheControl: true,
    defaultReasoningEffort: void 0,
    supportsToolChoice: false,
    supportsParallelToolCalls: false
  },
  "claude-sonnet-4.5": {
    enableCacheControl: true,
    defaultReasoningEffort: void 0,
    supportsToolChoice: false,
    supportsParallelToolCalls: false
  },
  "claude-opus-4.5": {
    enableCacheControl: true,
    defaultReasoningEffort: void 0,
    supportsToolChoice: false,
    supportsParallelToolCalls: false
  },
  "claude-opus-4.6": {
    enableCacheControl: true,
    defaultReasoningEffort: "high",
    supportedReasoningEfforts: [
      "low",
      "medium",
      "high"
    ],
    supportsToolChoice: false,
    supportsParallelToolCalls: true
  },
  // GPT family: tool_choice supported; reasoning models marked thinkingMode.
  "gpt-4o": {
    supportsToolChoice: true,
    supportsParallelToolCalls: true
  },
  "gpt-4.1": {
    supportsToolChoice: true,
    supportsParallelToolCalls: true
  },
  "gpt-5": {
    thinkingMode: true,
    supportsToolChoice: true,
    supportsParallelToolCalls: true
  },
  "gpt-5.1-codex": {
    thinkingMode: true,
    defaultReasoningEffort: "high",
    supportedReasoningEfforts: [
      "low",
      "medium",
      "high"
    ],
    supportsToolChoice: true,
    supportsParallelToolCalls: true
  },
  "gpt-5.2-codex": {
    thinkingMode: true,
    defaultReasoningEffort: "high",
    supportedReasoningEfforts: [
      "low",
      "medium",
      "high",
      "xhigh"
    ],
    supportsToolChoice: true,
    supportsParallelToolCalls: true
  },
  // o-series reasoning models.
  "o3-mini": {
    thinkingMode: true,
    supportsToolChoice: true
  },
  "o4-mini": {
    thinkingMode: true,
    supportsToolChoice: true
  }
};
956
/**
 * Get model-specific configuration.
 * Returns the config for an exact match, or for the longest configured key
 * that prefixes the model id (so versioned ids resolve to their base entry).
 * Unknown "claude*" ids get a generic caching config; anything else an
 * empty config.
 */
function getModelConfig(modelId) {
  // Object.hasOwn guards against inherited Object.prototype keys (e.g. a
  // model id of "constructor") matching the bare property lookup.
  if (Object.hasOwn(MODEL_CONFIGS, modelId)) return MODEL_CONFIGS[modelId];
  // Longest key first so "claude-sonnet-4.5" wins over "claude-sonnet-4".
  const entries = Object.entries(MODEL_CONFIGS).sort((a, b) => b[0].length - a[0].length);
  for (const [key, config] of entries) {
    if (modelId.startsWith(key)) return config;
  }
  if (modelId.startsWith("claude")) {
    return {
      enableCacheControl: true,
      supportsToolChoice: false
    };
  }
  return {};
}
971
+
972
+ //#endregion
973
+ //#region src/routes/messages/utils.ts
974
/**
 * Maps an OpenAI finish_reason onto the Anthropic stop_reason vocabulary.
 * Returns null for null input and undefined for unrecognized reasons.
 */
function mapOpenAIStopReasonToAnthropic(finishReason) {
  if (finishReason === null) return null;
  switch (finishReason) {
    case "stop": return "end_turn";
    case "length": return "max_tokens";
    case "tool_calls": return "tool_use";
    case "content_filter": return "end_turn";
    default: return void 0;
  }
}
983
+
984
+ //#endregion
985
+ //#region src/routes/messages/non-stream-translation.ts
986
/**
 * Translates an Anthropic Messages API request into an OpenAI
 * chat-completions request for Copilot. Adds copilot_cache_control markers
 * for cache-enabled models and derives a reasoning_effort value when
 * applicable.
 */
function translateToOpenAI(payload) {
  const model = translateModelName(payload.model);
  const modelConfig = getModelConfig(model);
  const cacheEnabled = modelConfig.enableCacheControl === true;
  const messages = translateAnthropicMessagesToOpenAI(payload.messages, payload.system);
  if (cacheEnabled) {
    // Mark the system message as a cache breakpoint.
    const systemMessage = messages.find((m) => m.role === "system");
    if (systemMessage) systemMessage.copilot_cache_control = { type: "ephemeral" };
  }
  const tools = translateAnthropicToolsToOpenAI(payload.tools);
  if (cacheEnabled && tools && tools.length > 0) {
    // Mark the last tool definition so the tool block is cacheable too.
    tools[tools.length - 1].copilot_cache_control = { type: "ephemeral" };
  }
  // Explicit thinking budget always maps to "high"; otherwise fall back to
  // the model's default effort unless the model manages its own thinking.
  let reasoningEffort;
  if (payload.thinking?.budget_tokens) reasoningEffort = "high";
  else if (modelConfig.thinkingMode !== true && modelConfig.defaultReasoningEffort) reasoningEffort = modelConfig.defaultReasoningEffort;
  const request = {
    model,
    messages,
    max_tokens: payload.max_tokens,
    stop: payload.stop_sequences,
    stream: payload.stream,
    temperature: payload.temperature,
    top_p: payload.top_p,
    user: payload.metadata?.user_id,
    tools,
    tool_choice: translateAnthropicToolChoiceToOpenAI(payload.tool_choice),
    snippy: { enabled: false }
  };
  if (reasoningEffort) request.reasoning_effort = reasoningEffort;
  return request;
}
1015
/**
 * Normalizes versioned Anthropic model ids to the base ids Copilot knows.
 * "claude-sonnet-4-5-<date>" (hyphenated minor version) becomes
 * "claude-sonnet-4.5"; dated ids of known Claude families drop the date
 * suffix; anything else passes through unchanged.
 */
function translateModelName(model) {
  const hyphenated = /^(claude-(?:sonnet|opus|haiku)-4)-(5|6)-\d+$/.exec(model);
  if (hyphenated) return `${hyphenated[1]}.${hyphenated[2]}`;
  const datedPatterns = [
    /^(claude-sonnet-4)-\d+$/,
    /^(claude-opus-4)-\d+$/,
    /^(claude-haiku-4)-\d+$/,
    /^(claude-sonnet-4\.5)-\d+$/,
    /^(claude-opus-4\.5)-\d+$/,
    /^(claude-opus-4\.6)-\d+$/,
    /^(claude-haiku-4\.5)-\d+$/
  ];
  for (const pattern of datedPatterns) {
    const match = pattern.exec(model);
    if (match) return match[1];
  }
  return model;
}
1032
/**
 * Expands an Anthropic conversation (optional system prompt + messages)
 * into the flat OpenAI message list, system message(s) first.
 */
function translateAnthropicMessagesToOpenAI(anthropicMessages, system) {
  const result = [...handleSystemPrompt(system)];
  for (const message of anthropicMessages) {
    const translated = message.role === "user" ? handleUserMessage(message) : handleAssistantMessage(message);
    result.push(...translated);
  }
  return result;
}
1037
/**
 * Converts an Anthropic system prompt (string or text-block array) into
 * zero or one OpenAI system message. Block arrays are joined with blank
 * lines.
 */
function handleSystemPrompt(system) {
  if (!system) return [];
  const content = typeof system === "string"
    ? system
    : system.map((block) => block.text).join("\n\n");
  return [{ role: "system", content }];
}
1048
/**
 * Converts an Anthropic user message into OpenAI messages. tool_result
 * blocks become individual "tool" messages (emitted first, in order);
 * any remaining blocks are collapsed into a single "user" message.
 */
function handleUserMessage(message) {
  if (!Array.isArray(message.content)) {
    return [{ role: "user", content: mapContent(message.content) }];
  }
  const newMessages = [];
  const otherBlocks = [];
  for (const block of message.content) {
    if (block.type === "tool_result") {
      newMessages.push({
        role: "tool",
        tool_call_id: block.tool_use_id,
        content: mapContent(block.content)
      });
    } else {
      otherBlocks.push(block);
    }
  }
  if (otherBlocks.length > 0) {
    newMessages.push({ role: "user", content: mapContent(otherBlocks) });
  }
  return newMessages;
}
1068
/**
 * Converts an Anthropic assistant message into a single OpenAI assistant
 * message. When tool_use blocks are present, text comes first and thinking
 * text second (joined with blank lines), and each tool_use becomes an
 * OpenAI tool_call with JSON-encoded arguments.
 */
function handleAssistantMessage(message) {
  if (!Array.isArray(message.content)) {
    return [{ role: "assistant", content: mapContent(message.content) }];
  }
  const toolUseBlocks = message.content.filter((block) => block.type === "tool_use");
  if (toolUseBlocks.length === 0) {
    return [{ role: "assistant", content: mapContent(message.content) }];
  }
  const texts = message.content.filter((block) => block.type === "text").map((block) => block.text);
  const thinking = message.content.filter((block) => block.type === "thinking").map((block) => block.thinking);
  const combined = [...texts, ...thinking].join("\n\n");
  return [{
    role: "assistant",
    // OpenAI requires null (not "") when a tool-call message has no text.
    content: combined || null,
    tool_calls: toolUseBlocks.map((toolUse) => ({
      id: toolUse.id,
      type: "function",
      function: {
        name: toolUse.name,
        arguments: JSON.stringify(toolUse.input)
      }
    }))
  }];
}
1093
/**
 * Maps Anthropic block content to an OpenAI content value. Strings pass
 * through; non-arrays become null; text/thinking-only arrays collapse to
 * one joined string; arrays containing an image become multi-part content
 * with data-URL image parts.
 */
function mapContent(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return null;
  const containsImage = content.some((block) => block.type === "image");
  if (!containsImage) {
    const pieces = [];
    for (const block of content) {
      if (block.type === "text") pieces.push(block.text);
      else if (block.type === "thinking") pieces.push(block.thinking);
    }
    return pieces.join("\n\n");
  }
  const contentParts = [];
  for (const block of content) {
    if (block.type === "text") {
      contentParts.push({ type: "text", text: block.text });
    } else if (block.type === "thinking") {
      // Thinking text is downgraded to a plain text part.
      contentParts.push({ type: "text", text: block.thinking });
    } else if (block.type === "image") {
      contentParts.push({
        type: "image_url",
        image_url: { url: `data:${block.source.media_type};base64,${block.source.data}` }
      });
    }
  }
  return contentParts;
}
1120
/**
 * Converts Anthropic tool definitions into OpenAI function-tool format.
 * Returns undefined when no tools are given.
 */
function translateAnthropicToolsToOpenAI(anthropicTools) {
  if (!anthropicTools) return;
  const openAITools = [];
  for (const tool of anthropicTools) {
    openAITools.push({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.input_schema
      }
    });
  }
  return openAITools;
}
1131
/**
 * Converts an Anthropic tool_choice into the OpenAI equivalent:
 * auto→"auto", any→"required", none→"none", tool→named function object.
 * Returns undefined for missing/unknown choices or a nameless tool choice.
 */
function translateAnthropicToolChoiceToOpenAI(anthropicToolChoice) {
  if (!anthropicToolChoice) return;
  const { type } = anthropicToolChoice;
  if (type === "auto") return "auto";
  if (type === "any") return "required";
  if (type === "none") return "none";
  if (type === "tool" && anthropicToolChoice.name) {
    return {
      type: "function",
      function: { name: anthropicToolChoice.name }
    };
  }
  return;
}
1146
/**
 * Translates a non-streaming OpenAI chat-completion response into an
 * Anthropic Messages API response. All text blocks (across choices) are
 * emitted before all tool_use blocks.
 */
function translateToAnthropic(response) {
  const allTextBlocks = [];
  const allToolUseBlocks = [];
  let stopReason = null;
  // Seed with the first choice's finish reason; the loop below lets a
  // "tool_calls" reason from any choice override a plain "stop".
  stopReason = response.choices[0]?.finish_reason ?? stopReason;
  for (const choice of response.choices) {
    const textBlocks = getAnthropicTextBlocks(choice.message.content);
    const toolUseBlocks = getAnthropicToolUseBlocks(choice.message.tool_calls);
    allTextBlocks.push(...textBlocks);
    allToolUseBlocks.push(...toolUseBlocks);
    if (choice.finish_reason === "tool_calls" || stopReason === "stop") stopReason = choice.finish_reason;
  }
  return {
    id: response.id,
    type: "message",
    role: "assistant",
    model: response.model,
    content: [...allTextBlocks, ...allToolUseBlocks],
    stop_reason: mapOpenAIStopReasonToAnthropic(stopReason),
    stop_sequence: null,
    usage: {
      // Cached prompt tokens are reported separately (below), so they are
      // subtracted from the regular input token count.
      input_tokens: (response.usage?.prompt_tokens ?? 0) - (response.usage?.prompt_tokens_details?.cached_tokens ?? 0),
      output_tokens: response.usage?.completion_tokens ?? 0,
      ...response.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: response.usage.prompt_tokens_details.cached_tokens }
    }
  };
}
1173
/**
 * Extracts Anthropic text blocks from OpenAI message content. A plain
 * string becomes one block; a part array keeps only its text parts;
 * anything else yields no blocks.
 */
function getAnthropicTextBlocks(messageContent) {
  if (typeof messageContent === "string") {
    return [{ type: "text", text: messageContent }];
  }
  if (!Array.isArray(messageContent)) return [];
  const blocks = [];
  for (const part of messageContent) {
    if (part.type === "text") blocks.push({ type: "text", text: part.text });
  }
  return blocks;
}
1184
/**
 * Converts OpenAI tool_calls into Anthropic tool_use blocks, parsing each
 * call's JSON argument string into an object. Missing input yields [].
 */
function getAnthropicToolUseBlocks(toolCalls) {
  if (!toolCalls) return [];
  const blocks = [];
  for (const toolCall of toolCalls) {
    blocks.push({
      type: "tool_use",
      id: toolCall.id,
      name: toolCall.function.name,
      input: JSON.parse(toolCall.function.arguments)
    });
  }
  return blocks;
}
1193
+
1194
+ //#endregion
1195
+ //#region src/routes/messages/count-tokens-handler.ts
1196
/**
 * Handles token counting for Anthropic messages
 *
 * Translates the payload to OpenAI shape, counts tokens with the selected
 * model's tokenizer, applies fixed adjustments, and replies with
 * `{ input_tokens }`. Any failure falls back to 1 token so this endpoint
 * never hard-errors.
 */
async function handleCountTokens(c) {
  try {
    const anthropicBeta = c.req.header("anthropic-beta");
    const anthropicPayload = await c.req.json();
    const openAIPayload = translateToOpenAI(anthropicPayload);
    const selectedModel = state.models?.data.find((model) => model.id === anthropicPayload.model);
    if (!selectedModel) {
      consola.warn("Model not found, returning default token count");
      return c.json({ input_tokens: 1 });
    }
    const tokenCount = await getTokenCount(openAIPayload, selectedModel);
    if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
      // Fixed overhead added when tools are present, skipped for Claude
      // Code requests that carry MCP tools. The constants (346 / 480) are
      // presumably empirical calibration values — TODO confirm.
      let mcpToolExist = false;
      if (anthropicBeta?.startsWith("claude-code")) mcpToolExist = anthropicPayload.tools.some((tool) => tool.name.startsWith("mcp__"));
      if (!mcpToolExist) {
        if (anthropicPayload.model.startsWith("claude")) tokenCount.input = tokenCount.input + 346;
        else if (anthropicPayload.model.startsWith("grok")) tokenCount.input = tokenCount.input + 480;
      }
    }
    // Per-family scaling factors — presumably to compensate for tokenizer
    // differences between the local count and the provider's; verify.
    let finalTokenCount = tokenCount.input + tokenCount.output;
    if (anthropicPayload.model.startsWith("claude")) finalTokenCount = Math.round(finalTokenCount * 1.15);
    else if (anthropicPayload.model.startsWith("grok")) finalTokenCount = Math.round(finalTokenCount * 1.03);
    consola.info("Token count:", finalTokenCount);
    return c.json({ input_tokens: finalTokenCount });
  } catch (error) {
    consola.error("Error counting tokens:", error);
    return c.json({ input_tokens: 1 });
  }
}
1228
+
1229
+ //#endregion
1230
+ //#region src/routes/messages/stream-translation.ts
1231
/**
 * Returns true when the currently open content block (if any) belongs to a
 * tool call, i.e. some tracked tool call owns the current block index.
 */
function isToolBlockOpen(state$1) {
  if (!state$1.contentBlockOpen) return false;
  for (const toolCall of Object.values(state$1.toolCalls)) {
    if (toolCall.anthropicBlockIndex === state$1.contentBlockIndex) return true;
  }
  return false;
}
1235
/**
 * Translates one OpenAI streaming chunk into zero or more Anthropic SSE
 * events. `state$1` is mutated across calls to track whether message_start
 * was sent, the current content-block index, whether a block is open, and
 * tool-call bookkeeping keyed by the OpenAI tool-call index.
 */
function translateChunkToAnthropicEvents(chunk, state$1) {
  const events$1 = [];
  if (chunk.choices.length === 0) return events$1;
  // Only the first choice is translated.
  const choice = chunk.choices[0];
  const { delta } = choice;
  // 1. Emit message_start exactly once, before any content events.
  if (!state$1.messageStartSent) {
    events$1.push({
      type: "message_start",
      message: {
        id: chunk.id,
        type: "message",
        role: "assistant",
        content: [],
        model: chunk.model,
        stop_reason: null,
        stop_sequence: null,
        usage: {
          // Cached prompt tokens are reported separately below.
          input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
          output_tokens: 0,
          ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
        }
      }
    });
    state$1.messageStartSent = true;
  }
  // 2. Text delta: close a dangling tool block, open a text block if
  //    needed, then emit the text_delta.
  if (delta.content) {
    if (isToolBlockOpen(state$1)) {
      events$1.push({
        type: "content_block_stop",
        index: state$1.contentBlockIndex
      });
      state$1.contentBlockIndex++;
      state$1.contentBlockOpen = false;
    }
    if (!state$1.contentBlockOpen) {
      events$1.push({
        type: "content_block_start",
        index: state$1.contentBlockIndex,
        content_block: {
          type: "text",
          text: ""
        }
      });
      state$1.contentBlockOpen = true;
    }
    events$1.push({
      type: "content_block_delta",
      index: state$1.contentBlockIndex,
      delta: {
        type: "text_delta",
        text: delta.content
      }
    });
  }
  // 3. Tool-call deltas: a new id+name opens a tool_use block; argument
  //    fragments become input_json_delta events on the owning block.
  if (delta.tool_calls) for (const toolCall of delta.tool_calls) {
    if (toolCall.id && toolCall.function?.name) {
      if (state$1.contentBlockOpen) {
        events$1.push({
          type: "content_block_stop",
          index: state$1.contentBlockIndex
        });
        state$1.contentBlockIndex++;
        state$1.contentBlockOpen = false;
      }
      const anthropicBlockIndex = state$1.contentBlockIndex;
      // Remember the mapping from OpenAI tool-call index to the Anthropic
      // block index so later argument fragments land on the right block.
      state$1.toolCalls[toolCall.index] = {
        id: toolCall.id,
        name: toolCall.function.name,
        anthropicBlockIndex
      };
      events$1.push({
        type: "content_block_start",
        index: anthropicBlockIndex,
        content_block: {
          type: "tool_use",
          id: toolCall.id,
          name: toolCall.function.name,
          input: {}
        }
      });
      state$1.contentBlockOpen = true;
    }
    if (toolCall.function?.arguments) {
      const toolCallInfo = state$1.toolCalls[toolCall.index];
      if (toolCallInfo) events$1.push({
        type: "content_block_delta",
        index: toolCallInfo.anthropicBlockIndex,
        delta: {
          type: "input_json_delta",
          partial_json: toolCall.function.arguments
        }
      });
    }
  }
  // 4. finish_reason: close any open block, emit message_delta with the
  //    mapped stop reason and final usage, then message_stop.
  if (choice.finish_reason) {
    if (state$1.contentBlockOpen) {
      events$1.push({
        type: "content_block_stop",
        index: state$1.contentBlockIndex
      });
      state$1.contentBlockOpen = false;
    }
    events$1.push({
      type: "message_delta",
      delta: {
        stop_reason: mapOpenAIStopReasonToAnthropic(choice.finish_reason),
        stop_sequence: null
      },
      usage: {
        input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
        output_tokens: chunk.usage?.completion_tokens ?? 0,
        ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
      }
    }, { type: "message_stop" });
  }
  return events$1;
}
1352
+
1353
+ //#endregion
1354
+ //#region src/routes/messages/handler.ts
1355
/**
 * Handles an Anthropic Messages API request: enforces the rate limit and
 * optional manual approval, translates the payload to OpenAI shape,
 * forwards it to Copilot, and translates the response (JSON or SSE stream)
 * back to Anthropic format.
 */
async function handleCompletion(c) {
  await checkRateLimit(state);
  const anthropicPayload = await c.req.json();
  consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload));
  const openAIPayload = translateToOpenAI(anthropicPayload);
  consola.debug("Translated OpenAI request payload:", JSON.stringify(openAIPayload));
  if (state.manualApprove) await awaitApproval();
  const response = await createChatCompletions(openAIPayload);
  if (isNonStreaming$1(response)) {
    consola.debug("Non-streaming response from Copilot:", JSON.stringify(response).slice(-400));
    const anthropicResponse = translateToAnthropic(response);
    consola.debug("Translated Anthropic response:", JSON.stringify(anthropicResponse));
    return c.json(anthropicResponse);
  }
  consola.debug("Streaming response from Copilot");
  return streamSSE(c, async (stream) => {
    // Translation state shared across all chunks of this one response.
    const streamState = {
      messageStartSent: false,
      contentBlockIndex: 0,
      contentBlockOpen: false,
      toolCalls: {}
    };
    for await (const rawEvent of response) {
      consola.debug("Copilot raw stream event:", JSON.stringify(rawEvent));
      // "[DONE]" is the OpenAI stream terminator; empty events are keep-alives.
      if (rawEvent.data === "[DONE]") break;
      if (!rawEvent.data) continue;
      const chunk = JSON.parse(rawEvent.data);
      const events$1 = translateChunkToAnthropicEvents(chunk, streamState);
      for (const event of events$1) {
        consola.debug("Translated Anthropic event:", JSON.stringify(event));
        await stream.writeSSE({
          event: event.type,
          data: JSON.stringify(event)
        });
      }
    }
  });
}
1393
// Distinguishes a completed completion object (has "choices") from a
// streaming async iterable of SSE events.
function isNonStreaming$1(response) {
  const hasChoices = Object.hasOwn(response, "choices");
  return hasChoices;
}
1396
+
1397
+ //#endregion
1398
+ //#region src/routes/messages/route.ts
1399
// Router for the Anthropic Messages API surface (mounted at /v1/messages).
const messageRoutes = new Hono();
// POST / — full message completion (streaming or not).
messageRoutes.post("/", async (c) => {
  try {
    return await handleCompletion(c);
  } catch (error) {
    return await forwardError(c, error);
  }
});
// POST /count_tokens — Anthropic-style token counting endpoint.
messageRoutes.post("/count_tokens", async (c) => {
  try {
    return await handleCountTokens(c);
  } catch (error) {
    return await forwardError(c, error);
  }
});
1414
+
1415
+ //#endregion
1416
+ //#region src/routes/models/route.ts
1417
// Router exposing the cached Copilot model list in OpenAI-compatible shape.
const modelRoutes = new Hono();
modelRoutes.get("/", async (c) => {
  try {
    // Lazily populate the model cache on first request.
    if (!state.models) await cacheModels();
    const models = state.models?.data.map((model) => ({
      id: model.id,
      object: "model",
      type: "model",
      // No creation timestamps upstream; report the Unix epoch.
      created: 0,
      created_at: (/* @__PURE__ */ new Date(0)).toISOString(),
      owned_by: model.vendor,
      display_name: model.name
    }));
    return c.json({
      object: "list",
      data: models,
      has_more: false
    });
  } catch (error) {
    return await forwardError(c, error);
  }
});
1439
+
1440
+ //#endregion
1441
+ //#region src/services/copilot/create-responses.ts
1442
/**
 * Forwards an OpenAI Responses API payload to Copilot. Sends the
 * X-Initiator header as "agent" when the input already contains assistant
 * turns, and vision-enabled headers when image content is present.
 * Returns an SSE event iterator for streaming requests, otherwise the
 * parsed JSON body. Throws HTTPError on a non-2xx upstream response.
 */
async function createResponses(payload) {
  if (!state.copilotToken) throw new Error("Copilot token not found");
  const visionEnabled = hasVisionInput(payload);
  const initiator = payload.input.some((item) => item.role === "assistant") ? "agent" : "user";
  const response = await fetch(`${copilotBaseUrl(state)}/responses`, {
    method: "POST",
    headers: {
      ...copilotHeaders(state, visionEnabled),
      "X-Initiator": initiator
    },
    body: JSON.stringify(payload)
  });
  if (!response.ok) {
    consola.error("Failed to create responses", response);
    throw new HTTPError("Failed to create responses", response);
  }
  return payload.stream ? events(response) : await response.json();
}
1462
/**
 * Returns true when any input item carries an image-like content part
 * (input_image / image / image_url / image_file). String content never
 * counts as vision input.
 */
function hasVisionInput(payload) {
  const visionTypes = new Set([
    "input_image",
    "image",
    "image_url",
    "image_file"
  ]);
  return payload.input.some(
    (item) => Array.isArray(item.content) && item.content.some((part) => visionTypes.has(part.type))
  );
}
1474
+
1475
+ //#endregion
1476
+ //#region src/routes/responses/handler.ts
1477
/**
 * Handles an OpenAI Responses API request: enforces the rate limit and
 * optional manual approval, forwards the payload to Copilot, and relays
 * either the JSON body or the SSE stream back to the client unchanged.
 */
async function handleResponses(c) {
  await checkRateLimit(state);
  const payload = await c.req.json();
  consola.debug("Responses API request payload:", JSON.stringify(payload).slice(-400));
  if (state.manualApprove) await awaitApproval();
  const response = await createResponses(payload);
  if (isNonStreaming(response)) {
    consola.debug("Non-streaming responses:", JSON.stringify(response));
    return c.json(response);
  }
  consola.debug("Streaming responses");
  return streamSSE(c, async (stream) => {
    // Pass-through relay: no translation of Responses API events.
    for await (const chunk of response) {
      consola.debug("Responses streaming chunk:", JSON.stringify(chunk));
      await stream.writeSSE(chunk);
    }
  });
}
1495
// A completed Responses API payload carries a top-level "output" field; a
// streaming result is an async iterable of SSE events instead.
function isNonStreaming(response) {
  const hasOutput = Object.hasOwn(response, "output");
  return hasOutput;
}
1498
+
1499
+ //#endregion
1500
+ //#region src/routes/responses/route.ts
1501
// Router for the OpenAI Responses API endpoint.
const responsesRoutes = new Hono();
// POST / — delegate to the responses handler; errors are converted into
// HTTP responses by forwardError.
responsesRoutes.post("/", async (c) => {
  try {
    return await handleResponses(c);
  } catch (error) {
    return await forwardError(c, error);
  }
});
1509
+
1510
+ //#endregion
1511
+ //#region src/routes/token/route.ts
1512
// Router exposing the current Copilot bearer token (debugging aid).
const tokenRoute = new Hono();
tokenRoute.get("/", (c) => {
  try {
    return c.json({ token: state.copilotToken });
  } catch (error) {
    // Log via consola like the rest of the server (was bare console.error).
    consola.error("Error fetching token:", error);
    return c.json({
      error: "Failed to fetch token",
      token: null
    }, 500);
  }
});
1524
+
1525
+ //#endregion
1526
+ //#region src/routes/usage/route.ts
1527
// Router exposing Copilot usage/quota information.
const usageRoute = new Hono();
usageRoute.get("/", async (c) => {
  try {
    const usage = await getCopilotUsage();
    return c.json(usage);
  } catch (error) {
    // Log via consola like the rest of the server (was bare console.error).
    consola.error("Error fetching Copilot usage:", error);
    return c.json({ error: "Failed to fetch Copilot usage" }, 500);
  }
});
1537
+
1538
+ //#endregion
1539
+ //#region src/server.ts
1540
// Top-level HTTP application: request logging, permissive CORS, and every
// router mounted both at the root and under /v1 — except the Anthropic
// /messages router, which is only mounted under /v1.
const server = new Hono();
server.use(logger());
server.use(cors());
server.get("/", (c) => c.text("Server running"));
server.route("/chat/completions", completionRoutes);
server.route("/models", modelRoutes);
server.route("/embeddings", embeddingRoutes);
server.route("/responses", responsesRoutes);
server.route("/usage", usageRoute);
server.route("/token", tokenRoute);
server.route("/v1/chat/completions", completionRoutes);
server.route("/v1/models", modelRoutes);
server.route("/v1/embeddings", embeddingRoutes);
server.route("/v1/responses", responsesRoutes);
server.route("/v1/messages", messageRoutes);
1555
+
1556
+ //#endregion
1557
+ //#region src/start.ts
1558
/**
 * Boots the proxy: applies CLI options to the shared state, acquires
 * GitHub/Copilot tokens, caches model metadata, optionally prints a
 * ready-made Claude Code launch command, then starts the HTTP server.
 */
async function runServer(options) {
  if (options.proxyEnv) initProxyFromEnv();
  if (options.verbose) {
    // consola level 5 enables debug output.
    consola.level = 5;
    consola.info("Verbose logging enabled");
  }
  state.accountType = options.accountType;
  if (options.accountType !== "individual") consola.info(`Using ${options.accountType} plan GitHub account`);
  state.manualApprove = options.manual;
  state.rateLimitSeconds = options.rateLimit;
  state.rateLimitWait = options.rateLimitWait;
  state.showToken = options.showToken;
  await ensurePaths();
  await cacheVSCodeVersion();
  // A token passed on the command line skips the interactive auth flow.
  if (options.githubToken) {
    state.githubToken = options.githubToken;
    consola.info("Using provided GitHub token");
  } else await setupGitHubToken();
  await setupCopilotToken();
  await cacheModels();
  consola.info(`Available models: \n${state.models?.data.map((model) => `- ${model.id}`).join("\n")}`);
  const serverUrl = `http://localhost:${options.port}`;
  if (options.claudeCode) {
    invariant(state.models, "Models should be loaded by now");
    const selectedModel = await consola.prompt("Select a model to use with Claude Code", {
      type: "select",
      options: state.models.data.map((model) => model.id)
    });
    const selectedSmallModel = await consola.prompt("Select a small model to use with Claude Code", {
      type: "select",
      options: state.models.data.map((model) => model.id)
    });
    const command = generateEnvScript({
      ANTHROPIC_BASE_URL: serverUrl,
      ANTHROPIC_AUTH_TOKEN: "dummy",
      ANTHROPIC_MODEL: selectedModel,
      ANTHROPIC_DEFAULT_SONNET_MODEL: selectedModel,
      ANTHROPIC_SMALL_FAST_MODEL: selectedSmallModel,
      ANTHROPIC_DEFAULT_HAIKU_MODEL: selectedSmallModel,
      DISABLE_NON_ESSENTIAL_MODEL_CALLS: "1",
      CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: "1"
    }, "claude");
    // Clipboard access can fail (e.g. headless environments); fall back to
    // printing the command rather than aborting startup.
    try {
      clipboard.writeSync(command);
      consola.success("Copied Claude Code command to clipboard!");
    } catch {
      consola.warn("Failed to copy to clipboard. Here is the Claude Code command:");
      consola.log(command);
    }
  }
  consola.box(`🌐 Usage Viewer: https://jer-y.github.io/copilot-proxy?endpoint=${serverUrl}/usage`);
  serve({
    fetch: server.fetch,
    port: options.port
  });
}
1614
// CLI definition for the `start` subcommand: declares all flags and
// delegates to runServer with parsed values.
const start = defineCommand({
  meta: {
    name: "start",
    description: "Start the Copilot API server"
  },
  args: {
    "port": {
      alias: "p",
      type: "string",
      default: "4141",
      description: "Port to listen on"
    },
    "verbose": {
      alias: "v",
      type: "boolean",
      default: false,
      description: "Enable verbose logging"
    },
    "account-type": {
      alias: "a",
      type: "string",
      default: "individual",
      description: "Account type to use (individual, business, enterprise)"
    },
    "manual": {
      type: "boolean",
      default: false,
      description: "Enable manual request approval"
    },
    "rate-limit": {
      alias: "r",
      type: "string",
      description: "Rate limit in seconds between requests"
    },
    "wait": {
      alias: "w",
      type: "boolean",
      default: false,
      description: "Wait instead of error when rate limit is hit. Has no effect if rate limit is not set"
    },
    "github-token": {
      alias: "g",
      type: "string",
      description: "Provide GitHub token directly (must be generated using the `auth` subcommand)"
    },
    "claude-code": {
      alias: "c",
      type: "boolean",
      default: false,
      description: "Generate a command to launch Claude Code with Copilot API config"
    },
    "show-token": {
      type: "boolean",
      default: false,
      description: "Show GitHub and Copilot tokens on fetch and refresh"
    },
    "proxy-env": {
      type: "boolean",
      default: false,
      description: "Initialize proxy from environment variables"
    }
  },
  run({ args }) {
    // An omitted rate limit stays undefined (meaning "no limit").
    const rateLimitRaw = args["rate-limit"];
    const rateLimit = rateLimitRaw === void 0 ? void 0 : Number.parseInt(rateLimitRaw, 10);
    return runServer({
      port: Number.parseInt(args.port, 10),
      verbose: args.verbose,
      accountType: args["account-type"],
      manual: args.manual,
      rateLimit,
      rateLimitWait: args.wait,
      githubToken: args["github-token"],
      claudeCode: args["claude-code"],
      showToken: args["show-token"],
      proxyEnv: args["proxy-env"]
    });
  }
});
1693
+
1694
+ //#endregion
1695
+ //#region src/main.ts
1696
// Root CLI command: dispatches to the subcommands defined earlier in this
// bundle (auth, start, check-usage, debug).
const main = defineCommand({
  meta: {
    name: "copilot-proxy",
    description: "A wrapper around GitHub Copilot API to make it OpenAI compatible, making it usable for other tools."
  },
  subCommands: {
    auth,
    start,
    "check-usage": checkUsage,
    debug
  }
});
// Top-level await: run the CLI as soon as the module loads.
await runMain(main);
1709
+
1710
+ //#endregion
1711
+ export { };
1712
+ //# sourceMappingURL=main.js.map