@ashsec/copilot-api 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/main.js ADDED
@@ -0,0 +1,1697 @@
+ #!/usr/bin/env node
+ import { defineCommand, runMain } from "citty";
+ import consola from "consola";
+ import fs from "node:fs/promises";
+ import os from "node:os";
+ import path from "node:path";
+ import { randomUUID } from "node:crypto";
+ import { events } from "fetch-event-stream";
+ import clipboard from "clipboardy";
+ import { serve } from "srvx";
+ import invariant from "tiny-invariant";
+ import { getProxyForUrl } from "proxy-from-env";
+ import { Agent, ProxyAgent, setGlobalDispatcher } from "undici";
+ import { execSync } from "node:child_process";
+ import process$1 from "node:process";
+ import { Hono } from "hono";
+ import { cors } from "hono/cors";
+ import { logger } from "hono/logger";
+ import { streamSSE } from "hono/streaming";
+
+ //#region src/lib/paths.ts
+ const APP_DIR = path.join(os.homedir(), ".local", "share", "copilot-api");
+ const GITHUB_TOKEN_PATH = path.join(APP_DIR, "github_token");
+ const AZURE_OPENAI_CONFIG_PATH = path.join(APP_DIR, "azure_openai_config");
+ const PATHS = {
+ APP_DIR,
+ GITHUB_TOKEN_PATH,
+ AZURE_OPENAI_CONFIG_PATH
+ };
+ async function ensurePaths() {
+ await fs.mkdir(PATHS.APP_DIR, { recursive: true });
+ await ensureFile(PATHS.GITHUB_TOKEN_PATH);
+ await ensureFile(PATHS.AZURE_OPENAI_CONFIG_PATH);
+ }
+ async function ensureFile(filePath) {
+ try {
+ await fs.access(filePath, fs.constants.W_OK);
+ } catch {
+ await fs.writeFile(filePath, "");
+ await fs.chmod(filePath, 384);
+ }
+ }
+
+ //#endregion
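+ // Illustrative sketch: ensureFile() above creates the file empty and then
+ // restricts it to the owner. 384 is the decimal form of octal 0o600, so an
+ // equivalent, more explicit call would be:
+ //   await fs.chmod(filePath, 0o600); // rw for owner only, same bits as 384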
+ //#region src/lib/state.ts
+ const state = {
+ accountType: "individual",
+ manualApprove: false,
+ rateLimitWait: false,
+ showToken: false
+ };
+
+ //#endregion
+ //#region src/lib/api-config.ts
+ const standardHeaders = () => ({
+ "content-type": "application/json",
+ accept: "application/json"
+ });
+ const COPILOT_VERSION = "0.26.7";
+ const EDITOR_PLUGIN_VERSION = `copilot-chat/${COPILOT_VERSION}`;
+ const USER_AGENT = `GitHubCopilotChat/${COPILOT_VERSION}`;
+ const API_VERSION = "2025-04-01";
+ const copilotBaseUrl = (state$1) => state$1.accountType === "individual" ? "https://api.githubcopilot.com" : `https://api.${state$1.accountType}.githubcopilot.com`;
+ const copilotHeaders = (state$1, vision = false) => {
+ const headers = {
+ Authorization: `Bearer ${state$1.copilotToken}`,
+ "content-type": standardHeaders()["content-type"],
+ "copilot-integration-id": "vscode-chat",
+ "editor-version": `vscode/${state$1.vsCodeVersion}`,
+ "editor-plugin-version": EDITOR_PLUGIN_VERSION,
+ "user-agent": USER_AGENT,
+ "openai-intent": "conversation-panel",
+ "x-github-api-version": API_VERSION,
+ "x-request-id": randomUUID(),
+ "x-vscode-user-agent-library-version": "electron-fetch"
+ };
+ if (vision) headers["copilot-vision-request"] = "true";
+ return headers;
+ };
+ const GITHUB_API_BASE_URL = "https://api.github.com";
+ const githubHeaders = (state$1) => ({
+ ...standardHeaders(),
+ authorization: `token ${state$1.githubToken}`,
+ "editor-version": `vscode/${state$1.vsCodeVersion}`,
+ "editor-plugin-version": EDITOR_PLUGIN_VERSION,
+ "user-agent": USER_AGENT,
+ "x-github-api-version": API_VERSION,
+ "x-vscode-user-agent-library-version": "electron-fetch"
+ });
+ const GITHUB_BASE_URL = "https://github.com";
+ const GITHUB_CLIENT_ID = "Iv1.b507a08c87ecfe98";
+ const GITHUB_APP_SCOPES = ["read:user"].join(" ");
+
+ //#endregion
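+ // Illustrative sketch of how copilotBaseUrl() resolves per account type
+ // (non-"individual" values are interpolated into the subdomain):
+ //   copilotBaseUrl({ accountType: "individual" }) // https://api.githubcopilot.com
+ //   copilotBaseUrl({ accountType: "business" })   // https://api.business.githubcopilot.com
+ //   copilotBaseUrl({ accountType: "enterprise" }) // https://api.enterprise.githubcopilot.com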
+ //#region src/lib/error.ts
+ var HTTPError = class extends Error {
+ response;
+ constructor(message, response) {
+ super(message);
+ this.response = response;
+ }
+ };
+ async function forwardError(c, error) {
+ consola.error("Error occurred:", error);
+ if (error instanceof HTTPError) {
+ const errorText = await error.response.text();
+ let errorJson;
+ try {
+ errorJson = JSON.parse(errorText);
+ } catch {
+ errorJson = errorText;
+ }
+ consola.error("HTTP error:", errorJson);
+ return c.json({ error: {
+ message: errorText,
+ type: "error"
+ } }, error.response.status);
+ }
+ return c.json({ error: {
+ message: error.message,
+ type: "error"
+ } }, 500);
+ }
+
+ //#endregion
+ //#region src/services/github/get-copilot-token.ts
+ const getCopilotToken = async () => {
+ const response = await fetch(`${GITHUB_API_BASE_URL}/copilot_internal/v2/token`, { headers: githubHeaders(state) });
+ if (!response.ok) throw new HTTPError("Failed to get Copilot token", response);
+ return await response.json();
+ };
+
+ //#endregion
+ //#region src/services/github/get-device-code.ts
+ async function getDeviceCode() {
+ const response = await fetch(`${GITHUB_BASE_URL}/login/device/code`, {
+ method: "POST",
+ headers: standardHeaders(),
+ body: JSON.stringify({
+ client_id: GITHUB_CLIENT_ID,
+ scope: GITHUB_APP_SCOPES
+ })
+ });
+ if (!response.ok) throw new HTTPError("Failed to get device code", response);
+ return await response.json();
+ }
+
+ //#endregion
+ //#region src/services/github/get-user.ts
+ async function getGitHubUser() {
+ const response = await fetch(`${GITHUB_API_BASE_URL}/user`, { headers: {
+ authorization: `token ${state.githubToken}`,
+ ...standardHeaders()
+ } });
+ if (!response.ok) throw new HTTPError("Failed to get GitHub user", response);
+ return await response.json();
+ }
+
+ //#endregion
+ //#region src/services/azure-openai/config.ts
+ const AZURE_OPENAI_MODEL_PREFIX = "azure_openai_";
+ async function loadAzureOpenAIConfig() {
+ try {
+ const content = await fs.readFile(PATHS.AZURE_OPENAI_CONFIG_PATH, "utf8");
+ if (!content.trim()) return null;
+ const decoded = Buffer.from(content.trim(), "base64").toString("utf8");
+ const config = JSON.parse(decoded);
+ if (!config.endpoint || !config.apiKey) return null;
+ return config;
+ } catch {
+ return null;
+ }
+ }
+ async function saveAzureOpenAIConfig(config) {
+ const encoded = Buffer.from(JSON.stringify(config)).toString("base64");
+ await fs.writeFile(PATHS.AZURE_OPENAI_CONFIG_PATH, encoded, "utf8");
+ await fs.chmod(PATHS.AZURE_OPENAI_CONFIG_PATH, 384);
+ consola.success("Azure OpenAI configuration saved");
+ }
+ async function promptAzureOpenAISetup() {
+ if (!await consola.prompt("Would you like to add a custom Azure OpenAI endpoint?", {
+ type: "confirm",
+ initial: false
+ })) return null;
+ const endpoint = await consola.prompt("Enter your Azure OpenAI endpoint URL (e.g., https://your-resource.openai.azure.com):", { type: "text" });
+ if (!endpoint || typeof endpoint !== "string" || !endpoint.trim()) {
+ consola.warn("No endpoint provided, skipping Azure OpenAI setup");
+ return null;
+ }
+ const apiKey = await consola.prompt("Enter your Azure OpenAI API key:", { type: "text" });
+ if (!apiKey || typeof apiKey !== "string" || !apiKey.trim()) {
+ consola.warn("No API key provided, skipping Azure OpenAI setup");
+ return null;
+ }
+ const config = {
+ endpoint: endpoint.trim().replace(/\/$/, ""),
+ apiKey: apiKey.trim()
+ };
+ await saveAzureOpenAIConfig(config);
+ return config;
+ }
+ function isAzureOpenAIModel(modelId) {
+ return modelId.startsWith(AZURE_OPENAI_MODEL_PREFIX);
+ }
+ function getAzureDeploymentName(modelId) {
+ return modelId.slice(AZURE_OPENAI_MODEL_PREFIX.length);
+ }
+
+ //#endregion
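+ // Illustrative sketch of the config round trip above: the file holds
+ // base64-encoded JSON with endpoint and apiKey fields (values assumed):
+ //   await saveAzureOpenAIConfig({ endpoint: "https://my-resource.openai.azure.com", apiKey: "..." });
+ //   const config = await loadAzureOpenAIConfig(); // -> { endpoint, apiKey } or null
+ // Model ids are namespaced with the prefix, so getAzureDeploymentName
+ // maps "azure_openai_gpt-4o" back to the deployment name "gpt-4o".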
+ //#region src/services/azure-openai/create-chat-completions.ts
+ const AZURE_API_VERSION = "2024-10-21";
+ async function createAzureOpenAIChatCompletions(config, payload) {
+ const deploymentName = getAzureDeploymentName(payload.model);
+ const { max_tokens, ...restPayload } = payload;
+ const azurePayload = {
+ ...restPayload,
+ model: deploymentName,
+ ...max_tokens != null && { max_completion_tokens: max_tokens }
+ };
+ const response = await fetch(`${config.endpoint}/openai/deployments/${deploymentName}/chat/completions?api-version=${AZURE_API_VERSION}`, {
+ method: "POST",
+ headers: {
+ "api-key": config.apiKey,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify(azurePayload)
+ });
+ if (!response.ok) {
+ consola.error("Failed to create Azure OpenAI chat completions:", response);
+ throw new HTTPError("Failed to create Azure OpenAI chat completions", response);
+ }
+ if (payload.stream) return events(response);
+ return await response.json();
+ }
+
+ //#endregion
+ //#region src/services/azure-openai/get-models.ts
+ const AZURE_DEPLOYMENTS_API_VERSION = "2022-12-01";
+ async function getAzureOpenAIDeployments(config) {
+ try {
+ const response = await fetch(`${config.endpoint}/openai/deployments?api-version=${AZURE_DEPLOYMENTS_API_VERSION}`, { headers: {
+ "api-key": config.apiKey,
+ "Content-Type": "application/json"
+ } });
+ if (!response.ok) {
+ const errorText = await response.text().catch(() => "");
+ consola.error(`Failed to fetch Azure OpenAI deployments: ${response.status}`, errorText);
+ throw new HTTPError("Failed to fetch Azure OpenAI deployments", response);
+ }
+ return (await response.json()).data.filter((deployment) => deployment.status === "succeeded").map((deployment) => ({
+ id: `${AZURE_OPENAI_MODEL_PREFIX}${deployment.id}`,
+ deploymentName: deployment.id,
+ model: deployment.model,
+ created: deployment.created_at,
+ object: "deployment",
+ owned_by: deployment.owner || "azure-openai"
+ }));
+ } catch (error) {
+ if (error instanceof HTTPError) throw error;
+ consola.error("Failed to fetch Azure OpenAI deployments:", error);
+ return [];
+ }
+ }
+
+ //#endregion
+ //#region src/services/copilot/get-models.ts
+ const getModels = async () => {
+ const response = await fetch(`${copilotBaseUrl(state)}/models`, { headers: copilotHeaders(state) });
+ if (!response.ok) throw new HTTPError("Failed to get models", response);
+ return await response.json();
+ };
+
+ //#endregion
+ //#region src/services/get-vscode-version.ts
+ const FALLBACK = "1.104.3";
+ async function getVSCodeVersion() {
+ const controller = new AbortController();
+ const timeout = setTimeout(() => {
+ controller.abort();
+ }, 5e3);
+ try {
+ const match = (await (await fetch("https://aur.archlinux.org/cgit/aur.git/plain/PKGBUILD?h=visual-studio-code-bin", { signal: controller.signal })).text()).match(/pkgver=([0-9.]+)/);
+ if (match) return match[1];
+ return FALLBACK;
+ } catch {
+ return FALLBACK;
+ } finally {
+ clearTimeout(timeout);
+ }
+ }
+ await getVSCodeVersion();
+
+ //#endregion
+ //#region src/lib/utils.ts
+ const sleep = (ms) => new Promise((resolve) => {
+ setTimeout(resolve, ms);
+ });
+ const isNullish = (value) => value === null || value === void 0;
+ async function cacheModels() {
+ state.models = await getModels();
+ }
+ const cacheVSCodeVersion = async () => {
+ const response = await getVSCodeVersion();
+ state.vsCodeVersion = response;
+ consola.info(`Using VSCode version: ${response}`);
+ };
+ async function setupAzureOpenAI() {
+ let config = await loadAzureOpenAIConfig();
+ if (!config) config = await promptAzureOpenAISetup();
+ if (!config) {
+ consola.info("Azure OpenAI not configured");
+ return;
+ }
+ state.azureOpenAIConfig = config;
+ consola.info("Azure OpenAI configuration loaded");
+ try {
+ const deployments = await getAzureOpenAIDeployments(config);
+ state.azureOpenAIDeployments = deployments;
+ if (deployments.length > 0) consola.info(`Loaded ${deployments.length} Azure OpenAI deployment(s):\n${deployments.map((d) => `- ${d.id} (${d.model})`).join("\n")}`);
+ else consola.warn("No Azure OpenAI deployments found");
+ } catch (error) {
+ consola.warn("Failed to fetch Azure OpenAI deployments:", error);
+ }
+ }
+
+ //#endregion
+ //#region src/services/github/poll-access-token.ts
+ async function pollAccessToken(deviceCode) {
+ const sleepDuration = (deviceCode.interval + 1) * 1e3;
+ consola.debug(`Polling access token with interval of ${sleepDuration}ms`);
+ while (true) {
+ const response = await fetch(`${GITHUB_BASE_URL}/login/oauth/access_token`, {
+ method: "POST",
+ headers: standardHeaders(),
+ body: JSON.stringify({
+ client_id: GITHUB_CLIENT_ID,
+ device_code: deviceCode.device_code,
+ grant_type: "urn:ietf:params:oauth:grant-type:device_code"
+ })
+ });
+ if (!response.ok) {
+ consola.error("Failed to poll access token:", await response.text());
+ await sleep(sleepDuration);
+ continue;
+ }
+ const json = await response.json();
+ consola.debug("Polling access token response:", json);
+ const { access_token } = json;
+ if (access_token) return access_token;
+ else await sleep(sleepDuration);
+ }
+ }
+
+ //#endregion
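+ // Illustrative sketch of the device flow wiring above (GitHub's OAuth
+ // device-code grant): the user enters user_code at verification_uri while
+ // pollAccessToken retries every (interval + 1) seconds until GitHub
+ // returns an access_token:
+ //   const deviceCode = await getDeviceCode();
+ //   const accessToken = await pollAccessToken(deviceCode);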
+ //#region src/lib/token.ts
+ const readGithubToken = () => fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8");
+ const writeGithubToken = (token) => fs.writeFile(PATHS.GITHUB_TOKEN_PATH, token);
+ const setupCopilotToken = async () => {
+ const { token, refresh_in } = await getCopilotToken();
+ state.copilotToken = token;
+ consola.debug("GitHub Copilot Token fetched successfully!");
+ if (state.showToken) consola.info("Copilot token:", token);
+ const refreshInterval = (refresh_in - 60) * 1e3;
+ setInterval(async () => {
+ consola.debug("Refreshing Copilot token");
+ try {
+ const { token: token$1 } = await getCopilotToken();
+ state.copilotToken = token$1;
+ consola.debug("Copilot token refreshed");
+ if (state.showToken) consola.info("Refreshed Copilot token:", token$1);
+ } catch (error) {
+ consola.error("Failed to refresh Copilot token:", error);
+ throw error;
+ }
+ }, refreshInterval);
+ };
+ async function setupGitHubToken(options) {
+ try {
+ const githubToken = await readGithubToken();
+ if (githubToken && !options?.force) {
+ state.githubToken = githubToken;
+ if (state.showToken) consola.info("GitHub token:", githubToken);
+ await logUser();
+ return;
+ }
+ consola.info("Not logged in, getting new access token");
+ const response = await getDeviceCode();
+ consola.debug("Device code response:", response);
+ consola.info(`Please enter the code "${response.user_code}" at ${response.verification_uri}`);
+ const token = await pollAccessToken(response);
+ await writeGithubToken(token);
+ state.githubToken = token;
+ if (state.showToken) consola.info("GitHub token:", token);
+ await logUser();
+ } catch (error) {
+ if (error instanceof HTTPError) {
+ consola.error("Failed to get GitHub token:", await error.response.json());
+ throw error;
+ }
+ consola.error("Failed to get GitHub token:", error);
+ throw error;
+ }
+ }
+ async function logUser() {
+ const user = await getGitHubUser();
+ consola.info(`Logged in as ${user.login}`);
+ }
+
+ //#endregion
+ //#region src/auth.ts
+ async function runAuth(options) {
+ if (options.verbose) {
+ consola.level = 5;
+ consola.info("Verbose logging enabled");
+ }
+ state.showToken = options.showToken;
+ await ensurePaths();
+ await setupGitHubToken({ force: true });
+ consola.success("GitHub token written to", PATHS.GITHUB_TOKEN_PATH);
+ }
+ const auth = defineCommand({
+ meta: {
+ name: "auth",
+ description: "Run GitHub auth flow without running the server"
+ },
+ args: {
+ verbose: {
+ alias: "v",
+ type: "boolean",
+ default: false,
+ description: "Enable verbose logging"
+ },
+ "show-token": {
+ type: "boolean",
+ default: false,
+ description: "Show GitHub token on auth"
+ }
+ },
+ run({ args }) {
+ return runAuth({
+ verbose: args.verbose,
+ showToken: args["show-token"]
+ });
+ }
+ });
+
+ //#endregion
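+ // Example invocation of the subcommand defined above, using its declared flags:
+ //   copilot-api auth --verbose --show-token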
+ //#region src/services/github/get-copilot-usage.ts
+ const getCopilotUsage = async () => {
+ const response = await fetch(`${GITHUB_API_BASE_URL}/copilot_internal/user`, { headers: githubHeaders(state) });
+ if (!response.ok) throw new HTTPError("Failed to get Copilot usage", response);
+ return await response.json();
+ };
+
+ //#endregion
+ //#region src/check-usage.ts
+ const checkUsage = defineCommand({
+ meta: {
+ name: "check-usage",
+ description: "Show current GitHub Copilot usage/quota information"
+ },
+ async run() {
+ await ensurePaths();
+ await setupGitHubToken();
+ try {
+ const usage = await getCopilotUsage();
+ const premium = usage.quota_snapshots.premium_interactions;
+ const premiumTotal = premium.entitlement;
+ const premiumUsed = premiumTotal - premium.remaining;
+ const premiumPercentUsed = premiumTotal > 0 ? premiumUsed / premiumTotal * 100 : 0;
+ const premiumPercentRemaining = premium.percent_remaining;
+ function summarizeQuota(name, snap) {
+ if (!snap) return `${name}: N/A`;
+ const total = snap.entitlement;
+ const used = total - snap.remaining;
+ const percentUsed = total > 0 ? used / total * 100 : 0;
+ const percentRemaining = snap.percent_remaining;
+ return `${name}: ${used}/${total} used (${percentUsed.toFixed(1)}% used, ${percentRemaining.toFixed(1)}% remaining)`;
+ }
+ const premiumLine = `Premium: ${premiumUsed}/${premiumTotal} used (${premiumPercentUsed.toFixed(1)}% used, ${premiumPercentRemaining.toFixed(1)}% remaining)`;
+ const chatLine = summarizeQuota("Chat", usage.quota_snapshots.chat);
+ const completionsLine = summarizeQuota("Completions", usage.quota_snapshots.completions);
+ consola.box(`Copilot Usage (plan: ${usage.copilot_plan})\nQuota resets: ${usage.quota_reset_date}\n\nQuotas:\n ${premiumLine}\n ${chatLine}\n ${completionsLine}`);
+ } catch (err) {
+ consola.error("Failed to fetch Copilot usage:", err);
+ process.exit(1);
+ }
+ }
+ });
+
+ //#endregion
+ //#region src/debug.ts
+ async function getPackageVersion() {
+ try {
+ const packageJsonPath = new URL("../package.json", import.meta.url).pathname;
+ return JSON.parse(await fs.readFile(packageJsonPath, "utf8")).version;
+ } catch {
+ return "unknown";
+ }
+ }
+ function getRuntimeInfo() {
+ const isBun = typeof Bun !== "undefined";
+ return {
+ name: isBun ? "bun" : "node",
+ version: isBun ? Bun.version : process.version.slice(1),
+ platform: os.platform(),
+ arch: os.arch()
+ };
+ }
+ async function checkTokenExists() {
+ try {
+ if (!(await fs.stat(PATHS.GITHUB_TOKEN_PATH)).isFile()) return false;
+ return (await fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8")).trim().length > 0;
+ } catch {
+ return false;
+ }
+ }
+ async function getDebugInfo() {
+ const [version, tokenExists] = await Promise.all([getPackageVersion(), checkTokenExists()]);
+ return {
+ version,
+ runtime: getRuntimeInfo(),
+ paths: {
+ APP_DIR: PATHS.APP_DIR,
+ GITHUB_TOKEN_PATH: PATHS.GITHUB_TOKEN_PATH
+ },
+ tokenExists
+ };
+ }
+ function printDebugInfoPlain(info) {
+ consola.info(`copilot-api debug
+
+ Version: ${info.version}
+ Runtime: ${info.runtime.name} ${info.runtime.version} (${info.runtime.platform} ${info.runtime.arch})
+
+ Paths:
+ - APP_DIR: ${info.paths.APP_DIR}
+ - GITHUB_TOKEN_PATH: ${info.paths.GITHUB_TOKEN_PATH}
+
+ Token exists: ${info.tokenExists ? "Yes" : "No"}`);
+ }
+ function printDebugInfoJson(info) {
+ console.log(JSON.stringify(info, null, 2));
+ }
+ async function runDebug(options) {
+ const debugInfo = await getDebugInfo();
+ if (options.json) printDebugInfoJson(debugInfo);
+ else printDebugInfoPlain(debugInfo);
+ }
+ const debug = defineCommand({
+ meta: {
+ name: "debug",
+ description: "Print debug information about the application"
+ },
+ args: { json: {
+ type: "boolean",
+ default: false,
+ description: "Output debug information as JSON"
+ } },
+ run({ args }) {
+ return runDebug({ json: args.json });
+ }
+ });
+
+ //#endregion
+ //#region src/lib/proxy.ts
+ function initProxyFromEnv() {
+ if (typeof Bun !== "undefined") return;
+ try {
+ const direct = new Agent();
+ const proxies = /* @__PURE__ */ new Map();
+ setGlobalDispatcher({
+ dispatch(options, handler) {
+ try {
+ const origin = typeof options.origin === "string" ? new URL(options.origin) : options.origin;
+ const raw = getProxyForUrl(origin.toString());
+ const proxyUrl = raw && raw.length > 0 ? raw : void 0;
+ if (!proxyUrl) {
+ consola.debug(`HTTP proxy bypass: ${origin.hostname}`);
+ return direct.dispatch(options, handler);
+ }
+ let agent = proxies.get(proxyUrl);
+ if (!agent) {
+ agent = new ProxyAgent(proxyUrl);
+ proxies.set(proxyUrl, agent);
+ }
+ let label = proxyUrl;
+ try {
+ const u = new URL(proxyUrl);
+ label = `${u.protocol}//${u.host}`;
+ } catch {}
+ consola.debug(`HTTP proxy route: ${origin.hostname} via ${label}`);
+ return agent.dispatch(options, handler);
+ } catch {
+ return direct.dispatch(options, handler);
+ }
+ },
+ close() {
+ return direct.close();
+ },
+ destroy() {
+ return direct.destroy();
+ }
+ });
+ consola.debug("HTTP proxy configured from environment (per-URL)");
+ } catch (err) {
+ consola.debug("Proxy setup skipped:", err);
+ }
+ }
+
+ //#endregion
+ //#region src/lib/shell.ts
+ function getShell() {
+ const { platform, ppid, env } = process$1;
+ if (platform === "win32") {
+ try {
+ const command = `wmic process get ParentProcessId,Name | findstr "${ppid}"`;
+ if (execSync(command, { stdio: "pipe" }).toString().toLowerCase().includes("powershell.exe")) return "powershell";
+ } catch {
+ return "cmd";
+ }
+ return "cmd";
+ } else {
+ const shellPath = env.SHELL;
+ if (shellPath) {
+ if (shellPath.endsWith("zsh")) return "zsh";
+ if (shellPath.endsWith("fish")) return "fish";
+ if (shellPath.endsWith("bash")) return "bash";
+ }
+ return "sh";
+ }
+ }
+ /**
+ * Generates a copy-pasteable script to set multiple environment variables
+ * and run a subsequent command.
+ * @param {EnvVars} envVars - An object of environment variables to set.
+ * @param {string} commandToRun - The command to run after setting the variables.
+ * @returns {string} The formatted script string.
+ */
+ function generateEnvScript(envVars, commandToRun = "") {
+ const shell = getShell();
+ const filteredEnvVars = Object.entries(envVars).filter(([, value]) => value !== void 0);
+ let commandBlock;
+ switch (shell) {
+ case "powershell":
+ commandBlock = filteredEnvVars.map(([key, value]) => `$env:${key} = "${value}"`).join("; ");
+ break;
+ case "cmd":
+ commandBlock = filteredEnvVars.map(([key, value]) => `set ${key}=${value}`).join(" & ");
+ break;
+ case "fish":
+ commandBlock = filteredEnvVars.map(([key, value]) => `set -gx ${key} "${value}"`).join("; ");
+ break;
+ default: {
+ const assignments = filteredEnvVars.map(([key, value]) => `${key}=${value}`).join(" ");
+ commandBlock = filteredEnvVars.length > 0 ? `export ${assignments}` : "";
+ break;
+ }
+ }
+ if (commandBlock && commandToRun) return `${commandBlock}${shell === "cmd" ? " & " : " && "}${commandToRun}`;
+ return commandBlock || commandToRun;
+ }
+
+ //#endregion
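+ // Illustrative outputs of generateEnvScript({ FOO: "1", BAR: "2" }, "claude")
+ // per shell branch above (simple values assumed, no escaping shown):
+ //   bash/zsh/sh: export FOO=1 BAR=2 && claude
+ //   fish:        set -gx FOO "1"; set -gx BAR "2" && claude
+ //   powershell:  $env:FOO = "1"; $env:BAR = "2" && claude
+ //   cmd:         set FOO=1 & set BAR=2 & claude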
+ //#region src/lib/approval.ts
+ const awaitApproval = async () => {
+ if (!await consola.prompt("Accept incoming request?", { type: "confirm" })) throw new HTTPError("Request rejected", Response.json({ message: "Request rejected" }, { status: 403 }));
+ };
+
+ //#endregion
+ //#region src/lib/rate-limit.ts
+ async function checkRateLimit(state$1) {
+ if (state$1.rateLimitSeconds === void 0) return;
+ const now = Date.now();
+ if (!state$1.lastRequestTimestamp) {
+ state$1.lastRequestTimestamp = now;
+ return;
+ }
+ const elapsedSeconds = (now - state$1.lastRequestTimestamp) / 1e3;
+ if (elapsedSeconds > state$1.rateLimitSeconds) {
+ state$1.lastRequestTimestamp = now;
+ return;
+ }
+ const waitTimeSeconds = Math.ceil(state$1.rateLimitSeconds - elapsedSeconds);
+ if (!state$1.rateLimitWait) {
+ consola.warn(`Rate limit exceeded. Need to wait ${waitTimeSeconds} more seconds.`);
+ throw new HTTPError("Rate limit exceeded", Response.json({ message: "Rate limit exceeded" }, { status: 429 }));
+ }
+ const waitTimeMs = waitTimeSeconds * 1e3;
+ consola.warn(`Rate limit reached. Waiting ${waitTimeSeconds} seconds before proceeding...`);
+ await sleep(waitTimeMs);
+ state$1.lastRequestTimestamp = now;
+ consola.info("Rate limit wait completed, proceeding with request");
+ }
+
+ //#endregion
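+ // Behavior sketch for checkRateLimit: with rateLimitSeconds = 30, a request
+ // arriving 10s after the previous one either throws an HTTP 429 error
+ // (rateLimitWait = false) or sleeps the remaining ~20s and then proceeds
+ // (rateLimitWait = true).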
+ //#region src/lib/tokenizer.ts
+ const ENCODING_MAP = {
+ o200k_base: () => import("gpt-tokenizer/encoding/o200k_base"),
+ cl100k_base: () => import("gpt-tokenizer/encoding/cl100k_base"),
+ p50k_base: () => import("gpt-tokenizer/encoding/p50k_base"),
+ p50k_edit: () => import("gpt-tokenizer/encoding/p50k_edit"),
+ r50k_base: () => import("gpt-tokenizer/encoding/r50k_base")
+ };
+ const encodingCache = /* @__PURE__ */ new Map();
+ /**
+ * Calculate tokens for tool calls
+ */
+ const calculateToolCallsTokens = (toolCalls, encoder, constants) => {
+ let tokens = 0;
+ for (const toolCall of toolCalls) {
+ tokens += constants.funcInit;
+ tokens += encoder.encode(JSON.stringify(toolCall)).length;
+ }
+ tokens += constants.funcEnd;
+ return tokens;
+ };
+ /**
+ * Calculate tokens for content parts
+ */
+ const calculateContentPartsTokens = (contentParts, encoder) => {
+ let tokens = 0;
+ for (const part of contentParts) if (part.type === "image_url") tokens += encoder.encode(part.image_url.url).length + 85;
+ else if (part.text) tokens += encoder.encode(part.text).length;
+ return tokens;
+ };
+ /**
+ * Calculate tokens for a single message
+ */
+ const calculateMessageTokens = (message, encoder, constants) => {
+ const tokensPerMessage = 3;
+ const tokensPerName = 1;
+ let tokens = tokensPerMessage;
+ for (const [key, value] of Object.entries(message)) {
+ if (typeof value === "string") tokens += encoder.encode(value).length;
+ if (key === "name") tokens += tokensPerName;
+ if (key === "tool_calls") tokens += calculateToolCallsTokens(value, encoder, constants);
+ if (key === "content" && Array.isArray(value)) tokens += calculateContentPartsTokens(value, encoder);
+ }
+ return tokens;
+ };
+ /**
+ * Calculate tokens using custom algorithm
+ */
+ const calculateTokens = (messages, encoder, constants) => {
+ if (messages.length === 0) return 0;
+ let numTokens = 0;
+ for (const message of messages) numTokens += calculateMessageTokens(message, encoder, constants);
+ numTokens += 3;
+ return numTokens;
+ };
+ /**
+ * Get the corresponding encoder module based on encoding type
+ */
+ const getEncodeChatFunction = async (encoding) => {
+ if (encodingCache.has(encoding)) {
+ const cached = encodingCache.get(encoding);
+ if (cached) return cached;
+ }
+ const supportedEncoding = encoding;
+ if (!(supportedEncoding in ENCODING_MAP)) {
+ const fallbackModule = await ENCODING_MAP.o200k_base();
+ encodingCache.set(encoding, fallbackModule);
+ return fallbackModule;
+ }
+ const encodingModule = await ENCODING_MAP[supportedEncoding]();
+ encodingCache.set(encoding, encodingModule);
+ return encodingModule;
+ };
+ /**
+ * Get tokenizer type from model information
+ */
+ const getTokenizerFromModel = (model) => {
+ return model.capabilities.tokenizer || "o200k_base";
+ };
+ /**
+ * Get model-specific constants for token calculation
+ */
+ const getModelConstants = (model) => {
+ return model.id === "gpt-3.5-turbo" || model.id === "gpt-4" ? {
+ funcInit: 10,
+ propInit: 3,
+ propKey: 3,
+ enumInit: -3,
+ enumItem: 3,
+ funcEnd: 12
+ } : {
+ funcInit: 7,
+ propInit: 3,
+ propKey: 3,
+ enumInit: -3,
+ enumItem: 3,
+ funcEnd: 12
+ };
+ };
+ /**
+ * Calculate tokens for a single parameter
+ */
+ const calculateParameterTokens = (key, prop, context) => {
+ const { encoder, constants } = context;
+ let tokens = constants.propKey;
+ if (typeof prop !== "object" || prop === null) return tokens;
+ const param = prop;
+ const paramName = key;
+ const paramType = param.type || "string";
+ let paramDesc = param.description || "";
+ if (param.enum && Array.isArray(param.enum)) {
+ tokens += constants.enumInit;
+ for (const item of param.enum) {
+ tokens += constants.enumItem;
+ tokens += encoder.encode(String(item)).length;
+ }
+ }
+ if (paramDesc.endsWith(".")) paramDesc = paramDesc.slice(0, -1);
+ const line = `${paramName}:${paramType}:${paramDesc}`;
+ tokens += encoder.encode(line).length;
+ const excludedKeys = new Set([
+ "type",
+ "description",
+ "enum"
+ ]);
+ for (const propertyName of Object.keys(param)) if (!excludedKeys.has(propertyName)) {
+ const propertyValue = param[propertyName];
+ const propertyText = typeof propertyValue === "string" ? propertyValue : JSON.stringify(propertyValue);
+ tokens += encoder.encode(`${propertyName}:${propertyText}`).length;
+ }
+ return tokens;
+ };
+ /**
+ * Calculate tokens for function parameters
+ */
+ const calculateParametersTokens = (parameters, encoder, constants) => {
+ if (!parameters || typeof parameters !== "object") return 0;
+ const params = parameters;
+ let tokens = 0;
+ for (const [key, value] of Object.entries(params)) if (key === "properties") {
+ const properties = value;
+ if (Object.keys(properties).length > 0) {
+ tokens += constants.propInit;
+ for (const propKey of Object.keys(properties)) tokens += calculateParameterTokens(propKey, properties[propKey], {
+ encoder,
+ constants
+ });
+ }
+ } else {
+ const paramText = typeof value === "string" ? value : JSON.stringify(value);
+ tokens += encoder.encode(`${key}:${paramText}`).length;
+ }
+ return tokens;
+ };
+ /**
+ * Calculate tokens for a single tool
+ */
+ const calculateToolTokens = (tool, encoder, constants) => {
+ let tokens = constants.funcInit;
+ const func = tool.function;
+ const fName = func.name;
+ let fDesc = func.description || "";
+ if (fDesc.endsWith(".")) fDesc = fDesc.slice(0, -1);
+ const line = fName + ":" + fDesc;
+ tokens += encoder.encode(line).length;
+ if (typeof func.parameters === "object" && func.parameters !== null) tokens += calculateParametersTokens(func.parameters, encoder, constants);
+ return tokens;
+ };
+ /**
+ * Calculate token count for tools based on model
+ */
+ const numTokensForTools = (tools, encoder, constants) => {
+ let funcTokenCount = 0;
+ for (const tool of tools) funcTokenCount += calculateToolTokens(tool, encoder, constants);
+ funcTokenCount += constants.funcEnd;
+ return funcTokenCount;
+ };
+ /**
+ * Calculate the token count of messages, supporting multiple GPT encoders
+ */
+ const getTokenCount = async (payload, model) => {
+ const tokenizer = getTokenizerFromModel(model);
+ const encoder = await getEncodeChatFunction(tokenizer);
+ const simplifiedMessages = payload.messages;
+ const inputMessages = simplifiedMessages.filter((msg) => msg.role !== "assistant");
+ const outputMessages = simplifiedMessages.filter((msg) => msg.role === "assistant");
+ const constants = getModelConstants(model);
+ let inputTokens = calculateTokens(inputMessages, encoder, constants);
+ if (payload.tools && payload.tools.length > 0) inputTokens += numTokensForTools(payload.tools, encoder, constants);
+ const outputTokens = calculateTokens(outputMessages, encoder, constants);
+ return {
+ input: inputTokens,
+ output: outputTokens
+ };
+ };
+
+ //#endregion
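+ // Hedged usage sketch for getTokenCount; the model shape mirrors how it is
+ // consumed above (capabilities.tokenizer), other fields are assumed:
+ //   const model = { id: "gpt-4o", capabilities: { tokenizer: "o200k_base", limits: {} } };
+ //   const { input, output } = await getTokenCount({ messages: [{ role: "user", content: "Hello" }] }, model);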
+ //#region src/services/copilot/create-chat-completions.ts
+ const createChatCompletions = async (payload) => {
+ if (!state.copilotToken) throw new Error("Copilot token not found");
+ const enableVision = payload.messages.some((x) => typeof x.content !== "string" && x.content?.some((x$1) => x$1.type === "image_url"));
+ const isAgentCall = payload.messages.some((msg) => ["assistant", "tool"].includes(msg.role));
+ const headers = {
+ ...copilotHeaders(state, enableVision),
+ "X-Initiator": isAgentCall ? "agent" : "user"
+ };
+ const response = await fetch(`${copilotBaseUrl(state)}/chat/completions`, {
+ method: "POST",
+ headers,
+ body: JSON.stringify(payload)
+ });
+ if (!response.ok) {
+ consola.error("Failed to create chat completions", response);
+ throw new HTTPError("Failed to create chat completions", response);
+ }
+ if (payload.stream) return events(response);
+ return await response.json();
+ };
+
+ //#endregion
+ //#region src/routes/chat-completions/handler.ts
+ async function handleCompletion$1(c) {
+ await checkRateLimit(state);
+ let payload = await c.req.json();
+ consola.debug("Request payload:", JSON.stringify(payload).slice(-400));
+ if (isAzureOpenAIModel(payload.model)) {
+ if (!state.azureOpenAIConfig) return c.json({ error: "Azure OpenAI not configured" }, 500);
+ consola.info(`Routing to Azure OpenAI -> ${payload.model}`);
+ if (state.manualApprove) await awaitApproval();
+ const response$1 = await createAzureOpenAIChatCompletions(state.azureOpenAIConfig, payload);
+ if (isNonStreaming$1(response$1)) {
+ consola.debug("Non-streaming response:", JSON.stringify(response$1));
+ return c.json(response$1);
+ }
+ consola.debug("Streaming response");
+ return streamSSE(c, async (stream) => {
+ for await (const chunk of response$1) {
+ consola.debug("Streaming chunk:", JSON.stringify(chunk));
+ await stream.writeSSE(chunk);
+ }
+ });
+ }
+ consola.info(`Routing to Copilot -> ${payload.model}`);
+ const selectedModel = state.models?.data.find((model) => model.id === payload.model);
+ try {
+ if (selectedModel) {
+ const tokenCount = await getTokenCount(payload, selectedModel);
+ consola.info("Current token count:", tokenCount);
+ } else consola.warn("No model selected, skipping token count calculation");
+ } catch (error) {
+ consola.warn("Failed to calculate token count:", error);
+ }
+ if (state.manualApprove) await awaitApproval();
+ if (isNullish(payload.max_tokens)) {
+ payload = {
+ ...payload,
+ max_tokens: selectedModel?.capabilities.limits.max_output_tokens
+ };
+ consola.debug("Set max_tokens to:", JSON.stringify(payload.max_tokens));
+ }
+ const response = await createChatCompletions(payload);
+ if (isNonStreaming$1(response)) {
+ consola.debug("Non-streaming response:", JSON.stringify(response));
+ return c.json(response);
+ }
+ consola.debug("Streaming response");
+ return streamSSE(c, async (stream) => {
+ for await (const chunk of response) {
+ consola.debug("Streaming chunk:", JSON.stringify(chunk));
+ await stream.writeSSE(chunk);
+ }
+ });
+ }
+ const isNonStreaming$1 = (response) => Object.hasOwn(response, "choices");
+
+ //#endregion
+ //#region src/routes/chat-completions/route.ts
+ const completionRoutes = new Hono();
+ completionRoutes.post("/", async (c) => {
+ try {
+ return await handleCompletion$1(c);
+ } catch (error) {
+ return await forwardError(c, error);
+ }
+ });
+
+ //#endregion
+ //#region src/services/copilot/create-embeddings.ts
+ const createEmbeddings = async (payload) => {
+ if (!state.copilotToken) throw new Error("Copilot token not found");
+ const response = await fetch(`${copilotBaseUrl(state)}/embeddings`, {
+ method: "POST",
+ headers: copilotHeaders(state),
+ body: JSON.stringify(payload)
+ });
+ if (!response.ok) throw new HTTPError("Failed to create embeddings", response);
+ return await response.json();
+ };
+
+ //#endregion
+ //#region src/routes/embeddings/route.ts
+ const embeddingRoutes = new Hono();
+ embeddingRoutes.post("/", async (c) => {
+ try {
+ const payload = await c.req.json();
+ const response = await createEmbeddings(payload);
+ return c.json(response);
+ } catch (error) {
+ return await forwardError(c, error);
+ }
+ });
+
+ //#endregion
+ //#region src/routes/messages/utils.ts
+ function mapOpenAIStopReasonToAnthropic(finishReason) {
+ if (finishReason === null) return null;
+ return {
+ stop: "end_turn",
+ length: "max_tokens",
+ tool_calls: "tool_use",
+ content_filter: "end_turn"
+ }[finishReason] ?? null;
+ }
+
+ //#endregion
+ //#region src/routes/messages/non-stream-translation.ts
+ function translateToOpenAI(payload) {
+ return {
+ model: translateModelName(payload.model),
+ messages: translateAnthropicMessagesToOpenAI(payload.messages, payload.system),
+ max_tokens: payload.max_tokens,
+ stop: payload.stop_sequences,
+ stream: payload.stream,
+ temperature: payload.temperature,
+ top_p: payload.top_p,
+ user: payload.metadata?.user_id,
+ tools: translateAnthropicToolsToOpenAI(payload.tools),
+ tool_choice: translateAnthropicToolChoiceToOpenAI(payload.tool_choice)
+ };
+ }
+ function translateModelName(model) {
+ if (model.startsWith("claude-sonnet-4-")) return model.replace(/^claude-sonnet-4-.*/, "claude-sonnet-4");
+ else if (model.startsWith("claude-opus-4-")) return model.replace(/^claude-opus-4-.*/, "claude-opus-4");
+ return model;
+ }
+ function translateAnthropicMessagesToOpenAI(anthropicMessages, system) {
+ const systemMessages = handleSystemPrompt(system);
+ const otherMessages = anthropicMessages.flatMap((message) => message.role === "user" ? handleUserMessage(message) : handleAssistantMessage(message));
+ return [...systemMessages, ...otherMessages];
+ }
+ function handleSystemPrompt(system) {
+ if (!system) return [];
+ if (typeof system === "string") return [{
+ role: "system",
+ content: system
+ }];
+ else return [{
+ role: "system",
+ content: system.map((block) => block.text).join("\n\n")
+ }];
+ }
+ function handleUserMessage(message) {
+ const newMessages = [];
+ if (Array.isArray(message.content)) {
+ const toolResultBlocks = message.content.filter((block) => block.type === "tool_result");
+ const otherBlocks = message.content.filter((block) => block.type !== "tool_result");
+ for (const block of toolResultBlocks) newMessages.push({
+ role: "tool",
+ tool_call_id: block.tool_use_id,
+ content: mapContent(block.content)
+ });
+ if (otherBlocks.length > 0) newMessages.push({
+ role: "user",
+ content: mapContent(otherBlocks)
+ });
+ } else newMessages.push({
+ role: "user",
+ content: mapContent(message.content)
+ });
+ return newMessages;
+ }
+ function handleAssistantMessage(message) {
+ if (!Array.isArray(message.content)) return [{
+ role: "assistant",
+ content: mapContent(message.content)
+ }];
+ const toolUseBlocks = message.content.filter((block) => block.type === "tool_use");
+ const textBlocks = message.content.filter((block) => block.type === "text");
+ const thinkingBlocks = message.content.filter((block) => block.type === "thinking");
+ const allTextContent = [...textBlocks.map((b) => b.text), ...thinkingBlocks.map((b) => b.thinking)].join("\n\n");
+ return toolUseBlocks.length > 0 ? [{
+ role: "assistant",
+ content: allTextContent || null,
+ tool_calls: toolUseBlocks.map((toolUse) => ({
+ id: toolUse.id,
+ type: "function",
+ function: {
+ name: toolUse.name,
+ arguments: JSON.stringify(toolUse.input)
+ }
+ }))
+ }] : [{
+ role: "assistant",
+ content: mapContent(message.content)
+ }];
+ }
+ function mapContent(content) {
+ if (typeof content === "string") return content;
+ if (!Array.isArray(content)) return null;
+ if (!content.some((block) => block.type === "image")) return content.filter((block) => block.type === "text" || block.type === "thinking").map((block) => block.type === "text" ? block.text : block.thinking).join("\n\n");
+ const contentParts = [];
+ for (const block of content) switch (block.type) {
+ case "text":
+ contentParts.push({
+ type: "text",
+ text: block.text
+ });
+ break;
+ case "thinking":
+ contentParts.push({
+ type: "text",
+ text: block.thinking
+ });
+ break;
+ case "image":
+ contentParts.push({
+ type: "image_url",
+ image_url: { url: `data:${block.source.media_type};base64,${block.source.data}` }
+ });
+ break;
+ }
+ return contentParts;
+ }
+ function translateAnthropicToolsToOpenAI(anthropicTools) {
+ if (!anthropicTools) return;
+ return anthropicTools.map((tool) => ({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.input_schema
+ }
+ }));
+ }
+ function translateAnthropicToolChoiceToOpenAI(anthropicToolChoice) {
+ if (!anthropicToolChoice) return;
+ switch (anthropicToolChoice.type) {
+ case "auto": return "auto";
+ case "any": return "required";
+ case "tool":
+ if (anthropicToolChoice.name) return {
+ type: "function",
+ function: { name: anthropicToolChoice.name }
+ };
+ return;
+ case "none": return "none";
+ default: return;
+ }
+ }
+ function translateToAnthropic(response) {
+ const allTextBlocks = [];
+ const allToolUseBlocks = [];
+ let stopReason = response.choices[0]?.finish_reason ?? null;
+ for (const choice of response.choices) {
+ const textBlocks = getAnthropicTextBlocks(choice.message.content);
+ const toolUseBlocks = getAnthropicToolUseBlocks(choice.message.tool_calls);
+ allTextBlocks.push(...textBlocks);
+ allToolUseBlocks.push(...toolUseBlocks);
+ if (choice.finish_reason === "tool_calls" || stopReason === "stop") stopReason = choice.finish_reason;
+ }
+ return {
+ id: response.id,
+ type: "message",
+ role: "assistant",
+ model: response.model,
+ content: [...allTextBlocks, ...allToolUseBlocks],
+ stop_reason: mapOpenAIStopReasonToAnthropic(stopReason),
+ stop_sequence: null,
+ usage: {
+ input_tokens: (response.usage?.prompt_tokens ?? 0) - (response.usage?.prompt_tokens_details?.cached_tokens ?? 0),
+ output_tokens: response.usage?.completion_tokens ?? 0,
+ ...response.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: response.usage.prompt_tokens_details.cached_tokens }
+ }
+ };
+ }
+ function getAnthropicTextBlocks(messageContent) {
+ if (typeof messageContent === "string") return [{
+ type: "text",
+ text: messageContent
+ }];
+ if (Array.isArray(messageContent)) return messageContent.filter((part) => part.type === "text").map((part) => ({
+ type: "text",
+ text: part.text
+ }));
+ return [];
+ }
+ function getAnthropicToolUseBlocks(toolCalls) {
+ if (!toolCalls) return [];
+ return toolCalls.map((toolCall) => ({
+ type: "tool_use",
+ id: toolCall.id,
+ name: toolCall.function.name,
+ input: JSON.parse(toolCall.function.arguments)
+ }));
+ }
+
+ //#endregion
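+ // Illustrative sketch of the Anthropic -> OpenAI translation above
+ // (the model name is an assumed example):
+ //   translateToOpenAI({ model: "claude-sonnet-4-20250514", max_tokens: 256,
+ //     messages: [{ role: "user", content: "hi" }] })
+ //   // -> { model: "claude-sonnet-4", messages: [{ role: "user", content: "hi" }],
+ //   //      max_tokens: 256, ... }
+ // translateToAnthropic() performs the reverse mapping on responses, turning
+ // tool_calls into tool_use blocks and finish_reason into stop_reason.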
+ //#region src/routes/messages/count-tokens-handler.ts
+ /**
+ * Handles token counting for Anthropic messages
+ */
+ async function handleCountTokens(c) {
+ try {
+ const anthropicBeta = c.req.header("anthropic-beta");
+ const anthropicPayload = await c.req.json();
+ const openAIPayload = translateToOpenAI(anthropicPayload);
+ const selectedModel = state.models?.data.find((model) => model.id === anthropicPayload.model);
+ if (!selectedModel) {
+ consola.warn("Model not found, returning default token count");
+ return c.json({ input_tokens: 1 });
+ }
+ const tokenCount = await getTokenCount(openAIPayload, selectedModel);
+ if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
+ let mcpToolExist = false;
+ if (anthropicBeta?.startsWith("claude-code")) mcpToolExist = anthropicPayload.tools.some((tool) => tool.name.startsWith("mcp__"));
+ if (!mcpToolExist) {
+ if (anthropicPayload.model.startsWith("claude")) tokenCount.input = tokenCount.input + 346;
+ else if (anthropicPayload.model.startsWith("grok")) tokenCount.input = tokenCount.input + 480;
+ }
+ }
+ let finalTokenCount = tokenCount.input + tokenCount.output;
+ if (anthropicPayload.model.startsWith("claude")) finalTokenCount = Math.round(finalTokenCount * 1.15);
+ else if (anthropicPayload.model.startsWith("grok")) finalTokenCount = Math.round(finalTokenCount * 1.03);
+ consola.info("Token count:", finalTokenCount);
+ return c.json({ input_tokens: finalTokenCount });
+ } catch (error) {
+ consola.error("Error counting tokens:", error);
+ return c.json({ input_tokens: 1 });
+ }
+ }
+
+ //#endregion
+ //#region src/routes/messages/stream-translation.ts
+ function isToolBlockOpen(state$1) {
+ if (!state$1.contentBlockOpen) return false;
+ return Object.values(state$1.toolCalls).some((tc) => tc.anthropicBlockIndex === state$1.contentBlockIndex);
+ }
+ function translateChunkToAnthropicEvents(chunk, state$1) {
+ const events$1 = [];
+ if (chunk.choices.length === 0) return events$1;
+ const choice = chunk.choices[0];
+ const { delta } = choice;
+ if (!state$1.messageStartSent) {
+ events$1.push({
+ type: "message_start",
+ message: {
+ id: chunk.id,
+ type: "message",
+ role: "assistant",
+ content: [],
+ model: chunk.model,
+ stop_reason: null,
+ stop_sequence: null,
+ usage: {
+ input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
+ output_tokens: 0,
+ ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
+ }
+ }
+ });
+ state$1.messageStartSent = true;
+ }
+ if (delta.content) {
+ if (isToolBlockOpen(state$1)) {
+ events$1.push({
+ type: "content_block_stop",
+ index: state$1.contentBlockIndex
+ });
+ state$1.contentBlockIndex++;
+ state$1.contentBlockOpen = false;
+ }
+ if (!state$1.contentBlockOpen) {
+ events$1.push({
+ type: "content_block_start",
+ index: state$1.contentBlockIndex,
+ content_block: {
+ type: "text",
+ text: ""
+ }
+ });
+ state$1.contentBlockOpen = true;
+ }
+ events$1.push({
+ type: "content_block_delta",
+ index: state$1.contentBlockIndex,
+ delta: {
+ type: "text_delta",
+ text: delta.content
+ }
+ });
+ }
+ if (delta.tool_calls) for (const toolCall of delta.tool_calls) {
+ if (toolCall.id && toolCall.function?.name) {
+ if (state$1.contentBlockOpen) {
+ events$1.push({
+ type: "content_block_stop",
+ index: state$1.contentBlockIndex
+ });
+ state$1.contentBlockIndex++;
+ state$1.contentBlockOpen = false;
+ }
+ const anthropicBlockIndex = state$1.contentBlockIndex;
+ state$1.toolCalls[toolCall.index] = {
+ id: toolCall.id,
+ name: toolCall.function.name,
+ anthropicBlockIndex
+ };
+ events$1.push({
+ type: "content_block_start",
+ index: anthropicBlockIndex,
+ content_block: {
+ type: "tool_use",
+ id: toolCall.id,
+ name: toolCall.function.name,
+ input: {}
+ }
+ });
+ state$1.contentBlockOpen = true;
+ }
+ if (toolCall.function?.arguments) {
+ const toolCallInfo = state$1.toolCalls[toolCall.index];
+ if (toolCallInfo) events$1.push({
+ type: "content_block_delta",
+ index: toolCallInfo.anthropicBlockIndex,
+ delta: {
+ type: "input_json_delta",
+ partial_json: toolCall.function.arguments
+ }
+ });
+ }
+ }
+ if (choice.finish_reason) {
+ if (state$1.contentBlockOpen) {
+ events$1.push({
+ type: "content_block_stop",
+ index: state$1.contentBlockIndex
+ });
+ state$1.contentBlockOpen = false;
+ }
+ events$1.push({
+ type: "message_delta",
+ delta: {
+ stop_reason: mapOpenAIStopReasonToAnthropic(choice.finish_reason),
+ stop_sequence: null
+ },
+ usage: {
+ input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
+ output_tokens: chunk.usage?.completion_tokens ?? 0,
+ ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
+ }
+ }, { type: "message_stop" });
+ }
+ return events$1;
+ }
+
+ //#endregion
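+ // For a plain text completion, the translator above emits Anthropic SSE
+ // events in this order (sketch):
+ //   message_start -> content_block_start -> content_block_delta (repeated)
+ //   -> content_block_stop -> message_delta -> message_stop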
+ //#region src/routes/messages/handler.ts
+ async function handleCompletion(c) {
+ await checkRateLimit(state);
+ const anthropicPayload = await c.req.json();
+ consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload));
+ const openAIPayload = translateToOpenAI(anthropicPayload);
+ consola.debug("Translated OpenAI request payload:", JSON.stringify(openAIPayload));
+ if (state.manualApprove) await awaitApproval();
+ if (isAzureOpenAIModel(openAIPayload.model)) {
+ if (!state.azureOpenAIConfig) return c.json({ error: "Azure OpenAI not configured" }, 500);
+ consola.info(`Routing to Azure OpenAI -> ${openAIPayload.model}`);
+ const response$1 = await createAzureOpenAIChatCompletions(state.azureOpenAIConfig, openAIPayload);
+ if (isNonStreaming(response$1)) {
+ consola.debug("Non-streaming response from Azure OpenAI:", JSON.stringify(response$1).slice(-400));
+ const anthropicResponse = translateToAnthropic(response$1);
+ consola.debug("Translated Anthropic response:", JSON.stringify(anthropicResponse));
+ return c.json(anthropicResponse);
+ }
+ consola.debug("Streaming response from Azure OpenAI");
+ return streamSSE(c, async (stream) => {
+ const streamState = {
+ messageStartSent: false,
+ contentBlockIndex: 0,
+ contentBlockOpen: false,
+ toolCalls: {}
+ };
+ for await (const rawEvent of response$1) {
+ consola.debug("Azure OpenAI raw stream event:", JSON.stringify(rawEvent));
+ if (rawEvent.data === "[DONE]") break;
+ if (!rawEvent.data) continue;
+ const chunk = JSON.parse(rawEvent.data);
+ const events$1 = translateChunkToAnthropicEvents(chunk, streamState);
+ for (const event of events$1) {
+ consola.debug("Translated Anthropic event:", JSON.stringify(event));
+ await stream.writeSSE({
+ event: event.type,
+ data: JSON.stringify(event)
+ });
+ }
+ }
+ });
+ }
+ consola.info(`Routing to Copilot -> ${openAIPayload.model}`);
+ const response = await createChatCompletions(openAIPayload);
+ if (isNonStreaming(response)) {
+ consola.debug("Non-streaming response from Copilot:", JSON.stringify(response).slice(-400));
+ const anthropicResponse = translateToAnthropic(response);
+ consola.debug("Translated Anthropic response:", JSON.stringify(anthropicResponse));
+ return c.json(anthropicResponse);
+ }
+ consola.debug("Streaming response from Copilot");
+ return streamSSE(c, async (stream) => {
+ const streamState = {
+ messageStartSent: false,
+ contentBlockIndex: 0,
+ contentBlockOpen: false,
+ toolCalls: {}
+ };
+ for await (const rawEvent of response) {
+ consola.debug("Copilot raw stream event:", JSON.stringify(rawEvent));
+ if (rawEvent.data === "[DONE]") break;
+ if (!rawEvent.data) continue;
+ const chunk = JSON.parse(rawEvent.data);
+ const events$1 = translateChunkToAnthropicEvents(chunk, streamState);
+ for (const event of events$1) {
+ consola.debug("Translated Anthropic event:", JSON.stringify(event));
+ await stream.writeSSE({
+ event: event.type,
+ data: JSON.stringify(event)
+ });
+ }
+ }
+ });
+ }
+ const isNonStreaming = (response) => Object.hasOwn(response, "choices");
+
+ //#endregion
+ //#region src/routes/messages/route.ts
+ const messageRoutes = new Hono();
+ messageRoutes.post("/", async (c) => {
+ try {
+ return await handleCompletion(c);
+ } catch (error) {
+ return await forwardError(c, error);
+ }
+ });
+ messageRoutes.post("/count_tokens", async (c) => {
+ try {
+ return await handleCountTokens(c);
+ } catch (error) {
+ return await forwardError(c, error);
+ }
+ });
+
+ //#endregion
+ //#region src/routes/models/route.ts
+ const modelRoutes = new Hono();
+ modelRoutes.get("/", async (c) => {
+ try {
+ if (!state.models) await cacheModels();
+ const copilotModels = state.models?.data.map((model) => ({
+ id: model.id,
+ object: "model",
+ type: "model",
+ created: 0,
+ created_at: (/* @__PURE__ */ new Date(0)).toISOString(),
+ owned_by: model.vendor,
+ display_name: model.name
+ })) ?? [];
+ const azureModels = state.azureOpenAIDeployments?.map((deployment) => ({
+ id: deployment.id,
+ object: "model",
+ type: "model",
+ created: deployment.created,
+ created_at: (/* @__PURE__ */ new Date(deployment.created * 1e3)).toISOString(),
+ owned_by: deployment.owned_by,
+ display_name: `${deployment.deploymentName} (${deployment.model})`
+ })) ?? [];
+ const allModels = [...copilotModels, ...azureModels];
+ return c.json({
+ object: "list",
+ data: allModels,
+ has_more: false
+ });
+ } catch (error) {
+ return await forwardError(c, error);
+ }
+ });
+
+ //#endregion
+ //#region src/routes/token/route.ts
+ const tokenRoute = new Hono();
+ tokenRoute.get("/", (c) => {
+ try {
+ return c.json({ token: state.copilotToken });
+ } catch (error) {
+ console.error("Error fetching token:", error);
+ return c.json({
+ error: "Failed to fetch token",
+ token: null
+ }, 500);
+ }
+ });
+
+ //#endregion
+ //#region src/routes/usage/route.ts
+ const usageRoute = new Hono();
+ usageRoute.get("/", async (c) => {
+ try {
+ const usage = await getCopilotUsage();
+ return c.json(usage);
+ } catch (error) {
+ console.error("Error fetching Copilot usage:", error);
+ return c.json({ error: "Failed to fetch Copilot usage" }, 500);
+ }
+ });
+
+ //#endregion
+ //#region src/server.ts
+ const server = new Hono();
+ server.use(logger());
+ server.use(cors());
+ server.get("/", (c) => c.text("Server running"));
+ server.route("/chat/completions", completionRoutes);
+ server.route("/models", modelRoutes);
+ server.route("/embeddings", embeddingRoutes);
+ server.route("/usage", usageRoute);
+ server.route("/token", tokenRoute);
+ server.route("/v1/chat/completions", completionRoutes);
+ server.route("/v1/models", modelRoutes);
+ server.route("/v1/embeddings", embeddingRoutes);
+ server.route("/v1/messages", messageRoutes);
+
+ //#endregion
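+ // Smoke-test sketch against the routes mounted above (4141 is the start
+ // command's default port below; the model id is an assumed example, list
+ // real ids via /v1/models first):
+ //   curl http://localhost:4141/v1/models
+ //   curl http://localhost:4141/v1/chat/completions -H "content-type: application/json" \
+ //     -d '{"model":"gpt-4o","messages":[{"role":"user","content":"hi"}]}'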
+ //#region src/start.ts
+ async function runServer(options) {
+ if (options.proxyEnv) initProxyFromEnv();
+ if (options.verbose) {
+ consola.level = 5;
+ consola.info("Verbose logging enabled");
+ }
+ state.accountType = options.accountType;
+ if (options.accountType !== "individual") consola.info(`Using ${options.accountType} plan GitHub account`);
+ state.manualApprove = options.manual;
+ state.rateLimitSeconds = options.rateLimit;
+ state.rateLimitWait = options.rateLimitWait;
+ state.showToken = options.showToken;
+ await ensurePaths();
+ await cacheVSCodeVersion();
+ if (options.githubToken) {
+ state.githubToken = options.githubToken;
+ consola.info("Using provided GitHub token");
+ } else await setupGitHubToken();
+ await setupCopilotToken();
+ await cacheModels();
+ await setupAzureOpenAI();
+ const copilotModelIds = state.models?.data.map((model) => model.id) ?? [];
+ const azureModelIds = state.azureOpenAIDeployments?.map((deployment) => deployment.id) ?? [];
+ const allModelIds = [...copilotModelIds, ...azureModelIds];
+ consola.info(`Available models: \n${allModelIds.map((id) => `- ${id}`).join("\n")}`);
+ const serverUrl = `http://localhost:${options.port}`;
+ if (options.claudeCode) {
+ invariant(state.models, "Models should be loaded by now");
+ const selectedModel = await consola.prompt("Select a model to use with Claude Code", {
+ type: "select",
+ options: allModelIds
+ });
+ const selectedSmallModel = await consola.prompt("Select a small model to use with Claude Code", {
+ type: "select",
+ options: allModelIds
+ });
+ const command = generateEnvScript({
+ ANTHROPIC_BASE_URL: serverUrl,
+ ANTHROPIC_AUTH_TOKEN: "dummy",
+ ANTHROPIC_MODEL: selectedModel,
+ ANTHROPIC_DEFAULT_SONNET_MODEL: selectedModel,
+ ANTHROPIC_SMALL_FAST_MODEL: selectedSmallModel,
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: selectedSmallModel,
+ DISABLE_NON_ESSENTIAL_MODEL_CALLS: "1",
+ CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: "1"
+ }, "claude");
+ try {
+ clipboard.writeSync(command);
+ consola.success("Copied Claude Code command to clipboard!");
+ } catch {
+ consola.warn("Failed to copy to clipboard. Here is the Claude Code command:");
+ consola.log(command);
+ }
+ }
+ consola.box(`🌐 Usage Viewer: https://ericc-ch.github.io/copilot-api?endpoint=${serverUrl}/usage`);
+ serve({
+ fetch: server.fetch,
+ port: options.port
+ });
+ }
+ const start = defineCommand({
+ meta: {
+ name: "start",
+ description: "Start the Copilot API server"
+ },
+ args: {
+ port: {
+ alias: "p",
+ type: "string",
+ default: "4141",
+ description: "Port to listen on"
+ },
+ verbose: {
+ alias: "v",
+ type: "boolean",
+ default: false,
+ description: "Enable verbose logging"
+ },
+ "account-type": {
+ alias: "a",
+ type: "string",
+ default: "individual",
+ description: "Account type to use (individual, business, enterprise)"
+ },
+ manual: {
+ type: "boolean",
+ default: false,
+ description: "Enable manual request approval"
+ },
+ "rate-limit": {
+ alias: "r",
+ type: "string",
+ description: "Rate limit in seconds between requests"
+ },
+ wait: {
+ alias: "w",
+ type: "boolean",
+ default: false,
+ description: "Wait instead of error when rate limit is hit. Has no effect if rate limit is not set"
+ },
+ "github-token": {
+ alias: "g",
+ type: "string",
+ description: "Provide GitHub token directly (must be generated using the `auth` subcommand)"
+ },
+ "claude-code": {
+ alias: "c",
+ type: "boolean",
+ default: false,
+ description: "Generate a command to launch Claude Code with Copilot API config"
+ },
+ "show-token": {
+ type: "boolean",
+ default: false,
+ description: "Show GitHub and Copilot tokens on fetch and refresh"
+ },
+ "proxy-env": {
+ type: "boolean",
+ default: false,
+ description: "Initialize proxy from environment variables"
+ }
+ },
+ run({ args }) {
+ const rateLimitRaw = args["rate-limit"];
+ const rateLimit = rateLimitRaw === void 0 ? void 0 : Number.parseInt(rateLimitRaw, 10);
+ return runServer({
+ port: Number.parseInt(args.port, 10),
+ verbose: args.verbose,
+ accountType: args["account-type"],
+ manual: args.manual,
+ rateLimit,
+ rateLimitWait: args.wait,
+ githubToken: args["github-token"],
+ claudeCode: args["claude-code"],
+ showToken: args["show-token"],
+ proxyEnv: args["proxy-env"]
+ });
+ }
+ });
+
+ //#endregion
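+ // Example invocation of the start command using only the flags declared above:
+ //   copilot-api start --port 4141 --rate-limit 30 --wait --verbose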
+ //#region src/main.ts
+ const main = defineCommand({
+ meta: {
+ name: "copilot-api",
+ description: "A wrapper around the GitHub Copilot API that makes it OpenAI-compatible and usable with other tools."
+ },
+ subCommands: {
+ auth,
+ start,
+ "check-usage": checkUsage,
+ debug
+ }
+ });
+ await runMain(main);
+
+ //#endregion
+ export { };
+ //# sourceMappingURL=main.js.map