github-router 0.1.0

package/dist/main.js ADDED
@@ -0,0 +1,1735 @@
1
+ #!/usr/bin/env node
2
+ import { defineCommand, runMain } from "citty";
3
+ import consola from "consola";
4
+ import fs from "node:fs/promises";
5
+ import os from "node:os";
6
+ import path from "node:path";
7
+ import { randomUUID } from "node:crypto";
8
+ import clipboard from "clipboardy";
9
+ import { serve } from "srvx";
10
+ import invariant from "tiny-invariant";
11
+ import { getProxyForUrl } from "proxy-from-env";
12
+ import { Agent, ProxyAgent, setGlobalDispatcher } from "undici";
13
+ import { execSync } from "node:child_process";
14
+ import process$1 from "node:process";
15
+ import { Hono } from "hono";
16
+ import { cors } from "hono/cors";
17
+ import { logger } from "hono/logger";
18
+ import { streamSSE } from "hono/streaming";
19
+ import { events } from "fetch-event-stream";
20
+
21
+ //#region src/lib/paths.ts
22
+ const APP_DIR = path.join(os.homedir(), ".local", "share", "openroute");
23
+ const GITHUB_TOKEN_PATH = path.join(APP_DIR, "github_token");
24
+ const PATHS = {
25
+ APP_DIR,
26
+ GITHUB_TOKEN_PATH
27
+ };
28
+ async function ensurePaths() {
29
+ await fs.mkdir(PATHS.APP_DIR, { recursive: true });
30
+ await ensureFile(PATHS.GITHUB_TOKEN_PATH);
31
+ }
32
+ async function ensureFile(filePath) {
33
+ try {
34
+ await fs.access(filePath, fs.constants.W_OK);
35
+ } catch {
36
+ await fs.writeFile(filePath, "");
37
+ await fs.chmod(filePath, 384);
38
+ }
39
+ }
40
+
41
+ //#endregion
42
+ //#region src/lib/state.ts
43
+ const state = {
44
+ accountType: "individual",
45
+ manualApprove: false,
46
+ rateLimitWait: false,
47
+ showToken: false
48
+ };
49
+
50
+ //#endregion
51
+ //#region src/lib/api-config.ts
52
+ const standardHeaders = () => ({
53
+ "content-type": "application/json",
54
+ accept: "application/json"
55
+ });
56
+ const COPILOT_VERSION = "0.26.7";
57
+ const EDITOR_PLUGIN_VERSION = `copilot-chat/${COPILOT_VERSION}`;
58
+ const USER_AGENT = `GitHubCopilotChat/${COPILOT_VERSION}`;
59
+ const API_VERSION = "2025-04-01";
60
+ const copilotBaseUrl = (state$1) => state$1.accountType === "individual" ? "https://api.githubcopilot.com" : `https://api.${state$1.accountType}.githubcopilot.com`;
61
+ const copilotHeaders = (state$1, vision = false, integrationId = "vscode-chat") => {
62
+ const headers = {
63
+ Authorization: `Bearer ${state$1.copilotToken}`,
64
+ "content-type": standardHeaders()["content-type"],
65
+ "copilot-integration-id": integrationId,
66
+ "editor-version": `vscode/${state$1.vsCodeVersion}`,
67
+ "editor-plugin-version": EDITOR_PLUGIN_VERSION,
68
+ "user-agent": USER_AGENT,
69
+ "openai-intent": "conversation-panel",
70
+ "x-github-api-version": API_VERSION,
71
+ "x-request-id": randomUUID(),
72
+ "x-vscode-user-agent-library-version": "electron-fetch"
73
+ };
74
+ if (vision) headers["copilot-vision-request"] = "true";
75
+ return headers;
76
+ };
77
+ const GITHUB_API_BASE_URL = "https://api.github.com";
78
+ const githubHeaders = (state$1) => ({
79
+ ...standardHeaders(),
80
+ authorization: `token ${state$1.githubToken}`,
81
+ "editor-version": `vscode/${state$1.vsCodeVersion}`,
82
+ "editor-plugin-version": EDITOR_PLUGIN_VERSION,
83
+ "user-agent": USER_AGENT,
84
+ "x-github-api-version": API_VERSION,
85
+ "x-vscode-user-agent-library-version": "electron-fetch"
86
+ });
87
+ const GITHUB_BASE_URL = "https://github.com";
88
+ const GITHUB_CLIENT_ID = "Iv1.b507a08c87ecfe98";
89
+ const GITHUB_APP_SCOPES = ["read:user"].join(" ");
90
+
91
+ //#endregion
92
+ //#region src/lib/error.ts
93
+ var HTTPError = class extends Error {
94
+ response;
95
+ constructor(message, response) {
96
+ super(message);
97
+ this.response = response;
98
+ }
99
+ };
100
+ async function forwardError(c, error) {
101
+ consola.error("Error occurred:", error);
102
+ if (error instanceof HTTPError) {
103
+ const errorText = await error.response.text();
104
+ let errorJson;
105
+ try {
106
+ errorJson = JSON.parse(errorText);
107
+ } catch {
108
+ errorJson = errorText;
109
+ }
110
+ consola.error("HTTP error:", errorJson);
111
+ return c.json({ error: {
112
+ message: errorText,
113
+ type: "error"
114
+ } }, error.response.status);
115
+ }
116
+ return c.json({ error: {
117
+ message: error.message,
118
+ type: "error"
119
+ } }, 500);
120
+ }
121
+
122
+ //#endregion
123
+ //#region src/services/github/get-copilot-token.ts
124
+ const getCopilotToken = async () => {
125
+ const response = await fetch(`${GITHUB_API_BASE_URL}/copilot_internal/v2/token`, { headers: githubHeaders(state) });
126
+ if (!response.ok) throw new HTTPError("Failed to get Copilot token", response);
127
+ return await response.json();
128
+ };
129
+
130
+ //#endregion
131
+ //#region src/services/github/get-device-code.ts
132
+ async function getDeviceCode() {
133
+ const response = await fetch(`${GITHUB_BASE_URL}/login/device/code`, {
134
+ method: "POST",
135
+ headers: standardHeaders(),
136
+ body: JSON.stringify({
137
+ client_id: GITHUB_CLIENT_ID,
138
+ scope: GITHUB_APP_SCOPES
139
+ })
140
+ });
141
+ if (!response.ok) throw new HTTPError("Failed to get device code", response);
142
+ return await response.json();
143
+ }
144
+
145
+ //#endregion
146
+ //#region src/services/github/get-user.ts
147
+ async function getGitHubUser() {
148
+ const response = await fetch(`${GITHUB_API_BASE_URL}/user`, { headers: {
149
+ authorization: `token ${state.githubToken}`,
150
+ ...standardHeaders()
151
+ } });
152
+ if (!response.ok) throw new HTTPError("Failed to get GitHub user", response);
153
+ return await response.json();
154
+ }
155
+
156
+ //#endregion
157
+ //#region src/services/copilot/get-models.ts
158
+ const getModels = async () => {
159
+ const response = await fetch(`${copilotBaseUrl(state)}/models`, { headers: copilotHeaders(state) });
160
+ if (!response.ok) throw new HTTPError("Failed to get models", response);
161
+ return await response.json();
162
+ };
163
+
164
+ //#endregion
165
+ //#region src/services/get-vscode-version.ts
166
+ const FALLBACK = "1.104.3";
167
+ async function getVSCodeVersion() {
168
+ const controller = new AbortController();
169
+ const timeout = setTimeout(() => {
170
+ controller.abort();
171
+ }, 5e3);
172
+ try {
173
+ const match = (await (await fetch("https://aur.archlinux.org/cgit/aur.git/plain/PKGBUILD?h=visual-studio-code-bin", { signal: controller.signal })).text()).match(/pkgver=([0-9.]+)/);
174
+ if (match) return match[1];
175
+ return FALLBACK;
176
+ } catch {
177
+ return FALLBACK;
178
+ } finally {
179
+ clearTimeout(timeout);
180
+ }
181
+ }
182
+ await getVSCodeVersion();
183
+
184
+ //#endregion
185
+ //#region src/lib/utils.ts
186
+ const sleep = (ms) => new Promise((resolve) => {
187
+ setTimeout(resolve, ms);
188
+ });
189
+ const isNullish = (value) => value === null || value === void 0;
190
+ async function cacheModels() {
191
+ state.models = await getModels();
192
+ }
193
+ const cacheVSCodeVersion = async () => {
194
+ const response = await getVSCodeVersion();
195
+ state.vsCodeVersion = response;
196
+ consola.info(`Using VSCode version: ${response}`);
197
+ };
198
+
199
+ //#endregion
200
+ //#region src/services/github/poll-access-token.ts
201
+ async function pollAccessToken(deviceCode) {
202
+ const sleepDuration = (deviceCode.interval + 1) * 1e3;
203
+ consola.debug(`Polling access token with interval of ${sleepDuration}ms`);
204
+ while (true) {
205
+ const response = await fetch(`${GITHUB_BASE_URL}/login/oauth/access_token`, {
206
+ method: "POST",
207
+ headers: standardHeaders(),
208
+ body: JSON.stringify({
209
+ client_id: GITHUB_CLIENT_ID,
210
+ device_code: deviceCode.device_code,
211
+ grant_type: "urn:ietf:params:oauth:grant-type:device_code"
212
+ })
213
+ });
214
+ if (!response.ok) {
215
+ await sleep(sleepDuration);
216
+ consola.error("Failed to poll access token:", await response.text());
217
+ continue;
218
+ }
219
+ const json = await response.json();
220
+ consola.debug("Polling access token response:", json);
221
+ const { access_token } = json;
222
+ if (access_token) return access_token;
223
+ else await sleep(sleepDuration);
224
+ }
225
+ }
226
+
227
+ //#endregion
228
+ //#region src/lib/token.ts
229
+ const readGithubToken = () => fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8");
230
+ const writeGithubToken = (token) => fs.writeFile(PATHS.GITHUB_TOKEN_PATH, token);
231
+ const setupCopilotToken = async () => {
232
+ const { token, refresh_in } = await getCopilotToken();
233
+ state.copilotToken = token;
234
+ consola.debug("GitHub Copilot Token fetched successfully!");
235
+ if (state.showToken) consola.info("Copilot token:", token);
236
+ const refreshInterval = (refresh_in - 60) * 1e3;
237
+ setInterval(async () => {
238
+ consola.debug("Refreshing Copilot token");
239
+ try {
240
+ const { token: token$1 } = await getCopilotToken();
241
+ state.copilotToken = token$1;
242
+ consola.debug("Copilot token refreshed");
243
+ if (state.showToken) consola.info("Refreshed Copilot token:", token$1);
244
+ } catch (error) {
245
+ consola.error("Failed to refresh Copilot token:", error);
246
+ }
247
+ }, refreshInterval);
248
+ };
249
+ async function setupGitHubToken(options) {
250
+ try {
251
+ const githubToken = await readGithubToken();
252
+ if (githubToken && !options?.force) {
253
+ state.githubToken = githubToken;
254
+ if (state.showToken) consola.info("GitHub token:", githubToken);
255
+ await logUser();
256
+ return;
257
+ }
258
+ consola.info("Not logged in, getting new access token");
259
+ const response = await getDeviceCode();
260
+ consola.debug("Device code response:", response);
261
+ consola.info(`Please enter the code "${response.user_code}" in ${response.verification_uri}`);
262
+ const token = await pollAccessToken(response);
263
+ await writeGithubToken(token);
264
+ state.githubToken = token;
265
+ if (state.showToken) consola.info("GitHub token:", token);
266
+ await logUser();
267
+ } catch (error) {
268
+ if (error instanceof HTTPError) {
269
+ consola.error("Failed to get GitHub token:", await error.response.json());
270
+ throw error;
271
+ }
272
+ consola.error("Failed to get GitHub token:", error);
273
+ throw error;
274
+ }
275
+ }
276
+ async function logUser() {
277
+ const user = await getGitHubUser();
278
+ consola.info(`Logged in as ${user.login}`);
279
+ }
280
+
281
+ //#endregion
282
+ //#region src/auth.ts
283
+ async function runAuth(options) {
284
+ if (options.verbose) {
285
+ consola.level = 5;
286
+ consola.info("Verbose logging enabled");
287
+ }
288
+ state.showToken = options.showToken;
289
+ await ensurePaths();
290
+ await setupGitHubToken({ force: true });
291
+ consola.success("GitHub token written to", PATHS.GITHUB_TOKEN_PATH);
292
+ }
293
+ const auth = defineCommand({
294
+ meta: {
295
+ name: "auth",
296
+ description: "Run GitHub auth flow without running the server"
297
+ },
298
+ args: {
299
+ verbose: {
300
+ alias: "v",
301
+ type: "boolean",
302
+ default: false,
303
+ description: "Enable verbose logging"
304
+ },
305
+ "show-token": {
306
+ type: "boolean",
307
+ default: false,
308
+ description: "Show GitHub token on auth"
309
+ }
310
+ },
311
+ run({ args }) {
312
+ return runAuth({
313
+ verbose: args.verbose,
314
+ showToken: args["show-token"]
315
+ });
316
+ }
317
+ });
318
+
319
+ //#endregion
320
+ //#region src/services/github/get-copilot-usage.ts
321
+ const getCopilotUsage = async () => {
322
+ const response = await fetch(`${GITHUB_API_BASE_URL}/copilot_internal/user`, { headers: githubHeaders(state) });
323
+ if (!response.ok) throw new HTTPError("Failed to get Copilot usage", response);
324
+ return await response.json();
325
+ };
326
+
327
+ //#endregion
328
+ //#region src/check-usage.ts
329
+ const checkUsage = defineCommand({
330
+ meta: {
331
+ name: "check-usage",
332
+ description: "Show current GitHub Copilot usage/quota information"
333
+ },
334
+ async run() {
335
+ await ensurePaths();
336
+ await setupGitHubToken();
337
+ try {
338
+ const usage = await getCopilotUsage();
339
+ const premium = usage.quota_snapshots.premium_interactions;
340
+ const premiumTotal = premium.entitlement;
341
+ const premiumUsed = premiumTotal - premium.remaining;
342
+ const premiumPercentUsed = premiumTotal > 0 ? premiumUsed / premiumTotal * 100 : 0;
343
+ const premiumPercentRemaining = premium.percent_remaining;
344
+ function summarizeQuota(name, snap) {
345
+ if (!snap) return `${name}: N/A`;
346
+ const total = snap.entitlement;
347
+ const used = total - snap.remaining;
348
+ const percentUsed = total > 0 ? used / total * 100 : 0;
349
+ const percentRemaining = snap.percent_remaining;
350
+ return `${name}: ${used}/${total} used (${percentUsed.toFixed(1)}% used, ${percentRemaining.toFixed(1)}% remaining)`;
351
+ }
352
+ const premiumLine = `Premium: ${premiumUsed}/${premiumTotal} used (${premiumPercentUsed.toFixed(1)}% used, ${premiumPercentRemaining.toFixed(1)}% remaining)`;
353
+ const chatLine = summarizeQuota("Chat", usage.quota_snapshots.chat);
354
+ const completionsLine = summarizeQuota("Completions", usage.quota_snapshots.completions);
355
+ consola.box(`Copilot Usage (plan: ${usage.copilot_plan})\nQuota resets: ${usage.quota_reset_date}\n\nQuotas:\n ${premiumLine}\n ${chatLine}\n ${completionsLine}`);
356
+ } catch (err) {
357
+ consola.error("Failed to fetch Copilot usage:", err);
358
+ process.exit(1);
359
+ }
360
+ }
361
+ });
362
+
363
+ //#endregion
364
+ //#region src/debug.ts
365
+ async function getPackageVersion() {
366
+ try {
367
+ const packageJsonPath = new URL("../package.json", import.meta.url).pathname;
368
+ return JSON.parse(await fs.readFile(packageJsonPath)).version;
369
+ } catch {
370
+ return "unknown";
371
+ }
372
+ }
373
+ function getRuntimeInfo() {
374
+ const isBun = typeof Bun !== "undefined";
375
+ return {
376
+ name: isBun ? "bun" : "node",
377
+ version: isBun ? Bun.version : process.version.slice(1),
378
+ platform: os.platform(),
379
+ arch: os.arch()
380
+ };
381
+ }
382
+ async function checkTokenExists() {
383
+ try {
384
+ if (!(await fs.stat(PATHS.GITHUB_TOKEN_PATH)).isFile()) return false;
385
+ return (await fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8")).trim().length > 0;
386
+ } catch {
387
+ return false;
388
+ }
389
+ }
390
+ async function getDebugInfo() {
391
+ const [version, tokenExists] = await Promise.all([getPackageVersion(), checkTokenExists()]);
392
+ return {
393
+ version,
394
+ runtime: getRuntimeInfo(),
395
+ paths: {
396
+ APP_DIR: PATHS.APP_DIR,
397
+ GITHUB_TOKEN_PATH: PATHS.GITHUB_TOKEN_PATH
398
+ },
399
+ tokenExists
400
+ };
401
+ }
402
+ function printDebugInfoPlain(info) {
403
+ consola.info(`openroute debug
404
+
405
+ Version: ${info.version}
406
+ Runtime: ${info.runtime.name} ${info.runtime.version} (${info.runtime.platform} ${info.runtime.arch})
407
+
408
+ Paths:
409
+ - APP_DIR: ${info.paths.APP_DIR}
410
+ - GITHUB_TOKEN_PATH: ${info.paths.GITHUB_TOKEN_PATH}
411
+
412
+ Token exists: ${info.tokenExists ? "Yes" : "No"}`);
413
+ }
414
+ function printDebugInfoJson(info) {
415
+ console.log(JSON.stringify(info, null, 2));
416
+ }
417
+ async function runDebug(options) {
418
+ const debugInfo = await getDebugInfo();
419
+ if (options.json) printDebugInfoJson(debugInfo);
420
+ else printDebugInfoPlain(debugInfo);
421
+ }
422
+ const debug = defineCommand({
423
+ meta: {
424
+ name: "debug",
425
+ description: "Print debug information about the application"
426
+ },
427
+ args: { json: {
428
+ type: "boolean",
429
+ default: false,
430
+ description: "Output debug information as JSON"
431
+ } },
432
+ run({ args }) {
433
+ return runDebug({ json: args.json });
434
+ }
435
+ });
436
+
437
+ //#endregion
438
+ //#region src/lib/proxy.ts
439
+ function initProxyFromEnv() {
440
+ if (typeof Bun !== "undefined") return;
441
+ try {
442
+ const direct = new Agent();
443
+ const proxies = /* @__PURE__ */ new Map();
444
+ setGlobalDispatcher({
445
+ dispatch(options, handler) {
446
+ try {
447
+ const origin = typeof options.origin === "string" ? new URL(options.origin) : options.origin;
448
+ const raw = getProxyForUrl(origin.toString());
449
+ const proxyUrl = raw && raw.length > 0 ? raw : void 0;
450
+ if (!proxyUrl) {
451
+ consola.debug(`HTTP proxy bypass: ${origin.hostname}`);
452
+ return direct.dispatch(options, handler);
453
+ }
454
+ let agent = proxies.get(proxyUrl);
455
+ if (!agent) {
456
+ agent = new ProxyAgent(proxyUrl);
457
+ proxies.set(proxyUrl, agent);
458
+ }
459
+ let label = proxyUrl;
460
+ try {
461
+ const u = new URL(proxyUrl);
462
+ label = `${u.protocol}//${u.host}`;
463
+ } catch {}
464
+ consola.debug(`HTTP proxy route: ${origin.hostname} via ${label}`);
465
+ return agent.dispatch(options, handler);
466
+ } catch {
467
+ return direct.dispatch(options, handler);
468
+ }
469
+ },
470
+ close() {
471
+ return direct.close();
472
+ },
473
+ destroy() {
474
+ return direct.destroy();
475
+ }
476
+ });
477
+ consola.debug("HTTP proxy configured from environment (per-URL)");
478
+ } catch (err) {
479
+ consola.debug("Proxy setup skipped:", err);
480
+ }
481
+ }
482
+
483
+ //#endregion
484
+ //#region src/lib/shell.ts
485
+ function getShell() {
486
+ const { platform, ppid, env } = process$1;
487
+ if (platform === "win32") {
488
+ try {
489
+ if (execSync(`wmic process get ParentProcessId,Name | findstr "${ppid}"`, { stdio: "pipe" }).toString().toLowerCase().includes("powershell.exe")) return "powershell";
490
+ } catch {
491
+ return "cmd";
492
+ }
493
+ return "cmd";
494
+ } else {
495
+ const shellPath = env.SHELL;
496
+ if (shellPath) {
497
+ if (shellPath.endsWith("zsh")) return "zsh";
498
+ if (shellPath.endsWith("fish")) return "fish";
499
+ if (shellPath.endsWith("bash")) return "bash";
500
+ }
501
+ return "sh";
502
+ }
503
+ }
504
+ /**
505
+ * Generates a copy-pasteable script to set multiple environment variables
506
+ * and run a subsequent command.
507
+ * @param {EnvVars} envVars - An object of environment variables to set.
508
+ * @param {string} commandToRun - The command to run after setting the variables.
509
+ * @returns {string} The formatted script string.
510
+ */
511
+ function generateEnvScript(envVars, commandToRun = "") {
512
+ const shell = getShell();
513
+ const filteredEnvVars = Object.entries(envVars).filter(([, value]) => value !== void 0);
514
+ let commandBlock;
515
+ switch (shell) {
516
+ case "powershell":
517
+ commandBlock = filteredEnvVars.map(([key, value]) => `$env:${key} = ${value}`).join("; ");
518
+ break;
519
+ case "cmd":
520
+ commandBlock = filteredEnvVars.map(([key, value]) => `set ${key}=${value}`).join(" & ");
521
+ break;
522
+ case "fish":
523
+ commandBlock = filteredEnvVars.map(([key, value]) => `set -gx ${key} ${value}`).join("; ");
524
+ break;
525
+ default: {
526
+ const assignments = filteredEnvVars.map(([key, value]) => `${key}=${value}`).join(" ");
527
+ commandBlock = filteredEnvVars.length > 0 ? `export ${assignments}` : "";
528
+ break;
529
+ }
530
+ }
531
+ if (commandBlock && commandToRun) return `${commandBlock}${shell === "cmd" ? " & " : " && "}${commandToRun}`;
532
+ return commandBlock || commandToRun;
533
+ }
534
+
535
+ //#endregion
536
+ //#region src/lib/approval.ts
537
+ const awaitApproval = async () => {
538
+ if (!await consola.prompt(`Accept incoming request?`, { type: "confirm" })) throw new HTTPError("Request rejected", Response.json({ message: "Request rejected" }, { status: 403 }));
539
+ };
540
+
541
+ //#endregion
542
+ //#region src/lib/rate-limit.ts
543
+ async function checkRateLimit(state$1) {
544
+ if (state$1.rateLimitSeconds === void 0) return;
545
+ const now = Date.now();
546
+ if (!state$1.lastRequestTimestamp) {
547
+ state$1.lastRequestTimestamp = now;
548
+ return;
549
+ }
550
+ const elapsedSeconds = (now - state$1.lastRequestTimestamp) / 1e3;
551
+ if (elapsedSeconds > state$1.rateLimitSeconds) {
552
+ state$1.lastRequestTimestamp = now;
553
+ return;
554
+ }
555
+ const waitTimeSeconds = Math.ceil(state$1.rateLimitSeconds - elapsedSeconds);
556
+ if (!state$1.rateLimitWait) {
557
+ consola.warn(`Rate limit exceeded. Need to wait ${waitTimeSeconds} more seconds.`);
558
+ throw new HTTPError("Rate limit exceeded", Response.json({ message: "Rate limit exceeded" }, { status: 429 }));
559
+ }
560
+ const waitTimeMs = waitTimeSeconds * 1e3;
561
+ consola.warn(`Rate limit reached. Waiting ${waitTimeSeconds} seconds before proceeding...`);
562
+ await sleep(waitTimeMs);
563
+ state$1.lastRequestTimestamp = now;
564
+ consola.info("Rate limit wait completed, proceeding with request");
565
+ }
566
+
567
+ //#endregion
568
+ //#region src/lib/tokenizer.ts
569
+ const ENCODING_MAP = {
570
+ o200k_base: () => import("gpt-tokenizer/encoding/o200k_base"),
571
+ cl100k_base: () => import("gpt-tokenizer/encoding/cl100k_base"),
572
+ p50k_base: () => import("gpt-tokenizer/encoding/p50k_base"),
573
+ p50k_edit: () => import("gpt-tokenizer/encoding/p50k_edit"),
574
+ r50k_base: () => import("gpt-tokenizer/encoding/r50k_base")
575
+ };
576
+ const encodingCache = /* @__PURE__ */ new Map();
577
+ /**
578
+ * Calculate tokens for tool calls
579
+ */
580
+ const calculateToolCallsTokens = (toolCalls, encoder, constants) => {
581
+ let tokens = 0;
582
+ for (const toolCall of toolCalls) {
583
+ tokens += constants.funcInit;
584
+ tokens += encoder.encode(JSON.stringify(toolCall)).length;
585
+ }
586
+ tokens += constants.funcEnd;
587
+ return tokens;
588
+ };
589
+ /**
590
+ * Calculate tokens for content parts
591
+ */
592
+ const calculateContentPartsTokens = (contentParts, encoder) => {
593
+ let tokens = 0;
594
+ for (const part of contentParts) if (part.type === "image_url") tokens += encoder.encode(part.image_url.url).length + 85;
595
+ else if (part.text) tokens += encoder.encode(part.text).length;
596
+ return tokens;
597
+ };
598
+ /**
599
+ * Calculate tokens for a single message
600
+ */
601
+ const calculateMessageTokens = (message, encoder, constants) => {
602
+ const tokensPerMessage = 3;
603
+ const tokensPerName = 1;
604
+ let tokens = tokensPerMessage;
605
+ for (const [key, value] of Object.entries(message)) {
606
+ if (typeof value === "string") tokens += encoder.encode(value).length;
607
+ if (key === "name") tokens += tokensPerName;
608
+ if (key === "tool_calls") tokens += calculateToolCallsTokens(value, encoder, constants);
609
+ if (key === "content" && Array.isArray(value)) tokens += calculateContentPartsTokens(value, encoder);
610
+ }
611
+ return tokens;
612
+ };
613
+ /**
614
+ * Calculate tokens using custom algorithm
615
+ */
616
+ const calculateTokens = (messages, encoder, constants) => {
617
+ if (messages.length === 0) return 0;
618
+ let numTokens = 0;
619
+ for (const message of messages) numTokens += calculateMessageTokens(message, encoder, constants);
620
+ numTokens += 3;
621
+ return numTokens;
622
+ };
623
+ /**
624
+ * Get the corresponding encoder module based on encoding type
625
+ */
626
+ const getEncodeChatFunction = async (encoding) => {
627
+ if (encodingCache.has(encoding)) {
628
+ const cached = encodingCache.get(encoding);
629
+ if (cached) return cached;
630
+ }
631
+ const supportedEncoding = encoding;
632
+ if (!(supportedEncoding in ENCODING_MAP)) {
633
+ const fallbackModule = await ENCODING_MAP.o200k_base();
634
+ encodingCache.set(encoding, fallbackModule);
635
+ return fallbackModule;
636
+ }
637
+ const encodingModule = await ENCODING_MAP[supportedEncoding]();
638
+ encodingCache.set(encoding, encodingModule);
639
+ return encodingModule;
640
+ };
641
+ /**
642
+ * Get tokenizer type from model information
643
+ */
644
+ const getTokenizerFromModel = (model) => {
645
+ return model.capabilities.tokenizer || "o200k_base";
646
+ };
647
+ /**
648
+ * Get model-specific constants for token calculation
649
+ */
650
+ const getModelConstants = (model) => {
651
+ return model.id === "gpt-3.5-turbo" || model.id === "gpt-4" ? {
652
+ funcInit: 10,
653
+ propInit: 3,
654
+ propKey: 3,
655
+ enumInit: -3,
656
+ enumItem: 3,
657
+ funcEnd: 12
658
+ } : {
659
+ funcInit: 7,
660
+ propInit: 3,
661
+ propKey: 3,
662
+ enumInit: -3,
663
+ enumItem: 3,
664
+ funcEnd: 12
665
+ };
666
+ };
667
+ /**
668
+ * Calculate tokens for a single parameter
669
+ */
670
+ const calculateParameterTokens = (key, prop, context) => {
671
+ const { encoder, constants } = context;
672
+ let tokens = constants.propKey;
673
+ if (typeof prop !== "object" || prop === null) return tokens;
674
+ const param = prop;
675
+ const paramName = key;
676
+ const paramType = param.type || "string";
677
+ let paramDesc = param.description || "";
678
+ if (param.enum && Array.isArray(param.enum)) {
679
+ tokens += constants.enumInit;
680
+ for (const item of param.enum) {
681
+ tokens += constants.enumItem;
682
+ tokens += encoder.encode(String(item)).length;
683
+ }
684
+ }
685
+ if (paramDesc.endsWith(".")) paramDesc = paramDesc.slice(0, -1);
686
+ const line = `${paramName}:${paramType}:${paramDesc}`;
687
+ tokens += encoder.encode(line).length;
688
+ const excludedKeys = new Set([
689
+ "type",
690
+ "description",
691
+ "enum"
692
+ ]);
693
+ for (const propertyName of Object.keys(param)) if (!excludedKeys.has(propertyName)) {
694
+ const propertyValue = param[propertyName];
695
+ const propertyText = typeof propertyValue === "string" ? propertyValue : JSON.stringify(propertyValue);
696
+ tokens += encoder.encode(`${propertyName}:${propertyText}`).length;
697
+ }
698
+ return tokens;
699
+ };
700
+ /**
701
+ * Calculate tokens for function parameters
702
+ */
703
+ const calculateParametersTokens = (parameters, encoder, constants) => {
704
+ if (!parameters || typeof parameters !== "object") return 0;
705
+ const params = parameters;
706
+ let tokens = 0;
707
+ for (const [key, value] of Object.entries(params)) if (key === "properties") {
708
+ const properties = value;
709
+ if (Object.keys(properties).length > 0) {
710
+ tokens += constants.propInit;
711
+ for (const propKey of Object.keys(properties)) tokens += calculateParameterTokens(propKey, properties[propKey], {
712
+ encoder,
713
+ constants
714
+ });
715
+ }
716
+ } else {
717
+ const paramText = typeof value === "string" ? value : JSON.stringify(value);
718
+ tokens += encoder.encode(`${key}:${paramText}`).length;
719
+ }
720
+ return tokens;
721
+ };
722
+ /**
723
+ * Calculate tokens for a single tool
724
+ */
725
+ const calculateToolTokens = (tool, encoder, constants) => {
726
+ let tokens = constants.funcInit;
727
+ const func = tool.function;
728
+ const fName = func.name;
729
+ let fDesc = func.description || "";
730
+ if (fDesc.endsWith(".")) fDesc = fDesc.slice(0, -1);
731
+ const line = fName + ":" + fDesc;
732
+ tokens += encoder.encode(line).length;
733
+ if (typeof func.parameters === "object" && func.parameters !== null) tokens += calculateParametersTokens(func.parameters, encoder, constants);
734
+ return tokens;
735
+ };
736
+ /**
737
+ * Calculate token count for tools based on model
738
+ */
739
+ const numTokensForTools = (tools, encoder, constants) => {
740
+ let funcTokenCount = 0;
741
+ for (const tool of tools) funcTokenCount += calculateToolTokens(tool, encoder, constants);
742
+ funcTokenCount += constants.funcEnd;
743
+ return funcTokenCount;
744
+ };
745
+ /**
746
+ * Calculate the token count of messages, supporting multiple GPT encoders
747
+ */
748
+ const getTokenCount = async (payload, model) => {
749
+ const encoder = await getEncodeChatFunction(getTokenizerFromModel(model));
750
+ const simplifiedMessages = payload.messages;
751
+ const inputMessages = simplifiedMessages.filter((msg) => msg.role !== "assistant");
752
+ const outputMessages = simplifiedMessages.filter((msg) => msg.role === "assistant");
753
+ const constants = getModelConstants(model);
754
+ let inputTokens = calculateTokens(inputMessages, encoder, constants);
755
+ if (payload.tools && payload.tools.length > 0) inputTokens += numTokensForTools(payload.tools, encoder, constants);
756
+ const outputTokens = calculateTokens(outputMessages, encoder, constants);
757
+ return {
758
+ input: inputTokens,
759
+ output: outputTokens
760
+ };
761
+ };
762
+
763
+ //#endregion
764
+ //#region src/services/copilot/create-chat-completions.ts
765
+ const createChatCompletions = async (payload) => {
766
+ if (!state.copilotToken) throw new Error("Copilot token not found");
767
+ const enableVision = payload.messages.some((x) => typeof x.content !== "string" && x.content?.some((x$1) => x$1.type === "image_url"));
768
+ const isAgentCall = payload.messages.some((msg) => ["assistant", "tool"].includes(msg.role));
769
+ const headers = {
770
+ ...copilotHeaders(state, enableVision),
771
+ "X-Initiator": isAgentCall ? "agent" : "user"
772
+ };
773
+ const response = await fetch(`${copilotBaseUrl(state)}/chat/completions`, {
774
+ method: "POST",
775
+ headers,
776
+ body: JSON.stringify(payload)
777
+ });
778
+ if (!response.ok) {
779
+ consola.error("Failed to create chat completions", response);
780
+ throw new HTTPError("Failed to create chat completions", response);
781
+ }
782
+ if (payload.stream) return events(response);
783
+ return await response.json();
784
+ };
785
+
786
+ //#endregion
787
+ //#region src/routes/chat-completions/handler.ts
788
+ async function handleCompletion$1(c) {
789
+ await checkRateLimit(state);
790
+ let payload = await c.req.json();
791
+ consola.debug("Request payload:", JSON.stringify(payload).slice(-400));
792
+ const selectedModel = state.models?.data.find((model) => model.id === payload.model);
793
+ try {
794
+ if (selectedModel) {
795
+ const tokenCount = await getTokenCount(payload, selectedModel);
796
+ consola.info("Current token count:", tokenCount);
797
+ } else consola.warn("No model selected, skipping token count calculation");
798
+ } catch (error) {
799
+ consola.warn("Failed to calculate token count:", error);
800
+ }
801
+ if (state.manualApprove) await awaitApproval();
802
+ if (isNullish(payload.max_tokens)) {
803
+ payload = {
804
+ ...payload,
805
+ max_tokens: selectedModel?.capabilities.limits.max_output_tokens
806
+ };
807
+ consola.debug("Set max_tokens to:", JSON.stringify(payload.max_tokens));
808
+ }
809
+ const response = await createChatCompletions(payload);
810
+ if (isNonStreaming$2(response)) {
811
+ consola.debug("Non-streaming response:", JSON.stringify(response));
812
+ return c.json(response);
813
+ }
814
+ consola.debug("Streaming response");
815
+ return streamSSE(c, async (stream) => {
816
+ for await (const chunk of response) {
817
+ consola.debug("Streaming chunk:", JSON.stringify(chunk));
818
+ await stream.writeSSE(chunk);
819
+ }
820
+ });
821
+ }
822
+ const isNonStreaming$2 = (response) => Object.hasOwn(response, "choices");
823
+
824
+ //#endregion
825
+ //#region src/routes/chat-completions/route.ts
826
+ const completionRoutes = new Hono();
827
+ completionRoutes.post("/", async (c) => {
828
+ try {
829
+ return await handleCompletion$1(c);
830
+ } catch (error) {
831
+ return await forwardError(c, error);
832
+ }
833
+ });
834
+
835
+ //#endregion
836
+ //#region src/services/copilot/create-embeddings.ts
837
+ const createEmbeddings = async (payload) => {
838
+ if (!state.copilotToken) throw new Error("Copilot token not found");
839
+ const response = await fetch(`${copilotBaseUrl(state)}/embeddings`, {
840
+ method: "POST",
841
+ headers: copilotHeaders(state),
842
+ body: JSON.stringify(payload)
843
+ });
844
+ if (!response.ok) throw new HTTPError("Failed to create embeddings", response);
845
+ return await response.json();
846
+ };
847
+
848
+ //#endregion
849
+ //#region src/routes/embeddings/route.ts
850
+ const embeddingRoutes = new Hono();
851
+ embeddingRoutes.post("/", async (c) => {
852
+ try {
853
+ const response = await createEmbeddings(await c.req.json());
854
+ return c.json(response);
855
+ } catch (error) {
856
+ return await forwardError(c, error);
857
+ }
858
+ });
859
+
860
+ //#endregion
861
+ //#region src/routes/messages/utils.ts
862
+ function mapOpenAIStopReasonToAnthropic(finishReason) {
863
+ if (finishReason === null) return null;
864
+ return {
865
+ stop: "end_turn",
866
+ length: "max_tokens",
867
+ tool_calls: "tool_use",
868
+ content_filter: "end_turn"
869
+ }[finishReason];
870
+ }
871
+
872
+ //#endregion
873
+ //#region src/routes/messages/non-stream-translation.ts
874
+ function translateToOpenAI(payload) {
875
+ return {
876
+ model: translateModelName(payload.model),
877
+ messages: translateAnthropicMessagesToOpenAI(payload.messages, payload.system),
878
+ max_tokens: payload.max_tokens,
879
+ stop: payload.stop_sequences,
880
+ stream: payload.stream,
881
+ temperature: payload.temperature,
882
+ top_p: payload.top_p,
883
+ user: payload.metadata?.user_id,
884
+ tools: translateAnthropicToolsToOpenAI(payload.tools),
885
+ tool_choice: translateAnthropicToolChoiceToOpenAI(payload.tool_choice)
886
+ };
887
+ }
888
+ function translateModelName(model) {
889
+ if (model.startsWith("claude-sonnet-4-")) return model.replace(/^claude-sonnet-4-.*/, "claude-sonnet-4");
890
+ else if (model.startsWith("claude-opus-")) return model.replace(/^claude-opus-4-.*/, "claude-opus-4");
891
+ return model;
892
+ }
893
+ function translateAnthropicMessagesToOpenAI(anthropicMessages, system) {
894
+ const systemMessages = handleSystemPrompt(system);
895
+ const otherMessages = anthropicMessages.flatMap((message) => message.role === "user" ? handleUserMessage(message) : handleAssistantMessage(message));
896
+ return [...systemMessages, ...otherMessages];
897
+ }
898
+ function handleSystemPrompt(system) {
899
+ if (!system) return [];
900
+ if (typeof system === "string") return [{
901
+ role: "system",
902
+ content: system
903
+ }];
904
+ else return [{
905
+ role: "system",
906
+ content: system.map((block) => block.text).join("\n\n")
907
+ }];
908
+ }
909
+ function handleUserMessage(message) {
910
+ const newMessages = [];
911
+ if (Array.isArray(message.content)) {
912
+ const toolResultBlocks = message.content.filter((block) => block.type === "tool_result");
913
+ const otherBlocks = message.content.filter((block) => block.type !== "tool_result");
914
+ for (const block of toolResultBlocks) newMessages.push({
915
+ role: "tool",
916
+ tool_call_id: block.tool_use_id,
917
+ content: mapContent(block.content)
918
+ });
919
+ if (otherBlocks.length > 0) newMessages.push({
920
+ role: "user",
921
+ content: mapContent(otherBlocks)
922
+ });
923
+ } else newMessages.push({
924
+ role: "user",
925
+ content: mapContent(message.content)
926
+ });
927
+ return newMessages;
928
+ }
929
+ function handleAssistantMessage(message) {
930
+ if (!Array.isArray(message.content)) return [{
931
+ role: "assistant",
932
+ content: mapContent(message.content)
933
+ }];
934
+ const toolUseBlocks = message.content.filter((block) => block.type === "tool_use");
935
+ const textBlocks = message.content.filter((block) => block.type === "text");
936
+ const thinkingBlocks = message.content.filter((block) => block.type === "thinking");
937
+ const allTextContent = [...textBlocks.map((b) => b.text), ...thinkingBlocks.map((b) => b.thinking)].join("\n\n");
938
+ return toolUseBlocks.length > 0 ? [{
939
+ role: "assistant",
940
+ content: allTextContent || null,
941
+ tool_calls: toolUseBlocks.map((toolUse) => ({
942
+ id: toolUse.id,
943
+ type: "function",
944
+ function: {
945
+ name: toolUse.name,
946
+ arguments: JSON.stringify(toolUse.input)
947
+ }
948
+ }))
949
+ }] : [{
950
+ role: "assistant",
951
+ content: mapContent(message.content)
952
+ }];
953
+ }
954
+ function mapContent(content) {
955
+ if (typeof content === "string") return content;
956
+ if (!Array.isArray(content)) return null;
957
+ if (!content.some((block) => block.type === "image")) return content.filter((block) => block.type === "text" || block.type === "thinking").map((block) => block.type === "text" ? block.text : block.thinking).join("\n\n");
958
+ const contentParts = [];
959
+ for (const block of content) switch (block.type) {
960
+ case "text":
961
+ contentParts.push({
962
+ type: "text",
963
+ text: block.text
964
+ });
965
+ break;
966
+ case "thinking":
967
+ contentParts.push({
968
+ type: "text",
969
+ text: block.thinking
970
+ });
971
+ break;
972
+ case "image":
973
+ contentParts.push({
974
+ type: "image_url",
975
+ image_url: { url: `data:${block.source.media_type};base64,${block.source.data}` }
976
+ });
977
+ break;
978
+ }
979
+ return contentParts;
980
+ }
981
+ function translateAnthropicToolsToOpenAI(anthropicTools) {
982
+ if (!anthropicTools) return;
983
+ return anthropicTools.map((tool) => ({
984
+ type: "function",
985
+ function: {
986
+ name: tool.name,
987
+ description: tool.description,
988
+ parameters: tool.input_schema
989
+ }
990
+ }));
991
+ }
992
+ function translateAnthropicToolChoiceToOpenAI(anthropicToolChoice) {
993
+ if (!anthropicToolChoice) return;
994
+ switch (anthropicToolChoice.type) {
995
+ case "auto": return "auto";
996
+ case "any": return "required";
997
+ case "tool":
998
+ if (anthropicToolChoice.name) return {
999
+ type: "function",
1000
+ function: { name: anthropicToolChoice.name }
1001
+ };
1002
+ return;
1003
+ case "none": return "none";
1004
+ default: return;
1005
+ }
1006
+ }
1007
+ function translateToAnthropic(response) {
1008
+ const allTextBlocks = [];
1009
+ const allToolUseBlocks = [];
1010
+ let stopReason = null;
1011
+ stopReason = response.choices[0]?.finish_reason ?? stopReason;
1012
+ for (const choice of response.choices) {
1013
+ const textBlocks = getAnthropicTextBlocks(choice.message.content);
1014
+ const toolUseBlocks = getAnthropicToolUseBlocks(choice.message.tool_calls);
1015
+ allTextBlocks.push(...textBlocks);
1016
+ allToolUseBlocks.push(...toolUseBlocks);
1017
+ if (choice.finish_reason === "tool_calls" || stopReason === "stop") stopReason = choice.finish_reason;
1018
+ }
1019
+ return {
1020
+ id: response.id,
1021
+ type: "message",
1022
+ role: "assistant",
1023
+ model: response.model,
1024
+ content: [...allTextBlocks, ...allToolUseBlocks],
1025
+ stop_reason: mapOpenAIStopReasonToAnthropic(stopReason),
1026
+ stop_sequence: null,
1027
+ usage: {
1028
+ input_tokens: (response.usage?.prompt_tokens ?? 0) - (response.usage?.prompt_tokens_details?.cached_tokens ?? 0),
1029
+ output_tokens: response.usage?.completion_tokens ?? 0,
1030
+ ...response.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: response.usage.prompt_tokens_details.cached_tokens }
1031
+ }
1032
+ };
1033
+ }
1034
+ function getAnthropicTextBlocks(messageContent) {
1035
+ if (typeof messageContent === "string") return [{
1036
+ type: "text",
1037
+ text: messageContent
1038
+ }];
1039
+ if (Array.isArray(messageContent)) return messageContent.filter((part) => part.type === "text").map((part) => ({
1040
+ type: "text",
1041
+ text: part.text
1042
+ }));
1043
+ return [];
1044
+ }
1045
+ function getAnthropicToolUseBlocks(toolCalls) {
1046
+ if (!toolCalls) return [];
1047
+ return toolCalls.map((toolCall) => ({
1048
+ type: "tool_use",
1049
+ id: toolCall.id,
1050
+ name: toolCall.function.name,
1051
+ input: JSON.parse(toolCall.function.arguments)
1052
+ }));
1053
+ }
1054
+
1055
+ //#endregion
1056
+ //#region src/routes/messages/count-tokens-handler.ts
1057
+ /**
1058
+ * Handles token counting for Anthropic messages
1059
+ */
1060
+ async function handleCountTokens(c) {
1061
+ try {
1062
+ const anthropicBeta = c.req.header("anthropic-beta");
1063
+ const anthropicPayload = await c.req.json();
1064
+ const openAIPayload = translateToOpenAI(anthropicPayload);
1065
+ const selectedModel = state.models?.data.find((model) => model.id === anthropicPayload.model);
1066
+ if (!selectedModel) {
1067
+ consola.warn("Model not found, returning default token count");
1068
+ return c.json({ input_tokens: 1 });
1069
+ }
1070
+ const tokenCount = await getTokenCount(openAIPayload, selectedModel);
1071
+ if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
1072
+ let mcpToolExist = false;
1073
+ if (anthropicBeta?.startsWith("claude-code")) mcpToolExist = anthropicPayload.tools.some((tool) => tool.name.startsWith("mcp__"));
1074
+ if (!mcpToolExist) {
1075
+ if (anthropicPayload.model.startsWith("claude")) tokenCount.input = tokenCount.input + 346;
1076
+ else if (anthropicPayload.model.startsWith("grok")) tokenCount.input = tokenCount.input + 480;
1077
+ }
1078
+ }
1079
+ let finalTokenCount = tokenCount.input + tokenCount.output;
1080
+ if (anthropicPayload.model.startsWith("claude")) finalTokenCount = Math.round(finalTokenCount * 1.15);
1081
+ else if (anthropicPayload.model.startsWith("grok")) finalTokenCount = Math.round(finalTokenCount * 1.03);
1082
+ consola.info("Token count:", finalTokenCount);
1083
+ return c.json({ input_tokens: finalTokenCount });
1084
+ } catch (error) {
1085
+ consola.error("Error counting tokens:", error);
1086
+ return c.json({ input_tokens: 1 });
1087
+ }
1088
+ }
1089
+
1090
+ //#endregion
1091
+ //#region src/routes/messages/stream-translation.ts
1092
+ function isToolBlockOpen(state$1) {
1093
+ if (!state$1.contentBlockOpen) return false;
1094
+ return Object.values(state$1.toolCalls).some((tc) => tc.anthropicBlockIndex === state$1.contentBlockIndex);
1095
+ }
1096
+ function translateChunkToAnthropicEvents(chunk, state$1) {
1097
+ const events$1 = [];
1098
+ if (chunk.choices.length === 0) return events$1;
1099
+ const choice = chunk.choices[0];
1100
+ const { delta } = choice;
1101
+ if (!state$1.messageStartSent) {
1102
+ events$1.push({
1103
+ type: "message_start",
1104
+ message: {
1105
+ id: chunk.id,
1106
+ type: "message",
1107
+ role: "assistant",
1108
+ content: [],
1109
+ model: chunk.model,
1110
+ stop_reason: null,
1111
+ stop_sequence: null,
1112
+ usage: {
1113
+ input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
1114
+ output_tokens: 0,
1115
+ ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
1116
+ }
1117
+ }
1118
+ });
1119
+ state$1.messageStartSent = true;
1120
+ }
1121
+ if (delta.content) {
1122
+ if (isToolBlockOpen(state$1)) {
1123
+ events$1.push({
1124
+ type: "content_block_stop",
1125
+ index: state$1.contentBlockIndex
1126
+ });
1127
+ state$1.contentBlockIndex++;
1128
+ state$1.contentBlockOpen = false;
1129
+ }
1130
+ if (!state$1.contentBlockOpen) {
1131
+ events$1.push({
1132
+ type: "content_block_start",
1133
+ index: state$1.contentBlockIndex,
1134
+ content_block: {
1135
+ type: "text",
1136
+ text: ""
1137
+ }
1138
+ });
1139
+ state$1.contentBlockOpen = true;
1140
+ }
1141
+ events$1.push({
1142
+ type: "content_block_delta",
1143
+ index: state$1.contentBlockIndex,
1144
+ delta: {
1145
+ type: "text_delta",
1146
+ text: delta.content
1147
+ }
1148
+ });
1149
+ }
1150
+ if (delta.tool_calls) for (const toolCall of delta.tool_calls) {
1151
+ if (toolCall.id && toolCall.function?.name) {
1152
+ if (state$1.contentBlockOpen) {
1153
+ events$1.push({
1154
+ type: "content_block_stop",
1155
+ index: state$1.contentBlockIndex
1156
+ });
1157
+ state$1.contentBlockIndex++;
1158
+ state$1.contentBlockOpen = false;
1159
+ }
1160
+ const anthropicBlockIndex = state$1.contentBlockIndex;
1161
+ state$1.toolCalls[toolCall.index] = {
1162
+ id: toolCall.id,
1163
+ name: toolCall.function.name,
1164
+ anthropicBlockIndex
1165
+ };
1166
+ events$1.push({
1167
+ type: "content_block_start",
1168
+ index: anthropicBlockIndex,
1169
+ content_block: {
1170
+ type: "tool_use",
1171
+ id: toolCall.id,
1172
+ name: toolCall.function.name,
1173
+ input: {}
1174
+ }
1175
+ });
1176
+ state$1.contentBlockOpen = true;
1177
+ }
1178
+ if (toolCall.function?.arguments) {
1179
+ const toolCallInfo = state$1.toolCalls[toolCall.index];
1180
+ if (toolCallInfo) events$1.push({
1181
+ type: "content_block_delta",
1182
+ index: toolCallInfo.anthropicBlockIndex,
1183
+ delta: {
1184
+ type: "input_json_delta",
1185
+ partial_json: toolCall.function.arguments
1186
+ }
1187
+ });
1188
+ }
1189
+ }
1190
+ if (choice.finish_reason) {
1191
+ if (state$1.contentBlockOpen) {
1192
+ events$1.push({
1193
+ type: "content_block_stop",
1194
+ index: state$1.contentBlockIndex
1195
+ });
1196
+ state$1.contentBlockOpen = false;
1197
+ }
1198
+ events$1.push({
1199
+ type: "message_delta",
1200
+ delta: {
1201
+ stop_reason: mapOpenAIStopReasonToAnthropic(choice.finish_reason),
1202
+ stop_sequence: null
1203
+ },
1204
+ usage: {
1205
+ input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
1206
+ output_tokens: chunk.usage?.completion_tokens ?? 0,
1207
+ ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
1208
+ }
1209
+ }, { type: "message_stop" });
1210
+ }
1211
+ return events$1;
1212
+ }
1213
+
1214
+ //#endregion
1215
+ //#region src/routes/messages/handler.ts
1216
+ async function handleCompletion(c) {
1217
+ await checkRateLimit(state);
1218
+ const anthropicPayload = await c.req.json();
1219
+ consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload));
1220
+ const openAIPayload = translateToOpenAI(anthropicPayload);
1221
+ consola.debug("Translated OpenAI request payload:", JSON.stringify(openAIPayload));
1222
+ if (state.manualApprove) await awaitApproval();
1223
+ const response = await createChatCompletions(openAIPayload);
1224
+ if (isNonStreaming$1(response)) {
1225
+ consola.debug("Non-streaming response from Copilot:", JSON.stringify(response).slice(-400));
1226
+ const anthropicResponse = translateToAnthropic(response);
1227
+ consola.debug("Translated Anthropic response:", JSON.stringify(anthropicResponse));
1228
+ return c.json(anthropicResponse);
1229
+ }
1230
+ consola.debug("Streaming response from Copilot");
1231
+ return streamSSE(c, async (stream) => {
1232
+ const streamState = {
1233
+ messageStartSent: false,
1234
+ contentBlockIndex: 0,
1235
+ contentBlockOpen: false,
1236
+ toolCalls: {}
1237
+ };
1238
+ for await (const rawEvent of response) {
1239
+ consola.debug("Copilot raw stream event:", JSON.stringify(rawEvent));
1240
+ if (rawEvent.data === "[DONE]") break;
1241
+ if (!rawEvent.data) continue;
1242
+ const events$1 = translateChunkToAnthropicEvents(JSON.parse(rawEvent.data), streamState);
1243
+ for (const event of events$1) {
1244
+ consola.debug("Translated Anthropic event:", JSON.stringify(event));
1245
+ await stream.writeSSE({
1246
+ event: event.type,
1247
+ data: JSON.stringify(event)
1248
+ });
1249
+ }
1250
+ }
1251
+ });
1252
+ }
1253
+ const isNonStreaming$1 = (response) => Object.hasOwn(response, "choices");
1254
+
1255
+ //#endregion
1256
+ //#region src/routes/messages/route.ts
1257
+ const messageRoutes = new Hono();
1258
+ messageRoutes.post("/", async (c) => {
1259
+ try {
1260
+ return await handleCompletion(c);
1261
+ } catch (error) {
1262
+ return await forwardError(c, error);
1263
+ }
1264
+ });
1265
+ messageRoutes.post("/count_tokens", async (c) => {
1266
+ try {
1267
+ return await handleCountTokens(c);
1268
+ } catch (error) {
1269
+ return await forwardError(c, error);
1270
+ }
1271
+ });
1272
+
1273
+ //#endregion
1274
+ //#region src/routes/models/route.ts
1275
+ const modelRoutes = new Hono();
1276
+ modelRoutes.get("/", async (c) => {
1277
+ try {
1278
+ if (!state.models) await cacheModels();
1279
+ const models = state.models?.data.map((model) => ({
1280
+ id: model.id,
1281
+ object: "model",
1282
+ type: "model",
1283
+ created: 0,
1284
+ created_at: (/* @__PURE__ */ new Date(0)).toISOString(),
1285
+ owned_by: model.vendor,
1286
+ display_name: model.name
1287
+ }));
1288
+ return c.json({
1289
+ object: "list",
1290
+ data: models,
1291
+ has_more: false
1292
+ });
1293
+ } catch (error) {
1294
+ return await forwardError(c, error);
1295
+ }
1296
+ });
1297
+
1298
+ //#endregion
1299
+ //#region src/services/copilot/create-responses.ts
1300
+ const createResponses = async (payload) => {
1301
+ if (!state.copilotToken) throw new Error("Copilot token not found");
1302
+ const enableVision = detectVision(payload.input);
1303
+ const isAgentCall = detectAgentCall(payload.input);
1304
+ const headers = {
1305
+ ...copilotHeaders(state, enableVision),
1306
+ "X-Initiator": isAgentCall ? "agent" : "user"
1307
+ };
1308
+ const filteredPayload = filterUnsupportedTools(payload);
1309
+ const response = await fetch(`${copilotBaseUrl(state)}/responses`, {
1310
+ method: "POST",
1311
+ headers,
1312
+ body: JSON.stringify(filteredPayload)
1313
+ });
1314
+ if (!response.ok) {
1315
+ consola.error("Failed to create responses", response);
1316
+ throw new HTTPError("Failed to create responses", response);
1317
+ }
1318
+ if (payload.stream) return events(response);
1319
+ return await response.json();
1320
+ };
1321
+ function detectVision(input) {
1322
+ if (typeof input === "string") return false;
1323
+ if (!Array.isArray(input)) return false;
1324
+ return input.some((item) => {
1325
+ if ("content" in item && Array.isArray(item.content)) return item.content.some((part) => part.type === "input_image");
1326
+ return false;
1327
+ });
1328
+ }
1329
+ function detectAgentCall(input) {
1330
+ if (typeof input === "string") return false;
1331
+ if (!Array.isArray(input)) return false;
1332
+ return input.some((item) => {
1333
+ if ("role" in item && item.role === "assistant") return true;
1334
+ if ("type" in item && item.type === "function_call_output") return true;
1335
+ return false;
1336
+ });
1337
+ }
1338
+ function filterUnsupportedTools(payload) {
1339
+ if (!payload.tools || !Array.isArray(payload.tools)) return payload;
1340
+ const supported = payload.tools.filter((tool) => {
1341
+ const isSupported = tool.type === "function";
1342
+ if (!isSupported) consola.debug(`Stripping unsupported tool type: ${tool.type}`);
1343
+ return isSupported;
1344
+ });
1345
+ return {
1346
+ ...payload,
1347
+ tools: supported.length > 0 ? supported : void 0
1348
+ };
1349
+ }
1350
+
1351
+ //#endregion
1352
+ //#region src/services/copilot/web-search.ts
1353
+ const MAX_SEARCHES_PER_SECOND = 3;
1354
+ let searchTimestamps = [];
1355
+ async function throttleSearch() {
1356
+ const now = Date.now();
1357
+ searchTimestamps = searchTimestamps.filter((t) => now - t < 1e3);
1358
+ if (searchTimestamps.length >= MAX_SEARCHES_PER_SECOND) {
1359
+ const waitMs = 1e3 - (now - searchTimestamps[0]);
1360
+ if (waitMs > 0) {
1361
+ consola.debug(`Web search rate limited, waiting ${waitMs}ms`);
1362
+ await sleep(waitMs);
1363
+ }
1364
+ }
1365
+ searchTimestamps.push(Date.now());
1366
+ }
1367
+ function threadsHeaders() {
1368
+ return copilotHeaders(state, false, "copilot-chat");
1369
+ }
1370
+ async function createThread() {
1371
+ const response = await fetch(`${copilotBaseUrl(state)}/github/chat/threads`, {
1372
+ method: "POST",
1373
+ headers: threadsHeaders(),
1374
+ body: JSON.stringify({})
1375
+ });
1376
+ if (!response.ok) {
1377
+ consola.error("Failed to create chat thread", response.status);
1378
+ throw new Error(`Failed to create chat thread: ${response.status}`);
1379
+ }
1380
+ return (await response.json()).thread_id;
1381
+ }
1382
+ async function sendThreadMessage(threadId, query) {
1383
+ const response = await fetch(`${copilotBaseUrl(state)}/github/chat/threads/${threadId}/messages`, {
1384
+ method: "POST",
1385
+ headers: threadsHeaders(),
1386
+ body: JSON.stringify({
1387
+ content: query,
1388
+ intent: "conversation",
1389
+ skills: ["web-search"],
1390
+ references: []
1391
+ })
1392
+ });
1393
+ if (!response.ok) {
1394
+ consola.error("Failed to send thread message", response.status);
1395
+ throw new Error(`Failed to send thread message: ${response.status}`);
1396
+ }
1397
+ return await response.json();
1398
+ }
1399
+ async function searchWeb(query) {
1400
+ if (!state.copilotToken) throw new Error("Copilot token not found");
1401
+ await throttleSearch();
1402
+ consola.info(`Web search: "${query.slice(0, 80)}"`);
1403
+ const response = await sendThreadMessage(await createThread(), query);
1404
+ const references = [];
1405
+ for (const ref of response.message.references) if (ref.results) {
1406
+ for (const result of ref.results) if (result.url && result.reference_type !== "bing_search") references.push({
1407
+ title: result.title,
1408
+ url: result.url
1409
+ });
1410
+ }
1411
+ consola.debug(`Web search returned ${references.length} references`);
1412
+ return {
1413
+ content: response.message.content,
1414
+ references
1415
+ };
1416
+ }
1417
+
1418
+ //#endregion
1419
+ //#region src/routes/responses/handler.ts
1420
+ async function handleResponses(c) {
1421
+ await checkRateLimit(state);
1422
+ const payload = await c.req.json();
1423
+ consola.debug("Responses request payload:", JSON.stringify(payload).slice(-400));
1424
+ const selectedModel = state.models?.data.find((model) => model.id === payload.model);
1425
+ consola.info("Token counting not yet supported for /responses endpoint");
1426
+ if (state.manualApprove) await awaitApproval();
1427
+ await injectWebSearchIfNeeded(payload);
1428
+ if (isNullish(payload.max_output_tokens)) {
1429
+ payload.max_output_tokens = selectedModel?.capabilities.limits.max_output_tokens;
1430
+ consola.debug("Set max_output_tokens to:", JSON.stringify(payload.max_output_tokens));
1431
+ }
1432
+ const response = await createResponses(payload);
1433
+ if (isNonStreaming(response)) {
1434
+ consola.debug("Non-streaming response:", JSON.stringify(response));
1435
+ return c.json(response);
1436
+ }
1437
+ consola.debug("Streaming response");
1438
+ return streamSSE(c, async (stream) => {
1439
+ for await (const chunk of response) {
1440
+ consola.debug("Streaming chunk:", JSON.stringify(chunk));
1441
+ if (chunk.data === "[DONE]") break;
1442
+ if (!chunk.data) continue;
1443
+ await stream.writeSSE({
1444
+ data: chunk.data,
1445
+ event: chunk.event,
1446
+ id: chunk.id?.toString()
1447
+ });
1448
+ }
1449
+ });
1450
+ }
1451
+ const isNonStreaming = (response) => Object.hasOwn(response, "output");
1452
+ async function injectWebSearchIfNeeded(payload) {
1453
+ if (!payload.tools?.some((t) => t.type === "web_search")) return;
1454
+ if (Array.isArray(payload.input)) {
1455
+ if (payload.input.some((item) => item.type === "function_call_output")) return;
1456
+ }
1457
+ const query = extractUserQuery(payload.input);
1458
+ if (!query) return;
1459
+ try {
1460
+ const results = await searchWeb(query);
1461
+ const searchContext = [
1462
+ "[Web Search Results]",
1463
+ results.content,
1464
+ "",
1465
+ results.references.map((r) => `- [${r.title}](${r.url})`).join("\n"),
1466
+ "[End Web Search Results]"
1467
+ ].join("\n");
1468
+ payload.instructions = payload.instructions ? `${searchContext}\n\n${payload.instructions}` : searchContext;
1469
+ } catch (error) {
1470
+ consola.warn("Web search failed, continuing without results:", error);
1471
+ }
1472
+ }
1473
+ function extractUserQuery(input) {
1474
+ if (typeof input === "string") return input;
1475
+ if (!Array.isArray(input)) return void 0;
1476
+ for (let i = input.length - 1; i >= 0; i--) {
1477
+ const item = input[i];
1478
+ if ("role" in item && item.role === "user") {
1479
+ if (typeof item.content === "string") return item.content;
1480
+ if (Array.isArray(item.content)) {
1481
+ const text = item.content.find((p) => p.type === "input_text");
1482
+ if (text && "text" in text) return text.text;
1483
+ }
1484
+ }
1485
+ }
1486
+ }
1487
+
1488
+ //#endregion
1489
+ //#region src/routes/responses/route.ts
1490
+ const responsesRoutes = new Hono();
1491
+ responsesRoutes.post("/", async (c) => {
1492
+ try {
1493
+ return await handleResponses(c);
1494
+ } catch (error) {
1495
+ return await forwardError(c, error);
1496
+ }
1497
+ });
1498
+
1499
+ //#endregion
1500
+ //#region src/routes/search/route.ts
1501
+ const searchRoutes = new Hono();
1502
+ searchRoutes.post("/", async (c) => {
1503
+ try {
1504
+ const { query } = await c.req.json();
1505
+ if (!query || typeof query !== "string") return c.json({ error: { message: "Missing required field: query" } }, 400);
1506
+ const results = await searchWeb(query);
1507
+ return c.json({ results });
1508
+ } catch (error) {
1509
+ return await forwardError(c, error);
1510
+ }
1511
+ });
1512
+
1513
+ //#endregion
1514
+ //#region src/routes/token/route.ts
1515
+ const tokenRoute = new Hono();
1516
+ tokenRoute.get("/", (c) => {
1517
+ return c.json({ token: state.copilotToken });
1518
+ });
1519
+
1520
+ //#endregion
1521
+ //#region src/routes/usage/route.ts
1522
+ const usageRoute = new Hono();
1523
+ usageRoute.get("/", async (c) => {
1524
+ try {
1525
+ const usage = await getCopilotUsage();
1526
+ return c.json(usage);
1527
+ } catch (error) {
1528
+ consola.error("Error fetching Copilot usage:", error);
1529
+ return await forwardError(c, error);
1530
+ }
1531
+ });
1532
+
1533
+ //#endregion
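The /usage route simply proxies getCopilotUsage() and surfaces failures through forwardError; the dashboard URL printed by the start command reads this same endpoint. It can also be polled directly — the response shape is whatever the Copilot usage API returns, so treat the fields as opaque in this sketch:

// Hypothetical poll of the local /usage endpoint (8787 is the CLI's default port).
const usage = await fetch("http://localhost:8787/usage").then((r) => r.json());
console.log(JSON.stringify(usage, null, 2));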
1534
+ //#region src/server.ts
1535
+ const server = new Hono();
1536
+ server.use(logger());
1537
+ server.use(cors());
1538
+ server.get("/", (c) => c.text("Server running"));
1539
+ server.route("/chat/completions", completionRoutes);
1540
+ server.route("/responses", responsesRoutes);
1541
+ server.route("/models", modelRoutes);
1542
+ server.route("/embeddings", embeddingRoutes);
1543
+ server.route("/search", searchRoutes);
1544
+ server.route("/usage", usageRoute);
1545
+ server.route("/token", tokenRoute);
1546
+ server.route("/v1/chat/completions", completionRoutes);
1547
+ server.route("/v1/responses", responsesRoutes);
1548
+ server.route("/v1/models", modelRoutes);
1549
+ server.route("/v1/embeddings", embeddingRoutes);
1550
+ server.route("/v1/search", searchRoutes);
1551
+ server.route("/v1/messages", messageRoutes);
1552
+
1553
+ //#endregion
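src/server.ts mounts every route both with and without a /v1 prefix, so OpenAI-style clients that hard-code /v1 paths and Anthropic-style clients that post to /v1/messages can point at the same host. The generated Claude Code and Codex commands pass "dummy" credentials, which suggests the proxy does not validate the client key and instead uses its cached Copilot token. A hedged sketch, assuming the default port and that the /models route returns the cached model list in OpenAI's { data: [...] } shape:

// Hypothetical: list model ids through the /v1 prefix the way an OpenAI-compatible SDK would.
const models = await fetch("http://localhost:8787/v1/models", {
  headers: { authorization: "Bearer dummy" } // assumed to be ignored by the proxy
}).then((r) => r.json());
console.log(models.data.map((m) => m.id));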
1554
+ //#region src/start.ts
1555
+ async function generateClaudeCodeCommand(serverUrl) {
1556
+ invariant(state.models, "Models should be loaded by now");
1557
+ const selectedModel = await consola.prompt("Select a model to use with Claude Code", {
1558
+ type: "select",
1559
+ options: state.models.data.map((model) => model.id)
1560
+ });
1561
+ const selectedSmallModel = await consola.prompt("Select a small model to use with Claude Code", {
1562
+ type: "select",
1563
+ options: state.models.data.map((model) => model.id)
1564
+ });
1565
+ const command = generateEnvScript({
1566
+ ANTHROPIC_BASE_URL: serverUrl,
1567
+ ANTHROPIC_AUTH_TOKEN: "dummy",
1568
+ ANTHROPIC_MODEL: selectedModel,
1569
+ ANTHROPIC_DEFAULT_SONNET_MODEL: selectedModel,
1570
+ ANTHROPIC_SMALL_FAST_MODEL: selectedSmallModel,
1571
+ ANTHROPIC_DEFAULT_HAIKU_MODEL: selectedSmallModel,
1572
+ DISABLE_NON_ESSENTIAL_MODEL_CALLS: "1",
1573
+ CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: "1"
1574
+ }, "claude");
1575
+ try {
1576
+ clipboard.writeSync(command);
1577
+ consola.success("Copied Claude Code command to clipboard!");
1578
+ } catch {
1579
+ consola.warn("Failed to copy to clipboard. Here is the Claude Code command:");
1580
+ consola.log(command);
1581
+ }
1582
+ }
1583
+ async function generateCodexCommand(serverUrl) {
1584
+ invariant(state.models, "Models should be loaded by now");
1585
+ const defaultCodexModel = state.models.data.find((model) => model.id === "gpt5.2-codex");
1586
+ const selectedModel = defaultCodexModel ? defaultCodexModel.id : await consola.prompt("Select a model to use with Codex CLI", {
1587
+ type: "select",
1588
+ options: state.models.data.map((model) => model.id)
1589
+ });
1590
+ const command = generateEnvScript({
1591
+ OPENAI_BASE_URL: `${serverUrl}/v1`,
1592
+ OPENAI_API_KEY: "dummy"
1593
+ }, `codex -m ${selectedModel}`);
1594
+ try {
1595
+ clipboard.writeSync(command);
1596
+ consola.success("Copied Codex CLI command to clipboard!");
1597
+ } catch {
1598
+ consola.warn("Failed to copy to clipboard. Here is the Codex CLI command:");
1599
+ consola.log(command);
1600
+ }
1601
+ }
1602
+ async function runServer(options) {
1603
+ if (options.proxyEnv) initProxyFromEnv();
1604
+ if (options.verbose) {
1605
+ consola.level = 5;
1606
+ consola.info("Verbose logging enabled");
1607
+ }
1608
+ state.accountType = options.accountType;
1609
+ if (options.accountType !== "individual") consola.info(`Using ${options.accountType} plan GitHub account`);
1610
+ state.manualApprove = options.manual;
1611
+ state.rateLimitSeconds = options.rateLimit;
1612
+ state.rateLimitWait = options.rateLimitWait;
1613
+ state.showToken = options.showToken;
1614
+ await ensurePaths();
1615
+ await cacheVSCodeVersion();
1616
+ if (options.githubToken) {
1617
+ state.githubToken = options.githubToken;
1618
+ consola.info("Using provided GitHub token");
1619
+ } else await setupGitHubToken();
1620
+ await setupCopilotToken();
1621
+ await cacheModels();
1622
+ consola.info(`Available models: \n${state.models?.data.map((model) => `- ${model.id}`).join("\n")}`);
1623
+ const serverUrl = `http://localhost:${options.port}`;
1624
+ if (options.claudeCode) await generateClaudeCodeCommand(serverUrl);
1625
+ if (options.codex) await generateCodexCommand(serverUrl);
1626
+ consola.box(`🌐 Usage Viewer: https://animeshkundu.github.io/openroute/dashboard.html?endpoint=${serverUrl}/usage`);
1627
+ serve({
1628
+ fetch: server.fetch,
1629
+ port: options.port
1630
+ });
1631
+ }
1632
+ const start = defineCommand({
1633
+ meta: {
1634
+ name: "start",
1635
+ description: "Start the openroute server"
1636
+ },
1637
+ args: {
1638
+ port: {
1639
+ alias: "p",
1640
+ type: "string",
1641
+ default: "8787",
1642
+ description: "Port to listen on"
1643
+ },
1644
+ verbose: {
1645
+ alias: "v",
1646
+ type: "boolean",
1647
+ default: false,
1648
+ description: "Enable verbose logging"
1649
+ },
1650
+ "account-type": {
1651
+ alias: "a",
1652
+ type: "string",
1653
+ default: "individual",
1654
+ description: "Account type to use (individual, business, enterprise)"
1655
+ },
1656
+ manual: {
1657
+ type: "boolean",
1658
+ default: false,
1659
+ description: "Enable manual request approval"
1660
+ },
1661
+ "rate-limit": {
1662
+ alias: "r",
1663
+ type: "string",
1664
+ description: "Rate limit in seconds between requests"
1665
+ },
1666
+ wait: {
1667
+ alias: "w",
1668
+ type: "boolean",
1669
+ default: false,
1670
+ description: "Wait instead of error when rate limit is hit. Has no effect if rate limit is not set"
1671
+ },
1672
+ "github-token": {
1673
+ alias: "g",
1674
+ type: "string",
1675
+ description: "Provide GitHub token directly (must be generated using the `auth` subcommand)"
1676
+ },
1677
+ "claude-code": {
1678
+ alias: "c",
1679
+ type: "boolean",
1680
+ default: false,
1681
+ description: "Generate a command to launch Claude Code with Copilot API config"
1682
+ },
1683
+ codex: {
1684
+ type: "boolean",
1685
+ default: false,
1686
+ description: "Generate a command to launch Codex CLI with Copilot API config"
1687
+ },
1688
+ "show-token": {
1689
+ type: "boolean",
1690
+ default: false,
1691
+ description: "Show GitHub and Copilot tokens on fetch and refresh"
1692
+ },
1693
+ "proxy-env": {
1694
+ type: "boolean",
1695
+ default: false,
1696
+ description: "Initialize proxy from environment variables"
1697
+ }
1698
+ },
1699
+ run({ args }) {
1700
+ const rateLimitRaw = args["rate-limit"];
1701
+ const rateLimit = rateLimitRaw === void 0 ? void 0 : Number.parseInt(rateLimitRaw, 10);
1702
+ return runServer({
1703
+ port: Number.parseInt(args.port, 10),
1704
+ verbose: args.verbose,
1705
+ accountType: args["account-type"],
1706
+ manual: args.manual,
1707
+ rateLimit,
1708
+ rateLimitWait: args.wait,
1709
+ githubToken: args["github-token"],
1710
+ claudeCode: args["claude-code"],
1711
+ codex: args.codex,
1712
+ showToken: args["show-token"],
1713
+ proxyEnv: args["proxy-env"]
1714
+ });
1715
+ }
1716
+ });
1717
+
1718
+ //#endregion
1719
+ //#region src/main.ts
1720
+ await runMain(defineCommand({
1721
+ meta: {
1722
+ name: "oproute",
1723
+ description: "A reverse proxy that exposes GitHub Copilot as OpenAI and Anthropic compatible API endpoints."
1724
+ },
1725
+ subCommands: {
1726
+ auth,
1727
+ start,
1728
+ "check-usage": checkUsage,
1729
+ debug
1730
+ }
1731
+ }));
1732
+
1733
+ //#endregion
1734
+ export { };
1735
+ //# sourceMappingURL=main.js.map