copilot-api-plus 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/main.js ADDED
@@ -0,0 +1,1345 @@
1
+ #!/usr/bin/env node
2
+ import { PATHS, cacheModels, cacheVSCodeVersion, clearGithubToken, ensurePaths, isNullish, setupCopilotToken, setupGitHubToken, sleep } from "./token-CRn8c1A7.js";
3
+ import { GITHUB_API_BASE_URL, copilotBaseUrl, copilotHeaders, githubHeaders, state } from "./get-user-DalX7epg.js";
4
+ import { HTTPError, forwardError } from "./error-Cmeg4mmB.js";
5
+ import { defineCommand, runMain } from "citty";
6
+ import consola from "consola";
7
+ import fs from "node:fs/promises";
8
+ import os from "node:os";
9
+ import clipboard from "clipboardy";
10
+ import { serve } from "srvx";
11
+ import invariant from "tiny-invariant";
12
+ import { getProxyForUrl } from "proxy-from-env";
13
+ import { Agent, ProxyAgent, setGlobalDispatcher } from "undici";
14
+ import { execSync } from "node:child_process";
15
+ import process$1 from "node:process";
16
+ import { Hono } from "hono";
17
+ import { cors } from "hono/cors";
18
+ import { logger } from "hono/logger";
19
+ import { HTTPException } from "hono/http-exception";
20
+ import { streamSSE } from "hono/streaming";
21
+ import { events } from "fetch-event-stream";
22
+
23
+ //#region src/auth.ts
24
// Runs the GitHub device-auth flow (forced, even if a token already exists)
// and persists the resulting token to disk.
// options: { verbose: boolean, showToken: boolean }
async function runAuth(options) {
  if (options.verbose) {
    consola.level = 5; // consola level 5 = debug and above
    consola.info("Verbose logging enabled");
  }
  state.showToken = options.showToken;
  await ensurePaths();
  await setupGitHubToken({ force: true }); // force: re-auth even with an existing token
  consola.success("GitHub token written to", PATHS.GITHUB_TOKEN_PATH);
}
34
// CLI command: run the GitHub auth flow and store the token without starting
// the API server.
const auth = defineCommand({
  meta: {
    name: "auth",
    description: "Run GitHub auth flow without running the server"
  },
  args: {
    verbose: {
      alias: "v",
      type: "boolean",
      default: false,
      description: "Enable verbose logging"
    },
    "show-token": {
      type: "boolean",
      default: false,
      description: "Show GitHub token on auth"
    }
  },
  run({ args }) {
    return runAuth({
      verbose: args.verbose,
      showToken: args["show-token"]
    });
  }
});
59
+
60
+ //#endregion
61
+ //#region src/services/github/get-copilot-usage.ts
62
// Fetches the authenticated user's Copilot plan/quota snapshot from the
// GitHub internal API.
// @throws {HTTPError} when the response is not ok.
const getCopilotUsage = async () => {
  const response = await fetch(`${GITHUB_API_BASE_URL}/copilot_internal/user`, { headers: githubHeaders(state) });
  if (!response.ok) throw new HTTPError("Failed to get Copilot usage", response);
  return await response.json();
};
67
+
68
+ //#endregion
69
+ //#region src/check-usage.ts
70
// CLI command: prints the user's Copilot plan, quota reset date, and
// premium/chat/completions quota consumption. Exits with code 1 when the
// usage endpoint cannot be reached.
const checkUsage = defineCommand({
  meta: {
    name: "check-usage",
    description: "Show current GitHub Copilot usage/quota information"
  },
  async run() {
    await ensurePaths();
    await setupGitHubToken();
    try {
      const usage = await getCopilotUsage();
      const premium = usage.quota_snapshots.premium_interactions;
      const premiumTotal = premium.entitlement;
      const premiumUsed = premiumTotal - premium.remaining;
      // Guard against a zero entitlement to avoid division by zero.
      const premiumPercentUsed = premiumTotal > 0 ? premiumUsed / premiumTotal * 100 : 0;
      const premiumPercentRemaining = premium.percent_remaining;
      // Formats one quota snapshot as "Name: used/total used (x% used, y% remaining)".
      function summarizeQuota(name, snap) {
        if (!snap) return `${name}: N/A`;
        const total = snap.entitlement;
        const used = total - snap.remaining;
        const percentUsed = total > 0 ? used / total * 100 : 0;
        const percentRemaining = snap.percent_remaining;
        return `${name}: ${used}/${total} used (${percentUsed.toFixed(1)}% used, ${percentRemaining.toFixed(1)}% remaining)`;
      }
      const premiumLine = `Premium: ${premiumUsed}/${premiumTotal} used (${premiumPercentUsed.toFixed(1)}% used, ${premiumPercentRemaining.toFixed(1)}% remaining)`;
      const chatLine = summarizeQuota("Chat", usage.quota_snapshots.chat);
      const completionsLine = summarizeQuota("Completions", usage.quota_snapshots.completions);
      consola.box(`Copilot Usage (plan: ${usage.copilot_plan})\nQuota resets: ${usage.quota_reset_date}\n\nQuotas:\n ${premiumLine}\n ${chatLine}\n ${completionsLine}`);
    } catch (err) {
      consola.error("Failed to fetch Copilot usage:", err);
      process.exit(1);
    }
  }
});
103
+
104
+ //#endregion
105
+ //#region src/debug.ts
106
/**
 * Reads this package's version from its package.json, returning "unknown"
 * when the file cannot be found or parsed.
 *
 * Fix: `URL.pathname` is percent-encoded and keeps a leading slash before
 * Windows drive letters, so `fs.readFile` fails there; `fileURLToPath`
 * produces a correct platform path.
 */
async function getPackageVersion() {
  try {
    const { fileURLToPath } = await import("node:url");
    const packageJsonPath = fileURLToPath(new URL("../package.json", import.meta.url));
    return JSON.parse(await fs.readFile(packageJsonPath, "utf8")).version;
  } catch {
    return "unknown";
  }
}
114
/**
 * Reports the current JS runtime (bun or node), its version (without the
 * leading "v"), and the host platform/architecture.
 */
function getRuntimeInfo() {
  const runningOnBun = typeof Bun !== "undefined";
  return {
    name: runningOnBun ? "bun" : "node",
    version: runningOnBun ? Bun.version : process.version.slice(1),
    platform: os.platform(),
    arch: os.arch()
  };
}
123
/**
 * Returns true when the GitHub token file exists, is a regular file, and
 * contains non-whitespace content; false on any error (e.g. missing file).
 */
async function checkTokenExists() {
  try {
    const stats = await fs.stat(PATHS.GITHUB_TOKEN_PATH);
    if (!stats.isFile()) return false;
    const contents = await fs.readFile(PATHS.GITHUB_TOKEN_PATH, "utf8");
    return contents.trim().length > 0;
  } catch {
    return false;
  }
}
131
/**
 * Gathers the diagnostic snapshot shown by the `debug` command: package
 * version, runtime details, relevant paths, and whether a token is stored.
 */
async function getDebugInfo() {
  const [version, tokenExists] = await Promise.all([getPackageVersion(), checkTokenExists()]);
  const { APP_DIR, GITHUB_TOKEN_PATH } = PATHS;
  return {
    version,
    runtime: getRuntimeInfo(),
    paths: { APP_DIR, GITHUB_TOKEN_PATH },
    tokenExists
  };
}
143
// Prints the debug snapshot as human-readable text via consola.
// NOTE: the template literal is intentionally left-aligned — indenting these
// lines would change the printed output.
function printDebugInfoPlain(info) {
  consola.info(`copilot-api debug

Version: ${info.version}
Runtime: ${info.runtime.name} ${info.runtime.version} (${info.runtime.platform} ${info.runtime.arch})

Paths:
- APP_DIR: ${info.paths.APP_DIR}
- GITHUB_TOKEN_PATH: ${info.paths.GITHUB_TOKEN_PATH}

Token exists: ${info.tokenExists ? "Yes" : "No"}`);
}
155
/** Prints the debug snapshot as pretty-printed (2-space) JSON on stdout. */
function printDebugInfoJson(info) {
  const serialized = JSON.stringify(info, null, 2);
  console.log(serialized);
}
158
/** Entry point for the `debug` command: gathers info, prints JSON or plain text. */
async function runDebug(options) {
  const info = await getDebugInfo();
  const print = options.json ? printDebugInfoJson : printDebugInfoPlain;
  print(info);
}
163
// CLI command: print environment/diagnostic information, optionally as JSON.
const debug = defineCommand({
  meta: {
    name: "debug",
    description: "Print debug information about the application"
  },
  args: { json: {
    type: "boolean",
    default: false,
    description: "Output debug information as JSON"
  } },
  run({ args }) {
    return runDebug({ json: args.json });
  }
});
177
+
178
+ //#endregion
179
+ //#region src/logout.ts
180
// Deletes the stored GitHub token and reports where it lived.
async function runLogout() {
  await ensurePaths();
  await clearGithubToken();
  consola.success("Logged out successfully");
  consola.info(`Token file location: ${PATHS.GITHUB_TOKEN_PATH}`);
}
186
// CLI command: clear the stored GitHub token.
const logout = defineCommand({
  meta: {
    name: "logout",
    description: "Clear stored GitHub token and logout"
  },
  run() {
    return runLogout();
  }
});
195
+
196
+ //#endregion
197
+ //#region src/lib/proxy.ts
198
/**
 * Installs a global undici dispatcher that routes each request through the
 * proxy configured for its URL (via proxy-from-env, i.e. HTTP_PROXY /
 * HTTPS_PROXY / NO_PROXY), falling back to a direct connection when no proxy
 * applies or resolution fails.
 * Skipped under Bun — presumably Bun handles proxy env vars natively; TODO confirm.
 */
function initProxyFromEnv() {
  if (typeof Bun !== "undefined") return;
  try {
    const direct = new Agent();
    // Cache of ProxyAgent instances keyed by proxy URL, so each proxy is
    // only constructed once.
    const proxies = /* @__PURE__ */ new Map();
    setGlobalDispatcher({
      dispatch(options, handler) {
        try {
          const origin = typeof options.origin === "string" ? new URL(options.origin) : options.origin;
          const raw = getProxyForUrl(origin.toString());
          const proxyUrl = raw && raw.length > 0 ? raw : void 0;
          if (!proxyUrl) {
            consola.debug(`HTTP proxy bypass: ${origin.hostname}`);
            return direct.dispatch(options, handler);
          }
          let agent = proxies.get(proxyUrl);
          if (!agent) {
            agent = new ProxyAgent(proxyUrl);
            proxies.set(proxyUrl, agent);
          }
          // Log only protocol://host — not any credentials embedded in the
          // proxy URL.
          let label = proxyUrl;
          try {
            const u = new URL(proxyUrl);
            label = `${u.protocol}//${u.host}`;
          } catch {}
          consola.debug(`HTTP proxy route: ${origin.hostname} via ${label}`);
          return agent.dispatch(options, handler);
        } catch {
          // Any failure in proxy resolution falls back to a direct connection.
          return direct.dispatch(options, handler);
        }
      },
      close() {
        return direct.close();
      },
      destroy() {
        return direct.destroy();
      }
    });
    consola.debug("HTTP proxy configured from environment (per-URL)");
  } catch (err) {
    // Proxy support is best-effort; the app works without it.
    consola.debug("Proxy setup skipped:", err);
  }
}
241
+
242
+ //#endregion
243
+ //#region src/lib/shell.ts
244
/**
 * Best-effort detection of the user's shell, used to format env-var scripts.
 * Windows: inspects the parent process via `wmic` to distinguish PowerShell
 * from cmd (NOTE(review): wmic is deprecated/absent on recent Windows, in
 * which case this falls back to "cmd" — confirm acceptable).
 * POSIX: inferred from $SHELL, defaulting to "sh".
 */
function getShell() {
  const { platform, ppid, env } = process$1;
  if (platform === "win32") {
    try {
      const command = `wmic process get ParentProcessId,Name | findstr "${ppid}"`;
      if (execSync(command, { stdio: "pipe" }).toString().toLowerCase().includes("powershell.exe")) return "powershell";
    } catch {
      // wmic/findstr unavailable or lookup failed — assume cmd.
      return "cmd";
    }
    return "cmd";
  } else {
    const shellPath = env.SHELL;
    if (shellPath) {
      if (shellPath.endsWith("zsh")) return "zsh";
      if (shellPath.endsWith("fish")) return "fish";
      if (shellPath.endsWith("bash")) return "bash";
    }
    return "sh";
  }
}
264
/**
 * Generates a copy-pasteable script that sets multiple environment variables
 * and then runs an optional command, formatted for the user's detected shell.
 * Entries whose value is `undefined` are skipped.
 * @param {EnvVars} envVars - An object of environment variables to set.
 * @param {string} commandToRun - The command to run after setting the variables.
 * @returns {string} The formatted script string.
 */
function generateEnvScript(envVars, commandToRun = "") {
  const shell = getShell();
  const filteredEnvVars = Object.entries(envVars).filter(([, value]) => value !== void 0);
  let commandBlock;
  switch (shell) {
    case "powershell":
      // Fix: a bare right-hand side (`$env:KEY = value`) is not valid
      // PowerShell for arbitrary strings — emit a single-quoted literal,
      // doubling any embedded single quotes.
      commandBlock = filteredEnvVars.map(([key, value]) => `$env:${key} = '${String(value).replaceAll("'", "''")}'`).join("; ");
      break;
    case "cmd":
      commandBlock = filteredEnvVars.map(([key, value]) => `set ${key}=${value}`).join(" & ");
      break;
    case "fish":
      commandBlock = filteredEnvVars.map(([key, value]) => `set -gx ${key} ${value}`).join("; ");
      break;
    default: {
      const assignments = filteredEnvVars.map(([key, value]) => `${key}=${value}`).join(" ");
      commandBlock = filteredEnvVars.length > 0 ? `export ${assignments}` : "";
      break;
    }
  }
  // cmd chains with unconditional `&`; other shells use `&&`.
  if (commandBlock && commandToRun) return `${commandBlock}${shell === "cmd" ? " & " : " && "}${commandToRun}`;
  return commandBlock || commandToRun;
}
294
+
295
+ //#endregion
296
+ //#region src/lib/api-key-auth.ts
297
/**
 * Retrieve an API key from the incoming request.
 *
 * Looks in the places clients commonly put keys, in priority order: the
 * `Authorization: Bearer …` header, the `x-api-key` header, then the
 * `apiKey` query parameter.
 *
 * @returns The extracted API key, or `undefined` if none is present.
 */
function extractApiKey(c) {
  const bearer = c.req.header("authorization");
  if (bearer?.startsWith("Bearer ")) return bearer.slice("Bearer ".length);
  return c.req.header("x-api-key") || c.req.query("apiKey") || undefined;
}
312
/**
 * API key authentication middleware.
 * When no API keys are configured the request passes through untouched;
 * otherwise the request must present one of the configured keys or a 401
 * HTTPException is thrown.
 */
const apiKeyAuthMiddleware = async (c, next) => {
  const keys = state.apiKeys;
  // No keys configured -> auth is disabled.
  if (!keys?.length) {
    await next();
    return;
  }
  const provided = extractApiKey(c);
  if (!provided) {
    throw new HTTPException(401, { message: "API key required. Please provide a valid API key in the Authorization header (Bearer token) or x-api-key header." });
  }
  if (!keys.includes(provided)) {
    throw new HTTPException(401, { message: "Invalid API key. Please provide a valid API key." });
  }
  await next();
};
326
+
327
+ //#endregion
328
+ //#region src/lib/approval.ts
329
// Interactively asks the operator to approve the incoming request; throws a
// 403 HTTPError when rejected (used when manual approval is enabled).
const awaitApproval = async () => {
  if (!await consola.prompt(`Accept incoming request?`, { type: "confirm" })) throw new HTTPError("Request rejected", Response.json({ message: "Request rejected" }, { status: 403 }));
};
332
+
333
+ //#endregion
334
+ //#region src/lib/rate-limit.ts
335
/**
 * Enforces the configured minimum interval between requests.
 * No-op when rate limiting is disabled or enough time has elapsed; otherwise
 * either waits out the remaining time (when `rateLimitWait` is set) or throws
 * a 429 HTTPError.
 * @param state$1 - mutable app state carrying rateLimitSeconds,
 *   rateLimitWait, and lastRequestTimestamp.
 */
async function checkRateLimit(state$1) {
  if (state$1.rateLimitSeconds === void 0) return;
  const now = Date.now();
  if (!state$1.lastRequestTimestamp) {
    state$1.lastRequestTimestamp = now;
    return;
  }
  const elapsedSeconds = (now - state$1.lastRequestTimestamp) / 1e3;
  if (elapsedSeconds > state$1.rateLimitSeconds) {
    state$1.lastRequestTimestamp = now;
    return;
  }
  const waitTimeSeconds = Math.ceil(state$1.rateLimitSeconds - elapsedSeconds);
  if (!state$1.rateLimitWait) {
    consola.warn(`Rate limit exceeded. Need to wait ${waitTimeSeconds} more seconds.`);
    throw new HTTPError("Rate limit exceeded", Response.json({ message: "Rate limit exceeded" }, { status: 429 }));
  }
  const waitTimeMs = waitTimeSeconds * 1e3;
  consola.warn(`Rate limit reached. Waiting ${waitTimeSeconds} seconds before proceeding...`);
  await sleep(waitTimeMs);
  // Fix: record the time the request actually proceeds (after the wait),
  // not the pre-wait timestamp — otherwise the next request measures its
  // elapsed time from before the sleep and is under-throttled.
  state$1.lastRequestTimestamp = Date.now();
  consola.info("Rate limit wait completed, proceeding with request");
}
358
+
359
+ //#endregion
360
+ //#region src/lib/tokenizer.ts
361
// Lazy loaders for each supported gpt-tokenizer encoding — each module is
// dynamically imported only if that encoding is actually requested.
const ENCODING_MAP = {
  o200k_base: () => import("gpt-tokenizer/encoding/o200k_base"),
  cl100k_base: () => import("gpt-tokenizer/encoding/cl100k_base"),
  p50k_base: () => import("gpt-tokenizer/encoding/p50k_base"),
  p50k_edit: () => import("gpt-tokenizer/encoding/p50k_edit"),
  r50k_base: () => import("gpt-tokenizer/encoding/r50k_base")
};
// Cache of loaded encoding modules, keyed by the requested encoding name
// (see getEncodeChatFunction).
const encodingCache = /* @__PURE__ */ new Map();
369
/**
 * Calculate tokens for tool calls: a fixed per-call initialization cost plus
 * the encoded length of each serialized call, and one trailing funcEnd cost.
 */
const calculateToolCallsTokens = (toolCalls, encoder, constants) => {
  const perCallTotal = toolCalls.reduce(
    (sum, toolCall) => sum + constants.funcInit + encoder.encode(JSON.stringify(toolCall)).length,
    0
  );
  return perCallTotal + constants.funcEnd;
};
381
/**
 * Calculate tokens for multi-part message content: image parts cost their
 * encoded URL length plus a flat 85-token overhead; parts with text cost
 * their encoded text length.
 */
const calculateContentPartsTokens = (contentParts, encoder) => {
  let total = 0;
  for (const part of contentParts) {
    if (part.type === "image_url") {
      total += encoder.encode(part.image_url.url).length + 85;
    } else if (part.text) {
      total += encoder.encode(part.text).length;
    }
  }
  return total;
};
390
/**
 * Calculate tokens for a single chat message: a fixed 3-token per-message
 * overhead, plus the encoded length of every string field, +1 when a name is
 * present, plus tool-call and multi-part content costs where applicable.
 */
const calculateMessageTokens = (message, encoder, constants) => {
  const MESSAGE_OVERHEAD = 3;
  const NAME_OVERHEAD = 1;
  let total = MESSAGE_OVERHEAD;
  for (const [field, value] of Object.entries(message)) {
    if (typeof value === "string") total += encoder.encode(value).length;
    if (field === "name") total += NAME_OVERHEAD;
    if (field === "tool_calls") total += calculateToolCallsTokens(value, encoder, constants);
    if (field === "content" && Array.isArray(value)) total += calculateContentPartsTokens(value, encoder);
  }
  return total;
};
405
/**
 * Calculate the total token count for a message list using the custom
 * per-message algorithm, plus a 3-token priming overhead for the reply.
 * An empty message list costs nothing.
 */
const calculateTokens = (messages, encoder, constants) => {
  if (messages.length === 0) return 0;
  const REPLY_PRIMING = 3;
  return messages.reduce(
    (sum, message) => sum + calculateMessageTokens(message, encoder, constants),
    REPLY_PRIMING
  );
};
415
/**
 * Resolves (and memoizes) the tokenizer module for an encoding name, falling
 * back to o200k_base for unknown encodings; unknown names are cached under
 * their own key so the fallback is also only loaded once per name.
 *
 * Fix: the original used `encoding in ENCODING_MAP`, which also matches
 * inherited Object.prototype keys (e.g. "toString", "constructor"); such a
 * name would invoke a prototype method instead of a loader and cache a bogus
 * "module". `Object.hasOwn` restricts the lookup to the map's own keys.
 */
const getEncodeChatFunction = async (encoding) => {
  const cached = encodingCache.get(encoding);
  if (cached) return cached;
  const loader = Object.hasOwn(ENCODING_MAP, encoding)
    ? ENCODING_MAP[encoding]
    : ENCODING_MAP.o200k_base;
  const encodingModule = await loader();
  encodingCache.set(encoding, encodingModule);
  return encodingModule;
};
433
/**
 * Returns the model's tokenizer name, defaulting to "o200k_base" when the
 * capability is unset or empty.
 */
const getTokenizerFromModel = (model) => model.capabilities.tokenizer || "o200k_base";
439
/**
 * Returns the per-model token-accounting constants used when sizing tool
 * definitions. Legacy models (gpt-3.5-turbo, gpt-4) pay a larger per-function
 * initialization cost; every other model shares the cheaper table.
 */
const getModelConstants = (model) => {
  const isLegacy = model.id === "gpt-3.5-turbo" || model.id === "gpt-4";
  return {
    funcInit: isLegacy ? 10 : 7,
    propInit: 3,
    propKey: 3,
    enumInit: -3,
    enumItem: 3,
    funcEnd: 12
  };
};
459
/**
 * Calculate tokens for a single JSON-schema parameter: a fixed key cost, enum
 * member costs, the encoded `name:type:description` line (trailing period
 * stripped from the description), and the encoded `key:value` text of any
 * schema keys beyond type/description/enum.
 */
const calculateParameterTokens = (key, prop, context) => {
  const { encoder, constants } = context;
  let total = constants.propKey;
  if (typeof prop !== "object" || prop === null) return total;
  const schema = prop;
  if (Array.isArray(schema.enum)) {
    total += constants.enumInit;
    for (const member of schema.enum) {
      total += constants.enumItem + encoder.encode(String(member)).length;
    }
  }
  let description = schema.description || "";
  if (description.endsWith(".")) description = description.slice(0, -1);
  const headline = `${key}:${schema.type || "string"}:${description}`;
  total += encoder.encode(headline).length;
  const skip = new Set(["type", "description", "enum"]);
  for (const [extraKey, extraValue] of Object.entries(schema)) {
    if (skip.has(extraKey)) continue;
    const text = typeof extraValue === "string" ? extraValue : JSON.stringify(extraValue);
    total += encoder.encode(`${extraKey}:${text}`).length;
  }
  return total;
};
492
/**
 * Calculate tokens for a function's JSON-schema `parameters` object:
 * properties are costed individually (plus a one-time propInit when any
 * exist); every other schema key is costed as its encoded `key:value` text.
 */
const calculateParametersTokens = (parameters, encoder, constants) => {
  if (!parameters || typeof parameters !== "object") return 0;
  let total = 0;
  for (const [key, value] of Object.entries(parameters)) {
    if (key === "properties") {
      const propNames = Object.keys(value);
      if (propNames.length === 0) continue;
      total += constants.propInit;
      for (const propName of propNames) {
        total += calculateParameterTokens(propName, value[propName], { encoder, constants });
      }
    } else {
      const text = typeof value === "string" ? value : JSON.stringify(value);
      total += encoder.encode(`${key}:${text}`).length;
    }
  }
  return total;
};
514
/**
 * Calculate tokens for a single tool definition: the per-function init cost,
 * the encoded `name:description` line (trailing period stripped), and the
 * cost of its parameter schema when one is present.
 */
const calculateToolTokens = (tool, encoder, constants) => {
  const fn = tool.function;
  const rawDesc = fn.description || "";
  const desc = rawDesc.endsWith(".") ? rawDesc.slice(0, -1) : rawDesc;
  let total = constants.funcInit + encoder.encode(`${fn.name}:${desc}`).length;
  if (typeof fn.parameters === "object" && fn.parameters !== null) {
    total += calculateParametersTokens(fn.parameters, encoder, constants);
  }
  return total;
};
528
/**
 * Calculate the total token cost of a tool list: each tool's individual cost
 * plus a single trailing funcEnd overhead.
 */
const numTokensForTools = (tools, encoder, constants) =>
  tools.reduce((sum, tool) => sum + calculateToolTokens(tool, encoder, constants), constants.funcEnd);
537
/**
 * Calculate input/output token estimates for a chat payload using the model's
 * tokenizer: non-assistant messages (plus any tool definitions) count as
 * input, assistant messages count as output.
 */
const getTokenCount = async (payload, model) => {
  const encoder = await getEncodeChatFunction(getTokenizerFromModel(model));
  const constants = getModelConstants(model);
  const allMessages = payload.messages;
  const inputMessages = allMessages.filter((m) => m.role !== "assistant");
  const outputMessages = allMessages.filter((m) => m.role === "assistant");
  let inputTokens = calculateTokens(inputMessages, encoder, constants);
  if (payload.tools?.length) inputTokens += numTokensForTools(payload.tools, encoder, constants);
  return {
    input: inputTokens,
    output: calculateTokens(outputMessages, encoder, constants)
  };
};
555
+
556
+ //#endregion
557
+ //#region src/services/copilot/create-chat-completions.ts
558
/**
 * Sends a chat-completions request to the Copilot API.
 * Vision headers are enabled when any message carries an image_url part, and
 * X-Initiator is "agent" when the conversation already contains assistant or
 * tool turns (i.e. this is not the user's first message).
 * Returns an SSE event iterator for streaming payloads, parsed JSON otherwise.
 * @throws {Error} when no Copilot token has been acquired yet.
 * @throws {HTTPError} when the upstream response is not ok.
 */
const createChatCompletions = async (payload) => {
  if (!state.copilotToken) throw new Error("Copilot token not found");
  const enableVision = payload.messages.some((x) => typeof x.content !== "string" && x.content?.some((x$1) => x$1.type === "image_url"));
  const isAgentCall = payload.messages.some((msg) => ["assistant", "tool"].includes(msg.role));
  const headers = {
    ...copilotHeaders(state, enableVision),
    "X-Initiator": isAgentCall ? "agent" : "user"
  };
  const response = await fetch(`${copilotBaseUrl(state)}/chat/completions`, {
    method: "POST",
    headers,
    body: JSON.stringify(payload)
  });
  if (!response.ok) {
    consola.error("Failed to create chat completions", response);
    throw new HTTPError("Failed to create chat completions", response);
  }
  if (payload.stream) return events(response);
  return await response.json();
};
578
+
579
+ //#endregion
580
+ //#region src/routes/chat-completions/handler.ts
581
/**
 * Handles an OpenAI-style POST /chat/completions request: rate-limits, logs
 * an estimated token count, optionally waits for manual approval, defaults
 * max_tokens to the model's output limit, then proxies to Copilot — returning
 * JSON for non-streaming responses or an SSE stream otherwise.
 */
async function handleCompletion$1(c) {
  await checkRateLimit(state);
  let payload = await c.req.json();
  // Only the tail of the payload is logged to keep debug output bounded.
  consola.debug("Request payload:", JSON.stringify(payload).slice(-400));
  const selectedModel = state.models?.data.find((model) => model.id === payload.model);
  try {
    if (selectedModel) {
      const tokenCount = await getTokenCount(payload, selectedModel);
      consola.info("Current token count:", tokenCount);
    } else consola.warn("No model selected, skipping token count calculation");
  } catch (error) {
    // Token counting is informational only; never fail the request over it.
    consola.warn("Failed to calculate token count:", error);
  }
  if (state.manualApprove) await awaitApproval();
  if (isNullish(payload.max_tokens)) {
    payload = {
      ...payload,
      max_tokens: selectedModel?.capabilities.limits.max_output_tokens
    };
    consola.debug("Set max_tokens to:", JSON.stringify(payload.max_tokens));
  }
  const response = await createChatCompletions(payload);
  if (isNonStreaming$1(response)) {
    consola.debug("Non-streaming response:", JSON.stringify(response));
    return c.json(response);
  }
  consola.debug("Streaming response");
  return streamSSE(c, async (stream) => {
    for await (const chunk of response) {
      consola.debug("Streaming chunk:", JSON.stringify(chunk));
      await stream.writeSSE(chunk);
    }
  });
}
// A completed (non-streaming) completion object carries `choices`; the
// streaming case is an async iterator of SSE events instead.
const isNonStreaming$1 = (response) => Object.hasOwn(response, "choices");
616
+
617
+ //#endregion
618
+ //#region src/routes/chat-completions/route.ts
619
// POST / — OpenAI-compatible chat-completions endpoint; upstream errors are
// translated by forwardError instead of crashing the route.
const completionRoutes = new Hono();
completionRoutes.post("/", async (c) => {
  try {
    return await handleCompletion$1(c);
  } catch (error) {
    return await forwardError(c, error);
  }
});
627
+
628
+ //#endregion
629
+ //#region src/services/copilot/create-embeddings.ts
630
/**
 * Proxies an embeddings request to the Copilot API.
 * @throws {Error} when no Copilot token has been acquired yet.
 * @throws {HTTPError} when the upstream response is not ok.
 */
const createEmbeddings = async (payload) => {
  if (!state.copilotToken) throw new Error("Copilot token not found");
  const response = await fetch(`${copilotBaseUrl(state)}/embeddings`, {
    method: "POST",
    headers: copilotHeaders(state),
    body: JSON.stringify(payload)
  });
  if (!response.ok) throw new HTTPError("Failed to create embeddings", response);
  return await response.json();
};
640
+
641
+ //#endregion
642
+ //#region src/routes/embeddings/route.ts
643
// POST / — OpenAI-compatible embeddings endpoint; upstream errors are
// translated by forwardError instead of crashing the route.
const embeddingRoutes = new Hono();
embeddingRoutes.post("/", async (c) => {
  try {
    const payload = await c.req.json();
    return c.json(await createEmbeddings(payload));
  } catch (error) {
    return await forwardError(c, error);
  }
});
653
+
654
+ //#endregion
655
+ //#region src/routes/messages/utils.ts
656
/**
 * Maps an OpenAI `finish_reason` to the equivalent Anthropic `stop_reason`.
 * A null finish reason (still streaming) stays null; unrecognized reasons
 * map to undefined.
 */
function mapOpenAIStopReasonToAnthropic(finishReason) {
  if (finishReason === null) return null;
  switch (finishReason) {
    case "stop": return "end_turn";
    case "length": return "max_tokens";
    case "tool_calls": return "tool_use";
    case "content_filter": return "end_turn";
    default: return undefined;
  }
}
665
+
666
+ //#endregion
667
+ //#region src/routes/messages/non-stream-translation.ts
668
/**
 * Translates an Anthropic Messages API payload into an OpenAI
 * chat-completions payload, converting the model name, messages, tools,
 * and tool choice.
 */
function translateToOpenAI(payload) {
  const { model, messages, system, max_tokens, stop_sequences, stream, temperature, top_p, metadata, tools, tool_choice } = payload;
  return {
    model: translateModelName(model),
    messages: translateAnthropicMessagesToOpenAI(messages, system),
    max_tokens,
    stop: stop_sequences,
    stream,
    temperature,
    top_p,
    user: metadata?.user_id,
    tools: translateAnthropicToolsToOpenAI(tools),
    tool_choice: translateAnthropicToolChoiceToOpenAI(tool_choice)
  };
}
682
/**
 * Normalizes dated Anthropic model ids to the base aliases Copilot expects,
 * e.g. "claude-sonnet-4-20250514" -> "claude-sonnet-4" and
 * "claude-opus-4-1" -> "claude-opus-4". Any other id passes through.
 *
 * Fix: the opus guard checked the broad "claude-opus-" prefix while the
 * rewrite regex only matched "claude-opus-4-*", so other opus ids entered
 * the branch but were returned unchanged anyway — guard and rewrite now use
 * the same prefix (output is identical for all inputs, the intent is clear).
 */
function translateModelName(model) {
  if (model.startsWith("claude-sonnet-4-")) return "claude-sonnet-4";
  if (model.startsWith("claude-opus-4-")) return "claude-opus-4";
  return model;
}
687
/**
 * Converts the Anthropic system prompt and message list into a flat OpenAI
 * message array, system messages first.
 */
function translateAnthropicMessagesToOpenAI(anthropicMessages, system) {
  const converted = anthropicMessages.flatMap((message) =>
    message.role === "user" ? handleUserMessage(message) : handleAssistantMessage(message)
  );
  return [...handleSystemPrompt(system), ...converted];
}
692
/**
 * Converts an Anthropic `system` prompt (a string or an array of text
 * blocks) into a single OpenAI system message; no prompt yields no messages.
 */
function handleSystemPrompt(system) {
  if (!system) return [];
  const content = typeof system === "string"
    ? system
    : system.map((block) => block.text).join("\n\n");
  return [{ role: "system", content }];
}
703
/**
 * Converts an Anthropic user message into OpenAI messages. tool_result
 * blocks become individual `tool` role messages (emitted first); any
 * remaining blocks collapse into one trailing `user` message, omitted when
 * there are none.
 */
function handleUserMessage(message) {
  if (!Array.isArray(message.content)) {
    return [{ role: "user", content: mapContent(message.content) }];
  }
  const toolResults = [];
  const remaining = [];
  for (const block of message.content) {
    (block.type === "tool_result" ? toolResults : remaining).push(block);
  }
  const messages = toolResults.map((block) => ({
    role: "tool",
    tool_call_id: block.tool_use_id,
    content: mapContent(block.content)
  }));
  if (remaining.length > 0) messages.push({ role: "user", content: mapContent(remaining) });
  return messages;
}
723
/**
 * Converts an Anthropic assistant message into OpenAI form. When tool_use
 * blocks are present, text and thinking blocks are joined (text first) into
 * the message content (null if empty) and the tool uses become OpenAI
 * tool_calls; otherwise the content is mapped directly.
 */
function handleAssistantMessage(message) {
  const { content } = message;
  if (!Array.isArray(content)) return [{ role: "assistant", content: mapContent(content) }];
  const toolUses = content.filter((block) => block.type === "tool_use");
  if (toolUses.length === 0) return [{ role: "assistant", content: mapContent(content) }];
  const textParts = content.filter((b) => b.type === "text").map((b) => b.text);
  const thinkingParts = content.filter((b) => b.type === "thinking").map((b) => b.thinking);
  const joinedText = [...textParts, ...thinkingParts].join("\n\n");
  return [{
    role: "assistant",
    content: joinedText || null,
    tool_calls: toolUses.map((toolUse) => ({
      id: toolUse.id,
      type: "function",
      function: {
        name: toolUse.name,
        arguments: JSON.stringify(toolUse.input)
      }
    }))
  }];
}
748
/**
 * Converts an Anthropic content value into OpenAI message content. Strings
 * pass through; non-arrays become null. Block arrays without images collapse
 * to a single string of joined text/thinking blocks; arrays containing
 * images become OpenAI multi-part content (images as data: URLs).
 */
function mapContent(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return null;
  const hasImage = content.some((block) => block.type === "image");
  if (!hasImage) {
    return content
      .filter((block) => block.type === "text" || block.type === "thinking")
      .map((block) => (block.type === "text" ? block.text : block.thinking))
      .join("\n\n");
  }
  const parts = [];
  for (const block of content) {
    if (block.type === "text") {
      parts.push({ type: "text", text: block.text });
    } else if (block.type === "thinking") {
      parts.push({ type: "text", text: block.thinking });
    } else if (block.type === "image") {
      parts.push({
        type: "image_url",
        image_url: { url: `data:${block.source.media_type};base64,${block.source.data}` }
      });
    }
  }
  return parts;
}
775
/**
 * Maps Anthropic tool definitions onto OpenAI function-tool definitions;
 * a missing tool list maps to undefined.
 */
function translateAnthropicToolsToOpenAI(anthropicTools) {
  if (!anthropicTools) return undefined;
  return anthropicTools.map(({ name, description, input_schema }) => ({
    type: "function",
    function: { name, description, parameters: input_schema }
  }));
}
786
/**
 * Maps an Anthropic `tool_choice` onto the OpenAI equivalent. "any" becomes
 * "required"; a named tool becomes a function selector; anything
 * unrecognized — or a "tool" choice without a name — maps to undefined.
 */
function translateAnthropicToolChoiceToOpenAI(anthropicToolChoice) {
  if (!anthropicToolChoice) return undefined;
  if (anthropicToolChoice.type === "auto") return "auto";
  if (anthropicToolChoice.type === "none") return "none";
  if (anthropicToolChoice.type === "any") return "required";
  if (anthropicToolChoice.type === "tool" && anthropicToolChoice.name) {
    return { type: "function", function: { name: anthropicToolChoice.name } };
  }
  return undefined;
}
801
/**
 * Translates an OpenAI chat-completion response into an Anthropic Messages
 * API response: text and tool-call content from every choice is merged,
 * usage is mapped (cached prompt tokens reported separately and subtracted
 * from input_tokens), and the finish reason becomes a stop_reason.
 */
function translateToAnthropic(response) {
  const allTextBlocks = [];
  const allToolUseBlocks = [];
  let stopReason = null;
  // Seed with the first choice's finish reason (may remain null mid-stream).
  stopReason = response.choices[0]?.finish_reason ?? stopReason;
  for (const choice of response.choices) {
    const textBlocks = getAnthropicTextBlocks(choice.message.content);
    const toolUseBlocks = getAnthropicToolUseBlocks(choice.message.tool_calls);
    allTextBlocks.push(...textBlocks);
    allToolUseBlocks.push(...toolUseBlocks);
    // NOTE(review): this lets a "tool_calls" finish reason from any choice
    // override the seed, and also overwrites a plain "stop" with the current
    // choice's reason — the second clause looks intentional but confirm.
    if (choice.finish_reason === "tool_calls" || stopReason === "stop") stopReason = choice.finish_reason;
  }
  return {
    id: response.id,
    type: "message",
    role: "assistant",
    model: response.model,
    content: [...allTextBlocks, ...allToolUseBlocks],
    stop_reason: mapOpenAIStopReasonToAnthropic(stopReason),
    stop_sequence: null,
    usage: {
      // Anthropic reports uncached input separately from cache reads.
      input_tokens: (response.usage?.prompt_tokens ?? 0) - (response.usage?.prompt_tokens_details?.cached_tokens ?? 0),
      output_tokens: response.usage?.completion_tokens ?? 0,
      ...response.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: response.usage.prompt_tokens_details.cached_tokens }
    }
  };
}
828
/**
 * Extracts Anthropic text blocks from OpenAI message content: a plain string
 * becomes one block, a parts array keeps only its text parts, and anything
 * else yields no blocks.
 */
function getAnthropicTextBlocks(messageContent) {
  if (typeof messageContent === "string") {
    return [{ type: "text", text: messageContent }];
  }
  if (!Array.isArray(messageContent)) return [];
  const blocks = [];
  for (const part of messageContent) {
    if (part.type === "text") blocks.push({ type: "text", text: part.text });
  }
  return blocks;
}
839
/**
 * Converts OpenAI tool calls into Anthropic `tool_use` content blocks.
 *
 * @param toolCalls - Optional array of OpenAI tool-call objects.
 * @returns Anthropic tool_use blocks (empty array when there are none).
 */
function getAnthropicToolUseBlocks(toolCalls) {
  if (!toolCalls) return [];
  return toolCalls.map((toolCall) => ({
    type: "tool_use",
    id: toolCall.id,
    name: toolCall.function.name,
    input: parseToolArguments(toolCall.function.arguments)
  }));
}
/**
 * Safely parses a tool-call arguments string. Models sometimes emit an
 * empty string (no-argument tools) or truncated JSON; previously that made
 * JSON.parse throw and broke translation of the whole response. Fall back
 * to an empty input object instead.
 */
function parseToolArguments(rawArguments) {
  if (!rawArguments) return {};
  try {
    return JSON.parse(rawArguments);
  } catch {
    return {};
  }
}
+
849
+ //#endregion
850
+ //#region src/routes/messages/count-tokens-handler.ts
851
/**
 * POST /v1/messages/count_tokens handler: estimates the token count for an
 * Anthropic request by translating it to an OpenAI payload and running the
 * local tokenizer, then applying model-specific correction factors.
 * Always responds 200; on any failure it falls back to `input_tokens: 1`.
 */
async function handleCountTokens(c) {
  try {
    const anthropicBeta = c.req.header("anthropic-beta");
    const anthropicPayload = await c.req.json();
    const openAIPayload = translateToOpenAI(anthropicPayload);
    const selectedModel = state.models?.data.find((model) => model.id === anthropicPayload.model);
    if (!selectedModel) {
      // Unknown model: we cannot pick a tokenizer, so return a harmless default.
      consola.warn("Model not found, returning default token count");
      return c.json({ input_tokens: 1 });
    }
    const tokenCount = await getTokenCount(openAIPayload, selectedModel);
    if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
      let mcpToolExist = false;
      // Claude Code requests (anthropic-beta: claude-code...) carrying MCP tools
      // (names prefixed "mcp__") skip the flat tools-preamble adjustment below.
      if (anthropicBeta?.startsWith("claude-code")) mcpToolExist = anthropicPayload.tools.some((tool) => tool.name.startsWith("mcp__"));
      if (!mcpToolExist) {
        // Flat per-model-family overhead for the tools preamble — empirical
        // constants, presumably measured against the real APIs; TODO confirm.
        if (anthropicPayload.model.startsWith("claude")) tokenCount.input = tokenCount.input + 346;
        else if (anthropicPayload.model.startsWith("grok")) tokenCount.input = tokenCount.input + 480;
      }
    }
    let finalTokenCount = tokenCount.input + tokenCount.output;
    // Scale factors compensate for tokenizer mismatch between the local
    // counter and the target model family (empirical — TODO confirm).
    if (anthropicPayload.model.startsWith("claude")) finalTokenCount = Math.round(finalTokenCount * 1.15);
    else if (anthropicPayload.model.startsWith("grok")) finalTokenCount = Math.round(finalTokenCount * 1.03);
    consola.info("Token count:", finalTokenCount);
    return c.json({ input_tokens: finalTokenCount });
  } catch (error) {
    // Counting is best-effort; never fail the client request over it.
    consola.error("Error counting tokens:", error);
    return c.json({ input_tokens: 1 });
  }
}
+
884
+ //#endregion
885
+ //#region src/routes/messages/stream-translation.ts
886
/** True when the currently open content block belongs to a tool call. */
function isToolBlockOpen(state$1) {
  if (!state$1.contentBlockOpen) return false;
  const currentIndex = state$1.contentBlockIndex;
  for (const toolCall of Object.values(state$1.toolCalls)) {
    if (toolCall.anthropicBlockIndex === currentIndex) return true;
  }
  return false;
}
/**
 * Translates one OpenAI streaming chunk into zero or more Anthropic SSE
 * events, threading content-block and tool-call bookkeeping through the
 * mutable per-response stream state (`state$1`).
 *
 * Event shape produced: message_start (once), then interleaved
 * content_block_start / content_block_delta / content_block_stop, and on
 * the final chunk message_delta + message_stop. Only the first choice of
 * each chunk is translated.
 */
function translateChunkToAnthropicEvents(chunk, state$1) {
  const events$1 = [];
  if (chunk.choices.length === 0) return events$1;
  const choice = chunk.choices[0];
  const { delta } = choice;
  // Emit message_start exactly once, on the first chunk of the response.
  if (!state$1.messageStartSent) {
    events$1.push({
      type: "message_start",
      message: {
        id: chunk.id,
        type: "message",
        role: "assistant",
        content: [],
        model: chunk.model,
        stop_reason: null,
        stop_sequence: null,
        usage: {
          // Anthropic reports uncached input separately from cache reads.
          input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
          output_tokens: 0,
          ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
        }
      }
    });
    state$1.messageStartSent = true;
  }
  if (delta.content) {
    // Text arriving while a tool block is open: close the tool block and
    // advance to a fresh block index before starting a text block.
    if (isToolBlockOpen(state$1)) {
      events$1.push({
        type: "content_block_stop",
        index: state$1.contentBlockIndex
      });
      state$1.contentBlockIndex++;
      state$1.contentBlockOpen = false;
    }
    // Lazily open a text block the first time text arrives.
    if (!state$1.contentBlockOpen) {
      events$1.push({
        type: "content_block_start",
        index: state$1.contentBlockIndex,
        content_block: {
          type: "text",
          text: ""
        }
      });
      state$1.contentBlockOpen = true;
    }
    events$1.push({
      type: "content_block_delta",
      index: state$1.contentBlockIndex,
      delta: {
        type: "text_delta",
        text: delta.content
      }
    });
  }
  if (delta.tool_calls) for (const toolCall of delta.tool_calls) {
    // A new tool call begins when the fragment carries both an id and a name.
    if (toolCall.id && toolCall.function?.name) {
      // Close whatever block is currently open (text or previous tool).
      if (state$1.contentBlockOpen) {
        events$1.push({
          type: "content_block_stop",
          index: state$1.contentBlockIndex
        });
        state$1.contentBlockIndex++;
        state$1.contentBlockOpen = false;
      }
      const anthropicBlockIndex = state$1.contentBlockIndex;
      // Remember the OpenAI tool index -> Anthropic block index mapping so
      // later argument fragments can be routed to the right block.
      state$1.toolCalls[toolCall.index] = {
        id: toolCall.id,
        name: toolCall.function.name,
        anthropicBlockIndex
      };
      events$1.push({
        type: "content_block_start",
        index: anthropicBlockIndex,
        content_block: {
          type: "tool_use",
          id: toolCall.id,
          name: toolCall.function.name,
          input: {}
        }
      });
      state$1.contentBlockOpen = true;
    }
    // Argument fragments stream as input_json_delta on the tool's own block;
    // fragments for an unknown tool index are silently dropped.
    if (toolCall.function?.arguments) {
      const toolCallInfo = state$1.toolCalls[toolCall.index];
      if (toolCallInfo) events$1.push({
        type: "content_block_delta",
        index: toolCallInfo.anthropicBlockIndex,
        delta: {
          type: "input_json_delta",
          partial_json: toolCall.function.arguments
        }
      });
    }
  }
  if (choice.finish_reason) {
    // Close any dangling block, then finish with message_delta + message_stop.
    if (state$1.contentBlockOpen) {
      events$1.push({
        type: "content_block_stop",
        index: state$1.contentBlockIndex
      });
      state$1.contentBlockOpen = false;
    }
    events$1.push({
      type: "message_delta",
      delta: {
        stop_reason: mapOpenAIStopReasonToAnthropic(choice.finish_reason),
        stop_sequence: null
      },
      usage: {
        input_tokens: (chunk.usage?.prompt_tokens ?? 0) - (chunk.usage?.prompt_tokens_details?.cached_tokens ?? 0),
        output_tokens: chunk.usage?.completion_tokens ?? 0,
        ...chunk.usage?.prompt_tokens_details?.cached_tokens !== void 0 && { cache_read_input_tokens: chunk.usage.prompt_tokens_details.cached_tokens }
      }
    }, { type: "message_stop" });
  }
  return events$1;
}
+
1008
+ //#endregion
1009
+ //#region src/routes/messages/handler.ts
1010
/**
 * POST /v1/messages handler: translates the Anthropic request into an
 * OpenAI chat completion, forwards it to Copilot, and translates the
 * result back — either as a single JSON message or as an Anthropic SSE
 * stream. Rate limiting and optional manual approval run before the
 * upstream call.
 */
async function handleCompletion(c) {
  await checkRateLimit(state);
  const anthropicPayload = await c.req.json();
  consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload));
  const openAIPayload = translateToOpenAI(anthropicPayload);
  consola.debug("Translated OpenAI request payload:", JSON.stringify(openAIPayload));
  if (state.manualApprove) await awaitApproval();
  const response = await createChatCompletions(openAIPayload);
  if (isNonStreaming(response)) {
    consola.debug("Non-streaming response from Copilot:", JSON.stringify(response).slice(-400));
    const anthropicResponse = translateToAnthropic(response);
    consola.debug("Translated Anthropic response:", JSON.stringify(anthropicResponse));
    return c.json(anthropicResponse);
  }
  consola.debug("Streaming response from Copilot");
  return streamSSE(c, async (stream) => {
    // Per-response translation state, shared across all chunks of this stream.
    const streamState = {
      messageStartSent: false,
      contentBlockIndex: 0,
      contentBlockOpen: false,
      toolCalls: {}
    };
    for await (const rawEvent of response) {
      consola.debug("Copilot raw stream event:", JSON.stringify(rawEvent));
      // "[DONE]" is the OpenAI end-of-stream sentinel; empty events are keep-alives.
      if (rawEvent.data === "[DONE]") break;
      if (!rawEvent.data) continue;
      const chunk = JSON.parse(rawEvent.data);
      const events$1 = translateChunkToAnthropicEvents(chunk, streamState);
      for (const event of events$1) {
        consola.debug("Translated Anthropic event:", JSON.stringify(event));
        await stream.writeSSE({
          event: event.type,
          data: JSON.stringify(event)
        });
      }
    }
  });
}
// A non-streaming Copilot response is a plain completion object (it carries
// a "choices" property); streaming responses are async iterables of events.
function isNonStreaming(response) {
  return Object.hasOwn(response, "choices");
}
+
1050
+ //#endregion
1051
+ //#region src/routes/messages/route.ts
1052
// Anthropic-compatible /v1/messages routes. Both endpoints share the same
// error policy: any thrown error is converted via forwardError.
const messageRoutes = new Hono();
const withMessageErrorForwarding = (handler) => async (c) => {
  try {
    return await handler(c);
  } catch (error) {
    return await forwardError(c, error);
  }
};
messageRoutes.post("/", withMessageErrorForwarding(handleCompletion));
messageRoutes.post("/count_tokens", withMessageErrorForwarding(handleCountTokens));
+
1068
+ //#endregion
1069
+ //#region src/routes/models/route.ts
1070
// Model listing endpoint, shaped for OpenAI/Anthropic-style clients.
const modelRoutes = new Hono();
modelRoutes.get("/", async (c) => {
  try {
    // Lazily populate the model cache on first request.
    if (!state.models) await cacheModels();
    const epochIso = new Date(0).toISOString();
    const toListing = (model) => ({
      id: model.id,
      object: "model",
      type: "model",
      created: 0,
      created_at: epochIso,
      owned_by: model.vendor,
      display_name: model.name
    });
    return c.json({
      object: "list",
      data: state.models?.data.map(toListing),
      has_more: false
    });
  } catch (error) {
    return await forwardError(c, error);
  }
});
+
1093
+ //#endregion
1094
+ //#region src/routes/token/route.ts
1095
// Debug endpoint exposing the current Copilot token.
const tokenRoute = new Hono();
tokenRoute.get("/", (c) => {
  try {
    return c.json({ token: state.copilotToken });
  } catch (error) {
    consola.error("Error fetching token:", error);
    const failureBody = {
      error: "Failed to fetch token",
      token: null
    };
    return c.json(failureBody, 500);
  }
});
+
1108
+ //#endregion
1109
+ //#region src/routes/usage/route.ts
1110
// Copilot quota/usage endpoint, consumed by the usage viewer web UI.
const usageRoute = new Hono();
usageRoute.get("/", async (c) => {
  try {
    return c.json(await getCopilotUsage());
  } catch (error) {
    consola.error("Error fetching Copilot usage:", error);
    return c.json({ error: "Failed to fetch Copilot usage" }, 500);
  }
});
+
1121
+ //#endregion
1122
+ //#region src/server.ts
1123
// Root Hono app. Middleware registration order matters: request logging,
// then CORS, then API-key auth (a no-op unless state.apiKeys is set).
// Every route is mounted both at the bare path and under /v1 so that
// OpenAI-style and Anthropic-style clients can use either prefix.
const server = new Hono();
server.use(logger());
server.use(cors());
server.use(apiKeyAuthMiddleware);
server.get("/", (c) => c.text("Server running"));
server.route("/chat/completions", completionRoutes);
server.route("/models", modelRoutes);
server.route("/embeddings", embeddingRoutes);
server.route("/usage", usageRoute);
server.route("/token", tokenRoute);
server.route("/v1/chat/completions", completionRoutes);
server.route("/v1/models", modelRoutes);
server.route("/v1/embeddings", embeddingRoutes);
server.route("/v1/messages", messageRoutes);
+
1138
+ //#endregion
1139
+ //#region src/start.ts
1140
/**
 * Start and configure the Copilot API server according to the provided options.
 *
 * Configures proxy and logging, initializes global state and credentials, ensures
 * required paths and model data are cached, optionally generates a Claude Code
 * launch command (and attempts to copy it to the clipboard), prints a usage
 * viewer URL, and begins serving HTTP requests on the specified port.
 *
 * @param options - Server startup options:
 *   - port: Port number to listen on
 *   - verbose: Enable verbose logging
 *   - accountType: Account plan to use ("individual", "business", "enterprise")
 *   - manual: Require manual approval for requests
 *   - rateLimit: Seconds to wait between requests (optional)
 *   - rateLimitWait: Wait instead of erroring when rate limit is hit
 *   - githubToken: GitHub token to use (optional; if omitted a token setup prompt may run)
 *   - claudeCode: Generate a Claude Code environment launch command
 *   - showToken: Expose GitHub/Copilot tokens in responses for debugging
 *   - proxyEnv: Initialize proxy settings from environment variables
 *   - apiKeys: Optional list of API keys to enable API key authentication
 */
async function runServer(options) {
  if (options.proxyEnv) initProxyFromEnv();
  if (options.verbose) {
    consola.level = 5;
    consola.info("Verbose logging enabled");
  }
  state.accountType = options.accountType;
  if (options.accountType !== "individual") consola.info(`Using ${options.accountType} plan GitHub account`);
  state.manualApprove = options.manual;
  state.rateLimitSeconds = options.rateLimit;
  state.rateLimitWait = options.rateLimitWait;
  state.showToken = options.showToken;
  state.apiKeys = options.apiKeys;
  if (state.apiKeys && state.apiKeys.length > 0) consola.info(`API key authentication enabled with ${state.apiKeys.length} key(s)`);
  await ensurePaths();
  await cacheVSCodeVersion();
  if (options.githubToken) {
    state.githubToken = options.githubToken;
    consola.info("Using provided GitHub token");
    try {
      // Validate the token by fetching the user. BUG FIX: the dynamic import
      // previously pointed at a stale chunk ("./get-user-BQgLIPYd.js") that is
      // not part of this build; load the chunk this bundle actually ships.
      const { getGitHubUser } = await import("./get-user-DalX7epg.js");
      const user = await getGitHubUser();
      consola.info(`Logged in as ${user.login}`);
    } catch (error) {
      consola.error("Provided GitHub token is invalid");
      throw error;
    }
  } else await setupGitHubToken();
  try {
    await setupCopilotToken();
  } catch (error) {
    // BUG FIX: use the HTTPError / clearGithubToken bindings already imported
    // at the top of this bundle. The previous dynamic imports referenced stale
    // chunk files ("./error-Ba4dbGYj.js", "./token-DP4tvNfm.js") that do not
    // exist in this build and would throw ERR_MODULE_NOT_FOUND here, masking
    // the real 401 handling.
    if (error instanceof HTTPError && error.response.status === 401) {
      consola.error("Failed to get Copilot token - GitHub token may be invalid or Copilot access revoked");
      await clearGithubToken();
      consola.info("Please restart to re-authenticate");
    }
    throw error;
  }
  await cacheModels();
  consola.info(`Available models: \n${state.models?.data.map((model) => `- ${model.id}`).join("\n")}`);
  const serverUrl = `http://localhost:${options.port}`;
  if (options.claudeCode) {
    invariant(state.models, "Models should be loaded by now");
    const selectedModel = await consola.prompt("Select a model to use with Claude Code", {
      type: "select",
      options: state.models.data.map((model) => model.id)
    });
    const selectedSmallModel = await consola.prompt("Select a small model to use with Claude Code", {
      type: "select",
      options: state.models.data.map((model) => model.id)
    });
    const command = generateEnvScript({
      ANTHROPIC_BASE_URL: serverUrl,
      ANTHROPIC_AUTH_TOKEN: "dummy",
      ANTHROPIC_MODEL: selectedModel,
      ANTHROPIC_DEFAULT_SONNET_MODEL: selectedModel,
      ANTHROPIC_SMALL_FAST_MODEL: selectedSmallModel,
      ANTHROPIC_DEFAULT_HAIKU_MODEL: selectedSmallModel,
      DISABLE_NON_ESSENTIAL_MODEL_CALLS: "1",
      CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: "1"
    }, "claude");
    try {
      clipboard.writeSync(command);
      consola.success("Copied Claude Code command to clipboard!");
    } catch {
      // Clipboard is unavailable in headless environments; print instead.
      consola.warn("Failed to copy to clipboard. Here is the Claude Code command:");
      consola.log(command);
    }
  }
  consola.box(`🌐 Usage Viewer: https://ericc-ch.github.io/copilot-api?endpoint=${serverUrl}/usage`);
  serve({
    fetch: server.fetch,
    port: options.port
  });
}
/**
 * `start` subcommand: parses CLI flags and launches the API server.
 * `--rate-limit` is parsed as an integer number of seconds; `--api-key`
 * may be repeated and is normalized to an array before being handed to
 * runServer.
 */
const start = defineCommand({
  meta: {
    name: "start",
    description: "Start the Copilot API server"
  },
  args: {
    port: {
      alias: "p",
      type: "string",
      default: "4141",
      description: "Port to listen on"
    },
    verbose: {
      alias: "v",
      type: "boolean",
      default: false,
      description: "Enable verbose logging"
    },
    "account-type": {
      alias: "a",
      type: "string",
      default: "individual",
      description: "Account type to use (individual, business, enterprise)"
    },
    manual: {
      type: "boolean",
      default: false,
      description: "Enable manual request approval"
    },
    "rate-limit": {
      alias: "r",
      type: "string",
      description: "Rate limit in seconds between requests"
    },
    wait: {
      alias: "w",
      type: "boolean",
      default: false,
      description: "Wait instead of error when rate limit is hit. Has no effect if rate limit is not set"
    },
    "github-token": {
      alias: "g",
      type: "string",
      description: "Provide GitHub token directly (must be generated using the `auth` subcommand)"
    },
    "claude-code": {
      alias: "c",
      type: "boolean",
      default: false,
      description: "Generate a command to launch Claude Code with Copilot API config"
    },
    "show-token": {
      type: "boolean",
      default: false,
      description: "Show GitHub and Copilot tokens on fetch and refresh"
    },
    "proxy-env": {
      type: "boolean",
      default: false,
      description: "Initialize proxy from environment variables"
    },
    "api-key": {
      type: "string",
      description: "API keys for authentication"
    }
  },
  run({ args }) {
    // Absent rate limit stays undefined (disabled) rather than NaN.
    const rateLimitRaw = args["rate-limit"];
    const rateLimit = rateLimitRaw === void 0 ? void 0 : Number.parseInt(rateLimitRaw, 10);
    // citty yields a string for a single --api-key and an array when repeated;
    // normalize to an array either way.
    const apiKeyRaw = args["api-key"];
    let apiKeys;
    if (apiKeyRaw) apiKeys = Array.isArray(apiKeyRaw) ? apiKeyRaw : [apiKeyRaw];
    return runServer({
      port: Number.parseInt(args.port, 10),
      verbose: args.verbose,
      accountType: args["account-type"],
      manual: args.manual,
      rateLimit,
      rateLimitWait: args.wait,
      githubToken: args["github-token"],
      claudeCode: args["claude-code"],
      showToken: args["show-token"],
      proxyEnv: args["proxy-env"],
      apiKeys
    });
  }
});
+
1326
+ //#endregion
1327
+ //#region src/main.ts
1328
/**
 * CLI entry point: registers all subcommands and delegates to citty's
 * runner. The process exits when the selected subcommand resolves.
 */
const main = defineCommand({
  meta: {
    name: "copilot-api-plus",
    description: "A wrapper around GitHub Copilot API to make it OpenAI/Anthropic compatible. Fork with bug fixes and improvements."
  },
  subCommands: {
    auth,
    start,
    "check-usage": checkUsage,
    debug,
    logout
  }
});
// Top-level await: the module completes only after the CLI command finishes.
await runMain(main);

//#endregion
export { };
//# sourceMappingURL=main.js.map