@ashsec/copilot-api 0.11.3 → 0.11.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/main.js CHANGED
@@ -21,7 +21,7 @@ import util from "node:util";
21
21
 
22
22
  //#region package.json
23
23
  var name = "@ashsec/copilot-api";
24
- var version = "0.11.3";
24
+ var version = "0.11.5";
25
25
  var description = "Turn GitHub Copilot into OpenAI/Anthropic API compatible server. Usable with Claude Code!";
26
26
  var keywords = [
27
27
  "proxy",
@@ -100,7 +100,7 @@ var package_default = {
100
100
 
101
101
  //#endregion
102
102
  //#region src/lib/paths.ts
103
- const APP_DIR = path.join(os.homedir(), ".local", "share", "copilot-api");
103
+ const APP_DIR = process.env.DATA_DIR || path.join(os.homedir(), ".local", "share", "copilot-api");
104
104
  const GITHUB_TOKEN_PATH = path.join(APP_DIR, "github_token");
105
105
  const REPLACEMENTS_CONFIG_PATH = path.join(APP_DIR, "replacements.json");
106
106
  const PATHS = {
@@ -1538,6 +1538,59 @@ function generateEnvScript(envVars, commandToRun = "") {
1538
1538
  return commandBlock || commandToRun;
1539
1539
  }
1540
1540
 
1541
+ //#endregion
1542
+ //#region src/lib/ip-blocker.ts
1543
+ const ipTracker = /* @__PURE__ */ new Map();
1544
+ /**
1545
+ * Extracts the client IP from the x-forwarded-for header.
1546
+ * Returns the first IP in the comma-separated list, or null if header is absent.
1547
+ */
1548
+ function extractClientIp(c) {
1549
+ const xForwardedFor = c.req.header("x-forwarded-for");
1550
+ if (!xForwardedFor) return null;
1551
+ return xForwardedFor.split(",")[0]?.trim() || null;
1552
+ }
1553
+ /**
1554
+ * Gets the current UTC date as YYYY-MM-DD string.
1555
+ */
1556
+ function getUtcDateString() {
1557
+ return (/* @__PURE__ */ new Date()).toISOString().slice(0, 10);
1558
+ }
1559
+ /**
1560
+ * Checks if an IP is blocked due to 3+ failed attempts today (UTC).
1561
+ * Cleans up stale entries (entries from previous days).
1562
+ */
1563
+ function isIpBlocked(ip) {
1564
+ const today = getUtcDateString();
1565
+ const entry = ipTracker.get(ip);
1566
+ if (!entry) return false;
1567
+ if (entry.date !== today) {
1568
+ ipTracker.delete(ip);
1569
+ return false;
1570
+ }
1571
+ return entry.count >= 3;
1572
+ }
1573
+ /**
1574
+ * Records a failed authentication attempt for an IP.
1575
+ * Increments count if entry exists for today, otherwise creates new entry.
1576
+ */
1577
+ function recordFailedAttempt(ip) {
1578
+ const today = getUtcDateString();
1579
+ const entry = ipTracker.get(ip);
1580
+ if (!entry) {
1581
+ ipTracker.set(ip, {
1582
+ count: 1,
1583
+ date: today
1584
+ });
1585
+ return;
1586
+ }
1587
+ if (entry.date === today) entry.count += 1;
1588
+ else ipTracker.set(ip, {
1589
+ count: 1,
1590
+ date: today
1591
+ });
1592
+ }
1593
+
1541
1594
  //#endregion
1542
1595
  //#region src/lib/request-auth.ts
1543
1596
  function normalizeApiKeys(apiKeys) {
@@ -1556,6 +1609,8 @@ function getConfiguredApiKeys() {
1556
1609
  function extractRequestApiKey(c) {
1557
1610
  const xApiKey = c.req.header("x-api-key")?.trim();
1558
1611
  if (xApiKey) return xApiKey;
1612
+ const googleApiKey = c.req.header("x-goog-api-key")?.trim();
1613
+ if (googleApiKey) return googleApiKey;
1559
1614
  const authorization = c.req.header("authorization");
1560
1615
  if (!authorization) return null;
1561
1616
  const [scheme, ...rest] = authorization.trim().split(/\s+/);
@@ -1597,10 +1652,16 @@ async function apiKeyGuard(c, next) {
1597
1652
  await next();
1598
1653
  return;
1599
1654
  }
1655
+ const clientIp = extractClientIp(c);
1656
+ if (clientIp !== null && isIpBlocked(clientIp)) {
1657
+ await new Promise(() => {});
1658
+ return;
1659
+ }
1600
1660
  if (extractRequestApiKey(c) === state.apiKeyAuth) {
1601
1661
  await next();
1602
1662
  return;
1603
1663
  }
1664
+ if (clientIp !== null) recordFailedAttempt(clientIp);
1604
1665
  await new Promise(() => {});
1605
1666
  }
1606
1667
 
@@ -1658,7 +1719,7 @@ async function logRawRequest(c) {
1658
1719
  const lines = [];
1659
1720
  lines.push(`${colors.magenta}${colors.bold}[DEBUG] Incoming Request${colors.reset}`, `${colors.cyan}${method}${colors.reset} ${url}`, `${colors.dim}Headers:${colors.reset}`);
1660
1721
  for (const [key, value] of Object.entries(headers)) {
1661
- const displayValue = key.toLowerCase().includes("authorization") ? `${value.slice(0, 20)}...` : value;
1722
+ const displayValue = key.toLowerCase().includes("authorization") || key.toLowerCase().includes("api-key") ? `${value.slice(0, 20)}...` : value;
1662
1723
  lines.push(` ${colors.gray}${key}:${colors.reset} ${displayValue}`);
1663
1724
  }
1664
1725
  if (method !== "GET" && method !== "HEAD") try {
@@ -2006,13 +2067,19 @@ const createChatCompletions = async (payload, options) => {
2006
2067
  ...copilotHeaders(state, enableVision),
2007
2068
  "X-Initiator": options?.initiator ?? (isAgentCall ? "agent" : "user")
2008
2069
  };
2070
+ if (payload.tools) for (const tool of payload.tools) {
2071
+ const params = tool.function.parameters;
2072
+ if (!params.type) params.type = "object";
2073
+ if (!params.properties) params.properties = {};
2074
+ }
2009
2075
  const response = await fetchWithRetry(`${copilotBaseUrl(state)}/chat/completions`, {
2010
2076
  method: "POST",
2011
2077
  headers,
2012
2078
  body: JSON.stringify(payload)
2013
2079
  });
2014
2080
  if (!response.ok) {
2015
- consola.error("Failed to create chat completions", response);
2081
+ const errorBody = await response.clone().text();
2082
+ consola.error("Failed to create chat completions", `Status: ${response.status}`, errorBody);
2016
2083
  throw new HTTPError("Failed to create chat completions", response);
2017
2084
  }
2018
2085
  if (payload.stream) return events(response);
@@ -2034,87 +2101,867 @@ async function handleCompletion$1(c) {
2034
2101
  };
2035
2102
  consola.debug("Request payload:", JSON.stringify(payload).slice(-400));
2036
2103
  setRequestContext(c, {
2037
- requestedModel,
2038
- provider: "ChatCompletions",
2039
- model: payload.model,
2040
- replacements: appliedRules,
2041
- reasoningEffort
2104
+ requestedModel,
2105
+ provider: "ChatCompletions",
2106
+ model: payload.model,
2107
+ replacements: appliedRules,
2108
+ reasoningEffort
2109
+ });
2110
+ const selectedModel = state.models?.data.find((model) => model.id === payload.model);
2111
+ try {
2112
+ if (selectedModel) {
2113
+ const tokenCount = await getTokenCount(payload, selectedModel);
2114
+ setRequestContext(c, { inputTokens: tokenCount.input });
2115
+ }
2116
+ } catch (error) {
2117
+ consola.warn("Failed to calculate token count:", error);
2118
+ }
2119
+ if (state.manualApprove) await awaitApproval();
2120
+ if (isNullish(payload.max_tokens)) {
2121
+ payload = {
2122
+ ...payload,
2123
+ max_tokens: selectedModel?.capabilities.limits.max_output_tokens
2124
+ };
2125
+ consola.debug("Set max_tokens to:", JSON.stringify(payload.max_tokens));
2126
+ }
2127
+ const response = await createChatCompletions(payload);
2128
+ if (isNonStreaming$1(response)) {
2129
+ consola.debug("Non-streaming response:", JSON.stringify(response));
2130
+ if (response.usage) setRequestContext(c, {
2131
+ inputTokens: response.usage.prompt_tokens,
2132
+ outputTokens: response.usage.completion_tokens
2133
+ });
2134
+ return c.json(response);
2135
+ }
2136
+ consola.debug("Streaming response");
2137
+ return streamSSE(c, async (stream) => {
2138
+ for await (const chunk of response) {
2139
+ consola.debug("Streaming chunk:", JSON.stringify(chunk));
2140
+ if (chunk.data && chunk.data !== "[DONE]") {
2141
+ const parsed = JSON.parse(chunk.data);
2142
+ if (parsed.usage) setRequestContext(c, {
2143
+ inputTokens: parsed.usage.prompt_tokens,
2144
+ outputTokens: parsed.usage.completion_tokens
2145
+ });
2146
+ }
2147
+ await stream.writeSSE(chunk);
2148
+ }
2149
+ });
2150
+ }
2151
+ const isNonStreaming$1 = (response) => Object.hasOwn(response, "choices");
2152
+
2153
+ //#endregion
2154
+ //#region src/routes/chat-completions/route.ts
2155
+ const completionRoutes = new Hono();
2156
+ completionRoutes.post("/", async (c) => {
2157
+ try {
2158
+ return await handleCompletion$1(c);
2159
+ } catch (error) {
2160
+ return await forwardError(c, error);
2161
+ }
2162
+ });
2163
+
2164
+ //#endregion
2165
+ //#region src/services/copilot/create-embeddings.ts
2166
+ const createEmbeddings = async (payload) => {
2167
+ if (!state.copilotToken) throw new Error("Copilot token not found");
2168
+ const response = await fetchWithRetry(`${copilotBaseUrl(state)}/embeddings`, {
2169
+ method: "POST",
2170
+ headers: copilotHeaders(state),
2171
+ body: JSON.stringify(payload)
2172
+ });
2173
+ if (!response.ok) throw new HTTPError("Failed to create embeddings", response);
2174
+ return await response.json();
2175
+ };
2176
+
2177
+ //#endregion
2178
+ //#region src/routes/embeddings/route.ts
2179
+ const embeddingRoutes = new Hono();
2180
+ embeddingRoutes.post("/", async (c) => {
2181
+ try {
2182
+ const paylod = await c.req.json();
2183
+ const response = await createEmbeddings(paylod);
2184
+ return c.json(response);
2185
+ } catch (error) {
2186
+ return await forwardError(c, error);
2187
+ }
2188
+ });
2189
+
2190
+ //#endregion
2191
+ //#region src/lib/logger.ts
2192
+ const LOG_RETENTION_MS = 10080 * 60 * 1e3;
2193
+ const CLEANUP_INTERVAL_MS = 1440 * 60 * 1e3;
2194
+ const LOG_DIR = path.join(PATHS.APP_DIR, "logs");
2195
+ const FLUSH_INTERVAL_MS = 1e3;
2196
+ const MAX_BUFFER_SIZE = 100;
2197
+ const logStreams = /* @__PURE__ */ new Map();
2198
+ const logBuffers = /* @__PURE__ */ new Map();
2199
+ const ensureLogDirectory = () => {
2200
+ if (!fs$1.existsSync(LOG_DIR)) fs$1.mkdirSync(LOG_DIR, { recursive: true });
2201
+ };
2202
+ const cleanupOldLogs = () => {
2203
+ if (!fs$1.existsSync(LOG_DIR)) return;
2204
+ const now = Date.now();
2205
+ for (const entry of fs$1.readdirSync(LOG_DIR)) {
2206
+ const filePath = path.join(LOG_DIR, entry);
2207
+ let stats;
2208
+ try {
2209
+ stats = fs$1.statSync(filePath);
2210
+ } catch {
2211
+ continue;
2212
+ }
2213
+ if (!stats.isFile()) continue;
2214
+ if (now - stats.mtimeMs > LOG_RETENTION_MS) try {
2215
+ fs$1.rmSync(filePath);
2216
+ } catch {
2217
+ continue;
2218
+ }
2219
+ }
2220
+ };
2221
+ const formatArgs = (args) => args.map((arg) => typeof arg === "string" ? arg : util.inspect(arg, {
2222
+ depth: null,
2223
+ colors: false
2224
+ })).join(" ");
2225
+ const sanitizeName = (name$1) => {
2226
+ const normalized = name$1.toLowerCase().replaceAll(/[^a-z0-9]+/g, "-").replaceAll(/^-+|-+$/g, "");
2227
+ return normalized === "" ? "handler" : normalized;
2228
+ };
2229
+ const getLogStream = (filePath) => {
2230
+ let stream = logStreams.get(filePath);
2231
+ if (!stream || stream.destroyed) {
2232
+ stream = fs$1.createWriteStream(filePath, { flags: "a" });
2233
+ logStreams.set(filePath, stream);
2234
+ stream.on("error", (error) => {
2235
+ console.warn("Log stream error", error);
2236
+ logStreams.delete(filePath);
2237
+ });
2238
+ }
2239
+ return stream;
2240
+ };
2241
+ const flushBuffer = (filePath) => {
2242
+ const buffer = logBuffers.get(filePath);
2243
+ if (!buffer || buffer.length === 0) return;
2244
+ const stream = getLogStream(filePath);
2245
+ const content = buffer.join("\n") + "\n";
2246
+ stream.write(content, (error) => {
2247
+ if (error) console.warn("Failed to write handler log", error);
2248
+ });
2249
+ logBuffers.set(filePath, []);
2250
+ };
2251
+ const flushAllBuffers = () => {
2252
+ for (const filePath of logBuffers.keys()) flushBuffer(filePath);
2253
+ };
2254
+ const appendLine = (filePath, line) => {
2255
+ let buffer = logBuffers.get(filePath);
2256
+ if (!buffer) {
2257
+ buffer = [];
2258
+ logBuffers.set(filePath, buffer);
2259
+ }
2260
+ buffer.push(line);
2261
+ if (buffer.length >= MAX_BUFFER_SIZE) flushBuffer(filePath);
2262
+ };
2263
+ setInterval(flushAllBuffers, FLUSH_INTERVAL_MS);
2264
+ const cleanup = () => {
2265
+ flushAllBuffers();
2266
+ for (const stream of logStreams.values()) stream.end();
2267
+ logStreams.clear();
2268
+ logBuffers.clear();
2269
+ };
2270
+ process.on("exit", cleanup);
2271
+ process.on("SIGINT", () => {
2272
+ cleanup();
2273
+ process.exit(0);
2274
+ });
2275
+ process.on("SIGTERM", () => {
2276
+ cleanup();
2277
+ process.exit(0);
2278
+ });
2279
+ let lastCleanup = 0;
2280
+ const createHandlerLogger = (name$1) => {
2281
+ ensureLogDirectory();
2282
+ const sanitizedName = sanitizeName(name$1);
2283
+ const instance = consola.withTag(name$1);
2284
+ if (state.verbose) instance.level = 5;
2285
+ instance.setReporters([]);
2286
+ instance.addReporter({ log(logObj) {
2287
+ ensureLogDirectory();
2288
+ if (Date.now() - lastCleanup > CLEANUP_INTERVAL_MS) {
2289
+ cleanupOldLogs();
2290
+ lastCleanup = Date.now();
2291
+ }
2292
+ const date = logObj.date;
2293
+ const dateKey = date.toLocaleDateString("sv-SE");
2294
+ const timestamp = date.toLocaleString("sv-SE", { hour12: false });
2295
+ const filePath = path.join(LOG_DIR, `${sanitizedName}-${dateKey}.log`);
2296
+ const message = formatArgs(logObj.args);
2297
+ const line = `[${timestamp}] [${logObj.type}] [${logObj.tag || name$1}]${message ? ` ${message}` : ""}`;
2298
+ appendLine(filePath, line);
2299
+ } });
2300
+ return instance;
2301
+ };
2302
+
2303
+ //#endregion
2304
+ //#region src/services/copilot/create-responses.ts
2305
+ const createResponses = async (payload, { vision, initiator }) => {
2306
+ if (!state.copilotToken) throw new Error("Copilot token not found");
2307
+ const headers = {
2308
+ ...copilotHeaders(state, vision),
2309
+ "X-Initiator": initiator
2310
+ };
2311
+ payload.service_tier = null;
2312
+ const response = await fetch(`${copilotBaseUrl(state)}/responses`, {
2313
+ method: "POST",
2314
+ headers,
2315
+ body: JSON.stringify(payload)
2316
+ });
2317
+ if (!response.ok) {
2318
+ consola.error("Failed to create responses", response);
2319
+ throw new HTTPError("Failed to create responses", response);
2320
+ }
2321
+ if (payload.stream) return events(response);
2322
+ return await response.json();
2323
+ };
2324
+
2325
+ //#endregion
2326
+ //#region src/routes/google-ai/request-translation.ts
2327
+ let toolCallCounter = 0;
2328
+ function nextToolCallId() {
2329
+ return `call_${Date.now()}_${toolCallCounter++}`;
2330
+ }
2331
+ function isTextPart(part) {
2332
+ return "text" in part;
2333
+ }
2334
+ function isFunctionCallPart(part) {
2335
+ return "functionCall" in part;
2336
+ }
2337
+ function isFunctionResponsePart(part) {
2338
+ return "functionResponse" in part;
2339
+ }
2340
+ /**
2341
+ * Convert Google contents array → OpenAI messages array.
2342
+ */
2343
+ function translateContents(contents) {
2344
+ const messages = [];
2345
+ for (const content of contents) if (content.role === "user") {
2346
+ const functionResponses = content.parts.filter((p) => isFunctionResponsePart(p));
2347
+ const otherParts = content.parts.filter((p) => !isFunctionResponsePart(p));
2348
+ for (const part of functionResponses) messages.push({
2349
+ role: "tool",
2350
+ tool_call_id: findToolCallId(messages, part.functionResponse.name),
2351
+ content: JSON.stringify(part.functionResponse.response)
2352
+ });
2353
+ if (otherParts.length > 0) {
2354
+ const textContent = otherParts.filter((p) => isTextPart(p)).map((p) => p.text).join("");
2355
+ if (textContent) messages.push({
2356
+ role: "user",
2357
+ content: textContent
2358
+ });
2359
+ }
2360
+ } else {
2361
+ const textParts = content.parts.filter((p) => isTextPart(p));
2362
+ const functionCalls = content.parts.filter((p) => isFunctionCallPart(p));
2363
+ const textContent = textParts.filter((p) => !p.thought).map((p) => p.text).join("");
2364
+ const toolCalls = functionCalls.length > 0 ? functionCalls.map((part) => ({
2365
+ id: nextToolCallId(),
2366
+ type: "function",
2367
+ function: {
2368
+ name: part.functionCall.name,
2369
+ arguments: JSON.stringify(part.functionCall.args)
2370
+ }
2371
+ })) : void 0;
2372
+ messages.push({
2373
+ role: "assistant",
2374
+ content: textContent || null,
2375
+ tool_calls: toolCalls
2376
+ });
2377
+ }
2378
+ return messages;
2379
+ }
2380
+ /**
2381
+ * Find the tool_call_id for a function response by walking back through messages
2382
+ * to find the matching assistant tool_call by function name.
2383
+ */
2384
+ function findToolCallId(messages, functionName) {
2385
+ for (let i = messages.length - 1; i >= 0; i--) {
2386
+ const msg = messages[i];
2387
+ if (msg.role === "assistant" && msg.tool_calls) {
2388
+ const match = msg.tool_calls.find((tc) => tc.function.name === functionName);
2389
+ if (match) return match.id;
2390
+ }
2391
+ }
2392
+ return nextToolCallId();
2393
+ }
2394
+ /**
2395
+ * Convert Google tools → OpenAI tools format.
2396
+ */
2397
+ function translateTools(tools) {
2398
+ if (!tools || tools.length === 0) return void 0;
2399
+ const openAITools = [];
2400
+ for (const tool of tools) if (tool.functionDeclarations) for (const decl of tool.functionDeclarations) openAITools.push({
2401
+ type: "function",
2402
+ function: {
2403
+ name: decl.name,
2404
+ description: decl.description,
2405
+ parameters: normalizeToolParameters(decl.parameters ?? {})
2406
+ }
2407
+ });
2408
+ return openAITools.length > 0 ? openAITools : void 0;
2409
+ }
2410
+ /**
2411
+ * Remove $schema from tool parameters and ensure valid structure.
2412
+ * Copilot requires parameters to have at least { type: "object", properties: {} }.
2413
+ */
2414
+ function normalizeToolParameters(params) {
2415
+ const cleaned = { ...params };
2416
+ delete cleaned.$schema;
2417
+ if (!cleaned.type) cleaned.type = "object";
2418
+ if (!cleaned.properties) cleaned.properties = {};
2419
+ return cleaned;
2420
+ }
2421
+ /**
2422
+ * Convert Google toolConfig → OpenAI tool_choice.
2423
+ */
2424
+ function translateToolChoice(toolConfig) {
2425
+ if (!toolConfig?.functionCallingConfig) return void 0;
2426
+ switch (toolConfig.functionCallingConfig.mode) {
2427
+ case "AUTO": return "auto";
2428
+ case "NONE": return "none";
2429
+ case "ANY": {
2430
+ const allowed = toolConfig.functionCallingConfig.allowedFunctionNames;
2431
+ if (allowed && allowed.length === 1) return {
2432
+ type: "function",
2433
+ function: { name: allowed[0] }
2434
+ };
2435
+ return "required";
2436
+ }
2437
+ default: return;
2438
+ }
2439
+ }
2440
+ /**
2441
+ * Extract system instruction from Google payload as an OpenAI system message.
2442
+ */
2443
+ function translateSystemInstruction(systemInstruction) {
2444
+ if (!systemInstruction?.parts) return void 0;
2445
+ const systemText = systemInstruction.parts.map((p) => p.text).join("\n");
2446
+ if (!systemText) return void 0;
2447
+ return {
2448
+ role: "system",
2449
+ content: systemText
2450
+ };
2451
+ }
2452
+ /**
2453
+ * Map Google generationConfig fields to OpenAI-compatible fields.
2454
+ */
2455
+ function mapGenerationConfigFields(config$1) {
2456
+ if (!config$1) return {};
2457
+ return {
2458
+ max_tokens: config$1.maxOutputTokens,
2459
+ temperature: config$1.temperature,
2460
+ top_p: config$1.topP,
2461
+ stop: config$1.stopSequences,
2462
+ seed: config$1.seed,
2463
+ frequency_penalty: config$1.frequencyPenalty,
2464
+ presence_penalty: config$1.presencePenalty,
2465
+ response_format: config$1.responseMimeType === "application/json" ? { type: "json_object" } : void 0
2466
+ };
2467
+ }
2468
+ /**
2469
+ * Convert Google generationConfig → OpenAI-compatible config fields.
2470
+ */
2471
+ function translateGenerationConfig(config$1, stream) {
2472
+ return {
2473
+ stream,
2474
+ stream_options: stream ? { include_usage: true } : void 0,
2475
+ ...mapGenerationConfigFields(config$1)
2476
+ };
2477
+ }
2478
+ /**
2479
+ * Main translation: Google Generative AI request → OpenAI ChatCompletions payload.
2480
+ */
2481
+ function translateGoogleToOpenAI(googlePayload, model, stream) {
2482
+ const messages = [];
2483
+ const systemMessage = translateSystemInstruction(googlePayload.systemInstruction);
2484
+ if (systemMessage) messages.push(systemMessage);
2485
+ messages.push(...translateContents(googlePayload.contents));
2486
+ return {
2487
+ model,
2488
+ messages,
2489
+ ...translateGenerationConfig(googlePayload.generationConfig, stream),
2490
+ tools: translateTools(googlePayload.tools),
2491
+ tool_choice: translateToolChoice(googlePayload.toolConfig)
2492
+ };
2493
+ }
2494
+
2495
+ //#endregion
2496
+ //#region src/routes/google-ai/response-translation.ts
2497
+ function mapFinishReason(reason) {
2498
+ switch (reason) {
2499
+ case "stop": return "STOP";
2500
+ case "length": return "MAX_TOKENS";
2501
+ case "content_filter": return "SAFETY";
2502
+ case "tool_calls": return "STOP";
2503
+ default: return null;
2504
+ }
2505
+ }
2506
+ function translateUsage(usage) {
2507
+ if (!usage) return void 0;
2508
+ return {
2509
+ promptTokenCount: usage.prompt_tokens,
2510
+ candidatesTokenCount: usage.completion_tokens,
2511
+ totalTokenCount: usage.total_tokens,
2512
+ cachedContentTokenCount: usage.prompt_tokens_details?.cached_tokens ?? void 0
2513
+ };
2514
+ }
2515
+ /**
2516
+ * Parse tool call arguments from JSON string, with fallback to raw string.
2517
+ */
2518
+ function parseToolCallArgs(argsString) {
2519
+ try {
2520
+ return JSON.parse(argsString);
2521
+ } catch {
2522
+ return { raw: argsString };
2523
+ }
2524
+ }
2525
+ /**
2526
+ * Convert OpenAI ChatCompletion response → Google Generative AI response.
2527
+ */
2528
+ function translateOpenAIToGoogle(response) {
2529
+ return {
2530
+ candidates: response.choices.map((choice) => {
2531
+ const parts = [];
2532
+ if (choice.message.content) parts.push({ text: choice.message.content });
2533
+ if (choice.message.tool_calls) for (const toolCall of choice.message.tool_calls) parts.push({ functionCall: {
2534
+ name: toolCall.function.name,
2535
+ args: parseToolCallArgs(toolCall.function.arguments)
2536
+ } });
2537
+ if (parts.length === 0) parts.push({ text: "" });
2538
+ return {
2539
+ content: {
2540
+ role: "model",
2541
+ parts
2542
+ },
2543
+ finishReason: mapFinishReason(choice.finish_reason),
2544
+ index: choice.index
2545
+ };
2546
+ }),
2547
+ usageMetadata: translateUsage(response.usage)
2548
+ };
2549
+ }
2550
+ function createGoogleStreamState() {
2551
+ return {
2552
+ toolCalls: /* @__PURE__ */ new Map(),
2553
+ hasContent: false
2554
+ };
2555
+ }
2556
+ /**
2557
+ * Accumulate incremental tool call deltas into the stream state.
2558
+ */
2559
+ function accumulateToolCallDeltas(toolCallDeltas, streamState) {
2560
+ for (const tc of toolCallDeltas) {
2561
+ const existing = streamState.toolCalls.get(tc.index);
2562
+ if (existing) {
2563
+ if (tc.function?.arguments) existing.arguments += tc.function.arguments;
2564
+ } else streamState.toolCalls.set(tc.index, {
2565
+ name: tc.function?.name ?? "",
2566
+ arguments: tc.function?.arguments ?? ""
2567
+ });
2568
+ }
2569
+ }
2570
+ /**
2571
+ * Emit accumulated tool calls as Google functionCall parts and clear state.
2572
+ */
2573
+ function emitAccumulatedToolCalls(streamState) {
2574
+ const parts = [];
2575
+ for (const [, tc] of streamState.toolCalls) parts.push({ functionCall: {
2576
+ name: tc.name,
2577
+ args: parseToolCallArgs(tc.arguments)
2578
+ } });
2579
+ streamState.toolCalls.clear();
2580
+ return parts;
2581
+ }
2582
+ /**
2583
+ * Build a Google streaming chunk from parts and optional finish/usage info.
2584
+ */
2585
+ function buildStreamChunk(options) {
2586
+ const { parts, finishReason, index, usage } = options;
2587
+ if (parts.length === 0 && finishReason) parts.push({ text: "" });
2588
+ return {
2589
+ candidates: [{
2590
+ content: {
2591
+ role: "model",
2592
+ parts
2593
+ },
2594
+ finishReason: finishReason ? mapFinishReason(finishReason) : null,
2595
+ index
2596
+ }],
2597
+ usageMetadata: usage ? translateUsage(usage) : void 0
2598
+ };
2599
+ }
2600
+ /**
2601
+ * Translate a single OpenAI streaming chunk → Google streaming chunk.
2602
+ * Returns null if the chunk doesn't produce a Google event.
2603
+ */
2604
+ function translateChunkToGoogle(chunk, streamState) {
2605
+ if (chunk.choices.length === 0) {
2606
+ if (chunk.usage) return {
2607
+ candidates: [],
2608
+ usageMetadata: translateUsage(chunk.usage)
2609
+ };
2610
+ return null;
2611
+ }
2612
+ const choice = chunk.choices[0];
2613
+ const parts = [];
2614
+ if (choice.delta.content !== null && choice.delta.content !== void 0) {
2615
+ parts.push({ text: choice.delta.content });
2616
+ streamState.hasContent = true;
2617
+ }
2618
+ if (choice.delta.tool_calls) accumulateToolCallDeltas(choice.delta.tool_calls, streamState);
2619
+ if (choice.finish_reason === "tool_calls") parts.push(...emitAccumulatedToolCalls(streamState));
2620
+ if (parts.length === 0 && !choice.finish_reason) return null;
2621
+ return buildStreamChunk({
2622
+ parts,
2623
+ finishReason: choice.finish_reason,
2624
+ index: choice.index,
2625
+ usage: chunk.usage
2626
+ });
2627
+ }
2628
+ /**
2629
+ * Map Responses API status → Google finish reason.
2630
+ */
2631
+ function mapResponsesFinishReason(status, incompleteDetails) {
2632
+ if (status === "completed") return "STOP";
2633
+ if (status === "incomplete") {
2634
+ if (incompleteDetails?.reason === "max_output_tokens") return "MAX_TOKENS";
2635
+ if (incompleteDetails?.reason === "content_filter") return "SAFETY";
2636
+ return "MAX_TOKENS";
2637
+ }
2638
+ if (status === "failed") return "OTHER";
2639
+ return null;
2640
+ }
2641
+ /**
2642
+ * Translate Responses API usage → Google usage metadata.
2643
+ */
2644
+ function translateResponsesUsage(usage) {
2645
+ if (!usage) return void 0;
2646
+ return {
2647
+ promptTokenCount: usage.input_tokens,
2648
+ candidatesTokenCount: usage.output_tokens,
2649
+ totalTokenCount: usage.total_tokens,
2650
+ cachedContentTokenCount: usage.input_tokens_details?.cached_tokens ?? void 0
2651
+ };
2652
+ }
2653
+ /**
2654
+ * Type guard for ResponseOutputText blocks.
2655
+ */
2656
+ function isOutputTextBlock(block) {
2657
+ return typeof block === "object" && block !== null && "type" in block && block.type === "output_text";
2658
+ }
2659
+ /**
2660
+ * Convert Responses API result → Google Generative AI response (non-streaming).
2661
+ */
2662
+ function translateResponsesResultToGoogle(result) {
2663
+ const parts = [];
2664
+ for (const item of result.output) if (item.type === "message" && item.content) {
2665
+ for (const block of item.content) if (isOutputTextBlock(block)) parts.push({ text: block.text });
2666
+ } else if (item.type === "function_call") {
2667
+ const funcCall = item;
2668
+ parts.push({ functionCall: {
2669
+ name: funcCall.name,
2670
+ args: parseToolCallArgs(funcCall.arguments)
2671
+ } });
2672
+ }
2673
+ if (parts.length === 0) parts.push({ text: result.output_text || "" });
2674
+ const finishReason = mapResponsesFinishReason(result.status, result.incomplete_details);
2675
+ return {
2676
+ candidates: [{
2677
+ content: {
2678
+ role: "model",
2679
+ parts
2680
+ },
2681
+ finishReason,
2682
+ index: 0
2683
+ }],
2684
+ usageMetadata: translateResponsesUsage(result.usage)
2685
+ };
2686
+ }
2687
+ /**
2688
+ * Translate a single Responses API stream event → Google streaming chunk.
2689
+ * Returns null if the event doesn't produce a Google event.
2690
+ */
2691
+ function translateResponsesStreamEventToGoogle(event, _streamState) {
2692
+ switch (event.type) {
2693
+ case "response.output_text.delta": return { candidates: [{
2694
+ content: {
2695
+ role: "model",
2696
+ parts: [{ text: event.delta }]
2697
+ },
2698
+ finishReason: null,
2699
+ index: 0
2700
+ }] };
2701
+ case "response.function_call_arguments.done": return { candidates: [{
2702
+ content: {
2703
+ role: "model",
2704
+ parts: [{ functionCall: {
2705
+ name: event.name,
2706
+ args: parseToolCallArgs(event.arguments)
2707
+ } }]
2708
+ },
2709
+ finishReason: null,
2710
+ index: 0
2711
+ }] };
2712
+ case "response.completed":
2713
+ case "response.incomplete": return {
2714
+ candidates: [{
2715
+ content: {
2716
+ role: "model",
2717
+ parts: [{ text: "" }]
2718
+ },
2719
+ finishReason: mapResponsesFinishReason(event.response.status, event.response.incomplete_details),
2720
+ index: 0
2721
+ }],
2722
+ usageMetadata: translateResponsesUsage(event.response.usage)
2723
+ };
2724
+ default: return null;
2725
+ }
2726
+ }
2727
+
2728
+ //#endregion
2729
+ //#region src/routes/google-ai/handler.ts
2730
+ const logger$2 = createHandlerLogger("google-ai-handler");
2731
+ const RESPONSES_ENDPOINT$2 = "/responses";
2732
+ /**
2733
+ * Parse model name and action from the URL path segment.
2734
+ * e.g. "gemini-3-flash-preview:streamGenerateContent" → { model: "gemini-3-flash-preview", action: "streamGenerateContent" }
2735
+ */
2736
+ function parseModelAction(modelAction) {
2737
+ const colonIdx = modelAction.lastIndexOf(":");
2738
+ if (colonIdx === -1) return {
2739
+ model: modelAction,
2740
+ action: "generateContent"
2741
+ };
2742
+ return {
2743
+ model: modelAction.slice(0, colonIdx),
2744
+ action: modelAction.slice(colonIdx + 1)
2745
+ };
2746
+ }
2747
+ /**
2748
+ * Cap max_tokens at the model's advertised limit to prevent 400 errors.
2749
+ */
2750
+ function capMaxTokens(payload, selectedModel) {
2751
+ const maxAllowed = selectedModel?.capabilities.limits.max_output_tokens;
2752
+ if (!maxAllowed) return;
2753
+ if (isNullish(payload.max_tokens)) payload.max_tokens = maxAllowed;
2754
+ else if (payload.max_tokens > maxAllowed) {
2755
+ consola.debug(`Capping max_tokens from ${payload.max_tokens} to ${maxAllowed} for ${payload.model}`);
2756
+ payload.max_tokens = maxAllowed;
2757
+ }
2758
+ }
2759
/**
 * Handles a Google AI (Gemini) `generateContent` / `streamGenerateContent`
 * request: translates the Google payload into an OpenAI-style payload,
 * applies configured replacements, then dispatches to either the Responses
 * API or the ChatCompletions API depending on what the selected model
 * supports.
 *
 * @param c Hono request context; expects a `:modelAction` path param of the
 *          form "model:action" and a Google-style JSON body.
 * @returns A Hono JSON or SSE response in Google AI wire format.
 */
async function handleGoogleAI(c) {
	await checkRateLimit(state);
	const modelAction = c.req.param("modelAction");
	if (!modelAction) return c.json({ error: {
		code: 400,
		message: "Missing model and action in URL path",
		status: "INVALID_ARGUMENT"
	} }, 400);
	const { model: rawModel, action } = parseModelAction(modelAction);
	const isStream = action === "streamGenerateContent";
	const model = normalizeModelName(rawModel);
	logger$2.debug(`Google AI request: model=${model}, action=${action}`);
	const googlePayload = await c.req.json();
	logger$2.debug("Google AI request payload:", JSON.stringify(googlePayload));
	const openAIPayload = translateGoogleToOpenAI(googlePayload, model, isStream);
	// Replacement rules may rewrite the model name, so re-normalize afterwards.
	const { payload: replacedPayload, appliedRules } = await applyReplacementsToPayload(openAIPayload);
	const finalPayload = {
		...replacedPayload,
		model: normalizeModelName(replacedPayload.model)
	};
	const selectedModel = state.models?.data.find((m) => m.id === finalPayload.model);
	const useResponsesApi = selectedModel?.supported_endpoints?.includes(RESPONSES_ENDPOINT$2) ?? false;
	setRequestContext(c, {
		requestedModel: rawModel,
		model: finalPayload.model,
		provider: useResponsesApi ? "GoogleAI→Responses" : "GoogleAI→ChatCompletions",
		replacements: appliedRules
	});
	try {
		if (selectedModel) {
			const tokenCount = await getTokenCount(finalPayload, selectedModel);
			setRequestContext(c, { inputTokens: tokenCount.input });
		}
	} catch (error) {
		// Token counting is best-effort only; log instead of swallowing silently
		// so failures remain diagnosable without failing the request.
		logger$2.debug("Failed to calculate token count:", error);
	}
	if (state.manualApprove) await awaitApproval();
	capMaxTokens(finalPayload, selectedModel);
	consola.debug(`[google-ai] Translated payload: model=${finalPayload.model}, max_tokens=${finalPayload.max_tokens}, stream=${finalPayload.stream}, tools=${finalPayload.tools?.length ?? 0}, messages=${finalPayload.messages.length}`);
	logger$2.debug("Translated OpenAI payload:", JSON.stringify(finalPayload));
	if (useResponsesApi) {
		consola.debug(`[google-ai] Using Responses API for ${finalPayload.model}`);
		return handleWithResponsesApi$1(c, finalPayload, isStream);
	}
	consola.debug(`[google-ai] Using ChatCompletions API for ${finalPayload.model}`);
	return handleWithChatCompletions$1(c, finalPayload);
}
2804
/**
 * Executes a translated OpenAI payload against the Copilot ChatCompletions
 * endpoint and converts the result back to Google AI wire format.
 *
 * Non-streaming responses are translated in one shot; streaming responses are
 * re-emitted as SSE, translating each chunk via `translateChunkToGoogle` and
 * accumulating per-stream state in `createGoogleStreamState()`.
 *
 * @param c            Hono request context (used for SSE and usage tracking).
 * @param finalPayload OpenAI ChatCompletions payload to send upstream.
 * @returns JSON response or SSE stream in Google AI format.
 */
async function handleWithChatCompletions$1(c, finalPayload) {
	const response = await createChatCompletions(finalPayload);
	if (isNonStreamingCC(response)) {
		logger$2.debug("Non-streaming response from Copilot:", JSON.stringify(response));
		if (response.usage) setRequestContext(c, {
			inputTokens: response.usage.prompt_tokens,
			outputTokens: response.usage.completion_tokens
		});
		const googleResponse = translateOpenAIToGoogle(response);
		return c.json(googleResponse);
	}
	logger$2.debug("Streaming response from Copilot");
	return streamSSE(c, async (stream) => {
		const streamState = createGoogleStreamState();
		for await (const rawEvent of response) {
			logger$2.debug("Copilot raw stream event:", JSON.stringify(rawEvent));
			// "[DONE]" is the upstream end-of-stream sentinel; stop forwarding.
			if (rawEvent.data === "[DONE]") break;
			if (!rawEvent.data) continue;
			const chunk = JSON.parse(rawEvent.data);
			if (chunk.usage) setRequestContext(c, {
				inputTokens: chunk.usage.prompt_tokens,
				outputTokens: chunk.usage.completion_tokens
			});
			// Translation may return null for chunks with nothing to emit.
			const googleChunk = translateChunkToGoogle(chunk, streamState);
			if (googleChunk) await stream.writeSSE({ data: JSON.stringify(googleChunk) });
		}
	});
}
2084
- const isNonStreaming$1 = (response) => Object.hasOwn(response, "choices");
2085
-
2086
- //#endregion
2087
- //#region src/routes/chat-completions/route.ts
2088
- const completionRoutes = new Hono();
2089
- completionRoutes.post("/", async (c) => {
2090
- try {
2091
- return await handleCompletion$1(c);
2092
- } catch (error) {
2093
- return await forwardError(c, error);
2832
+ /**
2833
+ * Convert an OpenAI ChatCompletions payload to a Responses API payload.
2834
+ */
2835
+ function openAIPayloadToResponses(payload) {
2836
+ const instructions = payload.messages.filter((m) => m.role === "system").map((m) => typeof m.content === "string" ? m.content : "").join("\n") || null;
2837
+ const input = convertMessagesToInput(payload.messages);
2838
+ const tools = convertToolsToResponses(payload.tools);
2839
+ const toolChoice = convertToolChoiceToResponses(payload.tool_choice);
2840
+ return {
2841
+ model: payload.model,
2842
+ input,
2843
+ instructions,
2844
+ temperature: payload.temperature ?? null,
2845
+ top_p: payload.top_p ?? null,
2846
+ max_output_tokens: payload.max_tokens ?? null,
2847
+ tools,
2848
+ tool_choice: toolChoice,
2849
+ stream: payload.stream ?? null,
2850
+ store: false,
2851
+ parallel_tool_calls: true
2852
+ };
2853
+ }
2854
+ /**
2855
+ * Convert OpenAI messages to Responses API input items.
2856
+ */
2857
+ function convertMessagesToInput(messages) {
2858
+ const input = [];
2859
+ for (const msg of messages) {
2860
+ if (msg.role === "system") continue;
2861
+ switch (msg.role) {
2862
+ case "user":
2863
+ input.push(createResponseMessage("user", typeof msg.content === "string" ? msg.content : ""));
2864
+ break;
2865
+ case "assistant":
2866
+ if (msg.content) input.push(createResponseMessage("assistant", typeof msg.content === "string" ? msg.content : ""));
2867
+ if (msg.tool_calls) for (const tc of msg.tool_calls) input.push({
2868
+ type: "function_call",
2869
+ call_id: tc.id,
2870
+ name: tc.function.name,
2871
+ arguments: tc.function.arguments,
2872
+ status: "completed"
2873
+ });
2874
+ break;
2875
+ case "tool":
2876
+ input.push({
2877
+ type: "function_call_output",
2878
+ call_id: msg.tool_call_id ?? "",
2879
+ output: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
2880
+ });
2881
+ break;
2882
+ }
2094
2883
  }
2095
- });
2096
-
2097
- //#endregion
2098
- //#region src/services/copilot/create-embeddings.ts
2099
- const createEmbeddings = async (payload) => {
2100
- if (!state.copilotToken) throw new Error("Copilot token not found");
2101
- const response = await fetchWithRetry(`${copilotBaseUrl(state)}/embeddings`, {
2102
- method: "POST",
2103
- headers: copilotHeaders(state),
2104
- body: JSON.stringify(payload)
2884
+ return input;
2885
+ }
2886
/**
 * Convert OpenAI tools to Responses API tools.
 *
 * @param tools Optional array of OpenAI function tools.
 * @returns Responses API tool definitions, or `null` when no tools are given.
 */
function convertToolsToResponses(tools) {
	return tools?.map((t) => ({
		type: "function",
		name: t.function.name,
		description: t.function.description ?? null,
		parameters: t.function.parameters,
		// `strict` is disabled: upstream schemas are not guaranteed to be
		// strict-mode compatible.
		strict: false
	})) ?? null;
}
2898
/**
 * Convert OpenAI tool_choice to Responses API tool_choice.
 *
 * Strings ("auto" | "none" | "required") pass through unchanged; an OpenAI
 * `{ type: "function", function: { name } }` object is flattened to the
 * Responses `{ type: "function", name }` shape; anything else defaults to
 * "auto".
 *
 * @param toolChoice OpenAI tool_choice value (string, object, or undefined).
 * @returns Responses API tool_choice value.
 */
function convertToolChoiceToResponses(toolChoice) {
	if (typeof toolChoice === "string") return toolChoice;
	if (toolChoice && typeof toolChoice === "object" && "function" in toolChoice) return {
		type: "function",
		name: toolChoice.function.name
	};
	return "auto";
}
2909
/**
 * Builds a Responses API "message" input item.
 *
 * @param role    "user" or "assistant".
 * @param content Plain-text message content.
 * @returns A `{ type: "message", role, content }` item.
 */
function createResponseMessage(role, content) {
	return {
		type: "message",
		role,
		content
	};
}
2916
/**
 * Executes a translated OpenAI payload against the Copilot Responses API and
 * converts the result back to Google AI wire format.
 *
 * Falls back to the non-streaming path when the caller did not request a
 * stream OR when the upstream response is not async-iterable (defensive: the
 * upstream may return a plain result even for stream requests).
 *
 * @param c        Hono request context (used for SSE and usage tracking).
 * @param payload  OpenAI ChatCompletions payload to convert and send.
 * @param isStream Whether the client requested `streamGenerateContent`.
 * @returns JSON response or SSE stream in Google AI format.
 */
async function handleWithResponsesApi$1(c, payload, isStream) {
	const responsesPayload = openAIPayloadToResponses(payload);
	logger$2.debug("Translated Responses payload:", JSON.stringify(responsesPayload));
	const response = await createResponses(responsesPayload, {
		vision: false,
		initiator: "user"
	});
	if (!isStream || !isAsyncIterable$2(response)) {
		const result = response;
		// Only the tail is logged to keep large results out of the log file.
		logger$2.debug("Non-streaming Responses result:", JSON.stringify(result).slice(-400));
		if (result.usage) setRequestContext(c, {
			inputTokens: result.usage.input_tokens,
			outputTokens: result.usage.output_tokens
		});
		const googleResponse = translateResponsesResultToGoogle(result);
		return c.json(googleResponse);
	}
	logger$2.debug("Streaming response from Copilot (Responses API)");
	return streamSSE(c, async (stream) => {
		const streamState = createGoogleStreamState();
		for await (const chunk of response) {
			if (chunk.event === "ping") continue;
			const data = chunk.data;
			if (!data) continue;
			logger$2.debug("Responses raw stream event:", data);
			const parsed = JSON.parse(data);
			const googleChunk = translateResponsesStreamEventToGoogle(parsed, streamState);
			if (googleChunk) {
				// Terminal events carry the final usage totals.
				if (parsed.type === "response.completed" || parsed.type === "response.incomplete") {
					const usage = parsed.response.usage;
					if (usage) setRequestContext(c, {
						inputTokens: usage.input_tokens,
						outputTokens: usage.output_tokens
					});
				}
				await stream.writeSSE({ data: JSON.stringify(googleChunk) });
			}
		}
	});
}
2956
// A non-streaming ChatCompletions result is distinguished by its own
// `choices` property (streaming responses are SSE event iterables instead).
const isNonStreamingCC = (response) => Object.hasOwn(response, "choices");
// True when the value can be consumed with `for await` (has an async iterator).
const isAsyncIterable$2 = (value) => Boolean(value) && typeof value[Symbol.asyncIterator] === "function";
2109
2958
 
2110
2959
  //#endregion
2111
- //#region src/routes/embeddings/route.ts
2112
- const embeddingRoutes = new Hono();
2113
- embeddingRoutes.post("/", async (c) => {
2960
+ //#region src/routes/google-ai/route.ts
2961
+ const googleAIRoutes = new Hono();
2962
+ googleAIRoutes.post("/:modelAction", async (c) => {
2114
2963
  try {
2115
- const paylod = await c.req.json();
2116
- const response = await createEmbeddings(paylod);
2117
- return c.json(response);
2964
+ return await handleGoogleAI(c);
2118
2965
  } catch (error) {
2119
2966
  return await forwardError(c, error);
2120
2967
  }
@@ -2364,141 +3211,6 @@ async function handleCountTokens(c) {
2364
3211
  }
2365
3212
  }
2366
3213
 
2367
- //#endregion
2368
- //#region src/lib/logger.ts
2369
- const LOG_RETENTION_MS = 10080 * 60 * 1e3;
2370
- const CLEANUP_INTERVAL_MS = 1440 * 60 * 1e3;
2371
- const LOG_DIR = path.join(PATHS.APP_DIR, "logs");
2372
- const FLUSH_INTERVAL_MS = 1e3;
2373
- const MAX_BUFFER_SIZE = 100;
2374
- const logStreams = /* @__PURE__ */ new Map();
2375
- const logBuffers = /* @__PURE__ */ new Map();
2376
- const ensureLogDirectory = () => {
2377
- if (!fs$1.existsSync(LOG_DIR)) fs$1.mkdirSync(LOG_DIR, { recursive: true });
2378
- };
2379
- const cleanupOldLogs = () => {
2380
- if (!fs$1.existsSync(LOG_DIR)) return;
2381
- const now = Date.now();
2382
- for (const entry of fs$1.readdirSync(LOG_DIR)) {
2383
- const filePath = path.join(LOG_DIR, entry);
2384
- let stats;
2385
- try {
2386
- stats = fs$1.statSync(filePath);
2387
- } catch {
2388
- continue;
2389
- }
2390
- if (!stats.isFile()) continue;
2391
- if (now - stats.mtimeMs > LOG_RETENTION_MS) try {
2392
- fs$1.rmSync(filePath);
2393
- } catch {
2394
- continue;
2395
- }
2396
- }
2397
- };
2398
- const formatArgs = (args) => args.map((arg) => typeof arg === "string" ? arg : util.inspect(arg, {
2399
- depth: null,
2400
- colors: false
2401
- })).join(" ");
2402
- const sanitizeName = (name$1) => {
2403
- const normalized = name$1.toLowerCase().replaceAll(/[^a-z0-9]+/g, "-").replaceAll(/^-+|-+$/g, "");
2404
- return normalized === "" ? "handler" : normalized;
2405
- };
2406
- const getLogStream = (filePath) => {
2407
- let stream = logStreams.get(filePath);
2408
- if (!stream || stream.destroyed) {
2409
- stream = fs$1.createWriteStream(filePath, { flags: "a" });
2410
- logStreams.set(filePath, stream);
2411
- stream.on("error", (error) => {
2412
- console.warn("Log stream error", error);
2413
- logStreams.delete(filePath);
2414
- });
2415
- }
2416
- return stream;
2417
- };
2418
- const flushBuffer = (filePath) => {
2419
- const buffer = logBuffers.get(filePath);
2420
- if (!buffer || buffer.length === 0) return;
2421
- const stream = getLogStream(filePath);
2422
- const content = buffer.join("\n") + "\n";
2423
- stream.write(content, (error) => {
2424
- if (error) console.warn("Failed to write handler log", error);
2425
- });
2426
- logBuffers.set(filePath, []);
2427
- };
2428
- const flushAllBuffers = () => {
2429
- for (const filePath of logBuffers.keys()) flushBuffer(filePath);
2430
- };
2431
- const appendLine = (filePath, line) => {
2432
- let buffer = logBuffers.get(filePath);
2433
- if (!buffer) {
2434
- buffer = [];
2435
- logBuffers.set(filePath, buffer);
2436
- }
2437
- buffer.push(line);
2438
- if (buffer.length >= MAX_BUFFER_SIZE) flushBuffer(filePath);
2439
- };
2440
- setInterval(flushAllBuffers, FLUSH_INTERVAL_MS);
2441
- const cleanup = () => {
2442
- flushAllBuffers();
2443
- for (const stream of logStreams.values()) stream.end();
2444
- logStreams.clear();
2445
- logBuffers.clear();
2446
- };
2447
- process.on("exit", cleanup);
2448
- process.on("SIGINT", () => {
2449
- cleanup();
2450
- process.exit(0);
2451
- });
2452
- process.on("SIGTERM", () => {
2453
- cleanup();
2454
- process.exit(0);
2455
- });
2456
- let lastCleanup = 0;
2457
- const createHandlerLogger = (name$1) => {
2458
- ensureLogDirectory();
2459
- const sanitizedName = sanitizeName(name$1);
2460
- const instance = consola.withTag(name$1);
2461
- if (state.verbose) instance.level = 5;
2462
- instance.setReporters([]);
2463
- instance.addReporter({ log(logObj) {
2464
- ensureLogDirectory();
2465
- if (Date.now() - lastCleanup > CLEANUP_INTERVAL_MS) {
2466
- cleanupOldLogs();
2467
- lastCleanup = Date.now();
2468
- }
2469
- const date = logObj.date;
2470
- const dateKey = date.toLocaleDateString("sv-SE");
2471
- const timestamp = date.toLocaleString("sv-SE", { hour12: false });
2472
- const filePath = path.join(LOG_DIR, `${sanitizedName}-${dateKey}.log`);
2473
- const message = formatArgs(logObj.args);
2474
- const line = `[${timestamp}] [${logObj.type}] [${logObj.tag || name$1}]${message ? ` ${message}` : ""}`;
2475
- appendLine(filePath, line);
2476
- } });
2477
- return instance;
2478
- };
2479
-
2480
- //#endregion
2481
- //#region src/services/copilot/create-responses.ts
2482
- const createResponses = async (payload, { vision, initiator }) => {
2483
- if (!state.copilotToken) throw new Error("Copilot token not found");
2484
- const headers = {
2485
- ...copilotHeaders(state, vision),
2486
- "X-Initiator": initiator
2487
- };
2488
- payload.service_tier = null;
2489
- const response = await fetch(`${copilotBaseUrl(state)}/responses`, {
2490
- method: "POST",
2491
- headers,
2492
- body: JSON.stringify(payload)
2493
- });
2494
- if (!response.ok) {
2495
- consola.error("Failed to create responses", response);
2496
- throw new HTTPError("Failed to create responses", response);
2497
- }
2498
- if (payload.stream) return events(response);
2499
- return await response.json();
2500
- };
2501
-
2502
3214
  //#endregion
2503
3215
  //#region src/routes/messages/responses-translation.ts
2504
3216
  const MESSAGE_TYPE = "message";
@@ -4047,21 +4759,6 @@ responsesRoutes.post("/", async (c) => {
4047
4759
  }
4048
4760
  });
4049
4761
 
4050
- //#endregion
4051
- //#region src/routes/token/route.ts
4052
- const tokenRoute = new Hono();
4053
- tokenRoute.get("/", (c) => {
4054
- try {
4055
- return c.json({ token: state.copilotToken });
4056
- } catch (error) {
4057
- console.error("Error fetching token:", error);
4058
- return c.json({
4059
- error: "Failed to fetch token",
4060
- token: null
4061
- }, 500);
4062
- }
4063
- });
4064
-
4065
4762
  //#endregion
4066
4763
  //#region src/routes/usage/route.ts
4067
4764
  const usageRoute = new Hono();
@@ -4087,7 +4784,6 @@ server.route("/chat/completions", completionRoutes);
4087
4784
  server.route("/models", modelRoutes);
4088
4785
  server.route("/embeddings", embeddingRoutes);
4089
4786
  server.route("/usage", usageRoute);
4090
- server.route("/token", tokenRoute);
4091
4787
  server.route("/replacements", replacementsRoute);
4092
4788
  server.route("/responses", responsesRoutes);
4093
4789
  server.route("/v1/chat/completions", completionRoutes);
@@ -4095,6 +4791,8 @@ server.route("/v1/models", modelRoutes);
4095
4791
  server.route("/v1/embeddings", embeddingRoutes);
4096
4792
  server.route("/v1/responses", responsesRoutes);
4097
4793
  server.route("/v1/messages", messageRoutes);
4794
+ server.route("/v1/models", googleAIRoutes);
4795
+ server.route("/models", googleAIRoutes);
4098
4796
 
4099
4797
  //#endregion
4100
4798
  //#region src/start.ts